path: root/arch/sparc64
author	David Gibson <david@gibson.dropbear.id.au>	2005-06-21 20:14:44 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-06-21 21:46:15 -0400
commit	63551ae0feaaa23807ebea60de1901564bbef32e (patch)
tree	f6f97f60f83c3e9813bdfcc6039c499997b1ea10 /arch/sparc64
parent	1e7e5a9048b30c57ba1ddaa6cdf59b21b65cde99 (diff)
[PATCH] Hugepage consolidation
A lot of the code in arch/*/mm/hugetlbpage.c is quite similar.  This patch
attempts to consolidate a lot of the code across the arches, putting the
combined version in mm/hugetlb.c.  There are a couple of uglyish hacks in
order to convert all the hugepage arches, but the result is a very large
reduction in the total amount of code.  It also means things like hugepage
lazy allocation could be implemented in one place, instead of six.

Tested, at least a little, on ppc64, i386 and x86_64.

Notes:
	- this patch changes the meaning of set_huge_pte() to be more
	  analogous to set_pte()
	- does SH4 need a special huge_ptep_get_and_clear()??

Acked-by: William Lee Irwin <wli@holomorphy.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
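The interface change called out in the notes is the heart of the
consolidation and is easiest to see as a pair of prototypes.  A minimal
before/after sketch, using the sparc64 signatures from the diff below
(other arches' old helpers varied slightly):

/* Before: each arch-private helper constructed the huge PTE itself
 * from a page plus the access mode. */
static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma,
			 unsigned long addr,
			 struct page *page, pte_t *page_table, int write_access);

/* After: the caller builds the pte_t and the shared arch hook merely
 * installs it, analogous to set_pte_at(). */
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry);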
Diffstat (limited to 'arch/sparc64')
-rw-r--r--	arch/sparc64/mm/hugetlbpage.c	| 195
1 file changed, 24 insertions(+), 171 deletions(-)
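Most of the 171 deleted lines are functions that now live in mm/hugetlb.c
in generic form.  As one illustration of how the generic side can drive the
new hooks, here is a sketch of an unmap path built on huge_pte_offset() and
huge_ptep_get_and_clear(); it mirrors the sparc64 unmap_hugepage_range()
removed below, but is an illustration, not necessarily the exact code this
patch adds to mm/hugetlb.c:

void unmap_hugepage_range(struct vm_area_struct *vma,
			  unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep, pte;

	BUG_ON(start & (HPAGE_SIZE - 1));
	BUG_ON(end & (HPAGE_SIZE - 1));

	for (address = start; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep || pte_none(*ptep))
			continue;
		/* Reads the first PTE back, then clears the whole run of
		 * (1 << HUGETLB_PAGE_ORDER) base PTEs backing the huge page. */
		pte = huge_ptep_get_and_clear(mm, address, ptep);
		put_page(pte_page(pte));
	}
	add_mm_counter(mm, rss, -((end - start) >> PAGE_SHIFT));
	flush_tlb_range(vma, start, end);
}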
diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c
index 5a1f831b2de..625cbb336a2 100644
--- a/arch/sparc64/mm/hugetlbpage.c
+++ b/arch/sparc64/mm/hugetlbpage.c
@@ -22,7 +22,7 @@
 #include <asm/cacheflush.h>
 #include <asm/mmu_context.h>
 
-static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 {
 	pgd_t *pgd;
 	pud_t *pud;
@@ -41,7 +41,7 @@ static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr)
 	return pte;
 }
 
-static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 {
 	pgd_t *pgd;
 	pud_t *pud;
@@ -62,30 +62,34 @@ static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
 
 #define mk_pte_huge(entry) do { pte_val(entry) |= _PAGE_SZHUGE; } while (0)
 
-static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma,
-			 unsigned long addr,
-			 struct page *page, pte_t * page_table, int write_access)
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+		     pte_t *ptep, pte_t entry)
 {
-	unsigned long i;
-	pte_t entry;
+	int i;
+
+	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
+		set_pte_at(mm, addr, ptep, entry);
+		ptep++;
+		addr += PAGE_SIZE;
+		pte_val(entry) += PAGE_SIZE;
+	}
+}
 
-	add_mm_counter(mm, rss, HPAGE_SIZE / PAGE_SIZE);
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+			      pte_t *ptep)
+{
+	pte_t entry;
+	int i;
 
-	if (write_access)
-		entry = pte_mkwrite(pte_mkdirty(mk_pte(page,
-						       vma->vm_page_prot)));
-	else
-		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
-	entry = pte_mkyoung(entry);
-	mk_pte_huge(entry);
+	entry = *ptep;
 
 	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
-		set_pte_at(mm, addr, page_table, entry);
-		page_table++;
+		pte_clear(mm, addr, ptep);
 		addr += PAGE_SIZE;
-
-		pte_val(entry) += PAGE_SIZE;
+		ptep++;
 	}
+
+	return entry;
 }
 
 /*
@@ -100,79 +104,6 @@ int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
 	return 0;
 }
 
-int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
-			    struct vm_area_struct *vma)
-{
-	pte_t *src_pte, *dst_pte, entry;
-	struct page *ptepage;
-	unsigned long addr = vma->vm_start;
-	unsigned long end = vma->vm_end;
-	int i;
-
-	while (addr < end) {
-		dst_pte = huge_pte_alloc(dst, addr);
-		if (!dst_pte)
-			goto nomem;
-		src_pte = huge_pte_offset(src, addr);
-		BUG_ON(!src_pte || pte_none(*src_pte));
-		entry = *src_pte;
-		ptepage = pte_page(entry);
-		get_page(ptepage);
-		for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
-			set_pte_at(dst, addr, dst_pte, entry);
-			pte_val(entry) += PAGE_SIZE;
-			dst_pte++;
-			addr += PAGE_SIZE;
-		}
-		add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE);
-	}
-	return 0;
-
-nomem:
-	return -ENOMEM;
-}
-
-int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
-			struct page **pages, struct vm_area_struct **vmas,
-			unsigned long *position, int *length, int i)
-{
-	unsigned long vaddr = *position;
-	int remainder = *length;
-
-	WARN_ON(!is_vm_hugetlb_page(vma));
-
-	while (vaddr < vma->vm_end && remainder) {
-		if (pages) {
-			pte_t *pte;
-			struct page *page;
-
-			pte = huge_pte_offset(mm, vaddr);
-
-			/* hugetlb should be locked, and hence, prefaulted */
-			BUG_ON(!pte || pte_none(*pte));
-
-			page = pte_page(*pte);
-
-			WARN_ON(!PageCompound(page));
-
-			get_page(page);
-			pages[i] = page;
-		}
-
-		if (vmas)
-			vmas[i] = vma;
-
-		vaddr += PAGE_SIZE;
-		--remainder;
-		++i;
-	}
-
-	*length = remainder;
-	*position = vaddr;
-
-	return i;
-}
-
 struct page *follow_huge_addr(struct mm_struct *mm,
 			      unsigned long address, int write)
 {
@@ -190,34 +121,6 @@ struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 	return NULL;
 }
 
-void unmap_hugepage_range(struct vm_area_struct *vma,
-			  unsigned long start, unsigned long end)
-{
-	struct mm_struct *mm = vma->vm_mm;
-	unsigned long address;
-	pte_t *pte;
-	struct page *page;
-	int i;
-
-	BUG_ON(start & (HPAGE_SIZE - 1));
-	BUG_ON(end & (HPAGE_SIZE - 1));
-
-	for (address = start; address < end; address += HPAGE_SIZE) {
-		pte = huge_pte_offset(mm, address);
-		BUG_ON(!pte);
-		if (pte_none(*pte))
-			continue;
-		page = pte_page(*pte);
-		put_page(page);
-		for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
-			pte_clear(mm, address+(i*PAGE_SIZE), pte);
-			pte++;
-		}
-	}
-	add_mm_counter(mm, rss, -((end - start) >> PAGE_SHIFT));
-	flush_tlb_range(vma, start, end);
-}
-
 static void context_reload(void *__data)
 {
 	struct mm_struct *mm = __data;
@@ -226,12 +129,8 @@ static void context_reload(void *__data)
 	load_secondary_context(mm);
 }
 
-int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
+void hugetlb_prefault_arch_hook(struct mm_struct *mm)
 {
-	struct mm_struct *mm = current->mm;
-	unsigned long addr;
-	int ret = 0;
-
 	/* On UltraSPARC-III+ and later, configure the second half of
 	 * the Data-TLB for huge pages.
 	 */
@@ -261,50 +160,4 @@ int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
 		}
 		spin_unlock(&ctx_alloc_lock);
 	}
-
-	BUG_ON(vma->vm_start & ~HPAGE_MASK);
-	BUG_ON(vma->vm_end & ~HPAGE_MASK);
-
-	spin_lock(&mm->page_table_lock);
-	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
-		unsigned long idx;
-		pte_t *pte = huge_pte_alloc(mm, addr);
-		struct page *page;
-
-		if (!pte) {
-			ret = -ENOMEM;
-			goto out;
-		}
-		if (!pte_none(*pte))
-			continue;
-
-		idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
-			+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
-		page = find_get_page(mapping, idx);
-		if (!page) {
-			/* charge the fs quota first */
-			if (hugetlb_get_quota(mapping)) {
-				ret = -ENOMEM;
-				goto out;
-			}
-			page = alloc_huge_page();
-			if (!page) {
-				hugetlb_put_quota(mapping);
-				ret = -ENOMEM;
-				goto out;
-			}
-			ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
-			if (! ret) {
-				unlock_page(page);
-			} else {
-				hugetlb_put_quota(mapping);
-				free_huge_page(page);
-				goto out;
-			}
-		}
-		set_huge_pte(mm, vma, addr, page, pte, vma->vm_flags & VM_WRITE);
-	}
-out:
-	spin_unlock(&mm->page_table_lock);
-	return ret;
 }
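For the copy side, the per-arch copy_hugetlb_page_range() deleted above can
likewise be expressed once in terms of the exported hooks.  A sketch, with
the loop structure following the per-arch versions being deleted (an
illustration, not necessarily the exact mm/hugetlb.c code this patch adds):

int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			return -ENOMEM;
		src_pte = huge_pte_offset(src, addr);
		BUG_ON(!src_pte || pte_none(*src_pte));
		entry = *src_pte;
		ptepage = pte_page(entry);
		get_page(ptepage);	/* one reference per huge page */
		add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE);
		/* set_huge_pte_at() fans the entry out across all base PTEs. */
		set_huge_pte_at(dst, addr, dst_pte, entry);
	}
	return 0;
}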