author     David Gibson <david@gibson.dropbear.id.au>	2005-06-21 20:14:44 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>	2005-06-21 21:46:15 -0400
commit     63551ae0feaaa23807ebea60de1901564bbef32e (patch)
tree       f6f97f60f83c3e9813bdfcc6039c499997b1ea10 /arch/ia64/mm
parent     1e7e5a9048b30c57ba1ddaa6cdf59b21b65cde99 (diff)
[PATCH] Hugepage consolidation
A lot of the code in arch/*/mm/hugetlbpage.c is quite similar.  This patch
attempts to consolidate a lot of the code across the arches, putting the
combined version in mm/hugetlb.c.  There are a couple of uglyish hacks in
order to convert all the hugepage arches, but the result is a very large
reduction in the total amount of code.  It also means things like hugepage
lazy allocation could be implemented in one place, instead of six.

Tested, at least a little, on ppc64, i386 and x86_64.
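
To make the consolidation concrete, here is a sketch of what the shared
mm/hugetlb.c copy of copy_hugetlb_page_range() might look like, reconstructed
from the ia64 version removed in the diff below.  It is an illustration under
that assumption, not quoted from the patch; in particular, any locking the
generic version adds is omitted here.

```c
/* Sketch: generic copy path, mirroring the removed ia64 code.
 * huge_pte_alloc()/huge_pte_offset() are now the per-arch hooks. */
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		dst_pte = huge_pte_alloc(dst, addr);	/* arch hook */
		if (!dst_pte)
			return -ENOMEM;
		src_pte = huge_pte_offset(src, addr);	/* arch hook */
		entry = *src_pte;
		ptepage = pte_page(entry);
		get_page(ptepage);	/* parent and child share the huge page */
		set_pte(dst_pte, entry);
		add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE);
	}
	return 0;
}
```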
Notes:
- this patch changes the meaning of set_huge_pte() to be more
  analogous to set_pte() (see the sketch below)
- does SH4 need a special huge_ptep_get_and_clear()?
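
A hedged illustration of that first note: the per-arch helper removed below
composed the PTE itself from a page and a write_access flag, whereas a
set_pte()-analogous helper takes a ready-made entry.  Only the old-style call
is taken from the removed ia64 code; the new-style signature is an assumption
for illustration, and mm, vma, page and ptep are assumed to be in scope.

```c
/* Old per-arch convention (see the removal in the diff below):
 * the helper derives the entry from the page and access mode. */
set_huge_pte(mm, vma, page, ptep, vma->vm_flags & VM_WRITE);

/* set_pte()-analogous convention (signature assumed): the caller
 * constructs the pte_t and the helper merely installs it. */
pte_t entry = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
entry = pte_mkyoung(entry);
mk_pte_huge(entry);
set_huge_pte(ptep, entry);
```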
Acked-by: William Lee Irwin <wli@holomorphy.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/ia64/mm')
-rw-r--r--	arch/ia64/mm/hugetlbpage.c | 158
1 file changed, 2 insertions(+), 156 deletions(-)
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index df08ae7634b6..e0a776a3044c 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -24,7 +24,7 @@
 
 unsigned int hpage_shift=HPAGE_SHIFT_DEFAULT;
 
-static pte_t *
+pte_t *
 huge_pte_alloc (struct mm_struct *mm, unsigned long addr)
 {
 	unsigned long taddr = htlbpage_to_page(addr);
@@ -43,7 +43,7 @@ huge_pte_alloc (struct mm_struct *mm, unsigned long addr)
 	return pte;
 }
 
-static pte_t *
+pte_t *
 huge_pte_offset (struct mm_struct *mm, unsigned long addr)
 {
 	unsigned long taddr = htlbpage_to_page(addr);
@@ -67,23 +67,6 @@ huge_pte_offset (struct mm_struct *mm, unsigned long addr)
 
 #define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }
 
-static void
-set_huge_pte (struct mm_struct *mm, struct vm_area_struct *vma,
-	      struct page *page, pte_t * page_table, int write_access)
-{
-	pte_t entry;
-
-	add_mm_counter(mm, rss, HPAGE_SIZE / PAGE_SIZE);
-	if (write_access) {
-		entry =
-		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
-	} else
-		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
-	entry = pte_mkyoung(entry);
-	mk_pte_huge(entry);
-	set_pte(page_table, entry);
-	return;
-}
 /*
  * This function checks for proper alignment of input addr and len parameters.
  */
@@ -99,68 +82,6 @@ int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
 	return 0;
 }
 
-int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
-			struct vm_area_struct *vma)
-{
-	pte_t *src_pte, *dst_pte, entry;
-	struct page *ptepage;
-	unsigned long addr = vma->vm_start;
-	unsigned long end = vma->vm_end;
-
-	while (addr < end) {
-		dst_pte = huge_pte_alloc(dst, addr);
-		if (!dst_pte)
-			goto nomem;
-		src_pte = huge_pte_offset(src, addr);
-		entry = *src_pte;
-		ptepage = pte_page(entry);
-		get_page(ptepage);
-		set_pte(dst_pte, entry);
-		add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE);
-		addr += HPAGE_SIZE;
-	}
-	return 0;
-nomem:
-	return -ENOMEM;
-}
-
-int
-follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
-		    struct page **pages, struct vm_area_struct **vmas,
-		    unsigned long *st, int *length, int i)
-{
-	pte_t *ptep, pte;
-	unsigned long start = *st;
-	unsigned long pstart;
-	int len = *length;
-	struct page *page;
-
-	do {
-		pstart = start & HPAGE_MASK;
-		ptep = huge_pte_offset(mm, start);
-		pte = *ptep;
-
-back1:
-		page = pte_page(pte);
-		if (pages) {
-			page += ((start & ~HPAGE_MASK) >> PAGE_SHIFT);
-			get_page(page);
-			pages[i] = page;
-		}
-		if (vmas)
-			vmas[i] = vma;
-		i++;
-		len--;
-		start += PAGE_SIZE;
-		if (((start & HPAGE_MASK) == pstart) && len &&
-		    (start < vma->vm_end))
-			goto back1;
-	} while (len && start < vma->vm_end);
-	*length = len;
-	*st = start;
-	return i;
-}
-
 struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int write)
 {
 	struct page *page;
@@ -212,81 +133,6 @@ void hugetlb_free_pgd_range(struct mmu_gather **tlb,
 	free_pgd_range(tlb, addr, end, floor, ceiling);
 }
 
-void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
-{
-	struct mm_struct *mm = vma->vm_mm;
-	unsigned long address;
-	pte_t *pte;
-	struct page *page;
-
-	BUG_ON(start & (HPAGE_SIZE - 1));
-	BUG_ON(end & (HPAGE_SIZE - 1));
-
-	for (address = start; address < end; address += HPAGE_SIZE) {
-		pte = huge_pte_offset(mm, address);
-		if (pte_none(*pte))
-			continue;
-		page = pte_page(*pte);
-		put_page(page);
-		pte_clear(mm, address, pte);
-	}
-	add_mm_counter(mm, rss, - ((end - start) >> PAGE_SHIFT));
-	flush_tlb_range(vma, start, end);
-}
-
-int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma)
-{
-	struct mm_struct *mm = current->mm;
-	unsigned long addr;
-	int ret = 0;
-
-	BUG_ON(vma->vm_start & ~HPAGE_MASK);
-	BUG_ON(vma->vm_end & ~HPAGE_MASK);
-
-	spin_lock(&mm->page_table_lock);
-	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
-		unsigned long idx;
-		pte_t *pte = huge_pte_alloc(mm, addr);
-		struct page *page;
-
-		if (!pte) {
-			ret = -ENOMEM;
-			goto out;
-		}
-		if (!pte_none(*pte))
-			continue;
-
-		idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
-			+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));
-		page = find_get_page(mapping, idx);
-		if (!page) {
-			/* charge the fs quota first */
-			if (hugetlb_get_quota(mapping)) {
-				ret = -ENOMEM;
-				goto out;
-			}
-			page = alloc_huge_page();
-			if (!page) {
-				hugetlb_put_quota(mapping);
-				ret = -ENOMEM;
-				goto out;
-			}
-			ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC);
-			if (! ret) {
-				unlock_page(page);
-			} else {
-				hugetlb_put_quota(mapping);
-				page_cache_release(page);
-				goto out;
-			}
-		}
-		set_huge_pte(mm, vma, page, pte, vma->vm_flags & VM_WRITE);
-	}
-out:
-	spin_unlock(&mm->page_table_lock);
-	return ret;
-}
-
 unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
 		unsigned long pgoff, unsigned long flags)
 {
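
Taken together, the two hunks at the top make huge_pte_alloc() and
huge_pte_offset() non-static: after this patch they are the hooks the new
generic mm/hugetlb.c calls into, while copy_hugetlb_page_range(),
follow_hugetlb_page(), unmap_hugepage_range() and hugetlb_prefault() leave the
arch tree.  A minimal sketch of the per-arch interface that remains; where
these declarations live is an assumption, since no header appears in this
diff:

```c
/* Per-arch page-table walkers left behind by the consolidation;
 * everything else is now shared in mm/hugetlb.c.  Declarations as
 * assumed (the declaring header is not shown in this diff). */
pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr);
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr);
```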