author		Ingo Molnar <mingo@elte.hu>	2009-01-10 21:43:52 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-01-10 21:43:52 -0500
commit		99cd7074891f87c49660e3b2880564324a4733ac (patch)
tree		903d2665bcb445f1f265d1adf7a99f265bcefc15 /mm/hugetlb.c
parent		e8a9cbf6ae620d9e5ba9cb42001c033287a284a3 (diff)
parent		c59765042f53a79a7a65585042ff463b69cb248c (diff)
Merge commit 'v2.6.29-rc1' into tracing/urgent
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--	mm/hugetlb.c	46
1 file changed, 39 insertions(+), 7 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 6058b53dcb89..618e98304080 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -220,6 +220,35 @@ static pgoff_t vma_hugecache_offset(struct hstate *h,
 }
 
 /*
+ * Return the size of the pages allocated when backing a VMA. In the majority
+ * of cases this will be the same size as used by the page table entries.
+ */
+unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
+{
+	struct hstate *hstate;
+
+	if (!is_vm_hugetlb_page(vma))
+		return PAGE_SIZE;
+
+	hstate = hstate_vma(vma);
+
+	return 1UL << (hstate->order + PAGE_SHIFT);
+}
+
+/*
+ * Return the page size being used by the MMU to back a VMA. In the majority
+ * of cases, the page size used by the kernel matches the MMU size. On
+ * architectures where it differs, an architecture-specific version of this
+ * function is required.
+ */
+#ifndef vma_mmu_pagesize
+unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
+{
+	return vma_kernel_pagesize(vma);
+}
+#endif
+
+/*
  * Flags for MAP_PRIVATE reservations. These are stored in the bottom
  * bits of the reservation map pointer, which are always clear due to
  * alignment.
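
Two notes on the hunk above. First, the size math: for a 2 MB x86 huge page, hstate->order is 9 and PAGE_SHIFT is 12, so vma_kernel_pagesize() returns 1UL << 21 = 2 MB. Second, the #ifndef guard is a compile-time override hook: the generic vma_mmu_pagesize() is built only if the architecture has not already defined vma_mmu_pagesize as a macro, so an architecture whose MMU backs a VMA with a size other than the kernel's bookkeeping size can supply its own version. A minimal sketch of that override pattern, with hypothetical file and helper names (only the #ifndef hook itself comes from this diff):

	/* arch/example/include/asm/hugetlb.h (hypothetical) */
	#define vma_mmu_pagesize vma_mmu_pagesize	/* suppress the generic version */
	unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);

	/* arch/example/mm/hugetlbpage.c (hypothetical) */
	unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
	{
		/* consult arch-specific per-VMA MMU state instead of the hstate */
		return example_mmu_page_size(vma);	/* hypothetical helper */
	}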
@@ -371,8 +400,10 @@ static void clear_huge_page(struct page *page,
 {
 	int i;
 
-	if (unlikely(sz > MAX_ORDER_NR_PAGES))
-		return clear_gigantic_page(page, addr, sz);
+	if (unlikely(sz > MAX_ORDER_NR_PAGES)) {
+		clear_gigantic_page(page, addr, sz);
+		return;
+	}
 
 	might_sleep();
 	for (i = 0; i < sz/PAGE_SIZE; i++) {
@@ -404,8 +435,10 @@ static void copy_huge_page(struct page *dst, struct page *src,
 	int i;
 	struct hstate *h = hstate_vma(vma);
 
-	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
-		return copy_gigantic_page(dst, src, addr, vma);
+	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
+		copy_gigantic_page(dst, src, addr, vma);
+		return;
+	}
 
 	might_sleep();
 	for (i = 0; i < pages_per_huge_page(h); i++) {
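
The two hunks above make the same change in clear_huge_page() and copy_huge_page(): both used a void function call as the operand of a return statement. GCC accepts returning a void-valued expression from a void function, but ISO C disallows it and sparse, the kernel's static checker, warns about it; presumably that is the motivation, since the call-then-return form is also the usual kernel style. A standalone sketch of the construct being removed (illustrative names, not from this diff):

	void helper(void);

	void caller(int cond)
	{
		if (cond)
			return helper();	/* accepted by GCC, but sparse warns:
						   "returning void-valued expression" */
	}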
@@ -972,7 +1005,7 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 	return page;
 }
 
-__attribute__((weak)) int alloc_bootmem_huge_page(struct hstate *h)
+int __weak alloc_bootmem_huge_page(struct hstate *h)
 {
 	struct huge_bootmem_page *m;
 	int nr_nodes = nodes_weight(node_online_map);
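
__weak is the kernel's shorthand for __attribute__((weak)) from its compiler headers, so this hunk is a style cleanup, not a behavioral change: the symbol stays weak, meaning an architecture can ship its own strong alloc_bootmem_huge_page() and the linker will pick that over this generic fallback. A self-contained sketch of the weak-symbol mechanism, with hypothetical names (compile both files together and the strong definition wins):

	/* generic.c */
	__attribute__((weak)) int setup_feature(void)
	{
		return 0;		/* generic fallback */
	}

	/* arch.c (optional): a strong definition overrides the weak one at link time */
	int setup_feature(void)
	{
		return 1;		/* arch-specific behavior */
	}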
@@ -991,8 +1024,7 @@ __attribute__((weak)) int alloc_bootmem_huge_page(struct hstate *h)
 			 * puts them into the mem_map).
 			 */
 			m = addr;
-			if (m)
-				goto found;
+			goto found;
 		}
 		hstate_next_node(h);
 		nr_nodes--;
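
The NULL test dropped in the last hunk was redundant: m has just been assigned from addr, and the closing brace in the context suggests the assignment sits inside a block that only runs when addr is non-NULL, so the goto can be taken unconditionally. A sketch of the surrounding loop body as reconstructed from the full file of this era (the __alloc_bootmem_node_nopanic() call is believed accurate for this kernel version, but the exact context is an assumption, since it is not visible in the hunk):

	addr = __alloc_bootmem_node_nopanic(
			NODE_DATA(h->hugetlb_next_nid),
			huge_page_size(h), huge_page_size(h), 0);
	if (addr) {
		m = addr;	/* addr is already known non-NULL here... */
		goto found;	/* ...so re-testing it via "if (m)" added nothing */
	}
	hstate_next_node(h);
	nr_nodes--;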