author		David Gibson <david@gibson.dropbear.id.au>	2006-03-22 03:08:56 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>		2006-03-22 10:54:03 -0500
commit		27a85ef1b81300cfff06b4c8037e9914dfb09acc
tree		1a8d9bc4c5611542273e57fc2606c12905906a57
parent		b45b5bd65f668a665db40d093e4e1fe563533608
[PATCH] hugepage: Make {alloc,free}_huge_page() local
Originally, mm/hugetlb.c just handled the hugepage physical allocation path,
and its {alloc,free}_huge_page() functions were used from the arch-specific
hugepage code.  These days, those functions are only used within mm/hugetlb.c
itself.  Therefore, this patch makes them static and removes their prototypes
from hugetlb.h.  This requires a small rearrangement of code in mm/hugetlb.c
to avoid a forward declaration.
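The C-level reason for that rearrangement is plain definition ordering: once
the helpers are static, they need no prototypes in hugetlb.h, but each must
then be defined before its first use within the file.  A minimal stand-alone
sketch of the pattern follows (hypothetical names, not the kernel code):

	#include <stdlib.h>

	struct obj { int id; };

	/* Defined above its only caller, so no forward declaration is
	 * needed; 'static' keeps it out of any shared header
	 * (cf. free_huge_page()). */
	static void release_obj(struct obj *o)
	{
		free(o);
	}

	/* Hypothetical stand-in for alloc_huge_page(): also static, also
	 * only callable from within this file. */
	static struct obj *grab_obj(void)
	{
		return calloc(1, sizeof(struct obj));
	}

	int main(void)
	{
		struct obj *o = grab_obj();	/* callers in this file still work... */
		if (o)
			release_obj(o);		/* ...but nothing outside can link to them */
		return 0;
	}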
This patch causes no regressions on the libhugetlbfs testsuite (ppc64,
POWER5).
Signed-off-by: David Gibson <dwg@au1.ibm.com>
Cc: William Lee Irwin III <wli@holomorphy.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--	include/linux/hugetlb.h	|  4
-rw-r--r--	mm/hugetlb.c		| 25
2 files changed, 13 insertions, 16 deletions
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index cafe73eecb05..5d84c368ffe4 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -21,8 +21,6 @@ int hugetlb_prefault(struct address_space *, struct vm_area_struct *);
 int hugetlb_report_meminfo(char *);
 int hugetlb_report_node_meminfo(int, char *);
 unsigned long hugetlb_total_pages(void);
-struct page *alloc_huge_page(struct vm_area_struct *, unsigned long);
-void free_huge_page(struct page *);
 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long address, int write_access);
 
@@ -97,8 +95,6 @@ static inline unsigned long hugetlb_total_pages(void)
 #define is_hugepage_only_range(mm, addr, len)	0
 #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) \
 						do { } while (0)
-#define alloc_huge_page(vma, addr)		({ NULL; })
-#define free_huge_page(p)			({ (void)(p); BUG(); })
 #define hugetlb_fault(mm, vma, addr, write)	({ BUG(); 0; })
 
 #define hugetlb_change_protection(vma, address, end, newprot)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 27fad5d9bcf6..075877b1cbc0 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -88,6 +88,17 @@ static struct page *dequeue_huge_page(struct vm_area_struct *vma,
 	return page;
 }
 
+static void free_huge_page(struct page *page)
+{
+	BUG_ON(page_count(page));
+
+	INIT_LIST_HEAD(&page->lru);
+
+	spin_lock(&hugetlb_lock);
+	enqueue_huge_page(page);
+	spin_unlock(&hugetlb_lock);
+}
+
 static int alloc_fresh_huge_page(void)
 {
 	static int nid = 0;
@@ -107,18 +118,8 @@ static int alloc_fresh_huge_page(void)
 	return 0;
 }
 
-void free_huge_page(struct page *page)
-{
-	BUG_ON(page_count(page));
-
-	INIT_LIST_HEAD(&page->lru);
-
-	spin_lock(&hugetlb_lock);
-	enqueue_huge_page(page);
-	spin_unlock(&hugetlb_lock);
-}
-
-struct page *alloc_huge_page(struct vm_area_struct *vma, unsigned long addr)
+static struct page *alloc_huge_page(struct vm_area_struct *vma,
+				    unsigned long addr)
 {
 	struct inode *inode = vma->vm_file->f_dentry->d_inode;
 	struct page *page;