author     Christoph Lameter <clameter@sgi.com>    2007-05-11 01:42:53 -0400
committer  Tony Luck <tony.luck@intel.com>         2007-05-11 12:40:00 -0400
commit     2bd62a40f63bd628c43a2f3637b252d0967659b0 (patch)
tree       73b5dbda741421812f487ff0a9b753109fb105b4 /include/asm-ia64
parent     cdc7dbdfe6edac177acb32e4ca56b525d0743fe7 (diff)
[IA64] Quicklist support for IA64
IA64 is the origin of the quicklist implementation, so cut out the pieces that are now in core code and modify the functions called.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Tony Luck <tony.luck@intel.com>
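For context on the mechanism being consolidated: a quicklist is a per-cpu LIFO cache of page-sized blocks (page-table pages here), with the free-list link threaded through the first word of each cached page, so most allocations avoid the page allocator and, because page-table pages come back already cleared, a fresh memset as well. The stand-alone user-space sketch below mirrors the pgtable_quicklist_alloc()/pgtable_quicklist_free() helpers that the diff removes; the ql_* names, the single global list (the kernel keeps one per cpu under preempt_disable()), and the calloc() fallback are illustrative only, and the sketch does not model the re-zeroing guarantee.

/*
 * Minimal sketch of the quicklist idea: a LIFO cache of page-sized
 * blocks, with the free-list link threaded through the first word of
 * each cached block.
 */
#include <stdlib.h>

#define QL_PAGE_SIZE    4096UL

static unsigned long *quicklist;        /* head of the cached-block list */
static long quicklist_size;             /* number of cached blocks */

static void *ql_alloc(void)
{
        unsigned long *ret = quicklist;

        if (ret) {
                /* Pop the head and clear the word that held the link. */
                quicklist = (unsigned long *)*ret;
                ret[0] = 0;
                --quicklist_size;
                return ret;
        }
        /* Cache empty: fall back to a freshly zeroed block. */
        return calloc(1, QL_PAGE_SIZE);
}

static void ql_free(void *block)
{
        /* Push the block, threading the link through its first word. */
        *(unsigned long *)block = (unsigned long)quicklist;
        quicklist = block;
        ++quicklist_size;
}

int main(void)
{
        void *a = ql_alloc();
        void *b = ql_alloc();

        ql_free(a);
        ql_free(b);
        a = ql_alloc();         /* reuses b's block, no calloc() */
        free(ql_alloc());       /* remaining cached block back to libc */
        free(a);
        return 0;
}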
Diffstat (limited to 'include/asm-ia64')
-rw-r--r--  include/asm-ia64/pgalloc.h  82
1 file changed, 16 insertions(+), 66 deletions(-)
diff --git a/include/asm-ia64/pgalloc.h b/include/asm-ia64/pgalloc.h
index 560c287b1233..67552cad5173 100644
--- a/include/asm-ia64/pgalloc.h
+++ b/include/asm-ia64/pgalloc.h
@@ -18,71 +18,18 @@
 #include <linux/mm.h>
 #include <linux/page-flags.h>
 #include <linux/threads.h>
+#include <linux/quicklist.h>
 
 #include <asm/mmu_context.h>
 
-DECLARE_PER_CPU(unsigned long *, __pgtable_quicklist);
-#define pgtable_quicklist __ia64_per_cpu_var(__pgtable_quicklist)
-DECLARE_PER_CPU(long, __pgtable_quicklist_size);
-#define pgtable_quicklist_size __ia64_per_cpu_var(__pgtable_quicklist_size)
-
-static inline long pgtable_quicklist_total_size(void)
-{
-        long ql_size = 0;
-        int cpuid;
-
-        for_each_online_cpu(cpuid) {
-                ql_size += per_cpu(__pgtable_quicklist_size, cpuid);
-        }
-        return ql_size;
-}
-
-static inline void *pgtable_quicklist_alloc(void)
-{
-        unsigned long *ret = NULL;
-
-        preempt_disable();
-
-        ret = pgtable_quicklist;
-        if (likely(ret != NULL)) {
-                pgtable_quicklist = (unsigned long *)(*ret);
-                ret[0] = 0;
-                --pgtable_quicklist_size;
-                preempt_enable();
-        } else {
-                preempt_enable();
-                ret = (unsigned long *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
-        }
-
-        return ret;
-}
-
-static inline void pgtable_quicklist_free(void *pgtable_entry)
-{
-#ifdef CONFIG_NUMA
-        int nid = page_to_nid(virt_to_page(pgtable_entry));
-
-        if (unlikely(nid != numa_node_id())) {
-                free_page((unsigned long)pgtable_entry);
-                return;
-        }
-#endif
-
-        preempt_disable();
-        *(unsigned long *)pgtable_entry = (unsigned long)pgtable_quicklist;
-        pgtable_quicklist = (unsigned long *)pgtable_entry;
-        ++pgtable_quicklist_size;
-        preempt_enable();
-}
-
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-        return pgtable_quicklist_alloc();
+        return quicklist_alloc(0, GFP_KERNEL, NULL);
 }
 
 static inline void pgd_free(pgd_t * pgd)
 {
-        pgtable_quicklist_free(pgd);
+        quicklist_free(0, NULL, pgd);
 }
 
 #ifdef CONFIG_PGTABLE_4
@@ -94,12 +41,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
 
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-        return pgtable_quicklist_alloc();
+        return quicklist_alloc(0, GFP_KERNEL, NULL);
 }
 
 static inline void pud_free(pud_t * pud)
 {
-        pgtable_quicklist_free(pud);
+        quicklist_free(0, NULL, pud);
 }
 #define __pud_free_tlb(tlb, pud)        pud_free(pud)
 #endif /* CONFIG_PGTABLE_4 */
@@ -112,12 +59,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-        return pgtable_quicklist_alloc();
+        return quicklist_alloc(0, GFP_KERNEL, NULL);
 }
 
 static inline void pmd_free(pmd_t * pmd)
 {
-        pgtable_quicklist_free(pmd);
+        quicklist_free(0, NULL, pmd);
 }
 
 #define __pmd_free_tlb(tlb, pmd)        pmd_free(pmd)
@@ -137,28 +84,31 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
 static inline struct page *pte_alloc_one(struct mm_struct *mm,
                                          unsigned long addr)
 {
-        void *pg = pgtable_quicklist_alloc();
+        void *pg = quicklist_alloc(0, GFP_KERNEL, NULL);
         return pg ? virt_to_page(pg) : NULL;
 }
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                           unsigned long addr)
 {
-        return pgtable_quicklist_alloc();
+        return quicklist_alloc(0, GFP_KERNEL, NULL);
 }
 
 static inline void pte_free(struct page *pte)
 {
-        pgtable_quicklist_free(page_address(pte));
+        quicklist_free_page(0, NULL, pte);
 }
 
 static inline void pte_free_kernel(pte_t * pte)
 {
-        pgtable_quicklist_free(pte);
+        quicklist_free(0, NULL, pte);
 }
 
-#define __pte_free_tlb(tlb, pte)        pte_free(pte)
+static inline void check_pgt_cache(void)
+{
+        quicklist_trim(0, NULL, 25, 16);
+}
 
-extern void check_pgt_cache(void);
+#define __pte_free_tlb(tlb, pte)        pte_free(pte)
 
 #endif /* _ASM_IA64_PGALLOC_H */
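The call sites above rely on the consolidated helpers from <linux/quicklist.h> introduced by the core quicklist work this change depends on. The prototypes below are a sketch reconstructed from those call sites, included only to make the arguments readable; the header itself is authoritative.

/*
 * Sketch of the consolidated quicklist interface (assumed shape, see
 * include/linux/quicklist.h for the real prototypes).
 */

/* nr selects one of the per-cpu quicklists; IA64 only uses list 0.
 * ctor, if non-NULL, initializes a page that had to come from the
 * page allocator rather than the cache. */
void *quicklist_alloc(int nr, gfp_t flags, void (*ctor)(void *));

/* Return a page (by kernel address or struct page) to list nr. */
void quicklist_free(int nr, void (*dtor)(void *), void *page);
void quicklist_free_page(int nr, void (*dtor)(void *), struct page *page);

/*
 * Shrink list nr on this cpu, running dtor (if any) on pages as they
 * are released: keep at least min_pages cached and free at most
 * max_free per call.  check_pgt_cache() above therefore keeps a floor
 * of 25 page-table pages and releases the excess 16 at a time.
 */
void quicklist_trim(int nr, void (*dtor)(void *),
                    unsigned long min_pages, unsigned long max_free);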