author      David S. Miller <davem@davemloft.net>          2006-01-31 21:30:27 -0500
committer   David S. Miller <davem@sunset.davemloft.net>   2006-03-20 04:11:14 -0500
commit      3c936465249f863f322154ff1aaa628b84ee5750
tree        2bd7a229236f197d20a655133370e5d0c1bf886c  /include/asm-sparc64/pgalloc.h
parent      05e28f9de65a38bb0c769080e91b6976e7e1e70c
[SPARC64]: Kill pgtable quicklists and use SLAB.
Taking a nod from the powerpc port.
With the per-cpu caching of both the page allocator and SLAB, the
pgtable quicklist scheme becomes relatively silly and primitive.
Signed-off-by: David S. Miller <davem@davemloft.net>
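[Editor's note: the patch below only touches the header; the pgtable_cache it declares still has to be created once at boot, on the arch side. A rough sketch of what that setup looks like against the 2.6.16-era SLAB interface (kmem_cache_t and the six-argument kmem_cache_create()); the constructor name and error handling are illustrative, not taken from this page:

	kmem_cache_t *pgtable_cache __read_mostly;

	/* The constructor runs as the slab populates a fresh page, so every
	 * object handed out is already zeroed, replacing the explicit
	 * memset()/get_zeroed_page() calls in the old quicklist paths.
	 */
	static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
	{
		clear_page(addr);
	}

	void pgtable_cache_init(void)
	{
		pgtable_cache = kmem_cache_create("pgtable_cache",
						  PAGE_SIZE,	/* object size: one page table */
						  PAGE_SIZE,	/* alignment */
						  SLAB_HWCACHE_ALIGN,
						  zero_ctor,
						  NULL);	/* no destructor */
		if (!pgtable_cache)
			panic("pgtable_cache_init(): could not create pgtable_cache");
	}

Making objects page-sized and page-aligned keeps the layout the old __get_free_page()-based code produced, while the slab layer takes over the freelist bookkeeping.]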
Diffstat (limited to 'include/asm-sparc64/pgalloc.h')
-rw-r--r--   include/asm-sparc64/pgalloc.h | 158
1 file changed, 27 insertions(+), 131 deletions(-)
diff --git a/include/asm-sparc64/pgalloc.h b/include/asm-sparc64/pgalloc.h
index ecea1bbdc115..12e4a273bd43 100644
--- a/include/asm-sparc64/pgalloc.h
+++ b/include/asm-sparc64/pgalloc.h
@@ -6,6 +6,7 @@
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/slab.h>
 
 #include <asm/spitfire.h>
 #include <asm/cpudata.h>
@@ -13,164 +14,59 @@
 #include <asm/page.h>
 
 /* Page table allocation/freeing. */
-#ifdef CONFIG_SMP
-/* Sliiiicck */
-#define pgt_quicklists		local_cpu_data()
-#else
-extern struct pgtable_cache_struct {
-	unsigned long *pgd_cache;
-	unsigned long *pte_cache;
-	unsigned int pgcache_size;
-} pgt_quicklists;
-#endif
-#define pgd_quicklist		(pgt_quicklists.pgd_cache)
-#define pte_quicklist		(pgt_quicklists.pte_cache)
-#define pgtable_cache_size	(pgt_quicklists.pgcache_size)
+extern kmem_cache_t *pgtable_cache;
 
-static inline void free_pgd_fast(pgd_t *pgd)
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	preempt_disable();
-	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
-	pgd_quicklist = (unsigned long *) pgd;
-	pgtable_cache_size++;
-	preempt_enable();
+	return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
 }
 
-static inline pgd_t *get_pgd_fast(void)
+static inline void pgd_free(pgd_t *pgd)
 {
-	unsigned long *ret;
-
-	preempt_disable();
-	if((ret = pgd_quicklist) != NULL) {
-		pgd_quicklist = (unsigned long *)(*ret);
-		ret[0] = 0;
-		pgtable_cache_size--;
-		preempt_enable();
-	} else {
-		preempt_enable();
-		ret = (unsigned long *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
-		if(ret)
-			memset(ret, 0, PAGE_SIZE);
-	}
-	return (pgd_t *)ret;
-}
-
-static inline void free_pgd_slow(pgd_t *pgd)
-{
-	free_page((unsigned long)pgd);
+	kmem_cache_free(pgtable_cache, pgd);
 }
 
 #define pud_populate(MM, PUD, PMD)	pud_set(PUD, PMD)
 
-static inline pmd_t *pmd_alloc_one_fast(void)
-{
-	unsigned long *ret;
-
-	preempt_disable();
-	ret = (unsigned long *) pte_quicklist;
-	if (likely(ret)) {
-		pte_quicklist = (unsigned long *)(*ret);
-		ret[0] = 0;
-		pgtable_cache_size--;
-	}
-	preempt_enable();
-
-	return (pmd_t *) ret;
-}
-
-static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
-{
-	pmd_t *pmd;
-
-	pmd = pmd_alloc_one_fast();
-	if (unlikely(!pmd)) {
-		pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
-		if (pmd)
-			memset(pmd, 0, PAGE_SIZE);
-	}
-	return pmd;
-}
-
-static inline void free_pmd_fast(pmd_t *pmd)
-{
-	preempt_disable();
-	*(unsigned long *)pmd = (unsigned long) pte_quicklist;
-	pte_quicklist = (unsigned long *) pmd;
-	pgtable_cache_size++;
-	preempt_enable();
-}
-
-static inline void free_pmd_slow(pmd_t *pmd)
-{
-	free_page((unsigned long)pmd);
-}
-
-#define pmd_populate_kernel(MM, PMD, PTE)	pmd_set(PMD, PTE)
-#define pmd_populate(MM,PMD,PTE_PAGE)		\
-	pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))
-
-static inline pte_t *pte_alloc_one_fast(void)
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	unsigned long *ret;
-
-	preempt_disable();
-	ret = (unsigned long *) pte_quicklist;
-	if (likely(ret)) {
-		pte_quicklist = (unsigned long *)(*ret);
-		ret[0] = 0;
-		pgtable_cache_size--;
-	}
-	preempt_enable();
-
-	return (pte_t *) ret;
+	return kmem_cache_alloc(pgtable_cache,
+				GFP_KERNEL|__GFP_REPEAT);
 }
 
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+static inline void pmd_free(pmd_t *pmd)
 {
-	pte_t *ptep = pte_alloc_one_fast();
-
-	if (likely(ptep))
-		return ptep;
-
-	return (pte_t *) get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
+	kmem_cache_free(pgtable_cache, pmd);
 }
 
-static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr)
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+					  unsigned long address)
 {
-	pte_t *pte = pte_alloc_one_fast();
-
-	if (likely(pte))
-		return virt_to_page(pte);
-
-	return alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+	return kmem_cache_alloc(pgtable_cache,
+				GFP_KERNEL|__GFP_REPEAT);
 }
 
-static inline void free_pte_fast(pte_t *pte)
+static inline struct page *pte_alloc_one(struct mm_struct *mm,
+					 unsigned long address)
 {
-	preempt_disable();
-	*(unsigned long *)pte = (unsigned long) pte_quicklist;
-	pte_quicklist = (unsigned long *) pte;
-	pgtable_cache_size++;
-	preempt_enable();
+	return virt_to_page(pte_alloc_one_kernel(mm, address));
 }
 
-static inline void free_pte_slow(pte_t *pte)
-{
-	free_page((unsigned long) pte);
-}
-
 static inline void pte_free_kernel(pte_t *pte)
 {
-	free_pte_fast(pte);
+	kmem_cache_free(pgtable_cache, pte);
 }
 
 static inline void pte_free(struct page *ptepage)
 {
-	free_pte_fast(page_address(ptepage));
+	pte_free_kernel(page_address(ptepage));
 }
 
-#define pmd_free(pmd)		free_pmd_fast(pmd)
-#define pgd_free(pgd)		free_pgd_fast(pgd)
-#define pgd_alloc(mm)		get_pgd_fast()
+
+#define pmd_populate_kernel(MM, PMD, PTE)	pmd_set(PMD, PTE)
+#define pmd_populate(MM,PMD,PTE_PAGE)		\
+	pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))
+
+#define check_pgt_cache()	do { } while (0)
 
 #endif /* _SPARC64_PGALLOC_H */
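[Editor's note: on the commit message's per-cpu caching point, the quicklists deleted above were a hand-rolled per-CPU free list, but by 2.6 the SLAB allocator already kept exactly that in front of every cache (struct array_cache in mm/slab.c), so kmem_cache_alloc() gives the same lock-free, cache-hot pop with no arch-specific code. A greatly simplified sketch of that fast path; field and function names are abbreviated from the real mm/slab.c, which carries more state:

	/* Simplified model of the 2.6 SLAB per-CPU front end. */
	struct array_cache {
		unsigned int avail;	/* free objects currently cached */
		unsigned int limit;	/* capacity before draining to shared lists */
		void *entry[];		/* per-CPU LIFO stack of free objects */
	};

	static void *slab_fast_alloc(struct array_cache *ac)
	{
		/* Hit: pop the most recently freed (cache-hot) object.
		 * This is what get_pgd_fast()/free_pgd_fast() open-coded
		 * with pgd_quicklist and pgtable_cache_size.
		 */
		if (ac->avail)
			return ac->entry[--ac->avail];
		/* Miss: the real allocator refills from shared/slab lists. */
		return NULL;
	}

The page allocator had grown the same structure (per-cpu pagesets) in the same timeframe, which is the other half of the "per-cpu caching of both the page allocator and SLAB" argument for retiring the quicklists.]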