author	Paul Mundt <lethal@linux-sh.org>	2007-05-13 20:55:35 -0400
committer	Paul Mundt <lethal@linux-sh.org>	2007-05-13 20:55:35 -0400
commit	6c645ac72582bacb85b90a1cf88e81a13045aba4 (patch)
tree	a6101de4bd5426ebfb06b097749251ff6f9e98b2 /include
parent	e827f20f1d34e91fbbb0df4674ddd8c3aad517da (diff)
sh64: generic quicklist support.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'include')
-rw-r--r--	include/asm-sh64/pgalloc.h	| 100
1 file changed, 23 insertions(+), 77 deletions(-)
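
For context, here is a minimal sketch of the generic quicklist interface this patch converts sh64 to, inferred from the calls that appear in the diff below (quicklist_alloc, quicklist_free, quicklist_free_page, quicklist_trim from <linux/quicklist.h>); the parameter names and comments are illustrative, not copied from the header:

	/* Quicklists keep a small per-CPU cache of page-table pages in a
	 * known (zeroed/constructed) state.  nr selects one of the per-CPU
	 * lists; this patch only ever uses list 0. */
	void *quicklist_alloc(int nr, gfp_t flags, void (*ctor)(void *));	/* pop a cached page or allocate a fresh one */
	void quicklist_free(int nr, void (*dtor)(void *), void *p);		/* return a page-table page (kernel address) to the list */
	void quicklist_free_page(int nr, void (*dtor)(void *), struct page *page); /* same, but by struct page */
	void quicklist_trim(int nr, void (*dtor)(void *),
			    unsigned long min_pages, unsigned long max_free);	/* shrink the cache back toward min_pages */

With that API the sh64 pgd/pmd/pte allocators below collapse into one-line wrappers, and the hand-rolled pgd_quicklist/pte_quicklist lists and the pgtable_cache_size bookkeeping go away.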
diff --git a/include/asm-sh64/pgalloc.h b/include/asm-sh64/pgalloc.h
index cb803e56cb64..6eccab770a6d 100644
--- a/include/asm-sh64/pgalloc.h
+++ b/include/asm-sh64/pgalloc.h
@@ -14,13 +14,9 @@
  *
  */
 
-#include <linux/threads.h>
 #include <linux/mm.h>
-
-#define pgd_quicklist (current_cpu_data.pgd_quick)
-#define pmd_quicklist (current_cpu_data.pmd_quick)
-#define pte_quicklist (current_cpu_data.pte_quick)
-#define pgtable_cache_size (current_cpu_data.pgtable_cache_sz)
+#include <linux/quicklist.h>
+#include <asm/page.h>
 
 static inline void pgd_init(unsigned long page)
 {
@@ -45,84 +41,37 @@ static inline pgd_t *get_pgd_slow(void)
 	return ret;
 }
 
-static inline pgd_t *get_pgd_fast(void)
-{
-	unsigned long *ret;
-
-	if ((ret = pgd_quicklist) != NULL) {
-		pgd_quicklist = (unsigned long *)(*ret);
-		ret[0] = 0;
-		pgtable_cache_size--;
-	} else
-		ret = (unsigned long *)get_pgd_slow();
-
-	if (ret) {
-		memset(ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
-	}
-	return (pgd_t *)ret;
-}
-
-static inline void free_pgd_fast(pgd_t *pgd)
-{
-	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
-	pgd_quicklist = (unsigned long *) pgd;
-	pgtable_cache_size++;
-}
-
-static inline void free_pgd_slow(pgd_t *pgd)
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	kfree((void *)pgd);
+	return quicklist_alloc(0, GFP_KERNEL, NULL);
 }
 
-extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);
-extern pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long address_preadjusted);
-
-static inline pte_t *get_pte_fast(void)
+static inline void pgd_free(pgd_t *pgd)
 {
-	unsigned long *ret;
-
-	if((ret = (unsigned long *)pte_quicklist) != NULL) {
-		pte_quicklist = (unsigned long *)(*ret);
-		ret[0] = ret[1];
-		pgtable_cache_size--;
-	}
-	return (pte_t *)ret;
+	quicklist_free(0, NULL, pgd);
 }
 
-static inline void free_pte_fast(pte_t *pte)
+static inline struct page *pte_alloc_one(struct mm_struct *mm,
+					 unsigned long address)
 {
-	*(unsigned long *)pte = (unsigned long) pte_quicklist;
-	pte_quicklist = (unsigned long *) pte;
-	pgtable_cache_size++;
+	void *pg = quicklist_alloc(0, GFP_KERNEL, NULL);
+	return pg ? virt_to_page(pg) : NULL;
 }
 
 static inline void pte_free_kernel(pte_t *pte)
 {
-	free_page((unsigned long)pte);
+	quicklist_free(0, NULL, pte);
 }
 
 static inline void pte_free(struct page *pte)
 {
-	__free_page(pte);
+	quicklist_free_page(0, NULL, pte);
 }
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 					  unsigned long address)
 {
-	pte_t *pte;
-
-	pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT|__GFP_ZERO);
-
-	return pte;
-}
-
-static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
-{
-	struct page *pte;
-
-	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
-
-	return pte;
+	return quicklist_alloc(0, GFP_KERNEL, NULL);
 }
 
 #define __pte_free_tlb(tlb,pte)	tlb_remove_page((tlb),(pte))
@@ -142,31 +91,23 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long add
 
 #elif defined(CONFIG_SH64_PGTABLE_3_LEVEL)
 
-static __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-	pmd_t *pmd;
-	pmd = (pmd_t *) __get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
-	return pmd;
+	return quicklist_alloc(0, GFP_KERNEL, NULL);
 }
 
-static __inline__ void pmd_free(pmd_t *pmd)
+static inline void pmd_free(pmd_t *pmd)
 {
-	free_page((unsigned long) pmd);
+	quicklist_free(0, NULL, pmd);
 }
 
 #define pgd_populate(mm, pgd, pmd)	pgd_set(pgd, pmd)
 #define __pmd_free_tlb(tlb,pmd)	pmd_free(pmd)
 
 #else
 #error "No defined page table size"
 #endif
 
-#define check_pgt_cache()	do { } while (0)
-#define pgd_free(pgd)		free_pgd_slow(pgd)
-#define pgd_alloc(mm)		get_pgd_fast()
-
-extern int do_check_pgt_cache(int, int);
-
 #define pmd_populate_kernel(mm, pmd, pte) \
 	set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) (pte)))
 
@@ -176,4 +117,9 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 	set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) page_address (pte)));
 }
 
+static inline void check_pgt_cache(void)
+{
+	quicklist_trim(0, NULL, 25, 16);
+}
+
 #endif /* __ASM_SH64_PGALLOC_H */
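
As a usage note (an assumption about the generic quicklist behaviour, not something this patch states): check_pgt_cache() is the arch hook for periodically trimming the per-CPU cache, and quicklist_trim(0, NULL, 25, 16) asks the core code to keep roughly 25 cached page-table pages per CPU and release at most 16 back to the page allocator per call. A purely hypothetical caller tying the new helpers together might look like this; example_pgtable_cycle and its calling context are placeholders, not code from this patch:

	/* Hypothetical illustration of how the converted helpers fit together. */
	static void example_pgtable_cycle(struct mm_struct *mm)
	{
		pgd_t *pgd = pgd_alloc(mm);	/* quicklist_alloc(0, GFP_KERNEL, NULL) */

		if (pgd)
			pgd_free(pgd);		/* page returns to per-CPU quicklist 0 */

		check_pgt_cache();		/* quicklist_trim(0, NULL, 25, 16) */
	}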