author		David S. Miller <davem@davemloft.net>	2006-01-31 21:30:13 -0500
committer	David S. Miller <davem@sunset.davemloft.net>	2006-03-20 04:11:13 -0500
commit		05e28f9de65a38bb0c769080e91b6976e7e1e70c
tree		e1d3fcc1381ea6612ce4c082ca8596e84b637216 /include/asm-sparc64/pgalloc.h
parent		74bf4312fff083ab25c3f357cc653ada7995e5f6

[SPARC64]: No need to D-cache color page tables any longer.

Unlike the virtual page tables, the new TSB scheme does not require
this ugly hack.

Signed-off-by: David S. Miller <davem@davemloft.net>
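The core simplification is visible in pte_alloc_one_fast() and free_pte_fast()
below: the two D-cache-colored quicklists (pte_cache[2]) collapse into a single
LIFO free list whose link pointer is stored in the first word of each cached
page. The following is a minimal standalone sketch of that pop/push discipline,
not kernel code: quicklist_pop, quicklist_push, and PAGE_BYTES are illustrative
names, the kernel's preempt_disable()/preempt_enable() bracketing is omitted,
and the 8K page size is assumed to match sparc64's base page of the era.

/*
 * Minimal sketch of the single-quicklist pattern the patch converges
 * on: a LIFO free list threaded through the first word of each cached
 * page. Illustrative names only, not kernel API.
 */
#include <stdlib.h>

#define PAGE_BYTES 8192			/* assumed sparc64 base page size */

static unsigned long *pte_quicklist;	/* head of the free list */
static unsigned int pgtable_cache_size;

static void *quicklist_pop(void)
{
	unsigned long *ret = pte_quicklist;

	if (ret) {
		/* First word of a cached page holds the next pointer. */
		pte_quicklist = (unsigned long *)*ret;
		ret[0] = 0;
		pgtable_cache_size--;
	}
	return ret;
}

static void quicklist_push(void *page)
{
	/* Link the page to the old head through its first word. */
	*(unsigned long *)page = (unsigned long)pte_quicklist;
	pte_quicklist = page;
	pgtable_cache_size++;
}

int main(void)
{
	void *a = calloc(1, PAGE_BYTES);

	quicklist_push(a);		/* cache the page */
	void *b = quicklist_pop();	/* returns the same page: b == a */
	free(b);
	return 0;
}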
Diffstat (limited to 'include/asm-sparc64/pgalloc.h')
 -rw-r--r--	include/asm-sparc64/pgalloc.h	101
 1 file changed, 46 insertions(+), 55 deletions(-)
diff --git a/include/asm-sparc64/pgalloc.h b/include/asm-sparc64/pgalloc.h
index baf59c00ea47..ecea1bbdc115 100644
--- a/include/asm-sparc64/pgalloc.h
+++ b/include/asm-sparc64/pgalloc.h
@@ -19,16 +19,15 @@
 #else
 extern struct pgtable_cache_struct {
 	unsigned long *pgd_cache;
-	unsigned long *pte_cache[2];
+	unsigned long *pte_cache;
 	unsigned int pgcache_size;
 } pgt_quicklists;
 #endif
 #define pgd_quicklist		(pgt_quicklists.pgd_cache)
-#define pmd_quicklist		((unsigned long *)0)
 #define pte_quicklist		(pgt_quicklists.pte_cache)
 #define pgtable_cache_size	(pgt_quicklists.pgcache_size)
 
-static __inline__ void free_pgd_fast(pgd_t *pgd)
+static inline void free_pgd_fast(pgd_t *pgd)
 {
 	preempt_disable();
 	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
@@ -37,7 +36,7 @@ static __inline__ void free_pgd_fast(pgd_t *pgd)
 	preempt_enable();
 }
 
-static __inline__ pgd_t *get_pgd_fast(void)
+static inline pgd_t *get_pgd_fast(void)
 {
 	unsigned long *ret;
 
@@ -56,47 +55,35 @@ static __inline__ pgd_t *get_pgd_fast(void)
 	return (pgd_t *)ret;
 }
 
-static __inline__ void free_pgd_slow(pgd_t *pgd)
+static inline void free_pgd_slow(pgd_t *pgd)
 {
 	free_page((unsigned long)pgd);
 }
 
-/* XXX This crap can die, no longer using virtual page tables... */
-#ifdef DCACHE_ALIASING_POSSIBLE
-#define VPTE_COLOR(address)		(((address) >> (PAGE_SHIFT + 10)) & 1UL)
-#define DCACHE_COLOR(address)		(((address) >> PAGE_SHIFT) & 1UL)
-#else
-#define VPTE_COLOR(address)		0
-#define DCACHE_COLOR(address)		0
-#endif
-
 #define pud_populate(MM, PUD, PMD)	pud_set(PUD, PMD)
 
-static __inline__ pmd_t *pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address)
+static inline pmd_t *pmd_alloc_one_fast(void)
 {
 	unsigned long *ret;
-	int color = 0;
 
 	preempt_disable();
-	if (pte_quicklist[color] == NULL)
-		color = 1;
-
-	if((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
-		pte_quicklist[color] = (unsigned long *)(*ret);
+	ret = (unsigned long *) pte_quicklist;
+	if (likely(ret)) {
+		pte_quicklist = (unsigned long *)(*ret);
 		ret[0] = 0;
 		pgtable_cache_size--;
 	}
 	preempt_enable();
 
-	return (pmd_t *)ret;
+	return (pmd_t *) ret;
 }
 
-static __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
 	pmd_t *pmd;
 
-	pmd = pmd_alloc_one_fast(mm, address);
-	if (!pmd) {
+	pmd = pmd_alloc_one_fast();
+	if (unlikely(!pmd)) {
 		pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
 		if (pmd)
 			memset(pmd, 0, PAGE_SIZE);
@@ -104,18 +91,16 @@ static __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addre
 	return pmd;
 }
 
-static __inline__ void free_pmd_fast(pmd_t *pmd)
+static inline void free_pmd_fast(pmd_t *pmd)
 {
-	unsigned long color = DCACHE_COLOR((unsigned long)pmd);
-
 	preempt_disable();
-	*(unsigned long *)pmd = (unsigned long) pte_quicklist[color];
-	pte_quicklist[color] = (unsigned long *) pmd;
+	*(unsigned long *)pmd = (unsigned long) pte_quicklist;
+	pte_quicklist = (unsigned long *) pmd;
 	pgtable_cache_size++;
 	preempt_enable();
 }
 
-static __inline__ void free_pmd_slow(pmd_t *pmd)
+static inline void free_pmd_slow(pmd_t *pmd)
 {
 	free_page((unsigned long)pmd);
 }
@@ -124,48 +109,54 @@ static __inline__ void free_pmd_slow(pmd_t *pmd)
 #define pmd_populate(MM,PMD,PTE_PAGE)		\
 	pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))
 
-extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address);
-
-static inline struct page *
-pte_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
-	pte_t *pte = pte_alloc_one_kernel(mm, addr);
-
-	if (pte)
-		return virt_to_page(pte);
-
-	return NULL;
-}
-
-static __inline__ pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
+static inline pte_t *pte_alloc_one_fast(void)
 {
-	unsigned long color = VPTE_COLOR(address);
 	unsigned long *ret;
 
 	preempt_disable();
-	if((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
-		pte_quicklist[color] = (unsigned long *)(*ret);
+	ret = (unsigned long *) pte_quicklist;
+	if (likely(ret)) {
+		pte_quicklist = (unsigned long *)(*ret);
 		ret[0] = 0;
 		pgtable_cache_size--;
 	}
 	preempt_enable();
-	return (pte_t *)ret;
+
+	return (pte_t *) ret;
+}
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+{
+	pte_t *ptep = pte_alloc_one_fast();
+
+	if (likely(ptep))
+		return ptep;
+
+	return (pte_t *) get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
 }
 
-static __inline__ void free_pte_fast(pte_t *pte)
+static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	unsigned long color = DCACHE_COLOR((unsigned long)pte);
+	pte_t *pte = pte_alloc_one_fast();
 
+	if (likely(pte))
+		return virt_to_page(pte);
+
+	return alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+}
+
+static inline void free_pte_fast(pte_t *pte)
+{
 	preempt_disable();
-	*(unsigned long *)pte = (unsigned long) pte_quicklist[color];
-	pte_quicklist[color] = (unsigned long *) pte;
+	*(unsigned long *)pte = (unsigned long) pte_quicklist;
+	pte_quicklist = (unsigned long *) pte;
 	pgtable_cache_size++;
 	preempt_enable();
 }
 
-static __inline__ void free_pte_slow(pte_t *pte)
+static inline void free_pte_slow(pte_t *pte)
 {
-	free_page((unsigned long)pte);
+	free_page((unsigned long) pte);
 }
 
 static inline void pte_free_kernel(pte_t *pte)
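For context on what the patch deletes: with DCACHE_ALIASING_POSSIBLE, the
virtually indexed D-cache is larger than the page size, so two pages can alias
in the cache; bit PAGE_SHIFT of an address selects which of the two cache
colors a page occupies, and DCACHE_COLOR() extracted exactly that bit
(VPTE_COLOR applied the same idea to virtual page-table addresses, ten bits
higher). A standalone sketch of that arithmetic, assuming PAGE_SHIFT of 13 for
sparc64's 8K base pages and arbitrary demo addresses:

/*
 * Demo of the deleted DCACHE_COLOR() macro: bit PAGE_SHIFT of an
 * address picks one of the two alias colors of a virtually indexed
 * D-cache that is twice the page size. PAGE_SHIFT 13 is assumed;
 * the addresses are arbitrary.
 */
#include <stdio.h>

#define PAGE_SHIFT 13
#define DCACHE_COLOR(address)	(((address) >> PAGE_SHIFT) & 1UL)

int main(void)
{
	unsigned long a = 0x40000000UL;			/* even page: color 0 */
	unsigned long b = a + (1UL << PAGE_SHIFT);	/* next page: color 1 */

	printf("color(a)=%lu color(b)=%lu\n", DCACHE_COLOR(a), DCACHE_COLOR(b));
	return 0;
}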