author     David S. Miller <davem@davemloft.net>         2006-01-31 21:30:13 -0500
committer  David S. Miller <davem@sunset.davemloft.net>  2006-03-20 04:11:13 -0500
commit     05e28f9de65a38bb0c769080e91b6976e7e1e70c (patch)
tree       e1d3fcc1381ea6612ce4c082ca8596e84b637216
parent     74bf4312fff083ab25c3f357cc653ada7995e5f6 (diff)
[SPARC64]: No need to D-cache color page tables any longer.
Unlike the virtual page tables, the new TSB scheme does not require
this ugly hack.

Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  arch/sparc64/mm/init.c         |  71
-rw-r--r--  include/asm-sparc64/cpudata.h  |   5
-rw-r--r--  include/asm-sparc64/pgalloc.h  | 101
3 files changed, 55 insertions(+), 122 deletions(-)
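For readers who never saw the hack being removed: under the old virtual page tables, a PTE page's D-cache color (the address bit just above PAGE_SHIFT) had to match its slot in the VPTE area, so the allocator kept one quicklist per color, pulled order-1 page pairs from the page allocator, and pushed the wrong-colored half onto the other list. Below is a minimal user-space sketch of just that two-color freelist; it is illustrative only, not kernel code. DCACHE_COLOR and pte_cache mirror the patch, PAGE_SHIFT = 13 matches sparc64's 8K pages, aligned_alloc/free stand in for the kernel page allocator, and the *_old function names are invented for the sketch.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative model of the pre-patch scheme, not kernel code. */
#define PAGE_SHIFT	13			/* sparc64: 8K pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* One freelist per D-cache color; the color is the address bit
 * just above the page offset. */
static unsigned long *pte_cache[2];

#define DCACHE_COLOR(addr)	(((unsigned long)(addr) >> PAGE_SHIFT) & 1UL)

/* Push a free PTE page onto the list matching its color.  The first
 * word of the free page itself stores the link to the next entry. */
static void free_pte_fast_old(unsigned long *pte)
{
	unsigned long color = DCACHE_COLOR(pte);

	*pte = (unsigned long) pte_cache[color];
	pte_cache[color] = pte;
}

/* Pop a cached page of the requested color, or NULL if that list
 * is empty. */
static unsigned long *pte_alloc_one_fast_old(unsigned long color)
{
	unsigned long *ret = pte_cache[color];

	if (ret) {
		pte_cache[color] = (unsigned long *)(*ret);
		ret[0] = 0;		/* wipe the embedded link */
	}
	return ret;
}

int main(void)
{
	unsigned long *page = aligned_alloc(PAGE_SIZE, PAGE_SIZE);
	unsigned long color = DCACHE_COLOR(page);

	free_pte_fast_old(page);
	/* Only the matching-color list can satisfy a request. */
	printf("same color:  %p\n", (void *) pte_alloc_one_fast_old(color));
	printf("other color: %p\n", (void *) pte_alloc_one_fast_old(color ^ 1UL));
	free(page);
	return 0;
}

With the TSB scheme the color constraint disappears, so the patch below collapses pte_cache[2] into a single pte_cache and drops VPTE_COLOR, DCACHE_COLOR, and the order-1 splitting in pte_alloc_one_kernel().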
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index da068f6b259..936ae1a594a 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -145,6 +145,10 @@ int bigkernel = 0;
 #define PGT_CACHE_LOW	25
 #define PGT_CACHE_HIGH	50
 
+#ifndef CONFIG_SMP
+struct pgtable_cache_struct pgt_quicklists;
+#endif
+
 void check_pgt_cache(void)
 {
 	preempt_disable();
@@ -152,10 +156,8 @@ void check_pgt_cache(void)
 		do {
 			if (pgd_quicklist)
 				free_pgd_slow(get_pgd_fast());
-			if (pte_quicklist[0])
-				free_pte_slow(pte_alloc_one_fast(NULL, 0));
-			if (pte_quicklist[1])
-				free_pte_slow(pte_alloc_one_fast(NULL, 1 << (PAGE_SHIFT + 10)));
+			if (pte_quicklist)
+				free_pte_slow(pte_alloc_one_fast());
 		} while (pgtable_cache_size > PGT_CACHE_LOW);
 	}
 	preempt_enable();
@@ -962,67 +964,6 @@ out:
 	spin_unlock(&ctx_alloc_lock);
 }
 
-#ifndef CONFIG_SMP
-struct pgtable_cache_struct pgt_quicklists;
-#endif
-
-/* XXX We don't need to color these things in the D-cache any longer.  */
-#ifdef DCACHE_ALIASING_POSSIBLE
-#define DC_ALIAS_SHIFT	1
-#else
-#define DC_ALIAS_SHIFT	0
-#endif
-pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
-{
-	struct page *page;
-	unsigned long color;
-
-	{
-		pte_t *ptep = pte_alloc_one_fast(mm, address);
-
-		if (ptep)
-			return ptep;
-	}
-
-	color = VPTE_COLOR(address);
-	page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, DC_ALIAS_SHIFT);
-	if (page) {
-		unsigned long *to_free;
-		unsigned long paddr;
-		pte_t *pte;
-
-#ifdef DCACHE_ALIASING_POSSIBLE
-		set_page_count(page, 1);
-		ClearPageCompound(page);
-
-		set_page_count((page + 1), 1);
-		ClearPageCompound(page + 1);
-#endif
-		paddr = (unsigned long) page_address(page);
-		memset((char *)paddr, 0, (PAGE_SIZE << DC_ALIAS_SHIFT));
-
-		if (!color) {
-			pte = (pte_t *) paddr;
-			to_free = (unsigned long *) (paddr + PAGE_SIZE);
-		} else {
-			pte = (pte_t *) (paddr + PAGE_SIZE);
-			to_free = (unsigned long *) paddr;
-		}
-
-#ifdef DCACHE_ALIASING_POSSIBLE
-		/* Now free the other one up, adjust cache size. */
-		preempt_disable();
-		*to_free = (unsigned long) pte_quicklist[color ^ 0x1];
-		pte_quicklist[color ^ 0x1] = to_free;
-		pgtable_cache_size++;
-		preempt_enable();
-#endif
-
-		return pte;
-	}
-	return NULL;
-}
-
 void sparc_ultra_dump_itlb(void)
 {
 	int slot;
diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h
index 74de79dca91..45a9a2cfaf7 100644
--- a/include/asm-sparc64/cpudata.h
+++ b/include/asm-sparc64/cpudata.h
@@ -20,8 +20,9 @@ typedef struct {
 	/* Dcache line 2 */
 	unsigned int	pgcache_size;
 	unsigned int	__pad1;
-	unsigned long	*pte_cache[2];
+	unsigned long	*pte_cache;
 	unsigned long	*pgd_cache;
+	unsigned long	__pad2;
 
 	/* Dcache line 3, rarely used */
 	unsigned int	dcache_size;
@@ -30,8 +31,8 @@ typedef struct {
 	unsigned int	icache_line_size;
 	unsigned int	ecache_size;
 	unsigned int	ecache_line_size;
-	unsigned int	__pad2;
 	unsigned int	__pad3;
+	unsigned int	__pad4;
 } cpuinfo_sparc;
 
 DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
diff --git a/include/asm-sparc64/pgalloc.h b/include/asm-sparc64/pgalloc.h
index baf59c00ea4..ecea1bbdc11 100644
--- a/include/asm-sparc64/pgalloc.h
+++ b/include/asm-sparc64/pgalloc.h
@@ -19,16 +19,15 @@
 #else
 extern struct pgtable_cache_struct {
 	unsigned long *pgd_cache;
-	unsigned long *pte_cache[2];
+	unsigned long *pte_cache;
 	unsigned int pgcache_size;
 } pgt_quicklists;
 #endif
 #define pgd_quicklist		(pgt_quicklists.pgd_cache)
-#define pmd_quicklist		((unsigned long *)0)
 #define pte_quicklist		(pgt_quicklists.pte_cache)
 #define pgtable_cache_size	(pgt_quicklists.pgcache_size)
 
-static __inline__ void free_pgd_fast(pgd_t *pgd)
+static inline void free_pgd_fast(pgd_t *pgd)
 {
 	preempt_disable();
 	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
@@ -37,7 +36,7 @@ static __inline__ void free_pgd_fast(pgd_t *pgd)
 	preempt_enable();
 }
 
-static __inline__ pgd_t *get_pgd_fast(void)
+static inline pgd_t *get_pgd_fast(void)
 {
 	unsigned long *ret;
 
@@ -56,47 +55,35 @@ static __inline__ pgd_t *get_pgd_fast(void)
 	return (pgd_t *)ret;
 }
 
-static __inline__ void free_pgd_slow(pgd_t *pgd)
+static inline void free_pgd_slow(pgd_t *pgd)
 {
 	free_page((unsigned long)pgd);
 }
 
-/* XXX This crap can die, no longer using virtual page tables... */
-#ifdef DCACHE_ALIASING_POSSIBLE
-#define VPTE_COLOR(address)		(((address) >> (PAGE_SHIFT + 10)) & 1UL)
-#define DCACHE_COLOR(address)		(((address) >> PAGE_SHIFT) & 1UL)
-#else
-#define VPTE_COLOR(address)		0
-#define DCACHE_COLOR(address)		0
-#endif
-
 #define pud_populate(MM, PUD, PMD)	pud_set(PUD, PMD)
 
-static __inline__ pmd_t *pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address)
+static inline pmd_t *pmd_alloc_one_fast(void)
 {
 	unsigned long *ret;
-	int color = 0;
 
 	preempt_disable();
-	if (pte_quicklist[color] == NULL)
-		color = 1;
-
-	if((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
-		pte_quicklist[color] = (unsigned long *)(*ret);
+	ret = (unsigned long *) pte_quicklist;
+	if (likely(ret)) {
+		pte_quicklist = (unsigned long *)(*ret);
 		ret[0] = 0;
 		pgtable_cache_size--;
 	}
 	preempt_enable();
 
-	return (pmd_t *)ret;
+	return (pmd_t *) ret;
 }
 
-static __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
+static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 {
 	pmd_t *pmd;
 
-	pmd = pmd_alloc_one_fast(mm, address);
-	if (!pmd) {
+	pmd = pmd_alloc_one_fast();
+	if (unlikely(!pmd)) {
 		pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
 		if (pmd)
 			memset(pmd, 0, PAGE_SIZE);
@@ -104,18 +91,16 @@ static __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addre
 	return pmd;
 }
 
-static __inline__ void free_pmd_fast(pmd_t *pmd)
+static inline void free_pmd_fast(pmd_t *pmd)
 {
-	unsigned long color = DCACHE_COLOR((unsigned long)pmd);
-
 	preempt_disable();
-	*(unsigned long *)pmd = (unsigned long) pte_quicklist[color];
-	pte_quicklist[color] = (unsigned long *) pmd;
+	*(unsigned long *)pmd = (unsigned long) pte_quicklist;
+	pte_quicklist = (unsigned long *) pmd;
 	pgtable_cache_size++;
 	preempt_enable();
 }
 
-static __inline__ void free_pmd_slow(pmd_t *pmd)
+static inline void free_pmd_slow(pmd_t *pmd)
 {
 	free_page((unsigned long)pmd);
 }
@@ -124,48 +109,54 @@ static __inline__ void free_pmd_slow(pmd_t *pmd)
 #define pmd_populate(MM,PMD,PTE_PAGE)		\
 	pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))
 
-extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address);
-
-static inline struct page *
-pte_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
-	pte_t *pte = pte_alloc_one_kernel(mm, addr);
-
-	if (pte)
-		return virt_to_page(pte);
-
-	return NULL;
-}
-
-static __inline__ pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
+static inline pte_t *pte_alloc_one_fast(void)
 {
-	unsigned long color = VPTE_COLOR(address);
 	unsigned long *ret;
 
 	preempt_disable();
-	if((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
-		pte_quicklist[color] = (unsigned long *)(*ret);
+	ret = (unsigned long *) pte_quicklist;
+	if (likely(ret)) {
+		pte_quicklist = (unsigned long *)(*ret);
 		ret[0] = 0;
 		pgtable_cache_size--;
 	}
 	preempt_enable();
-	return (pte_t *)ret;
+
+	return (pte_t *) ret;
+}
+
+static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+{
+	pte_t *ptep = pte_alloc_one_fast();
+
+	if (likely(ptep))
+		return ptep;
+
+	return (pte_t *) get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
 }
 
-static __inline__ void free_pte_fast(pte_t *pte)
+static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
-	unsigned long color = DCACHE_COLOR((unsigned long)pte);
+	pte_t *pte = pte_alloc_one_fast();
 
+	if (likely(pte))
+		return virt_to_page(pte);
+
+	return alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
+}
+
+static inline void free_pte_fast(pte_t *pte)
+{
 	preempt_disable();
-	*(unsigned long *)pte = (unsigned long) pte_quicklist[color];
-	pte_quicklist[color] = (unsigned long *) pte;
+	*(unsigned long *)pte = (unsigned long) pte_quicklist;
+	pte_quicklist = (unsigned long *) pte;
 	pgtable_cache_size++;
 	preempt_enable();
 }
 
-static __inline__ void free_pte_slow(pte_t *pte)
+static inline void free_pte_slow(pte_t *pte)
 {
-	free_page((unsigned long)pte);
+	free_page((unsigned long) pte);
 }
 
 static inline void pte_free_kernel(pte_t *pte)
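What the patch leaves behind is a plain LIFO quicklist: free_pte_fast() threads a freed page in through its first word, pte_alloc_one_fast() unlinks the head and wipes the link, and pgcache_size tracks the depth so check_pgt_cache() can trim it between PGT_CACHE_HIGH and PGT_CACHE_LOW. A user-space model of that push/pop pair, again illustrative rather than kernel code (calloc stands in for get_zeroed_page(), an 8K page is assumed, and the quicklist_* names are local to the sketch):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE	8192UL		/* assumed, matching sparc64 */

static unsigned long *pte_cache;	/* single quicklist head */
static unsigned int pgcache_size;

/* Model of free_pte_fast(): link the page in via its first word. */
static void quicklist_push(unsigned long *pte)
{
	*pte = (unsigned long) pte_cache;
	pte_cache = pte;
	pgcache_size++;
}

/* Model of pte_alloc_one_fast(): unlink the head, wipe the link. */
static unsigned long *quicklist_pop(void)
{
	unsigned long *ret = pte_cache;

	if (ret) {
		pte_cache = (unsigned long *)(*ret);
		ret[0] = 0;
		pgcache_size--;
	}
	return ret;
}

int main(void)
{
	unsigned long *a = calloc(1, PAGE_SIZE);	/* get_zeroed_page() stand-in */
	unsigned long *b = calloc(1, PAGE_SIZE);

	quicklist_push(a);
	quicklist_push(b);
	printf("cached pages: %u\n", pgcache_size);	/* prints 2 */
	printf("LIFO order: %d %d\n",
	       quicklist_pop() == b, quicklist_pop() == a);	/* prints 1 1 */
	free(a);
	free(b);
	return 0;
}

Keeping the link inside the freed page itself costs no bookkeeping memory, which is also why the same pte_quicklist can feed both pte_alloc_one_fast() and pmd_alloc_one_fast() in the new pgalloc.h.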