author		David S. Miller <davem@davemloft.net>	2005-07-27 19:08:44 -0400
committer	David S. Miller <davem@davemloft.net>	2005-07-27 19:08:44 -0400
commit		48b0e5487fcdcb3421bda67666277348b2bd2661 (patch)
tree		37822b91e0ded470f4728aa7511e8d3f72d977e5 /arch/sparc64/mm/init.c
parent		40a085c41dc3d5991fdf90ed2557cc06cce0590a (diff)
[SPARC64]: Fix ugly dependency on NR_CPUS being a power-of-2.
The page->flags D-cache dirty state tracking depended upon NR_CPUS being a power-of-2 via its "NR_CPUS - 1" masking.

Fix that to use a fixed (256 - 1) mask, as that is the limit imposed by thread_info->cpu, which is a "u8".

Finally, add a compile-time check that NR_CPUS is not greater than 256.

Signed-off-by: David S. Miller <davem@davemloft.net>
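The reasoning behind the fix can be shown outside the kernel. Below is a minimal, self-contained user-space C sketch (illustrative only, not part of the patch; the pack/unpack helpers and the NR_CPUS value of 24 are invented for the demonstration): when NR_CPUS is not a power of two, "NR_CPUS - 1" is not an all-ones bit mask, so extracting the stored cpu number from page->flags silently corrupts some cpu ids, whereas the fixed (256 - 1) mask always keeps the full 8 bits that thread_info->cpu (a "u8") can hold.

#include <stdio.h>

#define SHIFT 24	/* cpu number lives at bit 24 of page->flags */

/* Store a cpu id in the upper flag bits (same idea as set_dcache_dirty). */
static unsigned long pack(unsigned long flags, unsigned long cpu)
{
	return flags | (cpu << SHIFT);
}

/* Extract it again with a given mask (same idea as dcache_dirty_cpu). */
static unsigned long unpack(unsigned long flags, unsigned long mask)
{
	return (flags >> SHIFT) & mask;
}

int main(void)
{
	unsigned long nr_cpus = 24;		/* hypothetical non-power-of-2 config */
	unsigned long cpu = 12;			/* a legal cpu id on such a machine */
	unsigned long flags = pack(0, cpu);

	/* 24 - 1 = 0b10111: bit 3 is missing, so cpu 12 reads back as 4. */
	printf("(NR_CPUS - 1) mask: stored %lu, read back %lu\n",
	       cpu, unpack(flags, nr_cpus - 1));

	/* 256 - 1 = 0xff keeps all 8 bits of a u8 cpu id, always safe. */
	printf("(256 - 1)     mask: stored %lu, read back %lu\n",
	       cpu, unpack(flags, 256 - 1));
	return 0;
}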
Diffstat (limited to 'arch/sparc64/mm/init.c')
-rw-r--r--	arch/sparc64/mm/init.c	23
1 file changed, 17 insertions, 6 deletions
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 8fc413cb6acd..3fbaf342a452 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -121,15 +121,24 @@ __inline__ void flush_dcache_page_impl(struct page *page)
 }
 
 #define PG_dcache_dirty		PG_arch_1
+#define PG_dcache_cpu_shift	24
+#define PG_dcache_cpu_mask	(256 - 1)
+
+#if NR_CPUS > 256
+#error D-cache dirty tracking and thread_info->cpu need fixing for > 256 cpus
+#endif
 
 #define dcache_dirty_cpu(page) \
-	(((page)->flags >> 24) & (NR_CPUS - 1UL))
+	(((page)->flags >> PG_dcache_cpu_shift) & PG_dcache_cpu_mask)
 
 static __inline__ void set_dcache_dirty(struct page *page, int this_cpu)
 {
 	unsigned long mask = this_cpu;
-	unsigned long non_cpu_bits = ~((NR_CPUS - 1UL) << 24UL);
-	mask = (mask << 24) | (1UL << PG_dcache_dirty);
+	unsigned long non_cpu_bits;
+
+	non_cpu_bits = ~(PG_dcache_cpu_mask << PG_dcache_cpu_shift);
+	mask = (mask << PG_dcache_cpu_shift) | (1UL << PG_dcache_dirty);
+
 	__asm__ __volatile__("1:\n\t"
 			     "ldx	[%2], %%g7\n\t"
 			     "and	%%g7, %1, %%g1\n\t"
@@ -151,7 +160,7 @@ static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long cpu)
 	__asm__ __volatile__("! test_and_clear_dcache_dirty\n"
 			     "1:\n\t"
 			     "ldx	[%2], %%g7\n\t"
-			     "srlx	%%g7, 24, %%g1\n\t"
+			     "srlx	%%g7, %4, %%g1\n\t"
 			     "and	%%g1, %3, %%g1\n\t"
 			     "cmp	%%g1, %0\n\t"
 			     "bne,pn	%%icc, 2f\n\t"
@@ -164,7 +173,8 @@ static __inline__ void clear_dcache_dirty_cpu(struct page *page, unsigned long c
 			     "2:"
 			     : /* no outputs */
 			     : "r" (cpu), "r" (mask), "r" (&page->flags),
-			       "i" (NR_CPUS - 1UL)
+			       "i" (PG_dcache_cpu_mask),
+			       "i" (PG_dcache_cpu_shift)
 			     : "g1", "g7");
 }
 
@@ -180,7 +190,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 	if (pfn_valid(pfn) &&
 	    (page = pfn_to_page(pfn), page_mapping(page)) &&
 	    ((pg_flags = page->flags) & (1UL << PG_dcache_dirty))) {
-		int cpu = ((pg_flags >> 24) & (NR_CPUS - 1UL));
+		int cpu = ((pg_flags >> PG_dcache_cpu_shift) &
+			   PG_dcache_cpu_mask);
 		int this_cpu = get_cpu();
 
 		/* This is just to optimize away some function calls