author     Chris Dearman <chris@mips.com>        2006-06-20 13:06:52 -0400
committer  Ralf Baechle <ralf@linux-mips.org>    2006-06-29 16:10:53 -0400
commit     73f403527b9ec5367376076eafb3d2f505d8e2e3 (patch)
tree       bc3f080e75661231d9f30ecb385193bb57f8bc4e /arch/mips
parent     c09b47d8a99104897afd682b48f292e05461a0c2 (diff)
[MIPS] Fix handling of 0 length I & D caches.
Don't ask.
Signed-off-by: Chris Dearman <chris@mips.com>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
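
What the patch does, in brief: on cores that report an I- or D-cache line size of 0 (i.e. the cache is not there at all), the r4k_blast_*_setup() routines now install the renamed cache_noop() handler instead of falling through without setting one, the per-line cacheops in local_r4k_flush_cache_sigtramp() are skipped when the relevant line size is zero, and the cache-geometry calculations are guarded against a zero line size. The standalone sketch below illustrates the dispatch pattern only; blast_dcache_setup(), blast_dcache16() and blast_dcache32() here are made-up stand-ins, not the kernel's own symbols.

#include <stdio.h>

/* Made-up stand-ins for the kernel's cache_noop() and blast_*() handlers. */
static void cache_noop(void) { }
static void blast_dcache16(void) { puts("blast dcache, 16-byte lines"); }
static void blast_dcache32(void) { puts("blast dcache, 32-byte lines"); }

static void (*blast_dcache)(void);

/*
 * Same shape as the r4k_blast_*_setup() hunks below: a probed line size
 * of 0 now installs a no-op handler, so callers can keep invoking
 * blast_dcache() unconditionally.
 */
static void blast_dcache_setup(unsigned long dc_lsize)
{
        if (dc_lsize == 0)
                blast_dcache = cache_noop;
        else if (dc_lsize == 16)
                blast_dcache = blast_dcache16;
        else if (dc_lsize == 32)
                blast_dcache = blast_dcache32;
}

int main(void)
{
        blast_dcache_setup(0);  /* CPU reporting a 0-length dcache */
        blast_dcache();         /* safe: resolves to cache_noop()  */
        blast_dcache_setup(32);
        blast_dcache();
        return 0;
}

The (void *) casts in the actual patch are there because cache_noop()'s prototype does not match every blast function pointer type; the sketch sidesteps that by giving all handlers the same void (*)(void) signature.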
Diffstat (limited to 'arch/mips')
-rw-r--r--  arch/mips/mm/c-r4k.c  64
1 files changed, 41 insertions, 23 deletions
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 01450938db8b..a78355a44c41 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -60,13 +60,13 @@ static unsigned long scache_size __read_mostly;
 /*
  * Dummy cache handling routines for machines without boardcaches
  */
-static void no_sc_noop(void) {}
+static void cache_noop(void) {}
 
 static struct bcache_ops no_sc_ops = {
-        .bc_enable = (void *)no_sc_noop,
-        .bc_disable = (void *)no_sc_noop,
-        .bc_wback_inv = (void *)no_sc_noop,
-        .bc_inv = (void *)no_sc_noop
+        .bc_enable = (void *)cache_noop,
+        .bc_disable = (void *)cache_noop,
+        .bc_wback_inv = (void *)cache_noop,
+        .bc_inv = (void *)cache_noop
 };
 
 struct bcache_ops *bcops = &no_sc_ops;
@@ -94,7 +94,9 @@ static inline void r4k_blast_dcache_page_setup(void)
 {
         unsigned long dc_lsize = cpu_dcache_line_size();
 
-        if (dc_lsize == 16)
+        if (dc_lsize == 0)
+                r4k_blast_dcache_page = (void *)cache_noop;
+        else if (dc_lsize == 16)
                 r4k_blast_dcache_page = blast_dcache16_page;
         else if (dc_lsize == 32)
                 r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
@@ -106,7 +108,9 @@ static inline void r4k_blast_dcache_page_indexed_setup(void)
 {
         unsigned long dc_lsize = cpu_dcache_line_size();
 
-        if (dc_lsize == 16)
+        if (dc_lsize == 0)
+                r4k_blast_dcache_page_indexed = (void *)cache_noop;
+        else if (dc_lsize == 16)
                 r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
         else if (dc_lsize == 32)
                 r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
@@ -118,7 +122,9 @@ static inline void r4k_blast_dcache_setup(void)
 {
         unsigned long dc_lsize = cpu_dcache_line_size();
 
-        if (dc_lsize == 16)
+        if (dc_lsize == 0)
+                r4k_blast_dcache = (void *)cache_noop;
+        else if (dc_lsize == 16)
                 r4k_blast_dcache = blast_dcache16;
         else if (dc_lsize == 32)
                 r4k_blast_dcache = blast_dcache32;
@@ -201,7 +207,9 @@ static inline void r4k_blast_icache_page_setup(void)
 {
         unsigned long ic_lsize = cpu_icache_line_size();
 
-        if (ic_lsize == 16)
+        if (ic_lsize == 0)
+                r4k_blast_icache_page = (void *)cache_noop;
+        else if (ic_lsize == 16)
                 r4k_blast_icache_page = blast_icache16_page;
         else if (ic_lsize == 32)
                 r4k_blast_icache_page = blast_icache32_page;
@@ -216,7 +224,9 @@ static inline void r4k_blast_icache_page_indexed_setup(void)
 {
         unsigned long ic_lsize = cpu_icache_line_size();
 
-        if (ic_lsize == 16)
+        if (ic_lsize == 0)
+                r4k_blast_icache_page_indexed = (void *)cache_noop;
+        else if (ic_lsize == 16)
                 r4k_blast_icache_page_indexed = blast_icache16_page_indexed;
         else if (ic_lsize == 32) {
                 if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
@@ -238,7 +248,9 @@ static inline void r4k_blast_icache_setup(void)
 {
         unsigned long ic_lsize = cpu_icache_line_size();
 
-        if (ic_lsize == 16)
+        if (ic_lsize == 0)
+                r4k_blast_icache = (void *)cache_noop;
+        else if (ic_lsize == 16)
                 r4k_blast_icache = blast_icache16;
         else if (ic_lsize == 32) {
                 if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x())
@@ -258,7 +270,7 @@ static inline void r4k_blast_scache_page_setup(void)
         unsigned long sc_lsize = cpu_scache_line_size();
 
         if (scache_size == 0)
-                r4k_blast_scache_page = (void *)no_sc_noop;
+                r4k_blast_scache_page = (void *)cache_noop;
         else if (sc_lsize == 16)
                 r4k_blast_scache_page = blast_scache16_page;
         else if (sc_lsize == 32)
@@ -276,7 +288,7 @@ static inline void r4k_blast_scache_page_indexed_setup(void)
         unsigned long sc_lsize = cpu_scache_line_size();
 
         if (scache_size == 0)
-                r4k_blast_scache_page_indexed = (void *)no_sc_noop;
+                r4k_blast_scache_page_indexed = (void *)cache_noop;
         else if (sc_lsize == 16)
                 r4k_blast_scache_page_indexed = blast_scache16_page_indexed;
         else if (sc_lsize == 32)
@@ -294,7 +306,7 @@ static inline void r4k_blast_scache_setup(void)
         unsigned long sc_lsize = cpu_scache_line_size();
 
         if (scache_size == 0)
-                r4k_blast_scache = (void *)no_sc_noop;
+                r4k_blast_scache = (void *)cache_noop;
         else if (sc_lsize == 16)
                 r4k_blast_scache = blast_scache16;
         else if (sc_lsize == 32)
@@ -508,7 +520,7 @@ static inline void local_r4k_flush_icache_range(void *args)
         unsigned long end = fir_args->end;
 
         if (!cpu_has_ic_fills_f_dc) {
-                if (end - start > dcache_size) {
+                if (end - start >= dcache_size) {
                         r4k_blast_dcache();
                 } else {
                         R4600_HIT_CACHEOP_WAR_IMPL;
@@ -683,10 +695,12 @@ static void local_r4k_flush_cache_sigtramp(void * arg)
         unsigned long addr = (unsigned long) arg;
 
         R4600_HIT_CACHEOP_WAR_IMPL;
-        protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
+        if (dc_lsize)
+                protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
         if (!cpu_icache_snoops_remote_store && scache_size)
                 protected_writeback_scache_line(addr & ~(sc_lsize - 1));
-        protected_flush_icache_line(addr & ~(ic_lsize - 1));
+        if (ic_lsize)
+                protected_flush_icache_line(addr & ~(ic_lsize - 1));
         if (MIPS4K_ICACHE_REFILL_WAR) {
                 __asm__ __volatile__ (
                         ".set push\n\t"
@@ -973,8 +987,10 @@ static void __init probe_pcache(void)
         c->icache.waysize = icache_size / c->icache.ways;
         c->dcache.waysize = dcache_size / c->dcache.ways;
 
-        c->icache.sets = icache_size / (c->icache.linesz * c->icache.ways);
-        c->dcache.sets = dcache_size / (c->dcache.linesz * c->dcache.ways);
+        c->icache.sets = c->icache.linesz ?
+                icache_size / (c->icache.linesz * c->icache.ways) : 0;
+        c->dcache.sets = c->dcache.linesz ?
+                dcache_size / (c->dcache.linesz * c->dcache.ways) : 0;
 
         /*
          * R10000 and R12000 P-caches are odd in a positive way. They're 32kB
@@ -1259,10 +1275,12 @@ void __init r4k_cache_init(void)
          * This code supports virtually indexed processors and will be
          * unnecessarily inefficient on physically indexed processors.
          */
-        shm_align_mask = max_t( unsigned long,
-                                c->dcache.sets * c->dcache.linesz - 1,
-                                PAGE_SIZE - 1);
-
+        if (c->dcache.linesz)
+                shm_align_mask = max_t( unsigned long,
+                                        c->dcache.sets * c->dcache.linesz - 1,
+                                        PAGE_SIZE - 1);
+        else
+                shm_align_mask = PAGE_SIZE-1;
         flush_cache_all = r4k_flush_cache_all;
         __flush_cache_all = r4k___flush_cache_all;
         flush_cache_mm = r4k_flush_cache_mm;
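
Two details in the later hunks are easy to miss. Changing '>' to '>=' in local_r4k_flush_icache_range() lets a flush range exactly as large as the dcache take the single whole-cache blast instead of a line-by-line loop over at least as many lines, and the probe_pcache() and r4k_cache_init() hunks keep a zero line size from dividing by zero in the sets calculation or producing a bogus shm_align_mask. A minimal userspace sketch of those two decisions follows; flush_strategy() and compute_shm_align_mask() are hypothetical illustrations, not kernel functions, and PAGE_SIZE is assumed to be 4 KiB.

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Mirrors the ">=" change: a range at least as large as the dcache is
 * handled with one whole-cache blast rather than per-line hit ops. */
static const char *flush_strategy(unsigned long start, unsigned long end,
                                  unsigned long dcache_size)
{
        return (end - start >= dcache_size) ? "blast whole dcache"
                                            : "writeback/invalidate line by line";
}

/* Mirrors the r4k_cache_init() hunk: with no dcache lines the alignment
 * mask falls back to PAGE_SIZE - 1 instead of sets * linesz - 1. */
static unsigned long compute_shm_align_mask(unsigned long sets,
                                            unsigned long linesz)
{
        if (linesz) {
                unsigned long waymask = sets * linesz - 1;

                return waymask > PAGE_SIZE - 1 ? waymask : PAGE_SIZE - 1;
        }
        return PAGE_SIZE - 1;
}

int main(void)
{
        /* A range exactly equal to a 32 KiB dcache now takes the blast path. */
        printf("%s\n", flush_strategy(0, 32768, 32768));
        printf("mask with a 256-set, 32-byte-line dcache: %#lx\n",
               compute_shm_align_mask(256, 32));
        printf("mask with no dcache:                      %#lx\n",
               compute_shm_align_mask(0, 0));
        return 0;
}

With these inputs the masks come out as 0x1fff and 0xfff respectively, matching what max_t(unsigned long, sets * linesz - 1, PAGE_SIZE - 1) and the new PAGE_SIZE - 1 fallback would produce.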