author | Thomas Bogendoerfer <tsbogend@alpha.franken.de> | 2008-07-08 08:46:34 -0400
committer | Ralf Baechle <ralf@linux-mips.org> | 2008-07-08 14:33:46 -0400
commit | 14defd90f5281da8a1bf43bc789efbafe5991cd8 (patch)
tree | 82d4cd90fe401ab70331cb94cd6c3f8fd53bf96c /arch/mips/mm
parent | b32dfbb9c54393af32761add16e249664193621f (diff)
[MIPS] Fix 32bit kernels on R4k with 128 byte cache line size
The generated copy_page for R4k CPUs with a 128 byte cache line size used
Create Dirty Exclusive cache line operations even if only part of the
cache line was filled. This change avoids generating cache operations
when only part of a cache line is copied in one loop. It also
increases the maximum loop size, because the generated code still fits
into the available space even for R4k CPUs with a 128 byte cache line size.
Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
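
The core of the fix is a guard on when the generated code may use Create Dirty Exclusive: only if one pass of the unrolled clear/copy loop writes an entire cache line. A minimal C sketch of that condition follows; `cache_line_size` and `half_clear_loop_size` correspond to the variables in arch/mips/mm/page.c, while the helper `can_use_cdex()` is a hypothetical name used only for illustration.

```c
#include <stdbool.h>

/* Stand-ins for the per-CPU values computed in set_prefetch_parameters() */
static unsigned int cache_line_size;      /* e.g. 32, 64 or 128 bytes */
static unsigned int half_clear_loop_size; /* bytes stored per half of the unrolled loop */

/*
 * Create Dirty Exclusive marks a line dirty without fetching it from
 * memory, so it is only safe when the loop then overwrites the whole
 * line.  One full pass writes 2 * half_clear_loop_size bytes.
 */
static bool can_use_cdex(void)
{
	return cache_line_size == (half_clear_loop_size << 1);
}
```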
Diffstat (limited to 'arch/mips/mm')
-rw-r--r-- | arch/mips/mm/page.c | 61
1 files changed, 32 insertions, 29 deletions
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index 1edf0cbbeede..1417c6494858 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -235,13 +235,12 @@ static void __cpuinit set_prefetch_parameters(void)
 	}
 	/*
 	 * Too much unrolling will overflow the available space in
-	 * clear_space_array / copy_page_array. 8 words sounds generous,
-	 * but a R4000 with 128 byte L2 line length can exceed even that.
+	 * clear_space_array / copy_page_array.
 	 */
-	half_clear_loop_size = min(8 * clear_word_size,
+	half_clear_loop_size = min(16 * clear_word_size,
 				   max(cache_line_size >> 1,
 				       4 * clear_word_size));
-	half_copy_loop_size = min(8 * copy_word_size,
+	half_copy_loop_size = min(16 * copy_word_size,
 				  max(cache_line_size >> 1,
 				      4 * copy_word_size));
 }
@@ -263,21 +262,23 @@ static inline void __cpuinit build_clear_pref(u32 **buf, int off)
 	if (pref_bias_clear_store) {
 		uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off,
 			    A0);
-	} else if (cpu_has_cache_cdex_s) {
-		uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
-	} else if (cpu_has_cache_cdex_p) {
-		if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
-			uasm_i_nop(buf);
-			uasm_i_nop(buf);
-			uasm_i_nop(buf);
-			uasm_i_nop(buf);
-		}
+	} else if (cache_line_size == (half_clear_loop_size << 1)) {
+		if (cpu_has_cache_cdex_s) {
+			uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
+		} else if (cpu_has_cache_cdex_p) {
+			if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
+				uasm_i_nop(buf);
+				uasm_i_nop(buf);
+				uasm_i_nop(buf);
+				uasm_i_nop(buf);
+			}
 
-		if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
-			uasm_i_lw(buf, ZERO, ZERO, AT);
+			if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
+				uasm_i_lw(buf, ZERO, ZERO, AT);
 
-		uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
-	}
+			uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
+		}
+	}
 }
 
 void __cpuinit build_clear_page(void)
@@ -403,20 +404,22 @@ static inline void build_copy_store_pref(u32 **buf, int off)
 	if (pref_bias_copy_store) {
 		uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off,
 			    A0);
-	} else if (cpu_has_cache_cdex_s) {
-		uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
-	} else if (cpu_has_cache_cdex_p) {
-		if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
-			uasm_i_nop(buf);
-			uasm_i_nop(buf);
-			uasm_i_nop(buf);
-			uasm_i_nop(buf);
-		}
+	} else if (cache_line_size == (half_copy_loop_size << 1)) {
+		if (cpu_has_cache_cdex_s) {
+			uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
+		} else if (cpu_has_cache_cdex_p) {
+			if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
+				uasm_i_nop(buf);
+				uasm_i_nop(buf);
+				uasm_i_nop(buf);
+				uasm_i_nop(buf);
+			}
 
-		if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
-			uasm_i_lw(buf, ZERO, ZERO, AT);
+			if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
+				uasm_i_lw(buf, ZERO, ZERO, AT);
 
-		uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
+			uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
+		}
 	}
 }
 
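
As a worked example of the loop-size formula changed in the first hunk, the sketch below assumes a 32-bit kernel (4 byte clear_word_size) on an R4k CPU with a 128 byte cache line, i.e. the configuration named in the commit title; the variable names mirror page.c but the program itself is only illustrative.

```c
#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))
#define max(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	unsigned int clear_word_size = 4;	/* 32-bit kernel: 4 byte stores */
	unsigned int cache_line_size = 128;

	/* Old cap of 8 words: one full loop pass writes only 64 bytes,
	 * yet Create Dirty Exclusive was emitted for the 128 byte line. */
	unsigned int old_half = min(8 * clear_word_size,
				    max(cache_line_size >> 1,
					4 * clear_word_size));

	/* New cap of 16 words: one full pass now covers the whole line,
	 * so the new guard in build_clear_pref() is satisfied. */
	unsigned int new_half = min(16 * clear_word_size,
				    max(cache_line_size >> 1,
					4 * clear_word_size));

	printf("old: %u + %u = %u of %u bytes per pass\n",
	       old_half, old_half, 2 * old_half, cache_line_size);
	printf("new: %u + %u = %u of %u bytes per pass\n",
	       new_half, new_half, 2 * new_half, cache_line_size);
	return 0;
}
```

It prints 64 of 128 bytes per pass for the old 8-word cap and 128 of 128 for the new 16-word cap, which is why partially filled lines were being marked dirty exclusive before this change and no longer are after it.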