author		Lennert Buytenhek <buytenh@wantstofly.org>	2006-09-16 05:52:02 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2006-09-25 05:25:27 -0400
commit		197c9444d6093b70c8faa24e7ab04a2423c9d14d
tree		581916e32828c2454c099fdb443e2a7fefc5172e /arch/arm
parent		51635ad282ead58b9d164f07e1fb62a9456b427c
[ARM] 3814/1: move 80200 dma_inv_range() erratum check out of line
On stepping A0/A1 of the 80200, invalidating D-cache by line doesn't
clear the dirty bits, which means that if we invalidate a dirty line,
the dirty data can still be written back to memory later on.
To work around this, dma_inv_range() on these two steppings is
implemented as dma_flush_range() (i.e. each D-cache line is cleaned
before it is invalidated). We currently do this with a run-time
processor ID check in xscale_dma_inv_range(), but a better solution
is to add separate cache_fns and proc_info entries for the 80200 A0/A1.
Signed-off-by: Lennert Buytenhek <buytenh@wantstofly.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
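
For reference, the check this patch moves out of line is the five-instruction
sequence removed from the top of xscale_dma_inv_range() in the diff below. A
minimal userspace C sketch of what that eor/eor/bics test computes (the helper
name and the test harness are illustrative only, not kernel code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Mirrors the removed check at the top of xscale_dma_inv_range(): the CP15
 * main ID register (read there with "mrc p15, 0, r2, c0, c0, 0") is XORed
 * with 0x69000000 and 0x00052000, bit 0 is cleared, and the branch to
 * xscale_dma_flush_range is taken only if the result is zero, i.e. only for
 * IDs 0x69052000 (80200 stepping A0) and 0x69052001 (stepping A1).
 */
static bool is_80200_a0_a1(uint32_t cpuid)
{
	cpuid ^= 0x69000000;		/* eor	r2, r2, #0x69000000 */
	cpuid ^= 0x00052000;		/* eor	r2, r2, #0x00052000 */
	return (cpuid & ~1u) == 0;	/* bics	r2, r2, #1; beq ... */
}

int main(void)
{
	printf("%d %d %d\n",
	       is_80200_a0_a1(0x69052000),	/* A0 -> 1 */
	       is_80200_a0_a1(0x69052001),	/* A1 -> 1 */
	       is_80200_a0_a1(0x69052009));	/* any other ID -> 0 */
	return 0;
}

Because bit 0 of the ID is masked off, the single comparison covers both the
A0 (revision 0) and A1 (revision 1) steppings.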
Diffstat (limited to 'arch/arm')
-rw-r--r--	arch/arm/mm/proc-xscale.S	58
1 file changed, 52 insertions, 6 deletions
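
The "separate cache_fns" mentioned in the commit message is a per-CPU table of
cache-maintenance entry points; the diff below adds xscale_80200_A0_A1_cache_fns
alongside the stock xscale_cache_fns. A cut-down, self-contained C sketch of the
idea (the struct, field names, and stub routines are illustrative, not the
kernel's struct cpu_cache_fns definition): the A0/A1 table differs only in that
its dma_inv_range slot points at the flush routine.

#include <stdio.h>

/* Illustrative vtable covering just the DMA cache operations. */
struct dma_cache_ops {
	void (*dma_inv_range)(unsigned long start, unsigned long end);
	void (*dma_clean_range)(unsigned long start, unsigned long end);
	void (*dma_flush_range)(unsigned long start, unsigned long end);
};

/* Stand-ins for the assembly routines in proc-xscale.S. */
static void fake_inv(unsigned long s, unsigned long e)   { printf("invalidate %lx-%lx\n", s, e); }
static void fake_clean(unsigned long s, unsigned long e) { printf("clean %lx-%lx\n", s, e); }
static void fake_flush(unsigned long s, unsigned long e) { printf("clean+invalidate %lx-%lx\n", s, e); }

/* Stock XScale table: invalidate really invalidates. */
static const struct dma_cache_ops xscale_ops = {
	.dma_inv_range   = fake_inv,
	.dma_clean_range = fake_clean,
	.dma_flush_range = fake_flush,
};

/* 80200 A0/A1 table: identical except that invalidate goes through flush. */
static const struct dma_cache_ops xscale_80200_a0_a1_ops = {
	.dma_inv_range   = fake_flush,	/* erratum workaround */
	.dma_clean_range = fake_clean,
	.dma_flush_range = fake_flush,
};

int main(void)
{
	xscale_ops.dma_inv_range(0x1000, 0x2000);
	xscale_80200_a0_a1_ops.dma_inv_range(0x1000, 0x2000);
	return 0;
}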
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 3ca0c92e98a2..e8b377d637f6 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -311,12 +311,6 @@ ENTRY(xscale_flush_kern_dcache_page)
  * - end  - virtual end address
  */
 ENTRY(xscale_dma_inv_range)
-	mrc	p15, 0, r2, c0, c0, 0		@ read ID
-	eor	r2, r2, #0x69000000
-	eor	r2, r2, #0x00052000
-	bics	r2, r2, #1
-	beq	xscale_dma_flush_range
-
 	tst	r0, #CACHELINESIZE - 1
 	bic	r0, r0, #CACHELINESIZE - 1
 	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
@@ -375,6 +369,30 @@ ENTRY(xscale_cache_fns)
 	.long	xscale_dma_clean_range
 	.long	xscale_dma_flush_range
 
+/*
+ * On stepping A0/A1 of the 80200, invalidating D-cache by line doesn't
+ * clear the dirty bits, which means that if we invalidate a dirty line,
+ * the dirty data can still be written back to external memory later on.
+ *
+ * The recommended workaround is to always do a clean D-cache line before
+ * doing an invalidate D-cache line, so on the affected processors,
+ * dma_inv_range() is implemented as dma_flush_range().
+ *
+ * See erratum #25 of "Intel 80200 Processor Specification Update",
+ * revision January 22, 2003, available at:
+ * http://www.intel.com/design/iio/specupdt/273415.htm
+ */
+ENTRY(xscale_80200_A0_A1_cache_fns)
+	.long	xscale_flush_kern_cache_all
+	.long	xscale_flush_user_cache_all
+	.long	xscale_flush_user_cache_range
+	.long	xscale_coherent_kern_range
+	.long	xscale_coherent_user_range
+	.long	xscale_flush_kern_dcache_page
+	.long	xscale_dma_flush_range
+	.long	xscale_dma_clean_range
+	.long	xscale_dma_flush_range
+
 ENTRY(cpu_xscale_dcache_clean_area)
 1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 	add	r0, r0, #CACHELINESIZE
@@ -531,6 +549,11 @@ cpu_elf_name:
 	.asciz	"v5"
 	.size	cpu_elf_name, . - cpu_elf_name
 
+	.type	cpu_80200_A0_A1_name, #object
+cpu_80200_A0_A1_name:
+	.asciz	"XScale-80200 A0/A1"
+	.size	cpu_80200_A0_A1_name, . - cpu_80200_A0_A1_name
+
 	.type	cpu_80200_name, #object
 cpu_80200_name:
 	.asciz	"XScale-80200"
@@ -595,6 +618,29 @@ cpu_pxa270_name:
 
 	.section ".proc.info.init", #alloc, #execinstr
 
+	.type	__80200_A0_A1_proc_info,#object
+__80200_A0_A1_proc_info:
+	.long	0x69052000
+	.long	0xfffffffe
+	.long	PMD_TYPE_SECT | \
+		PMD_SECT_BUFFERABLE | \
+		PMD_SECT_CACHEABLE | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ
+	.long	PMD_TYPE_SECT | \
+		PMD_SECT_AP_WRITE | \
+		PMD_SECT_AP_READ
+	b	__xscale_setup
+	.long	cpu_arch_name
+	.long	cpu_elf_name
+	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
+	.long	cpu_80200_name
+	.long	xscale_processor_functions
+	.long	v4wbi_tlb_fns
+	.long	xscale_mc_user_fns
+	.long	xscale_80200_A0_A1_cache_fns
+	.size	__80200_A0_A1_proc_info, . - __80200_A0_A1_proc_info
+
 	.type	__80200_proc_info,#object
 __80200_proc_info:
 	.long	0x69052000
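
At boot the kernel scans the .proc.info.init records in order and uses the
first one whose value/mask pair matches the CPU ID, which is why the new
__80200_A0_A1_proc_info entry is placed ahead of __80200_proc_info and uses
mask 0xfffffffe: the revision bit is a don't-care, so both A0 and A1 match.
A self-contained C model of that selection (the struct layout, the generic
entry's mask, and the names shown are illustrative assumptions, not the
kernel's exact definitions):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative model of an ARM proc_info record's CPU-ID match fields. */
struct proc_info {
	uint32_t cpu_val;	/* .long 0x69052000 in the diff */
	uint32_t cpu_mask;	/* .long 0xfffffffe for the A0/A1 entry */
	const char *name;
	const char *cache_fns;	/* which cache_fns table this entry installs */
};

/* Scanned in order: the stricter A0/A1 entry must come before the generic one. */
static const struct proc_info proc_info_list[] = {
	{ 0x69052000, 0xfffffffe, "XScale-80200 A0/A1", "xscale_80200_A0_A1_cache_fns" },
	{ 0x69052000, 0xfffffff0, "XScale-80200",       "xscale_cache_fns" },	/* mask assumed */
};

static const struct proc_info *lookup_processor_type(uint32_t cpuid)
{
	for (size_t i = 0; i < sizeof(proc_info_list) / sizeof(proc_info_list[0]); i++)
		if ((cpuid & proc_info_list[i].cpu_mask) == proc_info_list[i].cpu_val)
			return &proc_info_list[i];
	return NULL;
}

int main(void)
{
	/* 0x69052001 is the 80200 A1 ID: it hits the A0/A1 entry, not the generic one. */
	const struct proc_info *p = lookup_processor_type(0x69052001);

	if (p)
		printf("%s -> %s\n", p->name, p->cache_fns);
	return 0;
}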