Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig              |  96
-rw-r--r--  arch/arm/mm/Makefile             |   1
-rw-r--r--  arch/arm/mm/abort-ev6.S          |   6
-rw-r--r--  arch/arm/mm/cache-feroceon-l2.c  |  37
-rw-r--r--  arch/arm/mm/cache-l2x0.c         |  38
-rw-r--r--  arch/arm/mm/cache-v6.S           |  28
-rw-r--r--  arch/arm/mm/cache-v7.S           |  27
-rw-r--r--  arch/arm/mm/cache-xsc3l2.c       |  57
-rw-r--r--  arch/arm/mm/dma-mapping.c        |  37
-rw-r--r--  arch/arm/mm/flush.c              |   8
-rw-r--r--  arch/arm/mm/highmem.c            |  87
-rw-r--r--  arch/arm/mm/init.c               |   6
-rw-r--r--  arch/arm/mm/ioremap.c            |   8
-rw-r--r--  arch/arm/mm/mmap.c               |   2
-rw-r--r--  arch/arm/mm/mmu.c                |  16
-rw-r--r--  arch/arm/mm/pgd.c                |   2
-rw-r--r--  arch/arm/mm/proc-arm1020.S       |   3
-rw-r--r--  arch/arm/mm/proc-arm1020e.S      |   3
-rw-r--r--  arch/arm/mm/proc-arm1022.S       |   3
-rw-r--r--  arch/arm/mm/proc-arm1026.S       |   3
-rw-r--r--  arch/arm/mm/proc-arm6_7.S        |   6
-rw-r--r--  arch/arm/mm/proc-arm720.S        |   3
-rw-r--r--  arch/arm/mm/proc-arm740.S        |   3
-rw-r--r--  arch/arm/mm/proc-arm7tdmi.S      |   3
-rw-r--r--  arch/arm/mm/proc-arm920.S        |  37
-rw-r--r--  arch/arm/mm/proc-arm922.S        |   3
-rw-r--r--  arch/arm/mm/proc-arm925.S        |   3
-rw-r--r--  arch/arm/mm/proc-arm926.S        |  37
-rw-r--r--  arch/arm/mm/proc-arm940.S        |   3
-rw-r--r--  arch/arm/mm/proc-arm946.S        |   3
-rw-r--r--  arch/arm/mm/proc-arm9tdmi.S      |   3
-rw-r--r--  arch/arm/mm/proc-fa526.S         |   3
-rw-r--r--  arch/arm/mm/proc-feroceon.S      |   3
-rw-r--r--  arch/arm/mm/proc-macros.S        |  29
-rw-r--r--  arch/arm/mm/proc-mohawk.S        |   3
-rw-r--r--  arch/arm/mm/proc-sa110.S         |   3
-rw-r--r--  arch/arm/mm/proc-sa1100.S        |  39
-rw-r--r--  arch/arm/mm/proc-v6.S            |  50
-rw-r--r--  arch/arm/mm/proc-v7.S            | 145
-rw-r--r--  arch/arm/mm/proc-xsc3.S          |  48
-rw-r--r--  arch/arm/mm/proc-xscale.S        |  45
-rw-r--r--  arch/arm/mm/vmregion.c           |  17
42 files changed, 673 insertions, 284 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 4414a01e1e8a..89266382b536 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -382,9 +382,15 @@ config CPU_FEROCEON_OLD_ID
 	  for which the CPU ID is equal to the ARM926 ID.
 	  Relevant for Feroceon-1850 and early Feroceon-2850.
 
+# Marvell PJ4
+config CPU_PJ4
+	bool
+	select CPU_V7
+	select ARM_THUMBEE
+
 # ARMv6
 config CPU_V6
-	bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX || ARCH_DOVE
+	bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX
 	select CPU_32v6
 	select CPU_ABRT_EV6
 	select CPU_PABRT_V6
@@ -396,21 +402,23 @@ config CPU_V6
 	select CPU_TLB_V6 if MMU
 
 # ARMv6k
-config CPU_32v6K
-	bool "Support ARM V6K processor extensions" if !SMP
-	depends on CPU_V6 || CPU_V7
-	default y if SMP && !(ARCH_MX3 || ARCH_OMAP2)
-	help
-	  Say Y here if your ARMv6 processor supports the 'K' extension.
-	  This enables the kernel to use some instructions not present
-	  on previous processors, and as such a kernel build with this
-	  enabled will not boot on processors with do not support these
-	  instructions.
+config CPU_V6K
+	bool "Support ARM V6K processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX
+	select CPU_32v6
+	select CPU_32v6K
+	select CPU_ABRT_EV6
+	select CPU_PABRT_V6
+	select CPU_CACHE_V6
+	select CPU_CACHE_VIPT
+	select CPU_CP15_MMU
+	select CPU_HAS_ASID if MMU
+	select CPU_COPY_V6 if MMU
+	select CPU_TLB_V6 if MMU
 
 # ARMv7
 config CPU_V7
 	bool "Support ARM V7 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX
-	select CPU_32v6K if !ARCH_OMAP2
+	select CPU_32v6K
 	select CPU_32v7
 	select CPU_ABRT_EV7
 	select CPU_PABRT_V7
@@ -427,25 +435,33 @@ config CPU_32v3
 	bool
 	select TLS_REG_EMUL if SMP || !MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
+	select CPU_USE_DOMAINS if MMU
 
 config CPU_32v4
 	bool
 	select TLS_REG_EMUL if SMP || !MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
+	select CPU_USE_DOMAINS if MMU
 
 config CPU_32v4T
 	bool
 	select TLS_REG_EMUL if SMP || !MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
+	select CPU_USE_DOMAINS if MMU
 
 config CPU_32v5
 	bool
 	select TLS_REG_EMUL if SMP || !MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
+	select CPU_USE_DOMAINS if MMU
 
 config CPU_32v6
 	bool
 	select TLS_REG_EMUL if !CPU_32v6K && !MMU
+	select CPU_USE_DOMAINS if CPU_V6 && MMU
+
+config CPU_32v6K
+	bool
 
 config CPU_32v7
 	bool
@@ -599,6 +615,12 @@ config CPU_CP15_MPU
 	help
 	  Processor has the CP15 register, which has MPU related registers.
 
+config CPU_USE_DOMAINS
+	bool
+	help
+	  This option enables or disables the use of domain switching
+	  via the set_fs() function.
+
 #
 # CPU supports 36-bit I/O
 #
@@ -609,7 +631,7 @@ comment "Processor Features"
 
 config ARM_THUMB
 	bool "Support Thumb user binaries"
-	depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V7 || CPU_FEROCEON
+	depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || CPU_V7 || CPU_FEROCEON
 	default y
 	help
 	  Say Y if you want to include kernel support for running user space
@@ -628,6 +650,33 @@ config ARM_THUMBEE
 	  Say Y here if you have a CPU with the ThumbEE extension and code to
 	  make use of it. Say N for code that can run on CPUs without ThumbEE.
 
+config SWP_EMULATE
+	bool "Emulate SWP/SWPB instructions"
+	depends on !CPU_USE_DOMAINS && CPU_V7
+	select HAVE_PROC_CPU if PROC_FS
+	default y if SMP
+	help
+	  ARMv6 architecture deprecates use of the SWP/SWPB instructions.
+	  ARMv7 multiprocessing extensions introduce the ability to disable
+	  these instructions, triggering an undefined instruction exception
+	  when executed. Say Y here to enable software emulation of these
+	  instructions for userspace (not kernel) using LDREX/STREX.
+	  Also creates /proc/cpu/swp_emulation for statistics.
+
+	  In some older versions of glibc [<=2.8] SWP is used during futex
+	  trylock() operations with the assumption that the code will not
+	  be preempted. This invalid assumption may be more likely to fail
+	  with SWP emulation enabled, leading to deadlock of the user
+	  application.
+
+	  NOTE: when accessing uncached shared regions, LDREX/STREX rely
+	  on an external transaction monitoring block called a global
+	  monitor to maintain update atomicity. If your system does not
+	  implement a global monitor, this option can cause programs that
+	  perform SWP operations to uncached memory to deadlock.
+
+	  If unsure, say Y.
+
 config CPU_BIG_ENDIAN
 	bool "Build big-endian kernel"
 	depends on ARCH_SUPPORTS_BIG_ENDIAN
@@ -640,7 +689,7 @@ config CPU_BIG_ENDIAN
 config CPU_ENDIAN_BE8
 	bool
 	depends on CPU_BIG_ENDIAN
-	default CPU_V6 || CPU_V7
+	default CPU_V6 || CPU_V6K || CPU_V7
 	help
 	  Support for the BE-8 (big-endian) mode on ARMv6 and ARMv7 processors.
 
@@ -706,7 +755,7 @@ config CPU_CACHE_ROUND_ROBIN
 
 config CPU_BPREDICT_DISABLE
 	bool "Disable branch prediction"
-	depends on CPU_ARM1020 || CPU_V6 || CPU_MOHAWK || CPU_XSC3 || CPU_V7 || CPU_FA526
+	depends on CPU_ARM1020 || CPU_V6 || CPU_V6K || CPU_MOHAWK || CPU_XSC3 || CPU_V7 || CPU_FA526
 	help
 	  Say Y here to disable branch prediction. If unsure, say N.
 
@@ -726,7 +775,7 @@ config NEEDS_SYSCALL_FOR_CMPXCHG
 
 config DMA_CACHE_RWFO
 	bool "Enable read/write for ownership DMA cache maintenance"
-	depends on CPU_V6 && SMP
+	depends on CPU_V6K && SMP
 	default y
 	help
 	  The Snoop Control Unit on ARM11MPCore does not detect the
@@ -772,7 +821,7 @@ config CACHE_L2X0
 	depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \
 		   REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || \
 		   ARCH_NOMADIK || ARCH_OMAP4 || ARCH_S5PV310 || ARCH_TEGRA || \
-		   ARCH_U8500 || ARCH_VEXPRESS_CA9X4
+		   ARCH_U8500 || ARCH_VEXPRESS_CA9X4 || ARCH_SHMOBILE
 	default y
 	select OUTER_CACHE
 	select OUTER_CACHE_SYNC
@@ -782,14 +831,14 @@ config CACHE_L2X0
 config CACHE_PL310
 	bool
 	depends on CACHE_L2X0
-	default y if CPU_V7 && !CPU_V6
+	default y if CPU_V7 && !(CPU_V6 || CPU_V6K)
 	help
 	  This option enables optimisations for the PL310 cache
 	  controller.
 
 config CACHE_TAUROS2
 	bool "Enable the Tauros2 L2 cache controller"
-	depends on (ARCH_DOVE || ARCH_MMP)
+	depends on (ARCH_DOVE || ARCH_MMP || CPU_PJ4)
 	default y
 	select OUTER_CACHE
 	help
@@ -804,16 +853,21 @@ config CACHE_XSC3L2
 	help
 	  This option enables the L2 cache on XScale3.
 
+config ARM_L1_CACHE_SHIFT_6
+	bool
+	help
+	  Setting ARM L1 cache line size to 64 Bytes.
+
 config ARM_L1_CACHE_SHIFT
 	int
 	default 6 if ARM_L1_CACHE_SHIFT_6
 	default 5
 
 config ARM_DMA_MEM_BUFFERABLE
-	bool "Use non-cacheable memory for DMA" if CPU_V6 && !CPU_V7
+	bool "Use non-cacheable memory for DMA" if (CPU_V6 || CPU_V6K) && !CPU_V7
 	depends on !(MACH_REALVIEW_PB1176 || REALVIEW_EB_ARM11MP || \
 		     MACH_REALVIEW_PB11MP)
-	default y if CPU_V6 || CPU_V7
+	default y if CPU_V6 || CPU_V6K || CPU_V7
 	help
 	  Historically, the kernel has used strongly ordered mappings to
 	  provide DMA coherent memory. With the advent of ARMv7, mapping
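
Note on SWP_EMULATE: the LDREX/STREX sequence that the emulation substitutes
for the deprecated SWP instruction behaves like the sketch below (a minimal,
hypothetical user-space illustration, not the kernel's handler; assumes an
ARMv6+ toolchain such as arm-linux-gnueabi-gcc -march=armv7-a):

static inline unsigned long atomic_swap(unsigned long *ptr, unsigned long new)
{
	unsigned long old, tmp;

	__asm__ __volatile__(
	"1:	ldrex	%0, [%2]\n"	/* load old value, mark exclusive */
	"	strex	%1, %3, [%2]\n"	/* try to store new; %1 == 0 on success */
	"	teq	%1, #0\n"
	"	bne	1b\n"		/* exclusivity lost: retry */
	: "=&r" (old), "=&r" (tmp)
	: "r" (ptr), "r" (new)
	: "cc", "memory");

	return old;
}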
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 00d74a04af3a..bca7e61928c7 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -90,6 +90,7 @@ obj-$(CONFIG_CPU_XSC3) += proc-xsc3.o
 obj-$(CONFIG_CPU_MOHAWK)	+= proc-mohawk.o
 obj-$(CONFIG_CPU_FEROCEON)	+= proc-feroceon.o
 obj-$(CONFIG_CPU_V6)		+= proc-v6.o
+obj-$(CONFIG_CPU_V6K)		+= proc-v6.o
 obj-$(CONFIG_CPU_V7)		+= proc-v7.o
 
 AFLAGS_proc-v6.o	:=-Wa,-march=armv6
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index f332df7f0d37..1478aa522144 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -20,11 +20,11 @@
  */
 	.align	5
 ENTRY(v6_early_abort)
-#ifdef CONFIG_CPU_32v6K
-	clrex
-#else
+#ifdef CONFIG_CPU_V6
 	sub	r1, sp, #4			@ Get unused stack location
 	strex	r0, r1, [r1]			@ Clear the exclusive monitor
+#elif defined(CONFIG_CPU_32v6K)
+	clrex
 #endif
 	mrc	p15, 0, r1, c5, c0, 0		@ get FSR
 	mrc	p15, 0, r0, c6, c0, 0		@ get FAR
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index 6e77c042d8e9..e0b0e7a4ec68 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -13,13 +13,9 @@
  */
 
 #include <linux/init.h>
+#include <linux/highmem.h>
 #include <asm/cacheflush.h>
-#include <asm/kmap_types.h>
-#include <asm/fixmap.h>
-#include <asm/pgtable.h>
-#include <asm/tlbflush.h>
 #include <plat/cache-feroceon-l2.h>
-#include "mm.h"
 
 /*
  * Low-level cache maintenance operations.
@@ -39,27 +35,30 @@
  * between which we don't want to be preempted.
  */
 
-static inline unsigned long l2_start_va(unsigned long paddr)
+static inline unsigned long l2_get_va(unsigned long paddr)
 {
 #ifdef CONFIG_HIGHMEM
 	/*
-	 * Let's do our own fixmap stuff in a minimal way here.
 	 * Because range ops can't be done on physical addresses,
 	 * we simply install a virtual mapping for it only for the
 	 * TLB lookup to occur, hence no need to flush the untouched
-	 * memory mapping. This is protected with the disabling of
-	 * interrupts by the caller.
+	 * memory mapping afterwards (note: a cache flush may happen
+	 * in some circumstances depending on the path taken in kunmap_atomic).
 	 */
-	unsigned long idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id();
-	unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	set_pte_ext(TOP_PTE(vaddr), pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL), 0);
-	local_flush_tlb_kernel_page(vaddr);
-	return vaddr + (paddr & ~PAGE_MASK);
+	void *vaddr = kmap_atomic_pfn(paddr >> PAGE_SHIFT);
+	return (unsigned long)vaddr + (paddr & ~PAGE_MASK);
 #else
 	return __phys_to_virt(paddr);
 #endif
 }
 
+static inline void l2_put_va(unsigned long vaddr)
+{
+#ifdef CONFIG_HIGHMEM
+	kunmap_atomic((void *)vaddr);
+#endif
+}
+
 static inline void l2_clean_pa(unsigned long addr)
 {
 	__asm__("mcr p15, 1, %0, c15, c9, 3" : : "r" (addr));
@@ -76,13 +75,14 @@ static inline void l2_clean_pa_range(unsigned long start, unsigned long end)
  */
 	BUG_ON((start ^ end) >> PAGE_SHIFT);
 
-	raw_local_irq_save(flags);
-	va_start = l2_start_va(start);
+	va_start = l2_get_va(start);
 	va_end = va_start + (end - start);
+	raw_local_irq_save(flags);
 	__asm__("mcr p15, 1, %0, c15, c9, 4\n\t"
 		"mcr p15, 1, %1, c15, c9, 5"
 		: : "r" (va_start), "r" (va_end));
 	raw_local_irq_restore(flags);
+	l2_put_va(va_start);
 }
 
 static inline void l2_clean_inv_pa(unsigned long addr)
@@ -106,13 +106,14 @@ static inline void l2_inv_pa_range(unsigned long start, unsigned long end)
  */
 	BUG_ON((start ^ end) >> PAGE_SHIFT);
 
-	raw_local_irq_save(flags);
-	va_start = l2_start_va(start);
+	va_start = l2_get_va(start);
 	va_end = va_start + (end - start);
+	raw_local_irq_save(flags);
 	__asm__("mcr p15, 1, %0, c15, c11, 4\n\t"
 		"mcr p15, 1, %1, c15, c11, 5"
 		: : "r" (va_start), "r" (va_end));
 	raw_local_irq_restore(flags);
+	l2_put_va(va_start);
 }
 
 static inline void l2_inv_all(void)
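
Note: the kmap_atomic_pfn() pattern adopted above amounts to the following
sketch (hypothetical caller shown for illustration, assuming CONFIG_HIGHMEM;
not code from this patch):

	void *page_va = kmap_atomic_pfn(paddr >> PAGE_SHIFT);
	unsigned long va = (unsigned long)page_va + (paddr & ~PAGE_MASK);

	/* ... issue MVA-based cache maintenance on 'va' ... */

	kunmap_atomic(page_va);		/* drop the temporary mapping */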
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 170c9bb95866..ef59099a5463 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -49,7 +49,13 @@ static inline void cache_wait(void __iomem *reg, unsigned long mask)
 static inline void cache_sync(void)
 {
 	void __iomem *base = l2x0_base;
+
+#ifdef CONFIG_ARM_ERRATA_753970
+	/* write to an unmmapped register */
+	writel_relaxed(0, base + L2X0_DUMMY_REG);
+#else
 	writel_relaxed(0, base + L2X0_CACHE_SYNC);
+#endif
 	cache_wait(base + L2X0_CACHE_SYNC, 1);
 }
 
@@ -67,18 +73,24 @@ static inline void l2x0_inv_line(unsigned long addr)
 	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
 }
 
-#ifdef CONFIG_PL310_ERRATA_588369
-static void debug_writel(unsigned long val)
-{
-	extern void omap_smc1(u32 fn, u32 arg);
+#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
 
-	/*
-	 * Texas Instrument secure monitor api to modify the
-	 * PL310 Debug Control Register.
-	 */
-	omap_smc1(0x100, val);
+#define debug_writel(val)	outer_cache.set_debug(val)
+
+static void l2x0_set_debug(unsigned long val)
+{
+	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
+}
+#else
+/* Optimised out for non-errata case */
+static inline void debug_writel(unsigned long val)
+{
 }
 
+#define l2x0_set_debug	NULL
+#endif
+
+#ifdef CONFIG_PL310_ERRATA_588369
 static inline void l2x0_flush_line(unsigned long addr)
 {
 	void __iomem *base = l2x0_base;
@@ -91,11 +103,6 @@ static inline void l2x0_flush_line(unsigned long addr)
 }
 #else
 
-/* Optimised out for non-errata case */
-static inline void debug_writel(unsigned long val)
-{
-}
-
 static inline void l2x0_flush_line(unsigned long addr)
 {
 	void __iomem *base = l2x0_base;
@@ -119,9 +126,11 @@ static void l2x0_flush_all(void)
 
 	/* clean all ways */
 	spin_lock_irqsave(&l2x0_lock, flags);
+	debug_writel(0x03);
 	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
 	cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
 	cache_sync();
+	debug_writel(0x00);
 	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
@@ -329,6 +338,7 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 	outer_cache.flush_all = l2x0_flush_all;
 	outer_cache.inv_all = l2x0_inv_all;
 	outer_cache.disable = l2x0_disable;
+	outer_cache.set_debug = l2x0_set_debug;
 
 	printk(KERN_INFO "%s cache controller enabled\n", type);
 	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
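
Note: with the new outer_cache.set_debug hook, a platform whose PL310 debug
register is only writable from the secure side can override the default that
l2x0_init() installs. A sketch using the omap_smc1() call referenced in the
removed code (assumption: this would run from OMAP4 platform init, after
l2x0_init()):

extern void omap_smc1(u32 fn, u32 arg);

static void omap4_l2x0_set_debug(unsigned long val)
{
	/* modify the PL310 Debug Control Register via the secure monitor */
	omap_smc1(0x100, val);
}

	/* ... */
	outer_cache.set_debug = omap4_l2x0_set_debug;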
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 99fa688dfadd..c96fa1b3f49f 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -203,6 +203,10 @@ ENTRY(v6_flush_kern_dcache_area)
  *	- end     - virtual end address of region
  */
 v6_dma_inv_range:
+#ifdef CONFIG_DMA_CACHE_RWFO
+	ldrb	r2, [r0]			@ read for ownership
+	strb	r2, [r0]			@ write for ownership
+#endif
 	tst	r0, #D_CACHE_LINE_SIZE - 1
 	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
 #ifdef HARVARD_CACHE
@@ -211,6 +215,10 @@ v6_dma_inv_range:
 	mcrne	p15, 0, r0, c7, c11, 1		@ clean unified line
 #endif
 	tst	r1, #D_CACHE_LINE_SIZE - 1
+#ifdef CONFIG_DMA_CACHE_RWFO
+	ldrneb	r2, [r1, #-1]			@ read for ownership
+	strneb	r2, [r1, #-1]			@ write for ownership
+#endif
 	bic	r1, r1, #D_CACHE_LINE_SIZE - 1
 #ifdef HARVARD_CACHE
 	mcrne	p15, 0, r1, c7, c14, 1		@ clean & invalidate D line
@@ -218,10 +226,6 @@ v6_dma_inv_range:
 	mcrne	p15, 0, r1, c7, c15, 1		@ clean & invalidate unified line
 #endif
 1:
-#ifdef CONFIG_DMA_CACHE_RWFO
-	ldr	r2, [r0]			@ read for ownership
-	str	r2, [r0]			@ write for ownership
-#endif
 #ifdef HARVARD_CACHE
 	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D line
 #else
@@ -229,6 +233,10 @@ v6_dma_inv_range:
 #endif
 	add	r0, r0, #D_CACHE_LINE_SIZE
 	cmp	r0, r1
+#ifdef CONFIG_DMA_CACHE_RWFO
+	ldrlo	r2, [r0]			@ read for ownership
+	strlo	r2, [r0]			@ write for ownership
+#endif
 	blo	1b
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
@@ -263,12 +271,12 @@ v6_dma_clean_range:
  *	- end     - virtual end address of region
  */
 ENTRY(v6_dma_flush_range)
-	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
-1:
 #ifdef CONFIG_DMA_CACHE_RWFO
-	ldr	r2, [r0]			@ read for ownership
-	str	r2, [r0]			@ write for ownership
+	ldrb	r2, [r0]			@ read for ownership
+	strb	r2, [r0]			@ write for ownership
 #endif
+	bic	r0, r0, #D_CACHE_LINE_SIZE - 1
+1:
 #ifdef HARVARD_CACHE
 	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
 #else
@@ -276,6 +284,10 @@ ENTRY(v6_dma_flush_range)
 #endif
 	add	r0, r0, #D_CACHE_LINE_SIZE
 	cmp	r0, r1
+#ifdef CONFIG_DMA_CACHE_RWFO
+	ldrlob	r2, [r0]			@ read for ownership
+	strlob	r2, [r0]			@ write for ownership
+#endif
 	blo	1b
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index a3ebf7a4f49b..6136e68ce953 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -173,15 +173,22 @@ ENTRY(v7_coherent_user_range)
  UNWIND(.fnstart		)
 	dcache_line_size r2, r3
 	sub	r3, r2, #1
-	bic	r0, r0, r3
+	bic	r12, r0, r3
 1:
- USER(	mcr	p15, 0, r0, c7, c11, 1	)	@ clean D line to the point of unification
+ USER(	mcr	p15, 0, r12, c7, c11, 1	)	@ clean D line to the point of unification
+	add	r12, r12, r2
+	cmp	r12, r1
+	blo	1b
 	dsb
- USER(	mcr	p15, 0, r0, c7, c5, 1	)	@ invalidate I line
-	add	r0, r0, r2
+	icache_line_size r2, r3
+	sub	r3, r2, #1
+	bic	r12, r0, r3
 2:
-	cmp	r0, r1
-	blo	1b
+ USER(	mcr	p15, 0, r12, c7, c5, 1	)	@ invalidate I line
+	add	r12, r12, r2
+	cmp	r12, r1
+	blo	2b
+3:
 	mov	r0, #0
 	ALT_SMP(mcr	p15, 0, r0, c7, c1, 6)	@ invalidate BTB Inner Shareable
 	ALT_UP(mcr	p15, 0, r0, c7, c5, 6)	@ invalidate BTB
@@ -194,10 +201,10 @@ ENTRY(v7_coherent_user_range)
  * isn't mapped, just try the next page.
  */
 9001:
-	mov	r0, r0, lsr #12
-	mov	r0, r0, lsl #12
-	add	r0, r0, #4096
-	b	2b
+	mov	r12, r12, lsr #12
+	mov	r12, r12, lsl #12
+	add	r12, r12, #4096
+	b	3b
  UNWIND(.fnend		)
 ENDPROC(v7_coherent_kern_range)
 ENDPROC(v7_coherent_user_range)
diff --git a/arch/arm/mm/cache-xsc3l2.c b/arch/arm/mm/cache-xsc3l2.c
index c3154928bccd..5a32020471e3 100644
--- a/arch/arm/mm/cache-xsc3l2.c
+++ b/arch/arm/mm/cache-xsc3l2.c
@@ -17,14 +17,10 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  */
 #include <linux/init.h>
+#include <linux/highmem.h>
 #include <asm/system.h>
 #include <asm/cputype.h>
 #include <asm/cacheflush.h>
-#include <asm/kmap_types.h>
-#include <asm/fixmap.h>
-#include <asm/pgtable.h>
-#include <asm/tlbflush.h>
-#include "mm.h"
 
 #define CR_L2	(1 << 26)
 
@@ -71,16 +67,15 @@ static inline void xsc3_l2_inv_all(void)
 	dsb();
 }
 
+static inline void l2_unmap_va(unsigned long va)
+{
 #ifdef CONFIG_HIGHMEM
-#define l2_map_save_flags(x)		raw_local_save_flags(x)
-#define l2_map_restore_flags(x)		raw_local_irq_restore(x)
-#else
-#define l2_map_save_flags(x)		((x) = 0)
-#define l2_map_restore_flags(x)		((void)(x))
+	if (va != -1)
+		kunmap_atomic((void *)va);
 #endif
+}
 
-static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va,
-				      unsigned long flags)
+static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va)
 {
 #ifdef CONFIG_HIGHMEM
 	unsigned long va = prev_va & PAGE_MASK;
@@ -89,17 +84,10 @@ static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va,
 		/*
 		 * Switching to a new page.  Because cache ops are
 		 * using virtual addresses only, we must put a mapping
-		 * in place for it.  We also enable interrupts for a
-		 * short while and disable them again to protect this
-		 * mapping.
+		 * in place for it.
 		 */
-		unsigned long idx;
-		raw_local_irq_restore(flags);
-		idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id();
-		va = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-		raw_local_irq_restore(flags | PSR_I_BIT);
-		set_pte_ext(TOP_PTE(va), pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL), 0);
-		local_flush_tlb_kernel_page(va);
+		l2_unmap_va(prev_va);
+		va = (unsigned long)kmap_atomic_pfn(pa >> PAGE_SHIFT);
 	}
 	return va + (pa_offset >> (32 - PAGE_SHIFT));
 #else
@@ -109,7 +97,7 @@ static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va,
 
 static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
 {
-	unsigned long vaddr, flags;
+	unsigned long vaddr;
 
 	if (start == 0 && end == -1ul) {
 		xsc3_l2_inv_all();
@@ -117,13 +105,12 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
 	}
 
 	vaddr = -1;  /* to force the first mapping */
-	l2_map_save_flags(flags);
 
 	/*
 	 * Clean and invalidate partial first cache line.
 	 */
 	if (start & (CACHE_LINE_SIZE - 1)) {
-		vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr, flags);
+		vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr);
 		xsc3_l2_clean_mva(vaddr);
 		xsc3_l2_inv_mva(vaddr);
 		start = (start | (CACHE_LINE_SIZE - 1)) + 1;
@@ -133,7 +120,7 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
 	 * Invalidate all full cache lines between 'start' and 'end'.
 	 */
 	while (start < (end & ~(CACHE_LINE_SIZE - 1))) {
-		vaddr = l2_map_va(start, vaddr, flags);
+		vaddr = l2_map_va(start, vaddr);
 		xsc3_l2_inv_mva(vaddr);
 		start += CACHE_LINE_SIZE;
 	}
@@ -142,31 +129,30 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
 	 * Clean and invalidate partial last cache line.
 	 */
 	if (start < end) {
-		vaddr = l2_map_va(start, vaddr, flags);
+		vaddr = l2_map_va(start, vaddr);
 		xsc3_l2_clean_mva(vaddr);
 		xsc3_l2_inv_mva(vaddr);
 	}
 
-	l2_map_restore_flags(flags);
+	l2_unmap_va(vaddr);
 
 	dsb();
 }
 
 static void xsc3_l2_clean_range(unsigned long start, unsigned long end)
 {
-	unsigned long vaddr, flags;
+	unsigned long vaddr;
 
 	vaddr = -1;  /* to force the first mapping */
-	l2_map_save_flags(flags);
 
 	start &= ~(CACHE_LINE_SIZE - 1);
 	while (start < end) {
-		vaddr = l2_map_va(start, vaddr, flags);
+		vaddr = l2_map_va(start, vaddr);
 		xsc3_l2_clean_mva(vaddr);
 		start += CACHE_LINE_SIZE;
 	}
 
-	l2_map_restore_flags(flags);
+	l2_unmap_va(vaddr);
 
 	dsb();
 }
@@ -193,7 +179,7 @@ static inline void xsc3_l2_flush_all(void)
 
 static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
 {
-	unsigned long vaddr, flags;
+	unsigned long vaddr;
 
 	if (start == 0 && end == -1ul) {
 		xsc3_l2_flush_all();
@@ -201,17 +187,16 @@ static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
 	}
 
 	vaddr = -1;  /* to force the first mapping */
-	l2_map_save_flags(flags);
 
 	start &= ~(CACHE_LINE_SIZE - 1);
 	while (start < end) {
-		vaddr = l2_map_va(start, vaddr, flags);
+		vaddr = l2_map_va(start, vaddr);
 		xsc3_l2_clean_mva(vaddr);
 		xsc3_l2_inv_mva(vaddr);
 		start += CACHE_LINE_SIZE;
 	}
 
-	l2_map_restore_flags(flags);
+	l2_unmap_va(vaddr);
 
 	dsb();
 }
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index a9bdfcda23f4..82a093cee09a 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -17,6 +17,7 @@
 #include <linux/init.h>
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
+#include <linux/highmem.h>
 
 #include <asm/memory.h>
 #include <asm/highmem.h>
@@ -320,7 +321,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	addr = page_address(page);
 
 	if (addr)
-		*handle = page_to_dma(dev, page);
+		*handle = pfn_to_dma(dev, page_to_pfn(page));
 
 	return addr;
 }
@@ -415,7 +416,7 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
 	if (!arch_is_coherent())
 		__dma_free_remap(cpu_addr, size);
 
-	__dma_free_buffer(dma_to_page(dev, handle), size);
+	__dma_free_buffer(pfn_to_page(dma_to_pfn(dev, handle)), size);
 }
 EXPORT_SYMBOL(dma_free_coherent);
 
@@ -489,10 +490,10 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 			op(vaddr, len, dir);
 			kunmap_high(page);
 		} else if (cache_is_vipt()) {
-			pte_t saved_pte;
-			vaddr = kmap_high_l1_vipt(page, &saved_pte);
+			/* unmapped pages might still be cached */
+			vaddr = kmap_atomic(page);
 			op(vaddr + offset, len, dir);
-			kunmap_high_l1_vipt(page, saved_pte);
+			kunmap_atomic(vaddr);
 		}
 	} else {
 		vaddr = page_address(page) + offset;
@@ -563,17 +564,20 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 	struct scatterlist *s;
 	int i, j;
 
+	BUG_ON(!valid_dma_direction(dir));
+
 	for_each_sg(sg, s, nents, i) {
-		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
+		s->dma_address = __dma_map_page(dev, sg_page(s), s->offset,
 						s->length, dir);
 		if (dma_mapping_error(dev, s->dma_address))
 			goto bad_mapping;
 	}
+	debug_dma_map_sg(dev, sg, nents, nents, dir);
 	return nents;
 
  bad_mapping:
 	for_each_sg(sg, s, i, j)
-		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+		__dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
 	return 0;
 }
 EXPORT_SYMBOL(dma_map_sg);
@@ -582,7 +586,7 @@ EXPORT_SYMBOL(dma_map_sg);
  * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
  * @sg: list of buffers
- * @nents: number of buffers to unmap (returned from dma_map_sg)
+ * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
  * @dir: DMA transfer direction (same as was passed to dma_map_sg)
  *
  * Unmap a set of streaming mode DMA translations.  Again, CPU access
@@ -594,8 +598,10 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 	struct scatterlist *s;
 	int i;
 
+	debug_dma_unmap_sg(dev, sg, nents, dir);
+
 	for_each_sg(sg, s, nents, i)
-		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
+		__dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
 }
 EXPORT_SYMBOL(dma_unmap_sg);
 
@@ -620,6 +626,8 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 		__dma_page_dev_to_cpu(sg_page(s), s->offset,
 				      s->length, dir);
 	}
+
+	debug_dma_sync_sg_for_cpu(dev, sg, nents, dir);
 }
 EXPORT_SYMBOL(dma_sync_sg_for_cpu);
 
@@ -644,5 +652,16 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 		__dma_page_cpu_to_dev(sg_page(s), s->offset,
 				      s->length, dir);
 	}
+
+	debug_dma_sync_sg_for_device(dev, sg, nents, dir);
 }
 EXPORT_SYMBOL(dma_sync_sg_for_device);
+
+#define PREALLOC_DMA_DEBUG_ENTRIES	4096
+
+static int __init dma_debug_do_init(void)
+{
+	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
+	return 0;
+}
+fs_initcall(dma_debug_do_init);
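
Note: a driver consumes the scatterlist API instrumented above roughly as
follows (hypothetical usage sketch; dma_map_sg() itself unwinds partial
mappings on failure via the bad_mapping path shown in the hunk above):

static int start_dma(struct device *dev, struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i, count;

	count = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
	if (count == 0)
		return -ENOMEM;		/* nothing left mapped */

	for_each_sg(sg, s, count, i) {
		/* program the device with sg_dma_address(s) / sg_dma_len(s) */
	}

	/* ... once the transfer has completed: */
	dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);
	return 0;
}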
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 391ffae75098..2b269c955524 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
+#include <linux/highmem.h>
 
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
@@ -17,7 +18,6 @@
 #include <asm/smp_plat.h>
 #include <asm/system.h>
 #include <asm/tlbflush.h>
-#include <asm/smp_plat.h>
 
 #include "mm.h"
 
@@ -180,10 +180,10 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 		__cpuc_flush_dcache_area(addr, PAGE_SIZE);
 		kunmap_high(page);
 	} else if (cache_is_vipt()) {
-		pte_t saved_pte;
-		addr = kmap_high_l1_vipt(page, &saved_pte);
+		/* unmapped pages might still be cached */
+		addr = kmap_atomic(page);
 		__cpuc_flush_dcache_area(addr, PAGE_SIZE);
-		kunmap_high_l1_vipt(page, saved_pte);
+		kunmap_atomic(addr);
 	}
 }
 
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index c435fd9e1da9..807c0573abbe 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -140,90 +140,3 @@ struct page *kmap_atomic_to_page(const void *ptr)
 	pte = TOP_PTE(vaddr);
 	return pte_page(*pte);
 }
-
-#ifdef CONFIG_CPU_CACHE_VIPT
-
-#include <linux/percpu.h>
-
-/*
- * The VIVT cache of a highmem page is always flushed before the page
- * is unmapped. Hence unmapped highmem pages need no cache maintenance
- * in that case.
- *
- * However unmapped pages may still be cached with a VIPT cache, and
- * it is not possible to perform cache maintenance on them using physical
- * addresses unfortunately. So we have no choice but to set up a temporary
- * virtual mapping for that purpose.
- *
- * Yet this VIPT cache maintenance may be triggered from DMA support
- * functions which are possibly called from interrupt context. As we don't
- * want to keep interrupt disabled all the time when such maintenance is
- * taking place, we therefore allow for some reentrancy by preserving and
- * restoring the previous fixmap entry before the interrupted context is
- * resumed. If the reentrancy depth is 0 then there is no need to restore
- * the previous fixmap, and leaving the current one in place allow it to
- * be reused the next time without a TLB flush (common with DMA).
- */
-
-static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
-
-void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
-{
-	unsigned int idx, cpu;
-	int *depth;
-	unsigned long vaddr, flags;
-	pte_t pte, *ptep;
-
-	if (!in_interrupt())
-		preempt_disable();
-
-	cpu = smp_processor_id();
-	depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
-
-	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	ptep = TOP_PTE(vaddr);
-	pte = mk_pte(page, kmap_prot);
-
-	raw_local_irq_save(flags);
-	(*depth)++;
-	if (pte_val(*ptep) == pte_val(pte)) {
-		*saved_pte = pte;
-	} else {
-		*saved_pte = *ptep;
-		set_pte_ext(ptep, pte, 0);
-		local_flush_tlb_kernel_page(vaddr);
-	}
-	raw_local_irq_restore(flags);
-
-	return (void *)vaddr;
-}
-
-void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
-{
-	unsigned int idx, cpu = smp_processor_id();
-	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
-	unsigned long vaddr, flags;
-	pte_t pte, *ptep;
-
-	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
-	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	ptep = TOP_PTE(vaddr);
-	pte = mk_pte(page, kmap_prot);
-
-	BUG_ON(pte_val(*ptep) != pte_val(pte));
-	BUG_ON(*depth <= 0);
-
-	raw_local_irq_save(flags);
-	(*depth)--;
-	if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
-		set_pte_ext(ptep, saved_pte, 0);
-		local_flush_tlb_kernel_page(vaddr);
-	}
-	raw_local_irq_restore(flags);
-
-	if (!in_interrupt())
-		preempt_enable();
-}
-
-#endif	/* CONFIG_CPU_CACHE_VIPT */
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 14a00a1ef52f..b3b0f0f5053d 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -297,6 +297,12 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
 	memblock_reserve(__pa(_stext), _end - _stext);
 #endif
 #ifdef CONFIG_BLK_DEV_INITRD
+	if (phys_initrd_size &&
+	    memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
+		pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n",
+		       phys_initrd_start, phys_initrd_size);
+		phys_initrd_start = phys_initrd_size = 0;
+	}
 	if (phys_initrd_size) {
 		memblock_reserve(phys_initrd_start, phys_initrd_size);
 
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 17e7b0b57e49..ab506272b2d3 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -204,12 +204,8 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	/*
 	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
 	 */
-	if (pfn_valid(pfn)) {
-		printk(KERN_WARNING "BUG: Your driver calls ioremap() on system memory.  This leads\n"
-		       KERN_WARNING "to architecturally unpredictable behaviour on ARMv6+, and ioremap()\n"
-		       KERN_WARNING "will fail in the next kernel release.  Please fix your driver.\n");
-		WARN_ON(1);
-	}
+	if (WARN_ON(pfn_valid(pfn)))
+		return NULL;
 
 	type = get_mem_type(mtype);
 	if (!type)
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index b0a98305055c..afe209e1e1f8 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -31,7 +31,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	unsigned long start_addr;
-#ifdef CONFIG_CPU_V6
+#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
 	unsigned int cache_type;
 	int do_align = 0, aliasing = 0;
 
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 82ef6966ae09..6cf76b3b68d1 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -24,6 +24,7 @@
 #include <asm/smp_plat.h>
 #include <asm/tlb.h>
 #include <asm/highmem.h>
+#include <asm/traps.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
@@ -843,16 +844,6 @@ static void __init sanity_check_meminfo(void)
 		 * rather difficult.
 		 */
 		reason = "with VIPT aliasing cache";
-	} else if (is_smp() && tlb_ops_need_broadcast()) {
-		/*
-		 * kmap_high needs to occasionally flush TLB entries,
-		 * however, if the TLB entries need to be broadcast
-		 * we may deadlock:
-		 *  kmap_high(irqs off)->flush_all_zero_pkmaps->
-		 *  flush_tlb_kernel_range->smp_call_function_many
-		 *  (must not be called with irqs off)
-		 */
-		reason = "without hardware TLB ops broadcasting";
 	}
 	if (reason) {
 		printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
@@ -930,12 +921,11 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 {
 	struct map_desc map;
 	unsigned long addr;
-	void *vectors;
 
 	/*
 	 * Allocate the vector page early.
 	 */
-	vectors = early_alloc(PAGE_SIZE);
+	vectors_page = early_alloc(PAGE_SIZE);
 
 	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
 		pmd_clear(pmd_off_k(addr));
@@ -975,7 +965,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	 * location (0xffff0000).  If we aren't using high-vectors, also
 	 * create a mapping at the low-vectors virtual address.
 	 */
-	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
+	map.pfn = __phys_to_pfn(virt_to_phys(vectors_page));
 	map.virtual = 0xffff0000;
 	map.length = PAGE_SIZE;
 	map.type = MT_HIGH_VECTORS;
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index f7fafb1741f4..b2027c154b2a 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -55,7 +55,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 	if (!new_pmd)
 		goto no_pmd;
 
-	new_pte = pte_alloc_map(mm, new_pmd, 0);
+	new_pte = pte_alloc_map(mm, NULL, new_pmd, 0);
 	if (!new_pte)
 		goto no_pte;
 
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index bcf748d9f4e2..226e3d8351c2 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -493,6 +493,9 @@ arm1020_processor_functions:
 	.word	cpu_arm1020_dcache_clean_area
 	.word	cpu_arm1020_switch_mm
 	.word	cpu_arm1020_set_pte_ext
+	.word	0
+	.word	0
+	.word	0
 	.size	arm1020_processor_functions, . - arm1020_processor_functions
 
 	.section ".rodata"
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index ab7ec26657ea..86d9c2cf0bce 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -474,6 +474,9 @@ arm1020e_processor_functions:
 	.word	cpu_arm1020e_dcache_clean_area
 	.word	cpu_arm1020e_switch_mm
 	.word	cpu_arm1020e_set_pte_ext
+	.word	0
+	.word	0
+	.word	0
 	.size	arm1020e_processor_functions, . - arm1020e_processor_functions
 
 	.section ".rodata"
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S
index 831c5e54e22f..83d3dd34f846 100644
--- a/arch/arm/mm/proc-arm1022.S
+++ b/arch/arm/mm/proc-arm1022.S
@@ -457,6 +457,9 @@ arm1022_processor_functions:
 	.word	cpu_arm1022_dcache_clean_area
 	.word	cpu_arm1022_switch_mm
 	.word	cpu_arm1022_set_pte_ext
+	.word	0
+	.word	0
+	.word	0
 	.size	arm1022_processor_functions, . - arm1022_processor_functions
 
 	.section ".rodata"
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S
index e3f7e9a166bf..686043ee7281 100644
--- a/arch/arm/mm/proc-arm1026.S
+++ b/arch/arm/mm/proc-arm1026.S
@@ -452,6 +452,9 @@ arm1026_processor_functions:
 	.word	cpu_arm1026_dcache_clean_area
 	.word	cpu_arm1026_switch_mm
 	.word	cpu_arm1026_set_pte_ext
+	.word	0
+	.word	0
+	.word	0
 	.size	arm1026_processor_functions, . - arm1026_processor_functions
 
 	.section .rodata
diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S
index 6a7be1863edd..5f79dc4ce3fb 100644
--- a/arch/arm/mm/proc-arm6_7.S
+++ b/arch/arm/mm/proc-arm6_7.S
@@ -284,6 +284,9 @@ ENTRY(arm6_processor_functions)
 	.word	cpu_arm6_dcache_clean_area
 	.word	cpu_arm6_switch_mm
 	.word	cpu_arm6_set_pte_ext
+	.word	0
+	.word	0
+	.word	0
 	.size	arm6_processor_functions, . - arm6_processor_functions
 
 /*
@@ -301,6 +304,9 @@ ENTRY(arm7_processor_functions)
 	.word	cpu_arm7_dcache_clean_area
 	.word	cpu_arm7_switch_mm
 	.word	cpu_arm7_set_pte_ext
+	.word	0
+	.word	0
+	.word	0
 	.size	arm7_processor_functions, . - arm7_processor_functions
 
 	.section ".rodata"
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S
index c285395f44b2..665266da143c 100644
--- a/arch/arm/mm/proc-arm720.S
+++ b/arch/arm/mm/proc-arm720.S
@@ -185,6 +185,9 @@ ENTRY(arm720_processor_functions)
 	.word	cpu_arm720_dcache_clean_area
 	.word	cpu_arm720_switch_mm
 	.word	cpu_arm720_set_pte_ext
+	.word	0
+	.word	0
+	.word	0
 	.size	arm720_processor_functions, . - arm720_processor_functions
 
 	.section ".rodata"
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S
index 38b27dcba727..6f9d12effee1 100644
--- a/arch/arm/mm/proc-arm740.S
+++ b/arch/arm/mm/proc-arm740.S
@@ -130,6 +130,9 @@ ENTRY(arm740_processor_functions)
 	.word	cpu_arm740_dcache_clean_area
 	.word	cpu_arm740_switch_mm
 	.word	0			@ cpu_*_set_pte
+	.word	0
+	.word	0
+	.word	0
 	.size	arm740_processor_functions, . - arm740_processor_functions
 
 	.section ".rodata"
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S
index 0c9786de20af..e4c165ca6696 100644
--- a/arch/arm/mm/proc-arm7tdmi.S
+++ b/arch/arm/mm/proc-arm7tdmi.S
@@ -70,6 +70,9 @@ ENTRY(arm7tdmi_processor_functions)
 	.word	cpu_arm7tdmi_dcache_clean_area
 	.word	cpu_arm7tdmi_switch_mm
 	.word	0			@ cpu_*_set_pte
+	.word	0
+	.word	0
+	.word	0
 	.size	arm7tdmi_processor_functions, . - arm7tdmi_processor_functions
 
 	.section ".rodata"
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S
index 6109f278a904..219980ec8b6e 100644
--- a/arch/arm/mm/proc-arm920.S
+++ b/arch/arm/mm/proc-arm920.S
@@ -387,6 +387,40 @@ ENTRY(cpu_arm920_set_pte_ext)
 #endif
 	mov	pc, lr
 
+/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
+.globl	cpu_arm920_suspend_size
+.equ	cpu_arm920_suspend_size, 4 * 3
+#ifdef CONFIG_PM
+ENTRY(cpu_arm920_do_suspend)
+	stmfd	sp!, {r4 - r7, lr}
+	mrc	p15, 0, r4, c13, c0, 0	@ PID
+	mrc	p15, 0, r5, c3, c0, 0	@ Domain ID
+	mrc	p15, 0, r6, c2, c0, 0	@ TTB address
+	mrc	p15, 0, r7, c1, c0, 0	@ Control register
+	stmia	r0, {r4 - r7}
+	ldmfd	sp!, {r4 - r7, pc}
+ENDPROC(cpu_arm920_do_suspend)
+
+ENTRY(cpu_arm920_do_resume)
+	mov	ip, #0
+	mcr	p15, 0, ip, c8, c7, 0	@ invalidate I+D TLBs
+	mcr	p15, 0, ip, c7, c7, 0	@ invalidate I+D caches
+	ldmia	r0, {r4 - r7}
+	mcr	p15, 0, r4, c13, c0, 0	@ PID
+	mcr	p15, 0, r5, c3, c0, 0	@ Domain ID
+	mcr	p15, 0, r6, c2, c0, 0	@ TTB address
+	mov	r0, r7			@ control register
+	mov	r2, r6, lsr #14		@ get TTB0 base
+	mov	r2, r2, lsl #14
+	ldr	r3, =PMD_TYPE_SECT | PMD_SECT_BUFFERABLE | \
+		     PMD_SECT_CACHEABLE | PMD_BIT4 | PMD_SECT_AP_WRITE
+	b	cpu_resume_mmu
+ENDPROC(cpu_arm920_do_resume)
+#else
+#define cpu_arm920_do_suspend	0
+#define cpu_arm920_do_resume	0
+#endif
+
 	__CPUINIT
 
 	.type	__arm920_setup, #function
@@ -432,6 +466,9 @@ arm920_processor_functions:
 	.word	cpu_arm920_dcache_clean_area
 	.word	cpu_arm920_switch_mm
 	.word	cpu_arm920_set_pte_ext
+	.word	cpu_arm920_suspend_size
+	.word	cpu_arm920_do_suspend
+	.word	cpu_arm920_do_resume
 	.size	arm920_processor_functions, . - arm920_processor_functions
 
 	.section ".rodata"
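
Note: the three new .word entries correspond to the suspend fields of the
processor-functions vector; roughly (a sketch of the consuming side only,
with the struct name invented here and field names assumed from the table
order):

struct processor_suspend_part {
	unsigned int suspend_size;		/* bytes of CP15 state saved */
	void (*do_suspend)(void *save_area);	/* save state to save_area */
	void (*do_resume)(void *save_area);	/* restore state, then return
						   through cpu_resume_mmu */
};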
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S
index bb2f0f46a5e6..36154b1e792a 100644
--- a/arch/arm/mm/proc-arm922.S
+++ b/arch/arm/mm/proc-arm922.S
@@ -436,6 +436,9 @@ arm922_processor_functions:
436 .word cpu_arm922_dcache_clean_area 436 .word cpu_arm922_dcache_clean_area
437 .word cpu_arm922_switch_mm 437 .word cpu_arm922_switch_mm
438 .word cpu_arm922_set_pte_ext 438 .word cpu_arm922_set_pte_ext
439 .word 0
440 .word 0
441 .word 0
439 .size arm922_processor_functions, . - arm922_processor_functions 442 .size arm922_processor_functions, . - arm922_processor_functions
440 443
441 .section ".rodata" 444 .section ".rodata"
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index c13e01accfe2..89c5e0009c4c 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -503,6 +503,9 @@ arm925_processor_functions:
503 .word cpu_arm925_dcache_clean_area 503 .word cpu_arm925_dcache_clean_area
504 .word cpu_arm925_switch_mm 504 .word cpu_arm925_switch_mm
505 .word cpu_arm925_set_pte_ext 505 .word cpu_arm925_set_pte_ext
506 .word 0
507 .word 0
508 .word 0
506 .size arm925_processor_functions, . - arm925_processor_functions 509 .size arm925_processor_functions, . - arm925_processor_functions
507 510
508 .section ".rodata" 511 .section ".rodata"
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 42eb4315740b..6a4bdb2c94a7 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -401,6 +401,40 @@ ENTRY(cpu_arm926_set_pte_ext)
401#endif 401#endif
402 mov pc, lr 402 mov pc, lr
403 403
404/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
405.globl cpu_arm926_suspend_size
406.equ cpu_arm926_suspend_size, 4 * 4
407#ifdef CONFIG_PM
408ENTRY(cpu_arm926_do_suspend)
409 stmfd sp!, {r4 - r7, lr}
410 mrc p15, 0, r4, c13, c0, 0 @ PID
411 mrc p15, 0, r5, c3, c0, 0 @ Domain ID
412 mrc p15, 0, r6, c2, c0, 0 @ TTB address
413 mrc p15, 0, r7, c1, c0, 0 @ Control register
414 stmia r0, {r4 - r7}
415 ldmfd sp!, {r4 - r7, pc}
416ENDPROC(cpu_arm926_do_suspend)
417
418ENTRY(cpu_arm926_do_resume)
419 mov ip, #0
420 mcr p15, 0, ip, c8, c7, 0 @ invalidate I+D TLBs
421 mcr p15, 0, ip, c7, c7, 0 @ invalidate I+D caches
422 ldmia r0, {r4 - r7}
423 mcr p15, 0, r4, c13, c0, 0 @ PID
424 mcr p15, 0, r5, c3, c0, 0 @ Domain ID
425 mcr p15, 0, r6, c2, c0, 0 @ TTB address
426 mov r0, r7 @ control register
427 mov r2, r6, lsr #14 @ get TTB0 base
428 mov r2, r2, lsl #14
429 ldr r3, =PMD_TYPE_SECT | PMD_SECT_BUFFERABLE | \
430 PMD_SECT_CACHEABLE | PMD_BIT4 | PMD_SECT_AP_WRITE
431 b cpu_resume_mmu
432ENDPROC(cpu_arm926_do_resume)
433#else
434#define cpu_arm926_do_suspend 0
435#define cpu_arm926_do_resume 0
436#endif
437
404 __CPUINIT 438 __CPUINIT
405 439
406 .type __arm926_setup, #function 440 .type __arm926_setup, #function
@@ -456,6 +490,9 @@ arm926_processor_functions:
456 .word cpu_arm926_dcache_clean_area 490 .word cpu_arm926_dcache_clean_area
457 .word cpu_arm926_switch_mm 491 .word cpu_arm926_switch_mm
458 .word cpu_arm926_set_pte_ext 492 .word cpu_arm926_set_pte_ext
493 .word cpu_arm926_suspend_size
494 .word cpu_arm926_do_suspend
495 .word cpu_arm926_do_resume
459 .size arm926_processor_functions, . - arm926_processor_functions 496 .size arm926_processor_functions, . - arm926_processor_functions
460 497
461 .section ".rodata" 498 .section ".rodata"
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
index 7b11cdb9935f..26aea3f71c26 100644
--- a/arch/arm/mm/proc-arm940.S
+++ b/arch/arm/mm/proc-arm940.S
@@ -363,6 +363,9 @@ ENTRY(arm940_processor_functions)
363 .word cpu_arm940_dcache_clean_area 363 .word cpu_arm940_dcache_clean_area
364 .word cpu_arm940_switch_mm 364 .word cpu_arm940_switch_mm
365 .word 0 @ cpu_*_set_pte 365 .word 0 @ cpu_*_set_pte
366 .word 0
367 .word 0
368 .word 0
366 .size arm940_processor_functions, . - arm940_processor_functions 369 .size arm940_processor_functions, . - arm940_processor_functions
367 370
368 .section ".rodata" 371 .section ".rodata"
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
index 1a5bbf080342..8063345406fe 100644
--- a/arch/arm/mm/proc-arm946.S
+++ b/arch/arm/mm/proc-arm946.S
@@ -419,6 +419,9 @@ ENTRY(arm946_processor_functions)
419 .word cpu_arm946_dcache_clean_area 419 .word cpu_arm946_dcache_clean_area
420 .word cpu_arm946_switch_mm 420 .word cpu_arm946_switch_mm
421 .word 0 @ cpu_*_set_pte 421 .word 0 @ cpu_*_set_pte
422 .word 0
423 .word 0
424 .word 0
422 .size arm946_processor_functions, . - arm946_processor_functions 425 .size arm946_processor_functions, . - arm946_processor_functions
423 426
424 .section ".rodata" 427 .section ".rodata"
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S
index db67e3134d7a..7b7ebd4d096d 100644
--- a/arch/arm/mm/proc-arm9tdmi.S
+++ b/arch/arm/mm/proc-arm9tdmi.S
@@ -70,6 +70,9 @@ ENTRY(arm9tdmi_processor_functions)
70 .word cpu_arm9tdmi_dcache_clean_area 70 .word cpu_arm9tdmi_dcache_clean_area
71 .word cpu_arm9tdmi_switch_mm 71 .word cpu_arm9tdmi_switch_mm
72 .word 0 @ cpu_*_set_pte 72 .word 0 @ cpu_*_set_pte
73 .word 0
74 .word 0
75 .word 0
73 .size arm9tdmi_processor_functions, . - arm9tdmi_processor_functions 76 .size arm9tdmi_processor_functions, . - arm9tdmi_processor_functions
74 77
75 .section ".rodata" 78 .section ".rodata"
diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S
index 7c9ad621f0e6..fc2a4ae15cf4 100644
--- a/arch/arm/mm/proc-fa526.S
+++ b/arch/arm/mm/proc-fa526.S
@@ -195,6 +195,9 @@ fa526_processor_functions:
195 .word cpu_fa526_dcache_clean_area 195 .word cpu_fa526_dcache_clean_area
196 .word cpu_fa526_switch_mm 196 .word cpu_fa526_switch_mm
197 .word cpu_fa526_set_pte_ext 197 .word cpu_fa526_set_pte_ext
198 .word 0
199 .word 0
200 .word 0
198 .size fa526_processor_functions, . - fa526_processor_functions 201 .size fa526_processor_functions, . - fa526_processor_functions
199 202
200 .section ".rodata" 203 .section ".rodata"
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index b4597edbff97..d3883eed7a4a 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -554,6 +554,9 @@ feroceon_processor_functions:
554 .word cpu_feroceon_dcache_clean_area 554 .word cpu_feroceon_dcache_clean_area
555 .word cpu_feroceon_switch_mm 555 .word cpu_feroceon_switch_mm
556 .word cpu_feroceon_set_pte_ext 556 .word cpu_feroceon_set_pte_ext
557 .word 0
558 .word 0
559 .word 0
557 .size feroceon_processor_functions, . - feroceon_processor_functions 560 .size feroceon_processor_functions, . - feroceon_processor_functions
558 561
559 .section ".rodata" 562 .section ".rodata"
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index f5ca6aaecdbd..e32fa499194c 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -61,17 +61,27 @@
61 .endm 61 .endm
62 62
63/* 63/*
64 * cache_line_size - get the cache line size from the CSIDR register 64 * dcache_line_size - get the minimum D-cache line size from the CTR register
65 * (available on ARMv7+). It assumes that the CSSR register was configured 65 * on ARMv7.
66 * to access the L1 data cache CSIDR.
67 */ 66 */
68 .macro dcache_line_size, reg, tmp 67 .macro dcache_line_size, reg, tmp
69 mrc p15, 1, \tmp, c0, c0, 0 @ read CSIDR 68 mrc p15, 0, \tmp, c0, c0, 1 @ read ctr
70 and \tmp, \tmp, #7 @ cache line size encoding 69 lsr \tmp, \tmp, #16
71 mov \reg, #16 @ size offset 70 and \tmp, \tmp, #0xf @ cache line size encoding
71 mov \reg, #4 @ bytes per word
72 mov \reg, \reg, lsl \tmp @ actual cache line size 72 mov \reg, \reg, lsl \tmp @ actual cache line size
73 .endm 73 .endm
74 74
75/*
76 * icache_line_size - get the minimum I-cache line size from the CTR register
77 * on ARMv7.
78 */
79 .macro icache_line_size, reg, tmp
80 mrc p15, 0, \tmp, c0, c0, 1 @ read ctr
81 and \tmp, \tmp, #0xf @ cache line size encoding
82 mov \reg, #4 @ bytes per word
83 mov \reg, \reg, lsl \tmp @ actual cache line size
84 .endm
75 85
76/* 86/*
77 * Sanity check the PTE configuration for the code below - which makes 87 * Sanity check the PTE configuration for the code below - which makes
@@ -99,6 +109,10 @@
99 * 110x 0 1 0 r/w r/o 109 * 110x 0 1 0 r/w r/o
100 * 11x0 0 1 0 r/w r/o 110 * 11x0 0 1 0 r/w r/o
101 * 1111 0 1 1 r/w r/w 111 * 1111 0 1 1 r/w r/w
112 *
113 * If !CONFIG_CPU_USE_DOMAINS, the following permissions are changed:
114 * 110x 1 1 1 r/o r/o
115 * 11x0 1 1 1 r/o r/o
102 */ 116 */
103 .macro armv6_mt_table pfx 117 .macro armv6_mt_table pfx
104\pfx\()_mt_table: 118\pfx\()_mt_table:
@@ -138,8 +152,11 @@
138 152
139 tst r1, #L_PTE_USER 153 tst r1, #L_PTE_USER
140 orrne r3, r3, #PTE_EXT_AP1 154 orrne r3, r3, #PTE_EXT_AP1
155#ifdef CONFIG_CPU_USE_DOMAINS
156 @ allow kernel read/write access to read-only user pages
141 tstne r3, #PTE_EXT_APX 157 tstne r3, #PTE_EXT_APX
142 bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0 158 bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
159#endif
143 160
144 tst r1, #L_PTE_XN 161 tst r1, #L_PTE_XN
145 orrne r3, r3, #PTE_EXT_XN 162 orrne r3, r3, #PTE_EXT_XN
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
index 4458ee6aa713..9d4f2ae63370 100644
--- a/arch/arm/mm/proc-mohawk.S
+++ b/arch/arm/mm/proc-mohawk.S
@@ -388,6 +388,9 @@ mohawk_processor_functions:
388 .word cpu_mohawk_dcache_clean_area 388 .word cpu_mohawk_dcache_clean_area
389 .word cpu_mohawk_switch_mm 389 .word cpu_mohawk_switch_mm
390 .word cpu_mohawk_set_pte_ext 390 .word cpu_mohawk_set_pte_ext
391 .word 0
392 .word 0
393 .word 0
391 .size mohawk_processor_functions, . - mohawk_processor_functions 394 .size mohawk_processor_functions, . - mohawk_processor_functions
392 395
393 .section ".rodata" 396 .section ".rodata"
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S
index 5aa8d59c2e85..46f09ed16b98 100644
--- a/arch/arm/mm/proc-sa110.S
+++ b/arch/arm/mm/proc-sa110.S
@@ -203,6 +203,9 @@ ENTRY(sa110_processor_functions)
203 .word cpu_sa110_dcache_clean_area 203 .word cpu_sa110_dcache_clean_area
204 .word cpu_sa110_switch_mm 204 .word cpu_sa110_switch_mm
205 .word cpu_sa110_set_pte_ext 205 .word cpu_sa110_set_pte_ext
206 .word 0
207 .word 0
208 .word 0
206 .size sa110_processor_functions, . - sa110_processor_functions 209 .size sa110_processor_functions, . - sa110_processor_functions
207 210
208 .section ".rodata" 211 .section ".rodata"
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 2ac4e6f10713..74483d1977fe 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -169,6 +169,42 @@ ENTRY(cpu_sa1100_set_pte_ext)
169#endif 169#endif
170 mov pc, lr 170 mov pc, lr
171 171
172.globl cpu_sa1100_suspend_size
173.equ cpu_sa1100_suspend_size, 4*4
174#ifdef CONFIG_PM
175ENTRY(cpu_sa1100_do_suspend)
176 stmfd sp!, {r4 - r7, lr}
177 mrc p15, 0, r4, c3, c0, 0 @ domain ID
178 mrc p15, 0, r5, c2, c0, 0 @ translation table base addr
179 mrc p15, 0, r6, c13, c0, 0 @ PID
180 mrc p15, 0, r7, c1, c0, 0 @ control reg
181 stmia r0, {r4 - r7} @ store cp regs
182 ldmfd sp!, {r4 - r7, pc}
183ENDPROC(cpu_sa1100_do_suspend)
184
185ENTRY(cpu_sa1100_do_resume)
186 ldmia r0, {r4 - r7} @ load cp regs
187 mov r1, #0
188 mcr p15, 0, r1, c8, c7, 0 @ flush I+D TLBs
189 mcr p15, 0, r1, c7, c7, 0 @ flush I&D cache
190 mcr p15, 0, r1, c9, c0, 0 @ invalidate RB
191 mcr p15, 0, r1, c9, c0, 5 @ allow user space to use RB
192
193 mcr p15, 0, r4, c3, c0, 0 @ domain ID
194 mcr p15, 0, r5, c2, c0, 0 @ translation table base addr
195 mcr p15, 0, r6, c13, c0, 0 @ PID
196 mov r0, r7 @ control register
197 mov r2, r5, lsr #14 @ get TTB0 base
198 mov r2, r2, lsl #14
199 ldr r3, =PMD_TYPE_SECT | PMD_SECT_BUFFERABLE | \
200 PMD_SECT_CACHEABLE | PMD_SECT_AP_WRITE
201 b cpu_resume_mmu
202ENDPROC(cpu_sa1100_do_resume)
203#else
204#define cpu_sa1100_do_suspend 0
205#define cpu_sa1100_do_resume 0
206#endif
207
172 __CPUINIT 208 __CPUINIT
173 209
174 .type __sa1100_setup, #function 210 .type __sa1100_setup, #function
@@ -218,6 +254,9 @@ ENTRY(sa1100_processor_functions)
218 .word cpu_sa1100_dcache_clean_area 254 .word cpu_sa1100_dcache_clean_area
219 .word cpu_sa1100_switch_mm 255 .word cpu_sa1100_switch_mm
220 .word cpu_sa1100_set_pte_ext 256 .word cpu_sa1100_set_pte_ext
257 .word cpu_sa1100_suspend_size
258 .word cpu_sa1100_do_suspend
259 .word cpu_sa1100_do_resume
221 .size sa1100_processor_functions, . - sa1100_processor_functions 260 .size sa1100_processor_functions, . - sa1100_processor_functions
222 261
223 .section ".rodata" 262 .section ".rodata"
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 59a7e1ffe7bc..832b6bdc192c 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -121,6 +121,53 @@ ENTRY(cpu_v6_set_pte_ext)
121#endif 121#endif
122 mov pc, lr 122 mov pc, lr
123 123
124/* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */
125.globl cpu_v6_suspend_size
126.equ cpu_v6_suspend_size, 4 * 8
127#ifdef CONFIG_PM
128ENTRY(cpu_v6_do_suspend)
129 stmfd sp!, {r4 - r11, lr}
130 mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
131 mrc p15, 0, r5, c13, c0, 1 @ Context ID
132 mrc p15, 0, r6, c3, c0, 0 @ Domain ID
133 mrc p15, 0, r7, c2, c0, 0 @ Translation table base 0
134 mrc p15, 0, r8, c2, c0, 1 @ Translation table base 1
 135 mrc p15, 0, r9, c1, c0, 1 @ auxiliary control register
136 mrc p15, 0, r10, c1, c0, 2 @ co-processor access control
137 mrc p15, 0, r11, c1, c0, 0 @ control register
138 stmia r0, {r4 - r11}
 139 ldmfd sp!, {r4 - r11, pc}
140ENDPROC(cpu_v6_do_suspend)
141
142ENTRY(cpu_v6_do_resume)
143 mov ip, #0
144 mcr p15, 0, ip, c7, c14, 0 @ clean+invalidate D cache
145 mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
146 mcr p15, 0, ip, c7, c15, 0 @ clean+invalidate cache
147 mcr p15, 0, ip, c7, c10, 4 @ drain write buffer
148 ldmia r0, {r4 - r11}
149 mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID
150 mcr p15, 0, r5, c13, c0, 1 @ Context ID
151 mcr p15, 0, r6, c3, c0, 0 @ Domain ID
152 mcr p15, 0, r7, c2, c0, 0 @ Translation table base 0
153 mcr p15, 0, r8, c2, c0, 1 @ Translation table base 1
 154 mcr p15, 0, r9, c1, c0, 1 @ auxiliary control register
155 mcr p15, 0, r10, c1, c0, 2 @ co-processor access control
156 mcr p15, 0, ip, c2, c0, 2 @ TTB control register
157 mcr p15, 0, ip, c7, c5, 4 @ ISB
158 mov r0, r11 @ control register
159 mov r2, r7, lsr #14 @ get TTB0 base
160 mov r2, r2, lsl #14
161 ldr r3, cpu_resume_l1_flags
162 b cpu_resume_mmu
163ENDPROC(cpu_v6_do_resume)
164cpu_resume_l1_flags:
165 ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_SMP)
166 ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_UP)
167#else
168#define cpu_v6_do_suspend 0
169#define cpu_v6_do_resume 0
170#endif
124 171
125 172
126 .type cpu_v6_name, #object 173 .type cpu_v6_name, #object
@@ -206,6 +253,9 @@ ENTRY(v6_processor_functions)
206 .word cpu_v6_dcache_clean_area 253 .word cpu_v6_dcache_clean_area
207 .word cpu_v6_switch_mm 254 .word cpu_v6_switch_mm
208 .word cpu_v6_set_pte_ext 255 .word cpu_v6_set_pte_ext
256 .word cpu_v6_suspend_size
257 .word cpu_v6_do_suspend
258 .word cpu_v6_do_resume
209 .size v6_processor_functions, . - v6_processor_functions 259 .size v6_processor_functions, . - v6_processor_functions
210 260
211 .section ".rodata" 261 .section ".rodata"
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 210d051c54d7..262fa88a7439 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -108,10 +108,16 @@ ENTRY(cpu_v7_switch_mm)
108#ifdef CONFIG_ARM_ERRATA_430973 108#ifdef CONFIG_ARM_ERRATA_430973
109 mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB 109 mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
110#endif 110#endif
111#ifdef CONFIG_ARM_ERRATA_754322
112 dsb
113#endif
111 mcr p15, 0, r2, c13, c0, 1 @ set reserved context ID 114 mcr p15, 0, r2, c13, c0, 1 @ set reserved context ID
112 isb 115 isb
1131: mcr p15, 0, r0, c2, c0, 0 @ set TTB 0 1161: mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
114 isb 117 isb
118#ifdef CONFIG_ARM_ERRATA_754322
119 dsb
120#endif
115 mcr p15, 0, r1, c13, c0, 1 @ set context ID 121 mcr p15, 0, r1, c13, c0, 1 @ set context ID
116 isb 122 isb
117#endif 123#endif
@@ -146,8 +152,11 @@ ENTRY(cpu_v7_set_pte_ext)
146 152
147 tst r1, #L_PTE_USER 153 tst r1, #L_PTE_USER
148 orrne r3, r3, #PTE_EXT_AP1 154 orrne r3, r3, #PTE_EXT_AP1
155#ifdef CONFIG_CPU_USE_DOMAINS
156 @ allow kernel read/write access to read-only user pages
149 tstne r3, #PTE_EXT_APX 157 tstne r3, #PTE_EXT_APX
150 bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0 158 bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
159#endif
151 160
152 tst r1, #L_PTE_XN 161 tst r1, #L_PTE_XN
153 orrne r3, r3, #PTE_EXT_XN 162 orrne r3, r3, #PTE_EXT_XN
@@ -156,7 +165,9 @@ ENTRY(cpu_v7_set_pte_ext)
156 tstne r1, #L_PTE_PRESENT 165 tstne r1, #L_PTE_PRESENT
157 moveq r3, #0 166 moveq r3, #0
158 167
159 str r3, [r0, #2048]! 168 ARM( str r3, [r0, #2048]! )
169 THUMB( add r0, r0, #2048 )
170 THUMB( str r3, [r0] )
160 mcr p15, 0, r0, c7, c10, 1 @ flush_pte 171 mcr p15, 0, r0, c7, c10, 1 @ flush_pte
161#endif 172#endif
162 mov pc, lr 173 mov pc, lr
@@ -166,6 +177,87 @@ cpu_v7_name:
166 .ascii "ARMv7 Processor" 177 .ascii "ARMv7 Processor"
167 .align 178 .align
168 179
180 /*
181 * Memory region attributes with SCTLR.TRE=1
182 *
183 * n = TEX[0],C,B
184 * TR = PRRR[2n+1:2n] - memory type
185 * IR = NMRR[2n+1:2n] - inner cacheable property
186 * OR = NMRR[2n+17:2n+16] - outer cacheable property
187 *
188 * n TR IR OR
189 * UNCACHED 000 00
190 * BUFFERABLE 001 10 00 00
191 * WRITETHROUGH 010 10 10 10
192 * WRITEBACK 011 10 11 11
193 * reserved 110
194 * WRITEALLOC 111 10 01 01
195 * DEV_SHARED 100 01
196 * DEV_NONSHARED 100 01
197 * DEV_WC 001 10
198 * DEV_CACHED 011 10
199 *
200 * Other attributes:
201 *
202 * DS0 = PRRR[16] = 0 - device shareable property
203 * DS1 = PRRR[17] = 1 - device shareable property
204 * NS0 = PRRR[18] = 0 - normal shareable property
205 * NS1 = PRRR[19] = 1 - normal shareable property
206 * NOS = PRRR[24+n] = 1 - not outer shareable
207 */
208.equ PRRR, 0xff0a81a8
209.equ NMRR, 0x40e040e0
210
211/* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */
212.globl cpu_v7_suspend_size
213.equ cpu_v7_suspend_size, 4 * 8
214#ifdef CONFIG_PM
215ENTRY(cpu_v7_do_suspend)
216 stmfd sp!, {r4 - r11, lr}
217 mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
218 mrc p15, 0, r5, c13, c0, 1 @ Context ID
219 mrc p15, 0, r6, c3, c0, 0 @ Domain ID
220 mrc p15, 0, r7, c2, c0, 0 @ TTB 0
221 mrc p15, 0, r8, c2, c0, 1 @ TTB 1
222 mrc p15, 0, r9, c1, c0, 0 @ Control register
223 mrc p15, 0, r10, c1, c0, 1 @ Auxiliary control register
224 mrc p15, 0, r11, c1, c0, 2 @ Co-processor access control
225 stmia r0, {r4 - r11}
226 ldmfd sp!, {r4 - r11, pc}
227ENDPROC(cpu_v7_do_suspend)
228
229ENTRY(cpu_v7_do_resume)
230 mov ip, #0
231 mcr p15, 0, ip, c8, c7, 0 @ invalidate TLBs
232 mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
233 ldmia r0, {r4 - r11}
234 mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID
235 mcr p15, 0, r5, c13, c0, 1 @ Context ID
236 mcr p15, 0, r6, c3, c0, 0 @ Domain ID
237 mcr p15, 0, r7, c2, c0, 0 @ TTB 0
238 mcr p15, 0, r8, c2, c0, 1 @ TTB 1
239 mcr p15, 0, ip, c2, c0, 2 @ TTB control register
 240 mcr p15, 0, r10, c1, c0, 1 @ Auxiliary control register
241 mcr p15, 0, r11, c1, c0, 2 @ Co-processor access control
242 ldr r4, =PRRR @ PRRR
243 ldr r5, =NMRR @ NMRR
244 mcr p15, 0, r4, c10, c2, 0 @ write PRRR
245 mcr p15, 0, r5, c10, c2, 1 @ write NMRR
246 isb
247 mov r0, r9 @ control register
248 mov r2, r7, lsr #14 @ get TTB0 base
249 mov r2, r2, lsl #14
250 ldr r3, cpu_resume_l1_flags
251 b cpu_resume_mmu
252ENDPROC(cpu_v7_do_resume)
253cpu_resume_l1_flags:
254 ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_SMP)
255 ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_UP)
256#else
257#define cpu_v7_do_suspend 0
258#define cpu_v7_do_resume 0
259#endif
260
169 __CPUINIT 261 __CPUINIT
170 262
171/* 263/*
@@ -259,6 +351,12 @@ __v7_setup:
259 orreq r10, r10, #1 << 6 @ set bit #6 351 orreq r10, r10, #1 << 6 @ set bit #6
260 mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register 352 mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
261#endif 353#endif
354#ifdef CONFIG_ARM_ERRATA_751472
355 cmp r6, #0x30 @ present prior to r3p0
356 mrclt p15, 0, r10, c15, c0, 1 @ read diagnostic register
357 orrlt r10, r10, #1 << 11 @ set bit #11
358 mcrlt p15, 0, r10, c15, c0, 1 @ write diagnostic register
359#endif
262 360
2633: mov r10, #0 3613: mov r10, #0
264#ifdef HARVARD_CACHE 362#ifdef HARVARD_CACHE
@@ -271,38 +369,8 @@ __v7_setup:
271 ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP) 369 ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP)
272 ALT_UP(orr r4, r4, #TTB_FLAGS_UP) 370 ALT_UP(orr r4, r4, #TTB_FLAGS_UP)
273 mcr p15, 0, r4, c2, c0, 1 @ load TTB1 371 mcr p15, 0, r4, c2, c0, 1 @ load TTB1
274 mov r10, #0x1f @ domains 0, 1 = manager 372 ldr r5, =PRRR @ PRRR
275 mcr p15, 0, r10, c3, c0, 0 @ load domain access register 373 ldr r6, =NMRR @ NMRR
276 /*
277 * Memory region attributes with SCTLR.TRE=1
278 *
279 * n = TEX[0],C,B
280 * TR = PRRR[2n+1:2n] - memory type
281 * IR = NMRR[2n+1:2n] - inner cacheable property
282 * OR = NMRR[2n+17:2n+16] - outer cacheable property
283 *
284 * n TR IR OR
285 * UNCACHED 000 00
286 * BUFFERABLE 001 10 00 00
287 * WRITETHROUGH 010 10 10 10
288 * WRITEBACK 011 10 11 11
289 * reserved 110
290 * WRITEALLOC 111 10 01 01
291 * DEV_SHARED 100 01
292 * DEV_NONSHARED 100 01
293 * DEV_WC 001 10
294 * DEV_CACHED 011 10
295 *
296 * Other attributes:
297 *
298 * DS0 = PRRR[16] = 0 - device shareable property
299 * DS1 = PRRR[17] = 1 - device shareable property
300 * NS0 = PRRR[18] = 0 - normal shareable property
301 * NS1 = PRRR[19] = 1 - normal shareable property
302 * NOS = PRRR[24+n] = 1 - not outer shareable
303 */
304 ldr r5, =0xff0a81a8 @ PRRR
305 ldr r6, =0x40e040e0 @ NMRR
306 mcr p15, 0, r5, c10, c2, 0 @ write PRRR 374 mcr p15, 0, r5, c10, c2, 0 @ write PRRR
307 mcr p15, 0, r6, c10, c2, 1 @ write NMRR 375 mcr p15, 0, r6, c10, c2, 1 @ write NMRR
308#endif 376#endif
@@ -311,6 +379,10 @@ __v7_setup:
311#ifdef CONFIG_CPU_ENDIAN_BE8 379#ifdef CONFIG_CPU_ENDIAN_BE8
312 orr r6, r6, #1 << 25 @ big-endian page tables 380 orr r6, r6, #1 << 25 @ big-endian page tables
313#endif 381#endif
382#ifdef CONFIG_SWP_EMULATE
383 orr r5, r5, #(1 << 10) @ set SW bit in "clear"
384 bic r6, r6, #(1 << 10) @ clear it in "mmuset"
385#endif
314 mrc p15, 0, r0, c1, c0, 0 @ read control register 386 mrc p15, 0, r0, c1, c0, 0 @ read control register
 315 bic r0, r0, r5 @ clear requested bits 387 bic r0, r0, r5 @ clear requested bits
 316 orr r0, r0, r6 @ set requested bits 388 orr r0, r0, r6 @ set requested bits
@@ -344,6 +416,9 @@ ENTRY(v7_processor_functions)
344 .word cpu_v7_dcache_clean_area 416 .word cpu_v7_dcache_clean_area
345 .word cpu_v7_switch_mm 417 .word cpu_v7_switch_mm
346 .word cpu_v7_set_pte_ext 418 .word cpu_v7_set_pte_ext
 419 .word cpu_v7_suspend_size
 420 .word cpu_v7_do_suspend
 421 .word cpu_v7_do_resume
347 .size v7_processor_functions, . - v7_processor_functions 422 .size v7_processor_functions, . - v7_processor_functions
348 423
349 .section ".rodata" 424 .section ".rodata"
@@ -379,7 +454,7 @@ __v7_ca9mp_proc_info:
379 PMD_SECT_XN | \ 454 PMD_SECT_XN | \
380 PMD_SECT_AP_WRITE | \ 455 PMD_SECT_AP_WRITE | \
381 PMD_SECT_AP_READ 456 PMD_SECT_AP_READ
382 b __v7_ca9mp_setup 457 W(b) __v7_ca9mp_setup
383 .long cpu_arch_name 458 .long cpu_arch_name
384 .long cpu_elf_name 459 .long cpu_elf_name
385 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS 460 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS
@@ -411,7 +486,7 @@ __v7_proc_info:
411 PMD_SECT_XN | \ 486 PMD_SECT_XN | \
412 PMD_SECT_AP_WRITE | \ 487 PMD_SECT_AP_WRITE | \
413 PMD_SECT_AP_READ 488 PMD_SECT_AP_READ
414 b __v7_setup 489 W(b) __v7_setup
415 .long cpu_arch_name 490 .long cpu_arch_name
416 .long cpu_elf_name 491 .long cpu_elf_name
417 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS 492 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index ec26355cb7c2..63d8b2044e84 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -413,9 +413,52 @@ ENTRY(cpu_xsc3_set_pte_ext)
413 mov pc, lr 413 mov pc, lr
414 414
415 .ltorg 415 .ltorg
416
417 .align 416 .align
418 417
418.globl cpu_xsc3_suspend_size
419.equ cpu_xsc3_suspend_size, 4 * 8
420#ifdef CONFIG_PM
421ENTRY(cpu_xsc3_do_suspend)
422 stmfd sp!, {r4 - r10, lr}
423 mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
424 mrc p15, 0, r5, c15, c1, 0 @ CP access reg
425 mrc p15, 0, r6, c13, c0, 0 @ PID
426 mrc p15, 0, r7, c3, c0, 0 @ domain ID
427 mrc p15, 0, r8, c2, c0, 0 @ translation table base addr
428 mrc p15, 0, r9, c1, c0, 1 @ auxiliary control reg
429 mrc p15, 0, r10, c1, c0, 0 @ control reg
430 bic r4, r4, #2 @ clear frequency change bit
431 stmia r0, {r1, r4 - r10} @ store v:p offset + cp regs
 432 ldmfd sp!, {r4 - r10, pc}
433ENDPROC(cpu_xsc3_do_suspend)
434
435ENTRY(cpu_xsc3_do_resume)
436 ldmia r0, {r1, r4 - r10} @ load v:p offset + cp regs
437 mov ip, #0
438 mcr p15, 0, ip, c7, c7, 0 @ invalidate I & D caches, BTB
439 mcr p15, 0, ip, c7, c10, 4 @ drain write (&fill) buffer
440 mcr p15, 0, ip, c7, c5, 4 @ flush prefetch buffer
441 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
442 mcr p14, 0, r4, c6, c0, 0 @ clock configuration, turbo mode.
443 mcr p15, 0, r5, c15, c1, 0 @ CP access reg
444 mcr p15, 0, r6, c13, c0, 0 @ PID
445 mcr p15, 0, r7, c3, c0, 0 @ domain ID
446 mcr p15, 0, r8, c2, c0, 0 @ translation table base addr
447 mcr p15, 0, r9, c1, c0, 1 @ auxiliary control reg
448
449 @ temporarily map resume_turn_on_mmu into the page table,
450 @ otherwise prefetch abort occurs after MMU is turned on
451 mov r0, r10 @ control register
452 mov r2, r8, lsr #14 @ get TTB0 base
453 mov r2, r2, lsl #14
454 ldr r3, =0x542e @ section flags
455 b cpu_resume_mmu
456ENDPROC(cpu_xsc3_do_resume)
457#else
458#define cpu_xsc3_do_suspend 0
459#define cpu_xsc3_do_resume 0
460#endif
461
419 __CPUINIT 462 __CPUINIT
420 463
421 .type __xsc3_setup, #function 464 .type __xsc3_setup, #function
@@ -476,6 +519,9 @@ ENTRY(xsc3_processor_functions)
476 .word cpu_xsc3_dcache_clean_area 519 .word cpu_xsc3_dcache_clean_area
477 .word cpu_xsc3_switch_mm 520 .word cpu_xsc3_switch_mm
478 .word cpu_xsc3_set_pte_ext 521 .word cpu_xsc3_set_pte_ext
522 .word cpu_xsc3_suspend_size
523 .word cpu_xsc3_do_suspend
524 .word cpu_xsc3_do_resume
479 .size xsc3_processor_functions, . - xsc3_processor_functions 525 .size xsc3_processor_functions, . - xsc3_processor_functions
480 526
481 .section ".rodata" 527 .section ".rodata"
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 5a37c5e45c41..086038cd86ab 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -513,11 +513,49 @@ ENTRY(cpu_xscale_set_pte_ext)
513 xscale_set_pte_ext_epilogue 513 xscale_set_pte_ext_epilogue
514 mov pc, lr 514 mov pc, lr
515 515
516
517 .ltorg 516 .ltorg
518
519 .align 517 .align
520 518
519.globl cpu_xscale_suspend_size
520.equ cpu_xscale_suspend_size, 4 * 7
521#ifdef CONFIG_PM
522ENTRY(cpu_xscale_do_suspend)
523 stmfd sp!, {r4 - r10, lr}
524 mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
525 mrc p15, 0, r5, c15, c1, 0 @ CP access reg
526 mrc p15, 0, r6, c13, c0, 0 @ PID
527 mrc p15, 0, r7, c3, c0, 0 @ domain ID
528 mrc p15, 0, r8, c2, c0, 0 @ translation table base addr
529 mrc p15, 0, r9, c1, c1, 0 @ auxiliary control reg
530 mrc p15, 0, r10, c1, c0, 0 @ control reg
531 bic r4, r4, #2 @ clear frequency change bit
532 stmia r0, {r4 - r10} @ store cp regs
533 ldmfd sp!, {r4 - r10, pc}
534ENDPROC(cpu_xscale_do_suspend)
535
536ENTRY(cpu_xscale_do_resume)
537 ldmia r0, {r4 - r10} @ load cp regs
538 mov ip, #0
539 mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
540 mcr p15, 0, ip, c7, c7, 0 @ invalidate I & D caches, BTB
541 mcr p14, 0, r4, c6, c0, 0 @ clock configuration, turbo mode.
542 mcr p15, 0, r5, c15, c1, 0 @ CP access reg
543 mcr p15, 0, r6, c13, c0, 0 @ PID
544 mcr p15, 0, r7, c3, c0, 0 @ domain ID
545 mcr p15, 0, r8, c2, c0, 0 @ translation table base addr
546 mcr p15, 0, r9, c1, c1, 0 @ auxiliary control reg
547 mov r0, r10 @ control register
548 mov r2, r8, lsr #14 @ get TTB0 base
549 mov r2, r2, lsl #14
550 ldr r3, =PMD_TYPE_SECT | PMD_SECT_BUFFERABLE | \
551 PMD_SECT_CACHEABLE | PMD_SECT_AP_WRITE
552 b cpu_resume_mmu
553ENDPROC(cpu_xscale_do_resume)
554#else
555#define cpu_xscale_do_suspend 0
556#define cpu_xscale_do_resume 0
557#endif
558
521 __CPUINIT 559 __CPUINIT
522 560
523 .type __xscale_setup, #function 561 .type __xscale_setup, #function
@@ -565,6 +603,9 @@ ENTRY(xscale_processor_functions)
565 .word cpu_xscale_dcache_clean_area 603 .word cpu_xscale_dcache_clean_area
566 .word cpu_xscale_switch_mm 604 .word cpu_xscale_switch_mm
567 .word cpu_xscale_set_pte_ext 605 .word cpu_xscale_set_pte_ext
606 .word cpu_xscale_suspend_size
607 .word cpu_xscale_do_suspend
608 .word cpu_xscale_do_resume
568 .size xscale_processor_functions, . - xscale_processor_functions 609 .size xscale_processor_functions, . - xscale_processor_functions
569 610
570 .section ".rodata" 611 .section ".rodata"
diff --git a/arch/arm/mm/vmregion.c b/arch/arm/mm/vmregion.c
index 935993e1b1ef..036fdbfdd62f 100644
--- a/arch/arm/mm/vmregion.c
+++ b/arch/arm/mm/vmregion.c
@@ -38,7 +38,7 @@ struct arm_vmregion *
38arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align, 38arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align,
39 size_t size, gfp_t gfp) 39 size_t size, gfp_t gfp)
40{ 40{
41 unsigned long addr = head->vm_start, end = head->vm_end - size; 41 unsigned long start = head->vm_start, addr = head->vm_end;
42 unsigned long flags; 42 unsigned long flags;
43 struct arm_vmregion *c, *new; 43 struct arm_vmregion *c, *new;
44 44
@@ -54,21 +54,20 @@ arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align,
54 54
55 spin_lock_irqsave(&head->vm_lock, flags); 55 spin_lock_irqsave(&head->vm_lock, flags);
56 56
57 list_for_each_entry(c, &head->vm_list, vm_list) { 57 addr = rounddown(addr - size, align);
58 if ((addr + size) < addr) 58 list_for_each_entry_reverse(c, &head->vm_list, vm_list) {
59 goto nospc; 59 if (addr >= c->vm_end)
60 if ((addr + size) <= c->vm_start)
61 goto found; 60 goto found;
62 addr = ALIGN(c->vm_end, align); 61 addr = rounddown(c->vm_start - size, align);
63 if (addr > end) 62 if (addr < start)
64 goto nospc; 63 goto nospc;
65 } 64 }
66 65
67 found: 66 found:
68 /* 67 /*
69 * Insert this entry _before_ the one we found. 68 * Insert this entry after the one we found.
70 */ 69 */
71 list_add_tail(&new->vm_list, &c->vm_list); 70 list_add(&new->vm_list, &c->vm_list);
72 new->vm_start = addr; 71 new->vm_start = addr;
73 new->vm_end = addr + size; 72 new->vm_end = addr + size;
74 new->vm_active = 1; 73 new->vm_active = 1;