author    Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
committer Glenn Elliott <gelliott@cs.unc.edu>  2012-03-04 19:47:13 -0500
commit    c71c03bda1e86c9d5198c5d83f712e695c4f2a1e (patch)
tree      ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /arch/arm/mm
parent    ea53c912f8a86a8567697115b6a0d8152beee5c8 (diff)
parent    6a00f206debf8a5c8899055726ad127dbeeed098 (diff)
Merge branch 'mpi-master' into wip-k-fmlp
Conflicts:
litmus/sched_cedf.c
Diffstat (limited to 'arch/arm/mm')
56 files changed, 1806 insertions, 747 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index a0a2928ae4dd..0074b8dba793 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -382,9 +382,15 @@ config CPU_FEROCEON_OLD_ID | |||
382 | for which the CPU ID is equal to the ARM926 ID. | 382 | for which the CPU ID is equal to the ARM926 ID. |
383 | Relevant for Feroceon-1850 and early Feroceon-2850. | 383 | Relevant for Feroceon-1850 and early Feroceon-2850. |
384 | 384 | ||
385 | # Marvell PJ4 | ||
386 | config CPU_PJ4 | ||
387 | bool | ||
388 | select CPU_V7 | ||
389 | select ARM_THUMBEE | ||
390 | |||
385 | # ARMv6 | 391 | # ARMv6 |
386 | config CPU_V6 | 392 | config CPU_V6 |
387 | bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX || ARCH_DOVE | 393 | bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX |
388 | select CPU_32v6 | 394 | select CPU_32v6 |
389 | select CPU_ABRT_EV6 | 395 | select CPU_ABRT_EV6 |
390 | select CPU_PABRT_V6 | 396 | select CPU_PABRT_V6 |
@@ -396,21 +402,23 @@ config CPU_V6 | |||
396 | select CPU_TLB_V6 if MMU | 402 | select CPU_TLB_V6 if MMU |
397 | 403 | ||
398 | # ARMv6k | 404 | # ARMv6k |
399 | config CPU_32v6K | 405 | config CPU_V6K |
400 | bool "Support ARM V6K processor extensions" if !SMP | 406 | bool "Support ARM V6K processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX |
401 | depends on CPU_V6 || CPU_V7 | 407 | select CPU_32v6 |
402 | default y if SMP && !(ARCH_MX3 || ARCH_OMAP2) | 408 | select CPU_32v6K |
403 | help | 409 | select CPU_ABRT_EV6 |
404 | Say Y here if your ARMv6 processor supports the 'K' extension. | 410 | select CPU_PABRT_V6 |
405 | This enables the kernel to use some instructions not present | 411 | select CPU_CACHE_V6 |
406 | on previous processors, and as such a kernel build with this | 412 | select CPU_CACHE_VIPT |
407 | enabled will not boot on processors with do not support these | 413 | select CPU_CP15_MMU |
408 | instructions. | 414 | select CPU_HAS_ASID if MMU |
415 | select CPU_COPY_V6 if MMU | ||
416 | select CPU_TLB_V6 if MMU | ||
409 | 417 | ||
410 | # ARMv7 | 418 | # ARMv7 |
411 | config CPU_V7 | 419 | config CPU_V7 |
412 | bool "Support ARM V7 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX | 420 | bool "Support ARM V7 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX |
413 | select CPU_32v6K if !ARCH_OMAP2 | 421 | select CPU_32v6K |
414 | select CPU_32v7 | 422 | select CPU_32v7 |
415 | select CPU_ABRT_EV7 | 423 | select CPU_ABRT_EV7 |
416 | select CPU_PABRT_V7 | 424 | select CPU_PABRT_V7 |
@@ -427,25 +435,33 @@ config CPU_32v3 | |||
427 | bool | 435 | bool |
428 | select TLS_REG_EMUL if SMP || !MMU | 436 | select TLS_REG_EMUL if SMP || !MMU |
429 | select NEEDS_SYSCALL_FOR_CMPXCHG if SMP | 437 | select NEEDS_SYSCALL_FOR_CMPXCHG if SMP |
438 | select CPU_USE_DOMAINS if MMU | ||
430 | 439 | ||
431 | config CPU_32v4 | 440 | config CPU_32v4 |
432 | bool | 441 | bool |
433 | select TLS_REG_EMUL if SMP || !MMU | 442 | select TLS_REG_EMUL if SMP || !MMU |
434 | select NEEDS_SYSCALL_FOR_CMPXCHG if SMP | 443 | select NEEDS_SYSCALL_FOR_CMPXCHG if SMP |
444 | select CPU_USE_DOMAINS if MMU | ||
435 | 445 | ||
436 | config CPU_32v4T | 446 | config CPU_32v4T |
437 | bool | 447 | bool |
438 | select TLS_REG_EMUL if SMP || !MMU | 448 | select TLS_REG_EMUL if SMP || !MMU |
439 | select NEEDS_SYSCALL_FOR_CMPXCHG if SMP | 449 | select NEEDS_SYSCALL_FOR_CMPXCHG if SMP |
450 | select CPU_USE_DOMAINS if MMU | ||
440 | 451 | ||
441 | config CPU_32v5 | 452 | config CPU_32v5 |
442 | bool | 453 | bool |
443 | select TLS_REG_EMUL if SMP || !MMU | 454 | select TLS_REG_EMUL if SMP || !MMU |
444 | select NEEDS_SYSCALL_FOR_CMPXCHG if SMP | 455 | select NEEDS_SYSCALL_FOR_CMPXCHG if SMP |
456 | select CPU_USE_DOMAINS if MMU | ||
445 | 457 | ||
446 | config CPU_32v6 | 458 | config CPU_32v6 |
447 | bool | 459 | bool |
448 | select TLS_REG_EMUL if !CPU_32v6K && !MMU | 460 | select TLS_REG_EMUL if !CPU_32v6K && !MMU |
461 | select CPU_USE_DOMAINS if CPU_V6 && MMU | ||
462 | |||
463 | config CPU_32v6K | ||
464 | bool | ||
449 | 465 | ||
450 | config CPU_32v7 | 466 | config CPU_32v7 |
451 | bool | 467 | bool |
@@ -599,6 +615,12 @@ config CPU_CP15_MPU | |||
599 | help | 615 | help |
600 | Processor has the CP15 register, which has MPU related registers. | 616 | Processor has the CP15 register, which has MPU related registers. |
601 | 617 | ||
618 | config CPU_USE_DOMAINS | ||
619 | bool | ||
620 | help | ||
621 | This option enables or disables the use of domain switching | ||
622 | via the set_fs() function. | ||
623 | |||
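Editor's note on the CPU_USE_DOMAINS help above: ARM "domains" are coarse access-control groups gated by the CP15 Domain Access Control Register (DACR), and set_fs() historically toggled a domain between client (permission-checked) and manager (unchecked) mode. A minimal sketch of a DACR write, for illustration only (this is not the kernel's actual set_fs() implementation):

```c
/*
 * Illustration only. Each 2-bit DACR field selects the access mode
 * for one of 16 domains: 0 = no access, 1 = client (page-table
 * permissions enforced), 3 = manager (permissions bypassed).
 */
static inline void write_dacr(unsigned int value)
{
	asm volatile("mcr p15, 0, %0, c3, c0, 0"	/* write DACR */
		     : : "r" (value) : "memory");
}
```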
602 | # | 624 | # |
603 | # CPU supports 36-bit I/O | 625 | # CPU supports 36-bit I/O |
604 | # | 626 | # |
@@ -609,7 +631,7 @@ comment "Processor Features" | |||
609 | 631 | ||
610 | config ARM_THUMB | 632 | config ARM_THUMB |
611 | bool "Support Thumb user binaries" | 633 | bool "Support Thumb user binaries" |
612 | depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V7 || CPU_FEROCEON | 634 | depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || CPU_V7 || CPU_FEROCEON |
613 | default y | 635 | default y |
614 | help | 636 | help |
615 | Say Y if you want to include kernel support for running user space | 637 | Say Y if you want to include kernel support for running user space |
@@ -628,6 +650,33 @@ config ARM_THUMBEE | |||
628 | Say Y here if you have a CPU with the ThumbEE extension and code to | 650 | Say Y here if you have a CPU with the ThumbEE extension and code to |
629 | make use of it. Say N for code that can run on CPUs without ThumbEE. | 651 | make use of it. Say N for code that can run on CPUs without ThumbEE. |
630 | 652 | ||
653 | config SWP_EMULATE | ||
654 | bool "Emulate SWP/SWPB instructions" | ||
655 | depends on !CPU_USE_DOMAINS && CPU_V7 | ||
656 | select HAVE_PROC_CPU if PROC_FS | ||
657 | default y if SMP | ||
658 | help | ||
659 | ARMv6 architecture deprecates use of the SWP/SWPB instructions. | ||
660 | ARMv7 multiprocessing extensions introduce the ability to disable | ||
661 | these instructions, triggering an undefined instruction exception | ||
662 | when executed. Say Y here to enable software emulation of these | ||
663 | instructions for userspace (not kernel) using LDREX/STREX. | ||
664 | Also creates /proc/cpu/swp_emulation for statistics. | ||
665 | |||
666 | In some older versions of glibc [<=2.8] SWP is used during futex | ||
667 | trylock() operations with the assumption that the code will not | ||
668 | be preempted. This invalid assumption may be more likely to fail | ||
669 | with SWP emulation enabled, leading to deadlock of the user | ||
670 | application. | ||
671 | |||
672 | NOTE: when accessing uncached shared regions, LDREX/STREX rely | ||
673 | on an external transaction monitoring block called a global | ||
674 | monitor to maintain update atomicity. If your system does not | ||
675 | implement a global monitor, this option can cause programs that | ||
676 | perform SWP operations to uncached memory to deadlock. | ||
677 | |||
678 | If unsure, say Y. | ||
679 | |||
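For context on the emulation this help text describes: SWP rd, rm, [rn] atomically loads the old value at [rn] into rd and stores rm. After trapping the resulting undefined-instruction exception, a handler can replay the operation with an LDREX/STREX retry loop. A simplified kernel-side sketch (the real handler must also decode the trapped instruction and cope with user-space faults):

```c
static unsigned long swp_emulate_sketch(unsigned long *addr,
					unsigned long newval)
{
	unsigned long oldval, failed;

	do {
		asm volatile(
		"	ldrex	%0, [%2]\n"	/* load old value, open monitor   */
		"	strex	%1, %3, [%2]\n"	/* 0 on success, 1 if monitor lost */
		: "=&r" (oldval), "=&r" (failed)
		: "r" (addr), "r" (newval)
		: "cc", "memory");
	} while (failed);

	return oldval;
}
```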
631 | config CPU_BIG_ENDIAN | 680 | config CPU_BIG_ENDIAN |
632 | bool "Build big-endian kernel" | 681 | bool "Build big-endian kernel" |
633 | depends on ARCH_SUPPORTS_BIG_ENDIAN | 682 | depends on ARCH_SUPPORTS_BIG_ENDIAN |
@@ -640,7 +689,7 @@ config CPU_BIG_ENDIAN | |||
640 | config CPU_ENDIAN_BE8 | 689 | config CPU_ENDIAN_BE8 |
641 | bool | 690 | bool |
642 | depends on CPU_BIG_ENDIAN | 691 | depends on CPU_BIG_ENDIAN |
643 | default CPU_V6 || CPU_V7 | 692 | default CPU_V6 || CPU_V6K || CPU_V7 |
644 | help | 693 | help |
645 | Support for the BE-8 (big-endian) mode on ARMv6 and ARMv7 processors. | 694 | Support for the BE-8 (big-endian) mode on ARMv6 and ARMv7 processors. |
646 | 695 | ||
@@ -706,7 +755,7 @@ config CPU_CACHE_ROUND_ROBIN | |||
706 | 755 | ||
707 | config CPU_BPREDICT_DISABLE | 756 | config CPU_BPREDICT_DISABLE |
708 | bool "Disable branch prediction" | 757 | bool "Disable branch prediction" |
709 | depends on CPU_ARM1020 || CPU_V6 || CPU_MOHAWK || CPU_XSC3 || CPU_V7 || CPU_FA526 | 758 | depends on CPU_ARM1020 || CPU_V6 || CPU_V6K || CPU_MOHAWK || CPU_XSC3 || CPU_V7 || CPU_FA526 |
710 | help | 759 | help |
711 | Say Y here to disable branch prediction. If unsure, say N. | 760 | Say Y here to disable branch prediction. If unsure, say N. |
712 | 761 | ||
@@ -726,7 +775,7 @@ config NEEDS_SYSCALL_FOR_CMPXCHG | |||
726 | 775 | ||
727 | config DMA_CACHE_RWFO | 776 | config DMA_CACHE_RWFO |
728 | bool "Enable read/write for ownership DMA cache maintenance" | 777 | bool "Enable read/write for ownership DMA cache maintenance" |
729 | depends on CPU_V6 && SMP | 778 | depends on CPU_V6K && SMP |
730 | default y | 779 | default y |
731 | help | 780 | help |
732 | The Snoop Control Unit on ARM11MPCore does not detect the | 781 | The Snoop Control Unit on ARM11MPCore does not detect the |
@@ -770,18 +819,26 @@ config CACHE_FEROCEON_L2_WRITETHROUGH | |||
770 | config CACHE_L2X0 | 819 | config CACHE_L2X0 |
771 | bool "Enable the L2x0 outer cache controller" | 820 | bool "Enable the L2x0 outer cache controller" |
772 | depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \ | 821 | depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \ |
773 | REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || \ | 822 | REALVIEW_EB_A9MP || SOC_IMX35 || SOC_IMX31 || MACH_REALVIEW_PBX || \ |
774 | ARCH_NOMADIK || ARCH_OMAP4 || ARCH_S5PV310 || ARCH_TEGRA || \ | 823 | ARCH_NOMADIK || ARCH_OMAP4 || ARCH_EXYNOS4 || ARCH_TEGRA || \ |
775 | ARCH_U8500 || ARCH_VEXPRESS_CA9X4 | 824 | ARCH_U8500 || ARCH_VEXPRESS_CA9X4 || ARCH_SHMOBILE |
776 | default y | 825 | default y |
777 | select OUTER_CACHE | 826 | select OUTER_CACHE |
778 | select OUTER_CACHE_SYNC | 827 | select OUTER_CACHE_SYNC |
779 | help | 828 | help |
780 | This option enables the L2x0 PrimeCell. | 829 | This option enables the L2x0 PrimeCell. |
781 | 830 | ||
831 | config CACHE_PL310 | ||
832 | bool | ||
833 | depends on CACHE_L2X0 | ||
834 | default y if CPU_V7 && !(CPU_V6 || CPU_V6K) | ||
835 | help | ||
836 | This option enables optimisations for the PL310 cache | ||
837 | controller. | ||
838 | |||
782 | config CACHE_TAUROS2 | 839 | config CACHE_TAUROS2 |
783 | bool "Enable the Tauros2 L2 cache controller" | 840 | bool "Enable the Tauros2 L2 cache controller" |
784 | depends on (ARCH_DOVE || ARCH_MMP) | 841 | depends on (ARCH_DOVE || ARCH_MMP || CPU_PJ4) |
785 | default y | 842 | default y |
786 | select OUTER_CACHE | 843 | select OUTER_CACHE |
787 | help | 844 | help |
@@ -796,16 +853,21 @@ config CACHE_XSC3L2 | |||
796 | help | 853 | help |
797 | This option enables the L2 cache on XScale3. | 854 | This option enables the L2 cache on XScale3. |
798 | 855 | ||
856 | config ARM_L1_CACHE_SHIFT_6 | ||
857 | bool | ||
858 | help | ||
859 | Setting ARM L1 cache line size to 64 Bytes. | ||
860 | |||
799 | config ARM_L1_CACHE_SHIFT | 861 | config ARM_L1_CACHE_SHIFT |
800 | int | 862 | int |
801 | default 6 if ARM_L1_CACHE_SHIFT_6 | 863 | default 6 if ARM_L1_CACHE_SHIFT_6 |
802 | default 5 | 864 | default 5 |
803 | 865 | ||
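ARM_L1_CACHE_SHIFT stores log2 of the L1 line size: the default 5 means 32-byte lines, and selecting ARM_L1_CACHE_SHIFT_6 bumps that to 64-byte lines. The derived constant looks roughly like this (a sketch in the style of asm/cache.h):

```c
#define L1_CACHE_SHIFT	CONFIG_ARM_L1_CACHE_SHIFT	/* 5 or 6 */
#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)		/* 32 or 64 bytes */
```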
804 | config ARM_DMA_MEM_BUFFERABLE | 866 | config ARM_DMA_MEM_BUFFERABLE |
805 | bool "Use non-cacheable memory for DMA" if CPU_V6 && !CPU_V7 | 867 | bool "Use non-cacheable memory for DMA" if (CPU_V6 || CPU_V6K) && !CPU_V7 |
806 | depends on !(MACH_REALVIEW_PB1176 || REALVIEW_EB_ARM11MP || \ | 868 | depends on !(MACH_REALVIEW_PB1176 || REALVIEW_EB_ARM11MP || \ |
807 | MACH_REALVIEW_PB11MP) | 869 | MACH_REALVIEW_PB11MP) |
808 | default y if CPU_V6 || CPU_V7 | 870 | default y if CPU_V6 || CPU_V6K || CPU_V7 |
809 | help | 871 | help |
810 | Historically, the kernel has used strongly ordered mappings to | 872 | Historically, the kernel has used strongly ordered mappings to |
811 | provide DMA coherent memory. With the advent of ARMv7, mapping | 873 | provide DMA coherent memory. With the advent of ARMv7, mapping |
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index d63b6c413758..bca7e61928c7 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -5,8 +5,8 @@ | |||
5 | obj-y := dma-mapping.o extable.o fault.o init.o \ | 5 | obj-y := dma-mapping.o extable.o fault.o init.o \ |
6 | iomap.o | 6 | iomap.o |
7 | 7 | ||
8 | obj-$(CONFIG_MMU) += fault-armv.o flush.o ioremap.o mmap.o \ | 8 | obj-$(CONFIG_MMU) += fault-armv.o flush.o idmap.o ioremap.o \ |
9 | pgd.o mmu.o vmregion.o | 9 | mmap.o pgd.o mmu.o vmregion.o |
10 | 10 | ||
11 | ifneq ($(CONFIG_MMU),y) | 11 | ifneq ($(CONFIG_MMU),y) |
12 | obj-y += nommu.o | 12 | obj-y += nommu.o |
@@ -90,6 +90,7 @@ obj-$(CONFIG_CPU_XSC3) += proc-xsc3.o | |||
90 | obj-$(CONFIG_CPU_MOHAWK) += proc-mohawk.o | 90 | obj-$(CONFIG_CPU_MOHAWK) += proc-mohawk.o |
91 | obj-$(CONFIG_CPU_FEROCEON) += proc-feroceon.o | 91 | obj-$(CONFIG_CPU_FEROCEON) += proc-feroceon.o |
92 | obj-$(CONFIG_CPU_V6) += proc-v6.o | 92 | obj-$(CONFIG_CPU_V6) += proc-v6.o |
93 | obj-$(CONFIG_CPU_V6K) += proc-v6.o | ||
93 | obj-$(CONFIG_CPU_V7) += proc-v7.o | 94 | obj-$(CONFIG_CPU_V7) += proc-v7.o |
94 | 95 | ||
95 | AFLAGS_proc-v6.o :=-Wa,-march=armv6 | 96 | AFLAGS_proc-v6.o :=-Wa,-march=armv6 |
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index f332df7f0d37..1478aa522144 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -20,11 +20,11 @@ | |||
20 | */ | 20 | */ |
21 | .align 5 | 21 | .align 5 |
22 | ENTRY(v6_early_abort) | 22 | ENTRY(v6_early_abort) |
23 | #ifdef CONFIG_CPU_32v6K | 23 | #ifdef CONFIG_CPU_V6 |
24 | clrex | ||
25 | #else | ||
26 | sub r1, sp, #4 @ Get unused stack location | 24 | sub r1, sp, #4 @ Get unused stack location |
27 | strex r0, r1, [r1] @ Clear the exclusive monitor | 25 | strex r0, r1, [r1] @ Clear the exclusive monitor |
26 | #elif defined(CONFIG_CPU_32v6K) | ||
27 | clrex | ||
28 | #endif | 28 | #endif |
29 | mrc p15, 0, r1, c5, c0, 0 @ get FSR | 29 | mrc p15, 0, r1, c5, c0, 0 @ get FSR |
30 | mrc p15, 0, r0, c6, c0, 0 @ get FAR | 30 | mrc p15, 0, r0, c6, c0, 0 @ get FAR |
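The reshuffled #ifdef above preserves an important invariant: on an abort the exclusive monitor must be cleared, otherwise an interrupted LDREX/STREX sequence could see its STREX spuriously succeed after memory changed underneath it. V6K and later provide CLREX for this; plain ARMv6 clears the monitor with a dummy STREX to a scratch stack slot. A sketch of the kind of sequence being protected (the standard ARM atomic-add idiom):

```c
static inline void atomic_add_sketch(int i, int *v)
{
	int newval;
	unsigned long failed;

	asm volatile(
	"1:	ldrex	%0, [%2]\n"	/* open the exclusive monitor     */
	"	add	%0, %0, %3\n"
	"	strex	%1, %0, [%2]\n"	/* fails if the monitor was lost  */
	"	teq	%1, #0\n"
	"	bne	1b\n"
	: "=&r" (newval), "=&r" (failed)
	: "r" (v), "Ir" (i)
	: "cc", "memory");
}
```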
diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S
index 7148e53e6078..1fa6f71470de 100644
--- a/arch/arm/mm/cache-fa.S
+++ b/arch/arm/mm/cache-fa.S
@@ -38,6 +38,17 @@ | |||
38 | #define CACHE_DLIMIT (CACHE_DSIZE * 2) | 38 | #define CACHE_DLIMIT (CACHE_DSIZE * 2) |
39 | 39 | ||
40 | /* | 40 | /* |
41 | * flush_icache_all() | ||
42 | * | ||
43 | * Unconditionally clean and invalidate the entire icache. | ||
44 | */ | ||
45 | ENTRY(fa_flush_icache_all) | ||
46 | mov r0, #0 | ||
47 | mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache | ||
48 | mov pc, lr | ||
49 | ENDPROC(fa_flush_icache_all) | ||
50 | |||
51 | /* | ||
41 | * flush_user_cache_all() | 52 | * flush_user_cache_all() |
42 | * | 53 | * |
43 | * Clean and invalidate all cache entries in a particular address | 54 | * Clean and invalidate all cache entries in a particular address |
@@ -233,6 +244,7 @@ ENDPROC(fa_dma_unmap_area) | |||
233 | 244 | ||
234 | .type fa_cache_fns, #object | 245 | .type fa_cache_fns, #object |
235 | ENTRY(fa_cache_fns) | 246 | ENTRY(fa_cache_fns) |
247 | .long fa_flush_icache_all | ||
236 | .long fa_flush_kern_cache_all | 248 | .long fa_flush_kern_cache_all |
237 | .long fa_flush_user_cache_all | 249 | .long fa_flush_user_cache_all |
238 | .long fa_flush_user_cache_range | 250 | .long fa_flush_user_cache_range |
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index 6e77c042d8e9..e0b0e7a4ec68 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -13,13 +13,9 @@ | |||
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | #include <linux/highmem.h> | ||
16 | #include <asm/cacheflush.h> | 17 | #include <asm/cacheflush.h> |
17 | #include <asm/kmap_types.h> | ||
18 | #include <asm/fixmap.h> | ||
19 | #include <asm/pgtable.h> | ||
20 | #include <asm/tlbflush.h> | ||
21 | #include <plat/cache-feroceon-l2.h> | 18 | #include <plat/cache-feroceon-l2.h> |
22 | #include "mm.h" | ||
23 | 19 | ||
24 | /* | 20 | /* |
25 | * Low-level cache maintenance operations. | 21 | * Low-level cache maintenance operations. |
@@ -39,27 +35,30 @@ | |||
39 | * between which we don't want to be preempted. | 35 | * between which we don't want to be preempted. |
40 | */ | 36 | */ |
41 | 37 | ||
42 | static inline unsigned long l2_start_va(unsigned long paddr) | 38 | static inline unsigned long l2_get_va(unsigned long paddr) |
43 | { | 39 | { |
44 | #ifdef CONFIG_HIGHMEM | 40 | #ifdef CONFIG_HIGHMEM |
45 | /* | 41 | /* |
46 | * Let's do our own fixmap stuff in a minimal way here. | ||
47 | * Because range ops can't be done on physical addresses, | 42 | * Because range ops can't be done on physical addresses, |
48 | * we simply install a virtual mapping for it only for the | 43 | * we simply install a virtual mapping for it only for the |
49 | * TLB lookup to occur, hence no need to flush the untouched | 44 | * TLB lookup to occur, hence no need to flush the untouched |
50 | * memory mapping. This is protected with the disabling of | 45 | * memory mapping afterwards (note: a cache flush may happen |
51 | * interrupts by the caller. | 46 | * in some circumstances depending on the path taken in kunmap_atomic). |
52 | */ | 47 | */ |
53 | unsigned long idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id(); | 48 | void *vaddr = kmap_atomic_pfn(paddr >> PAGE_SHIFT); |
54 | unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); | 49 | return (unsigned long)vaddr + (paddr & ~PAGE_MASK); |
55 | set_pte_ext(TOP_PTE(vaddr), pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL), 0); | ||
56 | local_flush_tlb_kernel_page(vaddr); | ||
57 | return vaddr + (paddr & ~PAGE_MASK); | ||
58 | #else | 50 | #else |
59 | return __phys_to_virt(paddr); | 51 | return __phys_to_virt(paddr); |
60 | #endif | 52 | #endif |
61 | } | 53 | } |
62 | 54 | ||
55 | static inline void l2_put_va(unsigned long vaddr) | ||
56 | { | ||
57 | #ifdef CONFIG_HIGHMEM | ||
58 | kunmap_atomic((void *)vaddr); | ||
59 | #endif | ||
60 | } | ||
61 | |||
63 | static inline void l2_clean_pa(unsigned long addr) | 62 | static inline void l2_clean_pa(unsigned long addr) |
64 | { | 63 | { |
65 | __asm__("mcr p15, 1, %0, c15, c9, 3" : : "r" (addr)); | 64 | __asm__("mcr p15, 1, %0, c15, c9, 3" : : "r" (addr)); |
@@ -76,13 +75,14 @@ static inline void l2_clean_pa_range(unsigned long start, unsigned long end) | |||
76 | */ | 75 | */ |
77 | BUG_ON((start ^ end) >> PAGE_SHIFT); | 76 | BUG_ON((start ^ end) >> PAGE_SHIFT); |
78 | 77 | ||
79 | raw_local_irq_save(flags); | 78 | va_start = l2_get_va(start); |
80 | va_start = l2_start_va(start); | ||
81 | va_end = va_start + (end - start); | 79 | va_end = va_start + (end - start); |
80 | raw_local_irq_save(flags); | ||
82 | __asm__("mcr p15, 1, %0, c15, c9, 4\n\t" | 81 | __asm__("mcr p15, 1, %0, c15, c9, 4\n\t" |
83 | "mcr p15, 1, %1, c15, c9, 5" | 82 | "mcr p15, 1, %1, c15, c9, 5" |
84 | : : "r" (va_start), "r" (va_end)); | 83 | : : "r" (va_start), "r" (va_end)); |
85 | raw_local_irq_restore(flags); | 84 | raw_local_irq_restore(flags); |
85 | l2_put_va(va_start); | ||
86 | } | 86 | } |
87 | 87 | ||
88 | static inline void l2_clean_inv_pa(unsigned long addr) | 88 | static inline void l2_clean_inv_pa(unsigned long addr) |
@@ -106,13 +106,14 @@ static inline void l2_inv_pa_range(unsigned long start, unsigned long end) | |||
106 | */ | 106 | */ |
107 | BUG_ON((start ^ end) >> PAGE_SHIFT); | 107 | BUG_ON((start ^ end) >> PAGE_SHIFT); |
108 | 108 | ||
109 | raw_local_irq_save(flags); | 109 | va_start = l2_get_va(start); |
110 | va_start = l2_start_va(start); | ||
111 | va_end = va_start + (end - start); | 110 | va_end = va_start + (end - start); |
111 | raw_local_irq_save(flags); | ||
112 | __asm__("mcr p15, 1, %0, c15, c11, 4\n\t" | 112 | __asm__("mcr p15, 1, %0, c15, c11, 4\n\t" |
113 | "mcr p15, 1, %1, c15, c11, 5" | 113 | "mcr p15, 1, %1, c15, c11, 5" |
114 | : : "r" (va_start), "r" (va_end)); | 114 | : : "r" (va_start), "r" (va_end)); |
115 | raw_local_irq_restore(flags); | 115 | raw_local_irq_restore(flags); |
116 | l2_put_va(va_start); | ||
116 | } | 117 | } |
117 | 118 | ||
118 | static inline void l2_inv_all(void) | 119 | static inline void l2_inv_all(void) |
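The rewrite above replaces a hand-rolled fixmap PTE (set_pte_ext plus a TLB flush, all under disabled interrupts) with the stock kmap_atomic_pfn()/kunmap_atomic() API, which also lets the IRQ-disabled window shrink to just the cache operation itself. A sketch of the resulting call pattern, with a hypothetical wrapper name:

```c
/* Hypothetical wrapper; assumes the range stays within one page. */
static void l2_op_on_phys_range(unsigned long start, unsigned long end)
{
	unsigned long va = l2_get_va(start);	/* kmap_atomic_pfn() on HIGHMEM */
	unsigned long flags;

	raw_local_irq_save(flags);	/* only the MVA-based ops run with IRQs off */
	/* ... per-line mcr-based maintenance from va to va + (end - start) ... */
	raw_local_irq_restore(flags);
	l2_put_va(va);			/* kunmap_atomic() on HIGHMEM */
}
```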
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 9982eb385c0f..44c086710d2b 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -28,18 +28,34 @@ | |||
28 | static void __iomem *l2x0_base; | 28 | static void __iomem *l2x0_base; |
29 | static DEFINE_SPINLOCK(l2x0_lock); | 29 | static DEFINE_SPINLOCK(l2x0_lock); |
30 | static uint32_t l2x0_way_mask; /* Bitmask of active ways */ | 30 | static uint32_t l2x0_way_mask; /* Bitmask of active ways */ |
31 | static uint32_t l2x0_size; | ||
31 | 32 | ||
32 | static inline void cache_wait(void __iomem *reg, unsigned long mask) | 33 | static inline void cache_wait_way(void __iomem *reg, unsigned long mask) |
33 | { | 34 | { |
34 | /* wait for the operation to complete */ | 35 | /* wait for cache operation by line or way to complete */ |
35 | while (readl_relaxed(reg) & mask) | 36 | while (readl_relaxed(reg) & mask) |
36 | ; | 37 | ; |
37 | } | 38 | } |
38 | 39 | ||
40 | #ifdef CONFIG_CACHE_PL310 | ||
41 | static inline void cache_wait(void __iomem *reg, unsigned long mask) | ||
42 | { | ||
43 | /* cache operations by line are atomic on PL310 */ | ||
44 | } | ||
45 | #else | ||
46 | #define cache_wait cache_wait_way | ||
47 | #endif | ||
48 | |||
39 | static inline void cache_sync(void) | 49 | static inline void cache_sync(void) |
40 | { | 50 | { |
41 | void __iomem *base = l2x0_base; | 51 | void __iomem *base = l2x0_base; |
52 | |||
53 | #ifdef CONFIG_ARM_ERRATA_753970 | ||
54 | /* write to an unmmapped register */ | ||
55 | writel_relaxed(0, base + L2X0_DUMMY_REG); | ||
56 | #else | ||
42 | writel_relaxed(0, base + L2X0_CACHE_SYNC); | 57 | writel_relaxed(0, base + L2X0_CACHE_SYNC); |
58 | #endif | ||
43 | cache_wait(base + L2X0_CACHE_SYNC, 1); | 59 | cache_wait(base + L2X0_CACHE_SYNC, 1); |
44 | } | 60 | } |
45 | 61 | ||
@@ -57,18 +73,24 @@ static inline void l2x0_inv_line(unsigned long addr) | |||
57 | writel_relaxed(addr, base + L2X0_INV_LINE_PA); | 73 | writel_relaxed(addr, base + L2X0_INV_LINE_PA); |
58 | } | 74 | } |
59 | 75 | ||
60 | #ifdef CONFIG_PL310_ERRATA_588369 | 76 | #if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915) |
61 | static void debug_writel(unsigned long val) | ||
62 | { | ||
63 | extern void omap_smc1(u32 fn, u32 arg); | ||
64 | 77 | ||
65 | /* | 78 | #define debug_writel(val) outer_cache.set_debug(val) |
66 | * Texas Instrument secure monitor api to modify the | 79 | |
67 | * PL310 Debug Control Register. | 80 | static void l2x0_set_debug(unsigned long val) |
68 | */ | 81 | { |
69 | omap_smc1(0x100, val); | 82 | writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL); |
70 | } | 83 | } |
84 | #else | ||
85 | /* Optimised out for non-errata case */ | ||
86 | static inline void debug_writel(unsigned long val) | ||
87 | { | ||
88 | } | ||
89 | |||
90 | #define l2x0_set_debug NULL | ||
91 | #endif | ||
71 | 92 | ||
93 | #ifdef CONFIG_PL310_ERRATA_588369 | ||
72 | static inline void l2x0_flush_line(unsigned long addr) | 94 | static inline void l2x0_flush_line(unsigned long addr) |
73 | { | 95 | { |
74 | void __iomem *base = l2x0_base; | 96 | void __iomem *base = l2x0_base; |
@@ -81,11 +103,6 @@ static inline void l2x0_flush_line(unsigned long addr) | |||
81 | } | 103 | } |
82 | #else | 104 | #else |
83 | 105 | ||
84 | /* Optimised out for non-errata case */ | ||
85 | static inline void debug_writel(unsigned long val) | ||
86 | { | ||
87 | } | ||
88 | |||
89 | static inline void l2x0_flush_line(unsigned long addr) | 106 | static inline void l2x0_flush_line(unsigned long addr) |
90 | { | 107 | { |
91 | void __iomem *base = l2x0_base; | 108 | void __iomem *base = l2x0_base; |
@@ -103,14 +120,47 @@ static void l2x0_cache_sync(void) | |||
103 | spin_unlock_irqrestore(&l2x0_lock, flags); | 120 | spin_unlock_irqrestore(&l2x0_lock, flags); |
104 | } | 121 | } |
105 | 122 | ||
106 | static inline void l2x0_inv_all(void) | 123 | static void __l2x0_flush_all(void) |
124 | { | ||
125 | debug_writel(0x03); | ||
126 | writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY); | ||
127 | cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask); | ||
128 | cache_sync(); | ||
129 | debug_writel(0x00); | ||
130 | } | ||
131 | |||
132 | static void l2x0_flush_all(void) | ||
133 | { | ||
134 | unsigned long flags; | ||
135 | |||
136 | /* clean all ways */ | ||
137 | spin_lock_irqsave(&l2x0_lock, flags); | ||
138 | __l2x0_flush_all(); | ||
139 | spin_unlock_irqrestore(&l2x0_lock, flags); | ||
140 | } | ||
141 | |||
142 | static void l2x0_clean_all(void) | ||
143 | { | ||
144 | unsigned long flags; | ||
145 | |||
146 | /* clean all ways */ | ||
147 | spin_lock_irqsave(&l2x0_lock, flags); | ||
148 | writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY); | ||
149 | cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask); | ||
150 | cache_sync(); | ||
151 | spin_unlock_irqrestore(&l2x0_lock, flags); | ||
152 | } | ||
153 | |||
154 | static void l2x0_inv_all(void) | ||
107 | { | 155 | { |
108 | unsigned long flags; | 156 | unsigned long flags; |
109 | 157 | ||
110 | /* invalidate all ways */ | 158 | /* invalidate all ways */ |
111 | spin_lock_irqsave(&l2x0_lock, flags); | 159 | spin_lock_irqsave(&l2x0_lock, flags); |
160 | /* Invalidating when L2 is enabled is a nono */ | ||
161 | BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1); | ||
112 | writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY); | 162 | writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY); |
113 | cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask); | 163 | cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask); |
114 | cache_sync(); | 164 | cache_sync(); |
115 | spin_unlock_irqrestore(&l2x0_lock, flags); | 165 | spin_unlock_irqrestore(&l2x0_lock, flags); |
116 | } | 166 | } |
@@ -159,6 +209,11 @@ static void l2x0_clean_range(unsigned long start, unsigned long end) | |||
159 | void __iomem *base = l2x0_base; | 209 | void __iomem *base = l2x0_base; |
160 | unsigned long flags; | 210 | unsigned long flags; |
161 | 211 | ||
212 | if ((end - start) >= l2x0_size) { | ||
213 | l2x0_clean_all(); | ||
214 | return; | ||
215 | } | ||
216 | |||
162 | spin_lock_irqsave(&l2x0_lock, flags); | 217 | spin_lock_irqsave(&l2x0_lock, flags); |
163 | start &= ~(CACHE_LINE_SIZE - 1); | 218 | start &= ~(CACHE_LINE_SIZE - 1); |
164 | while (start < end) { | 219 | while (start < end) { |
@@ -184,6 +239,11 @@ static void l2x0_flush_range(unsigned long start, unsigned long end) | |||
184 | void __iomem *base = l2x0_base; | 239 | void __iomem *base = l2x0_base; |
185 | unsigned long flags; | 240 | unsigned long flags; |
186 | 241 | ||
242 | if ((end - start) >= l2x0_size) { | ||
243 | l2x0_flush_all(); | ||
244 | return; | ||
245 | } | ||
246 | |||
187 | spin_lock_irqsave(&l2x0_lock, flags); | 247 | spin_lock_irqsave(&l2x0_lock, flags); |
188 | start &= ~(CACHE_LINE_SIZE - 1); | 248 | start &= ~(CACHE_LINE_SIZE - 1); |
189 | while (start < end) { | 249 | while (start < end) { |
@@ -206,10 +266,22 @@ static void l2x0_flush_range(unsigned long start, unsigned long end) | |||
206 | spin_unlock_irqrestore(&l2x0_lock, flags); | 266 | spin_unlock_irqrestore(&l2x0_lock, flags); |
207 | } | 267 | } |
208 | 268 | ||
269 | static void l2x0_disable(void) | ||
270 | { | ||
271 | unsigned long flags; | ||
272 | |||
273 | spin_lock_irqsave(&l2x0_lock, flags); | ||
274 | __l2x0_flush_all(); | ||
275 | writel_relaxed(0, l2x0_base + L2X0_CTRL); | ||
276 | dsb(); | ||
277 | spin_unlock_irqrestore(&l2x0_lock, flags); | ||
278 | } | ||
279 | |||
209 | void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask) | 280 | void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask) |
210 | { | 281 | { |
211 | __u32 aux; | 282 | __u32 aux; |
212 | __u32 cache_id; | 283 | __u32 cache_id; |
284 | __u32 way_size = 0; | ||
213 | int ways; | 285 | int ways; |
214 | const char *type; | 286 | const char *type; |
215 | 287 | ||
@@ -244,6 +316,13 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask) | |||
244 | l2x0_way_mask = (1 << ways) - 1; | 316 | l2x0_way_mask = (1 << ways) - 1; |
245 | 317 | ||
246 | /* | 318 | /* |
319 | * L2 cache Size = Way size * Number of ways | ||
320 | */ | ||
321 | way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17; | ||
322 | way_size = 1 << (way_size + 3); | ||
323 | l2x0_size = ways * way_size * SZ_1K; | ||
324 | |||
325 | /* | ||
247 | * Check if l2x0 controller is already enabled. | 326 | * Check if l2x0 controller is already enabled. |
248 | * If you are booting from non-secure mode | 327 | * If you are booting from non-secure mode |
249 | * accessing the below registers will fault. | 328 | * accessing the below registers will fault. |
@@ -263,8 +342,12 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask) | |||
263 | outer_cache.clean_range = l2x0_clean_range; | 342 | outer_cache.clean_range = l2x0_clean_range; |
264 | outer_cache.flush_range = l2x0_flush_range; | 343 | outer_cache.flush_range = l2x0_flush_range; |
265 | outer_cache.sync = l2x0_cache_sync; | 344 | outer_cache.sync = l2x0_cache_sync; |
345 | outer_cache.flush_all = l2x0_flush_all; | ||
346 | outer_cache.inv_all = l2x0_inv_all; | ||
347 | outer_cache.disable = l2x0_disable; | ||
348 | outer_cache.set_debug = l2x0_set_debug; | ||
266 | 349 | ||
267 | printk(KERN_INFO "%s cache controller enabled\n", type); | 350 | printk(KERN_INFO "%s cache controller enabled\n", type); |
268 | printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n", | 351 | printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n", |
269 | ways, cache_id, aux); | 352 | ways, cache_id, aux, l2x0_size); |
270 | } | 353 | } |
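A worked example of the new size computation, with hypothetical register values: if the AUX_CTRL way-size field (bits [19:17]) reads 3 and the controller has 8 ways, the cache is 512 KB. That size then doubles as the cutoff above which the new ranged clean/flush paths fall back to the cheaper whole-cache operations:

```c
/* Worked example with hypothetical values: way-size field = 3, ways = 8. */
static unsigned int l2x0_size_example(void)
{
	unsigned int aux_field = 3;			/* AUX_CTRL bits [19:17] */
	unsigned int ways = 8;
	unsigned int way_kb = 1u << (aux_field + 3);	/* 64 KB per way */

	return ways * way_kb * 1024;			/* 524288 B = 512 KB */
}
```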
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S
index c2ff3c599fee..2e2bc406a18d 100644
--- a/arch/arm/mm/cache-v3.S
+++ b/arch/arm/mm/cache-v3.S
@@ -13,6 +13,15 @@ | |||
13 | #include "proc-macros.S" | 13 | #include "proc-macros.S" |
14 | 14 | ||
15 | /* | 15 | /* |
16 | * flush_icache_all() | ||
17 | * | ||
18 | * Unconditionally clean and invalidate the entire icache. | ||
19 | */ | ||
20 | ENTRY(v3_flush_icache_all) | ||
21 | mov pc, lr | ||
22 | ENDPROC(v3_flush_icache_all) | ||
23 | |||
24 | /* | ||
16 | * flush_user_cache_all() | 25 | * flush_user_cache_all() |
17 | * | 26 | * |
18 | * Invalidate all cache entries in a particular address | 27 | * Invalidate all cache entries in a particular address |
@@ -122,6 +131,7 @@ ENDPROC(v3_dma_map_area) | |||
122 | 131 | ||
123 | .type v3_cache_fns, #object | 132 | .type v3_cache_fns, #object |
124 | ENTRY(v3_cache_fns) | 133 | ENTRY(v3_cache_fns) |
134 | .long v3_flush_icache_all | ||
125 | .long v3_flush_kern_cache_all | 135 | .long v3_flush_kern_cache_all |
126 | .long v3_flush_user_cache_all | 136 | .long v3_flush_user_cache_all |
127 | .long v3_flush_user_cache_range | 137 | .long v3_flush_user_cache_range |
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index 4810f7e3e813..a8fefb523f19 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -13,6 +13,15 @@ | |||
13 | #include "proc-macros.S" | 13 | #include "proc-macros.S" |
14 | 14 | ||
15 | /* | 15 | /* |
16 | * flush_icache_all() | ||
17 | * | ||
18 | * Unconditionally clean and invalidate the entire icache. | ||
19 | */ | ||
20 | ENTRY(v4_flush_icache_all) | ||
21 | mov pc, lr | ||
22 | ENDPROC(v4_flush_icache_all) | ||
23 | |||
24 | /* | ||
16 | * flush_user_cache_all() | 25 | * flush_user_cache_all() |
17 | * | 26 | * |
18 | * Invalidate all cache entries in a particular address | 27 | * Invalidate all cache entries in a particular address |
@@ -134,6 +143,7 @@ ENDPROC(v4_dma_map_area) | |||
134 | 143 | ||
135 | .type v4_cache_fns, #object | 144 | .type v4_cache_fns, #object |
136 | ENTRY(v4_cache_fns) | 145 | ENTRY(v4_cache_fns) |
146 | .long v4_flush_icache_all | ||
137 | .long v4_flush_kern_cache_all | 147 | .long v4_flush_kern_cache_all |
138 | .long v4_flush_user_cache_all | 148 | .long v4_flush_user_cache_all |
139 | .long v4_flush_user_cache_range | 149 | .long v4_flush_user_cache_range |
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S
index df8368afa102..f40c69656d8d 100644
--- a/arch/arm/mm/cache-v4wb.S
+++ b/arch/arm/mm/cache-v4wb.S
@@ -32,7 +32,7 @@ | |||
32 | /* | 32 | /* |
33 | * This is the size at which it becomes more efficient to | 33 | * This is the size at which it becomes more efficient to |
34 | * clean the whole cache, rather than using the individual | 34 | * clean the whole cache, rather than using the individual |
35 | * cache line maintainence instructions. | 35 | * cache line maintenance instructions. |
36 | * | 36 | * |
37 | * Size Clean (ticks) Dirty (ticks) | 37 | * Size Clean (ticks) Dirty (ticks) |
38 | * 4096 21 20 21 53 55 54 | 38 | * 4096 21 20 21 53 55 54 |
@@ -51,6 +51,17 @@ flush_base: | |||
51 | .text | 51 | .text |
52 | 52 | ||
53 | /* | 53 | /* |
54 | * flush_icache_all() | ||
55 | * | ||
56 | * Unconditionally clean and invalidate the entire icache. | ||
57 | */ | ||
58 | ENTRY(v4wb_flush_icache_all) | ||
59 | mov r0, #0 | ||
60 | mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache | ||
61 | mov pc, lr | ||
62 | ENDPROC(v4wb_flush_icache_all) | ||
63 | |||
64 | /* | ||
54 | * flush_user_cache_all() | 65 | * flush_user_cache_all() |
55 | * | 66 | * |
56 | * Clean and invalidate all cache entries in a particular address | 67 | * Clean and invalidate all cache entries in a particular address |
@@ -244,6 +255,7 @@ ENDPROC(v4wb_dma_unmap_area) | |||
244 | 255 | ||
245 | .type v4wb_cache_fns, #object | 256 | .type v4wb_cache_fns, #object |
246 | ENTRY(v4wb_cache_fns) | 257 | ENTRY(v4wb_cache_fns) |
258 | .long v4wb_flush_icache_all | ||
247 | .long v4wb_flush_kern_cache_all | 259 | .long v4wb_flush_kern_cache_all |
248 | .long v4wb_flush_user_cache_all | 260 | .long v4wb_flush_user_cache_all |
249 | .long v4wb_flush_user_cache_range | 261 | .long v4wb_flush_user_cache_range |
diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S
index 45c70312f43b..a7b276dbda11 100644
--- a/arch/arm/mm/cache-v4wt.S
+++ b/arch/arm/mm/cache-v4wt.S
@@ -34,13 +34,24 @@ | |||
34 | /* | 34 | /* |
35 | * This is the size at which it becomes more efficient to | 35 | * This is the size at which it becomes more efficient to |
36 | * clean the whole cache, rather than using the individual | 36 | * clean the whole cache, rather than using the individual |
37 | * cache line maintainence instructions. | 37 | * cache line maintenance instructions. |
38 | * | 38 | * |
39 | * *** This needs benchmarking | 39 | * *** This needs benchmarking |
40 | */ | 40 | */ |
41 | #define CACHE_DLIMIT 16384 | 41 | #define CACHE_DLIMIT 16384 |
42 | 42 | ||
43 | /* | 43 | /* |
44 | * flush_icache_all() | ||
45 | * | ||
46 | * Unconditionally clean and invalidate the entire icache. | ||
47 | */ | ||
48 | ENTRY(v4wt_flush_icache_all) | ||
49 | mov r0, #0 | ||
50 | mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache | ||
51 | mov pc, lr | ||
52 | ENDPROC(v4wt_flush_icache_all) | ||
53 | |||
54 | /* | ||
44 | * flush_user_cache_all() | 55 | * flush_user_cache_all() |
45 | * | 56 | * |
46 | * Invalidate all cache entries in a particular address | 57 | * Invalidate all cache entries in a particular address |
@@ -188,6 +199,7 @@ ENDPROC(v4wt_dma_map_area) | |||
188 | 199 | ||
189 | .type v4wt_cache_fns, #object | 200 | .type v4wt_cache_fns, #object |
190 | ENTRY(v4wt_cache_fns) | 201 | ENTRY(v4wt_cache_fns) |
202 | .long v4wt_flush_icache_all | ||
191 | .long v4wt_flush_kern_cache_all | 203 | .long v4wt_flush_kern_cache_all |
192 | .long v4wt_flush_user_cache_all | 204 | .long v4wt_flush_user_cache_all |
193 | .long v4wt_flush_user_cache_range | 205 | .long v4wt_flush_user_cache_range |
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 86aa689ef1aa..73b4a8b66a57 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -21,18 +21,22 @@ | |||
21 | #define D_CACHE_LINE_SIZE 32 | 21 | #define D_CACHE_LINE_SIZE 32 |
22 | #define BTB_FLUSH_SIZE 8 | 22 | #define BTB_FLUSH_SIZE 8 |
23 | 23 | ||
24 | #ifdef CONFIG_ARM_ERRATA_411920 | ||
25 | /* | 24 | /* |
26 | * Invalidate the entire I cache (this code is a workaround for the ARM1136 | 25 | * v6_flush_icache_all() |
27 | * erratum 411920 - Invalidate Instruction Cache operation can fail. This | 26 | * |
28 | * erratum is present in 1136, 1156 and 1176. It does not affect the MPCore. | 27 | * Flush the whole I-cache. |
28 | * | ||
29 | * ARM1136 erratum 411920 - Invalidate Instruction Cache operation can fail. | ||
30 | * This erratum is present in 1136, 1156 and 1176. It does not affect the | ||
31 | * MPCore. | ||
29 | * | 32 | * |
30 | * Registers: | 33 | * Registers: |
31 | * r0 - set to 0 | 34 | * r0 - set to 0 |
32 | * r1 - corrupted | 35 | * r1 - corrupted |
33 | */ | 36 | */ |
34 | ENTRY(v6_icache_inval_all) | 37 | ENTRY(v6_flush_icache_all) |
35 | mov r0, #0 | 38 | mov r0, #0 |
39 | #ifdef CONFIG_ARM_ERRATA_411920 | ||
36 | mrs r1, cpsr | 40 | mrs r1, cpsr |
37 | cpsid ifa @ disable interrupts | 41 | cpsid ifa @ disable interrupts |
38 | mcr p15, 0, r0, c7, c5, 0 @ invalidate entire I-cache | 42 | mcr p15, 0, r0, c7, c5, 0 @ invalidate entire I-cache |
@@ -43,8 +47,11 @@ ENTRY(v6_icache_inval_all) | |||
43 | .rept 11 @ ARM Ltd recommends at least | 47 | .rept 11 @ ARM Ltd recommends at least |
44 | nop @ 11 NOPs | 48 | nop @ 11 NOPs |
45 | .endr | 49 | .endr |
46 | mov pc, lr | 50 | #else |
51 | mcr p15, 0, r0, c7, c5, 0 @ invalidate I-cache | ||
47 | #endif | 52 | #endif |
53 | mov pc, lr | ||
54 | ENDPROC(v6_flush_icache_all) | ||
48 | 55 | ||
49 | /* | 56 | /* |
50 | * v6_flush_cache_all() | 57 | * v6_flush_cache_all() |
@@ -60,7 +67,7 @@ ENTRY(v6_flush_kern_cache_all) | |||
60 | #ifndef CONFIG_ARM_ERRATA_411920 | 67 | #ifndef CONFIG_ARM_ERRATA_411920 |
61 | mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate | 68 | mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate |
62 | #else | 69 | #else |
63 | b v6_icache_inval_all | 70 | b v6_flush_icache_all |
64 | #endif | 71 | #endif |
65 | #else | 72 | #else |
66 | mcr p15, 0, r0, c7, c15, 0 @ Cache clean+invalidate | 73 | mcr p15, 0, r0, c7, c15, 0 @ Cache clean+invalidate |
@@ -138,7 +145,7 @@ ENTRY(v6_coherent_user_range) | |||
138 | #ifndef CONFIG_ARM_ERRATA_411920 | 145 | #ifndef CONFIG_ARM_ERRATA_411920 |
139 | mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate | 146 | mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate |
140 | #else | 147 | #else |
141 | b v6_icache_inval_all | 148 | b v6_flush_icache_all |
142 | #endif | 149 | #endif |
143 | #else | 150 | #else |
144 | mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB | 151 | mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB |
@@ -169,6 +176,7 @@ ENDPROC(v6_coherent_kern_range) | |||
169 | */ | 176 | */ |
170 | ENTRY(v6_flush_kern_dcache_area) | 177 | ENTRY(v6_flush_kern_dcache_area) |
171 | add r1, r0, r1 | 178 | add r1, r0, r1 |
179 | bic r0, r0, #D_CACHE_LINE_SIZE - 1 | ||
172 | 1: | 180 | 1: |
173 | #ifdef HARVARD_CACHE | 181 | #ifdef HARVARD_CACHE |
174 | mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line | 182 | mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line |
@@ -196,6 +204,10 @@ ENTRY(v6_flush_kern_dcache_area) | |||
196 | * - end - virtual end address of region | 204 | * - end - virtual end address of region |
197 | */ | 205 | */ |
198 | v6_dma_inv_range: | 206 | v6_dma_inv_range: |
207 | #ifdef CONFIG_DMA_CACHE_RWFO | ||
208 | ldrb r2, [r0] @ read for ownership | ||
209 | strb r2, [r0] @ write for ownership | ||
210 | #endif | ||
199 | tst r0, #D_CACHE_LINE_SIZE - 1 | 211 | tst r0, #D_CACHE_LINE_SIZE - 1 |
200 | bic r0, r0, #D_CACHE_LINE_SIZE - 1 | 212 | bic r0, r0, #D_CACHE_LINE_SIZE - 1 |
201 | #ifdef HARVARD_CACHE | 213 | #ifdef HARVARD_CACHE |
@@ -204,6 +216,10 @@ v6_dma_inv_range: | |||
204 | mcrne p15, 0, r0, c7, c11, 1 @ clean unified line | 216 | mcrne p15, 0, r0, c7, c11, 1 @ clean unified line |
205 | #endif | 217 | #endif |
206 | tst r1, #D_CACHE_LINE_SIZE - 1 | 218 | tst r1, #D_CACHE_LINE_SIZE - 1 |
219 | #ifdef CONFIG_DMA_CACHE_RWFO | ||
220 | ldrneb r2, [r1, #-1] @ read for ownership | ||
221 | strneb r2, [r1, #-1] @ write for ownership | ||
222 | #endif | ||
207 | bic r1, r1, #D_CACHE_LINE_SIZE - 1 | 223 | bic r1, r1, #D_CACHE_LINE_SIZE - 1 |
208 | #ifdef HARVARD_CACHE | 224 | #ifdef HARVARD_CACHE |
209 | mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D line | 225 | mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D line |
@@ -211,10 +227,6 @@ v6_dma_inv_range: | |||
211 | mcrne p15, 0, r1, c7, c15, 1 @ clean & invalidate unified line | 227 | mcrne p15, 0, r1, c7, c15, 1 @ clean & invalidate unified line |
212 | #endif | 228 | #endif |
213 | 1: | 229 | 1: |
214 | #ifdef CONFIG_DMA_CACHE_RWFO | ||
215 | ldr r2, [r0] @ read for ownership | ||
216 | str r2, [r0] @ write for ownership | ||
217 | #endif | ||
218 | #ifdef HARVARD_CACHE | 230 | #ifdef HARVARD_CACHE |
219 | mcr p15, 0, r0, c7, c6, 1 @ invalidate D line | 231 | mcr p15, 0, r0, c7, c6, 1 @ invalidate D line |
220 | #else | 232 | #else |
@@ -222,6 +234,10 @@ v6_dma_inv_range: | |||
222 | #endif | 234 | #endif |
223 | add r0, r0, #D_CACHE_LINE_SIZE | 235 | add r0, r0, #D_CACHE_LINE_SIZE |
224 | cmp r0, r1 | 236 | cmp r0, r1 |
237 | #ifdef CONFIG_DMA_CACHE_RWFO | ||
238 | ldrlo r2, [r0] @ read for ownership | ||
239 | strlo r2, [r0] @ write for ownership | ||
240 | #endif | ||
225 | blo 1b | 241 | blo 1b |
226 | mov r0, #0 | 242 | mov r0, #0 |
227 | mcr p15, 0, r0, c7, c10, 4 @ drain write buffer | 243 | mcr p15, 0, r0, c7, c10, 4 @ drain write buffer |
@@ -256,12 +272,12 @@ v6_dma_clean_range: | |||
256 | * - end - virtual end address of region | 272 | * - end - virtual end address of region |
257 | */ | 273 | */ |
258 | ENTRY(v6_dma_flush_range) | 274 | ENTRY(v6_dma_flush_range) |
259 | bic r0, r0, #D_CACHE_LINE_SIZE - 1 | ||
260 | 1: | ||
261 | #ifdef CONFIG_DMA_CACHE_RWFO | 275 | #ifdef CONFIG_DMA_CACHE_RWFO |
262 | ldr r2, [r0] @ read for ownership | 276 | ldrb r2, [r0] @ read for ownership |
263 | str r2, [r0] @ write for ownership | 277 | strb r2, [r0] @ write for ownership |
264 | #endif | 278 | #endif |
279 | bic r0, r0, #D_CACHE_LINE_SIZE - 1 | ||
280 | 1: | ||
265 | #ifdef HARVARD_CACHE | 281 | #ifdef HARVARD_CACHE |
266 | mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line | 282 | mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line |
267 | #else | 283 | #else |
@@ -269,6 +285,10 @@ ENTRY(v6_dma_flush_range) | |||
269 | #endif | 285 | #endif |
270 | add r0, r0, #D_CACHE_LINE_SIZE | 286 | add r0, r0, #D_CACHE_LINE_SIZE |
271 | cmp r0, r1 | 287 | cmp r0, r1 |
288 | #ifdef CONFIG_DMA_CACHE_RWFO | ||
289 | ldrlob r2, [r0] @ read for ownership | ||
290 | strlob r2, [r0] @ write for ownership | ||
291 | #endif | ||
272 | blo 1b | 292 | blo 1b |
273 | mov r0, #0 | 293 | mov r0, #0 |
274 | mcr p15, 0, r0, c7, c10, 4 @ drain write buffer | 294 | mcr p15, 0, r0, c7, c10, 4 @ drain write buffer |
@@ -312,6 +332,7 @@ ENDPROC(v6_dma_unmap_area) | |||
312 | 332 | ||
313 | .type v6_cache_fns, #object | 333 | .type v6_cache_fns, #object |
314 | ENTRY(v6_cache_fns) | 334 | ENTRY(v6_cache_fns) |
335 | .long v6_flush_icache_all | ||
315 | .long v6_flush_kern_cache_all | 336 | .long v6_flush_kern_cache_all |
316 | .long v6_flush_user_cache_all | 337 | .long v6_flush_user_cache_all |
317 | .long v6_flush_user_cache_range | 338 | .long v6_flush_user_cache_range |
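"Read/write for ownership" (RWFO), used throughout the hunks above: the ARM11MPCore Snoop Control Unit does not broadcast cache maintenance by MVA to other CPUs, so a dummy read plus write first migrates the line into the local CPU's cache in owned state, guaranteeing the following local clean or invalidate acts on the current data. The switch from ldr/str to ldrb/strb plausibly also avoids alignment aborts on unaligned buffer starts (an inference from the change, not stated in it). The idiom at C level, illustrative only:

```c
/* Illustrative only: force this CPU to own the cache line at p. */
static inline void rwfo_touch(volatile unsigned char *p)
{
	unsigned char v = *p;	/* read for ownership  */
	*p = v;			/* write for ownership */
}
```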
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 37c8157e116e..d32f02b61866 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -18,6 +18,21 @@ | |||
18 | #include "proc-macros.S" | 18 | #include "proc-macros.S" |
19 | 19 | ||
20 | /* | 20 | /* |
21 | * v7_flush_icache_all() | ||
22 | * | ||
23 | * Flush the whole I-cache. | ||
24 | * | ||
25 | * Registers: | ||
26 | * r0 - set to 0 | ||
27 | */ | ||
28 | ENTRY(v7_flush_icache_all) | ||
29 | mov r0, #0 | ||
30 | ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable | ||
31 | ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate | ||
32 | mov pc, lr | ||
33 | ENDPROC(v7_flush_icache_all) | ||
34 | |||
35 | /* | ||
21 | * v7_flush_dcache_all() | 36 | * v7_flush_dcache_all() |
22 | * | 37 | * |
23 | * Flush the whole D-cache. | 38 | * Flush the whole D-cache. |
@@ -81,7 +96,7 @@ ENDPROC(v7_flush_dcache_all) | |||
81 | * Flush the entire cache system. | 96 | * Flush the entire cache system. |
82 | * The data cache flush is now achieved using atomic clean / invalidates | 97 | * The data cache flush is now achieved using atomic clean / invalidates |
83 | * working outwards from L1 cache. This is done using Set/Way based cache | 98 | * working outwards from L1 cache. This is done using Set/Way based cache |
84 | * maintainance instructions. | 99 | * maintenance instructions. |
85 | * The instruction cache can still be invalidated back to the point of | 100 | * The instruction cache can still be invalidated back to the point of |
86 | * unification in a single instruction. | 101 | * unification in a single instruction. |
87 | * | 102 | * |
@@ -91,11 +106,8 @@ ENTRY(v7_flush_kern_cache_all) | |||
91 | THUMB( stmfd sp!, {r4-r7, r9-r11, lr} ) | 106 | THUMB( stmfd sp!, {r4-r7, r9-r11, lr} ) |
92 | bl v7_flush_dcache_all | 107 | bl v7_flush_dcache_all |
93 | mov r0, #0 | 108 | mov r0, #0 |
94 | #ifdef CONFIG_SMP | 109 | ALT_SMP(mcr p15, 0, r0, c7, c1, 0) @ invalidate I-cache inner shareable |
95 | mcr p15, 0, r0, c7, c1, 0 @ invalidate I-cache inner shareable | 110 | ALT_UP(mcr p15, 0, r0, c7, c5, 0) @ I+BTB cache invalidate |
96 | #else | ||
97 | mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate | ||
98 | #endif | ||
99 | ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} ) | 111 | ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} ) |
100 | THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} ) | 112 | THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} ) |
101 | mov pc, lr | 113 | mov pc, lr |
@@ -161,21 +173,25 @@ ENTRY(v7_coherent_user_range) | |||
161 | UNWIND(.fnstart ) | 173 | UNWIND(.fnstart ) |
162 | dcache_line_size r2, r3 | 174 | dcache_line_size r2, r3 |
163 | sub r3, r2, #1 | 175 | sub r3, r2, #1 |
164 | bic r0, r0, r3 | 176 | bic r12, r0, r3 |
165 | 1: | 177 | 1: |
166 | USER( mcr p15, 0, r0, c7, c11, 1 ) @ clean D line to the point of unification | 178 | USER( mcr p15, 0, r12, c7, c11, 1 ) @ clean D line to the point of unification |
179 | add r12, r12, r2 | ||
180 | cmp r12, r1 | ||
181 | blo 1b | ||
167 | dsb | 182 | dsb |
168 | USER( mcr p15, 0, r0, c7, c5, 1 ) @ invalidate I line | 183 | icache_line_size r2, r3 |
169 | add r0, r0, r2 | 184 | sub r3, r2, #1 |
185 | bic r12, r0, r3 | ||
170 | 2: | 186 | 2: |
171 | cmp r0, r1 | 187 | USER( mcr p15, 0, r12, c7, c5, 1 ) @ invalidate I line |
172 | blo 1b | 188 | add r12, r12, r2 |
189 | cmp r12, r1 | ||
190 | blo 2b | ||
191 | 3: | ||
173 | mov r0, #0 | 192 | mov r0, #0 |
174 | #ifdef CONFIG_SMP | 193 | ALT_SMP(mcr p15, 0, r0, c7, c1, 6) @ invalidate BTB Inner Shareable |
175 | mcr p15, 0, r0, c7, c1, 6 @ invalidate BTB Inner Shareable | 194 | ALT_UP(mcr p15, 0, r0, c7, c5, 6) @ invalidate BTB |
176 | #else | ||
177 | mcr p15, 0, r0, c7, c5, 6 @ invalidate BTB | ||
178 | #endif | ||
179 | dsb | 195 | dsb |
180 | isb | 196 | isb |
181 | mov pc, lr | 197 | mov pc, lr |
@@ -185,10 +201,10 @@ ENTRY(v7_coherent_user_range) | |||
185 | * isn't mapped, just try the next page. | 201 | * isn't mapped, just try the next page. |
186 | */ | 202 | */ |
187 | 9001: | 203 | 9001: |
188 | mov r0, r0, lsr #12 | 204 | mov r12, r12, lsr #12 |
189 | mov r0, r0, lsl #12 | 205 | mov r12, r12, lsl #12 |
190 | add r0, r0, #4096 | 206 | add r12, r12, #4096 |
191 | b 2b | 207 | b 3b |
192 | UNWIND(.fnend ) | 208 | UNWIND(.fnend ) |
193 | ENDPROC(v7_coherent_kern_range) | 209 | ENDPROC(v7_coherent_kern_range) |
194 | ENDPROC(v7_coherent_user_range) | 210 | ENDPROC(v7_coherent_user_range) |
@@ -205,6 +221,8 @@ ENDPROC(v7_coherent_user_range) | |||
205 | ENTRY(v7_flush_kern_dcache_area) | 221 | ENTRY(v7_flush_kern_dcache_area) |
206 | dcache_line_size r2, r3 | 222 | dcache_line_size r2, r3 |
207 | add r1, r0, r1 | 223 | add r1, r0, r1 |
224 | sub r3, r2, #1 | ||
225 | bic r0, r0, r3 | ||
208 | 1: | 226 | 1: |
209 | mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line / unified line | 227 | mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line / unified line |
210 | add r0, r0, r2 | 228 | add r0, r0, r2 |
@@ -309,6 +327,7 @@ ENDPROC(v7_dma_unmap_area) | |||
309 | 327 | ||
310 | .type v7_cache_fns, #object | 328 | .type v7_cache_fns, #object |
311 | ENTRY(v7_cache_fns) | 329 | ENTRY(v7_cache_fns) |
330 | .long v7_flush_icache_all | ||
312 | .long v7_flush_kern_cache_all | 331 | .long v7_flush_kern_cache_all |
313 | .long v7_flush_user_cache_all | 332 | .long v7_flush_user_cache_all |
314 | .long v7_flush_user_cache_range | 333 | .long v7_flush_user_cache_range |
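The v7_coherent_user_range rework above splits one combined loop into two: a D-cache clean loop stepping by dcache_line_size, then a separate I-cache invalidate loop stepping by icache_line_size (the two sizes can differ on v7), with r12 as the cursor so the original r0 survives for the second pass. The underlying pattern, sketched generically in C:

```c
/* Generic per-line cache maintenance loop (sketch). */
static void for_each_cache_line(unsigned long start, unsigned long end,
				unsigned long line_size,
				void (*line_op)(unsigned long va))
{
	unsigned long va;

	/* Round down to a line boundary, then step one line at a time. */
	for (va = start & ~(line_size - 1); va < end; va += line_size)
		line_op(va);
}
```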
diff --git a/arch/arm/mm/cache-xsc3l2.c b/arch/arm/mm/cache-xsc3l2.c
index c3154928bccd..5a32020471e3 100644
--- a/arch/arm/mm/cache-xsc3l2.c
+++ b/arch/arm/mm/cache-xsc3l2.c
@@ -17,14 +17,10 @@ | |||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
18 | */ | 18 | */ |
19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
20 | #include <linux/highmem.h> | ||
20 | #include <asm/system.h> | 21 | #include <asm/system.h> |
21 | #include <asm/cputype.h> | 22 | #include <asm/cputype.h> |
22 | #include <asm/cacheflush.h> | 23 | #include <asm/cacheflush.h> |
23 | #include <asm/kmap_types.h> | ||
24 | #include <asm/fixmap.h> | ||
25 | #include <asm/pgtable.h> | ||
26 | #include <asm/tlbflush.h> | ||
27 | #include "mm.h" | ||
28 | 24 | ||
29 | #define CR_L2 (1 << 26) | 25 | #define CR_L2 (1 << 26) |
30 | 26 | ||
@@ -71,16 +67,15 @@ static inline void xsc3_l2_inv_all(void) | |||
71 | dsb(); | 67 | dsb(); |
72 | } | 68 | } |
73 | 69 | ||
70 | static inline void l2_unmap_va(unsigned long va) | ||
71 | { | ||
74 | #ifdef CONFIG_HIGHMEM | 72 | #ifdef CONFIG_HIGHMEM |
75 | #define l2_map_save_flags(x) raw_local_save_flags(x) | 73 | if (va != -1) |
76 | #define l2_map_restore_flags(x) raw_local_irq_restore(x) | 74 | kunmap_atomic((void *)va); |
77 | #else | ||
78 | #define l2_map_save_flags(x) ((x) = 0) | ||
79 | #define l2_map_restore_flags(x) ((void)(x)) | ||
80 | #endif | 75 | #endif |
76 | } | ||
81 | 77 | ||
82 | static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va, | 78 | static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va) |
83 | unsigned long flags) | ||
84 | { | 79 | { |
85 | #ifdef CONFIG_HIGHMEM | 80 | #ifdef CONFIG_HIGHMEM |
86 | unsigned long va = prev_va & PAGE_MASK; | 81 | unsigned long va = prev_va & PAGE_MASK; |
@@ -89,17 +84,10 @@ static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va, | |||
89 | /* | 84 | /* |
90 | * Switching to a new page. Because cache ops are | 85 | * Switching to a new page. Because cache ops are |
91 | * using virtual addresses only, we must put a mapping | 86 | * using virtual addresses only, we must put a mapping |
92 | * in place for it. We also enable interrupts for a | 87 | * in place for it. |
93 | * short while and disable them again to protect this | ||
94 | * mapping. | ||
95 | */ | 88 | */ |
96 | unsigned long idx; | 89 | l2_unmap_va(prev_va); |
97 | raw_local_irq_restore(flags); | 90 | va = (unsigned long)kmap_atomic_pfn(pa >> PAGE_SHIFT); |
98 | idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id(); | ||
99 | va = __fix_to_virt(FIX_KMAP_BEGIN + idx); | ||
100 | raw_local_irq_restore(flags | PSR_I_BIT); | ||
101 | set_pte_ext(TOP_PTE(va), pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL), 0); | ||
102 | local_flush_tlb_kernel_page(va); | ||
103 | } | 91 | } |
104 | return va + (pa_offset >> (32 - PAGE_SHIFT)); | 92 | return va + (pa_offset >> (32 - PAGE_SHIFT)); |
105 | #else | 93 | #else |
@@ -109,7 +97,7 @@ static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va, | |||
109 | 97 | ||
110 | static void xsc3_l2_inv_range(unsigned long start, unsigned long end) | 98 | static void xsc3_l2_inv_range(unsigned long start, unsigned long end) |
111 | { | 99 | { |
112 | unsigned long vaddr, flags; | 100 | unsigned long vaddr; |
113 | 101 | ||
114 | if (start == 0 && end == -1ul) { | 102 | if (start == 0 && end == -1ul) { |
115 | xsc3_l2_inv_all(); | 103 | xsc3_l2_inv_all(); |
@@ -117,13 +105,12 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end) | |||
117 | } | 105 | } |
118 | 106 | ||
119 | vaddr = -1; /* to force the first mapping */ | 107 | vaddr = -1; /* to force the first mapping */ |
120 | l2_map_save_flags(flags); | ||
121 | 108 | ||
122 | /* | 109 | /* |
123 | * Clean and invalidate partial first cache line. | 110 | * Clean and invalidate partial first cache line. |
124 | */ | 111 | */ |
125 | if (start & (CACHE_LINE_SIZE - 1)) { | 112 | if (start & (CACHE_LINE_SIZE - 1)) { |
126 | vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr, flags); | 113 | vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr); |
127 | xsc3_l2_clean_mva(vaddr); | 114 | xsc3_l2_clean_mva(vaddr); |
128 | xsc3_l2_inv_mva(vaddr); | 115 | xsc3_l2_inv_mva(vaddr); |
129 | start = (start | (CACHE_LINE_SIZE - 1)) + 1; | 116 | start = (start | (CACHE_LINE_SIZE - 1)) + 1; |
@@ -133,7 +120,7 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end) | |||
133 | * Invalidate all full cache lines between 'start' and 'end'. | 120 | * Invalidate all full cache lines between 'start' and 'end'. |
134 | */ | 121 | */ |
135 | while (start < (end & ~(CACHE_LINE_SIZE - 1))) { | 122 | while (start < (end & ~(CACHE_LINE_SIZE - 1))) { |
136 | vaddr = l2_map_va(start, vaddr, flags); | 123 | vaddr = l2_map_va(start, vaddr); |
137 | xsc3_l2_inv_mva(vaddr); | 124 | xsc3_l2_inv_mva(vaddr); |
138 | start += CACHE_LINE_SIZE; | 125 | start += CACHE_LINE_SIZE; |
139 | } | 126 | } |
@@ -142,31 +129,30 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end) | |||
142 | * Clean and invalidate partial last cache line. | 129 | * Clean and invalidate partial last cache line. |
143 | */ | 130 | */ |
144 | if (start < end) { | 131 | if (start < end) { |
145 | vaddr = l2_map_va(start, vaddr, flags); | 132 | vaddr = l2_map_va(start, vaddr); |
146 | xsc3_l2_clean_mva(vaddr); | 133 | xsc3_l2_clean_mva(vaddr); |
147 | xsc3_l2_inv_mva(vaddr); | 134 | xsc3_l2_inv_mva(vaddr); |
148 | } | 135 | } |
149 | 136 | ||
150 | l2_map_restore_flags(flags); | 137 | l2_unmap_va(vaddr); |
151 | 138 | ||
152 | dsb(); | 139 | dsb(); |
153 | } | 140 | } |
154 | 141 | ||
155 | static void xsc3_l2_clean_range(unsigned long start, unsigned long end) | 142 | static void xsc3_l2_clean_range(unsigned long start, unsigned long end) |
156 | { | 143 | { |
157 | unsigned long vaddr, flags; | 144 | unsigned long vaddr; |
158 | 145 | ||
159 | vaddr = -1; /* to force the first mapping */ | 146 | vaddr = -1; /* to force the first mapping */ |
160 | l2_map_save_flags(flags); | ||
161 | 147 | ||
162 | start &= ~(CACHE_LINE_SIZE - 1); | 148 | start &= ~(CACHE_LINE_SIZE - 1); |
163 | while (start < end) { | 149 | while (start < end) { |
164 | vaddr = l2_map_va(start, vaddr, flags); | 150 | vaddr = l2_map_va(start, vaddr); |
165 | xsc3_l2_clean_mva(vaddr); | 151 | xsc3_l2_clean_mva(vaddr); |
166 | start += CACHE_LINE_SIZE; | 152 | start += CACHE_LINE_SIZE; |
167 | } | 153 | } |
168 | 154 | ||
169 | l2_map_restore_flags(flags); | 155 | l2_unmap_va(vaddr); |
170 | 156 | ||
171 | dsb(); | 157 | dsb(); |
172 | } | 158 | } |
@@ -193,7 +179,7 @@ static inline void xsc3_l2_flush_all(void) | |||
193 | 179 | ||
194 | static void xsc3_l2_flush_range(unsigned long start, unsigned long end) | 180 | static void xsc3_l2_flush_range(unsigned long start, unsigned long end) |
195 | { | 181 | { |
196 | unsigned long vaddr, flags; | 182 | unsigned long vaddr; |
197 | 183 | ||
198 | if (start == 0 && end == -1ul) { | 184 | if (start == 0 && end == -1ul) { |
199 | xsc3_l2_flush_all(); | 185 | xsc3_l2_flush_all(); |
@@ -201,17 +187,16 @@ static void xsc3_l2_flush_range(unsigned long start, unsigned long end) | |||
201 | } | 187 | } |
202 | 188 | ||
203 | vaddr = -1; /* to force the first mapping */ | 189 | vaddr = -1; /* to force the first mapping */ |
204 | l2_map_save_flags(flags); | ||
205 | 190 | ||
206 | start &= ~(CACHE_LINE_SIZE - 1); | 191 | start &= ~(CACHE_LINE_SIZE - 1); |
207 | while (start < end) { | 192 | while (start < end) { |
208 | vaddr = l2_map_va(start, vaddr, flags); | 193 | vaddr = l2_map_va(start, vaddr); |
209 | xsc3_l2_clean_mva(vaddr); | 194 | xsc3_l2_clean_mva(vaddr); |
210 | xsc3_l2_inv_mva(vaddr); | 195 | xsc3_l2_inv_mva(vaddr); |
211 | start += CACHE_LINE_SIZE; | 196 | start += CACHE_LINE_SIZE; |
212 | } | 197 | } |
213 | 198 | ||
214 | l2_map_restore_flags(flags); | 199 | l2_unmap_va(vaddr); |
215 | 200 | ||
216 | dsb(); | 201 | dsb(); |
217 | } | 202 | } |
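The three xsc3 L2 range operations above all change the same way: the l2_map_save_flags()/l2_map_restore_flags() pair disappears, l2_map_va() loses its flags argument, and a trailing l2_unmap_va() releases the last temporary mapping instead. The walk they share reduces to one shape; a rough sketch (the l2_walk() helper is hypothetical, assuming CACHE_LINE_SIZE and the l2_map_va()/l2_unmap_va() helpers from this file):

    static void l2_walk(unsigned long start, unsigned long end,
                        void (*op)(unsigned long mva))
    {
        unsigned long vaddr = -1;            /* force the first mapping */

        start &= ~(CACHE_LINE_SIZE - 1);     /* align to a cache line */
        while (start < end) {
            vaddr = l2_map_va(start, vaddr); /* remaps only on page change */
            op(vaddr);                       /* clean and/or invalidate */
            start += CACHE_LINE_SIZE;
        }
        l2_unmap_va(vaddr);                  /* drop the final mapping */
        dsb();                               /* drain before returning */
    }

Dropping the saved IRQ flags is possible because l2_map_va() now appears to sit on the reworked atomic kmap (see the highmem.c hunks below), which does its own per-CPU slot bookkeeping.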
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c index 598c51ad5071..b8061519ce77 100644 --- a/arch/arm/mm/copypage-v4mc.c +++ b/arch/arm/mm/copypage-v4mc.c | |||
@@ -73,7 +73,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from, | |||
73 | { | 73 | { |
74 | void *kto = kmap_atomic(to, KM_USER1); | 74 | void *kto = kmap_atomic(to, KM_USER1); |
75 | 75 | ||
76 | if (test_and_clear_bit(PG_dcache_dirty, &from->flags)) | 76 | if (!test_and_set_bit(PG_dcache_clean, &from->flags)) |
77 | __flush_dcache_page(page_mapping(from), from); | 77 | __flush_dcache_page(page_mapping(from), from); |
78 | 78 | ||
79 | spin_lock(&minicache_lock); | 79 | spin_lock(&minicache_lock); |
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c index f55fa1044f72..bdba6c65c901 100644 --- a/arch/arm/mm/copypage-v6.c +++ b/arch/arm/mm/copypage-v6.c | |||
@@ -79,7 +79,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to, | |||
79 | unsigned int offset = CACHE_COLOUR(vaddr); | 79 | unsigned int offset = CACHE_COLOUR(vaddr); |
80 | unsigned long kfrom, kto; | 80 | unsigned long kfrom, kto; |
81 | 81 | ||
82 | if (test_and_clear_bit(PG_dcache_dirty, &from->flags)) | 82 | if (!test_and_set_bit(PG_dcache_clean, &from->flags)) |
83 | __flush_dcache_page(page_mapping(from), from); | 83 | __flush_dcache_page(page_mapping(from), from); |
84 | 84 | ||
85 | /* FIXME: not highmem safe */ | 85 | /* FIXME: not highmem safe */ |
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c index 9920c0ae2096..649bbcd325bf 100644 --- a/arch/arm/mm/copypage-xscale.c +++ b/arch/arm/mm/copypage-xscale.c | |||
@@ -95,7 +95,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from, | |||
95 | { | 95 | { |
96 | void *kto = kmap_atomic(to, KM_USER1); | 96 | void *kto = kmap_atomic(to, KM_USER1); |
97 | 97 | ||
98 | if (test_and_clear_bit(PG_dcache_dirty, &from->flags)) | 98 | if (!test_and_set_bit(PG_dcache_clean, &from->flags)) |
99 | __flush_dcache_page(page_mapping(from), from); | 99 | __flush_dcache_page(page_mapping(from), from); |
100 | 100 | ||
101 | spin_lock(&minicache_lock); | 101 | spin_lock(&minicache_lock); |
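All three mini-cache copy_user_highpage() variants flip the lazy D-cache flag the same way: PG_dcache_dirty (set meant "kernel view dirty") gives way to PG_dcache_clean (clear means "possibly dirty"), and !test_and_set_bit() both marks the page clean and reports whether a flush is still owed. A minimal sketch of the new idiom (the helper name is invented; assumes <linux/page-flags.h> and this directory's __flush_dcache_page()):

    /* Flush the kernel alias at most once per dirtying: the bit op is
     * atomic, so exactly one racing caller sees the 0 -> 1 transition. */
    static inline void flush_if_not_clean(struct page *page)
    {
        if (!test_and_set_bit(PG_dcache_clean, &page->flags))
            __flush_dcache_page(page_mapping(page), page);
    }

Inverting the bit's sense makes the safe state the default: a freshly allocated page has all flag bits clear and is therefore treated as needing a flush.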
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 4bc43e535d3b..82a093cee09a 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
18 | #include <linux/device.h> | 18 | #include <linux/device.h> |
19 | #include <linux/dma-mapping.h> | 19 | #include <linux/dma-mapping.h> |
20 | #include <linux/highmem.h> | ||
20 | 21 | ||
21 | #include <asm/memory.h> | 22 | #include <asm/memory.h> |
22 | #include <asm/highmem.h> | 23 | #include <asm/highmem.h> |
@@ -148,6 +149,7 @@ static int __init consistent_init(void) | |||
148 | { | 149 | { |
149 | int ret = 0; | 150 | int ret = 0; |
150 | pgd_t *pgd; | 151 | pgd_t *pgd; |
152 | pud_t *pud; | ||
151 | pmd_t *pmd; | 153 | pmd_t *pmd; |
152 | pte_t *pte; | 154 | pte_t *pte; |
153 | int i = 0; | 155 | int i = 0; |
@@ -155,7 +157,15 @@ static int __init consistent_init(void) | |||
155 | 157 | ||
156 | do { | 158 | do { |
157 | pgd = pgd_offset(&init_mm, base); | 159 | pgd = pgd_offset(&init_mm, base); |
158 | pmd = pmd_alloc(&init_mm, pgd, base); | 160 | |
161 | pud = pud_alloc(&init_mm, pgd, base); | ||
162 | if (!pud) { | ||
163 | printk(KERN_ERR "%s: no pud tables\n", __func__); | ||
164 | ret = -ENOMEM; | ||
165 | break; | ||
166 | } | ||
167 | |||
168 | pmd = pmd_alloc(&init_mm, pud, base); | ||
159 | if (!pmd) { | 169 | if (!pmd) { |
160 | printk(KERN_ERR "%s: no pmd tables\n", __func__); | 170 | printk(KERN_ERR "%s: no pmd tables\n", __func__); |
161 | ret = -ENOMEM; | 171 | ret = -ENOMEM; |
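consistent_init() gains an explicit pud step between pgd and pmd. On ARM's 2-level page tables the pud folds away and pud_alloc() simply returns the pgd slot, but spelling the level out keeps the walk correct should a deeper layout ever appear; the same pgd/pud/pmd pattern recurs in fault-armv.c and fault.c below. The read-only form of the walk, as a sketch (assuming <asm/pgtable.h>, all levels present, error checks elided):

    /* Descend to the pte for addr; the caller must pte_unmap() the result. */
    static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
    {
        pgd_t *pgd = pgd_offset(mm, addr);   /* top level */
        pud_t *pud = pud_offset(pgd, addr);  /* a no-op on 2-level ARM */
        pmd_t *pmd = pmd_offset(pud, addr);
        return pte_offset_map(pmd, addr);    /* leaf entry */
    }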
@@ -198,7 +208,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot) | |||
198 | * fragmentation of the DMA space, and also prevents allocations | 208 | * fragmentation of the DMA space, and also prevents allocations |
199 | * smaller than a section from crossing a section boundary. | 209 | * smaller than a section from crossing a section boundary. |
200 | */ | 210 | */ |
201 | bit = fls(size - 1) + 1; | 211 | bit = fls(size - 1); |
202 | if (bit > SECTION_SHIFT) | 212 | if (bit > SECTION_SHIFT) |
203 | bit = SECTION_SHIFT; | 213 | bit = SECTION_SHIFT; |
204 | align = 1 << bit; | 214 | align = 1 << bit; |
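The alignment computation loses an off-by-one: fls() already returns the 1-based index of the most significant set bit, so the old "+ 1" doubled every allocation's alignment. A worked example for a 1 MiB request:

    /* size = 0x100000 (1 MiB)
     * old: bit = fls(0xfffff) + 1 = 21  ->  align = 1 << 21 = 2 MiB
     * new: bit = fls(0xfffff)     = 20  ->  align = 1 << 20 = 1 MiB */

With the fix, a power-of-two request is aligned to exactly its own size, which still satisfies the anti-fragmentation goal stated in the comment above.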
@@ -311,7 +321,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, | |||
311 | addr = page_address(page); | 321 | addr = page_address(page); |
312 | 322 | ||
313 | if (addr) | 323 | if (addr) |
314 | *handle = page_to_dma(dev, page); | 324 | *handle = pfn_to_dma(dev, page_to_pfn(page)); |
315 | 325 | ||
316 | return addr; | 326 | return addr; |
317 | } | 327 | } |
@@ -406,7 +416,7 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr | |||
406 | if (!arch_is_coherent()) | 416 | if (!arch_is_coherent()) |
407 | __dma_free_remap(cpu_addr, size); | 417 | __dma_free_remap(cpu_addr, size); |
408 | 418 | ||
409 | __dma_free_buffer(dma_to_page(dev, handle), size); | 419 | __dma_free_buffer(pfn_to_page(dma_to_pfn(dev, handle)), size); |
410 | } | 420 | } |
411 | EXPORT_SYMBOL(dma_free_coherent); | 421 | EXPORT_SYMBOL(dma_free_coherent); |
412 | 422 | ||
@@ -480,10 +490,10 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset, | |||
480 | op(vaddr, len, dir); | 490 | op(vaddr, len, dir); |
481 | kunmap_high(page); | 491 | kunmap_high(page); |
482 | } else if (cache_is_vipt()) { | 492 | } else if (cache_is_vipt()) { |
483 | pte_t saved_pte; | 493 | /* unmapped pages might still be cached */ |
484 | vaddr = kmap_high_l1_vipt(page, &saved_pte); | 494 | vaddr = kmap_atomic(page); |
485 | op(vaddr + offset, len, dir); | 495 | op(vaddr + offset, len, dir); |
486 | kunmap_high_l1_vipt(page, saved_pte); | 496 | kunmap_atomic(vaddr); |
487 | } | 497 | } |
488 | } else { | 498 | } else { |
489 | vaddr = page_address(page) + offset; | 499 | vaddr = page_address(page) + offset; |
@@ -523,6 +533,12 @@ void ___dma_page_dev_to_cpu(struct page *page, unsigned long off, | |||
523 | outer_inv_range(paddr, paddr + size); | 533 | outer_inv_range(paddr, paddr + size); |
524 | 534 | ||
525 | dma_cache_maint_page(page, off, size, dir, dmac_unmap_area); | 535 | dma_cache_maint_page(page, off, size, dir, dmac_unmap_area); |
536 | |||
537 | /* | ||
538 | * Mark the D-cache clean for this page to avoid extra flushing. | ||
539 | */ | ||
540 | if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE) | ||
541 | set_bit(PG_dcache_clean, &page->flags); | ||
526 | } | 542 | } |
527 | EXPORT_SYMBOL(___dma_page_dev_to_cpu); | 543 | EXPORT_SYMBOL(___dma_page_dev_to_cpu); |
528 | 544 | ||
@@ -548,17 +564,20 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | |||
548 | struct scatterlist *s; | 564 | struct scatterlist *s; |
549 | int i, j; | 565 | int i, j; |
550 | 566 | ||
567 | BUG_ON(!valid_dma_direction(dir)); | ||
568 | |||
551 | for_each_sg(sg, s, nents, i) { | 569 | for_each_sg(sg, s, nents, i) { |
552 | s->dma_address = dma_map_page(dev, sg_page(s), s->offset, | 570 | s->dma_address = __dma_map_page(dev, sg_page(s), s->offset, |
553 | s->length, dir); | 571 | s->length, dir); |
554 | if (dma_mapping_error(dev, s->dma_address)) | 572 | if (dma_mapping_error(dev, s->dma_address)) |
555 | goto bad_mapping; | 573 | goto bad_mapping; |
556 | } | 574 | } |
575 | debug_dma_map_sg(dev, sg, nents, nents, dir); | ||
557 | return nents; | 576 | return nents; |
558 | 577 | ||
559 | bad_mapping: | 578 | bad_mapping: |
560 | for_each_sg(sg, s, i, j) | 579 | for_each_sg(sg, s, i, j) |
561 | dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir); | 580 | __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir); |
562 | return 0; | 581 | return 0; |
563 | } | 582 | } |
564 | EXPORT_SYMBOL(dma_map_sg); | 583 | EXPORT_SYMBOL(dma_map_sg); |
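dma_map_sg() switches to the internal __dma_map_page()/__dma_unmap_page() helpers so that the dma-debug bookkeeping (wired up at the bottom of this file) fires once per list rather than once per page, and the bad_mapping path replays only the i entries already mapped. The caller-side contract is unchanged; a sketch (hypothetical driver snippet, assuming <linux/dma-mapping.h>):

    int mapped = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
    if (mapped == 0)
        return -ENOMEM;    /* nothing is left mapped on failure */
    /* ... program the device via sg_dma_address()/sg_dma_len() ... */
    dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);  /* nents, not mapped */

Note the kerneldoc correction in the next hunk: dma_unmap_sg() expects the original nents, not dma_map_sg()'s return value.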
@@ -567,7 +586,7 @@ EXPORT_SYMBOL(dma_map_sg); | |||
567 | * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg | 586 | * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg |
568 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices | 587 | * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices |
569 | * @sg: list of buffers | 588 | * @sg: list of buffers |
570 | * @nents: number of buffers to unmap (returned from dma_map_sg) | 589 | * @nents: number of buffers to unmap (same as was passed to dma_map_sg) |
571 | * @dir: DMA transfer direction (same as was passed to dma_map_sg) | 590 | * @dir: DMA transfer direction (same as was passed to dma_map_sg) |
572 | * | 591 | * |
573 | * Unmap a set of streaming mode DMA translations. Again, CPU access | 592 | * Unmap a set of streaming mode DMA translations. Again, CPU access |
@@ -579,8 +598,10 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, | |||
579 | struct scatterlist *s; | 598 | struct scatterlist *s; |
580 | int i; | 599 | int i; |
581 | 600 | ||
601 | debug_dma_unmap_sg(dev, sg, nents, dir); | ||
602 | |||
582 | for_each_sg(sg, s, nents, i) | 603 | for_each_sg(sg, s, nents, i) |
583 | dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir); | 604 | __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir); |
584 | } | 605 | } |
585 | EXPORT_SYMBOL(dma_unmap_sg); | 606 | EXPORT_SYMBOL(dma_unmap_sg); |
586 | 607 | ||
@@ -605,6 +626,8 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, | |||
605 | __dma_page_dev_to_cpu(sg_page(s), s->offset, | 626 | __dma_page_dev_to_cpu(sg_page(s), s->offset, |
606 | s->length, dir); | 627 | s->length, dir); |
607 | } | 628 | } |
629 | |||
630 | debug_dma_sync_sg_for_cpu(dev, sg, nents, dir); | ||
608 | } | 631 | } |
609 | EXPORT_SYMBOL(dma_sync_sg_for_cpu); | 632 | EXPORT_SYMBOL(dma_sync_sg_for_cpu); |
610 | 633 | ||
@@ -629,5 +652,16 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | |||
629 | __dma_page_cpu_to_dev(sg_page(s), s->offset, | 652 | __dma_page_cpu_to_dev(sg_page(s), s->offset, |
630 | s->length, dir); | 653 | s->length, dir); |
631 | } | 654 | } |
655 | |||
656 | debug_dma_sync_sg_for_device(dev, sg, nents, dir); | ||
632 | } | 657 | } |
633 | EXPORT_SYMBOL(dma_sync_sg_for_device); | 658 | EXPORT_SYMBOL(dma_sync_sg_for_device); |
659 | |||
660 | #define PREALLOC_DMA_DEBUG_ENTRIES 4096 | ||
661 | |||
662 | static int __init dma_debug_do_init(void) | ||
663 | { | ||
664 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); | ||
665 | return 0; | ||
666 | } | ||
667 | fs_initcall(dma_debug_do_init); | ||
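The new fs_initcall() plugs this file into the generic dma-debug framework: with CONFIG_DMA_API_DEBUG enabled, 4096 preallocated tracking entries back the debug_dma_* hooks added above, cross-checking every map, unmap and sync. A sketch of the class of driver bug this catches (hypothetical):

    dma_addr_t h = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
    /* forgetting dma_unmap_page(dev, h, PAGE_SIZE, DMA_TO_DEVICE) leaks
     * a tracking entry, which dma-debug can report later, for instance
     * when the device is unbound from its driver */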
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c index 9b906dec1ca1..7cab79179421 100644 --- a/arch/arm/mm/fault-armv.c +++ b/arch/arm/mm/fault-armv.c | |||
@@ -26,8 +26,9 @@ | |||
26 | 26 | ||
27 | #include "mm.h" | 27 | #include "mm.h" |
28 | 28 | ||
29 | static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE; | 29 | static pteval_t shared_pte_mask = L_PTE_MT_BUFFERABLE; |
30 | 30 | ||
31 | #if __LINUX_ARM_ARCH__ < 6 | ||
31 | /* | 32 | /* |
32 | * We take the easy way out of this problem - we make the | 33 | * We take the easy way out of this problem - we make the |
33 | * PTE uncacheable. However, we leave the write buffer on. | 34 | * PTE uncacheable. However, we leave the write buffer on. |
@@ -65,11 +66,36 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address, | |||
65 | return ret; | 66 | return ret; |
66 | } | 67 | } |
67 | 68 | ||
69 | #if USE_SPLIT_PTLOCKS | ||
70 | /* | ||
71 | * If we are using split PTE locks, then we need to take the page | ||
72 | * lock here. Otherwise we are using shared mm->page_table_lock | ||
73 | * which is already locked, thus cannot take it. | ||
74 | */ | ||
75 | static inline void do_pte_lock(spinlock_t *ptl) | ||
76 | { | ||
77 | /* | ||
78 | * Use nested version here to indicate that we are already | ||
79 | * holding one similar spinlock. | ||
80 | */ | ||
81 | spin_lock_nested(ptl, SINGLE_DEPTH_NESTING); | ||
82 | } | ||
83 | |||
84 | static inline void do_pte_unlock(spinlock_t *ptl) | ||
85 | { | ||
86 | spin_unlock(ptl); | ||
87 | } | ||
88 | #else /* !USE_SPLIT_PTLOCKS */ | ||
89 | static inline void do_pte_lock(spinlock_t *ptl) {} | ||
90 | static inline void do_pte_unlock(spinlock_t *ptl) {} | ||
91 | #endif /* USE_SPLIT_PTLOCKS */ | ||
92 | |||
68 | static int adjust_pte(struct vm_area_struct *vma, unsigned long address, | 93 | static int adjust_pte(struct vm_area_struct *vma, unsigned long address, |
69 | unsigned long pfn) | 94 | unsigned long pfn) |
70 | { | 95 | { |
71 | spinlock_t *ptl; | 96 | spinlock_t *ptl; |
72 | pgd_t *pgd; | 97 | pgd_t *pgd; |
98 | pud_t *pud; | ||
73 | pmd_t *pmd; | 99 | pmd_t *pmd; |
74 | pte_t *pte; | 100 | pte_t *pte; |
75 | int ret; | 101 | int ret; |
@@ -78,7 +104,11 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address, | |||
78 | if (pgd_none_or_clear_bad(pgd)) | 104 | if (pgd_none_or_clear_bad(pgd)) |
79 | return 0; | 105 | return 0; |
80 | 106 | ||
81 | pmd = pmd_offset(pgd, address); | 107 | pud = pud_offset(pgd, address); |
108 | if (pud_none_or_clear_bad(pud)) | ||
109 | return 0; | ||
110 | |||
111 | pmd = pmd_offset(pud, address); | ||
82 | if (pmd_none_or_clear_bad(pmd)) | 112 | if (pmd_none_or_clear_bad(pmd)) |
83 | return 0; | 113 | return 0; |
84 | 114 | ||
@@ -88,13 +118,13 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address, | |||
88 | * open-code the spin-locking. | 118 | * open-code the spin-locking. |
89 | */ | 119 | */ |
90 | ptl = pte_lockptr(vma->vm_mm, pmd); | 120 | ptl = pte_lockptr(vma->vm_mm, pmd); |
91 | pte = pte_offset_map_nested(pmd, address); | 121 | pte = pte_offset_map(pmd, address); |
92 | spin_lock(ptl); | 122 | do_pte_lock(ptl); |
93 | 123 | ||
94 | ret = do_adjust_pte(vma, address, pfn, pte); | 124 | ret = do_adjust_pte(vma, address, pfn, pte); |
95 | 125 | ||
96 | spin_unlock(ptl); | 126 | do_pte_unlock(ptl); |
97 | pte_unmap_nested(pte); | 127 | pte_unmap(pte); |
98 | 128 | ||
99 | return ret; | 129 | return ret; |
100 | } | 130 | } |
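do_pte_lock()/do_pte_unlock() exist because adjust_pte() may run while the caller already holds the PTE lock of the faulting page. With split PTE locks each page table carries its own spinlock, so taking a second one is legal but needs the nesting annotation to keep lockdep quiet; without split locks both paths would use the shared mm->page_table_lock, which is already held, so the helpers compile to nothing. The lockdep-visible shape, sketched with hypothetical lock variables:

    spin_lock(ptl_a);                    /* caller: lock of the faulting pte */
    spin_lock_nested(ptl_b, SINGLE_DEPTH_NESTING);  /* second table's lock */
    /* ... fix up the aliasing mapping ... */
    spin_unlock(ptl_b);
    spin_unlock(ptl_a);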
@@ -141,7 +171,7 @@ make_coherent(struct address_space *mapping, struct vm_area_struct *vma, | |||
141 | * a page table, or changing an existing PTE. Basically, there are two | 171 | * a page table, or changing an existing PTE. Basically, there are two |
142 | * things that we need to take care of: | 172 | * things that we need to take care of: |
143 | * | 173 | * |
144 | * 1. If PG_dcache_dirty is set for the page, we need to ensure | 174 | * 1. If PG_dcache_clean is not set for the page, we need to ensure |
145 | * that any cache entries for the kernel's virtual memory | 175 | * that any cache entries for the kernel's virtual memory
146 | * range are written back to the page. | 176 | * range are written back to the page. |
147 | * 2. If we have multiple shared mappings of the same space in | 177 | * 2. If we have multiple shared mappings of the same space in |
@@ -168,10 +198,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, | |||
168 | return; | 198 | return; |
169 | 199 | ||
170 | mapping = page_mapping(page); | 200 | mapping = page_mapping(page); |
171 | #ifndef CONFIG_SMP | 201 | if (!test_and_set_bit(PG_dcache_clean, &page->flags)) |
172 | if (test_and_clear_bit(PG_dcache_dirty, &page->flags)) | ||
173 | __flush_dcache_page(mapping, page); | 202 | __flush_dcache_page(mapping, page); |
174 | #endif | ||
175 | if (mapping) { | 203 | if (mapping) { |
176 | if (cache_is_vivt()) | 204 | if (cache_is_vivt()) |
177 | make_coherent(mapping, vma, addr, ptep, pfn); | 205 | make_coherent(mapping, vma, addr, ptep, pfn); |
@@ -179,6 +207,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, | |||
179 | __flush_icache_all(); | 207 | __flush_icache_all(); |
180 | } | 208 | } |
181 | } | 209 | } |
210 | #endif /* __LINUX_ARM_ARCH__ < 6 */ | ||
182 | 211 | ||
183 | /* | 212 | /* |
184 | * Check whether the write buffer has physical address aliasing | 213 | * Check whether the write buffer has physical address aliasing |
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 23b0b03af5ea..bc0e1d88fd3b 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c | |||
@@ -76,9 +76,11 @@ void show_pte(struct mm_struct *mm, unsigned long addr) | |||
76 | 76 | ||
77 | printk(KERN_ALERT "pgd = %p\n", mm->pgd); | 77 | printk(KERN_ALERT "pgd = %p\n", mm->pgd); |
78 | pgd = pgd_offset(mm, addr); | 78 | pgd = pgd_offset(mm, addr); |
79 | printk(KERN_ALERT "[%08lx] *pgd=%08lx", addr, pgd_val(*pgd)); | 79 | printk(KERN_ALERT "[%08lx] *pgd=%08llx", |
80 | addr, (long long)pgd_val(*pgd)); | ||
80 | 81 | ||
81 | do { | 82 | do { |
83 | pud_t *pud; | ||
82 | pmd_t *pmd; | 84 | pmd_t *pmd; |
83 | pte_t *pte; | 85 | pte_t *pte; |
84 | 86 | ||
@@ -90,9 +92,21 @@ void show_pte(struct mm_struct *mm, unsigned long addr) | |||
90 | break; | 92 | break; |
91 | } | 93 | } |
92 | 94 | ||
93 | pmd = pmd_offset(pgd, addr); | 95 | pud = pud_offset(pgd, addr); |
96 | if (PTRS_PER_PUD != 1) | ||
97 | printk(", *pud=%08lx", pud_val(*pud)); | ||
98 | |||
99 | if (pud_none(*pud)) | ||
100 | break; | ||
101 | |||
102 | if (pud_bad(*pud)) { | ||
103 | printk("(bad)"); | ||
104 | break; | ||
105 | } | ||
106 | |||
107 | pmd = pmd_offset(pud, addr); | ||
94 | if (PTRS_PER_PMD != 1) | 108 | if (PTRS_PER_PMD != 1) |
95 | printk(", *pmd=%08lx", pmd_val(*pmd)); | 109 | printk(", *pmd=%08llx", (long long)pmd_val(*pmd)); |
96 | 110 | ||
97 | if (pmd_none(*pmd)) | 111 | if (pmd_none(*pmd)) |
98 | break; | 112 | break; |
@@ -107,8 +121,9 @@ void show_pte(struct mm_struct *mm, unsigned long addr) | |||
107 | break; | 121 | break; |
108 | 122 | ||
109 | pte = pte_offset_map(pmd, addr); | 123 | pte = pte_offset_map(pmd, addr); |
110 | printk(", *pte=%08lx", pte_val(*pte)); | 124 | printk(", *pte=%08llx", (long long)pte_val(*pte)); |
111 | printk(", *ppte=%08lx", pte_val(pte[-PTRS_PER_PTE])); | 125 | printk(", *ppte=%08llx", |
126 | (long long)pte_val(pte[PTE_HWTABLE_PTRS])); | ||
112 | pte_unmap(pte); | 127 | pte_unmap(pte); |
113 | } while(0); | 128 | } while(0); |
114 | 129 | ||
@@ -388,6 +403,7 @@ do_translation_fault(unsigned long addr, unsigned int fsr, | |||
388 | { | 403 | { |
389 | unsigned int index; | 404 | unsigned int index; |
390 | pgd_t *pgd, *pgd_k; | 405 | pgd_t *pgd, *pgd_k; |
406 | pud_t *pud, *pud_k; | ||
391 | pmd_t *pmd, *pmd_k; | 407 | pmd_t *pmd, *pmd_k; |
392 | 408 | ||
393 | if (addr < TASK_SIZE) | 409 | if (addr < TASK_SIZE) |
@@ -406,12 +422,19 @@ do_translation_fault(unsigned long addr, unsigned int fsr, | |||
406 | 422 | ||
407 | if (pgd_none(*pgd_k)) | 423 | if (pgd_none(*pgd_k)) |
408 | goto bad_area; | 424 | goto bad_area; |
409 | |||
410 | if (!pgd_present(*pgd)) | 425 | if (!pgd_present(*pgd)) |
411 | set_pgd(pgd, *pgd_k); | 426 | set_pgd(pgd, *pgd_k); |
412 | 427 | ||
413 | pmd_k = pmd_offset(pgd_k, addr); | 428 | pud = pud_offset(pgd, addr); |
414 | pmd = pmd_offset(pgd, addr); | 429 | pud_k = pud_offset(pgd_k, addr); |
430 | |||
431 | if (pud_none(*pud_k)) | ||
432 | goto bad_area; | ||
433 | if (!pud_present(*pud)) | ||
434 | set_pud(pud, *pud_k); | ||
435 | |||
436 | pmd = pmd_offset(pud, addr); | ||
437 | pmd_k = pmd_offset(pud_k, addr); | ||
415 | 438 | ||
416 | /* | 439 | /* |
417 | * On ARM one Linux PGD entry contains two hardware entries (see page | 440 | * On ARM one Linux PGD entry contains two hardware entries (see page |
@@ -581,6 +604,19 @@ static struct fsr_info ifsr_info[] = { | |||
581 | { do_bad, SIGBUS, 0, "unknown 31" }, | 604 | { do_bad, SIGBUS, 0, "unknown 31" }, |
582 | }; | 605 | }; |
583 | 606 | ||
607 | void __init | ||
608 | hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *), | ||
609 | int sig, int code, const char *name) | ||
610 | { | ||
611 | if (nr < 0 || nr >= ARRAY_SIZE(ifsr_info)) | ||
612 | BUG(); | ||
613 | |||
614 | ifsr_info[nr].fn = fn; | ||
615 | ifsr_info[nr].sig = sig; | ||
616 | ifsr_info[nr].code = code; | ||
617 | ifsr_info[nr].name = name; | ||
618 | } | ||
619 | |||
584 | asmlinkage void __exception | 620 | asmlinkage void __exception |
585 | do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs) | 621 | do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs) |
586 | { | 622 | { |
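hook_ifault_code() gives platform code the same boot-time override for prefetch (instruction) aborts that hook_fault_code() has long offered for data aborts: the chosen ifsr_info[] slot is replaced wholesale, and a bad index is a BUG(). Hypothetical usage sketch (handler, slot and name invented for illustration):

    static int my_ifault(unsigned long addr, unsigned int ifsr,
                         struct pt_regs *regs)
    {
        /* return 0 if handled, nonzero to fall through to the signal */
        return 0;
    }

    /* from a machine's __init code: */
    hook_ifault_code(4, my_ifault, SIGSEGV, SEGV_MAPERR, "my ifault");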
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index c6844cb9b508..1a8d4aa821be 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/mm.h> | 11 | #include <linux/mm.h> |
12 | #include <linux/pagemap.h> | 12 | #include <linux/pagemap.h> |
13 | #include <linux/highmem.h> | ||
13 | 14 | ||
14 | #include <asm/cacheflush.h> | 15 | #include <asm/cacheflush.h> |
15 | #include <asm/cachetype.h> | 16 | #include <asm/cachetype.h> |
@@ -39,6 +40,18 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr) | |||
39 | : "cc"); | 40 | : "cc"); |
40 | } | 41 | } |
41 | 42 | ||
43 | static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len) | ||
44 | { | ||
45 | unsigned long colour = CACHE_COLOUR(vaddr); | ||
46 | unsigned long offset = vaddr & (PAGE_SIZE - 1); | ||
47 | unsigned long to; | ||
48 | |||
49 | set_pte_ext(TOP_PTE(ALIAS_FLUSH_START) + colour, pfn_pte(pfn, PAGE_KERNEL), 0); | ||
50 | to = ALIAS_FLUSH_START + (colour << PAGE_SHIFT) + offset; | ||
51 | flush_tlb_kernel_page(to); | ||
52 | flush_icache_range(to, to + len); | ||
53 | } | ||
54 | |||
42 | void flush_cache_mm(struct mm_struct *mm) | 55 | void flush_cache_mm(struct mm_struct *mm) |
43 | { | 56 | { |
44 | if (cache_is_vivt()) { | 57 | if (cache_is_vivt()) { |
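flush_icache_alias() (new above) serves ptrace writes into executable pages on aliasing VIPT I-caches: rather than flushing through the kernel's linear mapping, which may live in a different cache colour than the user's view, it builds a throwaway mapping with the same colour as uaddr and flushes through that. A worked example, assuming a 16 KiB aliasing window (SHMLBA) and 4 KiB pages:

    /* uaddr = 0x40003120
     *   colour = CACHE_COLOUR(uaddr) = 0x3120 >> PAGE_SHIFT = 3
     *   offset = 0x120
     *   to     = ALIAS_FLUSH_START + (3 << PAGE_SHIFT) + 0x120
     * 'to' and uaddr index the same cache sets, so flushing the range
     * at 'to' is equivalent to flushing the user's mapping. */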
@@ -89,16 +102,16 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsig | |||
89 | if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged()) | 102 | if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged()) |
90 | __flush_icache_all(); | 103 | __flush_icache_all(); |
91 | } | 104 | } |
105 | |||
92 | #else | 106 | #else |
93 | #define flush_pfn_alias(pfn,vaddr) do { } while (0) | 107 | #define flush_pfn_alias(pfn,vaddr) do { } while (0) |
108 | #define flush_icache_alias(pfn,vaddr,len) do { } while (0) | ||
94 | #endif | 109 | #endif |
95 | 110 | ||
96 | #ifdef CONFIG_SMP | ||
97 | static void flush_ptrace_access_other(void *args) | 111 | static void flush_ptrace_access_other(void *args) |
98 | { | 112 | { |
99 | __flush_icache_all(); | 113 | __flush_icache_all(); |
100 | } | 114 | } |
101 | #endif | ||
102 | 115 | ||
103 | static | 116 | static |
104 | void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, | 117 | void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, |
@@ -118,15 +131,16 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, | |||
118 | return; | 131 | return; |
119 | } | 132 | } |
120 | 133 | ||
121 | /* VIPT non-aliasing cache */ | 134 | /* VIPT non-aliasing D-cache */ |
122 | if (vma->vm_flags & VM_EXEC) { | 135 | if (vma->vm_flags & VM_EXEC) { |
123 | unsigned long addr = (unsigned long)kaddr; | 136 | unsigned long addr = (unsigned long)kaddr; |
124 | __cpuc_coherent_kern_range(addr, addr + len); | 137 | if (icache_is_vipt_aliasing()) |
125 | #ifdef CONFIG_SMP | 138 | flush_icache_alias(page_to_pfn(page), uaddr, len); |
139 | else | ||
140 | __cpuc_coherent_kern_range(addr, addr + len); | ||
126 | if (cache_ops_need_broadcast()) | 141 | if (cache_ops_need_broadcast()) |
127 | smp_call_function(flush_ptrace_access_other, | 142 | smp_call_function(flush_ptrace_access_other, |
128 | NULL, 1); | 143 | NULL, 1); |
129 | #endif | ||
130 | } | 144 | } |
131 | } | 145 | } |
132 | 146 | ||
@@ -166,10 +180,10 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page) | |||
166 | __cpuc_flush_dcache_area(addr, PAGE_SIZE); | 180 | __cpuc_flush_dcache_area(addr, PAGE_SIZE); |
167 | kunmap_high(page); | 181 | kunmap_high(page); |
168 | } else if (cache_is_vipt()) { | 182 | } else if (cache_is_vipt()) { |
169 | pte_t saved_pte; | 183 | /* unmapped pages might still be cached */ |
170 | addr = kmap_high_l1_vipt(page, &saved_pte); | 184 | addr = kmap_atomic(page); |
171 | __cpuc_flush_dcache_area(addr, PAGE_SIZE); | 185 | __cpuc_flush_dcache_area(addr, PAGE_SIZE); |
172 | kunmap_high_l1_vipt(page, saved_pte); | 186 | kunmap_atomic(addr); |
173 | } | 187 | } |
174 | } | 188 | } |
175 | 189 | ||
@@ -215,6 +229,36 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p | |||
215 | flush_dcache_mmap_unlock(mapping); | 229 | flush_dcache_mmap_unlock(mapping); |
216 | } | 230 | } |
217 | 231 | ||
232 | #if __LINUX_ARM_ARCH__ >= 6 | ||
233 | void __sync_icache_dcache(pte_t pteval) | ||
234 | { | ||
235 | unsigned long pfn; | ||
236 | struct page *page; | ||
237 | struct address_space *mapping; | ||
238 | |||
239 | if (!pte_present_user(pteval)) | ||
240 | return; | ||
241 | if (cache_is_vipt_nonaliasing() && !pte_exec(pteval)) | ||
242 | /* only flush non-aliasing VIPT caches for exec mappings */ | ||
243 | return; | ||
244 | pfn = pte_pfn(pteval); | ||
245 | if (!pfn_valid(pfn)) | ||
246 | return; | ||
247 | |||
248 | page = pfn_to_page(pfn); | ||
249 | if (cache_is_vipt_aliasing()) | ||
250 | mapping = page_mapping(page); | ||
251 | else | ||
252 | mapping = NULL; | ||
253 | |||
254 | if (!test_and_set_bit(PG_dcache_clean, &page->flags)) | ||
255 | __flush_dcache_page(mapping, page); | ||
256 | |||
257 | if (pte_exec(pteval)) | ||
258 | __flush_icache_all(); | ||
259 | } | ||
260 | #endif | ||
261 | |||
218 | /* | 262 | /* |
219 | * Ensure cache coherency between kernel mapping and userspace mapping | 263 | * Ensure cache coherency between kernel mapping and userspace mapping |
220 | * of this page. | 264 | * of this page. |
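__sync_icache_dcache() is the v6+ counterpart of the fault-time fix-ups that fault-armv.c now compiles out: coherency is established when the PTE is installed (on this kernel the hook appears to be invoked from set_pte_at()) rather than lazily at fault time. Its policy, condensed:

    /* not a present user pte               -> nothing to do
     * non-aliasing VIPT, not executable    -> D-cache alias harmless, skip
     * PG_dcache_clean not yet set          -> flush the D-cache once
     * executable mapping                   -> invalidate the whole I-cache */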
@@ -231,7 +275,8 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p | |||
231 | * kernel cache lines for later. Otherwise, we assume we have | 275 | * kernel cache lines for later. Otherwise, we assume we have |
232 | * aliasing mappings. | 276 | * aliasing mappings. |
233 | * | 277 | * |
234 | * Note that we disable the lazy flush for SMP. | 278 | * Note that we disable the lazy flush for SMP configurations where |
279 | * the cache maintenance operations are not automatically broadcasted. | ||
235 | */ | 280 | */ |
236 | void flush_dcache_page(struct page *page) | 281 | void flush_dcache_page(struct page *page) |
237 | { | 282 | { |
@@ -246,17 +291,16 @@ void flush_dcache_page(struct page *page) | |||
246 | 291 | ||
247 | mapping = page_mapping(page); | 292 | mapping = page_mapping(page); |
248 | 293 | ||
249 | #ifndef CONFIG_SMP | 294 | if (!cache_ops_need_broadcast() && |
250 | if (!PageHighMem(page) && mapping && !mapping_mapped(mapping)) | 295 | mapping && !mapping_mapped(mapping)) |
251 | set_bit(PG_dcache_dirty, &page->flags); | 296 | clear_bit(PG_dcache_clean, &page->flags); |
252 | else | 297 | else { |
253 | #endif | ||
254 | { | ||
255 | __flush_dcache_page(mapping, page); | 298 | __flush_dcache_page(mapping, page); |
256 | if (mapping && cache_is_vivt()) | 299 | if (mapping && cache_is_vivt()) |
257 | __flush_dcache_aliases(mapping, page); | 300 | __flush_dcache_aliases(mapping, page); |
258 | else if (mapping) | 301 | else if (mapping) |
259 | __flush_icache_all(); | 302 | __flush_icache_all(); |
303 | set_bit(PG_dcache_clean, &page->flags); | ||
260 | } | 304 | } |
261 | } | 305 | } |
262 | EXPORT_SYMBOL(flush_dcache_page); | 306 | EXPORT_SYMBOL(flush_dcache_page); |
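flush_dcache_page() replaces the blanket #ifndef CONFIG_SMP with a runtime cache_ops_need_broadcast() test: on SMP parts whose hardware broadcasts cache maintenance (the ARMv7 MP extensions), deferring the flush is safe again, so only older SMP systems lose the lazy path. The bit direction matches the copypage hunks earlier:

    /* lazy:  clear_bit(PG_dcache_clean, ...)   debt recorded, paid later
     * eager: __flush_dcache_page(...); then set_bit(PG_dcache_clean, ...) */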
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c index 1fbdb55bfd1b..807c0573abbe 100644 --- a/arch/arm/mm/highmem.c +++ b/arch/arm/mm/highmem.c | |||
@@ -36,18 +36,17 @@ void kunmap(struct page *page) | |||
36 | } | 36 | } |
37 | EXPORT_SYMBOL(kunmap); | 37 | EXPORT_SYMBOL(kunmap); |
38 | 38 | ||
39 | void *kmap_atomic(struct page *page, enum km_type type) | 39 | void *__kmap_atomic(struct page *page) |
40 | { | 40 | { |
41 | unsigned int idx; | 41 | unsigned int idx; |
42 | unsigned long vaddr; | 42 | unsigned long vaddr; |
43 | void *kmap; | 43 | void *kmap; |
44 | int type; | ||
44 | 45 | ||
45 | pagefault_disable(); | 46 | pagefault_disable(); |
46 | if (!PageHighMem(page)) | 47 | if (!PageHighMem(page)) |
47 | return page_address(page); | 48 | return page_address(page); |
48 | 49 | ||
49 | debug_kmap_atomic(type); | ||
50 | |||
51 | #ifdef CONFIG_DEBUG_HIGHMEM | 50 | #ifdef CONFIG_DEBUG_HIGHMEM |
52 | /* | 51 | /* |
53 | * There is no cache coherency issue when non VIVT, so force the | 52 | * There is no cache coherency issue when non VIVT, so force the |
@@ -61,6 +60,8 @@ void *kmap_atomic(struct page *page, enum km_type type) | |||
61 | if (kmap) | 60 | if (kmap) |
62 | return kmap; | 61 | return kmap; |
63 | 62 | ||
63 | type = kmap_atomic_idx_push(); | ||
64 | |||
64 | idx = type + KM_TYPE_NR * smp_processor_id(); | 65 | idx = type + KM_TYPE_NR * smp_processor_id(); |
65 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); | 66 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); |
66 | #ifdef CONFIG_DEBUG_HIGHMEM | 67 | #ifdef CONFIG_DEBUG_HIGHMEM |
@@ -80,14 +81,17 @@ void *kmap_atomic(struct page *page, enum km_type type) | |||
80 | 81 | ||
81 | return (void *)vaddr; | 82 | return (void *)vaddr; |
82 | } | 83 | } |
83 | EXPORT_SYMBOL(kmap_atomic); | 84 | EXPORT_SYMBOL(__kmap_atomic); |
84 | 85 | ||
85 | void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type) | 86 | void __kunmap_atomic(void *kvaddr) |
86 | { | 87 | { |
87 | unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; | 88 | unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; |
88 | unsigned int idx = type + KM_TYPE_NR * smp_processor_id(); | 89 | int idx, type; |
89 | 90 | ||
90 | if (kvaddr >= (void *)FIXADDR_START) { | 91 | if (kvaddr >= (void *)FIXADDR_START) { |
92 | type = kmap_atomic_idx(); | ||
93 | idx = type + KM_TYPE_NR * smp_processor_id(); | ||
94 | |||
91 | if (cache_is_vivt()) | 95 | if (cache_is_vivt()) |
92 | __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); | 96 | __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); |
93 | #ifdef CONFIG_DEBUG_HIGHMEM | 97 | #ifdef CONFIG_DEBUG_HIGHMEM |
@@ -97,21 +101,23 @@ void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type) | |||
97 | #else | 101 | #else |
98 | (void) idx; /* to kill a warning */ | 102 | (void) idx; /* to kill a warning */ |
99 | #endif | 103 | #endif |
104 | kmap_atomic_idx_pop(); | ||
100 | } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) { | 105 | } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) { |
101 | /* this address was obtained through kmap_high_get() */ | 106 | /* this address was obtained through kmap_high_get() */ |
102 | kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)])); | 107 | kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)])); |
103 | } | 108 | } |
104 | pagefault_enable(); | 109 | pagefault_enable(); |
105 | } | 110 | } |
106 | EXPORT_SYMBOL(kunmap_atomic_notypecheck); | 111 | EXPORT_SYMBOL(__kunmap_atomic); |
107 | 112 | ||
108 | void *kmap_atomic_pfn(unsigned long pfn, enum km_type type) | 113 | void *kmap_atomic_pfn(unsigned long pfn) |
109 | { | 114 | { |
110 | unsigned int idx; | ||
111 | unsigned long vaddr; | 115 | unsigned long vaddr; |
116 | int idx, type; | ||
112 | 117 | ||
113 | pagefault_disable(); | 118 | pagefault_disable(); |
114 | 119 | ||
120 | type = kmap_atomic_idx_push(); | ||
115 | idx = type + KM_TYPE_NR * smp_processor_id(); | 121 | idx = type + KM_TYPE_NR * smp_processor_id(); |
116 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); | 122 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); |
117 | #ifdef CONFIG_DEBUG_HIGHMEM | 123 | #ifdef CONFIG_DEBUG_HIGHMEM |
@@ -134,90 +140,3 @@ struct page *kmap_atomic_to_page(const void *ptr) | |||
134 | pte = TOP_PTE(vaddr); | 140 | pte = TOP_PTE(vaddr); |
135 | return pte_page(*pte); | 141 | return pte_page(*pte); |
136 | } | 142 | } |
137 | |||
138 | #ifdef CONFIG_CPU_CACHE_VIPT | ||
139 | |||
140 | #include <linux/percpu.h> | ||
141 | |||
142 | /* | ||
143 | * The VIVT cache of a highmem page is always flushed before the page | ||
144 | * is unmapped. Hence unmapped highmem pages need no cache maintenance | ||
145 | * in that case. | ||
146 | * | ||
147 | * However unmapped pages may still be cached with a VIPT cache, and | ||
148 | * it is not possible to perform cache maintenance on them using physical | ||
149 | * addresses unfortunately. So we have no choice but to set up a temporary | ||
150 | * virtual mapping for that purpose. | ||
151 | * | ||
152 | * Yet this VIPT cache maintenance may be triggered from DMA support | ||
153 | * functions which are possibly called from interrupt context. As we don't | ||
154 | * want to keep interrupt disabled all the time when such maintenance is | ||
155 | * taking place, we therefore allow for some reentrancy by preserving and | ||
156 | * restoring the previous fixmap entry before the interrupted context is | ||
157 | * resumed. If the reentrancy depth is 0 then there is no need to restore | ||
158 | * the previous fixmap, and leaving the current one in place allow it to | ||
159 | * be reused the next time without a TLB flush (common with DMA). | ||
160 | */ | ||
161 | |||
162 | static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth); | ||
163 | |||
164 | void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte) | ||
165 | { | ||
166 | unsigned int idx, cpu; | ||
167 | int *depth; | ||
168 | unsigned long vaddr, flags; | ||
169 | pte_t pte, *ptep; | ||
170 | |||
171 | if (!in_interrupt()) | ||
172 | preempt_disable(); | ||
173 | |||
174 | cpu = smp_processor_id(); | ||
175 | depth = &per_cpu(kmap_high_l1_vipt_depth, cpu); | ||
176 | |||
177 | idx = KM_L1_CACHE + KM_TYPE_NR * cpu; | ||
178 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); | ||
179 | ptep = TOP_PTE(vaddr); | ||
180 | pte = mk_pte(page, kmap_prot); | ||
181 | |||
182 | raw_local_irq_save(flags); | ||
183 | (*depth)++; | ||
184 | if (pte_val(*ptep) == pte_val(pte)) { | ||
185 | *saved_pte = pte; | ||
186 | } else { | ||
187 | *saved_pte = *ptep; | ||
188 | set_pte_ext(ptep, pte, 0); | ||
189 | local_flush_tlb_kernel_page(vaddr); | ||
190 | } | ||
191 | raw_local_irq_restore(flags); | ||
192 | |||
193 | return (void *)vaddr; | ||
194 | } | ||
195 | |||
196 | void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte) | ||
197 | { | ||
198 | unsigned int idx, cpu = smp_processor_id(); | ||
199 | int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu); | ||
200 | unsigned long vaddr, flags; | ||
201 | pte_t pte, *ptep; | ||
202 | |||
203 | idx = KM_L1_CACHE + KM_TYPE_NR * cpu; | ||
204 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); | ||
205 | ptep = TOP_PTE(vaddr); | ||
206 | pte = mk_pte(page, kmap_prot); | ||
207 | |||
208 | BUG_ON(pte_val(*ptep) != pte_val(pte)); | ||
209 | BUG_ON(*depth <= 0); | ||
210 | |||
211 | raw_local_irq_save(flags); | ||
212 | (*depth)--; | ||
213 | if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) { | ||
214 | set_pte_ext(ptep, saved_pte, 0); | ||
215 | local_flush_tlb_kernel_page(vaddr); | ||
216 | } | ||
217 | raw_local_irq_restore(flags); | ||
218 | |||
219 | if (!in_interrupt()) | ||
220 | preempt_enable(); | ||
221 | } | ||
222 | |||
223 | #endif /* CONFIG_CPU_CACHE_VIPT */ | ||
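The highmem rework removes the km_type parameter from the atomic kmap API: fixmap slots are handed out by a small per-CPU stack (kmap_atomic_idx_push()/kmap_atomic_idx_pop()), so callers no longer pick slots by hand, and the manual depth accounting that the deleted kmap_high_l1_vipt() pair kept is subsumed. The caller-visible change, as a sketch (assuming <linux/highmem.h>):

    /* old: p = kmap_atomic(page, KM_USER0); ... kunmap_atomic(p, KM_USER0);
     * new: mappings nest like a stack and are released in LIFO order */
    void *a = kmap_atomic(page_a);
    void *b = kmap_atomic(page_b);   /* pushes a second per-CPU slot */
    memcpy(a, b, PAGE_SIZE);
    kunmap_atomic(b);                /* pop in reverse order */
    kunmap_atomic(a);

This is also what lets the dma-mapping.c and flush.c hunks above use plain kmap_atomic() for VIPT maintenance on otherwise unmapped highmem pages.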
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c new file mode 100644 index 000000000000..2be9139a4ef3 --- /dev/null +++ b/arch/arm/mm/idmap.c | |||
@@ -0,0 +1,90 @@ | |||
1 | #include <linux/kernel.h> | ||
2 | |||
3 | #include <asm/cputype.h> | ||
4 | #include <asm/pgalloc.h> | ||
5 | #include <asm/pgtable.h> | ||
6 | |||
7 | static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end, | ||
8 | unsigned long prot) | ||
9 | { | ||
10 | pmd_t *pmd = pmd_offset(pud, addr); | ||
11 | |||
12 | addr = (addr & PMD_MASK) | prot; | ||
13 | pmd[0] = __pmd(addr); | ||
14 | addr += SECTION_SIZE; | ||
15 | pmd[1] = __pmd(addr); | ||
16 | flush_pmd_entry(pmd); | ||
17 | } | ||
18 | |||
19 | static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end, | ||
20 | unsigned long prot) | ||
21 | { | ||
22 | pud_t *pud = pud_offset(pgd, addr); | ||
23 | unsigned long next; | ||
24 | |||
25 | do { | ||
26 | next = pud_addr_end(addr, end); | ||
27 | idmap_add_pmd(pud, addr, next, prot); | ||
28 | } while (pud++, addr = next, addr != end); | ||
29 | } | ||
30 | |||
31 | void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end) | ||
32 | { | ||
33 | unsigned long prot, next; | ||
34 | |||
35 | prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE; | ||
36 | if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) | ||
37 | prot |= PMD_BIT4; | ||
38 | |||
39 | pgd += pgd_index(addr); | ||
40 | do { | ||
41 | next = pgd_addr_end(addr, end); | ||
42 | idmap_add_pud(pgd, addr, next, prot); | ||
43 | } while (pgd++, addr = next, addr != end); | ||
44 | } | ||
45 | |||
46 | #ifdef CONFIG_SMP | ||
47 | static void idmap_del_pmd(pud_t *pud, unsigned long addr, unsigned long end) | ||
48 | { | ||
49 | pmd_t *pmd = pmd_offset(pud, addr); | ||
50 | pmd_clear(pmd); | ||
51 | } | ||
52 | |||
53 | static void idmap_del_pud(pgd_t *pgd, unsigned long addr, unsigned long end) | ||
54 | { | ||
55 | pud_t *pud = pud_offset(pgd, addr); | ||
56 | unsigned long next; | ||
57 | |||
58 | do { | ||
59 | next = pud_addr_end(addr, end); | ||
60 | idmap_del_pmd(pud, addr, next); | ||
61 | } while (pud++, addr = next, addr != end); | ||
62 | } | ||
63 | |||
64 | void identity_mapping_del(pgd_t *pgd, unsigned long addr, unsigned long end) | ||
65 | { | ||
66 | unsigned long next; | ||
67 | |||
68 | pgd += pgd_index(addr); | ||
69 | do { | ||
70 | next = pgd_addr_end(addr, end); | ||
71 | idmap_del_pud(pgd, addr, next); | ||
72 | } while (pgd++, addr = next, addr != end); | ||
73 | } | ||
74 | #endif | ||
75 | |||
76 | /* | ||
77 | * In order to soft-boot, we need to insert a 1:1 mapping in place of | ||
78 | * the user-mode pages. This will then ensure that we have predictable | ||
79 | * results when turning the MMU off. | ||
80 | */ | ||
81 | void setup_mm_for_reboot(char mode) | ||
82 | { | ||
83 | /* | ||
84 | * We need access to user-mode page tables here. For kernel threads | ||
85 | * we don't have any user-mode mappings so we use the context that we | ||
86 | * "borrowed". | ||
87 | */ | ||
88 | identity_mapping_add(current->active_mm->pgd, 0, TASK_SIZE); | ||
89 | local_flush_tlb_all(); | ||
90 | } | ||
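idmap_add_pmd() writes two entries per slot because ARM folds two 1 MiB hardware sections into each 2 MiB Linux-level pmd; both halves must be filled for the 1:1 window to be gapless. A worked example for one slot:

    /* identity_mapping_add(pgd, 0x10000000, 0x10200000) results in:
     *   pmd[0] = __pmd(0x10000000 | prot)   first 1 MiB section
     *   pmd[1] = __pmd(0x10100000 | prot)   second 1 MiB section
     * so virtual == physical across the whole 2 MiB slot. */

setup_mm_for_reboot() then only needs to lay this mapping over [0, TASK_SIZE) of the borrowed mm and flush the TLB before the MMU is turned off.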
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 7185b00650fe..c19571c40a21 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c | |||
@@ -15,11 +15,14 @@ | |||
15 | #include <linux/mman.h> | 15 | #include <linux/mman.h> |
16 | #include <linux/nodemask.h> | 16 | #include <linux/nodemask.h> |
17 | #include <linux/initrd.h> | 17 | #include <linux/initrd.h> |
18 | #include <linux/of_fdt.h> | ||
18 | #include <linux/highmem.h> | 19 | #include <linux/highmem.h> |
19 | #include <linux/gfp.h> | 20 | #include <linux/gfp.h> |
20 | #include <linux/memblock.h> | 21 | #include <linux/memblock.h> |
22 | #include <linux/sort.h> | ||
21 | 23 | ||
22 | #include <asm/mach-types.h> | 24 | #include <asm/mach-types.h> |
25 | #include <asm/prom.h> | ||
23 | #include <asm/sections.h> | 26 | #include <asm/sections.h> |
24 | #include <asm/setup.h> | 27 | #include <asm/setup.h> |
25 | #include <asm/sizes.h> | 28 | #include <asm/sizes.h> |
@@ -70,6 +73,14 @@ static int __init parse_tag_initrd2(const struct tag *tag) | |||
70 | 73 | ||
71 | __tagtable(ATAG_INITRD2, parse_tag_initrd2); | 74 | __tagtable(ATAG_INITRD2, parse_tag_initrd2); |
72 | 75 | ||
76 | #ifdef CONFIG_OF_FLATTREE | ||
77 | void __init early_init_dt_setup_initrd_arch(unsigned long start, unsigned long end) | ||
78 | { | ||
79 | phys_initrd_start = start; | ||
80 | phys_initrd_size = end - start; | ||
81 | } | ||
82 | #endif /* CONFIG_OF_FLATTREE */ | ||
83 | |||
73 | /* | 84 | /* |
74 | * This keeps memory configuration data used by a couple memory | 85 | * This keeps memory configuration data used by a couple memory |
75 | * initialization functions, as well as show_mem() for the skipping | 86 | * initialization functions, as well as show_mem() for the skipping |
@@ -77,14 +88,14 @@ __tagtable(ATAG_INITRD2, parse_tag_initrd2); | |||
77 | */ | 88 | */ |
78 | struct meminfo meminfo; | 89 | struct meminfo meminfo; |
79 | 90 | ||
80 | void show_mem(void) | 91 | void show_mem(unsigned int filter) |
81 | { | 92 | { |
82 | int free = 0, total = 0, reserved = 0; | 93 | int free = 0, total = 0, reserved = 0; |
83 | int shared = 0, cached = 0, slab = 0, i; | 94 | int shared = 0, cached = 0, slab = 0, i; |
84 | struct meminfo * mi = &meminfo; | 95 | struct meminfo * mi = &meminfo; |
85 | 96 | ||
86 | printk("Mem-info:\n"); | 97 | printk("Mem-info:\n"); |
87 | show_free_areas(); | 98 | show_free_areas(filter); |
88 | 99 | ||
89 | for_each_bank (i, mi) { | 100 | for_each_bank (i, mi) { |
90 | struct membank *bank = &mi->bank[i]; | 101 | struct membank *bank = &mi->bank[i]; |
@@ -121,9 +132,10 @@ void show_mem(void) | |||
121 | printk("%d pages swap cached\n", cached); | 132 | printk("%d pages swap cached\n", cached); |
122 | } | 133 | } |
123 | 134 | ||
124 | static void __init find_limits(struct meminfo *mi, | 135 | static void __init find_limits(unsigned long *min, unsigned long *max_low, |
125 | unsigned long *min, unsigned long *max_low, unsigned long *max_high) | 136 | unsigned long *max_high) |
126 | { | 137 | { |
138 | struct meminfo *mi = &meminfo; | ||
127 | int i; | 139 | int i; |
128 | 140 | ||
129 | *min = -1UL; | 141 | *min = -1UL; |
@@ -147,13 +159,13 @@ static void __init find_limits(struct meminfo *mi, | |||
147 | } | 159 | } |
148 | } | 160 | } |
149 | 161 | ||
150 | static void __init arm_bootmem_init(struct meminfo *mi, | 162 | static void __init arm_bootmem_init(unsigned long start_pfn, |
151 | unsigned long start_pfn, unsigned long end_pfn) | 163 | unsigned long end_pfn) |
152 | { | 164 | { |
165 | struct memblock_region *reg; | ||
153 | unsigned int boot_pages; | 166 | unsigned int boot_pages; |
154 | phys_addr_t bitmap; | 167 | phys_addr_t bitmap; |
155 | pg_data_t *pgdat; | 168 | pg_data_t *pgdat; |
156 | int i; | ||
157 | 169 | ||
158 | /* | 170 | /* |
159 | * Allocate the bootmem bitmap page. This must be in a region | 171 | * Allocate the bootmem bitmap page. This must be in a region |
@@ -171,30 +183,53 @@ static void __init arm_bootmem_init(struct meminfo *mi, | |||
171 | pgdat = NODE_DATA(0); | 183 | pgdat = NODE_DATA(0); |
172 | init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn); | 184 | init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn); |
173 | 185 | ||
174 | for_each_bank(i, mi) { | 186 | /* Free the lowmem regions from memblock into bootmem. */ |
175 | struct membank *bank = &mi->bank[i]; | 187 | for_each_memblock(memory, reg) { |
176 | if (!bank->highmem) | 188 | unsigned long start = memblock_region_memory_base_pfn(reg); |
177 | free_bootmem(bank_phys_start(bank), bank_phys_size(bank)); | 189 | unsigned long end = memblock_region_memory_end_pfn(reg); |
190 | |||
191 | if (end >= end_pfn) | ||
192 | end = end_pfn; | ||
193 | if (start >= end) | ||
194 | break; | ||
195 | |||
196 | free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT); | ||
178 | } | 197 | } |
179 | 198 | ||
180 | /* | 199 | /* Reserve the lowmem memblock reserved regions in bootmem. */ |
181 | * Reserve the memblock reserved regions in bootmem. | 200 | for_each_memblock(reserved, reg) { |
182 | */ | 201 | unsigned long start = memblock_region_reserved_base_pfn(reg); |
183 | for (i = 0; i < memblock.reserved.cnt; i++) { | 202 | unsigned long end = memblock_region_reserved_end_pfn(reg); |
184 | phys_addr_t start = memblock_start_pfn(&memblock.reserved, i); | 203 | |
185 | if (start >= start_pfn && | 204 | if (end >= end_pfn) |
186 | memblock_end_pfn(&memblock.reserved, i) <= end_pfn) | 205 | end = end_pfn; |
187 | reserve_bootmem_node(pgdat, __pfn_to_phys(start), | 206 | if (start >= end) |
188 | memblock_size_bytes(&memblock.reserved, i), | 207 | break; |
189 | BOOTMEM_DEFAULT); | 208 | |
209 | reserve_bootmem(__pfn_to_phys(start), | ||
210 | (end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT); | ||
190 | } | 211 | } |
191 | } | 212 | } |
192 | 213 | ||
193 | static void __init arm_bootmem_free(struct meminfo *mi, unsigned long min, | 214 | #ifdef CONFIG_ZONE_DMA |
194 | unsigned long max_low, unsigned long max_high) | 215 | static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole, |
216 | unsigned long dma_size) | ||
217 | { | ||
218 | if (size[0] <= dma_size) | ||
219 | return; | ||
220 | |||
221 | size[ZONE_NORMAL] = size[0] - dma_size; | ||
222 | size[ZONE_DMA] = dma_size; | ||
223 | hole[ZONE_NORMAL] = hole[0]; | ||
224 | hole[ZONE_DMA] = 0; | ||
225 | } | ||
226 | #endif | ||
227 | |||
228 | static void __init arm_bootmem_free(unsigned long min, unsigned long max_low, | ||
229 | unsigned long max_high) | ||
195 | { | 230 | { |
196 | unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES]; | 231 | unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES]; |
197 | int i; | 232 | struct memblock_region *reg; |
198 | 233 | ||
199 | /* | 234 | /* |
200 | * initialise the zones. | 235 | * initialise the zones. |
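arm_adjust_dma_zone() (new above) carves the machine's DMA window out of zone 0 after the zone sizes have been computed; the next hunk swaps it in for the old per-machine arch_adjust_zones() hook, keyed off the compile-time ARM_DMA_ZONE_SIZE. A worked example, assuming 256 MiB of lowmem, a 64 MiB DMA window and 4 KiB pages:

    /* before: size[0] = 65536 pages (256 MiB)
     * after arm_adjust_dma_zone(size, hole, 16384):
     *   size[ZONE_DMA]    = 16384 pages (64 MiB, bottom of memory)
     *   size[ZONE_NORMAL] = 49152 pages (192 MiB)
     * any hole stays accounted to ZONE_NORMAL, none to the DMA zone */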
@@ -216,72 +251,97 @@ static void __init arm_bootmem_free(struct meminfo *mi, unsigned long min, | |||
216 | * holes = node_size - sum(bank_sizes) | 251 | * holes = node_size - sum(bank_sizes) |
217 | */ | 252 | */ |
218 | memcpy(zhole_size, zone_size, sizeof(zhole_size)); | 253 | memcpy(zhole_size, zone_size, sizeof(zhole_size)); |
219 | for_each_bank(i, mi) { | 254 | for_each_memblock(memory, reg) { |
220 | int idx = 0; | 255 | unsigned long start = memblock_region_memory_base_pfn(reg); |
256 | unsigned long end = memblock_region_memory_end_pfn(reg); | ||
257 | |||
258 | if (start < max_low) { | ||
259 | unsigned long low_end = min(end, max_low); | ||
260 | zhole_size[0] -= low_end - start; | ||
261 | } | ||
221 | #ifdef CONFIG_HIGHMEM | 262 | #ifdef CONFIG_HIGHMEM |
222 | if (mi->bank[i].highmem) | 263 | if (end > max_low) { |
223 | idx = ZONE_HIGHMEM; | 264 | unsigned long high_start = max(start, max_low); |
265 | zhole_size[ZONE_HIGHMEM] -= end - high_start; | ||
266 | } | ||
224 | #endif | 267 | #endif |
225 | zhole_size[idx] -= bank_pfn_size(&mi->bank[i]); | ||
226 | } | 268 | } |
227 | 269 | ||
270 | #ifdef ARM_DMA_ZONE_SIZE | ||
271 | #ifndef CONFIG_ZONE_DMA | ||
272 | #error ARM_DMA_ZONE_SIZE set but no DMA zone to limit allocations | ||
273 | #endif | ||
274 | |||
228 | /* | 275 | /* |
229 | * Adjust the sizes according to any special requirements for | 276 | * Adjust the sizes according to any special requirements for |
230 | * this machine type. | 277 | * this machine type. |
231 | */ | 278 | */ |
232 | arch_adjust_zones(zone_size, zhole_size); | 279 | arm_adjust_dma_zone(zone_size, zhole_size, |
280 | ARM_DMA_ZONE_SIZE >> PAGE_SHIFT); | ||
281 | #endif | ||
233 | 282 | ||
234 | free_area_init_node(0, zone_size, min, zhole_size); | 283 | free_area_init_node(0, zone_size, min, zhole_size); |
235 | } | 284 | } |
236 | 285 | ||
237 | #ifndef CONFIG_SPARSEMEM | 286 | #ifdef CONFIG_HAVE_ARCH_PFN_VALID |
238 | int pfn_valid(unsigned long pfn) | 287 | int pfn_valid(unsigned long pfn) |
239 | { | 288 | { |
240 | struct memblock_region *mem = &memblock.memory; | 289 | return memblock_is_memory(pfn << PAGE_SHIFT); |
241 | unsigned int left = 0, right = mem->cnt; | ||
242 | |||
243 | do { | ||
244 | unsigned int mid = (right + left) / 2; | ||
245 | |||
246 | if (pfn < memblock_start_pfn(mem, mid)) | ||
247 | right = mid; | ||
248 | else if (pfn >= memblock_end_pfn(mem, mid)) | ||
249 | left = mid + 1; | ||
250 | else | ||
251 | return 1; | ||
252 | } while (left < right); | ||
253 | return 0; | ||
254 | } | 290 | } |
255 | EXPORT_SYMBOL(pfn_valid); | 291 | EXPORT_SYMBOL(pfn_valid); |
292 | #endif | ||
256 | 293 | ||
294 | #ifndef CONFIG_SPARSEMEM | ||
257 | static void arm_memory_present(void) | 295 | static void arm_memory_present(void) |
258 | { | 296 | { |
259 | } | 297 | } |
260 | #else | 298 | #else |
261 | static void arm_memory_present(void) | 299 | static void arm_memory_present(void) |
262 | { | 300 | { |
263 | int i; | 301 | struct memblock_region *reg; |
264 | for (i = 0; i < memblock.memory.cnt; i++) | 302 | |
265 | memory_present(0, memblock_start_pfn(&memblock.memory, i), | 303 | for_each_memblock(memory, reg) |
266 | memblock_end_pfn(&memblock.memory, i)); | 304 | memory_present(0, memblock_region_memory_base_pfn(reg), |
305 | memblock_region_memory_end_pfn(reg)); | ||
267 | } | 306 | } |
268 | #endif | 307 | #endif |
269 | 308 | ||
309 | static int __init meminfo_cmp(const void *_a, const void *_b) | ||
310 | { | ||
311 | const struct membank *a = _a, *b = _b; | ||
312 | long cmp = bank_pfn_start(a) - bank_pfn_start(b); | ||
313 | return cmp < 0 ? -1 : cmp > 0 ? 1 : 0; | ||
314 | } | ||
315 | |||
270 | void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc) | 316 | void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc) |
271 | { | 317 | { |
272 | int i; | 318 | int i; |
273 | 319 | ||
320 | sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL); | ||
321 | |||
274 | memblock_init(); | 322 | memblock_init(); |
275 | for (i = 0; i < mi->nr_banks; i++) | 323 | for (i = 0; i < mi->nr_banks; i++) |
276 | memblock_add(mi->bank[i].start, mi->bank[i].size); | 324 | memblock_add(mi->bank[i].start, mi->bank[i].size); |
277 | 325 | ||
278 | /* Register the kernel text, kernel data and initrd with memblock. */ | 326 | /* Register the kernel text, kernel data and initrd with memblock. */ |
279 | #ifdef CONFIG_XIP_KERNEL | 327 | #ifdef CONFIG_XIP_KERNEL |
280 | memblock_reserve(__pa(_data), _end - _data); | 328 | memblock_reserve(__pa(_sdata), _end - _sdata); |
281 | #else | 329 | #else |
282 | memblock_reserve(__pa(_stext), _end - _stext); | 330 | memblock_reserve(__pa(_stext), _end - _stext); |
283 | #endif | 331 | #endif |
284 | #ifdef CONFIG_BLK_DEV_INITRD | 332 | #ifdef CONFIG_BLK_DEV_INITRD |
333 | if (phys_initrd_size && | ||
334 | !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) { | ||
335 | pr_err("INITRD: 0x%08lx+0x%08lx is not a memory region - disabling initrd\n", | ||
336 | phys_initrd_start, phys_initrd_size); | ||
337 | phys_initrd_start = phys_initrd_size = 0; | ||
338 | } | ||
339 | if (phys_initrd_size && | ||
340 | memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) { | ||
341 | pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n", | ||
342 | phys_initrd_start, phys_initrd_size); | ||
343 | phys_initrd_start = phys_initrd_size = 0; | ||
344 | } | ||
285 | if (phys_initrd_size) { | 345 | if (phys_initrd_size) { |
286 | memblock_reserve(phys_initrd_start, phys_initrd_size); | 346 | memblock_reserve(phys_initrd_start, phys_initrd_size); |
287 | 347 | ||
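The two new checks make a bogus firmware-supplied initrd non-fatal: a range that is not actually RAM, or that collides with an existing reservation, is reported and dropped instead of being reserved blindly. The pattern generalises to any externally supplied physical range (sketch, assuming <linux/memblock.h>; helper name invented):

    static bool __init claim_phys_range(phys_addr_t base, phys_addr_t size)
    {
        if (!memblock_is_region_memory(base, size))
            return false;    /* not backed by RAM at all */
        if (memblock_is_region_reserved(base, size))
            return false;    /* overlaps something already claimed */
        memblock_reserve(base, size);
        return true;
    }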
@@ -292,6 +352,7 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc) | |||
292 | #endif | 352 | #endif |
293 | 353 | ||
294 | arm_mm_memblock_reserve(); | 354 | arm_mm_memblock_reserve(); |
355 | arm_dt_memblock_reserve(); | ||
295 | 356 | ||
296 | /* reserve any platform specific memblock areas */ | 357 | /* reserve any platform specific memblock areas */ |
297 | if (mdesc->reserve) | 358 | if (mdesc->reserve) |
@@ -303,14 +364,13 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc) | |||
303 | 364 | ||
304 | void __init bootmem_init(void) | 365 | void __init bootmem_init(void) |
305 | { | 366 | { |
306 | struct meminfo *mi = &meminfo; | ||
307 | unsigned long min, max_low, max_high; | 367 | unsigned long min, max_low, max_high; |
308 | 368 | ||
309 | max_low = max_high = 0; | 369 | max_low = max_high = 0; |
310 | 370 | ||
311 | find_limits(mi, &min, &max_low, &max_high); | 371 | find_limits(&min, &max_low, &max_high); |
312 | 372 | ||
313 | arm_bootmem_init(mi, min, max_low); | 373 | arm_bootmem_init(min, max_low); |
314 | 374 | ||
315 | /* | 375 | /* |
316 | * Sparsemem tries to allocate bootmem in memory_present(), | 376 | * Sparsemem tries to allocate bootmem in memory_present(), |
@@ -328,9 +388,9 @@ void __init bootmem_init(void) | |||
328 | * the sparse mem_map arrays initialized by sparse_init() | 388 | * the sparse mem_map arrays initialized by sparse_init() |
329 | * for memmap_init_zone(), otherwise all PFNs are invalid. | 389 | * for memmap_init_zone(), otherwise all PFNs are invalid. |
330 | */ | 390 | */ |
331 | arm_bootmem_free(mi, min, max_low, max_high); | 391 | arm_bootmem_free(min, max_low, max_high); |
332 | 392 | ||
333 | high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1; | 393 | high_memory = __va(((phys_addr_t)max_low << PAGE_SHIFT) - 1) + 1; |
334 | 394 | ||
335 | /* | 395 | /* |
336 | * This doesn't seem to be used by the Linux memory manager any | 396 | * This doesn't seem to be used by the Linux memory manager any |
@@ -372,14 +432,14 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn) | |||
372 | * Convert start_pfn/end_pfn to a struct page pointer. | 432 | * Convert start_pfn/end_pfn to a struct page pointer. |
373 | */ | 433 | */ |
374 | start_pg = pfn_to_page(start_pfn - 1) + 1; | 434 | start_pg = pfn_to_page(start_pfn - 1) + 1; |
375 | end_pg = pfn_to_page(end_pfn); | 435 | end_pg = pfn_to_page(end_pfn - 1) + 1; |
376 | 436 | ||
377 | /* | 437 | /* |
378 | * Convert to physical addresses, and | 438 | * Convert to physical addresses, and |
379 | * round start upwards and end downwards. | 439 | * round start upwards and end downwards. |
380 | */ | 440 | */ |
381 | pg = PAGE_ALIGN(__pa(start_pg)); | 441 | pg = (unsigned long)PAGE_ALIGN(__pa(start_pg)); |
382 | pgend = __pa(end_pg) & PAGE_MASK; | 442 | pgend = (unsigned long)__pa(end_pg) & PAGE_MASK; |
383 | 443 | ||
384 | /* | 444 | /* |
385 | * If there are free pages between these, | 445 | * If there are free pages between these, |
@@ -406,6 +466,14 @@ static void __init free_unused_memmap(struct meminfo *mi) | |||
406 | 466 | ||
407 | bank_start = bank_pfn_start(bank); | 467 | bank_start = bank_pfn_start(bank); |
408 | 468 | ||
469 | #ifdef CONFIG_SPARSEMEM | ||
470 | /* | ||
471 | * Take care not to free memmap entries that don't exist | ||
472 | * due to SPARSEMEM sections which aren't present. | ||
473 | */ | ||
474 | bank_start = min(bank_start, | ||
475 | ALIGN(prev_bank_end, PAGES_PER_SECTION)); | ||
476 | #endif | ||
409 | /* | 477 | /* |
410 | * If we had a previous bank, and there is a space | 478 | * If we had a previous bank, and there is a space |
411 | * between the current bank and the previous, free it. | 479 | * between the current bank and the previous, free it. |
@@ -420,6 +488,62 @@ static void __init free_unused_memmap(struct meminfo *mi) | |||
420 | */ | 488 | */ |
421 | prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES); | 489 | prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES); |
422 | } | 490 | } |
491 | |||
492 | #ifdef CONFIG_SPARSEMEM | ||
493 | if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION)) | ||
494 | free_memmap(prev_bank_end, | ||
495 | ALIGN(prev_bank_end, PAGES_PER_SECTION)); | ||
496 | #endif | ||
497 | } | ||
498 | |||
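The trailing CONFIG_SPARSEMEM block handles the last bank: if it ends mid-section, the memmap out to the next section boundary exists but backs no memory, so it is freed too. A worked example with an assumed section size:

    /* illustrative only: PAGES_PER_SECTION assumed to be 0x10000 here */
    unsigned long prev_bank_end = 0x48000;  /* last bank ends mid-section */
    if (!IS_ALIGNED(prev_bank_end, 0x10000))
        /* releases the memmap for PFNs 0x48000..0x4ffff */
        free_memmap(prev_bank_end, ALIGN(prev_bank_end, 0x10000));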
499 | static void __init free_highpages(void) | ||
500 | { | ||
501 | #ifdef CONFIG_HIGHMEM | ||
502 | unsigned long max_low = max_low_pfn + PHYS_PFN_OFFSET; | ||
503 | struct memblock_region *mem, *res; | ||
504 | |||
505 | /* set highmem page free */ | ||
506 | for_each_memblock(memory, mem) { | ||
507 | unsigned long start = memblock_region_memory_base_pfn(mem); | ||
508 | unsigned long end = memblock_region_memory_end_pfn(mem); | ||
509 | |||
510 | /* Ignore complete lowmem entries */ | ||
511 | if (end <= max_low) | ||
512 | continue; | ||
513 | |||
514 | /* Truncate partial highmem entries */ | ||
515 | if (start < max_low) | ||
516 | start = max_low; | ||
517 | |||
518 | /* Find and exclude any reserved regions */ | ||
519 | for_each_memblock(reserved, res) { | ||
520 | unsigned long res_start, res_end; | ||
521 | |||
522 | res_start = memblock_region_reserved_base_pfn(res); | ||
523 | res_end = memblock_region_reserved_end_pfn(res); | ||
524 | |||
525 | if (res_end < start) | ||
526 | continue; | ||
527 | if (res_start < start) | ||
528 | res_start = start; | ||
529 | if (res_start > end) | ||
530 | res_start = end; | ||
531 | if (res_end > end) | ||
532 | res_end = end; | ||
533 | if (res_start != start) | ||
534 | totalhigh_pages += free_area(start, res_start, | ||
535 | NULL); | ||
536 | start = res_end; | ||
537 | if (start == end) | ||
538 | break; | ||
539 | } | ||
540 | |||
541 | /* And now free anything which remains */ | ||
542 | if (start < end) | ||
543 | totalhigh_pages += free_area(start, end, NULL); | ||
544 | } | ||
545 | totalram_pages += totalhigh_pages; | ||
546 | #endif | ||
423 | } | 547 | } |
424 | 548 | ||
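The inner loop of the new free_highpages() subtracts the reserved memblock ranges from each highmem interval and frees the gaps between them. The same logic can be phrased with clamping; an equivalent sketch, assuming (as memblock guarantees) that the reserved array is sorted by base:

    /* illustrative only: free [start, end) minus sorted reserved ranges */
    for_each_memblock(reserved, res) {
        unsigned long res_start =
            clamp(memblock_region_reserved_base_pfn(res), start, end);
        unsigned long res_end =
            clamp(memblock_region_reserved_end_pfn(res), start, end);

        if (res_start > start)            /* gap before this reservation */
            totalhigh_pages += free_area(start, res_start, NULL);
        start = res_end;                  /* resume after it */
    }
    if (start < end)                      /* tail after the last reservation */
        totalhigh_pages += free_area(start, end, NULL);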
425 | /* | 549 | /* |
@@ -430,6 +554,7 @@ static void __init free_unused_memmap(struct meminfo *mi) | |||
430 | void __init mem_init(void) | 554 | void __init mem_init(void) |
431 | { | 555 | { |
432 | unsigned long reserved_pages, free_pages; | 556 | unsigned long reserved_pages, free_pages; |
557 | struct memblock_region *reg; | ||
433 | int i; | 558 | int i; |
434 | #ifdef CONFIG_HAVE_TCM | 559 | #ifdef CONFIG_HAVE_TCM |
435 | /* These pointers are filled in on TCM detection */ | 560 | /* These pointers are filled in on TCM detection */ |
@@ -450,16 +575,7 @@ void __init mem_init(void) | |||
450 | __phys_to_pfn(__pa(swapper_pg_dir)), NULL); | 575 | __phys_to_pfn(__pa(swapper_pg_dir)), NULL); |
451 | #endif | 576 | #endif |
452 | 577 | ||
453 | #ifdef CONFIG_HIGHMEM | 578 | free_highpages(); |
454 | /* set highmem page free */ | ||
455 | for_each_bank (i, &meminfo) { | ||
456 | unsigned long start = bank_pfn_start(&meminfo.bank[i]); | ||
457 | unsigned long end = bank_pfn_end(&meminfo.bank[i]); | ||
458 | if (start >= max_low_pfn + PHYS_PFN_OFFSET) | ||
459 | totalhigh_pages += free_area(start, end, NULL); | ||
460 | } | ||
461 | totalram_pages += totalhigh_pages; | ||
462 | #endif | ||
463 | 579 | ||
464 | reserved_pages = free_pages = 0; | 580 | reserved_pages = free_pages = 0; |
465 | 581 | ||
@@ -489,9 +605,11 @@ void __init mem_init(void) | |||
489 | */ | 605 | */ |
490 | printk(KERN_INFO "Memory:"); | 606 | printk(KERN_INFO "Memory:"); |
491 | num_physpages = 0; | 607 | num_physpages = 0; |
492 | for (i = 0; i < meminfo.nr_banks; i++) { | 608 | for_each_memblock(memory, reg) { |
493 | num_physpages += bank_pfn_size(&meminfo.bank[i]); | 609 | unsigned long pages = memblock_region_memory_end_pfn(reg) - |
494 | printk(" %ldMB", bank_phys_size(&meminfo.bank[i]) >> 20); | 610 | memblock_region_memory_base_pfn(reg); |
611 | num_physpages += pages; | ||
612 | printk(" %ldMB", pages >> (20 - PAGE_SHIFT)); | ||
495 | } | 613 | } |
496 | printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT)); | 614 | printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT)); |
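The per-region megabyte figure comes from shifting a page count by (20 - PAGE_SHIFT) rather than converting to bytes first, which would overflow unsigned long on 32-bit. Worked numbers, assuming 4 KiB pages:

    /* illustrative only: PAGE_SHIFT == 12, so 1 MB == 1 << (20 - 12) == 256 pages */
    unsigned long pages = 65536;              /* a 256 MB region's worth of pages */
    unsigned long mb    = pages >> (20 - 12); /* 65536 / 256 == 256 */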
497 | 615 | ||
@@ -523,7 +641,8 @@ void __init mem_init(void) | |||
523 | " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" | 641 | " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" |
524 | " .init : 0x%p" " - 0x%p" " (%4d kB)\n" | 642 | " .init : 0x%p" " - 0x%p" " (%4d kB)\n" |
525 | " .text : 0x%p" " - 0x%p" " (%4d kB)\n" | 643 | " .text : 0x%p" " - 0x%p" " (%4d kB)\n" |
526 | " .data : 0x%p" " - 0x%p" " (%4d kB)\n", | 644 | " .data : 0x%p" " - 0x%p" " (%4d kB)\n" |
645 | " .bss : 0x%p" " - 0x%p" " (%4d kB)\n", | ||
527 | 646 | ||
528 | MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) + | 647 | MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) + |
529 | (PAGE_SIZE)), | 648 | (PAGE_SIZE)), |
@@ -545,7 +664,8 @@ void __init mem_init(void) | |||
545 | 664 | ||
546 | MLK_ROUNDUP(__init_begin, __init_end), | 665 | MLK_ROUNDUP(__init_begin, __init_end), |
547 | MLK_ROUNDUP(_text, _etext), | 666 | MLK_ROUNDUP(_text, _etext), |
548 | MLK_ROUNDUP(_data, _edata)); | 667 | MLK_ROUNDUP(_sdata, _edata), |
668 | MLK_ROUNDUP(__bss_start, __bss_stop)); | ||
549 | 669 | ||
550 | #undef MLK | 670 | #undef MLK |
551 | #undef MLM | 671 | #undef MLM |
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index 17e7b0b57e49..ab506272b2d3 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c | |||
@@ -204,12 +204,8 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, | |||
204 | /* | 204 | /* |
205 | * Don't allow RAM to be mapped - this causes problems with ARMv6+ | 205 | * Don't allow RAM to be mapped - this causes problems with ARMv6+ |
206 | */ | 206 | */ |
207 | if (pfn_valid(pfn)) { | 207 | if (WARN_ON(pfn_valid(pfn))) |
208 | printk(KERN_WARNING "BUG: Your driver calls ioremap() on system memory. This leads\n" | 208 | return NULL; |
209 | KERN_WARNING "to architecturally unpredictable behaviour on ARMv6+, and ioremap()\n" | ||
210 | KERN_WARNING "will fail in the next kernel release. Please fix your driver.\n"); | ||
211 | WARN_ON(1); | ||
212 | } | ||
213 | 209 | ||
214 | type = get_mem_type(mtype); | 210 | type = get_mem_type(mtype); |
215 | if (!type) | 211 | if (!type) |
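The ioremap() change leans on WARN_ON() evaluating to its condition, so one line both logs a backtrace and gates the early return. The shape, in a hypothetical helper:

    /* illustrative only: WARN_ON(x) prints a warning plus a backtrace when
     * x is true, and evaluates to x either way */
    static int check_ioremap_pfn(unsigned long pfn)
    {
        if (WARN_ON(pfn_valid(pfn)))      /* refuse to map system RAM */
            return -EINVAL;
        return 0;
    }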
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index 6630620380a4..5b3d7d543659 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h | |||
@@ -5,18 +5,13 @@ extern pmd_t *top_pmd; | |||
5 | 5 | ||
6 | #define TOP_PTE(x) pte_offset_kernel(top_pmd, x) | 6 | #define TOP_PTE(x) pte_offset_kernel(top_pmd, x) |
7 | 7 | ||
8 | static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt) | ||
9 | { | ||
10 | return pmd_offset(pgd, virt); | ||
11 | } | ||
12 | |||
13 | static inline pmd_t *pmd_off_k(unsigned long virt) | 8 | static inline pmd_t *pmd_off_k(unsigned long virt) |
14 | { | 9 | { |
15 | return pmd_off(pgd_offset_k(virt), virt); | 10 | return pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt); |
16 | } | 11 | } |
17 | 12 | ||
18 | struct mem_type { | 13 | struct mem_type { |
19 | unsigned int prot_pte; | 14 | pteval_t prot_pte; |
20 | unsigned int prot_l1; | 15 | unsigned int prot_l1; |
21 | unsigned int prot_sect; | 16 | unsigned int prot_sect; |
22 | unsigned int domain; | 17 | unsigned int domain; |
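pmd_off_k() now spells out the pud step of the walk. On two-level ARM the pud (and pmd) levels are folded away, so the extra call compiles to nothing; written out long-hand (helper name hypothetical):

    /* illustrative only: the kernel page-table walk, level by level */
    static inline pmd_t *kernel_pmd_for(unsigned long virt)
    {
        pgd_t *pgd = pgd_offset_k(virt);    /* top-level entry */
        pud_t *pud = pud_offset(pgd, virt); /* folded: returns the pgd entry */
        return pmd_offset(pud, virt);       /* likewise folded on 2-level ARM */
    }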
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c index 4f5b39687df5..74be05f3e03a 100644 --- a/arch/arm/mm/mmap.c +++ b/arch/arm/mm/mmap.c | |||
@@ -7,6 +7,7 @@ | |||
7 | #include <linux/shm.h> | 7 | #include <linux/shm.h> |
8 | #include <linux/sched.h> | 8 | #include <linux/sched.h> |
9 | #include <linux/io.h> | 9 | #include <linux/io.h> |
10 | #include <linux/personality.h> | ||
10 | #include <linux/random.h> | 11 | #include <linux/random.h> |
11 | #include <asm/cputype.h> | 12 | #include <asm/cputype.h> |
12 | #include <asm/system.h> | 13 | #include <asm/system.h> |
@@ -31,7 +32,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, | |||
31 | struct mm_struct *mm = current->mm; | 32 | struct mm_struct *mm = current->mm; |
32 | struct vm_area_struct *vma; | 33 | struct vm_area_struct *vma; |
33 | unsigned long start_addr; | 34 | unsigned long start_addr; |
34 | #ifdef CONFIG_CPU_V6 | 35 | #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K) |
35 | unsigned int cache_type; | 36 | unsigned int cache_type; |
36 | int do_align = 0, aliasing = 0; | 37 | int do_align = 0, aliasing = 0; |
37 | 38 | ||
@@ -82,7 +83,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, | |||
82 | mm->cached_hole_size = 0; | 83 | mm->cached_hole_size = 0; |
83 | } | 84 | } |
84 | /* 8 bits of randomness in 20 address space bits */ | 85 | /* 8 bits of randomness in 20 address space bits */ |
85 | if (current->flags & PF_RANDOMIZE) | 86 | if ((current->flags & PF_RANDOMIZE) && |
87 | !(current->personality & ADDR_NO_RANDOMIZE)) | ||
86 | addr += (get_random_int() % (1 << 8)) << PAGE_SHIFT; | 88 | addr += (get_random_int() % (1 << 8)) << PAGE_SHIFT; |
87 | 89 | ||
88 | full_search: | 90 | full_search: |
@@ -144,3 +146,25 @@ int valid_mmap_phys_addr_range(unsigned long pfn, size_t size) | |||
144 | { | 146 | { |
145 | return !(pfn + (size >> PAGE_SHIFT) > 0x00100000); | 147 | return !(pfn + (size >> PAGE_SHIFT) > 0x00100000); |
146 | } | 148 | } |
149 | |||
150 | #ifdef CONFIG_STRICT_DEVMEM | ||
151 | |||
152 | #include <linux/ioport.h> | ||
153 | |||
154 | /* | ||
155 | * devmem_is_allowed() checks to see if /dev/mem access to a certain | ||
156 | * address is valid. The argument is a physical page number. | ||
157 | * We mimic x86 here by disallowing access to system RAM as well as | ||
158 | device-exclusive MMIO regions. This effectively disables read()/write() | ||
159 | * on /dev/mem. | ||
160 | */ | ||
161 | int devmem_is_allowed(unsigned long pfn) | ||
162 | { | ||
163 | if (iomem_is_exclusive(pfn << PAGE_SHIFT)) | ||
164 | return 0; | ||
165 | if (!page_is_ram(pfn)) | ||
166 | return 1; | ||
167 | return 0; | ||
168 | } | ||
169 | |||
170 | #endif | ||
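Two behavioural points in this file: mmap randomization is now suppressed for tasks that set the ADDR_NO_RANDOMIZE personality, and the slide itself is small. With 4 KiB pages the 8 random bits give at most 255 pages, about 1 MiB, of offset:

    /* illustrative only: 0..255 pages of slide, up to 1020 KiB with 4 KiB pages */
    if ((current->flags & PF_RANDOMIZE) &&
        !(current->personality & ADDR_NO_RANDOMIZE))
        addr += (get_random_int() % (1 << 8)) << PAGE_SHIFT;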
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index e8ed9dc461fe..594d677b92c8 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c | |||
@@ -14,7 +14,6 @@ | |||
14 | #include <linux/mman.h> | 14 | #include <linux/mman.h> |
15 | #include <linux/nodemask.h> | 15 | #include <linux/nodemask.h> |
16 | #include <linux/memblock.h> | 16 | #include <linux/memblock.h> |
17 | #include <linux/sort.h> | ||
18 | #include <linux/fs.h> | 17 | #include <linux/fs.h> |
19 | 18 | ||
20 | #include <asm/cputype.h> | 19 | #include <asm/cputype.h> |
@@ -25,14 +24,13 @@ | |||
25 | #include <asm/smp_plat.h> | 24 | #include <asm/smp_plat.h> |
26 | #include <asm/tlb.h> | 25 | #include <asm/tlb.h> |
27 | #include <asm/highmem.h> | 26 | #include <asm/highmem.h> |
27 | #include <asm/traps.h> | ||
28 | 28 | ||
29 | #include <asm/mach/arch.h> | 29 | #include <asm/mach/arch.h> |
30 | #include <asm/mach/map.h> | 30 | #include <asm/mach/map.h> |
31 | 31 | ||
32 | #include "mm.h" | 32 | #include "mm.h" |
33 | 33 | ||
34 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); | ||
35 | |||
36 | /* | 34 | /* |
37 | * empty_zero_page is a special page that is used for | 35 | * empty_zero_page is a special page that is used for |
38 | * zero-initialized data and COW. | 36 | * zero-initialized data and COW. |
@@ -63,7 +61,7 @@ struct cachepolicy { | |||
63 | const char policy[16]; | 61 | const char policy[16]; |
64 | unsigned int cr_mask; | 62 | unsigned int cr_mask; |
65 | unsigned int pmd; | 63 | unsigned int pmd; |
66 | unsigned int pte; | 64 | pteval_t pte; |
67 | }; | 65 | }; |
68 | 66 | ||
69 | static struct cachepolicy cache_policies[] __initdata = { | 67 | static struct cachepolicy cache_policies[] __initdata = { |
@@ -191,7 +189,7 @@ void adjust_cr(unsigned long mask, unsigned long set) | |||
191 | } | 189 | } |
192 | #endif | 190 | #endif |
193 | 191 | ||
194 | #define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_WRITE | 192 | #define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN |
195 | #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE | 193 | #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE |
196 | 194 | ||
197 | static struct mem_type mem_types[] = { | 195 | static struct mem_type mem_types[] = { |
@@ -236,19 +234,18 @@ static struct mem_type mem_types[] = { | |||
236 | }, | 234 | }, |
237 | [MT_LOW_VECTORS] = { | 235 | [MT_LOW_VECTORS] = { |
238 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | | 236 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | |
239 | L_PTE_EXEC, | 237 | L_PTE_RDONLY, |
240 | .prot_l1 = PMD_TYPE_TABLE, | 238 | .prot_l1 = PMD_TYPE_TABLE, |
241 | .domain = DOMAIN_USER, | 239 | .domain = DOMAIN_USER, |
242 | }, | 240 | }, |
243 | [MT_HIGH_VECTORS] = { | 241 | [MT_HIGH_VECTORS] = { |
244 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | | 242 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | |
245 | L_PTE_USER | L_PTE_EXEC, | 243 | L_PTE_USER | L_PTE_RDONLY, |
246 | .prot_l1 = PMD_TYPE_TABLE, | 244 | .prot_l1 = PMD_TYPE_TABLE, |
247 | .domain = DOMAIN_USER, | 245 | .domain = DOMAIN_USER, |
248 | }, | 246 | }, |
249 | [MT_MEMORY] = { | 247 | [MT_MEMORY] = { |
250 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | | 248 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY, |
251 | L_PTE_WRITE | L_PTE_EXEC, | ||
252 | .prot_l1 = PMD_TYPE_TABLE, | 249 | .prot_l1 = PMD_TYPE_TABLE, |
253 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, | 250 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, |
254 | .domain = DOMAIN_KERNEL, | 251 | .domain = DOMAIN_KERNEL, |
@@ -259,23 +256,22 @@ static struct mem_type mem_types[] = { | |||
259 | }, | 256 | }, |
260 | [MT_MEMORY_NONCACHED] = { | 257 | [MT_MEMORY_NONCACHED] = { |
261 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | | 258 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | |
262 | L_PTE_WRITE | L_PTE_EXEC | L_PTE_MT_BUFFERABLE, | 259 | L_PTE_MT_BUFFERABLE, |
263 | .prot_l1 = PMD_TYPE_TABLE, | 260 | .prot_l1 = PMD_TYPE_TABLE, |
264 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, | 261 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE, |
265 | .domain = DOMAIN_KERNEL, | 262 | .domain = DOMAIN_KERNEL, |
266 | }, | 263 | }, |
267 | [MT_MEMORY_DTCM] = { | 264 | [MT_MEMORY_DTCM] = { |
268 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | | 265 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | |
269 | L_PTE_DIRTY | L_PTE_WRITE, | 266 | L_PTE_XN, |
270 | .prot_l1 = PMD_TYPE_TABLE, | 267 | .prot_l1 = PMD_TYPE_TABLE, |
271 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, | 268 | .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, |
272 | .domain = DOMAIN_KERNEL, | 269 | .domain = DOMAIN_KERNEL, |
273 | }, | 270 | }, |
274 | [MT_MEMORY_ITCM] = { | 271 | [MT_MEMORY_ITCM] = { |
275 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | | 272 | .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY, |
276 | L_PTE_USER | L_PTE_EXEC, | ||
277 | .prot_l1 = PMD_TYPE_TABLE, | 273 | .prot_l1 = PMD_TYPE_TABLE, |
278 | .domain = DOMAIN_IO, | 274 | .domain = DOMAIN_KERNEL, |
279 | }, | 275 | }, |
280 | }; | 276 | }; |
281 | 277 | ||
@@ -310,9 +306,8 @@ static void __init build_mem_type_table(void) | |||
310 | cachepolicy = CPOLICY_WRITEBACK; | 306 | cachepolicy = CPOLICY_WRITEBACK; |
311 | ecc_mask = 0; | 307 | ecc_mask = 0; |
312 | } | 308 | } |
313 | #ifdef CONFIG_SMP | 309 | if (is_smp()) |
314 | cachepolicy = CPOLICY_WRITEALLOC; | 310 | cachepolicy = CPOLICY_WRITEALLOC; |
315 | #endif | ||
316 | 311 | ||
317 | /* | 312 | /* |
318 | * Strip out features not present on earlier architectures. | 313 | * Strip out features not present on earlier architectures. |
@@ -406,13 +401,11 @@ static void __init build_mem_type_table(void) | |||
406 | cp = &cache_policies[cachepolicy]; | 401 | cp = &cache_policies[cachepolicy]; |
407 | vecs_pgprot = kern_pgprot = user_pgprot = cp->pte; | 402 | vecs_pgprot = kern_pgprot = user_pgprot = cp->pte; |
408 | 403 | ||
409 | #ifndef CONFIG_SMP | ||
410 | /* | 404 | /* |
411 | * Only use write-through for non-SMP systems | 405 | * Only use write-through for non-SMP systems |
412 | */ | 406 | */ |
413 | if (cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH) | 407 | if (!is_smp() && cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH) |
414 | vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte; | 408 | vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte; |
415 | #endif | ||
416 | 409 | ||
417 | /* | 410 | /* |
418 | * Enable CPU-specific coherency if supported. | 411 | * Enable CPU-specific coherency if supported. |
@@ -436,22 +429,23 @@ static void __init build_mem_type_table(void) | |||
436 | mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; | 429 | mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; |
437 | mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; | 430 | mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; |
438 | 431 | ||
439 | #ifdef CONFIG_SMP | 432 | if (is_smp()) { |
440 | /* | 433 | /* |
441 | * Mark memory with the "shared" attribute for SMP systems | 434 | * Mark memory with the "shared" attribute |
442 | */ | 435 | * for SMP systems |
443 | user_pgprot |= L_PTE_SHARED; | 436 | */ |
444 | kern_pgprot |= L_PTE_SHARED; | 437 | user_pgprot |= L_PTE_SHARED; |
445 | vecs_pgprot |= L_PTE_SHARED; | 438 | kern_pgprot |= L_PTE_SHARED; |
446 | mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S; | 439 | vecs_pgprot |= L_PTE_SHARED; |
447 | mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED; | 440 | mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S; |
448 | mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S; | 441 | mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED; |
449 | mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED; | 442 | mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S; |
450 | mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; | 443 | mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED; |
451 | mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED; | 444 | mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; |
452 | mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; | 445 | mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED; |
453 | mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED; | 446 | mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; |
454 | #endif | 447 | mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED; |
448 | } | ||
455 | } | 449 | } |
456 | 450 | ||
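A recurring pattern in mmu.c: compile-time #ifdef CONFIG_SMP blocks become runtime is_smp() checks, so a single kernel image can pick the right cache policy and shareability bits at boot. Side by side, as a sketch:

    #ifdef CONFIG_SMP                      /* before: fixed when the kernel is built */
        cachepolicy = CPOLICY_WRITEALLOC;
    #endif

    if (is_smp())                          /* after: decided on the running system */
        cachepolicy = CPOLICY_WRITEALLOC;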
457 | /* | 451 | /* |
@@ -482,7 +476,7 @@ static void __init build_mem_type_table(void) | |||
482 | 476 | ||
483 | pgprot_user = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot); | 477 | pgprot_user = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot); |
484 | pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | | 478 | pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | |
485 | L_PTE_DIRTY | L_PTE_WRITE | kern_pgprot); | 479 | L_PTE_DIRTY | kern_pgprot); |
486 | 480 | ||
487 | mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask; | 481 | mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask; |
488 | mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; | 482 | mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; |
@@ -537,8 +531,8 @@ static void __init *early_alloc(unsigned long sz) | |||
537 | static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot) | 531 | static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot) |
538 | { | 532 | { |
539 | if (pmd_none(*pmd)) { | 533 | if (pmd_none(*pmd)) { |
540 | pte_t *pte = early_alloc(2 * PTRS_PER_PTE * sizeof(pte_t)); | 534 | pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE); |
541 | __pmd_populate(pmd, __pa(pte) | prot); | 535 | __pmd_populate(pmd, __pa(pte), prot); |
542 | } | 536 | } |
543 | BUG_ON(pmd_bad(*pmd)); | 537 | BUG_ON(pmd_bad(*pmd)); |
544 | return pte_offset_kernel(pmd, addr); | 538 | return pte_offset_kernel(pmd, addr); |
@@ -555,11 +549,11 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr, | |||
555 | } while (pte++, addr += PAGE_SIZE, addr != end); | 549 | } while (pte++, addr += PAGE_SIZE, addr != end); |
556 | } | 550 | } |
557 | 551 | ||
558 | static void __init alloc_init_section(pgd_t *pgd, unsigned long addr, | 552 | static void __init alloc_init_section(pud_t *pud, unsigned long addr, |
559 | unsigned long end, unsigned long phys, | 553 | unsigned long end, phys_addr_t phys, |
560 | const struct mem_type *type) | 554 | const struct mem_type *type) |
561 | { | 555 | { |
562 | pmd_t *pmd = pmd_offset(pgd, addr); | 556 | pmd_t *pmd = pmd_offset(pud, addr); |
563 | 557 | ||
564 | /* | 558 | /* |
565 | * Try a section mapping - end, addr and phys must all be aligned | 559 | * Try a section mapping - end, addr and phys must all be aligned |
@@ -588,20 +582,34 @@ static void __init alloc_init_section(pgd_t *pgd, unsigned long addr, | |||
588 | } | 582 | } |
589 | } | 583 | } |
590 | 584 | ||
585 | static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end, | ||
586 | unsigned long phys, const struct mem_type *type) | ||
587 | { | ||
588 | pud_t *pud = pud_offset(pgd, addr); | ||
589 | unsigned long next; | ||
590 | |||
591 | do { | ||
592 | next = pud_addr_end(addr, end); | ||
593 | alloc_init_section(pud, addr, next, phys, type); | ||
594 | phys += next - addr; | ||
595 | } while (pud++, addr = next, addr != end); | ||
596 | } | ||
597 | |||
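The new alloc_init_pud() follows the kernel's standard per-level walk: pud_addr_end() yields the end of the current pud's span capped at the mapping's end, and the physical cursor advances in step. The loop skeleton:

    /* illustrative only: the canonical level-walk loop shape */
    do {
        next = pud_addr_end(addr, end);    /* this pud's span, capped at end */
        /* ... handle [addr, next) one level down ... */
        phys += next - addr;
    } while (pud++, addr = next, addr != end);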
591 | static void __init create_36bit_mapping(struct map_desc *md, | 598 | static void __init create_36bit_mapping(struct map_desc *md, |
592 | const struct mem_type *type) | 599 | const struct mem_type *type) |
593 | { | 600 | { |
594 | unsigned long phys, addr, length, end; | 601 | unsigned long addr, length, end; |
602 | phys_addr_t phys; | ||
595 | pgd_t *pgd; | 603 | pgd_t *pgd; |
596 | 604 | ||
597 | addr = md->virtual; | 605 | addr = md->virtual; |
598 | phys = (unsigned long)__pfn_to_phys(md->pfn); | 606 | phys = __pfn_to_phys(md->pfn); |
599 | length = PAGE_ALIGN(md->length); | 607 | length = PAGE_ALIGN(md->length); |
600 | 608 | ||
601 | if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) { | 609 | if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) { |
602 | printk(KERN_ERR "MM: CPU does not support supersection " | 610 | printk(KERN_ERR "MM: CPU does not support supersection " |
603 | "mapping for 0x%08llx at 0x%08lx\n", | 611 | "mapping for 0x%08llx at 0x%08lx\n", |
604 | __pfn_to_phys((u64)md->pfn), addr); | 612 | (long long)__pfn_to_phys((u64)md->pfn), addr); |
605 | return; | 613 | return; |
606 | } | 614 | } |
607 | 615 | ||
@@ -614,14 +622,14 @@ static void __init create_36bit_mapping(struct map_desc *md, | |||
614 | if (type->domain) { | 622 | if (type->domain) { |
615 | printk(KERN_ERR "MM: invalid domain in supersection " | 623 | printk(KERN_ERR "MM: invalid domain in supersection " |
616 | "mapping for 0x%08llx at 0x%08lx\n", | 624 | "mapping for 0x%08llx at 0x%08lx\n", |
617 | __pfn_to_phys((u64)md->pfn), addr); | 625 | (long long)__pfn_to_phys((u64)md->pfn), addr); |
618 | return; | 626 | return; |
619 | } | 627 | } |
620 | 628 | ||
621 | if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) { | 629 | if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) { |
622 | printk(KERN_ERR "MM: cannot create mapping for " | 630 | printk(KERN_ERR "MM: cannot create mapping for 0x%08llx" |
623 | "0x%08llx at 0x%08lx invalid alignment\n", | 631 | " at 0x%08lx invalid alignment\n", |
624 | __pfn_to_phys((u64)md->pfn), addr); | 632 | (long long)__pfn_to_phys((u64)md->pfn), addr); |
625 | return; | 633 | return; |
626 | } | 634 | } |
627 | 635 | ||
@@ -634,7 +642,8 @@ static void __init create_36bit_mapping(struct map_desc *md, | |||
634 | pgd = pgd_offset_k(addr); | 642 | pgd = pgd_offset_k(addr); |
635 | end = addr + length; | 643 | end = addr + length; |
636 | do { | 644 | do { |
637 | pmd_t *pmd = pmd_offset(pgd, addr); | 645 | pud_t *pud = pud_offset(pgd, addr); |
646 | pmd_t *pmd = pmd_offset(pud, addr); | ||
638 | int i; | 647 | int i; |
639 | 648 | ||
640 | for (i = 0; i < 16; i++) | 649 | for (i = 0; i < 16; i++) |
@@ -655,22 +664,23 @@ static void __init create_36bit_mapping(struct map_desc *md, | |||
655 | */ | 664 | */ |
656 | static void __init create_mapping(struct map_desc *md) | 665 | static void __init create_mapping(struct map_desc *md) |
657 | { | 666 | { |
658 | unsigned long phys, addr, length, end; | 667 | unsigned long addr, length, end; |
668 | phys_addr_t phys; | ||
659 | const struct mem_type *type; | 669 | const struct mem_type *type; |
660 | pgd_t *pgd; | 670 | pgd_t *pgd; |
661 | 671 | ||
662 | if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) { | 672 | if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) { |
663 | printk(KERN_WARNING "BUG: not creating mapping for " | 673 | printk(KERN_WARNING "BUG: not creating mapping for 0x%08llx" |
664 | "0x%08llx at 0x%08lx in user region\n", | 674 | " at 0x%08lx in user region\n", |
665 | __pfn_to_phys((u64)md->pfn), md->virtual); | 675 | (long long)__pfn_to_phys((u64)md->pfn), md->virtual); |
666 | return; | 676 | return; |
667 | } | 677 | } |
668 | 678 | ||
669 | if ((md->type == MT_DEVICE || md->type == MT_ROM) && | 679 | if ((md->type == MT_DEVICE || md->type == MT_ROM) && |
670 | md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) { | 680 | md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) { |
671 | printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx " | 681 | printk(KERN_WARNING "BUG: mapping for 0x%08llx" |
672 | "overlaps vmalloc space\n", | 682 | " at 0x%08lx overlaps vmalloc space\n", |
673 | __pfn_to_phys((u64)md->pfn), md->virtual); | 683 | (long long)__pfn_to_phys((u64)md->pfn), md->virtual); |
674 | } | 684 | } |
675 | 685 | ||
676 | type = &mem_types[md->type]; | 686 | type = &mem_types[md->type]; |
@@ -684,13 +694,13 @@ static void __init create_mapping(struct map_desc *md) | |||
684 | } | 694 | } |
685 | 695 | ||
686 | addr = md->virtual & PAGE_MASK; | 696 | addr = md->virtual & PAGE_MASK; |
687 | phys = (unsigned long)__pfn_to_phys(md->pfn); | 697 | phys = __pfn_to_phys(md->pfn); |
688 | length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK)); | 698 | length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK)); |
689 | 699 | ||
690 | if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) { | 700 | if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) { |
691 | printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not " | 701 | printk(KERN_WARNING "BUG: map for 0x%08llx at 0x%08lx can not " |
692 | "be mapped using pages, ignoring.\n", | 702 | "be mapped using pages, ignoring.\n", |
693 | __pfn_to_phys(md->pfn), addr); | 703 | (long long)__pfn_to_phys(md->pfn), addr); |
694 | return; | 704 | return; |
695 | } | 705 | } |
696 | 706 | ||
@@ -699,7 +709,7 @@ static void __init create_mapping(struct map_desc *md) | |||
699 | do { | 709 | do { |
700 | unsigned long next = pgd_addr_end(addr, end); | 710 | unsigned long next = pgd_addr_end(addr, end); |
701 | 711 | ||
702 | alloc_init_section(pgd, addr, next, phys, type); | 712 | alloc_init_pud(pgd, addr, next, phys, type); |
703 | 713 | ||
704 | phys += next - addr; | 714 | phys += next - addr; |
705 | addr = next; | 715 | addr = next; |
@@ -747,20 +757,18 @@ static int __init early_vmalloc(char *arg) | |||
747 | } | 757 | } |
748 | early_param("vmalloc", early_vmalloc); | 758 | early_param("vmalloc", early_vmalloc); |
749 | 759 | ||
750 | phys_addr_t lowmem_end_addr; | 760 | static phys_addr_t lowmem_limit __initdata = 0; |
751 | 761 | ||
752 | static void __init sanity_check_meminfo(void) | 762 | void __init sanity_check_meminfo(void) |
753 | { | 763 | { |
754 | int i, j, highmem = 0; | 764 | int i, j, highmem = 0; |
755 | 765 | ||
756 | lowmem_end_addr = __pa(vmalloc_min - 1) + 1; | ||
757 | |||
758 | for (i = 0, j = 0; i < meminfo.nr_banks; i++) { | 766 | for (i = 0, j = 0; i < meminfo.nr_banks; i++) { |
759 | struct membank *bank = &meminfo.bank[j]; | 767 | struct membank *bank = &meminfo.bank[j]; |
760 | *bank = meminfo.bank[i]; | 768 | *bank = meminfo.bank[i]; |
761 | 769 | ||
762 | #ifdef CONFIG_HIGHMEM | 770 | #ifdef CONFIG_HIGHMEM |
763 | if (__va(bank->start) > vmalloc_min || | 771 | if (__va(bank->start) >= vmalloc_min || |
764 | __va(bank->start) < (void *)PAGE_OFFSET) | 772 | __va(bank->start) < (void *)PAGE_OFFSET) |
765 | highmem = 1; | 773 | highmem = 1; |
766 | 774 | ||
@@ -796,9 +804,10 @@ static void __init sanity_check_meminfo(void) | |||
796 | */ | 804 | */ |
797 | if (__va(bank->start) >= vmalloc_min || | 805 | if (__va(bank->start) >= vmalloc_min || |
798 | __va(bank->start) < (void *)PAGE_OFFSET) { | 806 | __va(bank->start) < (void *)PAGE_OFFSET) { |
799 | printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx " | 807 | printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx " |
800 | "(vmalloc region overlap).\n", | 808 | "(vmalloc region overlap).\n", |
801 | bank->start, bank->start + bank->size - 1); | 809 | (unsigned long long)bank->start, |
810 | (unsigned long long)bank->start + bank->size - 1); | ||
802 | continue; | 811 | continue; |
803 | } | 812 | } |
804 | 813 | ||
@@ -809,13 +818,17 @@ static void __init sanity_check_meminfo(void) | |||
809 | if (__va(bank->start + bank->size) > vmalloc_min || | 818 | if (__va(bank->start + bank->size) > vmalloc_min || |
810 | __va(bank->start + bank->size) < __va(bank->start)) { | 819 | __va(bank->start + bank->size) < __va(bank->start)) { |
811 | unsigned long newsize = vmalloc_min - __va(bank->start); | 820 | unsigned long newsize = vmalloc_min - __va(bank->start); |
812 | printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx " | 821 | printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx " |
813 | "to -%.8lx (vmalloc region overlap).\n", | 822 | "to -%.8llx (vmalloc region overlap).\n", |
814 | bank->start, bank->start + bank->size - 1, | 823 | (unsigned long long)bank->start, |
815 | bank->start + newsize - 1); | 824 | (unsigned long long)bank->start + bank->size - 1, |
825 | (unsigned long long)bank->start + newsize - 1); | ||
816 | bank->size = newsize; | 826 | bank->size = newsize; |
817 | } | 827 | } |
818 | #endif | 828 | #endif |
829 | if (!bank->highmem && bank->start + bank->size > lowmem_limit) | ||
830 | lowmem_limit = bank->start + bank->size; | ||
831 | |||
819 | j++; | 832 | j++; |
820 | } | 833 | } |
821 | #ifdef CONFIG_HIGHMEM | 834 | #ifdef CONFIG_HIGHMEM |
@@ -829,18 +842,6 @@ static void __init sanity_check_meminfo(void) | |||
829 | * rather difficult. | 842 | * rather difficult. |
830 | */ | 843 | */ |
831 | reason = "with VIPT aliasing cache"; | 844 | reason = "with VIPT aliasing cache"; |
832 | #ifdef CONFIG_SMP | ||
833 | } else if (tlb_ops_need_broadcast()) { | ||
834 | /* | ||
835 | * kmap_high needs to occasionally flush TLB entries, | ||
836 | * however, if the TLB entries need to be broadcast | ||
837 | * we may deadlock: | ||
838 | * kmap_high(irqs off)->flush_all_zero_pkmaps-> | ||
839 | * flush_tlb_kernel_range->smp_call_function_many | ||
840 | * (must not be called with irqs off) | ||
841 | */ | ||
842 | reason = "without hardware TLB ops broadcasting"; | ||
843 | #endif | ||
844 | } | 845 | } |
845 | if (reason) { | 846 | if (reason) { |
846 | printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n", | 847 | printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n", |
@@ -851,11 +852,13 @@ static void __init sanity_check_meminfo(void) | |||
851 | } | 852 | } |
852 | #endif | 853 | #endif |
853 | meminfo.nr_banks = j; | 854 | meminfo.nr_banks = j; |
855 | memblock_set_current_limit(lowmem_limit); | ||
854 | } | 856 | } |
855 | 857 | ||
856 | static inline void prepare_page_table(void) | 858 | static inline void prepare_page_table(void) |
857 | { | 859 | { |
858 | unsigned long addr; | 860 | unsigned long addr; |
861 | phys_addr_t end; | ||
859 | 862 | ||
860 | /* | 863 | /* |
861 | * Clear out all the mappings below the kernel image. | 864 | * Clear out all the mappings below the kernel image. |
@@ -871,10 +874,17 @@ static inline void prepare_page_table(void) | |||
871 | pmd_clear(pmd_off_k(addr)); | 874 | pmd_clear(pmd_off_k(addr)); |
872 | 875 | ||
873 | /* | 876 | /* |
877 | * Find the end of the first block of lowmem. | ||
878 | */ | ||
879 | end = memblock.memory.regions[0].base + memblock.memory.regions[0].size; | ||
880 | if (end >= lowmem_limit) | ||
881 | end = lowmem_limit; | ||
882 | |||
883 | /* | ||
874 | * Clear out all the kernel space mappings, except for the first | 884 | * Clear out all the kernel space mappings, except for the first |
875 | * memory bank, up to the end of the vmalloc region. | 885 | * memory bank, up to the end of the vmalloc region. |
876 | */ | 886 | */ |
877 | for (addr = __phys_to_virt(bank_phys_end(&meminfo.bank[0])); | 887 | for (addr = __phys_to_virt(end); |
878 | addr < VMALLOC_END; addr += PGDIR_SIZE) | 888 | addr < VMALLOC_END; addr += PGDIR_SIZE) |
879 | pmd_clear(pmd_off_k(addr)); | 889 | pmd_clear(pmd_off_k(addr)); |
880 | } | 890 | } |
@@ -910,12 +920,11 @@ static void __init devicemaps_init(struct machine_desc *mdesc) | |||
910 | { | 920 | { |
911 | struct map_desc map; | 921 | struct map_desc map; |
912 | unsigned long addr; | 922 | unsigned long addr; |
913 | void *vectors; | ||
914 | 923 | ||
915 | /* | 924 | /* |
916 | * Allocate the vector page early. | 925 | * Allocate the vector page early. |
917 | */ | 926 | */ |
918 | vectors = early_alloc(PAGE_SIZE); | 927 | vectors_page = early_alloc(PAGE_SIZE); |
919 | 928 | ||
920 | for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE) | 929 | for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE) |
921 | pmd_clear(pmd_off_k(addr)); | 930 | pmd_clear(pmd_off_k(addr)); |
@@ -955,7 +964,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc) | |||
955 | * location (0xffff0000). If we aren't using high-vectors, also | 964 | * location (0xffff0000). If we aren't using high-vectors, also |
956 | * create a mapping at the low-vectors virtual address. | 965 | * create a mapping at the low-vectors virtual address. |
957 | */ | 966 | */ |
958 | map.pfn = __phys_to_pfn(virt_to_phys(vectors)); | 967 | map.pfn = __phys_to_pfn(virt_to_phys(vectors_page)); |
959 | map.virtual = 0xffff0000; | 968 | map.virtual = 0xffff0000; |
960 | map.length = PAGE_SIZE; | 969 | map.length = PAGE_SIZE; |
961 | map.type = MT_HIGH_VECTORS; | 970 | map.type = MT_HIGH_VECTORS; |
@@ -991,37 +1000,28 @@ static void __init kmap_init(void) | |||
991 | #endif | 1000 | #endif |
992 | } | 1001 | } |
993 | 1002 | ||
994 | static inline void map_memory_bank(struct membank *bank) | ||
995 | { | ||
996 | struct map_desc map; | ||
997 | |||
998 | map.pfn = bank_pfn_start(bank); | ||
999 | map.virtual = __phys_to_virt(bank_phys_start(bank)); | ||
1000 | map.length = bank_phys_size(bank); | ||
1001 | map.type = MT_MEMORY; | ||
1002 | |||
1003 | create_mapping(&map); | ||
1004 | } | ||
1005 | |||
1006 | static void __init map_lowmem(void) | 1003 | static void __init map_lowmem(void) |
1007 | { | 1004 | { |
1008 | struct meminfo *mi = &meminfo; | 1005 | struct memblock_region *reg; |
1009 | int i; | ||
1010 | 1006 | ||
1011 | /* Map all the lowmem memory banks. */ | 1007 | /* Map all the lowmem memory banks. */ |
1012 | for (i = 0; i < mi->nr_banks; i++) { | 1008 | for_each_memblock(memory, reg) { |
1013 | struct membank *bank = &mi->bank[i]; | 1009 | phys_addr_t start = reg->base; |
1010 | phys_addr_t end = start + reg->size; | ||
1011 | struct map_desc map; | ||
1012 | |||
1013 | if (end > lowmem_limit) | ||
1014 | end = lowmem_limit; | ||
1015 | if (start >= end) | ||
1016 | break; | ||
1014 | 1017 | ||
1015 | if (!bank->highmem) | 1018 | map.pfn = __phys_to_pfn(start); |
1016 | map_memory_bank(bank); | 1019 | map.virtual = __phys_to_virt(start); |
1017 | } | 1020 | map.length = end - start; |
1018 | } | 1021 | map.type = MT_MEMORY; |
1019 | 1022 | ||
1020 | static int __init meminfo_cmp(const void *_a, const void *_b) | 1023 | create_mapping(&map); |
1021 | { | 1024 | } |
1022 | const struct membank *a = _a, *b = _b; | ||
1023 | long cmp = bank_pfn_start(a) - bank_pfn_start(b); | ||
1024 | return cmp < 0 ? -1 : cmp > 0 ? 1 : 0; | ||
1025 | } | 1025 | } |
1026 | 1026 | ||
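map_lowmem() now iterates memblock regions directly, clamping each to lowmem_limit and stopping at the first region that begins above it (memblock keeps regions sorted by base, which is also why the old meminfo sort() call disappears from paging_init below). The clamp can be written more compactly; a sketch of the loop body:

    /* illustrative only: per-region clamp inside for_each_memblock(memory, reg) */
    phys_addr_t start = reg->base;
    phys_addr_t end   = min(start + reg->size, lowmem_limit);

    if (start >= end)                      /* everything left is highmem */
        break;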
1027 | /* | 1027 | /* |
@@ -1032,10 +1032,9 @@ void __init paging_init(struct machine_desc *mdesc) | |||
1032 | { | 1032 | { |
1033 | void *zero_page; | 1033 | void *zero_page; |
1034 | 1034 | ||
1035 | sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL); | 1035 | memblock_set_current_limit(lowmem_limit); |
1036 | 1036 | ||
1037 | build_mem_type_table(); | 1037 | build_mem_type_table(); |
1038 | sanity_check_meminfo(); | ||
1039 | prepare_page_table(); | 1038 | prepare_page_table(); |
1040 | map_lowmem(); | 1039 | map_lowmem(); |
1041 | devicemaps_init(mdesc); | 1040 | devicemaps_init(mdesc); |
@@ -1051,38 +1050,3 @@ void __init paging_init(struct machine_desc *mdesc) | |||
1051 | empty_zero_page = virt_to_page(zero_page); | 1050 | empty_zero_page = virt_to_page(zero_page); |
1052 | __flush_dcache_page(NULL, empty_zero_page); | 1051 | __flush_dcache_page(NULL, empty_zero_page); |
1053 | } | 1052 | } |
1054 | |||
1055 | /* | ||
1056 | * In order to soft-boot, we need to insert a 1:1 mapping in place of | ||
1057 | * the user-mode pages. This will then ensure that we have predictable | ||
1058 | * results when turning the mmu off | ||
1059 | */ | ||
1060 | void setup_mm_for_reboot(char mode) | ||
1061 | { | ||
1062 | unsigned long base_pmdval; | ||
1063 | pgd_t *pgd; | ||
1064 | int i; | ||
1065 | |||
1066 | /* | ||
1067 | * We need to access to user-mode page tables here. For kernel threads | ||
1068 | * we don't have any user-mode mappings so we use the context that we | ||
1069 | * "borrowed". | ||
1070 | */ | ||
1071 | pgd = current->active_mm->pgd; | ||
1072 | |||
1073 | base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT; | ||
1074 | if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) | ||
1075 | base_pmdval |= PMD_BIT4; | ||
1076 | |||
1077 | for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) { | ||
1078 | unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval; | ||
1079 | pmd_t *pmd; | ||
1080 | |||
1081 | pmd = pmd_off(pgd, i << PGDIR_SHIFT); | ||
1082 | pmd[0] = __pmd(pmdval); | ||
1083 | pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1))); | ||
1084 | flush_pmd_entry(pmd); | ||
1085 | } | ||
1086 | |||
1087 | local_flush_tlb_all(); | ||
1088 | } | ||
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 687d02319a41..941a98c9e8aa 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c | |||
@@ -27,6 +27,10 @@ void __init arm_mm_memblock_reserve(void) | |||
27 | memblock_reserve(CONFIG_VECTORS_BASE, PAGE_SIZE); | 27 | memblock_reserve(CONFIG_VECTORS_BASE, PAGE_SIZE); |
28 | } | 28 | } |
29 | 29 | ||
30 | void __init sanity_check_meminfo(void) | ||
31 | { | ||
32 | } | ||
33 | |||
30 | /* | 34 | /* |
31 | * paging_init() sets up the page tables, initialises the zone memory | 35 | * paging_init() sets up the page tables, initialises the zone memory |
32 | * maps, and sets up the zero page, bad page and bad page tables. | 36 | * maps, and sets up the zero page, bad page and bad page tables. |
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c index be5f58e153bf..b2027c154b2a 100644 --- a/arch/arm/mm/pgd.c +++ b/arch/arm/mm/pgd.c | |||
@@ -17,14 +17,13 @@ | |||
17 | 17 | ||
18 | #include "mm.h" | 18 | #include "mm.h" |
19 | 19 | ||
20 | #define FIRST_KERNEL_PGD_NR (FIRST_USER_PGD_NR + USER_PTRS_PER_PGD) | ||
21 | |||
22 | /* | 20 | /* |
23 | * need to get a 16k page for level 1 | 21 | * need to get a 16k page for level 1 |
24 | */ | 22 | */ |
25 | pgd_t *get_pgd_slow(struct mm_struct *mm) | 23 | pgd_t *pgd_alloc(struct mm_struct *mm) |
26 | { | 24 | { |
27 | pgd_t *new_pgd, *init_pgd; | 25 | pgd_t *new_pgd, *init_pgd; |
26 | pud_t *new_pud, *init_pud; | ||
28 | pmd_t *new_pmd, *init_pmd; | 27 | pmd_t *new_pmd, *init_pmd; |
29 | pte_t *new_pte, *init_pte; | 28 | pte_t *new_pte, *init_pte; |
30 | 29 | ||
@@ -32,14 +31,14 @@ pgd_t *get_pgd_slow(struct mm_struct *mm) | |||
32 | if (!new_pgd) | 31 | if (!new_pgd) |
33 | goto no_pgd; | 32 | goto no_pgd; |
34 | 33 | ||
35 | memset(new_pgd, 0, FIRST_KERNEL_PGD_NR * sizeof(pgd_t)); | 34 | memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t)); |
36 | 35 | ||
37 | /* | 36 | /* |
38 | * Copy over the kernel and IO PGD entries | 37 | * Copy over the kernel and IO PGD entries |
39 | */ | 38 | */ |
40 | init_pgd = pgd_offset_k(0); | 39 | init_pgd = pgd_offset_k(0); |
41 | memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR, | 40 | memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD, |
42 | (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t)); | 41 | (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); |
43 | 42 | ||
44 | clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t)); | 43 | clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t)); |
45 | 44 | ||
@@ -48,18 +47,23 @@ pgd_t *get_pgd_slow(struct mm_struct *mm) | |||
48 | * On ARM, first page must always be allocated since it | 47 | * On ARM, first page must always be allocated since it |
49 | * contains the machine vectors. | 48 | * contains the machine vectors. |
50 | */ | 49 | */ |
51 | new_pmd = pmd_alloc(mm, new_pgd, 0); | 50 | new_pud = pud_alloc(mm, new_pgd, 0); |
51 | if (!new_pud) | ||
52 | goto no_pud; | ||
53 | |||
54 | new_pmd = pmd_alloc(mm, new_pud, 0); | ||
52 | if (!new_pmd) | 55 | if (!new_pmd) |
53 | goto no_pmd; | 56 | goto no_pmd; |
54 | 57 | ||
55 | new_pte = pte_alloc_map(mm, new_pmd, 0); | 58 | new_pte = pte_alloc_map(mm, NULL, new_pmd, 0); |
56 | if (!new_pte) | 59 | if (!new_pte) |
57 | goto no_pte; | 60 | goto no_pte; |
58 | 61 | ||
59 | init_pmd = pmd_offset(init_pgd, 0); | 62 | init_pud = pud_offset(init_pgd, 0); |
60 | init_pte = pte_offset_map_nested(init_pmd, 0); | 63 | init_pmd = pmd_offset(init_pud, 0); |
64 | init_pte = pte_offset_map(init_pmd, 0); | ||
61 | set_pte_ext(new_pte, *init_pte, 0); | 65 | set_pte_ext(new_pte, *init_pte, 0); |
62 | pte_unmap_nested(init_pte); | 66 | pte_unmap(init_pte); |
63 | pte_unmap(new_pte); | 67 | pte_unmap(new_pte); |
64 | } | 68 | } |
65 | 69 | ||
@@ -68,33 +72,44 @@ pgd_t *get_pgd_slow(struct mm_struct *mm) | |||
68 | no_pte: | 72 | no_pte: |
69 | pmd_free(mm, new_pmd); | 73 | pmd_free(mm, new_pmd); |
70 | no_pmd: | 74 | no_pmd: |
75 | pud_free(mm, new_pud); | ||
76 | no_pud: | ||
71 | free_pages((unsigned long)new_pgd, 2); | 77 | free_pages((unsigned long)new_pgd, 2); |
72 | no_pgd: | 78 | no_pgd: |
73 | return NULL; | 79 | return NULL; |
74 | } | 80 | } |
75 | 81 | ||
76 | void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd) | 82 | void pgd_free(struct mm_struct *mm, pgd_t *pgd_base) |
77 | { | 83 | { |
84 | pgd_t *pgd; | ||
85 | pud_t *pud; | ||
78 | pmd_t *pmd; | 86 | pmd_t *pmd; |
79 | pgtable_t pte; | 87 | pgtable_t pte; |
80 | 88 | ||
81 | if (!pgd) | 89 | if (!pgd_base) |
82 | return; | 90 | return; |
83 | 91 | ||
84 | /* pgd is always present and good */ | 92 | pgd = pgd_base + pgd_index(0); |
85 | pmd = pmd_off(pgd, 0); | 93 | if (pgd_none_or_clear_bad(pgd)) |
86 | if (pmd_none(*pmd)) | 94 | goto no_pgd; |
87 | goto free; | 95 | |
88 | if (pmd_bad(*pmd)) { | 96 | pud = pud_offset(pgd, 0); |
89 | pmd_ERROR(*pmd); | 97 | if (pud_none_or_clear_bad(pud)) |
90 | pmd_clear(pmd); | 98 | goto no_pud; |
91 | goto free; | 99 | |
92 | } | 100 | pmd = pmd_offset(pud, 0); |
101 | if (pmd_none_or_clear_bad(pmd)) | ||
102 | goto no_pmd; | ||
93 | 103 | ||
94 | pte = pmd_pgtable(*pmd); | 104 | pte = pmd_pgtable(*pmd); |
95 | pmd_clear(pmd); | 105 | pmd_clear(pmd); |
96 | pte_free(mm, pte); | 106 | pte_free(mm, pte); |
107 | no_pmd: | ||
108 | pud_clear(pud); | ||
97 | pmd_free(mm, pmd); | 109 | pmd_free(mm, pmd); |
98 | free: | 110 | no_pud: |
99 | free_pages((unsigned long) pgd, 2); | 111 | pgd_clear(pgd); |
112 | pud_free(mm, pud); | ||
113 | no_pgd: | ||
114 | free_pages((unsigned long) pgd_base, 2); | ||
100 | } | 115 | } |
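Both pgd_alloc() and pgd_free() gain a pud level here, and both keep the classic allocate-then-unwind shape: error labels release resources in the reverse of the order they were taken. A minimal sketch of that shape (all names hypothetical):

    /* illustrative only: goto-unwind mirrors allocation order in reverse */
    int build_tables(void)
    {
        void *a, *b;

        a = take_level_one();              /* hypothetical allocators */
        if (!a)
            goto no_a;
        b = take_level_two();
        if (!b)
            goto no_b;
        return 0;
    no_b:
        put_level_one(a);
    no_a:
        return -ENOMEM;
    }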
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S index 203a4e944d9e..6c4e7fd6c8af 100644 --- a/arch/arm/mm/proc-arm1020.S +++ b/arch/arm/mm/proc-arm1020.S | |||
@@ -64,7 +64,7 @@ | |||
64 | /* | 64 | /* |
65 | * This is the size at which it becomes more efficient to | 65 | * This is the size at which it becomes more efficient to |
66 | * clean the whole cache, rather than using the individual | 66 | * clean the whole cache, rather than using the individual |
67 | * cache line maintainence instructions. | 67 | * cache line maintenance instructions. |
68 | */ | 68 | */ |
69 | #define CACHE_DLIMIT 32768 | 69 | #define CACHE_DLIMIT 32768 |
70 | 70 | ||
@@ -119,6 +119,20 @@ ENTRY(cpu_arm1020_do_idle) | |||
119 | /* ================================= CACHE ================================ */ | 119 | /* ================================= CACHE ================================ */ |
120 | 120 | ||
121 | .align 5 | 121 | .align 5 |
122 | |||
123 | /* | ||
124 | * flush_icache_all() | ||
125 | * | ||
126 | * Unconditionally clean and invalidate the entire icache. | ||
127 | */ | ||
128 | ENTRY(arm1020_flush_icache_all) | ||
129 | #ifndef CONFIG_CPU_ICACHE_DISABLE | ||
130 | mov r0, #0 | ||
131 | mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache | ||
132 | #endif | ||
133 | mov pc, lr | ||
134 | ENDPROC(arm1020_flush_icache_all) | ||
135 | |||
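Each proc-*.S file in the remainder of this series gets the same pair of additions: a flush_icache_all entry prepended to its cache_fns table, and three zeroed words appended to its processor_functions table for newly introduced hooks this CPU does not implement. Both tables are arrays of function pointers consumed by C code, so every per-CPU copy must change in lockstep; in C terms, a sketch:

    /* illustrative only: a new leading member forces every table to grow */
    struct cache_fns_example {
        void (*flush_icache_all)(void);    /* the new first slot */
        void (*flush_kern_all)(void);
        /* ... pre-existing entries follow in the old order ... */
    };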
122 | /* | 136 | /* |
123 | * flush_user_cache_all() | 137 | * flush_user_cache_all() |
124 | * | 138 | * |
@@ -351,6 +365,7 @@ ENTRY(arm1020_dma_unmap_area) | |||
351 | ENDPROC(arm1020_dma_unmap_area) | 365 | ENDPROC(arm1020_dma_unmap_area) |
352 | 366 | ||
353 | ENTRY(arm1020_cache_fns) | 367 | ENTRY(arm1020_cache_fns) |
368 | .long arm1020_flush_icache_all | ||
354 | .long arm1020_flush_kern_cache_all | 369 | .long arm1020_flush_kern_cache_all |
355 | .long arm1020_flush_user_cache_all | 370 | .long arm1020_flush_user_cache_all |
356 | .long arm1020_flush_user_cache_range | 371 | .long arm1020_flush_user_cache_range |
@@ -430,7 +445,7 @@ ENTRY(cpu_arm1020_set_pte_ext) | |||
430 | #endif /* CONFIG_MMU */ | 445 | #endif /* CONFIG_MMU */ |
431 | mov pc, lr | 446 | mov pc, lr |
432 | 447 | ||
433 | __INIT | 448 | __CPUINIT |
434 | 449 | ||
435 | .type __arm1020_setup, #function | 450 | .type __arm1020_setup, #function |
436 | __arm1020_setup: | 451 | __arm1020_setup: |
@@ -478,6 +493,9 @@ arm1020_processor_functions: | |||
478 | .word cpu_arm1020_dcache_clean_area | 493 | .word cpu_arm1020_dcache_clean_area |
479 | .word cpu_arm1020_switch_mm | 494 | .word cpu_arm1020_switch_mm |
480 | .word cpu_arm1020_set_pte_ext | 495 | .word cpu_arm1020_set_pte_ext |
496 | .word 0 | ||
497 | .word 0 | ||
498 | .word 0 | ||
481 | .size arm1020_processor_functions, . - arm1020_processor_functions | 499 | .size arm1020_processor_functions, . - arm1020_processor_functions |
482 | 500 | ||
483 | .section ".rodata" | 501 | .section ".rodata" |
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S index 1a511e765909..4ce947c19623 100644 --- a/arch/arm/mm/proc-arm1020e.S +++ b/arch/arm/mm/proc-arm1020e.S | |||
@@ -64,7 +64,7 @@ | |||
64 | /* | 64 | /* |
65 | * This is the size at which it becomes more efficient to | 65 | * This is the size at which it becomes more efficient to |
66 | * clean the whole cache, rather than using the individual | 66 | * clean the whole cache, rather than using the individual |
67 | * cache line maintainence instructions. | 67 | * cache line maintenance instructions. |
68 | */ | 68 | */ |
69 | #define CACHE_DLIMIT 32768 | 69 | #define CACHE_DLIMIT 32768 |
70 | 70 | ||
@@ -119,6 +119,20 @@ ENTRY(cpu_arm1020e_do_idle) | |||
119 | /* ================================= CACHE ================================ */ | 119 | /* ================================= CACHE ================================ */ |
120 | 120 | ||
121 | .align 5 | 121 | .align 5 |
122 | |||
123 | /* | ||
124 | * flush_icache_all() | ||
125 | * | ||
126 | * Unconditionally clean and invalidate the entire icache. | ||
127 | */ | ||
128 | ENTRY(arm1020e_flush_icache_all) | ||
129 | #ifndef CONFIG_CPU_ICACHE_DISABLE | ||
130 | mov r0, #0 | ||
131 | mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache | ||
132 | #endif | ||
133 | mov pc, lr | ||
134 | ENDPROC(arm1020e_flush_icache_all) | ||
135 | |||
122 | /* | 136 | /* |
123 | * flush_user_cache_all() | 137 | * flush_user_cache_all() |
124 | * | 138 | * |
@@ -337,6 +351,7 @@ ENTRY(arm1020e_dma_unmap_area) | |||
337 | ENDPROC(arm1020e_dma_unmap_area) | 351 | ENDPROC(arm1020e_dma_unmap_area) |
338 | 352 | ||
339 | ENTRY(arm1020e_cache_fns) | 353 | ENTRY(arm1020e_cache_fns) |
354 | .long arm1020e_flush_icache_all | ||
340 | .long arm1020e_flush_kern_cache_all | 355 | .long arm1020e_flush_kern_cache_all |
341 | .long arm1020e_flush_user_cache_all | 356 | .long arm1020e_flush_user_cache_all |
342 | .long arm1020e_flush_user_cache_range | 357 | .long arm1020e_flush_user_cache_range |
@@ -412,7 +427,7 @@ ENTRY(cpu_arm1020e_set_pte_ext) | |||
412 | #endif /* CONFIG_MMU */ | 427 | #endif /* CONFIG_MMU */ |
413 | mov pc, lr | 428 | mov pc, lr |
414 | 429 | ||
415 | __INIT | 430 | __CPUINIT |
416 | 431 | ||
417 | .type __arm1020e_setup, #function | 432 | .type __arm1020e_setup, #function |
418 | __arm1020e_setup: | 433 | __arm1020e_setup: |
@@ -459,6 +474,9 @@ arm1020e_processor_functions: | |||
459 | .word cpu_arm1020e_dcache_clean_area | 474 | .word cpu_arm1020e_dcache_clean_area |
460 | .word cpu_arm1020e_switch_mm | 475 | .word cpu_arm1020e_switch_mm |
461 | .word cpu_arm1020e_set_pte_ext | 476 | .word cpu_arm1020e_set_pte_ext |
477 | .word 0 | ||
478 | .word 0 | ||
479 | .word 0 | ||
462 | .size arm1020e_processor_functions, . - arm1020e_processor_functions | 480 | .size arm1020e_processor_functions, . - arm1020e_processor_functions |
463 | 481 | ||
464 | .section ".rodata" | 482 | .section ".rodata" |
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S index 1ffa4eb9c34f..c8884c5413a2 100644 --- a/arch/arm/mm/proc-arm1022.S +++ b/arch/arm/mm/proc-arm1022.S | |||
@@ -53,7 +53,7 @@ | |||
53 | /* | 53 | /* |
54 | * This is the size at which it becomes more efficient to | 54 | * This is the size at which it becomes more efficient to |
55 | * clean the whole cache, rather than using the individual | 55 | * clean the whole cache, rather than using the individual |
56 | * cache line maintainence instructions. | 56 | * cache line maintenance instructions. |
57 | */ | 57 | */ |
58 | #define CACHE_DLIMIT 32768 | 58 | #define CACHE_DLIMIT 32768 |
59 | 59 | ||
@@ -108,6 +108,20 @@ ENTRY(cpu_arm1022_do_idle) | |||
108 | /* ================================= CACHE ================================ */ | 108 | /* ================================= CACHE ================================ */ |
109 | 109 | ||
110 | .align 5 | 110 | .align 5 |
111 | |||
112 | /* | ||
113 | * flush_icache_all() | ||
114 | * | ||
115 | * Unconditionally clean and invalidate the entire icache. | ||
116 | */ | ||
117 | ENTRY(arm1022_flush_icache_all) | ||
118 | #ifndef CONFIG_CPU_ICACHE_DISABLE | ||
119 | mov r0, #0 | ||
120 | mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache | ||
121 | #endif | ||
122 | mov pc, lr | ||
123 | ENDPROC(arm1022_flush_icache_all) | ||
124 | |||
111 | /* | 125 | /* |
112 | * flush_user_cache_all() | 126 | * flush_user_cache_all() |
113 | * | 127 | * |
@@ -326,6 +340,7 @@ ENTRY(arm1022_dma_unmap_area) | |||
326 | ENDPROC(arm1022_dma_unmap_area) | 340 | ENDPROC(arm1022_dma_unmap_area) |
327 | 341 | ||
328 | ENTRY(arm1022_cache_fns) | 342 | ENTRY(arm1022_cache_fns) |
343 | .long arm1022_flush_icache_all | ||
329 | .long arm1022_flush_kern_cache_all | 344 | .long arm1022_flush_kern_cache_all |
330 | .long arm1022_flush_user_cache_all | 345 | .long arm1022_flush_user_cache_all |
331 | .long arm1022_flush_user_cache_range | 346 | .long arm1022_flush_user_cache_range |
@@ -394,7 +409,7 @@ ENTRY(cpu_arm1022_set_pte_ext) | |||
394 | #endif /* CONFIG_MMU */ | 409 | #endif /* CONFIG_MMU */ |
395 | mov pc, lr | 410 | mov pc, lr |
396 | 411 | ||
397 | __INIT | 412 | __CPUINIT |
398 | 413 | ||
399 | .type __arm1022_setup, #function | 414 | .type __arm1022_setup, #function |
400 | __arm1022_setup: | 415 | __arm1022_setup: |
@@ -442,6 +457,9 @@ arm1022_processor_functions: | |||
442 | .word cpu_arm1022_dcache_clean_area | 457 | .word cpu_arm1022_dcache_clean_area |
443 | .word cpu_arm1022_switch_mm | 458 | .word cpu_arm1022_switch_mm |
444 | .word cpu_arm1022_set_pte_ext | 459 | .word cpu_arm1022_set_pte_ext |
460 | .word 0 | ||
461 | .word 0 | ||
462 | .word 0 | ||
445 | .size arm1022_processor_functions, . - arm1022_processor_functions | 463 | .size arm1022_processor_functions, . - arm1022_processor_functions |
446 | 464 | ||
447 | .section ".rodata" | 465 | .section ".rodata" |
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S index 5697c34b95b0..413684660aad 100644 --- a/arch/arm/mm/proc-arm1026.S +++ b/arch/arm/mm/proc-arm1026.S | |||
@@ -53,7 +53,7 @@ | |||
53 | /* | 53 | /* |
54 | * This is the size at which it becomes more efficient to | 54 | * This is the size at which it becomes more efficient to |
55 | * clean the whole cache, rather than using the individual | 55 | * clean the whole cache, rather than using the individual |
56 | * cache line maintainence instructions. | 56 | * cache line maintenance instructions. |
57 | */ | 57 | */ |
58 | #define CACHE_DLIMIT 32768 | 58 | #define CACHE_DLIMIT 32768 |
59 | 59 | ||
@@ -108,6 +108,20 @@ ENTRY(cpu_arm1026_do_idle) | |||
108 | /* ================================= CACHE ================================ */ | 108 | /* ================================= CACHE ================================ */ |
109 | 109 | ||
110 | .align 5 | 110 | .align 5 |
111 | |||
112 | /* | ||
113 | * flush_icache_all() | ||
114 | * | ||
115 | * Unconditionally clean and invalidate the entire icache. | ||
116 | */ | ||
117 | ENTRY(arm1026_flush_icache_all) | ||
118 | #ifndef CONFIG_CPU_ICACHE_DISABLE | ||
119 | mov r0, #0 | ||
120 | mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache | ||
121 | #endif | ||
122 | mov pc, lr | ||
123 | ENDPROC(arm1026_flush_icache_all) | ||
124 | |||
111 | /* | 125 | /* |
112 | * flush_user_cache_all() | 126 | * flush_user_cache_all() |
113 | * | 127 | * |
@@ -320,6 +334,7 @@ ENTRY(arm1026_dma_unmap_area) | |||
320 | ENDPROC(arm1026_dma_unmap_area) | 334 | ENDPROC(arm1026_dma_unmap_area) |
321 | 335 | ||
322 | ENTRY(arm1026_cache_fns) | 336 | ENTRY(arm1026_cache_fns) |
337 | .long arm1026_flush_icache_all | ||
323 | .long arm1026_flush_kern_cache_all | 338 | .long arm1026_flush_kern_cache_all |
324 | .long arm1026_flush_user_cache_all | 339 | .long arm1026_flush_user_cache_all |
325 | .long arm1026_flush_user_cache_range | 340 | .long arm1026_flush_user_cache_range |
@@ -384,7 +399,7 @@ ENTRY(cpu_arm1026_set_pte_ext) | |||
384 | mov pc, lr | 399 | mov pc, lr |
385 | 400 | ||
386 | 401 | ||
387 | __INIT | 402 | __CPUINIT |
388 | 403 | ||
389 | .type __arm1026_setup, #function | 404 | .type __arm1026_setup, #function |
390 | __arm1026_setup: | 405 | __arm1026_setup: |
@@ -437,6 +452,9 @@ arm1026_processor_functions: | |||
437 | .word cpu_arm1026_dcache_clean_area | 452 | .word cpu_arm1026_dcache_clean_area |
438 | .word cpu_arm1026_switch_mm | 453 | .word cpu_arm1026_switch_mm |
439 | .word cpu_arm1026_set_pte_ext | 454 | .word cpu_arm1026_set_pte_ext |
455 | .word 0 | ||
456 | .word 0 | ||
457 | .word 0 | ||
440 | .size arm1026_processor_functions, . - arm1026_processor_functions | 458 | .size arm1026_processor_functions, . - arm1026_processor_functions |
441 | 459 | ||
442 | .section .rodata | 460 | .section .rodata |
diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S index 64e0b327c7c5..5f79dc4ce3fb 100644 --- a/arch/arm/mm/proc-arm6_7.S +++ b/arch/arm/mm/proc-arm6_7.S | |||
@@ -238,7 +238,7 @@ ENTRY(cpu_arm7_reset) | |||
238 | mcr p15, 0, r1, c1, c0, 0 @ turn off MMU etc | 238 | mcr p15, 0, r1, c1, c0, 0 @ turn off MMU etc |
239 | mov pc, r0 | 239 | mov pc, r0 |
240 | 240 | ||
241 | __INIT | 241 | __CPUINIT |
242 | 242 | ||
243 | .type __arm6_setup, #function | 243 | .type __arm6_setup, #function |
244 | __arm6_setup: mov r0, #0 | 244 | __arm6_setup: mov r0, #0 |
@@ -284,6 +284,9 @@ ENTRY(arm6_processor_functions) | |||
284 | .word cpu_arm6_dcache_clean_area | 284 | .word cpu_arm6_dcache_clean_area |
285 | .word cpu_arm6_switch_mm | 285 | .word cpu_arm6_switch_mm |
286 | .word cpu_arm6_set_pte_ext | 286 | .word cpu_arm6_set_pte_ext |
287 | .word 0 | ||
288 | .word 0 | ||
289 | .word 0 | ||
287 | .size arm6_processor_functions, . - arm6_processor_functions | 290 | .size arm6_processor_functions, . - arm6_processor_functions |
288 | 291 | ||
289 | /* | 292 | /* |
@@ -301,6 +304,9 @@ ENTRY(arm7_processor_functions) | |||
301 | .word cpu_arm7_dcache_clean_area | 304 | .word cpu_arm7_dcache_clean_area |
302 | .word cpu_arm7_switch_mm | 305 | .word cpu_arm7_switch_mm |
303 | .word cpu_arm7_set_pte_ext | 306 | .word cpu_arm7_set_pte_ext |
307 | .word 0 | ||
308 | .word 0 | ||
309 | .word 0 | ||
304 | .size arm7_processor_functions, . - arm7_processor_functions | 310 | .size arm7_processor_functions, . - arm7_processor_functions |
305 | 311 | ||
306 | .section ".rodata" | 312 | .section ".rodata" |
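The __INIT to __CPUINIT switch seen here, and in every setup routine below, moves the per-CPU setup code into a section that can be retained and re-run when a CPU is brought up later, rather than being discarded with the boot-time init sections. A rough C illustration of the mechanism, using an invented section name rather than the kernel's real macro definition:

#include <stdio.h>

/* Stand-in for __CPUINIT: tag the routine with a named section that a
 * linker script may keep for CPU hotplug instead of freeing after boot. */
#define demo_cpuinit __attribute__((__section__(".demo.cpuinit.text")))

static int demo_cpuinit demo_cpu_setup(void)
{
	/* a real __arm920_setup would program cp15 control registers */
	return 0;
}

int main(void)
{
	printf("setup returned %d\n", demo_cpu_setup());
	return 0;
}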
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S index 9d96824134fc..7a06e5964f59 100644 --- a/arch/arm/mm/proc-arm720.S +++ b/arch/arm/mm/proc-arm720.S | |||
@@ -63,7 +63,7 @@ ENTRY(cpu_arm720_proc_fin) | |||
63 | /* | 63 | /* |
64 | * Function: arm720_proc_do_idle(void) | 64 | * Function: arm720_proc_do_idle(void) |
65 | * Params : r0 = unused | 65 | * Params : r0 = unused |
66 | * Purpose : put the processer in proper idle mode | 66 | * Purpose : put the processor in proper idle mode |
67 | */ | 67 | */ |
68 | ENTRY(cpu_arm720_do_idle) | 68 | ENTRY(cpu_arm720_do_idle) |
69 | mov pc, lr | 69 | mov pc, lr |
@@ -113,7 +113,7 @@ ENTRY(cpu_arm720_reset) | |||
113 | mcr p15, 0, ip, c1, c0, 0 @ ctrl register | 113 | mcr p15, 0, ip, c1, c0, 0 @ ctrl register |
114 | mov pc, r0 | 114 | mov pc, r0 |
115 | 115 | ||
116 | __INIT | 116 | __CPUINIT |
117 | 117 | ||
118 | .type __arm710_setup, #function | 118 | .type __arm710_setup, #function |
119 | __arm710_setup: | 119 | __arm710_setup: |
@@ -185,6 +185,9 @@ ENTRY(arm720_processor_functions) | |||
185 | .word cpu_arm720_dcache_clean_area | 185 | .word cpu_arm720_dcache_clean_area |
186 | .word cpu_arm720_switch_mm | 186 | .word cpu_arm720_switch_mm |
187 | .word cpu_arm720_set_pte_ext | 187 | .word cpu_arm720_set_pte_ext |
188 | .word 0 | ||
189 | .word 0 | ||
190 | .word 0 | ||
188 | .size arm720_processor_functions, . - arm720_processor_functions | 191 | .size arm720_processor_functions, . - arm720_processor_functions |
189 | 192 | ||
190 | .section ".rodata" | 193 | .section ".rodata" |
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S index 6c1a9ab059ae..6f9d12effee1 100644 --- a/arch/arm/mm/proc-arm740.S +++ b/arch/arm/mm/proc-arm740.S | |||
@@ -55,7 +55,7 @@ ENTRY(cpu_arm740_reset) | |||
55 | mcr p15, 0, ip, c1, c0, 0 @ ctrl register | 55 | mcr p15, 0, ip, c1, c0, 0 @ ctrl register |
56 | mov pc, r0 | 56 | mov pc, r0 |
57 | 57 | ||
58 | __INIT | 58 | __CPUINIT |
59 | 59 | ||
60 | .type __arm740_setup, #function | 60 | .type __arm740_setup, #function |
61 | __arm740_setup: | 61 | __arm740_setup: |
@@ -130,6 +130,9 @@ ENTRY(arm740_processor_functions) | |||
130 | .word cpu_arm740_dcache_clean_area | 130 | .word cpu_arm740_dcache_clean_area |
131 | .word cpu_arm740_switch_mm | 131 | .word cpu_arm740_switch_mm |
132 | .word 0 @ cpu_*_set_pte | 132 | .word 0 @ cpu_*_set_pte |
133 | .word 0 | ||
134 | .word 0 | ||
135 | .word 0 | ||
133 | .size arm740_processor_functions, . - arm740_processor_functions | 136 | .size arm740_processor_functions, . - arm740_processor_functions |
134 | 137 | ||
135 | .section ".rodata" | 138 | .section ".rodata" |
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S index 6a850dbba22e..537ffcb0646d 100644 --- a/arch/arm/mm/proc-arm7tdmi.S +++ b/arch/arm/mm/proc-arm7tdmi.S | |||
@@ -46,7 +46,7 @@ ENTRY(cpu_arm7tdmi_proc_fin) | |||
46 | ENTRY(cpu_arm7tdmi_reset) | 46 | ENTRY(cpu_arm7tdmi_reset) |
47 | mov pc, r0 | 47 | mov pc, r0 |
48 | 48 | ||
49 | __INIT | 49 | __CPUINIT |
50 | 50 | ||
51 | .type __arm7tdmi_setup, #function | 51 | .type __arm7tdmi_setup, #function |
52 | __arm7tdmi_setup: | 52 | __arm7tdmi_setup: |
@@ -70,6 +70,9 @@ ENTRY(arm7tdmi_processor_functions) | |||
70 | .word cpu_arm7tdmi_dcache_clean_area | 70 | .word cpu_arm7tdmi_dcache_clean_area |
71 | .word cpu_arm7tdmi_switch_mm | 71 | .word cpu_arm7tdmi_switch_mm |
72 | .word 0 @ cpu_*_set_pte | 72 | .word 0 @ cpu_*_set_pte |
73 | .word 0 | ||
74 | .word 0 | ||
75 | .word 0 | ||
73 | .size arm7tdmi_processor_functions, . - arm7tdmi_processor_functions | 76 | .size arm7tdmi_processor_functions, . - arm7tdmi_processor_functions |
74 | 77 | ||
75 | .section ".rodata" | 78 | .section ".rodata" |
@@ -143,7 +146,7 @@ __arm7tdmi_proc_info: | |||
143 | .long 0 | 146 | .long 0 |
144 | .long 0 | 147 | .long 0 |
145 | .long v4_cache_fns | 148 | .long v4_cache_fns |
146 | .size __arm7tdmi_proc_info, . - __arm7dmi_proc_info | 149 | .size __arm7tdmi_proc_info, . - __arm7tdmi_proc_info |
147 | 150 | ||
148 | .type __triscenda7_proc_info, #object | 151 | .type __triscenda7_proc_info, #object |
149 | __triscenda7_proc_info: | 152 | __triscenda7_proc_info: |
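The three .word 0 entries appended to the arm6/arm7/arm720/arm740/arm7tdmi tables (and most tables below) pad the new suspend_size/do_suspend/do_resume slots on cores with no power-management support, keeping every processor_functions table the same size and slot order. A short C sketch of that convention, with illustrative field names:

#include <stddef.h>
#include <stdio.h>

struct proc_fns {
	void   (*set_pte_ext)(void);
	size_t   suspend_size;           /* the three slots this diff adds */
	void   (*do_suspend)(void *buf);
	void   (*do_resume)(void *buf);
};

static void demo_set_pte(void) { }

/* A core without PM support, like arm7tdmi here: the suspend slots are
 * simply left 0/NULL, matching the three .word 0 pads. */
static const struct proc_fns arm7tdmi_like = {
	.set_pte_ext = demo_set_pte,
};

int main(void)
{
	if (arm7tdmi_like.do_suspend == NULL)
		puts("core has no suspend support");
	return 0;
}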
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S index 86f80aa56216..bf8a1d1cccb6 100644 --- a/arch/arm/mm/proc-arm920.S +++ b/arch/arm/mm/proc-arm920.S | |||
@@ -53,7 +53,7 @@ | |||
53 | /* | 53 | /* |
54 | * This is the size at which it becomes more efficient to | 54 | * This is the size at which it becomes more efficient to |
55 | * clean the whole cache, rather than using the individual | 55 | * clean the whole cache, rather than using the individual |
56 | * cache line maintainence instructions. | 56 | * cache line maintenance instructions. |
57 | */ | 57 | */ |
58 | #define CACHE_DLIMIT 65536 | 58 | #define CACHE_DLIMIT 65536 |
59 | 59 | ||
@@ -110,6 +110,17 @@ ENTRY(cpu_arm920_do_idle) | |||
110 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH | 110 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH |
111 | 111 | ||
112 | /* | 112 | /* |
113 | * flush_icache_all() | ||
114 | * | ||
115 | * Unconditionally clean and invalidate the entire icache. | ||
116 | */ | ||
117 | ENTRY(arm920_flush_icache_all) | ||
118 | mov r0, #0 | ||
119 | mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache | ||
120 | mov pc, lr | ||
121 | ENDPROC(arm920_flush_icache_all) | ||
122 | |||
123 | /* | ||
113 | * flush_user_cache_all() | 124 | * flush_user_cache_all() |
114 | * | 125 | * |
115 | * Invalidate all cache entries in a particular address | 126 | * Invalidate all cache entries in a particular address |
@@ -305,6 +316,7 @@ ENTRY(arm920_dma_unmap_area) | |||
305 | ENDPROC(arm920_dma_unmap_area) | 316 | ENDPROC(arm920_dma_unmap_area) |
306 | 317 | ||
307 | ENTRY(arm920_cache_fns) | 318 | ENTRY(arm920_cache_fns) |
319 | .long arm920_flush_icache_all | ||
308 | .long arm920_flush_kern_cache_all | 320 | .long arm920_flush_kern_cache_all |
309 | .long arm920_flush_user_cache_all | 321 | .long arm920_flush_user_cache_all |
310 | .long arm920_flush_user_cache_range | 322 | .long arm920_flush_user_cache_range |
@@ -375,7 +387,41 @@ ENTRY(cpu_arm920_set_pte_ext) | |||
375 | #endif | 387 | #endif |
376 | mov pc, lr | 388 | mov pc, lr |
377 | 389 | ||
378 | __INIT | 390 | /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ |
391 | .globl cpu_arm920_suspend_size | ||
392 | .equ cpu_arm920_suspend_size, 4 * 3 | ||
393 | #ifdef CONFIG_PM_SLEEP | ||
394 | ENTRY(cpu_arm920_do_suspend) | ||
395 | stmfd sp!, {r4 - r7, lr} | ||
396 | mrc p15, 0, r4, c13, c0, 0 @ PID | ||
397 | mrc p15, 0, r5, c3, c0, 0 @ Domain ID | ||
398 | mrc p15, 0, r6, c2, c0, 0 @ TTB address | ||
399 | mrc p15, 0, r7, c1, c0, 0 @ Control register | ||
400 | stmia r0, {r4 - r7} | ||
401 | ldmfd sp!, {r4 - r7, pc} | ||
402 | ENDPROC(cpu_arm920_do_suspend) | ||
403 | |||
404 | ENTRY(cpu_arm920_do_resume) | ||
405 | mov ip, #0 | ||
406 | mcr p15, 0, ip, c8, c7, 0 @ invalidate I+D TLBs | ||
407 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I+D caches | ||
408 | ldmia r0, {r4 - r7} | ||
409 | mcr p15, 0, r4, c13, c0, 0 @ PID | ||
410 | mcr p15, 0, r5, c3, c0, 0 @ Domain ID | ||
411 | mcr p15, 0, r6, c2, c0, 0 @ TTB address | ||
412 | mov r0, r7 @ control register | ||
413 | mov r2, r6, lsr #14 @ get TTB0 base | ||
414 | mov r2, r2, lsl #14 | ||
415 | ldr r3, =PMD_TYPE_SECT | PMD_SECT_BUFFERABLE | \ | ||
416 | PMD_SECT_CACHEABLE | PMD_BIT4 | PMD_SECT_AP_WRITE | ||
417 | b cpu_resume_mmu | ||
418 | ENDPROC(cpu_arm920_do_resume) | ||
419 | #else | ||
420 | #define cpu_arm920_do_suspend 0 | ||
421 | #define cpu_arm920_do_resume 0 | ||
422 | #endif | ||
423 | |||
424 | __CPUINIT | ||
379 | 425 | ||
380 | .type __arm920_setup, #function | 426 | .type __arm920_setup, #function |
381 | __arm920_setup: | 427 | __arm920_setup: |
@@ -420,6 +466,9 @@ arm920_processor_functions: | |||
420 | .word cpu_arm920_dcache_clean_area | 466 | .word cpu_arm920_dcache_clean_area |
421 | .word cpu_arm920_switch_mm | 467 | .word cpu_arm920_switch_mm |
422 | .word cpu_arm920_set_pte_ext | 468 | .word cpu_arm920_set_pte_ext |
469 | .word cpu_arm920_suspend_size | ||
470 | .word cpu_arm920_do_suspend | ||
471 | .word cpu_arm920_do_resume | ||
423 | .size arm920_processor_functions, . - arm920_processor_functions | 472 | .size arm920_processor_functions, . - arm920_processor_functions |
424 | 473 | ||
425 | .section ".rodata" | 474 | .section ".rodata" |
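For cores that do implement the new slots, as arm920 does above, the contract is: the caller hands in a save area of cpu_arm920_suspend_size bytes, do_suspend fills it from cp15 registers, and do_resume reads it back in the same order before branching to cpu_resume_mmu. Worth noticing in the hunk: the size is declared as 4 * 3 while stmia r0, {r4 - r7} stores four words, so the declared size appears one word short of what is actually written. A small C sketch of the save/restore pairing (values are stand-ins, not real coprocessor reads):

#include <stdint.h>
#include <stdio.h>

static void demo_do_suspend(uint32_t *area)
{
	/* the real code reads PID, Domain ID, TTB and control from cp15 */
	area[0] = 0x00000000u;           /* PID      */
	area[1] = 0x0000001fu;           /* Domain   */
	area[2] = 0x80004000u;           /* TTB      */
	area[3] = 0x0005317fu;           /* Control  -- four words stored */
}

static void demo_do_resume(const uint32_t *area)
{
	/* TTB0 base is 16KiB-aligned: the lsr #14 / lsl #14 pair in the
	 * assembly just clears the low 14 flag bits, as done here. */
	uint32_t ttb0_base = area[2] & ~((1u << 14) - 1);
	printf("ctrl=%#x ttb0=%#x\n", (unsigned)area[3],
	       (unsigned)ttb0_base);
}

int main(void)
{
	uint32_t area[4];                /* sized by words actually stored */
	demo_do_suspend(area);
	demo_do_resume(area);
	return 0;
}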
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S index f76ce9b62883..95ba1fc56e4d 100644 --- a/arch/arm/mm/proc-arm922.S +++ b/arch/arm/mm/proc-arm922.S | |||
@@ -54,7 +54,7 @@ | |||
54 | /* | 54 | /* |
55 | * This is the size at which it becomes more efficient to | 55 | * This is the size at which it becomes more efficient to |
56 | * clean the whole cache, rather than using the individual | 56 | * clean the whole cache, rather than using the individual |
57 | * cache line maintainence instructions. (I think this should | 57 | * cache line maintenance instructions. (I think this should |
58 | * be 32768). | 58 | * be 32768). |
59 | */ | 59 | */ |
60 | #define CACHE_DLIMIT 8192 | 60 | #define CACHE_DLIMIT 8192 |
@@ -112,6 +112,17 @@ ENTRY(cpu_arm922_do_idle) | |||
112 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH | 112 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH |
113 | 113 | ||
114 | /* | 114 | /* |
115 | * flush_icache_all() | ||
116 | * | ||
117 | * Unconditionally clean and invalidate the entire icache. | ||
118 | */ | ||
119 | ENTRY(arm922_flush_icache_all) | ||
120 | mov r0, #0 | ||
121 | mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache | ||
122 | mov pc, lr | ||
123 | ENDPROC(arm922_flush_icache_all) | ||
124 | |||
125 | /* | ||
115 | * flush_user_cache_all() | 126 | * flush_user_cache_all() |
116 | * | 127 | * |
117 | * Clean and invalidate all cache entries in a particular | 128 | * Clean and invalidate all cache entries in a particular |
@@ -307,6 +318,7 @@ ENTRY(arm922_dma_unmap_area) | |||
307 | ENDPROC(arm922_dma_unmap_area) | 318 | ENDPROC(arm922_dma_unmap_area) |
308 | 319 | ||
309 | ENTRY(arm922_cache_fns) | 320 | ENTRY(arm922_cache_fns) |
321 | .long arm922_flush_icache_all | ||
310 | .long arm922_flush_kern_cache_all | 322 | .long arm922_flush_kern_cache_all |
311 | .long arm922_flush_user_cache_all | 323 | .long arm922_flush_user_cache_all |
312 | .long arm922_flush_user_cache_range | 324 | .long arm922_flush_user_cache_range |
@@ -379,7 +391,7 @@ ENTRY(cpu_arm922_set_pte_ext) | |||
379 | #endif /* CONFIG_MMU */ | 391 | #endif /* CONFIG_MMU */ |
380 | mov pc, lr | 392 | mov pc, lr |
381 | 393 | ||
382 | __INIT | 394 | __CPUINIT |
383 | 395 | ||
384 | .type __arm922_setup, #function | 396 | .type __arm922_setup, #function |
385 | __arm922_setup: | 397 | __arm922_setup: |
@@ -424,6 +436,9 @@ arm922_processor_functions: | |||
424 | .word cpu_arm922_dcache_clean_area | 436 | .word cpu_arm922_dcache_clean_area |
425 | .word cpu_arm922_switch_mm | 437 | .word cpu_arm922_switch_mm |
426 | .word cpu_arm922_set_pte_ext | 438 | .word cpu_arm922_set_pte_ext |
439 | .word 0 | ||
440 | .word 0 | ||
441 | .word 0 | ||
427 | .size arm922_processor_functions, . - arm922_processor_functions | 442 | .size arm922_processor_functions, . - arm922_processor_functions |
428 | 443 | ||
429 | .section ".rodata" | 444 | .section ".rodata" |
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S index 657bd3f7c153..541e4774eea1 100644 --- a/arch/arm/mm/proc-arm925.S +++ b/arch/arm/mm/proc-arm925.S | |||
@@ -77,7 +77,7 @@ | |||
77 | /* | 77 | /* |
78 | * This is the size at which it becomes more efficient to | 78 | * This is the size at which it becomes more efficient to |
79 | * clean the whole cache, rather than using the individual | 79 | * clean the whole cache, rather than using the individual |
80 | * cache line maintainence instructions. | 80 | * cache line maintenance instructions. |
81 | */ | 81 | */ |
82 | #define CACHE_DLIMIT 8192 | 82 | #define CACHE_DLIMIT 8192 |
83 | 83 | ||
@@ -145,6 +145,17 @@ ENTRY(cpu_arm925_do_idle) | |||
145 | mov pc, lr | 145 | mov pc, lr |
146 | 146 | ||
147 | /* | 147 | /* |
148 | * flush_icache_all() | ||
149 | * | ||
150 | * Unconditionally clean and invalidate the entire icache. | ||
151 | */ | ||
152 | ENTRY(arm925_flush_icache_all) | ||
153 | mov r0, #0 | ||
154 | mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache | ||
155 | mov pc, lr | ||
156 | ENDPROC(arm925_flush_icache_all) | ||
157 | |||
158 | /* | ||
148 | * flush_user_cache_all() | 159 | * flush_user_cache_all() |
149 | * | 160 | * |
150 | * Clean and invalidate all cache entries in a particular | 161 | * Clean and invalidate all cache entries in a particular |
@@ -362,6 +373,7 @@ ENTRY(arm925_dma_unmap_area) | |||
362 | ENDPROC(arm925_dma_unmap_area) | 373 | ENDPROC(arm925_dma_unmap_area) |
363 | 374 | ||
364 | ENTRY(arm925_cache_fns) | 375 | ENTRY(arm925_cache_fns) |
376 | .long arm925_flush_icache_all | ||
365 | .long arm925_flush_kern_cache_all | 377 | .long arm925_flush_kern_cache_all |
366 | .long arm925_flush_user_cache_all | 378 | .long arm925_flush_user_cache_all |
367 | .long arm925_flush_user_cache_range | 379 | .long arm925_flush_user_cache_range |
@@ -428,7 +440,7 @@ ENTRY(cpu_arm925_set_pte_ext) | |||
428 | #endif /* CONFIG_MMU */ | 440 | #endif /* CONFIG_MMU */ |
429 | mov pc, lr | 441 | mov pc, lr |
430 | 442 | ||
431 | __INIT | 443 | __CPUINIT |
432 | 444 | ||
433 | .type __arm925_setup, #function | 445 | .type __arm925_setup, #function |
434 | __arm925_setup: | 446 | __arm925_setup: |
@@ -491,6 +503,9 @@ arm925_processor_functions: | |||
491 | .word cpu_arm925_dcache_clean_area | 503 | .word cpu_arm925_dcache_clean_area |
492 | .word cpu_arm925_switch_mm | 504 | .word cpu_arm925_switch_mm |
493 | .word cpu_arm925_set_pte_ext | 505 | .word cpu_arm925_set_pte_ext |
506 | .word 0 | ||
507 | .word 0 | ||
508 | .word 0 | ||
494 | .size arm925_processor_functions, . - arm925_processor_functions | 509 | .size arm925_processor_functions, . - arm925_processor_functions |
495 | 510 | ||
496 | .section ".rodata" | 511 | .section ".rodata" |
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S index 73f1f3c68910..0ed85d930c09 100644 --- a/arch/arm/mm/proc-arm926.S +++ b/arch/arm/mm/proc-arm926.S | |||
@@ -111,6 +111,17 @@ ENTRY(cpu_arm926_do_idle) | |||
111 | mov pc, lr | 111 | mov pc, lr |
112 | 112 | ||
113 | /* | 113 | /* |
114 | * flush_icache_all() | ||
115 | * | ||
116 | * Unconditionally clean and invalidate the entire icache. | ||
117 | */ | ||
118 | ENTRY(arm926_flush_icache_all) | ||
119 | mov r0, #0 | ||
120 | mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache | ||
121 | mov pc, lr | ||
122 | ENDPROC(arm926_flush_icache_all) | ||
123 | |||
124 | /* | ||
114 | * flush_user_cache_all() | 125 | * flush_user_cache_all() |
115 | * | 126 | * |
116 | * Clean and invalidate all cache entries in a particular | 127 | * Clean and invalidate all cache entries in a particular |
@@ -325,6 +336,7 @@ ENTRY(arm926_dma_unmap_area) | |||
325 | ENDPROC(arm926_dma_unmap_area) | 336 | ENDPROC(arm926_dma_unmap_area) |
326 | 337 | ||
327 | ENTRY(arm926_cache_fns) | 338 | ENTRY(arm926_cache_fns) |
339 | .long arm926_flush_icache_all | ||
328 | .long arm926_flush_kern_cache_all | 340 | .long arm926_flush_kern_cache_all |
329 | .long arm926_flush_user_cache_all | 341 | .long arm926_flush_user_cache_all |
330 | .long arm926_flush_user_cache_range | 342 | .long arm926_flush_user_cache_range |
@@ -389,7 +401,41 @@ ENTRY(cpu_arm926_set_pte_ext) | |||
389 | #endif | 401 | #endif |
390 | mov pc, lr | 402 | mov pc, lr |
391 | 403 | ||
392 | __INIT | 404 | /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ |
405 | .globl cpu_arm926_suspend_size | ||
406 | .equ cpu_arm926_suspend_size, 4 * 3 | ||
407 | #ifdef CONFIG_PM_SLEEP | ||
408 | ENTRY(cpu_arm926_do_suspend) | ||
409 | stmfd sp!, {r4 - r7, lr} | ||
410 | mrc p15, 0, r4, c13, c0, 0 @ PID | ||
411 | mrc p15, 0, r5, c3, c0, 0 @ Domain ID | ||
412 | mrc p15, 0, r6, c2, c0, 0 @ TTB address | ||
413 | mrc p15, 0, r7, c1, c0, 0 @ Control register | ||
414 | stmia r0, {r4 - r7} | ||
415 | ldmfd sp!, {r4 - r7, pc} | ||
416 | ENDPROC(cpu_arm926_do_suspend) | ||
417 | |||
418 | ENTRY(cpu_arm926_do_resume) | ||
419 | mov ip, #0 | ||
420 | mcr p15, 0, ip, c8, c7, 0 @ invalidate I+D TLBs | ||
421 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I+D caches | ||
422 | ldmia r0, {r4 - r7} | ||
423 | mcr p15, 0, r4, c13, c0, 0 @ PID | ||
424 | mcr p15, 0, r5, c3, c0, 0 @ Domain ID | ||
425 | mcr p15, 0, r6, c2, c0, 0 @ TTB address | ||
426 | mov r0, r7 @ control register | ||
427 | mov r2, r6, lsr #14 @ get TTB0 base | ||
428 | mov r2, r2, lsl #14 | ||
429 | ldr r3, =PMD_TYPE_SECT | PMD_SECT_BUFFERABLE | \ | ||
430 | PMD_SECT_CACHEABLE | PMD_BIT4 | PMD_SECT_AP_WRITE | ||
431 | b cpu_resume_mmu | ||
432 | ENDPROC(cpu_arm926_do_resume) | ||
433 | #else | ||
434 | #define cpu_arm926_do_suspend 0 | ||
435 | #define cpu_arm926_do_resume 0 | ||
436 | #endif | ||
437 | |||
438 | __CPUINIT | ||
393 | 439 | ||
394 | .type __arm926_setup, #function | 440 | .type __arm926_setup, #function |
395 | __arm926_setup: | 441 | __arm926_setup: |
@@ -444,6 +490,9 @@ arm926_processor_functions: | |||
444 | .word cpu_arm926_dcache_clean_area | 490 | .word cpu_arm926_dcache_clean_area |
445 | .word cpu_arm926_switch_mm | 491 | .word cpu_arm926_switch_mm |
446 | .word cpu_arm926_set_pte_ext | 492 | .word cpu_arm926_set_pte_ext |
493 | .word cpu_arm926_suspend_size | ||
494 | .word cpu_arm926_do_suspend | ||
495 | .word cpu_arm926_do_resume | ||
447 | .size arm926_processor_functions, . - arm926_processor_functions | 496 | .size arm926_processor_functions, . - arm926_processor_functions |
448 | 497 | ||
449 | .section ".rodata" | 498 | .section ".rodata" |
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S index fffb061a45a5..26aea3f71c26 100644 --- a/arch/arm/mm/proc-arm940.S +++ b/arch/arm/mm/proc-arm940.S | |||
@@ -68,6 +68,17 @@ ENTRY(cpu_arm940_do_idle) | |||
68 | mov pc, lr | 68 | mov pc, lr |
69 | 69 | ||
70 | /* | 70 | /* |
71 | * flush_icache_all() | ||
72 | * | ||
73 | * Unconditionally clean and invalidate the entire icache. | ||
74 | */ | ||
75 | ENTRY(arm940_flush_icache_all) | ||
76 | mov r0, #0 | ||
77 | mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache | ||
78 | mov pc, lr | ||
79 | ENDPROC(arm940_flush_icache_all) | ||
80 | |||
81 | /* | ||
71 | * flush_user_cache_all() | 82 | * flush_user_cache_all() |
72 | */ | 83 | */ |
73 | ENTRY(arm940_flush_user_cache_all) | 84 | ENTRY(arm940_flush_user_cache_all) |
@@ -254,6 +265,7 @@ ENTRY(arm940_dma_unmap_area) | |||
254 | ENDPROC(arm940_dma_unmap_area) | 265 | ENDPROC(arm940_dma_unmap_area) |
255 | 266 | ||
256 | ENTRY(arm940_cache_fns) | 267 | ENTRY(arm940_cache_fns) |
268 | .long arm940_flush_icache_all | ||
257 | .long arm940_flush_kern_cache_all | 269 | .long arm940_flush_kern_cache_all |
258 | .long arm940_flush_user_cache_all | 270 | .long arm940_flush_user_cache_all |
259 | .long arm940_flush_user_cache_range | 271 | .long arm940_flush_user_cache_range |
@@ -264,7 +276,7 @@ ENTRY(arm940_cache_fns) | |||
264 | .long arm940_dma_unmap_area | 276 | .long arm940_dma_unmap_area |
265 | .long arm940_dma_flush_range | 277 | .long arm940_dma_flush_range |
266 | 278 | ||
267 | __INIT | 279 | __CPUINIT |
268 | 280 | ||
269 | .type __arm940_setup, #function | 281 | .type __arm940_setup, #function |
270 | __arm940_setup: | 282 | __arm940_setup: |
@@ -351,6 +363,9 @@ ENTRY(arm940_processor_functions) | |||
351 | .word cpu_arm940_dcache_clean_area | 363 | .word cpu_arm940_dcache_clean_area |
352 | .word cpu_arm940_switch_mm | 364 | .word cpu_arm940_switch_mm |
353 | .word 0 @ cpu_*_set_pte | 365 | .word 0 @ cpu_*_set_pte |
366 | .word 0 | ||
367 | .word 0 | ||
368 | .word 0 | ||
354 | .size arm940_processor_functions, . - arm940_processor_functions | 369 | .size arm940_processor_functions, . - arm940_processor_functions |
355 | 370 | ||
356 | .section ".rodata" | 371 | .section ".rodata" |
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S index 249a6053760a..8063345406fe 100644 --- a/arch/arm/mm/proc-arm946.S +++ b/arch/arm/mm/proc-arm946.S | |||
@@ -75,6 +75,17 @@ ENTRY(cpu_arm946_do_idle) | |||
75 | mov pc, lr | 75 | mov pc, lr |
76 | 76 | ||
77 | /* | 77 | /* |
78 | * flush_icache_all() | ||
79 | * | ||
80 | * Unconditionally clean and invalidate the entire icache. | ||
81 | */ | ||
82 | ENTRY(arm946_flush_icache_all) | ||
83 | mov r0, #0 | ||
84 | mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache | ||
85 | mov pc, lr | ||
86 | ENDPROC(arm946_flush_icache_all) | ||
87 | |||
88 | /* | ||
78 | * flush_user_cache_all() | 89 | * flush_user_cache_all() |
79 | */ | 90 | */ |
80 | ENTRY(arm946_flush_user_cache_all) | 91 | ENTRY(arm946_flush_user_cache_all) |
@@ -296,6 +307,7 @@ ENTRY(arm946_dma_unmap_area) | |||
296 | ENDPROC(arm946_dma_unmap_area) | 307 | ENDPROC(arm946_dma_unmap_area) |
297 | 308 | ||
298 | ENTRY(arm946_cache_fns) | 309 | ENTRY(arm946_cache_fns) |
310 | .long arm946_flush_icache_all | ||
299 | .long arm946_flush_kern_cache_all | 311 | .long arm946_flush_kern_cache_all |
300 | .long arm946_flush_user_cache_all | 312 | .long arm946_flush_user_cache_all |
301 | .long arm946_flush_user_cache_range | 313 | .long arm946_flush_user_cache_range |
@@ -317,7 +329,7 @@ ENTRY(cpu_arm946_dcache_clean_area) | |||
317 | mcr p15, 0, r0, c7, c10, 4 @ drain WB | 329 | mcr p15, 0, r0, c7, c10, 4 @ drain WB |
318 | mov pc, lr | 330 | mov pc, lr |
319 | 331 | ||
320 | __INIT | 332 | __CPUINIT |
321 | 333 | ||
322 | .type __arm946_setup, #function | 334 | .type __arm946_setup, #function |
323 | __arm946_setup: | 335 | __arm946_setup: |
@@ -407,6 +419,9 @@ ENTRY(arm946_processor_functions) | |||
407 | .word cpu_arm946_dcache_clean_area | 419 | .word cpu_arm946_dcache_clean_area |
408 | .word cpu_arm946_switch_mm | 420 | .word cpu_arm946_switch_mm |
409 | .word 0 @ cpu_*_set_pte | 421 | .word 0 @ cpu_*_set_pte |
422 | .word 0 | ||
423 | .word 0 | ||
424 | .word 0 | ||
410 | .size arm946_processor_functions, . - arm946_processor_functions | 425 | .size arm946_processor_functions, . - arm946_processor_functions |
411 | 426 | ||
412 | .section ".rodata" | 427 | .section ".rodata" |
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S index db475667fac2..546b54da1005 100644 --- a/arch/arm/mm/proc-arm9tdmi.S +++ b/arch/arm/mm/proc-arm9tdmi.S | |||
@@ -46,7 +46,7 @@ ENTRY(cpu_arm9tdmi_proc_fin) | |||
46 | ENTRY(cpu_arm9tdmi_reset) | 46 | ENTRY(cpu_arm9tdmi_reset) |
47 | mov pc, r0 | 47 | mov pc, r0 |
48 | 48 | ||
49 | __INIT | 49 | __CPUINIT |
50 | 50 | ||
51 | .type __arm9tdmi_setup, #function | 51 | .type __arm9tdmi_setup, #function |
52 | __arm9tdmi_setup: | 52 | __arm9tdmi_setup: |
@@ -70,6 +70,9 @@ ENTRY(arm9tdmi_processor_functions) | |||
70 | .word cpu_arm9tdmi_dcache_clean_area | 70 | .word cpu_arm9tdmi_dcache_clean_area |
71 | .word cpu_arm9tdmi_switch_mm | 71 | .word cpu_arm9tdmi_switch_mm |
72 | .word 0 @ cpu_*_set_pte | 72 | .word 0 @ cpu_*_set_pte |
73 | .word 0 | ||
74 | .word 0 | ||
75 | .word 0 | ||
73 | .size arm9tdmi_processor_functions, . - arm9tdmi_processor_functions | 76 | .size arm9tdmi_processor_functions, . - arm9tdmi_processor_functions |
74 | 77 | ||
75 | .section ".rodata" | 78 | .section ".rodata" |
@@ -113,7 +116,7 @@ __arm9tdmi_proc_info: | |||
113 | .long 0 | 116 | .long 0 |
114 | .long 0 | 117 | .long 0 |
115 | .long v4_cache_fns | 118 | .long v4_cache_fns |
116 | .size __arm9tdmi_proc_info, . - __arm9dmi_proc_info | 119 | .size __arm9tdmi_proc_info, . - __arm9tdmi_proc_info |
117 | 120 | ||
118 | .type __p2001_proc_info, #object | 121 | .type __p2001_proc_info, #object |
119 | __p2001_proc_info: | 122 | __p2001_proc_info: |
diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S index 7803fdf70029..fc2a4ae15cf4 100644 --- a/arch/arm/mm/proc-fa526.S +++ b/arch/arm/mm/proc-fa526.S | |||
@@ -134,7 +134,7 @@ ENTRY(cpu_fa526_set_pte_ext) | |||
134 | #endif | 134 | #endif |
135 | mov pc, lr | 135 | mov pc, lr |
136 | 136 | ||
137 | __INIT | 137 | __CPUINIT |
138 | 138 | ||
139 | .type __fa526_setup, #function | 139 | .type __fa526_setup, #function |
140 | __fa526_setup: | 140 | __fa526_setup: |
@@ -195,6 +195,9 @@ fa526_processor_functions: | |||
195 | .word cpu_fa526_dcache_clean_area | 195 | .word cpu_fa526_dcache_clean_area |
196 | .word cpu_fa526_switch_mm | 196 | .word cpu_fa526_switch_mm |
197 | .word cpu_fa526_set_pte_ext | 197 | .word cpu_fa526_set_pte_ext |
198 | .word 0 | ||
199 | .word 0 | ||
200 | .word 0 | ||
198 | .size fa526_processor_functions, . - fa526_processor_functions | 201 | .size fa526_processor_functions, . - fa526_processor_functions |
199 | 202 | ||
200 | .section ".rodata" | 203 | .section ".rodata" |
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S index b304d0104a4e..d3883eed7a4a 100644 --- a/arch/arm/mm/proc-feroceon.S +++ b/arch/arm/mm/proc-feroceon.S | |||
@@ -124,6 +124,17 @@ ENTRY(cpu_feroceon_do_idle) | |||
124 | mov pc, lr | 124 | mov pc, lr |
125 | 125 | ||
126 | /* | 126 | /* |
127 | * flush_icache_all() | ||
128 | * | ||
129 | * Unconditionally clean and invalidate the entire icache. | ||
130 | */ | ||
131 | ENTRY(feroceon_flush_icache_all) | ||
132 | mov r0, #0 | ||
133 | mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache | ||
134 | mov pc, lr | ||
135 | ENDPROC(feroceon_flush_icache_all) | ||
136 | |||
137 | /* | ||
127 | * flush_user_cache_all() | 138 | * flush_user_cache_all() |
128 | * | 139 | * |
129 | * Clean and invalidate all cache entries in a particular | 140 | * Clean and invalidate all cache entries in a particular |
@@ -401,6 +412,7 @@ ENTRY(feroceon_dma_unmap_area) | |||
401 | ENDPROC(feroceon_dma_unmap_area) | 412 | ENDPROC(feroceon_dma_unmap_area) |
402 | 413 | ||
403 | ENTRY(feroceon_cache_fns) | 414 | ENTRY(feroceon_cache_fns) |
415 | .long feroceon_flush_icache_all | ||
404 | .long feroceon_flush_kern_cache_all | 416 | .long feroceon_flush_kern_cache_all |
405 | .long feroceon_flush_user_cache_all | 417 | .long feroceon_flush_user_cache_all |
406 | .long feroceon_flush_user_cache_range | 418 | .long feroceon_flush_user_cache_range |
@@ -412,6 +424,7 @@ ENTRY(feroceon_cache_fns) | |||
412 | .long feroceon_dma_flush_range | 424 | .long feroceon_dma_flush_range |
413 | 425 | ||
414 | ENTRY(feroceon_range_cache_fns) | 426 | ENTRY(feroceon_range_cache_fns) |
427 | .long feroceon_flush_icache_all | ||
415 | .long feroceon_flush_kern_cache_all | 428 | .long feroceon_flush_kern_cache_all |
416 | .long feroceon_flush_user_cache_all | 429 | .long feroceon_flush_user_cache_all |
417 | .long feroceon_flush_user_cache_range | 430 | .long feroceon_flush_user_cache_range |
@@ -494,7 +507,7 @@ ENTRY(cpu_feroceon_set_pte_ext) | |||
494 | #endif | 507 | #endif |
495 | mov pc, lr | 508 | mov pc, lr |
496 | 509 | ||
497 | __INIT | 510 | __CPUINIT |
498 | 511 | ||
499 | .type __feroceon_setup, #function | 512 | .type __feroceon_setup, #function |
500 | __feroceon_setup: | 513 | __feroceon_setup: |
@@ -541,6 +554,9 @@ feroceon_processor_functions: | |||
541 | .word cpu_feroceon_dcache_clean_area | 554 | .word cpu_feroceon_dcache_clean_area |
542 | .word cpu_feroceon_switch_mm | 555 | .word cpu_feroceon_switch_mm |
543 | .word cpu_feroceon_set_pte_ext | 556 | .word cpu_feroceon_set_pte_ext |
557 | .word 0 | ||
558 | .word 0 | ||
559 | .word 0 | ||
544 | .size feroceon_processor_functions, . - feroceon_processor_functions | 560 | .size feroceon_processor_functions, . - feroceon_processor_functions |
545 | 561 | ||
546 | .section ".rodata" | 562 | .section ".rodata" |
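Feroceon is the one core here carrying two cache ops tables, the plain feroceon_cache_fns and feroceon_range_cache_fns for variants whose cache supports range operations, so both tables need the new flush_icache_all slot. A sketch of picking one ops table at init (the boolean selector below is a stand-in; the kernel actually selects per proc_info match):

#include <stdio.h>

struct cache_ops { const char *name; };

static const struct cache_ops plain_cache_fns_like = { "per-line ops" };
static const struct cache_ops range_cache_fns_like = { "range ops" };

static const struct cache_ops *pick_cache_fns(int has_range_ops)
{
	return has_range_ops ? &range_cache_fns_like
			     : &plain_cache_fns_like;
}

int main(void)
{
	printf("using %s\n", pick_cache_fns(1)->name);
	return 0;
}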
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S index 7d63beaf9745..34261f9486b9 100644 --- a/arch/arm/mm/proc-macros.S +++ b/arch/arm/mm/proc-macros.S | |||
@@ -61,27 +61,37 @@ | |||
61 | .endm | 61 | .endm |
62 | 62 | ||
63 | /* | 63 | /* |
64 | * cache_line_size - get the cache line size from the CSIDR register | 64 | * dcache_line_size - get the minimum D-cache line size from the CTR register |
65 | * (available on ARMv7+). It assumes that the CSSR register was configured | 65 | * on ARMv7. |
66 | * to access the L1 data cache CSIDR. | ||
67 | */ | 66 | */ |
68 | .macro dcache_line_size, reg, tmp | 67 | .macro dcache_line_size, reg, tmp |
69 | mrc p15, 1, \tmp, c0, c0, 0 @ read CSIDR | 68 | mrc p15, 0, \tmp, c0, c0, 1 @ read ctr |
70 | and \tmp, \tmp, #7 @ cache line size encoding | 69 | lsr \tmp, \tmp, #16 |
71 | mov \reg, #16 @ size offset | 70 | and \tmp, \tmp, #0xf @ cache line size encoding |
71 | mov \reg, #4 @ bytes per word | ||
72 | mov \reg, \reg, lsl \tmp @ actual cache line size | 72 | mov \reg, \reg, lsl \tmp @ actual cache line size |
73 | .endm | 73 | .endm |
74 | 74 | ||
75 | /* | ||
76 | * icache_line_size - get the minimum I-cache line size from the CTR register | ||
77 | * on ARMv7. | ||
78 | */ | ||
79 | .macro icache_line_size, reg, tmp | ||
80 | mrc p15, 0, \tmp, c0, c0, 1 @ read ctr | ||
81 | and \tmp, \tmp, #0xf @ cache line size encoding | ||
82 | mov \reg, #4 @ bytes per word | ||
83 | mov \reg, \reg, lsl \tmp @ actual cache line size | ||
84 | .endm | ||
75 | 85 | ||
76 | /* | 86 | /* |
77 | * Sanity check the PTE configuration for the code below - which makes | 87 | * Sanity check the PTE configuration for the code below - which makes |
78 | * certain assumptions about how these bits are layed out. | 88 | * certain assumptions about how these bits are laid out. |
79 | */ | 89 | */ |
80 | #ifdef CONFIG_MMU | 90 | #ifdef CONFIG_MMU |
81 | #if L_PTE_SHARED != PTE_EXT_SHARED | 91 | #if L_PTE_SHARED != PTE_EXT_SHARED |
82 | #error PTE shared bit mismatch | 92 | #error PTE shared bit mismatch |
83 | #endif | 93 | #endif |
84 | #if (L_PTE_EXEC+L_PTE_USER+L_PTE_WRITE+L_PTE_DIRTY+L_PTE_YOUNG+\ | 94 | #if (L_PTE_XN+L_PTE_USER+L_PTE_RDONLY+L_PTE_DIRTY+L_PTE_YOUNG+\ |
85 | L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED | 95 | L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED |
86 | #error Invalid Linux PTE bit settings | 96 | #error Invalid Linux PTE bit settings |
87 | #endif | 97 | #endif |
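The rewritten macros above stop assuming the cache-size selection register was set up for a CSIDR read and instead decode the ARMv7 Cache Type Register, whose IminLine (bits [3:0]) and DminLine (bits [19:16]) fields each hold log2 of the line length in words. A C sketch of the same decoding the two macros perform:

#include <stdint.h>
#include <stdio.h>

static unsigned int dcache_line_size(uint32_t ctr)
{
	return 4u << ((ctr >> 16) & 0xf);   /* 4 bytes/word << DminLine */
}

static unsigned int icache_line_size(uint32_t ctr)
{
	return 4u << (ctr & 0xf);           /* 4 bytes/word << IminLine */
}

int main(void)
{
	uint32_t ctr = 0x83338003u;         /* example CTR value */
	printf("D-line %u bytes, I-line %u bytes\n",
	       dcache_line_size(ctr), icache_line_size(ctr));
	return 0;
}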
@@ -99,6 +109,10 @@ | |||
99 | * 110x 0 1 0 r/w r/o | 109 | * 110x 0 1 0 r/w r/o |
100 | * 11x0 0 1 0 r/w r/o | 110 | * 11x0 0 1 0 r/w r/o |
101 | * 1111 0 1 1 r/w r/w | 111 | * 1111 0 1 1 r/w r/w |
112 | * | ||
113 | * If !CONFIG_CPU_USE_DOMAINS, the following permissions are changed: | ||
114 | * 110x 1 1 1 r/o r/o | ||
115 | * 11x0 1 1 1 r/o r/o | ||
102 | */ | 116 | */ |
103 | .macro armv6_mt_table pfx | 117 | .macro armv6_mt_table pfx |
104 | \pfx\()_mt_table: | 118 | \pfx\()_mt_table: |
@@ -121,7 +135,7 @@ | |||
121 | .endm | 135 | .endm |
122 | 136 | ||
123 | .macro armv6_set_pte_ext pfx | 137 | .macro armv6_set_pte_ext pfx |
124 | str r1, [r0], #-2048 @ linux version | 138 | str r1, [r0], #2048 @ linux version |
125 | 139 | ||
126 | bic r3, r1, #0x000003fc | 140 | bic r3, r1, #0x000003fc |
127 | bic r3, r3, #PTE_TYPE_MASK | 141 | bic r3, r3, #PTE_TYPE_MASK |
@@ -132,17 +146,20 @@ | |||
132 | and r2, r1, #L_PTE_MT_MASK | 146 | and r2, r1, #L_PTE_MT_MASK |
133 | ldr r2, [ip, r2] | 147 | ldr r2, [ip, r2] |
134 | 148 | ||
135 | tst r1, #L_PTE_WRITE | 149 | eor r1, r1, #L_PTE_DIRTY |
136 | tstne r1, #L_PTE_DIRTY | 150 | tst r1, #L_PTE_DIRTY|L_PTE_RDONLY |
137 | orreq r3, r3, #PTE_EXT_APX | 151 | orrne r3, r3, #PTE_EXT_APX |
138 | 152 | ||
139 | tst r1, #L_PTE_USER | 153 | tst r1, #L_PTE_USER |
140 | orrne r3, r3, #PTE_EXT_AP1 | 154 | orrne r3, r3, #PTE_EXT_AP1 |
155 | #ifdef CONFIG_CPU_USE_DOMAINS | ||
156 | @ allow kernel read/write access to read-only user pages | ||
141 | tstne r3, #PTE_EXT_APX | 157 | tstne r3, #PTE_EXT_APX |
142 | bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0 | 158 | bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0 |
159 | #endif | ||
143 | 160 | ||
144 | tst r1, #L_PTE_EXEC | 161 | tst r1, #L_PTE_XN |
145 | orreq r3, r3, #PTE_EXT_XN | 162 | orrne r3, r3, #PTE_EXT_XN |
146 | 163 | ||
147 | orr r3, r3, r2 | 164 | orr r3, r3, r2 |
148 | 165 | ||
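The new APX sequence above (eor with L_PTE_DIRTY, then tst against L_PTE_DIRTY|L_PTE_RDONLY) makes the hardware entry kernel-writable only when the Linux PTE is both writable and dirty; a clean writable page stays read-only in hardware, so the first write faults and the kernel can mark the page dirty. The same hunk also inverts the execute test, since the L_PTE_EXEC bit became an L_PTE_XN (execute-never) bit. A truth-table check of the APX logic in C (bit positions invented for the demo):

#include <stdio.h>

#define L_PTE_DIRTY   (1u << 0)     /* demo bit positions only */
#define L_PTE_RDONLY  (1u << 1)

static int hw_apx(unsigned int pte)
{
	pte ^= L_PTE_DIRTY;                                /* eor r1, r1, #DIRTY */
	return (pte & (L_PTE_DIRTY | L_PTE_RDONLY)) != 0;  /* tst ... orrne APX  */
}

int main(void)
{
	printf("clean read-only -> APX=%d\n", hw_apx(L_PTE_RDONLY)); /* 1 */
	printf("clean writable  -> APX=%d\n", hw_apx(0));            /* 1 */
	printf("dirty writable  -> APX=%d\n", hw_apx(L_PTE_DIRTY));  /* 0 */
	return 0;
}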
@@ -170,9 +187,9 @@ | |||
170 | * 1111 0xff r/w r/w | 187 | * 1111 0xff r/w r/w |
171 | */ | 188 | */ |
172 | .macro armv3_set_pte_ext wc_disable=1 | 189 | .macro armv3_set_pte_ext wc_disable=1 |
173 | str r1, [r0], #-2048 @ linux version | 190 | str r1, [r0], #2048 @ linux version |
174 | 191 | ||
175 | eor r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY | 192 | eor r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
176 | 193 | ||
177 | bic r2, r1, #PTE_SMALL_AP_MASK @ keep C, B bits | 194 | bic r2, r1, #PTE_SMALL_AP_MASK @ keep C, B bits |
178 | bic r2, r2, #PTE_TYPE_MASK | 195 | bic r2, r2, #PTE_TYPE_MASK |
@@ -181,7 +198,7 @@ | |||
181 | tst r3, #L_PTE_USER @ user? | 198 | tst r3, #L_PTE_USER @ user? |
182 | orrne r2, r2, #PTE_SMALL_AP_URO_SRW | 199 | orrne r2, r2, #PTE_SMALL_AP_URO_SRW |
183 | 200 | ||
184 | tst r3, #L_PTE_WRITE | L_PTE_DIRTY @ write and dirty? | 201 | tst r3, #L_PTE_RDONLY | L_PTE_DIRTY @ write and dirty? |
185 | orreq r2, r2, #PTE_SMALL_AP_UNO_SRW | 202 | orreq r2, r2, #PTE_SMALL_AP_UNO_SRW |
186 | 203 | ||
187 | tst r3, #L_PTE_PRESENT | L_PTE_YOUNG @ present and young? | 204 | tst r3, #L_PTE_PRESENT | L_PTE_YOUNG @ present and young? |
@@ -193,7 +210,7 @@ | |||
193 | bicne r2, r2, #PTE_BUFFERABLE | 210 | bicne r2, r2, #PTE_BUFFERABLE |
194 | #endif | 211 | #endif |
195 | .endif | 212 | .endif |
196 | str r2, [r0] @ hardware version | 213 | str r2, [r0] @ hardware version |
197 | .endm | 214 | .endm |
198 | 215 | ||
199 | 216 | ||
@@ -213,9 +230,9 @@ | |||
213 | * 1111 11 r/w r/w | 230 | * 1111 11 r/w r/w |
214 | */ | 231 | */ |
215 | .macro xscale_set_pte_ext_prologue | 232 | .macro xscale_set_pte_ext_prologue |
216 | str r1, [r0], #-2048 @ linux version | 233 | str r1, [r0] @ linux version |
217 | 234 | ||
218 | eor r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY | 235 | eor r3, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
219 | 236 | ||
220 | bic r2, r1, #PTE_SMALL_AP_MASK @ keep C, B bits | 237 | bic r2, r1, #PTE_SMALL_AP_MASK @ keep C, B bits |
221 | orr r2, r2, #PTE_TYPE_EXT @ extended page | 238 | orr r2, r2, #PTE_TYPE_EXT @ extended page |
@@ -223,7 +240,7 @@ | |||
223 | tst r3, #L_PTE_USER @ user? | 240 | tst r3, #L_PTE_USER @ user? |
224 | orrne r2, r2, #PTE_EXT_AP_URO_SRW @ yes -> user r/o, system r/w | 241 | orrne r2, r2, #PTE_EXT_AP_URO_SRW @ yes -> user r/o, system r/w |
225 | 242 | ||
226 | tst r3, #L_PTE_WRITE | L_PTE_DIRTY @ write and dirty? | 243 | tst r3, #L_PTE_RDONLY | L_PTE_DIRTY @ write and dirty? |
227 | orreq r2, r2, #PTE_EXT_AP_UNO_SRW @ yes -> user n/a, system r/w | 244 | orreq r2, r2, #PTE_EXT_AP_UNO_SRW @ yes -> user n/a, system r/w |
228 | @ combined with user -> user r/w | 245 | @ combined with user -> user r/w |
229 | .endm | 246 | .endm |
@@ -232,7 +249,7 @@ | |||
232 | tst r3, #L_PTE_PRESENT | L_PTE_YOUNG @ present and young? | 249 | tst r3, #L_PTE_PRESENT | L_PTE_YOUNG @ present and young? |
233 | movne r2, #0 @ no -> fault | 250 | movne r2, #0 @ no -> fault |
234 | 251 | ||
235 | str r2, [r0] @ hardware version | 252 | str r2, [r0, #2048]! @ hardware version |
236 | mov ip, #0 | 253 | mov ip, #0 |
237 | mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line | 254 | mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line |
238 | mcr p15, 0, ip, c7, c10, 4 @ data write barrier | 255 | mcr p15, 0, ip, c7, c10, 4 @ data write barrier |
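The [r0], #-2048 to [r0], #2048 changes in this file (and the str r2, [r0, #2048]! in the xscale epilogue) all reflect one layout change: each page of page tables still carries a Linux copy and a hardware copy of every entry, but the hardware copy now sits 2048 bytes above the Linux one instead of below it. A simplified C model of the paired stores:

#include <stdint.h>
#include <stdio.h>

#define HW_OFFSET_WORDS (2048 / 4)

static uint32_t pte_page[2 * HW_OFFSET_WORDS];  /* linux half + hw half */

static void set_pte_ext(uint32_t *linux_ptep, uint32_t linux_pte,
			uint32_t hw_pte)
{
	*linux_ptep = linux_pte;                    /* str r1, [r0]        */
	linux_ptep[HW_OFFSET_WORDS] = hw_pte;       /* str r2, [r0, #2048] */
}

int main(void)
{
	set_pte_ext(&pte_page[5], 0x12345043u, 0x1234504eu);
	printf("linux=%#x hw=%#x\n",
	       (unsigned)pte_page[5],
	       (unsigned)pte_page[5 + HW_OFFSET_WORDS]);
	return 0;
}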
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S index 5f6892fcc167..9d4f2ae63370 100644 --- a/arch/arm/mm/proc-mohawk.S +++ b/arch/arm/mm/proc-mohawk.S | |||
@@ -338,7 +338,7 @@ ENTRY(cpu_mohawk_set_pte_ext) | |||
338 | mcr p15, 0, r0, c7, c10, 4 @ drain WB | 338 | mcr p15, 0, r0, c7, c10, 4 @ drain WB |
339 | mov pc, lr | 339 | mov pc, lr |
340 | 340 | ||
341 | __INIT | 341 | __CPUINIT |
342 | 342 | ||
343 | .type __mohawk_setup, #function | 343 | .type __mohawk_setup, #function |
344 | __mohawk_setup: | 344 | __mohawk_setup: |
@@ -388,6 +388,9 @@ mohawk_processor_functions: | |||
388 | .word cpu_mohawk_dcache_clean_area | 388 | .word cpu_mohawk_dcache_clean_area |
389 | .word cpu_mohawk_switch_mm | 389 | .word cpu_mohawk_switch_mm |
390 | .word cpu_mohawk_set_pte_ext | 390 | .word cpu_mohawk_set_pte_ext |
391 | .word 0 | ||
392 | .word 0 | ||
393 | .word 0 | ||
391 | .size mohawk_processor_functions, . - mohawk_processor_functions | 394 | .size mohawk_processor_functions, . - mohawk_processor_functions |
392 | 395 | ||
393 | .section ".rodata" | 396 | .section ".rodata" |
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S index a201eb04b5e1..46f09ed16b98 100644 --- a/arch/arm/mm/proc-sa110.S +++ b/arch/arm/mm/proc-sa110.S | |||
@@ -156,7 +156,7 @@ ENTRY(cpu_sa110_set_pte_ext) | |||
156 | #endif | 156 | #endif |
157 | mov pc, lr | 157 | mov pc, lr |
158 | 158 | ||
159 | __INIT | 159 | __CPUINIT |
160 | 160 | ||
161 | .type __sa110_setup, #function | 161 | .type __sa110_setup, #function |
162 | __sa110_setup: | 162 | __sa110_setup: |
@@ -203,6 +203,9 @@ ENTRY(sa110_processor_functions) | |||
203 | .word cpu_sa110_dcache_clean_area | 203 | .word cpu_sa110_dcache_clean_area |
204 | .word cpu_sa110_switch_mm | 204 | .word cpu_sa110_switch_mm |
205 | .word cpu_sa110_set_pte_ext | 205 | .word cpu_sa110_set_pte_ext |
206 | .word 0 | ||
207 | .word 0 | ||
208 | .word 0 | ||
206 | .size sa110_processor_functions, . - sa110_processor_functions | 209 | .size sa110_processor_functions, . - sa110_processor_functions |
207 | 210 | ||
208 | .section ".rodata" | 211 | .section ".rodata" |
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S index 7ddc4805bf97..184a9c997e36 100644 --- a/arch/arm/mm/proc-sa1100.S +++ b/arch/arm/mm/proc-sa1100.S | |||
@@ -169,7 +169,43 @@ ENTRY(cpu_sa1100_set_pte_ext) | |||
169 | #endif | 169 | #endif |
170 | mov pc, lr | 170 | mov pc, lr |
171 | 171 | ||
172 | __INIT | 172 | .globl cpu_sa1100_suspend_size |
173 | .equ cpu_sa1100_suspend_size, 4*4 | ||
174 | #ifdef CONFIG_PM_SLEEP | ||
175 | ENTRY(cpu_sa1100_do_suspend) | ||
176 | stmfd sp!, {r4 - r7, lr} | ||
177 | mrc p15, 0, r4, c3, c0, 0 @ domain ID | ||
178 | mrc p15, 0, r5, c2, c0, 0 @ translation table base addr | ||
179 | mrc p15, 0, r6, c13, c0, 0 @ PID | ||
180 | mrc p15, 0, r7, c1, c0, 0 @ control reg | ||
181 | stmia r0, {r4 - r7} @ store cp regs | ||
182 | ldmfd sp!, {r4 - r7, pc} | ||
183 | ENDPROC(cpu_sa1100_do_suspend) | ||
184 | |||
185 | ENTRY(cpu_sa1100_do_resume) | ||
186 | ldmia r0, {r4 - r7} @ load cp regs | ||
187 | mov r1, #0 | ||
188 | mcr p15, 0, r1, c8, c7, 0 @ flush I+D TLBs | ||
189 | mcr p15, 0, r1, c7, c7, 0 @ flush I&D cache | ||
190 | mcr p15, 0, r1, c9, c0, 0 @ invalidate RB | ||
191 | mcr p15, 0, r1, c9, c0, 5 @ allow user space to use RB | ||
192 | |||
193 | mcr p15, 0, r4, c3, c0, 0 @ domain ID | ||
194 | mcr p15, 0, r5, c2, c0, 0 @ translation table base addr | ||
195 | mcr p15, 0, r6, c13, c0, 0 @ PID | ||
196 | mov r0, r7 @ control register | ||
197 | mov r2, r5, lsr #14 @ get TTB0 base | ||
198 | mov r2, r2, lsl #14 | ||
199 | ldr r3, =PMD_TYPE_SECT | PMD_SECT_BUFFERABLE | \ | ||
200 | PMD_SECT_CACHEABLE | PMD_SECT_AP_WRITE | ||
201 | b cpu_resume_mmu | ||
202 | ENDPROC(cpu_sa1100_do_resume) | ||
203 | #else | ||
204 | #define cpu_sa1100_do_suspend 0 | ||
205 | #define cpu_sa1100_do_resume 0 | ||
206 | #endif | ||
207 | |||
208 | __CPUINIT | ||
173 | 209 | ||
174 | .type __sa1100_setup, #function | 210 | .type __sa1100_setup, #function |
175 | __sa1100_setup: | 211 | __sa1100_setup: |
@@ -218,6 +254,9 @@ ENTRY(sa1100_processor_functions) | |||
218 | .word cpu_sa1100_dcache_clean_area | 254 | .word cpu_sa1100_dcache_clean_area |
219 | .word cpu_sa1100_switch_mm | 255 | .word cpu_sa1100_switch_mm |
220 | .word cpu_sa1100_set_pte_ext | 256 | .word cpu_sa1100_set_pte_ext |
257 | .word cpu_sa1100_suspend_size | ||
258 | .word cpu_sa1100_do_suspend | ||
259 | .word cpu_sa1100_do_resume | ||
221 | .size sa1100_processor_functions, . - sa1100_processor_functions | 260 | .size sa1100_processor_functions, . - sa1100_processor_functions |
222 | 261 | ||
223 | .section ".rodata" | 262 | .section ".rodata" |
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index 22aac8515196..1d2b8451bf25 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S | |||
@@ -30,13 +30,10 @@ | |||
30 | #define TTB_RGN_WT (2 << 3) | 30 | #define TTB_RGN_WT (2 << 3) |
31 | #define TTB_RGN_WB (3 << 3) | 31 | #define TTB_RGN_WB (3 << 3) |
32 | 32 | ||
33 | #ifndef CONFIG_SMP | 33 | #define TTB_FLAGS_UP TTB_RGN_WBWA |
34 | #define TTB_FLAGS TTB_RGN_WBWA | 34 | #define PMD_FLAGS_UP PMD_SECT_WB |
35 | #define PMD_FLAGS PMD_SECT_WB | 35 | #define TTB_FLAGS_SMP TTB_RGN_WBWA|TTB_S |
36 | #else | 36 | #define PMD_FLAGS_SMP PMD_SECT_WBWA|PMD_SECT_S |
37 | #define TTB_FLAGS TTB_RGN_WBWA|TTB_S | ||
38 | #define PMD_FLAGS PMD_SECT_WBWA|PMD_SECT_S | ||
39 | #endif | ||
40 | 37 | ||
41 | ENTRY(cpu_v6_proc_init) | 38 | ENTRY(cpu_v6_proc_init) |
42 | mov pc, lr | 39 | mov pc, lr |
@@ -97,7 +94,8 @@ ENTRY(cpu_v6_switch_mm) | |||
97 | #ifdef CONFIG_MMU | 94 | #ifdef CONFIG_MMU |
98 | mov r2, #0 | 95 | mov r2, #0 |
99 | ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id | 96 | ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id |
100 | orr r0, r0, #TTB_FLAGS | 97 | ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP) |
98 | ALT_UP(orr r0, r0, #TTB_FLAGS_UP) | ||
101 | mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB | 99 | mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB |
102 | mcr p15, 0, r2, c7, c10, 4 @ drain write buffer | 100 | mcr p15, 0, r2, c7, c10, 4 @ drain write buffer |
103 | mcr p15, 0, r0, c2, c0, 0 @ set TTB 0 | 101 | mcr p15, 0, r0, c2, c0, 0 @ set TTB 0 |
@@ -123,6 +121,53 @@ ENTRY(cpu_v6_set_pte_ext) | |||
123 | #endif | 121 | #endif |
124 | mov pc, lr | 122 | mov pc, lr |
125 | 123 | ||
124 | /* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */ | ||
125 | .globl cpu_v6_suspend_size | ||
126 | .equ cpu_v6_suspend_size, 4 * 8 | ||
127 | #ifdef CONFIG_PM_SLEEP | ||
128 | ENTRY(cpu_v6_do_suspend) | ||
129 | stmfd sp!, {r4 - r11, lr} | ||
130 | mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID | ||
131 | mrc p15, 0, r5, c13, c0, 1 @ Context ID | ||
132 | mrc p15, 0, r6, c3, c0, 0 @ Domain ID | ||
133 | mrc p15, 0, r7, c2, c0, 0 @ Translation table base 0 | ||
134 | mrc p15, 0, r8, c2, c0, 1 @ Translation table base 1 | ||
135 | mrc p15, 0, r9, c1, c0, 1 @ auxiliary control register | ||
136 | mrc p15, 0, r10, c1, c0, 2 @ co-processor access control | ||
137 | mrc p15, 0, r11, c1, c0, 0 @ control register | ||
138 | stmia r0, {r4 - r11} | ||
139 | ldmfd sp!, {r4- r11, pc} | ||
140 | ENDPROC(cpu_v6_do_suspend) | ||
141 | |||
142 | ENTRY(cpu_v6_do_resume) | ||
143 | mov ip, #0 | ||
144 | mcr p15, 0, ip, c7, c14, 0 @ clean+invalidate D cache | ||
145 | mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache | ||
146 | mcr p15, 0, ip, c7, c15, 0 @ clean+invalidate cache | ||
147 | mcr p15, 0, ip, c7, c10, 4 @ drain write buffer | ||
148 | ldmia r0, {r4 - r11} | ||
149 | mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID | ||
150 | mcr p15, 0, r5, c13, c0, 1 @ Context ID | ||
151 | mcr p15, 0, r6, c3, c0, 0 @ Domain ID | ||
152 | mcr p15, 0, r7, c2, c0, 0 @ Translation table base 0 | ||
153 | mcr p15, 0, r8, c2, c0, 1 @ Translation table base 1 | ||
154 | mcr p15, 0, r9, c1, c0, 1 @ auxiliary control register | ||
155 | mcr p15, 0, r10, c1, c0, 2 @ co-processor access control | ||
156 | mcr p15, 0, ip, c2, c0, 2 @ TTB control register | ||
157 | mcr p15, 0, ip, c7, c5, 4 @ ISB | ||
158 | mov r0, r11 @ control register | ||
159 | mov r2, r7, lsr #14 @ get TTB0 base | ||
160 | mov r2, r2, lsl #14 | ||
161 | ldr r3, cpu_resume_l1_flags | ||
162 | b cpu_resume_mmu | ||
163 | ENDPROC(cpu_v6_do_resume) | ||
164 | cpu_resume_l1_flags: | ||
165 | ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_SMP) | ||
166 | ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_UP) | ||
167 | #else | ||
168 | #define cpu_v6_do_suspend 0 | ||
169 | #define cpu_v6_do_resume 0 | ||
170 | #endif | ||
126 | 171 | ||
127 | 172 | ||
128 | .type cpu_v6_name, #object | 173 | .type cpu_v6_name, #object |
@@ -130,14 +175,9 @@ cpu_v6_name: | |||
130 | .asciz "ARMv6-compatible processor" | 175 | .asciz "ARMv6-compatible processor" |
131 | .size cpu_v6_name, . - cpu_v6_name | 176 | .size cpu_v6_name, . - cpu_v6_name |
132 | 177 | ||
133 | .type cpu_pj4_name, #object | ||
134 | cpu_pj4_name: | ||
135 | .asciz "Marvell PJ4 processor" | ||
136 | .size cpu_pj4_name, . - cpu_pj4_name | ||
137 | |||
138 | .align | 178 | .align |
139 | 179 | ||
140 | __INIT | 180 | __CPUINIT |
141 | 181 | ||
142 | /* | 182 | /* |
143 | * __v6_setup | 183 | * __v6_setup |
@@ -156,9 +196,11 @@ cpu_pj4_name: | |||
156 | */ | 196 | */ |
157 | __v6_setup: | 197 | __v6_setup: |
158 | #ifdef CONFIG_SMP | 198 | #ifdef CONFIG_SMP |
159 | mrc p15, 0, r0, c1, c0, 1 @ Enable SMP/nAMP mode | 199 | ALT_SMP(mrc p15, 0, r0, c1, c0, 1) @ Enable SMP/nAMP mode |
200 | ALT_UP(nop) | ||
160 | orr r0, r0, #0x20 | 201 | orr r0, r0, #0x20 |
161 | mcr p15, 0, r0, c1, c0, 1 | 202 | ALT_SMP(mcr p15, 0, r0, c1, c0, 1) |
203 | ALT_UP(nop) | ||
162 | #endif | 204 | #endif |
163 | 205 | ||
164 | mov r0, #0 | 206 | mov r0, #0 |
@@ -169,8 +211,11 @@ __v6_setup: | |||
169 | #ifdef CONFIG_MMU | 211 | #ifdef CONFIG_MMU |
170 | mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs | 212 | mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs |
171 | mcr p15, 0, r0, c2, c0, 2 @ TTB control register | 213 | mcr p15, 0, r0, c2, c0, 2 @ TTB control register |
172 | orr r4, r4, #TTB_FLAGS | 214 | ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP) |
173 | mcr p15, 0, r4, c2, c0, 1 @ load TTB1 | 215 | ALT_UP(orr r4, r4, #TTB_FLAGS_UP) |
216 | ALT_SMP(orr r8, r8, #TTB_FLAGS_SMP) | ||
217 | ALT_UP(orr r8, r8, #TTB_FLAGS_UP) | ||
218 | mcr p15, 0, r8, c2, c0, 1 @ load TTB1 | ||
174 | #endif /* CONFIG_MMU */ | 219 | #endif /* CONFIG_MMU */ |
175 | adr r5, v6_crval | 220 | adr r5, v6_crval |
176 | ldmia r5, {r5, r6} | 221 | ldmia r5, {r5, r6} |
@@ -192,6 +237,8 @@ __v6_setup: | |||
192 | v6_crval: | 237 | v6_crval: |
193 | crval clear=0x01e0fb7f, mmuset=0x00c0387d, ucset=0x00c0187c | 238 | crval clear=0x01e0fb7f, mmuset=0x00c0387d, ucset=0x00c0187c |
194 | 239 | ||
240 | __INITDATA | ||
241 | |||
195 | .type v6_processor_functions, #object | 242 | .type v6_processor_functions, #object |
196 | ENTRY(v6_processor_functions) | 243 | ENTRY(v6_processor_functions) |
197 | .word v6_early_abort | 244 | .word v6_early_abort |
@@ -203,8 +250,13 @@ ENTRY(v6_processor_functions) | |||
203 | .word cpu_v6_dcache_clean_area | 250 | .word cpu_v6_dcache_clean_area |
204 | .word cpu_v6_switch_mm | 251 | .word cpu_v6_switch_mm |
205 | .word cpu_v6_set_pte_ext | 252 | .word cpu_v6_set_pte_ext |
253 | .word cpu_v6_suspend_size | ||
254 | .word cpu_v6_do_suspend | ||
255 | .word cpu_v6_do_resume | ||
206 | .size v6_processor_functions, . - v6_processor_functions | 256 | .size v6_processor_functions, . - v6_processor_functions |
207 | 257 | ||
258 | .section ".rodata" | ||
259 | |||
208 | .type cpu_arch_name, #object | 260 | .type cpu_arch_name, #object |
209 | cpu_arch_name: | 261 | cpu_arch_name: |
210 | .asciz "armv6" | 262 | .asciz "armv6" |
@@ -225,10 +277,16 @@ cpu_elf_name: | |||
225 | __v6_proc_info: | 277 | __v6_proc_info: |
226 | .long 0x0007b000 | 278 | .long 0x0007b000 |
227 | .long 0x0007f000 | 279 | .long 0x0007f000 |
228 | .long PMD_TYPE_SECT | \ | 280 | ALT_SMP(.long \ |
281 | PMD_TYPE_SECT | \ | ||
282 | PMD_SECT_AP_WRITE | \ | ||
283 | PMD_SECT_AP_READ | \ | ||
284 | PMD_FLAGS_SMP) | ||
285 | ALT_UP(.long \ | ||
286 | PMD_TYPE_SECT | \ | ||
229 | PMD_SECT_AP_WRITE | \ | 287 | PMD_SECT_AP_WRITE | \ |
230 | PMD_SECT_AP_READ | \ | 288 | PMD_SECT_AP_READ | \ |
231 | PMD_FLAGS | 289 | PMD_FLAGS_UP) |
232 | .long PMD_TYPE_SECT | \ | 290 | .long PMD_TYPE_SECT | \ |
233 | PMD_SECT_XN | \ | 291 | PMD_SECT_XN | \ |
234 | PMD_SECT_AP_WRITE | \ | 292 | PMD_SECT_AP_WRITE | \ |
@@ -244,26 +302,3 @@ __v6_proc_info: | |||
244 | .long v6_user_fns | 302 | .long v6_user_fns |
245 | .long v6_cache_fns | 303 | .long v6_cache_fns |
246 | .size __v6_proc_info, . - __v6_proc_info | 304 | .size __v6_proc_info, . - __v6_proc_info |
247 | |||
248 | .type __pj4_v6_proc_info, #object | ||
249 | __pj4_v6_proc_info: | ||
250 | .long 0x560f5810 | ||
251 | .long 0xff0ffff0 | ||
252 | .long PMD_TYPE_SECT | \ | ||
253 | PMD_SECT_AP_WRITE | \ | ||
254 | PMD_SECT_AP_READ | \ | ||
255 | PMD_FLAGS | ||
256 | .long PMD_TYPE_SECT | \ | ||
257 | PMD_SECT_XN | \ | ||
258 | PMD_SECT_AP_WRITE | \ | ||
259 | PMD_SECT_AP_READ | ||
260 | b __v6_setup | ||
261 | .long cpu_arch_name | ||
262 | .long cpu_elf_name | ||
263 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS | ||
264 | .long cpu_pj4_name | ||
265 | .long v6_processor_functions | ||
266 | .long v6wbi_tlb_fns | ||
267 | .long v6_user_fns | ||
268 | .long v6_cache_fns | ||
269 | .size __pj4_v6_proc_info, . - __pj4_v6_proc_info | ||
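The ALT_SMP()/ALT_UP() pairs introduced throughout proc-v6.S (and in proc-v7.S below) let one kernel image carry both encodings: the SMP form is assembled in place, and the UP replacement is recorded in a fixup table that boot code applies when the machine turns out to be uniprocessor. A heavily simplified C model of that boot-time patching, patching data words instead of instructions:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct alt_entry {
	uint32_t *site;       /* word assembled into the image (SMP form) */
	uint32_t  up_value;   /* replacement applied when running UP      */
};

static uint32_t ttb_flags = 0x6a;      /* stand-in for a TTB_FLAGS_SMP use */

static struct alt_entry fixups[] = {
	{ &ttb_flags, 0x48 },              /* stand-in for TTB_FLAGS_UP   */
};

static void apply_alternatives(int is_smp)
{
	if (is_smp)
		return;                    /* keep the SMP encodings      */
	for (size_t i = 0; i < sizeof(fixups) / sizeof(fixups[0]); i++)
		*fixups[i].site = fixups[i].up_value;
}

int main(void)
{
	apply_alternatives(0);             /* booted on a UP system       */
	printf("ttb flags after fixup: %#x\n", (unsigned)ttb_flags);
	return 0;
}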
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 197f21bed5e9..089c0b5e454f 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S | |||
@@ -30,15 +30,13 @@ | |||
30 | #define TTB_IRGN_WT ((1 << 0) | (0 << 6)) | 30 | #define TTB_IRGN_WT ((1 << 0) | (0 << 6)) |
31 | #define TTB_IRGN_WB ((1 << 0) | (1 << 6)) | 31 | #define TTB_IRGN_WB ((1 << 0) | (1 << 6)) |
32 | 32 | ||
33 | #ifndef CONFIG_SMP | ||
34 | /* PTWs cacheable, inner WB not shareable, outer WB not shareable */ | 33 | /* PTWs cacheable, inner WB not shareable, outer WB not shareable */ |
35 | #define TTB_FLAGS TTB_IRGN_WB|TTB_RGN_OC_WB | 34 | #define TTB_FLAGS_UP TTB_IRGN_WB|TTB_RGN_OC_WB |
36 | #define PMD_FLAGS PMD_SECT_WB | 35 | #define PMD_FLAGS_UP PMD_SECT_WB |
37 | #else | 36 | |
38 | /* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */ | 37 | /* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */ |
39 | #define TTB_FLAGS TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA | 38 | #define TTB_FLAGS_SMP TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA |
40 | #define PMD_FLAGS PMD_SECT_WBWA|PMD_SECT_S | 39 | #define PMD_FLAGS_SMP PMD_SECT_WBWA|PMD_SECT_S |
41 | #endif | ||
42 | 40 | ||
43 | ENTRY(cpu_v7_proc_init) | 41 | ENTRY(cpu_v7_proc_init) |
44 | mov pc, lr | 42 | mov pc, lr |
@@ -105,14 +103,21 @@ ENTRY(cpu_v7_switch_mm) | |||
105 | #ifdef CONFIG_MMU | 103 | #ifdef CONFIG_MMU |
106 | mov r2, #0 | 104 | mov r2, #0 |
107 | ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id | 105 | ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id |
108 | orr r0, r0, #TTB_FLAGS | 106 | ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP) |
107 | ALT_UP(orr r0, r0, #TTB_FLAGS_UP) | ||
109 | #ifdef CONFIG_ARM_ERRATA_430973 | 108 | #ifdef CONFIG_ARM_ERRATA_430973 |
110 | mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB | 109 | mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB |
111 | #endif | 110 | #endif |
111 | #ifdef CONFIG_ARM_ERRATA_754322 | ||
112 | dsb | ||
113 | #endif | ||
112 | mcr p15, 0, r2, c13, c0, 1 @ set reserved context ID | 114 | mcr p15, 0, r2, c13, c0, 1 @ set reserved context ID |
113 | isb | 115 | isb |
114 | 1: mcr p15, 0, r0, c2, c0, 0 @ set TTB 0 | 116 | 1: mcr p15, 0, r0, c2, c0, 0 @ set TTB 0 |
115 | isb | 117 | isb |
118 | #ifdef CONFIG_ARM_ERRATA_754322 | ||
119 | dsb | ||
120 | #endif | ||
116 | mcr p15, 0, r1, c13, c0, 1 @ set context ID | 121 | mcr p15, 0, r1, c13, c0, 1 @ set context ID |
117 | isb | 122 | isb |
118 | #endif | 123 | #endif |
@@ -125,15 +130,13 @@ ENDPROC(cpu_v7_switch_mm) | |||
125 | * Set a level 2 translation table entry. | 130 | * Set a level 2 translation table entry. |
126 | * | 131 | * |
127 | * - ptep - pointer to level 2 translation table entry | 132 | * - ptep - pointer to level 2 translation table entry |
128 | * (hardware version is stored at -1024 bytes) | 133 | * (hardware version is stored at +2048 bytes) |
129 | * - pte - PTE value to store | 134 | * - pte - PTE value to store |
130 | * - ext - value for extended PTE bits | 135 | * - ext - value for extended PTE bits |
131 | */ | 136 | */ |
132 | ENTRY(cpu_v7_set_pte_ext) | 137 | ENTRY(cpu_v7_set_pte_ext) |
133 | #ifdef CONFIG_MMU | 138 | #ifdef CONFIG_MMU |
134 | ARM( str r1, [r0], #-2048 ) @ linux version | 139 | str r1, [r0] @ linux version |
135 | THUMB( str r1, [r0] ) @ linux version | ||
136 | THUMB( sub r0, r0, #2048 ) | ||
137 | 140 | ||
138 | bic r3, r1, #0x000003f0 | 141 | bic r3, r1, #0x000003f0 |
139 | bic r3, r3, #PTE_TYPE_MASK | 142 | bic r3, r3, #PTE_TYPE_MASK |
@@ -143,23 +146,28 @@ ENTRY(cpu_v7_set_pte_ext) | |||
143 | tst r1, #1 << 4 | 146 | tst r1, #1 << 4 |
144 | orrne r3, r3, #PTE_EXT_TEX(1) | 147 | orrne r3, r3, #PTE_EXT_TEX(1) |
145 | 148 | ||
146 | tst r1, #L_PTE_WRITE | 149 | eor r1, r1, #L_PTE_DIRTY |
147 | tstne r1, #L_PTE_DIRTY | 150 | tst r1, #L_PTE_RDONLY | L_PTE_DIRTY |
148 | orreq r3, r3, #PTE_EXT_APX | 151 | orrne r3, r3, #PTE_EXT_APX |
149 | 152 | ||
150 | tst r1, #L_PTE_USER | 153 | tst r1, #L_PTE_USER |
151 | orrne r3, r3, #PTE_EXT_AP1 | 154 | orrne r3, r3, #PTE_EXT_AP1 |
155 | #ifdef CONFIG_CPU_USE_DOMAINS | ||
156 | @ allow kernel read/write access to read-only user pages | ||
152 | tstne r3, #PTE_EXT_APX | 157 | tstne r3, #PTE_EXT_APX |
153 | bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0 | 158 | bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0 |
159 | #endif | ||
154 | 160 | ||
155 | tst r1, #L_PTE_EXEC | 161 | tst r1, #L_PTE_XN |
156 | orreq r3, r3, #PTE_EXT_XN | 162 | orrne r3, r3, #PTE_EXT_XN |
157 | 163 | ||
158 | tst r1, #L_PTE_YOUNG | 164 | tst r1, #L_PTE_YOUNG |
159 | tstne r1, #L_PTE_PRESENT | 165 | tstne r1, #L_PTE_PRESENT |
160 | moveq r3, #0 | 166 | moveq r3, #0 |
161 | 167 | ||
162 | str r3, [r0] | 168 | ARM( str r3, [r0, #2048]! ) |
169 | THUMB( add r0, r0, #2048 ) | ||
170 | THUMB( str r3, [r0] ) | ||
163 | mcr p15, 0, r0, c7, c10, 1 @ flush_pte | 171 | mcr p15, 0, r0, c7, c10, 1 @ flush_pte |
164 | #endif | 172 | #endif |
165 | mov pc, lr | 173 | mov pc, lr |
@@ -169,7 +177,92 @@ cpu_v7_name: | |||
169 | .ascii "ARMv7 Processor" | 177 | .ascii "ARMv7 Processor" |
170 | .align | 178 | .align |
171 | 179 | ||
172 | __INIT | 180 | /* |
181 | * Memory region attributes with SCTLR.TRE=1 | ||
182 | * | ||
183 | * n = TEX[0],C,B | ||
184 | * TR = PRRR[2n+1:2n] - memory type | ||
185 | * IR = NMRR[2n+1:2n] - inner cacheable property | ||
186 | * OR = NMRR[2n+17:2n+16] - outer cacheable property | ||
187 | * | ||
188 | * n TR IR OR | ||
189 | * UNCACHED 000 00 | ||
190 | * BUFFERABLE 001 10 00 00 | ||
191 | * WRITETHROUGH 010 10 10 10 | ||
192 | * WRITEBACK 011 10 11 11 | ||
193 | * reserved 110 | ||
194 | * WRITEALLOC 111 10 01 01 | ||
195 | * DEV_SHARED 100 01 | ||
196 | * DEV_NONSHARED 100 01 | ||
197 | * DEV_WC 001 10 | ||
198 | * DEV_CACHED 011 10 | ||
199 | * | ||
200 | * Other attributes: | ||
201 | * | ||
202 | * DS0 = PRRR[16] = 0 - device shareable property | ||
203 | * DS1 = PRRR[17] = 1 - device shareable property | ||
204 | * NS0 = PRRR[18] = 0 - normal shareable property | ||
205 | * NS1 = PRRR[19] = 1 - normal shareable property | ||
206 | * NOS = PRRR[24+n] = 1 - not outer shareable | ||
207 | */ | ||
208 | .equ PRRR, 0xff0a81a8 | ||
209 | .equ NMRR, 0x40e040e0 | ||
210 | |||
211 | /* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */ | ||
212 | .globl cpu_v7_suspend_size | ||
213 | .equ cpu_v7_suspend_size, 4 * 9 | ||
214 | #ifdef CONFIG_PM_SLEEP | ||
215 | ENTRY(cpu_v7_do_suspend) | ||
216 | stmfd sp!, {r4 - r11, lr} | ||
217 | mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID | ||
218 | mrc p15, 0, r5, c13, c0, 1 @ Context ID | ||
219 | mrc p15, 0, r6, c13, c0, 3 @ User r/o thread ID | ||
220 | stmia r0!, {r4 - r6} | ||
221 | mrc p15, 0, r6, c3, c0, 0 @ Domain ID | ||
222 | mrc p15, 0, r7, c2, c0, 0 @ TTB 0 | ||
223 | mrc p15, 0, r8, c2, c0, 1 @ TTB 1 | ||
224 | mrc p15, 0, r9, c1, c0, 0 @ Control register | ||
225 | mrc p15, 0, r10, c1, c0, 1 @ Auxiliary control register | ||
226 | mrc p15, 0, r11, c1, c0, 2 @ Co-processor access control | ||
227 | stmia r0, {r6 - r11} | ||
228 | ldmfd sp!, {r4 - r11, pc} | ||
229 | ENDPROC(cpu_v7_do_suspend) | ||
230 | |||
231 | ENTRY(cpu_v7_do_resume) | ||
232 | mov ip, #0 | ||
233 | mcr p15, 0, ip, c8, c7, 0 @ invalidate TLBs | ||
234 | mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache | ||
235 | ldmia r0!, {r4 - r6} | ||
236 | mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID | ||
237 | mcr p15, 0, r5, c13, c0, 1 @ Context ID | ||
238 | mcr p15, 0, r6, c13, c0, 3 @ User r/o thread ID | ||
239 | ldmia r0, {r6 - r11} | ||
240 | mcr p15, 0, r6, c3, c0, 0 @ Domain ID | ||
241 | mcr p15, 0, r7, c2, c0, 0 @ TTB 0 | ||
242 | mcr p15, 0, r8, c2, c0, 1 @ TTB 1 | ||
243 | mcr p15, 0, ip, c2, c0, 2 @ TTB control register | ||
244 | mcr p15, 0, r10, c1, c0, 1 @ Auxiliary control register | ||
245 | mcr p15, 0, r11, c1, c0, 2 @ Co-processor access control | ||
246 | ldr r4, =PRRR @ PRRR | ||
247 | ldr r5, =NMRR @ NMRR | ||
248 | mcr p15, 0, r4, c10, c2, 0 @ write PRRR | ||
249 | mcr p15, 0, r5, c10, c2, 1 @ write NMRR | ||
250 | isb | ||
251 | mov r0, r9 @ control register | ||
252 | mov r2, r7, lsr #14 @ get TTB0 base | ||
253 | mov r2, r2, lsl #14 @ clear low 14 bits (TTB is 16KiB aligned) | ||
254 | ldr r3, cpu_resume_l1_flags | ||
255 | b cpu_resume_mmu | ||
256 | ENDPROC(cpu_v7_do_resume) | ||
257 | cpu_resume_l1_flags: | ||
258 | ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_SMP) | ||
259 | ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_UP) | ||
260 | #else | ||
261 | #define cpu_v7_do_suspend 0 | ||
262 | #define cpu_v7_do_resume 0 | ||
263 | #endif | ||
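The nine words stored by cpu_v7_do_suspend (hence cpu_v7_suspend_size = 4 * 9) form a fixed layout that cpu_v7_do_resume reloads in the same order. A purely illustrative C mirror of that layout (the struct and field names are ours, not kernel definitions):

/* One word per saved cp15 register, in stmia order. */
struct v7_suspend_regs {
	unsigned long fcse_pid;    /* c13, c0, 0: FCSE/PID */
	unsigned long context_id;  /* c13, c0, 1: Context ID */
	unsigned long tpidruro;    /* c13, c0, 3: user r/o thread ID */
	unsigned long dacr;        /* c3,  c0, 0: Domain ID */
	unsigned long ttbr0;       /* c2,  c0, 0: TTB 0 */
	unsigned long ttbr1;       /* c2,  c0, 1: TTB 1 */
	unsigned long sctlr;       /* c1,  c0, 0: control register */
	unsigned long actlr;       /* c1,  c0, 1: auxiliary control */
	unsigned long cpacr;       /* c1,  c0, 2: coprocessor access */
};	/* 9 * 4 bytes on ARM32, matching cpu_v7_suspend_size */

Note that resume writes a zero TTB control register and reprograms PRRR/NMRR from the .equ values rather than saving them.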
264 | |||
265 | __CPUINIT | ||
173 | 266 | ||
174 | /* | 267 | /* |
175 | * __v7_setup | 268 | * __v7_setup |
@@ -188,7 +281,8 @@ cpu_v7_name: | |||
188 | */ | 281 | */ |
189 | __v7_ca9mp_setup: | 282 | __v7_ca9mp_setup: |
190 | #ifdef CONFIG_SMP | 283 | #ifdef CONFIG_SMP |
191 | mrc p15, 0, r0, c1, c0, 1 | 284 | ALT_SMP(mrc p15, 0, r0, c1, c0, 1) |
285 | ALT_UP(mov r0, #(1 << 6)) @ fake it for UP | ||
192 | tst r0, #(1 << 6) @ SMP/nAMP mode enabled? | 286 | tst r0, #(1 << 6) @ SMP/nAMP mode enabled? |
193 | orreq r0, r0, #(1 << 6) | (1 << 0) @ Enable SMP/nAMP mode and | 287 | orreq r0, r0, #(1 << 6) | (1 << 0) @ Enable SMP/nAMP mode and |
194 | mcreq p15, 0, r0, c1, c0, 1 @ TLB ops broadcasting | 288 | mcreq p15, 0, r0, c1, c0, 1 @ TLB ops broadcasting |
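In C terms, the patched sequence reads the auxiliary control register (on UP kernels the ALT_UP alternative fakes bit 6 as already set, so the write is skipped) and, if coherency mode is off, enables it together with maintenance-operation broadcasting. A sketch, with the accessors written as the matching mrc/mcr and the macro names ours:

#define ACTLR_SMP  (1u << 6)  /* SMP/nAMP coherency mode */
#define ACTLR_FW   (1u << 0)  /* broadcast cache/TLB maintenance */

static inline unsigned int read_actlr(void)
{
	unsigned int v;
	__asm__ volatile("mrc p15, 0, %0, c1, c0, 1" : "=r"(v));
	return v;
}

static inline void write_actlr(unsigned int v)
{
	__asm__ volatile("mcr p15, 0, %0, c1, c0, 1" : : "r"(v));
}

static void ca9mp_enable_coherency(void)	/* privileged mode only */
{
	unsigned int actlr = read_actlr();

	if (!(actlr & ACTLR_SMP))
		write_actlr(actlr | ACTLR_SMP | ACTLR_FW);
}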
@@ -261,6 +355,12 @@ __v7_setup: | |||
261 | orreq r10, r10, #1 << 6 @ set bit #6 | 355 | orreq r10, r10, #1 << 6 @ set bit #6 |
262 | mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register | 356 | mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register |
263 | #endif | 357 | #endif |
358 | #ifdef CONFIG_ARM_ERRATA_751472 | ||
359 | cmp r6, #0x30 @ present prior to r3p0 | ||
360 | mrclt p15, 0, r10, c15, c0, 1 @ read diagnostic register | ||
361 | orrlt r10, r10, #1 << 11 @ set bit #11 | ||
362 | mcrlt p15, 0, r10, c15, c0, 1 @ write diagnostic register | ||
363 | #endif | ||
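Here r6 holds the CPU's variant and revision packed as (variant << 4) | revision earlier in __v7_setup (outside this hunk), so 0x30 means r3p0. A hedged C rendering of the gate, decoding straight from the MIDR (variant in bits [23:20], revision in [3:0]):

#include <stdbool.h>

static bool needs_erratum_751472(unsigned int midr)
{
	unsigned int rev = (((midr >> 20) & 0xf) << 4) | (midr & 0xf);

	return rev < 0x30;	/* affected prior to r3p0 */
}

On affected parts the workaround then sets bit 11 of the diagnostic register, as the mrclt/orrlt/mcrlt sequence shows.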
264 | 364 | ||
265 | 3: mov r10, #0 | 365 | 3: mov r10, #0 |
266 | #ifdef HARVARD_CACHE | 366 | #ifdef HARVARD_CACHE |
@@ -270,40 +370,13 @@ __v7_setup: | |||
270 | #ifdef CONFIG_MMU | 370 | #ifdef CONFIG_MMU |
271 | mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs | 371 | mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs |
272 | mcr p15, 0, r10, c2, c0, 2 @ TTB control register | 372 | mcr p15, 0, r10, c2, c0, 2 @ TTB control register |
273 | orr r4, r4, #TTB_FLAGS | 373 | ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP) |
274 | mcr p15, 0, r4, c2, c0, 1 @ load TTB1 | 374 | ALT_UP(orr r4, r4, #TTB_FLAGS_UP) |
275 | mov r10, #0x1f @ domains 0, 1 = manager | 375 | ALT_SMP(orr r8, r8, #TTB_FLAGS_SMP) |
276 | mcr p15, 0, r10, c3, c0, 0 @ load domain access register | 376 | ALT_UP(orr r8, r8, #TTB_FLAGS_UP) |
277 | /* | 377 | mcr p15, 0, r8, c2, c0, 1 @ load TTB1 |
278 | * Memory region attributes with SCTLR.TRE=1 | 378 | ldr r5, =PRRR @ PRRR |
279 | * | 379 | ldr r6, =NMRR @ NMRR |
280 | * n = TEX[0],C,B | ||
281 | * TR = PRRR[2n+1:2n] - memory type | ||
282 | * IR = NMRR[2n+1:2n] - inner cacheable property | ||
283 | * OR = NMRR[2n+17:2n+16] - outer cacheable property | ||
284 | * | ||
285 | *                n   TR   IR   OR | ||
286 | * UNCACHED      000  00 | ||
287 | * BUFFERABLE    001  10   00   00 | ||
288 | * WRITETHROUGH  010  10   10   10 | ||
289 | * WRITEBACK     011  10   11   11 | ||
290 | * reserved      110 | ||
291 | * WRITEALLOC    111  10   01   01 | ||
292 | * DEV_SHARED    100  01 | ||
293 | * DEV_NONSHARED 100  01 | ||
294 | * DEV_WC        001  10 | ||
295 | * DEV_CACHED    011  10 | ||
296 | * | ||
297 | * Other attributes: | ||
298 | * | ||
299 | * DS0 = PRRR[16] = 0 - device shareable property | ||
300 | * DS1 = PRRR[17] = 1 - device shareable property | ||
301 | * NS0 = PRRR[18] = 0 - normal shareable property | ||
302 | * NS1 = PRRR[19] = 1 - normal shareable property | ||
303 | * NOS = PRRR[24+n] = 1 - not outer shareable | ||
304 | */ | ||
305 | ldr r5, =0xff0a81a8 @ PRRR | ||
306 | ldr r6, =0x40e040e0 @ NMRR | ||
307 | mcr p15, 0, r5, c10, c2, 0 @ write PRRR | 380 | mcr p15, 0, r5, c10, c2, 0 @ write PRRR |
308 | mcr p15, 0, r6, c10, c2, 1 @ write NMRR | 381 | mcr p15, 0, r6, c10, c2, 1 @ write NMRR |
309 | #endif | 382 | #endif |
@@ -312,6 +385,10 @@ __v7_setup: | |||
312 | #ifdef CONFIG_CPU_ENDIAN_BE8 | 385 | #ifdef CONFIG_CPU_ENDIAN_BE8 |
313 | orr r6, r6, #1 << 25 @ big-endian page tables | 386 | orr r6, r6, #1 << 25 @ big-endian page tables |
314 | #endif | 387 | #endif |
388 | #ifdef CONFIG_SWP_EMULATE | ||
389 | orr r5, r5, #(1 << 10) @ set SW bit in "clear" | ||
390 | bic r6, r6, #(1 << 10) @ clear it in "mmuset" | ||
391 | #endif | ||
315 | mrc p15, 0, r0, c1, c0, 0 @ read control register | 392 | mrc p15, 0, r0, c1, c0, 0 @ read control register |
316 | bic r0, r0, r5 @ clear the masked bits | 393 | bic r0, r0, r5 @ clear the masked bits |
317 | orr r0, r0, r6 @ set them | 394 | orr r0, r0, r6 @ set them |
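Here r5/r6 are the clear/set masks loaded from v7_crval just below (outside this hunk): adding SCTLR.SW (bit 10) to the clear mask and dropping it from the set mask leaves SWP/SWPB undefined, so the kernel can trap and emulate them. The final read-modify-write is, schematically in C (accessor bodies match the mrc/mcr; the function itself is our sketch, not a kernel API):

#define SCTLR_SW  (1u << 10)  /* SWP/SWPB enable */

static inline unsigned int read_sctlr(void)
{
	unsigned int v;
	__asm__ volatile("mrc p15, 0, %0, c1, c0, 0" : "=r"(v));
	return v;
}

static inline void write_sctlr(unsigned int v)
{
	__asm__ volatile("mcr p15, 0, %0, c1, c0, 0" : : "r"(v));
}

static void v7_apply_crval(unsigned int clear, unsigned int set)
{
	unsigned int sctlr;

#ifdef CONFIG_SWP_EMULATE
	clear |= SCTLR_SW;   /* always disable SWP/SWPB ... */
	set   &= ~SCTLR_SW;  /* ... so they trap for emulation */
#endif
	sctlr = read_sctlr();
	sctlr &= ~clear;     /* bic r0, r0, r5 */
	sctlr |= set;        /* orr r0, r0, r6 */
	write_sctlr(sctlr);
}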
@@ -332,6 +409,8 @@ v7_crval: | |||
332 | __v7_setup_stack: | 409 | __v7_setup_stack: |
333 | .space 4 * 11 @ 11 registers | 410 | .space 4 * 11 @ 11 registers |
334 | 411 | ||
412 | __INITDATA | ||
413 | |||
335 | .type v7_processor_functions, #object | 414 | .type v7_processor_functions, #object |
336 | ENTRY(v7_processor_functions) | 415 | ENTRY(v7_processor_functions) |
337 | .word v7_early_abort | 416 | .word v7_early_abort |
@@ -343,8 +422,13 @@ ENTRY(v7_processor_functions) | |||
343 | .word cpu_v7_dcache_clean_area | 422 | .word cpu_v7_dcache_clean_area |
344 | .word cpu_v7_switch_mm | 423 | .word cpu_v7_switch_mm |
345 | .word cpu_v7_set_pte_ext | 424 | .word cpu_v7_set_pte_ext |
425 | .word cpu_v7_suspend_size | ||
426 | .word cpu_v7_do_suspend | ||
427 | .word cpu_v7_do_resume | ||
346 | .size v7_processor_functions, . - v7_processor_functions | 428 | .size v7_processor_functions, . - v7_processor_functions |
347 | 429 | ||
430 | .section ".rodata" | ||
431 | |||
348 | .type cpu_arch_name, #object | 432 | .type cpu_arch_name, #object |
349 | cpu_arch_name: | 433 | cpu_arch_name: |
350 | .asciz "armv7" | 434 | .asciz "armv7" |
@@ -362,15 +446,21 @@ cpu_elf_name: | |||
362 | __v7_ca9mp_proc_info: | 446 | __v7_ca9mp_proc_info: |
363 | .long 0x410fc090 @ Required ID value | 447 | .long 0x410fc090 @ Required ID value |
364 | .long 0xff0ffff0 @ Mask for ID | 448 | .long 0xff0ffff0 @ Mask for ID |
365 | .long PMD_TYPE_SECT | \ | 449 | ALT_SMP(.long \ |
450 | PMD_TYPE_SECT | \ | ||
366 | PMD_SECT_AP_WRITE | \ | 451 | PMD_SECT_AP_WRITE | \ |
367 | PMD_SECT_AP_READ | \ | 452 | PMD_SECT_AP_READ | \ |
368 | PMD_FLAGS | 453 | PMD_FLAGS_SMP) |
454 | ALT_UP(.long \ | ||
455 | PMD_TYPE_SECT | \ | ||
456 | PMD_SECT_AP_WRITE | \ | ||
457 | PMD_SECT_AP_READ | \ | ||
458 | PMD_FLAGS_UP) | ||
369 | .long PMD_TYPE_SECT | \ | 459 | .long PMD_TYPE_SECT | \ |
370 | PMD_SECT_XN | \ | 460 | PMD_SECT_XN | \ |
371 | PMD_SECT_AP_WRITE | \ | 461 | PMD_SECT_AP_WRITE | \ |
372 | PMD_SECT_AP_READ | 462 | PMD_SECT_AP_READ |
373 | b __v7_ca9mp_setup | 463 | W(b) __v7_ca9mp_setup |
374 | .long cpu_arch_name | 464 | .long cpu_arch_name |
375 | .long cpu_elf_name | 465 | .long cpu_elf_name |
376 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS | 466 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS |
@@ -388,15 +478,21 @@ __v7_ca9mp_proc_info: | |||
388 | __v7_proc_info: | 478 | __v7_proc_info: |
389 | .long 0x000f0000 @ Required ID value | 479 | .long 0x000f0000 @ Required ID value |
390 | .long 0x000f0000 @ Mask for ID | 480 | .long 0x000f0000 @ Mask for ID |
391 | .long PMD_TYPE_SECT | \ | 481 | ALT_SMP(.long \ |
482 | PMD_TYPE_SECT | \ | ||
483 | PMD_SECT_AP_WRITE | \ | ||
484 | PMD_SECT_AP_READ | \ | ||
485 | PMD_FLAGS_SMP) | ||
486 | ALT_UP(.long \ | ||
487 | PMD_TYPE_SECT | \ | ||
392 | PMD_SECT_AP_WRITE | \ | 488 | PMD_SECT_AP_WRITE | \ |
393 | PMD_SECT_AP_READ | \ | 489 | PMD_SECT_AP_READ | \ |
394 | PMD_FLAGS | 490 | PMD_FLAGS_UP) |
395 | .long PMD_TYPE_SECT | \ | 491 | .long PMD_TYPE_SECT | \ |
396 | PMD_SECT_XN | \ | 492 | PMD_SECT_XN | \ |
397 | PMD_SECT_AP_WRITE | \ | 493 | PMD_SECT_AP_WRITE | \ |
398 | PMD_SECT_AP_READ | 494 | PMD_SECT_AP_READ |
399 | b __v7_setup | 495 | W(b) __v7_setup |
400 | .long cpu_arch_name | 496 | .long cpu_arch_name |
401 | .long cpu_elf_name | 497 | .long cpu_elf_name |
402 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS | 498 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS |
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S index 361a51e49030..596213699f37 100644 --- a/arch/arm/mm/proc-xsc3.S +++ b/arch/arm/mm/proc-xsc3.S | |||
@@ -141,6 +141,17 @@ ENTRY(cpu_xsc3_do_idle) | |||
141 | /* ================================= CACHE ================================ */ | 141 | /* ================================= CACHE ================================ */ |
142 | 142 | ||
143 | /* | 143 | /* |
144 | * flush_icache_all() | ||
145 | * | ||
146 | * Unconditionally clean and invalidate the entire icache. | ||
147 | */ | ||
148 | ENTRY(xsc3_flush_icache_all) | ||
149 | mov r0, #0 | ||
150 | mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache | ||
151 | mov pc, lr | ||
152 | ENDPROC(xsc3_flush_icache_all) | ||
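This routine (and the identical xscale one further down) is a single cp15 write; from C it would amount to the following inline-asm wrapper, shown only to make the encoding concrete (the helper name is ours):

static inline void flush_icache_all_sketch(void)
{
	unsigned long zero = 0;

	/* c7, c5, 0: invalidate entire I cache, as in the mcr above */
	__asm__ volatile("mcr p15, 0, %0, c7, c5, 0" : : "r"(zero) : "memory");
}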
153 | |||
154 | /* | ||
144 | * flush_user_cache_all() | 155 | * flush_user_cache_all() |
145 | * | 156 | * |
146 | * Invalidate all cache entries in a particular address | 157 | * Invalidate all cache entries in a particular address |
@@ -325,6 +336,7 @@ ENTRY(xsc3_dma_unmap_area) | |||
325 | ENDPROC(xsc3_dma_unmap_area) | 336 | ENDPROC(xsc3_dma_unmap_area) |
326 | 337 | ||
327 | ENTRY(xsc3_cache_fns) | 338 | ENTRY(xsc3_cache_fns) |
339 | .long xsc3_flush_icache_all | ||
328 | .long xsc3_flush_kern_cache_all | 340 | .long xsc3_flush_kern_cache_all |
329 | .long xsc3_flush_user_cache_all | 341 | .long xsc3_flush_user_cache_all |
330 | .long xsc3_flush_user_cache_range | 342 | .long xsc3_flush_user_cache_range |
@@ -401,10 +413,53 @@ ENTRY(cpu_xsc3_set_pte_ext) | |||
401 | mov pc, lr | 413 | mov pc, lr |
402 | 414 | ||
403 | .ltorg | 415 | .ltorg |
404 | |||
405 | .align | 416 | .align |
406 | 417 | ||
407 | __INIT | 418 | .globl cpu_xsc3_suspend_size |
419 | .equ cpu_xsc3_suspend_size, 4 * 8 | ||
420 | #ifdef CONFIG_PM_SLEEP | ||
421 | ENTRY(cpu_xsc3_do_suspend) | ||
422 | stmfd sp!, {r4 - r10, lr} | ||
423 | mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode | ||
424 | mrc p15, 0, r5, c15, c1, 0 @ CP access reg | ||
425 | mrc p15, 0, r6, c13, c0, 0 @ PID | ||
426 | mrc p15, 0, r7, c3, c0, 0 @ domain ID | ||
427 | mrc p15, 0, r8, c2, c0, 0 @ translation table base addr | ||
428 | mrc p15, 0, r9, c1, c0, 1 @ auxiliary control reg | ||
429 | mrc p15, 0, r10, c1, c0, 0 @ control reg | ||
430 | bic r4, r4, #2 @ clear frequency change bit | ||
431 | stmia r0, {r1, r4 - r10} @ store v:p offset + cp regs | ||
432 | ldmia sp!, {r4 - r10, pc} | ||
433 | ENDPROC(cpu_xsc3_do_suspend) | ||
434 | |||
435 | ENTRY(cpu_xsc3_do_resume) | ||
436 | ldmia r0, {r1, r4 - r10} @ load v:p offset + cp regs | ||
437 | mov ip, #0 | ||
438 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I & D caches, BTB | ||
439 | mcr p15, 0, ip, c7, c10, 4 @ drain write (&fill) buffer | ||
440 | mcr p15, 0, ip, c7, c5, 4 @ flush prefetch buffer | ||
441 | mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs | ||
442 | mcr p14, 0, r4, c6, c0, 0 @ clock configuration, turbo mode. | ||
443 | mcr p15, 0, r5, c15, c1, 0 @ CP access reg | ||
444 | mcr p15, 0, r6, c13, c0, 0 @ PID | ||
445 | mcr p15, 0, r7, c3, c0, 0 @ domain ID | ||
446 | mcr p15, 0, r8, c2, c0, 0 @ translation table base addr | ||
447 | mcr p15, 0, r9, c1, c0, 1 @ auxiliary control reg | ||
448 | |||
449 | @ temporarily map resume_turn_on_mmu into the page table, | ||
450 | @ otherwise prefetch abort occurs after MMU is turned on | ||
451 | mov r0, r10 @ control register | ||
452 | mov r2, r8, lsr #14 @ get TTB0 base | ||
453 | mov r2, r2, lsl #14 @ clear low 14 bits (TTB is 16KiB aligned) | ||
454 | ldr r3, =0x542e @ section flags | ||
455 | b cpu_resume_mmu | ||
456 | ENDPROC(cpu_xsc3_do_resume) | ||
457 | #else | ||
458 | #define cpu_xsc3_do_suspend 0 | ||
459 | #define cpu_xsc3_do_resume 0 | ||
460 | #endif | ||
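The xsc3 save area follows the same fixed-layout convention as the ARMv7 one, but is eight words and leads with the v:p offset passed in r1. An illustrative C mirror (names ours, not kernel definitions):

/* One word each, in the stmia order of cpu_xsc3_do_suspend. */
struct xsc3_suspend_regs {
	unsigned long vp_offset;  /* r1: virt:phys offset */
	unsigned long cclkcfg;    /* p14 c6: clock config, freq-change bit cleared */
	unsigned long cpar;       /* c15, c1: CP access */
	unsigned long pid;        /* c13, c0: PID */
	unsigned long dacr;       /* c3,  c0: domain ID */
	unsigned long ttb;        /* c2,  c0: translation table base */
	unsigned long auxcr;      /* c1,  c0, 1: auxiliary control */
	unsigned long ctrl;       /* c1,  c0, 0: control register */
};	/* 8 * 4 bytes, matching cpu_xsc3_suspend_size */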
461 | |||
462 | __CPUINIT | ||
408 | 463 | ||
409 | .type __xsc3_setup, #function | 464 | .type __xsc3_setup, #function |
410 | __xsc3_setup: | 465 | __xsc3_setup: |
@@ -464,6 +519,9 @@ ENTRY(xsc3_processor_functions) | |||
464 | .word cpu_xsc3_dcache_clean_area | 519 | .word cpu_xsc3_dcache_clean_area |
465 | .word cpu_xsc3_switch_mm | 520 | .word cpu_xsc3_switch_mm |
466 | .word cpu_xsc3_set_pte_ext | 521 | .word cpu_xsc3_set_pte_ext |
522 | .word cpu_xsc3_suspend_size | ||
523 | .word cpu_xsc3_do_suspend | ||
524 | .word cpu_xsc3_do_resume | ||
467 | .size xsc3_processor_functions, . - xsc3_processor_functions | 525 | .size xsc3_processor_functions, . - xsc3_processor_functions |
468 | 526 | ||
469 | .section ".rodata" | 527 | .section ".rodata" |
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index 14075979bcba..42af97664c9d 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S | |||
@@ -181,6 +181,17 @@ ENTRY(cpu_xscale_do_idle) | |||
181 | /* ================================= CACHE ================================ */ | 181 | /* ================================= CACHE ================================ */ |
182 | 182 | ||
183 | /* | 183 | /* |
184 | * flush_icache_all() | ||
185 | * | ||
186 | * Unconditionally clean and invalidate the entire icache. | ||
187 | */ | ||
188 | ENTRY(xscale_flush_icache_all) | ||
189 | mov r0, #0 | ||
190 | mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache | ||
191 | mov pc, lr | ||
192 | ENDPROC(xscale_flush_icache_all) | ||
193 | |||
194 | /* | ||
184 | * flush_user_cache_all() | 195 | * flush_user_cache_all() |
185 | * | 196 | * |
186 | * Invalidate all cache entries in a particular address | 197 | * Invalidate all cache entries in a particular address |
@@ -384,7 +395,7 @@ ENTRY(xscale_dma_a0_map_area) | |||
384 | teq r2, #DMA_TO_DEVICE | 395 | teq r2, #DMA_TO_DEVICE |
385 | beq xscale_dma_clean_range | 396 | beq xscale_dma_clean_range |
386 | b xscale_dma_flush_range | 397 | b xscale_dma_flush_range |
387 | ENDPROC(xscsale_dma_a0_map_area) | 398 | ENDPROC(xscale_dma_a0_map_area) |
388 | 399 | ||
389 | /* | 400 | /* |
390 | * dma_unmap_area(start, size, dir) | 401 | * dma_unmap_area(start, size, dir) |
@@ -397,6 +408,7 @@ ENTRY(xscale_dma_unmap_area) | |||
397 | ENDPROC(xscale_dma_unmap_area) | 408 | ENDPROC(xscale_dma_unmap_area) |
398 | 409 | ||
399 | ENTRY(xscale_cache_fns) | 410 | ENTRY(xscale_cache_fns) |
411 | .long xscale_flush_icache_all | ||
400 | .long xscale_flush_kern_cache_all | 412 | .long xscale_flush_kern_cache_all |
401 | .long xscale_flush_user_cache_all | 413 | .long xscale_flush_user_cache_all |
402 | .long xscale_flush_user_cache_range | 414 | .long xscale_flush_user_cache_range |
@@ -488,8 +500,8 @@ ENTRY(cpu_xscale_set_pte_ext) | |||
488 | @ | 500 | @ |
489 | @ Erratum 40: must set memory to write-through for user read-only pages | 501 | @ Erratum 40: must set memory to write-through for user read-only pages |
490 | @ | 502 | @ |
491 | and ip, r1, #(L_PTE_MT_MASK | L_PTE_USER | L_PTE_WRITE) & ~(4 << 2) | 503 | and ip, r1, #(L_PTE_MT_MASK | L_PTE_USER | L_PTE_RDONLY) & ~(4 << 2) |
492 | teq ip, #L_PTE_MT_WRITEBACK | L_PTE_USER | 504 | teq ip, #L_PTE_MT_WRITEBACK | L_PTE_USER | L_PTE_RDONLY |
493 | 505 | ||
494 | moveq r1, #L_PTE_MT_WRITETHROUGH | 506 | moveq r1, #L_PTE_MT_WRITETHROUGH |
495 | and r1, r1, #L_PTE_MT_MASK | 507 | and r1, r1, #L_PTE_MT_MASK |
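The corrected test downgrades user-visible, read-only, writeback pages to writethrough. Masking the comparison with ~(4 << 2) clears one memory-type bit, which (on our reading) also lets the write-allocate encoding match the writeback case. As a C predicate, using the kernel's own L_PTE_* macros and <linux/types.h> for bool:

/* Sketch of the and/teq pair above; true when erratum 40 applies. */
static bool erratum40_needs_writethrough(unsigned long pte)
{
	unsigned long key = pte & ((L_PTE_MT_MASK | L_PTE_USER | L_PTE_RDONLY)
				   & ~(4 << 2));

	return key == (L_PTE_MT_WRITEBACK | L_PTE_USER | L_PTE_RDONLY);
}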
@@ -501,12 +513,50 @@ ENTRY(cpu_xscale_set_pte_ext) | |||
501 | xscale_set_pte_ext_epilogue | 513 | xscale_set_pte_ext_epilogue |
502 | mov pc, lr | 514 | mov pc, lr |
503 | 515 | ||
504 | |||
505 | .ltorg | 516 | .ltorg |
506 | |||
507 | .align | 517 | .align |
508 | 518 | ||
509 | __INIT | 519 | .globl cpu_xscale_suspend_size |
520 | .equ cpu_xscale_suspend_size, 4 * 7 | ||
521 | #ifdef CONFIG_PM_SLEEP | ||
522 | ENTRY(cpu_xscale_do_suspend) | ||
523 | stmfd sp!, {r4 - r10, lr} | ||
524 | mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode | ||
525 | mrc p15, 0, r5, c15, c1, 0 @ CP access reg | ||
526 | mrc p15, 0, r6, c13, c0, 0 @ PID | ||
527 | mrc p15, 0, r7, c3, c0, 0 @ domain ID | ||
528 | mrc p15, 0, r8, c2, c0, 0 @ translation table base addr | ||
529 | mrc p15, 0, r9, c1, c1, 0 @ auxiliary control reg | ||
530 | mrc p15, 0, r10, c1, c0, 0 @ control reg | ||
531 | bic r4, r4, #2 @ clear frequency change bit | ||
532 | stmia r0, {r4 - r10} @ store cp regs | ||
533 | ldmfd sp!, {r4 - r10, pc} | ||
534 | ENDPROC(cpu_xscale_do_suspend) | ||
535 | |||
536 | ENTRY(cpu_xscale_do_resume) | ||
537 | ldmia r0, {r4 - r10} @ load cp regs | ||
538 | mov ip, #0 | ||
539 | mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs | ||
540 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I & D caches, BTB | ||
541 | mcr p14, 0, r4, c6, c0, 0 @ clock configuration, turbo mode. | ||
542 | mcr p15, 0, r5, c15, c1, 0 @ CP access reg | ||
543 | mcr p15, 0, r6, c13, c0, 0 @ PID | ||
544 | mcr p15, 0, r7, c3, c0, 0 @ domain ID | ||
545 | mcr p15, 0, r8, c2, c0, 0 @ translation table base addr | ||
546 | mcr p15, 0, r9, c1, c1, 0 @ auxiliary control reg | ||
547 | mov r0, r10 @ control register | ||
548 | mov r2, r8, lsr #14 @ get TTB0 base | ||
549 | mov r2, r2, lsl #14 @ clear low 14 bits (TTB is 16KiB aligned) | ||
550 | ldr r3, =PMD_TYPE_SECT | PMD_SECT_BUFFERABLE | \ | ||
551 | PMD_SECT_CACHEABLE | PMD_SECT_AP_WRITE | ||
552 | b cpu_resume_mmu | ||
553 | ENDPROC(cpu_xscale_do_resume) | ||
554 | #else | ||
555 | #define cpu_xscale_do_suspend 0 | ||
556 | #define cpu_xscale_do_resume 0 | ||
557 | #endif | ||
558 | |||
559 | __CPUINIT | ||
510 | 560 | ||
511 | .type __xscale_setup, #function | 561 | .type __xscale_setup, #function |
512 | __xscale_setup: | 562 | __xscale_setup: |
@@ -553,6 +603,9 @@ ENTRY(xscale_processor_functions) | |||
553 | .word cpu_xscale_dcache_clean_area | 603 | .word cpu_xscale_dcache_clean_area |
554 | .word cpu_xscale_switch_mm | 604 | .word cpu_xscale_switch_mm |
555 | .word cpu_xscale_set_pte_ext | 605 | .word cpu_xscale_set_pte_ext |
606 | .word cpu_xscale_suspend_size | ||
607 | .word cpu_xscale_do_suspend | ||
608 | .word cpu_xscale_do_resume | ||
556 | .size xscale_processor_functions, . - xscale_processor_functions | 609 | .size xscale_processor_functions, . - xscale_processor_functions |
557 | 610 | ||
558 | .section ".rodata" | 611 | .section ".rodata" |
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S index f3f288a9546d..53cd5b454673 100644 --- a/arch/arm/mm/tlb-v7.S +++ b/arch/arm/mm/tlb-v7.S | |||
@@ -13,6 +13,7 @@ | |||
13 | */ | 13 | */ |
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/linkage.h> | 15 | #include <linux/linkage.h> |
16 | #include <asm/assembler.h> | ||
16 | #include <asm/asm-offsets.h> | 17 | #include <asm/asm-offsets.h> |
17 | #include <asm/page.h> | 18 | #include <asm/page.h> |
18 | #include <asm/tlbflush.h> | 19 | #include <asm/tlbflush.h> |
@@ -41,20 +42,15 @@ ENTRY(v7wbi_flush_user_tlb_range) | |||
41 | orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA | 42 | orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA |
42 | mov r1, r1, lsl #PAGE_SHIFT | 43 | mov r1, r1, lsl #PAGE_SHIFT |
43 | 1: | 44 | 1: |
44 | #ifdef CONFIG_SMP | 45 | ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable) |
45 | mcr p15, 0, r0, c8, c3, 1 @ TLB invalidate U MVA (shareable) | 46 | ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA |
46 | #else | 47 | |
47 | mcr p15, 0, r0, c8, c7, 1 @ TLB invalidate U MVA | ||
48 | #endif | ||
49 | add r0, r0, #PAGE_SZ | 48 | add r0, r0, #PAGE_SZ |
50 | cmp r0, r1 | 49 | cmp r0, r1 |
51 | blo 1b | 50 | blo 1b |
52 | mov ip, #0 | 51 | mov ip, #0 |
53 | #ifdef CONFIG_SMP | 52 | ALT_SMP(mcr p15, 0, ip, c7, c1, 6) @ flush BTAC/BTB Inner Shareable |
54 | mcr p15, 0, ip, c7, c1, 6 @ flush BTAC/BTB Inner Shareable | 53 | ALT_UP(mcr p15, 0, ip, c7, c5, 6) @ flush BTAC/BTB |
55 | #else | ||
56 | mcr p15, 0, ip, c7, c5, 6 @ flush BTAC/BTB | ||
57 | #endif | ||
58 | dsb | 54 | dsb |
59 | mov pc, lr | 55 | mov pc, lr |
60 | ENDPROC(v7wbi_flush_user_tlb_range) | 56 | ENDPROC(v7wbi_flush_user_tlb_range) |
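The ALT_SMP/ALT_UP pairs let a single image carry both the inner-shareable and local forms of each maintenance operation, patched at boot, replacing the old compile-time #ifdef CONFIG_SMP selection shown on the left. The loop itself is a per-page invalidate walk; roughly, in standalone C (UP encodings in the inline asm, helper names ours):

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

static inline void tlbimva(unsigned long mva)
{
	/* UP form: c8, c7, 1 (SMP kernels are patched to c8, c3, 1) */
	__asm__ volatile("mcr p15, 0, %0, c8, c7, 1" : : "r"(mva));
}

static inline void flush_btb(void)
{
	unsigned long zero = 0;

	/* UP form: c7, c5, 6 (SMP: c7, c1, 6, inner shareable) */
	__asm__ volatile("mcr p15, 0, %0, c7, c5, 6" : : "r"(zero));
}

static void v7_flush_user_tlb_range_sketch(unsigned long start,
					   unsigned long end,
					   unsigned long asid)
{
	unsigned long va;

	for (va = start & PAGE_MASK; va < end; va += PAGE_SIZE)
		tlbimva(va | asid);	/* MVA + ASID, as built in r0 */
	flush_btb();
	__asm__ volatile("dsb");
}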
@@ -74,20 +70,14 @@ ENTRY(v7wbi_flush_kern_tlb_range) | |||
74 | mov r0, r0, lsl #PAGE_SHIFT | 70 | mov r0, r0, lsl #PAGE_SHIFT |
75 | mov r1, r1, lsl #PAGE_SHIFT | 71 | mov r1, r1, lsl #PAGE_SHIFT |
76 | 1: | 72 | 1: |
77 | #ifdef CONFIG_SMP | 73 | ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable) |
78 | mcr p15, 0, r0, c8, c3, 1 @ TLB invalidate U MVA (shareable) | 74 | ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA |
79 | #else | ||
80 | mcr p15, 0, r0, c8, c7, 1 @ TLB invalidate U MVA | ||
81 | #endif | ||
82 | add r0, r0, #PAGE_SZ | 75 | add r0, r0, #PAGE_SZ |
83 | cmp r0, r1 | 76 | cmp r0, r1 |
84 | blo 1b | 77 | blo 1b |
85 | mov r2, #0 | 78 | mov r2, #0 |
86 | #ifdef CONFIG_SMP | 79 | ALT_SMP(mcr p15, 0, r2, c7, c1, 6) @ flush BTAC/BTB Inner Shareable |
87 | mcr p15, 0, r2, c7, c1, 6 @ flush BTAC/BTB Inner Shareable | 80 | ALT_UP(mcr p15, 0, r2, c7, c5, 6) @ flush BTAC/BTB |
88 | #else | ||
89 | mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB | ||
90 | #endif | ||
91 | dsb | 81 | dsb |
92 | isb | 82 | isb |
93 | mov pc, lr | 83 | mov pc, lr |
@@ -99,5 +89,6 @@ ENDPROC(v7wbi_flush_kern_tlb_range) | |||
99 | ENTRY(v7wbi_tlb_fns) | 89 | ENTRY(v7wbi_tlb_fns) |
100 | .long v7wbi_flush_user_tlb_range | 90 | .long v7wbi_flush_user_tlb_range |
101 | .long v7wbi_flush_kern_tlb_range | 91 | .long v7wbi_flush_kern_tlb_range |
102 | .long v7wbi_tlb_flags | 92 | ALT_SMP(.long v7wbi_tlb_flags_smp) |
93 | ALT_UP(.long v7wbi_tlb_flags_up) | ||
103 | .size v7wbi_tlb_fns, . - v7wbi_tlb_fns | 94 | .size v7wbi_tlb_fns, . - v7wbi_tlb_fns |
diff --git a/arch/arm/mm/vmregion.c b/arch/arm/mm/vmregion.c index 935993e1b1ef..036fdbfdd62f 100644 --- a/arch/arm/mm/vmregion.c +++ b/arch/arm/mm/vmregion.c | |||
@@ -38,7 +38,7 @@ struct arm_vmregion * | |||
38 | arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align, | 38 | arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align, |
39 | size_t size, gfp_t gfp) | 39 | size_t size, gfp_t gfp) |
40 | { | 40 | { |
41 | unsigned long addr = head->vm_start, end = head->vm_end - size; | 41 | unsigned long start = head->vm_start, addr = head->vm_end; |
42 | unsigned long flags; | 42 | unsigned long flags; |
43 | struct arm_vmregion *c, *new; | 43 | struct arm_vmregion *c, *new; |
44 | 44 | ||
@@ -54,21 +54,20 @@ arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align, | |||
54 | 54 | ||
55 | spin_lock_irqsave(&head->vm_lock, flags); | 55 | spin_lock_irqsave(&head->vm_lock, flags); |
56 | 56 | ||
57 | list_for_each_entry(c, &head->vm_list, vm_list) { | 57 | addr = rounddown(addr - size, align); |
58 | if ((addr + size) < addr) | 58 | list_for_each_entry_reverse(c, &head->vm_list, vm_list) { |
59 | goto nospc; | 59 | if (addr >= c->vm_end) |
60 | if ((addr + size) <= c->vm_start) | ||
61 | goto found; | 60 | goto found; |
62 | addr = ALIGN(c->vm_end, align); | 61 | addr = rounddown(c->vm_start - size, align); |
63 | if (addr > end) | 62 | if (addr < start) |
64 | goto nospc; | 63 | goto nospc; |
65 | } | 64 | } |
66 | 65 | ||
67 | found: | 66 | found: |
68 | /* | 67 | /* |
69 | * Insert this entry _before_ the one we found. | 68 | * Insert this entry after the one we found. |
70 | */ | 69 | */ |
71 | list_add_tail(&new->vm_list, &c->vm_list); | 70 | list_add(&new->vm_list, &c->vm_list); |
72 | new->vm_start = addr; | 71 | new->vm_start = addr; |
73 | new->vm_end = addr + size; | 72 | new->vm_end = addr + size; |
74 | new->vm_active = 1; | 73 | new->vm_active = 1; |
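The allocator thus changes from a bottom-up first-fit over [vm_start, vm_end) to a top-down one: start from the highest aligned address that fits, walk existing regions from the top, and drop the candidate below each region that blocks it until it clears one or falls under vm_start. A self-contained sketch of the same search over a sorted array (power-of-two alignment assumed; names ours):

#include <stddef.h>

struct region { unsigned long start, end; };  /* sorted ascending, non-overlapping */

#define rounddown(x, a)  ((x) & ~((a) - 1))   /* power-of-two alignment only */

/* Returns the chosen start address, or 0 if nothing fits. */
static unsigned long alloc_topdown(const struct region *r, size_t n,
				   unsigned long vm_start, unsigned long vm_end,
				   unsigned long size, unsigned long align)
{
	unsigned long addr;
	size_t i = n;

	if (vm_end - vm_start < size)
		return 0;

	addr = rounddown(vm_end - size, align);
	while (i-- > 0) {
		if (addr >= r[i].end)		/* gap above r[i]: found */
			return addr;
		addr = rounddown(r[i].start - size, align);
		if (addr < vm_start)		/* fell off the bottom: nospc */
			return 0;
	}
	return addr;	/* no region blocks it: lowest gap wins */
}

Because the candidate is found below an existing entry, the new region is linked after the blocking entry rather than before it, hence the list_add() and comment change above.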