commit ee3e542fec6e69bc9fb668698889a37d93950ddf
tree   e74ee766a4764769ef1d3d45d266b4dea64101d3 /arch/arm/mm
parent fe2a801b50c0bb8039d627e5ae1fec249d10ff39
parent f1d6e17f540af37bb1891480143669ba7636c4cf
author     Sage Weil <sage@inktank.com>  2013-08-15 14:11:45 -0400
committer  Sage Weil <sage@inktank.com>  2013-08-15 14:11:45 -0400

    Merge remote-tracking branch 'linus/master' into testing

Diffstat (limited to 'arch/arm/mm'):
 41 files changed, 1065 insertions(+), 249 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 35955b54944c..db5c2cab8fda 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -392,11 +392,21 @@ config CPU_V7
 	select CPU_CACHE_V7
 	select CPU_CACHE_VIPT
 	select CPU_COPY_V6 if MMU
-	select CPU_CP15_MMU
+	select CPU_CP15_MMU if MMU
+	select CPU_CP15_MPU if !MMU
 	select CPU_HAS_ASID if MMU
 	select CPU_PABRT_V7
 	select CPU_TLB_V7 if MMU
 
+# ARMv7M
+config CPU_V7M
+	bool
+	select CPU_32v7M
+	select CPU_ABRT_NOMMU
+	select CPU_CACHE_NOP
+	select CPU_PABRT_LEGACY
+	select CPU_THUMBONLY
+
 config CPU_THUMBONLY
 	bool
 	# There are no CPUs available with MMU that don't implement an ARM ISA:
@@ -411,24 +421,28 @@ config CPU_32v3
 	select CPU_USE_DOMAINS if MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 	select TLS_REG_EMUL if SMP || !MMU
+	select NEED_KUSER_HELPERS
 
 config CPU_32v4
 	bool
 	select CPU_USE_DOMAINS if MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 	select TLS_REG_EMUL if SMP || !MMU
+	select NEED_KUSER_HELPERS
 
 config CPU_32v4T
 	bool
 	select CPU_USE_DOMAINS if MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 	select TLS_REG_EMUL if SMP || !MMU
+	select NEED_KUSER_HELPERS
 
 config CPU_32v5
 	bool
 	select CPU_USE_DOMAINS if MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 	select TLS_REG_EMUL if SMP || !MMU
+	select NEED_KUSER_HELPERS
 
 config CPU_32v6
 	bool
@@ -441,6 +455,9 @@ config CPU_32v6K
 config CPU_32v7
 	bool
 
+config CPU_32v7M
+	bool
+
 # The abort model
 config CPU_ABRT_NOMMU
 	bool
@@ -491,6 +508,9 @@ config CPU_CACHE_V6
 config CPU_CACHE_V7
 	bool
 
+config CPU_CACHE_NOP
+	bool
+
 config CPU_CACHE_VIVT
 	bool
 
@@ -613,7 +633,11 @@ config ARCH_DMA_ADDR_T_64BIT
 
 config ARM_THUMB
 	bool "Support Thumb user binaries" if !CPU_THUMBONLY
-	depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || CPU_V7 || CPU_FEROCEON
+	depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || \
+		CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || \
+		CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || \
+		CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || \
+		CPU_V7 || CPU_FEROCEON || CPU_V7M
 	default y
 	help
 	  Say Y if you want to include kernel support for running user space
@@ -756,6 +780,7 @@ config CPU_BPREDICT_DISABLE
 
 config TLS_REG_EMUL
 	bool
+	select NEED_KUSER_HELPERS
 	help
 	  An SMP system using a pre-ARMv6 processor (there are apparently
 	  a few prototypes like that in existence) and therefore access to
@@ -763,11 +788,40 @@ config TLS_REG_EMUL
 
 config NEEDS_SYSCALL_FOR_CMPXCHG
 	bool
+	select NEED_KUSER_HELPERS
 	help
 	  SMP on a pre-ARMv6 processor?  Well OK then.
 	  Forget about fast user space cmpxchg support.
 	  It is just not possible.
 
+config NEED_KUSER_HELPERS
+	bool
+
+config KUSER_HELPERS
+	bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
+	default y
+	help
+	  Warning: disabling this option may break user programs.
+
+	  Provide kuser helpers in the vector page.  The kernel provides
+	  helper code to userspace in read only form at a fixed location
+	  in the high vector page to allow userspace to be independent of
+	  the CPU type fitted to the system.  This permits binaries to be
+	  run on ARMv4 through to ARMv7 without modification.
+
+	  However, the fixed address nature of these helpers can be used
+	  by ROP (return oriented programming) authors when creating
+	  exploits.
+
+	  If all of the binaries and libraries which run on your platform
+	  are built specifically for your platform, and make no use of
+	  these helpers, then you can turn this option off.  However,
+	  when such a binary or library is run, it will receive a SIGILL
+	  signal, which will terminate the program.
+
+	  Say N here only if you are absolutely certain that you do not
+	  need these helpers; otherwise, the safe option is to say Y.
+
 config DMA_CACHE_RWFO
 	bool "Enable read/write for ownership DMA cache maintenance"
 	depends on CPU_V6K && SMP
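
The new KUSER_HELPERS option only matters because userspace calls into the vector page at hard-wired addresses. As a rough illustration (helper addresses as documented in Documentation/arm/kernel_user_helpers.txt; this sketch only runs on an ARM kernel that actually provides the vector page), a binary using the cmpxchg helper looks like this — exactly the kind of program that receives SIGILL when the option is turned off:

```c
#include <stdio.h>

/* Function type of the cmpxchg helper; the address below is fixed ABI
 * per Documentation/arm/kernel_user_helpers.txt. */
typedef int (kuser_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define __kuser_cmpxchg		(*(kuser_cmpxchg_t *)0xffff0fc0)
#define __kuser_helper_version	(*(const int *)0xffff0ffc)

int main(void)
{
	volatile int val = 1;

	printf("kuser helper version: %d\n", __kuser_helper_version);

	/* returns 0 when *ptr matched oldval and was replaced atomically */
	if (__kuser_cmpxchg(1, 2, &val) == 0)
		printf("val is now %d\n", val);
	return 0;
}
```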
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 9e51be96f635..ecfe6e53f6e0 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_MODULES)		+= proc-syms.o
 
 obj-$(CONFIG_ALIGNMENT_TRAP)	+= alignment.o
 obj-$(CONFIG_HIGHMEM)		+= highmem.o
+obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 
 obj-$(CONFIG_CPU_ABRT_NOMMU)	+= abort-nommu.o
 obj-$(CONFIG_CPU_ABRT_EV4)	+= abort-ev4.o
@@ -39,6 +40,7 @@ obj-$(CONFIG_CPU_CACHE_V4WB)	+= cache-v4wb.o
 obj-$(CONFIG_CPU_CACHE_V6)	+= cache-v6.o
 obj-$(CONFIG_CPU_CACHE_V7)	+= cache-v7.o
 obj-$(CONFIG_CPU_CACHE_FA)	+= cache-fa.o
+obj-$(CONFIG_CPU_CACHE_NOP)	+= cache-nop.o
 
 AFLAGS_cache-v6.o	:=-Wa,-march=armv6
 AFLAGS_cache-v7.o	:=-Wa,-march=armv7-a
@@ -87,6 +89,7 @@ obj-$(CONFIG_CPU_FEROCEON)	+= proc-feroceon.o
 obj-$(CONFIG_CPU_V6)		+= proc-v6.o
 obj-$(CONFIG_CPU_V6K)		+= proc-v6.o
 obj-$(CONFIG_CPU_V7)		+= proc-v7.o
+obj-$(CONFIG_CPU_V7M)		+= proc-v7m.o
 
 AFLAGS_proc-v6.o	:=-Wa,-march=armv6
 AFLAGS_proc-v7.o	:=-Wa,-march=armv7-a
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index c465faca51b0..d70e0aba0c9d 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -523,6 +523,147 @@ static void aurora_flush_range(unsigned long start, unsigned long end)
 	}
 }
 
+/*
+ * For certain Broadcom SoCs, depending on the address range, different offsets
+ * need to be added to the address before passing it to L2 for
+ * invalidation/clean/flush
+ *
+ * Section Address Range              Offset        EMI
+ *   1     0x00000000 - 0x3FFFFFFF    0x80000000    VC
+ *   2     0x40000000 - 0xBFFFFFFF    0x40000000    SYS
+ *   3     0xC0000000 - 0xFFFFFFFF    0x80000000    VC
+ *
+ * When the start and end addresses have crossed two different sections, we
+ * need to break the L2 operation into two, each within its own section.
+ * For example, if we need to invalidate addresses that start at 0xBFFF0000
+ * and end at 0xC0001000, we need to invalidate 1) 0xBFFF0000 - 0xBFFFFFFF
+ * and 2) 0xC0000000 - 0xC0001000
+ *
+ * Note 1:
+ * By breaking a single L2 operation into two, we may potentially suffer some
+ * performance hit, but keep in mind the cross section case is very rare
+ *
+ * Note 2:
+ * We do not need to handle the case when the start address is in
+ * Section 1 and the end address is in Section 3, since it is not a valid use
+ * case
+ *
+ * Note 3:
+ * Section 1 in practical terms can no longer be used on rev A2. Because of
+ * that the code does not need to handle section 1 at all.
+ *
+ */
+#define BCM_SYS_EMI_START_ADDR        0x40000000UL
+#define BCM_VC_EMI_SEC3_START_ADDR    0xC0000000UL
+
+#define BCM_SYS_EMI_OFFSET            0x40000000UL
+#define BCM_VC_EMI_OFFSET             0x80000000UL
+
+static inline int bcm_addr_is_sys_emi(unsigned long addr)
+{
+	return (addr >= BCM_SYS_EMI_START_ADDR) &&
+		(addr < BCM_VC_EMI_SEC3_START_ADDR);
+}
+
+static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
+{
+	if (bcm_addr_is_sys_emi(addr))
+		return addr + BCM_SYS_EMI_OFFSET;
+	else
+		return addr + BCM_VC_EMI_OFFSET;
+}
+
+static void bcm_inv_range(unsigned long start, unsigned long end)
+{
+	unsigned long new_start, new_end;
+
+	BUG_ON(start < BCM_SYS_EMI_START_ADDR);
+
+	if (unlikely(end <= start))
+		return;
+
+	new_start = bcm_l2_phys_addr(start);
+	new_end = bcm_l2_phys_addr(end);
+
+	/* normal case, no cross section between start and end */
+	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
+		l2x0_inv_range(new_start, new_end);
+		return;
+	}
+
+	/* They cross sections, so it can only be a cross from section
+	 * 2 to section 3
+	 */
+	l2x0_inv_range(new_start,
+		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
+	l2x0_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
+		new_end);
+}
+
+static void bcm_clean_range(unsigned long start, unsigned long end)
+{
+	unsigned long new_start, new_end;
+
+	BUG_ON(start < BCM_SYS_EMI_START_ADDR);
+
+	if (unlikely(end <= start))
+		return;
+
+	if ((end - start) >= l2x0_size) {
+		l2x0_clean_all();
+		return;
+	}
+
+	new_start = bcm_l2_phys_addr(start);
+	new_end = bcm_l2_phys_addr(end);
+
+	/* normal case, no cross section between start and end */
+	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
+		l2x0_clean_range(new_start, new_end);
+		return;
+	}
+
+	/* They cross sections, so it can only be a cross from section
+	 * 2 to section 3
+	 */
+	l2x0_clean_range(new_start,
+		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
+	l2x0_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
+		new_end);
+}
+
+static void bcm_flush_range(unsigned long start, unsigned long end)
+{
+	unsigned long new_start, new_end;
+
+	BUG_ON(start < BCM_SYS_EMI_START_ADDR);
+
+	if (unlikely(end <= start))
+		return;
+
+	if ((end - start) >= l2x0_size) {
+		l2x0_flush_all();
+		return;
+	}
+
+	new_start = bcm_l2_phys_addr(start);
+	new_end = bcm_l2_phys_addr(end);
+
+	/* normal case, no cross section between start and end */
+	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
+		l2x0_flush_range(new_start, new_end);
+		return;
+	}
+
+	/* They cross sections, so it can only be a cross from section
+	 * 2 to section 3
+	 */
+	l2x0_flush_range(new_start,
+		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
+	l2x0_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
+		new_end);
+}
+
 static void __init l2x0_of_setup(const struct device_node *np,
 				 u32 *aux_val, u32 *aux_mask)
 {
@@ -765,6 +906,21 @@ static const struct l2x0_of_data aurora_no_outer_data = {
 	},
 };
 
+static const struct l2x0_of_data bcm_l2x0_data = {
+	.setup = pl310_of_setup,
+	.save  = pl310_save,
+	.outer_cache = {
+		.resume      = pl310_resume,
+		.inv_range   = bcm_inv_range,
+		.clean_range = bcm_clean_range,
+		.flush_range = bcm_flush_range,
+		.sync        = l2x0_cache_sync,
+		.flush_all   = l2x0_flush_all,
+		.inv_all     = l2x0_inv_all,
+		.disable     = l2x0_disable,
+	},
+};
+
 static const struct of_device_id l2x0_ids[] __initconst = {
 	{ .compatible = "arm,pl310-cache", .data = (void *)&pl310_data },
 	{ .compatible = "arm,l220-cache", .data = (void *)&l2x0_data },
@@ -773,6 +929,8 @@ static const struct of_device_id l2x0_ids[] __initconst = {
 	  .data = (void *)&aurora_no_outer_data},
 	{ .compatible = "marvell,aurora-outer-cache",
 	  .data = (void *)&aurora_with_outer_data},
+	{ .compatible = "bcm,bcm11351-a2-pl310-cache",
+	  .data = (void *)&bcm_l2x0_data},
 	{}
 };
 
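The bcm_* wrappers above all follow the same pattern: translate both endpoints, and split the operation when the range crosses from section 2 into section 3. A minimal user-space sketch of that logic (do_l2_op() is a hypothetical stand-in for the real l2x0_*_range callbacks; the constants are the ones from the patch):

```c
#include <stdio.h>

#define BCM_SYS_EMI_START_ADDR     0x40000000UL
#define BCM_VC_EMI_SEC3_START_ADDR 0xC0000000UL
#define BCM_SYS_EMI_OFFSET         0x40000000UL
#define BCM_VC_EMI_OFFSET          0x80000000UL

/* Section 2 (SYS EMI) lies between the two boundaries above. */
static int is_sys_emi(unsigned long addr)
{
	return addr >= BCM_SYS_EMI_START_ADDR &&
	       addr < BCM_VC_EMI_SEC3_START_ADDR;
}

static unsigned long l2_phys(unsigned long addr)
{
	return addr + (is_sys_emi(addr) ? BCM_SYS_EMI_OFFSET
					: BCM_VC_EMI_OFFSET);
}

/* hypothetical stand-in for l2x0_inv_range()/clean_range()/flush_range() */
static void do_l2_op(unsigned long start, unsigned long end)
{
	printf("L2 op on 0x%08lx - 0x%08lx\n", start, end);
}

static void bcm_range_op(unsigned long start, unsigned long end)
{
	if (is_sys_emi(end) || !is_sys_emi(start)) {
		/* both ends in the same section: one operation */
		do_l2_op(l2_phys(start), l2_phys(end));
		return;
	}
	/* crosses from section 2 into section 3: split at the boundary */
	do_l2_op(l2_phys(start), l2_phys(BCM_VC_EMI_SEC3_START_ADDR - 1));
	do_l2_op(l2_phys(BCM_VC_EMI_SEC3_START_ADDR), l2_phys(end));
}

int main(void)
{
	/* the example range from the comment in the patch */
	bcm_range_op(0xBFFF0000UL, 0xC0001000UL);
	return 0;
}
```

Note how the guard `is_sys_emi(end) || !is_sys_emi(start)` covers both same-section cases: since start is already known to be at or above the section 2 base, it is either "both in section 2" or "both in section 3".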
diff --git a/arch/arm/mm/cache-nop.S b/arch/arm/mm/cache-nop.S
new file mode 100644
index 000000000000..8e12ddca0031
--- /dev/null
+++ b/arch/arm/mm/cache-nop.S
@@ -0,0 +1,50 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+#include "proc-macros.S"
+
+ENTRY(nop_flush_icache_all)
+	mov	pc, lr
+ENDPROC(nop_flush_icache_all)
+
+	.globl	nop_flush_kern_cache_all
+	.equ	nop_flush_kern_cache_all, nop_flush_icache_all
+
+	.globl	nop_flush_kern_cache_louis
+	.equ	nop_flush_kern_cache_louis, nop_flush_icache_all
+
+	.globl	nop_flush_user_cache_all
+	.equ	nop_flush_user_cache_all, nop_flush_icache_all
+
+	.globl	nop_flush_user_cache_range
+	.equ	nop_flush_user_cache_range, nop_flush_icache_all
+
+	.globl	nop_coherent_kern_range
+	.equ	nop_coherent_kern_range, nop_flush_icache_all
+
+ENTRY(nop_coherent_user_range)
+	mov	r0, #0
+	mov	pc, lr
+ENDPROC(nop_coherent_user_range)
+
+	.globl	nop_flush_kern_dcache_area
+	.equ	nop_flush_kern_dcache_area, nop_flush_icache_all
+
+	.globl	nop_dma_flush_range
+	.equ	nop_dma_flush_range, nop_flush_icache_all
+
+	.globl	nop_dma_map_area
+	.equ	nop_dma_map_area, nop_flush_icache_all
+
+	.globl	nop_dma_unmap_area
+	.equ	nop_dma_unmap_area, nop_flush_icache_all
+
+	__INITDATA
+
+@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
+	define_cache_functions nop
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 2ac37372ef52..4a0544492f10 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -20,6 +20,7 @@
 #include <asm/smp_plat.h>
 #include <asm/thread_notify.h>
 #include <asm/tlbflush.h>
+#include <asm/proc-fns.h>
 
 /*
  * On ARMv6, we have the following structure in the Context ID:
@@ -39,33 +40,51 @@
  * non 64-bit operations.
  */
 #define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
-#define NUM_USER_ASIDS		(ASID_FIRST_VERSION - 1)
-
-#define ASID_TO_IDX(asid)	((asid & ~ASID_MASK) - 1)
-#define IDX_TO_ASID(idx)	((idx + 1) & ~ASID_MASK)
+#define NUM_USER_ASIDS		ASID_FIRST_VERSION
 
 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
 static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
 
-DEFINE_PER_CPU(atomic64_t, active_asids);
+static DEFINE_PER_CPU(atomic64_t, active_asids);
 static DEFINE_PER_CPU(u64, reserved_asids);
 static cpumask_t tlb_flush_pending;
 
+#ifdef CONFIG_ARM_ERRATA_798181
+void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
+			     cpumask_t *mask)
+{
+	int cpu;
+	unsigned long flags;
+	u64 context_id, asid;
+
+	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+	context_id = mm->context.id.counter;
+	for_each_online_cpu(cpu) {
+		if (cpu == this_cpu)
+			continue;
+		/*
+		 * We only need to send an IPI if the other CPUs are
+		 * running the same ASID as the one being invalidated.
+		 */
+		asid = per_cpu(active_asids, cpu).counter;
+		if (asid == 0)
+			asid = per_cpu(reserved_asids, cpu);
+		if (context_id == asid)
+			cpumask_set_cpu(cpu, mask);
+	}
+	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
+}
+#endif
+
 #ifdef CONFIG_ARM_LPAE
 static void cpu_set_reserved_ttbr0(void)
 {
-	unsigned long ttbl = __pa(swapper_pg_dir);
-	unsigned long ttbh = 0;
-
 	/*
 	 * Set TTBR0 to swapper_pg_dir which contains only global entries. The
 	 * ASID is set to 0.
 	 */
-	asm volatile(
-	"	mcrr	p15, 0, %0, %1, c2	@ set TTBR0\n"
-	:
-	: "r" (ttbl), "r" (ttbh));
+	cpu_set_ttbr(0, __pa(swapper_pg_dir));
 	isb();
 }
 #else
@@ -128,7 +147,16 @@ static void flush_context(unsigned int cpu)
 			asid = 0;
 		} else {
 			asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
-			__set_bit(ASID_TO_IDX(asid), asid_map);
+			/*
+			 * If this CPU has already been through a
+			 * rollover, but hasn't run another task in
+			 * the meantime, we must preserve its reserved
+			 * ASID, as this is the only trace we have of
+			 * the process it is still running.
+			 */
+			if (asid == 0)
+				asid = per_cpu(reserved_asids, i);
+			__set_bit(asid & ~ASID_MASK, asid_map);
 		}
 		per_cpu(reserved_asids, i) = asid;
 	}
@@ -167,17 +195,19 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 		/*
 		 * Allocate a free ASID. If we can't find one, take a
 		 * note of the currently active ASIDs and mark the TLBs
-		 * as requiring flushes.
+		 * as requiring flushes. We always count from ASID #1,
+		 * as we reserve ASID #0 to switch via TTBR0 and indicate
+		 * rollover events.
 		 */
-		asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
+		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
 		if (asid == NUM_USER_ASIDS) {
 			generation = atomic64_add_return(ASID_FIRST_VERSION,
 							 &asid_generation);
 			flush_context(cpu);
-			asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
+			asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
 		}
 		__set_bit(asid, asid_map);
-		asid = generation | IDX_TO_ASID(asid);
+		asid |= generation;
 		cpumask_clear(mm_cpumask(mm));
 	}
@@ -215,7 +245,8 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
 		local_flush_bp_all();
 		local_flush_tlb_all();
-		dummy_flush_tlb_a15_erratum();
+		if (erratum_a15_798181())
+			dummy_flush_tlb_a15_erratum();
 	}
 
 	atomic64_set(&per_cpu(active_asids, cpu), asid);
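The context.c hunks above renumber the ASID space so that ASID #0 is never handed to a user context: allocation now searches the bitmap from bit 1, and the hardware ASID is the bitmap index itself rather than index + 1. A toy model of the generation-plus-bitmap scheme (deliberately ignoring the locking, per-CPU state, and reserved-ASID preservation of the real allocator):

```c
#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* 8-bit ASIDs; the generation lives in the upper bits of the context id. */
#define ASID_BITS		8
#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
#define NUM_USER_ASIDS		ASID_FIRST_VERSION

static uint64_t generation = ASID_FIRST_VERSION;
static unsigned char asid_map[NUM_USER_ASIDS];	/* bitmap stand-in */

static uint64_t new_context(void)
{
	uint64_t asid;

	/* always search from 1: ASID #0 marks "no ASID" / rollover */
	for (asid = 1; asid < NUM_USER_ASIDS; asid++)
		if (!asid_map[asid])
			break;
	if (asid == NUM_USER_ASIDS) {			/* rollover */
		generation += ASID_FIRST_VERSION;
		memset(asid_map, 0, sizeof(asid_map));	/* flush_context() */
		asid = 1;
	}
	asid_map[asid] = 1;
	return asid | generation;	/* generation in the high bits */
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("context id %#llx\n", (unsigned long long)new_context());
	return 0;
}
```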
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index ef3e0f3aac96..7f9b1798c6cf 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -250,7 +250,7 @@ static void __dma_free_buffer(struct page *page, size_t size)
 
 #ifdef CONFIG_MMU
 #ifdef CONFIG_HUGETLB_PAGE
-#error ARM Coherent DMA allocator does not (yet) support huge TLB
+#warning ARM Coherent DMA allocator does not (yet) support huge TLB
 #endif
 
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
@@ -880,10 +880,24 @@ static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
 	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
 
 	/*
-	 * Mark the D-cache clean for this page to avoid extra flushing.
+	 * Mark the D-cache clean for these pages to avoid extra flushing.
 	 */
-	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
-		set_bit(PG_dcache_clean, &page->flags);
+	if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
+		unsigned long pfn;
+		size_t left = size;
+
+		pfn = page_to_pfn(page) + off / PAGE_SIZE;
+		off %= PAGE_SIZE;
+		if (off) {
+			pfn++;
+			left -= PAGE_SIZE - off;
+		}
+		while (left >= PAGE_SIZE) {
+			page = pfn_to_page(pfn++);
+			set_bit(PG_dcache_clean, &page->flags);
+			left -= PAGE_SIZE;
+		}
+	}
 }
 
 /**
@@ -1314,6 +1328,15 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
 	if (gfp & GFP_ATOMIC)
 		return __iommu_alloc_atomic(dev, size, handle);
 
+	/*
+	 * Following is a work-around (a.k.a. hack) to prevent pages
+	 * with __GFP_COMP being passed to split_page() which cannot
+	 * handle them.  The real problem is that this flag probably
+	 * should be 0 on ARM as it is not supported on this
+	 * platform; see CONFIG_HUGETLBFS.
+	 */
+	gfp &= ~(__GFP_COMP);
+
 	pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
 	if (!pages)
 		return NULL;
@@ -1372,16 +1395,17 @@ static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 			  dma_addr_t handle, struct dma_attrs *attrs)
 {
-	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
+	struct page **pages;
 	size = PAGE_ALIGN(size);
 
-	if (!pages) {
-		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
+	if (__in_atomic_pool(cpu_addr, size)) {
+		__iommu_free_atomic(dev, cpu_addr, handle, size);
 		return;
 	}
 
-	if (__in_atomic_pool(cpu_addr, size)) {
-		__iommu_free_atomic(dev, cpu_addr, handle, size);
+	pages = __iommu_get_pages(cpu_addr, attrs);
+	if (!pages) {
+		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
 		return;
 	}
 
@@ -1636,13 +1660,27 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
 {
 	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
 	dma_addr_t dma_addr;
-	int ret, len = PAGE_ALIGN(size + offset);
+	int ret, prot, len = PAGE_ALIGN(size + offset);
 
 	dma_addr = __alloc_iova(mapping, len);
 	if (dma_addr == DMA_ERROR_CODE)
 		return dma_addr;
 
-	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, 0);
+	switch (dir) {
+	case DMA_BIDIRECTIONAL:
+		prot = IOMMU_READ | IOMMU_WRITE;
+		break;
+	case DMA_TO_DEVICE:
+		prot = IOMMU_READ;
+		break;
+	case DMA_FROM_DEVICE:
+		prot = IOMMU_WRITE;
+		break;
+	default:
+		prot = 0;
+	}
+
+	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
 	if (ret < 0)
 		goto fail;
 
@@ -1907,7 +1945,7 @@ void arm_iommu_detach_device(struct device *dev)
 
 	iommu_detach_device(mapping->domain, dev);
 	kref_put(&mapping->kref, release_iommu_mapping);
-	mapping = NULL;
+	dev->archdata.mapping = NULL;
 	set_dma_ops(dev, NULL);
 
 	pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
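The __dma_page_dev_to_cpu() hunk above replaces a single-page test with a walk that marks only the pages a buffer covers *completely* as PG_dcache_clean; partially covered pages must stay flagged, since data outside the buffer may still need flushing. A standalone model of just that arithmetic (PAGE_SIZE value assumed; printf stands in for set_bit on the page flags):

```c
#include <stdio.h>

#define PAGE_SIZE 4096UL

static void mark_fully_covered(unsigned long first_pfn, unsigned long off,
			       unsigned long size)
{
	unsigned long pfn = first_pfn + off / PAGE_SIZE;
	unsigned long left = size;

	off %= PAGE_SIZE;
	if (off) {			/* first page only partially covered */
		pfn++;
		left -= PAGE_SIZE - off;
	}
	while (left >= PAGE_SIZE) {	/* whole pages: safe to mark clean */
		printf("mark pfn %lu PG_dcache_clean\n", pfn++);
		left -= PAGE_SIZE;
	}
}

int main(void)
{
	/* a buffer starting 0x100 bytes into pfn 10, three pages long:
	 * only pfns 11 and 12 are fully covered and get marked */
	mark_fully_covered(10, 0x100, 3 * PAGE_SIZE);
	return 0;
}
```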
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 5dbf13f954f6..c97f7940cb95 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -491,12 +491,14 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
  * Some section permission faults need to be handled gracefully.
  * They can happen due to a __{get,put}_user during an oops.
  */
+#ifndef CONFIG_ARM_LPAE
 static int
 do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
 	do_bad_area(addr, fsr, regs);
 	return 0;
 }
+#endif /* CONFIG_ARM_LPAE */
 
 /*
  * This abort handler always returns "fault".
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 32aa5861119f..6d5ba9afb16a 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -17,6 +17,7 @@
 #include <asm/highmem.h>
 #include <asm/smp_plat.h>
 #include <asm/tlbflush.h>
+#include <linux/hugetlb.h>
 
 #include "mm.h"
 
@@ -168,19 +169,23 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 	 * coherent with the kernels mapping.
 	 */
 	if (!PageHighMem(page)) {
-		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
+		size_t page_size = PAGE_SIZE << compound_order(page);
+		__cpuc_flush_dcache_area(page_address(page), page_size);
 	} else {
-		void *addr;
-
+		unsigned long i;
 		if (cache_is_vipt_nonaliasing()) {
-			addr = kmap_atomic(page);
-			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
-			kunmap_atomic(addr);
-		} else {
-			addr = kmap_high_get(page);
-			if (addr) {
+			for (i = 0; i < (1 << compound_order(page)); i++) {
+				void *addr = kmap_atomic(page);
 				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
-				kunmap_high(page);
+				kunmap_atomic(addr);
+			}
+		} else {
+			for (i = 0; i < (1 << compound_order(page)); i++) {
+				void *addr = kmap_high_get(page);
+				if (addr) {
+					__cpuc_flush_dcache_area(addr, PAGE_SIZE);
+					kunmap_high(page);
+				}
 			}
 		}
 	}
@@ -287,7 +292,7 @@ void flush_dcache_page(struct page *page)
 	mapping = page_mapping(page);
 
 	if (!cache_ops_need_broadcast() &&
-	    mapping && !mapping_mapped(mapping))
+	    mapping && !page_mapped(page))
 		clear_bit(PG_dcache_clean, &page->flags);
 	else {
 		__flush_dcache_page(mapping, page);
diff --git a/arch/arm/mm/fsr-3level.c b/arch/arm/mm/fsr-3level.c
index 05a4e9431836..ab4409a2307e 100644
--- a/arch/arm/mm/fsr-3level.c
+++ b/arch/arm/mm/fsr-3level.c
@@ -9,11 +9,11 @@ static struct fsr_info fsr_info[] = {
 	{ do_page_fault,	SIGSEGV, SEGV_MAPERR,	"level 3 translation fault"	},
 	{ do_bad,		SIGBUS,  0,		"reserved access flag fault"	},
 	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"level 1 access flag fault"	},
-	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"level 2 access flag fault"	},
+	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 2 access flag fault"	},
 	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 3 access flag fault"	},
 	{ do_bad,		SIGBUS,  0,		"reserved permission fault"	},
 	{ do_bad,		SIGSEGV, SEGV_ACCERR,	"level 1 permission fault"	},
-	{ do_sect_fault,	SIGSEGV, SEGV_ACCERR,	"level 2 permission fault"	},
+	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 2 permission fault"	},
 	{ do_page_fault,	SIGSEGV, SEGV_ACCERR,	"level 3 permission fault"	},
 	{ do_bad,		SIGBUS,  0,		"synchronous external abort"	},
 	{ do_bad,		SIGBUS,  0,		"asynchronous external abort"	},
diff --git a/arch/arm/mm/hugetlbpage.c b/arch/arm/mm/hugetlbpage.c
new file mode 100644
index 000000000000..3d1e4a205b0b
--- /dev/null
+++ b/arch/arm/mm/hugetlbpage.c
@@ -0,0 +1,101 @@
+/*
+ * arch/arm/mm/hugetlbpage.c
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * Based on arch/x86/include/asm/hugetlb.h and Bill Carson's patches
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/pagemap.h>
+#include <linux/err.h>
+#include <linux/sysctl.h>
+#include <asm/mman.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+#include <asm/pgalloc.h>
+
+/*
+ * On ARM, huge pages are backed by pmd's rather than pte's, so we do a lot
+ * of type casting from pmd_t * to pte_t *.
+ */
+
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd = NULL;
+
+	pgd = pgd_offset(mm, addr);
+	if (pgd_present(*pgd)) {
+		pud = pud_offset(pgd, addr);
+		if (pud_present(*pud))
+			pmd = pmd_offset(pud, addr);
+	}
+
+	return (pte_t *)pmd;
+}
+
+struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
+			      int write)
+{
+	return ERR_PTR(-EINVAL);
+}
+
+int pud_huge(pud_t pud)
+{
+	return 0;
+}
+
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+	return 0;
+}
+
+pte_t *huge_pte_alloc(struct mm_struct *mm,
+		      unsigned long addr, unsigned long sz)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pte_t *pte = NULL;
+
+	pgd = pgd_offset(mm, addr);
+	pud = pud_alloc(mm, pgd, addr);
+	if (pud)
+		pte = (pte_t *)pmd_alloc(mm, pud, addr);
+
+	return pte;
+}
+
+struct page *
+follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+		pmd_t *pmd, int write)
+{
+	struct page *page;
+
+	page = pte_page(*(pte_t *)pmd);
+	if (page)
+		page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
+	return page;
+}
+
+int pmd_huge(pmd_t pmd)
+{
+	return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
+}
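pmd_huge() works because, with LPAE descriptors, bit 1 distinguishes a table entry (bit set) from a section/block entry (bit clear). A small sketch of just that test — the descriptor values here are made-up examples, and PMD_TABLE_BIT mirrors the kernel's `1 << 1` definition:

```c
#include <stdio.h>
#include <stdint.h>

#define PMD_TABLE_BIT	2UL	/* bit 1: set for table, clear for block */

/* mirrors the pmd_huge() test added above: a non-empty pmd whose
 * table bit is clear maps a (huge) section rather than a page table */
static int pmd_is_huge(uint64_t pmdval)
{
	return pmdval && !(pmdval & PMD_TABLE_BIT);
}

int main(void)
{
	printf("empty:   %d\n", pmd_is_huge(0));		/* 0 */
	printf("table:   %d\n", pmd_is_huge(0x80000003));	/* 0 */
	printf("section: %d\n", pmd_is_huge(0x80000001));	/* 1 */
	return 0;
}
```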
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 9a5cdc01fcdf..15225d829d71 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -36,12 +36,13 @@
 
 #include "mm.h"
 
-static unsigned long phys_initrd_start __initdata = 0;
+static phys_addr_t phys_initrd_start __initdata = 0;
 static unsigned long phys_initrd_size __initdata = 0;
 
 static int __init early_initrd(char *p)
 {
-	unsigned long start, size;
+	phys_addr_t start;
+	unsigned long size;
 	char *endp;
 
 	start = memparse(p, &endp);
@@ -350,14 +351,14 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
 #ifdef CONFIG_BLK_DEV_INITRD
 	if (phys_initrd_size &&
 	    !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
-		pr_err("INITRD: 0x%08lx+0x%08lx is not a memory region - disabling initrd\n",
-		       phys_initrd_start, phys_initrd_size);
+		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
+		       (u64)phys_initrd_start, phys_initrd_size);
 		phys_initrd_start = phys_initrd_size = 0;
 	}
 	if (phys_initrd_size &&
 	    memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
-		pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n",
-		       phys_initrd_start, phys_initrd_size);
+		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
+		       (u64)phys_initrd_start, phys_initrd_size);
 		phys_initrd_start = phys_initrd_size = 0;
 	}
 	if (phys_initrd_size) {
@@ -442,7 +443,7 @@ static inline void
 free_memmap(unsigned long start_pfn, unsigned long end_pfn)
 {
 	struct page *start_pg, *end_pg;
-	unsigned long pg, pgend;
+	phys_addr_t pg, pgend;
 
 	/*
 	 * Convert start_pfn/end_pfn to a struct page pointer.
@@ -454,8 +455,8 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn)
 	 * Convert to physical addresses, and
 	 * round start upwards and end downwards.
 	 */
-	pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
-	pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;
+	pg = PAGE_ALIGN(__pa(start_pg));
+	pgend = __pa(end_pg) & PAGE_MASK;
 
 	/*
 	 * If there are free pages between these,
@@ -582,9 +583,6 @@ static void __init free_highpages(void)
  */
 void __init mem_init(void)
 {
-	unsigned long reserved_pages, free_pages;
-	struct memblock_region *reg;
-	int i;
 #ifdef CONFIG_HAVE_TCM
 	/* These pointers are filled in on TCM detection */
 	extern u32 dtcm_end;
@@ -595,57 +593,16 @@ void __init mem_init(void)
 
 	/* this will put all unused low memory onto the freelists */
 	free_unused_memmap(&meminfo);
-
-	totalram_pages += free_all_bootmem();
+	free_all_bootmem();
 
 #ifdef CONFIG_SA1111
 	/* now that our DMA memory is actually so designated, we can free it */
-	free_reserved_area(__va(PHYS_PFN_OFFSET), swapper_pg_dir, 0, NULL);
+	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
 #endif
 
 	free_highpages();
 
-	reserved_pages = free_pages = 0;
-
-	for_each_bank(i, &meminfo) {
-		struct membank *bank = &meminfo.bank[i];
-		unsigned int pfn1, pfn2;
-		struct page *page, *end;
-
-		pfn1 = bank_pfn_start(bank);
-		pfn2 = bank_pfn_end(bank);
-
-		page = pfn_to_page(pfn1);
-		end = pfn_to_page(pfn2 - 1) + 1;
-
-		do {
-			if (PageReserved(page))
-				reserved_pages++;
-			else if (!page_count(page))
-				free_pages++;
-			page++;
-		} while (page < end);
-	}
-
-	/*
-	 * Since our memory may not be contiguous, calculate the
-	 * real number of pages we have in this system
-	 */
-	printk(KERN_INFO "Memory:");
-	num_physpages = 0;
-	for_each_memblock(memory, reg) {
-		unsigned long pages = memblock_region_memory_end_pfn(reg) -
-			memblock_region_memory_base_pfn(reg);
-		num_physpages += pages;
-		printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
-	}
-	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
-
-	printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
-		nr_free_pages() << (PAGE_SHIFT-10),
-		free_pages << (PAGE_SHIFT-10),
-		reserved_pages << (PAGE_SHIFT-10),
-		totalhigh_pages << (PAGE_SHIFT-10));
+	mem_init_print_info(NULL);
 
 #define MLK(b, t) b, t, ((t) - (b)) >> 10
 #define MLM(b, t) b, t, ((t) - (b)) >> 20
@@ -711,7 +668,7 @@ void __init mem_init(void)
 	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
 #endif
 
-	if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
+	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
 		extern int sysctl_overcommit_memory;
 		/*
 		 * On a machine this small we won't get
@@ -728,12 +685,12 @@ void free_initmem(void)
 	extern char __tcm_start, __tcm_end;
 
 	poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
-	free_reserved_area(&__tcm_start, &__tcm_end, 0, "TCM link");
+	free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
 #endif
 
 	poison_init_mem(__init_begin, __init_end - __init_begin);
 	if (!machine_is_integrator() && !machine_is_cintegrator())
-		free_initmem_default(0);
+		free_initmem_default(-1);
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -744,7 +701,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 {
 	if (!keep_initrd) {
 		poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
-		free_reserved_area(start, end, 0, "initrd");
+		free_reserved_area((void *)start, (void *)end, -1, "initrd");
 	}
 }
 
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 04d9006eab1f..f123d6eb074b 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -331,10 +331,10 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	return (void __iomem *) (offset + addr);
 }
 
-void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
+void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
 	unsigned int mtype, void *caller)
 {
-	unsigned long last_addr;
+	phys_addr_t last_addr;
 	unsigned long offset = phys_addr & ~PAGE_MASK;
 	unsigned long pfn = __phys_to_pfn(phys_addr);
 
@@ -367,12 +367,12 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
 }
 EXPORT_SYMBOL(__arm_ioremap_pfn);
 
-void __iomem * (*arch_ioremap_caller)(unsigned long, size_t,
+void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
 				      unsigned int, void *) =
 	__arm_ioremap_caller;
 
 void __iomem *
-__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
+__arm_ioremap(phys_addr_t phys_addr, size_t size, unsigned int mtype)
 {
 	return arch_ioremap_caller(phys_addr, size, mtype,
 		__builtin_return_address(0));
@@ -387,7 +387,7 @@ EXPORT_SYMBOL(__arm_ioremap);
 * CONFIG_GENERIC_ALLOCATOR for allocating external memory.
 */
 void __iomem *
-__arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
+__arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
 {
 	unsigned int mtype;
 
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 10062ceadd1c..0c6356255fe3 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -181,11 +181,9 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 	if (mmap_is_legacy()) {
 		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
 		mm->get_unmapped_area = arch_get_unmapped_area;
-		mm->unmap_area = arch_unmap_area;
 	} else {
 		mm->mmap_base = mmap_base(random_factor);
 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
-		mm->unmap_area = arch_unmap_area_topdown;
 	}
 }
 
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4d409e6a552d..53cdbd39ec8e 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -675,7 +675,8 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
 }
 
 static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
-	unsigned long end, unsigned long phys, const struct mem_type *type)
+	unsigned long end, phys_addr_t phys,
+	const struct mem_type *type)
 {
 	pud_t *pud = pud_offset(pgd, addr);
 	unsigned long next;
@@ -949,7 +950,7 @@ void __init debug_ll_io_init(void)
 	map.virtual &= PAGE_MASK;
 	map.length = PAGE_SIZE;
 	map.type = MT_DEVICE;
-	create_mapping(&map);
+	iotable_init(&map, 1);
 }
 #endif
 
@@ -988,28 +989,30 @@ phys_addr_t arm_lowmem_limit __initdata = 0;
 
 void __init sanity_check_meminfo(void)
 {
+	phys_addr_t memblock_limit = 0;
 	int i, j, highmem = 0;
+	phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;
 
 	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
 		struct membank *bank = &meminfo.bank[j];
+		phys_addr_t size_limit;
+
 		*bank = meminfo.bank[i];
+		size_limit = bank->size;
 
-		if (bank->start > ULONG_MAX)
-			highmem = 1;
-
-#ifdef CONFIG_HIGHMEM
-		if (__va(bank->start) >= vmalloc_min ||
-		    __va(bank->start) < (void *)PAGE_OFFSET)
+		if (bank->start >= vmalloc_limit)
 			highmem = 1;
+		else
+			size_limit = vmalloc_limit - bank->start;
 
 		bank->highmem = highmem;
 
+#ifdef CONFIG_HIGHMEM
 		/*
 		 * Split those memory banks which are partially overlapping
 		 * the vmalloc area greatly simplifying things later.
 		 */
-		if (!highmem && __va(bank->start) < vmalloc_min &&
-		    bank->size > vmalloc_min - __va(bank->start)) {
+		if (!highmem && bank->size > size_limit) {
 			if (meminfo.nr_banks >= NR_BANKS) {
 				printk(KERN_CRIT "NR_BANKS too low, "
 				       "ignoring high memory\n");
@@ -1018,16 +1021,14 @@ void __init sanity_check_meminfo(void)
 					(meminfo.nr_banks - i) * sizeof(*bank));
 			meminfo.nr_banks++;
 			i++;
-			bank[1].size -= vmalloc_min - __va(bank->start);
-			bank[1].start = __pa(vmalloc_min - 1) + 1;
+			bank[1].size -= size_limit;
+			bank[1].start = vmalloc_limit;
 			bank[1].highmem = highmem = 1;
 			j++;
 		}
-		bank->size = vmalloc_min - __va(bank->start);
+		bank->size = size_limit;
 	}
 #else
-	bank->highmem = highmem;
-
 	/*
 	 * Highmem banks not allowed with !CONFIG_HIGHMEM.
 	 */
@@ -1040,36 +1041,44 @@ void __init sanity_check_meminfo(void) | |||
1040 | } | 1041 | } |
1041 | 1042 | ||
1042 | /* | 1043 | /* |
1043 | * Check whether this memory bank would entirely overlap | ||
1044 | * the vmalloc area. | ||
1045 | */ | ||
1046 | if (__va(bank->start) >= vmalloc_min || | ||
1047 | __va(bank->start) < (void *)PAGE_OFFSET) { | ||
1048 | printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx " | ||
1049 | "(vmalloc region overlap).\n", | ||
1050 | (unsigned long long)bank->start, | ||
1051 | (unsigned long long)bank->start + bank->size - 1); | ||
1052 | continue; | ||
1053 | } | ||
1054 | |||
1055 | /* | ||
1056 | * Check whether this memory bank would partially overlap | 1044 | * Check whether this memory bank would partially overlap |
1057 | * the vmalloc area. | 1045 | * the vmalloc area. |
1058 | */ | 1046 | */ |
1059 | if (__va(bank->start + bank->size - 1) >= vmalloc_min || | 1047 | if (bank->size > size_limit) { |
1060 | __va(bank->start + bank->size - 1) <= __va(bank->start)) { | ||
1061 | unsigned long newsize = vmalloc_min - __va(bank->start); | ||
1062 | printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx " | 1048 | printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx " |
1063 | "to -%.8llx (vmalloc region overlap).\n", | 1049 | "to -%.8llx (vmalloc region overlap).\n", |
1064 | (unsigned long long)bank->start, | 1050 | (unsigned long long)bank->start, |
1065 | (unsigned long long)bank->start + bank->size - 1, | 1051 | (unsigned long long)bank->start + bank->size - 1, |
1066 | (unsigned long long)bank->start + newsize - 1); | 1052 | (unsigned long long)bank->start + size_limit - 1); |
1067 | bank->size = newsize; | 1053 | bank->size = size_limit; |
1068 | } | 1054 | } |
1069 | #endif | 1055 | #endif |
1070 | if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit) | 1056 | if (!bank->highmem) { |
1071 | arm_lowmem_limit = bank->start + bank->size; | 1057 | phys_addr_t bank_end = bank->start + bank->size; |
1058 | |||
1059 | if (bank_end > arm_lowmem_limit) | ||
1060 | arm_lowmem_limit = bank_end; | ||
1072 | 1061 | ||
1062 | /* | ||
1063 | * Find the first non-section-aligned page, and point | ||
1064 | * memblock_limit at it. This relies on rounding the | ||
1065 | * limit down to be section-aligned, which happens at | ||
1066 | * the end of this function. | ||
1067 | * | ||
1068 | * With this algorithm, the start or end of almost any | ||
1069 | * bank can be non-section-aligned. The only exception | ||
1070 | * is that the start of bank 0 must be section- | ||
1071 | * aligned, since otherwise memory would need to be | ||
1072 | * allocated when mapping the start of bank 0, which | ||
1073 | * occurs before any free memory is mapped. | ||
1074 | */ | ||
1075 | if (!memblock_limit) { | ||
1076 | if (!IS_ALIGNED(bank->start, SECTION_SIZE)) | ||
1077 | memblock_limit = bank->start; | ||
1078 | else if (!IS_ALIGNED(bank_end, SECTION_SIZE)) | ||
1079 | memblock_limit = bank_end; | ||
1080 | } | ||
1081 | } | ||
1073 | j++; | 1082 | j++; |
1074 | } | 1083 | } |
1075 | #ifdef CONFIG_HIGHMEM | 1084 | #ifdef CONFIG_HIGHMEM |
@@ -1094,7 +1103,18 @@ void __init sanity_check_meminfo(void) | |||
1094 | #endif | 1103 | #endif |
1095 | meminfo.nr_banks = j; | 1104 | meminfo.nr_banks = j; |
1096 | high_memory = __va(arm_lowmem_limit - 1) + 1; | 1105 | high_memory = __va(arm_lowmem_limit - 1) + 1; |
1097 | memblock_set_current_limit(arm_lowmem_limit); | 1106 | |
1107 | /* | ||
1108 | * Round the memblock limit down to a section size. This | ||
1109 | * helps to ensure that we will allocate memory from the | ||
1110 | * last full section, which should be mapped. | ||
1111 | */ | ||
1112 | if (memblock_limit) | ||
1113 | memblock_limit = round_down(memblock_limit, SECTION_SIZE); | ||
1114 | if (!memblock_limit) | ||
1115 | memblock_limit = arm_lowmem_limit; | ||
1116 | |||
1117 | memblock_set_current_limit(memblock_limit); | ||
1098 | } | 1118 | } |
1099 | 1119 | ||
1100 | static inline void prepare_page_table(void) | 1120 | static inline void prepare_page_table(void) |
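Taken together, the hunks above replace the old open-coded __va() comparisons with a single per-bank size_limit, and teach sanity_check_meminfo() to remember the first non-section-aligned bank boundary so early memblock allocations never come from above the last fully mapped section. A standalone sketch of that flow, with an assumed 1 MiB SECTION_SIZE and made-up bank/vmalloc values standing in for the kernel's own:

    #include <stdint.h>
    #include <stdio.h>

    #define SECTION_SIZE     (1ULL << 20)   /* assumption: 1 MiB sections */
    #define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

    int main(void)
    {
        /* hypothetical bank straddling the vmalloc limit, with a
         * non-section-aligned start */
        uint64_t vmalloc_limit = 0x30000000, start = 0x20080000;
        uint64_t size = 0x20000000, arm_lowmem_limit = 0, memblock_limit = 0;
        int highmem = 0;

        /* size_limit: how much of the bank lies below the vmalloc limit */
        if (start >= vmalloc_limit)
            highmem = 1;
        else if (size > vmalloc_limit - start)
            size = vmalloc_limit - start;   /* truncate (or split) the bank */

        if (!highmem) {
            uint64_t bank_end = start + size;

            if (bank_end > arm_lowmem_limit)
                arm_lowmem_limit = bank_end;
            /* remember the first non-section-aligned boundary */
            if (!memblock_limit) {
                if (!IS_ALIGNED(start, SECTION_SIZE))
                    memblock_limit = start;
                else if (!IS_ALIGNED(bank_end, SECTION_SIZE))
                    memblock_limit = bank_end;
            }
        }

        /* round down: allocate only from the last fully mapped section */
        if (memblock_limit)
            memblock_limit &= ~(SECTION_SIZE - 1);
        if (!memblock_limit)
            memblock_limit = arm_lowmem_limit;

        printf("lowmem limit %#llx, memblock limit %#llx\n",
               (unsigned long long)arm_lowmem_limit,
               (unsigned long long)memblock_limit);
        return 0;
    }

With these values it prints a lowmem limit of 0x30000000 and a memblock limit of 0x20000000: allocations stay below the bank's unaligned start once it is rounded down to a section boundary.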
@@ -1175,7 +1195,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc) | |||
1175 | /* | 1195 | /* |
1176 | * Allocate the vector page early. | 1196 | * Allocate the vector page early. |
1177 | */ | 1197 | */ |
1178 | vectors = early_alloc(PAGE_SIZE); | 1198 | vectors = early_alloc(PAGE_SIZE * 2); |
1179 | 1199 | ||
1180 | early_trap_init(vectors); | 1200 | early_trap_init(vectors); |
1181 | 1201 | ||
@@ -1220,20 +1240,34 @@ static void __init devicemaps_init(struct machine_desc *mdesc) | |||
1220 | map.pfn = __phys_to_pfn(virt_to_phys(vectors)); | 1240 | map.pfn = __phys_to_pfn(virt_to_phys(vectors)); |
1221 | map.virtual = 0xffff0000; | 1241 | map.virtual = 0xffff0000; |
1222 | map.length = PAGE_SIZE; | 1242 | map.length = PAGE_SIZE; |
1243 | #ifdef CONFIG_KUSER_HELPERS | ||
1223 | map.type = MT_HIGH_VECTORS; | 1244 | map.type = MT_HIGH_VECTORS; |
1245 | #else | ||
1246 | map.type = MT_LOW_VECTORS; | ||
1247 | #endif | ||
1224 | create_mapping(&map); | 1248 | create_mapping(&map); |
1225 | 1249 | ||
1226 | if (!vectors_high()) { | 1250 | if (!vectors_high()) { |
1227 | map.virtual = 0; | 1251 | map.virtual = 0; |
1252 | map.length = PAGE_SIZE * 2; | ||
1228 | map.type = MT_LOW_VECTORS; | 1253 | map.type = MT_LOW_VECTORS; |
1229 | create_mapping(&map); | 1254 | create_mapping(&map); |
1230 | } | 1255 | } |
1231 | 1256 | ||
1257 | /* Now create a kernel read-only mapping */ | ||
1258 | map.pfn += 1; | ||
1259 | map.virtual = 0xffff0000 + PAGE_SIZE; | ||
1260 | map.length = PAGE_SIZE; | ||
1261 | map.type = MT_LOW_VECTORS; | ||
1262 | create_mapping(&map); | ||
1263 | |||
1232 | /* | 1264 | /* |
1233 | * Ask the machine support to map in the statically mapped devices. | 1265 | * Ask the machine support to map in the statically mapped devices. |
1234 | */ | 1266 | */ |
1235 | if (mdesc->map_io) | 1267 | if (mdesc->map_io) |
1236 | mdesc->map_io(); | 1268 | mdesc->map_io(); |
1269 | else | ||
1270 | debug_ll_io_init(); | ||
1237 | fill_pmd_gaps(); | 1271 | fill_pmd_gaps(); |
1238 | 1272 | ||
1239 | /* Reserve fixed i/o space in VMALLOC region */ | 1273 | /* Reserve fixed i/o space in VMALLOC region */ |
@@ -1289,8 +1323,6 @@ void __init paging_init(struct machine_desc *mdesc) | |||
1289 | { | 1323 | { |
1290 | void *zero_page; | 1324 | void *zero_page; |
1291 | 1325 | ||
1292 | memblock_set_current_limit(arm_lowmem_limit); | ||
1293 | |||
1294 | build_mem_type_table(); | 1326 | build_mem_type_table(); |
1295 | prepare_page_table(); | 1327 | prepare_page_table(); |
1296 | map_lowmem(); | 1328 | map_lowmem(); |
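The vectors changes in devicemaps_init() above are worth spelling out: the kernel now allocates two consecutive pages, maps the first at 0xffff0000 as MT_HIGH_VECTORS only when CONFIG_KUSER_HELPERS needs it to be user-readable, and maps the second at 0xffff1000 as a kernel read-only page. A trimmed C sketch of the resulting layout (struct map_desc here is a stand-in with the same field roles as in the hunk, the pfn is an assumed early_alloc() result, and create_mapping() just prints what the kernel would map):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    struct map_desc {                  /* trimmed stand-in */
        unsigned long pfn, virt, length;
        const char *type;
    };

    static void create_mapping(const struct map_desc *m)
    {
        printf("pfn %#lx -> va %#lx len %#lx as %s\n",
               m->pfn, m->virt, m->length, m->type);
    }

    int main(void)
    {
        struct map_desc map = { 0x1234, 0xffff0000, PAGE_SIZE, NULL };

    #ifdef CONFIG_KUSER_HELPERS
        map.type = "MT_HIGH_VECTORS";  /* user-readable vectors page */
    #else
        map.type = "MT_LOW_VECTORS";   /* kernel-only vectors page */
    #endif
        create_mapping(&map);

        map.pfn += 1;                  /* second page of the early_alloc */
        map.virt = 0xffff0000 + PAGE_SIZE;
        map.type = "MT_LOW_VECTORS";   /* kernel read-only page */
        create_mapping(&map);
        return 0;
    }

Only the two-page shape and the type choices are taken from the diff; the low-vectors fallback path (map.length = PAGE_SIZE * 2 at virtual 0) is omitted here.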
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index eb5293a69a84..1fa50100ab6a 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <linux/pagemap.h> | 8 | #include <linux/pagemap.h> |
9 | #include <linux/io.h> | 9 | #include <linux/io.h> |
10 | #include <linux/memblock.h> | 10 | #include <linux/memblock.h> |
11 | #include <linux/kernel.h> | ||
11 | 12 | ||
12 | #include <asm/cacheflush.h> | 13 | #include <asm/cacheflush.h> |
13 | #include <asm/sections.h> | 14 | #include <asm/sections.h> |
@@ -15,22 +16,282 @@ | |||
15 | #include <asm/setup.h> | 16 | #include <asm/setup.h> |
16 | #include <asm/traps.h> | 17 | #include <asm/traps.h> |
17 | #include <asm/mach/arch.h> | 18 | #include <asm/mach/arch.h> |
19 | #include <asm/cputype.h> | ||
20 | #include <asm/mpu.h> | ||
18 | 21 | ||
19 | #include "mm.h" | 22 | #include "mm.h" |
20 | 23 | ||
24 | #ifdef CONFIG_ARM_MPU | ||
25 | struct mpu_rgn_info mpu_rgn_info; | ||
26 | |||
27 | /* Region number */ | ||
28 | static void rgnr_write(u32 v) | ||
29 | { | ||
30 | asm("mcr p15, 0, %0, c6, c2, 0" : : "r" (v)); | ||
31 | } | ||
32 | |||
33 | /* Data-side / unified region attributes */ | ||
34 | |||
35 | /* Region access control register */ | ||
36 | static void dracr_write(u32 v) | ||
37 | { | ||
38 | asm("mcr p15, 0, %0, c6, c1, 4" : : "r" (v)); | ||
39 | } | ||
40 | |||
41 | /* Region size register */ | ||
42 | static void drsr_write(u32 v) | ||
43 | { | ||
44 | asm("mcr p15, 0, %0, c6, c1, 2" : : "r" (v)); | ||
45 | } | ||
46 | |||
47 | /* Region base address register */ | ||
48 | static void drbar_write(u32 v) | ||
49 | { | ||
50 | asm("mcr p15, 0, %0, c6, c1, 0" : : "r" (v)); | ||
51 | } | ||
52 | |||
53 | static u32 drbar_read(void) | ||
54 | { | ||
55 | u32 v; | ||
56 | asm("mrc p15, 0, %0, c6, c1, 0" : "=r" (v)); | ||
57 | return v; | ||
58 | } | ||
59 | /* Optional instruction-side region attributes */ | ||
60 | |||
61 | /* I-side Region access control register */ | ||
62 | static void iracr_write(u32 v) | ||
63 | { | ||
64 | asm("mcr p15, 0, %0, c6, c1, 5" : : "r" (v)); | ||
65 | } | ||
66 | |||
67 | /* I-side Region size register */ | ||
68 | static void irsr_write(u32 v) | ||
69 | { | ||
70 | asm("mcr p15, 0, %0, c6, c1, 3" : : "r" (v)); | ||
71 | } | ||
72 | |||
73 | /* I-side Region base address register */ | ||
74 | static void irbar_write(u32 v) | ||
75 | { | ||
76 | asm("mcr p15, 0, %0, c6, c1, 1" : : "r" (v)); | ||
77 | } | ||
78 | |||
79 | static unsigned long irbar_read(void) | ||
80 | { | ||
81 | unsigned long v; | ||
82 | asm("mrc p15, 0, %0, c6, c1, 1" : "=r" (v)); | ||
83 | return v; | ||
84 | } | ||
85 | |||
86 | /* MPU initialisation functions */ | ||
87 | void __init sanity_check_meminfo_mpu(void) | ||
88 | { | ||
89 | int i; | ||
90 | struct membank *bank = meminfo.bank; | ||
91 | phys_addr_t phys_offset = PHYS_OFFSET; | ||
92 | phys_addr_t aligned_region_size, specified_mem_size, rounded_mem_size; | ||
93 | |||
94 | /* Initially only use memory continuous from PHYS_OFFSET */ | ||
95 | if (bank_phys_start(&bank[0]) != phys_offset) | ||
96 | panic("First memory bank must be contiguous from PHYS_OFFSET"); | ||
97 | |||
98 | /* Banks have already been sorted by start address */ | ||
99 | for (i = 1; i < meminfo.nr_banks; i++) { | ||
100 | if (bank[i].start <= bank_phys_end(&bank[0]) && | ||
101 | bank_phys_end(&bank[i]) > bank_phys_end(&bank[0])) { | ||
102 | bank[0].size = bank_phys_end(&bank[i]) - bank[0].start; | ||
103 | } else { | ||
104 | pr_notice("Ignoring RAM after 0x%.8lx. " | ||
105 | "First non-contiguous (ignored) bank start: 0x%.8lx\n", | ||
106 | (unsigned long)bank_phys_end(&bank[0]), | ||
107 | (unsigned long)bank_phys_start(&bank[i])); | ||
108 | break; | ||
109 | } | ||
110 | } | ||
111 | /* All contiguous banks are now merged into the first bank */ | ||
112 | meminfo.nr_banks = 1; | ||
113 | specified_mem_size = bank[0].size; | ||
114 | |||
115 | /* | ||
116 | * The MPU has curious alignment requirements: size must be a power of 2, and | ||
117 | * region start must be aligned to the region size | ||
118 | */ | ||
119 | if (phys_offset != 0) | ||
120 | pr_info("PHYS_OFFSET != 0 => MPU Region size constrained by alignment requirements\n"); | ||
121 | |||
122 | /* | ||
123 | * Maximum aligned region might overflow phys_addr_t if phys_offset is | ||
124 | * 0. Hence we keep everything below 4G until we take the smaller of | ||
125 | * the aligned_region_size and rounded_mem_size, one of which is | ||
126 | * guaranteed to be smaller than the maximum physical address. | ||
127 | */ | ||
128 | aligned_region_size = (phys_offset - 1) ^ (phys_offset); | ||
129 | /* Find the max power-of-two sized region that fits inside our bank */ | ||
130 | rounded_mem_size = (1 << __fls(bank[0].size)) - 1; | ||
131 | |||
132 | /* The actual region size is the smaller of the two */ | ||
133 | aligned_region_size = aligned_region_size < rounded_mem_size | ||
134 | ? aligned_region_size + 1 | ||
135 | : rounded_mem_size + 1; | ||
136 | |||
137 | if (aligned_region_size != specified_mem_size) | ||
138 | pr_warn("Truncating memory from 0x%.8lx to 0x%.8lx (MPU region constraints)", | ||
139 | (unsigned long)specified_mem_size, | ||
140 | (unsigned long)aligned_region_size); | ||
141 | |||
142 | meminfo.bank[0].size = aligned_region_size; | ||
143 | pr_debug("MPU Region from 0x%.8lx size 0x%.8lx (end 0x%.8lx)\n", | ||
144 | (unsigned long)phys_offset, | ||
145 | (unsigned long)aligned_region_size, | ||
146 | (unsigned long)bank_phys_end(&bank[0])); | ||
147 | |||
148 | } | ||
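The alignment arithmetic above reduces to two candidates: the largest power-of-two region that PHYS_OFFSET's own alignment permits, computed as (phys_offset - 1) ^ phys_offset, and the largest power of two that fits in the merged bank, 1 << __fls(size); the region gets the smaller of the two, with both held as size-minus-one so a 4 GiB region at offset 0 cannot overflow. The same arithmetic as a standalone sketch (a GCC builtin stands in for the kernel's __fls):

    #include <stdint.h>
    #include <stdio.h>

    /* highest set bit, like the kernel's __fls(); assumes v != 0 */
    static unsigned int fls_hi(uint32_t v) { return 31 - __builtin_clz(v); }

    static uint32_t mpu_region_size(uint32_t phys_offset, uint32_t mem_size)
    {
        uint32_t aligned_m1 = (phys_offset - 1) ^ phys_offset; /* offset alignment */
        uint32_t rounded_m1 = (1U << fls_hi(mem_size)) - 1;    /* pow2 <= size */

        return aligned_m1 < rounded_m1 ? aligned_m1 + 1 : rounded_m1 + 1;
    }

    int main(void)
    {
        /* hypothetical: 192 MiB of RAM at a 256 MiB-aligned offset
         * -> the region is capped at 128 MiB, the largest pow2 that fits */
        printf("region size %#x\n", mpu_region_size(0x10000000, 0x0c000000));
        return 0;
    }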
149 | |||
150 | static int mpu_present(void) | ||
151 | { | ||
152 | return ((read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA) == MMFR0_PMSAv7); | ||
153 | } | ||
154 | |||
155 | static int mpu_max_regions(void) | ||
156 | { | ||
157 | /* | ||
158 | * We don't support a different number of I/D side regions so if we | ||
159 | * have separate instruction and data memory maps then return | ||
160 | * whichever side has a smaller number of supported regions. | ||
161 | */ | ||
162 | u32 dregions, iregions, mpuir; | ||
163 | mpuir = read_cpuid(CPUID_MPUIR); | ||
164 | |||
165 | dregions = iregions = (mpuir & MPUIR_DREGION_SZMASK) >> MPUIR_DREGION; | ||
166 | |||
167 | /* Check for separate d-side and i-side memory maps */ | ||
168 | if (mpuir & MPUIR_nU) | ||
169 | iregions = (mpuir & MPUIR_IREGION_SZMASK) >> MPUIR_IREGION; | ||
170 | |||
171 | /* Use the smallest of the two maxima */ | ||
172 | return min(dregions, iregions); | ||
173 | } | ||
174 | |||
175 | static int mpu_iside_independent(void) | ||
176 | { | ||
177 | /* MPUIR.nU specifies whether there is *not* a unified memory map */ | ||
178 | return read_cpuid(CPUID_MPUIR) & MPUIR_nU; | ||
179 | } | ||
180 | |||
181 | static int mpu_min_region_order(void) | ||
182 | { | ||
183 | u32 drbar_result, irbar_result; | ||
184 | /* We've kept a region free for this probing */ | ||
185 | rgnr_write(MPU_PROBE_REGION); | ||
186 | isb(); | ||
187 | /* | ||
188 | * As per ARM ARM, write 0xFFFFFFFC to DRBAR to find the minimum | ||
189 | * region order | ||
190 | */ | ||
191 | drbar_write(0xFFFFFFFC); | ||
192 | drbar_result = irbar_result = drbar_read(); | ||
193 | drbar_write(0x0); | ||
194 | /* If the MPU is non-unified, we use the larger of the two minima */ | ||
195 | if (mpu_iside_independent()) { | ||
196 | irbar_write(0xFFFFFFFC); | ||
197 | irbar_result = irbar_read(); | ||
198 | irbar_write(0x0); | ||
199 | } | ||
200 | isb(); /* Ensure that MPU region operations have completed */ | ||
201 | /* Return whichever result is larger */ | ||
202 | return __ffs(max(drbar_result, irbar_result)); | ||
203 | } | ||
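The probe above works because DRBAR simply ignores address bits below the minimum region alignment: after writing 0xFFFFFFFC, the unsupported low bits read back as zero, so the lowest bit that survives gives the minimum region order. A userspace model of the readback (the masking stands in for what the hardware does, and the 256-byte minimum is an assumption):

    #include <stdint.h>
    #include <stdio.h>

    /* model of DRBAR readback: hardware clears address bits below
     * the minimum region alignment of 2^min_order bytes */
    static uint32_t drbar_readback(uint32_t written, unsigned int min_order)
    {
        return written & ~((1U << min_order) - 1);
    }

    int main(void)
    {
        uint32_t v = drbar_readback(0xFFFFFFFC, 8);
        /* lowest set bit of the readback == minimum region order */
        printf("min region order: %u\n", __builtin_ctz(v));   /* prints 8 */
        return 0;
    }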
204 | |||
205 | static int mpu_setup_region(unsigned int number, phys_addr_t start, | ||
206 | unsigned int size_order, unsigned int properties) | ||
207 | { | ||
208 | u32 size_data; | ||
209 | |||
210 | /* We kept a region free for probing resolution of MPU regions */ | ||
211 | if (number > mpu_max_regions() || number == MPU_PROBE_REGION) | ||
212 | return -ENOENT; | ||
213 | |||
214 | if (size_order > 32) | ||
215 | return -ENOMEM; | ||
216 | |||
217 | if (size_order < mpu_min_region_order()) | ||
218 | return -ENOMEM; | ||
219 | |||
220 | /* Writing N to bits 5:1 (RSR_SZ) specifies region size 2^(N+1) */ | ||
221 | size_data = ((size_order - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN; | ||
222 | |||
223 | dsb(); /* Ensure all previous data accesses occur with old mappings */ | ||
224 | rgnr_write(number); | ||
225 | isb(); | ||
226 | drbar_write(start); | ||
227 | dracr_write(properties); | ||
228 | isb(); /* Propagate properties before enabling region */ | ||
229 | drsr_write(size_data); | ||
230 | |||
231 | /* Check for independent I-side registers */ | ||
232 | if (mpu_iside_independent()) { | ||
233 | irbar_write(start); | ||
234 | iracr_write(properties); | ||
235 | isb(); | ||
236 | irsr_write(size_data); | ||
237 | } | ||
238 | isb(); | ||
239 | |||
240 | /* Store region info (we treat i/d side the same, so only store d) */ | ||
241 | mpu_rgn_info.rgns[number].dracr = properties; | ||
242 | mpu_rgn_info.rgns[number].drbar = start; | ||
243 | mpu_rgn_info.rgns[number].drsr = size_data; | ||
244 | return 0; | ||
245 | } | ||
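The DRSR encoding used by mpu_setup_region() follows directly from the comment above: for a 2^order-byte region, order - 1 goes into the SZ field at bits 5:1 and bit 0 enables the region. A quick sketch of the encoding (MPU_RSR_SZ = 1 and MPU_RSR_EN = 0 are assumed to match the asm/mpu.h definitions introduced by this series):

    #include <stdint.h>
    #include <stdio.h>

    #define MPU_RSR_SZ 1   /* assumed: SZ field starts at bit 1 */
    #define MPU_RSR_EN 0   /* assumed: enable is bit 0 */

    /* DRSR value for a 2^size_order byte region: SZ = order - 1, EN = 1 */
    static uint32_t mpu_rsr(unsigned int size_order)
    {
        return ((size_order - 1) << MPU_RSR_SZ) | (1U << MPU_RSR_EN);
    }

    int main(void)
    {
        printf("64 KiB region -> DRSR %#x\n", mpu_rsr(16)); /* SZ=15 -> 0x1f */
        return 0;
    }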
246 | |||
247 | /* | ||
248 | * Set up default MPU regions, doing nothing if there is no MPU | ||
249 | */ | ||
250 | void __init mpu_setup(void) | ||
251 | { | ||
252 | int region_err; | ||
253 | if (!mpu_present()) | ||
254 | return; | ||
255 | |||
256 | region_err = mpu_setup_region(MPU_RAM_REGION, PHYS_OFFSET, | ||
257 | ilog2(meminfo.bank[0].size), | ||
258 | MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL); | ||
259 | if (region_err) { | ||
260 | panic("MPU region initialization failure! %d", region_err); | ||
261 | } else { | ||
262 | pr_info("Using ARMv7 PMSA Compliant MPU. " | ||
263 | "Region independence: %s, Max regions: %d\n", | ||
264 | mpu_iside_independent() ? "Yes" : "No", | ||
265 | mpu_max_regions()); | ||
266 | } | ||
267 | } | ||
268 | #else | ||
269 | static void sanity_check_meminfo_mpu(void) {} | ||
270 | static void __init mpu_setup(void) {} | ||
271 | #endif /* CONFIG_ARM_MPU */ | ||
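The MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL properties passed by mpu_setup() end up in DRACR, whose field layout is fixed by the PMSAv7 architecture: B/C/S in bits 2:0, TEX in bits 5:3, AP in bits 10:8 and XN in bit 12. A hedged sketch of composing such a value; the field positions are architectural, but the exact constants the kernel defines in asm/mpu.h may be grouped differently:

    #include <stdint.h>
    #include <stdio.h>

    /* PMSAv7 DRACR fields (ARMv7-AR ARM); the names here are illustrative */
    #define DRACR_B      (1U << 0)
    #define DRACR_C      (1U << 1)
    #define DRACR_S      (1U << 2)
    #define DRACR_TEX(x) ((uint32_t)(x) << 3)
    #define DRACR_AP(x)  ((uint32_t)(x) << 8)
    #define DRACR_XN     (1U << 12)

    int main(void)
    {
        /* full access at PL1 and PL0 (AP = 0b011), normal write-back
         * write-allocate memory (TEX = 0b001, C = 1, B = 1) */
        uint32_t dracr = DRACR_AP(0x3) | DRACR_TEX(0x1) | DRACR_C | DRACR_B;
        printf("DRACR %#x\n", dracr);
        return 0;
    }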
272 | |||
21 | void __init arm_mm_memblock_reserve(void) | 273 | void __init arm_mm_memblock_reserve(void) |
22 | { | 274 | { |
275 | #ifndef CONFIG_CPU_V7M | ||
23 | /* | 276 | /* |
24 | * Register the exception vector page. | 277 | * Register the exception vector page. |
25 | * On some architectures the exception vectors live in DRAM at address 0; | 278 | * On some architectures the exception vectors live in DRAM at address 0; |
26 | * reserve that page, or alloc_page() may return it as a NULL-looking "0". | 279 | * reserve that page, or alloc_page() may return it as a NULL-looking "0". |
27 | */ | 280 | */ |
28 | memblock_reserve(CONFIG_VECTORS_BASE, PAGE_SIZE); | 281 | memblock_reserve(CONFIG_VECTORS_BASE, PAGE_SIZE); |
282 | #else /* ifndef CONFIG_CPU_V7M */ | ||
283 | /* | ||
284 | * There is no dedicated vector page on V7-M. So nothing needs to be | ||
285 | * reserved here. | ||
286 | */ | ||
287 | #endif | ||
29 | } | 288 | } |
30 | 289 | ||
31 | void __init sanity_check_meminfo(void) | 290 | void __init sanity_check_meminfo(void) |
32 | { | 291 | { |
33 | phys_addr_t end = bank_phys_end(&meminfo.bank[meminfo.nr_banks - 1]); | 292 | phys_addr_t end; |
293 | sanity_check_meminfo_mpu(); | ||
294 | end = bank_phys_end(&meminfo.bank[meminfo.nr_banks - 1]); | ||
34 | high_memory = __va(end - 1) + 1; | 295 | high_memory = __va(end - 1) + 1; |
35 | } | 296 | } |
36 | 297 | ||
@@ -41,6 +302,7 @@ void __init sanity_check_meminfo(void) | |||
41 | void __init paging_init(struct machine_desc *mdesc) | 302 | void __init paging_init(struct machine_desc *mdesc) |
42 | { | 303 | { |
43 | early_trap_init((void *)CONFIG_VECTORS_BASE); | 304 | early_trap_init((void *)CONFIG_VECTORS_BASE); |
305 | mpu_setup(); | ||
44 | bootmem_init(); | 306 | bootmem_init(); |
45 | } | 307 | } |
46 | 308 | ||
@@ -87,16 +349,16 @@ void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset, | |||
87 | return __arm_ioremap_pfn(pfn, offset, size, mtype); | 349 | return __arm_ioremap_pfn(pfn, offset, size, mtype); |
88 | } | 350 | } |
89 | 351 | ||
90 | void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size, | 352 | void __iomem *__arm_ioremap(phys_addr_t phys_addr, size_t size, |
91 | unsigned int mtype) | 353 | unsigned int mtype) |
92 | { | 354 | { |
93 | return (void __iomem *)phys_addr; | 355 | return (void __iomem *)phys_addr; |
94 | } | 356 | } |
95 | EXPORT_SYMBOL(__arm_ioremap); | 357 | EXPORT_SYMBOL(__arm_ioremap); |
96 | 358 | ||
97 | void __iomem * (*arch_ioremap_caller)(unsigned long, size_t, unsigned int, void *); | 359 | void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *); |
98 | 360 | ||
99 | void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size, | 361 | void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size, |
100 | unsigned int mtype, void *caller) | 362 | unsigned int mtype, void *caller) |
101 | { | 363 | { |
102 | return __arm_ioremap(phys_addr, size, mtype); | 364 | return __arm_ioremap(phys_addr, size, mtype); |
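The ioremap prototype changes at the bottom of nommu.c are type-only, but they matter: with LPAE enabled, phys_addr_t widens to 64 bits, and the old unsigned long parameters would silently truncate any physical address above 4 GiB. A small illustration of the difference (CONFIG_ARM_LPAE stands in for the kernel's config option):

    #include <stdint.h>
    #include <stdio.h>

    #ifdef CONFIG_ARM_LPAE
    typedef uint64_t phys_addr_t;   /* up to 40-bit physical addresses fit */
    #else
    typedef uint32_t phys_addr_t;   /* anything above 4 GiB truncates */
    #endif

    int main(void)
    {
        phys_addr_t pa = (phys_addr_t)0x880000000ULL;  /* a 36-bit address */
        printf("sizeof(phys_addr_t) = %zu, pa = %#llx\n",
               sizeof(phys_addr_t), (unsigned long long)pa);
        return 0;
    }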
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S index 2bb61e703d6c..d1a2d05971e0 100644 --- a/arch/arm/mm/proc-arm1020.S +++ b/arch/arm/mm/proc-arm1020.S | |||
@@ -443,8 +443,6 @@ ENTRY(cpu_arm1020_set_pte_ext) | |||
443 | #endif /* CONFIG_MMU */ | 443 | #endif /* CONFIG_MMU */ |
444 | mov pc, lr | 444 | mov pc, lr |
445 | 445 | ||
446 | __CPUINIT | ||
447 | |||
448 | .type __arm1020_setup, #function | 446 | .type __arm1020_setup, #function |
449 | __arm1020_setup: | 447 | __arm1020_setup: |
450 | mov r0, #0 | 448 | mov r0, #0 |
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S index 8f96aa40f510..9d89405c3d03 100644 --- a/arch/arm/mm/proc-arm1020e.S +++ b/arch/arm/mm/proc-arm1020e.S | |||
@@ -425,8 +425,6 @@ ENTRY(cpu_arm1020e_set_pte_ext) | |||
425 | #endif /* CONFIG_MMU */ | 425 | #endif /* CONFIG_MMU */ |
426 | mov pc, lr | 426 | mov pc, lr |
427 | 427 | ||
428 | __CPUINIT | ||
429 | |||
430 | .type __arm1020e_setup, #function | 428 | .type __arm1020e_setup, #function |
431 | __arm1020e_setup: | 429 | __arm1020e_setup: |
432 | mov r0, #0 | 430 | mov r0, #0 |
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S index 8ebe4a469a22..6f01a0ae3b30 100644 --- a/arch/arm/mm/proc-arm1022.S +++ b/arch/arm/mm/proc-arm1022.S | |||
@@ -407,8 +407,6 @@ ENTRY(cpu_arm1022_set_pte_ext) | |||
407 | #endif /* CONFIG_MMU */ | 407 | #endif /* CONFIG_MMU */ |
408 | mov pc, lr | 408 | mov pc, lr |
409 | 409 | ||
410 | __CPUINIT | ||
411 | |||
412 | .type __arm1022_setup, #function | 410 | .type __arm1022_setup, #function |
413 | __arm1022_setup: | 411 | __arm1022_setup: |
414 | mov r0, #0 | 412 | mov r0, #0 |
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S index 093fc7e520c3..4799a24b43e6 100644 --- a/arch/arm/mm/proc-arm1026.S +++ b/arch/arm/mm/proc-arm1026.S | |||
@@ -396,9 +396,6 @@ ENTRY(cpu_arm1026_set_pte_ext) | |||
396 | #endif /* CONFIG_MMU */ | 396 | #endif /* CONFIG_MMU */ |
397 | mov pc, lr | 397 | mov pc, lr |
398 | 398 | ||
399 | |||
400 | __CPUINIT | ||
401 | |||
402 | .type __arm1026_setup, #function | 399 | .type __arm1026_setup, #function |
403 | __arm1026_setup: | 400 | __arm1026_setup: |
404 | mov r0, #0 | 401 | mov r0, #0 |
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S index 0ac908c7ade1..d42c37f9f5bc 100644 --- a/arch/arm/mm/proc-arm720.S +++ b/arch/arm/mm/proc-arm720.S | |||
@@ -116,8 +116,6 @@ ENTRY(cpu_arm720_reset) | |||
116 | ENDPROC(cpu_arm720_reset) | 116 | ENDPROC(cpu_arm720_reset) |
117 | .popsection | 117 | .popsection |
118 | 118 | ||
119 | __CPUINIT | ||
120 | |||
121 | .type __arm710_setup, #function | 119 | .type __arm710_setup, #function |
122 | __arm710_setup: | 120 | __arm710_setup: |
123 | mov r0, #0 | 121 | mov r0, #0 |
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S index fde2d2a794cf..9b0ae90cbf17 100644 --- a/arch/arm/mm/proc-arm740.S +++ b/arch/arm/mm/proc-arm740.S | |||
@@ -60,8 +60,6 @@ ENTRY(cpu_arm740_reset) | |||
60 | ENDPROC(cpu_arm740_reset) | 60 | ENDPROC(cpu_arm740_reset) |
61 | .popsection | 61 | .popsection |
62 | 62 | ||
63 | __CPUINIT | ||
64 | |||
65 | .type __arm740_setup, #function | 63 | .type __arm740_setup, #function |
66 | __arm740_setup: | 64 | __arm740_setup: |
67 | mov r0, #0 | 65 | mov r0, #0 |
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S index 6ddea3e464bd..f6cc3f63ce39 100644 --- a/arch/arm/mm/proc-arm7tdmi.S +++ b/arch/arm/mm/proc-arm7tdmi.S | |||
@@ -51,8 +51,6 @@ ENTRY(cpu_arm7tdmi_reset) | |||
51 | ENDPROC(cpu_arm7tdmi_reset) | 51 | ENDPROC(cpu_arm7tdmi_reset) |
52 | .popsection | 52 | .popsection |
53 | 53 | ||
54 | __CPUINIT | ||
55 | |||
56 | .type __arm7tdmi_setup, #function | 54 | .type __arm7tdmi_setup, #function |
57 | __arm7tdmi_setup: | 55 | __arm7tdmi_setup: |
58 | mov pc, lr | 56 | mov pc, lr |
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S index 2556cf1c2da1..549557df6d57 100644 --- a/arch/arm/mm/proc-arm920.S +++ b/arch/arm/mm/proc-arm920.S | |||
@@ -410,8 +410,6 @@ ENTRY(cpu_arm920_do_resume) | |||
410 | ENDPROC(cpu_arm920_do_resume) | 410 | ENDPROC(cpu_arm920_do_resume) |
411 | #endif | 411 | #endif |
412 | 412 | ||
413 | __CPUINIT | ||
414 | |||
415 | .type __arm920_setup, #function | 413 | .type __arm920_setup, #function |
416 | __arm920_setup: | 414 | __arm920_setup: |
417 | mov r0, #0 | 415 | mov r0, #0 |
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S index 4464c49d7449..2a758b06c6f6 100644 --- a/arch/arm/mm/proc-arm922.S +++ b/arch/arm/mm/proc-arm922.S | |||
@@ -388,8 +388,6 @@ ENTRY(cpu_arm922_set_pte_ext) | |||
388 | #endif /* CONFIG_MMU */ | 388 | #endif /* CONFIG_MMU */ |
389 | mov pc, lr | 389 | mov pc, lr |
390 | 390 | ||
391 | __CPUINIT | ||
392 | |||
393 | .type __arm922_setup, #function | 391 | .type __arm922_setup, #function |
394 | __arm922_setup: | 392 | __arm922_setup: |
395 | mov r0, #0 | 393 | mov r0, #0 |
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S index 281eb9b9c1d6..97448c3acf38 100644 --- a/arch/arm/mm/proc-arm925.S +++ b/arch/arm/mm/proc-arm925.S | |||
@@ -438,8 +438,6 @@ ENTRY(cpu_arm925_set_pte_ext) | |||
438 | #endif /* CONFIG_MMU */ | 438 | #endif /* CONFIG_MMU */ |
439 | mov pc, lr | 439 | mov pc, lr |
440 | 440 | ||
441 | __CPUINIT | ||
442 | |||
443 | .type __arm925_setup, #function | 441 | .type __arm925_setup, #function |
444 | __arm925_setup: | 442 | __arm925_setup: |
445 | mov r0, #0 | 443 | mov r0, #0 |
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S index 344c8a548cc0..0f098f407c9f 100644 --- a/arch/arm/mm/proc-arm926.S +++ b/arch/arm/mm/proc-arm926.S | |||
@@ -425,8 +425,6 @@ ENTRY(cpu_arm926_do_resume) | |||
425 | ENDPROC(cpu_arm926_do_resume) | 425 | ENDPROC(cpu_arm926_do_resume) |
426 | #endif | 426 | #endif |
427 | 427 | ||
428 | __CPUINIT | ||
429 | |||
430 | .type __arm926_setup, #function | 428 | .type __arm926_setup, #function |
431 | __arm926_setup: | 429 | __arm926_setup: |
432 | mov r0, #0 | 430 | mov r0, #0 |
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S index 8da189d4a402..1c39a704ff6e 100644 --- a/arch/arm/mm/proc-arm940.S +++ b/arch/arm/mm/proc-arm940.S | |||
@@ -273,8 +273,6 @@ ENDPROC(arm940_dma_unmap_area) | |||
273 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) | 273 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
274 | define_cache_functions arm940 | 274 | define_cache_functions arm940 |
275 | 275 | ||
276 | __CPUINIT | ||
277 | |||
278 | .type __arm940_setup, #function | 276 | .type __arm940_setup, #function |
279 | __arm940_setup: | 277 | __arm940_setup: |
280 | mov r0, #0 | 278 | mov r0, #0 |
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S index f666cf34075a..0289cd905e73 100644 --- a/arch/arm/mm/proc-arm946.S +++ b/arch/arm/mm/proc-arm946.S | |||
@@ -326,8 +326,6 @@ ENTRY(cpu_arm946_dcache_clean_area) | |||
326 | mcr p15, 0, r0, c7, c10, 4 @ drain WB | 326 | mcr p15, 0, r0, c7, c10, 4 @ drain WB |
327 | mov pc, lr | 327 | mov pc, lr |
328 | 328 | ||
329 | __CPUINIT | ||
330 | |||
331 | .type __arm946_setup, #function | 329 | .type __arm946_setup, #function |
332 | __arm946_setup: | 330 | __arm946_setup: |
333 | mov r0, #0 | 331 | mov r0, #0 |
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S index 8881391dfb9e..f51197ba754a 100644 --- a/arch/arm/mm/proc-arm9tdmi.S +++ b/arch/arm/mm/proc-arm9tdmi.S | |||
@@ -51,8 +51,6 @@ ENTRY(cpu_arm9tdmi_reset) | |||
51 | ENDPROC(cpu_arm9tdmi_reset) | 51 | ENDPROC(cpu_arm9tdmi_reset) |
52 | .popsection | 52 | .popsection |
53 | 53 | ||
54 | __CPUINIT | ||
55 | |||
56 | .type __arm9tdmi_setup, #function | 54 | .type __arm9tdmi_setup, #function |
57 | __arm9tdmi_setup: | 55 | __arm9tdmi_setup: |
58 | mov pc, lr | 56 | mov pc, lr |
diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S index aaeb6c127c7a..2dfc0f1d3bfd 100644 --- a/arch/arm/mm/proc-fa526.S +++ b/arch/arm/mm/proc-fa526.S | |||
@@ -135,8 +135,6 @@ ENTRY(cpu_fa526_set_pte_ext) | |||
135 | #endif | 135 | #endif |
136 | mov pc, lr | 136 | mov pc, lr |
137 | 137 | ||
138 | __CPUINIT | ||
139 | |||
140 | .type __fa526_setup, #function | 138 | .type __fa526_setup, #function |
141 | __fa526_setup: | 139 | __fa526_setup: |
142 | /* On return of this routine, r0 must carry correct flags for CFG register */ | 140 | /* On return of this routine, r0 must carry correct flags for CFG register */ |
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S index 4106b09e0c29..d5146b98c8d1 100644 --- a/arch/arm/mm/proc-feroceon.S +++ b/arch/arm/mm/proc-feroceon.S | |||
@@ -514,8 +514,6 @@ ENTRY(cpu_feroceon_set_pte_ext) | |||
514 | #endif | 514 | #endif |
515 | mov pc, lr | 515 | mov pc, lr |
516 | 516 | ||
517 | __CPUINIT | ||
518 | |||
519 | .type __feroceon_setup, #function | 517 | .type __feroceon_setup, #function |
520 | __feroceon_setup: | 518 | __feroceon_setup: |
521 | mov r0, #0 | 519 | mov r0, #0 |
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S index 0b60dd3d742a..40acba595731 100644 --- a/arch/arm/mm/proc-mohawk.S +++ b/arch/arm/mm/proc-mohawk.S | |||
@@ -383,8 +383,6 @@ ENTRY(cpu_mohawk_do_resume) | |||
383 | ENDPROC(cpu_mohawk_do_resume) | 383 | ENDPROC(cpu_mohawk_do_resume) |
384 | #endif | 384 | #endif |
385 | 385 | ||
386 | __CPUINIT | ||
387 | |||
388 | .type __mohawk_setup, #function | 386 | .type __mohawk_setup, #function |
389 | __mohawk_setup: | 387 | __mohawk_setup: |
390 | mov r0, #0 | 388 | mov r0, #0 |
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S index 775d70fba937..c45319c8f1d9 100644 --- a/arch/arm/mm/proc-sa110.S +++ b/arch/arm/mm/proc-sa110.S | |||
@@ -159,8 +159,6 @@ ENTRY(cpu_sa110_set_pte_ext) | |||
159 | #endif | 159 | #endif |
160 | mov pc, lr | 160 | mov pc, lr |
161 | 161 | ||
162 | __CPUINIT | ||
163 | |||
164 | .type __sa110_setup, #function | 162 | .type __sa110_setup, #function |
165 | __sa110_setup: | 163 | __sa110_setup: |
166 | mov r10, #0 | 164 | mov r10, #0 |
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S index d92dfd081429..09d241ae2dbe 100644 --- a/arch/arm/mm/proc-sa1100.S +++ b/arch/arm/mm/proc-sa1100.S | |||
@@ -198,8 +198,6 @@ ENTRY(cpu_sa1100_do_resume) | |||
198 | ENDPROC(cpu_sa1100_do_resume) | 198 | ENDPROC(cpu_sa1100_do_resume) |
199 | #endif | 199 | #endif |
200 | 200 | ||
201 | __CPUINIT | ||
202 | |||
203 | .type __sa1100_setup, #function | 201 | .type __sa1100_setup, #function |
204 | __sa1100_setup: | 202 | __sa1100_setup: |
205 | mov r0, #0 | 203 | mov r0, #0 |
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index 919405e20b80..1128064fddcb 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S | |||
@@ -140,8 +140,10 @@ ENTRY(cpu_v6_set_pte_ext) | |||
140 | ENTRY(cpu_v6_do_suspend) | 140 | ENTRY(cpu_v6_do_suspend) |
141 | stmfd sp!, {r4 - r9, lr} | 141 | stmfd sp!, {r4 - r9, lr} |
142 | mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID | 142 | mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID |
143 | #ifdef CONFIG_MMU | ||
143 | mrc p15, 0, r5, c3, c0, 0 @ Domain ID | 144 | mrc p15, 0, r5, c3, c0, 0 @ Domain ID |
144 | mrc p15, 0, r6, c2, c0, 1 @ Translation table base 1 | 145 | mrc p15, 0, r6, c2, c0, 1 @ Translation table base 1 |
146 | #endif | ||
145 | mrc p15, 0, r7, c1, c0, 1 @ auxiliary control register | 147 | mrc p15, 0, r7, c1, c0, 1 @ auxiliary control register |
146 | mrc p15, 0, r8, c1, c0, 2 @ co-processor access control | 148 | mrc p15, 0, r8, c1, c0, 2 @ co-processor access control |
147 | mrc p15, 0, r9, c1, c0, 0 @ control register | 149 | mrc p15, 0, r9, c1, c0, 0 @ control register |
@@ -158,14 +160,16 @@ ENTRY(cpu_v6_do_resume) | |||
158 | mcr p15, 0, ip, c13, c0, 1 @ set reserved context ID | 160 | mcr p15, 0, ip, c13, c0, 1 @ set reserved context ID |
159 | ldmia r0, {r4 - r9} | 161 | ldmia r0, {r4 - r9} |
160 | mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID | 162 | mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID |
163 | #ifdef CONFIG_MMU | ||
161 | mcr p15, 0, r5, c3, c0, 0 @ Domain ID | 164 | mcr p15, 0, r5, c3, c0, 0 @ Domain ID |
162 | ALT_SMP(orr r1, r1, #TTB_FLAGS_SMP) | 165 | ALT_SMP(orr r1, r1, #TTB_FLAGS_SMP) |
163 | ALT_UP(orr r1, r1, #TTB_FLAGS_UP) | 166 | ALT_UP(orr r1, r1, #TTB_FLAGS_UP) |
164 | mcr p15, 0, r1, c2, c0, 0 @ Translation table base 0 | 167 | mcr p15, 0, r1, c2, c0, 0 @ Translation table base 0 |
165 | mcr p15, 0, r6, c2, c0, 1 @ Translation table base 1 | 168 | mcr p15, 0, r6, c2, c0, 1 @ Translation table base 1 |
169 | mcr p15, 0, ip, c2, c0, 2 @ TTB control register | ||
170 | #endif | ||
166 | mcr p15, 0, r7, c1, c0, 1 @ auxiliary control register | 171 | mcr p15, 0, r7, c1, c0, 1 @ auxiliary control register |
167 | mcr p15, 0, r8, c1, c0, 2 @ co-processor access control | 172 | mcr p15, 0, r8, c1, c0, 2 @ co-processor access control |
168 | mcr p15, 0, ip, c2, c0, 2 @ TTB control register | ||
169 | mcr p15, 0, ip, c7, c5, 4 @ ISB | 173 | mcr p15, 0, ip, c7, c5, 4 @ ISB |
170 | mov r0, r9 @ control register | 174 | mov r0, r9 @ control register |
171 | b cpu_resume_mmu | 175 | b cpu_resume_mmu |
@@ -176,8 +180,6 @@ ENDPROC(cpu_v6_do_resume) | |||
176 | 180 | ||
177 | .align | 181 | .align |
178 | 182 | ||
179 | __CPUINIT | ||
180 | |||
181 | /* | 183 | /* |
182 | * __v6_setup | 184 | * __v6_setup |
183 | * | 185 | * |
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S index 9704097c450e..bdd3be4be77a 100644 --- a/arch/arm/mm/proc-v7-2level.S +++ b/arch/arm/mm/proc-v7-2level.S | |||
@@ -110,7 +110,7 @@ ENTRY(cpu_v7_set_pte_ext) | |||
110 | ARM( str r3, [r0, #2048]! ) | 110 | ARM( str r3, [r0, #2048]! ) |
111 | THUMB( add r0, r0, #2048 ) | 111 | THUMB( add r0, r0, #2048 ) |
112 | THUMB( str r3, [r0] ) | 112 | THUMB( str r3, [r0] ) |
113 | ALT_SMP(mov pc,lr) | 113 | ALT_SMP(W(nop)) |
114 | ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte | 114 | ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte |
115 | #endif | 115 | #endif |
116 | mov pc, lr | 116 | mov pc, lr |
@@ -160,8 +160,6 @@ ENDPROC(cpu_v7_set_pte_ext) | |||
160 | mcr p15, 0, \ttbr1, c2, c0, 1 @ load TTB1 | 160 | mcr p15, 0, \ttbr1, c2, c0, 1 @ load TTB1 |
161 | .endm | 161 | .endm |
162 | 162 | ||
163 | __CPUINIT | ||
164 | |||
165 | /* AT | 163 | /* AT |
166 | * TFR EV X F I D LR S | 164 | * TFR EV X F I D LR S |
167 | * .EEE ..EE PUI. .T.T 4RVI ZWRS BLDP WCAM | 165 | * .EEE ..EE PUI. .T.T 4RVI ZWRS BLDP WCAM |
@@ -172,5 +170,3 @@ ENDPROC(cpu_v7_set_pte_ext) | |||
172 | .type v7_crval, #object | 170 | .type v7_crval, #object |
173 | v7_crval: | 171 | v7_crval: |
174 | crval clear=0x2120c302, mmuset=0x10c03c7d, ucset=0x00c01c7c | 172 | crval clear=0x2120c302, mmuset=0x10c03c7d, ucset=0x00c01c7c |
175 | |||
176 | .previous | ||
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S index 363027e811d6..01a719e18bb0 100644 --- a/arch/arm/mm/proc-v7-3level.S +++ b/arch/arm/mm/proc-v7-3level.S | |||
@@ -39,6 +39,14 @@ | |||
39 | #define TTB_FLAGS_SMP (TTB_IRGN_WBWA|TTB_S|TTB_RGN_OC_WBWA) | 39 | #define TTB_FLAGS_SMP (TTB_IRGN_WBWA|TTB_S|TTB_RGN_OC_WBWA) |
40 | #define PMD_FLAGS_SMP (PMD_SECT_WBWA|PMD_SECT_S) | 40 | #define PMD_FLAGS_SMP (PMD_SECT_WBWA|PMD_SECT_S) |
41 | 41 | ||
42 | #ifndef __ARMEB__ | ||
43 | # define rpgdl r0 | ||
44 | # define rpgdh r1 | ||
45 | #else | ||
46 | # define rpgdl r1 | ||
47 | # define rpgdh r0 | ||
48 | #endif | ||
49 | |||
42 | /* | 50 | /* |
43 | * cpu_v7_switch_mm(pgd_phys, tsk) | 51 | * cpu_v7_switch_mm(pgd_phys, tsk) |
44 | * | 52 | * |
@@ -47,10 +55,10 @@ | |||
47 | */ | 55 | */ |
48 | ENTRY(cpu_v7_switch_mm) | 56 | ENTRY(cpu_v7_switch_mm) |
49 | #ifdef CONFIG_MMU | 57 | #ifdef CONFIG_MMU |
50 | mmid r1, r1 @ get mm->context.id | 58 | mmid r2, r2 |
51 | asid r3, r1 | 59 | asid r2, r2 |
52 | mov r3, r3, lsl #(48 - 32) @ ASID | 60 | orr rpgdh, rpgdh, r2, lsl #(48 - 32) @ upper 32-bits of pgd |
53 | mcrr p15, 0, r0, r3, c2 @ set TTB 0 | 61 | mcrr p15, 0, rpgdl, rpgdh, c2 @ set TTB 0 |
54 | isb | 62 | isb |
55 | #endif | 63 | #endif |
56 | mov pc, lr | 64 | mov pc, lr |
@@ -73,7 +81,7 @@ ENTRY(cpu_v7_set_pte_ext) | |||
73 | tst r3, #1 << (55 - 32) @ L_PTE_DIRTY | 81 | tst r3, #1 << (55 - 32) @ L_PTE_DIRTY |
74 | orreq r2, #L_PTE_RDONLY | 82 | orreq r2, #L_PTE_RDONLY |
75 | 1: strd r2, r3, [r0] | 83 | 1: strd r2, r3, [r0] |
76 | ALT_SMP(mov pc, lr) | 84 | ALT_SMP(W(nop)) |
77 | ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte | 85 | ALT_UP (mcr p15, 0, r0, c7, c10, 1) @ flush_pte |
78 | #endif | 86 | #endif |
79 | mov pc, lr | 87 | mov pc, lr |
@@ -106,7 +114,8 @@ ENDPROC(cpu_v7_set_pte_ext) | |||
106 | */ | 114 | */ |
107 | .macro v7_ttb_setup, zero, ttbr0, ttbr1, tmp | 115 | .macro v7_ttb_setup, zero, ttbr0, ttbr1, tmp |
108 | ldr \tmp, =swapper_pg_dir @ swapper_pg_dir virtual address | 116 | ldr \tmp, =swapper_pg_dir @ swapper_pg_dir virtual address |
109 | cmp \ttbr1, \tmp @ PHYS_OFFSET > PAGE_OFFSET? (branch below) | 117 | mov \tmp, \tmp, lsr #ARCH_PGD_SHIFT |
118 | cmp \ttbr1, \tmp @ PHYS_OFFSET > PAGE_OFFSET? | ||
110 | mrc p15, 0, \tmp, c2, c0, 2 @ TTB control register | 119 | mrc p15, 0, \tmp, c2, c0, 2 @ TTB control register |
111 | orr \tmp, \tmp, #TTB_EAE | 120 | orr \tmp, \tmp, #TTB_EAE |
112 | ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP) | 121 | ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP) |
@@ -114,31 +123,23 @@ ENDPROC(cpu_v7_set_pte_ext) | |||
114 | ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP << 16) | 123 | ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP << 16) |
115 | ALT_UP(orr \tmp, \tmp, #TTB_FLAGS_UP << 16) | 124 | ALT_UP(orr \tmp, \tmp, #TTB_FLAGS_UP << 16) |
116 | /* | 125 | /* |
117 | * TTBR0/TTBR1 split (PAGE_OFFSET): | 126 | * Only use split TTBRs if PHYS_OFFSET <= PAGE_OFFSET (cmp above), |
118 | * 0x40000000: T0SZ = 2, T1SZ = 0 (not used) | 127 | * otherwise booting secondary CPUs would end up using TTBR1 for the |
119 | * 0x80000000: T0SZ = 0, T1SZ = 1 | 128 | * identity mapping set up in TTBR0. |
120 | * 0xc0000000: T0SZ = 0, T1SZ = 2 | ||
121 | * | ||
122 | * Only use this feature if PHYS_OFFSET <= PAGE_OFFSET, otherwise | ||
123 | * booting secondary CPUs would end up using TTBR1 for the identity | ||
124 | * mapping set up in TTBR0. | ||
125 | */ | 129 | */ |
126 | bhi 9001f @ PHYS_OFFSET > PAGE_OFFSET? | 130 | orrls \tmp, \tmp, #TTBR1_SIZE @ TTBCR.T1SZ |
127 | orr \tmp, \tmp, #(((PAGE_OFFSET >> 30) - 1) << 16) @ TTBCR.T1SZ | 131 | mcr p15, 0, \tmp, c2, c0, 2 @ TTBCR |
128 | #if defined CONFIG_VMSPLIT_2G | 132 | mov \tmp, \ttbr1, lsr #(32 - ARCH_PGD_SHIFT) @ upper bits |
129 | /* PAGE_OFFSET == 0x80000000, T1SZ == 1 */ | 133 | mov \ttbr1, \ttbr1, lsl #ARCH_PGD_SHIFT @ lower bits |
130 | add \ttbr1, \ttbr1, #1 << 4 @ skip two L1 entries | 134 | addls \ttbr1, \ttbr1, #TTBR1_OFFSET |
131 | #elif defined CONFIG_VMSPLIT_3G | 135 | mcrr p15, 1, \ttbr1, \zero, c2 @ load TTBR1 |
132 | /* PAGE_OFFSET == 0xc0000000, T1SZ == 2 */ | 136 | mov \tmp, \ttbr0, lsr #(32 - ARCH_PGD_SHIFT) @ upper bits |
133 | add \ttbr1, \ttbr1, #4096 * (1 + 3) @ only L2 used, skip pgd+3*pmd | 137 | mov \ttbr0, \ttbr0, lsl #ARCH_PGD_SHIFT @ lower bits |
134 | #endif | 138 | mcrr p15, 0, \ttbr0, \zero, c2 @ load TTBR0 |
135 | /* CONFIG_VMSPLIT_1G does not need TTBR1 adjustment */ | 139 | mcrr p15, 1, \ttbr1, \zero, c2 @ load TTBR1 |
136 | 9001: mcr p15, 0, \tmp, c2, c0, 2 @ TTB control register | 140 | mcrr p15, 0, \ttbr0, \zero, c2 @ load TTBR0 |
137 | mcrr p15, 1, \ttbr1, \zero, c2 @ load TTBR1 | ||
138 | .endm | 141 | .endm |
139 | 142 | ||
140 | __CPUINIT | ||
141 | |||
142 | /* | 143 | /* |
143 | * AT | 144 | * AT |
144 | * TFR EV X F IHD LR S | 145 | * TFR EV X F IHD LR S |
@@ -150,5 +151,3 @@ ENDPROC(cpu_v7_set_pte_ext) | |||
150 | .type v7_crval, #object | 151 | .type v7_crval, #object |
151 | v7_crval: | 152 | v7_crval: |
152 | crval clear=0x0120c302, mmuset=0x30c23c7d, ucset=0x00c01c7c | 153 | crval clear=0x0120c302, mmuset=0x30c23c7d, ucset=0x00c01c7c |
153 | |||
154 | .previous | ||
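The rewritten cpu_v7_switch_mm above now takes the pgd as a full 64-bit value in r0/r1 (hence the rpgdl/rpgdh endianness aliases) and folds the ASID into bits 55:48 of TTBR0, which is where the LPAE TTBR format carries it. A sketch of the value being composed; the 48-bit shift is architectural, not a kernel constant:

    #include <stdint.h>
    #include <stdio.h>

    /* LPAE 64-bit TTBR0: table base in the low bits, ASID in bits 55:48 */
    static uint64_t make_ttbr0(uint64_t pgd_phys, uint8_t asid)
    {
        return pgd_phys | ((uint64_t)asid << 48);
    }

    int main(void)
    {
        /* hypothetical pgd above 4 GiB: exactly what the old 32-bit
         * pgd_phys argument could not represent */
        printf("TTBR0 = %#llx\n",
               (unsigned long long)make_ttbr0(0x880004000ULL, 0x2a));
        return 0;
    }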
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index e35fec34453e..73398bcf9bd8 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S | |||
@@ -75,13 +75,14 @@ ENTRY(cpu_v7_do_idle) | |||
75 | ENDPROC(cpu_v7_do_idle) | 75 | ENDPROC(cpu_v7_do_idle) |
76 | 76 | ||
77 | ENTRY(cpu_v7_dcache_clean_area) | 77 | ENTRY(cpu_v7_dcache_clean_area) |
78 | ALT_SMP(mov pc, lr) @ MP extensions imply L1 PTW | 78 | ALT_SMP(W(nop)) @ MP extensions imply L1 PTW |
79 | ALT_UP(W(nop)) | 79 | ALT_UP_B(1f) |
80 | dcache_line_size r2, r3 | 80 | mov pc, lr |
81 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry | 81 | 1: dcache_line_size r2, r3 |
82 | 2: mcr p15, 0, r0, c7, c10, 1 @ clean D entry | ||
82 | add r0, r0, r2 | 83 | add r0, r0, r2 |
83 | subs r1, r1, r2 | 84 | subs r1, r1, r2 |
84 | bhi 1b | 85 | bhi 2b |
85 | dsb | 86 | dsb |
86 | mov pc, lr | 87 | mov pc, lr |
87 | ENDPROC(cpu_v7_dcache_clean_area) | 88 | ENDPROC(cpu_v7_dcache_clean_area) |
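The change to cpu_v7_dcache_clean_area above lets SMP cores return immediately (the MP extensions make the page-table walker cache-coherent, as the comment notes) while UP cores branch to the clean loop, instead of relying on a patched-out first instruction. The UP path is a classic clean-by-MVA loop; in C-with-inline-asm form it looks roughly like this sketch, which assumes a fixed 64-byte line where the real code reads it from CTR via dcache_line_size:

    /* clean each D-cache line by MVA (DCCMVAC: mcr p15, 0, Rt, c7, c10, 1);
     * ARM-only sketch, not a drop-in replacement for the assembly */
    static void dcache_clean_area(void *addr, unsigned long size)
    {
        const unsigned long line = 64;          /* assumed cache line size */
        unsigned long p = (unsigned long)addr;
        unsigned long end = p + size;

        for (; p < end; p += line)
            asm volatile("mcr p15, 0, %0, c7, c10, 1" : : "r" (p) : "memory");
        asm volatile("dsb" ::: "memory");       /* drain before returning */
    }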
@@ -98,9 +99,11 @@ ENTRY(cpu_v7_do_suspend) | |||
98 | mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID | 99 | mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID |
99 | mrc p15, 0, r5, c13, c0, 3 @ User r/o thread ID | 100 | mrc p15, 0, r5, c13, c0, 3 @ User r/o thread ID |
100 | stmia r0!, {r4 - r5} | 101 | stmia r0!, {r4 - r5} |
102 | #ifdef CONFIG_MMU | ||
101 | mrc p15, 0, r6, c3, c0, 0 @ Domain ID | 103 | mrc p15, 0, r6, c3, c0, 0 @ Domain ID |
102 | mrc p15, 0, r7, c2, c0, 1 @ TTB 1 | 104 | mrc p15, 0, r7, c2, c0, 1 @ TTB 1 |
103 | mrc p15, 0, r11, c2, c0, 2 @ TTB control register | 105 | mrc p15, 0, r11, c2, c0, 2 @ TTB control register |
106 | #endif | ||
104 | mrc p15, 0, r8, c1, c0, 0 @ Control register | 107 | mrc p15, 0, r8, c1, c0, 0 @ Control register |
105 | mrc p15, 0, r9, c1, c0, 1 @ Auxiliary control register | 108 | mrc p15, 0, r9, c1, c0, 1 @ Auxiliary control register |
106 | mrc p15, 0, r10, c1, c0, 2 @ Co-processor access control | 109 | mrc p15, 0, r10, c1, c0, 2 @ Co-processor access control |
@@ -110,13 +113,14 @@ ENDPROC(cpu_v7_do_suspend) | |||
110 | 113 | ||
111 | ENTRY(cpu_v7_do_resume) | 114 | ENTRY(cpu_v7_do_resume) |
112 | mov ip, #0 | 115 | mov ip, #0 |
113 | mcr p15, 0, ip, c8, c7, 0 @ invalidate TLBs | ||
114 | mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache | 116 | mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache |
115 | mcr p15, 0, ip, c13, c0, 1 @ set reserved context ID | 117 | mcr p15, 0, ip, c13, c0, 1 @ set reserved context ID |
116 | ldmia r0!, {r4 - r5} | 118 | ldmia r0!, {r4 - r5} |
117 | mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID | 119 | mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID |
118 | mcr p15, 0, r5, c13, c0, 3 @ User r/o thread ID | 120 | mcr p15, 0, r5, c13, c0, 3 @ User r/o thread ID |
119 | ldmia r0, {r6 - r11} | 121 | ldmia r0, {r6 - r11} |
122 | #ifdef CONFIG_MMU | ||
123 | mcr p15, 0, ip, c8, c7, 0 @ invalidate TLBs | ||
120 | mcr p15, 0, r6, c3, c0, 0 @ Domain ID | 124 | mcr p15, 0, r6, c3, c0, 0 @ Domain ID |
121 | #ifndef CONFIG_ARM_LPAE | 125 | #ifndef CONFIG_ARM_LPAE |
122 | ALT_SMP(orr r1, r1, #TTB_FLAGS_SMP) | 126 | ALT_SMP(orr r1, r1, #TTB_FLAGS_SMP) |
@@ -125,14 +129,15 @@ ENTRY(cpu_v7_do_resume) | |||
125 | mcr p15, 0, r1, c2, c0, 0 @ TTB 0 | 129 | mcr p15, 0, r1, c2, c0, 0 @ TTB 0 |
126 | mcr p15, 0, r7, c2, c0, 1 @ TTB 1 | 130 | mcr p15, 0, r7, c2, c0, 1 @ TTB 1 |
127 | mcr p15, 0, r11, c2, c0, 2 @ TTB control register | 131 | mcr p15, 0, r11, c2, c0, 2 @ TTB control register |
128 | mrc p15, 0, r4, c1, c0, 1 @ Read Auxiliary control register | ||
129 | teq r4, r9 @ Is it already set? | ||
130 | mcrne p15, 0, r9, c1, c0, 1 @ No, so write it | ||
131 | mcr p15, 0, r10, c1, c0, 2 @ Co-processor access control | ||
132 | ldr r4, =PRRR @ PRRR | 132 | ldr r4, =PRRR @ PRRR |
133 | ldr r5, =NMRR @ NMRR | 133 | ldr r5, =NMRR @ NMRR |
134 | mcr p15, 0, r4, c10, c2, 0 @ write PRRR | 134 | mcr p15, 0, r4, c10, c2, 0 @ write PRRR |
135 | mcr p15, 0, r5, c10, c2, 1 @ write NMRR | 135 | mcr p15, 0, r5, c10, c2, 1 @ write NMRR |
136 | #endif /* CONFIG_MMU */ | ||
137 | mrc p15, 0, r4, c1, c0, 1 @ Read Auxiliary control register | ||
138 | teq r4, r9 @ Is it already set? | ||
139 | mcrne p15, 0, r9, c1, c0, 1 @ No, so write it | ||
140 | mcr p15, 0, r10, c1, c0, 2 @ Co-processor access control | ||
136 | isb | 141 | isb |
137 | dsb | 142 | dsb |
138 | mov r0, r8 @ control register | 143 | mov r0, r8 @ control register |
@@ -163,8 +168,6 @@ ENDPROC(cpu_pj4b_do_idle) | |||
163 | 168 | ||
164 | #endif | 169 | #endif |
165 | 170 | ||
166 | __CPUINIT | ||
167 | |||
168 | /* | 171 | /* |
169 | * __v7_setup | 172 | * __v7_setup |
170 | * | 173 | * |
@@ -178,7 +181,8 @@ ENDPROC(cpu_pj4b_do_idle) | |||
178 | */ | 181 | */ |
179 | __v7_ca5mp_setup: | 182 | __v7_ca5mp_setup: |
180 | __v7_ca9mp_setup: | 183 | __v7_ca9mp_setup: |
181 | mov r10, #(1 << 0) @ TLB ops broadcasting | 184 | __v7_cr7mp_setup: |
185 | mov r10, #(1 << 0) @ Cache/TLB ops broadcasting | ||
182 | b 1f | 186 | b 1f |
183 | __v7_ca7mp_setup: | 187 | __v7_ca7mp_setup: |
184 | __v7_ca15mp_setup: | 188 | __v7_ca15mp_setup: |
@@ -443,6 +447,16 @@ __v7_pj4b_proc_info: | |||
443 | #endif | 447 | #endif |
444 | 448 | ||
445 | /* | 449 | /* |
450 | * ARM Ltd. Cortex R7 processor. | ||
451 | */ | ||
452 | .type __v7_cr7mp_proc_info, #object | ||
453 | __v7_cr7mp_proc_info: | ||
454 | .long 0x410fc170 | ||
455 | .long 0xff0ffff0 | ||
456 | __v7_proc __v7_cr7mp_setup | ||
457 | .size __v7_cr7mp_proc_info, . - __v7_cr7mp_proc_info | ||
458 | |||
459 | /* | ||
446 | * ARM Ltd. Cortex A7 processor. | 460 | * ARM Ltd. Cortex A7 processor. |
447 | */ | 461 | */ |
448 | .type __v7_ca7mp_proc_info, #object | 462 | .type __v7_ca7mp_proc_info, #object |
diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S new file mode 100644 index 000000000000..0c93588fcb91 --- /dev/null +++ b/arch/arm/mm/proc-v7m.S | |||
@@ -0,0 +1,157 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/mm/proc-v7m.S | ||
3 | * | ||
4 | * Copyright (C) 2008 ARM Ltd. | ||
5 | * Copyright (C) 2001 Deep Blue Solutions Ltd. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * This is the "shell" of the ARMv7-M processor support. | ||
12 | */ | ||
13 | #include <linux/linkage.h> | ||
14 | #include <asm/assembler.h> | ||
15 | #include <asm/v7m.h> | ||
16 | #include "proc-macros.S" | ||
17 | |||
18 | ENTRY(cpu_v7m_proc_init) | ||
19 | mov pc, lr | ||
20 | ENDPROC(cpu_v7m_proc_init) | ||
21 | |||
22 | ENTRY(cpu_v7m_proc_fin) | ||
23 | mov pc, lr | ||
24 | ENDPROC(cpu_v7m_proc_fin) | ||
25 | |||
26 | /* | ||
27 | * cpu_v7m_reset(loc) | ||
28 | * | ||
29 | * Perform a soft reset of the system. Put the CPU into the | ||
30 | * same state as it would be if it had been reset, and branch | ||
31 | * to what would be the reset vector. | ||
32 | * | ||
33 | * - loc - location to jump to for soft reset | ||
34 | */ | ||
35 | .align 5 | ||
36 | ENTRY(cpu_v7m_reset) | ||
37 | mov pc, r0 | ||
38 | ENDPROC(cpu_v7m_reset) | ||
39 | |||
40 | /* | ||
41 | * cpu_v7m_do_idle() | ||
42 | * | ||
43 | * Idle the processor (eg, wait for interrupt). | ||
44 | * | ||
45 | * IRQs are already disabled. | ||
46 | */ | ||
47 | ENTRY(cpu_v7m_do_idle) | ||
48 | wfi | ||
49 | mov pc, lr | ||
50 | ENDPROC(cpu_v7m_do_idle) | ||
51 | |||
52 | ENTRY(cpu_v7m_dcache_clean_area) | ||
53 | mov pc, lr | ||
54 | ENDPROC(cpu_v7m_dcache_clean_area) | ||
55 | |||
56 | /* | ||
57 | * There is no MMU, so there is nothing to do here. | ||
58 | */ | ||
59 | ENTRY(cpu_v7m_switch_mm) | ||
60 | mov pc, lr | ||
61 | ENDPROC(cpu_v7m_switch_mm) | ||
62 | |||
63 | .globl cpu_v7m_suspend_size | ||
64 | .equ cpu_v7m_suspend_size, 0 | ||
65 | |||
66 | #ifdef CONFIG_ARM_CPU_SUSPEND | ||
67 | ENTRY(cpu_v7m_do_suspend) | ||
68 | mov pc, lr | ||
69 | ENDPROC(cpu_v7m_do_suspend) | ||
70 | |||
71 | ENTRY(cpu_v7m_do_resume) | ||
72 | mov pc, lr | ||
73 | ENDPROC(cpu_v7m_do_resume) | ||
74 | #endif | ||
75 | |||
76 | .section ".text.init", #alloc, #execinstr | ||
77 | |||
78 | /* | ||
79 | * __v7m_setup | ||
80 | * | ||
81 | * This should be able to cover all ARMv7-M cores. | ||
82 | */ | ||
83 | __v7m_setup: | ||
84 | @ Configure the vector table base address | ||
85 | ldr r0, =BASEADDR_V7M_SCB | ||
86 | ldr r12, =vector_table | ||
87 | str r12, [r0, V7M_SCB_VTOR] | ||
88 | |||
89 | @ enable UsageFault, BusFault and MemManage fault. | ||
90 | ldr r5, [r0, #V7M_SCB_SHCSR] | ||
91 | orr r5, #(V7M_SCB_SHCSR_USGFAULTENA | V7M_SCB_SHCSR_BUSFAULTENA | V7M_SCB_SHCSR_MEMFAULTENA) | ||
92 | str r5, [r0, #V7M_SCB_SHCSR] | ||
93 | |||
94 | @ Lower the priority of the SVC and PendSV exceptions | ||
95 | mov r5, #0x80000000 | ||
96 | str r5, [r0, V7M_SCB_SHPR2] @ set SVC priority | ||
97 | mov r5, #0x00800000 | ||
98 | str r5, [r0, V7M_SCB_SHPR3] @ set PendSV priority | ||
99 | |||
100 | @ SVC to run the kernel in this mode | ||
101 | adr r1, BSYM(1f) | ||
102 | ldr r5, [r12, #11 * 4] @ read the SVC vector entry | ||
103 | str r1, [r12, #11 * 4] @ write the temporary SVC vector entry | ||
104 | mov r6, lr @ save LR | ||
105 | mov r7, sp @ save SP | ||
106 | ldr sp, =__v7m_setup_stack_top | ||
107 | cpsie i | ||
108 | svc #0 | ||
109 | 1: cpsid i | ||
110 | str r5, [r12, #11 * 4] @ restore the original SVC vector entry | ||
111 | mov lr, r6 @ restore LR | ||
112 | mov sp, r7 @ restore SP | ||
113 | |||
114 | @ Special-purpose control register | ||
115 | mov r1, #1 | ||
116 | msr control, r1 @ Thread mode has unprivileged access | ||
117 | |||
118 | @ Configure the System Control Register to ensure 8-byte stack alignment | ||
119 | @ Note the STKALIGN bit is either RW or RAO. | ||
120 | ldr r12, [r0, V7M_SCB_CCR] @ system control register | ||
121 | orr r12, #V7M_SCB_CCR_STKALIGN | ||
122 | str r12, [r0, V7M_SCB_CCR] | ||
123 | mov pc, lr | ||
124 | ENDPROC(__v7m_setup) | ||
125 | |||
126 | define_processor_functions v7m, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1 | ||
127 | |||
128 | .section ".rodata" | ||
129 | string cpu_arch_name, "armv7m" | ||
130 | string cpu_elf_name, "v7m" | ||
131 | string cpu_v7m_name, "ARMv7-M" | ||
132 | |||
133 | .section ".proc.info.init", #alloc, #execinstr | ||
134 | |||
135 | /* | ||
136 | * Match any ARMv7-M processor core. | ||
137 | */ | ||
138 | .type __v7m_proc_info, #object | ||
139 | __v7m_proc_info: | ||
140 | .long 0x000f0000 @ Required ID value | ||
141 | .long 0x000f0000 @ Mask for ID | ||
142 | .long 0 @ proc_info_list.__cpu_mm_mmu_flags | ||
143 | .long 0 @ proc_info_list.__cpu_io_mmu_flags | ||
144 | b __v7m_setup @ proc_info_list.__cpu_flush | ||
145 | .long cpu_arch_name | ||
146 | .long cpu_elf_name | ||
147 | .long HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT | ||
148 | .long cpu_v7m_name | ||
149 | .long v7m_processor_functions @ proc_info_list.proc | ||
150 | .long 0 @ proc_info_list.tlb | ||
151 | .long 0 @ proc_info_list.user | ||
152 | .long nop_cache_fns @ proc_info_list.cache | ||
153 | .size __v7m_proc_info, . - __v7m_proc_info | ||
154 | |||
155 | __v7m_setup_stack: | ||
156 | .space 4 * 8 @ 8 registers | ||
157 | __v7m_setup_stack_top: | ||
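Most of __v7m_setup above is plain memory-mapped SCB programming: point VTOR at the vector table, enable the three configurable fault handlers, deprioritise SVC and PendSV, and force 8-byte stack alignment. The svc #0 dance in the middle bounces through a temporary vector entry so the kernel comes back running in handler mode, with CONTROL set so thread mode is unprivileged. The SCB part translates directly into C; the offsets below are the architectural v7-M ones, and BASEADDR_V7M_SCB is assumed to be the standard 0xe000ed00:

    #include <stdint.h>

    #define V7M_SCB    0xe000ed00UL                 /* assumed SCB base */
    #define REG(off)   (*(volatile uint32_t *)(V7M_SCB + (off)))
    #define SCB_VTOR   REG(0x08)
    #define SCB_CCR    REG(0x14)
    #define SCB_SHPR2  REG(0x1c)
    #define SCB_SHPR3  REG(0x20)
    #define SCB_SHCSR  REG(0x24)

    #define SHCSR_FAULT_ENABLES (7U << 16)  /* MemManage, BusFault, UsageFault */
    #define CCR_STKALIGN        (1U << 9)

    extern uint32_t vector_table[];

    static void v7m_scb_setup(void)
    {
        SCB_VTOR   = (uint32_t)(uintptr_t)vector_table; /* vector table base */
        SCB_SHCSR |= SHCSR_FAULT_ENABLES;               /* enable fault handlers */
        SCB_SHPR2  = 0x80000000;                        /* lower SVC priority */
        SCB_SHPR3  = 0x00800000;                        /* lower PendSV priority */
        SCB_CCR   |= CCR_STKALIGN;                      /* 8-byte stack alignment */
    }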
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S index e8efd83b6f25..dc1645890042 100644 --- a/arch/arm/mm/proc-xsc3.S +++ b/arch/arm/mm/proc-xsc3.S | |||
@@ -446,8 +446,6 @@ ENTRY(cpu_xsc3_do_resume) | |||
446 | ENDPROC(cpu_xsc3_do_resume) | 446 | ENDPROC(cpu_xsc3_do_resume) |
447 | #endif | 447 | #endif |
448 | 448 | ||
449 | __CPUINIT | ||
450 | |||
451 | .type __xsc3_setup, #function | 449 | .type __xsc3_setup, #function |
452 | __xsc3_setup: | 450 | __xsc3_setup: |
453 | mov r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE | 451 | mov r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE |
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index e766f889bfd6..d19b1cfcad91 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S | |||
@@ -558,8 +558,6 @@ ENTRY(cpu_xscale_do_resume) | |||
558 | ENDPROC(cpu_xscale_do_resume) | 558 | ENDPROC(cpu_xscale_do_resume) |
559 | #endif | 559 | #endif |
560 | 560 | ||
561 | __CPUINIT | ||
562 | |||
563 | .type __xscale_setup, #function | 561 | .type __xscale_setup, #function |
564 | __xscale_setup: | 562 | __xscale_setup: |
565 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I, D caches & BTB | 563 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I, D caches & BTB |