Diffstat (limited to 'arch/arm/mm'):

 -rw-r--r--  arch/arm/mm/Kconfig      |  23
 -rw-r--r--  arch/arm/mm/abort-ev6.S  |   3
 -rw-r--r--  arch/arm/mm/alignment.c  | 139
 -rw-r--r--  arch/arm/mm/fault.c      |   2
 -rw-r--r--  arch/arm/mm/ioremap.c    |   6
 -rw-r--r--  arch/arm/mm/mmu.c        |  22
 -rw-r--r--  arch/arm/mm/proc-v6.S    |   3
 -rw-r--r--  arch/arm/mm/proc-v7.S    |  59
 -rw-r--r--  arch/arm/mm/tlb-v7.S     |  17

 9 files changed, 230 insertions(+), 44 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 20979564e7ee..83c025e72ceb 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -391,7 +391,7 @@ config CPU_FEROCEON_OLD_ID
 
 # ARMv6
 config CPU_V6
-	bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB
+	bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX
 	select CPU_32v6
 	select CPU_ABRT_EV6
 	select CPU_PABRT_NOIFAR
@@ -416,7 +416,7 @@ config CPU_32v6K
 
 # ARMv7
 config CPU_V7
-	bool "Support ARM V7 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB
+	bool "Support ARM V7 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX
 	select CPU_32v6K
 	select CPU_32v7
 	select CPU_ABRT_EV7
@@ -639,10 +639,23 @@ config CPU_BIG_ENDIAN
 	  port must properly enable any big-endian related features
 	  of your chipset/board/processor.
 
+config CPU_ENDIAN_BE8
+	bool
+	depends on CPU_BIG_ENDIAN
+	default CPU_V6 || CPU_V7
+	help
+	  Support for the BE-8 (big-endian) mode on ARMv6 and ARMv7 processors.
+
+config CPU_ENDIAN_BE32
+	bool
+	depends on CPU_BIG_ENDIAN
+	default !CPU_ENDIAN_BE8
+	help
+	  Support for the BE-32 (big-endian) mode on pre-ARMv6 processors.
+
 config CPU_HIGH_VECTOR
 	depends on !MMU && CPU_CP15 && !CPU_ARM740T
 	bool "Select the High exception vector"
-	default n
 	help
 	  Say Y here to select high exception vector(0xFFFF0000~).
 	  The exception vector can be vary depending on the platform
@@ -726,7 +739,6 @@ config NEEDS_SYSCALL_FOR_CMPXCHG
 
 config OUTER_CACHE
 	bool
-	default n
 
 config CACHE_FEROCEON_L2
 	bool "Enable the Feroceon L2 cache controller"
@@ -739,7 +751,6 @@ config CACHE_FEROCEON_L2
 config CACHE_FEROCEON_L2_WRITETHROUGH
 	bool "Force Feroceon L2 cache write through"
 	depends on CACHE_FEROCEON_L2
-	default n
 	help
 	  Say Y here to use the Feroceon L2 cache in writethrough mode.
 	  Unless you specifically require this, say N for writeback mode.
@@ -747,7 +758,7 @@ config CACHE_FEROCEON_L2_WRITETHROUGH
 config CACHE_L2X0
 	bool "Enable the L2x0 outer cache controller"
 	depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \
-		   REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31
+		   REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX
 	default y
 	select OUTER_CACHE
 	help
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index 6f7e70907e44..f332df7f0d37 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -37,6 +37,9 @@ ENTRY(v6_early_abort)
 	movne	pc, lr
 	do_thumb_abort
 	ldreq	r3, [r2]			@ read aborted ARM instruction
+#ifdef CONFIG_CPU_ENDIAN_BE8
+	reveq	r3, r3
+#endif
 	do_ldrd_abort
 	tst	r3, #1 << 20			@ L = 0 -> write
 	orreq	r1, r1, #1 << 11		@ yes.
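The reveq added above is needed because BE-8 operation (CONFIG_CPU_ENDIAN_BE8, introduced in the Kconfig hunk earlier) keeps the instruction stream little-endian while data accesses are big-endian, so an instruction word fetched with a data load comes back byte-reversed. A minimal C sketch of the same fix-up, assuming a BE-8 build; the helper below is purely illustrative and not part of the patch:

#include <stdint.h>

/* Read an instruction word via a data access and undo the BE-8 byte swap. */
static inline uint32_t read_insn_word(const uint32_t *pc)
{
	uint32_t insn = *pc;		/* data access: byte-reversed on BE-8 */
#if defined(__ARMEB__) && defined(CONFIG_CPU_ENDIAN_BE8)
	insn = __builtin_bswap32(insn);	/* C equivalent of the rev above */
#endif
	return insn;
}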
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 3a398befed41..03cd27d917b9 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -62,6 +62,12 @@
 #define SHIFT_ASR	0x40
 #define SHIFT_RORRRX	0x60
 
+#define BAD_INSTR	0xdeadc0de
+
+/* Thumb-2 32 bit format per ARMv7 DDI0406A A6.3, either f800h,e800h,f800h */
+#define IS_T32(hi16) \
+	(((hi16) & 0xe000) == 0xe000 && ((hi16) & 0x1800))
+
 static unsigned long ai_user;
 static unsigned long ai_sys;
 static unsigned long ai_skipped;
@@ -332,38 +338,48 @@ do_alignment_ldrdstrd(unsigned long addr, unsigned long instr,
 		      struct pt_regs *regs)
 {
 	unsigned int rd = RD_BITS(instr);
+	unsigned int rd2;
+	int load;
 
-	if (((rd & 1) == 1) || (rd == 14))
+	if ((instr & 0xfe000000) == 0xe8000000) {
+		/* ARMv7 Thumb-2 32-bit LDRD/STRD */
+		rd2 = (instr >> 8) & 0xf;
+		load = !!(LDST_L_BIT(instr));
+	} else if (((rd & 1) == 1) || (rd == 14))
 		goto bad;
+	else {
+		load = ((instr & 0xf0) == 0xd0);
+		rd2 = rd + 1;
+	}
 
 	ai_dword += 1;
 
 	if (user_mode(regs))
 		goto user;
 
-	if ((instr & 0xf0) == 0xd0) {
+	if (load) {
 		unsigned long val;
 		get32_unaligned_check(val, addr);
 		regs->uregs[rd] = val;
 		get32_unaligned_check(val, addr + 4);
-		regs->uregs[rd + 1] = val;
+		regs->uregs[rd2] = val;
 	} else {
 		put32_unaligned_check(regs->uregs[rd], addr);
-		put32_unaligned_check(regs->uregs[rd + 1], addr + 4);
+		put32_unaligned_check(regs->uregs[rd2], addr + 4);
 	}
 
 	return TYPE_LDST;
 
  user:
-	if ((instr & 0xf0) == 0xd0) {
+	if (load) {
 		unsigned long val;
 		get32t_unaligned_check(val, addr);
 		regs->uregs[rd] = val;
 		get32t_unaligned_check(val, addr + 4);
-		regs->uregs[rd + 1] = val;
+		regs->uregs[rd2] = val;
 	} else {
 		put32t_unaligned_check(regs->uregs[rd], addr);
-		put32t_unaligned_check(regs->uregs[rd + 1], addr + 4);
+		put32t_unaligned_check(regs->uregs[rd2], addr + 4);
 	}
 
 	return TYPE_LDST;
@@ -616,8 +632,72 @@ thumb2arm(u16 tinstr)
 		/* Else fall through for illegal instruction case */
 
 	default:
-		return 0xdeadc0de;
+		return BAD_INSTR;
+	}
+}
+
+/*
+ * Convert Thumb-2 32 bit LDM, STM, LDRD, STRD to equivalent instruction
+ * handlable by ARM alignment handler, also find the corresponding handler,
+ * so that we can reuse ARM userland alignment fault fixups for Thumb.
+ *
+ * @pinstr: original Thumb-2 instruction; returns new handlable instruction
+ * @regs: register context.
+ * @poffset: return offset from faulted addr for later writeback
+ *
+ * NOTES:
+ * 1. Comments below refer to ARMv7 DDI0406A Thumb Instruction sections.
+ * 2. Register name Rt from ARMv7 is same as Rd from ARMv6 (Rd is Rt)
+ */
+static void *
+do_alignment_t32_to_handler(unsigned long *pinstr, struct pt_regs *regs,
+			    union offset_union *poffset)
+{
+	unsigned long instr = *pinstr;
+	u16 tinst1 = (instr >> 16) & 0xffff;
+	u16 tinst2 = instr & 0xffff;
+	poffset->un = 0;
+
+	switch (tinst1 & 0xffe0) {
+	/* A6.3.5 Load/Store multiple */
+	case 0xe880:		/* STM/STMIA/STMEA,LDM/LDMIA, PUSH/POP T2 */
+	case 0xe8a0:		/* ...above writeback version */
+	case 0xe900:		/* STMDB/STMFD, LDMDB/LDMEA */
+	case 0xe920:		/* ...above writeback version */
+		/* no need offset decision since handler calculates it */
+		return do_alignment_ldmstm;
+
+	case 0xf840:		/* POP/PUSH T3 (single register) */
+		if (RN_BITS(instr) == 13 && (tinst2 & 0x09ff) == 0x0904) {
+			u32 L = !!(LDST_L_BIT(instr));
+			const u32 subset[2] = {
+				0xe92d0000,	/* STMDB sp!,{registers} */
+				0xe8bd0000,	/* LDMIA sp!,{registers} */
+			};
+			*pinstr = subset[L] | (1<<RD_BITS(instr));
+			return do_alignment_ldmstm;
+		}
+		/* Else fall through for illegal instruction case */
+		break;
+
+	/* A6.3.6 Load/store double, STRD/LDRD(immed, lit, reg) */
+	case 0xe860:
+	case 0xe960:
+	case 0xe8e0:
+	case 0xe9e0:
+		poffset->un = (tinst2 & 0xff) << 2;
+	case 0xe940:
+	case 0xe9c0:
+		return do_alignment_ldrdstrd;
+
+	/*
+	 * No need to handle load/store instructions up to word size
+	 * since ARMv6 and later CPUs can perform unaligned accesses.
+	 */
+	default:
+		break;
 	}
+	return NULL;
 }
 
 static int
@@ -630,6 +710,8 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	mm_segment_t fs;
 	unsigned int fault;
 	u16 tinstr = 0;
+	int isize = 4;
+	int thumb2_32b = 0;
 
 	instrptr = instruction_pointer(regs);
 
@@ -637,8 +719,19 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	set_fs(KERNEL_DS);
 	if (thumb_mode(regs)) {
 		fault = __get_user(tinstr, (u16 *)(instrptr & ~1));
-		if (!(fault))
-			instr = thumb2arm(tinstr);
+		if (!fault) {
+			if (cpu_architecture() >= CPU_ARCH_ARMv7 &&
+			    IS_T32(tinstr)) {
+				/* Thumb-2 32-bit */
+				u16 tinst2 = 0;
+				fault = __get_user(tinst2, (u16 *)(instrptr+2));
+				instr = (tinstr << 16) | tinst2;
+				thumb2_32b = 1;
+			} else {
+				isize = 2;
+				instr = thumb2arm(tinstr);
+			}
+		}
 	} else
 		fault = __get_user(instr, (u32 *)instrptr);
 	set_fs(fs);
@@ -655,7 +748,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 
  fixup:
 
-	regs->ARM_pc += thumb_mode(regs) ? 2 : 4;
+	regs->ARM_pc += isize;
 
 	switch (CODING_BITS(instr)) {
 	case 0x00000000:	/* 3.13.4 load/store instruction extensions */
@@ -714,18 +807,25 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 		handler = do_alignment_ldrstr;
 		break;
 
-	case 0x08000000:	/* ldm or stm */
-		handler = do_alignment_ldmstm;
+	case 0x08000000:	/* ldm or stm, or thumb-2 32bit instruction */
+		if (thumb2_32b)
+			handler = do_alignment_t32_to_handler(&instr, regs, &offset);
+		else
+			handler = do_alignment_ldmstm;
 		break;
 
 	default:
 		goto bad;
 	}
 
+	if (!handler)
+		goto bad;
 	type = handler(addr, instr, regs);
 
-	if (type == TYPE_ERROR || type == TYPE_FAULT)
+	if (type == TYPE_ERROR || type == TYPE_FAULT) {
+		regs->ARM_pc -= isize;
 		goto bad_or_fault;
+	}
 
 	if (type == TYPE_LDST)
 		do_alignment_finish_ldst(addr, instr, regs, offset);
@@ -735,7 +835,6 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
  bad_or_fault:
 	if (type == TYPE_ERROR)
 		goto bad;
-	regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
 	/*
 	 * We got a fault - fix it up, or die.
 	 */
@@ -751,8 +850,8 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	 */
 	printk(KERN_ERR "Alignment trap: not handling instruction "
 		"%0*lx at [<%08lx>]\n",
-		thumb_mode(regs) ? 4 : 8,
-		thumb_mode(regs) ? tinstr : instr, instrptr);
+		isize << 1,
+		isize == 2 ? tinstr : instr, instrptr);
 	ai_skipped += 1;
 	return 1;
 
@@ -763,8 +862,8 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	printk("Alignment trap: %s (%d) PC=0x%08lx Instr=0x%0*lx "
 		"Address=0x%08lx FSR 0x%03x\n", current->comm,
 		task_pid_nr(current), instrptr,
-		thumb_mode(regs) ? 4 : 8,
-		thumb_mode(regs) ? tinstr : instr,
+		isize << 1,
+		isize == 2 ? tinstr : instr,
 		addr, fsr);
 
 	if (ai_usermode & UM_FIXUP)
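To see how the Thumb-2 path added above fits together, the sketch below walks one case end to end: IS_T32() recognises a 32-bit Thumb-2 encoding, and the POP/PUSH (T3) branch of do_alignment_t32_to_handler() rewrites it into the ARM LDMIA form that do_alignment_ldmstm() already handles. This is a standalone illustration, not kernel code; the RD_BITS/RN_BITS/LDST_L_BIT macros are local stand-ins for the definitions already present in alignment.c, and the example encoding f85d fb04 (POP {pc}) is chosen only for demonstration.

#include <stdio.h>

#define IS_T32(hi16)	(((hi16) & 0xe000) == 0xe000 && ((hi16) & 0x1800))
#define RD_BITS(i)	(((i) >> 12) & 15)	/* Rd/Rt field */
#define RN_BITS(i)	(((i) >> 16) & 15)	/* Rn field */
#define LDST_L_BIT(i)	((i) & (1 << 20))	/* L bit: 1 = load */

int main(void)
{
	unsigned short tinst1 = 0xf85d, tinst2 = 0xfb04;	/* POP {pc}, T3 */
	unsigned long instr = ((unsigned long)tinst1 << 16) | tinst2;

	if (IS_T32(tinst1) &&
	    (tinst1 & 0xffe0) == 0xf840 &&
	    RN_BITS(instr) == 13 && (tinst2 & 0x09ff) == 0x0904) {
		const unsigned long subset[2] = { 0xe92d0000, 0xe8bd0000 };
		unsigned long arm = subset[!!LDST_L_BIT(instr)] |
				    (1 << RD_BITS(instr));
		/* prints: thumb-2 f85dfb04 -> arm e8bd8000 (LDMIA sp!, {pc}) */
		printf("thumb-2 %04x%04x -> arm %08lx\n", tinst1, tinst2, arm);
	}
	return 0;
}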
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 0455557a2899..6fdcbb709827 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -208,7 +208,7 @@ good_area:
 	 * than endlessly redo the fault.
 	 */
  survive:
-	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, fsr & (1 << 11));
+	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, (fsr & (1 << 11)) ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 9f88dd3be601..0ab75c60f7cf 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -110,6 +110,12 @@ static int remap_area_pages(unsigned long start, unsigned long pfn,
 	return err;
 }
 
+int ioremap_page(unsigned long virt, unsigned long phys,
+		 const struct mem_type *mtype)
+{
+	return remap_area_pages(virt, __phys_to_pfn(phys), PAGE_SIZE, mtype);
+}
+EXPORT_SYMBOL(ioremap_page);
 
 void __check_kvm_seq(struct mm_struct *mm)
 {
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index e6344ece00ce..4722582b17b8 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -255,6 +255,7 @@ const struct mem_type *get_mem_type(unsigned int type)
 {
 	return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
 }
+EXPORT_SYMBOL(get_mem_type);
 
 /*
  * Adjust the PMD section entries according to the CPU in use.
@@ -835,10 +836,31 @@ void __init reserve_node_zero(pg_data_t *pgdat)
 			BOOTMEM_EXCLUSIVE);
 	}
 
+	if (machine_is_treo680()) {
+		reserve_bootmem_node(pgdat, 0xa0000000, 0x1000,
+				BOOTMEM_EXCLUSIVE);
+		reserve_bootmem_node(pgdat, 0xa2000000, 0x1000,
+				BOOTMEM_EXCLUSIVE);
+	}
+
 	if (machine_is_palmt5())
 		reserve_bootmem_node(pgdat, 0xa0200000, 0x1000,
 				BOOTMEM_EXCLUSIVE);
 
+	/*
+	 * U300 - This platform family can share physical memory
+	 * between two ARM cpus, one running Linux and the other
+	 * running another OS.
+	 */
+	if (machine_is_u300()) {
+#ifdef CONFIG_MACH_U300_SINGLE_RAM
+#if ((CONFIG_MACH_U300_ACCESS_MEM_SIZE & 1) == 1) &&	\
+	CONFIG_MACH_U300_2MB_ALIGNMENT_FIX
+		res_size = 0x00100000;
+#endif
+#endif
+	}
+
 #ifdef CONFIG_SA1111
 	/*
 	 * Because of the SA1111 DMA bug, we want to preserve our
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 087e239704df..524ddae92595 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -170,6 +170,9 @@ __v6_setup:
 #endif /* CONFIG_MMU */
 	adr	r5, v6_crval
 	ldmia	r5, {r5, r6}
+#ifdef CONFIG_CPU_ENDIAN_BE8
+	orr	r6, r6, #1 << 25		@ big-endian page tables
+#endif
 	mrc	p15, 0, r0, c1, c0, 0		@ read control register
 	bic	r0, r0, r5			@ clear bits them
 	orr	r0, r0, r6			@ set them
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index a08d9d2380d3..180a08d03a03 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -19,17 +19,23 @@
 
 #include "proc-macros.S"
 
-#define TTB_C		(1 << 0)
 #define TTB_S		(1 << 1)
 #define TTB_RGN_NC	(0 << 3)
 #define TTB_RGN_OC_WBWA	(1 << 3)
 #define TTB_RGN_OC_WT	(2 << 3)
 #define TTB_RGN_OC_WB	(3 << 3)
+#define TTB_NOS		(1 << 5)
+#define TTB_IRGN_NC	((0 << 0) | (0 << 6))
+#define TTB_IRGN_WBWA	((0 << 0) | (1 << 6))
+#define TTB_IRGN_WT	((1 << 0) | (0 << 6))
+#define TTB_IRGN_WB	((1 << 0) | (1 << 6))
 
 #ifndef CONFIG_SMP
-#define TTB_FLAGS	TTB_C|TTB_RGN_OC_WB		@ mark PTWs cacheable, outer WB
+/* PTWs cacheable, inner WB not shareable, outer WB not shareable */
+#define TTB_FLAGS	TTB_IRGN_WB|TTB_RGN_OC_WB
 #else
-#define TTB_FLAGS	TTB_C|TTB_S|TTB_RGN_OC_WBWA	@ mark PTWs cacheable and shared, outer WBWA
+/* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */
+#define TTB_FLAGS	TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA
 #endif
 
 ENTRY(cpu_v7_proc_init)
@@ -176,8 +182,8 @@ cpu_v7_name:
 */
 __v7_setup:
 #ifdef CONFIG_SMP
-	mrc	p15, 0, r0, c1, c0, 1		@ Enable SMP/nAMP mode
-	orr	r0, r0, #(0x1 << 6)
+	mrc	p15, 0, r0, c1, c0, 1		@ Enable SMP/nAMP mode and
+	orr	r0, r0, #(1 << 6) | (1 << 0)	@ TLB ops broadcasting
 	mcr	p15, 0, r0, c1, c0, 1
 #endif
 	adr	r12, __v7_setup_stack		@ the local stack
@@ -227,12 +233,43 @@ __v7_setup:
 	mov	r10, #0x1f			@ domains 0, 1 = manager
 	mcr	p15, 0, r10, c3, c0, 0		@ load domain access register
 #endif
-	ldr	r5, =0xff0aa1a8
-	ldr	r6, =0x40e040e0
+	/*
+	 * Memory region attributes with SCTLR.TRE=1
+	 *
+	 *   n = TEX[0],C,B
+	 *   TR = PRRR[2n+1:2n]		- memory type
+	 *   IR = NMRR[2n+1:2n]		- inner cacheable property
+	 *   OR = NMRR[2n+17:2n+16]	- outer cacheable property
+	 *
+	 *			n	TR	IR	OR
+	 *   UNCACHED		000	00
+	 *   BUFFERABLE		001	10	00	00
+	 *   WRITETHROUGH	010	10	10	10
+	 *   WRITEBACK		011	10	11	11
+	 *   reserved		110
+	 *   WRITEALLOC		111	10	01	01
+	 *   DEV_SHARED		100	01
+	 *   DEV_NONSHARED	100	01
+	 *   DEV_WC		001	10
+	 *   DEV_CACHED		011	10
+	 *
+	 * Other attributes:
+	 *
+	 *   DS0 = PRRR[16] = 0		- device shareable property
+	 *   DS1 = PRRR[17] = 1		- device shareable property
+	 *   NS0 = PRRR[18] = 0		- normal shareable property
+	 *   NS1 = PRRR[19] = 1		- normal shareable property
+	 *   NOS = PRRR[24+n] = 1	- not outer shareable
+	 */
+	ldr	r5, =0xff0a81a8			@ PRRR
+	ldr	r6, =0x40e040e0			@ NMRR
 	mcr	p15, 0, r5, c10, c2, 0		@ write PRRR
 	mcr	p15, 0, r6, c10, c2, 1		@ write NMRR
 	adr	r5, v7_crval
 	ldmia	r5, {r5, r6}
+#ifdef CONFIG_CPU_ENDIAN_BE8
+	orr	r6, r6, #1 << 25		@ big-endian page tables
+#endif
 	mrc	p15, 0, r0, c1, c0, 0		@ read control register
 	bic	r0, r0, r5			@ clear bits them
 	orr	r0, r0, r6			@ set them
@@ -240,14 +277,14 @@ __v7_setup:
 ENDPROC(__v7_setup)
 
 	/*   AT
-	 *  TFR   EV X F   I D LR
-	 * .EEE ..EE PUI. .T.T 4RVI ZFRS BLDP WCAM
+	 *  TFR   EV X F   I D LR    S
+	 * .EEE ..EE PUI. .T.T 4RVI ZWRS BLDP WCAM
 	 * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced
-	 *    1    0 110       0011 1.00 .111 1101 < we want
+	 *    1    0 110       0011 1100 .111 1101 < we want
 	 */
 	.type	v7_crval, #object
 v7_crval:
-	crval	clear=0x0120c302, mmuset=0x10c0387d, ucset=0x00c0187c
+	crval	clear=0x0120c302, mmuset=0x10c03c7d, ucset=0x00c01c7c
 
 __v7_setup_stack:
 	.space	4 * 11				@ 11 registers
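A worked decode of the PRRR/NMRR values loaded above may help when reading the attribute table in the new comment. With TEX remap enabled, the attribute index is n = TEX[0]:C:B, the memory type is PRRR[2n+1:2n], and the inner/outer cacheability fields are NMRR[2n+1:2n] and NMRR[2n+17:2n+16]. The standalone sketch below (illustrative only, not part of the patch) decodes n = 0b011 (WRITEBACK) from 0xff0a81a8/0x40e040e0 and prints TR=2, IR=3, OR=3, i.e. normal memory with inner and outer write-back, matching the table.

#include <stdio.h>

int main(void)
{
	unsigned int prrr = 0xff0a81a8;	/* value written to PRRR above */
	unsigned int nmrr = 0x40e040e0;	/* value written to NMRR above */
	unsigned int n = 0x3;		/* TEX[0]=0, C=1, B=1: WRITEBACK */

	unsigned int tr  = (prrr >> (2 * n)) & 0x3;	  /* memory type */
	unsigned int ir  = (nmrr >> (2 * n)) & 0x3;	  /* inner cacheability */
	unsigned int or_ = (nmrr >> (2 * n + 16)) & 0x3; /* outer cacheability */

	/* prints: n=3 TR=2 IR=3 OR=3 */
	printf("n=%u TR=%u IR=%u OR=%u\n", n, tr, ir, or_);
	return 0;
}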
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index b637e7380ab7..a26a605b73bd 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -42,9 +42,11 @@ ENTRY(v7wbi_flush_user_tlb_range)
 	mov	r1, r1, lsl #PAGE_SHIFT
 	vma_vm_flags r2, r2			@ get vma->vm_flags
 1:
-	mcr	p15, 0, r0, c8, c6, 1		@ TLB invalidate D MVA (was 1)
-	tst	r2, #VM_EXEC			@ Executable area ?
-	mcrne	p15, 0, r0, c8, c5, 1		@ TLB invalidate I MVA (was 1)
+#ifdef CONFIG_SMP
+	mcr	p15, 0, r0, c8, c3, 1		@ TLB invalidate U MVA (shareable)
+#else
+	mcr	p15, 0, r0, c8, c7, 1		@ TLB invalidate U MVA
+#endif
 	add	r0, r0, #PAGE_SZ
 	cmp	r0, r1
 	blo	1b
@@ -69,8 +71,11 @@ ENTRY(v7wbi_flush_kern_tlb_range)
 	mov	r0, r0, lsl #PAGE_SHIFT
 	mov	r1, r1, lsl #PAGE_SHIFT
 1:
-	mcr	p15, 0, r0, c8, c6, 1		@ TLB invalidate D MVA
-	mcr	p15, 0, r0, c8, c5, 1		@ TLB invalidate I MVA
+#ifdef CONFIG_SMP
+	mcr	p15, 0, r0, c8, c3, 1		@ TLB invalidate U MVA (shareable)
+#else
+	mcr	p15, 0, r0, c8, c7, 1		@ TLB invalidate U MVA
+#endif
 	add	r0, r0, #PAGE_SZ
 	cmp	r0, r1
 	blo	1b
@@ -87,5 +92,5 @@ ENDPROC(v7wbi_flush_kern_tlb_range)
 ENTRY(v7wbi_tlb_fns)
 	.long	v7wbi_flush_user_tlb_range
 	.long	v7wbi_flush_kern_tlb_range
-	.long	v6wbi_tlb_flags
+	.long	v7wbi_tlb_flags
 	.size	v7wbi_tlb_fns, . - v7wbi_tlb_fns
