diff options
Diffstat (limited to 'arch/arm/mm')
64 files changed, 2490 insertions, 2380 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 0074b8dba79..aaea6d487ba 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig | |||
@@ -821,7 +821,8 @@ config CACHE_L2X0 | |||
821 | depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \ | 821 | depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \ |
822 | REALVIEW_EB_A9MP || SOC_IMX35 || SOC_IMX31 || MACH_REALVIEW_PBX || \ | 822 | REALVIEW_EB_A9MP || SOC_IMX35 || SOC_IMX31 || MACH_REALVIEW_PBX || \ |
823 | ARCH_NOMADIK || ARCH_OMAP4 || ARCH_EXYNOS4 || ARCH_TEGRA || \ | 823 | ARCH_NOMADIK || ARCH_OMAP4 || ARCH_EXYNOS4 || ARCH_TEGRA || \ |
824 | ARCH_U8500 || ARCH_VEXPRESS_CA9X4 || ARCH_SHMOBILE | 824 | ARCH_U8500 || ARCH_VEXPRESS_CA9X4 || ARCH_SHMOBILE || \ |
825 | ARCH_PRIMA2 || ARCH_ZYNQ || ARCH_CNS3XXX | ||
825 | default y | 826 | default y |
826 | select OUTER_CACHE | 827 | select OUTER_CACHE |
827 | select OUTER_CACHE_SYNC | 828 | select OUTER_CACHE_SYNC |
@@ -889,3 +890,18 @@ config ARCH_HAS_BARRIERS | |||
889 | help | 890 | help |
890 | This option allows the use of custom mandatory barriers | 891 | This option allows the use of custom mandatory barriers |
891 | included via the mach/barriers.h file. | 892 | included via the mach/barriers.h file. |
893 | |||
894 | config ARM_SAVE_DEBUG_CONTEXT | ||
895 | bool "Save CPU debug state across suspend/resume" | ||
896 | depends on PM_SLEEP && CPU_V7 | ||
897 | help | ||
898 | This option enables save/restore of the ARM debug registers | ||
899 | across CPU powerdown. | ||
900 | |||
901 | config CPA | ||
902 | bool "Change Page Attributes" | ||
903 | depends on CPU_V7 | ||
904 | help | ||
905 | This option enables Changing Page Attibutes for low memory. | ||
906 | This is needed to avoid conflicting memory mappings for low memory, | ||
907 | One from kernel page table and others from user process page tables. | ||
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile index bca7e61928c..47e2e3ba190 100644 --- a/arch/arm/mm/Makefile +++ b/arch/arm/mm/Makefile | |||
@@ -6,7 +6,7 @@ obj-y := dma-mapping.o extable.o fault.o init.o \ | |||
6 | iomap.o | 6 | iomap.o |
7 | 7 | ||
8 | obj-$(CONFIG_MMU) += fault-armv.o flush.o idmap.o ioremap.o \ | 8 | obj-$(CONFIG_MMU) += fault-armv.o flush.o idmap.o ioremap.o \ |
9 | mmap.o pgd.o mmu.o vmregion.o | 9 | mmap.o pgd.o mmu.o vmregion.o pageattr.o |
10 | 10 | ||
11 | ifneq ($(CONFIG_MMU),y) | 11 | ifneq ($(CONFIG_MMU),y) |
12 | obj-y += nommu.o | 12 | obj-y += nommu.o |
diff --git a/arch/arm/mm/abort-ev4.S b/arch/arm/mm/abort-ev4.S index 4f18f9e87ba..54473cd4aba 100644 --- a/arch/arm/mm/abort-ev4.S +++ b/arch/arm/mm/abort-ev4.S | |||
@@ -3,14 +3,11 @@ | |||
3 | /* | 3 | /* |
4 | * Function: v4_early_abort | 4 | * Function: v4_early_abort |
5 | * | 5 | * |
6 | * Params : r2 = address of aborted instruction | 6 | * Params : r2 = pt_regs |
7 | * : r3 = saved SPSR | 7 | * : r4 = aborted context pc |
8 | * : r5 = aborted context psr | ||
8 | * | 9 | * |
9 | * Returns : r0 = address of abort | 10 | * Returns : r4 - r11, r13 preserved |
10 | * : r1 = FSR, bit 11 = write | ||
11 | * : r2-r8 = corrupted | ||
12 | * : r9 = preserved | ||
13 | * : sp = pointer to registers | ||
14 | * | 11 | * |
15 | * Purpose : obtain information about current aborted instruction. | 12 | * Purpose : obtain information about current aborted instruction. |
16 | * Note: we read user space. This means we might cause a data | 13 | * Note: we read user space. This means we might cause a data |
@@ -21,10 +18,8 @@ | |||
21 | ENTRY(v4_early_abort) | 18 | ENTRY(v4_early_abort) |
22 | mrc p15, 0, r1, c5, c0, 0 @ get FSR | 19 | mrc p15, 0, r1, c5, c0, 0 @ get FSR |
23 | mrc p15, 0, r0, c6, c0, 0 @ get FAR | 20 | mrc p15, 0, r0, c6, c0, 0 @ get FAR |
24 | ldr r3, [r2] @ read aborted ARM instruction | 21 | ldr r3, [r4] @ read aborted ARM instruction |
25 | bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR | 22 | bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR |
26 | tst r3, #1 << 20 @ L = 1 -> write? | 23 | tst r3, #1 << 20 @ L = 1 -> write? |
27 | orreq r1, r1, #1 << 11 @ yes. | 24 | orreq r1, r1, #1 << 11 @ yes. |
28 | mov pc, lr | 25 | b do_DataAbort |
29 | |||
30 | |||
diff --git a/arch/arm/mm/abort-ev4t.S b/arch/arm/mm/abort-ev4t.S index b6282548f92..9da704e7b86 100644 --- a/arch/arm/mm/abort-ev4t.S +++ b/arch/arm/mm/abort-ev4t.S | |||
@@ -4,14 +4,11 @@ | |||
4 | /* | 4 | /* |
5 | * Function: v4t_early_abort | 5 | * Function: v4t_early_abort |
6 | * | 6 | * |
7 | * Params : r2 = address of aborted instruction | 7 | * Params : r2 = pt_regs |
8 | * : r3 = saved SPSR | 8 | * : r4 = aborted context pc |
9 | * : r5 = aborted context psr | ||
9 | * | 10 | * |
10 | * Returns : r0 = address of abort | 11 | * Returns : r4 - r11, r13 preserved |
11 | * : r1 = FSR, bit 11 = write | ||
12 | * : r2-r8 = corrupted | ||
13 | * : r9 = preserved | ||
14 | * : sp = pointer to registers | ||
15 | * | 12 | * |
16 | * Purpose : obtain information about current aborted instruction. | 13 | * Purpose : obtain information about current aborted instruction. |
17 | * Note: we read user space. This means we might cause a data | 14 | * Note: we read user space. This means we might cause a data |
@@ -22,9 +19,9 @@ | |||
22 | ENTRY(v4t_early_abort) | 19 | ENTRY(v4t_early_abort) |
23 | mrc p15, 0, r1, c5, c0, 0 @ get FSR | 20 | mrc p15, 0, r1, c5, c0, 0 @ get FSR |
24 | mrc p15, 0, r0, c6, c0, 0 @ get FAR | 21 | mrc p15, 0, r0, c6, c0, 0 @ get FAR |
25 | do_thumb_abort | 22 | do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 |
26 | ldreq r3, [r2] @ read aborted ARM instruction | 23 | ldreq r3, [r4] @ read aborted ARM instruction |
27 | bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR | 24 | bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR |
28 | tst r3, #1 << 20 @ check write | 25 | tst r3, #1 << 20 @ check write |
29 | orreq r1, r1, #1 << 11 | 26 | orreq r1, r1, #1 << 11 |
30 | mov pc, lr | 27 | b do_DataAbort |
diff --git a/arch/arm/mm/abort-ev5t.S b/arch/arm/mm/abort-ev5t.S index 02251b526c0..a0908d4653a 100644 --- a/arch/arm/mm/abort-ev5t.S +++ b/arch/arm/mm/abort-ev5t.S | |||
@@ -4,14 +4,11 @@ | |||
4 | /* | 4 | /* |
5 | * Function: v5t_early_abort | 5 | * Function: v5t_early_abort |
6 | * | 6 | * |
7 | * Params : r2 = address of aborted instruction | 7 | * Params : r2 = pt_regs |
8 | * : r3 = saved SPSR | 8 | * : r4 = aborted context pc |
9 | * : r5 = aborted context psr | ||
9 | * | 10 | * |
10 | * Returns : r0 = address of abort | 11 | * Returns : r4 - r11, r13 preserved |
11 | * : r1 = FSR, bit 11 = write | ||
12 | * : r2-r8 = corrupted | ||
13 | * : r9 = preserved | ||
14 | * : sp = pointer to registers | ||
15 | * | 12 | * |
16 | * Purpose : obtain information about current aborted instruction. | 13 | * Purpose : obtain information about current aborted instruction. |
17 | * Note: we read user space. This means we might cause a data | 14 | * Note: we read user space. This means we might cause a data |
@@ -22,10 +19,10 @@ | |||
22 | ENTRY(v5t_early_abort) | 19 | ENTRY(v5t_early_abort) |
23 | mrc p15, 0, r1, c5, c0, 0 @ get FSR | 20 | mrc p15, 0, r1, c5, c0, 0 @ get FSR |
24 | mrc p15, 0, r0, c6, c0, 0 @ get FAR | 21 | mrc p15, 0, r0, c6, c0, 0 @ get FAR |
25 | do_thumb_abort | 22 | do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 |
26 | ldreq r3, [r2] @ read aborted ARM instruction | 23 | ldreq r3, [r4] @ read aborted ARM instruction |
27 | bic r1, r1, #1 << 11 @ clear bits 11 of FSR | 24 | bic r1, r1, #1 << 11 @ clear bits 11 of FSR |
28 | do_ldrd_abort | 25 | do_ldrd_abort tmp=ip, insn=r3 |
29 | tst r3, #1 << 20 @ check write | 26 | tst r3, #1 << 20 @ check write |
30 | orreq r1, r1, #1 << 11 | 27 | orreq r1, r1, #1 << 11 |
31 | mov pc, lr | 28 | b do_DataAbort |
diff --git a/arch/arm/mm/abort-ev5tj.S b/arch/arm/mm/abort-ev5tj.S index bce68d601c8..4006b7a6126 100644 --- a/arch/arm/mm/abort-ev5tj.S +++ b/arch/arm/mm/abort-ev5tj.S | |||
@@ -4,14 +4,11 @@ | |||
4 | /* | 4 | /* |
5 | * Function: v5tj_early_abort | 5 | * Function: v5tj_early_abort |
6 | * | 6 | * |
7 | * Params : r2 = address of aborted instruction | 7 | * Params : r2 = pt_regs |
8 | * : r3 = saved SPSR | 8 | * : r4 = aborted context pc |
9 | * : r5 = aborted context psr | ||
9 | * | 10 | * |
10 | * Returns : r0 = address of abort | 11 | * Returns : r4 - r11, r13 preserved |
11 | * : r1 = FSR, bit 11 = write | ||
12 | * : r2-r8 = corrupted | ||
13 | * : r9 = preserved | ||
14 | * : sp = pointer to registers | ||
15 | * | 12 | * |
16 | * Purpose : obtain information about current aborted instruction. | 13 | * Purpose : obtain information about current aborted instruction. |
17 | * Note: we read user space. This means we might cause a data | 14 | * Note: we read user space. This means we might cause a data |
@@ -23,13 +20,11 @@ ENTRY(v5tj_early_abort) | |||
23 | mrc p15, 0, r1, c5, c0, 0 @ get FSR | 20 | mrc p15, 0, r1, c5, c0, 0 @ get FSR |
24 | mrc p15, 0, r0, c6, c0, 0 @ get FAR | 21 | mrc p15, 0, r0, c6, c0, 0 @ get FAR |
25 | bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR | 22 | bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR |
26 | tst r3, #PSR_J_BIT @ Java? | 23 | tst r5, #PSR_J_BIT @ Java? |
27 | movne pc, lr | 24 | bne do_DataAbort |
28 | do_thumb_abort | 25 | do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 |
29 | ldreq r3, [r2] @ read aborted ARM instruction | 26 | ldreq r3, [r4] @ read aborted ARM instruction |
30 | do_ldrd_abort | 27 | do_ldrd_abort tmp=ip, insn=r3 |
31 | tst r3, #1 << 20 @ L = 0 -> write | 28 | tst r3, #1 << 20 @ L = 0 -> write |
32 | orreq r1, r1, #1 << 11 @ yes. | 29 | orreq r1, r1, #1 << 11 @ yes. |
33 | mov pc, lr | 30 | b do_DataAbort |
34 | |||
35 | |||
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S index 1478aa52214..ff1f7cc11f8 100644 --- a/arch/arm/mm/abort-ev6.S +++ b/arch/arm/mm/abort-ev6.S | |||
@@ -4,14 +4,11 @@ | |||
4 | /* | 4 | /* |
5 | * Function: v6_early_abort | 5 | * Function: v6_early_abort |
6 | * | 6 | * |
7 | * Params : r2 = address of aborted instruction | 7 | * Params : r2 = pt_regs |
8 | * : r3 = saved SPSR | 8 | * : r4 = aborted context pc |
9 | * : r5 = aborted context psr | ||
9 | * | 10 | * |
10 | * Returns : r0 = address of abort | 11 | * Returns : r4 - r11, r13 preserved |
11 | * : r1 = FSR, bit 11 = write | ||
12 | * : r2-r8 = corrupted | ||
13 | * : r9 = preserved | ||
14 | * : sp = pointer to registers | ||
15 | * | 12 | * |
16 | * Purpose : obtain information about current aborted instruction. | 13 | * Purpose : obtain information about current aborted instruction. |
17 | * Note: we read user space. This means we might cause a data | 14 | * Note: we read user space. This means we might cause a data |
@@ -33,16 +30,14 @@ ENTRY(v6_early_abort) | |||
33 | * The test below covers all the write situations, including Java bytecodes | 30 | * The test below covers all the write situations, including Java bytecodes |
34 | */ | 31 | */ |
35 | bic r1, r1, #1 << 11 @ clear bit 11 of FSR | 32 | bic r1, r1, #1 << 11 @ clear bit 11 of FSR |
36 | tst r3, #PSR_J_BIT @ Java? | 33 | tst r5, #PSR_J_BIT @ Java? |
37 | movne pc, lr | 34 | bne do_DataAbort |
38 | do_thumb_abort | 35 | do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3 |
39 | ldreq r3, [r2] @ read aborted ARM instruction | 36 | ldreq r3, [r4] @ read aborted ARM instruction |
40 | #ifdef CONFIG_CPU_ENDIAN_BE8 | 37 | #ifdef CONFIG_CPU_ENDIAN_BE8 |
41 | reveq r3, r3 | 38 | reveq r3, r3 |
42 | #endif | 39 | #endif |
43 | do_ldrd_abort | 40 | do_ldrd_abort tmp=ip, insn=r3 |
44 | tst r3, #1 << 20 @ L = 0 -> write | 41 | tst r3, #1 << 20 @ L = 0 -> write |
45 | orreq r1, r1, #1 << 11 @ yes. | 42 | orreq r1, r1, #1 << 11 @ yes. |
46 | mov pc, lr | 43 | b do_DataAbort |
47 | |||
48 | |||
diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S index ec88b157d3b..703375277ba 100644 --- a/arch/arm/mm/abort-ev7.S +++ b/arch/arm/mm/abort-ev7.S | |||
@@ -3,14 +3,11 @@ | |||
3 | /* | 3 | /* |
4 | * Function: v7_early_abort | 4 | * Function: v7_early_abort |
5 | * | 5 | * |
6 | * Params : r2 = address of aborted instruction | 6 | * Params : r2 = pt_regs |
7 | * : r3 = saved SPSR | 7 | * : r4 = aborted context pc |
8 | * : r5 = aborted context psr | ||
8 | * | 9 | * |
9 | * Returns : r0 = address of abort | 10 | * Returns : r4 - r11, r13 preserved |
10 | * : r1 = FSR, bit 11 = write | ||
11 | * : r2-r8 = corrupted | ||
12 | * : r9 = preserved | ||
13 | * : sp = pointer to registers | ||
14 | * | 11 | * |
15 | * Purpose : obtain information about current aborted instruction. | 12 | * Purpose : obtain information about current aborted instruction. |
16 | */ | 13 | */ |
@@ -37,18 +34,18 @@ ENTRY(v7_early_abort) | |||
37 | ldr r3, =0x40d @ On permission fault | 34 | ldr r3, =0x40d @ On permission fault |
38 | and r3, r1, r3 | 35 | and r3, r1, r3 |
39 | cmp r3, #0x0d | 36 | cmp r3, #0x0d |
40 | movne pc, lr | 37 | bne do_DataAbort |
41 | 38 | ||
42 | mcr p15, 0, r0, c7, c8, 0 @ Retranslate FAR | 39 | mcr p15, 0, r0, c7, c8, 0 @ Retranslate FAR |
43 | isb | 40 | isb |
44 | mrc p15, 0, r2, c7, c4, 0 @ Read the PAR | 41 | mrc p15, 0, ip, c7, c4, 0 @ Read the PAR |
45 | and r3, r2, #0x7b @ On translation fault | 42 | and r3, ip, #0x7b @ On translation fault |
46 | cmp r3, #0x0b | 43 | cmp r3, #0x0b |
47 | movne pc, lr | 44 | bne do_DataAbort |
48 | bic r1, r1, #0xf @ Fix up FSR FS[5:0] | 45 | bic r1, r1, #0xf @ Fix up FSR FS[5:0] |
49 | and r2, r2, #0x7e | 46 | and ip, ip, #0x7e |
50 | orr r1, r1, r2, LSR #1 | 47 | orr r1, r1, ip, LSR #1 |
51 | #endif | 48 | #endif |
52 | 49 | ||
53 | mov pc, lr | 50 | b do_DataAbort |
54 | ENDPROC(v7_early_abort) | 51 | ENDPROC(v7_early_abort) |
diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S index 9fb7b0e25ea..f3982580c27 100644 --- a/arch/arm/mm/abort-lv4t.S +++ b/arch/arm/mm/abort-lv4t.S | |||
@@ -3,14 +3,11 @@ | |||
3 | /* | 3 | /* |
4 | * Function: v4t_late_abort | 4 | * Function: v4t_late_abort |
5 | * | 5 | * |
6 | * Params : r2 = address of aborted instruction | 6 | * Params : r2 = pt_regs |
7 | * : r3 = saved SPSR | 7 | * : r4 = aborted context pc |
8 | * : r5 = aborted context psr | ||
8 | * | 9 | * |
9 | * Returns : r0 = address of abort | 10 | * Returns : r4-r5, r10-r11, r13 preserved |
10 | * : r1 = FSR, bit 11 = write | ||
11 | * : r2-r8 = corrupted | ||
12 | * : r9 = preserved | ||
13 | * : sp = pointer to registers | ||
14 | * | 11 | * |
15 | * Purpose : obtain information about current aborted instruction. | 12 | * Purpose : obtain information about current aborted instruction. |
16 | * Note: we read user space. This means we might cause a data | 13 | * Note: we read user space. This means we might cause a data |
@@ -18,7 +15,7 @@ | |||
18 | * picture. Unfortunately, this does happen. We live with it. | 15 | * picture. Unfortunately, this does happen. We live with it. |
19 | */ | 16 | */ |
20 | ENTRY(v4t_late_abort) | 17 | ENTRY(v4t_late_abort) |
21 | tst r3, #PSR_T_BIT @ check for thumb mode | 18 | tst r5, #PSR_T_BIT @ check for thumb mode |
22 | #ifdef CONFIG_CPU_CP15_MMU | 19 | #ifdef CONFIG_CPU_CP15_MMU |
23 | mrc p15, 0, r1, c5, c0, 0 @ get FSR | 20 | mrc p15, 0, r1, c5, c0, 0 @ get FSR |
24 | mrc p15, 0, r0, c6, c0, 0 @ get FAR | 21 | mrc p15, 0, r0, c6, c0, 0 @ get FAR |
@@ -28,7 +25,7 @@ ENTRY(v4t_late_abort) | |||
28 | mov r1, #0 | 25 | mov r1, #0 |
29 | #endif | 26 | #endif |
30 | bne .data_thumb_abort | 27 | bne .data_thumb_abort |
31 | ldr r8, [r2] @ read arm instruction | 28 | ldr r8, [r4] @ read arm instruction |
32 | tst r8, #1 << 20 @ L = 1 -> write? | 29 | tst r8, #1 << 20 @ L = 1 -> write? |
33 | orreq r1, r1, #1 << 11 @ yes. | 30 | orreq r1, r1, #1 << 11 @ yes. |
34 | and r7, r8, #15 << 24 | 31 | and r7, r8, #15 << 24 |
@@ -47,86 +44,84 @@ ENTRY(v4t_late_abort) | |||
47 | /* 9 */ b .data_arm_ldmstm @ ldm*b rn, <rlist> | 44 | /* 9 */ b .data_arm_ldmstm @ ldm*b rn, <rlist> |
48 | /* a */ b .data_unknown | 45 | /* a */ b .data_unknown |
49 | /* b */ b .data_unknown | 46 | /* b */ b .data_unknown |
50 | /* c */ mov pc, lr @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m | 47 | /* c */ b do_DataAbort @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m |
51 | /* d */ mov pc, lr @ ldc rd, [rn, #m] | 48 | /* d */ b do_DataAbort @ ldc rd, [rn, #m] |
52 | /* e */ b .data_unknown | 49 | /* e */ b .data_unknown |
53 | /* f */ | 50 | /* f */ |
54 | .data_unknown: @ Part of jumptable | 51 | .data_unknown: @ Part of jumptable |
55 | mov r0, r2 | 52 | mov r0, r4 |
56 | mov r1, r8 | 53 | mov r1, r8 |
57 | mov r2, sp | 54 | b baddataabort |
58 | bl baddataabort | ||
59 | b ret_from_exception | ||
60 | 55 | ||
61 | .data_arm_ldmstm: | 56 | .data_arm_ldmstm: |
62 | tst r8, #1 << 21 @ check writeback bit | 57 | tst r8, #1 << 21 @ check writeback bit |
63 | moveq pc, lr @ no writeback -> no fixup | 58 | beq do_DataAbort @ no writeback -> no fixup |
64 | mov r7, #0x11 | 59 | mov r7, #0x11 |
65 | orr r7, r7, #0x1100 | 60 | orr r7, r7, #0x1100 |
66 | and r6, r8, r7 | 61 | and r6, r8, r7 |
67 | and r2, r8, r7, lsl #1 | 62 | and r9, r8, r7, lsl #1 |
68 | add r6, r6, r2, lsr #1 | 63 | add r6, r6, r9, lsr #1 |
69 | and r2, r8, r7, lsl #2 | 64 | and r9, r8, r7, lsl #2 |
70 | add r6, r6, r2, lsr #2 | 65 | add r6, r6, r9, lsr #2 |
71 | and r2, r8, r7, lsl #3 | 66 | and r9, r8, r7, lsl #3 |
72 | add r6, r6, r2, lsr #3 | 67 | add r6, r6, r9, lsr #3 |
73 | add r6, r6, r6, lsr #8 | 68 | add r6, r6, r6, lsr #8 |
74 | add r6, r6, r6, lsr #4 | 69 | add r6, r6, r6, lsr #4 |
75 | and r6, r6, #15 @ r6 = no. of registers to transfer. | 70 | and r6, r6, #15 @ r6 = no. of registers to transfer. |
76 | and r5, r8, #15 << 16 @ Extract 'n' from instruction | 71 | and r9, r8, #15 << 16 @ Extract 'n' from instruction |
77 | ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' | 72 | ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' |
78 | tst r8, #1 << 23 @ Check U bit | 73 | tst r8, #1 << 23 @ Check U bit |
79 | subne r7, r7, r6, lsl #2 @ Undo increment | 74 | subne r7, r7, r6, lsl #2 @ Undo increment |
80 | addeq r7, r7, r6, lsl #2 @ Undo decrement | 75 | addeq r7, r7, r6, lsl #2 @ Undo decrement |
81 | str r7, [sp, r5, lsr #14] @ Put register 'Rn' | 76 | str r7, [r2, r9, lsr #14] @ Put register 'Rn' |
82 | mov pc, lr | 77 | b do_DataAbort |
83 | 78 | ||
84 | .data_arm_lateldrhpre: | 79 | .data_arm_lateldrhpre: |
85 | tst r8, #1 << 21 @ Check writeback bit | 80 | tst r8, #1 << 21 @ Check writeback bit |
86 | moveq pc, lr @ No writeback -> no fixup | 81 | beq do_DataAbort @ No writeback -> no fixup |
87 | .data_arm_lateldrhpost: | 82 | .data_arm_lateldrhpost: |
88 | and r5, r8, #0x00f @ get Rm / low nibble of immediate value | 83 | and r9, r8, #0x00f @ get Rm / low nibble of immediate value |
89 | tst r8, #1 << 22 @ if (immediate offset) | 84 | tst r8, #1 << 22 @ if (immediate offset) |
90 | andne r6, r8, #0xf00 @ { immediate high nibble | 85 | andne r6, r8, #0xf00 @ { immediate high nibble |
91 | orrne r6, r5, r6, lsr #4 @ combine nibbles } else | 86 | orrne r6, r9, r6, lsr #4 @ combine nibbles } else |
92 | ldreq r6, [sp, r5, lsl #2] @ { load Rm value } | 87 | ldreq r6, [r2, r9, lsl #2] @ { load Rm value } |
93 | .data_arm_apply_r6_and_rn: | 88 | .data_arm_apply_r6_and_rn: |
94 | and r5, r8, #15 << 16 @ Extract 'n' from instruction | 89 | and r9, r8, #15 << 16 @ Extract 'n' from instruction |
95 | ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' | 90 | ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' |
96 | tst r8, #1 << 23 @ Check U bit | 91 | tst r8, #1 << 23 @ Check U bit |
97 | subne r7, r7, r6 @ Undo incrmenet | 92 | subne r7, r7, r6 @ Undo incrmenet |
98 | addeq r7, r7, r6 @ Undo decrement | 93 | addeq r7, r7, r6 @ Undo decrement |
99 | str r7, [sp, r5, lsr #14] @ Put register 'Rn' | 94 | str r7, [r2, r9, lsr #14] @ Put register 'Rn' |
100 | mov pc, lr | 95 | b do_DataAbort |
101 | 96 | ||
102 | .data_arm_lateldrpreconst: | 97 | .data_arm_lateldrpreconst: |
103 | tst r8, #1 << 21 @ check writeback bit | 98 | tst r8, #1 << 21 @ check writeback bit |
104 | moveq pc, lr @ no writeback -> no fixup | 99 | beq do_DataAbort @ no writeback -> no fixup |
105 | .data_arm_lateldrpostconst: | 100 | .data_arm_lateldrpostconst: |
106 | movs r2, r8, lsl #20 @ Get offset | 101 | movs r6, r8, lsl #20 @ Get offset |
107 | moveq pc, lr @ zero -> no fixup | 102 | beq do_DataAbort @ zero -> no fixup |
108 | and r5, r8, #15 << 16 @ Extract 'n' from instruction | 103 | and r9, r8, #15 << 16 @ Extract 'n' from instruction |
109 | ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' | 104 | ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' |
110 | tst r8, #1 << 23 @ Check U bit | 105 | tst r8, #1 << 23 @ Check U bit |
111 | subne r7, r7, r2, lsr #20 @ Undo increment | 106 | subne r7, r7, r6, lsr #20 @ Undo increment |
112 | addeq r7, r7, r2, lsr #20 @ Undo decrement | 107 | addeq r7, r7, r6, lsr #20 @ Undo decrement |
113 | str r7, [sp, r5, lsr #14] @ Put register 'Rn' | 108 | str r7, [r2, r9, lsr #14] @ Put register 'Rn' |
114 | mov pc, lr | 109 | b do_DataAbort |
115 | 110 | ||
116 | .data_arm_lateldrprereg: | 111 | .data_arm_lateldrprereg: |
117 | tst r8, #1 << 21 @ check writeback bit | 112 | tst r8, #1 << 21 @ check writeback bit |
118 | moveq pc, lr @ no writeback -> no fixup | 113 | beq do_DataAbort @ no writeback -> no fixup |
119 | .data_arm_lateldrpostreg: | 114 | .data_arm_lateldrpostreg: |
120 | and r7, r8, #15 @ Extract 'm' from instruction | 115 | and r7, r8, #15 @ Extract 'm' from instruction |
121 | ldr r6, [sp, r7, lsl #2] @ Get register 'Rm' | 116 | ldr r6, [r2, r7, lsl #2] @ Get register 'Rm' |
122 | mov r5, r8, lsr #7 @ get shift count | 117 | mov r9, r8, lsr #7 @ get shift count |
123 | ands r5, r5, #31 | 118 | ands r9, r9, #31 |
124 | and r7, r8, #0x70 @ get shift type | 119 | and r7, r8, #0x70 @ get shift type |
125 | orreq r7, r7, #8 @ shift count = 0 | 120 | orreq r7, r7, #8 @ shift count = 0 |
126 | add pc, pc, r7 | 121 | add pc, pc, r7 |
127 | nop | 122 | nop |
128 | 123 | ||
129 | mov r6, r6, lsl r5 @ 0: LSL #!0 | 124 | mov r6, r6, lsl r9 @ 0: LSL #!0 |
130 | b .data_arm_apply_r6_and_rn | 125 | b .data_arm_apply_r6_and_rn |
131 | b .data_arm_apply_r6_and_rn @ 1: LSL #0 | 126 | b .data_arm_apply_r6_and_rn @ 1: LSL #0 |
132 | nop | 127 | nop |
@@ -134,7 +129,7 @@ ENTRY(v4t_late_abort) | |||
134 | nop | 129 | nop |
135 | b .data_unknown @ 3: MUL? | 130 | b .data_unknown @ 3: MUL? |
136 | nop | 131 | nop |
137 | mov r6, r6, lsr r5 @ 4: LSR #!0 | 132 | mov r6, r6, lsr r9 @ 4: LSR #!0 |
138 | b .data_arm_apply_r6_and_rn | 133 | b .data_arm_apply_r6_and_rn |
139 | mov r6, r6, lsr #32 @ 5: LSR #32 | 134 | mov r6, r6, lsr #32 @ 5: LSR #32 |
140 | b .data_arm_apply_r6_and_rn | 135 | b .data_arm_apply_r6_and_rn |
@@ -142,7 +137,7 @@ ENTRY(v4t_late_abort) | |||
142 | nop | 137 | nop |
143 | b .data_unknown @ 7: MUL? | 138 | b .data_unknown @ 7: MUL? |
144 | nop | 139 | nop |
145 | mov r6, r6, asr r5 @ 8: ASR #!0 | 140 | mov r6, r6, asr r9 @ 8: ASR #!0 |
146 | b .data_arm_apply_r6_and_rn | 141 | b .data_arm_apply_r6_and_rn |
147 | mov r6, r6, asr #32 @ 9: ASR #32 | 142 | mov r6, r6, asr #32 @ 9: ASR #32 |
148 | b .data_arm_apply_r6_and_rn | 143 | b .data_arm_apply_r6_and_rn |
@@ -150,7 +145,7 @@ ENTRY(v4t_late_abort) | |||
150 | nop | 145 | nop |
151 | b .data_unknown @ B: MUL? | 146 | b .data_unknown @ B: MUL? |
152 | nop | 147 | nop |
153 | mov r6, r6, ror r5 @ C: ROR #!0 | 148 | mov r6, r6, ror r9 @ C: ROR #!0 |
154 | b .data_arm_apply_r6_and_rn | 149 | b .data_arm_apply_r6_and_rn |
155 | mov r6, r6, rrx @ D: RRX | 150 | mov r6, r6, rrx @ D: RRX |
156 | b .data_arm_apply_r6_and_rn | 151 | b .data_arm_apply_r6_and_rn |
@@ -159,7 +154,7 @@ ENTRY(v4t_late_abort) | |||
159 | b .data_unknown @ F: MUL? | 154 | b .data_unknown @ F: MUL? |
160 | 155 | ||
161 | .data_thumb_abort: | 156 | .data_thumb_abort: |
162 | ldrh r8, [r2] @ read instruction | 157 | ldrh r8, [r4] @ read instruction |
163 | tst r8, #1 << 11 @ L = 1 -> write? | 158 | tst r8, #1 << 11 @ L = 1 -> write? |
164 | orreq r1, r1, #1 << 8 @ yes | 159 | orreq r1, r1, #1 << 8 @ yes |
165 | and r7, r8, #15 << 12 | 160 | and r7, r8, #15 << 12 |
@@ -172,10 +167,10 @@ ENTRY(v4t_late_abort) | |||
172 | /* 3 */ b .data_unknown | 167 | /* 3 */ b .data_unknown |
173 | /* 4 */ b .data_unknown | 168 | /* 4 */ b .data_unknown |
174 | /* 5 */ b .data_thumb_reg | 169 | /* 5 */ b .data_thumb_reg |
175 | /* 6 */ mov pc, lr | 170 | /* 6 */ b do_DataAbort |
176 | /* 7 */ mov pc, lr | 171 | /* 7 */ b do_DataAbort |
177 | /* 8 */ mov pc, lr | 172 | /* 8 */ b do_DataAbort |
178 | /* 9 */ mov pc, lr | 173 | /* 9 */ b do_DataAbort |
179 | /* A */ b .data_unknown | 174 | /* A */ b .data_unknown |
180 | /* B */ b .data_thumb_pushpop | 175 | /* B */ b .data_thumb_pushpop |
181 | /* C */ b .data_thumb_ldmstm | 176 | /* C */ b .data_thumb_ldmstm |
@@ -185,41 +180,41 @@ ENTRY(v4t_late_abort) | |||
185 | 180 | ||
186 | .data_thumb_reg: | 181 | .data_thumb_reg: |
187 | tst r8, #1 << 9 | 182 | tst r8, #1 << 9 |
188 | moveq pc, lr | 183 | beq do_DataAbort |
189 | tst r8, #1 << 10 @ If 'S' (signed) bit is set | 184 | tst r8, #1 << 10 @ If 'S' (signed) bit is set |
190 | movne r1, #0 @ it must be a load instr | 185 | movne r1, #0 @ it must be a load instr |
191 | mov pc, lr | 186 | b do_DataAbort |
192 | 187 | ||
193 | .data_thumb_pushpop: | 188 | .data_thumb_pushpop: |
194 | tst r8, #1 << 10 | 189 | tst r8, #1 << 10 |
195 | beq .data_unknown | 190 | beq .data_unknown |
196 | and r6, r8, #0x55 @ hweight8(r8) + R bit | 191 | and r6, r8, #0x55 @ hweight8(r8) + R bit |
197 | and r2, r8, #0xaa | 192 | and r9, r8, #0xaa |
198 | add r6, r6, r2, lsr #1 | 193 | add r6, r6, r9, lsr #1 |
199 | and r2, r6, #0xcc | 194 | and r9, r6, #0xcc |
200 | and r6, r6, #0x33 | 195 | and r6, r6, #0x33 |
201 | add r6, r6, r2, lsr #2 | 196 | add r6, r6, r9, lsr #2 |
202 | movs r7, r8, lsr #9 @ C = r8 bit 8 (R bit) | 197 | movs r7, r8, lsr #9 @ C = r8 bit 8 (R bit) |
203 | adc r6, r6, r6, lsr #4 @ high + low nibble + R bit | 198 | adc r6, r6, r6, lsr #4 @ high + low nibble + R bit |
204 | and r6, r6, #15 @ number of regs to transfer | 199 | and r6, r6, #15 @ number of regs to transfer |
205 | ldr r7, [sp, #13 << 2] | 200 | ldr r7, [r2, #13 << 2] |
206 | tst r8, #1 << 11 | 201 | tst r8, #1 << 11 |
207 | addeq r7, r7, r6, lsl #2 @ increment SP if PUSH | 202 | addeq r7, r7, r6, lsl #2 @ increment SP if PUSH |
208 | subne r7, r7, r6, lsl #2 @ decrement SP if POP | 203 | subne r7, r7, r6, lsl #2 @ decrement SP if POP |
209 | str r7, [sp, #13 << 2] | 204 | str r7, [r2, #13 << 2] |
210 | mov pc, lr | 205 | b do_DataAbort |
211 | 206 | ||
212 | .data_thumb_ldmstm: | 207 | .data_thumb_ldmstm: |
213 | and r6, r8, #0x55 @ hweight8(r8) | 208 | and r6, r8, #0x55 @ hweight8(r8) |
214 | and r2, r8, #0xaa | 209 | and r9, r8, #0xaa |
215 | add r6, r6, r2, lsr #1 | 210 | add r6, r6, r9, lsr #1 |
216 | and r2, r6, #0xcc | 211 | and r9, r6, #0xcc |
217 | and r6, r6, #0x33 | 212 | and r6, r6, #0x33 |
218 | add r6, r6, r2, lsr #2 | 213 | add r6, r6, r9, lsr #2 |
219 | add r6, r6, r6, lsr #4 | 214 | add r6, r6, r6, lsr #4 |
220 | and r5, r8, #7 << 8 | 215 | and r9, r8, #7 << 8 |
221 | ldr r7, [sp, r5, lsr #6] | 216 | ldr r7, [r2, r9, lsr #6] |
222 | and r6, r6, #15 @ number of regs to transfer | 217 | and r6, r6, #15 @ number of regs to transfer |
223 | sub r7, r7, r6, lsl #2 @ always decrement | 218 | sub r7, r7, r6, lsl #2 @ always decrement |
224 | str r7, [sp, r5, lsr #6] | 219 | str r7, [r2, r9, lsr #6] |
225 | mov pc, lr | 220 | b do_DataAbort |
diff --git a/arch/arm/mm/abort-macro.S b/arch/arm/mm/abort-macro.S index d7cb1bfa51a..2cbf68ef0e8 100644 --- a/arch/arm/mm/abort-macro.S +++ b/arch/arm/mm/abort-macro.S | |||
@@ -9,34 +9,32 @@ | |||
9 | * | 9 | * |
10 | */ | 10 | */ |
11 | 11 | ||
12 | .macro do_thumb_abort | 12 | .macro do_thumb_abort, fsr, pc, psr, tmp |
13 | tst r3, #PSR_T_BIT | 13 | tst \psr, #PSR_T_BIT |
14 | beq not_thumb | 14 | beq not_thumb |
15 | ldrh r3, [r2] @ Read aborted Thumb instruction | 15 | ldrh \tmp, [\pc] @ Read aborted Thumb instruction |
16 | and r3, r3, # 0xfe00 @ Mask opcode field | 16 | and \tmp, \tmp, # 0xfe00 @ Mask opcode field |
17 | cmp r3, # 0x5600 @ Is it ldrsb? | 17 | cmp \tmp, # 0x5600 @ Is it ldrsb? |
18 | orreq r3, r3, #1 << 11 @ Set L-bit if yes | 18 | orreq \tmp, \tmp, #1 << 11 @ Set L-bit if yes |
19 | tst r3, #1 << 11 @ L = 0 -> write | 19 | tst \tmp, #1 << 11 @ L = 0 -> write |
20 | orreq r1, r1, #1 << 11 @ yes. | 20 | orreq \fsr, \fsr, #1 << 11 @ yes. |
21 | mov pc, lr | 21 | b do_DataAbort |
22 | not_thumb: | 22 | not_thumb: |
23 | .endm | 23 | .endm |
24 | 24 | ||
25 | /* | 25 | /* |
26 | * We check for the following insturction encoding for LDRD. | 26 | * We check for the following instruction encoding for LDRD. |
27 | * | 27 | * |
28 | * [27:25] == 0 | 28 | * [27:25] == 000 |
29 | * [7:4] == 1101 | 29 | * [7:4] == 1101 |
30 | * [20] == 0 | 30 | * [20] == 0 |
31 | */ | 31 | */ |
32 | .macro do_ldrd_abort | 32 | .macro do_ldrd_abort, tmp, insn |
33 | tst r3, #0x0e000000 @ [27:25] == 0 | 33 | tst \insn, #0x0e100000 @ [27:25,20] == 0 |
34 | bne not_ldrd | 34 | bne not_ldrd |
35 | and r2, r3, #0x000000f0 @ [7:4] == 1101 | 35 | and \tmp, \insn, #0x000000f0 @ [7:4] == 1101 |
36 | cmp r2, #0x000000d0 | 36 | cmp \tmp, #0x000000d0 |
37 | bne not_ldrd | 37 | beq do_DataAbort |
38 | tst r3, #1 << 20 @ [20] == 0 | ||
39 | moveq pc, lr | ||
40 | not_ldrd: | 38 | not_ldrd: |
41 | .endm | 39 | .endm |
42 | 40 | ||
diff --git a/arch/arm/mm/abort-nommu.S b/arch/arm/mm/abort-nommu.S index 625e580945b..119cb479c2a 100644 --- a/arch/arm/mm/abort-nommu.S +++ b/arch/arm/mm/abort-nommu.S | |||
@@ -3,11 +3,11 @@ | |||
3 | /* | 3 | /* |
4 | * Function: nommu_early_abort | 4 | * Function: nommu_early_abort |
5 | * | 5 | * |
6 | * Params : r2 = address of aborted instruction | 6 | * Params : r2 = pt_regs |
7 | * : r3 = saved SPSR | 7 | * : r4 = aborted context pc |
8 | * : r5 = aborted context psr | ||
8 | * | 9 | * |
9 | * Returns : r0 = 0 (abort address) | 10 | * Returns : r4 - r11, r13 preserved |
10 | * : r1 = 0 (FSR) | ||
11 | * | 11 | * |
12 | * Note: There is no FSR/FAR on !CPU_CP15_MMU cores. | 12 | * Note: There is no FSR/FAR on !CPU_CP15_MMU cores. |
13 | * Just fill zero into the registers. | 13 | * Just fill zero into the registers. |
@@ -16,5 +16,5 @@ | |||
16 | ENTRY(nommu_early_abort) | 16 | ENTRY(nommu_early_abort) |
17 | mov r0, #0 @ clear r0, r1 (no FSR/FAR) | 17 | mov r0, #0 @ clear r0, r1 (no FSR/FAR) |
18 | mov r1, #0 | 18 | mov r1, #0 |
19 | mov pc, lr | 19 | b do_DataAbort |
20 | ENDPROC(nommu_early_abort) | 20 | ENDPROC(nommu_early_abort) |
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index 724ba3bce72..cfbcf8b9559 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/sched.h> | 22 | #include <linux/sched.h> |
23 | #include <linux/uaccess.h> | 23 | #include <linux/uaccess.h> |
24 | 24 | ||
25 | #include <asm/system.h> | ||
25 | #include <asm/unaligned.h> | 26 | #include <asm/unaligned.h> |
26 | 27 | ||
27 | #include "fault.h" | 28 | #include "fault.h" |
@@ -95,6 +96,33 @@ static const char *usermode_action[] = { | |||
95 | "signal+warn" | 96 | "signal+warn" |
96 | }; | 97 | }; |
97 | 98 | ||
99 | /* Return true if and only if the ARMv6 unaligned access model is in use. */ | ||
100 | static bool cpu_is_v6_unaligned(void) | ||
101 | { | ||
102 | return cpu_architecture() >= CPU_ARCH_ARMv6 && (cr_alignment & CR_U); | ||
103 | } | ||
104 | |||
105 | static int safe_usermode(int new_usermode, bool warn) | ||
106 | { | ||
107 | /* | ||
108 | * ARMv6 and later CPUs can perform unaligned accesses for | ||
109 | * most single load and store instructions up to word size. | ||
110 | * LDM, STM, LDRD and STRD still need to be handled. | ||
111 | * | ||
112 | * Ignoring the alignment fault is not an option on these | ||
113 | * CPUs since we spin re-faulting the instruction without | ||
114 | * making any progress. | ||
115 | */ | ||
116 | if (cpu_is_v6_unaligned() && !(new_usermode & (UM_FIXUP | UM_SIGNAL))) { | ||
117 | new_usermode |= UM_FIXUP; | ||
118 | |||
119 | if (warn) | ||
120 | printk(KERN_WARNING "alignment: ignoring faults is unsafe on this CPU. Defaulting to fixup mode.\n"); | ||
121 | } | ||
122 | |||
123 | return new_usermode; | ||
124 | } | ||
125 | |||
98 | static int alignment_proc_show(struct seq_file *m, void *v) | 126 | static int alignment_proc_show(struct seq_file *m, void *v) |
99 | { | 127 | { |
100 | seq_printf(m, "User:\t\t%lu\n", ai_user); | 128 | seq_printf(m, "User:\t\t%lu\n", ai_user); |
@@ -125,7 +153,7 @@ static ssize_t alignment_proc_write(struct file *file, const char __user *buffer | |||
125 | if (get_user(mode, buffer)) | 153 | if (get_user(mode, buffer)) |
126 | return -EFAULT; | 154 | return -EFAULT; |
127 | if (mode >= '0' && mode <= '5') | 155 | if (mode >= '0' && mode <= '5') |
128 | ai_usermode = mode - '0'; | 156 | ai_usermode = safe_usermode(mode - '0', true); |
129 | } | 157 | } |
130 | return count; | 158 | return count; |
131 | } | 159 | } |
@@ -727,6 +755,9 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |||
727 | int isize = 4; | 755 | int isize = 4; |
728 | int thumb2_32b = 0; | 756 | int thumb2_32b = 0; |
729 | 757 | ||
758 | if (interrupts_enabled(regs)) | ||
759 | local_irq_enable(); | ||
760 | |||
730 | instrptr = instruction_pointer(regs); | 761 | instrptr = instruction_pointer(regs); |
731 | 762 | ||
732 | fs = get_fs(); | 763 | fs = get_fs(); |
@@ -883,9 +914,16 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |||
883 | if (ai_usermode & UM_FIXUP) | 914 | if (ai_usermode & UM_FIXUP) |
884 | goto fixup; | 915 | goto fixup; |
885 | 916 | ||
886 | if (ai_usermode & UM_SIGNAL) | 917 | if (ai_usermode & UM_SIGNAL) { |
887 | force_sig(SIGBUS, current); | 918 | siginfo_t si; |
888 | else { | 919 | |
920 | si.si_signo = SIGBUS; | ||
921 | si.si_errno = 0; | ||
922 | si.si_code = BUS_ADRALN; | ||
923 | si.si_addr = (void __user *)addr; | ||
924 | |||
925 | force_sig_info(si.si_signo, &si, current); | ||
926 | } else { | ||
889 | /* | 927 | /* |
890 | * We're about to disable the alignment trap and return to | 928 | * We're about to disable the alignment trap and return to |
891 | * user space. But if an interrupt occurs before actually | 929 | * user space. But if an interrupt occurs before actually |
@@ -923,20 +961,11 @@ static int __init alignment_init(void) | |||
923 | return -ENOMEM; | 961 | return -ENOMEM; |
924 | #endif | 962 | #endif |
925 | 963 | ||
926 | /* | 964 | if (cpu_is_v6_unaligned()) { |
927 | * ARMv6 and later CPUs can perform unaligned accesses for | ||
928 | * most single load and store instructions up to word size. | ||
929 | * LDM, STM, LDRD and STRD still need to be handled. | ||
930 | * | ||
931 | * Ignoring the alignment fault is not an option on these | ||
932 | * CPUs since we spin re-faulting the instruction without | ||
933 | * making any progress. | ||
934 | */ | ||
935 | if (cpu_architecture() >= CPU_ARCH_ARMv6 && (cr_alignment & CR_U)) { | ||
936 | cr_alignment &= ~CR_A; | 965 | cr_alignment &= ~CR_A; |
937 | cr_no_alignment &= ~CR_A; | 966 | cr_no_alignment &= ~CR_A; |
938 | set_cr(cr_alignment); | 967 | set_cr(cr_alignment); |
939 | ai_usermode = UM_FIXUP; | 968 | ai_usermode = safe_usermode(ai_usermode, false); |
940 | } | 969 | } |
941 | 970 | ||
942 | hook_fault_code(1, do_alignment, SIGBUS, BUS_ADRALN, | 971 | hook_fault_code(1, do_alignment, SIGBUS, BUS_ADRALN, |
diff --git a/arch/arm/mm/cache-fa.S b/arch/arm/mm/cache-fa.S index 1fa6f71470d..07201637109 100644 --- a/arch/arm/mm/cache-fa.S +++ b/arch/arm/mm/cache-fa.S | |||
@@ -242,16 +242,5 @@ ENDPROC(fa_dma_unmap_area) | |||
242 | 242 | ||
243 | __INITDATA | 243 | __INITDATA |
244 | 244 | ||
245 | .type fa_cache_fns, #object | 245 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
246 | ENTRY(fa_cache_fns) | 246 | define_cache_functions fa |
247 | .long fa_flush_icache_all | ||
248 | .long fa_flush_kern_cache_all | ||
249 | .long fa_flush_user_cache_all | ||
250 | .long fa_flush_user_cache_range | ||
251 | .long fa_coherent_kern_range | ||
252 | .long fa_coherent_user_range | ||
253 | .long fa_flush_kern_dcache_area | ||
254 | .long fa_dma_map_area | ||
255 | .long fa_dma_unmap_area | ||
256 | .long fa_dma_flush_range | ||
257 | .size fa_cache_fns, . - fa_cache_fns | ||
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index 44c086710d2..0dddb54ea98 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c | |||
@@ -29,6 +29,16 @@ static void __iomem *l2x0_base; | |||
29 | static DEFINE_SPINLOCK(l2x0_lock); | 29 | static DEFINE_SPINLOCK(l2x0_lock); |
30 | static uint32_t l2x0_way_mask; /* Bitmask of active ways */ | 30 | static uint32_t l2x0_way_mask; /* Bitmask of active ways */ |
31 | static uint32_t l2x0_size; | 31 | static uint32_t l2x0_size; |
32 | static u32 l2x0_cache_id; | ||
33 | static unsigned int l2x0_sets; | ||
34 | static unsigned int l2x0_ways; | ||
35 | |||
36 | static inline bool is_pl310_rev(int rev) | ||
37 | { | ||
38 | return (l2x0_cache_id & | ||
39 | (L2X0_CACHE_ID_PART_MASK | L2X0_CACHE_ID_REV_MASK)) == | ||
40 | (L2X0_CACHE_ID_PART_L310 | rev); | ||
41 | } | ||
32 | 42 | ||
33 | static inline void cache_wait_way(void __iomem *reg, unsigned long mask) | 43 | static inline void cache_wait_way(void __iomem *reg, unsigned long mask) |
34 | { | 44 | { |
@@ -120,6 +130,23 @@ static void l2x0_cache_sync(void) | |||
120 | spin_unlock_irqrestore(&l2x0_lock, flags); | 130 | spin_unlock_irqrestore(&l2x0_lock, flags); |
121 | } | 131 | } |
122 | 132 | ||
133 | #ifdef CONFIG_PL310_ERRATA_727915 | ||
134 | static void l2x0_for_each_set_way(void __iomem *reg) | ||
135 | { | ||
136 | int set; | ||
137 | int way; | ||
138 | unsigned long flags; | ||
139 | |||
140 | for (way = 0; way < l2x0_ways; way++) { | ||
141 | spin_lock_irqsave(&l2x0_lock, flags); | ||
142 | for (set = 0; set < l2x0_sets; set++) | ||
143 | writel_relaxed((way << 28) | (set << 5), reg); | ||
144 | cache_sync(); | ||
145 | spin_unlock_irqrestore(&l2x0_lock, flags); | ||
146 | } | ||
147 | } | ||
148 | #endif | ||
149 | |||
123 | static void __l2x0_flush_all(void) | 150 | static void __l2x0_flush_all(void) |
124 | { | 151 | { |
125 | debug_writel(0x03); | 152 | debug_writel(0x03); |
@@ -133,6 +160,13 @@ static void l2x0_flush_all(void) | |||
133 | { | 160 | { |
134 | unsigned long flags; | 161 | unsigned long flags; |
135 | 162 | ||
163 | #ifdef CONFIG_PL310_ERRATA_727915 | ||
164 | if (is_pl310_rev(REV_PL310_R2P0)) { | ||
165 | l2x0_for_each_set_way(l2x0_base + L2X0_CLEAN_INV_LINE_IDX); | ||
166 | return; | ||
167 | } | ||
168 | #endif | ||
169 | |||
136 | /* clean all ways */ | 170 | /* clean all ways */ |
137 | spin_lock_irqsave(&l2x0_lock, flags); | 171 | spin_lock_irqsave(&l2x0_lock, flags); |
138 | __l2x0_flush_all(); | 172 | __l2x0_flush_all(); |
@@ -143,11 +177,20 @@ static void l2x0_clean_all(void) | |||
143 | { | 177 | { |
144 | unsigned long flags; | 178 | unsigned long flags; |
145 | 179 | ||
180 | #ifdef CONFIG_PL310_ERRATA_727915 | ||
181 | if (is_pl310_rev(REV_PL310_R2P0)) { | ||
182 | l2x0_for_each_set_way(l2x0_base + L2X0_CLEAN_LINE_IDX); | ||
183 | return; | ||
184 | } | ||
185 | #endif | ||
186 | |||
146 | /* clean all ways */ | 187 | /* clean all ways */ |
147 | spin_lock_irqsave(&l2x0_lock, flags); | 188 | spin_lock_irqsave(&l2x0_lock, flags); |
189 | debug_writel(0x03); | ||
148 | writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY); | 190 | writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY); |
149 | cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask); | 191 | cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask); |
150 | cache_sync(); | 192 | cache_sync(); |
193 | debug_writel(0x00); | ||
151 | spin_unlock_irqrestore(&l2x0_lock, flags); | 194 | spin_unlock_irqrestore(&l2x0_lock, flags); |
152 | } | 195 | } |
153 | 196 | ||
@@ -266,6 +309,16 @@ static void l2x0_flush_range(unsigned long start, unsigned long end) | |||
266 | spin_unlock_irqrestore(&l2x0_lock, flags); | 309 | spin_unlock_irqrestore(&l2x0_lock, flags); |
267 | } | 310 | } |
268 | 311 | ||
312 | /* enables l2x0 after l2x0_disable, does not invalidate */ | ||
313 | void l2x0_enable(void) | ||
314 | { | ||
315 | unsigned long flags; | ||
316 | |||
317 | spin_lock_irqsave(&l2x0_lock, flags); | ||
318 | writel_relaxed(1, l2x0_base + L2X0_CTRL); | ||
319 | spin_unlock_irqrestore(&l2x0_lock, flags); | ||
320 | } | ||
321 | |||
269 | static void l2x0_disable(void) | 322 | static void l2x0_disable(void) |
270 | { | 323 | { |
271 | unsigned long flags; | 324 | unsigned long flags; |
@@ -277,50 +330,68 @@ static void l2x0_disable(void) | |||
277 | spin_unlock_irqrestore(&l2x0_lock, flags); | 330 | spin_unlock_irqrestore(&l2x0_lock, flags); |
278 | } | 331 | } |
279 | 332 | ||
280 | void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask) | 333 | static void __init l2x0_unlock(__u32 cache_id) |
334 | { | ||
335 | int lockregs; | ||
336 | int i; | ||
337 | |||
338 | if (cache_id == L2X0_CACHE_ID_PART_L310) | ||
339 | lockregs = 8; | ||
340 | else | ||
341 | /* L210 and unknown types */ | ||
342 | lockregs = 1; | ||
343 | |||
344 | for (i = 0; i < lockregs; i++) { | ||
345 | writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE + | ||
346 | i * L2X0_LOCKDOWN_STRIDE); | ||
347 | writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE + | ||
348 | i * L2X0_LOCKDOWN_STRIDE); | ||
349 | } | ||
350 | } | ||
351 | |||
352 | void l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask) | ||
281 | { | 353 | { |
282 | __u32 aux; | 354 | __u32 aux; |
283 | __u32 cache_id; | ||
284 | __u32 way_size = 0; | 355 | __u32 way_size = 0; |
285 | int ways; | ||
286 | const char *type; | 356 | const char *type; |
287 | 357 | ||
288 | l2x0_base = base; | 358 | l2x0_base = base; |
289 | 359 | ||
290 | cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID); | 360 | l2x0_cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID); |
291 | aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); | 361 | aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); |
292 | 362 | ||
293 | aux &= aux_mask; | 363 | aux &= aux_mask; |
294 | aux |= aux_val; | 364 | aux |= aux_val; |
295 | 365 | ||
296 | /* Determine the number of ways */ | 366 | /* Determine the number of ways */ |
297 | switch (cache_id & L2X0_CACHE_ID_PART_MASK) { | 367 | switch (l2x0_cache_id & L2X0_CACHE_ID_PART_MASK) { |
298 | case L2X0_CACHE_ID_PART_L310: | 368 | case L2X0_CACHE_ID_PART_L310: |
299 | if (aux & (1 << 16)) | 369 | if (aux & (1 << 16)) |
300 | ways = 16; | 370 | l2x0_ways = 16; |
301 | else | 371 | else |
302 | ways = 8; | 372 | l2x0_ways = 8; |
303 | type = "L310"; | 373 | type = "L310"; |
304 | break; | 374 | break; |
305 | case L2X0_CACHE_ID_PART_L210: | 375 | case L2X0_CACHE_ID_PART_L210: |
306 | ways = (aux >> 13) & 0xf; | 376 | l2x0_ways = (aux >> 13) & 0xf; |
307 | type = "L210"; | 377 | type = "L210"; |
308 | break; | 378 | break; |
309 | default: | 379 | default: |
310 | /* Assume unknown chips have 8 ways */ | 380 | /* Assume unknown chips have 8 ways */ |
311 | ways = 8; | 381 | l2x0_ways = 8; |
312 | type = "L2x0 series"; | 382 | type = "L2x0 series"; |
313 | break; | 383 | break; |
314 | } | 384 | } |
315 | 385 | ||
316 | l2x0_way_mask = (1 << ways) - 1; | 386 | l2x0_way_mask = (1 << l2x0_ways) - 1; |
317 | 387 | ||
318 | /* | 388 | /* |
319 | * L2 cache Size = Way size * Number of ways | 389 | * L2 cache Size = Way size * Number of ways |
320 | */ | 390 | */ |
321 | way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17; | 391 | way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17; |
322 | way_size = 1 << (way_size + 3); | 392 | way_size = SZ_1K << (way_size + 3); |
323 | l2x0_size = ways * way_size * SZ_1K; | 393 | l2x0_size = l2x0_ways * way_size; |
394 | l2x0_sets = way_size / CACHE_LINE_SIZE; | ||
324 | 395 | ||
325 | /* | 396 | /* |
326 | * Check if l2x0 controller is already enabled. | 397 | * Check if l2x0 controller is already enabled. |
@@ -328,6 +399,8 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask) | |||
328 | * accessing the below registers will fault. | 399 | * accessing the below registers will fault. |
329 | */ | 400 | */ |
330 | if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) { | 401 | if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) { |
402 | /* Make sure that I&D is not locked down when starting */ | ||
403 | l2x0_unlock(l2x0_cache_id); | ||
331 | 404 | ||
332 | /* l2x0 controller is disabled */ | 405 | /* l2x0 controller is disabled */ |
333 | writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL); | 406 | writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL); |
@@ -347,7 +420,7 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask) | |||
347 | outer_cache.disable = l2x0_disable; | 420 | outer_cache.disable = l2x0_disable; |
348 | outer_cache.set_debug = l2x0_set_debug; | 421 | outer_cache.set_debug = l2x0_set_debug; |
349 | 422 | ||
350 | printk(KERN_INFO "%s cache controller enabled\n", type); | 423 | pr_info_once("%s cache controller enabled\n", type); |
351 | printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n", | 424 | pr_info_once("l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n", |
352 | ways, cache_id, aux, l2x0_size); | 425 | l2x0_ways, l2x0_cache_id, aux, l2x0_size); |
353 | } | 426 | } |
diff --git a/arch/arm/mm/cache-v3.S b/arch/arm/mm/cache-v3.S index 2e2bc406a18..c2301f22610 100644 --- a/arch/arm/mm/cache-v3.S +++ b/arch/arm/mm/cache-v3.S | |||
@@ -129,16 +129,5 @@ ENDPROC(v3_dma_map_area) | |||
129 | 129 | ||
130 | __INITDATA | 130 | __INITDATA |
131 | 131 | ||
132 | .type v3_cache_fns, #object | 132 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
133 | ENTRY(v3_cache_fns) | 133 | define_cache_functions v3 |
134 | .long v3_flush_icache_all | ||
135 | .long v3_flush_kern_cache_all | ||
136 | .long v3_flush_user_cache_all | ||
137 | .long v3_flush_user_cache_range | ||
138 | .long v3_coherent_kern_range | ||
139 | .long v3_coherent_user_range | ||
140 | .long v3_flush_kern_dcache_area | ||
141 | .long v3_dma_map_area | ||
142 | .long v3_dma_unmap_area | ||
143 | .long v3_dma_flush_range | ||
144 | .size v3_cache_fns, . - v3_cache_fns | ||
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S index a8fefb523f1..fd9bb7addc8 100644 --- a/arch/arm/mm/cache-v4.S +++ b/arch/arm/mm/cache-v4.S | |||
@@ -141,16 +141,5 @@ ENDPROC(v4_dma_map_area) | |||
141 | 141 | ||
142 | __INITDATA | 142 | __INITDATA |
143 | 143 | ||
144 | .type v4_cache_fns, #object | 144 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
145 | ENTRY(v4_cache_fns) | 145 | define_cache_functions v4 |
146 | .long v4_flush_icache_all | ||
147 | .long v4_flush_kern_cache_all | ||
148 | .long v4_flush_user_cache_all | ||
149 | .long v4_flush_user_cache_range | ||
150 | .long v4_coherent_kern_range | ||
151 | .long v4_coherent_user_range | ||
152 | .long v4_flush_kern_dcache_area | ||
153 | .long v4_dma_map_area | ||
154 | .long v4_dma_unmap_area | ||
155 | .long v4_dma_flush_range | ||
156 | .size v4_cache_fns, . - v4_cache_fns | ||
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S index f40c69656d8..4f2c14151cc 100644 --- a/arch/arm/mm/cache-v4wb.S +++ b/arch/arm/mm/cache-v4wb.S | |||
@@ -253,16 +253,5 @@ ENDPROC(v4wb_dma_unmap_area) | |||
253 | 253 | ||
254 | __INITDATA | 254 | __INITDATA |
255 | 255 | ||
256 | .type v4wb_cache_fns, #object | 256 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
257 | ENTRY(v4wb_cache_fns) | 257 | define_cache_functions v4wb |
258 | .long v4wb_flush_icache_all | ||
259 | .long v4wb_flush_kern_cache_all | ||
260 | .long v4wb_flush_user_cache_all | ||
261 | .long v4wb_flush_user_cache_range | ||
262 | .long v4wb_coherent_kern_range | ||
263 | .long v4wb_coherent_user_range | ||
264 | .long v4wb_flush_kern_dcache_area | ||
265 | .long v4wb_dma_map_area | ||
266 | .long v4wb_dma_unmap_area | ||
267 | .long v4wb_dma_flush_range | ||
268 | .size v4wb_cache_fns, . - v4wb_cache_fns | ||
diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S index a7b276dbda1..4d7b467631c 100644 --- a/arch/arm/mm/cache-v4wt.S +++ b/arch/arm/mm/cache-v4wt.S | |||
@@ -197,16 +197,5 @@ ENDPROC(v4wt_dma_map_area) | |||
197 | 197 | ||
198 | __INITDATA | 198 | __INITDATA |
199 | 199 | ||
200 | .type v4wt_cache_fns, #object | 200 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
201 | ENTRY(v4wt_cache_fns) | 201 | define_cache_functions v4wt |
202 | .long v4wt_flush_icache_all | ||
203 | .long v4wt_flush_kern_cache_all | ||
204 | .long v4wt_flush_user_cache_all | ||
205 | .long v4wt_flush_user_cache_range | ||
206 | .long v4wt_coherent_kern_range | ||
207 | .long v4wt_coherent_user_range | ||
208 | .long v4wt_flush_kern_dcache_area | ||
209 | .long v4wt_dma_map_area | ||
210 | .long v4wt_dma_unmap_area | ||
211 | .long v4wt_dma_flush_range | ||
212 | .size v4wt_cache_fns, . - v4wt_cache_fns | ||
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S index 73b4a8b66a5..2edb6f67f69 100644 --- a/arch/arm/mm/cache-v6.S +++ b/arch/arm/mm/cache-v6.S | |||
@@ -272,6 +272,11 @@ v6_dma_clean_range: | |||
272 | * - end - virtual end address of region | 272 | * - end - virtual end address of region |
273 | */ | 273 | */ |
274 | ENTRY(v6_dma_flush_range) | 274 | ENTRY(v6_dma_flush_range) |
275 | #ifdef CONFIG_CACHE_FLUSH_RANGE_LIMIT | ||
276 | sub r2, r1, r0 | ||
277 | cmp r2, #CONFIG_CACHE_FLUSH_RANGE_LIMIT | ||
278 | bhi v6_dma_flush_dcache_all | ||
279 | #endif | ||
275 | #ifdef CONFIG_DMA_CACHE_RWFO | 280 | #ifdef CONFIG_DMA_CACHE_RWFO |
276 | ldrb r2, [r0] @ read for ownership | 281 | ldrb r2, [r0] @ read for ownership |
277 | strb r2, [r0] @ write for ownership | 282 | strb r2, [r0] @ write for ownership |
@@ -294,6 +299,18 @@ ENTRY(v6_dma_flush_range) | |||
294 | mcr p15, 0, r0, c7, c10, 4 @ drain write buffer | 299 | mcr p15, 0, r0, c7, c10, 4 @ drain write buffer |
295 | mov pc, lr | 300 | mov pc, lr |
296 | 301 | ||
302 | #ifdef CONFIG_CACHE_FLUSH_RANGE_LIMIT | ||
303 | v6_dma_flush_dcache_all: | ||
304 | mov r0, #0 | ||
305 | #ifdef HARVARD_CACHE | ||
306 | mcr p15, 0, r0, c7, c14, 0 @ D cache clean+invalidate | ||
307 | #else | ||
308 | mcr p15, 0, r0, c7, c15, 0 @ Cache clean+invalidate | ||
309 | #endif | ||
310 | mcr p15, 0, r0, c7, c10, 4 @ drain write buffer | ||
311 | mov pc, lr | ||
312 | #endif | ||
313 | |||
297 | /* | 314 | /* |
298 | * dma_map_area(start, size, dir) | 315 | * dma_map_area(start, size, dir) |
299 | * - start - kernel virtual start address | 316 | * - start - kernel virtual start address |
@@ -330,16 +347,5 @@ ENDPROC(v6_dma_unmap_area) | |||
330 | 347 | ||
331 | __INITDATA | 348 | __INITDATA |
332 | 349 | ||
333 | .type v6_cache_fns, #object | 350 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
334 | ENTRY(v6_cache_fns) | 351 | define_cache_functions v6 |
335 | .long v6_flush_icache_all | ||
336 | .long v6_flush_kern_cache_all | ||
337 | .long v6_flush_user_cache_all | ||
338 | .long v6_flush_user_cache_range | ||
339 | .long v6_coherent_kern_range | ||
340 | .long v6_coherent_user_range | ||
341 | .long v6_flush_kern_dcache_area | ||
342 | .long v6_dma_map_area | ||
343 | .long v6_dma_unmap_area | ||
344 | .long v6_dma_flush_range | ||
345 | .size v6_cache_fns, . - v6_cache_fns | ||
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index d32f02b6186..ea33896449b 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S | |||
@@ -33,30 +33,37 @@ ENTRY(v7_flush_icache_all) | |||
33 | ENDPROC(v7_flush_icache_all) | 33 | ENDPROC(v7_flush_icache_all) |
34 | 34 | ||
35 | /* | 35 | /* |
36 | * v7_flush_dcache_all() | 36 | * v7_op_dcache_all op |
37 | * | 37 | * |
38 | * Flush the whole D-cache. | 38 | * op=c14, Flush the whole D-cache. |
39 | * op=c10, Clean the whole D-cache. | ||
39 | * | 40 | * |
40 | * Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode) | 41 | * Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode) |
41 | * | 42 | * |
42 | * - mm - mm_struct describing address space | 43 | * - mm - mm_struct describing address space |
43 | */ | 44 | */ |
44 | ENTRY(v7_flush_dcache_all) | 45 | .macro v7_op_dcache_all op @ op=c10 clean, op=c14 flush |
45 | dmb @ ensure ordering with previous memory accesses | 46 | dmb @ ensure ordering with previous memory accesses |
46 | mrc p15, 1, r0, c0, c0, 1 @ read clidr | 47 | mrc p15, 1, r0, c0, c0, 1 @ read clidr |
47 | ands r3, r0, #0x7000000 @ extract loc from clidr | 48 | ands r3, r0, #0x7000000 @ extract loc from clidr |
48 | mov r3, r3, lsr #23 @ left align loc bit field | 49 | mov r3, r3, lsr #23 @ left align loc bit field |
49 | beq finished @ if loc is 0, then no need to clean | 50 | beq 1005f @ if loc is 0, then no need to clean |
50 | mov r10, #0 @ start clean at cache level 0 | 51 | mov r10, #0 @ start clean at cache level 0 |
51 | loop1: | 52 | 1001: |
52 | add r2, r10, r10, lsr #1 @ work out 3x current cache level | 53 | add r2, r10, r10, lsr #1 @ work out 3x current cache level |
53 | mov r1, r0, lsr r2 @ extract cache type bits from clidr | 54 | mov r1, r0, lsr r2 @ extract cache type bits from clidr |
54 | and r1, r1, #7 @ mask of the bits for current cache only | 55 | and r1, r1, #7 @ mask of the bits for current cache only |
55 | cmp r1, #2 @ see what cache we have at this level | 56 | cmp r1, #2 @ see what cache we have at this level |
56 | blt skip @ skip if no cache, or just i-cache | 57 | blt 1004f @ skip if no cache, or just i-cache |
58 | #ifdef CONFIG_PREEMPT | ||
59 | save_and_disable_irqs_notrace r9 @ make cssr&csidr read atomic | ||
60 | #endif | ||
57 | mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr | 61 | mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr |
58 | isb @ isb to sych the new cssr&csidr | 62 | isb @ isb to sych the new cssr&csidr |
59 | mrc p15, 1, r1, c0, c0, 0 @ read the new csidr | 63 | mrc p15, 1, r1, c0, c0, 0 @ read the new csidr |
64 | #ifdef CONFIG_PREEMPT | ||
65 | restore_irqs_notrace r9 | ||
66 | #endif | ||
60 | and r2, r1, #7 @ extract the length of the cache lines | 67 | and r2, r1, #7 @ extract the length of the cache lines |
61 | add r2, r2, #4 @ add 4 (line length offset) | 68 | add r2, r2, #4 @ add 4 (line length offset) |
62 | ldr r4, =0x3ff | 69 | ldr r4, =0x3ff |
@@ -64,32 +71,40 @@ loop1: | |||
64 | clz r5, r4 @ find bit position of way size increment | 71 | clz r5, r4 @ find bit position of way size increment |
65 | ldr r7, =0x7fff | 72 | ldr r7, =0x7fff |
66 | ands r7, r7, r1, lsr #13 @ extract max number of the index size | 73 | ands r7, r7, r1, lsr #13 @ extract max number of the index size |
67 | loop2: | 74 | 1002: |
68 | mov r9, r4 @ create working copy of max way size | 75 | mov r9, r4 @ create working copy of max way size |
69 | loop3: | 76 | 1003: |
70 | ARM( orr r11, r10, r9, lsl r5 ) @ factor way and cache number into r11 | 77 | ARM( orr r11, r10, r9, lsl r5 ) @ factor way and cache number into r11 |
71 | THUMB( lsl r6, r9, r5 ) | 78 | THUMB( lsl r6, r9, r5 ) |
72 | THUMB( orr r11, r10, r6 ) @ factor way and cache number into r11 | 79 | THUMB( orr r11, r10, r6 ) @ factor way and cache number into r11 |
73 | ARM( orr r11, r11, r7, lsl r2 ) @ factor index number into r11 | 80 | ARM( orr r11, r11, r7, lsl r2 ) @ factor index number into r11 |
74 | THUMB( lsl r6, r7, r2 ) | 81 | THUMB( lsl r6, r7, r2 ) |
75 | THUMB( orr r11, r11, r6 ) @ factor index number into r11 | 82 | THUMB( orr r11, r11, r6 ) @ factor index number into r11 |
76 | mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way | 83 | mcr p15, 0, r11, c7, \op, 2 @ op=c10/c14, clean/flush by set/way |
77 | subs r9, r9, #1 @ decrement the way | 84 | subs r9, r9, #1 @ decrement the way |
78 | bge loop3 | 85 | bge 1003b |
79 | subs r7, r7, #1 @ decrement the index | 86 | subs r7, r7, #1 @ decrement the index |
80 | bge loop2 | 87 | bge 1002b |
81 | skip: | 88 | 1004: |
82 | add r10, r10, #2 @ increment cache number | 89 | add r10, r10, #2 @ increment cache number |
83 | cmp r3, r10 | 90 | cmp r3, r10 |
84 | bgt loop1 | 91 | bgt 1001b |
85 | finished: | 92 | 1005: |
86 | mov r10, #0 @ swith back to cache level 0 | 93 | mov r10, #0 @ swith back to cache level 0 |
87 | mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr | 94 | mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr |
88 | dsb | 95 | dsb |
89 | isb | 96 | isb |
90 | mov pc, lr | 97 | mov pc, lr |
98 | .endm | ||
99 | |||
100 | ENTRY(v7_flush_dcache_all) | ||
101 | v7_op_dcache_all c14 | ||
91 | ENDPROC(v7_flush_dcache_all) | 102 | ENDPROC(v7_flush_dcache_all) |
92 | 103 | ||
104 | ENTRY(v7_clean_dcache_all) | ||
105 | v7_op_dcache_all c10 | ||
106 | ENDPROC(v7_clean_dcache_all) | ||
107 | |||
93 | /* | 108 | /* |
94 | * v7_flush_cache_all() | 109 | * v7_flush_cache_all() |
95 | * | 110 | * |
@@ -114,6 +129,24 @@ ENTRY(v7_flush_kern_cache_all) | |||
114 | ENDPROC(v7_flush_kern_cache_all) | 129 | ENDPROC(v7_flush_kern_cache_all) |
115 | 130 | ||
116 | /* | 131 | /* |
132 | * v7_clean_kern_cache_all() | ||
133 | */ | ||
134 | ENTRY(v7_clean_kern_cache_all) | ||
135 | ARM( stmfd sp!, {r4-r5, r7, r9-r11, lr} ) | ||
136 | THUMB( stmfd sp!, {r4-r7, r9-r11, lr} ) | ||
137 | bl v7_clean_dcache_all | ||
138 | mov r0, #0 | ||
139 | #ifdef CONFIG_SMP | ||
140 | mcr p15, 0, r0, c7, c1, 0 @ invalidate I-cache inner shareable | ||
141 | #else | ||
142 | mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate | ||
143 | #endif | ||
144 | ARM( ldmfd sp!, {r4-r5, r7, r9-r11, lr} ) | ||
145 | THUMB( ldmfd sp!, {r4-r7, r9-r11, lr} ) | ||
146 | mov pc, lr | ||
147 | ENDPROC(v7_clean_kern_cache_all) | ||
148 | |||
149 | /* | ||
117 | * v7_flush_cache_all() | 150 | * v7_flush_cache_all() |
118 | * | 151 | * |
119 | * Flush all TLB entries in a particular address space | 152 | * Flush all TLB entries in a particular address space |
@@ -174,6 +207,10 @@ ENTRY(v7_coherent_user_range) | |||
174 | dcache_line_size r2, r3 | 207 | dcache_line_size r2, r3 |
175 | sub r3, r2, #1 | 208 | sub r3, r2, #1 |
176 | bic r12, r0, r3 | 209 | bic r12, r0, r3 |
210 | #ifdef CONFIG_ARM_ERRATA_764369 | ||
211 | ALT_SMP(W(dsb)) | ||
212 | ALT_UP(W(nop)) | ||
213 | #endif | ||
177 | 1: | 214 | 1: |
178 | USER( mcr p15, 0, r12, c7, c11, 1 ) @ clean D line to the point of unification | 215 | USER( mcr p15, 0, r12, c7, c11, 1 ) @ clean D line to the point of unification |
179 | add r12, r12, r2 | 216 | add r12, r12, r2 |
@@ -223,6 +260,10 @@ ENTRY(v7_flush_kern_dcache_area) | |||
223 | add r1, r0, r1 | 260 | add r1, r0, r1 |
224 | sub r3, r2, #1 | 261 | sub r3, r2, #1 |
225 | bic r0, r0, r3 | 262 | bic r0, r0, r3 |
263 | #ifdef CONFIG_ARM_ERRATA_764369 | ||
264 | ALT_SMP(W(dsb)) | ||
265 | ALT_UP(W(nop)) | ||
266 | #endif | ||
226 | 1: | 267 | 1: |
227 | mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line / unified line | 268 | mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line / unified line |
228 | add r0, r0, r2 | 269 | add r0, r0, r2 |
@@ -247,6 +288,10 @@ v7_dma_inv_range: | |||
247 | sub r3, r2, #1 | 288 | sub r3, r2, #1 |
248 | tst r0, r3 | 289 | tst r0, r3 |
249 | bic r0, r0, r3 | 290 | bic r0, r0, r3 |
291 | #ifdef CONFIG_ARM_ERRATA_764369 | ||
292 | ALT_SMP(W(dsb)) | ||
293 | ALT_UP(W(nop)) | ||
294 | #endif | ||
250 | mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line | 295 | mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line |
251 | 296 | ||
252 | tst r1, r3 | 297 | tst r1, r3 |
@@ -270,6 +315,10 @@ v7_dma_clean_range: | |||
270 | dcache_line_size r2, r3 | 315 | dcache_line_size r2, r3 |
271 | sub r3, r2, #1 | 316 | sub r3, r2, #1 |
272 | bic r0, r0, r3 | 317 | bic r0, r0, r3 |
318 | #ifdef CONFIG_ARM_ERRATA_764369 | ||
319 | ALT_SMP(W(dsb)) | ||
320 | ALT_UP(W(nop)) | ||
321 | #endif | ||
273 | 1: | 322 | 1: |
274 | mcr p15, 0, r0, c7, c10, 1 @ clean D / U line | 323 | mcr p15, 0, r0, c7, c10, 1 @ clean D / U line |
275 | add r0, r0, r2 | 324 | add r0, r0, r2 |
@@ -288,6 +337,10 @@ ENTRY(v7_dma_flush_range) | |||
288 | dcache_line_size r2, r3 | 337 | dcache_line_size r2, r3 |
289 | sub r3, r2, #1 | 338 | sub r3, r2, #1 |
290 | bic r0, r0, r3 | 339 | bic r0, r0, r3 |
340 | #ifdef CONFIG_ARM_ERRATA_764369 | ||
341 | ALT_SMP(W(dsb)) | ||
342 | ALT_UP(W(nop)) | ||
343 | #endif | ||
291 | 1: | 344 | 1: |
292 | mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line | 345 | mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line |
293 | add r0, r0, r2 | 346 | add r0, r0, r2 |
@@ -325,16 +378,5 @@ ENDPROC(v7_dma_unmap_area) | |||
325 | 378 | ||
326 | __INITDATA | 379 | __INITDATA |
327 | 380 | ||
328 | .type v7_cache_fns, #object | 381 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
329 | ENTRY(v7_cache_fns) | 382 | define_cache_functions v7 |
330 | .long v7_flush_icache_all | ||
331 | .long v7_flush_kern_cache_all | ||
332 | .long v7_flush_user_cache_all | ||
333 | .long v7_flush_user_cache_range | ||
334 | .long v7_coherent_kern_range | ||
335 | .long v7_coherent_user_range | ||
336 | .long v7_flush_kern_dcache_area | ||
337 | .long v7_dma_map_area | ||
338 | .long v7_dma_unmap_area | ||
339 | .long v7_dma_flush_range | ||
340 | .size v7_cache_fns, . - v7_cache_fns | ||
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c index bdba6c65c90..63cca009713 100644 --- a/arch/arm/mm/copypage-v6.c +++ b/arch/arm/mm/copypage-v6.c | |||
@@ -41,7 +41,6 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to, | |||
41 | kfrom = kmap_atomic(from, KM_USER0); | 41 | kfrom = kmap_atomic(from, KM_USER0); |
42 | kto = kmap_atomic(to, KM_USER1); | 42 | kto = kmap_atomic(to, KM_USER1); |
43 | copy_page(kto, kfrom); | 43 | copy_page(kto, kfrom); |
44 | __cpuc_flush_dcache_area(kto, PAGE_SIZE); | ||
45 | kunmap_atomic(kto, KM_USER1); | 44 | kunmap_atomic(kto, KM_USER1); |
46 | kunmap_atomic(kfrom, KM_USER0); | 45 | kunmap_atomic(kfrom, KM_USER0); |
47 | } | 46 | } |
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 82a093cee09..9cd5334019e 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -25,9 +25,11 @@ | |||
25 | #include <asm/tlbflush.h> | 25 | #include <asm/tlbflush.h> |
26 | #include <asm/sizes.h> | 26 | #include <asm/sizes.h> |
27 | 27 | ||
28 | #include "mm.h" | ||
29 | |||
28 | static u64 get_coherent_dma_mask(struct device *dev) | 30 | static u64 get_coherent_dma_mask(struct device *dev) |
29 | { | 31 | { |
30 | u64 mask = ISA_DMA_THRESHOLD; | 32 | u64 mask = (u64)arm_dma_limit; |
31 | 33 | ||
32 | if (dev) { | 34 | if (dev) { |
33 | mask = dev->coherent_dma_mask; | 35 | mask = dev->coherent_dma_mask; |
@@ -41,10 +43,10 @@ static u64 get_coherent_dma_mask(struct device *dev) | |||
41 | return 0; | 43 | return 0; |
42 | } | 44 | } |
43 | 45 | ||
44 | if ((~mask) & ISA_DMA_THRESHOLD) { | 46 | if ((~mask) & (u64)arm_dma_limit) { |
45 | dev_warn(dev, "coherent DMA mask %#llx is smaller " | 47 | dev_warn(dev, "coherent DMA mask %#llx is smaller " |
46 | "than system GFP_DMA mask %#llx\n", | 48 | "than system GFP_DMA mask %#llx\n", |
47 | mask, (unsigned long long)ISA_DMA_THRESHOLD); | 49 | mask, (u64)arm_dma_limit); |
48 | return 0; | 50 | return 0; |
49 | } | 51 | } |
50 | } | 52 | } |
@@ -308,6 +310,13 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, | |||
308 | struct page *page; | 310 | struct page *page; |
309 | void *addr; | 311 | void *addr; |
310 | 312 | ||
313 | /* Following is a work-around (a.k.a. hack) to prevent pages | ||
314 | * with __GFP_COMP being passed to split_page() which cannot | ||
315 | * handle them. The real problem is that this flag probably | ||
316 | * should be 0 on ARM as it is not supported on this | ||
317 | * platform--see CONFIG_HUGETLB_PAGE. */ | ||
318 | gfp &= ~(__GFP_COMP); | ||
319 | |||
311 | *handle = ~0; | 320 | *handle = ~0; |
312 | size = PAGE_ALIGN(size); | 321 | size = PAGE_ALIGN(size); |
313 | 322 | ||
@@ -322,6 +331,8 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, | |||
322 | 331 | ||
323 | if (addr) | 332 | if (addr) |
324 | *handle = pfn_to_dma(dev, page_to_pfn(page)); | 333 | *handle = pfn_to_dma(dev, page_to_pfn(page)); |
334 | else | ||
335 | __dma_free_buffer(page, size); | ||
325 | 336 | ||
326 | return addr; | 337 | return addr; |
327 | } | 338 | } |
@@ -657,6 +668,33 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, | |||
657 | } | 668 | } |
658 | EXPORT_SYMBOL(dma_sync_sg_for_device); | 669 | EXPORT_SYMBOL(dma_sync_sg_for_device); |
659 | 670 | ||
671 | /* | ||
672 | * Return whether the given device DMA address mask can be supported | ||
673 | * properly. For example, if your device can only drive the low 24-bits | ||
674 | * during bus mastering, then you would pass 0x00ffffff as the mask | ||
675 | * to this function. | ||
676 | */ | ||
677 | int dma_supported(struct device *dev, u64 mask) | ||
678 | { | ||
679 | if (mask < (u64)arm_dma_limit) | ||
680 | return 0; | ||
681 | return 1; | ||
682 | } | ||
683 | EXPORT_SYMBOL(dma_supported); | ||
684 | |||
685 | int dma_set_mask(struct device *dev, u64 dma_mask) | ||
686 | { | ||
687 | if (!dev->dma_mask || !dma_supported(dev, dma_mask)) | ||
688 | return -EIO; | ||
689 | |||
690 | #ifndef CONFIG_DMABOUNCE | ||
691 | *dev->dma_mask = dma_mask; | ||
692 | #endif | ||
693 | |||
694 | return 0; | ||
695 | } | ||
696 | EXPORT_SYMBOL(dma_set_mask); | ||
697 | |||
660 | #define PREALLOC_DMA_DEBUG_ENTRIES 4096 | 698 | #define PREALLOC_DMA_DEBUG_ENTRIES 4096 |
661 | 699 | ||
662 | static int __init dma_debug_do_init(void) | 700 | static int __init dma_debug_do_init(void) |
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index bc0e1d88fd3..3b5ea68acbb 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c | |||
@@ -94,7 +94,7 @@ void show_pte(struct mm_struct *mm, unsigned long addr) | |||
94 | 94 | ||
95 | pud = pud_offset(pgd, addr); | 95 | pud = pud_offset(pgd, addr); |
96 | if (PTRS_PER_PUD != 1) | 96 | if (PTRS_PER_PUD != 1) |
97 | printk(", *pud=%08lx", pud_val(*pud)); | 97 | printk(", *pud=%08llx", (long long)pud_val(*pud)); |
98 | 98 | ||
99 | if (pud_none(*pud)) | 99 | if (pud_none(*pud)) |
100 | break; | 100 | break; |
@@ -285,6 +285,10 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |||
285 | tsk = current; | 285 | tsk = current; |
286 | mm = tsk->mm; | 286 | mm = tsk->mm; |
287 | 287 | ||
288 | /* Enable interrupts if they were enabled in the parent context. */ | ||
289 | if (interrupts_enabled(regs)) | ||
290 | local_irq_enable(); | ||
291 | |||
288 | /* | 292 | /* |
289 | * If we're in an interrupt or have no user | 293 | * If we're in an interrupt or have no user |
290 | * context, we must not take the fault.. | 294 | * context, we must not take the fault.. |
@@ -318,11 +322,11 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |||
318 | fault = __do_page_fault(mm, addr, fsr, tsk); | 322 | fault = __do_page_fault(mm, addr, fsr, tsk); |
319 | up_read(&mm->mmap_sem); | 323 | up_read(&mm->mmap_sem); |
320 | 324 | ||
321 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, addr); | 325 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr); |
322 | if (fault & VM_FAULT_MAJOR) | 326 | if (fault & VM_FAULT_MAJOR) |
323 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, regs, addr); | 327 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, addr); |
324 | else if (fault & VM_FAULT_MINOR) | 328 | else if (fault & VM_FAULT_MINOR) |
325 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, regs, addr); | 329 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, addr); |
326 | 330 | ||
327 | /* | 331 | /* |
328 | * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR | 332 | * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR |
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index c19571c40a2..f8037ba338a 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c | |||
@@ -212,6 +212,18 @@ static void __init arm_bootmem_init(unsigned long start_pfn, | |||
212 | } | 212 | } |
213 | 213 | ||
214 | #ifdef CONFIG_ZONE_DMA | 214 | #ifdef CONFIG_ZONE_DMA |
215 | |||
216 | unsigned long arm_dma_zone_size __read_mostly; | ||
217 | EXPORT_SYMBOL(arm_dma_zone_size); | ||
218 | |||
219 | /* | ||
220 | * The DMA mask corresponding to the maximum bus address allocatable | ||
221 | * using GFP_DMA. The default here places no restriction on DMA | ||
222 | * allocations. This must be the smallest DMA mask in the system, | ||
223 | * so a successful GFP_DMA allocation will always satisfy this. | ||
224 | */ | ||
225 | u32 arm_dma_limit; | ||
226 | |||
215 | static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole, | 227 | static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole, |
216 | unsigned long dma_size) | 228 | unsigned long dma_size) |
217 | { | 229 | { |
@@ -267,17 +279,17 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low, | |||
267 | #endif | 279 | #endif |
268 | } | 280 | } |
269 | 281 | ||
270 | #ifdef ARM_DMA_ZONE_SIZE | 282 | #ifdef CONFIG_ZONE_DMA |
271 | #ifndef CONFIG_ZONE_DMA | ||
272 | #error ARM_DMA_ZONE_SIZE set but no DMA zone to limit allocations | ||
273 | #endif | ||
274 | |||
275 | /* | 283 | /* |
276 | * Adjust the sizes according to any special requirements for | 284 | * Adjust the sizes according to any special requirements for |
277 | * this machine type. | 285 | * this machine type. |
278 | */ | 286 | */ |
279 | arm_adjust_dma_zone(zone_size, zhole_size, | 287 | if (arm_dma_zone_size) { |
280 | ARM_DMA_ZONE_SIZE >> PAGE_SHIFT); | 288 | arm_adjust_dma_zone(zone_size, zhole_size, |
289 | arm_dma_zone_size >> PAGE_SHIFT); | ||
290 | arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1; | ||
291 | } else | ||
292 | arm_dma_limit = 0xffffffff; | ||
281 | #endif | 293 | #endif |
282 | 294 | ||
283 | free_area_init_node(0, zone_size, min, zhole_size); | 295 | free_area_init_node(0, zone_size, min, zhole_size); |
@@ -286,7 +298,7 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low, | |||
286 | #ifdef CONFIG_HAVE_ARCH_PFN_VALID | 298 | #ifdef CONFIG_HAVE_ARCH_PFN_VALID |
287 | int pfn_valid(unsigned long pfn) | 299 | int pfn_valid(unsigned long pfn) |
288 | { | 300 | { |
289 | return memblock_is_memory(pfn << PAGE_SHIFT); | 301 | return memblock_is_memory(__pfn_to_phys(pfn)); |
290 | } | 302 | } |
291 | EXPORT_SYMBOL(pfn_valid); | 303 | EXPORT_SYMBOL(pfn_valid); |
292 | #endif | 304 | #endif |
@@ -422,6 +434,17 @@ static inline int free_area(unsigned long pfn, unsigned long end, char *s) | |||
422 | return pages; | 434 | return pages; |
423 | } | 435 | } |
424 | 436 | ||
437 | /* | ||
438 | * Poison init memory with an undefined instruction (ARM) or a branch to an | ||
439 | * undefined instruction (Thumb). | ||
440 | */ | ||
441 | static inline void poison_init_mem(void *s, size_t count) | ||
442 | { | ||
443 | u32 *p = (u32 *)s; | ||
444 | for (; count != 0; count -= 4) | ||
445 | *p++ = 0xe7fddef0; | ||
446 | } | ||
447 | |||
425 | static inline void | 448 | static inline void |
426 | free_memmap(unsigned long start_pfn, unsigned long end_pfn) | 449 | free_memmap(unsigned long start_pfn, unsigned long end_pfn) |
427 | { | 450 | { |
@@ -473,6 +496,13 @@ static void __init free_unused_memmap(struct meminfo *mi) | |||
473 | */ | 496 | */ |
474 | bank_start = min(bank_start, | 497 | bank_start = min(bank_start, |
475 | ALIGN(prev_bank_end, PAGES_PER_SECTION)); | 498 | ALIGN(prev_bank_end, PAGES_PER_SECTION)); |
499 | #else | ||
500 | /* | ||
501 | * Align down here since the VM subsystem insists that the | ||
502 | * memmap entries are valid from the bank start aligned to | ||
503 | * MAX_ORDER_NR_PAGES. | ||
504 | */ | ||
505 | bank_start = round_down(bank_start, MAX_ORDER_NR_PAGES); | ||
476 | #endif | 506 | #endif |
477 | /* | 507 | /* |
478 | * If we had a previous bank, and there is a space | 508 | * If we had a previous bank, and there is a space |
@@ -639,8 +669,8 @@ void __init mem_init(void) | |||
639 | " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n" | 669 | " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n" |
640 | #endif | 670 | #endif |
641 | " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" | 671 | " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" |
642 | " .init : 0x%p" " - 0x%p" " (%4d kB)\n" | ||
643 | " .text : 0x%p" " - 0x%p" " (%4d kB)\n" | 672 | " .text : 0x%p" " - 0x%p" " (%4d kB)\n" |
673 | " .init : 0x%p" " - 0x%p" " (%4d kB)\n" | ||
644 | " .data : 0x%p" " - 0x%p" " (%4d kB)\n" | 674 | " .data : 0x%p" " - 0x%p" " (%4d kB)\n" |
645 | " .bss : 0x%p" " - 0x%p" " (%4d kB)\n", | 675 | " .bss : 0x%p" " - 0x%p" " (%4d kB)\n", |
646 | 676 | ||
@@ -662,8 +692,8 @@ void __init mem_init(void) | |||
662 | #endif | 692 | #endif |
663 | MLM(MODULES_VADDR, MODULES_END), | 693 | MLM(MODULES_VADDR, MODULES_END), |
664 | 694 | ||
665 | MLK_ROUNDUP(__init_begin, __init_end), | ||
666 | MLK_ROUNDUP(_text, _etext), | 695 | MLK_ROUNDUP(_text, _etext), |
696 | MLK_ROUNDUP(__init_begin, __init_end), | ||
667 | MLK_ROUNDUP(_sdata, _edata), | 697 | MLK_ROUNDUP(_sdata, _edata), |
668 | MLK_ROUNDUP(__bss_start, __bss_stop)); | 698 | MLK_ROUNDUP(__bss_start, __bss_stop)); |
669 | 699 | ||
@@ -704,11 +734,13 @@ void free_initmem(void) | |||
704 | #ifdef CONFIG_HAVE_TCM | 734 | #ifdef CONFIG_HAVE_TCM |
705 | extern char __tcm_start, __tcm_end; | 735 | extern char __tcm_start, __tcm_end; |
706 | 736 | ||
737 | poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start); | ||
707 | totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)), | 738 | totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)), |
708 | __phys_to_pfn(__pa(&__tcm_end)), | 739 | __phys_to_pfn(__pa(&__tcm_end)), |
709 | "TCM link"); | 740 | "TCM link"); |
710 | #endif | 741 | #endif |
711 | 742 | ||
743 | poison_init_mem(__init_begin, __init_end - __init_begin); | ||
712 | if (!machine_is_integrator() && !machine_is_cintegrator()) | 744 | if (!machine_is_integrator() && !machine_is_cintegrator()) |
713 | totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)), | 745 | totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)), |
714 | __phys_to_pfn(__pa(__init_end)), | 746 | __phys_to_pfn(__pa(__init_end)), |
@@ -721,10 +753,12 @@ static int keep_initrd; | |||
721 | 753 | ||
722 | void free_initrd_mem(unsigned long start, unsigned long end) | 754 | void free_initrd_mem(unsigned long start, unsigned long end) |
723 | { | 755 | { |
724 | if (!keep_initrd) | 756 | if (!keep_initrd) { |
757 | poison_init_mem((void *)start, PAGE_ALIGN(end) - start); | ||
725 | totalram_pages += free_area(__phys_to_pfn(__pa(start)), | 758 | totalram_pages += free_area(__phys_to_pfn(__pa(start)), |
726 | __phys_to_pfn(__pa(end)), | 759 | __phys_to_pfn(__pa(end)), |
727 | "initrd"); | 760 | "initrd"); |
761 | } | ||
728 | } | 762 | } |
729 | 763 | ||
730 | static int __init keepinitrd_setup(char *__unused) | 764 | static int __init keepinitrd_setup(char *__unused) |
diff --git a/arch/arm/mm/iomap.c b/arch/arm/mm/iomap.c index ffad039cbb7..430df1a5978 100644 --- a/arch/arm/mm/iomap.c +++ b/arch/arm/mm/iomap.c | |||
@@ -9,6 +9,9 @@ | |||
9 | #include <linux/ioport.h> | 9 | #include <linux/ioport.h> |
10 | #include <linux/io.h> | 10 | #include <linux/io.h> |
11 | 11 | ||
12 | unsigned long vga_base; | ||
13 | EXPORT_SYMBOL(vga_base); | ||
14 | |||
12 | #ifdef __io | 15 | #ifdef __io |
13 | void __iomem *ioport_map(unsigned long port, unsigned int nr) | 16 | void __iomem *ioport_map(unsigned long port, unsigned int nr) |
14 | { | 17 | { |
@@ -23,6 +26,15 @@ EXPORT_SYMBOL(ioport_unmap); | |||
23 | #endif | 26 | #endif |
24 | 27 | ||
25 | #ifdef CONFIG_PCI | 28 | #ifdef CONFIG_PCI |
29 | unsigned long pcibios_min_io = 0x1000; | ||
30 | EXPORT_SYMBOL(pcibios_min_io); | ||
31 | |||
32 | unsigned long pcibios_min_mem = 0x01000000; | ||
33 | EXPORT_SYMBOL(pcibios_min_mem); | ||
34 | |||
35 | unsigned int pci_flags = PCI_REASSIGN_ALL_RSRC; | ||
36 | EXPORT_SYMBOL(pci_flags); | ||
37 | |||
26 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) | 38 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) |
27 | { | 39 | { |
28 | resource_size_t start = pci_resource_start(dev, bar); | 40 | resource_size_t start = pci_resource_start(dev, bar); |
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index 5b3d7d54365..010566799c8 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h | |||
@@ -23,5 +23,11 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page | |||
23 | 23 | ||
24 | #endif | 24 | #endif |
25 | 25 | ||
26 | #ifdef CONFIG_ZONE_DMA | ||
27 | extern u32 arm_dma_limit; | ||
28 | #else | ||
29 | #define arm_dma_limit ((u32)~0) | ||
30 | #endif | ||
31 | |||
26 | void __init bootmem_init(void); | 32 | void __init bootmem_init(void); |
27 | void arm_mm_memblock_reserve(void); | 33 | void arm_mm_memblock_reserve(void); |
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 594d677b92c..4fa9c246ae9 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c | |||
@@ -554,6 +554,8 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr, | |||
554 | const struct mem_type *type) | 554 | const struct mem_type *type) |
555 | { | 555 | { |
556 | pmd_t *pmd = pmd_offset(pud, addr); | 556 | pmd_t *pmd = pmd_offset(pud, addr); |
557 | unsigned long pages_2m = 0, pages_4k = 0; | ||
558 | unsigned long stash_phys = phys; | ||
557 | 559 | ||
558 | /* | 560 | /* |
559 | * Try a section mapping - end, addr and phys must all be aligned | 561 | * Try a section mapping - end, addr and phys must all be aligned |
@@ -564,6 +566,8 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr, | |||
564 | if (((addr | end | phys) & ~SECTION_MASK) == 0) { | 566 | if (((addr | end | phys) & ~SECTION_MASK) == 0) { |
565 | pmd_t *p = pmd; | 567 | pmd_t *p = pmd; |
566 | 568 | ||
569 | pages_2m = (end - addr) >> (PGDIR_SHIFT); | ||
570 | |||
567 | if (addr & SECTION_SIZE) | 571 | if (addr & SECTION_SIZE) |
568 | pmd++; | 572 | pmd++; |
569 | 573 | ||
@@ -574,12 +578,18 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr, | |||
574 | 578 | ||
575 | flush_pmd_entry(p); | 579 | flush_pmd_entry(p); |
576 | } else { | 580 | } else { |
581 | pages_4k = (end - addr) >> PAGE_SHIFT; | ||
577 | /* | 582 | /* |
578 | * No need to loop; pte's aren't interested in the | 583 | * No need to loop; pte's aren't interested in the |
579 | * individual L1 entries. | 584 | * individual L1 entries. |
580 | */ | 585 | */ |
581 | alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type); | 586 | alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type); |
582 | } | 587 | } |
588 | |||
589 | if ((stash_phys >= PHYS_OFFSET) && (stash_phys < lowmem_limit)) { | ||
590 | update_page_count(PG_LEVEL_2M, pages_2m); | ||
591 | update_page_count(PG_LEVEL_4K, pages_4k); | ||
592 | } | ||
583 | } | 593 | } |
584 | 594 | ||
585 | static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end, | 595 | static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end, |
@@ -757,7 +767,7 @@ static int __init early_vmalloc(char *arg) | |||
757 | } | 767 | } |
758 | early_param("vmalloc", early_vmalloc); | 768 | early_param("vmalloc", early_vmalloc); |
759 | 769 | ||
760 | static phys_addr_t lowmem_limit __initdata = 0; | 770 | phys_addr_t lowmem_limit; |
761 | 771 | ||
762 | void __init sanity_check_meminfo(void) | 772 | void __init sanity_check_meminfo(void) |
763 | { | 773 | { |
diff --git a/arch/arm/mm/pabort-legacy.S b/arch/arm/mm/pabort-legacy.S index 87970eba88e..8bbff025269 100644 --- a/arch/arm/mm/pabort-legacy.S +++ b/arch/arm/mm/pabort-legacy.S | |||
@@ -4,16 +4,18 @@ | |||
4 | /* | 4 | /* |
5 | * Function: legacy_pabort | 5 | * Function: legacy_pabort |
6 | * | 6 | * |
7 | * Params : r0 = address of aborted instruction | 7 | * Params : r2 = pt_regs |
8 | * : r4 = address of aborted instruction | ||
9 | * : r5 = psr for parent context | ||
8 | * | 10 | * |
9 | * Returns : r0 = address of abort | 11 | * Returns : r4 - r11, r13 preserved |
10 | * : r1 = Simulated IFSR with section translation fault status | ||
11 | * | 12 | * |
12 | * Purpose : obtain information about current prefetch abort. | 13 | * Purpose : obtain information about current prefetch abort. |
13 | */ | 14 | */ |
14 | 15 | ||
15 | .align 5 | 16 | .align 5 |
16 | ENTRY(legacy_pabort) | 17 | ENTRY(legacy_pabort) |
18 | mov r0, r4 | ||
17 | mov r1, #5 | 19 | mov r1, #5 |
18 | mov pc, lr | 20 | b do_PrefetchAbort |
19 | ENDPROC(legacy_pabort) | 21 | ENDPROC(legacy_pabort) |
diff --git a/arch/arm/mm/pabort-v6.S b/arch/arm/mm/pabort-v6.S index 06e3d1ef211..9627646ce78 100644 --- a/arch/arm/mm/pabort-v6.S +++ b/arch/arm/mm/pabort-v6.S | |||
@@ -4,16 +4,18 @@ | |||
4 | /* | 4 | /* |
5 | * Function: v6_pabort | 5 | * Function: v6_pabort |
6 | * | 6 | * |
7 | * Params : r0 = address of aborted instruction | 7 | * Params : r2 = pt_regs |
8 | * : r4 = address of aborted instruction | ||
9 | * : r5 = psr for parent context | ||
8 | * | 10 | * |
9 | * Returns : r0 = address of abort | 11 | * Returns : r4 - r11, r13 preserved |
10 | * : r1 = IFSR | ||
11 | * | 12 | * |
12 | * Purpose : obtain information about current prefetch abort. | 13 | * Purpose : obtain information about current prefetch abort. |
13 | */ | 14 | */ |
14 | 15 | ||
15 | .align 5 | 16 | .align 5 |
16 | ENTRY(v6_pabort) | 17 | ENTRY(v6_pabort) |
18 | mov r0, r4 | ||
17 | mrc p15, 0, r1, c5, c0, 1 @ get IFSR | 19 | mrc p15, 0, r1, c5, c0, 1 @ get IFSR |
18 | mov pc, lr | 20 | b do_PrefetchAbort |
19 | ENDPROC(v6_pabort) | 21 | ENDPROC(v6_pabort) |
diff --git a/arch/arm/mm/pabort-v7.S b/arch/arm/mm/pabort-v7.S index a8b3b300a18..875761f44f3 100644 --- a/arch/arm/mm/pabort-v7.S +++ b/arch/arm/mm/pabort-v7.S | |||
@@ -2,12 +2,13 @@ | |||
2 | #include <asm/assembler.h> | 2 | #include <asm/assembler.h> |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Function: v6_pabort | 5 | * Function: v7_pabort |
6 | * | 6 | * |
7 | * Params : r0 = address of aborted instruction | 7 | * Params : r2 = pt_regs |
8 | * : r4 = address of aborted instruction | ||
9 | * : r5 = psr for parent context | ||
8 | * | 10 | * |
9 | * Returns : r0 = address of abort | 11 | * Returns : r4 - r11, r13 preserved |
10 | * : r1 = IFSR | ||
11 | * | 12 | * |
12 | * Purpose : obtain information about current prefetch abort. | 13 | * Purpose : obtain information about current prefetch abort. |
13 | */ | 14 | */ |
@@ -16,5 +17,5 @@ | |||
16 | ENTRY(v7_pabort) | 17 | ENTRY(v7_pabort) |
17 | mrc p15, 0, r0, c6, c0, 2 @ get IFAR | 18 | mrc p15, 0, r0, c6, c0, 2 @ get IFAR |
18 | mrc p15, 0, r1, c5, c0, 1 @ get IFSR | 19 | mrc p15, 0, r1, c5, c0, 1 @ get IFSR |
19 | mov pc, lr | 20 | b do_PrefetchAbort |
20 | ENDPROC(v7_pabort) | 21 | ENDPROC(v7_pabort) |
diff --git a/arch/arm/mm/pageattr.c b/arch/arm/mm/pageattr.c new file mode 100644 index 00000000000..5f8071110e8 --- /dev/null +++ b/arch/arm/mm/pageattr.c | |||
@@ -0,0 +1,1076 @@ | |||
1 | /* | ||
2 | * Copyright 2002 Andi Kleen, SuSE Labs. | ||
3 | * Thanks to Ben LaHaise for precious feedback. | ||
4 | */ | ||
5 | #include <linux/highmem.h> | ||
6 | #include <linux/bootmem.h> | ||
7 | #include <linux/module.h> | ||
8 | #include <linux/sched.h> | ||
9 | #include <linux/mm.h> | ||
10 | #include <linux/interrupt.h> | ||
11 | #include <linux/seq_file.h> | ||
12 | #include <linux/debugfs.h> | ||
13 | #include <linux/pfn.h> | ||
14 | #include <linux/percpu.h> | ||
15 | #include <linux/gfp.h> | ||
16 | #include <linux/vmalloc.h> | ||
17 | #include <linux/mutex.h> | ||
18 | |||
19 | #include <asm/processor.h> | ||
20 | #include <asm/tlbflush.h> | ||
21 | #include <asm/sections.h> | ||
22 | #include <asm/setup.h> | ||
23 | #include <asm/uaccess.h> | ||
24 | #include <asm/pgalloc.h> | ||
25 | |||
26 | #ifdef CPA_DEBUG | ||
27 | #define cpa_debug(x, ...) printk(x, __VA_ARGS__) | ||
28 | #else | ||
29 | #define cpa_debug(x, ...) | ||
30 | #endif | ||
31 | |||
32 | #define FLUSH_CLEAN_BY_SET_WAY_PAGE_THRESHOLD 8 | ||
33 | extern void v7_flush_kern_cache_all(void *); | ||
34 | extern void __flush_dcache_page(struct address_space *, struct page *); | ||
35 | |||
36 | static void inner_flush_cache_all(void) | ||
37 | { | ||
38 | on_each_cpu(v7_flush_kern_cache_all, NULL, 1); | ||
39 | } | ||
40 | |||
41 | #if defined(CONFIG_CPA) | ||
42 | /* | ||
43 | * The current flushing context - we pass it instead of 5 arguments: | ||
44 | */ | ||
45 | struct cpa_data { | ||
46 | unsigned long *vaddr; | ||
47 | pgprot_t mask_set; | ||
48 | pgprot_t mask_clr; | ||
49 | int numpages; | ||
50 | int flags; | ||
51 | unsigned long pfn; | ||
52 | unsigned force_split:1; | ||
53 | int curpage; | ||
54 | struct page **pages; | ||
55 | }; | ||
56 | |||
57 | /* | ||
58 | * Serialize cpa() (for !DEBUG_PAGEALLOC which uses large identity mappings) | ||
59 | * using cpa_lock. So that we don't allow any other cpu, with stale large tlb | ||
60 | * entries change the page attribute in parallel to some other cpu | ||
61 | * splitting a large page entry along with changing the attribute. | ||
62 | */ | ||
63 | static DEFINE_MUTEX(cpa_lock); | ||
64 | |||
65 | #define CPA_FLUSHTLB 1 | ||
66 | #define CPA_ARRAY 2 | ||
67 | #define CPA_PAGES_ARRAY 4 | ||
68 | |||
69 | #ifdef CONFIG_PROC_FS | ||
70 | static unsigned long direct_pages_count[PG_LEVEL_NUM]; | ||
71 | |||
72 | void update_page_count(int level, unsigned long pages) | ||
73 | { | ||
74 | unsigned long flags; | ||
75 | |||
76 | /* Protect against CPA */ | ||
77 | spin_lock_irqsave(&pgd_lock, flags); | ||
78 | direct_pages_count[level] += pages; | ||
79 | spin_unlock_irqrestore(&pgd_lock, flags); | ||
80 | } | ||
81 | |||
82 | static void split_page_count(int level) | ||
83 | { | ||
84 | direct_pages_count[level]--; | ||
85 | direct_pages_count[level - 1] += PTRS_PER_PTE; | ||
86 | } | ||
87 | |||
88 | void arch_report_meminfo(struct seq_file *m) | ||
89 | { | ||
90 | seq_printf(m, "DirectMap4k: %8lu kB\n", | ||
91 | direct_pages_count[PG_LEVEL_4K] << 2); | ||
92 | seq_printf(m, "DirectMap2M: %8lu kB\n", | ||
93 | direct_pages_count[PG_LEVEL_2M] << 11); | ||
94 | } | ||
95 | #else | ||
96 | static inline void split_page_count(int level) { } | ||
97 | #endif | ||
98 | |||
99 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
100 | # define debug_pagealloc 1 | ||
101 | #else | ||
102 | # define debug_pagealloc 0 | ||
103 | #endif | ||
104 | |||
105 | static inline int | ||
106 | within(unsigned long addr, unsigned long start, unsigned long end) | ||
107 | { | ||
108 | return addr >= start && addr < end; | ||
109 | } | ||
110 | |||
111 | static void cpa_flush_range(unsigned long start, int numpages, int cache) | ||
112 | { | ||
113 | unsigned int i, level; | ||
114 | unsigned long addr; | ||
115 | |||
116 | BUG_ON(irqs_disabled()); | ||
117 | WARN_ON(PAGE_ALIGN(start) != start); | ||
118 | |||
119 | flush_tlb_kernel_range(start, start + (numpages << PAGE_SHIFT)); | ||
120 | |||
121 | if (!cache) | ||
122 | return; | ||
123 | |||
124 | for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) { | ||
125 | pte_t *pte = lookup_address(addr, &level); | ||
126 | |||
127 | /* | ||
128 | * Only flush present addresses: | ||
129 | */ | ||
130 | if (pte && pte_present(*pte)) { | ||
131 | __cpuc_flush_dcache_area((void *) addr, PAGE_SIZE); | ||
132 | outer_flush_range(__pa((void *)addr), | ||
133 | __pa((void *)addr) + PAGE_SIZE); | ||
134 | } | ||
135 | } | ||
136 | } | ||
137 | |||
138 | static void cpa_flush_array(unsigned long *start, int numpages, int cache, | ||
139 | int in_flags, struct page **pages) | ||
140 | { | ||
141 | unsigned int i, level; | ||
142 | bool flush_inner = true; | ||
143 | unsigned long base; | ||
144 | |||
145 | BUG_ON(irqs_disabled()); | ||
146 | |||
147 | if (numpages >= FLUSH_CLEAN_BY_SET_WAY_PAGE_THRESHOLD && | ||
148 | cache && in_flags & CPA_PAGES_ARRAY) { | ||
149 | inner_flush_cache_all(); | ||
150 | flush_inner = false; | ||
151 | } | ||
152 | |||
153 | for (i = 0; i < numpages; i++) { | ||
154 | unsigned long addr; | ||
155 | pte_t *pte; | ||
156 | |||
157 | if (in_flags & CPA_PAGES_ARRAY) | ||
158 | addr = (unsigned long)page_address(pages[i]); | ||
159 | else | ||
160 | addr = start[i]; | ||
161 | |||
162 | flush_tlb_kernel_range(addr, addr + PAGE_SIZE); | ||
163 | |||
164 | if (cache && in_flags & CPA_PAGES_ARRAY) { | ||
165 | /* cache flush all pages including high mem pages. */ | ||
166 | if (flush_inner) | ||
167 | __flush_dcache_page( | ||
168 | page_mapping(pages[i]), pages[i]); | ||
169 | base = page_to_phys(pages[i]); | ||
170 | outer_flush_range(base, base + PAGE_SIZE); | ||
171 | } else if (cache) { | ||
172 | pte = lookup_address(addr, &level); | ||
173 | |||
174 | /* | ||
175 | * Only flush present addresses: | ||
176 | */ | ||
177 | if (pte && pte_present(*pte)) { | ||
178 | __cpuc_flush_dcache_area((void *)addr, | ||
179 | PAGE_SIZE); | ||
180 | outer_flush_range(__pa((void *)addr), | ||
181 | __pa((void *)addr) + PAGE_SIZE); | ||
182 | } | ||
183 | } | ||
184 | } | ||
185 | } | ||
186 | |||
187 | /* | ||
188 | * Certain areas of memory require very specific protection flags, | ||
189 | * for example the kernel text. Callers don't always get this | ||
190 | * right so this function checks and fixes these known static | ||
191 | * required protection bits. | ||
192 | */ | ||
193 | static inline pgprot_t static_protections(pgprot_t prot, unsigned long address, | ||
194 | unsigned long pfn) | ||
195 | { | ||
196 | pgprot_t forbidden = __pgprot(0); | ||
197 | |||
198 | /* | ||
199 | * The kernel text needs to be executable for obvious reasons | ||
200 | * Does not cover __inittext since that is gone later on. | ||
201 | */ | ||
202 | if (within(address, (unsigned long)_text, (unsigned long)_etext)) | ||
203 | pgprot_val(forbidden) |= L_PTE_XN; | ||
204 | |||
205 | /* | ||
206 | * The .rodata section needs to be read-only. Using the pfn | ||
207 | * catches all aliases. | ||
208 | */ | ||
209 | if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT, | ||
210 | __pa((unsigned long)__end_rodata) >> PAGE_SHIFT)) | ||
211 | prot |= L_PTE_RDONLY; | ||
212 | |||
213 | /* | ||
214 | * Mask off the forbidden bits and set the bits that are needed | ||
215 | */ | ||
216 | prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden)); | ||
217 | |||
218 | |||
219 | return prot; | ||
220 | } | ||
221 | |||
222 | static inline pgprot_t pte_to_pmd_pgprot(unsigned long pte, | ||
223 | unsigned long ext_prot) | ||
224 | { | ||
225 | pgprot_t ref_prot; | ||
226 | |||
227 | ref_prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE; | ||
228 | |||
229 | if (pte & L_PTE_MT_BUFFERABLE) | ||
230 | ref_prot |= PMD_SECT_BUFFERABLE; | ||
231 | |||
232 | if (pte & L_PTE_MT_WRITETHROUGH) | ||
233 | ref_prot |= PMD_SECT_CACHEABLE; | ||
234 | |||
235 | if (pte & L_PTE_SHARED) | ||
236 | ref_prot |= PMD_SECT_S; | ||
237 | |||
238 | if (pte & L_PTE_XN) | ||
239 | ref_prot |= PMD_SECT_XN; | ||
240 | |||
241 | if (pte & L_PTE_RDONLY) | ||
242 | ref_prot &= ~PMD_SECT_AP_WRITE; | ||
243 | |||
244 | ref_prot |= (ext_prot & (PTE_EXT_AP0 | PTE_EXT_AP1 | PTE_EXT_APX | | ||
245 | PTE_EXT_NG | (7 << 6))) << 6; | ||
246 | |||
247 | return ref_prot; | ||
248 | } | ||
249 | |||
250 | static inline pgprot_t pmd_to_pte_pgprot(unsigned long pmd, | ||
251 | unsigned long *ext_prot) | ||
252 | { | ||
253 | pgprot_t ref_prot = 0; | ||
254 | |||
255 | ref_prot |= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_RDONLY; | ||
256 | |||
257 | if (pmd & PMD_SECT_BUFFERABLE) | ||
258 | ref_prot |= L_PTE_MT_BUFFERABLE; | ||
259 | |||
260 | if (pmd & PMD_SECT_CACHEABLE) | ||
261 | ref_prot |= L_PTE_MT_WRITETHROUGH; | ||
262 | |||
263 | if (pmd & PMD_SECT_S) | ||
264 | ref_prot |= L_PTE_SHARED; | ||
265 | |||
266 | if (pmd & PMD_SECT_XN) | ||
267 | ref_prot |= L_PTE_XN; | ||
268 | |||
269 | if (pmd & PMD_SECT_AP_WRITE) | ||
270 | ref_prot &= ~L_PTE_RDONLY; | ||
271 | |||
272 | /* AP/APX/TEX bits */ | ||
273 | *ext_prot = (pmd & (PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | | ||
274 | PMD_SECT_APX | PMD_SECT_nG | (7 << 12))) >> 6; | ||
275 | |||
276 | return ref_prot; | ||
277 | } | ||
278 | |||
279 | /* | ||
280 | * Lookup the page table entry for a virtual address. Return a pointer | ||
281 | * to the entry and the level of the mapping. | ||
282 | * | ||
283 | * Note: We return pud and pmd either when the entry is marked large | ||
284 | * or when the present bit is not set. Otherwise we would return a | ||
285 | * pointer to a nonexisting mapping. | ||
286 | */ | ||
287 | pte_t *lookup_address(unsigned long address, unsigned int *level) | ||
288 | { | ||
289 | pgd_t *pgd = pgd_offset_k(address); | ||
290 | pte_t *pte; | ||
291 | pmd_t *pmd; | ||
292 | |||
293 | /* pmds are folded into pgds on ARM */ | ||
294 | *level = PG_LEVEL_NONE; | ||
295 | |||
296 | if (pgd == NULL || pgd_none(*pgd)) | ||
297 | return NULL; | ||
298 | |||
299 | pmd = pmd_offset(pgd, address); | ||
300 | |||
301 | if (pmd == NULL || pmd_none(*pmd) || !pmd_present(*pmd)) | ||
302 | return NULL; | ||
303 | |||
304 | if (((pmd_val(*pmd) & (PMD_TYPE_SECT | PMD_SECT_SUPER)) | ||
305 | == (PMD_TYPE_SECT | PMD_SECT_SUPER)) || !pmd_present(*pmd)) { | ||
306 | |||
307 | return NULL; | ||
308 | } else if (pmd_val(*pmd) & PMD_TYPE_SECT) { | ||
309 | |||
310 | *level = PG_LEVEL_2M; | ||
311 | return (pte_t *)pmd; | ||
312 | } | ||
313 | |||
314 | pte = pte_offset_kernel(pmd, address); | ||
315 | |||
316 | if ((pte == NULL) || pte_none(*pte)) | ||
317 | return NULL; | ||
318 | |||
319 | *level = PG_LEVEL_4K; | ||
320 | |||
321 | return pte; | ||
322 | } | ||
323 | EXPORT_SYMBOL_GPL(lookup_address); | ||
324 | |||
325 | /* | ||
326 | * Set the new pmd in all the pgds we know about: | ||
327 | */ | ||
328 | static void __set_pmd_pte(pmd_t *pmd, unsigned long address, pte_t *pte) | ||
329 | { | ||
330 | struct page *page; | ||
331 | |||
332 | cpa_debug("__set_pmd_pte %x %x %x\n", pmd, pte, *pte); | ||
333 | |||
334 | /* change init_mm */ | ||
335 | pmd_populate_kernel(&init_mm, pmd, pte); | ||
336 | |||
337 | /* change entry in all the pgd's */ | ||
338 | list_for_each_entry(page, &pgd_list, lru) { | ||
339 | cpa_debug("list %x %x %x\n", (unsigned long)page, | ||
340 | (unsigned long)pgd_index(address), address); | ||
341 | pmd = pmd_offset(((pgd_t *)page_address(page)) + | ||
342 | pgd_index(address), address); | ||
343 | pmd_populate_kernel(NULL, pmd, pte); | ||
344 | } | ||
345 | |||
346 | } | ||
347 | |||
348 | static int | ||
349 | try_preserve_large_page(pte_t *kpte, unsigned long address, | ||
350 | struct cpa_data *cpa) | ||
351 | { | ||
352 | unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn; | ||
353 | pte_t old_pte, *tmp; | ||
354 | pgprot_t old_prot, new_prot, ext_prot, req_prot; | ||
355 | int i, do_split = 1; | ||
356 | unsigned int level; | ||
357 | |||
358 | if (cpa->force_split) | ||
359 | return 1; | ||
360 | |||
361 | spin_lock_irqsave(&pgd_lock, flags); | ||
362 | /* | ||
363 | * Check for races, another CPU might have split this page | ||
364 | * up already: | ||
365 | */ | ||
366 | tmp = lookup_address(address, &level); | ||
367 | if (tmp != kpte) | ||
368 | goto out_unlock; | ||
369 | |||
370 | switch (level) { | ||
371 | |||
372 | case PG_LEVEL_2M: | ||
373 | psize = PMD_SIZE; | ||
374 | pmask = PMD_MASK; | ||
375 | break; | ||
376 | |||
377 | default: | ||
378 | do_split = -EINVAL; | ||
379 | goto out_unlock; | ||
380 | } | ||
381 | |||
382 | /* | ||
383 | * Calculate the number of pages, which fit into this large | ||
384 | * page starting at address: | ||
385 | */ | ||
386 | nextpage_addr = (address + psize) & pmask; | ||
387 | numpages = (nextpage_addr - address) >> PAGE_SHIFT; | ||
388 | if (numpages < cpa->numpages) | ||
389 | cpa->numpages = numpages; | ||
390 | |||
391 | old_prot = new_prot = req_prot = pmd_to_pte_pgprot(pmd_val(*kpte), | ||
392 | &ext_prot); | ||
393 | |||
394 | pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr); | ||
395 | pgprot_val(req_prot) |= pgprot_val(cpa->mask_set); | ||
396 | |||
397 | /* | ||
398 | * old_pte points to the large page base address. So we need | ||
399 | * to add the offset of the virtual address: | ||
400 | */ | ||
401 | pfn = pmd_pfn(*kpte) + ((address & (psize - 1)) >> PAGE_SHIFT); | ||
402 | cpa->pfn = pfn; | ||
403 | |||
404 | new_prot = static_protections(req_prot, address, pfn); | ||
405 | |||
406 | /* | ||
407 | * We need to check the full range, whether | ||
408 | * static_protection() requires a different pgprot for one of | ||
409 | * the pages in the range we try to preserve: | ||
410 | */ | ||
411 | addr = address & pmask; | ||
412 | pfn = pmd_pfn(old_pte); | ||
413 | for (i = 0; i < (psize >> PAGE_SHIFT); i++, addr += PAGE_SIZE, pfn++) { | ||
414 | pgprot_t chk_prot = static_protections(req_prot, addr, pfn); | ||
415 | |||
416 | if (pgprot_val(chk_prot) != pgprot_val(new_prot)) | ||
417 | goto out_unlock; | ||
418 | } | ||
419 | |||
420 | /* | ||
421 | * If there are no changes, return. maxpages has been updated | ||
422 | * above: | ||
423 | */ | ||
424 | if (pgprot_val(new_prot) == pgprot_val(old_prot)) { | ||
425 | do_split = 0; | ||
426 | goto out_unlock; | ||
427 | } | ||
428 | |||
429 | /* | ||
430 | * convert prot to pmd format | ||
431 | */ | ||
432 | new_prot = pte_to_pmd_pgprot(new_prot, ext_prot); | ||
433 | |||
434 | /* | ||
435 | * We need to change the attributes. Check, whether we can | ||
436 | * change the large page in one go. We request a split, when | ||
437 | * the address is not aligned and the number of pages is | ||
438 | * smaller than the number of pages in the large page. Note | ||
439 | * that we limited the number of possible pages already to | ||
440 | * the number of pages in the large page. | ||
441 | */ | ||
442 | if (address == (nextpage_addr - psize) && cpa->numpages == numpages) { | ||
443 | /* | ||
444 | * The address is aligned and the number of pages | ||
445 | * covers the full page. | ||
446 | */ | ||
447 | phys_addr_t phys = __pfn_to_phys(pmd_pfn(*kpte)); | ||
448 | pmd_t *p = (pmd_t *)kpte; | ||
449 | |||
450 | *kpte++ = __pmd(phys | new_prot); | ||
451 | *kpte = __pmd((phys + SECTION_SIZE) | new_prot); | ||
452 | flush_pmd_entry(p); | ||
453 | cpa->flags |= CPA_FLUSHTLB; | ||
454 | do_split = 0; | ||
455 | cpa_debug("preserving page at phys %x pmd %x\n", phys, p); | ||
456 | } | ||
457 | |||
458 | out_unlock: | ||
459 | spin_unlock_irqrestore(&pgd_lock, flags); | ||
460 | |||
461 | return do_split; | ||
462 | } | ||
463 | |||
/*
 * split_large_page - replace the 2MB section mapping covering @address
 * with a freshly allocated table of 4K PTEs mapping the same physical
 * range with equivalent attributes.
 *
 * Returns 0 on success (or if another CPU already split the entry),
 * -ENOMEM if the PTE table cannot be allocated, -EINVAL if the entry
 * is not a 2MB mapping.
 */
static int split_large_page(pte_t *kpte, unsigned long address)
{
	unsigned long flags, pfn, pfninc = 1;
	unsigned int i, level;
	pte_t *pbase, *tmp;
	pgprot_t ref_prot = 0, ext_prot = 0;
	int ret = 0;

	/* Allocate the new PTE table before taking the spinlock. */
	pbase = pte_alloc_one_kernel(&init_mm, address);
	if (!pbase)
		return -ENOMEM;

	cpa_debug("split_large_page %x PMD %x new pte @ %x\n", address,
		  *kpte, pbase);

	spin_lock_irqsave(&pgd_lock, flags);
	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte)
		goto out_unlock;

	/*
	 * we only split 2MB entries for now
	 */
	if (level != PG_LEVEL_2M) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Derive the per-4K-page protection from the section entry. */
	ref_prot = pmd_to_pte_pgprot(pmd_val(*kpte), &ext_prot);

	/*
	 * Get the target pfn from the original entry:
	 */
	pfn = pmd_pfn(*kpte);
	/* Populate all 4K entries with consecutive pfns and same prot. */
	for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
		set_pte_ext(&pbase[i], pfn_pte(pfn, ref_prot), ext_prot);

	/* Account the split only for lowmem (direct-mapped) addresses. */
	if (address >= (unsigned long)__va(0) &&
		address < (unsigned long)__va(lowmem_limit))
		split_page_count(level);

	/*
	 * Install the new, split up pagetable.
	 */
	__set_pmd_pte((pmd_t *)kpte, address, pbase);

	/* Ownership transferred to the page table; don't free below. */
	pbase = NULL;

out_unlock:
	/*
	 * If we dropped out via the lookup_address check under
	 * pgd_lock then stick the page back into the pool:
	 */
	if (pbase)
		pte_free_kernel(&init_mm, pbase);

	spin_unlock_irqrestore(&pgd_lock, flags);

	return ret;
}
528 | |||
529 | static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr, | ||
530 | int primary) | ||
531 | { | ||
532 | /* | ||
533 | * Ignore all non primary paths. | ||
534 | */ | ||
535 | if (!primary) | ||
536 | return 0; | ||
537 | |||
538 | /* | ||
539 | * Ignore the NULL PTE for kernel identity mapping, as it is expected | ||
540 | * to have holes. | ||
541 | * Also set numpages to '1' indicating that we processed cpa req for | ||
542 | * one virtual address page and its pfn. TBD: numpages can be set based | ||
543 | * on the initial value and the level returned by lookup_address(). | ||
544 | */ | ||
545 | if (within(vaddr, PAGE_OFFSET, | ||
546 | PAGE_OFFSET + lowmem_limit)) { | ||
547 | cpa->numpages = 1; | ||
548 | cpa->pfn = __pa(vaddr) >> PAGE_SHIFT; | ||
549 | return 0; | ||
550 | } else { | ||
551 | WARN(1, KERN_WARNING "CPA: called for zero pte. " | ||
552 | "vaddr = %lx cpa->vaddr = %lx\n", vaddr, | ||
553 | *cpa->vaddr); | ||
554 | |||
555 | return -EFAULT; | ||
556 | } | ||
557 | } | ||
558 | |||
/*
 * __change_page_attr - apply cpa->mask_set/mask_clr to the single
 * address selected by cpa->curpage (array modes) or *cpa->vaddr.
 *
 * 4K mappings are rewritten directly; 2MB mappings are either
 * preserved in place (try_preserve_large_page) or split and retried.
 * Returns 0 on success or a negative errno.
 */
static int __change_page_attr(struct cpa_data *cpa, int primary)
{
	unsigned long address;
	int do_split, err;
	unsigned int level;
	pte_t *kpte, old_pte;

	/* Pick the current address depending on the request mode. */
	if (cpa->flags & CPA_PAGES_ARRAY) {
		struct page *page = cpa->pages[cpa->curpage];

		/* Highmem pages have no permanent kernel mapping. */
		if (unlikely(PageHighMem(page)))
			return 0;

		address = (unsigned long)page_address(page);

	} else if (cpa->flags & CPA_ARRAY)
		address = cpa->vaddr[cpa->curpage];
	else
		address = *cpa->vaddr;

repeat:
	kpte = lookup_address(address, &level);
	if (!kpte)
		return __cpa_process_fault(cpa, address, primary);

	old_pte = *kpte;
	if (!pte_val(old_pte))
		return __cpa_process_fault(cpa, address, primary);

	if (level == PG_LEVEL_4K) {
		pte_t new_pte;
		pgprot_t new_prot = pte_pgprot(old_pte);
		unsigned long pfn = pte_pfn(old_pte);

		/* Apply the requested clears, then the requested sets. */
		pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
		pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

		new_prot = static_protections(new_prot, address, pfn);

		/*
		 * We need to keep the pfn from the existing PTE,
		 * after all we're only going to change it's attributes
		 * not the memory it points to
		 */
		new_pte = pfn_pte(pfn, new_prot);
		cpa->pfn = pfn;

		/*
		 * Do we really change anything ?
		 */
		if (pte_val(old_pte) != pte_val(new_pte)) {
			set_pte_ext(kpte, new_pte, 0);
			/*
			 * FIXME : is this needed on arm?
			 * set_pte_ext already does a flush
			 */
			cpa->flags |= CPA_FLUSHTLB;
		}
		cpa->numpages = 1;
		return 0;
	}

	/*
	 * Check, whether we can keep the large page intact
	 * and just change the pte:
	 */
	do_split = try_preserve_large_page(kpte, address, cpa);

	/*
	 * When the range fits into the existing large page,
	 * return. cp->numpages and cpa->tlbflush have been updated in
	 * try_large_page:
	 */
	if (do_split <= 0)
		return do_split;

	/*
	 * We have to split the large page:
	 */
	err = split_large_page(kpte, address);

	if (!err) {
		/*
		 * Do a global flush tlb after splitting the large page
		 * and before we do the actual change page attribute in the PTE.
		 *
		 * With out this, we violate the TLB application note, that says
		 * "The TLBs may contain both ordinary and large-page
		 * translations for a 4-KByte range of linear addresses. This
		 * may occur if software modifies the paging structures so that
		 * the page size used for the address range changes. If the two
		 * translations differ with respect to page frame or attributes
		 * (e.g., permissions), processor behavior is undefined and may
		 * be implementation-specific."
		 *
		 * We do this global tlb flush inside the cpa_lock, so that we
		 * don't allow any other cpu, with stale tlb entries change the
		 * page attribute in parallel, that also falls into the
		 * just split large page entry.
		 */
		flush_tlb_all();
		/* Entry is now 4K-mapped; redo the lookup and change. */
		goto repeat;
	}

	return err;
}
665 | |||
666 | static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias); | ||
667 | |||
/*
 * cpa_process_alias - after changing attributes on some virtual
 * address, repeat the change on the kernel lowmem (direct) mapping of
 * the same pfn so the two mappings do not end up with conflicting
 * memory types. No-op for pfns above lowmem or when the primary call
 * already targeted the direct mapping.
 */
static int cpa_process_alias(struct cpa_data *cpa)
{
	struct cpa_data alias_cpa;
	/* Lowmem virtual address aliasing the pfn just changed. */
	unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT);
	unsigned long vaddr;
	int ret;

	/* pfn not covered by the direct mapping: nothing to alias. */
	if (cpa->pfn >= (lowmem_limit >> PAGE_SHIFT))
		return 0;

	/*
	 * No need to redo, when the primary call touched the direct
	 * mapping already:
	 */
	if (cpa->flags & CPA_PAGES_ARRAY) {
		struct page *page = cpa->pages[cpa->curpage];
		if (unlikely(PageHighMem(page)))
			return 0;
		vaddr = (unsigned long)page_address(page);
	} else if (cpa->flags & CPA_ARRAY)
		vaddr = cpa->vaddr[cpa->curpage];
	else
		vaddr = *cpa->vaddr;

	if (!(within(vaddr, PAGE_OFFSET,
		PAGE_OFFSET + lowmem_limit))) {

		/* Re-issue the same request against the alias address. */
		alias_cpa = *cpa;
		alias_cpa.vaddr = &laddr;
		alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);

		ret = __change_page_attr_set_clr(&alias_cpa, 0);
		if (ret)
			return ret;
	}

	return 0;
}
706 | |||
/*
 * __change_page_attr_set_clr - drive __change_page_attr() over the
 * whole request, advancing by however many pages each step consumed
 * (1 for a 4K change, up to a full section when a large page was
 * preserved). When @checkalias is set, each step is mirrored onto the
 * lowmem alias mapping. Returns 0 or the first error encountered.
 */
static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
{
	int ret, numpages = cpa->numpages;

	while (numpages) {
		/*
		 * Store the remaining nr of pages for the large page
		 * preservation check.
		 */
		cpa->numpages = numpages;
		/* for array changes, we can't use large page */
		if (cpa->flags & (CPA_ARRAY | CPA_PAGES_ARRAY))
			cpa->numpages = 1;

		/* cpa_lock serializes splits; skipped under debug_pagealloc. */
		if (!debug_pagealloc)
			mutex_lock(&cpa_lock);
		ret = __change_page_attr(cpa, checkalias);
		if (!debug_pagealloc)
			mutex_unlock(&cpa_lock);
		if (ret)
			return ret;

		if (checkalias) {
			ret = cpa_process_alias(cpa);
			if (ret)
				return ret;
		}

		/*
		 * Adjust the number of pages with the result of the
		 * CPA operation. Either a large page has been
		 * preserved or a single page update happened.
		 */
		BUG_ON(cpa->numpages > numpages);
		numpages -= cpa->numpages;
		if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
			cpa->curpage++;
		else
			*cpa->vaddr += cpa->numpages * PAGE_SIZE;
	}
	return 0;
}
749 | |||
750 | static inline int cache_attr(pgprot_t attr) | ||
751 | { | ||
752 | /* | ||
753 | * We need to flush the cache for all memory type changes | ||
754 | * except when a page is being marked write back cacheable | ||
755 | */ | ||
756 | return !((pgprot_val(attr) & L_PTE_MT_MASK) == L_PTE_MT_WRITEBACK); | ||
757 | } | ||
758 | |||
/*
 * change_page_attr_set_clr - top-level CPA entry: validate/align the
 * request, flush aliasing kernel mappings, run the attribute change,
 * then flush TLBs/caches as needed.
 *
 * @addr:        single address, or array when CPA_ARRAY
 * @numpages:    number of pages to change
 * @mask_set:    pte bits to set
 * @mask_clr:    pte bits to clear
 * @force_split: always split large pages
 * @in_flag:     CPA_ARRAY / CPA_PAGES_ARRAY mode selection
 * @pages:       page array when CPA_PAGES_ARRAY
 *
 * Returns 0 on success or a negative errno.
 */
static int change_page_attr_set_clr(unsigned long *addr, int numpages,
				    pgprot_t mask_set, pgprot_t mask_clr,
				    int force_split, int in_flag,
				    struct page **pages)
{
	struct cpa_data cpa;
	int ret, cache, checkalias;
	unsigned long baddr = 0;

	/* Nothing to set, clear, or split: trivially done. */
	if (!pgprot_val(mask_set) && !pgprot_val(mask_clr) && !force_split)
		return 0;

	/* Ensure we are PAGE_SIZE aligned */
	if (in_flag & CPA_ARRAY) {
		int i;
		for (i = 0; i < numpages; i++) {
			if (addr[i] & ~PAGE_MASK) {
				addr[i] &= PAGE_MASK;
				WARN_ON_ONCE(1);
			}
		}
	} else if (!(in_flag & CPA_PAGES_ARRAY)) {
		/*
		 * in_flag of CPA_PAGES_ARRAY implies it is aligned.
		 * No need to check in that case
		 */
		if (*addr & ~PAGE_MASK) {
			*addr &= PAGE_MASK;
			/*
			 * People should not be passing in unaligned addresses:
			 */
			WARN_ON_ONCE(1);
		}
		/*
		 * Save address for cache flush. *addr is modified in the call
		 * to __change_page_attr_set_clr() below.
		 */
		baddr = *addr;
	}

	/* Must avoid aliasing mappings in the highmem code */
	kmap_flush_unused();

	vm_unmap_aliases();

	cpa.vaddr = addr;
	cpa.pages = pages;
	cpa.numpages = numpages;
	cpa.mask_set = mask_set;
	cpa.mask_clr = mask_clr;
	cpa.flags = 0;
	cpa.curpage = 0;
	cpa.force_split = force_split;

	if (in_flag & (CPA_ARRAY | CPA_PAGES_ARRAY))
		cpa.flags |= in_flag;

	/* No alias checking for XN bit modifications */
	checkalias = (pgprot_val(mask_set) |
			pgprot_val(mask_clr)) != L_PTE_XN;

	ret = __change_page_attr_set_clr(&cpa, checkalias);

	cache = cache_attr(mask_set);
	/*
	 * Check whether we really changed something or
	 * cache need to be flushed.
	 */
	if (!(cpa.flags & CPA_FLUSHTLB) && !cache)
		goto out;

	if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
		cpa_flush_array(addr, numpages, cache,
				cpa.flags, pages);
	} else
		cpa_flush_range(baddr, numpages, cache);

out:
	return ret;
}
839 | |||
840 | static inline int change_page_attr_set(unsigned long *addr, int numpages, | ||
841 | pgprot_t mask, int array) | ||
842 | { | ||
843 | return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0, | ||
844 | (array ? CPA_ARRAY : 0), NULL); | ||
845 | } | ||
846 | |||
847 | static inline int change_page_attr_clear(unsigned long *addr, int numpages, | ||
848 | pgprot_t mask, int array) | ||
849 | { | ||
850 | return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0, | ||
851 | (array ? CPA_ARRAY : 0), NULL); | ||
852 | } | ||
853 | |||
854 | static inline int cpa_set_pages_array(struct page **pages, int numpages, | ||
855 | pgprot_t mask) | ||
856 | { | ||
857 | return change_page_attr_set_clr(NULL, numpages, mask, __pgprot(0), 0, | ||
858 | CPA_PAGES_ARRAY, pages); | ||
859 | } | ||
860 | |||
861 | static inline int cpa_clear_pages_array(struct page **pages, int numpages, | ||
862 | pgprot_t mask) | ||
863 | { | ||
864 | return change_page_attr_set_clr(NULL, numpages, __pgprot(0), mask, 0, | ||
865 | CPA_PAGES_ARRAY, pages); | ||
866 | } | ||
867 | |||
868 | int set_memory_uc(unsigned long addr, int numpages) | ||
869 | { | ||
870 | return change_page_attr_set_clr(&addr, numpages, | ||
871 | __pgprot(L_PTE_MT_UNCACHED), | ||
872 | __pgprot(L_PTE_MT_MASK), 0, 0, NULL); | ||
873 | } | ||
874 | EXPORT_SYMBOL(set_memory_uc); | ||
875 | |||
876 | int _set_memory_array(unsigned long *addr, int addrinarray, | ||
877 | unsigned long set, unsigned long clr) | ||
878 | { | ||
879 | return change_page_attr_set_clr(addr, addrinarray, __pgprot(set), | ||
880 | __pgprot(clr), 0, CPA_ARRAY, NULL); | ||
881 | } | ||
882 | |||
883 | int set_memory_array_uc(unsigned long *addr, int addrinarray) | ||
884 | { | ||
885 | return _set_memory_array(addr, addrinarray, | ||
886 | L_PTE_MT_UNCACHED, L_PTE_MT_MASK); | ||
887 | } | ||
888 | EXPORT_SYMBOL(set_memory_array_uc); | ||
889 | |||
890 | int set_memory_array_wc(unsigned long *addr, int addrinarray) | ||
891 | { | ||
892 | return _set_memory_array(addr, addrinarray, | ||
893 | L_PTE_MT_BUFFERABLE, L_PTE_MT_MASK); | ||
894 | } | ||
895 | EXPORT_SYMBOL(set_memory_array_wc); | ||
896 | |||
897 | int set_memory_wc(unsigned long addr, int numpages) | ||
898 | { | ||
899 | int ret; | ||
900 | |||
901 | ret = change_page_attr_set_clr(&addr, numpages, | ||
902 | __pgprot(L_PTE_MT_BUFFERABLE), | ||
903 | __pgprot(L_PTE_MT_MASK), | ||
904 | 0, 0, NULL); | ||
905 | return ret; | ||
906 | } | ||
907 | EXPORT_SYMBOL(set_memory_wc); | ||
908 | |||
909 | int set_memory_wb(unsigned long addr, int numpages) | ||
910 | { | ||
911 | return change_page_attr_set_clr(&addr, numpages, | ||
912 | __pgprot(L_PTE_MT_WRITEBACK), | ||
913 | __pgprot(L_PTE_MT_MASK), | ||
914 | 0, 0, NULL); | ||
915 | } | ||
916 | EXPORT_SYMBOL(set_memory_wb); | ||
917 | |||
918 | int set_memory_iwb(unsigned long addr, int numpages) | ||
919 | { | ||
920 | return change_page_attr_set_clr(&addr, numpages, | ||
921 | __pgprot(L_PTE_MT_INNER_WB), | ||
922 | __pgprot(L_PTE_MT_MASK), | ||
923 | 0, 0, NULL); | ||
924 | } | ||
925 | EXPORT_SYMBOL(set_memory_iwb); | ||
926 | |||
927 | int set_memory_array_wb(unsigned long *addr, int addrinarray) | ||
928 | { | ||
929 | return change_page_attr_set_clr(addr, addrinarray, | ||
930 | __pgprot(L_PTE_MT_WRITEBACK), | ||
931 | __pgprot(L_PTE_MT_MASK), | ||
932 | 0, CPA_ARRAY, NULL); | ||
933 | |||
934 | } | ||
935 | EXPORT_SYMBOL(set_memory_array_wb); | ||
936 | |||
937 | int set_memory_array_iwb(unsigned long *addr, int addrinarray) | ||
938 | { | ||
939 | return change_page_attr_set_clr(addr, addrinarray, | ||
940 | __pgprot(L_PTE_MT_INNER_WB), | ||
941 | __pgprot(L_PTE_MT_MASK), | ||
942 | 0, CPA_ARRAY, NULL); | ||
943 | |||
944 | } | ||
945 | EXPORT_SYMBOL(set_memory_array_iwb); | ||
946 | |||
947 | int set_memory_x(unsigned long addr, int numpages) | ||
948 | { | ||
949 | return change_page_attr_clear(&addr, numpages, | ||
950 | __pgprot(L_PTE_XN), 0); | ||
951 | } | ||
952 | EXPORT_SYMBOL(set_memory_x); | ||
953 | |||
954 | int set_memory_nx(unsigned long addr, int numpages) | ||
955 | { | ||
956 | return change_page_attr_set(&addr, numpages, | ||
957 | __pgprot(L_PTE_XN), 0); | ||
958 | } | ||
959 | EXPORT_SYMBOL(set_memory_nx); | ||
960 | |||
961 | int set_memory_ro(unsigned long addr, int numpages) | ||
962 | { | ||
963 | return change_page_attr_set(&addr, numpages, | ||
964 | __pgprot(L_PTE_RDONLY), 0); | ||
965 | } | ||
966 | EXPORT_SYMBOL_GPL(set_memory_ro); | ||
967 | |||
968 | int set_memory_rw(unsigned long addr, int numpages) | ||
969 | { | ||
970 | return change_page_attr_clear(&addr, numpages, | ||
971 | __pgprot(L_PTE_RDONLY), 0); | ||
972 | } | ||
973 | EXPORT_SYMBOL_GPL(set_memory_rw); | ||
974 | |||
975 | int set_memory_np(unsigned long addr, int numpages) | ||
976 | { | ||
977 | return change_page_attr_clear(&addr, numpages, | ||
978 | __pgprot(L_PTE_PRESENT), 0); | ||
979 | } | ||
980 | |||
981 | int set_memory_4k(unsigned long addr, int numpages) | ||
982 | { | ||
983 | return change_page_attr_set_clr(&addr, numpages, __pgprot(0), | ||
984 | __pgprot(0), 1, 0, NULL); | ||
985 | } | ||
986 | |||
987 | static int _set_pages_array(struct page **pages, int addrinarray, | ||
988 | unsigned long set, unsigned long clr) | ||
989 | { | ||
990 | return change_page_attr_set_clr(NULL, addrinarray, | ||
991 | __pgprot(set), | ||
992 | __pgprot(clr), | ||
993 | 0, CPA_PAGES_ARRAY, pages); | ||
994 | } | ||
995 | |||
996 | int set_pages_array_uc(struct page **pages, int addrinarray) | ||
997 | { | ||
998 | return _set_pages_array(pages, addrinarray, | ||
999 | L_PTE_MT_UNCACHED, L_PTE_MT_MASK); | ||
1000 | } | ||
1001 | EXPORT_SYMBOL(set_pages_array_uc); | ||
1002 | |||
1003 | int set_pages_array_wc(struct page **pages, int addrinarray) | ||
1004 | { | ||
1005 | return _set_pages_array(pages, addrinarray, L_PTE_MT_BUFFERABLE, | ||
1006 | L_PTE_MT_MASK); | ||
1007 | } | ||
1008 | EXPORT_SYMBOL(set_pages_array_wc); | ||
1009 | |||
1010 | int set_pages_array_wb(struct page **pages, int addrinarray) | ||
1011 | { | ||
1012 | return _set_pages_array(pages, addrinarray, | ||
1013 | L_PTE_MT_WRITEBACK, L_PTE_MT_MASK); | ||
1014 | } | ||
1015 | EXPORT_SYMBOL(set_pages_array_wb); | ||
1016 | |||
1017 | int set_pages_array_iwb(struct page **pages, int addrinarray) | ||
1018 | { | ||
1019 | return _set_pages_array(pages, addrinarray, | ||
1020 | L_PTE_MT_INNER_WB, L_PTE_MT_MASK); | ||
1021 | } | ||
1022 | EXPORT_SYMBOL(set_pages_array_iwb); | ||
1023 | |||
1024 | #else /* CONFIG_CPA */ | ||
1025 | |||
/* Stub when CONFIG_CPA is disabled: no split-page statistics are kept. */
void update_page_count(int level, unsigned long pages)
{
}
1029 | |||
/*
 * flush_cache - clean/invalidate the caches for an array of pages.
 * For batches at or above FLUSH_CLEAN_BY_SET_WAY_PAGE_THRESHOLD a
 * single full inner-cache flush replaces per-page inner flushing; the
 * outer (L2) cache is always flushed per page by physical range.
 */
static void flush_cache(struct page **pages, int numpages)
{
	unsigned int i;
	bool flush_inner = true;
	unsigned long base;

	if (numpages >= FLUSH_CLEAN_BY_SET_WAY_PAGE_THRESHOLD) {
		/* Cheaper to flush the whole inner cache once. */
		inner_flush_cache_all();
		flush_inner = false;
	}

	for (i = 0; i < numpages; i++) {
		if (flush_inner)
			__flush_dcache_page(page_mapping(pages[i]), pages[i]);
		base = page_to_phys(pages[i]);
		outer_flush_range(base, base + PAGE_SIZE);
	}
}
1048 | |||
/*
 * CONFIG_CPA disabled: page attributes are not changed; only the
 * caches are flushed for the given pages.
 */
int set_pages_array_uc(struct page **pages, int addrinarray)
{
	flush_cache(pages, addrinarray);
	return 0;
}
EXPORT_SYMBOL(set_pages_array_uc);
1055 | |||
/* CONFIG_CPA disabled: no attribute change, caches flushed only. */
int set_pages_array_wc(struct page **pages, int addrinarray)
{
	flush_cache(pages, addrinarray);
	return 0;
}
EXPORT_SYMBOL(set_pages_array_wc);
1062 | |||
/* CONFIG_CPA disabled: nothing to undo, so this is a no-op. */
int set_pages_array_wb(struct page **pages, int addrinarray)
{
	return 0;
}
EXPORT_SYMBOL(set_pages_array_wb);
1068 | |||
/* CONFIG_CPA disabled: no attribute change, caches flushed only. */
int set_pages_array_iwb(struct page **pages, int addrinarray)
{
	flush_cache(pages, addrinarray);
	return 0;
}
EXPORT_SYMBOL(set_pages_array_iwb);
1075 | |||
1076 | #endif | ||
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c index b2027c154b2..3e9503bb7bf 100644 --- a/arch/arm/mm/pgd.c +++ b/arch/arm/mm/pgd.c | |||
@@ -17,6 +17,23 @@ | |||
17 | 17 | ||
18 | #include "mm.h" | 18 | #include "mm.h" |
19 | 19 | ||
20 | DEFINE_SPINLOCK(pgd_lock); | ||
21 | LIST_HEAD(pgd_list); | ||
22 | |||
/*
 * Register a pgd on the global pgd_list so page-attribute code can
 * walk every page table. Callers take pgd_lock around this (see
 * pgd_alloc below).
 */
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}
29 | |||
/*
 * Remove a pgd from the global pgd_list. Callers take pgd_lock around
 * this (see pgd_free below).
 */
static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}
36 | |||
20 | /* | 37 | /* |
21 | * need to get a 16k page for level 1 | 38 | * need to get a 16k page for level 1 |
22 | */ | 39 | */ |
@@ -26,6 +43,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm) | |||
26 | pud_t *new_pud, *init_pud; | 43 | pud_t *new_pud, *init_pud; |
27 | pmd_t *new_pmd, *init_pmd; | 44 | pmd_t *new_pmd, *init_pmd; |
28 | pte_t *new_pte, *init_pte; | 45 | pte_t *new_pte, *init_pte; |
46 | unsigned long flags; | ||
29 | 47 | ||
30 | new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2); | 48 | new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2); |
31 | if (!new_pgd) | 49 | if (!new_pgd) |
@@ -33,6 +51,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm) | |||
33 | 51 | ||
34 | memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t)); | 52 | memset(new_pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t)); |
35 | 53 | ||
54 | spin_lock_irqsave(&pgd_lock, flags); | ||
36 | /* | 55 | /* |
37 | * Copy over the kernel and IO PGD entries | 56 | * Copy over the kernel and IO PGD entries |
38 | */ | 57 | */ |
@@ -40,7 +59,12 @@ pgd_t *pgd_alloc(struct mm_struct *mm) | |||
40 | memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD, | 59 | memcpy(new_pgd + USER_PTRS_PER_PGD, init_pgd + USER_PTRS_PER_PGD, |
41 | (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); | 60 | (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); |
42 | 61 | ||
62 | #if !defined(CONFIG_CPU_CACHE_V7) || !defined(CONFIG_SMP) | ||
43 | clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t)); | 63 | clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t)); |
64 | #endif | ||
65 | |||
66 | pgd_list_add(new_pgd); | ||
67 | spin_unlock_irqrestore(&pgd_lock, flags); | ||
44 | 68 | ||
45 | if (!vectors_high()) { | 69 | if (!vectors_high()) { |
46 | /* | 70 | /* |
@@ -74,6 +98,9 @@ no_pte: | |||
74 | no_pmd: | 98 | no_pmd: |
75 | pud_free(mm, new_pud); | 99 | pud_free(mm, new_pud); |
76 | no_pud: | 100 | no_pud: |
101 | spin_lock_irqsave(&pgd_lock, flags); | ||
102 | pgd_list_del(new_pgd); | ||
103 | spin_unlock_irqrestore(&pgd_lock, flags); | ||
77 | free_pages((unsigned long)new_pgd, 2); | 104 | free_pages((unsigned long)new_pgd, 2); |
78 | no_pgd: | 105 | no_pgd: |
79 | return NULL; | 106 | return NULL; |
@@ -85,10 +112,15 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd_base) | |||
85 | pud_t *pud; | 112 | pud_t *pud; |
86 | pmd_t *pmd; | 113 | pmd_t *pmd; |
87 | pgtable_t pte; | 114 | pgtable_t pte; |
115 | unsigned long flags; | ||
88 | 116 | ||
89 | if (!pgd_base) | 117 | if (!pgd_base) |
90 | return; | 118 | return; |
91 | 119 | ||
120 | spin_lock_irqsave(&pgd_lock, flags); | ||
121 | pgd_list_del(pgd_base); | ||
122 | spin_unlock_irqrestore(&pgd_lock, flags); | ||
123 | |||
92 | pgd = pgd_base + pgd_index(0); | 124 | pgd = pgd_base + pgd_index(0); |
93 | if (pgd_none_or_clear_bad(pgd)) | 125 | if (pgd_none_or_clear_bad(pgd)) |
94 | goto no_pgd; | 126 | goto no_pgd; |
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S index 6c4e7fd6c8a..67469665d47 100644 --- a/arch/arm/mm/proc-arm1020.S +++ b/arch/arm/mm/proc-arm1020.S | |||
@@ -364,17 +364,8 @@ ENTRY(arm1020_dma_unmap_area) | |||
364 | mov pc, lr | 364 | mov pc, lr |
365 | ENDPROC(arm1020_dma_unmap_area) | 365 | ENDPROC(arm1020_dma_unmap_area) |
366 | 366 | ||
367 | ENTRY(arm1020_cache_fns) | 367 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
368 | .long arm1020_flush_icache_all | 368 | define_cache_functions arm1020 |
369 | .long arm1020_flush_kern_cache_all | ||
370 | .long arm1020_flush_user_cache_all | ||
371 | .long arm1020_flush_user_cache_range | ||
372 | .long arm1020_coherent_kern_range | ||
373 | .long arm1020_coherent_user_range | ||
374 | .long arm1020_flush_kern_dcache_area | ||
375 | .long arm1020_dma_map_area | ||
376 | .long arm1020_dma_unmap_area | ||
377 | .long arm1020_dma_flush_range | ||
378 | 369 | ||
379 | .align 5 | 370 | .align 5 |
380 | ENTRY(cpu_arm1020_dcache_clean_area) | 371 | ENTRY(cpu_arm1020_dcache_clean_area) |
@@ -477,38 +468,14 @@ arm1020_crval: | |||
477 | crval clear=0x0000593f, mmuset=0x00003935, ucset=0x00001930 | 468 | crval clear=0x0000593f, mmuset=0x00003935, ucset=0x00001930 |
478 | 469 | ||
479 | __INITDATA | 470 | __INITDATA |
471 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) | ||
472 | define_processor_functions arm1020, dabort=v4t_early_abort, pabort=legacy_pabort | ||
480 | 473 | ||
481 | /* | ||
482 | * Purpose : Function pointers used to access above functions - all calls | ||
483 | * come through these | ||
484 | */ | ||
485 | .type arm1020_processor_functions, #object | ||
486 | arm1020_processor_functions: | ||
487 | .word v4t_early_abort | ||
488 | .word legacy_pabort | ||
489 | .word cpu_arm1020_proc_init | ||
490 | .word cpu_arm1020_proc_fin | ||
491 | .word cpu_arm1020_reset | ||
492 | .word cpu_arm1020_do_idle | ||
493 | .word cpu_arm1020_dcache_clean_area | ||
494 | .word cpu_arm1020_switch_mm | ||
495 | .word cpu_arm1020_set_pte_ext | ||
496 | .word 0 | ||
497 | .word 0 | ||
498 | .word 0 | ||
499 | .size arm1020_processor_functions, . - arm1020_processor_functions | ||
500 | 474 | ||
501 | .section ".rodata" | 475 | .section ".rodata" |
502 | 476 | ||
503 | .type cpu_arch_name, #object | 477 | string cpu_arch_name, "armv5t" |
504 | cpu_arch_name: | 478 | string cpu_elf_name, "v5" |
505 | .asciz "armv5t" | ||
506 | .size cpu_arch_name, . - cpu_arch_name | ||
507 | |||
508 | .type cpu_elf_name, #object | ||
509 | cpu_elf_name: | ||
510 | .asciz "v5" | ||
511 | .size cpu_elf_name, . - cpu_elf_name | ||
512 | 479 | ||
513 | .type cpu_arm1020_name, #object | 480 | .type cpu_arm1020_name, #object |
514 | cpu_arm1020_name: | 481 | cpu_arm1020_name: |
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S index 4ce947c1962..4251421c0ed 100644 --- a/arch/arm/mm/proc-arm1020e.S +++ b/arch/arm/mm/proc-arm1020e.S | |||
@@ -350,17 +350,8 @@ ENTRY(arm1020e_dma_unmap_area) | |||
350 | mov pc, lr | 350 | mov pc, lr |
351 | ENDPROC(arm1020e_dma_unmap_area) | 351 | ENDPROC(arm1020e_dma_unmap_area) |
352 | 352 | ||
353 | ENTRY(arm1020e_cache_fns) | 353 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
354 | .long arm1020e_flush_icache_all | 354 | define_cache_functions arm1020e |
355 | .long arm1020e_flush_kern_cache_all | ||
356 | .long arm1020e_flush_user_cache_all | ||
357 | .long arm1020e_flush_user_cache_range | ||
358 | .long arm1020e_coherent_kern_range | ||
359 | .long arm1020e_coherent_user_range | ||
360 | .long arm1020e_flush_kern_dcache_area | ||
361 | .long arm1020e_dma_map_area | ||
362 | .long arm1020e_dma_unmap_area | ||
363 | .long arm1020e_dma_flush_range | ||
364 | 355 | ||
365 | .align 5 | 356 | .align 5 |
366 | ENTRY(cpu_arm1020e_dcache_clean_area) | 357 | ENTRY(cpu_arm1020e_dcache_clean_area) |
@@ -458,43 +449,14 @@ arm1020e_crval: | |||
458 | crval clear=0x00007f3f, mmuset=0x00003935, ucset=0x00001930 | 449 | crval clear=0x00007f3f, mmuset=0x00003935, ucset=0x00001930 |
459 | 450 | ||
460 | __INITDATA | 451 | __INITDATA |
461 | 452 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) | |
462 | /* | 453 | define_processor_functions arm1020e, dabort=v4t_early_abort, pabort=legacy_pabort |
463 | * Purpose : Function pointers used to access above functions - all calls | ||
464 | * come through these | ||
465 | */ | ||
466 | .type arm1020e_processor_functions, #object | ||
467 | arm1020e_processor_functions: | ||
468 | .word v4t_early_abort | ||
469 | .word legacy_pabort | ||
470 | .word cpu_arm1020e_proc_init | ||
471 | .word cpu_arm1020e_proc_fin | ||
472 | .word cpu_arm1020e_reset | ||
473 | .word cpu_arm1020e_do_idle | ||
474 | .word cpu_arm1020e_dcache_clean_area | ||
475 | .word cpu_arm1020e_switch_mm | ||
476 | .word cpu_arm1020e_set_pte_ext | ||
477 | .word 0 | ||
478 | .word 0 | ||
479 | .word 0 | ||
480 | .size arm1020e_processor_functions, . - arm1020e_processor_functions | ||
481 | 454 | ||
482 | .section ".rodata" | 455 | .section ".rodata" |
483 | 456 | ||
484 | .type cpu_arch_name, #object | 457 | string cpu_arch_name, "armv5te" |
485 | cpu_arch_name: | 458 | string cpu_elf_name, "v5" |
486 | .asciz "armv5te" | 459 | string cpu_arm1020e_name, "ARM1020E" |
487 | .size cpu_arch_name, . - cpu_arch_name | ||
488 | |||
489 | .type cpu_elf_name, #object | ||
490 | cpu_elf_name: | ||
491 | .asciz "v5" | ||
492 | .size cpu_elf_name, . - cpu_elf_name | ||
493 | |||
494 | .type cpu_arm1020e_name, #object | ||
495 | cpu_arm1020e_name: | ||
496 | .asciz "ARM1020E" | ||
497 | .size cpu_arm1020e_name, . - cpu_arm1020e_name | ||
498 | 460 | ||
499 | .align | 461 | .align |
500 | 462 | ||
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S index c8884c5413a..d283cf3d06e 100644 --- a/arch/arm/mm/proc-arm1022.S +++ b/arch/arm/mm/proc-arm1022.S | |||
@@ -339,17 +339,8 @@ ENTRY(arm1022_dma_unmap_area) | |||
339 | mov pc, lr | 339 | mov pc, lr |
340 | ENDPROC(arm1022_dma_unmap_area) | 340 | ENDPROC(arm1022_dma_unmap_area) |
341 | 341 | ||
342 | ENTRY(arm1022_cache_fns) | 342 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
343 | .long arm1022_flush_icache_all | 343 | define_cache_functions arm1022 |
344 | .long arm1022_flush_kern_cache_all | ||
345 | .long arm1022_flush_user_cache_all | ||
346 | .long arm1022_flush_user_cache_range | ||
347 | .long arm1022_coherent_kern_range | ||
348 | .long arm1022_coherent_user_range | ||
349 | .long arm1022_flush_kern_dcache_area | ||
350 | .long arm1022_dma_map_area | ||
351 | .long arm1022_dma_unmap_area | ||
352 | .long arm1022_dma_flush_range | ||
353 | 344 | ||
354 | .align 5 | 345 | .align 5 |
355 | ENTRY(cpu_arm1022_dcache_clean_area) | 346 | ENTRY(cpu_arm1022_dcache_clean_area) |
@@ -441,43 +432,14 @@ arm1022_crval: | |||
441 | crval clear=0x00007f3f, mmuset=0x00003935, ucset=0x00001930 | 432 | crval clear=0x00007f3f, mmuset=0x00003935, ucset=0x00001930 |
442 | 433 | ||
443 | __INITDATA | 434 | __INITDATA |
444 | 435 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) | |
445 | /* | 436 | define_processor_functions arm1022, dabort=v4t_early_abort, pabort=legacy_pabort |
446 | * Purpose : Function pointers used to access above functions - all calls | ||
447 | * come through these | ||
448 | */ | ||
449 | .type arm1022_processor_functions, #object | ||
450 | arm1022_processor_functions: | ||
451 | .word v4t_early_abort | ||
452 | .word legacy_pabort | ||
453 | .word cpu_arm1022_proc_init | ||
454 | .word cpu_arm1022_proc_fin | ||
455 | .word cpu_arm1022_reset | ||
456 | .word cpu_arm1022_do_idle | ||
457 | .word cpu_arm1022_dcache_clean_area | ||
458 | .word cpu_arm1022_switch_mm | ||
459 | .word cpu_arm1022_set_pte_ext | ||
460 | .word 0 | ||
461 | .word 0 | ||
462 | .word 0 | ||
463 | .size arm1022_processor_functions, . - arm1022_processor_functions | ||
464 | 437 | ||
465 | .section ".rodata" | 438 | .section ".rodata" |
466 | 439 | ||
467 | .type cpu_arch_name, #object | 440 | string cpu_arch_name, "armv5te" |
468 | cpu_arch_name: | 441 | string cpu_elf_name, "v5" |
469 | .asciz "armv5te" | 442 | string cpu_arm1022_name, "ARM1022" |
470 | .size cpu_arch_name, . - cpu_arch_name | ||
471 | |||
472 | .type cpu_elf_name, #object | ||
473 | cpu_elf_name: | ||
474 | .asciz "v5" | ||
475 | .size cpu_elf_name, . - cpu_elf_name | ||
476 | |||
477 | .type cpu_arm1022_name, #object | ||
478 | cpu_arm1022_name: | ||
479 | .asciz "ARM1022" | ||
480 | .size cpu_arm1022_name, . - cpu_arm1022_name | ||
481 | 443 | ||
482 | .align | 444 | .align |
483 | 445 | ||
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S index 413684660aa..678a1ceafed 100644 --- a/arch/arm/mm/proc-arm1026.S +++ b/arch/arm/mm/proc-arm1026.S | |||
@@ -333,17 +333,8 @@ ENTRY(arm1026_dma_unmap_area) | |||
333 | mov pc, lr | 333 | mov pc, lr |
334 | ENDPROC(arm1026_dma_unmap_area) | 334 | ENDPROC(arm1026_dma_unmap_area) |
335 | 335 | ||
336 | ENTRY(arm1026_cache_fns) | 336 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
337 | .long arm1026_flush_icache_all | 337 | define_cache_functions arm1026 |
338 | .long arm1026_flush_kern_cache_all | ||
339 | .long arm1026_flush_user_cache_all | ||
340 | .long arm1026_flush_user_cache_range | ||
341 | .long arm1026_coherent_kern_range | ||
342 | .long arm1026_coherent_user_range | ||
343 | .long arm1026_flush_kern_dcache_area | ||
344 | .long arm1026_dma_map_area | ||
345 | .long arm1026_dma_unmap_area | ||
346 | .long arm1026_dma_flush_range | ||
347 | 338 | ||
348 | .align 5 | 339 | .align 5 |
349 | ENTRY(cpu_arm1026_dcache_clean_area) | 340 | ENTRY(cpu_arm1026_dcache_clean_area) |
@@ -436,45 +427,15 @@ arm1026_crval: | |||
436 | crval clear=0x00007f3f, mmuset=0x00003935, ucset=0x00001934 | 427 | crval clear=0x00007f3f, mmuset=0x00003935, ucset=0x00001934 |
437 | 428 | ||
438 | __INITDATA | 429 | __INITDATA |
439 | 430 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) | |
440 | /* | 431 | define_processor_functions arm1026, dabort=v5t_early_abort, pabort=legacy_pabort |
441 | * Purpose : Function pointers used to access above functions - all calls | ||
442 | * come through these | ||
443 | */ | ||
444 | .type arm1026_processor_functions, #object | ||
445 | arm1026_processor_functions: | ||
446 | .word v5t_early_abort | ||
447 | .word legacy_pabort | ||
448 | .word cpu_arm1026_proc_init | ||
449 | .word cpu_arm1026_proc_fin | ||
450 | .word cpu_arm1026_reset | ||
451 | .word cpu_arm1026_do_idle | ||
452 | .word cpu_arm1026_dcache_clean_area | ||
453 | .word cpu_arm1026_switch_mm | ||
454 | .word cpu_arm1026_set_pte_ext | ||
455 | .word 0 | ||
456 | .word 0 | ||
457 | .word 0 | ||
458 | .size arm1026_processor_functions, . - arm1026_processor_functions | ||
459 | 432 | ||
460 | .section .rodata | 433 | .section .rodata |
461 | 434 | ||
462 | .type cpu_arch_name, #object | 435 | string cpu_arch_name, "armv5tej" |
463 | cpu_arch_name: | 436 | string cpu_elf_name, "v5" |
464 | .asciz "armv5tej" | ||
465 | .size cpu_arch_name, . - cpu_arch_name | ||
466 | |||
467 | .type cpu_elf_name, #object | ||
468 | cpu_elf_name: | ||
469 | .asciz "v5" | ||
470 | .size cpu_elf_name, . - cpu_elf_name | ||
471 | .align | 437 | .align |
472 | 438 | string cpu_arm1026_name, "ARM1026EJ-S" | |
473 | .type cpu_arm1026_name, #object | ||
474 | cpu_arm1026_name: | ||
475 | .asciz "ARM1026EJ-S" | ||
476 | .size cpu_arm1026_name, . - cpu_arm1026_name | ||
477 | |||
478 | .align | 439 | .align |
479 | 440 | ||
480 | .section ".proc.info.init", #alloc, #execinstr | 441 | .section ".proc.info.init", #alloc, #execinstr |
diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S index 5f79dc4ce3f..e5b974cddac 100644 --- a/arch/arm/mm/proc-arm6_7.S +++ b/arch/arm/mm/proc-arm6_7.S | |||
@@ -29,19 +29,19 @@ ENTRY(cpu_arm7_dcache_clean_area) | |||
29 | /* | 29 | /* |
30 | * Function: arm6_7_data_abort () | 30 | * Function: arm6_7_data_abort () |
31 | * | 31 | * |
32 | * Params : r2 = address of aborted instruction | 32 | * Params : r2 = pt_regs |
33 | * : sp = pointer to registers | 33 | * : r4 = aborted context pc |
34 | * : r5 = aborted context psr | ||
34 | * | 35 | * |
35 | * Purpose : obtain information about current aborted instruction | 36 | * Purpose : obtain information about current aborted instruction |
36 | * | 37 | * |
37 | * Returns : r0 = address of abort | 38 | * Returns : r4-r5, r10-r11, r13 preserved |
38 | * : r1 = FSR | ||
39 | */ | 39 | */ |
40 | 40 | ||
41 | ENTRY(cpu_arm7_data_abort) | 41 | ENTRY(cpu_arm7_data_abort) |
42 | mrc p15, 0, r1, c5, c0, 0 @ get FSR | 42 | mrc p15, 0, r1, c5, c0, 0 @ get FSR |
43 | mrc p15, 0, r0, c6, c0, 0 @ get FAR | 43 | mrc p15, 0, r0, c6, c0, 0 @ get FAR |
44 | ldr r8, [r2] @ read arm instruction | 44 | ldr r8, [r4] @ read arm instruction |
45 | tst r8, #1 << 20 @ L = 0 -> write? | 45 | tst r8, #1 << 20 @ L = 0 -> write? |
46 | orreq r1, r1, #1 << 11 @ yes. | 46 | orreq r1, r1, #1 << 11 @ yes. |
47 | and r7, r8, #15 << 24 | 47 | and r7, r8, #15 << 24 |
@@ -49,7 +49,7 @@ ENTRY(cpu_arm7_data_abort) | |||
49 | nop | 49 | nop |
50 | 50 | ||
51 | /* 0 */ b .data_unknown | 51 | /* 0 */ b .data_unknown |
52 | /* 1 */ mov pc, lr @ swp | 52 | /* 1 */ b do_DataAbort @ swp |
53 | /* 2 */ b .data_unknown | 53 | /* 2 */ b .data_unknown |
54 | /* 3 */ b .data_unknown | 54 | /* 3 */ b .data_unknown |
55 | /* 4 */ b .data_arm_lateldrpostconst @ ldr rd, [rn], #m | 55 | /* 4 */ b .data_arm_lateldrpostconst @ ldr rd, [rn], #m |
@@ -60,87 +60,85 @@ ENTRY(cpu_arm7_data_abort) | |||
60 | /* 9 */ b .data_arm_ldmstm @ ldm*b rn, <rlist> | 60 | /* 9 */ b .data_arm_ldmstm @ ldm*b rn, <rlist> |
61 | /* a */ b .data_unknown | 61 | /* a */ b .data_unknown |
62 | /* b */ b .data_unknown | 62 | /* b */ b .data_unknown |
63 | /* c */ mov pc, lr @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m | 63 | /* c */ b do_DataAbort @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m |
64 | /* d */ mov pc, lr @ ldc rd, [rn, #m] | 64 | /* d */ b do_DataAbort @ ldc rd, [rn, #m] |
65 | /* e */ b .data_unknown | 65 | /* e */ b .data_unknown |
66 | /* f */ | 66 | /* f */ |
67 | .data_unknown: @ Part of jumptable | 67 | .data_unknown: @ Part of jumptable |
68 | mov r0, r2 | 68 | mov r0, r4 |
69 | mov r1, r8 | 69 | mov r1, r8 |
70 | mov r2, sp | 70 | b baddataabort |
71 | bl baddataabort | ||
72 | b ret_from_exception | ||
73 | 71 | ||
74 | ENTRY(cpu_arm6_data_abort) | 72 | ENTRY(cpu_arm6_data_abort) |
75 | mrc p15, 0, r1, c5, c0, 0 @ get FSR | 73 | mrc p15, 0, r1, c5, c0, 0 @ get FSR |
76 | mrc p15, 0, r0, c6, c0, 0 @ get FAR | 74 | mrc p15, 0, r0, c6, c0, 0 @ get FAR |
77 | ldr r8, [r2] @ read arm instruction | 75 | ldr r8, [r4] @ read arm instruction |
78 | tst r8, #1 << 20 @ L = 0 -> write? | 76 | tst r8, #1 << 20 @ L = 0 -> write? |
79 | orreq r1, r1, #1 << 11 @ yes. | 77 | orreq r1, r1, #1 << 11 @ yes. |
80 | and r7, r8, #14 << 24 | 78 | and r7, r8, #14 << 24 |
81 | teq r7, #8 << 24 @ was it ldm/stm | 79 | teq r7, #8 << 24 @ was it ldm/stm |
82 | movne pc, lr | 80 | bne do_DataAbort |
83 | 81 | ||
84 | .data_arm_ldmstm: | 82 | .data_arm_ldmstm: |
85 | tst r8, #1 << 21 @ check writeback bit | 83 | tst r8, #1 << 21 @ check writeback bit |
86 | moveq pc, lr @ no writeback -> no fixup | 84 | beq do_DataAbort @ no writeback -> no fixup |
87 | mov r7, #0x11 | 85 | mov r7, #0x11 |
88 | orr r7, r7, #0x1100 | 86 | orr r7, r7, #0x1100 |
89 | and r6, r8, r7 | 87 | and r6, r8, r7 |
90 | and r2, r8, r7, lsl #1 | 88 | and r9, r8, r7, lsl #1 |
91 | add r6, r6, r2, lsr #1 | 89 | add r6, r6, r9, lsr #1 |
92 | and r2, r8, r7, lsl #2 | 90 | and r9, r8, r7, lsl #2 |
93 | add r6, r6, r2, lsr #2 | 91 | add r6, r6, r9, lsr #2 |
94 | and r2, r8, r7, lsl #3 | 92 | and r9, r8, r7, lsl #3 |
95 | add r6, r6, r2, lsr #3 | 93 | add r6, r6, r9, lsr #3 |
96 | add r6, r6, r6, lsr #8 | 94 | add r6, r6, r6, lsr #8 |
97 | add r6, r6, r6, lsr #4 | 95 | add r6, r6, r6, lsr #4 |
98 | and r6, r6, #15 @ r6 = no. of registers to transfer. | 96 | and r6, r6, #15 @ r6 = no. of registers to transfer. |
99 | and r5, r8, #15 << 16 @ Extract 'n' from instruction | 97 | and r9, r8, #15 << 16 @ Extract 'n' from instruction |
100 | ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' | 98 | ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' |
101 | tst r8, #1 << 23 @ Check U bit | 99 | tst r8, #1 << 23 @ Check U bit |
102 | subne r7, r7, r6, lsl #2 @ Undo increment | 100 | subne r7, r7, r6, lsl #2 @ Undo increment |
103 | addeq r7, r7, r6, lsl #2 @ Undo decrement | 101 | addeq r7, r7, r6, lsl #2 @ Undo decrement |
104 | str r7, [sp, r5, lsr #14] @ Put register 'Rn' | 102 | str r7, [r2, r9, lsr #14] @ Put register 'Rn' |
105 | mov pc, lr | 103 | b do_DataAbort |
106 | 104 | ||
107 | .data_arm_apply_r6_and_rn: | 105 | .data_arm_apply_r6_and_rn: |
108 | and r5, r8, #15 << 16 @ Extract 'n' from instruction | 106 | and r9, r8, #15 << 16 @ Extract 'n' from instruction |
109 | ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' | 107 | ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' |
110 | tst r8, #1 << 23 @ Check U bit | 108 | tst r8, #1 << 23 @ Check U bit |
111 | subne r7, r7, r6 @ Undo incrmenet | 109 | subne r7, r7, r6 @ Undo incrmenet |
112 | addeq r7, r7, r6 @ Undo decrement | 110 | addeq r7, r7, r6 @ Undo decrement |
113 | str r7, [sp, r5, lsr #14] @ Put register 'Rn' | 111 | str r7, [r2, r9, lsr #14] @ Put register 'Rn' |
114 | mov pc, lr | 112 | b do_DataAbort |
115 | 113 | ||
116 | .data_arm_lateldrpreconst: | 114 | .data_arm_lateldrpreconst: |
117 | tst r8, #1 << 21 @ check writeback bit | 115 | tst r8, #1 << 21 @ check writeback bit |
118 | moveq pc, lr @ no writeback -> no fixup | 116 | beq do_DataAbort @ no writeback -> no fixup |
119 | .data_arm_lateldrpostconst: | 117 | .data_arm_lateldrpostconst: |
120 | movs r2, r8, lsl #20 @ Get offset | 118 | movs r6, r8, lsl #20 @ Get offset |
121 | moveq pc, lr @ zero -> no fixup | 119 | beq do_DataAbort @ zero -> no fixup |
122 | and r5, r8, #15 << 16 @ Extract 'n' from instruction | 120 | and r9, r8, #15 << 16 @ Extract 'n' from instruction |
123 | ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' | 121 | ldr r7, [r2, r9, lsr #14] @ Get register 'Rn' |
124 | tst r8, #1 << 23 @ Check U bit | 122 | tst r8, #1 << 23 @ Check U bit |
125 | subne r7, r7, r2, lsr #20 @ Undo increment | 123 | subne r7, r7, r6, lsr #20 @ Undo increment |
126 | addeq r7, r7, r2, lsr #20 @ Undo decrement | 124 | addeq r7, r7, r6, lsr #20 @ Undo decrement |
127 | str r7, [sp, r5, lsr #14] @ Put register 'Rn' | 125 | str r7, [r2, r9, lsr #14] @ Put register 'Rn' |
128 | mov pc, lr | 126 | b do_DataAbort |
129 | 127 | ||
130 | .data_arm_lateldrprereg: | 128 | .data_arm_lateldrprereg: |
131 | tst r8, #1 << 21 @ check writeback bit | 129 | tst r8, #1 << 21 @ check writeback bit |
132 | moveq pc, lr @ no writeback -> no fixup | 130 | beq do_DataAbort @ no writeback -> no fixup |
133 | .data_arm_lateldrpostreg: | 131 | .data_arm_lateldrpostreg: |
134 | and r7, r8, #15 @ Extract 'm' from instruction | 132 | and r7, r8, #15 @ Extract 'm' from instruction |
135 | ldr r6, [sp, r7, lsl #2] @ Get register 'Rm' | 133 | ldr r6, [r2, r7, lsl #2] @ Get register 'Rm' |
136 | mov r5, r8, lsr #7 @ get shift count | 134 | mov r9, r8, lsr #7 @ get shift count |
137 | ands r5, r5, #31 | 135 | ands r9, r9, #31 |
138 | and r7, r8, #0x70 @ get shift type | 136 | and r7, r8, #0x70 @ get shift type |
139 | orreq r7, r7, #8 @ shift count = 0 | 137 | orreq r7, r7, #8 @ shift count = 0 |
140 | add pc, pc, r7 | 138 | add pc, pc, r7 |
141 | nop | 139 | nop |
142 | 140 | ||
143 | mov r6, r6, lsl r5 @ 0: LSL #!0 | 141 | mov r6, r6, lsl r9 @ 0: LSL #!0 |
144 | b .data_arm_apply_r6_and_rn | 142 | b .data_arm_apply_r6_and_rn |
145 | b .data_arm_apply_r6_and_rn @ 1: LSL #0 | 143 | b .data_arm_apply_r6_and_rn @ 1: LSL #0 |
146 | nop | 144 | nop |
@@ -148,7 +146,7 @@ ENTRY(cpu_arm6_data_abort) | |||
148 | nop | 146 | nop |
149 | b .data_unknown @ 3: MUL? | 147 | b .data_unknown @ 3: MUL? |
150 | nop | 148 | nop |
151 | mov r6, r6, lsr r5 @ 4: LSR #!0 | 149 | mov r6, r6, lsr r9 @ 4: LSR #!0 |
152 | b .data_arm_apply_r6_and_rn | 150 | b .data_arm_apply_r6_and_rn |
153 | mov r6, r6, lsr #32 @ 5: LSR #32 | 151 | mov r6, r6, lsr #32 @ 5: LSR #32 |
154 | b .data_arm_apply_r6_and_rn | 152 | b .data_arm_apply_r6_and_rn |
@@ -156,7 +154,7 @@ ENTRY(cpu_arm6_data_abort) | |||
156 | nop | 154 | nop |
157 | b .data_unknown @ 7: MUL? | 155 | b .data_unknown @ 7: MUL? |
158 | nop | 156 | nop |
159 | mov r6, r6, asr r5 @ 8: ASR #!0 | 157 | mov r6, r6, asr r9 @ 8: ASR #!0 |
160 | b .data_arm_apply_r6_and_rn | 158 | b .data_arm_apply_r6_and_rn |
161 | mov r6, r6, asr #32 @ 9: ASR #32 | 159 | mov r6, r6, asr #32 @ 9: ASR #32 |
162 | b .data_arm_apply_r6_and_rn | 160 | b .data_arm_apply_r6_and_rn |
@@ -164,7 +162,7 @@ ENTRY(cpu_arm6_data_abort) | |||
164 | nop | 162 | nop |
165 | b .data_unknown @ B: MUL? | 163 | b .data_unknown @ B: MUL? |
166 | nop | 164 | nop |
167 | mov r6, r6, ror r5 @ C: ROR #!0 | 165 | mov r6, r6, ror r9 @ C: ROR #!0 |
168 | b .data_arm_apply_r6_and_rn | 166 | b .data_arm_apply_r6_and_rn |
169 | mov r6, r6, rrx @ D: RRX | 167 | mov r6, r6, rrx @ D: RRX |
170 | b .data_arm_apply_r6_and_rn | 168 | b .data_arm_apply_r6_and_rn |
@@ -269,159 +267,57 @@ __arm7_setup: mov r0, #0 | |||
269 | 267 | ||
270 | __INITDATA | 268 | __INITDATA |
271 | 269 | ||
272 | /* | 270 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) |
273 | * Purpose : Function pointers used to access above functions - all calls | 271 | define_processor_functions arm6, dabort=cpu_arm6_data_abort, pabort=legacy_pabort |
274 | * come through these | 272 | define_processor_functions arm7, dabort=cpu_arm7_data_abort, pabort=legacy_pabort |
275 | */ | ||
276 | .type arm6_processor_functions, #object | ||
277 | ENTRY(arm6_processor_functions) | ||
278 | .word cpu_arm6_data_abort | ||
279 | .word legacy_pabort | ||
280 | .word cpu_arm6_proc_init | ||
281 | .word cpu_arm6_proc_fin | ||
282 | .word cpu_arm6_reset | ||
283 | .word cpu_arm6_do_idle | ||
284 | .word cpu_arm6_dcache_clean_area | ||
285 | .word cpu_arm6_switch_mm | ||
286 | .word cpu_arm6_set_pte_ext | ||
287 | .word 0 | ||
288 | .word 0 | ||
289 | .word 0 | ||
290 | .size arm6_processor_functions, . - arm6_processor_functions | ||
291 | |||
292 | /* | ||
293 | * Purpose : Function pointers used to access above functions - all calls | ||
294 | * come through these | ||
295 | */ | ||
296 | .type arm7_processor_functions, #object | ||
297 | ENTRY(arm7_processor_functions) | ||
298 | .word cpu_arm7_data_abort | ||
299 | .word legacy_pabort | ||
300 | .word cpu_arm7_proc_init | ||
301 | .word cpu_arm7_proc_fin | ||
302 | .word cpu_arm7_reset | ||
303 | .word cpu_arm7_do_idle | ||
304 | .word cpu_arm7_dcache_clean_area | ||
305 | .word cpu_arm7_switch_mm | ||
306 | .word cpu_arm7_set_pte_ext | ||
307 | .word 0 | ||
308 | .word 0 | ||
309 | .word 0 | ||
310 | .size arm7_processor_functions, . - arm7_processor_functions | ||
311 | 273 | ||
312 | .section ".rodata" | 274 | .section ".rodata" |
313 | 275 | ||
314 | .type cpu_arch_name, #object | 276 | string cpu_arch_name, "armv3" |
315 | cpu_arch_name: .asciz "armv3" | 277 | string cpu_elf_name, "v3" |
316 | .size cpu_arch_name, . - cpu_arch_name | 278 | string cpu_arm6_name, "ARM6" |
317 | 279 | string cpu_arm610_name, "ARM610" | |
318 | .type cpu_elf_name, #object | 280 | string cpu_arm7_name, "ARM7" |
319 | cpu_elf_name: .asciz "v3" | 281 | string cpu_arm710_name, "ARM710" |
320 | .size cpu_elf_name, . - cpu_elf_name | ||
321 | |||
322 | .type cpu_arm6_name, #object | ||
323 | cpu_arm6_name: .asciz "ARM6" | ||
324 | .size cpu_arm6_name, . - cpu_arm6_name | ||
325 | |||
326 | .type cpu_arm610_name, #object | ||
327 | cpu_arm610_name: | ||
328 | .asciz "ARM610" | ||
329 | .size cpu_arm610_name, . - cpu_arm610_name | ||
330 | |||
331 | .type cpu_arm7_name, #object | ||
332 | cpu_arm7_name: .asciz "ARM7" | ||
333 | .size cpu_arm7_name, . - cpu_arm7_name | ||
334 | |||
335 | .type cpu_arm710_name, #object | ||
336 | cpu_arm710_name: | ||
337 | .asciz "ARM710" | ||
338 | .size cpu_arm710_name, . - cpu_arm710_name | ||
339 | 282 | ||
340 | .align | 283 | .align |
341 | 284 | ||
342 | .section ".proc.info.init", #alloc, #execinstr | 285 | .section ".proc.info.init", #alloc, #execinstr |
343 | 286 | ||
344 | .type __arm6_proc_info, #object | 287 | .macro arm67_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, \ |
345 | __arm6_proc_info: | 288 | cpu_mm_mmu_flags:req, cpu_flush:req, cpu_proc_funcs:req |
346 | .long 0x41560600 | 289 | .type __\name\()_proc_info, #object |
347 | .long 0xfffffff0 | 290 | __\name\()_proc_info: |
348 | .long 0x00000c1e | 291 | .long \cpu_val |
349 | .long PMD_TYPE_SECT | \ | 292 | .long \cpu_mask |
350 | PMD_BIT4 | \ | 293 | .long \cpu_mm_mmu_flags |
351 | PMD_SECT_AP_WRITE | \ | ||
352 | PMD_SECT_AP_READ | ||
353 | b __arm6_setup | ||
354 | .long cpu_arch_name | ||
355 | .long cpu_elf_name | ||
356 | .long HWCAP_SWP | HWCAP_26BIT | ||
357 | .long cpu_arm6_name | ||
358 | .long arm6_processor_functions | ||
359 | .long v3_tlb_fns | ||
360 | .long v3_user_fns | ||
361 | .long v3_cache_fns | ||
362 | .size __arm6_proc_info, . - __arm6_proc_info | ||
363 | |||
364 | .type __arm610_proc_info, #object | ||
365 | __arm610_proc_info: | ||
366 | .long 0x41560610 | ||
367 | .long 0xfffffff0 | ||
368 | .long 0x00000c1e | ||
369 | .long PMD_TYPE_SECT | \ | 294 | .long PMD_TYPE_SECT | \ |
370 | PMD_BIT4 | \ | 295 | PMD_BIT4 | \ |
371 | PMD_SECT_AP_WRITE | \ | 296 | PMD_SECT_AP_WRITE | \ |
372 | PMD_SECT_AP_READ | 297 | PMD_SECT_AP_READ |
373 | b __arm6_setup | 298 | b \cpu_flush |
374 | .long cpu_arch_name | 299 | .long cpu_arch_name |
375 | .long cpu_elf_name | 300 | .long cpu_elf_name |
376 | .long HWCAP_SWP | HWCAP_26BIT | 301 | .long HWCAP_SWP | HWCAP_26BIT |
377 | .long cpu_arm610_name | 302 | .long \cpu_name |
378 | .long arm6_processor_functions | 303 | .long \cpu_proc_funcs |
379 | .long v3_tlb_fns | 304 | .long v3_tlb_fns |
380 | .long v3_user_fns | 305 | .long v3_user_fns |
381 | .long v3_cache_fns | 306 | .long v3_cache_fns |
382 | .size __arm610_proc_info, . - __arm610_proc_info | 307 | .size __\name\()_proc_info, . - __\name\()_proc_info |
383 | 308 | .endm | |
384 | .type __arm7_proc_info, #object | 309 | |
385 | __arm7_proc_info: | 310 | arm67_proc_info arm6, 0x41560600, 0xfffffff0, cpu_arm6_name, \ |
386 | .long 0x41007000 | 311 | 0x00000c1e, __arm6_setup, arm6_processor_functions |
387 | .long 0xffffff00 | 312 | arm67_proc_info arm610, 0x41560610, 0xfffffff0, cpu_arm610_name, \ |
388 | .long 0x00000c1e | 313 | 0x00000c1e, __arm6_setup, arm6_processor_functions |
389 | .long PMD_TYPE_SECT | \ | 314 | arm67_proc_info arm7, 0x41007000, 0xffffff00, cpu_arm7_name, \ |
390 | PMD_BIT4 | \ | 315 | 0x00000c1e, __arm7_setup, arm7_processor_functions |
391 | PMD_SECT_AP_WRITE | \ | 316 | arm67_proc_info arm710, 0x41007100, 0xfff8ff00, cpu_arm710_name, \ |
392 | PMD_SECT_AP_READ | 317 | PMD_TYPE_SECT | \ |
393 | b __arm7_setup | ||
394 | .long cpu_arch_name | ||
395 | .long cpu_elf_name | ||
396 | .long HWCAP_SWP | HWCAP_26BIT | ||
397 | .long cpu_arm7_name | ||
398 | .long arm7_processor_functions | ||
399 | .long v3_tlb_fns | ||
400 | .long v3_user_fns | ||
401 | .long v3_cache_fns | ||
402 | .size __arm7_proc_info, . - __arm7_proc_info | ||
403 | |||
404 | .type __arm710_proc_info, #object | ||
405 | __arm710_proc_info: | ||
406 | .long 0x41007100 | ||
407 | .long 0xfff8ff00 | ||
408 | .long PMD_TYPE_SECT | \ | ||
409 | PMD_SECT_BUFFERABLE | \ | 318 | PMD_SECT_BUFFERABLE | \ |
410 | PMD_SECT_CACHEABLE | \ | 319 | PMD_SECT_CACHEABLE | \ |
411 | PMD_BIT4 | \ | 320 | PMD_BIT4 | \ |
412 | PMD_SECT_AP_WRITE | \ | 321 | PMD_SECT_AP_WRITE | \ |
413 | PMD_SECT_AP_READ | 322 | PMD_SECT_AP_READ, \ |
414 | .long PMD_TYPE_SECT | \ | 323 | __arm7_setup, arm7_processor_functions |
415 | PMD_BIT4 | \ | ||
416 | PMD_SECT_AP_WRITE | \ | ||
417 | PMD_SECT_AP_READ | ||
418 | b __arm7_setup | ||
419 | .long cpu_arch_name | ||
420 | .long cpu_elf_name | ||
421 | .long HWCAP_SWP | HWCAP_26BIT | ||
422 | .long cpu_arm710_name | ||
423 | .long arm7_processor_functions | ||
424 | .long v3_tlb_fns | ||
425 | .long v3_user_fns | ||
426 | .long v3_cache_fns | ||
427 | .size __arm710_proc_info, . - __arm710_proc_info | ||
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S index 7a06e5964f5..55f4e290665 100644 --- a/arch/arm/mm/proc-arm720.S +++ b/arch/arm/mm/proc-arm720.S | |||
@@ -169,46 +169,15 @@ arm720_crval: | |||
169 | crval clear=0x00002f3f, mmuset=0x0000213d, ucset=0x00000130 | 169 | crval clear=0x00002f3f, mmuset=0x0000213d, ucset=0x00000130 |
170 | 170 | ||
171 | __INITDATA | 171 | __INITDATA |
172 | 172 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) | |
173 | /* | 173 | define_processor_functions arm720, dabort=v4t_late_abort, pabort=legacy_pabort |
174 | * Purpose : Function pointers used to access above functions - all calls | ||
175 | * come through these | ||
176 | */ | ||
177 | .type arm720_processor_functions, #object | ||
178 | ENTRY(arm720_processor_functions) | ||
179 | .word v4t_late_abort | ||
180 | .word legacy_pabort | ||
181 | .word cpu_arm720_proc_init | ||
182 | .word cpu_arm720_proc_fin | ||
183 | .word cpu_arm720_reset | ||
184 | .word cpu_arm720_do_idle | ||
185 | .word cpu_arm720_dcache_clean_area | ||
186 | .word cpu_arm720_switch_mm | ||
187 | .word cpu_arm720_set_pte_ext | ||
188 | .word 0 | ||
189 | .word 0 | ||
190 | .word 0 | ||
191 | .size arm720_processor_functions, . - arm720_processor_functions | ||
192 | 174 | ||
193 | .section ".rodata" | 175 | .section ".rodata" |
194 | 176 | ||
195 | .type cpu_arch_name, #object | 177 | string cpu_arch_name, "armv4t" |
196 | cpu_arch_name: .asciz "armv4t" | 178 | string cpu_elf_name, "v4" |
197 | .size cpu_arch_name, . - cpu_arch_name | 179 | string cpu_arm710_name, "ARM710T" |
198 | 180 | string cpu_arm720_name, "ARM720T" | |
199 | .type cpu_elf_name, #object | ||
200 | cpu_elf_name: .asciz "v4" | ||
201 | .size cpu_elf_name, . - cpu_elf_name | ||
202 | |||
203 | .type cpu_arm710_name, #object | ||
204 | cpu_arm710_name: | ||
205 | .asciz "ARM710T" | ||
206 | .size cpu_arm710_name, . - cpu_arm710_name | ||
207 | |||
208 | .type cpu_arm720_name, #object | ||
209 | cpu_arm720_name: | ||
210 | .asciz "ARM720T" | ||
211 | .size cpu_arm720_name, . - cpu_arm720_name | ||
212 | 181 | ||
213 | .align | 182 | .align |
214 | 183 | ||
@@ -218,10 +187,11 @@ cpu_arm720_name: | |||
218 | 187 | ||
219 | .section ".proc.info.init", #alloc, #execinstr | 188 | .section ".proc.info.init", #alloc, #execinstr |
220 | 189 | ||
221 | .type __arm710_proc_info, #object | 190 | .macro arm720_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cpu_flush:req |
222 | __arm710_proc_info: | 191 | .type __\name\()_proc_info,#object |
223 | .long 0x41807100 @ cpu_val | 192 | __\name\()_proc_info: |
224 | .long 0xffffff00 @ cpu_mask | 193 | .long \cpu_val |
194 | .long \cpu_mask | ||
225 | .long PMD_TYPE_SECT | \ | 195 | .long PMD_TYPE_SECT | \ |
226 | PMD_SECT_BUFFERABLE | \ | 196 | PMD_SECT_BUFFERABLE | \ |
227 | PMD_SECT_CACHEABLE | \ | 197 | PMD_SECT_CACHEABLE | \ |
@@ -232,38 +202,17 @@ __arm710_proc_info: | |||
232 | PMD_BIT4 | \ | 202 | PMD_BIT4 | \ |
233 | PMD_SECT_AP_WRITE | \ | 203 | PMD_SECT_AP_WRITE | \ |
234 | PMD_SECT_AP_READ | 204 | PMD_SECT_AP_READ |
235 | b __arm710_setup @ cpu_flush | 205 | b \cpu_flush @ cpu_flush |
236 | .long cpu_arch_name @ arch_name | 206 | .long cpu_arch_name @ arch_name |
237 | .long cpu_elf_name @ elf_name | 207 | .long cpu_elf_name @ elf_name |
238 | .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB @ elf_hwcap | 208 | .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB @ elf_hwcap |
239 | .long cpu_arm710_name @ name | 209 | .long \cpu_name |
240 | .long arm720_processor_functions | 210 | .long arm720_processor_functions |
241 | .long v4_tlb_fns | 211 | .long v4_tlb_fns |
242 | .long v4wt_user_fns | 212 | .long v4wt_user_fns |
243 | .long v4_cache_fns | 213 | .long v4_cache_fns |
244 | .size __arm710_proc_info, . - __arm710_proc_info | 214 | .size __\name\()_proc_info, . - __\name\()_proc_info |
215 | .endm | ||
245 | 216 | ||
246 | .type __arm720_proc_info, #object | 217 | arm720_proc_info arm710, 0x41807100, 0xffffff00, cpu_arm710_name, __arm710_setup |
247 | __arm720_proc_info: | 218 | arm720_proc_info arm720, 0x41807200, 0xffffff00, cpu_arm720_name, __arm720_setup |
248 | .long 0x41807200 @ cpu_val | ||
249 | .long 0xffffff00 @ cpu_mask | ||
250 | .long PMD_TYPE_SECT | \ | ||
251 | PMD_SECT_BUFFERABLE | \ | ||
252 | PMD_SECT_CACHEABLE | \ | ||
253 | PMD_BIT4 | \ | ||
254 | PMD_SECT_AP_WRITE | \ | ||
255 | PMD_SECT_AP_READ | ||
256 | .long PMD_TYPE_SECT | \ | ||
257 | PMD_BIT4 | \ | ||
258 | PMD_SECT_AP_WRITE | \ | ||
259 | PMD_SECT_AP_READ | ||
260 | b __arm720_setup @ cpu_flush | ||
261 | .long cpu_arch_name @ arch_name | ||
262 | .long cpu_elf_name @ elf_name | ||
263 | .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB @ elf_hwcap | ||
264 | .long cpu_arm720_name @ name | ||
265 | .long arm720_processor_functions | ||
266 | .long v4_tlb_fns | ||
267 | .long v4wt_user_fns | ||
268 | .long v4_cache_fns | ||
269 | .size __arm720_proc_info, . - __arm720_proc_info | ||
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S index 6f9d12effee..4506be3adda 100644 --- a/arch/arm/mm/proc-arm740.S +++ b/arch/arm/mm/proc-arm740.S | |||
@@ -17,6 +17,8 @@ | |||
17 | #include <asm/pgtable.h> | 17 | #include <asm/pgtable.h> |
18 | #include <asm/ptrace.h> | 18 | #include <asm/ptrace.h> |
19 | 19 | ||
20 | #include "proc-macros.S" | ||
21 | |||
20 | .text | 22 | .text |
21 | /* | 23 | /* |
22 | * cpu_arm740_proc_init() | 24 | * cpu_arm740_proc_init() |
@@ -115,42 +117,14 @@ __arm740_setup: | |||
115 | 117 | ||
116 | __INITDATA | 118 | __INITDATA |
117 | 119 | ||
118 | /* | 120 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) |
119 | * Purpose : Function pointers used to access above functions - all calls | 121 | define_processor_functions arm740, dabort=v4t_late_abort, pabort=legacy_pabort, nommu=1 |
120 | * come through these | ||
121 | */ | ||
122 | .type arm740_processor_functions, #object | ||
123 | ENTRY(arm740_processor_functions) | ||
124 | .word v4t_late_abort | ||
125 | .word legacy_pabort | ||
126 | .word cpu_arm740_proc_init | ||
127 | .word cpu_arm740_proc_fin | ||
128 | .word cpu_arm740_reset | ||
129 | .word cpu_arm740_do_idle | ||
130 | .word cpu_arm740_dcache_clean_area | ||
131 | .word cpu_arm740_switch_mm | ||
132 | .word 0 @ cpu_*_set_pte | ||
133 | .word 0 | ||
134 | .word 0 | ||
135 | .word 0 | ||
136 | .size arm740_processor_functions, . - arm740_processor_functions | ||
137 | 122 | ||
138 | .section ".rodata" | 123 | .section ".rodata" |
139 | 124 | ||
140 | .type cpu_arch_name, #object | 125 | string cpu_arch_name, "armv4" |
141 | cpu_arch_name: | 126 | string cpu_elf_name, "v4" |
142 | .asciz "armv4" | 127 | string cpu_arm740_name, "ARM740T" |
143 | .size cpu_arch_name, . - cpu_arch_name | ||
144 | |||
145 | .type cpu_elf_name, #object | ||
146 | cpu_elf_name: | ||
147 | .asciz "v4" | ||
148 | .size cpu_elf_name, . - cpu_elf_name | ||
149 | |||
150 | .type cpu_arm740_name, #object | ||
151 | cpu_arm740_name: | ||
152 | .ascii "ARM740T" | ||
153 | .size cpu_arm740_name, . - cpu_arm740_name | ||
154 | 128 | ||
155 | .align | 129 | .align |
156 | 130 | ||
@@ -170,5 +144,3 @@ __arm740_proc_info: | |||
170 | .long 0 | 144 | .long 0 |
171 | .long v3_cache_fns @ cache model | 145 | .long v3_cache_fns @ cache model |
172 | .size __arm740_proc_info, . - __arm740_proc_info | 146 | .size __arm740_proc_info, . - __arm740_proc_info |
173 | |||
174 | |||
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S index 537ffcb0646..7e0e1fe4ed4 100644 --- a/arch/arm/mm/proc-arm7tdmi.S +++ b/arch/arm/mm/proc-arm7tdmi.S | |||
@@ -17,6 +17,8 @@ | |||
17 | #include <asm/pgtable.h> | 17 | #include <asm/pgtable.h> |
18 | #include <asm/ptrace.h> | 18 | #include <asm/ptrace.h> |
19 | 19 | ||
20 | #include "proc-macros.S" | ||
21 | |||
20 | .text | 22 | .text |
21 | /* | 23 | /* |
22 | * cpu_arm7tdmi_proc_init() | 24 | * cpu_arm7tdmi_proc_init() |
@@ -55,197 +57,57 @@ __arm7tdmi_setup: | |||
55 | 57 | ||
56 | __INITDATA | 58 | __INITDATA |
57 | 59 | ||
58 | /* | 60 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) |
59 | * Purpose : Function pointers used to access above functions - all calls | 61 | define_processor_functions arm7tdmi, dabort=v4t_late_abort, pabort=legacy_pabort, nommu=1 |
60 | * come through these | ||
61 | */ | ||
62 | .type arm7tdmi_processor_functions, #object | ||
63 | ENTRY(arm7tdmi_processor_functions) | ||
64 | .word v4t_late_abort | ||
65 | .word legacy_pabort | ||
66 | .word cpu_arm7tdmi_proc_init | ||
67 | .word cpu_arm7tdmi_proc_fin | ||
68 | .word cpu_arm7tdmi_reset | ||
69 | .word cpu_arm7tdmi_do_idle | ||
70 | .word cpu_arm7tdmi_dcache_clean_area | ||
71 | .word cpu_arm7tdmi_switch_mm | ||
72 | .word 0 @ cpu_*_set_pte | ||
73 | .word 0 | ||
74 | .word 0 | ||
75 | .word 0 | ||
76 | .size arm7tdmi_processor_functions, . - arm7tdmi_processor_functions | ||
77 | 62 | ||
78 | .section ".rodata" | 63 | .section ".rodata" |
79 | 64 | ||
80 | .type cpu_arch_name, #object | 65 | string cpu_arch_name, "armv4t" |
81 | cpu_arch_name: | 66 | string cpu_elf_name, "v4" |
82 | .asciz "armv4t" | 67 | string cpu_arm7tdmi_name, "ARM7TDMI" |
83 | .size cpu_arch_name, . - cpu_arch_name | 68 | string cpu_triscenda7_name, "Triscend-A7x" |
84 | 69 | string cpu_at91_name, "Atmel-AT91M40xxx" | |
85 | .type cpu_elf_name, #object | 70 | string cpu_s3c3410_name, "Samsung-S3C3410" |
86 | cpu_elf_name: | 71 | string cpu_s3c44b0x_name, "Samsung-S3C44B0x" |
87 | .asciz "v4" | 72 | string cpu_s3c4510b_name, "Samsung-S3C4510B" |
88 | .size cpu_elf_name, . - cpu_elf_name | 73 | string cpu_s3c4530_name, "Samsung-S3C4530" |
89 | 74 | string cpu_netarm_name, "NETARM" | |
90 | .type cpu_arm7tdmi_name, #object | ||
91 | cpu_arm7tdmi_name: | ||
92 | .asciz "ARM7TDMI" | ||
93 | .size cpu_arm7tdmi_name, . - cpu_arm7tdmi_name | ||
94 | |||
95 | .type cpu_triscenda7_name, #object | ||
96 | cpu_triscenda7_name: | ||
97 | .asciz "Triscend-A7x" | ||
98 | .size cpu_triscenda7_name, . - cpu_triscenda7_name | ||
99 | |||
100 | .type cpu_at91_name, #object | ||
101 | cpu_at91_name: | ||
102 | .asciz "Atmel-AT91M40xxx" | ||
103 | .size cpu_at91_name, . - cpu_at91_name | ||
104 | |||
105 | .type cpu_s3c3410_name, #object | ||
106 | cpu_s3c3410_name: | ||
107 | .asciz "Samsung-S3C3410" | ||
108 | .size cpu_s3c3410_name, . - cpu_s3c3410_name | ||
109 | |||
110 | .type cpu_s3c44b0x_name, #object | ||
111 | cpu_s3c44b0x_name: | ||
112 | .asciz "Samsung-S3C44B0x" | ||
113 | .size cpu_s3c44b0x_name, . - cpu_s3c44b0x_name | ||
114 | |||
115 | .type cpu_s3c4510b, #object | ||
116 | cpu_s3c4510b_name: | ||
117 | .asciz "Samsung-S3C4510B" | ||
118 | .size cpu_s3c4510b_name, . - cpu_s3c4510b_name | ||
119 | |||
120 | .type cpu_s3c4530_name, #object | ||
121 | cpu_s3c4530_name: | ||
122 | .asciz "Samsung-S3C4530" | ||
123 | .size cpu_s3c4530_name, . - cpu_s3c4530_name | ||
124 | |||
125 | .type cpu_netarm_name, #object | ||
126 | cpu_netarm_name: | ||
127 | .asciz "NETARM" | ||
128 | .size cpu_netarm_name, . - cpu_netarm_name | ||
129 | 75 | ||
130 | .align | 76 | .align |
131 | 77 | ||
132 | .section ".proc.info.init", #alloc, #execinstr | 78 | .section ".proc.info.init", #alloc, #execinstr |
133 | 79 | ||
134 | .type __arm7tdmi_proc_info, #object | 80 | .macro arm7tdmi_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, \ |
135 | __arm7tdmi_proc_info: | 81 | extra_hwcaps=0 |
136 | .long 0x41007700 | 82 | .type __\name\()_proc_info, #object |
137 | .long 0xfff8ff00 | 83 | __\name\()_proc_info: |
138 | .long 0 | 84 | .long \cpu_val |
139 | .long 0 | 85 | .long \cpu_mask |
140 | b __arm7tdmi_setup | ||
141 | .long cpu_arch_name | ||
142 | .long cpu_elf_name | ||
143 | .long HWCAP_SWP | HWCAP_26BIT | ||
144 | .long cpu_arm7tdmi_name | ||
145 | .long arm7tdmi_processor_functions | ||
146 | .long 0 | ||
147 | .long 0 | ||
148 | .long v4_cache_fns | ||
149 | .size __arm7tdmi_proc_info, . - __arm7tdmi_proc_info | ||
150 | |||
151 | .type __triscenda7_proc_info, #object | ||
152 | __triscenda7_proc_info: | ||
153 | .long 0x0001d2ff | ||
154 | .long 0x0001ffff | ||
155 | .long 0 | ||
156 | .long 0 | ||
157 | b __arm7tdmi_setup | ||
158 | .long cpu_arch_name | ||
159 | .long cpu_elf_name | ||
160 | .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT | ||
161 | .long cpu_triscenda7_name | ||
162 | .long arm7tdmi_processor_functions | ||
163 | .long 0 | ||
164 | .long 0 | ||
165 | .long v4_cache_fns | ||
166 | .size __triscenda7_proc_info, . - __triscenda7_proc_info | ||
167 | |||
168 | .type __at91_proc_info, #object | ||
169 | __at91_proc_info: | ||
170 | .long 0x14000040 | ||
171 | .long 0xfff000e0 | ||
172 | .long 0 | ||
173 | .long 0 | ||
174 | b __arm7tdmi_setup | ||
175 | .long cpu_arch_name | ||
176 | .long cpu_elf_name | ||
177 | .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT | ||
178 | .long cpu_at91_name | ||
179 | .long arm7tdmi_processor_functions | ||
180 | .long 0 | ||
181 | .long 0 | ||
182 | .long v4_cache_fns | ||
183 | .size __at91_proc_info, . - __at91_proc_info | ||
184 | |||
185 | .type __s3c4510b_proc_info, #object | ||
186 | __s3c4510b_proc_info: | ||
187 | .long 0x36365000 | ||
188 | .long 0xfffff000 | ||
189 | .long 0 | ||
190 | .long 0 | ||
191 | b __arm7tdmi_setup | ||
192 | .long cpu_arch_name | ||
193 | .long cpu_elf_name | ||
194 | .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT | ||
195 | .long cpu_s3c4510b_name | ||
196 | .long arm7tdmi_processor_functions | ||
197 | .long 0 | ||
198 | .long 0 | ||
199 | .long v4_cache_fns | ||
200 | .size __s3c4510b_proc_info, . - __s3c4510b_proc_info | ||
201 | |||
202 | .type __s3c4530_proc_info, #object | ||
203 | __s3c4530_proc_info: | ||
204 | .long 0x4c000000 | ||
205 | .long 0xfff000e0 | ||
206 | .long 0 | ||
207 | .long 0 | ||
208 | b __arm7tdmi_setup | ||
209 | .long cpu_arch_name | ||
210 | .long cpu_elf_name | ||
211 | .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT | ||
212 | .long cpu_s3c4530_name | ||
213 | .long arm7tdmi_processor_functions | ||
214 | .long 0 | ||
215 | .long 0 | ||
216 | .long v4_cache_fns | ||
217 | .size __s3c4530_proc_info, . - __s3c4530_proc_info | ||
218 | |||
219 | .type __s3c3410_proc_info, #object | ||
220 | __s3c3410_proc_info: | ||
221 | .long 0x34100000 | ||
222 | .long 0xffff0000 | ||
223 | .long 0 | ||
224 | .long 0 | ||
225 | b __arm7tdmi_setup | ||
226 | .long cpu_arch_name | ||
227 | .long cpu_elf_name | ||
228 | .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT | ||
229 | .long cpu_s3c3410_name | ||
230 | .long arm7tdmi_processor_functions | ||
231 | .long 0 | ||
232 | .long 0 | ||
233 | .long v4_cache_fns | ||
234 | .size __s3c3410_proc_info, . - __s3c3410_proc_info | ||
235 | |||
236 | .type __s3c44b0x_proc_info, #object | ||
237 | __s3c44b0x_proc_info: | ||
238 | .long 0x44b00000 | ||
239 | .long 0xffff0000 | ||
240 | .long 0 | 86 | .long 0 |
241 | .long 0 | 87 | .long 0 |
242 | b __arm7tdmi_setup | 88 | b __arm7tdmi_setup |
243 | .long cpu_arch_name | 89 | .long cpu_arch_name |
244 | .long cpu_elf_name | 90 | .long cpu_elf_name |
245 | .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT | 91 | .long HWCAP_SWP | HWCAP_26BIT | ( \extra_hwcaps ) |
246 | .long cpu_s3c44b0x_name | 92 | .long \cpu_name |
247 | .long arm7tdmi_processor_functions | 93 | .long arm7tdmi_processor_functions |
248 | .long 0 | 94 | .long 0 |
249 | .long 0 | 95 | .long 0 |
250 | .long v4_cache_fns | 96 | .long v4_cache_fns |
251 | .size __s3c44b0x_proc_info, . - __s3c44b0x_proc_info | 97 | .size __\name\()_proc_info, . - __\name\()_proc_info |
98 | .endm | ||
99 | |||
100 | arm7tdmi_proc_info arm7tdmi, 0x41007700, 0xfff8ff00, \ | ||
101 | cpu_arm7tdmi_name | ||
102 | arm7tdmi_proc_info triscenda7, 0x0001d2ff, 0x0001ffff, \ | ||
103 | cpu_triscenda7_name, extra_hwcaps=HWCAP_THUMB | ||
104 | arm7tdmi_proc_info at91, 0x14000040, 0xfff000e0, \ | ||
105 | cpu_at91_name, extra_hwcaps=HWCAP_THUMB | ||
106 | arm7tdmi_proc_info s3c4510b, 0x36365000, 0xfffff000, \ | ||
107 | cpu_s3c4510b_name, extra_hwcaps=HWCAP_THUMB | ||
108 | arm7tdmi_proc_info s3c4530, 0x4c000000, 0xfff000e0, \ | ||
109 | cpu_s3c4530_name, extra_hwcaps=HWCAP_THUMB | ||
110 | arm7tdmi_proc_info s3c3410, 0x34100000, 0xffff0000, \ | ||
111 | cpu_s3c3410_name, extra_hwcaps=HWCAP_THUMB | ||
112 | arm7tdmi_proc_info s3c44b0x, 0x44b00000, 0xffff0000, \ | ||
113 | cpu_s3c44b0x_name, extra_hwcaps=HWCAP_THUMB | ||
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S index bf8a1d1cccb..2e6849b41f6 100644 --- a/arch/arm/mm/proc-arm920.S +++ b/arch/arm/mm/proc-arm920.S | |||
@@ -315,18 +315,8 @@ ENTRY(arm920_dma_unmap_area) | |||
315 | mov pc, lr | 315 | mov pc, lr |
316 | ENDPROC(arm920_dma_unmap_area) | 316 | ENDPROC(arm920_dma_unmap_area) |
317 | 317 | ||
318 | ENTRY(arm920_cache_fns) | 318 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
319 | .long arm920_flush_icache_all | 319 | define_cache_functions arm920 |
320 | .long arm920_flush_kern_cache_all | ||
321 | .long arm920_flush_user_cache_all | ||
322 | .long arm920_flush_user_cache_range | ||
323 | .long arm920_coherent_kern_range | ||
324 | .long arm920_coherent_user_range | ||
325 | .long arm920_flush_kern_dcache_area | ||
326 | .long arm920_dma_map_area | ||
327 | .long arm920_dma_unmap_area | ||
328 | .long arm920_dma_flush_range | ||
329 | |||
330 | #endif | 320 | #endif |
331 | 321 | ||
332 | 322 | ||
@@ -389,7 +379,7 @@ ENTRY(cpu_arm920_set_pte_ext) | |||
389 | 379 | ||
390 | /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ | 380 | /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ |
391 | .globl cpu_arm920_suspend_size | 381 | .globl cpu_arm920_suspend_size |
392 | .equ cpu_arm920_suspend_size, 4 * 3 | 382 | .equ cpu_arm920_suspend_size, 4 * 4 |
393 | #ifdef CONFIG_PM_SLEEP | 383 | #ifdef CONFIG_PM_SLEEP |
394 | ENTRY(cpu_arm920_do_suspend) | 384 | ENTRY(cpu_arm920_do_suspend) |
395 | stmfd sp!, {r4 - r7, lr} | 385 | stmfd sp!, {r4 - r7, lr} |
@@ -416,9 +406,6 @@ ENTRY(cpu_arm920_do_resume) | |||
416 | PMD_SECT_CACHEABLE | PMD_BIT4 | PMD_SECT_AP_WRITE | 406 | PMD_SECT_CACHEABLE | PMD_BIT4 | PMD_SECT_AP_WRITE |
417 | b cpu_resume_mmu | 407 | b cpu_resume_mmu |
418 | ENDPROC(cpu_arm920_do_resume) | 408 | ENDPROC(cpu_arm920_do_resume) |
419 | #else | ||
420 | #define cpu_arm920_do_suspend 0 | ||
421 | #define cpu_arm920_do_resume 0 | ||
422 | #endif | 409 | #endif |
423 | 410 | ||
424 | __CPUINIT | 411 | __CPUINIT |
@@ -450,43 +437,14 @@ arm920_crval: | |||
450 | crval clear=0x00003f3f, mmuset=0x00003135, ucset=0x00001130 | 437 | crval clear=0x00003f3f, mmuset=0x00003135, ucset=0x00001130 |
451 | 438 | ||
452 | __INITDATA | 439 | __INITDATA |
453 | 440 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) | |
454 | /* | 441 | define_processor_functions arm920, dabort=v4t_early_abort, pabort=legacy_pabort, suspend=1 |
455 | * Purpose : Function pointers used to access above functions - all calls | ||
456 | * come through these | ||
457 | */ | ||
458 | .type arm920_processor_functions, #object | ||
459 | arm920_processor_functions: | ||
460 | .word v4t_early_abort | ||
461 | .word legacy_pabort | ||
462 | .word cpu_arm920_proc_init | ||
463 | .word cpu_arm920_proc_fin | ||
464 | .word cpu_arm920_reset | ||
465 | .word cpu_arm920_do_idle | ||
466 | .word cpu_arm920_dcache_clean_area | ||
467 | .word cpu_arm920_switch_mm | ||
468 | .word cpu_arm920_set_pte_ext | ||
469 | .word cpu_arm920_suspend_size | ||
470 | .word cpu_arm920_do_suspend | ||
471 | .word cpu_arm920_do_resume | ||
472 | .size arm920_processor_functions, . - arm920_processor_functions | ||
473 | 442 | ||
474 | .section ".rodata" | 443 | .section ".rodata" |
475 | 444 | ||
476 | .type cpu_arch_name, #object | 445 | string cpu_arch_name, "armv4t" |
477 | cpu_arch_name: | 446 | string cpu_elf_name, "v4" |
478 | .asciz "armv4t" | 447 | string cpu_arm920_name, "ARM920T" |
479 | .size cpu_arch_name, . - cpu_arch_name | ||
480 | |||
481 | .type cpu_elf_name, #object | ||
482 | cpu_elf_name: | ||
483 | .asciz "v4" | ||
484 | .size cpu_elf_name, . - cpu_elf_name | ||
485 | |||
486 | .type cpu_arm920_name, #object | ||
487 | cpu_arm920_name: | ||
488 | .asciz "ARM920T" | ||
489 | .size cpu_arm920_name, . - cpu_arm920_name | ||
490 | 448 | ||
491 | .align | 449 | .align |
492 | 450 | ||
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S index 95ba1fc56e4..490e1883385 100644 --- a/arch/arm/mm/proc-arm922.S +++ b/arch/arm/mm/proc-arm922.S | |||
@@ -317,18 +317,8 @@ ENTRY(arm922_dma_unmap_area) | |||
317 | mov pc, lr | 317 | mov pc, lr |
318 | ENDPROC(arm922_dma_unmap_area) | 318 | ENDPROC(arm922_dma_unmap_area) |
319 | 319 | ||
320 | ENTRY(arm922_cache_fns) | 320 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
321 | .long arm922_flush_icache_all | 321 | define_cache_functions arm922 |
322 | .long arm922_flush_kern_cache_all | ||
323 | .long arm922_flush_user_cache_all | ||
324 | .long arm922_flush_user_cache_range | ||
325 | .long arm922_coherent_kern_range | ||
326 | .long arm922_coherent_user_range | ||
327 | .long arm922_flush_kern_dcache_area | ||
328 | .long arm922_dma_map_area | ||
329 | .long arm922_dma_unmap_area | ||
330 | .long arm922_dma_flush_range | ||
331 | |||
332 | #endif | 322 | #endif |
333 | 323 | ||
334 | 324 | ||
@@ -420,43 +410,14 @@ arm922_crval: | |||
420 | crval clear=0x00003f3f, mmuset=0x00003135, ucset=0x00001130 | 410 | crval clear=0x00003f3f, mmuset=0x00003135, ucset=0x00001130 |
421 | 411 | ||
422 | __INITDATA | 412 | __INITDATA |
423 | 413 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) | |
424 | /* | 414 | define_processor_functions arm922, dabort=v4t_early_abort, pabort=legacy_pabort |
425 | * Purpose : Function pointers used to access above functions - all calls | ||
426 | * come through these | ||
427 | */ | ||
428 | .type arm922_processor_functions, #object | ||
429 | arm922_processor_functions: | ||
430 | .word v4t_early_abort | ||
431 | .word legacy_pabort | ||
432 | .word cpu_arm922_proc_init | ||
433 | .word cpu_arm922_proc_fin | ||
434 | .word cpu_arm922_reset | ||
435 | .word cpu_arm922_do_idle | ||
436 | .word cpu_arm922_dcache_clean_area | ||
437 | .word cpu_arm922_switch_mm | ||
438 | .word cpu_arm922_set_pte_ext | ||
439 | .word 0 | ||
440 | .word 0 | ||
441 | .word 0 | ||
442 | .size arm922_processor_functions, . - arm922_processor_functions | ||
443 | 415 | ||
444 | .section ".rodata" | 416 | .section ".rodata" |
445 | 417 | ||
446 | .type cpu_arch_name, #object | 418 | string cpu_arch_name, "armv4t" |
447 | cpu_arch_name: | 419 | string cpu_elf_name, "v4" |
448 | .asciz "armv4t" | 420 | string cpu_arm922_name, "ARM922T" |
449 | .size cpu_arch_name, . - cpu_arch_name | ||
450 | |||
451 | .type cpu_elf_name, #object | ||
452 | cpu_elf_name: | ||
453 | .asciz "v4" | ||
454 | .size cpu_elf_name, . - cpu_elf_name | ||
455 | |||
456 | .type cpu_arm922_name, #object | ||
457 | cpu_arm922_name: | ||
458 | .asciz "ARM922T" | ||
459 | .size cpu_arm922_name, . - cpu_arm922_name | ||
460 | 421 | ||
461 | .align | 422 | .align |
462 | 423 | ||
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S index 541e4774eea..51d494be057 100644 --- a/arch/arm/mm/proc-arm925.S +++ b/arch/arm/mm/proc-arm925.S | |||
@@ -372,17 +372,8 @@ ENTRY(arm925_dma_unmap_area) | |||
372 | mov pc, lr | 372 | mov pc, lr |
373 | ENDPROC(arm925_dma_unmap_area) | 373 | ENDPROC(arm925_dma_unmap_area) |
374 | 374 | ||
375 | ENTRY(arm925_cache_fns) | 375 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
376 | .long arm925_flush_icache_all | 376 | define_cache_functions arm925 |
377 | .long arm925_flush_kern_cache_all | ||
378 | .long arm925_flush_user_cache_all | ||
379 | .long arm925_flush_user_cache_range | ||
380 | .long arm925_coherent_kern_range | ||
381 | .long arm925_coherent_user_range | ||
382 | .long arm925_flush_kern_dcache_area | ||
383 | .long arm925_dma_map_area | ||
384 | .long arm925_dma_unmap_area | ||
385 | .long arm925_dma_flush_range | ||
386 | 377 | ||
387 | ENTRY(cpu_arm925_dcache_clean_area) | 378 | ENTRY(cpu_arm925_dcache_clean_area) |
388 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH | 379 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH |
@@ -487,52 +478,24 @@ arm925_crval: | |||
487 | crval clear=0x00007f3f, mmuset=0x0000313d, ucset=0x00001130 | 478 | crval clear=0x00007f3f, mmuset=0x0000313d, ucset=0x00001130 |
488 | 479 | ||
489 | __INITDATA | 480 | __INITDATA |
490 | 481 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) | |
491 | /* | 482 | define_processor_functions arm925, dabort=v4t_early_abort, pabort=legacy_pabort |
492 | * Purpose : Function pointers used to access above functions - all calls | ||
493 | * come through these | ||
494 | */ | ||
495 | .type arm925_processor_functions, #object | ||
496 | arm925_processor_functions: | ||
497 | .word v4t_early_abort | ||
498 | .word legacy_pabort | ||
499 | .word cpu_arm925_proc_init | ||
500 | .word cpu_arm925_proc_fin | ||
501 | .word cpu_arm925_reset | ||
502 | .word cpu_arm925_do_idle | ||
503 | .word cpu_arm925_dcache_clean_area | ||
504 | .word cpu_arm925_switch_mm | ||
505 | .word cpu_arm925_set_pte_ext | ||
506 | .word 0 | ||
507 | .word 0 | ||
508 | .word 0 | ||
509 | .size arm925_processor_functions, . - arm925_processor_functions | ||
510 | 483 | ||
511 | .section ".rodata" | 484 | .section ".rodata" |
512 | 485 | ||
513 | .type cpu_arch_name, #object | 486 | string cpu_arch_name, "armv4t" |
514 | cpu_arch_name: | 487 | string cpu_elf_name, "v4" |
515 | .asciz "armv4t" | 488 | string cpu_arm925_name, "ARM925T" |
516 | .size cpu_arch_name, . - cpu_arch_name | ||
517 | |||
518 | .type cpu_elf_name, #object | ||
519 | cpu_elf_name: | ||
520 | .asciz "v4" | ||
521 | .size cpu_elf_name, . - cpu_elf_name | ||
522 | |||
523 | .type cpu_arm925_name, #object | ||
524 | cpu_arm925_name: | ||
525 | .asciz "ARM925T" | ||
526 | .size cpu_arm925_name, . - cpu_arm925_name | ||
527 | 489 | ||
528 | .align | 490 | .align |
529 | 491 | ||
530 | .section ".proc.info.init", #alloc, #execinstr | 492 | .section ".proc.info.init", #alloc, #execinstr |
531 | 493 | ||
532 | .type __arm925_proc_info,#object | 494 | .macro arm925_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache |
533 | __arm925_proc_info: | 495 | .type __\name\()_proc_info,#object |
534 | .long 0x54029250 | 496 | __\name\()_proc_info: |
535 | .long 0xfffffff0 | 497 | .long \cpu_val |
498 | .long \cpu_mask | ||
536 | .long PMD_TYPE_SECT | \ | 499 | .long PMD_TYPE_SECT | \ |
537 | PMD_BIT4 | \ | 500 | PMD_BIT4 | \ |
538 | PMD_SECT_AP_WRITE | \ | 501 | PMD_SECT_AP_WRITE | \ |
@@ -550,27 +513,8 @@ __arm925_proc_info: | |||
550 | .long v4wbi_tlb_fns | 513 | .long v4wbi_tlb_fns |
551 | .long v4wb_user_fns | 514 | .long v4wb_user_fns |
552 | .long arm925_cache_fns | 515 | .long arm925_cache_fns |
553 | .size __arm925_proc_info, . - __arm925_proc_info | 516 | .size __\name\()_proc_info, . - __\name\()_proc_info |
517 | .endm | ||
554 | 518 | ||
555 | .type __arm915_proc_info,#object | 519 | arm925_proc_info arm925, 0x54029250, 0xfffffff0, cpu_arm925_name |
556 | __arm915_proc_info: | 520 | arm925_proc_info arm915, 0x54029150, 0xfffffff0, cpu_arm925_name |
557 | .long 0x54029150 | ||
558 | .long 0xfffffff0 | ||
559 | .long PMD_TYPE_SECT | \ | ||
560 | PMD_BIT4 | \ | ||
561 | PMD_SECT_AP_WRITE | \ | ||
562 | PMD_SECT_AP_READ | ||
563 | .long PMD_TYPE_SECT | \ | ||
564 | PMD_BIT4 | \ | ||
565 | PMD_SECT_AP_WRITE | \ | ||
566 | PMD_SECT_AP_READ | ||
567 | b __arm925_setup | ||
568 | .long cpu_arch_name | ||
569 | .long cpu_elf_name | ||
570 | .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | ||
571 | .long cpu_arm925_name | ||
572 | .long arm925_processor_functions | ||
573 | .long v4wbi_tlb_fns | ||
574 | .long v4wb_user_fns | ||
575 | .long arm925_cache_fns | ||
576 | .size __arm925_proc_info, . - __arm925_proc_info | ||
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S index 0ed85d930c0..cd8f79c3a28 100644 --- a/arch/arm/mm/proc-arm926.S +++ b/arch/arm/mm/proc-arm926.S | |||
@@ -335,17 +335,8 @@ ENTRY(arm926_dma_unmap_area) | |||
335 | mov pc, lr | 335 | mov pc, lr |
336 | ENDPROC(arm926_dma_unmap_area) | 336 | ENDPROC(arm926_dma_unmap_area) |
337 | 337 | ||
338 | ENTRY(arm926_cache_fns) | 338 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
339 | .long arm926_flush_icache_all | 339 | define_cache_functions arm926 |
340 | .long arm926_flush_kern_cache_all | ||
341 | .long arm926_flush_user_cache_all | ||
342 | .long arm926_flush_user_cache_range | ||
343 | .long arm926_coherent_kern_range | ||
344 | .long arm926_coherent_user_range | ||
345 | .long arm926_flush_kern_dcache_area | ||
346 | .long arm926_dma_map_area | ||
347 | .long arm926_dma_unmap_area | ||
348 | .long arm926_dma_flush_range | ||
349 | 340 | ||
350 | ENTRY(cpu_arm926_dcache_clean_area) | 341 | ENTRY(cpu_arm926_dcache_clean_area) |
351 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH | 342 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH |
@@ -403,7 +394,7 @@ ENTRY(cpu_arm926_set_pte_ext) | |||
403 | 394 | ||
404 | /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ | 395 | /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ |
405 | .globl cpu_arm926_suspend_size | 396 | .globl cpu_arm926_suspend_size |
406 | .equ cpu_arm926_suspend_size, 4 * 3 | 397 | .equ cpu_arm926_suspend_size, 4 * 4 |
407 | #ifdef CONFIG_PM_SLEEP | 398 | #ifdef CONFIG_PM_SLEEP |
408 | ENTRY(cpu_arm926_do_suspend) | 399 | ENTRY(cpu_arm926_do_suspend) |
409 | stmfd sp!, {r4 - r7, lr} | 400 | stmfd sp!, {r4 - r7, lr} |
@@ -430,9 +421,6 @@ ENTRY(cpu_arm926_do_resume) | |||
430 | PMD_SECT_CACHEABLE | PMD_BIT4 | PMD_SECT_AP_WRITE | 421 | PMD_SECT_CACHEABLE | PMD_BIT4 | PMD_SECT_AP_WRITE |
431 | b cpu_resume_mmu | 422 | b cpu_resume_mmu |
432 | ENDPROC(cpu_arm926_do_resume) | 423 | ENDPROC(cpu_arm926_do_resume) |
433 | #else | ||
434 | #define cpu_arm926_do_suspend 0 | ||
435 | #define cpu_arm926_do_resume 0 | ||
436 | #endif | 424 | #endif |
437 | 425 | ||
438 | __CPUINIT | 426 | __CPUINIT |
@@ -475,42 +463,14 @@ arm926_crval: | |||
475 | 463 | ||
476 | __INITDATA | 464 | __INITDATA |
477 | 465 | ||
478 | /* | 466 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) |
479 | * Purpose : Function pointers used to access above functions - all calls | 467 | define_processor_functions arm926, dabort=v5tj_early_abort, pabort=legacy_pabort, suspend=1 |
480 | * come through these | ||
481 | */ | ||
482 | .type arm926_processor_functions, #object | ||
483 | arm926_processor_functions: | ||
484 | .word v5tj_early_abort | ||
485 | .word legacy_pabort | ||
486 | .word cpu_arm926_proc_init | ||
487 | .word cpu_arm926_proc_fin | ||
488 | .word cpu_arm926_reset | ||
489 | .word cpu_arm926_do_idle | ||
490 | .word cpu_arm926_dcache_clean_area | ||
491 | .word cpu_arm926_switch_mm | ||
492 | .word cpu_arm926_set_pte_ext | ||
493 | .word cpu_arm926_suspend_size | ||
494 | .word cpu_arm926_do_suspend | ||
495 | .word cpu_arm926_do_resume | ||
496 | .size arm926_processor_functions, . - arm926_processor_functions | ||
497 | 468 | ||
498 | .section ".rodata" | 469 | .section ".rodata" |
499 | 470 | ||
500 | .type cpu_arch_name, #object | 471 | string cpu_arch_name, "armv5tej" |
501 | cpu_arch_name: | 472 | string cpu_elf_name, "v5" |
502 | .asciz "armv5tej" | 473 | string cpu_arm926_name, "ARM926EJ-S" |
503 | .size cpu_arch_name, . - cpu_arch_name | ||
504 | |||
505 | .type cpu_elf_name, #object | ||
506 | cpu_elf_name: | ||
507 | .asciz "v5" | ||
508 | .size cpu_elf_name, . - cpu_elf_name | ||
509 | |||
510 | .type cpu_arm926_name, #object | ||
511 | cpu_arm926_name: | ||
512 | .asciz "ARM926EJ-S" | ||
513 | .size cpu_arm926_name, . - cpu_arm926_name | ||
514 | 474 | ||
515 | .align | 475 | .align |
516 | 476 | ||
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S index 26aea3f71c2..ac750d50615 100644 --- a/arch/arm/mm/proc-arm940.S +++ b/arch/arm/mm/proc-arm940.S | |||
@@ -264,17 +264,8 @@ ENTRY(arm940_dma_unmap_area) | |||
264 | mov pc, lr | 264 | mov pc, lr |
265 | ENDPROC(arm940_dma_unmap_area) | 265 | ENDPROC(arm940_dma_unmap_area) |
266 | 266 | ||
267 | ENTRY(arm940_cache_fns) | 267 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
268 | .long arm940_flush_icache_all | 268 | define_cache_functions arm940 |
269 | .long arm940_flush_kern_cache_all | ||
270 | .long arm940_flush_user_cache_all | ||
271 | .long arm940_flush_user_cache_range | ||
272 | .long arm940_coherent_kern_range | ||
273 | .long arm940_coherent_user_range | ||
274 | .long arm940_flush_kern_dcache_area | ||
275 | .long arm940_dma_map_area | ||
276 | .long arm940_dma_unmap_area | ||
277 | .long arm940_dma_flush_range | ||
278 | 269 | ||
279 | __CPUINIT | 270 | __CPUINIT |
280 | 271 | ||
@@ -348,42 +339,14 @@ __arm940_setup: | |||
348 | 339 | ||
349 | __INITDATA | 340 | __INITDATA |
350 | 341 | ||
351 | /* | 342 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) |
352 | * Purpose : Function pointers used to access above functions - all calls | 343 | define_processor_functions arm940, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1 |
353 | * come through these | ||
354 | */ | ||
355 | .type arm940_processor_functions, #object | ||
356 | ENTRY(arm940_processor_functions) | ||
357 | .word nommu_early_abort | ||
358 | .word legacy_pabort | ||
359 | .word cpu_arm940_proc_init | ||
360 | .word cpu_arm940_proc_fin | ||
361 | .word cpu_arm940_reset | ||
362 | .word cpu_arm940_do_idle | ||
363 | .word cpu_arm940_dcache_clean_area | ||
364 | .word cpu_arm940_switch_mm | ||
365 | .word 0 @ cpu_*_set_pte | ||
366 | .word 0 | ||
367 | .word 0 | ||
368 | .word 0 | ||
369 | .size arm940_processor_functions, . - arm940_processor_functions | ||
370 | 344 | ||
371 | .section ".rodata" | 345 | .section ".rodata" |
372 | 346 | ||
373 | .type cpu_arch_name, #object | 347 | string cpu_arch_name, "armv4t" |
374 | cpu_arch_name: | 348 | string cpu_elf_name, "v4" |
375 | .asciz "armv4t" | 349 | string cpu_arm940_name, "ARM940T" |
376 | .size cpu_arch_name, . - cpu_arch_name | ||
377 | |||
378 | .type cpu_elf_name, #object | ||
379 | cpu_elf_name: | ||
380 | .asciz "v4" | ||
381 | .size cpu_elf_name, . - cpu_elf_name | ||
382 | |||
383 | .type cpu_arm940_name, #object | ||
384 | cpu_arm940_name: | ||
385 | .ascii "ARM940T" | ||
386 | .size cpu_arm940_name, . - cpu_arm940_name | ||
387 | 350 | ||
388 | .align | 351 | .align |
389 | 352 | ||
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S index 8063345406f..683af3a182b 100644 --- a/arch/arm/mm/proc-arm946.S +++ b/arch/arm/mm/proc-arm946.S | |||
@@ -306,18 +306,8 @@ ENTRY(arm946_dma_unmap_area) | |||
306 | mov pc, lr | 306 | mov pc, lr |
307 | ENDPROC(arm946_dma_unmap_area) | 307 | ENDPROC(arm946_dma_unmap_area) |
308 | 308 | ||
309 | ENTRY(arm946_cache_fns) | 309 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
310 | .long arm946_flush_icache_all | 310 | define_cache_functions arm946 |
311 | .long arm946_flush_kern_cache_all | ||
312 | .long arm946_flush_user_cache_all | ||
313 | .long arm946_flush_user_cache_range | ||
314 | .long arm946_coherent_kern_range | ||
315 | .long arm946_coherent_user_range | ||
316 | .long arm946_flush_kern_dcache_area | ||
317 | .long arm946_dma_map_area | ||
318 | .long arm946_dma_unmap_area | ||
319 | .long arm946_dma_flush_range | ||
320 | |||
321 | 311 | ||
322 | ENTRY(cpu_arm946_dcache_clean_area) | 312 | ENTRY(cpu_arm946_dcache_clean_area) |
323 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH | 313 | #ifndef CONFIG_CPU_DCACHE_WRITETHROUGH |
@@ -403,43 +393,14 @@ __arm946_setup: | |||
403 | 393 | ||
404 | __INITDATA | 394 | __INITDATA |
405 | 395 | ||
406 | /* | 396 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) |
407 | * Purpose : Function pointers used to access above functions - all calls | 397 | define_processor_functions arm946, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1 |
408 | * come through these | ||
409 | */ | ||
410 | .type arm946_processor_functions, #object | ||
411 | ENTRY(arm946_processor_functions) | ||
412 | .word nommu_early_abort | ||
413 | .word legacy_pabort | ||
414 | .word cpu_arm946_proc_init | ||
415 | .word cpu_arm946_proc_fin | ||
416 | .word cpu_arm946_reset | ||
417 | .word cpu_arm946_do_idle | ||
418 | |||
419 | .word cpu_arm946_dcache_clean_area | ||
420 | .word cpu_arm946_switch_mm | ||
421 | .word 0 @ cpu_*_set_pte | ||
422 | .word 0 | ||
423 | .word 0 | ||
424 | .word 0 | ||
425 | .size arm946_processor_functions, . - arm946_processor_functions | ||
426 | 398 | ||
427 | .section ".rodata" | 399 | .section ".rodata" |
428 | 400 | ||
429 | .type cpu_arch_name, #object | 401 | string cpu_arch_name, "armv5te" |
430 | cpu_arch_name: | 402 | string cpu_elf_name, "v5t" |
431 | .asciz "armv5te" | 403 | string cpu_arm946_name, "ARM946E-S" |
432 | .size cpu_arch_name, . - cpu_arch_name | ||
433 | |||
434 | .type cpu_elf_name, #object | ||
435 | cpu_elf_name: | ||
436 | .asciz "v5t" | ||
437 | .size cpu_elf_name, . - cpu_elf_name | ||
438 | |||
439 | .type cpu_arm946_name, #object | ||
440 | cpu_arm946_name: | ||
441 | .ascii "ARM946E-S" | ||
442 | .size cpu_arm946_name, . - cpu_arm946_name | ||
443 | 404 | ||
444 | .align | 405 | .align |
445 | 406 | ||
@@ -449,6 +410,7 @@ __arm946_proc_info: | |||
449 | .long 0x41009460 | 410 | .long 0x41009460 |
450 | .long 0xff00fff0 | 411 | .long 0xff00fff0 |
451 | .long 0 | 412 | .long 0 |
413 | .long 0 | ||
452 | b __arm946_setup | 414 | b __arm946_setup |
453 | .long cpu_arch_name | 415 | .long cpu_arch_name |
454 | .long cpu_elf_name | 416 | .long cpu_elf_name |
@@ -457,6 +419,6 @@ __arm946_proc_info: | |||
457 | .long arm946_processor_functions | 419 | .long arm946_processor_functions |
458 | .long 0 | 420 | .long 0 |
459 | .long 0 | 421 | .long 0 |
460 | .long arm940_cache_fns | 422 | .long arm946_cache_fns |
461 | .size __arm946_proc_info, . - __arm946_proc_info | 423 | .size __arm946_proc_info, . - __arm946_proc_info |
462 | 424 | ||
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S index 546b54da100..2120f9e2af7 100644 --- a/arch/arm/mm/proc-arm9tdmi.S +++ b/arch/arm/mm/proc-arm9tdmi.S | |||
@@ -17,6 +17,8 @@ | |||
17 | #include <asm/pgtable.h> | 17 | #include <asm/pgtable.h> |
18 | #include <asm/ptrace.h> | 18 | #include <asm/ptrace.h> |
19 | 19 | ||
20 | #include "proc-macros.S" | ||
21 | |||
20 | .text | 22 | .text |
21 | /* | 23 | /* |
22 | * cpu_arm9tdmi_proc_init() | 24 | * cpu_arm9tdmi_proc_init() |
@@ -55,82 +57,38 @@ __arm9tdmi_setup: | |||
55 | 57 | ||
56 | __INITDATA | 58 | __INITDATA |
57 | 59 | ||
58 | /* | 60 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) |
59 | * Purpose : Function pointers used to access above functions - all calls | 61 | define_processor_functions arm9tdmi, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1 |
60 | * come through these | ||
61 | */ | ||
62 | .type arm9tdmi_processor_functions, #object | ||
63 | ENTRY(arm9tdmi_processor_functions) | ||
64 | .word nommu_early_abort | ||
65 | .word legacy_pabort | ||
66 | .word cpu_arm9tdmi_proc_init | ||
67 | .word cpu_arm9tdmi_proc_fin | ||
68 | .word cpu_arm9tdmi_reset | ||
69 | .word cpu_arm9tdmi_do_idle | ||
70 | .word cpu_arm9tdmi_dcache_clean_area | ||
71 | .word cpu_arm9tdmi_switch_mm | ||
72 | .word 0 @ cpu_*_set_pte | ||
73 | .word 0 | ||
74 | .word 0 | ||
75 | .word 0 | ||
76 | .size arm9tdmi_processor_functions, . - arm9tdmi_processor_functions | ||
77 | 62 | ||
78 | .section ".rodata" | 63 | .section ".rodata" |
79 | 64 | ||
80 | .type cpu_arch_name, #object | 65 | string cpu_arch_name, "armv4t" |
81 | cpu_arch_name: | 66 | string cpu_elf_name, "v4" |
82 | .asciz "armv4t" | 67 | string cpu_arm9tdmi_name, "ARM9TDMI" |
83 | .size cpu_arch_name, . - cpu_arch_name | 68 | string cpu_p2001_name, "P2001" |
84 | |||
85 | .type cpu_elf_name, #object | ||
86 | cpu_elf_name: | ||
87 | .asciz "v4" | ||
88 | .size cpu_elf_name, . - cpu_elf_name | ||
89 | |||
90 | .type cpu_arm9tdmi_name, #object | ||
91 | cpu_arm9tdmi_name: | ||
92 | .asciz "ARM9TDMI" | ||
93 | .size cpu_arm9tdmi_name, . - cpu_arm9tdmi_name | ||
94 | |||
95 | .type cpu_p2001_name, #object | ||
96 | cpu_p2001_name: | ||
97 | .asciz "P2001" | ||
98 | .size cpu_p2001_name, . - cpu_p2001_name | ||
99 | 69 | ||
100 | .align | 70 | .align |
101 | 71 | ||
102 | .section ".proc.info.init", #alloc, #execinstr | 72 | .section ".proc.info.init", #alloc, #execinstr |
103 | 73 | ||
104 | .type __arm9tdmi_proc_info, #object | 74 | .macro arm9tdmi_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req |
105 | __arm9tdmi_proc_info: | 75 | .type __\name\()_proc_info, #object |
106 | .long 0x41009900 | 76 | __\name\()_proc_info: |
107 | .long 0xfff8ff00 | 77 | .long \cpu_val |
78 | .long \cpu_mask | ||
108 | .long 0 | 79 | .long 0 |
109 | .long 0 | 80 | .long 0 |
110 | b __arm9tdmi_setup | 81 | b __arm9tdmi_setup |
111 | .long cpu_arch_name | 82 | .long cpu_arch_name |
112 | .long cpu_elf_name | 83 | .long cpu_elf_name |
113 | .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT | 84 | .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT |
114 | .long cpu_arm9tdmi_name | 85 | .long \cpu_name |
115 | .long arm9tdmi_processor_functions | 86 | .long arm9tdmi_processor_functions |
116 | .long 0 | 87 | .long 0 |
117 | .long 0 | 88 | .long 0 |
118 | .long v4_cache_fns | 89 | .long v4_cache_fns |
119 | .size __arm9tdmi_proc_info, . - __arm9tdmi_proc_info | 90 | .size __\name\()_proc_info, . - __\name\()_proc_info |
91 | .endm | ||
120 | 92 | ||
121 | .type __p2001_proc_info, #object | 93 | arm9tdmi_proc_info arm9tdmi, 0x41009900, 0xfff8ff00, cpu_arm9tdmi_name |
122 | __p2001_proc_info: | 94 | arm9tdmi_proc_info p2001, 0x41029000, 0xffffffff, cpu_p2001_name |
123 | .long 0x41029000 | ||
124 | .long 0xffffffff | ||
125 | .long 0 | ||
126 | .long 0 | ||
127 | b __arm9tdmi_setup | ||
128 | .long cpu_arch_name | ||
129 | .long cpu_elf_name | ||
130 | .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT | ||
131 | .long cpu_p2001_name | ||
132 | .long arm9tdmi_processor_functions | ||
133 | .long 0 | ||
134 | .long 0 | ||
135 | .long v4_cache_fns | ||
136 | .size __p2001_proc_info, . - __p2001_proc_info | ||
diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S index fc2a4ae15cf..4c7a5710472 100644 --- a/arch/arm/mm/proc-fa526.S +++ b/arch/arm/mm/proc-fa526.S | |||
@@ -180,42 +180,14 @@ fa526_cr1_set: | |||
180 | 180 | ||
181 | __INITDATA | 181 | __INITDATA |
182 | 182 | ||
183 | /* | 183 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) |
184 | * Purpose : Function pointers used to access above functions - all calls | 184 | define_processor_functions fa526, dabort=v4_early_abort, pabort=legacy_pabort |
185 | * come through these | ||
186 | */ | ||
187 | .type fa526_processor_functions, #object | ||
188 | fa526_processor_functions: | ||
189 | .word v4_early_abort | ||
190 | .word legacy_pabort | ||
191 | .word cpu_fa526_proc_init | ||
192 | .word cpu_fa526_proc_fin | ||
193 | .word cpu_fa526_reset | ||
194 | .word cpu_fa526_do_idle | ||
195 | .word cpu_fa526_dcache_clean_area | ||
196 | .word cpu_fa526_switch_mm | ||
197 | .word cpu_fa526_set_pte_ext | ||
198 | .word 0 | ||
199 | .word 0 | ||
200 | .word 0 | ||
201 | .size fa526_processor_functions, . - fa526_processor_functions | ||
202 | 185 | ||
203 | .section ".rodata" | 186 | .section ".rodata" |
204 | 187 | ||
205 | .type cpu_arch_name, #object | 188 | string cpu_arch_name, "armv4" |
206 | cpu_arch_name: | 189 | string cpu_elf_name, "v4" |
207 | .asciz "armv4" | 190 | string cpu_fa526_name, "FA526" |
208 | .size cpu_arch_name, . - cpu_arch_name | ||
209 | |||
210 | .type cpu_elf_name, #object | ||
211 | cpu_elf_name: | ||
212 | .asciz "v4" | ||
213 | .size cpu_elf_name, . - cpu_elf_name | ||
214 | |||
215 | .type cpu_fa526_name, #object | ||
216 | cpu_fa526_name: | ||
217 | .asciz "FA526" | ||
218 | .size cpu_fa526_name, . - cpu_fa526_name | ||
219 | 191 | ||
220 | .align | 192 | .align |
221 | 193 | ||
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S index d3883eed7a4..8a6c2f78c1c 100644 --- a/arch/arm/mm/proc-feroceon.S +++ b/arch/arm/mm/proc-feroceon.S | |||
@@ -411,29 +411,28 @@ ENTRY(feroceon_dma_unmap_area) | |||
411 | mov pc, lr | 411 | mov pc, lr |
412 | ENDPROC(feroceon_dma_unmap_area) | 412 | ENDPROC(feroceon_dma_unmap_area) |
413 | 413 | ||
414 | ENTRY(feroceon_cache_fns) | 414 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
415 | .long feroceon_flush_icache_all | 415 | define_cache_functions feroceon |
416 | .long feroceon_flush_kern_cache_all | 416 | |
417 | .long feroceon_flush_user_cache_all | 417 | .macro range_alias basename |
418 | .long feroceon_flush_user_cache_range | 418 | .globl feroceon_range_\basename |
419 | .long feroceon_coherent_kern_range | 419 | .type feroceon_range_\basename , %function |
420 | .long feroceon_coherent_user_range | 420 | .equ feroceon_range_\basename , feroceon_\basename |
421 | .long feroceon_flush_kern_dcache_area | 421 | .endm |
422 | .long feroceon_dma_map_area | 422 | |
423 | .long feroceon_dma_unmap_area | 423 | /* |
424 | .long feroceon_dma_flush_range | 424 | * Most of the cache functions are unchanged for this case. |
425 | 425 | * Export suitable alias symbols for the unchanged functions: | |
426 | ENTRY(feroceon_range_cache_fns) | 426 | */ |
427 | .long feroceon_flush_icache_all | 427 | range_alias flush_icache_all |
428 | .long feroceon_flush_kern_cache_all | 428 | range_alias flush_user_cache_all |
429 | .long feroceon_flush_user_cache_all | 429 | range_alias flush_kern_cache_all |
430 | .long feroceon_flush_user_cache_range | 430 | range_alias flush_user_cache_range |
431 | .long feroceon_coherent_kern_range | 431 | range_alias coherent_kern_range |
432 | .long feroceon_coherent_user_range | 432 | range_alias coherent_user_range |
433 | .long feroceon_range_flush_kern_dcache_area | 433 | range_alias dma_unmap_area |
434 | .long feroceon_range_dma_map_area | 434 | |
435 | .long feroceon_dma_unmap_area | 435 | define_cache_functions feroceon_range |
436 | .long feroceon_range_dma_flush_range | ||
437 | 436 | ||
438 | .align 5 | 437 | .align 5 |
439 | ENTRY(cpu_feroceon_dcache_clean_area) | 438 | ENTRY(cpu_feroceon_dcache_clean_area) |
@@ -539,93 +538,27 @@ feroceon_crval: | |||
539 | 538 | ||
540 | __INITDATA | 539 | __INITDATA |
541 | 540 | ||
542 | /* | 541 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) |
543 | * Purpose : Function pointers used to access above functions - all calls | 542 | define_processor_functions feroceon, dabort=v5t_early_abort, pabort=legacy_pabort |
544 | * come through these | ||
545 | */ | ||
546 | .type feroceon_processor_functions, #object | ||
547 | feroceon_processor_functions: | ||
548 | .word v5t_early_abort | ||
549 | .word legacy_pabort | ||
550 | .word cpu_feroceon_proc_init | ||
551 | .word cpu_feroceon_proc_fin | ||
552 | .word cpu_feroceon_reset | ||
553 | .word cpu_feroceon_do_idle | ||
554 | .word cpu_feroceon_dcache_clean_area | ||
555 | .word cpu_feroceon_switch_mm | ||
556 | .word cpu_feroceon_set_pte_ext | ||
557 | .word 0 | ||
558 | .word 0 | ||
559 | .word 0 | ||
560 | .size feroceon_processor_functions, . - feroceon_processor_functions | ||
561 | 543 | ||
562 | .section ".rodata" | 544 | .section ".rodata" |
563 | 545 | ||
564 | .type cpu_arch_name, #object | 546 | string cpu_arch_name, "armv5te" |
565 | cpu_arch_name: | 547 | string cpu_elf_name, "v5" |
566 | .asciz "armv5te" | 548 | string cpu_feroceon_name, "Feroceon" |
567 | .size cpu_arch_name, . - cpu_arch_name | 549 | string cpu_88fr531_name, "Feroceon 88FR531-vd" |
568 | 550 | string cpu_88fr571_name, "Feroceon 88FR571-vd" | |
569 | .type cpu_elf_name, #object | 551 | string cpu_88fr131_name, "Feroceon 88FR131" |
570 | cpu_elf_name: | ||
571 | .asciz "v5" | ||
572 | .size cpu_elf_name, . - cpu_elf_name | ||
573 | |||
574 | .type cpu_feroceon_name, #object | ||
575 | cpu_feroceon_name: | ||
576 | .asciz "Feroceon" | ||
577 | .size cpu_feroceon_name, . - cpu_feroceon_name | ||
578 | |||
579 | .type cpu_88fr531_name, #object | ||
580 | cpu_88fr531_name: | ||
581 | .asciz "Feroceon 88FR531-vd" | ||
582 | .size cpu_88fr531_name, . - cpu_88fr531_name | ||
583 | |||
584 | .type cpu_88fr571_name, #object | ||
585 | cpu_88fr571_name: | ||
586 | .asciz "Feroceon 88FR571-vd" | ||
587 | .size cpu_88fr571_name, . - cpu_88fr571_name | ||
588 | |||
589 | .type cpu_88fr131_name, #object | ||
590 | cpu_88fr131_name: | ||
591 | .asciz "Feroceon 88FR131" | ||
592 | .size cpu_88fr131_name, . - cpu_88fr131_name | ||
593 | 552 | ||
594 | .align | 553 | .align |
595 | 554 | ||
596 | .section ".proc.info.init", #alloc, #execinstr | 555 | .section ".proc.info.init", #alloc, #execinstr |
597 | 556 | ||
598 | #ifdef CONFIG_CPU_FEROCEON_OLD_ID | 557 | .macro feroceon_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache:req |
599 | .type __feroceon_old_id_proc_info,#object | 558 | .type __\name\()_proc_info,#object |
600 | __feroceon_old_id_proc_info: | 559 | __\name\()_proc_info: |
601 | .long 0x41009260 | 560 | .long \cpu_val |
602 | .long 0xff00fff0 | 561 | .long \cpu_mask |
603 | .long PMD_TYPE_SECT | \ | ||
604 | PMD_SECT_BUFFERABLE | \ | ||
605 | PMD_SECT_CACHEABLE | \ | ||
606 | PMD_BIT4 | \ | ||
607 | PMD_SECT_AP_WRITE | \ | ||
608 | PMD_SECT_AP_READ | ||
609 | .long PMD_TYPE_SECT | \ | ||
610 | PMD_BIT4 | \ | ||
611 | PMD_SECT_AP_WRITE | \ | ||
612 | PMD_SECT_AP_READ | ||
613 | b __feroceon_setup | ||
614 | .long cpu_arch_name | ||
615 | .long cpu_elf_name | ||
616 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | ||
617 | .long cpu_feroceon_name | ||
618 | .long feroceon_processor_functions | ||
619 | .long v4wbi_tlb_fns | ||
620 | .long feroceon_user_fns | ||
621 | .long feroceon_cache_fns | ||
622 | .size __feroceon_old_id_proc_info, . - __feroceon_old_id_proc_info | ||
623 | #endif | ||
624 | |||
625 | .type __88fr531_proc_info,#object | ||
626 | __88fr531_proc_info: | ||
627 | .long 0x56055310 | ||
628 | .long 0xfffffff0 | ||
629 | .long PMD_TYPE_SECT | \ | 562 | .long PMD_TYPE_SECT | \ |
630 | PMD_SECT_BUFFERABLE | \ | 563 | PMD_SECT_BUFFERABLE | \ |
631 | PMD_SECT_CACHEABLE | \ | 564 | PMD_SECT_CACHEABLE | \ |
@@ -640,59 +573,22 @@ __88fr531_proc_info: | |||
640 | .long cpu_arch_name | 573 | .long cpu_arch_name |
641 | .long cpu_elf_name | 574 | .long cpu_elf_name |
642 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | 575 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP |
643 | .long cpu_88fr531_name | 576 | .long \cpu_name |
644 | .long feroceon_processor_functions | 577 | .long feroceon_processor_functions |
645 | .long v4wbi_tlb_fns | 578 | .long v4wbi_tlb_fns |
646 | .long feroceon_user_fns | 579 | .long feroceon_user_fns |
647 | .long feroceon_cache_fns | 580 | .long \cache |
648 | .size __88fr531_proc_info, . - __88fr531_proc_info | 581 | .size __\name\()_proc_info, . - __\name\()_proc_info |
582 | .endm | ||
649 | 583 | ||
650 | .type __88fr571_proc_info,#object | 584 | #ifdef CONFIG_CPU_FEROCEON_OLD_ID |
651 | __88fr571_proc_info: | 585 | feroceon_proc_info feroceon_old_id, 0x41009260, 0xff00fff0, \ |
652 | .long 0x56155710 | 586 | cpu_name=cpu_feroceon_name, cache=feroceon_cache_fns |
653 | .long 0xfffffff0 | 587 | #endif |
654 | .long PMD_TYPE_SECT | \ | ||
655 | PMD_SECT_BUFFERABLE | \ | ||
656 | PMD_SECT_CACHEABLE | \ | ||
657 | PMD_BIT4 | \ | ||
658 | PMD_SECT_AP_WRITE | \ | ||
659 | PMD_SECT_AP_READ | ||
660 | .long PMD_TYPE_SECT | \ | ||
661 | PMD_BIT4 | \ | ||
662 | PMD_SECT_AP_WRITE | \ | ||
663 | PMD_SECT_AP_READ | ||
664 | b __feroceon_setup | ||
665 | .long cpu_arch_name | ||
666 | .long cpu_elf_name | ||
667 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | ||
668 | .long cpu_88fr571_name | ||
669 | .long feroceon_processor_functions | ||
670 | .long v4wbi_tlb_fns | ||
671 | .long feroceon_user_fns | ||
672 | .long feroceon_range_cache_fns | ||
673 | .size __88fr571_proc_info, . - __88fr571_proc_info | ||
674 | 588 | ||
675 | .type __88fr131_proc_info,#object | 589 | feroceon_proc_info 88fr531, 0x56055310, 0xfffffff0, cpu_88fr531_name, \ |
676 | __88fr131_proc_info: | 590 | cache=feroceon_cache_fns |
677 | .long 0x56251310 | 591 | feroceon_proc_info 88fr571, 0x56155710, 0xfffffff0, cpu_88fr571_name, \ |
678 | .long 0xfffffff0 | 592 | cache=feroceon_range_cache_fns |
679 | .long PMD_TYPE_SECT | \ | 593 | feroceon_proc_info 88fr131, 0x56251310, 0xfffffff0, cpu_88fr131_name, \ |
680 | PMD_SECT_BUFFERABLE | \ | 594 | cache=feroceon_range_cache_fns |
681 | PMD_SECT_CACHEABLE | \ | ||
682 | PMD_BIT4 | \ | ||
683 | PMD_SECT_AP_WRITE | \ | ||
684 | PMD_SECT_AP_READ | ||
685 | .long PMD_TYPE_SECT | \ | ||
686 | PMD_BIT4 | \ | ||
687 | PMD_SECT_AP_WRITE | \ | ||
688 | PMD_SECT_AP_READ | ||
689 | b __feroceon_setup | ||
690 | .long cpu_arch_name | ||
691 | .long cpu_elf_name | ||
692 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | ||
693 | .long cpu_88fr131_name | ||
694 | .long feroceon_processor_functions | ||
695 | .long v4wbi_tlb_fns | ||
696 | .long feroceon_user_fns | ||
697 | .long feroceon_range_cache_fns | ||
698 | .size __88fr131_proc_info, . - __88fr131_proc_info | ||
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S index 34261f9486b..87f8ee2ebf7 100644 --- a/arch/arm/mm/proc-macros.S +++ b/arch/arm/mm/proc-macros.S | |||
@@ -121,7 +121,7 @@ | |||
121 | .long PTE_CACHEABLE @ L_PTE_MT_WRITETHROUGH | 121 | .long PTE_CACHEABLE @ L_PTE_MT_WRITETHROUGH |
122 | .long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEBACK | 122 | .long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEBACK |
123 | .long PTE_BUFFERABLE @ L_PTE_MT_DEV_SHARED | 123 | .long PTE_BUFFERABLE @ L_PTE_MT_DEV_SHARED |
124 | .long 0x00 @ unused | 124 | .long PTE_EXT_TEX(4) | PTE_BUFFERABLE @ L_PTE_MT_INNER_WB |
125 | .long 0x00 @ L_PTE_MT_MINICACHE (not present) | 125 | .long 0x00 @ L_PTE_MT_MINICACHE (not present) |
126 | .long PTE_EXT_TEX(1) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEALLOC | 126 | .long PTE_EXT_TEX(1) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEALLOC |
127 | .long 0x00 @ unused | 127 | .long 0x00 @ unused |
@@ -254,3 +254,71 @@ | |||
254 | mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line | 254 | mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line |
255 | mcr p15, 0, ip, c7, c10, 4 @ data write barrier | 255 | mcr p15, 0, ip, c7, c10, 4 @ data write barrier |
256 | .endm | 256 | .endm |
257 | |||
258 | .macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0 | ||
259 | .type \name\()_processor_functions, #object | ||
260 | .align 2 | ||
261 | ENTRY(\name\()_processor_functions) | ||
262 | .word \dabort | ||
263 | .word \pabort | ||
264 | .word cpu_\name\()_proc_init | ||
265 | .word cpu_\name\()_proc_fin | ||
266 | .word cpu_\name\()_reset | ||
267 | .word cpu_\name\()_do_idle | ||
268 | .word cpu_\name\()_dcache_clean_area | ||
269 | .word cpu_\name\()_switch_mm | ||
270 | |||
271 | .if \nommu | ||
272 | .word 0 | ||
273 | .else | ||
274 | .word cpu_\name\()_set_pte_ext | ||
275 | .endif | ||
276 | |||
277 | .if \suspend | ||
278 | .word cpu_\name\()_suspend_size | ||
279 | #ifdef CONFIG_PM_SLEEP | ||
280 | .word cpu_\name\()_do_suspend | ||
281 | .word cpu_\name\()_do_resume | ||
282 | #else | ||
283 | .word 0 | ||
284 | .word 0 | ||
285 | #endif | ||
286 | .else | ||
287 | .word 0 | ||
288 | .word 0 | ||
289 | .word 0 | ||
290 | .endif | ||
291 | |||
292 | .size \name\()_processor_functions, . - \name\()_processor_functions | ||
293 | .endm | ||
294 | |||
295 | .macro define_cache_functions name:req | ||
296 | .align 2 | ||
297 | .type \name\()_cache_fns, #object | ||
298 | ENTRY(\name\()_cache_fns) | ||
299 | .long \name\()_flush_icache_all | ||
300 | .long \name\()_flush_kern_cache_all | ||
301 | .long \name\()_flush_user_cache_all | ||
302 | .long \name\()_flush_user_cache_range | ||
303 | .long \name\()_coherent_kern_range | ||
304 | .long \name\()_coherent_user_range | ||
305 | .long \name\()_flush_kern_dcache_area | ||
306 | .long \name\()_dma_map_area | ||
307 | .long \name\()_dma_unmap_area | ||
308 | .long \name\()_dma_flush_range | ||
309 | .size \name\()_cache_fns, . - \name\()_cache_fns | ||
310 | .endm | ||
311 | |||
312 | .macro define_tlb_functions name:req, flags_up:req, flags_smp | ||
313 | .type \name\()_tlb_fns, #object | ||
314 | ENTRY(\name\()_tlb_fns) | ||
315 | .long \name\()_flush_user_tlb_range | ||
316 | .long \name\()_flush_kern_tlb_range | ||
317 | .ifnb \flags_smp | ||
318 | ALT_SMP(.long \flags_smp ) | ||
319 | ALT_UP(.long \flags_up ) | ||
320 | .else | ||
321 | .long \flags_up | ||
322 | .endif | ||
323 | .size \name\()_tlb_fns, . - \name\()_tlb_fns | ||
324 | .endm | ||
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S index 9d4f2ae6337..db52b0fb14a 100644 --- a/arch/arm/mm/proc-mohawk.S +++ b/arch/arm/mm/proc-mohawk.S | |||
@@ -93,6 +93,17 @@ ENTRY(cpu_mohawk_do_idle) | |||
93 | mov pc, lr | 93 | mov pc, lr |
94 | 94 | ||
95 | /* | 95 | /* |
96 | * flush_icache_all() | ||
97 | * | ||
98 | * Unconditionally clean and invalidate the entire icache. | ||
99 | */ | ||
100 | ENTRY(mohawk_flush_icache_all) | ||
101 | mov r0, #0 | ||
102 | mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache | ||
103 | mov pc, lr | ||
104 | ENDPROC(mohawk_flush_icache_all) | ||
105 | |||
106 | /* | ||
96 | * flush_user_cache_all() | 107 | * flush_user_cache_all() |
97 | * | 108 | * |
98 | * Clean and invalidate all cache entries in a particular | 109 | * Clean and invalidate all cache entries in a particular |
@@ -288,16 +299,8 @@ ENTRY(mohawk_dma_unmap_area) | |||
288 | mov pc, lr | 299 | mov pc, lr |
289 | ENDPROC(mohawk_dma_unmap_area) | 300 | ENDPROC(mohawk_dma_unmap_area) |
290 | 301 | ||
291 | ENTRY(mohawk_cache_fns) | 302 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
292 | .long mohawk_flush_kern_cache_all | 303 | define_cache_functions mohawk |
293 | .long mohawk_flush_user_cache_all | ||
294 | .long mohawk_flush_user_cache_range | ||
295 | .long mohawk_coherent_kern_range | ||
296 | .long mohawk_coherent_user_range | ||
297 | .long mohawk_flush_kern_dcache_area | ||
298 | .long mohawk_dma_map_area | ||
299 | .long mohawk_dma_unmap_area | ||
300 | .long mohawk_dma_flush_range | ||
301 | 304 | ||
302 | ENTRY(cpu_mohawk_dcache_clean_area) | 305 | ENTRY(cpu_mohawk_dcache_clean_area) |
303 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry | 306 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry |
@@ -373,42 +376,14 @@ mohawk_crval: | |||
373 | 376 | ||
374 | __INITDATA | 377 | __INITDATA |
375 | 378 | ||
376 | /* | 379 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) |
377 | * Purpose : Function pointers used to access above functions - all calls | 380 | define_processor_functions mohawk, dabort=v5t_early_abort, pabort=legacy_pabort |
378 | * come through these | ||
379 | */ | ||
380 | .type mohawk_processor_functions, #object | ||
381 | mohawk_processor_functions: | ||
382 | .word v5t_early_abort | ||
383 | .word legacy_pabort | ||
384 | .word cpu_mohawk_proc_init | ||
385 | .word cpu_mohawk_proc_fin | ||
386 | .word cpu_mohawk_reset | ||
387 | .word cpu_mohawk_do_idle | ||
388 | .word cpu_mohawk_dcache_clean_area | ||
389 | .word cpu_mohawk_switch_mm | ||
390 | .word cpu_mohawk_set_pte_ext | ||
391 | .word 0 | ||
392 | .word 0 | ||
393 | .word 0 | ||
394 | .size mohawk_processor_functions, . - mohawk_processor_functions | ||
395 | 381 | ||
396 | .section ".rodata" | 382 | .section ".rodata" |
397 | 383 | ||
398 | .type cpu_arch_name, #object | 384 | string cpu_arch_name, "armv5te" |
399 | cpu_arch_name: | 385 | string cpu_elf_name, "v5" |
400 | .asciz "armv5te" | 386 | string cpu_mohawk_name, "Marvell 88SV331x" |
401 | .size cpu_arch_name, . - cpu_arch_name | ||
402 | |||
403 | .type cpu_elf_name, #object | ||
404 | cpu_elf_name: | ||
405 | .asciz "v5" | ||
406 | .size cpu_elf_name, . - cpu_elf_name | ||
407 | |||
408 | .type cpu_mohawk_name, #object | ||
409 | cpu_mohawk_name: | ||
410 | .asciz "Marvell 88SV331x" | ||
411 | .size cpu_mohawk_name, . - cpu_mohawk_name | ||
412 | 387 | ||
413 | .align | 388 | .align |
414 | 389 | ||
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S index 46f09ed16b9..d50ada26edd 100644 --- a/arch/arm/mm/proc-sa110.S +++ b/arch/arm/mm/proc-sa110.S | |||
@@ -187,43 +187,14 @@ sa110_crval: | |||
187 | 187 | ||
188 | __INITDATA | 188 | __INITDATA |
189 | 189 | ||
190 | /* | 190 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) |
191 | * Purpose : Function pointers used to access above functions - all calls | 191 | define_processor_functions sa110, dabort=v4_early_abort, pabort=legacy_pabort |
192 | * come through these | ||
193 | */ | ||
194 | |||
195 | .type sa110_processor_functions, #object | ||
196 | ENTRY(sa110_processor_functions) | ||
197 | .word v4_early_abort | ||
198 | .word legacy_pabort | ||
199 | .word cpu_sa110_proc_init | ||
200 | .word cpu_sa110_proc_fin | ||
201 | .word cpu_sa110_reset | ||
202 | .word cpu_sa110_do_idle | ||
203 | .word cpu_sa110_dcache_clean_area | ||
204 | .word cpu_sa110_switch_mm | ||
205 | .word cpu_sa110_set_pte_ext | ||
206 | .word 0 | ||
207 | .word 0 | ||
208 | .word 0 | ||
209 | .size sa110_processor_functions, . - sa110_processor_functions | ||
210 | 192 | ||
211 | .section ".rodata" | 193 | .section ".rodata" |
212 | 194 | ||
213 | .type cpu_arch_name, #object | 195 | string cpu_arch_name, "armv4" |
214 | cpu_arch_name: | 196 | string cpu_elf_name, "v4" |
215 | .asciz "armv4" | 197 | string cpu_sa110_name, "StrongARM-110" |
216 | .size cpu_arch_name, . - cpu_arch_name | ||
217 | |||
218 | .type cpu_elf_name, #object | ||
219 | cpu_elf_name: | ||
220 | .asciz "v4" | ||
221 | .size cpu_elf_name, . - cpu_elf_name | ||
222 | |||
223 | .type cpu_sa110_name, #object | ||
224 | cpu_sa110_name: | ||
225 | .asciz "StrongARM-110" | ||
226 | .size cpu_sa110_name, . - cpu_sa110_name | ||
227 | 198 | ||
228 | .align | 199 | .align |
229 | 200 | ||
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S index 184a9c997e3..69e7f2ef738 100644 --- a/arch/arm/mm/proc-sa1100.S +++ b/arch/arm/mm/proc-sa1100.S | |||
@@ -34,7 +34,7 @@ | |||
34 | */ | 34 | */ |
35 | #define DCACHELINESIZE 32 | 35 | #define DCACHELINESIZE 32 |
36 | 36 | ||
37 | __INIT | 37 | .section .text |
38 | 38 | ||
39 | /* | 39 | /* |
40 | * cpu_sa1100_proc_init() | 40 | * cpu_sa1100_proc_init() |
@@ -45,8 +45,6 @@ ENTRY(cpu_sa1100_proc_init) | |||
45 | mcr p15, 0, r0, c9, c0, 5 @ Allow read-buffer operations from userland | 45 | mcr p15, 0, r0, c9, c0, 5 @ Allow read-buffer operations from userland |
46 | mov pc, lr | 46 | mov pc, lr |
47 | 47 | ||
48 | .section .text | ||
49 | |||
50 | /* | 48 | /* |
51 | * cpu_sa1100_proc_fin() | 49 | * cpu_sa1100_proc_fin() |
52 | * | 50 | * |
@@ -184,11 +182,11 @@ ENDPROC(cpu_sa1100_do_suspend) | |||
184 | 182 | ||
185 | ENTRY(cpu_sa1100_do_resume) | 183 | ENTRY(cpu_sa1100_do_resume) |
186 | ldmia r0, {r4 - r7} @ load cp regs | 184 | ldmia r0, {r4 - r7} @ load cp regs |
187 | mov r1, #0 | 185 | mov ip, #0 |
188 | mcr p15, 0, r1, c8, c7, 0 @ flush I+D TLBs | 186 | mcr p15, 0, ip, c8, c7, 0 @ flush I+D TLBs |
189 | mcr p15, 0, r1, c7, c7, 0 @ flush I&D cache | 187 | mcr p15, 0, ip, c7, c7, 0 @ flush I&D cache |
190 | mcr p15, 0, r1, c9, c0, 0 @ invalidate RB | 188 | mcr p15, 0, ip, c9, c0, 0 @ invalidate RB |
191 | mcr p15, 0, r1, c9, c0, 5 @ allow user space to use RB | 189 | mcr p15, 0, ip, c9, c0, 5 @ allow user space to use RB |
192 | 190 | ||
193 | mcr p15, 0, r4, c3, c0, 0 @ domain ID | 191 | mcr p15, 0, r4, c3, c0, 0 @ domain ID |
194 | mcr p15, 0, r5, c2, c0, 0 @ translation table base addr | 192 | mcr p15, 0, r5, c2, c0, 0 @ translation table base addr |
@@ -200,9 +198,6 @@ ENTRY(cpu_sa1100_do_resume) | |||
200 | PMD_SECT_CACHEABLE | PMD_SECT_AP_WRITE | 198 | PMD_SECT_CACHEABLE | PMD_SECT_AP_WRITE |
201 | b cpu_resume_mmu | 199 | b cpu_resume_mmu |
202 | ENDPROC(cpu_sa1100_do_resume) | 200 | ENDPROC(cpu_sa1100_do_resume) |
203 | #else | ||
204 | #define cpu_sa1100_do_suspend 0 | ||
205 | #define cpu_sa1100_do_resume 0 | ||
206 | #endif | 201 | #endif |
207 | 202 | ||
208 | __CPUINIT | 203 | __CPUINIT |
@@ -236,59 +231,28 @@ sa1100_crval: | |||
236 | __INITDATA | 231 | __INITDATA |
237 | 232 | ||
238 | /* | 233 | /* |
239 | * Purpose : Function pointers used to access above functions - all calls | ||
240 | * come through these | ||
241 | */ | ||
242 | |||
243 | /* | ||
244 | * SA1100 and SA1110 share the same function calls | 234 | * SA1100 and SA1110 share the same function calls |
245 | */ | 235 | */ |
246 | .type sa1100_processor_functions, #object | ||
247 | ENTRY(sa1100_processor_functions) | ||
248 | .word v4_early_abort | ||
249 | .word legacy_pabort | ||
250 | .word cpu_sa1100_proc_init | ||
251 | .word cpu_sa1100_proc_fin | ||
252 | .word cpu_sa1100_reset | ||
253 | .word cpu_sa1100_do_idle | ||
254 | .word cpu_sa1100_dcache_clean_area | ||
255 | .word cpu_sa1100_switch_mm | ||
256 | .word cpu_sa1100_set_pte_ext | ||
257 | .word cpu_sa1100_suspend_size | ||
258 | .word cpu_sa1100_do_suspend | ||
259 | .word cpu_sa1100_do_resume | ||
260 | .size sa1100_processor_functions, . - sa1100_processor_functions | ||
261 | |||
262 | .section ".rodata" | ||
263 | |||
264 | .type cpu_arch_name, #object | ||
265 | cpu_arch_name: | ||
266 | .asciz "armv4" | ||
267 | .size cpu_arch_name, . - cpu_arch_name | ||
268 | 236 | ||
269 | .type cpu_elf_name, #object | 237 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) |
270 | cpu_elf_name: | 238 | define_processor_functions sa1100, dabort=v4_early_abort, pabort=legacy_pabort, suspend=1 |
271 | .asciz "v4" | ||
272 | .size cpu_elf_name, . - cpu_elf_name | ||
273 | 239 | ||
274 | .type cpu_sa1100_name, #object | 240 | .section ".rodata" |
275 | cpu_sa1100_name: | ||
276 | .asciz "StrongARM-1100" | ||
277 | .size cpu_sa1100_name, . - cpu_sa1100_name | ||
278 | 241 | ||
279 | .type cpu_sa1110_name, #object | 242 | string cpu_arch_name, "armv4" |
280 | cpu_sa1110_name: | 243 | string cpu_elf_name, "v4" |
281 | .asciz "StrongARM-1110" | 244 | string cpu_sa1100_name, "StrongARM-1100" |
282 | .size cpu_sa1110_name, . - cpu_sa1110_name | 245 | string cpu_sa1110_name, "StrongARM-1110" |
283 | 246 | ||
284 | .align | 247 | .align |
285 | 248 | ||
286 | .section ".proc.info.init", #alloc, #execinstr | 249 | .section ".proc.info.init", #alloc, #execinstr |
287 | 250 | ||
288 | .type __sa1100_proc_info,#object | 251 | .macro sa1100_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req |
289 | __sa1100_proc_info: | 252 | .type __\name\()_proc_info,#object |
290 | .long 0x4401a110 | 253 | __\name\()_proc_info: |
291 | .long 0xfffffff0 | 254 | .long \cpu_val |
255 | .long \cpu_mask | ||
292 | .long PMD_TYPE_SECT | \ | 256 | .long PMD_TYPE_SECT | \ |
293 | PMD_SECT_BUFFERABLE | \ | 257 | PMD_SECT_BUFFERABLE | \ |
294 | PMD_SECT_CACHEABLE | \ | 258 | PMD_SECT_CACHEABLE | \ |
@@ -301,32 +265,13 @@ __sa1100_proc_info: | |||
301 | .long cpu_arch_name | 265 | .long cpu_arch_name |
302 | .long cpu_elf_name | 266 | .long cpu_elf_name |
303 | .long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT | 267 | .long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT |
304 | .long cpu_sa1100_name | 268 | .long \cpu_name |
305 | .long sa1100_processor_functions | 269 | .long sa1100_processor_functions |
306 | .long v4wb_tlb_fns | 270 | .long v4wb_tlb_fns |
307 | .long v4_mc_user_fns | 271 | .long v4_mc_user_fns |
308 | .long v4wb_cache_fns | 272 | .long v4wb_cache_fns |
309 | .size __sa1100_proc_info, . - __sa1100_proc_info | 273 | .size __\name\()_proc_info, . - __\name\()_proc_info |
274 | .endm | ||
310 | 275 | ||
311 | .type __sa1110_proc_info,#object | 276 | sa1100_proc_info sa1100, 0x4401a110, 0xfffffff0, cpu_sa1100_name |
312 | __sa1110_proc_info: | 277 | sa1100_proc_info sa1110, 0x6901b110, 0xfffffff0, cpu_sa1110_name |
313 | .long 0x6901b110 | ||
314 | .long 0xfffffff0 | ||
315 | .long PMD_TYPE_SECT | \ | ||
316 | PMD_SECT_BUFFERABLE | \ | ||
317 | PMD_SECT_CACHEABLE | \ | ||
318 | PMD_SECT_AP_WRITE | \ | ||
319 | PMD_SECT_AP_READ | ||
320 | .long PMD_TYPE_SECT | \ | ||
321 | PMD_SECT_AP_WRITE | \ | ||
322 | PMD_SECT_AP_READ | ||
323 | b __sa1100_setup | ||
324 | .long cpu_arch_name | ||
325 | .long cpu_elf_name | ||
326 | .long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT | HWCAP_FAST_MULT | ||
327 | .long cpu_sa1110_name | ||
328 | .long sa1100_processor_functions | ||
329 | .long v4wb_tlb_fns | ||
330 | .long v4_mc_user_fns | ||
331 | .long v4wb_cache_fns | ||
332 | .size __sa1110_proc_info, . - __sa1110_proc_info | ||
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index 1d2b8451bf2..a923aa0fd00 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S | |||
@@ -56,6 +56,11 @@ ENTRY(cpu_v6_proc_fin) | |||
56 | */ | 56 | */ |
57 | .align 5 | 57 | .align 5 |
58 | ENTRY(cpu_v6_reset) | 58 | ENTRY(cpu_v6_reset) |
59 | mrc p15, 0, r1, c1, c0, 0 @ ctrl register | ||
60 | bic r1, r1, #0x1 @ ...............m | ||
61 | mcr p15, 0, r1, c1, c0, 0 @ disable MMU | ||
62 | mov r1, #0 | ||
63 | mcr p15, 0, r1, c7, c5, 4 @ ISB | ||
59 | mov pc, r0 | 64 | mov pc, r0 |
60 | 65 | ||
61 | /* | 66 | /* |
@@ -164,16 +169,9 @@ ENDPROC(cpu_v6_do_resume) | |||
164 | cpu_resume_l1_flags: | 169 | cpu_resume_l1_flags: |
165 | ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_SMP) | 170 | ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_SMP) |
166 | ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_UP) | 171 | ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_UP) |
167 | #else | ||
168 | #define cpu_v6_do_suspend 0 | ||
169 | #define cpu_v6_do_resume 0 | ||
170 | #endif | 172 | #endif |
171 | 173 | ||
172 | 174 | string cpu_v6_name, "ARMv6-compatible processor" | |
173 | .type cpu_v6_name, #object | ||
174 | cpu_v6_name: | ||
175 | .asciz "ARMv6-compatible processor" | ||
176 | .size cpu_v6_name, . - cpu_v6_name | ||
177 | 175 | ||
178 | .align | 176 | .align |
179 | 177 | ||
@@ -225,6 +223,22 @@ __v6_setup: | |||
225 | mrc p15, 0, r0, c1, c0, 0 @ read control register | 223 | mrc p15, 0, r0, c1, c0, 0 @ read control register |
226 | bic r0, r0, r5 @ clear bits them | 224 | bic r0, r0, r5 @ clear bits them |
227 | orr r0, r0, r6 @ set them | 225 | orr r0, r0, r6 @ set them |
226 | #ifdef CONFIG_ARM_ERRATA_364296 | ||
227 | /* | ||
228 | * Workaround for the 364296 ARM1136 r0p2 erratum (possible cache data | ||
229 | * corruption with hit-under-miss enabled). The conditional code below | ||
230 | * (setting the undocumented bit 31 in the auxiliary control register | ||
231 | * and the FI bit in the control register) disables hit-under-miss | ||
232 | * without putting the processor into full low interrupt latency mode. | ||
233 | */ | ||
234 | ldr r6, =0x4107b362 @ id for ARM1136 r0p2 | ||
235 | mrc p15, 0, r5, c0, c0, 0 @ get processor id | ||
236 | teq r5, r6 @ check for the faulty core | ||
237 | mrceq p15, 0, r5, c1, c0, 1 @ load aux control reg | ||
238 | orreq r5, r5, #(1 << 31) @ set the undocumented bit 31 | ||
239 | mcreq p15, 0, r5, c1, c0, 1 @ write aux control reg | ||
240 | orreq r0, r0, #(1 << 21) @ low interrupt latency configuration | ||
241 | #endif | ||
228 | mov pc, lr @ return to head.S:__ret | 242 | mov pc, lr @ return to head.S:__ret |
229 | 243 | ||
230 | /* | 244 | /* |
@@ -239,33 +253,13 @@ v6_crval: | |||
239 | 253 | ||
240 | __INITDATA | 254 | __INITDATA |
241 | 255 | ||
242 | .type v6_processor_functions, #object | 256 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) |
243 | ENTRY(v6_processor_functions) | 257 | define_processor_functions v6, dabort=v6_early_abort, pabort=v6_pabort, suspend=1 |
244 | .word v6_early_abort | ||
245 | .word v6_pabort | ||
246 | .word cpu_v6_proc_init | ||
247 | .word cpu_v6_proc_fin | ||
248 | .word cpu_v6_reset | ||
249 | .word cpu_v6_do_idle | ||
250 | .word cpu_v6_dcache_clean_area | ||
251 | .word cpu_v6_switch_mm | ||
252 | .word cpu_v6_set_pte_ext | ||
253 | .word cpu_v6_suspend_size | ||
254 | .word cpu_v6_do_suspend | ||
255 | .word cpu_v6_do_resume | ||
256 | .size v6_processor_functions, . - v6_processor_functions | ||
257 | 258 | ||
258 | .section ".rodata" | 259 | .section ".rodata" |
259 | 260 | ||
260 | .type cpu_arch_name, #object | 261 | string cpu_arch_name, "armv6" |
261 | cpu_arch_name: | 262 | string cpu_elf_name, "v6" |
262 | .asciz "armv6" | ||
263 | .size cpu_arch_name, . - cpu_arch_name | ||
264 | |||
265 | .type cpu_elf_name, #object | ||
266 | cpu_elf_name: | ||
267 | .asciz "v6" | ||
268 | .size cpu_elf_name, . - cpu_elf_name | ||
269 | .align | 263 | .align |
270 | 264 | ||
271 | .section ".proc.info.init", #alloc, #execinstr | 265 | .section ".proc.info.init", #alloc, #execinstr |
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 089c0b5e454..38c78253f76 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S | |||
@@ -58,9 +58,17 @@ ENDPROC(cpu_v7_proc_fin) | |||
58 | * to what would be the reset vector. | 58 | * to what would be the reset vector. |
59 | * | 59 | * |
60 | * - loc - location to jump to for soft reset | 60 | * - loc - location to jump to for soft reset |
61 | * | ||
62 | * This code must be executed using a flat identity mapping with | ||
63 | * caches disabled. | ||
61 | */ | 64 | */ |
62 | .align 5 | 65 | .align 5 |
63 | ENTRY(cpu_v7_reset) | 66 | ENTRY(cpu_v7_reset) |
67 | mrc p15, 0, r1, c1, c0, 0 @ ctrl register | ||
68 | bic r1, r1, #0x1 @ ...............m | ||
69 | THUMB( bic r1, r1, #1 << 30 ) @ SCTLR.TE (Thumb exceptions) | ||
70 | mcr p15, 0, r1, c1, c0, 0 @ disable MMU | ||
71 | isb | ||
64 | mov pc, r0 | 72 | mov pc, r0 |
65 | ENDPROC(cpu_v7_reset) | 73 | ENDPROC(cpu_v7_reset) |
66 | 74 | ||
@@ -168,13 +176,14 @@ ENTRY(cpu_v7_set_pte_ext) | |||
168 | ARM( str r3, [r0, #2048]! ) | 176 | ARM( str r3, [r0, #2048]! ) |
169 | THUMB( add r0, r0, #2048 ) | 177 | THUMB( add r0, r0, #2048 ) |
170 | THUMB( str r3, [r0] ) | 178 | THUMB( str r3, [r0] ) |
171 | mcr p15, 0, r0, c7, c10, 1 @ flush_pte | 179 | mrc p15, 0, r3, c0, c1, 7 @ read ID_MMFR3 |
180 | tst r3, #0xf << 20 @ check the coherent walk bits | ||
181 | mcreq p15, 0, r0, c7, c10, 1 @ flush_pte | ||
172 | #endif | 182 | #endif |
173 | mov pc, lr | 183 | mov pc, lr |
174 | ENDPROC(cpu_v7_set_pte_ext) | 184 | ENDPROC(cpu_v7_set_pte_ext) |
175 | 185 | ||
176 | cpu_v7_name: | 186 | string cpu_v7_name, "ARMv7 Processor" |
177 | .ascii "ARMv7 Processor" | ||
178 | .align | 187 | .align |
179 | 188 | ||
180 | /* | 189 | /* |
@@ -205,49 +214,254 @@ cpu_v7_name: | |||
205 | * NS1 = PRRR[19] = 1 - normal shareable property | 214 | * NS1 = PRRR[19] = 1 - normal shareable property |
206 | * NOS = PRRR[24+n] = 1 - not outer shareable | 215 | * NOS = PRRR[24+n] = 1 - not outer shareable |
207 | */ | 216 | */ |
208 | .equ PRRR, 0xff0a81a8 | 217 | .equ PRRR, 0xff0a89a8 |
209 | .equ NMRR, 0x40e040e0 | 218 | .equ NMRR, 0xc0e044e0 |
210 | 219 | ||
211 | /* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */ | 220 | /* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */ |
221 | .local cpu_v7_debug_suspend_size | ||
222 | #ifdef CONFIG_ARM_SAVE_DEBUG_CONTEXT | ||
223 | /* | ||
224 | * Debug context: | ||
225 | * 8 CP14 registers | ||
226 | * 16x2 CP14 breakpoint registers (maximum) | ||
227 | * 16x2 CP14 watchpoint registers (maximum) | ||
228 | */ | ||
229 | .equ cpu_v7_debug_suspend_size, (4 * (8 + (16 * 2) + (16 * 2))) | ||
230 | |||
231 | .macro save_brkpt cm | ||
232 | mrc p14, 0, r4, c0, \cm, 4 | ||
233 | mrc p14, 0, r5, c0, \cm, 5 | ||
234 | stmia r0!, {r4 - r5} | ||
235 | .endm | ||
236 | |||
237 | .macro restore_brkpt cm | ||
238 | ldmia r0!, {r4 - r5} | ||
239 | mcr p14, 0, r4, c0, \cm, 4 | ||
240 | mcr p14, 0, r5, c0, \cm, 5 | ||
241 | .endm | ||
242 | |||
243 | .macro save_wpt cm | ||
244 | mrc p14, 0, r4, c0, \cm, 6 | ||
245 | mrc p14, 0, r5, c0, \cm, 7 | ||
246 | stmia r0!, {r4 - r5} | ||
247 | .endm | ||
248 | |||
249 | .macro restore_wpt cm | ||
250 | ldmia r0!, {r4 - r5} | ||
251 | mcr p14, 0, r4, c0, \cm, 6 | ||
252 | mcr p14, 0, r5, c0, \cm, 7 | ||
253 | .endm | ||
254 | |||
255 | #else | ||
256 | .equ cpu_v7_debug_suspend_size, 0 | ||
257 | #endif | ||
258 | |||
212 | .globl cpu_v7_suspend_size | 259 | .globl cpu_v7_suspend_size |
213 | .equ cpu_v7_suspend_size, 4 * 9 | 260 | .equ cpu_v7_suspend_size, (4 * 10) + cpu_v7_debug_suspend_size |
214 | #ifdef CONFIG_PM_SLEEP | 261 | #ifdef CONFIG_PM_SLEEP |
215 | ENTRY(cpu_v7_do_suspend) | 262 | ENTRY(cpu_v7_do_suspend) |
216 | stmfd sp!, {r4 - r11, lr} | 263 | stmfd sp!, {r0, r3 - r11, lr} |
264 | mrc p15, 0, r3, c15, c0, 1 @ diag | ||
217 | mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID | 265 | mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID |
218 | mrc p15, 0, r5, c13, c0, 1 @ Context ID | 266 | mrc p15, 0, r5, c13, c0, 1 @ Context ID |
219 | mrc p15, 0, r6, c13, c0, 3 @ User r/o thread ID | 267 | mrc p15, 0, r6, c13, c0, 3 @ User r/o thread ID |
220 | stmia r0!, {r4 - r6} | 268 | stmia r0!, {r3 - r6} |
221 | mrc p15, 0, r6, c3, c0, 0 @ Domain ID | 269 | mrc p15, 0, r6, c3, c0, 0 @ Domain ID |
222 | mrc p15, 0, r7, c2, c0, 0 @ TTB 0 | 270 | mrc p15, 0, r7, c2, c0, 0 @ TTB 0 |
223 | mrc p15, 0, r8, c2, c0, 1 @ TTB 1 | 271 | mrc p15, 0, r8, c2, c0, 1 @ TTB 1 |
224 | mrc p15, 0, r9, c1, c0, 0 @ Control register | 272 | mrc p15, 0, r9, c1, c0, 0 @ Control register |
225 | mrc p15, 0, r10, c1, c0, 1 @ Auxiliary control register | 273 | mrc p15, 0, r10, c1, c0, 1 @ Auxiliary control register |
226 | mrc p15, 0, r11, c1, c0, 2 @ Co-processor access control | 274 | mrc p15, 0, r11, c1, c0, 2 @ Co-processor access control |
227 | stmia r0, {r6 - r11} | 275 | stmia r0!, {r6 - r11} |
228 | ldmfd sp!, {r4 - r11, pc} | 276 | |
277 | #ifdef CONFIG_ARM_SAVE_DEBUG_CONTEXT | ||
278 | /* Save CP14 debug controller context */ | ||
279 | |||
280 | mrc p14, 0, r4, c0, c2, 2 @ DBGDSCRext | ||
281 | mrc p14, 0, r5, c0, c6, 0 @ DBGWFAR | ||
282 | mrc p14, 0, r6, c0, c7, 0 @ DBGVCR | ||
283 | mrc p14, 0, r7, c7, c9, 6 @ DBGCLAIMCLR | ||
284 | stmia r0!, {r4-r7} | ||
285 | |||
286 | mrc p14, 0, r4, c0, c10, 0 @ DBGDSCCR | ||
287 | mrc p14, 0, r5, c0, c11, 0 @ DBGDSMCR | ||
288 | stmia r0!, {r4-r5} | ||
289 | |||
290 | tst r4, #(1 << 29) @ DBGDSCRext.TXfull | ||
291 | mrcne p14, 0, r4, c0, c3, 2 @ DBGDTRTXext | ||
292 | strne r4, [r0], #4 | ||
293 | |||
294 | tst r4, #(1 << 30) @ DBGDSCRext.RXfull | ||
295 | mrcne p14, 0, r4, c0, c0, 2 @ DBGDTRRXext | ||
296 | strne r4, [r0], #4 | ||
297 | |||
298 | mrc p14, 0, r8, c0, c0, 0 @ read IDR | ||
299 | mov r3, r8, lsr #24 | ||
300 | and r3, r3, #0xf @ r3 has the number of brkpt | ||
301 | rsb r3, r3, #0xf | ||
302 | |||
303 | /* r3 = (15 - #of brkpt) ; | ||
304 | switch offset = r3*12 - 4 = (r3*3 - 1)<<2 | ||
305 | */ | ||
306 | add r3, r3, r3, lsl #1 | ||
307 | sub r3, r3, #1 | ||
308 | add pc, pc, r3, lsl #2 | ||
309 | |||
310 | save_brkpt c15 | ||
311 | save_brkpt c14 | ||
312 | save_brkpt c13 | ||
313 | save_brkpt c12 | ||
314 | save_brkpt c11 | ||
315 | save_brkpt c10 | ||
316 | save_brkpt c9 | ||
317 | save_brkpt c8 | ||
318 | save_brkpt c7 | ||
319 | save_brkpt c6 | ||
320 | save_brkpt c5 | ||
321 | save_brkpt c4 | ||
322 | save_brkpt c3 | ||
323 | save_brkpt c2 | ||
324 | save_brkpt c1 | ||
325 | save_brkpt c0 | ||
326 | |||
327 | mov r3, r8, lsr #28 @ r3 has the number of wpt | ||
328 | rsb r3, r3, #0xf | ||
329 | |||
330 | /* r3 = (15 - #of wpt) ; | ||
331 | switch offset = r3*12 - 4 = (r3*3 - 1)<<2 | ||
332 | */ | ||
333 | add r3, r3, r3, lsl #1 | ||
334 | sub r3, r3, #1 | ||
335 | add pc, pc, r3, lsl #2 | ||
336 | |||
337 | save_wpt c15 | ||
338 | save_wpt c14 | ||
339 | save_wpt c13 | ||
340 | save_wpt c12 | ||
341 | save_wpt c11 | ||
342 | save_wpt c10 | ||
343 | save_wpt c9 | ||
344 | save_wpt c8 | ||
345 | save_wpt c7 | ||
346 | save_wpt c6 | ||
347 | save_wpt c5 | ||
348 | save_wpt c4 | ||
349 | save_wpt c3 | ||
350 | save_wpt c2 | ||
351 | save_wpt c1 | ||
352 | save_wpt c0 | ||
353 | #endif | ||
354 | ldmfd sp!, {r0, r3 - r11, pc} | ||
229 | ENDPROC(cpu_v7_do_suspend) | 355 | ENDPROC(cpu_v7_do_suspend) |
230 | 356 | ||
231 | ENTRY(cpu_v7_do_resume) | 357 | ENTRY(cpu_v7_do_resume) |
232 | mov ip, #0 | 358 | mov ip, #0 |
233 | mcr p15, 0, ip, c8, c7, 0 @ invalidate TLBs | 359 | mcr p15, 0, ip, c8, c7, 0 @ invalidate TLBs |
234 | mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache | 360 | mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache |
235 | ldmia r0!, {r4 - r6} | 361 | ldmia r0!, {r3 - r6} |
362 | #ifndef CONFIG_TRUSTED_FOUNDATIONS | ||
363 | mcr p15, 0, r3, c15, c0, 1 @ diag | ||
364 | #endif | ||
236 | mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID | 365 | mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID |
237 | mcr p15, 0, r5, c13, c0, 1 @ Context ID | 366 | mcr p15, 0, r5, c13, c0, 1 @ Context ID |
238 | mcr p15, 0, r6, c13, c0, 3 @ User r/o thread ID | 367 | mcr p15, 0, r6, c13, c0, 3 @ User r/o thread ID |
239 | ldmia r0, {r6 - r11} | 368 | ldmia r0!, {r6 - r11} |
240 | mcr p15, 0, r6, c3, c0, 0 @ Domain ID | 369 | mcr p15, 0, r6, c3, c0, 0 @ Domain ID |
241 | mcr p15, 0, r7, c2, c0, 0 @ TTB 0 | 370 | mcr p15, 0, r7, c2, c0, 0 @ TTB 0 |
242 | mcr p15, 0, r8, c2, c0, 1 @ TTB 1 | 371 | mcr p15, 0, r8, c2, c0, 1 @ TTB 1 |
243 | mcr p15, 0, ip, c2, c0, 2 @ TTB control register | 372 | mcr p15, 0, ip, c2, c0, 2 @ TTB control register |
244 | mcr p15, 0, r10, c1, c0, 1 @ Auxiliary control register | 373 | mrc p15, 0, r4, c1, c0, 1 @ Read Auxiliary control register |
374 | teq r4, r10 @ Is it already set? | ||
375 | mcrne p15, 0, r10, c1, c0, 1 @ No, so write it | ||
245 | mcr p15, 0, r11, c1, c0, 2 @ Co-processor access control | 376 | mcr p15, 0, r11, c1, c0, 2 @ Co-processor access control |
246 | ldr r4, =PRRR @ PRRR | 377 | ldr r4, =PRRR @ PRRR |
247 | ldr r5, =NMRR @ NMRR | 378 | ldr r5, =NMRR @ NMRR |
248 | mcr p15, 0, r4, c10, c2, 0 @ write PRRR | 379 | mcr p15, 0, r4, c10, c2, 0 @ write PRRR |
249 | mcr p15, 0, r5, c10, c2, 1 @ write NMRR | 380 | mcr p15, 0, r5, c10, c2, 1 @ write NMRR |
250 | isb | 381 | isb |
382 | |||
383 | #ifdef CONFIG_ARM_SAVE_DEBUG_CONTEXT | ||
384 | /* Restore CP14 debug controller context */ | ||
385 | |||
386 | ldmia r0!, {r2 - r5} | ||
387 | mcr p14, 0, r3, c0, c6, 0 @ DBGWFAR | ||
388 | mcr p14, 0, r4, c0, c7, 0 @ DBGVCR | ||
389 | mcr p14, 0, r5, c7, c8, 6 @ DBGCLAIMSET | ||
390 | |||
391 | ldmia r0!, {r4-r5} | ||
392 | mcr p14, 0, r4, c0, c10, 0 @ DBGDSCCR | ||
393 | mcr p14, 0, r5, c0, c11, 0 @ DBGDSMCR | ||
394 | |||
395 | tst r2, #(1 << 29) @ DBGDSCRext.TXfull | ||
396 | ldrne r4, [r0], #4 | ||
397 | mcrne p14, 0, r4, c0, c3, 2 @ DBGDTRTXext | ||
398 | |||
399 | tst r2, #(1 << 30) @ DBGDSCRext.RXfull | ||
400 | ldrne r4, [r0], #4 | ||
401 | mcrne p14, 0, r4, c0, c0, 2 @ DBGDTRRXext | ||
402 | |||
403 | mrc p14, 0, r8, c0, c0, 0 @ read IDR | ||
404 | mov r3, r8, lsr #24 | ||
405 | and r3, r3, #0xf @ r3 has the number of brkpt | ||
406 | rsb r3, r3, #0xf | ||
407 | |||
408 | /* r3 = (15 - #of wpt) ; | ||
409 | switch offset = r3*12 - 4 = (r3*3 - 1)<<2 | ||
410 | */ | ||
411 | add r3, r3, r3, lsl #1 | ||
412 | sub r3, r3, #1 | ||
413 | add pc, pc, r3, lsl #2 | ||
414 | |||
415 | restore_brkpt c15 | ||
416 | restore_brkpt c14 | ||
417 | restore_brkpt c13 | ||
418 | restore_brkpt c12 | ||
419 | restore_brkpt c11 | ||
420 | restore_brkpt c10 | ||
421 | restore_brkpt c9 | ||
422 | restore_brkpt c8 | ||
423 | restore_brkpt c7 | ||
424 | restore_brkpt c6 | ||
425 | restore_brkpt c5 | ||
426 | restore_brkpt c4 | ||
427 | restore_brkpt c3 | ||
428 | restore_brkpt c2 | ||
429 | restore_brkpt c1 | ||
430 | restore_brkpt c0 | ||
431 | |||
432 | mov r3, r8, lsr #28 @ r3 has the number of wpt | ||
433 | rsb r3, r3, #0xf | ||
434 | |||
435 | /* r3 = (15 - #of wpt) ; | ||
436 | switch offset = r3*12 - 4 = (r3*3 - 1)<<2 | ||
437 | */ | ||
438 | add r3, r3, r3, lsl #1 | ||
439 | sub r3, r3, #1 | ||
440 | add pc, pc, r3, lsl #2 | ||
441 | |||
442 | start_restore_wpt: | ||
443 | restore_wpt c15 | ||
444 | restore_wpt c14 | ||
445 | restore_wpt c13 | ||
446 | restore_wpt c12 | ||
447 | restore_wpt c11 | ||
448 | restore_wpt c10 | ||
449 | restore_wpt c9 | ||
450 | restore_wpt c8 | ||
451 | restore_wpt c7 | ||
452 | restore_wpt c6 | ||
453 | restore_wpt c5 | ||
454 | restore_wpt c4 | ||
455 | restore_wpt c3 | ||
456 | restore_wpt c2 | ||
457 | restore_wpt c1 | ||
458 | restore_wpt c0 | ||
459 | isb | ||
460 | |||
461 | mcr p14, 0, r2, c0, c2, 2 @ DSCR | ||
462 | isb | ||
463 | #endif | ||
464 | dsb | ||
251 | mov r0, r9 @ control register | 465 | mov r0, r9 @ control register |
252 | mov r2, r7, lsr #14 @ get TTB0 base | 466 | mov r2, r7, lsr #14 @ get TTB0 base |
253 | mov r2, r2, lsl #14 | 467 | mov r2, r2, lsl #14 |
@@ -257,9 +471,6 @@ ENDPROC(cpu_v7_do_resume) | |||
257 | cpu_resume_l1_flags: | 471 | cpu_resume_l1_flags: |
258 | ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_SMP) | 472 | ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_SMP) |
259 | ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_UP) | 473 | ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_FLAGS_UP) |
260 | #else | ||
261 | #define cpu_v7_do_suspend 0 | ||
262 | #define cpu_v7_do_resume 0 | ||
263 | #endif | 474 | #endif |
264 | 475 | ||
265 | __CPUINIT | 476 | __CPUINIT |
@@ -270,22 +481,25 @@ cpu_resume_l1_flags: | |||
270 | * Initialise TLB, Caches, and MMU state ready to switch the MMU | 481 | * Initialise TLB, Caches, and MMU state ready to switch the MMU |
271 | * on. Return in r0 the new CP15 C1 control register setting. | 482 | * on. Return in r0 the new CP15 C1 control register setting. |
272 | * | 483 | * |
273 | * We automatically detect if we have a Harvard cache, and use the | ||
274 | * Harvard cache control instructions insead of the unified cache | ||
275 | * control instructions. | ||
276 | * | ||
277 | * This should be able to cover all ARMv7 cores. | 484 | * This should be able to cover all ARMv7 cores. |
278 | * | 485 | * |
279 | * It is assumed that: | 486 | * It is assumed that: |
280 | * - cache type register is implemented | 487 | * - cache type register is implemented |
281 | */ | 488 | */ |
489 | __v7_ca5mp_setup: | ||
282 | __v7_ca9mp_setup: | 490 | __v7_ca9mp_setup: |
491 | mov r10, #(1 << 0) @ TLB ops broadcasting | ||
492 | b 1f | ||
493 | __v7_ca15mp_setup: | ||
494 | mov r10, #0 | ||
495 | 1: | ||
283 | #ifdef CONFIG_SMP | 496 | #ifdef CONFIG_SMP |
284 | ALT_SMP(mrc p15, 0, r0, c1, c0, 1) | 497 | ALT_SMP(mrc p15, 0, r0, c1, c0, 1) |
285 | ALT_UP(mov r0, #(1 << 6)) @ fake it for UP | 498 | ALT_UP(mov r0, #(1 << 6)) @ fake it for UP |
286 | tst r0, #(1 << 6) @ SMP/nAMP mode enabled? | 499 | tst r0, #(1 << 6) @ SMP/nAMP mode enabled? |
287 | orreq r0, r0, #(1 << 6) | (1 << 0) @ Enable SMP/nAMP mode and | 500 | orreq r0, r0, #(1 << 6) @ Enable SMP/nAMP mode |
288 | mcreq p15, 0, r0, c1, c0, 1 @ TLB ops broadcasting | 501 | orreq r0, r0, r10 @ Enable CPU-specific SMP bits |
502 | mcreq p15, 0, r0, c1, c0, 1 | ||
289 | #endif | 503 | #endif |
290 | __v7_setup: | 504 | __v7_setup: |
291 | adr r12, __v7_setup_stack @ the local stack | 505 | adr r12, __v7_setup_stack @ the local stack |
@@ -332,6 +546,17 @@ __v7_setup: | |||
332 | 2: ldr r10, =0x00000c09 @ Cortex-A9 primary part number | 546 | 2: ldr r10, =0x00000c09 @ Cortex-A9 primary part number |
333 | teq r0, r10 | 547 | teq r0, r10 |
334 | bne 3f | 548 | bne 3f |
549 | #ifndef CONFIG_TRUSTED_FOUNDATIONS | ||
550 | cmp r6, #0x10 @ power ctrl reg added r1p0 | ||
551 | mrcge p15, 0, r10, c15, c0, 0 @ read power control register | ||
552 | orrge r10, r10, #1 @ enable dynamic clock gating | ||
553 | mcrge p15, 0, r10, c15, c0, 0 @ write power control register | ||
554 | #ifdef CONFIG_ARM_ERRATA_720791 | ||
555 | teq r5, #0x00100000 @ only present in r1p* | ||
556 | mrceq p15, 0, r10, c15, c0, 2 @ read "chicken power ctrl" reg | ||
557 | orreq r10, r10, #0x30 @ disable core clk gate on | ||
558 | mcreq p15, 0, r10, c15, c0, 2 @ instr-side waits | ||
559 | #endif | ||
335 | #ifdef CONFIG_ARM_ERRATA_742230 | 560 | #ifdef CONFIG_ARM_ERRATA_742230 |
336 | cmp r6, #0x22 @ only present up to r2p2 | 561 | cmp r6, #0x22 @ only present up to r2p2 |
337 | mrcle p15, 0, r10, c15, c0, 1 @ read diagnostic register | 562 | mrcle p15, 0, r10, c15, c0, 1 @ read diagnostic register |
@@ -351,6 +576,8 @@ __v7_setup: | |||
351 | teq r6, #0x20 @ present in r2p0 | 576 | teq r6, #0x20 @ present in r2p0 |
352 | teqne r6, #0x21 @ present in r2p1 | 577 | teqne r6, #0x21 @ present in r2p1 |
353 | teqne r6, #0x22 @ present in r2p2 | 578 | teqne r6, #0x22 @ present in r2p2 |
579 | teqne r6, #0x27 @ present in r2p7 | ||
580 | teqne r6, #0x29 @ present in r2p9 | ||
354 | mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register | 581 | mrceq p15, 0, r10, c15, c0, 1 @ read diagnostic register |
355 | orreq r10, r10, #1 << 6 @ set bit #6 | 582 | orreq r10, r10, #1 << 6 @ set bit #6 |
356 | mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register | 583 | mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register |
@@ -361,11 +588,16 @@ __v7_setup: | |||
361 | orrlt r10, r10, #1 << 11 @ set bit #11 | 588 | orrlt r10, r10, #1 << 11 @ set bit #11 |
362 | mcrlt p15, 0, r10, c15, c0, 1 @ write diagnostic register | 589 | mcrlt p15, 0, r10, c15, c0, 1 @ write diagnostic register |
363 | #endif | 590 | #endif |
591 | #ifdef CONFIG_ARM_ERRATA_752520 | ||
592 | cmp r6, #0x29 @ present prior to r2p9 | ||
593 | mrclt p15, 0, r10, c15, c0, 1 @ read diagnostic register | ||
594 | orrlt r10, r10, #1 << 20 @ set bit #20 | ||
595 | mcrlt p15, 0, r10, c15, c0, 1 @ write diagnostic register | ||
596 | #endif | ||
597 | #endif | ||
364 | 598 | ||
365 | 3: mov r10, #0 | 599 | 3: mov r10, #0 |
366 | #ifdef HARVARD_CACHE | ||
367 | mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate | 600 | mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate |
368 | #endif | ||
369 | dsb | 601 | dsb |
370 | #ifdef CONFIG_MMU | 602 | #ifdef CONFIG_MMU |
371 | mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs | 603 | mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs |
@@ -411,94 +643,75 @@ __v7_setup_stack: | |||
411 | 643 | ||
412 | __INITDATA | 644 | __INITDATA |
413 | 645 | ||
414 | .type v7_processor_functions, #object | 646 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) |
415 | ENTRY(v7_processor_functions) | 647 | define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1 |
416 | .word v7_early_abort | ||
417 | .word v7_pabort | ||
418 | .word cpu_v7_proc_init | ||
419 | .word cpu_v7_proc_fin | ||
420 | .word cpu_v7_reset | ||
421 | .word cpu_v7_do_idle | ||
422 | .word cpu_v7_dcache_clean_area | ||
423 | .word cpu_v7_switch_mm | ||
424 | .word cpu_v7_set_pte_ext | ||
425 | .word cpu_v7_suspend_size | ||
426 | .word cpu_v7_do_suspend | ||
427 | .word cpu_v7_do_resume | ||
428 | .size v7_processor_functions, . - v7_processor_functions | ||
429 | 648 | ||
430 | .section ".rodata" | 649 | .section ".rodata" |
431 | 650 | ||
432 | .type cpu_arch_name, #object | 651 | string cpu_arch_name, "armv7" |
433 | cpu_arch_name: | 652 | string cpu_elf_name, "v7" |
434 | .asciz "armv7" | ||
435 | .size cpu_arch_name, . - cpu_arch_name | ||
436 | |||
437 | .type cpu_elf_name, #object | ||
438 | cpu_elf_name: | ||
439 | .asciz "v7" | ||
440 | .size cpu_elf_name, . - cpu_elf_name | ||
441 | .align | 653 | .align |
442 | 654 | ||
443 | .section ".proc.info.init", #alloc, #execinstr | 655 | .section ".proc.info.init", #alloc, #execinstr |
444 | 656 | ||
445 | .type __v7_ca9mp_proc_info, #object | 657 | /* |
446 | __v7_ca9mp_proc_info: | 658 | * Standard v7 proc info content |
447 | .long 0x410fc090 @ Required ID value | 659 | */ |
448 | .long 0xff0ffff0 @ Mask for ID | 660 | .macro __v7_proc initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0 |
449 | ALT_SMP(.long \ | 661 | ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \ |
450 | PMD_TYPE_SECT | \ | 662 | PMD_FLAGS_SMP | \mm_mmuflags) |
451 | PMD_SECT_AP_WRITE | \ | 663 | ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \ |
452 | PMD_SECT_AP_READ | \ | 664 | PMD_FLAGS_UP | \mm_mmuflags) |
453 | PMD_FLAGS_SMP) | 665 | .long PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_AP_WRITE | \ |
454 | ALT_UP(.long \ | 666 | PMD_SECT_AP_READ | \io_mmuflags |
455 | PMD_TYPE_SECT | \ | 667 | W(b) \initfunc |
456 | PMD_SECT_AP_WRITE | \ | ||
457 | PMD_SECT_AP_READ | \ | ||
458 | PMD_FLAGS_UP) | ||
459 | .long PMD_TYPE_SECT | \ | ||
460 | PMD_SECT_XN | \ | ||
461 | PMD_SECT_AP_WRITE | \ | ||
462 | PMD_SECT_AP_READ | ||
463 | W(b) __v7_ca9mp_setup | ||
464 | .long cpu_arch_name | 668 | .long cpu_arch_name |
465 | .long cpu_elf_name | 669 | .long cpu_elf_name |
466 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS | 670 | .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_FAST_MULT | \ |
671 | HWCAP_EDSP | HWCAP_TLS | \hwcaps | ||
467 | .long cpu_v7_name | 672 | .long cpu_v7_name |
468 | .long v7_processor_functions | 673 | .long v7_processor_functions |
469 | .long v7wbi_tlb_fns | 674 | .long v7wbi_tlb_fns |
470 | .long v6_user_fns | 675 | .long v6_user_fns |
471 | .long v7_cache_fns | 676 | .long v7_cache_fns |
677 | .endm | ||
678 | |||
679 | /* | ||
680 | * ARM Ltd. Cortex A5 processor. | ||
681 | */ | ||
682 | .type __v7_ca5mp_proc_info, #object | ||
683 | __v7_ca5mp_proc_info: | ||
684 | .long 0x410fc050 | ||
685 | .long 0xff0ffff0 | ||
686 | __v7_proc __v7_ca5mp_setup | ||
687 | .size __v7_ca5mp_proc_info, . - __v7_ca5mp_proc_info | ||
688 | |||
689 | /* | ||
690 | * ARM Ltd. Cortex A9 processor. | ||
691 | */ | ||
692 | .type __v7_ca9mp_proc_info, #object | ||
693 | __v7_ca9mp_proc_info: | ||
694 | .long 0x410fc090 | ||
695 | .long 0xff0ffff0 | ||
696 | __v7_proc __v7_ca9mp_setup | ||
472 | .size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info | 697 | .size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info |
473 | 698 | ||
474 | /* | 699 | /* |
700 | * ARM Ltd. Cortex A15 processor. | ||
701 | */ | ||
702 | .type __v7_ca15mp_proc_info, #object | ||
703 | __v7_ca15mp_proc_info: | ||
704 | .long 0x410fc0f0 | ||
705 | .long 0xff0ffff0 | ||
706 | __v7_proc __v7_ca15mp_setup, hwcaps = HWCAP_IDIV | ||
707 | .size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info | ||
708 | |||
709 | /* | ||
475 | * Match any ARMv7 processor core. | 710 | * Match any ARMv7 processor core. |
476 | */ | 711 | */ |
477 | .type __v7_proc_info, #object | 712 | .type __v7_proc_info, #object |
478 | __v7_proc_info: | 713 | __v7_proc_info: |
479 | .long 0x000f0000 @ Required ID value | 714 | .long 0x000f0000 @ Required ID value |
480 | .long 0x000f0000 @ Mask for ID | 715 | .long 0x000f0000 @ Mask for ID |
481 | ALT_SMP(.long \ | 716 | __v7_proc __v7_setup |
482 | PMD_TYPE_SECT | \ | ||
483 | PMD_SECT_AP_WRITE | \ | ||
484 | PMD_SECT_AP_READ | \ | ||
485 | PMD_FLAGS_SMP) | ||
486 | ALT_UP(.long \ | ||
487 | PMD_TYPE_SECT | \ | ||
488 | PMD_SECT_AP_WRITE | \ | ||
489 | PMD_SECT_AP_READ | \ | ||
490 | PMD_FLAGS_UP) | ||
491 | .long PMD_TYPE_SECT | \ | ||
492 | PMD_SECT_XN | \ | ||
493 | PMD_SECT_AP_WRITE | \ | ||
494 | PMD_SECT_AP_READ | ||
495 | W(b) __v7_setup | ||
496 | .long cpu_arch_name | ||
497 | .long cpu_elf_name | ||
498 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS | ||
499 | .long cpu_v7_name | ||
500 | .long v7_processor_functions | ||
501 | .long v7wbi_tlb_fns | ||
502 | .long v6_user_fns | ||
503 | .long v7_cache_fns | ||
504 | .size __v7_proc_info, . - __v7_proc_info | 717 | .size __v7_proc_info, . - __v7_proc_info |
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S index 596213699f3..1a2021cedc7 100644 --- a/arch/arm/mm/proc-xsc3.S +++ b/arch/arm/mm/proc-xsc3.S | |||
@@ -28,7 +28,6 @@ | |||
28 | #include <linux/init.h> | 28 | #include <linux/init.h> |
29 | #include <asm/assembler.h> | 29 | #include <asm/assembler.h> |
30 | #include <asm/hwcap.h> | 30 | #include <asm/hwcap.h> |
31 | #include <mach/hardware.h> | ||
32 | #include <asm/pgtable.h> | 31 | #include <asm/pgtable.h> |
33 | #include <asm/pgtable-hwdef.h> | 32 | #include <asm/pgtable-hwdef.h> |
34 | #include <asm/page.h> | 33 | #include <asm/page.h> |
@@ -335,17 +334,8 @@ ENTRY(xsc3_dma_unmap_area) | |||
335 | mov pc, lr | 334 | mov pc, lr |
336 | ENDPROC(xsc3_dma_unmap_area) | 335 | ENDPROC(xsc3_dma_unmap_area) |
337 | 336 | ||
338 | ENTRY(xsc3_cache_fns) | 337 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
339 | .long xsc3_flush_icache_all | 338 | define_cache_functions xsc3 |
340 | .long xsc3_flush_kern_cache_all | ||
341 | .long xsc3_flush_user_cache_all | ||
342 | .long xsc3_flush_user_cache_range | ||
343 | .long xsc3_coherent_kern_range | ||
344 | .long xsc3_coherent_user_range | ||
345 | .long xsc3_flush_kern_dcache_area | ||
346 | .long xsc3_dma_map_area | ||
347 | .long xsc3_dma_unmap_area | ||
348 | .long xsc3_dma_flush_range | ||
349 | 339 | ||
350 | ENTRY(cpu_xsc3_dcache_clean_area) | 340 | ENTRY(cpu_xsc3_dcache_clean_area) |
351 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line | 341 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean L1 D line |
@@ -385,7 +375,7 @@ cpu_xsc3_mt_table: | |||
385 | .long PTE_EXT_TEX(5) | PTE_CACHEABLE @ L_PTE_MT_WRITETHROUGH | 375 | .long PTE_EXT_TEX(5) | PTE_CACHEABLE @ L_PTE_MT_WRITETHROUGH |
386 | .long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEBACK | 376 | .long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEBACK |
387 | .long PTE_EXT_TEX(1) | PTE_BUFFERABLE @ L_PTE_MT_DEV_SHARED | 377 | .long PTE_EXT_TEX(1) | PTE_BUFFERABLE @ L_PTE_MT_DEV_SHARED |
388 | .long 0x00 @ unused | 378 | .long PTE_EXT_TEX(4) | PTE_BUFFERABLE @ L_PTE_MT_INNER_WB (not present?) |
389 | .long 0x00 @ L_PTE_MT_MINICACHE (not present) | 379 | .long 0x00 @ L_PTE_MT_MINICACHE (not present) |
390 | .long PTE_EXT_TEX(5) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEALLOC (not present?) | 380 | .long PTE_EXT_TEX(5) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEALLOC (not present?) |
391 | .long 0x00 @ unused | 381 | .long 0x00 @ unused |
@@ -416,7 +406,7 @@ ENTRY(cpu_xsc3_set_pte_ext) | |||
416 | .align | 406 | .align |
417 | 407 | ||
418 | .globl cpu_xsc3_suspend_size | 408 | .globl cpu_xsc3_suspend_size |
419 | .equ cpu_xsc3_suspend_size, 4 * 8 | 409 | .equ cpu_xsc3_suspend_size, 4 * 7 |
420 | #ifdef CONFIG_PM_SLEEP | 410 | #ifdef CONFIG_PM_SLEEP |
421 | ENTRY(cpu_xsc3_do_suspend) | 411 | ENTRY(cpu_xsc3_do_suspend) |
422 | stmfd sp!, {r4 - r10, lr} | 412 | stmfd sp!, {r4 - r10, lr} |
@@ -428,12 +418,12 @@ ENTRY(cpu_xsc3_do_suspend) | |||
428 | mrc p15, 0, r9, c1, c0, 1 @ auxiliary control reg | 418 | mrc p15, 0, r9, c1, c0, 1 @ auxiliary control reg |
429 | mrc p15, 0, r10, c1, c0, 0 @ control reg | 419 | mrc p15, 0, r10, c1, c0, 0 @ control reg |
430 | bic r4, r4, #2 @ clear frequency change bit | 420 | bic r4, r4, #2 @ clear frequency change bit |
431 | stmia r0, {r1, r4 - r10} @ store v:p offset + cp regs | 421 | stmia r0, {r4 - r10} @ store cp regs |
432 | ldmia sp!, {r4 - r10, pc} | 422 | ldmia sp!, {r4 - r10, pc} |
433 | ENDPROC(cpu_xsc3_do_suspend) | 423 | ENDPROC(cpu_xsc3_do_suspend) |
434 | 424 | ||
435 | ENTRY(cpu_xsc3_do_resume) | 425 | ENTRY(cpu_xsc3_do_resume) |
436 | ldmia r0, {r1, r4 - r10} @ load v:p offset + cp regs | 426 | ldmia r0, {r4 - r10} @ load cp regs |
437 | mov ip, #0 | 427 | mov ip, #0 |
438 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I & D caches, BTB | 428 | mcr p15, 0, ip, c7, c7, 0 @ invalidate I & D caches, BTB |
439 | mcr p15, 0, ip, c7, c10, 4 @ drain write (&fill) buffer | 429 | mcr p15, 0, ip, c7, c10, 4 @ drain write (&fill) buffer |
@@ -454,9 +444,6 @@ ENTRY(cpu_xsc3_do_resume) | |||
454 | ldr r3, =0x542e @ section flags | 444 | ldr r3, =0x542e @ section flags |
455 | b cpu_resume_mmu | 445 | b cpu_resume_mmu |
456 | ENDPROC(cpu_xsc3_do_resume) | 446 | ENDPROC(cpu_xsc3_do_resume) |
457 | #else | ||
458 | #define cpu_xsc3_do_suspend 0 | ||
459 | #define cpu_xsc3_do_resume 0 | ||
460 | #endif | 447 | #endif |
461 | 448 | ||
462 | __CPUINIT | 449 | __CPUINIT |
@@ -503,52 +490,24 @@ xsc3_crval: | |||
503 | 490 | ||
504 | __INITDATA | 491 | __INITDATA |
505 | 492 | ||
506 | /* | 493 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) |
507 | * Purpose : Function pointers used to access above functions - all calls | 494 | define_processor_functions xsc3, dabort=v5t_early_abort, pabort=legacy_pabort, suspend=1 |
508 | * come through these | ||
509 | */ | ||
510 | |||
511 | .type xsc3_processor_functions, #object | ||
512 | ENTRY(xsc3_processor_functions) | ||
513 | .word v5t_early_abort | ||
514 | .word legacy_pabort | ||
515 | .word cpu_xsc3_proc_init | ||
516 | .word cpu_xsc3_proc_fin | ||
517 | .word cpu_xsc3_reset | ||
518 | .word cpu_xsc3_do_idle | ||
519 | .word cpu_xsc3_dcache_clean_area | ||
520 | .word cpu_xsc3_switch_mm | ||
521 | .word cpu_xsc3_set_pte_ext | ||
522 | .word cpu_xsc3_suspend_size | ||
523 | .word cpu_xsc3_do_suspend | ||
524 | .word cpu_xsc3_do_resume | ||
525 | .size xsc3_processor_functions, . - xsc3_processor_functions | ||
526 | 495 | ||
527 | .section ".rodata" | 496 | .section ".rodata" |
528 | 497 | ||
529 | .type cpu_arch_name, #object | 498 | string cpu_arch_name, "armv5te" |
530 | cpu_arch_name: | 499 | string cpu_elf_name, "v5" |
531 | .asciz "armv5te" | 500 | string cpu_xsc3_name, "XScale-V3 based processor" |
532 | .size cpu_arch_name, . - cpu_arch_name | ||
533 | |||
534 | .type cpu_elf_name, #object | ||
535 | cpu_elf_name: | ||
536 | .asciz "v5" | ||
537 | .size cpu_elf_name, . - cpu_elf_name | ||
538 | |||
539 | .type cpu_xsc3_name, #object | ||
540 | cpu_xsc3_name: | ||
541 | .asciz "XScale-V3 based processor" | ||
542 | .size cpu_xsc3_name, . - cpu_xsc3_name | ||
543 | 501 | ||
544 | .align | 502 | .align |
545 | 503 | ||
546 | .section ".proc.info.init", #alloc, #execinstr | 504 | .section ".proc.info.init", #alloc, #execinstr |
547 | 505 | ||
548 | .type __xsc3_proc_info,#object | 506 | .macro xsc3_proc_info name:req, cpu_val:req, cpu_mask:req |
549 | __xsc3_proc_info: | 507 | .type __\name\()_proc_info,#object |
550 | .long 0x69056000 | 508 | __\name\()_proc_info: |
551 | .long 0xffffe000 | 509 | .long \cpu_val |
510 | .long \cpu_mask | ||
552 | .long PMD_TYPE_SECT | \ | 511 | .long PMD_TYPE_SECT | \ |
553 | PMD_SECT_BUFFERABLE | \ | 512 | PMD_SECT_BUFFERABLE | \ |
554 | PMD_SECT_CACHEABLE | \ | 513 | PMD_SECT_CACHEABLE | \ |
@@ -566,29 +525,10 @@ __xsc3_proc_info: | |||
566 | .long v4wbi_tlb_fns | 525 | .long v4wbi_tlb_fns |
567 | .long xsc3_mc_user_fns | 526 | .long xsc3_mc_user_fns |
568 | .long xsc3_cache_fns | 527 | .long xsc3_cache_fns |
569 | .size __xsc3_proc_info, . - __xsc3_proc_info | 528 | .size __\name\()_proc_info, . - __\name\()_proc_info |
529 | .endm | ||
570 | 530 | ||
571 | /* Note: PXA935 changed its implementor ID from Intel to Marvell */ | 531 | xsc3_proc_info xsc3, 0x69056000, 0xffffe000 |
572 | 532 | ||
573 | .type __xsc3_pxa935_proc_info,#object | 533 | /* Note: PXA935 changed its implementor ID from Intel to Marvell */ |
574 | __xsc3_pxa935_proc_info: | 534 | xsc3_proc_info xsc3_pxa935, 0x56056000, 0xffffe000 |
575 | .long 0x56056000 | ||
576 | .long 0xffffe000 | ||
577 | .long PMD_TYPE_SECT | \ | ||
578 | PMD_SECT_BUFFERABLE | \ | ||
579 | PMD_SECT_CACHEABLE | \ | ||
580 | PMD_SECT_AP_WRITE | \ | ||
581 | PMD_SECT_AP_READ | ||
582 | .long PMD_TYPE_SECT | \ | ||
583 | PMD_SECT_AP_WRITE | \ | ||
584 | PMD_SECT_AP_READ | ||
585 | b __xsc3_setup | ||
586 | .long cpu_arch_name | ||
587 | .long cpu_elf_name | ||
588 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | ||
589 | .long cpu_xsc3_name | ||
590 | .long xsc3_processor_functions | ||
591 | .long v4wbi_tlb_fns | ||
592 | .long xsc3_mc_user_fns | ||
593 | .long xsc3_cache_fns | ||
594 | .size __xsc3_pxa935_proc_info, . - __xsc3_pxa935_proc_info | ||
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index 42af97664c9..b0fe4b1e233 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S | |||
@@ -390,12 +390,12 @@ ENDPROC(xscale_dma_map_area) | |||
390 | * - size - size of region | 390 | * - size - size of region |
391 | * - dir - DMA direction | 391 | * - dir - DMA direction |
392 | */ | 392 | */ |
393 | ENTRY(xscale_dma_a0_map_area) | 393 | ENTRY(xscale_80200_A0_A1_dma_map_area) |
394 | add r1, r1, r0 | 394 | add r1, r1, r0 |
395 | teq r2, #DMA_TO_DEVICE | 395 | teq r2, #DMA_TO_DEVICE |
396 | beq xscale_dma_clean_range | 396 | beq xscale_dma_clean_range |
397 | b xscale_dma_flush_range | 397 | b xscale_dma_flush_range |
398 | ENDPROC(xscale_dma_a0_map_area) | 398 | ENDPROC(xscale_80200_A0_A1_dma_map_area) |
399 | 399 | ||
400 | /* | 400 | /* |
401 | * dma_unmap_area(start, size, dir) | 401 | * dma_unmap_area(start, size, dir) |
@@ -407,17 +407,8 @@ ENTRY(xscale_dma_unmap_area) | |||
407 | mov pc, lr | 407 | mov pc, lr |
408 | ENDPROC(xscale_dma_unmap_area) | 408 | ENDPROC(xscale_dma_unmap_area) |
409 | 409 | ||
410 | ENTRY(xscale_cache_fns) | 410 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) |
411 | .long xscale_flush_icache_all | 411 | define_cache_functions xscale |
412 | .long xscale_flush_kern_cache_all | ||
413 | .long xscale_flush_user_cache_all | ||
414 | .long xscale_flush_user_cache_range | ||
415 | .long xscale_coherent_kern_range | ||
416 | .long xscale_coherent_user_range | ||
417 | .long xscale_flush_kern_dcache_area | ||
418 | .long xscale_dma_map_area | ||
419 | .long xscale_dma_unmap_area | ||
420 | .long xscale_dma_flush_range | ||
421 | 412 | ||
422 | /* | 413 | /* |
423 | * On stepping A0/A1 of the 80200, invalidating D-cache by line doesn't | 414 | * On stepping A0/A1 of the 80200, invalidating D-cache by line doesn't |
@@ -432,16 +423,28 @@ ENTRY(xscale_cache_fns) | |||
432 | * revision January 22, 2003, available at: | 423 | * revision January 22, 2003, available at: |
433 | * http://www.intel.com/design/iio/specupdt/273415.htm | 424 | * http://www.intel.com/design/iio/specupdt/273415.htm |
434 | */ | 425 | */ |
435 | ENTRY(xscale_80200_A0_A1_cache_fns) | 426 | .macro a0_alias basename |
436 | .long xscale_flush_kern_cache_all | 427 | .globl xscale_80200_A0_A1_\basename |
437 | .long xscale_flush_user_cache_all | 428 | .type xscale_80200_A0_A1_\basename , %function |
438 | .long xscale_flush_user_cache_range | 429 | .equ xscale_80200_A0_A1_\basename , xscale_\basename |
439 | .long xscale_coherent_kern_range | 430 | .endm |
440 | .long xscale_coherent_user_range | 431 | |
441 | .long xscale_flush_kern_dcache_area | 432 | /* |
442 | .long xscale_dma_a0_map_area | 433 | * Most of the cache functions are unchanged for these processor revisions. |
443 | .long xscale_dma_unmap_area | 434 | * Export suitable alias symbols for the unchanged functions: |
444 | .long xscale_dma_flush_range | 435 | */ |
436 | a0_alias flush_icache_all | ||
437 | a0_alias flush_user_cache_all | ||
438 | a0_alias flush_kern_cache_all | ||
439 | a0_alias flush_user_cache_range | ||
440 | a0_alias coherent_kern_range | ||
441 | a0_alias coherent_user_range | ||
442 | a0_alias flush_kern_dcache_area | ||
443 | a0_alias dma_flush_range | ||
444 | a0_alias dma_unmap_area | ||
445 | |||
446 | @ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S) | ||
447 | define_cache_functions xscale_80200_A0_A1 | ||
445 | 448 | ||
446 | ENTRY(cpu_xscale_dcache_clean_area) | 449 | ENTRY(cpu_xscale_dcache_clean_area) |
447 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry | 450 | 1: mcr p15, 0, r0, c7, c10, 1 @ clean D entry |
@@ -481,7 +484,7 @@ cpu_xscale_mt_table: | |||
481 | .long PTE_CACHEABLE @ L_PTE_MT_WRITETHROUGH | 484 | .long PTE_CACHEABLE @ L_PTE_MT_WRITETHROUGH |
482 | .long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEBACK | 485 | .long PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEBACK |
483 | .long PTE_EXT_TEX(1) | PTE_BUFFERABLE @ L_PTE_MT_DEV_SHARED | 486 | .long PTE_EXT_TEX(1) | PTE_BUFFERABLE @ L_PTE_MT_DEV_SHARED |
484 | .long 0x00 @ unused | 487 | .long PTE_EXT_TEX(1) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_INNER_WB |
485 | .long PTE_EXT_TEX(1) | PTE_CACHEABLE @ L_PTE_MT_MINICACHE | 488 | .long PTE_EXT_TEX(1) | PTE_CACHEABLE @ L_PTE_MT_MINICACHE |
486 | .long PTE_EXT_TEX(1) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEALLOC | 489 | .long PTE_EXT_TEX(1) | PTE_CACHEABLE | PTE_BUFFERABLE @ L_PTE_MT_WRITEALLOC |
487 | .long 0x00 @ unused | 490 | .long 0x00 @ unused |
@@ -551,9 +554,6 @@ ENTRY(cpu_xscale_do_resume) | |||
551 | PMD_SECT_CACHEABLE | PMD_SECT_AP_WRITE | 554 | PMD_SECT_CACHEABLE | PMD_SECT_AP_WRITE |
552 | b cpu_resume_mmu | 555 | b cpu_resume_mmu |
553 | ENDPROC(cpu_xscale_do_resume) | 556 | ENDPROC(cpu_xscale_do_resume) |
554 | #else | ||
555 | #define cpu_xscale_do_suspend 0 | ||
556 | #define cpu_xscale_do_resume 0 | ||
557 | #endif | 557 | #endif |
558 | 558 | ||
559 | __CPUINIT | 559 | __CPUINIT |
@@ -587,432 +587,74 @@ xscale_crval: | |||
587 | 587 | ||
588 | __INITDATA | 588 | __INITDATA |
589 | 589 | ||
590 | /* | 590 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) |
591 | * Purpose : Function pointers used to access above functions - all calls | 591 | define_processor_functions xscale, dabort=v5t_early_abort, pabort=legacy_pabort, suspend=1 |
592 | * come through these | ||
593 | */ | ||
594 | |||
595 | .type xscale_processor_functions, #object | ||
596 | ENTRY(xscale_processor_functions) | ||
597 | .word v5t_early_abort | ||
598 | .word legacy_pabort | ||
599 | .word cpu_xscale_proc_init | ||
600 | .word cpu_xscale_proc_fin | ||
601 | .word cpu_xscale_reset | ||
602 | .word cpu_xscale_do_idle | ||
603 | .word cpu_xscale_dcache_clean_area | ||
604 | .word cpu_xscale_switch_mm | ||
605 | .word cpu_xscale_set_pte_ext | ||
606 | .word cpu_xscale_suspend_size | ||
607 | .word cpu_xscale_do_suspend | ||
608 | .word cpu_xscale_do_resume | ||
609 | .size xscale_processor_functions, . - xscale_processor_functions | ||
610 | 592 | ||
611 | .section ".rodata" | 593 | .section ".rodata" |
612 | 594 | ||
613 | .type cpu_arch_name, #object | 595 | string cpu_arch_name, "armv5te" |
614 | cpu_arch_name: | 596 | string cpu_elf_name, "v5" |
615 | .asciz "armv5te" | 597 | |
616 | .size cpu_arch_name, . - cpu_arch_name | 598 | string cpu_80200_A0_A1_name, "XScale-80200 A0/A1" |
617 | 599 | string cpu_80200_name, "XScale-80200" | |
618 | .type cpu_elf_name, #object | 600 | string cpu_80219_name, "XScale-80219" |
619 | cpu_elf_name: | 601 | string cpu_8032x_name, "XScale-IOP8032x Family" |
620 | .asciz "v5" | 602 | string cpu_8033x_name, "XScale-IOP8033x Family" |
621 | .size cpu_elf_name, . - cpu_elf_name | 603 | string cpu_pxa250_name, "XScale-PXA250" |
622 | 604 | string cpu_pxa210_name, "XScale-PXA210" | |
623 | .type cpu_80200_A0_A1_name, #object | 605 | string cpu_ixp42x_name, "XScale-IXP42x Family" |
624 | cpu_80200_A0_A1_name: | 606 | string cpu_ixp43x_name, "XScale-IXP43x Family" |
625 | .asciz "XScale-80200 A0/A1" | 607 | string cpu_ixp46x_name, "XScale-IXP46x Family" |
626 | .size cpu_80200_A0_A1_name, . - cpu_80200_A0_A1_name | 608 | string cpu_ixp2400_name, "XScale-IXP2400" |
627 | 609 | string cpu_ixp2800_name, "XScale-IXP2800" | |
628 | .type cpu_80200_name, #object | 610 | string cpu_pxa255_name, "XScale-PXA255" |
629 | cpu_80200_name: | 611 | string cpu_pxa270_name, "XScale-PXA270" |
630 | .asciz "XScale-80200" | ||
631 | .size cpu_80200_name, . - cpu_80200_name | ||
632 | |||
633 | .type cpu_80219_name, #object | ||
634 | cpu_80219_name: | ||
635 | .asciz "XScale-80219" | ||
636 | .size cpu_80219_name, . - cpu_80219_name | ||
637 | |||
638 | .type cpu_8032x_name, #object | ||
639 | cpu_8032x_name: | ||
640 | .asciz "XScale-IOP8032x Family" | ||
641 | .size cpu_8032x_name, . - cpu_8032x_name | ||
642 | |||
643 | .type cpu_8033x_name, #object | ||
644 | cpu_8033x_name: | ||
645 | .asciz "XScale-IOP8033x Family" | ||
646 | .size cpu_8033x_name, . - cpu_8033x_name | ||
647 | |||
648 | .type cpu_pxa250_name, #object | ||
649 | cpu_pxa250_name: | ||
650 | .asciz "XScale-PXA250" | ||
651 | .size cpu_pxa250_name, . - cpu_pxa250_name | ||
652 | |||
653 | .type cpu_pxa210_name, #object | ||
654 | cpu_pxa210_name: | ||
655 | .asciz "XScale-PXA210" | ||
656 | .size cpu_pxa210_name, . - cpu_pxa210_name | ||
657 | |||
658 | .type cpu_ixp42x_name, #object | ||
659 | cpu_ixp42x_name: | ||
660 | .asciz "XScale-IXP42x Family" | ||
661 | .size cpu_ixp42x_name, . - cpu_ixp42x_name | ||
662 | |||
663 | .type cpu_ixp43x_name, #object | ||
664 | cpu_ixp43x_name: | ||
665 | .asciz "XScale-IXP43x Family" | ||
666 | .size cpu_ixp43x_name, . - cpu_ixp43x_name | ||
667 | |||
668 | .type cpu_ixp46x_name, #object | ||
669 | cpu_ixp46x_name: | ||
670 | .asciz "XScale-IXP46x Family" | ||
671 | .size cpu_ixp46x_name, . - cpu_ixp46x_name | ||
672 | |||
673 | .type cpu_ixp2400_name, #object | ||
674 | cpu_ixp2400_name: | ||
675 | .asciz "XScale-IXP2400" | ||
676 | .size cpu_ixp2400_name, . - cpu_ixp2400_name | ||
677 | |||
678 | .type cpu_ixp2800_name, #object | ||
679 | cpu_ixp2800_name: | ||
680 | .asciz "XScale-IXP2800" | ||
681 | .size cpu_ixp2800_name, . - cpu_ixp2800_name | ||
682 | |||
683 | .type cpu_pxa255_name, #object | ||
684 | cpu_pxa255_name: | ||
685 | .asciz "XScale-PXA255" | ||
686 | .size cpu_pxa255_name, . - cpu_pxa255_name | ||
687 | |||
688 | .type cpu_pxa270_name, #object | ||
689 | cpu_pxa270_name: | ||
690 | .asciz "XScale-PXA270" | ||
691 | .size cpu_pxa270_name, . - cpu_pxa270_name | ||
692 | 612 | ||
693 | .align | 613 | .align |
694 | 614 | ||
695 | .section ".proc.info.init", #alloc, #execinstr | 615 | .section ".proc.info.init", #alloc, #execinstr |
696 | 616 | ||
697 | .type __80200_A0_A1_proc_info,#object | 617 | .macro xscale_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache |
698 | __80200_A0_A1_proc_info: | 618 | .type __\name\()_proc_info,#object |
699 | .long 0x69052000 | 619 | __\name\()_proc_info: |
700 | .long 0xfffffffe | 620 | .long \cpu_val |
701 | .long PMD_TYPE_SECT | \ | 621 | .long \cpu_mask |
702 | PMD_SECT_BUFFERABLE | \ | 622 | .long PMD_TYPE_SECT | \ |
703 | PMD_SECT_CACHEABLE | \ | ||
704 | PMD_SECT_AP_WRITE | \ | ||
705 | PMD_SECT_AP_READ | ||
706 | .long PMD_TYPE_SECT | \ | ||
707 | PMD_SECT_AP_WRITE | \ | ||
708 | PMD_SECT_AP_READ | ||
709 | b __xscale_setup | ||
710 | .long cpu_arch_name | ||
711 | .long cpu_elf_name | ||
712 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | ||
713 | .long cpu_80200_name | ||
714 | .long xscale_processor_functions | ||
715 | .long v4wbi_tlb_fns | ||
716 | .long xscale_mc_user_fns | ||
717 | .long xscale_80200_A0_A1_cache_fns | ||
718 | .size __80200_A0_A1_proc_info, . - __80200_A0_A1_proc_info | ||
719 | |||
720 | .type __80200_proc_info,#object | ||
721 | __80200_proc_info: | ||
722 | .long 0x69052000 | ||
723 | .long 0xfffffff0 | ||
724 | .long PMD_TYPE_SECT | \ | ||
725 | PMD_SECT_BUFFERABLE | \ | ||
726 | PMD_SECT_CACHEABLE | \ | ||
727 | PMD_SECT_AP_WRITE | \ | ||
728 | PMD_SECT_AP_READ | ||
729 | .long PMD_TYPE_SECT | \ | ||
730 | PMD_SECT_AP_WRITE | \ | ||
731 | PMD_SECT_AP_READ | ||
732 | b __xscale_setup | ||
733 | .long cpu_arch_name | ||
734 | .long cpu_elf_name | ||
735 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | ||
736 | .long cpu_80200_name | ||
737 | .long xscale_processor_functions | ||
738 | .long v4wbi_tlb_fns | ||
739 | .long xscale_mc_user_fns | ||
740 | .long xscale_cache_fns | ||
741 | .size __80200_proc_info, . - __80200_proc_info | ||
742 | |||
743 | .type __80219_proc_info,#object | ||
744 | __80219_proc_info: | ||
745 | .long 0x69052e20 | ||
746 | .long 0xffffffe0 | ||
747 | .long PMD_TYPE_SECT | \ | ||
748 | PMD_SECT_BUFFERABLE | \ | ||
749 | PMD_SECT_CACHEABLE | \ | ||
750 | PMD_SECT_AP_WRITE | \ | ||
751 | PMD_SECT_AP_READ | ||
752 | .long PMD_TYPE_SECT | \ | ||
753 | PMD_SECT_AP_WRITE | \ | ||
754 | PMD_SECT_AP_READ | ||
755 | b __xscale_setup | ||
756 | .long cpu_arch_name | ||
757 | .long cpu_elf_name | ||
758 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | ||
759 | .long cpu_80219_name | ||
760 | .long xscale_processor_functions | ||
761 | .long v4wbi_tlb_fns | ||
762 | .long xscale_mc_user_fns | ||
763 | .long xscale_cache_fns | ||
764 | .size __80219_proc_info, . - __80219_proc_info | ||
765 | |||
766 | .type __8032x_proc_info,#object | ||
767 | __8032x_proc_info: | ||
768 | .long 0x69052420 | ||
769 | .long 0xfffff7e0 | ||
770 | .long PMD_TYPE_SECT | \ | ||
771 | PMD_SECT_BUFFERABLE | \ | ||
772 | PMD_SECT_CACHEABLE | \ | ||
773 | PMD_SECT_AP_WRITE | \ | ||
774 | PMD_SECT_AP_READ | ||
775 | .long PMD_TYPE_SECT | \ | ||
776 | PMD_SECT_AP_WRITE | \ | ||
777 | PMD_SECT_AP_READ | ||
778 | b __xscale_setup | ||
779 | .long cpu_arch_name | ||
780 | .long cpu_elf_name | ||
781 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | ||
782 | .long cpu_8032x_name | ||
783 | .long xscale_processor_functions | ||
784 | .long v4wbi_tlb_fns | ||
785 | .long xscale_mc_user_fns | ||
786 | .long xscale_cache_fns | ||
787 | .size __8032x_proc_info, . - __8032x_proc_info | ||
788 | |||
789 | .type __8033x_proc_info,#object | ||
790 | __8033x_proc_info: | ||
791 | .long 0x69054010 | ||
792 | .long 0xfffffd30 | ||
793 | .long PMD_TYPE_SECT | \ | ||
794 | PMD_SECT_BUFFERABLE | \ | ||
795 | PMD_SECT_CACHEABLE | \ | ||
796 | PMD_SECT_AP_WRITE | \ | ||
797 | PMD_SECT_AP_READ | ||
798 | .long PMD_TYPE_SECT | \ | ||
799 | PMD_SECT_AP_WRITE | \ | ||
800 | PMD_SECT_AP_READ | ||
801 | b __xscale_setup | ||
802 | .long cpu_arch_name | ||
803 | .long cpu_elf_name | ||
804 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | ||
805 | .long cpu_8033x_name | ||
806 | .long xscale_processor_functions | ||
807 | .long v4wbi_tlb_fns | ||
808 | .long xscale_mc_user_fns | ||
809 | .long xscale_cache_fns | ||
810 | .size __8033x_proc_info, . - __8033x_proc_info | ||
811 | |||
812 | .type __pxa250_proc_info,#object | ||
813 | __pxa250_proc_info: | ||
814 | .long 0x69052100 | ||
815 | .long 0xfffff7f0 | ||
816 | .long PMD_TYPE_SECT | \ | ||
817 | PMD_SECT_BUFFERABLE | \ | ||
818 | PMD_SECT_CACHEABLE | \ | ||
819 | PMD_SECT_AP_WRITE | \ | ||
820 | PMD_SECT_AP_READ | ||
821 | .long PMD_TYPE_SECT | \ | ||
822 | PMD_SECT_AP_WRITE | \ | ||
823 | PMD_SECT_AP_READ | ||
824 | b __xscale_setup | ||
825 | .long cpu_arch_name | ||
826 | .long cpu_elf_name | ||
827 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | ||
828 | .long cpu_pxa250_name | ||
829 | .long xscale_processor_functions | ||
830 | .long v4wbi_tlb_fns | ||
831 | .long xscale_mc_user_fns | ||
832 | .long xscale_cache_fns | ||
833 | .size __pxa250_proc_info, . - __pxa250_proc_info | ||
834 | |||
835 | .type __pxa210_proc_info,#object | ||
836 | __pxa210_proc_info: | ||
837 | .long 0x69052120 | ||
838 | .long 0xfffff3f0 | ||
839 | .long PMD_TYPE_SECT | \ | ||
840 | PMD_SECT_BUFFERABLE | \ | 623 | PMD_SECT_BUFFERABLE | \ |
841 | PMD_SECT_CACHEABLE | \ | 624 | PMD_SECT_CACHEABLE | \ |
842 | PMD_SECT_AP_WRITE | \ | 625 | PMD_SECT_AP_WRITE | \ |
843 | PMD_SECT_AP_READ | 626 | PMD_SECT_AP_READ |
844 | .long PMD_TYPE_SECT | \ | 627 | .long PMD_TYPE_SECT | \ |
845 | PMD_SECT_AP_WRITE | \ | 628 | PMD_SECT_AP_WRITE | \ |
846 | PMD_SECT_AP_READ | 629 | PMD_SECT_AP_READ |
847 | b __xscale_setup | 630 | b __xscale_setup |
848 | .long cpu_arch_name | 631 | .long cpu_arch_name |
849 | .long cpu_elf_name | 632 | .long cpu_elf_name |
850 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | 633 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP |
851 | .long cpu_pxa210_name | 634 | .long \cpu_name |
852 | .long xscale_processor_functions | 635 | .long xscale_processor_functions |
853 | .long v4wbi_tlb_fns | 636 | .long v4wbi_tlb_fns |
854 | .long xscale_mc_user_fns | 637 | .long xscale_mc_user_fns |
855 | .long xscale_cache_fns | 638 | .ifb \cache |
856 | .size __pxa210_proc_info, . - __pxa210_proc_info | 639 | .long xscale_cache_fns |
857 | 640 | .else | |
858 | .type __ixp2400_proc_info, #object | 641 | .long \cache |
859 | __ixp2400_proc_info: | 642 | .endif |
860 | .long 0x69054190 | 643 | .size __\name\()_proc_info, . - __\name\()_proc_info |
861 | .long 0xfffffff0 | 644 | .endm |
862 | .long PMD_TYPE_SECT | \ | 645 | |
863 | PMD_SECT_BUFFERABLE | \ | 646 | xscale_proc_info 80200_A0_A1, 0x69052000, 0xfffffffe, cpu_80200_name, \ |
864 | PMD_SECT_CACHEABLE | \ | 647 | cache=xscale_80200_A0_A1_cache_fns |
865 | PMD_SECT_AP_WRITE | \ | 648 | xscale_proc_info 80200, 0x69052000, 0xfffffff0, cpu_80200_name |
866 | PMD_SECT_AP_READ | 649 | xscale_proc_info 80219, 0x69052e20, 0xffffffe0, cpu_80219_name |
867 | .long PMD_TYPE_SECT | \ | 650 | xscale_proc_info 8032x, 0x69052420, 0xfffff7e0, cpu_8032x_name |
868 | PMD_SECT_AP_WRITE | \ | 651 | xscale_proc_info 8033x, 0x69054010, 0xfffffd30, cpu_8033x_name |
869 | PMD_SECT_AP_READ | 652 | xscale_proc_info pxa250, 0x69052100, 0xfffff7f0, cpu_pxa250_name |
870 | b __xscale_setup | 653 | xscale_proc_info pxa210, 0x69052120, 0xfffff3f0, cpu_pxa210_name |
871 | .long cpu_arch_name | 654 | xscale_proc_info ixp2400, 0x69054190, 0xfffffff0, cpu_ixp2400_name |
872 | .long cpu_elf_name | 655 | xscale_proc_info ixp2800, 0x690541a0, 0xfffffff0, cpu_ixp2800_name |
873 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | 656 | xscale_proc_info ixp42x, 0x690541c0, 0xffffffc0, cpu_ixp42x_name |
874 | .long cpu_ixp2400_name | 657 | xscale_proc_info ixp43x, 0x69054040, 0xfffffff0, cpu_ixp43x_name |
875 | .long xscale_processor_functions | 658 | xscale_proc_info ixp46x, 0x69054200, 0xffffff00, cpu_ixp46x_name |
876 | .long v4wbi_tlb_fns | 659 | xscale_proc_info pxa255, 0x69052d00, 0xfffffff0, cpu_pxa255_name |
877 | .long xscale_mc_user_fns | 660 | xscale_proc_info pxa270, 0x69054110, 0xfffffff0, cpu_pxa270_name |
878 | .long xscale_cache_fns | ||
879 | .size __ixp2400_proc_info, . - __ixp2400_proc_info | ||
880 | |||
881 | .type __ixp2800_proc_info, #object | ||
882 | __ixp2800_proc_info: | ||
883 | .long 0x690541a0 | ||
884 | .long 0xfffffff0 | ||
885 | .long PMD_TYPE_SECT | \ | ||
886 | PMD_SECT_BUFFERABLE | \ | ||
887 | PMD_SECT_CACHEABLE | \ | ||
888 | PMD_SECT_AP_WRITE | \ | ||
889 | PMD_SECT_AP_READ | ||
890 | .long PMD_TYPE_SECT | \ | ||
891 | PMD_SECT_AP_WRITE | \ | ||
892 | PMD_SECT_AP_READ | ||
893 | b __xscale_setup | ||
894 | .long cpu_arch_name | ||
895 | .long cpu_elf_name | ||
896 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | ||
897 | .long cpu_ixp2800_name | ||
898 | .long xscale_processor_functions | ||
899 | .long v4wbi_tlb_fns | ||
900 | .long xscale_mc_user_fns | ||
901 | .long xscale_cache_fns | ||
902 | .size __ixp2800_proc_info, . - __ixp2800_proc_info | ||
903 | |||
904 | .type __ixp42x_proc_info, #object | ||
905 | __ixp42x_proc_info: | ||
906 | .long 0x690541c0 | ||
907 | .long 0xffffffc0 | ||
908 | .long PMD_TYPE_SECT | \ | ||
909 | PMD_SECT_BUFFERABLE | \ | ||
910 | PMD_SECT_CACHEABLE | \ | ||
911 | PMD_SECT_AP_WRITE | \ | ||
912 | PMD_SECT_AP_READ | ||
913 | .long PMD_TYPE_SECT | \ | ||
914 | PMD_SECT_AP_WRITE | \ | ||
915 | PMD_SECT_AP_READ | ||
916 | b __xscale_setup | ||
917 | .long cpu_arch_name | ||
918 | .long cpu_elf_name | ||
919 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | ||
920 | .long cpu_ixp42x_name | ||
921 | .long xscale_processor_functions | ||
922 | .long v4wbi_tlb_fns | ||
923 | .long xscale_mc_user_fns | ||
924 | .long xscale_cache_fns | ||
925 | .size __ixp42x_proc_info, . - __ixp42x_proc_info | ||
926 | |||
927 | .type __ixp43x_proc_info, #object | ||
928 | __ixp43x_proc_info: | ||
929 | .long 0x69054040 | ||
930 | .long 0xfffffff0 | ||
931 | .long PMD_TYPE_SECT | \ | ||
932 | PMD_SECT_BUFFERABLE | \ | ||
933 | PMD_SECT_CACHEABLE | \ | ||
934 | PMD_SECT_AP_WRITE | \ | ||
935 | PMD_SECT_AP_READ | ||
936 | .long PMD_TYPE_SECT | \ | ||
937 | PMD_SECT_AP_WRITE | \ | ||
938 | PMD_SECT_AP_READ | ||
939 | b __xscale_setup | ||
940 | .long cpu_arch_name | ||
941 | .long cpu_elf_name | ||
942 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | ||
943 | .long cpu_ixp43x_name | ||
944 | .long xscale_processor_functions | ||
945 | .long v4wbi_tlb_fns | ||
946 | .long xscale_mc_user_fns | ||
947 | .long xscale_cache_fns | ||
948 | .size __ixp43x_proc_info, . - __ixp43x_proc_info | ||
949 | |||
950 | .type __ixp46x_proc_info, #object | ||
951 | __ixp46x_proc_info: | ||
952 | .long 0x69054200 | ||
953 | .long 0xffffff00 | ||
954 | .long PMD_TYPE_SECT | \ | ||
955 | PMD_SECT_BUFFERABLE | \ | ||
956 | PMD_SECT_CACHEABLE | \ | ||
957 | PMD_SECT_AP_WRITE | \ | ||
958 | PMD_SECT_AP_READ | ||
959 | .long PMD_TYPE_SECT | \ | ||
960 | PMD_SECT_AP_WRITE | \ | ||
961 | PMD_SECT_AP_READ | ||
962 | b __xscale_setup | ||
963 | .long cpu_arch_name | ||
964 | .long cpu_elf_name | ||
965 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | ||
966 | .long cpu_ixp46x_name | ||
967 | .long xscale_processor_functions | ||
968 | .long v4wbi_tlb_fns | ||
969 | .long xscale_mc_user_fns | ||
970 | .long xscale_cache_fns | ||
971 | .size __ixp46x_proc_info, . - __ixp46x_proc_info | ||
972 | |||
973 | .type __pxa255_proc_info,#object | ||
974 | __pxa255_proc_info: | ||
975 | .long 0x69052d00 | ||
976 | .long 0xfffffff0 | ||
977 | .long PMD_TYPE_SECT | \ | ||
978 | PMD_SECT_BUFFERABLE | \ | ||
979 | PMD_SECT_CACHEABLE | \ | ||
980 | PMD_SECT_AP_WRITE | \ | ||
981 | PMD_SECT_AP_READ | ||
982 | .long PMD_TYPE_SECT | \ | ||
983 | PMD_SECT_AP_WRITE | \ | ||
984 | PMD_SECT_AP_READ | ||
985 | b __xscale_setup | ||
986 | .long cpu_arch_name | ||
987 | .long cpu_elf_name | ||
988 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | ||
989 | .long cpu_pxa255_name | ||
990 | .long xscale_processor_functions | ||
991 | .long v4wbi_tlb_fns | ||
992 | .long xscale_mc_user_fns | ||
993 | .long xscale_cache_fns | ||
994 | .size __pxa255_proc_info, . - __pxa255_proc_info | ||
995 | |||
996 | .type __pxa270_proc_info,#object | ||
997 | __pxa270_proc_info: | ||
998 | .long 0x69054110 | ||
999 | .long 0xfffffff0 | ||
1000 | .long PMD_TYPE_SECT | \ | ||
1001 | PMD_SECT_BUFFERABLE | \ | ||
1002 | PMD_SECT_CACHEABLE | \ | ||
1003 | PMD_SECT_AP_WRITE | \ | ||
1004 | PMD_SECT_AP_READ | ||
1005 | .long PMD_TYPE_SECT | \ | ||
1006 | PMD_SECT_AP_WRITE | \ | ||
1007 | PMD_SECT_AP_READ | ||
1008 | b __xscale_setup | ||
1009 | .long cpu_arch_name | ||
1010 | .long cpu_elf_name | ||
1011 | .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP | ||
1012 | .long cpu_pxa270_name | ||
1013 | .long xscale_processor_functions | ||
1014 | .long v4wbi_tlb_fns | ||
1015 | .long xscale_mc_user_fns | ||
1016 | .long xscale_cache_fns | ||
1017 | .size __pxa270_proc_info, . - __pxa270_proc_info | ||
1018 | |||
diff --git a/arch/arm/mm/tlb-fa.S b/arch/arm/mm/tlb-fa.S index 9694f1f6f48..d3ddcf9a76c 100644 --- a/arch/arm/mm/tlb-fa.S +++ b/arch/arm/mm/tlb-fa.S | |||
@@ -46,7 +46,6 @@ ENTRY(fa_flush_user_tlb_range) | |||
46 | add r0, r0, #PAGE_SZ | 46 | add r0, r0, #PAGE_SZ |
47 | cmp r0, r1 | 47 | cmp r0, r1 |
48 | blo 1b | 48 | blo 1b |
49 | mcr p15, 0, r3, c7, c5, 6 @ invalidate BTB | ||
50 | mcr p15, 0, r3, c7, c10, 4 @ data write barrier | 49 | mcr p15, 0, r3, c7, c10, 4 @ data write barrier |
51 | mov pc, lr | 50 | mov pc, lr |
52 | 51 | ||
@@ -60,16 +59,11 @@ ENTRY(fa_flush_kern_tlb_range) | |||
60 | add r0, r0, #PAGE_SZ | 59 | add r0, r0, #PAGE_SZ |
61 | cmp r0, r1 | 60 | cmp r0, r1 |
62 | blo 1b | 61 | blo 1b |
63 | mcr p15, 0, r3, c7, c5, 6 @ invalidate BTB | ||
64 | mcr p15, 0, r3, c7, c10, 4 @ data write barrier | 62 | mcr p15, 0, r3, c7, c10, 4 @ data write barrier |
65 | mcr p15, 0, r3, c7, c5, 4 @ prefetch flush | 63 | mcr p15, 0, r3, c7, c5, 4 @ prefetch flush (isb) |
66 | mov pc, lr | 64 | mov pc, lr |
67 | 65 | ||
68 | __INITDATA | 66 | __INITDATA |
69 | 67 | ||
70 | .type fa_tlb_fns, #object | 68 | /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */ |
71 | ENTRY(fa_tlb_fns) | 69 | define_tlb_functions fa, fa_tlb_flags |
72 | .long fa_flush_user_tlb_range | ||
73 | .long fa_flush_kern_tlb_range | ||
74 | .long fa_tlb_flags | ||
75 | .size fa_tlb_fns, . - fa_tlb_fns | ||
diff --git a/arch/arm/mm/tlb-v3.S b/arch/arm/mm/tlb-v3.S index c10786ec8e0..d253995ec4c 100644 --- a/arch/arm/mm/tlb-v3.S +++ b/arch/arm/mm/tlb-v3.S | |||
@@ -44,9 +44,5 @@ ENTRY(v3_flush_kern_tlb_range) | |||
44 | 44 | ||
45 | __INITDATA | 45 | __INITDATA |
46 | 46 | ||
47 | .type v3_tlb_fns, #object | 47 | /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */ |
48 | ENTRY(v3_tlb_fns) | 48 | define_tlb_functions v3, v3_tlb_flags |
49 | .long v3_flush_user_tlb_range | ||
50 | .long v3_flush_kern_tlb_range | ||
51 | .long v3_tlb_flags | ||
52 | .size v3_tlb_fns, . - v3_tlb_fns | ||
diff --git a/arch/arm/mm/tlb-v4.S b/arch/arm/mm/tlb-v4.S index d6c94457c2b..17a025ade57 100644 --- a/arch/arm/mm/tlb-v4.S +++ b/arch/arm/mm/tlb-v4.S | |||
@@ -57,9 +57,5 @@ ENTRY(v4_flush_user_tlb_range) | |||
57 | 57 | ||
58 | __INITDATA | 58 | __INITDATA |
59 | 59 | ||
60 | .type v4_tlb_fns, #object | 60 | /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */ |
61 | ENTRY(v4_tlb_fns) | 61 | define_tlb_functions v4, v4_tlb_flags |
62 | .long v4_flush_user_tlb_range | ||
63 | .long v4_flush_kern_tlb_range | ||
64 | .long v4_tlb_flags | ||
65 | .size v4_tlb_fns, . - v4_tlb_fns | ||
diff --git a/arch/arm/mm/tlb-v4wb.S b/arch/arm/mm/tlb-v4wb.S index cb829ca7845..c04598fa4d4 100644 --- a/arch/arm/mm/tlb-v4wb.S +++ b/arch/arm/mm/tlb-v4wb.S | |||
@@ -69,9 +69,5 @@ ENTRY(v4wb_flush_kern_tlb_range) | |||
69 | 69 | ||
70 | __INITDATA | 70 | __INITDATA |
71 | 71 | ||
72 | .type v4wb_tlb_fns, #object | 72 | /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */ |
73 | ENTRY(v4wb_tlb_fns) | 73 | define_tlb_functions v4wb, v4wb_tlb_flags |
74 | .long v4wb_flush_user_tlb_range | ||
75 | .long v4wb_flush_kern_tlb_range | ||
76 | .long v4wb_tlb_flags | ||
77 | .size v4wb_tlb_fns, . - v4wb_tlb_fns | ||
diff --git a/arch/arm/mm/tlb-v4wbi.S b/arch/arm/mm/tlb-v4wbi.S index 60cfc4a25dd..1f6062b6c1c 100644 --- a/arch/arm/mm/tlb-v4wbi.S +++ b/arch/arm/mm/tlb-v4wbi.S | |||
@@ -60,9 +60,5 @@ ENTRY(v4wbi_flush_kern_tlb_range) | |||
60 | 60 | ||
61 | __INITDATA | 61 | __INITDATA |
62 | 62 | ||
63 | .type v4wbi_tlb_fns, #object | 63 | /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */ |
64 | ENTRY(v4wbi_tlb_fns) | 64 | define_tlb_functions v4wbi, v4wbi_tlb_flags |
65 | .long v4wbi_flush_user_tlb_range | ||
66 | .long v4wbi_flush_kern_tlb_range | ||
67 | .long v4wbi_tlb_flags | ||
68 | .size v4wbi_tlb_fns, . - v4wbi_tlb_fns | ||
diff --git a/arch/arm/mm/tlb-v6.S b/arch/arm/mm/tlb-v6.S index 73d7d89b04c..eca07f550a0 100644 --- a/arch/arm/mm/tlb-v6.S +++ b/arch/arm/mm/tlb-v6.S | |||
@@ -54,7 +54,6 @@ ENTRY(v6wbi_flush_user_tlb_range) | |||
54 | add r0, r0, #PAGE_SZ | 54 | add r0, r0, #PAGE_SZ |
55 | cmp r0, r1 | 55 | cmp r0, r1 |
56 | blo 1b | 56 | blo 1b |
57 | mcr p15, 0, ip, c7, c5, 6 @ flush BTAC/BTB | ||
58 | mcr p15, 0, ip, c7, c10, 4 @ data synchronization barrier | 57 | mcr p15, 0, ip, c7, c10, 4 @ data synchronization barrier |
59 | mov pc, lr | 58 | mov pc, lr |
60 | 59 | ||
@@ -83,16 +82,11 @@ ENTRY(v6wbi_flush_kern_tlb_range) | |||
83 | add r0, r0, #PAGE_SZ | 82 | add r0, r0, #PAGE_SZ |
84 | cmp r0, r1 | 83 | cmp r0, r1 |
85 | blo 1b | 84 | blo 1b |
86 | mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB | ||
87 | mcr p15, 0, r2, c7, c10, 4 @ data synchronization barrier | 85 | mcr p15, 0, r2, c7, c10, 4 @ data synchronization barrier |
88 | mcr p15, 0, r2, c7, c5, 4 @ prefetch flush | 86 | mcr p15, 0, r2, c7, c5, 4 @ prefetch flush (isb) |
89 | mov pc, lr | 87 | mov pc, lr |
90 | 88 | ||
91 | __INIT | 89 | __INIT |
92 | 90 | ||
93 | .type v6wbi_tlb_fns, #object | 91 | /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */ |
94 | ENTRY(v6wbi_tlb_fns) | 92 | define_tlb_functions v6wbi, v6wbi_tlb_flags |
95 | .long v6wbi_flush_user_tlb_range | ||
96 | .long v6wbi_flush_kern_tlb_range | ||
97 | .long v6wbi_tlb_flags | ||
98 | .size v6wbi_tlb_fns, . - v6wbi_tlb_fns | ||
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S index 53cd5b45467..845f461f8ec 100644 --- a/arch/arm/mm/tlb-v7.S +++ b/arch/arm/mm/tlb-v7.S | |||
@@ -48,9 +48,6 @@ ENTRY(v7wbi_flush_user_tlb_range) | |||
48 | add r0, r0, #PAGE_SZ | 48 | add r0, r0, #PAGE_SZ |
49 | cmp r0, r1 | 49 | cmp r0, r1 |
50 | blo 1b | 50 | blo 1b |
51 | mov ip, #0 | ||
52 | ALT_SMP(mcr p15, 0, ip, c7, c1, 6) @ flush BTAC/BTB Inner Shareable | ||
53 | ALT_UP(mcr p15, 0, ip, c7, c5, 6) @ flush BTAC/BTB | ||
54 | dsb | 51 | dsb |
55 | mov pc, lr | 52 | mov pc, lr |
56 | ENDPROC(v7wbi_flush_user_tlb_range) | 53 | ENDPROC(v7wbi_flush_user_tlb_range) |
@@ -75,9 +72,6 @@ ENTRY(v7wbi_flush_kern_tlb_range) | |||
75 | add r0, r0, #PAGE_SZ | 72 | add r0, r0, #PAGE_SZ |
76 | cmp r0, r1 | 73 | cmp r0, r1 |
77 | blo 1b | 74 | blo 1b |
78 | mov r2, #0 | ||
79 | ALT_SMP(mcr p15, 0, r2, c7, c1, 6) @ flush BTAC/BTB Inner Shareable | ||
80 | ALT_UP(mcr p15, 0, r2, c7, c5, 6) @ flush BTAC/BTB | ||
81 | dsb | 75 | dsb |
82 | isb | 76 | isb |
83 | mov pc, lr | 77 | mov pc, lr |
@@ -85,10 +79,5 @@ ENDPROC(v7wbi_flush_kern_tlb_range) | |||
85 | 79 | ||
86 | __INIT | 80 | __INIT |
87 | 81 | ||
88 | .type v7wbi_tlb_fns, #object | 82 | /* define struct cpu_tlb_fns (see <asm/tlbflush.h> and proc-macros.S) */ |
89 | ENTRY(v7wbi_tlb_fns) | 83 | define_tlb_functions v7wbi, v7wbi_tlb_flags_up, flags_smp=v7wbi_tlb_flags_smp |
90 | .long v7wbi_flush_user_tlb_range | ||
91 | .long v7wbi_flush_kern_tlb_range | ||
92 | ALT_SMP(.long v7wbi_tlb_flags_smp) | ||
93 | ALT_UP(.long v7wbi_tlb_flags_up) | ||
94 | .size v7wbi_tlb_fns, . - v7wbi_tlb_fns | ||