diff options
Diffstat (limited to 'arch/arm')
79 files changed, 2151 insertions, 472 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 24626b0419ee..be8f634f001e 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
@@ -9,6 +9,7 @@ config ARM | |||
9 | select SYS_SUPPORTS_APM_EMULATION | 9 | select SYS_SUPPORTS_APM_EMULATION |
10 | select GENERIC_ATOMIC64 if (CPU_V6 || !CPU_32v6K || !AEABI) | 10 | select GENERIC_ATOMIC64 if (CPU_V6 || !CPU_32v6K || !AEABI) |
11 | select HAVE_OPROFILE if (HAVE_PERF_EVENTS) | 11 | select HAVE_OPROFILE if (HAVE_PERF_EVENTS) |
12 | select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL | ||
12 | select HAVE_ARCH_KGDB | 13 | select HAVE_ARCH_KGDB |
13 | select HAVE_KPROBES if !XIP_KERNEL | 14 | select HAVE_KPROBES if !XIP_KERNEL |
14 | select HAVE_KRETPROBES if (HAVE_KPROBES) | 15 | select HAVE_KRETPROBES if (HAVE_KPROBES) |
@@ -21,6 +22,7 @@ config ARM | |||
21 | select HAVE_KERNEL_GZIP | 22 | select HAVE_KERNEL_GZIP |
22 | select HAVE_KERNEL_LZO | 23 | select HAVE_KERNEL_LZO |
23 | select HAVE_KERNEL_LZMA | 24 | select HAVE_KERNEL_LZMA |
25 | select HAVE_KERNEL_XZ | ||
24 | select HAVE_IRQ_WORK | 26 | select HAVE_IRQ_WORK |
25 | select HAVE_PERF_EVENTS | 27 | select HAVE_PERF_EVENTS |
26 | select PERF_USE_VMALLOC | 28 | select PERF_USE_VMALLOC |
@@ -32,6 +34,7 @@ config ARM | |||
32 | select GENERIC_IRQ_SHOW | 34 | select GENERIC_IRQ_SHOW |
33 | select CPU_PM if (SUSPEND || CPU_IDLE) | 35 | select CPU_PM if (SUSPEND || CPU_IDLE) |
34 | select GENERIC_PCI_IOMAP | 36 | select GENERIC_PCI_IOMAP |
37 | select HAVE_BPF_JIT if NET | ||
35 | help | 38 | help |
36 | The ARM series is a line of low-power-consumption RISC chip designs | 39 | The ARM series is a line of low-power-consumption RISC chip designs |
37 | licensed by ARM Ltd and targeted at embedded applications and | 40 | licensed by ARM Ltd and targeted at embedded applications and |
@@ -266,6 +269,7 @@ config ARCH_INTEGRATOR | |||
266 | select PLAT_VERSATILE | 269 | select PLAT_VERSATILE |
267 | select PLAT_VERSATILE_FPGA_IRQ | 270 | select PLAT_VERSATILE_FPGA_IRQ |
268 | select NEED_MACH_MEMORY_H | 271 | select NEED_MACH_MEMORY_H |
272 | select SPARSE_IRQ | ||
269 | help | 273 | help |
270 | Support for ARM's Integrator platform. | 274 | Support for ARM's Integrator platform. |
271 | 275 | ||
@@ -312,6 +316,7 @@ config ARCH_VEXPRESS | |||
312 | select HAVE_CLK | 316 | select HAVE_CLK |
313 | select HAVE_PATA_PLATFORM | 317 | select HAVE_PATA_PLATFORM |
314 | select ICST | 318 | select ICST |
319 | select NO_IOPORT | ||
315 | select PLAT_VERSATILE | 320 | select PLAT_VERSATILE |
316 | select PLAT_VERSATILE_CLCD | 321 | select PLAT_VERSATILE_CLCD |
317 | help | 322 | help |
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug index e0d236d7ff73..755d9fbe3238 100644 --- a/arch/arm/Kconfig.debug +++ b/arch/arm/Kconfig.debug | |||
@@ -100,6 +100,22 @@ choice | |||
100 | Note that the system will appear to hang during boot if there | 100 | Note that the system will appear to hang during boot if there |
101 | is nothing connected to read from the DCC. | 101 | is nothing connected to read from the DCC. |
102 | 102 | ||
103 | config DEBUG_SEMIHOSTING | ||
104 | bool "Kernel low-level debug output via semihosting I/O" | ||
105 | help | ||
106 | Semihosting enables code running on an ARM target to use | ||
107 | the I/O facilities on a host debugger/emulator through a | ||
108 | simple SVC call. The host debugger or emulator must have | ||
109 | semihosting enabled for the special svc call to be trapped | ||
110 | otherwise the kernel will crash. | ||
111 | |||
112 | This is known to work with OpenOCD, as well as | ||
113 | ARM's Fast Models, or any other controlling environment | ||
114 | that implements semihosting. | ||
115 | |||
116 | For more details about semihosting, please see | ||
117 | chapter 8 of DUI0203I_rvct_developer_guide.pdf from ARM Ltd. | ||
118 | |||
103 | config AT91_DEBUG_LL_DBGU0 | 119 | config AT91_DEBUG_LL_DBGU0 |
104 | bool "Kernel low-level debugging on rm9200, 9260/9g20, 9261/9g10 and 9rl" | 120 | bool "Kernel low-level debugging on rm9200, 9260/9g20, 9261/9g10 and 9rl" |
105 | depends on HAVE_AT91_DBGU0 | 121 | depends on HAVE_AT91_DBGU0 |
diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 40319d91bb7f..93d63be8fa59 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile | |||
@@ -253,6 +253,7 @@ core-$(CONFIG_VFP) += arch/arm/vfp/ | |||
253 | 253 | ||
254 | # If we have a machine-specific directory, then include it in the build. | 254 | # If we have a machine-specific directory, then include it in the build. |
255 | core-y += arch/arm/kernel/ arch/arm/mm/ arch/arm/common/ | 255 | core-y += arch/arm/kernel/ arch/arm/mm/ arch/arm/common/ |
256 | core-y += arch/arm/net/ | ||
256 | core-y += $(machdirs) $(platdirs) | 257 | core-y += $(machdirs) $(platdirs) |
257 | 258 | ||
258 | drivers-$(CONFIG_OPROFILE) += arch/arm/oprofile/ | 259 | drivers-$(CONFIG_OPROFILE) += arch/arm/oprofile/ |
diff --git a/arch/arm/boot/compressed/.gitignore b/arch/arm/boot/compressed/.gitignore index e0936a148516..d0d441c429ae 100644 --- a/arch/arm/boot/compressed/.gitignore +++ b/arch/arm/boot/compressed/.gitignore | |||
@@ -1,8 +1,10 @@ | |||
1 | ashldi3.S | ||
1 | font.c | 2 | font.c |
2 | lib1funcs.S | 3 | lib1funcs.S |
3 | piggy.gzip | 4 | piggy.gzip |
4 | piggy.lzo | 5 | piggy.lzo |
5 | piggy.lzma | 6 | piggy.lzma |
7 | piggy.xzkern | ||
6 | vmlinux | 8 | vmlinux |
7 | vmlinux.lds | 9 | vmlinux.lds |
8 | 10 | ||
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile index cf0a64ce4b83..bb267562e7ed 100644 --- a/arch/arm/boot/compressed/Makefile +++ b/arch/arm/boot/compressed/Makefile | |||
@@ -92,6 +92,7 @@ SEDFLAGS = s/TEXT_START/$(ZTEXTADDR)/;s/BSS_START/$(ZBSSADDR)/ | |||
92 | suffix_$(CONFIG_KERNEL_GZIP) = gzip | 92 | suffix_$(CONFIG_KERNEL_GZIP) = gzip |
93 | suffix_$(CONFIG_KERNEL_LZO) = lzo | 93 | suffix_$(CONFIG_KERNEL_LZO) = lzo |
94 | suffix_$(CONFIG_KERNEL_LZMA) = lzma | 94 | suffix_$(CONFIG_KERNEL_LZMA) = lzma |
95 | suffix_$(CONFIG_KERNEL_XZ) = xzkern | ||
95 | 96 | ||
96 | # Borrowed libfdt files for the ATAG compatibility mode | 97 | # Borrowed libfdt files for the ATAG compatibility mode |
97 | 98 | ||
@@ -112,10 +113,12 @@ endif | |||
112 | 113 | ||
113 | targets := vmlinux vmlinux.lds \ | 114 | targets := vmlinux vmlinux.lds \ |
114 | piggy.$(suffix_y) piggy.$(suffix_y).o \ | 115 | piggy.$(suffix_y) piggy.$(suffix_y).o \ |
115 | lib1funcs.o lib1funcs.S font.o font.c head.o misc.o $(OBJS) | 116 | lib1funcs.o lib1funcs.S ashldi3.o ashldi3.S \ |
117 | font.o font.c head.o misc.o $(OBJS) | ||
116 | 118 | ||
117 | # Make sure files are removed during clean | 119 | # Make sure files are removed during clean |
118 | extra-y += piggy.gzip piggy.lzo piggy.lzma lib1funcs.S $(libfdt) $(libfdt_hdrs) | 120 | extra-y += piggy.gzip piggy.lzo piggy.lzma piggy.xzkern \ |
121 | lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs) | ||
119 | 122 | ||
120 | ifeq ($(CONFIG_FUNCTION_TRACER),y) | 123 | ifeq ($(CONFIG_FUNCTION_TRACER),y) |
121 | ORIG_CFLAGS := $(KBUILD_CFLAGS) | 124 | ORIG_CFLAGS := $(KBUILD_CFLAGS) |
@@ -151,6 +154,12 @@ lib1funcs = $(obj)/lib1funcs.o | |||
151 | $(obj)/lib1funcs.S: $(srctree)/arch/$(SRCARCH)/lib/lib1funcs.S | 154 | $(obj)/lib1funcs.S: $(srctree)/arch/$(SRCARCH)/lib/lib1funcs.S |
152 | $(call cmd,shipped) | 155 | $(call cmd,shipped) |
153 | 156 | ||
157 | # For __aeabi_llsl | ||
158 | ashldi3 = $(obj)/ashldi3.o | ||
159 | |||
160 | $(obj)/ashldi3.S: $(srctree)/arch/$(SRCARCH)/lib/ashldi3.S | ||
161 | $(call cmd,shipped) | ||
162 | |||
154 | # We need to prevent any GOTOFF relocs being used with references | 163 | # We need to prevent any GOTOFF relocs being used with references |
155 | # to symbols in the .bss section since we cannot relocate them | 164 | # to symbols in the .bss section since we cannot relocate them |
156 | # independently from the rest at run time. This can be achieved by | 165 | # independently from the rest at run time. This can be achieved by |
@@ -172,7 +181,7 @@ if [ $(words $(ZRELADDR)) -gt 1 -a "$(CONFIG_AUTO_ZRELADDR)" = "" ]; then \ | |||
172 | fi | 181 | fi |
173 | 182 | ||
174 | $(obj)/vmlinux: $(obj)/vmlinux.lds $(obj)/$(HEAD) $(obj)/piggy.$(suffix_y).o \ | 183 | $(obj)/vmlinux: $(obj)/vmlinux.lds $(obj)/$(HEAD) $(obj)/piggy.$(suffix_y).o \ |
175 | $(addprefix $(obj)/, $(OBJS)) $(lib1funcs) FORCE | 184 | $(addprefix $(obj)/, $(OBJS)) $(lib1funcs) $(ashldi3) FORCE |
176 | @$(check_for_multiple_zreladdr) | 185 | @$(check_for_multiple_zreladdr) |
177 | $(call if_changed,ld) | 186 | $(call if_changed,ld) |
178 | @$(check_for_bad_syms) | 187 | @$(check_for_bad_syms) |
diff --git a/arch/arm/boot/compressed/decompress.c b/arch/arm/boot/compressed/decompress.c index 07be5a2f8302..f41b38cafce8 100644 --- a/arch/arm/boot/compressed/decompress.c +++ b/arch/arm/boot/compressed/decompress.c | |||
@@ -44,6 +44,12 @@ extern void error(char *); | |||
44 | #include "../../../../lib/decompress_unlzma.c" | 44 | #include "../../../../lib/decompress_unlzma.c" |
45 | #endif | 45 | #endif |
46 | 46 | ||
47 | #ifdef CONFIG_KERNEL_XZ | ||
48 | #define memmove memmove | ||
49 | #define memcpy memcpy | ||
50 | #include "../../../../lib/decompress_unxz.c" | ||
51 | #endif | ||
52 | |||
47 | int do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x)) | 53 | int do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x)) |
48 | { | 54 | { |
49 | return decompress(input, len, NULL, NULL, output, NULL, error); | 55 | return decompress(input, len, NULL, NULL, output, NULL, error); |
diff --git a/arch/arm/boot/compressed/piggy.xzkern.S b/arch/arm/boot/compressed/piggy.xzkern.S new file mode 100644 index 000000000000..5703f300d027 --- /dev/null +++ b/arch/arm/boot/compressed/piggy.xzkern.S | |||
@@ -0,0 +1,6 @@ | |||
1 | .section .piggydata,#alloc | ||
2 | .globl input_data | ||
3 | input_data: | ||
4 | .incbin "arch/arm/boot/compressed/piggy.xzkern" | ||
5 | .globl input_data_end | ||
6 | input_data_end: | ||
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c index b2dc2dd7f1df..a3bc86fa3156 100644 --- a/arch/arm/common/gic.c +++ b/arch/arm/common/gic.c | |||
@@ -697,13 +697,12 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, | |||
697 | * For primary GICs, skip over SGIs. | 697 | * For primary GICs, skip over SGIs. |
698 | * For secondary GICs, skip over PPIs, too. | 698 | * For secondary GICs, skip over PPIs, too. |
699 | */ | 699 | */ |
700 | domain->hwirq_base = 32; | 700 | if (gic_nr == 0 && (irq_start & 31) > 0) { |
701 | if (gic_nr == 0) { | 701 | domain->hwirq_base = 16; |
702 | if ((irq_start & 31) > 0) { | 702 | if (irq_start != -1) |
703 | domain->hwirq_base = 16; | 703 | irq_start = (irq_start & ~31) + 16; |
704 | if (irq_start != -1) | 704 | } else { |
705 | irq_start = (irq_start & ~31) + 16; | 705 | domain->hwirq_base = 32; |
706 | } | ||
707 | } | 706 | } |
708 | 707 | ||
709 | /* | 708 | /* |
diff --git a/arch/arm/configs/integrator_defconfig b/arch/arm/configs/integrator_defconfig index 1103f62a1964..a8314c3ee84d 100644 --- a/arch/arm/configs/integrator_defconfig +++ b/arch/arm/configs/integrator_defconfig | |||
@@ -57,18 +57,24 @@ CONFIG_NETDEVICES=y | |||
57 | CONFIG_NET_ETHERNET=y | 57 | CONFIG_NET_ETHERNET=y |
58 | CONFIG_NET_PCI=y | 58 | CONFIG_NET_PCI=y |
59 | CONFIG_E100=y | 59 | CONFIG_E100=y |
60 | CONFIG_SMC91X=y | ||
60 | # CONFIG_KEYBOARD_ATKBD is not set | 61 | # CONFIG_KEYBOARD_ATKBD is not set |
61 | # CONFIG_SERIO_SERPORT is not set | 62 | # CONFIG_SERIO_SERPORT is not set |
62 | CONFIG_SERIAL_AMBA_PL010=y | 63 | CONFIG_SERIAL_AMBA_PL010=y |
63 | CONFIG_SERIAL_AMBA_PL010_CONSOLE=y | 64 | CONFIG_SERIAL_AMBA_PL010_CONSOLE=y |
64 | CONFIG_FB=y | 65 | CONFIG_FB=y |
65 | CONFIG_FB_MODE_HELPERS=y | 66 | CONFIG_FB_MODE_HELPERS=y |
67 | CONFIG_FB_ARMCLCD=y | ||
66 | CONFIG_FB_MATROX=y | 68 | CONFIG_FB_MATROX=y |
67 | CONFIG_FB_MATROX_MILLENIUM=y | 69 | CONFIG_FB_MATROX_MILLENIUM=y |
68 | CONFIG_FB_MATROX_MYSTIQUE=y | 70 | CONFIG_FB_MATROX_MYSTIQUE=y |
71 | # CONFIG_VGA_CONSOLE is not set | ||
72 | CONFIG_MMC=y | ||
73 | CONFIG_MMC_ARMMMCI=y | ||
69 | CONFIG_RTC_CLASS=y | 74 | CONFIG_RTC_CLASS=y |
70 | CONFIG_RTC_DRV_PL030=y | 75 | CONFIG_RTC_DRV_PL030=y |
71 | CONFIG_EXT2_FS=y | 76 | CONFIG_EXT2_FS=y |
77 | CONFIG_VFAT_FS=y | ||
72 | CONFIG_TMPFS=y | 78 | CONFIG_TMPFS=y |
73 | CONFIG_JFFS2_FS=y | 79 | CONFIG_JFFS2_FS=y |
74 | CONFIG_CRAMFS=y | 80 | CONFIG_CRAMFS=y |
@@ -78,5 +84,7 @@ CONFIG_ROOT_NFS=y | |||
78 | CONFIG_NFSD=y | 84 | CONFIG_NFSD=y |
79 | CONFIG_NFSD_V3=y | 85 | CONFIG_NFSD_V3=y |
80 | CONFIG_PARTITION_ADVANCED=y | 86 | CONFIG_PARTITION_ADVANCED=y |
87 | CONFIG_NLS_CODEPAGE_437=y | ||
88 | CONFIG_NLS_ISO8859_1=y | ||
81 | CONFIG_MAGIC_SYSRQ=y | 89 | CONFIG_MAGIC_SYSRQ=y |
82 | CONFIG_DEBUG_KERNEL=y | 90 | CONFIG_DEBUG_KERNEL=y |
diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h new file mode 100644 index 000000000000..3dabd8dd4049 --- /dev/null +++ b/arch/arm/include/asm/cp15.h | |||
@@ -0,0 +1,87 @@ | |||
1 | #ifndef __ASM_ARM_CP15_H | ||
2 | #define __ASM_ARM_CP15_H | ||
3 | |||
4 | #include <asm/system.h> | ||
5 | |||
6 | /* | ||
7 | * CR1 bits (CP#15 CR1) | ||
8 | */ | ||
9 | #define CR_M (1 << 0) /* MMU enable */ | ||
10 | #define CR_A (1 << 1) /* Alignment abort enable */ | ||
11 | #define CR_C (1 << 2) /* Dcache enable */ | ||
12 | #define CR_W (1 << 3) /* Write buffer enable */ | ||
13 | #define CR_P (1 << 4) /* 32-bit exception handler */ | ||
14 | #define CR_D (1 << 5) /* 32-bit data address range */ | ||
15 | #define CR_L (1 << 6) /* Implementation defined */ | ||
16 | #define CR_B (1 << 7) /* Big endian */ | ||
17 | #define CR_S (1 << 8) /* System MMU protection */ | ||
18 | #define CR_R (1 << 9) /* ROM MMU protection */ | ||
19 | #define CR_F (1 << 10) /* Implementation defined */ | ||
20 | #define CR_Z (1 << 11) /* Implementation defined */ | ||
21 | #define CR_I (1 << 12) /* Icache enable */ | ||
22 | #define CR_V (1 << 13) /* Vectors relocated to 0xffff0000 */ | ||
23 | #define CR_RR (1 << 14) /* Round Robin cache replacement */ | ||
24 | #define CR_L4 (1 << 15) /* LDR pc can set T bit */ | ||
25 | #define CR_DT (1 << 16) | ||
26 | #define CR_IT (1 << 18) | ||
27 | #define CR_ST (1 << 19) | ||
28 | #define CR_FI (1 << 21) /* Fast interrupt (lower latency mode) */ | ||
29 | #define CR_U (1 << 22) /* Unaligned access operation */ | ||
30 | #define CR_XP (1 << 23) /* Extended page tables */ | ||
31 | #define CR_VE (1 << 24) /* Vectored interrupts */ | ||
32 | #define CR_EE (1 << 25) /* Exception (Big) Endian */ | ||
33 | #define CR_TRE (1 << 28) /* TEX remap enable */ | ||
34 | #define CR_AFE (1 << 29) /* Access flag enable */ | ||
35 | #define CR_TE (1 << 30) /* Thumb exception enable */ | ||
36 | |||
37 | #ifndef __ASSEMBLY__ | ||
38 | |||
39 | #if __LINUX_ARM_ARCH__ >= 4 | ||
40 | #define vectors_high() (cr_alignment & CR_V) | ||
41 | #else | ||
42 | #define vectors_high() (0) | ||
43 | #endif | ||
44 | |||
45 | extern unsigned long cr_no_alignment; /* defined in entry-armv.S */ | ||
46 | extern unsigned long cr_alignment; /* defined in entry-armv.S */ | ||
47 | |||
48 | static inline unsigned int get_cr(void) | ||
49 | { | ||
50 | unsigned int val; | ||
51 | asm("mrc p15, 0, %0, c1, c0, 0 @ get CR" : "=r" (val) : : "cc"); | ||
52 | return val; | ||
53 | } | ||
54 | |||
55 | static inline void set_cr(unsigned int val) | ||
56 | { | ||
57 | asm volatile("mcr p15, 0, %0, c1, c0, 0 @ set CR" | ||
58 | : : "r" (val) : "cc"); | ||
59 | isb(); | ||
60 | } | ||
61 | |||
62 | #ifndef CONFIG_SMP | ||
63 | extern void adjust_cr(unsigned long mask, unsigned long set); | ||
64 | #endif | ||
65 | |||
66 | #define CPACC_FULL(n) (3 << (n * 2)) | ||
67 | #define CPACC_SVC(n) (1 << (n * 2)) | ||
68 | #define CPACC_DISABLE(n) (0 << (n * 2)) | ||
69 | |||
70 | static inline unsigned int get_copro_access(void) | ||
71 | { | ||
72 | unsigned int val; | ||
73 | asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access" | ||
74 | : "=r" (val) : : "cc"); | ||
75 | return val; | ||
76 | } | ||
77 | |||
78 | static inline void set_copro_access(unsigned int val) | ||
79 | { | ||
80 | asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access" | ||
81 | : : "r" (val) : "cc"); | ||
82 | isb(); | ||
83 | } | ||
84 | |||
85 | #endif | ||
86 | |||
87 | #endif | ||
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h index 0e9ce8d9686e..38050b1c4800 100644 --- a/arch/arm/include/asm/elf.h +++ b/arch/arm/include/asm/elf.h | |||
@@ -130,8 +130,4 @@ struct mm_struct; | |||
130 | extern unsigned long arch_randomize_brk(struct mm_struct *mm); | 130 | extern unsigned long arch_randomize_brk(struct mm_struct *mm); |
131 | #define arch_randomize_brk arch_randomize_brk | 131 | #define arch_randomize_brk arch_randomize_brk |
132 | 132 | ||
133 | extern int vectors_user_mapping(void); | ||
134 | #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping() | ||
135 | #define ARCH_HAS_SETUP_ADDITIONAL_PAGES | ||
136 | |||
137 | #endif | 133 | #endif |
diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h new file mode 100644 index 000000000000..5c5ca2ea62b0 --- /dev/null +++ b/arch/arm/include/asm/jump_label.h | |||
@@ -0,0 +1,41 @@ | |||
1 | #ifndef _ASM_ARM_JUMP_LABEL_H | ||
2 | #define _ASM_ARM_JUMP_LABEL_H | ||
3 | |||
4 | #ifdef __KERNEL__ | ||
5 | |||
6 | #include <linux/types.h> | ||
7 | #include <asm/system.h> | ||
8 | |||
9 | #define JUMP_LABEL_NOP_SIZE 4 | ||
10 | |||
11 | #ifdef CONFIG_THUMB2_KERNEL | ||
12 | #define JUMP_LABEL_NOP "nop.w" | ||
13 | #else | ||
14 | #define JUMP_LABEL_NOP "nop" | ||
15 | #endif | ||
16 | |||
17 | static __always_inline bool arch_static_branch(struct jump_label_key *key) | ||
18 | { | ||
19 | asm goto("1:\n\t" | ||
20 | JUMP_LABEL_NOP "\n\t" | ||
21 | ".pushsection __jump_table, \"aw\"\n\t" | ||
22 | ".word 1b, %l[l_yes], %c0\n\t" | ||
23 | ".popsection\n\t" | ||
24 | : : "i" (key) : : l_yes); | ||
25 | |||
26 | return false; | ||
27 | l_yes: | ||
28 | return true; | ||
29 | } | ||
30 | |||
31 | #endif /* __KERNEL__ */ | ||
32 | |||
33 | typedef u32 jump_label_t; | ||
34 | |||
35 | struct jump_entry { | ||
36 | jump_label_t code; | ||
37 | jump_label_t target; | ||
38 | jump_label_t key; | ||
39 | }; | ||
40 | |||
41 | #endif | ||
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index a8997d71084e..fcb575747e5e 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h | |||
@@ -116,6 +116,8 @@ | |||
116 | #define MODULES_END (END_MEM) | 116 | #define MODULES_END (END_MEM) |
117 | #define MODULES_VADDR (PHYS_OFFSET) | 117 | #define MODULES_VADDR (PHYS_OFFSET) |
118 | 118 | ||
119 | #define XIP_VIRT_ADDR(physaddr) (physaddr) | ||
120 | |||
119 | #endif /* !CONFIG_MMU */ | 121 | #endif /* !CONFIG_MMU */ |
120 | 122 | ||
121 | /* | 123 | /* |
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h index 71605d9f8e42..a0b3cac0547c 100644 --- a/arch/arm/include/asm/mmu_context.h +++ b/arch/arm/include/asm/mmu_context.h | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <asm/cacheflush.h> | 18 | #include <asm/cacheflush.h> |
19 | #include <asm/cachetype.h> | 19 | #include <asm/cachetype.h> |
20 | #include <asm/proc-fns.h> | 20 | #include <asm/proc-fns.h> |
21 | #include <asm-generic/mm_hooks.h> | ||
21 | 22 | ||
22 | void __check_kvm_seq(struct mm_struct *mm); | 23 | void __check_kvm_seq(struct mm_struct *mm); |
23 | 24 | ||
@@ -133,32 +134,4 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
133 | #define deactivate_mm(tsk,mm) do { } while (0) | 134 | #define deactivate_mm(tsk,mm) do { } while (0) |
134 | #define activate_mm(prev,next) switch_mm(prev, next, NULL) | 135 | #define activate_mm(prev,next) switch_mm(prev, next, NULL) |
135 | 136 | ||
136 | /* | ||
137 | * We are inserting a "fake" vma for the user-accessible vector page so | ||
138 | * gdb and friends can get to it through ptrace and /proc/<pid>/mem. | ||
139 | * But we also want to remove it before the generic code gets to see it | ||
140 | * during process exit or the unmapping of it would cause total havoc. | ||
141 | * (the macro is used as remove_vma() is static to mm/mmap.c) | ||
142 | */ | ||
143 | #define arch_exit_mmap(mm) \ | ||
144 | do { \ | ||
145 | struct vm_area_struct *high_vma = find_vma(mm, 0xffff0000); \ | ||
146 | if (high_vma) { \ | ||
147 | BUG_ON(high_vma->vm_next); /* it should be last */ \ | ||
148 | if (high_vma->vm_prev) \ | ||
149 | high_vma->vm_prev->vm_next = NULL; \ | ||
150 | else \ | ||
151 | mm->mmap = NULL; \ | ||
152 | rb_erase(&high_vma->vm_rb, &mm->mm_rb); \ | ||
153 | mm->mmap_cache = NULL; \ | ||
154 | mm->map_count--; \ | ||
155 | remove_vma(high_vma); \ | ||
156 | } \ | ||
157 | } while (0) | ||
158 | |||
159 | static inline void arch_dup_mmap(struct mm_struct *oldmm, | ||
160 | struct mm_struct *mm) | ||
161 | { | ||
162 | } | ||
163 | |||
164 | #endif | 137 | #endif |
diff --git a/arch/arm/include/asm/opcodes.h b/arch/arm/include/asm/opcodes.h index c0efdd60966f..19c48deda70f 100644 --- a/arch/arm/include/asm/opcodes.h +++ b/arch/arm/include/asm/opcodes.h | |||
@@ -17,4 +17,63 @@ extern asmlinkage unsigned int arm_check_condition(u32 opcode, u32 psr); | |||
17 | #define ARM_OPCODE_CONDTEST_PASS 1 | 17 | #define ARM_OPCODE_CONDTEST_PASS 1 |
18 | #define ARM_OPCODE_CONDTEST_UNCOND 2 | 18 | #define ARM_OPCODE_CONDTEST_UNCOND 2 |
19 | 19 | ||
20 | |||
21 | /* | ||
22 | * Opcode byteswap helpers | ||
23 | * | ||
24 | * These macros help with converting instructions between a canonical integer | ||
25 | * format and in-memory representation, in an endianness-agnostic manner. | ||
26 | * | ||
27 | * __mem_to_opcode_*() convert from in-memory representation to canonical form. | ||
28 | * __opcode_to_mem_*() convert from canonical form to in-memory representation. | ||
29 | * | ||
30 | * | ||
31 | * Canonical instruction representation: | ||
32 | * | ||
33 | * ARM: 0xKKLLMMNN | ||
34 | * Thumb 16-bit: 0x0000KKLL, where KK < 0xE8 | ||
35 | * Thumb 32-bit: 0xKKLLMMNN, where KK >= 0xE8 | ||
36 | * | ||
37 | * There is no way to distinguish an ARM instruction in canonical representation | ||
38 | * from a Thumb instruction (just as these cannot be distinguished in memory). | ||
39 | * Where this distinction is important, it needs to be tracked separately. | ||
40 | * | ||
41 | * Note that values in the range 0x0000E800..0xE7FFFFFF intentionally do not | ||
42 | * represent any valid Thumb-2 instruction. For this range, | ||
43 | * __opcode_is_thumb32() and __opcode_is_thumb16() will both be false. | ||
44 | */ | ||
45 | |||
46 | #ifndef __ASSEMBLY__ | ||
47 | |||
48 | #include <linux/types.h> | ||
49 | #include <linux/swab.h> | ||
50 | |||
51 | #ifdef CONFIG_CPU_ENDIAN_BE8 | ||
52 | #define __opcode_to_mem_arm(x) swab32(x) | ||
53 | #define __opcode_to_mem_thumb16(x) swab16(x) | ||
54 | #define __opcode_to_mem_thumb32(x) swahb32(x) | ||
55 | #else | ||
56 | #define __opcode_to_mem_arm(x) ((u32)(x)) | ||
57 | #define __opcode_to_mem_thumb16(x) ((u16)(x)) | ||
58 | #define __opcode_to_mem_thumb32(x) swahw32(x) | ||
59 | #endif | ||
60 | |||
61 | #define __mem_to_opcode_arm(x) __opcode_to_mem_arm(x) | ||
62 | #define __mem_to_opcode_thumb16(x) __opcode_to_mem_thumb16(x) | ||
63 | #define __mem_to_opcode_thumb32(x) __opcode_to_mem_thumb32(x) | ||
64 | |||
65 | /* Operations specific to Thumb opcodes */ | ||
66 | |||
67 | /* Instruction size checks: */ | ||
68 | #define __opcode_is_thumb32(x) ((u32)(x) >= 0xE8000000UL) | ||
69 | #define __opcode_is_thumb16(x) ((u32)(x) < 0xE800UL) | ||
70 | |||
71 | /* Operations to construct or split 32-bit Thumb instructions: */ | ||
72 | #define __opcode_thumb32_first(x) ((u16)((x) >> 16)) | ||
73 | #define __opcode_thumb32_second(x) ((u16)(x)) | ||
74 | #define __opcode_thumb32_compose(first, second) \ | ||
75 | (((u32)(u16)(first) << 16) | (u32)(u16)(second)) | ||
76 | |||
77 | #endif /* __ASSEMBLY__ */ | ||
78 | |||
20 | #endif /* __ASM_ARM_OPCODES_H */ | 79 | #endif /* __ASM_ARM_OPCODES_H */ |
diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h index 97b440c25c58..5838361c48b3 100644 --- a/arch/arm/include/asm/page.h +++ b/arch/arm/include/asm/page.h | |||
@@ -151,6 +151,8 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from, | |||
151 | #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) | 151 | #define clear_page(page) memset((void *)(page), 0, PAGE_SIZE) |
152 | extern void copy_page(void *to, const void *from); | 152 | extern void copy_page(void *to, const void *from); |
153 | 153 | ||
154 | #define __HAVE_ARCH_GATE_AREA 1 | ||
155 | |||
154 | #ifdef CONFIG_ARM_LPAE | 156 | #ifdef CONFIG_ARM_LPAE |
155 | #include <asm/pgtable-3level-types.h> | 157 | #include <asm/pgtable-3level-types.h> |
156 | #else | 158 | #else |
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h index 99cfe3607989..ee7c056be3f4 100644 --- a/arch/arm/include/asm/perf_event.h +++ b/arch/arm/include/asm/perf_event.h | |||
@@ -26,6 +26,7 @@ enum arm_perf_pmu_ids { | |||
26 | ARM_PERF_PMU_ID_CA9, | 26 | ARM_PERF_PMU_ID_CA9, |
27 | ARM_PERF_PMU_ID_CA5, | 27 | ARM_PERF_PMU_ID_CA5, |
28 | ARM_PERF_PMU_ID_CA15, | 28 | ARM_PERF_PMU_ID_CA15, |
29 | ARM_PERF_PMU_ID_CA7, | ||
29 | ARM_NUM_PMU_IDS, | 30 | ARM_NUM_PMU_IDS, |
30 | }; | 31 | }; |
31 | 32 | ||
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h index ce280b8d613c..d7038fa22343 100644 --- a/arch/arm/include/asm/processor.h +++ b/arch/arm/include/asm/processor.h | |||
@@ -55,7 +55,6 @@ struct thread_struct { | |||
55 | #define start_thread(regs,pc,sp) \ | 55 | #define start_thread(regs,pc,sp) \ |
56 | ({ \ | 56 | ({ \ |
57 | unsigned long *stack = (unsigned long *)sp; \ | 57 | unsigned long *stack = (unsigned long *)sp; \ |
58 | set_fs(USER_DS); \ | ||
59 | memset(regs->uregs, 0, sizeof(regs->uregs)); \ | 58 | memset(regs->uregs, 0, sizeof(regs->uregs)); \ |
60 | if (current->personality & ADDR_LIMIT_32BIT) \ | 59 | if (current->personality & ADDR_LIMIT_32BIT) \ |
61 | regs->ARM_cpsr = USR_MODE; \ | 60 | regs->ARM_cpsr = USR_MODE; \ |
diff --git a/arch/arm/include/asm/prom.h b/arch/arm/include/asm/prom.h index ee0363307918..aeae9c609df4 100644 --- a/arch/arm/include/asm/prom.h +++ b/arch/arm/include/asm/prom.h | |||
@@ -13,8 +13,6 @@ | |||
13 | 13 | ||
14 | #ifdef CONFIG_OF | 14 | #ifdef CONFIG_OF |
15 | 15 | ||
16 | #include <asm/irq.h> | ||
17 | |||
18 | extern struct machine_desc *setup_machine_fdt(unsigned int dt_phys); | 16 | extern struct machine_desc *setup_machine_fdt(unsigned int dt_phys); |
19 | extern void arm_dt_memblock_reserve(void); | 17 | extern void arm_dt_memblock_reserve(void); |
20 | 18 | ||
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h index e4c96cc6ec0c..774c41e8addf 100644 --- a/arch/arm/include/asm/system.h +++ b/arch/arm/include/asm/system.h | |||
@@ -15,37 +15,6 @@ | |||
15 | #define CPU_ARCH_ARMv7 9 | 15 | #define CPU_ARCH_ARMv7 9 |
16 | 16 | ||
17 | /* | 17 | /* |
18 | * CR1 bits (CP#15 CR1) | ||
19 | */ | ||
20 | #define CR_M (1 << 0) /* MMU enable */ | ||
21 | #define CR_A (1 << 1) /* Alignment abort enable */ | ||
22 | #define CR_C (1 << 2) /* Dcache enable */ | ||
23 | #define CR_W (1 << 3) /* Write buffer enable */ | ||
24 | #define CR_P (1 << 4) /* 32-bit exception handler */ | ||
25 | #define CR_D (1 << 5) /* 32-bit data address range */ | ||
26 | #define CR_L (1 << 6) /* Implementation defined */ | ||
27 | #define CR_B (1 << 7) /* Big endian */ | ||
28 | #define CR_S (1 << 8) /* System MMU protection */ | ||
29 | #define CR_R (1 << 9) /* ROM MMU protection */ | ||
30 | #define CR_F (1 << 10) /* Implementation defined */ | ||
31 | #define CR_Z (1 << 11) /* Implementation defined */ | ||
32 | #define CR_I (1 << 12) /* Icache enable */ | ||
33 | #define CR_V (1 << 13) /* Vectors relocated to 0xffff0000 */ | ||
34 | #define CR_RR (1 << 14) /* Round Robin cache replacement */ | ||
35 | #define CR_L4 (1 << 15) /* LDR pc can set T bit */ | ||
36 | #define CR_DT (1 << 16) | ||
37 | #define CR_IT (1 << 18) | ||
38 | #define CR_ST (1 << 19) | ||
39 | #define CR_FI (1 << 21) /* Fast interrupt (lower latency mode) */ | ||
40 | #define CR_U (1 << 22) /* Unaligned access operation */ | ||
41 | #define CR_XP (1 << 23) /* Extended page tables */ | ||
42 | #define CR_VE (1 << 24) /* Vectored interrupts */ | ||
43 | #define CR_EE (1 << 25) /* Exception (Big) Endian */ | ||
44 | #define CR_TRE (1 << 28) /* TEX remap enable */ | ||
45 | #define CR_AFE (1 << 29) /* Access flag enable */ | ||
46 | #define CR_TE (1 << 30) /* Thumb exception enable */ | ||
47 | |||
48 | /* | ||
49 | * This is used to ensure the compiler did actually allocate the register we | 18 | * This is used to ensure the compiler did actually allocate the register we |
50 | * asked it for some inline assembly sequences. Apparently we can't trust | 19 | * asked it for some inline assembly sequences. Apparently we can't trust |
51 | * the compiler from one version to another so a bit of paranoia won't hurt. | 20 | * the compiler from one version to another so a bit of paranoia won't hurt. |
@@ -119,12 +88,6 @@ extern void (*arm_pm_restart)(char str, const char *cmd); | |||
119 | 88 | ||
120 | extern unsigned int user_debug; | 89 | extern unsigned int user_debug; |
121 | 90 | ||
122 | #if __LINUX_ARM_ARCH__ >= 4 | ||
123 | #define vectors_high() (cr_alignment & CR_V) | ||
124 | #else | ||
125 | #define vectors_high() (0) | ||
126 | #endif | ||
127 | |||
128 | #if __LINUX_ARM_ARCH__ >= 7 || \ | 91 | #if __LINUX_ARM_ARCH__ >= 7 || \ |
129 | (__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K)) | 92 | (__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K)) |
130 | #define sev() __asm__ __volatile__ ("sev" : : : "memory") | 93 | #define sev() __asm__ __volatile__ ("sev" : : : "memory") |
@@ -185,46 +148,6 @@ extern unsigned int user_debug; | |||
185 | #define set_mb(var, value) do { var = value; smp_mb(); } while (0) | 148 | #define set_mb(var, value) do { var = value; smp_mb(); } while (0) |
186 | #define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t"); | 149 | #define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t"); |
187 | 150 | ||
188 | extern unsigned long cr_no_alignment; /* defined in entry-armv.S */ | ||
189 | extern unsigned long cr_alignment; /* defined in entry-armv.S */ | ||
190 | |||
191 | static inline unsigned int get_cr(void) | ||
192 | { | ||
193 | unsigned int val; | ||
194 | asm("mrc p15, 0, %0, c1, c0, 0 @ get CR" : "=r" (val) : : "cc"); | ||
195 | return val; | ||
196 | } | ||
197 | |||
198 | static inline void set_cr(unsigned int val) | ||
199 | { | ||
200 | asm volatile("mcr p15, 0, %0, c1, c0, 0 @ set CR" | ||
201 | : : "r" (val) : "cc"); | ||
202 | isb(); | ||
203 | } | ||
204 | |||
205 | #ifndef CONFIG_SMP | ||
206 | extern void adjust_cr(unsigned long mask, unsigned long set); | ||
207 | #endif | ||
208 | |||
209 | #define CPACC_FULL(n) (3 << (n * 2)) | ||
210 | #define CPACC_SVC(n) (1 << (n * 2)) | ||
211 | #define CPACC_DISABLE(n) (0 << (n * 2)) | ||
212 | |||
213 | static inline unsigned int get_copro_access(void) | ||
214 | { | ||
215 | unsigned int val; | ||
216 | asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access" | ||
217 | : "=r" (val) : : "cc"); | ||
218 | return val; | ||
219 | } | ||
220 | |||
221 | static inline void set_copro_access(unsigned int val) | ||
222 | { | ||
223 | asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access" | ||
224 | : : "r" (val) : "cc"); | ||
225 | isb(); | ||
226 | } | ||
227 | |||
228 | /* | 151 | /* |
229 | * switch_mm() may do a full cache flush over the context switch, | 152 | * switch_mm() may do a full cache flush over the context switch, |
230 | * so enable interrupts over the context switch to avoid high | 153 | * so enable interrupts over the context switch to avoid high |
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index 02b2f8203982..85fe61e73202 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h | |||
@@ -318,6 +318,21 @@ extern struct cpu_tlb_fns cpu_tlb; | |||
318 | 318 | ||
319 | #define tlb_flag(f) ((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f))) | 319 | #define tlb_flag(f) ((always_tlb_flags & (f)) || (__tlb_flag & possible_tlb_flags & (f))) |
320 | 320 | ||
321 | #define __tlb_op(f, insnarg, arg) \ | ||
322 | do { \ | ||
323 | if (always_tlb_flags & (f)) \ | ||
324 | asm("mcr " insnarg \ | ||
325 | : : "r" (arg) : "cc"); \ | ||
326 | else if (possible_tlb_flags & (f)) \ | ||
327 | asm("tst %1, %2\n\t" \ | ||
328 | "mcrne " insnarg \ | ||
329 | : : "r" (arg), "r" (__tlb_flag), "Ir" (f) \ | ||
330 | : "cc"); \ | ||
331 | } while (0) | ||
332 | |||
333 | #define tlb_op(f, regs, arg) __tlb_op(f, "p15, 0, %0, " regs, arg) | ||
334 | #define tlb_l2_op(f, regs, arg) __tlb_op(f, "p15, 1, %0, " regs, arg) | ||
335 | |||
321 | static inline void local_flush_tlb_all(void) | 336 | static inline void local_flush_tlb_all(void) |
322 | { | 337 | { |
323 | const int zero = 0; | 338 | const int zero = 0; |
@@ -326,16 +341,11 @@ static inline void local_flush_tlb_all(void) | |||
326 | if (tlb_flag(TLB_WB)) | 341 | if (tlb_flag(TLB_WB)) |
327 | dsb(); | 342 | dsb(); |
328 | 343 | ||
329 | if (tlb_flag(TLB_V3_FULL)) | 344 | tlb_op(TLB_V3_FULL, "c6, c0, 0", zero); |
330 | asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc"); | 345 | tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero); |
331 | if (tlb_flag(TLB_V4_U_FULL | TLB_V6_U_FULL)) | 346 | tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero); |
332 | asm("mcr p15, 0, %0, c8, c7, 0" : : "r" (zero) : "cc"); | 347 | tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero); |
333 | if (tlb_flag(TLB_V4_D_FULL | TLB_V6_D_FULL)) | 348 | tlb_op(TLB_V7_UIS_FULL, "c8, c3, 0", zero); |
334 | asm("mcr p15, 0, %0, c8, c6, 0" : : "r" (zero) : "cc"); | ||
335 | if (tlb_flag(TLB_V4_I_FULL | TLB_V6_I_FULL)) | ||
336 | asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc"); | ||
337 | if (tlb_flag(TLB_V7_UIS_FULL)) | ||
338 | asm("mcr p15, 0, %0, c8, c3, 0" : : "r" (zero) : "cc"); | ||
339 | 349 | ||
340 | if (tlb_flag(TLB_BARRIER)) { | 350 | if (tlb_flag(TLB_BARRIER)) { |
341 | dsb(); | 351 | dsb(); |
@@ -352,29 +362,23 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm) | |||
352 | if (tlb_flag(TLB_WB)) | 362 | if (tlb_flag(TLB_WB)) |
353 | dsb(); | 363 | dsb(); |
354 | 364 | ||
355 | if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) { | 365 | if (possible_tlb_flags & (TLB_V3_FULL|TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) { |
356 | if (tlb_flag(TLB_V3_FULL)) | 366 | if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) { |
357 | asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (zero) : "cc"); | 367 | tlb_op(TLB_V3_FULL, "c6, c0, 0", zero); |
358 | if (tlb_flag(TLB_V4_U_FULL)) | 368 | tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero); |
359 | asm("mcr p15, 0, %0, c8, c7, 0" : : "r" (zero) : "cc"); | 369 | tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero); |
360 | if (tlb_flag(TLB_V4_D_FULL)) | 370 | tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero); |
361 | asm("mcr p15, 0, %0, c8, c6, 0" : : "r" (zero) : "cc"); | 371 | } |
362 | if (tlb_flag(TLB_V4_I_FULL)) | 372 | put_cpu(); |
363 | asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc"); | ||
364 | } | 373 | } |
365 | put_cpu(); | 374 | |
366 | 375 | tlb_op(TLB_V6_U_ASID, "c8, c7, 2", asid); | |
367 | if (tlb_flag(TLB_V6_U_ASID)) | 376 | tlb_op(TLB_V6_D_ASID, "c8, c6, 2", asid); |
368 | asm("mcr p15, 0, %0, c8, c7, 2" : : "r" (asid) : "cc"); | 377 | tlb_op(TLB_V6_I_ASID, "c8, c5, 2", asid); |
369 | if (tlb_flag(TLB_V6_D_ASID)) | ||
370 | asm("mcr p15, 0, %0, c8, c6, 2" : : "r" (asid) : "cc"); | ||
371 | if (tlb_flag(TLB_V6_I_ASID)) | ||
372 | asm("mcr p15, 0, %0, c8, c5, 2" : : "r" (asid) : "cc"); | ||
373 | if (tlb_flag(TLB_V7_UIS_ASID)) | ||
374 | #ifdef CONFIG_ARM_ERRATA_720789 | 378 | #ifdef CONFIG_ARM_ERRATA_720789 |
375 | asm("mcr p15, 0, %0, c8, c3, 0" : : "r" (zero) : "cc"); | 379 | tlb_op(TLB_V7_UIS_ASID, "c8, c3, 0", zero); |
376 | #else | 380 | #else |
377 | asm("mcr p15, 0, %0, c8, c3, 2" : : "r" (asid) : "cc"); | 381 | tlb_op(TLB_V7_UIS_ASID, "c8, c3, 2", asid); |
378 | #endif | 382 | #endif |
379 | 383 | ||
380 | if (tlb_flag(TLB_BARRIER)) | 384 | if (tlb_flag(TLB_BARRIER)) |
@@ -392,30 +396,23 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) | |||
392 | if (tlb_flag(TLB_WB)) | 396 | if (tlb_flag(TLB_WB)) |
393 | dsb(); | 397 | dsb(); |
394 | 398 | ||
395 | if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { | 399 | if (possible_tlb_flags & (TLB_V3_PAGE|TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) && |
396 | if (tlb_flag(TLB_V3_PAGE)) | 400 | cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) { |
397 | asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (uaddr) : "cc"); | 401 | tlb_op(TLB_V3_PAGE, "c6, c0, 0", uaddr); |
398 | if (tlb_flag(TLB_V4_U_PAGE)) | 402 | tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr); |
399 | asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (uaddr) : "cc"); | 403 | tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", uaddr); |
400 | if (tlb_flag(TLB_V4_D_PAGE)) | 404 | tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", uaddr); |
401 | asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (uaddr) : "cc"); | ||
402 | if (tlb_flag(TLB_V4_I_PAGE)) | ||
403 | asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc"); | ||
404 | if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL)) | 405 | if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL)) |
405 | asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc"); | 406 | asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc"); |
406 | } | 407 | } |
407 | 408 | ||
408 | if (tlb_flag(TLB_V6_U_PAGE)) | 409 | tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", uaddr); |
409 | asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (uaddr) : "cc"); | 410 | tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", uaddr); |
410 | if (tlb_flag(TLB_V6_D_PAGE)) | 411 | tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", uaddr); |
411 | asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (uaddr) : "cc"); | ||
412 | if (tlb_flag(TLB_V6_I_PAGE)) | ||
413 | asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (uaddr) : "cc"); | ||
414 | if (tlb_flag(TLB_V7_UIS_PAGE)) | ||
415 | #ifdef CONFIG_ARM_ERRATA_720789 | 412 | #ifdef CONFIG_ARM_ERRATA_720789 |
416 | asm("mcr p15, 0, %0, c8, c3, 3" : : "r" (uaddr & PAGE_MASK) : "cc"); | 413 | tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 3", uaddr & PAGE_MASK); |
417 | #else | 414 | #else |
418 | asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (uaddr) : "cc"); | 415 | tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", uaddr); |
419 | #endif | 416 | #endif |
420 | 417 | ||
421 | if (tlb_flag(TLB_BARRIER)) | 418 | if (tlb_flag(TLB_BARRIER)) |
@@ -432,25 +429,17 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr) | |||
432 | if (tlb_flag(TLB_WB)) | 429 | if (tlb_flag(TLB_WB)) |
433 | dsb(); | 430 | dsb(); |
434 | 431 | ||
435 | if (tlb_flag(TLB_V3_PAGE)) | 432 | tlb_op(TLB_V3_PAGE, "c6, c0, 0", kaddr); |
436 | asm("mcr p15, 0, %0, c6, c0, 0" : : "r" (kaddr) : "cc"); | 433 | tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr); |
437 | if (tlb_flag(TLB_V4_U_PAGE)) | 434 | tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr); |
438 | asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (kaddr) : "cc"); | 435 | tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr); |
439 | if (tlb_flag(TLB_V4_D_PAGE)) | ||
440 | asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (kaddr) : "cc"); | ||
441 | if (tlb_flag(TLB_V4_I_PAGE)) | ||
442 | asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (kaddr) : "cc"); | ||
443 | if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL)) | 436 | if (!tlb_flag(TLB_V4_I_PAGE) && tlb_flag(TLB_V4_I_FULL)) |
444 | asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc"); | 437 | asm("mcr p15, 0, %0, c8, c5, 0" : : "r" (zero) : "cc"); |
445 | 438 | ||
446 | if (tlb_flag(TLB_V6_U_PAGE)) | 439 | tlb_op(TLB_V6_U_PAGE, "c8, c7, 1", kaddr); |
447 | asm("mcr p15, 0, %0, c8, c7, 1" : : "r" (kaddr) : "cc"); | 440 | tlb_op(TLB_V6_D_PAGE, "c8, c6, 1", kaddr); |
448 | if (tlb_flag(TLB_V6_D_PAGE)) | 441 | tlb_op(TLB_V6_I_PAGE, "c8, c5, 1", kaddr); |
449 | asm("mcr p15, 0, %0, c8, c6, 1" : : "r" (kaddr) : "cc"); | 442 | tlb_op(TLB_V7_UIS_PAGE, "c8, c3, 1", kaddr); |
450 | if (tlb_flag(TLB_V6_I_PAGE)) | ||
451 | asm("mcr p15, 0, %0, c8, c5, 1" : : "r" (kaddr) : "cc"); | ||
452 | if (tlb_flag(TLB_V7_UIS_PAGE)) | ||
453 | asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (kaddr) : "cc"); | ||
454 | 443 | ||
455 | if (tlb_flag(TLB_BARRIER)) { | 444 | if (tlb_flag(TLB_BARRIER)) { |
456 | dsb(); | 445 | dsb(); |
@@ -475,13 +464,8 @@ static inline void flush_pmd_entry(void *pmd) | |||
475 | { | 464 | { |
476 | const unsigned int __tlb_flag = __cpu_tlb_flags; | 465 | const unsigned int __tlb_flag = __cpu_tlb_flags; |
477 | 466 | ||
478 | if (tlb_flag(TLB_DCLEAN)) | 467 | tlb_op(TLB_DCLEAN, "c7, c10, 1 @ flush_pmd", pmd); |
479 | asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pmd" | 468 | tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1 @ L2 flush_pmd", pmd); |
480 | : : "r" (pmd) : "cc"); | ||
481 | |||
482 | if (tlb_flag(TLB_L2CLEAN_FR)) | ||
483 | asm("mcr p15, 1, %0, c15, c9, 1 @ L2 flush_pmd" | ||
484 | : : "r" (pmd) : "cc"); | ||
485 | 469 | ||
486 | if (tlb_flag(TLB_WB)) | 470 | if (tlb_flag(TLB_WB)) |
487 | dsb(); | 471 | dsb(); |
@@ -491,15 +475,11 @@ static inline void clean_pmd_entry(void *pmd) | |||
491 | { | 475 | { |
492 | const unsigned int __tlb_flag = __cpu_tlb_flags; | 476 | const unsigned int __tlb_flag = __cpu_tlb_flags; |
493 | 477 | ||
494 | if (tlb_flag(TLB_DCLEAN)) | 478 | tlb_op(TLB_DCLEAN, "c7, c10, 1 @ flush_pmd", pmd); |
495 | asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pmd" | 479 | tlb_l2_op(TLB_L2CLEAN_FR, "c15, c9, 1 @ L2 flush_pmd", pmd); |
496 | : : "r" (pmd) : "cc"); | ||
497 | |||
498 | if (tlb_flag(TLB_L2CLEAN_FR)) | ||
499 | asm("mcr p15, 1, %0, c15, c9, 1 @ L2 flush_pmd" | ||
500 | : : "r" (pmd) : "cc"); | ||
501 | } | 480 | } |
502 | 481 | ||
482 | #undef tlb_op | ||
503 | #undef tlb_flag | 483 | #undef tlb_flag |
504 | #undef always_tlb_flags | 484 | #undef always_tlb_flags |
505 | #undef possible_tlb_flags | 485 | #undef possible_tlb_flags |
diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h index 5b29a6673625..f555bb3664dc 100644 --- a/arch/arm/include/asm/traps.h +++ b/arch/arm/include/asm/traps.h | |||
@@ -46,7 +46,7 @@ static inline int in_exception_text(unsigned long ptr) | |||
46 | return in ? : __in_irqentry_text(ptr); | 46 | return in ? : __in_irqentry_text(ptr); |
47 | } | 47 | } |
48 | 48 | ||
49 | extern void __init early_trap_init(void); | 49 | extern void __init early_trap_init(void *); |
50 | extern void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame); | 50 | extern void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame); |
51 | extern void ptrace_break(struct task_struct *tsk, struct pt_regs *regs); | 51 | extern void ptrace_break(struct task_struct *tsk, struct pt_regs *regs); |
52 | 52 | ||
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index 43b740d0e374..1b7d9a390971 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile | |||
@@ -7,6 +7,8 @@ AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET) | |||
7 | 7 | ||
8 | ifdef CONFIG_FUNCTION_TRACER | 8 | ifdef CONFIG_FUNCTION_TRACER |
9 | CFLAGS_REMOVE_ftrace.o = -pg | 9 | CFLAGS_REMOVE_ftrace.o = -pg |
10 | CFLAGS_REMOVE_insn.o = -pg | ||
11 | CFLAGS_REMOVE_patch.o = -pg | ||
10 | endif | 12 | endif |
11 | 13 | ||
12 | CFLAGS_REMOVE_return_address.o = -pg | 14 | CFLAGS_REMOVE_return_address.o = -pg |
@@ -34,10 +36,11 @@ obj-$(CONFIG_HAVE_SCHED_CLOCK) += sched_clock.o | |||
34 | obj-$(CONFIG_SMP) += smp.o smp_tlb.o | 36 | obj-$(CONFIG_SMP) += smp.o smp_tlb.o |
35 | obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o | 37 | obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o |
36 | obj-$(CONFIG_HAVE_ARM_TWD) += smp_twd.o | 38 | obj-$(CONFIG_HAVE_ARM_TWD) += smp_twd.o |
37 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o | 39 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o insn.o |
38 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o | 40 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o insn.o |
41 | obj-$(CONFIG_JUMP_LABEL) += jump_label.o insn.o patch.o | ||
39 | obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o | 42 | obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o |
40 | obj-$(CONFIG_KPROBES) += kprobes.o kprobes-common.o | 43 | obj-$(CONFIG_KPROBES) += kprobes.o kprobes-common.o patch.o |
41 | ifdef CONFIG_THUMB2_KERNEL | 44 | ifdef CONFIG_THUMB2_KERNEL |
42 | obj-$(CONFIG_KPROBES) += kprobes-thumb.o | 45 | obj-$(CONFIG_KPROBES) += kprobes-thumb.o |
43 | else | 46 | else |
diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S index 204e2160cfcc..e5a765c5f06a 100644 --- a/arch/arm/kernel/debug.S +++ b/arch/arm/kernel/debug.S | |||
@@ -100,7 +100,7 @@ | |||
100 | 100 | ||
101 | #endif /* CONFIG_CPU_V6 */ | 101 | #endif /* CONFIG_CPU_V6 */ |
102 | 102 | ||
103 | #else | 103 | #elif !defined(CONFIG_DEBUG_SEMIHOSTING) |
104 | #include <mach/debug-macro.S> | 104 | #include <mach/debug-macro.S> |
105 | #endif /* CONFIG_DEBUG_ICEDCC */ | 105 | #endif /* CONFIG_DEBUG_ICEDCC */ |
106 | 106 | ||
@@ -155,6 +155,8 @@ hexbuf: .space 16 | |||
155 | 155 | ||
156 | .ltorg | 156 | .ltorg |
157 | 157 | ||
158 | #ifndef CONFIG_DEBUG_SEMIHOSTING | ||
159 | |||
158 | ENTRY(printascii) | 160 | ENTRY(printascii) |
159 | addruart_current r3, r1, r2 | 161 | addruart_current r3, r1, r2 |
160 | b 2f | 162 | b 2f |
@@ -177,3 +179,24 @@ ENTRY(printch) | |||
177 | mov r0, #0 | 179 | mov r0, #0 |
178 | b 1b | 180 | b 1b |
179 | ENDPROC(printch) | 181 | ENDPROC(printch) |
182 | |||
183 | #else | ||
184 | |||
185 | ENTRY(printascii) | ||
186 | mov r1, r0 | ||
187 | mov r0, #0x04 @ SYS_WRITE0 | ||
188 | ARM( svc #0x123456 ) | ||
189 | THUMB( svc #0xab ) | ||
190 | mov pc, lr | ||
191 | ENDPROC(printascii) | ||
192 | |||
193 | ENTRY(printch) | ||
194 | adr r1, hexbuf | ||
195 | strb r0, [r1] | ||
196 | mov r0, #0x03 @ SYS_WRITEC | ||
197 | ARM( svc #0x123456 ) | ||
198 | THUMB( svc #0xab ) | ||
199 | mov pc, lr | ||
200 | ENDPROC(printch) | ||
201 | |||
202 | #endif | ||
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c index 4c164ece5891..c32f8456aa09 100644 --- a/arch/arm/kernel/fiq.c +++ b/arch/arm/kernel/fiq.c | |||
@@ -42,9 +42,9 @@ | |||
42 | #include <linux/seq_file.h> | 42 | #include <linux/seq_file.h> |
43 | 43 | ||
44 | #include <asm/cacheflush.h> | 44 | #include <asm/cacheflush.h> |
45 | #include <asm/cp15.h> | ||
45 | #include <asm/fiq.h> | 46 | #include <asm/fiq.h> |
46 | #include <asm/irq.h> | 47 | #include <asm/irq.h> |
47 | #include <asm/system.h> | ||
48 | #include <asm/traps.h> | 48 | #include <asm/traps.h> |
49 | 49 | ||
50 | static unsigned long no_fiq_insn; | 50 | static unsigned long no_fiq_insn; |
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c index c0062ad1e847..df0bf0c8cb79 100644 --- a/arch/arm/kernel/ftrace.c +++ b/arch/arm/kernel/ftrace.c | |||
@@ -16,10 +16,13 @@ | |||
16 | #include <linux/uaccess.h> | 16 | #include <linux/uaccess.h> |
17 | 17 | ||
18 | #include <asm/cacheflush.h> | 18 | #include <asm/cacheflush.h> |
19 | #include <asm/opcodes.h> | ||
19 | #include <asm/ftrace.h> | 20 | #include <asm/ftrace.h> |
20 | 21 | ||
22 | #include "insn.h" | ||
23 | |||
21 | #ifdef CONFIG_THUMB2_KERNEL | 24 | #ifdef CONFIG_THUMB2_KERNEL |
22 | #define NOP 0xeb04f85d /* pop.w {lr} */ | 25 | #define NOP 0xf85deb04 /* pop.w {lr} */ |
23 | #else | 26 | #else |
24 | #define NOP 0xe8bd4000 /* pop {lr} */ | 27 | #define NOP 0xe8bd4000 /* pop {lr} */ |
25 | #endif | 28 | #endif |
@@ -60,76 +63,31 @@ static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr) | |||
60 | } | 63 | } |
61 | #endif | 64 | #endif |
62 | 65 | ||
63 | #ifdef CONFIG_THUMB2_KERNEL | ||
64 | static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr, | ||
65 | bool link) | ||
66 | { | ||
67 | unsigned long s, j1, j2, i1, i2, imm10, imm11; | ||
68 | unsigned long first, second; | ||
69 | long offset; | ||
70 | |||
71 | offset = (long)addr - (long)(pc + 4); | ||
72 | if (offset < -16777216 || offset > 16777214) { | ||
73 | WARN_ON_ONCE(1); | ||
74 | return 0; | ||
75 | } | ||
76 | |||
77 | s = (offset >> 24) & 0x1; | ||
78 | i1 = (offset >> 23) & 0x1; | ||
79 | i2 = (offset >> 22) & 0x1; | ||
80 | imm10 = (offset >> 12) & 0x3ff; | ||
81 | imm11 = (offset >> 1) & 0x7ff; | ||
82 | |||
83 | j1 = (!i1) ^ s; | ||
84 | j2 = (!i2) ^ s; | ||
85 | |||
86 | first = 0xf000 | (s << 10) | imm10; | ||
87 | second = 0x9000 | (j1 << 13) | (j2 << 11) | imm11; | ||
88 | if (link) | ||
89 | second |= 1 << 14; | ||
90 | |||
91 | return (second << 16) | first; | ||
92 | } | ||
93 | #else | ||
94 | static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr, | ||
95 | bool link) | ||
96 | { | ||
97 | unsigned long opcode = 0xea000000; | ||
98 | long offset; | ||
99 | |||
100 | if (link) | ||
101 | opcode |= 1 << 24; | ||
102 | |||
103 | offset = (long)addr - (long)(pc + 8); | ||
104 | if (unlikely(offset < -33554432 || offset > 33554428)) { | ||
105 | /* Can't generate branches that far (from ARM ARM). Ftrace | ||
106 | * doesn't generate branches outside of kernel text. | ||
107 | */ | ||
108 | WARN_ON_ONCE(1); | ||
109 | return 0; | ||
110 | } | ||
111 | |||
112 | offset = (offset >> 2) & 0x00ffffff; | ||
113 | |||
114 | return opcode | offset; | ||
115 | } | ||
116 | #endif | ||
117 | |||
118 | static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr) | 66 | static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr) |
119 | { | 67 | { |
120 | return ftrace_gen_branch(pc, addr, true); | 68 | return arm_gen_branch_link(pc, addr); |
121 | } | 69 | } |
122 | 70 | ||
123 | static int ftrace_modify_code(unsigned long pc, unsigned long old, | 71 | static int ftrace_modify_code(unsigned long pc, unsigned long old, |
124 | unsigned long new) | 72 | unsigned long new, bool validate) |
125 | { | 73 | { |
126 | unsigned long replaced; | 74 | unsigned long replaced; |
127 | 75 | ||
128 | if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE)) | 76 | if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) { |
129 | return -EFAULT; | 77 | old = __opcode_to_mem_thumb32(old); |
78 | new = __opcode_to_mem_thumb32(new); | ||
79 | } else { | ||
80 | old = __opcode_to_mem_arm(old); | ||
81 | new = __opcode_to_mem_arm(new); | ||
82 | } | ||
130 | 83 | ||
131 | if (replaced != old) | 84 | if (validate) { |
132 | return -EINVAL; | 85 | if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE)) |
86 | return -EFAULT; | ||
87 | |||
88 | if (replaced != old) | ||
89 | return -EINVAL; | ||
90 | } | ||
133 | 91 | ||
134 | if (probe_kernel_write((void *)pc, &new, MCOUNT_INSN_SIZE)) | 92 | if (probe_kernel_write((void *)pc, &new, MCOUNT_INSN_SIZE)) |
135 | return -EPERM; | 93 | return -EPERM; |
@@ -141,23 +99,21 @@ static int ftrace_modify_code(unsigned long pc, unsigned long old, | |||
141 | 99 | ||
142 | int ftrace_update_ftrace_func(ftrace_func_t func) | 100 | int ftrace_update_ftrace_func(ftrace_func_t func) |
143 | { | 101 | { |
144 | unsigned long pc, old; | 102 | unsigned long pc; |
145 | unsigned long new; | 103 | unsigned long new; |
146 | int ret; | 104 | int ret; |
147 | 105 | ||
148 | pc = (unsigned long)&ftrace_call; | 106 | pc = (unsigned long)&ftrace_call; |
149 | memcpy(&old, &ftrace_call, MCOUNT_INSN_SIZE); | ||
150 | new = ftrace_call_replace(pc, (unsigned long)func); | 107 | new = ftrace_call_replace(pc, (unsigned long)func); |
151 | 108 | ||
152 | ret = ftrace_modify_code(pc, old, new); | 109 | ret = ftrace_modify_code(pc, 0, new, false); |
153 | 110 | ||
154 | #ifdef CONFIG_OLD_MCOUNT | 111 | #ifdef CONFIG_OLD_MCOUNT |
155 | if (!ret) { | 112 | if (!ret) { |
156 | pc = (unsigned long)&ftrace_call_old; | 113 | pc = (unsigned long)&ftrace_call_old; |
157 | memcpy(&old, &ftrace_call_old, MCOUNT_INSN_SIZE); | ||
158 | new = ftrace_call_replace(pc, (unsigned long)func); | 114 | new = ftrace_call_replace(pc, (unsigned long)func); |
159 | 115 | ||
160 | ret = ftrace_modify_code(pc, old, new); | 116 | ret = ftrace_modify_code(pc, 0, new, false); |
161 | } | 117 | } |
162 | #endif | 118 | #endif |
163 | 119 | ||
@@ -172,7 +128,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) | |||
172 | old = ftrace_nop_replace(rec); | 128 | old = ftrace_nop_replace(rec); |
173 | new = ftrace_call_replace(ip, adjust_address(rec, addr)); | 129 | new = ftrace_call_replace(ip, adjust_address(rec, addr)); |
174 | 130 | ||
175 | return ftrace_modify_code(rec->ip, old, new); | 131 | return ftrace_modify_code(rec->ip, old, new, true); |
176 | } | 132 | } |
177 | 133 | ||
178 | int ftrace_make_nop(struct module *mod, | 134 | int ftrace_make_nop(struct module *mod, |
@@ -185,7 +141,7 @@ int ftrace_make_nop(struct module *mod, | |||
185 | 141 | ||
186 | old = ftrace_call_replace(ip, adjust_address(rec, addr)); | 142 | old = ftrace_call_replace(ip, adjust_address(rec, addr)); |
187 | new = ftrace_nop_replace(rec); | 143 | new = ftrace_nop_replace(rec); |
188 | ret = ftrace_modify_code(ip, old, new); | 144 | ret = ftrace_modify_code(ip, old, new, true); |
189 | 145 | ||
190 | #ifdef CONFIG_OLD_MCOUNT | 146 | #ifdef CONFIG_OLD_MCOUNT |
191 | if (ret == -EINVAL && addr == MCOUNT_ADDR) { | 147 | if (ret == -EINVAL && addr == MCOUNT_ADDR) { |
@@ -193,7 +149,7 @@ int ftrace_make_nop(struct module *mod, | |||
193 | 149 | ||
194 | old = ftrace_call_replace(ip, adjust_address(rec, addr)); | 150 | old = ftrace_call_replace(ip, adjust_address(rec, addr)); |
195 | new = ftrace_nop_replace(rec); | 151 | new = ftrace_nop_replace(rec); |
196 | ret = ftrace_modify_code(ip, old, new); | 152 | ret = ftrace_modify_code(ip, old, new, true); |
197 | } | 153 | } |
198 | #endif | 154 | #endif |
199 | 155 | ||
@@ -249,12 +205,12 @@ static int __ftrace_modify_caller(unsigned long *callsite, | |||
249 | { | 205 | { |
250 | unsigned long caller_fn = (unsigned long) func; | 206 | unsigned long caller_fn = (unsigned long) func; |
251 | unsigned long pc = (unsigned long) callsite; | 207 | unsigned long pc = (unsigned long) callsite; |
252 | unsigned long branch = ftrace_gen_branch(pc, caller_fn, false); | 208 | unsigned long branch = arm_gen_branch(pc, caller_fn); |
253 | unsigned long nop = 0xe1a00000; /* mov r0, r0 */ | 209 | unsigned long nop = 0xe1a00000; /* mov r0, r0 */ |
254 | unsigned long old = enable ? nop : branch; | 210 | unsigned long old = enable ? nop : branch; |
255 | unsigned long new = enable ? branch : nop; | 211 | unsigned long new = enable ? branch : nop; |
256 | 212 | ||
257 | return ftrace_modify_code(pc, old, new); | 213 | return ftrace_modify_code(pc, old, new, true); |
258 | } | 214 | } |
259 | 215 | ||
260 | static int ftrace_modify_graph_caller(bool enable) | 216 | static int ftrace_modify_graph_caller(bool enable) |
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S index d46f25968bec..278cfc144f44 100644 --- a/arch/arm/kernel/head-nommu.S +++ b/arch/arm/kernel/head-nommu.S | |||
@@ -17,8 +17,8 @@ | |||
17 | #include <asm/assembler.h> | 17 | #include <asm/assembler.h> |
18 | #include <asm/ptrace.h> | 18 | #include <asm/ptrace.h> |
19 | #include <asm/asm-offsets.h> | 19 | #include <asm/asm-offsets.h> |
20 | #include <asm/cp15.h> | ||
20 | #include <asm/thread_info.h> | 21 | #include <asm/thread_info.h> |
21 | #include <asm/system.h> | ||
22 | 22 | ||
23 | /* | 23 | /* |
24 | * Kernel startup entry point. | 24 | * Kernel startup entry point. |
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 6d5791144066..3bf0c7f8b043 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S | |||
@@ -15,12 +15,12 @@ | |||
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | 16 | ||
17 | #include <asm/assembler.h> | 17 | #include <asm/assembler.h> |
18 | #include <asm/cp15.h> | ||
18 | #include <asm/domain.h> | 19 | #include <asm/domain.h> |
19 | #include <asm/ptrace.h> | 20 | #include <asm/ptrace.h> |
20 | #include <asm/asm-offsets.h> | 21 | #include <asm/asm-offsets.h> |
21 | #include <asm/memory.h> | 22 | #include <asm/memory.h> |
22 | #include <asm/thread_info.h> | 23 | #include <asm/thread_info.h> |
23 | #include <asm/system.h> | ||
24 | #include <asm/pgtable.h> | 24 | #include <asm/pgtable.h> |
25 | 25 | ||
26 | #ifdef CONFIG_DEBUG_LL | 26 | #ifdef CONFIG_DEBUG_LL |
@@ -265,7 +265,7 @@ __create_page_tables: | |||
265 | str r6, [r3] | 265 | str r6, [r3] |
266 | 266 | ||
267 | #ifdef CONFIG_DEBUG_LL | 267 | #ifdef CONFIG_DEBUG_LL |
268 | #ifndef CONFIG_DEBUG_ICEDCC | 268 | #if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING) |
269 | /* | 269 | /* |
270 | * Map in IO space for serial debugging. | 270 | * Map in IO space for serial debugging. |
271 | * This allows debug messages to be output | 271 | * This allows debug messages to be output |
@@ -297,10 +297,10 @@ __create_page_tables: | |||
297 | cmp r0, r6 | 297 | cmp r0, r6 |
298 | blo 1b | 298 | blo 1b |
299 | 299 | ||
300 | #else /* CONFIG_DEBUG_ICEDCC */ | 300 | #else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */ |
301 | /* we don't need any serial debugging mappings for ICEDCC */ | 301 | /* we don't need any serial debugging mappings */ |
302 | ldr r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags | 302 | ldr r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags |
303 | #endif /* !CONFIG_DEBUG_ICEDCC */ | 303 | #endif |
304 | 304 | ||
305 | #if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS) | 305 | #if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS) |
306 | /* | 306 | /* |
diff --git a/arch/arm/kernel/insn.c b/arch/arm/kernel/insn.c new file mode 100644 index 000000000000..ab312e516546 --- /dev/null +++ b/arch/arm/kernel/insn.c | |||
@@ -0,0 +1,61 @@ | |||
1 | #include <linux/kernel.h> | ||
2 | #include <asm/opcodes.h> | ||
3 | |||
4 | static unsigned long | ||
5 | __arm_gen_branch_thumb2(unsigned long pc, unsigned long addr, bool link) | ||
6 | { | ||
7 | unsigned long s, j1, j2, i1, i2, imm10, imm11; | ||
8 | unsigned long first, second; | ||
9 | long offset; | ||
10 | |||
11 | offset = (long)addr - (long)(pc + 4); | ||
12 | if (offset < -16777216 || offset > 16777214) { | ||
13 | WARN_ON_ONCE(1); | ||
14 | return 0; | ||
15 | } | ||
16 | |||
17 | s = (offset >> 24) & 0x1; | ||
18 | i1 = (offset >> 23) & 0x1; | ||
19 | i2 = (offset >> 22) & 0x1; | ||
20 | imm10 = (offset >> 12) & 0x3ff; | ||
21 | imm11 = (offset >> 1) & 0x7ff; | ||
22 | |||
23 | j1 = (!i1) ^ s; | ||
24 | j2 = (!i2) ^ s; | ||
25 | |||
26 | first = 0xf000 | (s << 10) | imm10; | ||
27 | second = 0x9000 | (j1 << 13) | (j2 << 11) | imm11; | ||
28 | if (link) | ||
29 | second |= 1 << 14; | ||
30 | |||
31 | return __opcode_thumb32_compose(first, second); | ||
32 | } | ||
33 | |||
34 | static unsigned long | ||
35 | __arm_gen_branch_arm(unsigned long pc, unsigned long addr, bool link) | ||
36 | { | ||
37 | unsigned long opcode = 0xea000000; | ||
38 | long offset; | ||
39 | |||
40 | if (link) | ||
41 | opcode |= 1 << 24; | ||
42 | |||
43 | offset = (long)addr - (long)(pc + 8); | ||
44 | if (unlikely(offset < -33554432 || offset > 33554428)) { | ||
45 | WARN_ON_ONCE(1); | ||
46 | return 0; | ||
47 | } | ||
48 | |||
49 | offset = (offset >> 2) & 0x00ffffff; | ||
50 | |||
51 | return opcode | offset; | ||
52 | } | ||
53 | |||
54 | unsigned long | ||
55 | __arm_gen_branch(unsigned long pc, unsigned long addr, bool link) | ||
56 | { | ||
57 | if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) | ||
58 | return __arm_gen_branch_thumb2(pc, addr, link); | ||
59 | else | ||
60 | return __arm_gen_branch_arm(pc, addr, link); | ||
61 | } | ||
diff --git a/arch/arm/kernel/insn.h b/arch/arm/kernel/insn.h new file mode 100644 index 000000000000..e96065da4dae --- /dev/null +++ b/arch/arm/kernel/insn.h | |||
@@ -0,0 +1,29 @@ | |||
1 | #ifndef __ASM_ARM_INSN_H | ||
2 | #define __ASM_ARM_INSN_H | ||
3 | |||
4 | static inline unsigned long | ||
5 | arm_gen_nop(void) | ||
6 | { | ||
7 | #ifdef CONFIG_THUMB2_KERNEL | ||
8 | return 0xf3af8000; /* nop.w */ | ||
9 | #else | ||
10 | return 0xe1a00000; /* mov r0, r0 */ | ||
11 | #endif | ||
12 | } | ||
13 | |||
14 | unsigned long | ||
15 | __arm_gen_branch(unsigned long pc, unsigned long addr, bool link); | ||
16 | |||
17 | static inline unsigned long | ||
18 | arm_gen_branch(unsigned long pc, unsigned long addr) | ||
19 | { | ||
20 | return __arm_gen_branch(pc, addr, false); | ||
21 | } | ||
22 | |||
23 | static inline unsigned long | ||
24 | arm_gen_branch_link(unsigned long pc, unsigned long addr) | ||
25 | { | ||
26 | return __arm_gen_branch(pc, addr, true); | ||
27 | } | ||
28 | |||
29 | #endif | ||
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 3efd82cc95f0..3f86ee11f54e 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c | |||
@@ -181,10 +181,7 @@ void migrate_irqs(void) | |||
181 | local_irq_save(flags); | 181 | local_irq_save(flags); |
182 | 182 | ||
183 | for_each_irq_desc(i, desc) { | 183 | for_each_irq_desc(i, desc) { |
184 | bool affinity_broken = false; | 184 | bool affinity_broken; |
185 | |||
186 | if (!desc) | ||
187 | continue; | ||
188 | 185 | ||
189 | raw_spin_lock(&desc->lock); | 186 | raw_spin_lock(&desc->lock); |
190 | affinity_broken = migrate_one_irq(desc); | 187 | affinity_broken = migrate_one_irq(desc); |
diff --git a/arch/arm/kernel/jump_label.c b/arch/arm/kernel/jump_label.c new file mode 100644 index 000000000000..4ce4f789446d --- /dev/null +++ b/arch/arm/kernel/jump_label.c | |||
@@ -0,0 +1,39 @@ | |||
1 | #include <linux/kernel.h> | ||
2 | #include <linux/jump_label.h> | ||
3 | |||
4 | #include "insn.h" | ||
5 | #include "patch.h" | ||
6 | |||
7 | #ifdef HAVE_JUMP_LABEL | ||
8 | |||
9 | static void __arch_jump_label_transform(struct jump_entry *entry, | ||
10 | enum jump_label_type type, | ||
11 | bool is_static) | ||
12 | { | ||
13 | void *addr = (void *)entry->code; | ||
14 | unsigned int insn; | ||
15 | |||
16 | if (type == JUMP_LABEL_ENABLE) | ||
17 | insn = arm_gen_branch(entry->code, entry->target); | ||
18 | else | ||
19 | insn = arm_gen_nop(); | ||
20 | |||
21 | if (is_static) | ||
22 | __patch_text(addr, insn); | ||
23 | else | ||
24 | patch_text(addr, insn); | ||
25 | } | ||
26 | |||
27 | void arch_jump_label_transform(struct jump_entry *entry, | ||
28 | enum jump_label_type type) | ||
29 | { | ||
30 | __arch_jump_label_transform(entry, type, false); | ||
31 | } | ||
32 | |||
33 | void arch_jump_label_transform_static(struct jump_entry *entry, | ||
34 | enum jump_label_type type) | ||
35 | { | ||
36 | __arch_jump_label_transform(entry, type, true); | ||
37 | } | ||
38 | |||
39 | #endif | ||
diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c index 129c1163248b..ab1869dac97a 100644 --- a/arch/arm/kernel/kprobes.c +++ b/arch/arm/kernel/kprobes.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <asm/cacheflush.h> | 29 | #include <asm/cacheflush.h> |
30 | 30 | ||
31 | #include "kprobes.h" | 31 | #include "kprobes.h" |
32 | #include "patch.h" | ||
32 | 33 | ||
33 | #define MIN_STACK_SIZE(addr) \ | 34 | #define MIN_STACK_SIZE(addr) \ |
34 | min((unsigned long)MAX_STACK_SIZE, \ | 35 | min((unsigned long)MAX_STACK_SIZE, \ |
@@ -103,57 +104,33 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) | |||
103 | return 0; | 104 | return 0; |
104 | } | 105 | } |
105 | 106 | ||
106 | #ifdef CONFIG_THUMB2_KERNEL | ||
107 | |||
108 | /* | ||
109 | * For a 32-bit Thumb breakpoint spanning two memory words we need to take | ||
110 | * special precautions to insert the breakpoint atomically, especially on SMP | ||
111 | * systems. This is achieved by calling this arming function using stop_machine. | ||
112 | */ | ||
113 | static int __kprobes set_t32_breakpoint(void *addr) | ||
114 | { | ||
115 | ((u16 *)addr)[0] = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION >> 16; | ||
116 | ((u16 *)addr)[1] = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION & 0xffff; | ||
117 | flush_insns(addr, 2*sizeof(u16)); | ||
118 | return 0; | ||
119 | } | ||
120 | |||
121 | void __kprobes arch_arm_kprobe(struct kprobe *p) | 107 | void __kprobes arch_arm_kprobe(struct kprobe *p) |
122 | { | 108 | { |
123 | uintptr_t addr = (uintptr_t)p->addr & ~1; /* Remove any Thumb flag */ | 109 | unsigned int brkp; |
124 | 110 | void *addr; | |
125 | if (!is_wide_instruction(p->opcode)) { | 111 | |
126 | *(u16 *)addr = KPROBE_THUMB16_BREAKPOINT_INSTRUCTION; | 112 | if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) { |
127 | flush_insns(addr, sizeof(u16)); | 113 | /* Remove any Thumb flag */ |
128 | } else if (addr & 2) { | 114 | addr = (void *)((uintptr_t)p->addr & ~1); |
129 | /* A 32-bit instruction spanning two words needs special care */ | 115 | |
130 | stop_machine(set_t32_breakpoint, (void *)addr, &cpu_online_map); | 116 | if (is_wide_instruction(p->opcode)) |
117 | brkp = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION; | ||
118 | else | ||
119 | brkp = KPROBE_THUMB16_BREAKPOINT_INSTRUCTION; | ||
131 | } else { | 120 | } else { |
132 | /* Word aligned 32-bit instruction can be written atomically */ | 121 | kprobe_opcode_t insn = p->opcode; |
133 | u32 bkp = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION; | ||
134 | #ifndef __ARMEB__ /* Swap halfwords for little-endian */ | ||
135 | bkp = (bkp >> 16) | (bkp << 16); | ||
136 | #endif | ||
137 | *(u32 *)addr = bkp; | ||
138 | flush_insns(addr, sizeof(u32)); | ||
139 | } | ||
140 | } | ||
141 | 122 | ||
142 | #else /* !CONFIG_THUMB2_KERNEL */ | 123 | addr = p->addr; |
124 | brkp = KPROBE_ARM_BREAKPOINT_INSTRUCTION; | ||
143 | 125 | ||
144 | void __kprobes arch_arm_kprobe(struct kprobe *p) | 126 | if (insn >= 0xe0000000) |
145 | { | 127 | brkp |= 0xe0000000; /* Unconditional instruction */ |
146 | kprobe_opcode_t insn = p->opcode; | 128 | else |
147 | kprobe_opcode_t brkp = KPROBE_ARM_BREAKPOINT_INSTRUCTION; | 129 | brkp |= insn & 0xf0000000; /* Copy condition from insn */ |
148 | if (insn >= 0xe0000000) | 130 | } |
149 | brkp |= 0xe0000000; /* Unconditional instruction */ | ||
150 | else | ||
151 | brkp |= insn & 0xf0000000; /* Copy condition from insn */ | ||
152 | *p->addr = brkp; | ||
153 | flush_insns(p->addr, sizeof(p->addr[0])); | ||
154 | } | ||
155 | 131 | ||
156 | #endif /* !CONFIG_THUMB2_KERNEL */ | 132 | patch_text(addr, brkp); |
133 | } | ||
157 | 134 | ||
158 | /* | 135 | /* |
159 | * The actual disarming is done here on each CPU and synchronized using | 136 | * The actual disarming is done here on each CPU and synchronized using |
@@ -166,25 +143,10 @@ void __kprobes arch_arm_kprobe(struct kprobe *p) | |||
166 | int __kprobes __arch_disarm_kprobe(void *p) | 143 | int __kprobes __arch_disarm_kprobe(void *p) |
167 | { | 144 | { |
168 | struct kprobe *kp = p; | 145 | struct kprobe *kp = p; |
169 | #ifdef CONFIG_THUMB2_KERNEL | 146 | void *addr = (void *)((uintptr_t)kp->addr & ~1); |
170 | u16 *addr = (u16 *)((uintptr_t)kp->addr & ~1); | ||
171 | kprobe_opcode_t insn = kp->opcode; | ||
172 | unsigned int len; | ||
173 | 147 | ||
174 | if (is_wide_instruction(insn)) { | 148 | __patch_text(addr, kp->opcode); |
175 | ((u16 *)addr)[0] = insn>>16; | ||
176 | ((u16 *)addr)[1] = insn; | ||
177 | len = 2*sizeof(u16); | ||
178 | } else { | ||
179 | ((u16 *)addr)[0] = insn; | ||
180 | len = sizeof(u16); | ||
181 | } | ||
182 | flush_insns(addr, len); | ||
183 | 149 | ||
184 | #else /* !CONFIG_THUMB2_KERNEL */ | ||
185 | *kp->addr = kp->opcode; | ||
186 | flush_insns(kp->addr, sizeof(kp->addr[0])); | ||
187 | #endif | ||
188 | return 0; | 150 | return 0; |
189 | } | 151 | } |
190 | 152 | ||
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c index 764bd456d84f..a30004476e70 100644 --- a/arch/arm/kernel/machine_kexec.c +++ b/arch/arm/kernel/machine_kexec.c | |||
@@ -7,6 +7,7 @@ | |||
7 | #include <linux/delay.h> | 7 | #include <linux/delay.h> |
8 | #include <linux/reboot.h> | 8 | #include <linux/reboot.h> |
9 | #include <linux/io.h> | 9 | #include <linux/io.h> |
10 | #include <linux/irq.h> | ||
10 | #include <asm/pgtable.h> | 11 | #include <asm/pgtable.h> |
11 | #include <asm/pgalloc.h> | 12 | #include <asm/pgalloc.h> |
12 | #include <asm/mmu_context.h> | 13 | #include <asm/mmu_context.h> |
@@ -53,6 +54,29 @@ void machine_crash_nonpanic_core(void *unused) | |||
53 | cpu_relax(); | 54 | cpu_relax(); |
54 | } | 55 | } |
55 | 56 | ||
57 | static void machine_kexec_mask_interrupts(void) | ||
58 | { | ||
59 | unsigned int i; | ||
60 | struct irq_desc *desc; | ||
61 | |||
62 | for_each_irq_desc(i, desc) { | ||
63 | struct irq_chip *chip; | ||
64 | |||
65 | chip = irq_desc_get_chip(desc); | ||
66 | if (!chip) | ||
67 | continue; | ||
68 | |||
69 | if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data)) | ||
70 | chip->irq_eoi(&desc->irq_data); | ||
71 | |||
72 | if (chip->irq_mask) | ||
73 | chip->irq_mask(&desc->irq_data); | ||
74 | |||
75 | if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data)) | ||
76 | chip->irq_disable(&desc->irq_data); | ||
77 | } | ||
78 | } | ||
79 | |||
56 | void machine_crash_shutdown(struct pt_regs *regs) | 80 | void machine_crash_shutdown(struct pt_regs *regs) |
57 | { | 81 | { |
58 | unsigned long msecs; | 82 | unsigned long msecs; |
@@ -70,6 +94,7 @@ void machine_crash_shutdown(struct pt_regs *regs) | |||
70 | printk(KERN_WARNING "Non-crashing CPUs did not react to IPI\n"); | 94 | printk(KERN_WARNING "Non-crashing CPUs did not react to IPI\n"); |
71 | 95 | ||
72 | crash_save_cpu(regs, smp_processor_id()); | 96 | crash_save_cpu(regs, smp_processor_id()); |
97 | machine_kexec_mask_interrupts(); | ||
73 | 98 | ||
74 | printk(KERN_INFO "Loading crashdump kernel...\n"); | 99 | printk(KERN_INFO "Loading crashdump kernel...\n"); |
75 | } | 100 | } |
diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c new file mode 100644 index 000000000000..07314af47733 --- /dev/null +++ b/arch/arm/kernel/patch.c | |||
@@ -0,0 +1,75 @@ | |||
1 | #include <linux/kernel.h> | ||
2 | #include <linux/kprobes.h> | ||
3 | #include <linux/stop_machine.h> | ||
4 | |||
5 | #include <asm/cacheflush.h> | ||
6 | #include <asm/smp_plat.h> | ||
7 | #include <asm/opcodes.h> | ||
8 | |||
9 | #include "patch.h" | ||
10 | |||
11 | struct patch { | ||
12 | void *addr; | ||
13 | unsigned int insn; | ||
14 | }; | ||
15 | |||
16 | void __kprobes __patch_text(void *addr, unsigned int insn) | ||
17 | { | ||
18 | bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL); | ||
19 | int size; | ||
20 | |||
21 | if (thumb2 && __opcode_is_thumb16(insn)) { | ||
22 | *(u16 *)addr = __opcode_to_mem_thumb16(insn); | ||
23 | size = sizeof(u16); | ||
24 | } else if (thumb2 && ((uintptr_t)addr & 2)) { | ||
25 | u16 first = __opcode_thumb32_first(insn); | ||
26 | u16 second = __opcode_thumb32_second(insn); | ||
27 | u16 *addrh = addr; | ||
28 | |||
29 | addrh[0] = __opcode_to_mem_thumb16(first); | ||
30 | addrh[1] = __opcode_to_mem_thumb16(second); | ||
31 | |||
32 | size = sizeof(u32); | ||
33 | } else { | ||
34 | if (thumb2) | ||
35 | insn = __opcode_to_mem_thumb32(insn); | ||
36 | else | ||
37 | insn = __opcode_to_mem_arm(insn); | ||
38 | |||
39 | *(u32 *)addr = insn; | ||
40 | size = sizeof(u32); | ||
41 | } | ||
42 | |||
43 | flush_icache_range((uintptr_t)(addr), | ||
44 | (uintptr_t)(addr) + size); | ||
45 | } | ||
46 | |||
47 | static int __kprobes patch_text_stop_machine(void *data) | ||
48 | { | ||
49 | struct patch *patch = data; | ||
50 | |||
51 | __patch_text(patch->addr, patch->insn); | ||
52 | |||
53 | return 0; | ||
54 | } | ||
55 | |||
56 | void __kprobes patch_text(void *addr, unsigned int insn) | ||
57 | { | ||
58 | struct patch patch = { | ||
59 | .addr = addr, | ||
60 | .insn = insn, | ||
61 | }; | ||
62 | |||
63 | if (cache_ops_need_broadcast()) { | ||
64 | stop_machine(patch_text_stop_machine, &patch, cpu_online_mask); | ||
65 | } else { | ||
66 | bool straddles_word = IS_ENABLED(CONFIG_THUMB2_KERNEL) | ||
67 | && __opcode_is_thumb32(insn) | ||
68 | && ((uintptr_t)addr & 2); | ||
69 | |||
70 | if (straddles_word) | ||
71 | stop_machine(patch_text_stop_machine, &patch, NULL); | ||
72 | else | ||
73 | __patch_text(addr, insn); | ||
74 | } | ||
75 | } | ||
diff --git a/arch/arm/kernel/patch.h b/arch/arm/kernel/patch.h new file mode 100644 index 000000000000..b4731f2dac38 --- /dev/null +++ b/arch/arm/kernel/patch.h | |||
@@ -0,0 +1,7 @@ | |||
1 | #ifndef _ARM_KERNEL_PATCH_H | ||
2 | #define _ARM_KERNEL_PATCH_H | ||
3 | |||
4 | void patch_text(void *addr, unsigned int insn); | ||
5 | void __patch_text(void *addr, unsigned int insn); | ||
6 | |||
7 | #endif | ||
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 5bb91bf3d47f..ab59c3bae5e8 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c | |||
@@ -712,6 +712,9 @@ init_hw_perf_events(void) | |||
712 | case 0xC0F0: /* Cortex-A15 */ | 712 | case 0xC0F0: /* Cortex-A15 */ |
713 | cpu_pmu = armv7_a15_pmu_init(); | 713 | cpu_pmu = armv7_a15_pmu_init(); |
714 | break; | 714 | break; |
715 | case 0xC070: /* Cortex-A7 */ | ||
716 | cpu_pmu = armv7_a7_pmu_init(); | ||
717 | break; | ||
715 | } | 718 | } |
716 | /* Intel CPUs [xscale]. */ | 719 | /* Intel CPUs [xscale]. */ |
717 | } else if (0x69 == implementor) { | 720 | } else if (0x69 == implementor) { |
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c index 460bbbb6b885..b2b764ec011b 100644 --- a/arch/arm/kernel/perf_event_v7.c +++ b/arch/arm/kernel/perf_event_v7.c | |||
@@ -582,6 +582,130 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | |||
582 | }; | 582 | }; |
583 | 583 | ||
584 | /* | 584 | /* |
585 | * Cortex-A7 HW events mapping | ||
586 | */ | ||
587 | static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = { | ||
588 | [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, | ||
589 | [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, | ||
590 | [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, | ||
591 | [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL, | ||
592 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, | ||
593 | [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | ||
594 | [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES, | ||
595 | [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED, | ||
596 | [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED, | ||
597 | }; | ||
598 | |||
599 | static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | ||
600 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
601 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | ||
602 | [C(L1D)] = { | ||
603 | /* | ||
604 | * The performance counters don't differentiate between read | ||
605 | * and write accesses/misses so this isn't strictly correct, | ||
606 | * but it's the best we can do. Writes and reads get | ||
607 | * combined. | ||
608 | */ | ||
609 | [C(OP_READ)] = { | ||
610 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, | ||
611 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, | ||
612 | }, | ||
613 | [C(OP_WRITE)] = { | ||
614 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS, | ||
615 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL, | ||
616 | }, | ||
617 | [C(OP_PREFETCH)] = { | ||
618 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
619 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
620 | }, | ||
621 | }, | ||
622 | [C(L1I)] = { | ||
623 | [C(OP_READ)] = { | ||
624 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, | ||
625 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, | ||
626 | }, | ||
627 | [C(OP_WRITE)] = { | ||
628 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, | ||
629 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, | ||
630 | }, | ||
631 | [C(OP_PREFETCH)] = { | ||
632 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
633 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
634 | }, | ||
635 | }, | ||
636 | [C(LL)] = { | ||
637 | [C(OP_READ)] = { | ||
638 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS, | ||
639 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL, | ||
640 | }, | ||
641 | [C(OP_WRITE)] = { | ||
642 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS, | ||
643 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL, | ||
644 | }, | ||
645 | [C(OP_PREFETCH)] = { | ||
646 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
647 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
648 | }, | ||
649 | }, | ||
650 | [C(DTLB)] = { | ||
651 | [C(OP_READ)] = { | ||
652 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
653 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | ||
654 | }, | ||
655 | [C(OP_WRITE)] = { | ||
656 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
657 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | ||
658 | }, | ||
659 | [C(OP_PREFETCH)] = { | ||
660 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
661 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
662 | }, | ||
663 | }, | ||
664 | [C(ITLB)] = { | ||
665 | [C(OP_READ)] = { | ||
666 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
667 | [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, | ||
668 | }, | ||
669 | [C(OP_WRITE)] = { | ||
670 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
671 | [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL, | ||
672 | }, | ||
673 | [C(OP_PREFETCH)] = { | ||
674 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
675 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
676 | }, | ||
677 | }, | ||
678 | [C(BPU)] = { | ||
679 | [C(OP_READ)] = { | ||
680 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, | ||
681 | [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | ||
682 | }, | ||
683 | [C(OP_WRITE)] = { | ||
684 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, | ||
685 | [C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | ||
686 | }, | ||
687 | [C(OP_PREFETCH)] = { | ||
688 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
689 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
690 | }, | ||
691 | }, | ||
692 | [C(NODE)] = { | ||
693 | [C(OP_READ)] = { | ||
694 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
695 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
696 | }, | ||
697 | [C(OP_WRITE)] = { | ||
698 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
699 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
700 | }, | ||
701 | [C(OP_PREFETCH)] = { | ||
702 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
703 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
704 | }, | ||
705 | }, | ||
706 | }; | ||
707 | |||
708 | /* | ||
585 | * Perf Events' indices | 709 | * Perf Events' indices |
586 | */ | 710 | */ |
587 | #define ARMV7_IDX_CYCLE_COUNTER 0 | 711 | #define ARMV7_IDX_CYCLE_COUNTER 0 |
@@ -1067,6 +1191,12 @@ static int armv7_a15_map_event(struct perf_event *event) | |||
1067 | &armv7_a15_perf_cache_map, 0xFF); | 1191 | &armv7_a15_perf_cache_map, 0xFF); |
1068 | } | 1192 | } |
1069 | 1193 | ||
1194 | static int armv7_a7_map_event(struct perf_event *event) | ||
1195 | { | ||
1196 | return map_cpu_event(event, &armv7_a7_perf_map, | ||
1197 | &armv7_a7_perf_cache_map, 0xFF); | ||
1198 | } | ||
1199 | |||
1070 | static struct arm_pmu armv7pmu = { | 1200 | static struct arm_pmu armv7pmu = { |
1071 | .handle_irq = armv7pmu_handle_irq, | 1201 | .handle_irq = armv7pmu_handle_irq, |
1072 | .enable = armv7pmu_enable_event, | 1202 | .enable = armv7pmu_enable_event, |
@@ -1127,6 +1257,16 @@ static struct arm_pmu *__init armv7_a15_pmu_init(void) | |||
1127 | armv7pmu.set_event_filter = armv7pmu_set_event_filter; | 1257 | armv7pmu.set_event_filter = armv7pmu_set_event_filter; |
1128 | return &armv7pmu; | 1258 | return &armv7pmu; |
1129 | } | 1259 | } |
1260 | |||
1261 | static struct arm_pmu *__init armv7_a7_pmu_init(void) | ||
1262 | { | ||
1263 | armv7pmu.id = ARM_PERF_PMU_ID_CA7; | ||
1264 | armv7pmu.name = "ARMv7 Cortex-A7"; | ||
1265 | armv7pmu.map_event = armv7_a7_map_event; | ||
1266 | armv7pmu.num_events = armv7_read_num_pmnc_events(); | ||
1267 | armv7pmu.set_event_filter = armv7pmu_set_event_filter; | ||
1268 | return &armv7pmu; | ||
1269 | } | ||
1130 | #else | 1270 | #else |
1131 | static struct arm_pmu *__init armv7_a8_pmu_init(void) | 1271 | static struct arm_pmu *__init armv7_a8_pmu_init(void) |
1132 | { | 1272 | { |
@@ -1147,4 +1287,9 @@ static struct arm_pmu *__init armv7_a15_pmu_init(void) | |||
1147 | { | 1287 | { |
1148 | return NULL; | 1288 | return NULL; |
1149 | } | 1289 | } |
1290 | |||
1291 | static struct arm_pmu *__init armv7_a7_pmu_init(void) | ||
1292 | { | ||
1293 | return NULL; | ||
1294 | } | ||
1150 | #endif /* CONFIG_CPU_V7 */ | 1295 | #endif /* CONFIG_CPU_V7 */ |
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 971d65c253a9..e11b523db332 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c | |||
@@ -526,22 +526,40 @@ unsigned long arch_randomize_brk(struct mm_struct *mm) | |||
526 | #ifdef CONFIG_MMU | 526 | #ifdef CONFIG_MMU |
527 | /* | 527 | /* |
528 | * The vectors page is always readable from user space for the | 528 | * The vectors page is always readable from user space for the |
529 | * atomic helpers and the signal restart code. Let's declare a mapping | 529 | * atomic helpers and the signal restart code. Insert it into the |
530 | * for it so it is visible through ptrace and /proc/<pid>/mem. | 530 | * gate_vma so that it is visible through ptrace and /proc/<pid>/mem. |
531 | */ | 531 | */ |
532 | static struct vm_area_struct gate_vma; | ||
532 | 533 | ||
533 | int vectors_user_mapping(void) | 534 | static int __init gate_vma_init(void) |
534 | { | 535 | { |
535 | struct mm_struct *mm = current->mm; | 536 | gate_vma.vm_start = 0xffff0000; |
536 | return install_special_mapping(mm, 0xffff0000, PAGE_SIZE, | 537 | gate_vma.vm_end = 0xffff0000 + PAGE_SIZE; |
537 | VM_READ | VM_EXEC | | 538 | gate_vma.vm_page_prot = PAGE_READONLY_EXEC; |
538 | VM_MAYREAD | VM_MAYEXEC | | 539 | gate_vma.vm_flags = VM_READ | VM_EXEC | |
539 | VM_ALWAYSDUMP | VM_RESERVED, | 540 | VM_MAYREAD | VM_MAYEXEC | |
540 | NULL); | 541 | VM_ALWAYSDUMP; |
542 | return 0; | ||
543 | } | ||
544 | arch_initcall(gate_vma_init); | ||
545 | |||
546 | struct vm_area_struct *get_gate_vma(struct mm_struct *mm) | ||
547 | { | ||
548 | return &gate_vma; | ||
549 | } | ||
550 | |||
551 | int in_gate_area(struct mm_struct *mm, unsigned long addr) | ||
552 | { | ||
553 | return (addr >= gate_vma.vm_start) && (addr < gate_vma.vm_end); | ||
554 | } | ||
555 | |||
556 | int in_gate_area_no_mm(unsigned long addr) | ||
557 | { | ||
558 | return in_gate_area(NULL, addr); | ||
541 | } | 559 | } |
542 | 560 | ||
543 | const char *arch_vma_name(struct vm_area_struct *vma) | 561 | const char *arch_vma_name(struct vm_area_struct *vma) |
544 | { | 562 | { |
545 | return (vma->vm_start == 0xffff0000) ? "[vectors]" : NULL; | 563 | return (vma == &gate_vma) ? "[vectors]" : NULL; |
546 | } | 564 | } |
547 | #endif | 565 | #endif |
diff --git a/arch/arm/kernel/sched_clock.c b/arch/arm/kernel/sched_clock.c index 5416c7c12528..27d186abbc06 100644 --- a/arch/arm/kernel/sched_clock.c +++ b/arch/arm/kernel/sched_clock.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/jiffies.h> | 10 | #include <linux/jiffies.h> |
11 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
12 | #include <linux/sched.h> | 12 | #include <linux/sched.h> |
13 | #include <linux/syscore_ops.h> | ||
13 | #include <linux/timer.h> | 14 | #include <linux/timer.h> |
14 | 15 | ||
15 | #include <asm/sched_clock.h> | 16 | #include <asm/sched_clock.h> |
@@ -164,3 +165,20 @@ void __init sched_clock_postinit(void) | |||
164 | 165 | ||
165 | sched_clock_poll(sched_clock_timer.data); | 166 | sched_clock_poll(sched_clock_timer.data); |
166 | } | 167 | } |
168 | |||
169 | static int sched_clock_suspend(void) | ||
170 | { | ||
171 | sched_clock_poll(sched_clock_timer.data); | ||
172 | return 0; | ||
173 | } | ||
174 | |||
175 | static struct syscore_ops sched_clock_ops = { | ||
176 | .suspend = sched_clock_suspend, | ||
177 | }; | ||
178 | |||
179 | static int __init sched_clock_syscore_init(void) | ||
180 | { | ||
181 | register_syscore_ops(&sched_clock_ops); | ||
182 | return 0; | ||
183 | } | ||
184 | device_initcall(sched_clock_syscore_init); | ||
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 129fbd55bde8..e5c37fc1d1d6 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/sort.h> | 34 | #include <linux/sort.h> |
35 | 35 | ||
36 | #include <asm/unified.h> | 36 | #include <asm/unified.h> |
37 | #include <asm/cp15.h> | ||
37 | #include <asm/cpu.h> | 38 | #include <asm/cpu.h> |
38 | #include <asm/cputype.h> | 39 | #include <asm/cputype.h> |
39 | #include <asm/elf.h> | 40 | #include <asm/elf.h> |
@@ -45,7 +46,6 @@ | |||
45 | #include <asm/cacheflush.h> | 46 | #include <asm/cacheflush.h> |
46 | #include <asm/cachetype.h> | 47 | #include <asm/cachetype.h> |
47 | #include <asm/tlbflush.h> | 48 | #include <asm/tlbflush.h> |
48 | #include <asm/system.h> | ||
49 | 49 | ||
50 | #include <asm/prom.h> | 50 | #include <asm/prom.h> |
51 | #include <asm/mach/arch.h> | 51 | #include <asm/mach/arch.h> |
@@ -961,7 +961,6 @@ void __init setup_arch(char **cmdline_p) | |||
961 | conswitchp = &dummy_con; | 961 | conswitchp = &dummy_con; |
962 | #endif | 962 | #endif |
963 | #endif | 963 | #endif |
964 | early_trap_init(); | ||
965 | 964 | ||
966 | if (mdesc->init_early) | 965 | if (mdesc->init_early) |
967 | mdesc->init_early(); | 966 | mdesc->init_early(); |
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 0340224cf73c..d13e61ac18da 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c | |||
@@ -66,12 +66,13 @@ const unsigned long syscall_restart_code[2] = { | |||
66 | */ | 66 | */ |
67 | asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask) | 67 | asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask) |
68 | { | 68 | { |
69 | mask &= _BLOCKABLE; | 69 | sigset_t blocked; |
70 | spin_lock_irq(¤t->sighand->siglock); | 70 | |
71 | current->saved_sigmask = current->blocked; | 71 | current->saved_sigmask = current->blocked; |
72 | siginitset(¤t->blocked, mask); | 72 | |
73 | recalc_sigpending(); | 73 | mask &= _BLOCKABLE; |
74 | spin_unlock_irq(¤t->sighand->siglock); | 74 | siginitset(&blocked, mask); |
75 | set_current_blocked(&blocked); | ||
75 | 76 | ||
76 | current->state = TASK_INTERRUPTIBLE; | 77 | current->state = TASK_INTERRUPTIBLE; |
77 | schedule(); | 78 | schedule(); |
@@ -281,10 +282,7 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf) | |||
281 | err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set)); | 282 | err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set)); |
282 | if (err == 0) { | 283 | if (err == 0) { |
283 | sigdelsetmask(&set, ~_BLOCKABLE); | 284 | sigdelsetmask(&set, ~_BLOCKABLE); |
284 | spin_lock_irq(¤t->sighand->siglock); | 285 | set_current_blocked(&set); |
285 | current->blocked = set; | ||
286 | recalc_sigpending(); | ||
287 | spin_unlock_irq(¤t->sighand->siglock); | ||
288 | } | 286 | } |
289 | 287 | ||
290 | __get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err); | 288 | __get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err); |
@@ -637,13 +635,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, | |||
637 | /* | 635 | /* |
638 | * Block the signal if we were successful. | 636 | * Block the signal if we were successful. |
639 | */ | 637 | */ |
640 | spin_lock_irq(&tsk->sighand->siglock); | 638 | block_sigmask(ka, sig); |
641 | sigorsets(&tsk->blocked, &tsk->blocked, | ||
642 | &ka->sa.sa_mask); | ||
643 | if (!(ka->sa.sa_flags & SA_NODEFER)) | ||
644 | sigaddset(&tsk->blocked, sig); | ||
645 | recalc_sigpending(); | ||
646 | spin_unlock_irq(&tsk->sighand->siglock); | ||
647 | 639 | ||
648 | return 0; | 640 | return 0; |
649 | } | 641 | } |
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 57db122a4f62..2b26dca2168b 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c | |||
@@ -58,6 +58,8 @@ enum ipi_msg_type { | |||
58 | IPI_CPU_STOP, | 58 | IPI_CPU_STOP, |
59 | }; | 59 | }; |
60 | 60 | ||
61 | static DECLARE_COMPLETION(cpu_running); | ||
62 | |||
61 | int __cpuinit __cpu_up(unsigned int cpu) | 63 | int __cpuinit __cpu_up(unsigned int cpu) |
62 | { | 64 | { |
63 | struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu); | 65 | struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu); |
@@ -98,20 +100,12 @@ int __cpuinit __cpu_up(unsigned int cpu) | |||
98 | */ | 100 | */ |
99 | ret = boot_secondary(cpu, idle); | 101 | ret = boot_secondary(cpu, idle); |
100 | if (ret == 0) { | 102 | if (ret == 0) { |
101 | unsigned long timeout; | ||
102 | |||
103 | /* | 103 | /* |
104 | * CPU was successfully started, wait for it | 104 | * CPU was successfully started, wait for it |
105 | * to come online or time out. | 105 | * to come online or time out. |
106 | */ | 106 | */ |
107 | timeout = jiffies + HZ; | 107 | wait_for_completion_timeout(&cpu_running, |
108 | while (time_before(jiffies, timeout)) { | 108 | msecs_to_jiffies(1000)); |
109 | if (cpu_online(cpu)) | ||
110 | break; | ||
111 | |||
112 | udelay(10); | ||
113 | barrier(); | ||
114 | } | ||
115 | 109 | ||
116 | if (!cpu_online(cpu)) { | 110 | if (!cpu_online(cpu)) { |
117 | pr_crit("CPU%u: failed to come online\n", cpu); | 111 | pr_crit("CPU%u: failed to come online\n", cpu); |
@@ -300,9 +294,10 @@ asmlinkage void __cpuinit secondary_start_kernel(void) | |||
300 | /* | 294 | /* |
301 | * OK, now it's safe to let the boot CPU continue. Wait for | 295 | * OK, now it's safe to let the boot CPU continue. Wait for |
302 | * the CPU migration code to notice that the CPU is online | 296 | * the CPU migration code to notice that the CPU is online |
303 | * before we continue. | 297 | * before we continue - which happens after __cpu_up returns. |
304 | */ | 298 | */ |
305 | set_cpu_online(cpu, true); | 299 | set_cpu_online(cpu, true); |
300 | complete(&cpu_running); | ||
306 | 301 | ||
307 | /* | 302 | /* |
308 | * Setup the percpu timer for this CPU. | 303 | * Setup the percpu timer for this CPU. |
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 99a572702509..23377a3bf50d 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c | |||
@@ -227,6 +227,11 @@ void show_stack(struct task_struct *tsk, unsigned long *sp) | |||
227 | #else | 227 | #else |
228 | #define S_SMP "" | 228 | #define S_SMP "" |
229 | #endif | 229 | #endif |
230 | #ifdef CONFIG_THUMB2_KERNEL | ||
231 | #define S_ISA " THUMB2" | ||
232 | #else | ||
233 | #define S_ISA " ARM" | ||
234 | #endif | ||
230 | 235 | ||
231 | static int __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs) | 236 | static int __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs) |
232 | { | 237 | { |
@@ -234,8 +239,8 @@ static int __die(const char *str, int err, struct thread_info *thread, struct pt | |||
234 | static int die_counter; | 239 | static int die_counter; |
235 | int ret; | 240 | int ret; |
236 | 241 | ||
237 | printk(KERN_EMERG "Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n", | 242 | printk(KERN_EMERG "Internal error: %s: %x [#%d]" S_PREEMPT S_SMP |
238 | str, err, ++die_counter); | 243 | S_ISA "\n", str, err, ++die_counter); |
239 | 244 | ||
240 | /* trap and error numbers are mostly meaningless on ARM */ | 245 | /* trap and error numbers are mostly meaningless on ARM */ |
241 | ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV); | 246 | ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV); |
@@ -781,18 +786,16 @@ static void __init kuser_get_tls_init(unsigned long vectors) | |||
781 | memcpy((void *)vectors + 0xfe0, (void *)vectors + 0xfe8, 4); | 786 | memcpy((void *)vectors + 0xfe0, (void *)vectors + 0xfe8, 4); |
782 | } | 787 | } |
783 | 788 | ||
784 | void __init early_trap_init(void) | 789 | void __init early_trap_init(void *vectors_base) |
785 | { | 790 | { |
786 | #if defined(CONFIG_CPU_USE_DOMAINS) | 791 | unsigned long vectors = (unsigned long)vectors_base; |
787 | unsigned long vectors = CONFIG_VECTORS_BASE; | ||
788 | #else | ||
789 | unsigned long vectors = (unsigned long)vectors_page; | ||
790 | #endif | ||
791 | extern char __stubs_start[], __stubs_end[]; | 792 | extern char __stubs_start[], __stubs_end[]; |
792 | extern char __vectors_start[], __vectors_end[]; | 793 | extern char __vectors_start[], __vectors_end[]; |
793 | extern char __kuser_helper_start[], __kuser_helper_end[]; | 794 | extern char __kuser_helper_start[], __kuser_helper_end[]; |
794 | int kuser_sz = __kuser_helper_end - __kuser_helper_start; | 795 | int kuser_sz = __kuser_helper_end - __kuser_helper_start; |
795 | 796 | ||
797 | vectors_page = vectors_base; | ||
798 | |||
796 | /* | 799 | /* |
797 | * Copy the vectors, stubs and kuser helpers (in entry-armv.S) | 800 | * Copy the vectors, stubs and kuser helpers (in entry-armv.S) |
798 | * into the vector page, mapped at 0xffff0000, and ensure these | 801 | * into the vector page, mapped at 0xffff0000, and ensure these |
diff --git a/arch/arm/mach-exynos/hotplug.c b/arch/arm/mach-exynos/hotplug.c index da70e7e39937..c9146fa7f22d 100644 --- a/arch/arm/mach-exynos/hotplug.c +++ b/arch/arm/mach-exynos/hotplug.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/io.h> | 16 | #include <linux/io.h> |
17 | 17 | ||
18 | #include <asm/cacheflush.h> | 18 | #include <asm/cacheflush.h> |
19 | #include <asm/cp15.h> | ||
19 | 20 | ||
20 | #include <mach/regs-pmu.h> | 21 | #include <mach/regs-pmu.h> |
21 | 22 | ||
diff --git a/arch/arm/mach-integrator/Kconfig b/arch/arm/mach-integrator/Kconfig index 350e26636a06..5a6148ab6dac 100644 --- a/arch/arm/mach-integrator/Kconfig +++ b/arch/arm/mach-integrator/Kconfig | |||
@@ -5,6 +5,7 @@ menu "Integrator Options" | |||
5 | config ARCH_INTEGRATOR_AP | 5 | config ARCH_INTEGRATOR_AP |
6 | bool "Support Integrator/AP and Integrator/PP2 platforms" | 6 | bool "Support Integrator/AP and Integrator/PP2 platforms" |
7 | select CLKSRC_MMIO | 7 | select CLKSRC_MMIO |
8 | select HAVE_SCHED_CLOCK | ||
8 | select MIGHT_HAVE_PCI | 9 | select MIGHT_HAVE_PCI |
9 | select SERIAL_AMBA_PL010 | 10 | select SERIAL_AMBA_PL010 |
10 | select SERIAL_AMBA_PL010_CONSOLE | 11 | select SERIAL_AMBA_PL010_CONSOLE |
diff --git a/arch/arm/mach-integrator/core.c b/arch/arm/mach-integrator/core.c index 019f0ab08f66..cba1907c2e1c 100644 --- a/arch/arm/mach-integrator/core.c +++ b/arch/arm/mach-integrator/core.c | |||
@@ -25,8 +25,9 @@ | |||
25 | 25 | ||
26 | #include <mach/hardware.h> | 26 | #include <mach/hardware.h> |
27 | #include <mach/platform.h> | 27 | #include <mach/platform.h> |
28 | #include <asm/irq.h> | ||
29 | #include <mach/cm.h> | 28 | #include <mach/cm.h> |
29 | #include <mach/irqs.h> | ||
30 | |||
30 | #include <asm/system.h> | 31 | #include <asm/system.h> |
31 | #include <asm/leds.h> | 32 | #include <asm/leds.h> |
32 | #include <asm/mach-types.h> | 33 | #include <asm/mach-types.h> |
diff --git a/arch/arm/mach-integrator/include/mach/irqs.h b/arch/arm/mach-integrator/include/mach/irqs.h index 1fbe6d190222..a19a1a2fcf6b 100644 --- a/arch/arm/mach-integrator/include/mach/irqs.h +++ b/arch/arm/mach-integrator/include/mach/irqs.h | |||
@@ -78,5 +78,6 @@ | |||
78 | #define IRQ_SIC_CP_LMINT7 46 | 78 | #define IRQ_SIC_CP_LMINT7 46 |
79 | #define IRQ_SIC_END 46 | 79 | #define IRQ_SIC_END 46 |
80 | 80 | ||
81 | #define NR_IRQS 47 | 81 | #define NR_IRQS_INTEGRATOR_AP 34 |
82 | #define NR_IRQS_INTEGRATOR_CP 47 | ||
82 | 83 | ||
diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c index 21a1d6cbef40..871f148ffd72 100644 --- a/arch/arm/mach-integrator/integrator_ap.c +++ b/arch/arm/mach-integrator/integrator_ap.c | |||
@@ -38,12 +38,13 @@ | |||
38 | #include <mach/hardware.h> | 38 | #include <mach/hardware.h> |
39 | #include <mach/platform.h> | 39 | #include <mach/platform.h> |
40 | #include <asm/hardware/arm_timer.h> | 40 | #include <asm/hardware/arm_timer.h> |
41 | #include <asm/irq.h> | ||
42 | #include <asm/setup.h> | 41 | #include <asm/setup.h> |
43 | #include <asm/param.h> /* HZ */ | 42 | #include <asm/param.h> /* HZ */ |
44 | #include <asm/mach-types.h> | 43 | #include <asm/mach-types.h> |
44 | #include <asm/sched_clock.h> | ||
45 | 45 | ||
46 | #include <mach/lm.h> | 46 | #include <mach/lm.h> |
47 | #include <mach/irqs.h> | ||
47 | 48 | ||
48 | #include <asm/mach/arch.h> | 49 | #include <asm/mach/arch.h> |
49 | #include <asm/mach/irq.h> | 50 | #include <asm/mach/irq.h> |
@@ -325,6 +326,11 @@ static void __init ap_init(void) | |||
325 | 326 | ||
326 | static unsigned long timer_reload; | 327 | static unsigned long timer_reload; |
327 | 328 | ||
329 | static u32 notrace integrator_read_sched_clock(void) | ||
330 | { | ||
331 | return -readl((void __iomem *) TIMER2_VA_BASE + TIMER_VALUE); | ||
332 | } | ||
333 | |||
328 | static void integrator_clocksource_init(unsigned long inrate) | 334 | static void integrator_clocksource_init(unsigned long inrate) |
329 | { | 335 | { |
330 | void __iomem *base = (void __iomem *)TIMER2_VA_BASE; | 336 | void __iomem *base = (void __iomem *)TIMER2_VA_BASE; |
@@ -341,6 +347,7 @@ static void integrator_clocksource_init(unsigned long inrate) | |||
341 | 347 | ||
342 | clocksource_mmio_init(base + TIMER_VALUE, "timer2", | 348 | clocksource_mmio_init(base + TIMER_VALUE, "timer2", |
343 | rate, 200, 16, clocksource_mmio_readl_down); | 349 | rate, 200, 16, clocksource_mmio_readl_down); |
350 | setup_sched_clock(integrator_read_sched_clock, 16, rate); | ||
344 | } | 351 | } |
345 | 352 | ||
346 | static void __iomem * const clkevt_base = (void __iomem *)TIMER1_VA_BASE; | 353 | static void __iomem * const clkevt_base = (void __iomem *)TIMER1_VA_BASE; |
@@ -468,6 +475,7 @@ MACHINE_START(INTEGRATOR, "ARM-Integrator") | |||
468 | .atag_offset = 0x100, | 475 | .atag_offset = 0x100, |
469 | .reserve = integrator_reserve, | 476 | .reserve = integrator_reserve, |
470 | .map_io = ap_map_io, | 477 | .map_io = ap_map_io, |
478 | .nr_irqs = NR_IRQS_INTEGRATOR_AP, | ||
471 | .init_early = integrator_init_early, | 479 | .init_early = integrator_init_early, |
472 | .init_irq = ap_init_irq, | 480 | .init_irq = ap_init_irq, |
473 | .timer = &ap_timer, | 481 | .timer = &ap_timer, |
diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c index a8b6aa6003f3..38d997dc96f8 100644 --- a/arch/arm/mach-integrator/integrator_cp.c +++ b/arch/arm/mach-integrator/integrator_cp.c | |||
@@ -26,7 +26,6 @@ | |||
26 | 26 | ||
27 | #include <mach/hardware.h> | 27 | #include <mach/hardware.h> |
28 | #include <mach/platform.h> | 28 | #include <mach/platform.h> |
29 | #include <asm/irq.h> | ||
30 | #include <asm/setup.h> | 29 | #include <asm/setup.h> |
31 | #include <asm/mach-types.h> | 30 | #include <asm/mach-types.h> |
32 | #include <asm/hardware/arm_timer.h> | 31 | #include <asm/hardware/arm_timer.h> |
@@ -34,6 +33,7 @@ | |||
34 | 33 | ||
35 | #include <mach/cm.h> | 34 | #include <mach/cm.h> |
36 | #include <mach/lm.h> | 35 | #include <mach/lm.h> |
36 | #include <mach/irqs.h> | ||
37 | 37 | ||
38 | #include <asm/mach/arch.h> | 38 | #include <asm/mach/arch.h> |
39 | #include <asm/mach/irq.h> | 39 | #include <asm/mach/irq.h> |
@@ -495,6 +495,7 @@ MACHINE_START(CINTEGRATOR, "ARM-IntegratorCP") | |||
495 | .atag_offset = 0x100, | 495 | .atag_offset = 0x100, |
496 | .reserve = integrator_reserve, | 496 | .reserve = integrator_reserve, |
497 | .map_io = intcp_map_io, | 497 | .map_io = intcp_map_io, |
498 | .nr_irqs = NR_IRQS_INTEGRATOR_CP, | ||
498 | .init_early = intcp_init_early, | 499 | .init_early = intcp_init_early, |
499 | .init_irq = intcp_init_irq, | 500 | .init_irq = intcp_init_irq, |
500 | .timer = &cp_timer, | 501 | .timer = &cp_timer, |
diff --git a/arch/arm/mach-integrator/pci.c b/arch/arm/mach-integrator/pci.c index 520b6bf81bb1..e15aa43cfd27 100644 --- a/arch/arm/mach-integrator/pci.c +++ b/arch/arm/mach-integrator/pci.c | |||
@@ -26,11 +26,12 @@ | |||
26 | #include <linux/interrupt.h> | 26 | #include <linux/interrupt.h> |
27 | #include <linux/init.h> | 27 | #include <linux/init.h> |
28 | 28 | ||
29 | #include <asm/irq.h> | ||
30 | #include <asm/system.h> | 29 | #include <asm/system.h> |
31 | #include <asm/mach/pci.h> | 30 | #include <asm/mach/pci.h> |
32 | #include <asm/mach-types.h> | 31 | #include <asm/mach-types.h> |
33 | 32 | ||
33 | #include <mach/irqs.h> | ||
34 | |||
34 | /* | 35 | /* |
35 | * A small note about bridges and interrupts. The DECchip 21050 (and | 36 | * A small note about bridges and interrupts. The DECchip 21050 (and |
36 | * later) adheres to the PCI-PCI bridge specification. This says that | 37 | * later) adheres to the PCI-PCI bridge specification. This says that |
diff --git a/arch/arm/mach-integrator/pci_v3.c b/arch/arm/mach-integrator/pci_v3.c index 3c82566acece..65e5896630e4 100644 --- a/arch/arm/mach-integrator/pci_v3.c +++ b/arch/arm/mach-integrator/pci_v3.c | |||
@@ -30,7 +30,8 @@ | |||
30 | 30 | ||
31 | #include <mach/hardware.h> | 31 | #include <mach/hardware.h> |
32 | #include <mach/platform.h> | 32 | #include <mach/platform.h> |
33 | #include <asm/irq.h> | 33 | #include <mach/irqs.h> |
34 | |||
34 | #include <asm/signal.h> | 35 | #include <asm/signal.h> |
35 | #include <asm/system.h> | 36 | #include <asm/system.h> |
36 | #include <asm/mach/pci.h> | 37 | #include <asm/mach/pci.h> |
diff --git a/arch/arm/mach-realview/hotplug.c b/arch/arm/mach-realview/hotplug.c index ac1aed2a8da4..5e64fbf8d0ec 100644 --- a/arch/arm/mach-realview/hotplug.c +++ b/arch/arm/mach-realview/hotplug.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/smp.h> | 13 | #include <linux/smp.h> |
14 | 14 | ||
15 | #include <asm/cacheflush.h> | 15 | #include <asm/cacheflush.h> |
16 | #include <asm/cp15.h> | ||
16 | 17 | ||
17 | extern volatile int pen_release; | 18 | extern volatile int pen_release; |
18 | 19 | ||
diff --git a/arch/arm/mach-tegra/hotplug.c b/arch/arm/mach-tegra/hotplug.c index f3294040d357..d8dc9ddd6d18 100644 --- a/arch/arm/mach-tegra/hotplug.c +++ b/arch/arm/mach-tegra/hotplug.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/smp.h> | 13 | #include <linux/smp.h> |
14 | 14 | ||
15 | #include <asm/cacheflush.h> | 15 | #include <asm/cacheflush.h> |
16 | #include <asm/cp15.h> | ||
16 | 17 | ||
17 | static inline void cpu_enter_lowpower(void) | 18 | static inline void cpu_enter_lowpower(void) |
18 | { | 19 | { |
diff --git a/arch/arm/mach-vexpress/hotplug.c b/arch/arm/mach-vexpress/hotplug.c index 813ee08f96e6..7a05548544c3 100644 --- a/arch/arm/mach-vexpress/hotplug.c +++ b/arch/arm/mach-vexpress/hotplug.c | |||
@@ -13,7 +13,7 @@ | |||
13 | #include <linux/smp.h> | 13 | #include <linux/smp.h> |
14 | 14 | ||
15 | #include <asm/cacheflush.h> | 15 | #include <asm/cacheflush.h> |
16 | #include <asm/system.h> | 16 | #include <asm/cp15.h> |
17 | 17 | ||
18 | extern volatile int pen_release; | 18 | extern volatile int pen_release; |
19 | 19 | ||
diff --git a/arch/arm/mach-vexpress/include/mach/io.h b/arch/arm/mach-vexpress/include/mach/io.h index 13522d86685e..0088cd388a84 100644 --- a/arch/arm/mach-vexpress/include/mach/io.h +++ b/arch/arm/mach-vexpress/include/mach/io.h | |||
@@ -20,7 +20,6 @@ | |||
20 | #ifndef __ASM_ARM_ARCH_IO_H | 20 | #ifndef __ASM_ARM_ARCH_IO_H |
21 | #define __ASM_ARM_ARCH_IO_H | 21 | #define __ASM_ARM_ARCH_IO_H |
22 | 22 | ||
23 | #define __io(a) __typesafe_io(a) | ||
24 | #define __mem_pci(a) (a) | 23 | #define __mem_pci(a) (a) |
25 | 24 | ||
26 | #endif | 25 | #endif |
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index caf14dc059e5..78459b8a2a1d 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c | |||
@@ -22,7 +22,7 @@ | |||
22 | #include <linux/sched.h> | 22 | #include <linux/sched.h> |
23 | #include <linux/uaccess.h> | 23 | #include <linux/uaccess.h> |
24 | 24 | ||
25 | #include <asm/system.h> | 25 | #include <asm/cp15.h> |
26 | #include <asm/unaligned.h> | 26 | #include <asm/unaligned.h> |
27 | 27 | ||
28 | #include "fault.h" | 28 | #include "fault.h" |
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c index e0b0e7a4ec68..dd3d59122cc3 100644 --- a/arch/arm/mm/cache-feroceon-l2.c +++ b/arch/arm/mm/cache-feroceon-l2.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/init.h> | 15 | #include <linux/init.h> |
16 | #include <linux/highmem.h> | 16 | #include <linux/highmem.h> |
17 | #include <asm/cacheflush.h> | 17 | #include <asm/cacheflush.h> |
18 | #include <asm/cp15.h> | ||
18 | #include <plat/cache-feroceon-l2.h> | 19 | #include <plat/cache-feroceon-l2.h> |
19 | 20 | ||
20 | /* | 21 | /* |
diff --git a/arch/arm/mm/cache-tauros2.c b/arch/arm/mm/cache-tauros2.c index 50868651890f..1fbca05fe906 100644 --- a/arch/arm/mm/cache-tauros2.c +++ b/arch/arm/mm/cache-tauros2.c | |||
@@ -16,6 +16,7 @@ | |||
16 | 16 | ||
17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
18 | #include <asm/cacheflush.h> | 18 | #include <asm/cacheflush.h> |
19 | #include <asm/cp15.h> | ||
19 | #include <asm/hardware/cache-tauros2.h> | 20 | #include <asm/hardware/cache-tauros2.h> |
20 | 21 | ||
21 | 22 | ||
diff --git a/arch/arm/mm/cache-xsc3l2.c b/arch/arm/mm/cache-xsc3l2.c index 5a32020471e3..6c3edeb66e74 100644 --- a/arch/arm/mm/cache-xsc3l2.c +++ b/arch/arm/mm/cache-xsc3l2.c | |||
@@ -18,7 +18,7 @@ | |||
18 | */ | 18 | */ |
19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
20 | #include <linux/highmem.h> | 20 | #include <linux/highmem.h> |
21 | #include <asm/system.h> | 21 | #include <asm/cp15.h> |
22 | #include <asm/cputype.h> | 22 | #include <asm/cputype.h> |
23 | #include <asm/cacheflush.h> | 23 | #include <asm/cacheflush.h> |
24 | 24 | ||
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c index 7d0a8c230342..6e06180a8bc0 100644 --- a/arch/arm/mm/copypage-v4mc.c +++ b/arch/arm/mm/copypage-v4mc.c | |||
@@ -23,10 +23,6 @@ | |||
23 | 23 | ||
24 | #include "mm.h" | 24 | #include "mm.h" |
25 | 25 | ||
26 | /* | ||
27 | * 0xffff8000 to 0xffffffff is reserved for any ARM architecture | ||
28 | * specific hacks for copying pages efficiently. | ||
29 | */ | ||
30 | #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \ | 26 | #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \ |
31 | L_PTE_MT_MINICACHE) | 27 | L_PTE_MT_MINICACHE) |
32 | 28 | ||
@@ -78,10 +74,9 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from, | |||
78 | 74 | ||
79 | raw_spin_lock(&minicache_lock); | 75 | raw_spin_lock(&minicache_lock); |
80 | 76 | ||
81 | set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(page_to_pfn(from), minicache_pgprot), 0); | 77 | set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot)); |
82 | flush_tlb_kernel_page(0xffff8000); | ||
83 | 78 | ||
84 | mc_copy_user_page((void *)0xffff8000, kto); | 79 | mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto); |
85 | 80 | ||
86 | raw_spin_unlock(&minicache_lock); | 81 | raw_spin_unlock(&minicache_lock); |
87 | 82 | ||
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c index 3d9a1552cef6..29c770463e41 100644 --- a/arch/arm/mm/copypage-v6.c +++ b/arch/arm/mm/copypage-v6.c | |||
@@ -24,9 +24,6 @@ | |||
24 | #error FIX ME | 24 | #error FIX ME |
25 | #endif | 25 | #endif |
26 | 26 | ||
27 | #define from_address (0xffff8000) | ||
28 | #define to_address (0xffffc000) | ||
29 | |||
30 | static DEFINE_RAW_SPINLOCK(v6_lock); | 27 | static DEFINE_RAW_SPINLOCK(v6_lock); |
31 | 28 | ||
32 | /* | 29 | /* |
@@ -90,14 +87,11 @@ static void v6_copy_user_highpage_aliasing(struct page *to, | |||
90 | */ | 87 | */ |
91 | raw_spin_lock(&v6_lock); | 88 | raw_spin_lock(&v6_lock); |
92 | 89 | ||
93 | set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0); | 90 | kfrom = COPYPAGE_V6_FROM + (offset << PAGE_SHIFT); |
94 | set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0); | 91 | kto = COPYPAGE_V6_TO + (offset << PAGE_SHIFT); |
95 | |||
96 | kfrom = from_address + (offset << PAGE_SHIFT); | ||
97 | kto = to_address + (offset << PAGE_SHIFT); | ||
98 | 92 | ||
99 | flush_tlb_kernel_page(kfrom); | 93 | set_top_pte(kfrom, mk_pte(from, PAGE_KERNEL)); |
100 | flush_tlb_kernel_page(kto); | 94 | set_top_pte(kto, mk_pte(to, PAGE_KERNEL)); |
101 | 95 | ||
102 | copy_page((void *)kto, (void *)kfrom); | 96 | copy_page((void *)kto, (void *)kfrom); |
103 | 97 | ||
@@ -111,8 +105,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to, | |||
111 | */ | 105 | */ |
112 | static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr) | 106 | static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr) |
113 | { | 107 | { |
114 | unsigned int offset = CACHE_COLOUR(vaddr); | 108 | unsigned long to = COPYPAGE_V6_TO + (CACHE_COLOUR(vaddr) << PAGE_SHIFT); |
115 | unsigned long to = to_address + (offset << PAGE_SHIFT); | ||
116 | 109 | ||
117 | /* FIXME: not highmem safe */ | 110 | /* FIXME: not highmem safe */ |
118 | discard_old_kernel_data(page_address(page)); | 111 | discard_old_kernel_data(page_address(page)); |
@@ -123,8 +116,7 @@ static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vad | |||
123 | */ | 116 | */ |
124 | raw_spin_lock(&v6_lock); | 117 | raw_spin_lock(&v6_lock); |
125 | 118 | ||
126 | set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0); | 119 | set_top_pte(to, mk_pte(page, PAGE_KERNEL)); |
127 | flush_tlb_kernel_page(to); | ||
128 | clear_page((void *)to); | 120 | clear_page((void *)to); |
129 | 121 | ||
130 | raw_spin_unlock(&v6_lock); | 122 | raw_spin_unlock(&v6_lock); |
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c index 610c24ced310..804eeddda97f 100644 --- a/arch/arm/mm/copypage-xscale.c +++ b/arch/arm/mm/copypage-xscale.c | |||
@@ -23,12 +23,6 @@ | |||
23 | 23 | ||
24 | #include "mm.h" | 24 | #include "mm.h" |
25 | 25 | ||
26 | /* | ||
27 | * 0xffff8000 to 0xffffffff is reserved for any ARM architecture | ||
28 | * specific hacks for copying pages efficiently. | ||
29 | */ | ||
30 | #define COPYPAGE_MINICACHE 0xffff8000 | ||
31 | |||
32 | #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \ | 26 | #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \ |
33 | L_PTE_MT_MINICACHE) | 27 | L_PTE_MT_MINICACHE) |
34 | 28 | ||
@@ -100,8 +94,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from, | |||
100 | 94 | ||
101 | raw_spin_lock(&minicache_lock); | 95 | raw_spin_lock(&minicache_lock); |
102 | 96 | ||
103 | set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0); | 97 | set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot)); |
104 | flush_tlb_kernel_page(COPYPAGE_MINICACHE); | ||
105 | 98 | ||
106 | mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto); | 99 | mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto); |
107 | 100 | ||
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 1aa664a1999f..db23ae4aaaab 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -214,7 +214,8 @@ static int __init consistent_init(void) | |||
214 | core_initcall(consistent_init); | 214 | core_initcall(consistent_init); |
215 | 215 | ||
216 | static void * | 216 | static void * |
217 | __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot) | 217 | __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot, |
218 | const void *caller) | ||
218 | { | 219 | { |
219 | struct arm_vmregion *c; | 220 | struct arm_vmregion *c; |
220 | size_t align; | 221 | size_t align; |
@@ -241,7 +242,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot) | |||
241 | * Allocate a virtual address in the consistent mapping region. | 242 | * Allocate a virtual address in the consistent mapping region. |
242 | */ | 243 | */ |
243 | c = arm_vmregion_alloc(&consistent_head, align, size, | 244 | c = arm_vmregion_alloc(&consistent_head, align, size, |
244 | gfp & ~(__GFP_DMA | __GFP_HIGHMEM)); | 245 | gfp & ~(__GFP_DMA | __GFP_HIGHMEM), caller); |
245 | if (c) { | 246 | if (c) { |
246 | pte_t *pte; | 247 | pte_t *pte; |
247 | int idx = CONSISTENT_PTE_INDEX(c->vm_start); | 248 | int idx = CONSISTENT_PTE_INDEX(c->vm_start); |
@@ -320,14 +321,14 @@ static void __dma_free_remap(void *cpu_addr, size_t size) | |||
320 | 321 | ||
321 | #else /* !CONFIG_MMU */ | 322 | #else /* !CONFIG_MMU */ |
322 | 323 | ||
323 | #define __dma_alloc_remap(page, size, gfp, prot) page_address(page) | 324 | #define __dma_alloc_remap(page, size, gfp, prot, c) page_address(page) |
324 | #define __dma_free_remap(addr, size) do { } while (0) | 325 | #define __dma_free_remap(addr, size) do { } while (0) |
325 | 326 | ||
326 | #endif /* CONFIG_MMU */ | 327 | #endif /* CONFIG_MMU */ |
327 | 328 | ||
328 | static void * | 329 | static void * |
329 | __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, | 330 | __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, |
330 | pgprot_t prot) | 331 | pgprot_t prot, const void *caller) |
331 | { | 332 | { |
332 | struct page *page; | 333 | struct page *page; |
333 | void *addr; | 334 | void *addr; |
@@ -349,7 +350,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, | |||
349 | return NULL; | 350 | return NULL; |
350 | 351 | ||
351 | if (!arch_is_coherent()) | 352 | if (!arch_is_coherent()) |
352 | addr = __dma_alloc_remap(page, size, gfp, prot); | 353 | addr = __dma_alloc_remap(page, size, gfp, prot, caller); |
353 | else | 354 | else |
354 | addr = page_address(page); | 355 | addr = page_address(page); |
355 | 356 | ||
@@ -374,7 +375,8 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gf | |||
374 | return memory; | 375 | return memory; |
375 | 376 | ||
376 | return __dma_alloc(dev, size, handle, gfp, | 377 | return __dma_alloc(dev, size, handle, gfp, |
377 | pgprot_dmacoherent(pgprot_kernel)); | 378 | pgprot_dmacoherent(pgprot_kernel), |
379 | __builtin_return_address(0)); | ||
378 | } | 380 | } |
379 | EXPORT_SYMBOL(dma_alloc_coherent); | 381 | EXPORT_SYMBOL(dma_alloc_coherent); |
380 | 382 | ||
@@ -386,7 +388,8 @@ void * | |||
386 | dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp) | 388 | dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp) |
387 | { | 389 | { |
388 | return __dma_alloc(dev, size, handle, gfp, | 390 | return __dma_alloc(dev, size, handle, gfp, |
389 | pgprot_writecombine(pgprot_kernel)); | 391 | pgprot_writecombine(pgprot_kernel), |
392 | __builtin_return_address(0)); | ||
390 | } | 393 | } |
391 | EXPORT_SYMBOL(dma_alloc_writecombine); | 394 | EXPORT_SYMBOL(dma_alloc_writecombine); |
392 | 395 | ||
@@ -723,6 +726,9 @@ EXPORT_SYMBOL(dma_set_mask); | |||
723 | 726 | ||
724 | static int __init dma_debug_do_init(void) | 727 | static int __init dma_debug_do_init(void) |
725 | { | 728 | { |
729 | #ifdef CONFIG_MMU | ||
730 | arm_vmregion_create_proc("dma-mappings", &consistent_head); | ||
731 | #endif | ||
726 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); | 732 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); |
727 | return 0; | 733 | return 0; |
728 | } | 734 | } |
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index bb7eac381a8e..40c43a94ba72 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c | |||
@@ -164,7 +164,8 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr, | |||
164 | struct siginfo si; | 164 | struct siginfo si; |
165 | 165 | ||
166 | #ifdef CONFIG_DEBUG_USER | 166 | #ifdef CONFIG_DEBUG_USER |
167 | if (user_debug & UDBG_SEGV) { | 167 | if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) || |
168 | ((user_debug & UDBG_BUS) && (sig == SIGBUS))) { | ||
168 | printk(KERN_DEBUG "%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n", | 169 | printk(KERN_DEBUG "%s: unhandled page fault (%d) at 0x%08lx, code 0x%03x\n", |
169 | tsk->comm, sig, addr, fsr); | 170 | tsk->comm, sig, addr, fsr); |
170 | show_pte(tsk->mm, addr); | 171 | show_pte(tsk->mm, addr); |
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index 1a8d4aa821be..4d0b70f035eb 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c | |||
@@ -23,15 +23,12 @@ | |||
23 | 23 | ||
24 | #ifdef CONFIG_CPU_CACHE_VIPT | 24 | #ifdef CONFIG_CPU_CACHE_VIPT |
25 | 25 | ||
26 | #define ALIAS_FLUSH_START 0xffff4000 | ||
27 | |||
28 | static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr) | 26 | static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr) |
29 | { | 27 | { |
30 | unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT); | 28 | unsigned long to = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT); |
31 | const int zero = 0; | 29 | const int zero = 0; |
32 | 30 | ||
33 | set_pte_ext(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL), 0); | 31 | set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL)); |
34 | flush_tlb_kernel_page(to); | ||
35 | 32 | ||
36 | asm( "mcrr p15, 0, %1, %0, c14\n" | 33 | asm( "mcrr p15, 0, %1, %0, c14\n" |
37 | " mcr p15, 0, %2, c7, c10, 4" | 34 | " mcr p15, 0, %2, c7, c10, 4" |
@@ -42,13 +39,12 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr) | |||
42 | 39 | ||
43 | static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len) | 40 | static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len) |
44 | { | 41 | { |
45 | unsigned long colour = CACHE_COLOUR(vaddr); | 42 | unsigned long va = FLUSH_ALIAS_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT); |
46 | unsigned long offset = vaddr & (PAGE_SIZE - 1); | 43 | unsigned long offset = vaddr & (PAGE_SIZE - 1); |
47 | unsigned long to; | 44 | unsigned long to; |
48 | 45 | ||
49 | set_pte_ext(TOP_PTE(ALIAS_FLUSH_START) + colour, pfn_pte(pfn, PAGE_KERNEL), 0); | 46 | set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL)); |
50 | to = ALIAS_FLUSH_START + (colour << PAGE_SHIFT) + offset; | 47 | to = va + offset; |
51 | flush_tlb_kernel_page(to); | ||
52 | flush_icache_range(to, to + len); | 48 | flush_icache_range(to, to + len); |
53 | } | 49 | } |
54 | 50 | ||
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c index 807c0573abbe..3a9e8aa19759 100644 --- a/arch/arm/mm/highmem.c +++ b/arch/arm/mm/highmem.c | |||
@@ -69,15 +69,14 @@ void *__kmap_atomic(struct page *page) | |||
69 | * With debugging enabled, kunmap_atomic forces that entry to 0. | 69 | * With debugging enabled, kunmap_atomic forces that entry to 0. |
70 | * Make sure it was indeed properly unmapped. | 70 | * Make sure it was indeed properly unmapped. |
71 | */ | 71 | */ |
72 | BUG_ON(!pte_none(*(TOP_PTE(vaddr)))); | 72 | BUG_ON(!pte_none(get_top_pte(vaddr))); |
73 | #endif | 73 | #endif |
74 | set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0); | ||
75 | /* | 74 | /* |
76 | * When debugging is off, kunmap_atomic leaves the previous mapping | 75 | * When debugging is off, kunmap_atomic leaves the previous mapping |
77 | * in place, so this TLB flush ensures the TLB is updated with the | 76 | * in place, so the contained TLB flush ensures the TLB is updated |
78 | * new mapping. | 77 | * with the new mapping. |
79 | */ | 78 | */ |
80 | local_flush_tlb_kernel_page(vaddr); | 79 | set_top_pte(vaddr, mk_pte(page, kmap_prot)); |
81 | 80 | ||
82 | return (void *)vaddr; | 81 | return (void *)vaddr; |
83 | } | 82 | } |
@@ -96,8 +95,7 @@ void __kunmap_atomic(void *kvaddr) | |||
96 | __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); | 95 | __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); |
97 | #ifdef CONFIG_DEBUG_HIGHMEM | 96 | #ifdef CONFIG_DEBUG_HIGHMEM |
98 | BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); | 97 | BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); |
99 | set_pte_ext(TOP_PTE(vaddr), __pte(0), 0); | 98 | set_top_pte(vaddr, __pte(0)); |
100 | local_flush_tlb_kernel_page(vaddr); | ||
101 | #else | 99 | #else |
102 | (void) idx; /* to kill a warning */ | 100 | (void) idx; /* to kill a warning */ |
103 | #endif | 101 | #endif |
@@ -121,10 +119,9 @@ void *kmap_atomic_pfn(unsigned long pfn) | |||
121 | idx = type + KM_TYPE_NR * smp_processor_id(); | 119 | idx = type + KM_TYPE_NR * smp_processor_id(); |
122 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); | 120 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); |
123 | #ifdef CONFIG_DEBUG_HIGHMEM | 121 | #ifdef CONFIG_DEBUG_HIGHMEM |
124 | BUG_ON(!pte_none(*(TOP_PTE(vaddr)))); | 122 | BUG_ON(!pte_none(get_top_pte(vaddr))); |
125 | #endif | 123 | #endif |
126 | set_pte_ext(TOP_PTE(vaddr), pfn_pte(pfn, kmap_prot), 0); | 124 | set_top_pte(vaddr, pfn_pte(pfn, kmap_prot)); |
127 | local_flush_tlb_kernel_page(vaddr); | ||
128 | 125 | ||
129 | return (void *)vaddr; | 126 | return (void *)vaddr; |
130 | } | 127 | } |
@@ -132,11 +129,9 @@ void *kmap_atomic_pfn(unsigned long pfn) | |||
132 | struct page *kmap_atomic_to_page(const void *ptr) | 129 | struct page *kmap_atomic_to_page(const void *ptr) |
133 | { | 130 | { |
134 | unsigned long vaddr = (unsigned long)ptr; | 131 | unsigned long vaddr = (unsigned long)ptr; |
135 | pte_t *pte; | ||
136 | 132 | ||
137 | if (vaddr < FIXADDR_START) | 133 | if (vaddr < FIXADDR_START) |
138 | return virt_to_page(ptr); | 134 | return virt_to_page(ptr); |
139 | 135 | ||
140 | pte = TOP_PTE(vaddr); | 136 | return pte_page(get_top_pte(vaddr)); |
141 | return pte_page(*pte); | ||
142 | } | 137 | } |
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 6ec1226fc62d..42d906f89964 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c | |||
@@ -659,7 +659,9 @@ void __init mem_init(void) | |||
659 | #ifdef CONFIG_HIGHMEM | 659 | #ifdef CONFIG_HIGHMEM |
660 | " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n" | 660 | " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n" |
661 | #endif | 661 | #endif |
662 | #ifdef CONFIG_MODULES | ||
662 | " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" | 663 | " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" |
664 | #endif | ||
663 | " .text : 0x%p" " - 0x%p" " (%4d kB)\n" | 665 | " .text : 0x%p" " - 0x%p" " (%4d kB)\n" |
664 | " .init : 0x%p" " - 0x%p" " (%4d kB)\n" | 666 | " .init : 0x%p" " - 0x%p" " (%4d kB)\n" |
665 | " .data : 0x%p" " - 0x%p" " (%4d kB)\n" | 667 | " .data : 0x%p" " - 0x%p" " (%4d kB)\n" |
@@ -678,7 +680,9 @@ void __init mem_init(void) | |||
678 | MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) * | 680 | MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) * |
679 | (PAGE_SIZE)), | 681 | (PAGE_SIZE)), |
680 | #endif | 682 | #endif |
683 | #ifdef CONFIG_MODULES | ||
681 | MLM(MODULES_VADDR, MODULES_END), | 684 | MLM(MODULES_VADDR, MODULES_END), |
685 | #endif | ||
682 | 686 | ||
683 | MLK_ROUNDUP(_text, _etext), | 687 | MLK_ROUNDUP(_text, _etext), |
684 | MLK_ROUNDUP(__init_begin, __init_end), | 688 | MLK_ROUNDUP(__init_begin, __init_end), |
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index 80632e8d7538..66daf17b5e33 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/vmalloc.h> | 26 | #include <linux/vmalloc.h> |
27 | #include <linux/io.h> | 27 | #include <linux/io.h> |
28 | 28 | ||
29 | #include <asm/cp15.h> | ||
29 | #include <asm/cputype.h> | 30 | #include <asm/cputype.h> |
30 | #include <asm/cacheflush.h> | 31 | #include <asm/cacheflush.h> |
31 | #include <asm/mmu_context.h> | 32 | #include <asm/mmu_context.h> |
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index 70f6d3ea4834..27f4a619b35d 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h | |||
@@ -3,7 +3,31 @@ | |||
3 | /* the upper-most page table pointer */ | 3 | /* the upper-most page table pointer */ |
4 | extern pmd_t *top_pmd; | 4 | extern pmd_t *top_pmd; |
5 | 5 | ||
6 | #define TOP_PTE(x) pte_offset_kernel(top_pmd, x) | 6 | /* |
7 | * 0xffff8000 to 0xffffffff is reserved for any ARM architecture | ||
8 | * specific hacks for copying pages efficiently, while 0xffff4000 | ||
9 | * is reserved for VIPT aliasing flushing by generic code. | ||
10 | * | ||
11 | * Note that we don't allow VIPT aliasing caches with SMP. | ||
12 | */ | ||
13 | #define COPYPAGE_MINICACHE 0xffff8000 | ||
14 | #define COPYPAGE_V6_FROM 0xffff8000 | ||
15 | #define COPYPAGE_V6_TO 0xffffc000 | ||
16 | /* PFN alias flushing, for VIPT caches */ | ||
17 | #define FLUSH_ALIAS_START 0xffff4000 | ||
18 | |||
19 | static inline void set_top_pte(unsigned long va, pte_t pte) | ||
20 | { | ||
21 | pte_t *ptep = pte_offset_kernel(top_pmd, va); | ||
22 | set_pte_ext(ptep, pte, 0); | ||
23 | local_flush_tlb_kernel_page(va); | ||
24 | } | ||
25 | |||
26 | static inline pte_t get_top_pte(unsigned long va) | ||
27 | { | ||
28 | pte_t *ptep = pte_offset_kernel(top_pmd, va); | ||
29 | return *ptep; | ||
30 | } | ||
7 | 31 | ||
8 | static inline pmd_t *pmd_off_k(unsigned long virt) | 32 | static inline pmd_t *pmd_off_k(unsigned long virt) |
9 | { | 33 | { |
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 94c5a0c94f5e..f77f1dbbdf59 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/fs.h> | 17 | #include <linux/fs.h> |
18 | #include <linux/vmalloc.h> | 18 | #include <linux/vmalloc.h> |
19 | 19 | ||
20 | #include <asm/cp15.h> | ||
20 | #include <asm/cputype.h> | 21 | #include <asm/cputype.h> |
21 | #include <asm/sections.h> | 22 | #include <asm/sections.h> |
22 | #include <asm/cachetype.h> | 23 | #include <asm/cachetype.h> |
@@ -997,11 +998,14 @@ static void __init devicemaps_init(struct machine_desc *mdesc) | |||
997 | { | 998 | { |
998 | struct map_desc map; | 999 | struct map_desc map; |
999 | unsigned long addr; | 1000 | unsigned long addr; |
1001 | void *vectors; | ||
1000 | 1002 | ||
1001 | /* | 1003 | /* |
1002 | * Allocate the vector page early. | 1004 | * Allocate the vector page early. |
1003 | */ | 1005 | */ |
1004 | vectors_page = early_alloc(PAGE_SIZE); | 1006 | vectors = early_alloc(PAGE_SIZE); |
1007 | |||
1008 | early_trap_init(vectors); | ||
1005 | 1009 | ||
1006 | for (addr = VMALLOC_START; addr; addr += PMD_SIZE) | 1010 | for (addr = VMALLOC_START; addr; addr += PMD_SIZE) |
1007 | pmd_clear(pmd_off_k(addr)); | 1011 | pmd_clear(pmd_off_k(addr)); |
@@ -1041,7 +1045,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc) | |||
1041 | * location (0xffff0000). If we aren't using high-vectors, also | 1045 | * location (0xffff0000). If we aren't using high-vectors, also |
1042 | * create a mapping at the low-vectors virtual address. | 1046 | * create a mapping at the low-vectors virtual address. |
1043 | */ | 1047 | */ |
1044 | map.pfn = __phys_to_pfn(virt_to_phys(vectors_page)); | 1048 | map.pfn = __phys_to_pfn(virt_to_phys(vectors)); |
1045 | map.virtual = 0xffff0000; | 1049 | map.virtual = 0xffff0000; |
1046 | map.length = PAGE_SIZE; | 1050 | map.length = PAGE_SIZE; |
1047 | map.type = MT_HIGH_VECTORS; | 1051 | map.type = MT_HIGH_VECTORS; |
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c index a3e78ccabd65..0acb089d0f70 100644 --- a/arch/arm/mm/pgd.c +++ b/arch/arm/mm/pgd.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/highmem.h> | 12 | #include <linux/highmem.h> |
13 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
14 | 14 | ||
15 | #include <asm/cp15.h> | ||
15 | #include <asm/pgalloc.h> | 16 | #include <asm/pgalloc.h> |
16 | #include <asm/page.h> | 17 | #include <asm/page.h> |
17 | #include <asm/tlbflush.h> | 18 | #include <asm/tlbflush.h> |
diff --git a/arch/arm/mm/vmregion.c b/arch/arm/mm/vmregion.c index 036fdbfdd62f..a631016e1f8f 100644 --- a/arch/arm/mm/vmregion.c +++ b/arch/arm/mm/vmregion.c | |||
@@ -1,5 +1,8 @@ | |||
1 | #include <linux/fs.h> | ||
1 | #include <linux/spinlock.h> | 2 | #include <linux/spinlock.h> |
2 | #include <linux/list.h> | 3 | #include <linux/list.h> |
4 | #include <linux/proc_fs.h> | ||
5 | #include <linux/seq_file.h> | ||
3 | #include <linux/slab.h> | 6 | #include <linux/slab.h> |
4 | 7 | ||
5 | #include "vmregion.h" | 8 | #include "vmregion.h" |
@@ -36,7 +39,7 @@ | |||
36 | 39 | ||
37 | struct arm_vmregion * | 40 | struct arm_vmregion * |
38 | arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align, | 41 | arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align, |
39 | size_t size, gfp_t gfp) | 42 | size_t size, gfp_t gfp, const void *caller) |
40 | { | 43 | { |
41 | unsigned long start = head->vm_start, addr = head->vm_end; | 44 | unsigned long start = head->vm_start, addr = head->vm_end; |
42 | unsigned long flags; | 45 | unsigned long flags; |
@@ -52,6 +55,8 @@ arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align, | |||
52 | if (!new) | 55 | if (!new) |
53 | goto out; | 56 | goto out; |
54 | 57 | ||
58 | new->caller = caller; | ||
59 | |||
55 | spin_lock_irqsave(&head->vm_lock, flags); | 60 | spin_lock_irqsave(&head->vm_lock, flags); |
56 | 61 | ||
57 | addr = rounddown(addr - size, align); | 62 | addr = rounddown(addr - size, align); |
@@ -129,3 +134,72 @@ void arm_vmregion_free(struct arm_vmregion_head *head, struct arm_vmregion *c) | |||
129 | 134 | ||
130 | kfree(c); | 135 | kfree(c); |
131 | } | 136 | } |
137 | |||
138 | #ifdef CONFIG_PROC_FS | ||
139 | static int arm_vmregion_show(struct seq_file *m, void *p) | ||
140 | { | ||
141 | struct arm_vmregion *c = list_entry(p, struct arm_vmregion, vm_list); | ||
142 | |||
143 | seq_printf(m, "0x%08lx-0x%08lx %7lu", c->vm_start, c->vm_end, | ||
144 | c->vm_end - c->vm_start); | ||
145 | if (c->caller) | ||
146 | seq_printf(m, " %pS", (void *)c->caller); | ||
147 | seq_putc(m, '\n'); | ||
148 | return 0; | ||
149 | } | ||
150 | |||
151 | static void *arm_vmregion_start(struct seq_file *m, loff_t *pos) | ||
152 | { | ||
153 | struct arm_vmregion_head *h = m->private; | ||
154 | spin_lock_irq(&h->vm_lock); | ||
155 | return seq_list_start(&h->vm_list, *pos); | ||
156 | } | ||
157 | |||
158 | static void *arm_vmregion_next(struct seq_file *m, void *p, loff_t *pos) | ||
159 | { | ||
160 | struct arm_vmregion_head *h = m->private; | ||
161 | return seq_list_next(p, &h->vm_list, pos); | ||
162 | } | ||
163 | |||
164 | static void arm_vmregion_stop(struct seq_file *m, void *p) | ||
165 | { | ||
166 | struct arm_vmregion_head *h = m->private; | ||
167 | spin_unlock_irq(&h->vm_lock); | ||
168 | } | ||
169 | |||
170 | static const struct seq_operations arm_vmregion_ops = { | ||
171 | .start = arm_vmregion_start, | ||
172 | .stop = arm_vmregion_stop, | ||
173 | .next = arm_vmregion_next, | ||
174 | .show = arm_vmregion_show, | ||
175 | }; | ||
176 | |||
177 | static int arm_vmregion_open(struct inode *inode, struct file *file) | ||
178 | { | ||
179 | struct arm_vmregion_head *h = PDE(inode)->data; | ||
180 | int ret = seq_open(file, &arm_vmregion_ops); | ||
181 | if (!ret) { | ||
182 | struct seq_file *m = file->private_data; | ||
183 | m->private = h; | ||
184 | } | ||
185 | return ret; | ||
186 | } | ||
187 | |||
188 | static const struct file_operations arm_vmregion_fops = { | ||
189 | .open = arm_vmregion_open, | ||
190 | .read = seq_read, | ||
191 | .llseek = seq_lseek, | ||
192 | .release = seq_release, | ||
193 | }; | ||
194 | |||
195 | int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h) | ||
196 | { | ||
197 | proc_create_data(path, S_IRUSR, NULL, &arm_vmregion_fops, h); | ||
198 | return 0; | ||
199 | } | ||
200 | #else | ||
201 | int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h) | ||
202 | { | ||
203 | return 0; | ||
204 | } | ||
205 | #endif | ||
diff --git a/arch/arm/mm/vmregion.h b/arch/arm/mm/vmregion.h index 15e9f044db9f..162be662c088 100644 --- a/arch/arm/mm/vmregion.h +++ b/arch/arm/mm/vmregion.h | |||
@@ -19,11 +19,14 @@ struct arm_vmregion { | |||
19 | unsigned long vm_end; | 19 | unsigned long vm_end; |
20 | struct page *vm_pages; | 20 | struct page *vm_pages; |
21 | int vm_active; | 21 | int vm_active; |
22 | const void *caller; | ||
22 | }; | 23 | }; |
23 | 24 | ||
24 | struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, size_t, gfp_t); | 25 | struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, size_t, gfp_t, const void *); |
25 | struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *, unsigned long); | 26 | struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *, unsigned long); |
26 | struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *, unsigned long); | 27 | struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *, unsigned long); |
27 | void arm_vmregion_free(struct arm_vmregion_head *, struct arm_vmregion *); | 28 | void arm_vmregion_free(struct arm_vmregion_head *, struct arm_vmregion *); |
28 | 29 | ||
30 | int arm_vmregion_create_proc(const char *, struct arm_vmregion_head *); | ||
31 | |||
29 | #endif | 32 | #endif |
diff --git a/arch/arm/net/Makefile b/arch/arm/net/Makefile new file mode 100644 index 000000000000..c2c10841b6be --- /dev/null +++ b/arch/arm/net/Makefile | |||
@@ -0,0 +1,3 @@ | |||
1 | # ARM-specific networking code | ||
2 | |||
3 | obj-$(CONFIG_BPF_JIT) += bpf_jit_32.o | ||
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c new file mode 100644 index 000000000000..62135849f48b --- /dev/null +++ b/arch/arm/net/bpf_jit_32.c | |||
@@ -0,0 +1,915 @@ | |||
1 | /* | ||
2 | * Just-In-Time compiler for BPF filters on 32bit ARM | ||
3 | * | ||
4 | * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the | ||
8 | * Free Software Foundation; version 2 of the License. | ||
9 | */ | ||
10 | |||
11 | #include <linux/bitops.h> | ||
12 | #include <linux/compiler.h> | ||
13 | #include <linux/errno.h> | ||
14 | #include <linux/filter.h> | ||
15 | #include <linux/moduleloader.h> | ||
16 | #include <linux/netdevice.h> | ||
17 | #include <linux/string.h> | ||
18 | #include <linux/slab.h> | ||
19 | #include <asm/cacheflush.h> | ||
20 | #include <asm/hwcap.h> | ||
21 | |||
22 | #include "bpf_jit_32.h" | ||
23 | |||
24 | /* | ||
25 | * ABI: | ||
26 | * | ||
27 | * r0 scratch register | ||
28 | * r4 BPF register A | ||
29 | * r5 BPF register X | ||
30 | * r6 pointer to the skb | ||
31 | * r7 skb->data | ||
32 | * r8 skb_headlen(skb) | ||
33 | */ | ||
34 | |||
35 | #define r_scratch ARM_R0 | ||
36 | /* r1-r3 are (also) used for the unaligned loads on the non-ARMv7 slowpath */ | ||
37 | #define r_off ARM_R1 | ||
38 | #define r_A ARM_R4 | ||
39 | #define r_X ARM_R5 | ||
40 | #define r_skb ARM_R6 | ||
41 | #define r_skb_data ARM_R7 | ||
42 | #define r_skb_hl ARM_R8 | ||
43 | |||
44 | #define SCRATCH_SP_OFFSET 0 | ||
45 | #define SCRATCH_OFF(k) (SCRATCH_SP_OFFSET + (k)) | ||
46 | |||
47 | #define SEEN_MEM ((1 << BPF_MEMWORDS) - 1) | ||
48 | #define SEEN_MEM_WORD(k) (1 << (k)) | ||
49 | #define SEEN_X (1 << BPF_MEMWORDS) | ||
50 | #define SEEN_CALL (1 << (BPF_MEMWORDS + 1)) | ||
51 | #define SEEN_SKB (1 << (BPF_MEMWORDS + 2)) | ||
52 | #define SEEN_DATA (1 << (BPF_MEMWORDS + 3)) | ||
53 | |||
54 | #define FLAG_NEED_X_RESET (1 << 0) | ||
55 | |||
56 | struct jit_ctx { | ||
57 | const struct sk_filter *skf; | ||
58 | unsigned idx; | ||
59 | unsigned prologue_bytes; | ||
60 | int ret0_fp_idx; | ||
61 | u32 seen; | ||
62 | u32 flags; | ||
63 | u32 *offsets; | ||
64 | u32 *target; | ||
65 | #if __LINUX_ARM_ARCH__ < 7 | ||
66 | u16 epilogue_bytes; | ||
67 | u16 imm_count; | ||
68 | u32 *imms; | ||
69 | #endif | ||
70 | }; | ||
71 | |||
72 | int bpf_jit_enable __read_mostly; | ||
73 | |||
74 | static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset) | ||
75 | { | ||
76 | u8 ret; | ||
77 | int err; | ||
78 | |||
79 | err = skb_copy_bits(skb, offset, &ret, 1); | ||
80 | |||
81 | return (u64)err << 32 | ret; | ||
82 | } | ||
83 | |||
84 | static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset) | ||
85 | { | ||
86 | u16 ret; | ||
87 | int err; | ||
88 | |||
89 | err = skb_copy_bits(skb, offset, &ret, 2); | ||
90 | |||
91 | return (u64)err << 32 | ntohs(ret); | ||
92 | } | ||
93 | |||
94 | static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset) | ||
95 | { | ||
96 | u32 ret; | ||
97 | int err; | ||
98 | |||
99 | err = skb_copy_bits(skb, offset, &ret, 4); | ||
100 | |||
101 | return (u64)err << 32 | ntohl(ret); | ||
102 | } | ||
103 | |||
104 | /* | ||
105 | * Wrapper that handles both OABI and EABI and assures Thumb2 interworking | ||
106 | * (where the assembly routines like __aeabi_uidiv could cause problems). | ||
107 | */ | ||
108 | static u32 jit_udiv(u32 dividend, u32 divisor) | ||
109 | { | ||
110 | return dividend / divisor; | ||
111 | } | ||
112 | |||
113 | static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx) | ||
114 | { | ||
115 | if (ctx->target != NULL) | ||
116 | ctx->target[ctx->idx] = inst | (cond << 28); | ||
117 | |||
118 | ctx->idx++; | ||
119 | } | ||
120 | |||
121 | /* | ||
122 | * Emit an instruction that will be executed unconditionally. | ||
123 | */ | ||
124 | static inline void emit(u32 inst, struct jit_ctx *ctx) | ||
125 | { | ||
126 | _emit(ARM_COND_AL, inst, ctx); | ||
127 | } | ||
128 | |||
129 | static u16 saved_regs(struct jit_ctx *ctx) | ||
130 | { | ||
131 | u16 ret = 0; | ||
132 | |||
133 | if ((ctx->skf->len > 1) || | ||
134 | (ctx->skf->insns[0].code == BPF_S_RET_A)) | ||
135 | ret |= 1 << r_A; | ||
136 | |||
137 | #ifdef CONFIG_FRAME_POINTER | ||
138 | ret |= (1 << ARM_FP) | (1 << ARM_IP) | (1 << ARM_LR) | (1 << ARM_PC); | ||
139 | #else | ||
140 | if (ctx->seen & SEEN_CALL) | ||
141 | ret |= 1 << ARM_LR; | ||
142 | #endif | ||
143 | if (ctx->seen & (SEEN_DATA | SEEN_SKB)) | ||
144 | ret |= 1 << r_skb; | ||
145 | if (ctx->seen & SEEN_DATA) | ||
146 | ret |= (1 << r_skb_data) | (1 << r_skb_hl); | ||
147 | if (ctx->seen & SEEN_X) | ||
148 | ret |= 1 << r_X; | ||
149 | |||
150 | return ret; | ||
151 | } | ||
152 | |||
153 | static inline int mem_words_used(struct jit_ctx *ctx) | ||
154 | { | ||
155 | /* yes, we do waste some stack space IF there are "holes" in the set */ | ||
156 | return fls(ctx->seen & SEEN_MEM); | ||
157 | } | ||
158 | |||
159 | static inline bool is_load_to_a(u16 inst) | ||
160 | { | ||
161 | switch (inst) { | ||
162 | case BPF_S_LD_W_LEN: | ||
163 | case BPF_S_LD_W_ABS: | ||
164 | case BPF_S_LD_H_ABS: | ||
165 | case BPF_S_LD_B_ABS: | ||
166 | case BPF_S_ANC_CPU: | ||
167 | case BPF_S_ANC_IFINDEX: | ||
168 | case BPF_S_ANC_MARK: | ||
169 | case BPF_S_ANC_PROTOCOL: | ||
170 | case BPF_S_ANC_RXHASH: | ||
171 | case BPF_S_ANC_QUEUE: | ||
172 | return true; | ||
173 | default: | ||
174 | return false; | ||
175 | } | ||
176 | } | ||
177 | |||
178 | static void build_prologue(struct jit_ctx *ctx) | ||
179 | { | ||
180 | u16 reg_set = saved_regs(ctx); | ||
181 | u16 first_inst = ctx->skf->insns[0].code; | ||
182 | u16 off; | ||
183 | |||
184 | #ifdef CONFIG_FRAME_POINTER | ||
185 | emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx); | ||
186 | emit(ARM_PUSH(reg_set), ctx); | ||
187 | emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx); | ||
188 | #else | ||
189 | if (reg_set) | ||
190 | emit(ARM_PUSH(reg_set), ctx); | ||
191 | #endif | ||
192 | |||
193 | if (ctx->seen & (SEEN_DATA | SEEN_SKB)) | ||
194 | emit(ARM_MOV_R(r_skb, ARM_R0), ctx); | ||
195 | |||
196 | if (ctx->seen & SEEN_DATA) { | ||
197 | off = offsetof(struct sk_buff, data); | ||
198 | emit(ARM_LDR_I(r_skb_data, r_skb, off), ctx); | ||
199 | /* headlen = len - data_len */ | ||
200 | off = offsetof(struct sk_buff, len); | ||
201 | emit(ARM_LDR_I(r_skb_hl, r_skb, off), ctx); | ||
202 | off = offsetof(struct sk_buff, data_len); | ||
203 | emit(ARM_LDR_I(r_scratch, r_skb, off), ctx); | ||
204 | emit(ARM_SUB_R(r_skb_hl, r_skb_hl, r_scratch), ctx); | ||
205 | } | ||
206 | |||
207 | if (ctx->flags & FLAG_NEED_X_RESET) | ||
208 | emit(ARM_MOV_I(r_X, 0), ctx); | ||
209 | |||
210 | /* do not leak kernel data to userspace */ | ||
211 | if ((first_inst != BPF_S_RET_K) && !(is_load_to_a(first_inst))) | ||
212 | emit(ARM_MOV_I(r_A, 0), ctx); | ||
213 | |||
214 | /* stack space for the BPF_MEM words */ | ||
215 | if (ctx->seen & SEEN_MEM) | ||
216 | emit(ARM_SUB_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx); | ||
217 | } | ||
218 | |||
219 | static void build_epilogue(struct jit_ctx *ctx) | ||
220 | { | ||
221 | u16 reg_set = saved_regs(ctx); | ||
222 | |||
223 | if (ctx->seen & SEEN_MEM) | ||
224 | emit(ARM_ADD_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx); | ||
225 | |||
226 | reg_set &= ~(1 << ARM_LR); | ||
227 | |||
228 | #ifdef CONFIG_FRAME_POINTER | ||
229 | /* the first instruction of the prologue was: mov ip, sp */ | ||
230 | reg_set &= ~(1 << ARM_IP); | ||
231 | reg_set |= (1 << ARM_SP); | ||
232 | emit(ARM_LDM(ARM_SP, reg_set), ctx); | ||
233 | #else | ||
234 | if (reg_set) { | ||
235 | if (ctx->seen & SEEN_CALL) | ||
236 | reg_set |= 1 << ARM_PC; | ||
237 | emit(ARM_POP(reg_set), ctx); | ||
238 | } | ||
239 | |||
240 | if (!(ctx->seen & SEEN_CALL)) | ||
241 | emit(ARM_BX(ARM_LR), ctx); | ||
242 | #endif | ||
243 | } | ||
244 | |||
245 | static int16_t imm8m(u32 x) | ||
246 | { | ||
247 | u32 rot; | ||
248 | |||
249 | for (rot = 0; rot < 16; rot++) | ||
250 | if ((x & ~ror32(0xff, 2 * rot)) == 0) | ||
251 | return rol32(x, 2 * rot) | (rot << 8); | ||
252 | |||
253 | return -1; | ||
254 | } | ||
255 | |||
256 | #if __LINUX_ARM_ARCH__ < 7 | ||
257 | |||
258 | static u16 imm_offset(u32 k, struct jit_ctx *ctx) | ||
259 | { | ||
260 | unsigned i = 0, offset; | ||
261 | u16 imm; | ||
262 | |||
263 | /* on the "fake" run we just count them (duplicates included) */ | ||
264 | if (ctx->target == NULL) { | ||
265 | ctx->imm_count++; | ||
266 | return 0; | ||
267 | } | ||
268 | |||
269 | while ((i < ctx->imm_count) && ctx->imms[i]) { | ||
270 | if (ctx->imms[i] == k) | ||
271 | break; | ||
272 | i++; | ||
273 | } | ||
274 | |||
275 | if (ctx->imms[i] == 0) | ||
276 | ctx->imms[i] = k; | ||
277 | |||
278 | /* constants go just after the epilogue */ | ||
279 | offset = ctx->offsets[ctx->skf->len]; | ||
280 | offset += ctx->prologue_bytes; | ||
281 | offset += ctx->epilogue_bytes; | ||
282 | offset += i * 4; | ||
283 | |||
284 | ctx->target[offset / 4] = k; | ||
285 | |||
286 | /* PC in ARM mode == address of the instruction + 8 */ | ||
287 | imm = offset - (8 + ctx->idx * 4); | ||
288 | |||
289 | return imm; | ||
290 | } | ||
291 | |||
292 | #endif /* __LINUX_ARM_ARCH__ */ | ||
293 | |||
294 | /* | ||
295 | * Move an immediate that's not an imm8m to a core register. | ||
296 | */ | ||
297 | static inline void emit_mov_i_no8m(int rd, u32 val, struct jit_ctx *ctx) | ||
298 | { | ||
299 | #if __LINUX_ARM_ARCH__ < 7 | ||
300 | emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx); | ||
301 | #else | ||
302 | emit(ARM_MOVW(rd, val & 0xffff), ctx); | ||
303 | if (val > 0xffff) | ||
304 | emit(ARM_MOVT(rd, val >> 16), ctx); | ||
305 | #endif | ||
306 | } | ||
307 | |||
308 | static inline void emit_mov_i(int rd, u32 val, struct jit_ctx *ctx) | ||
309 | { | ||
310 | int imm12 = imm8m(val); | ||
311 | |||
312 | if (imm12 >= 0) | ||
313 | emit(ARM_MOV_I(rd, imm12), ctx); | ||
314 | else | ||
315 | emit_mov_i_no8m(rd, val, ctx); | ||
316 | } | ||
317 | |||
318 | #if __LINUX_ARM_ARCH__ < 6 | ||
319 | |||
320 | static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx) | ||
321 | { | ||
322 | _emit(cond, ARM_LDRB_I(ARM_R3, r_addr, 1), ctx); | ||
323 | _emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx); | ||
324 | _emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 3), ctx); | ||
325 | _emit(cond, ARM_LSL_I(ARM_R3, ARM_R3, 16), ctx); | ||
326 | _emit(cond, ARM_LDRB_I(ARM_R0, r_addr, 2), ctx); | ||
327 | _emit(cond, ARM_ORR_S(ARM_R3, ARM_R3, ARM_R1, SRTYPE_LSL, 24), ctx); | ||
328 | _emit(cond, ARM_ORR_R(ARM_R3, ARM_R3, ARM_R2), ctx); | ||
329 | _emit(cond, ARM_ORR_S(r_res, ARM_R3, ARM_R0, SRTYPE_LSL, 8), ctx); | ||
330 | } | ||
331 | |||
332 | static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx) | ||
333 | { | ||
334 | _emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx); | ||
335 | _emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 1), ctx); | ||
336 | _emit(cond, ARM_ORR_S(r_res, ARM_R2, ARM_R1, SRTYPE_LSL, 8), ctx); | ||
337 | } | ||
338 | |||
339 | static inline void emit_swap16(u8 r_dst, u8 r_src, struct jit_ctx *ctx) | ||
340 | { | ||
341 | emit(ARM_LSL_R(ARM_R1, r_src, 8), ctx); | ||
342 | emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSL, 8), ctx); | ||
343 | emit(ARM_LSL_I(r_dst, r_dst, 8), ctx); | ||
344 | emit(ARM_LSL_R(r_dst, r_dst, 8), ctx); | ||
345 | } | ||
346 | |||
347 | #else /* ARMv6+ */ | ||
348 | |||
349 | static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx) | ||
350 | { | ||
351 | _emit(cond, ARM_LDR_I(r_res, r_addr, 0), ctx); | ||
352 | #ifdef __LITTLE_ENDIAN | ||
353 | _emit(cond, ARM_REV(r_res, r_res), ctx); | ||
354 | #endif | ||
355 | } | ||
356 | |||
357 | static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx) | ||
358 | { | ||
359 | _emit(cond, ARM_LDRH_I(r_res, r_addr, 0), ctx); | ||
360 | #ifdef __LITTLE_ENDIAN | ||
361 | _emit(cond, ARM_REV16(r_res, r_res), ctx); | ||
362 | #endif | ||
363 | } | ||
364 | |||
365 | static inline void emit_swap16(u8 r_dst __maybe_unused, | ||
366 | u8 r_src __maybe_unused, | ||
367 | struct jit_ctx *ctx __maybe_unused) | ||
368 | { | ||
369 | #ifdef __LITTLE_ENDIAN | ||
370 | emit(ARM_REV16(r_dst, r_src), ctx); | ||
371 | #endif | ||
372 | } | ||
373 | |||
374 | #endif /* __LINUX_ARM_ARCH__ < 6 */ | ||
375 | |||
376 | |||
377 | /* Compute the immediate value for a PC-relative branch. */ | ||
378 | static inline u32 b_imm(unsigned tgt, struct jit_ctx *ctx) | ||
379 | { | ||
380 | u32 imm; | ||
381 | |||
382 | if (ctx->target == NULL) | ||
383 | return 0; | ||
384 | /* | ||
385 | * BPF allows only forward jumps and the offset of the target is | ||
386 | * still the one computed during the first pass. | ||
387 | */ | ||
388 | imm = ctx->offsets[tgt] + ctx->prologue_bytes - (ctx->idx * 4 + 8); | ||
389 | |||
390 | return imm >> 2; | ||
391 | } | ||
392 | |||
393 | #define OP_IMM3(op, r1, r2, imm_val, ctx) \ | ||
394 | do { \ | ||
395 | imm12 = imm8m(imm_val); \ | ||
396 | if (imm12 < 0) { \ | ||
397 | emit_mov_i_no8m(r_scratch, imm_val, ctx); \ | ||
398 | emit(op ## _R((r1), (r2), r_scratch), ctx); \ | ||
399 | } else { \ | ||
400 | emit(op ## _I((r1), (r2), imm12), ctx); \ | ||
401 | } \ | ||
402 | } while (0) | ||
403 | |||
404 | static inline void emit_err_ret(u8 cond, struct jit_ctx *ctx) | ||
405 | { | ||
406 | if (ctx->ret0_fp_idx >= 0) { | ||
407 | _emit(cond, ARM_B(b_imm(ctx->ret0_fp_idx, ctx)), ctx); | ||
408 | /* NOP to keep the size constant between passes */ | ||
409 | emit(ARM_MOV_R(ARM_R0, ARM_R0), ctx); | ||
410 | } else { | ||
411 | _emit(cond, ARM_MOV_I(ARM_R0, 0), ctx); | ||
412 | _emit(cond, ARM_B(b_imm(ctx->skf->len, ctx)), ctx); | ||
413 | } | ||
414 | } | ||
415 | |||
416 | static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx) | ||
417 | { | ||
418 | #if __LINUX_ARM_ARCH__ < 5 | ||
419 | emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx); | ||
420 | |||
421 | if (elf_hwcap & HWCAP_THUMB) | ||
422 | emit(ARM_BX(tgt_reg), ctx); | ||
423 | else | ||
424 | emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx); | ||
425 | #else | ||
426 | emit(ARM_BLX_R(tgt_reg), ctx); | ||
427 | #endif | ||
428 | } | ||
429 | |||
430 | static inline void emit_udiv(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx) | ||
431 | { | ||
432 | #if __LINUX_ARM_ARCH__ == 7 | ||
433 | if (elf_hwcap & HWCAP_IDIVA) { | ||
434 | emit(ARM_UDIV(rd, rm, rn), ctx); | ||
435 | return; | ||
436 | } | ||
437 | #endif | ||
438 | if (rm != ARM_R0) | ||
439 | emit(ARM_MOV_R(ARM_R0, rm), ctx); | ||
440 | if (rn != ARM_R1) | ||
441 | emit(ARM_MOV_R(ARM_R1, rn), ctx); | ||
442 | |||
443 | ctx->seen |= SEEN_CALL; | ||
444 | emit_mov_i(ARM_R3, (u32)jit_udiv, ctx); | ||
445 | emit_blx_r(ARM_R3, ctx); | ||
446 | |||
447 | if (rd != ARM_R0) | ||
448 | emit(ARM_MOV_R(rd, ARM_R0), ctx); | ||
449 | } | ||
450 | |||
451 | static inline void update_on_xread(struct jit_ctx *ctx) | ||
452 | { | ||
453 | if (!(ctx->seen & SEEN_X)) | ||
454 | ctx->flags |= FLAG_NEED_X_RESET; | ||
455 | |||
456 | ctx->seen |= SEEN_X; | ||
457 | } | ||
458 | |||
459 | static int build_body(struct jit_ctx *ctx) | ||
460 | { | ||
461 | void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w}; | ||
462 | const struct sk_filter *prog = ctx->skf; | ||
463 | const struct sock_filter *inst; | ||
464 | unsigned i, load_order, off, condt; | ||
465 | int imm12; | ||
466 | u32 k; | ||
467 | |||
468 | for (i = 0; i < prog->len; i++) { | ||
469 | inst = &(prog->insns[i]); | ||
470 | /* K as an immediate value operand */ | ||
471 | k = inst->k; | ||
472 | |||
473 | /* compute offsets only in the fake pass */ | ||
474 | if (ctx->target == NULL) | ||
475 | ctx->offsets[i] = ctx->idx * 4; | ||
476 | |||
477 | switch (inst->code) { | ||
478 | case BPF_S_LD_IMM: | ||
479 | emit_mov_i(r_A, k, ctx); | ||
480 | break; | ||
481 | case BPF_S_LD_W_LEN: | ||
482 | ctx->seen |= SEEN_SKB; | ||
483 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); | ||
484 | emit(ARM_LDR_I(r_A, r_skb, | ||
485 | offsetof(struct sk_buff, len)), ctx); | ||
486 | break; | ||
487 | case BPF_S_LD_MEM: | ||
488 | /* A = scratch[k] */ | ||
489 | ctx->seen |= SEEN_MEM_WORD(k); | ||
490 | emit(ARM_LDR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx); | ||
491 | break; | ||
492 | case BPF_S_LD_W_ABS: | ||
493 | load_order = 2; | ||
494 | goto load; | ||
495 | case BPF_S_LD_H_ABS: | ||
496 | load_order = 1; | ||
497 | goto load; | ||
498 | case BPF_S_LD_B_ABS: | ||
499 | load_order = 0; | ||
500 | load: | ||
501 | /* the interpreter will deal with the negative K */ | ||
502 | if ((int)k < 0) | ||
503 | return -ENOTSUPP; | ||
504 | emit_mov_i(r_off, k, ctx); | ||
505 | load_common: | ||
506 | ctx->seen |= SEEN_DATA | SEEN_CALL; | ||
507 | |||
508 | if (load_order > 0) { | ||
509 | emit(ARM_SUB_I(r_scratch, r_skb_hl, | ||
510 | 1 << load_order), ctx); | ||
511 | emit(ARM_CMP_R(r_scratch, r_off), ctx); | ||
512 | condt = ARM_COND_HS; | ||
513 | } else { | ||
514 | emit(ARM_CMP_R(r_skb_hl, r_off), ctx); | ||
515 | condt = ARM_COND_HI; | ||
516 | } | ||
517 | |||
518 | _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data), | ||
519 | ctx); | ||
520 | |||
521 | if (load_order == 0) | ||
522 | _emit(condt, ARM_LDRB_I(r_A, r_scratch, 0), | ||
523 | ctx); | ||
524 | else if (load_order == 1) | ||
525 | emit_load_be16(condt, r_A, r_scratch, ctx); | ||
526 | else if (load_order == 2) | ||
527 | emit_load_be32(condt, r_A, r_scratch, ctx); | ||
528 | |||
529 | _emit(condt, ARM_B(b_imm(i + 1, ctx)), ctx); | ||
530 | |||
531 | /* the slowpath */ | ||
532 | emit_mov_i(ARM_R3, (u32)load_func[load_order], ctx); | ||
533 | emit(ARM_MOV_R(ARM_R0, r_skb), ctx); | ||
534 | /* the offset is already in R1 */ | ||
535 | emit_blx_r(ARM_R3, ctx); | ||
536 | /* check the result of skb_copy_bits */ | ||
537 | emit(ARM_CMP_I(ARM_R1, 0), ctx); | ||
538 | emit_err_ret(ARM_COND_NE, ctx); | ||
539 | emit(ARM_MOV_R(r_A, ARM_R0), ctx); | ||
540 | break; | ||
541 | case BPF_S_LD_W_IND: | ||
542 | load_order = 2; | ||
543 | goto load_ind; | ||
544 | case BPF_S_LD_H_IND: | ||
545 | load_order = 1; | ||
546 | goto load_ind; | ||
547 | case BPF_S_LD_B_IND: | ||
548 | load_order = 0; | ||
549 | load_ind: | ||
550 | OP_IMM3(ARM_ADD, r_off, r_X, k, ctx); | ||
551 | goto load_common; | ||
552 | case BPF_S_LDX_IMM: | ||
553 | ctx->seen |= SEEN_X; | ||
554 | emit_mov_i(r_X, k, ctx); | ||
555 | break; | ||
556 | case BPF_S_LDX_W_LEN: | ||
557 | ctx->seen |= SEEN_X | SEEN_SKB; | ||
558 | emit(ARM_LDR_I(r_X, r_skb, | ||
559 | offsetof(struct sk_buff, len)), ctx); | ||
560 | break; | ||
561 | case BPF_S_LDX_MEM: | ||
562 | ctx->seen |= SEEN_X | SEEN_MEM_WORD(k); | ||
563 | emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx); | ||
564 | break; | ||
565 | case BPF_S_LDX_B_MSH: | ||
566 | /* x = ((*(frame + k)) & 0xf) << 2; */ | ||
567 | ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL; | ||
568 | /* the interpreter should deal with the negative K */ | ||
569 | if (k < 0) | ||
570 | return -1; | ||
571 | /* offset in r1: we might have to take the slow path */ | ||
572 | emit_mov_i(r_off, k, ctx); | ||
573 | emit(ARM_CMP_R(r_skb_hl, r_off), ctx); | ||
574 | |||
575 | /* load in r0: common with the slowpath */ | ||
576 | _emit(ARM_COND_HI, ARM_LDRB_R(ARM_R0, r_skb_data, | ||
577 | ARM_R1), ctx); | ||
578 | /* | ||
579 | * emit_mov_i() might generate one or two instructions, | ||
580 | * the same holds for emit_blx_r() | ||
581 | */ | ||
582 | _emit(ARM_COND_HI, ARM_B(b_imm(i + 1, ctx) - 2), ctx); | ||
583 | |||
584 | emit(ARM_MOV_R(ARM_R0, r_skb), ctx); | ||
585 | /* r_off is r1 */ | ||
586 | emit_mov_i(ARM_R3, (u32)jit_get_skb_b, ctx); | ||
587 | emit_blx_r(ARM_R3, ctx); | ||
588 | /* check the return value of skb_copy_bits */ | ||
589 | emit(ARM_CMP_I(ARM_R1, 0), ctx); | ||
590 | emit_err_ret(ARM_COND_NE, ctx); | ||
591 | |||
592 | emit(ARM_AND_I(r_X, ARM_R0, 0x00f), ctx); | ||
593 | emit(ARM_LSL_I(r_X, r_X, 2), ctx); | ||
594 | break; | ||
595 | case BPF_S_ST: | ||
596 | ctx->seen |= SEEN_MEM_WORD(k); | ||
597 | emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx); | ||
598 | break; | ||
599 | case BPF_S_STX: | ||
600 | update_on_xread(ctx); | ||
601 | ctx->seen |= SEEN_MEM_WORD(k); | ||
602 | emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx); | ||
603 | break; | ||
604 | case BPF_S_ALU_ADD_K: | ||
605 | /* A += K */ | ||
606 | OP_IMM3(ARM_ADD, r_A, r_A, k, ctx); | ||
607 | break; | ||
608 | case BPF_S_ALU_ADD_X: | ||
609 | update_on_xread(ctx); | ||
610 | emit(ARM_ADD_R(r_A, r_A, r_X), ctx); | ||
611 | break; | ||
612 | case BPF_S_ALU_SUB_K: | ||
613 | /* A -= K */ | ||
614 | OP_IMM3(ARM_SUB, r_A, r_A, k, ctx); | ||
615 | break; | ||
616 | case BPF_S_ALU_SUB_X: | ||
617 | update_on_xread(ctx); | ||
618 | emit(ARM_SUB_R(r_A, r_A, r_X), ctx); | ||
619 | break; | ||
620 | case BPF_S_ALU_MUL_K: | ||
621 | /* A *= K */ | ||
622 | emit_mov_i(r_scratch, k, ctx); | ||
623 | emit(ARM_MUL(r_A, r_A, r_scratch), ctx); | ||
624 | break; | ||
625 | case BPF_S_ALU_MUL_X: | ||
626 | update_on_xread(ctx); | ||
627 | emit(ARM_MUL(r_A, r_A, r_X), ctx); | ||
628 | break; | ||
629 | case BPF_S_ALU_DIV_K: | ||
630 | /* current k == reciprocal_value(userspace k) */ | ||
631 | emit_mov_i(r_scratch, k, ctx); | ||
632 | /* A = top 32 bits of the product */ | ||
633 | emit(ARM_UMULL(r_scratch, r_A, r_A, r_scratch), ctx); | ||
634 | break; | ||
635 | case BPF_S_ALU_DIV_X: | ||
636 | update_on_xread(ctx); | ||
637 | emit(ARM_CMP_I(r_X, 0), ctx); | ||
638 | emit_err_ret(ARM_COND_EQ, ctx); | ||
639 | emit_udiv(r_A, r_A, r_X, ctx); | ||
640 | break; | ||
641 | case BPF_S_ALU_OR_K: | ||
642 | /* A |= K */ | ||
643 | OP_IMM3(ARM_ORR, r_A, r_A, k, ctx); | ||
644 | break; | ||
645 | case BPF_S_ALU_OR_X: | ||
646 | update_on_xread(ctx); | ||
647 | emit(ARM_ORR_R(r_A, r_A, r_X), ctx); | ||
648 | break; | ||
649 | case BPF_S_ALU_AND_K: | ||
650 | /* A &= K */ | ||
651 | OP_IMM3(ARM_AND, r_A, r_A, k, ctx); | ||
652 | break; | ||
653 | case BPF_S_ALU_AND_X: | ||
654 | update_on_xread(ctx); | ||
655 | emit(ARM_AND_R(r_A, r_A, r_X), ctx); | ||
656 | break; | ||
657 | case BPF_S_ALU_LSH_K: | ||
658 | if (unlikely(k > 31)) | ||
659 | return -1; | ||
660 | emit(ARM_LSL_I(r_A, r_A, k), ctx); | ||
661 | break; | ||
662 | case BPF_S_ALU_LSH_X: | ||
663 | update_on_xread(ctx); | ||
664 | emit(ARM_LSL_R(r_A, r_A, r_X), ctx); | ||
665 | break; | ||
666 | case BPF_S_ALU_RSH_K: | ||
667 | if (unlikely(k > 31)) | ||
668 | return -1; | ||
669 | emit(ARM_LSR_I(r_A, r_A, k), ctx); | ||
670 | break; | ||
671 | case BPF_S_ALU_RSH_X: | ||
672 | update_on_xread(ctx); | ||
673 | emit(ARM_LSR_R(r_A, r_A, r_X), ctx); | ||
674 | break; | ||
675 | case BPF_S_ALU_NEG: | ||
676 | /* A = -A */ | ||
677 | emit(ARM_RSB_I(r_A, r_A, 0), ctx); | ||
678 | break; | ||
679 | case BPF_S_JMP_JA: | ||
680 | /* pc += K */ | ||
681 | emit(ARM_B(b_imm(i + k + 1, ctx)), ctx); | ||
682 | break; | ||
683 | case BPF_S_JMP_JEQ_K: | ||
684 | /* pc += (A == K) ? pc->jt : pc->jf */ | ||
685 | condt = ARM_COND_EQ; | ||
686 | goto cmp_imm; | ||
687 | case BPF_S_JMP_JGT_K: | ||
688 | /* pc += (A > K) ? pc->jt : pc->jf */ | ||
689 | condt = ARM_COND_HI; | ||
690 | goto cmp_imm; | ||
691 | case BPF_S_JMP_JGE_K: | ||
692 | /* pc += (A >= K) ? pc->jt : pc->jf */ | ||
693 | condt = ARM_COND_HS; | ||
694 | cmp_imm: | ||
695 | imm12 = imm8m(k); | ||
696 | if (imm12 < 0) { | ||
697 | emit_mov_i_no8m(r_scratch, k, ctx); | ||
698 | emit(ARM_CMP_R(r_A, r_scratch), ctx); | ||
699 | } else { | ||
700 | emit(ARM_CMP_I(r_A, imm12), ctx); | ||
701 | } | ||
702 | cond_jump: | ||
703 | if (inst->jt) | ||
704 | _emit(condt, ARM_B(b_imm(i + inst->jt + 1, | ||
705 | ctx)), ctx); | ||
706 | if (inst->jf) | ||
707 | _emit(condt ^ 1, ARM_B(b_imm(i + inst->jf + 1, | ||
708 | ctx)), ctx); | ||
709 | break; | ||
710 | case BPF_S_JMP_JEQ_X: | ||
711 | /* pc += (A == X) ? pc->jt : pc->jf */ | ||
712 | condt = ARM_COND_EQ; | ||
713 | goto cmp_x; | ||
714 | case BPF_S_JMP_JGT_X: | ||
715 | /* pc += (A > X) ? pc->jt : pc->jf */ | ||
716 | condt = ARM_COND_HI; | ||
717 | goto cmp_x; | ||
718 | case BPF_S_JMP_JGE_X: | ||
719 | /* pc += (A >= X) ? pc->jt : pc->jf */ | ||
720 | condt = ARM_COND_CS; | ||
721 | cmp_x: | ||
722 | update_on_xread(ctx); | ||
723 | emit(ARM_CMP_R(r_A, r_X), ctx); | ||
724 | goto cond_jump; | ||
725 | case BPF_S_JMP_JSET_K: | ||
726 | /* pc += (A & K) ? pc->jt : pc->jf */ | ||
727 | condt = ARM_COND_NE; | ||
728 | /* not set iff all zeroes iff Z==1 iff EQ */ | ||
729 | |||
730 | imm12 = imm8m(k); | ||
731 | if (imm12 < 0) { | ||
732 | emit_mov_i_no8m(r_scratch, k, ctx); | ||
733 | emit(ARM_TST_R(r_A, r_scratch), ctx); | ||
734 | } else { | ||
735 | emit(ARM_TST_I(r_A, imm12), ctx); | ||
736 | } | ||
737 | goto cond_jump; | ||
738 | case BPF_S_JMP_JSET_X: | ||
739 | /* pc += (A & X) ? pc->jt : pc->jf */ | ||
740 | update_on_xread(ctx); | ||
741 | condt = ARM_COND_NE; | ||
742 | emit(ARM_TST_R(r_A, r_X), ctx); | ||
743 | goto cond_jump; | ||
744 | case BPF_S_RET_A: | ||
745 | emit(ARM_MOV_R(ARM_R0, r_A), ctx); | ||
746 | goto b_epilogue; | ||
747 | case BPF_S_RET_K: | ||
748 | if ((k == 0) && (ctx->ret0_fp_idx < 0)) | ||
749 | ctx->ret0_fp_idx = i; | ||
750 | emit_mov_i(ARM_R0, k, ctx); | ||
751 | b_epilogue: | ||
752 | if (i != ctx->skf->len - 1) | ||
753 | emit(ARM_B(b_imm(prog->len, ctx)), ctx); | ||
754 | break; | ||
755 | case BPF_S_MISC_TAX: | ||
756 | /* X = A */ | ||
757 | ctx->seen |= SEEN_X; | ||
758 | emit(ARM_MOV_R(r_X, r_A), ctx); | ||
759 | break; | ||
760 | case BPF_S_MISC_TXA: | ||
761 | /* A = X */ | ||
762 | update_on_xread(ctx); | ||
763 | emit(ARM_MOV_R(r_A, r_X), ctx); | ||
764 | break; | ||
765 | case BPF_S_ANC_PROTOCOL: | ||
766 | /* A = ntohs(skb->protocol) */ | ||
767 | ctx->seen |= SEEN_SKB; | ||
768 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, | ||
769 | protocol) != 2); | ||
770 | off = offsetof(struct sk_buff, protocol); | ||
771 | emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx); | ||
772 | emit_swap16(r_A, r_scratch, ctx); | ||
773 | break; | ||
774 | case BPF_S_ANC_CPU: | ||
775 | /* r_scratch = current_thread_info() */ | ||
776 | OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx); | ||
777 | /* A = current_thread_info()->cpu */ | ||
778 | BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4); | ||
779 | off = offsetof(struct thread_info, cpu); | ||
780 | emit(ARM_LDR_I(r_A, r_scratch, off), ctx); | ||
781 | break; | ||
782 | case BPF_S_ANC_IFINDEX: | ||
783 | /* A = skb->dev->ifindex */ | ||
784 | ctx->seen |= SEEN_SKB; | ||
785 | off = offsetof(struct sk_buff, dev); | ||
786 | emit(ARM_LDR_I(r_scratch, r_skb, off), ctx); | ||
787 | |||
788 | emit(ARM_CMP_I(r_scratch, 0), ctx); | ||
789 | emit_err_ret(ARM_COND_EQ, ctx); | ||
790 | |||
791 | BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, | ||
792 | ifindex) != 4); | ||
793 | off = offsetof(struct net_device, ifindex); | ||
794 | emit(ARM_LDR_I(r_A, r_scratch, off), ctx); | ||
795 | break; | ||
796 | case BPF_S_ANC_MARK: | ||
797 | ctx->seen |= SEEN_SKB; | ||
798 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); | ||
799 | off = offsetof(struct sk_buff, mark); | ||
800 | emit(ARM_LDR_I(r_A, r_skb, off), ctx); | ||
801 | break; | ||
802 | case BPF_S_ANC_RXHASH: | ||
803 | ctx->seen |= SEEN_SKB; | ||
804 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4); | ||
805 | off = offsetof(struct sk_buff, rxhash); | ||
806 | emit(ARM_LDR_I(r_A, r_skb, off), ctx); | ||
807 | break; | ||
808 | case BPF_S_ANC_QUEUE: | ||
809 | ctx->seen |= SEEN_SKB; | ||
810 | BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, | ||
811 | queue_mapping) != 2); | ||
812 | BUILD_BUG_ON(offsetof(struct sk_buff, | ||
813 | queue_mapping) > 0xff); | ||
814 | off = offsetof(struct sk_buff, queue_mapping); | ||
815 | emit(ARM_LDRH_I(r_A, r_skb, off), ctx); | ||
816 | break; | ||
817 | default: | ||
818 | return -1; | ||
819 | } | ||
820 | } | ||
821 | |||
822 | /* compute offsets only during the first pass */ | ||
823 | if (ctx->target == NULL) | ||
824 | ctx->offsets[i] = ctx->idx * 4; | ||
825 | |||
826 | return 0; | ||
827 | } | ||
828 | |||
829 | |||
830 | void bpf_jit_compile(struct sk_filter *fp) | ||
831 | { | ||
832 | struct jit_ctx ctx; | ||
833 | unsigned tmp_idx; | ||
834 | unsigned alloc_size; | ||
835 | |||
836 | if (!bpf_jit_enable) | ||
837 | return; | ||
838 | |||
839 | memset(&ctx, 0, sizeof(ctx)); | ||
840 | ctx.skf = fp; | ||
841 | ctx.ret0_fp_idx = -1; | ||
842 | |||
843 | ctx.offsets = kzalloc(GFP_KERNEL, 4 * (ctx.skf->len + 1)); | ||
844 | if (ctx.offsets == NULL) | ||
845 | return; | ||
846 | |||
847 | /* fake pass to fill in the ctx->seen */ | ||
848 | if (unlikely(build_body(&ctx))) | ||
849 | goto out; | ||
850 | |||
851 | tmp_idx = ctx.idx; | ||
852 | build_prologue(&ctx); | ||
853 | ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4; | ||
854 | |||
855 | #if __LINUX_ARM_ARCH__ < 7 | ||
856 | tmp_idx = ctx.idx; | ||
857 | build_epilogue(&ctx); | ||
858 | ctx.epilogue_bytes = (ctx.idx - tmp_idx) * 4; | ||
859 | |||
860 | ctx.idx += ctx.imm_count; | ||
861 | if (ctx.imm_count) { | ||
862 | ctx.imms = kzalloc(GFP_KERNEL, 4 * ctx.imm_count); | ||
863 | if (ctx.imms == NULL) | ||
864 | goto out; | ||
865 | } | ||
866 | #else | ||
867 | /* there's nothing after the epilogue on ARMv7 */ | ||
868 | build_epilogue(&ctx); | ||
869 | #endif | ||
870 | |||
871 | alloc_size = 4 * ctx.idx; | ||
872 | ctx.target = module_alloc(max(sizeof(struct work_struct), | ||
873 | alloc_size)); | ||
874 | if (unlikely(ctx.target == NULL)) | ||
875 | goto out; | ||
876 | |||
877 | ctx.idx = 0; | ||
878 | build_prologue(&ctx); | ||
879 | build_body(&ctx); | ||
880 | build_epilogue(&ctx); | ||
881 | |||
882 | flush_icache_range((u32)ctx.target, (u32)(ctx.target + ctx.idx)); | ||
883 | |||
884 | #if __LINUX_ARM_ARCH__ < 7 | ||
885 | if (ctx.imm_count) | ||
886 | kfree(ctx.imms); | ||
887 | #endif | ||
888 | |||
889 | if (bpf_jit_enable > 1) | ||
890 | print_hex_dump(KERN_INFO, "BPF JIT code: ", | ||
891 | DUMP_PREFIX_ADDRESS, 16, 4, ctx.target, | ||
892 | alloc_size, false); | ||
893 | |||
894 | fp->bpf_func = (void *)ctx.target; | ||
895 | out: | ||
896 | kfree(ctx.offsets); | ||
897 | return; | ||
898 | } | ||
899 | |||
/*
 * Deferred-free handler used by bpf_jit_free().
 * @work points at the start of the module_alloc()'d JIT buffer itself
 * (bpf_jit_compile() sized the buffer to hold at least a work_struct),
 * so freeing @work releases the whole code image.
 */
static void bpf_jit_free_worker(struct work_struct *work)
{
	module_free(NULL, work);
}
904 | |||
905 | void bpf_jit_free(struct sk_filter *fp) | ||
906 | { | ||
907 | struct work_struct *work; | ||
908 | |||
909 | if (fp->bpf_func != sk_run_filter) { | ||
910 | work = (struct work_struct *)fp->bpf_func; | ||
911 | |||
912 | INIT_WORK(work, bpf_jit_free_worker); | ||
913 | schedule_work(work); | ||
914 | } | ||
915 | } | ||
diff --git a/arch/arm/net/bpf_jit_32.h b/arch/arm/net/bpf_jit_32.h new file mode 100644 index 000000000000..99ae5e3f46d2 --- /dev/null +++ b/arch/arm/net/bpf_jit_32.h | |||
@@ -0,0 +1,190 @@ | |||
/*
 * Just-In-Time compiler for BPF filters on 32bit ARM
 *
 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */

#ifndef PFILTER_OPCODES_ARM_H
#define PFILTER_OPCODES_ARM_H

/* ARM core register numbers */
#define ARM_R0	0
#define ARM_R1	1
#define ARM_R2	2
#define ARM_R3	3
#define ARM_R4	4
#define ARM_R5	5
#define ARM_R6	6
#define ARM_R7	7
#define ARM_R8	8
#define ARM_R9	9
#define ARM_R10	10
#define ARM_FP	11
#define ARM_IP	12
#define ARM_SP	13
#define ARM_LR	14
#define ARM_PC	15

/* condition codes (instruction bits [31:28]) */
#define ARM_COND_EQ		0x0
#define ARM_COND_NE		0x1
#define ARM_COND_CS		0x2
#define ARM_COND_HS		ARM_COND_CS
#define ARM_COND_CC		0x3
#define ARM_COND_LO		ARM_COND_CC
#define ARM_COND_MI		0x4
#define ARM_COND_PL		0x5
#define ARM_COND_VS		0x6
#define ARM_COND_VC		0x7
#define ARM_COND_HI		0x8
#define ARM_COND_LS		0x9
#define ARM_COND_GE		0xa
#define ARM_COND_LT		0xb
#define ARM_COND_GT		0xc
#define ARM_COND_LE		0xd
#define ARM_COND_AL		0xe

/* register shift types */
#define SRTYPE_LSL		0
#define SRTYPE_LSR		1
#define SRTYPE_ASR		2
#define SRTYPE_ROR		3

/*
 * Base opcodes (without condition field); the _R variants take a
 * register operand, the _I variants an immediate.
 */
#define ARM_INST_ADD_R		0x00800000
#define ARM_INST_ADD_I		0x02800000

#define ARM_INST_AND_R		0x00000000
#define ARM_INST_AND_I		0x02000000

#define ARM_INST_BIC_R		0x01c00000
#define ARM_INST_BIC_I		0x03c00000

#define ARM_INST_B		0x0a000000
#define ARM_INST_BX		0x012fff10
#define ARM_INST_BLX_R		0x012fff30

#define ARM_INST_CMP_R		0x01500000
#define ARM_INST_CMP_I		0x03500000

#define ARM_INST_LDRB_I		0x05d00000
#define ARM_INST_LDRB_R		0x07d00000
#define ARM_INST_LDRH_I		0x01d000b0
#define ARM_INST_LDR_I		0x05900000

#define ARM_INST_LDM		0x08900000

#define ARM_INST_LSL_I		0x01a00000
#define ARM_INST_LSL_R		0x01a00010

#define ARM_INST_LSR_I		0x01a00020
#define ARM_INST_LSR_R		0x01a00030

#define ARM_INST_MOV_R		0x01a00000
#define ARM_INST_MOV_I		0x03a00000
#define ARM_INST_MOVW		0x03000000
#define ARM_INST_MOVT		0x03400000

#define ARM_INST_MUL		0x00000090

#define ARM_INST_POP		0x08bd0000
#define ARM_INST_PUSH		0x092d0000

#define ARM_INST_ORR_R		0x01800000
#define ARM_INST_ORR_I		0x03800000

#define ARM_INST_REV		0x06bf0f30
#define ARM_INST_REV16		0x06bf0fb0

#define ARM_INST_RSB_I		0x02600000

#define ARM_INST_SUB_R		0x00400000
#define ARM_INST_SUB_I		0x02400000

#define ARM_INST_STR_I		0x05800000

#define ARM_INST_TST_R		0x01100000
#define ARM_INST_TST_I		0x03100000

#define ARM_INST_UDIV		0x0730f010

#define ARM_INST_UMULL		0x00800090

/*
 * Encoders for the standard 3-operand ALU layout:
 * Rd in bits [15:12], Rn in bits [19:16], Rm/imm in bits [11:0].
 */
/* register */
#define _AL3_R(op, rd, rn, rm)	((op ## _R) | (rd) << 12 | (rn) << 16 | (rm))
/* immediate */
#define _AL3_I(op, rd, rn, imm)	((op ## _I) | (rd) << 12 | (rn) << 16 | (imm))

#define ARM_ADD_R(rd, rn, rm)	_AL3_R(ARM_INST_ADD, rd, rn, rm)
#define ARM_ADD_I(rd, rn, imm)	_AL3_I(ARM_INST_ADD, rd, rn, imm)

#define ARM_AND_R(rd, rn, rm)	_AL3_R(ARM_INST_AND, rd, rn, rm)
#define ARM_AND_I(rd, rn, imm)	_AL3_I(ARM_INST_AND, rd, rn, imm)

#define ARM_BIC_R(rd, rn, rm)	_AL3_R(ARM_INST_BIC, rd, rn, rm)
#define ARM_BIC_I(rd, rn, imm)	_AL3_I(ARM_INST_BIC, rd, rn, imm)

#define ARM_B(imm24)		(ARM_INST_B | ((imm24) & 0xffffff))
#define ARM_BX(rm)		(ARM_INST_BX | (rm))
#define ARM_BLX_R(rm)		(ARM_INST_BLX_R | (rm))

#define ARM_CMP_R(rn, rm)	_AL3_R(ARM_INST_CMP, 0, rn, rm)
#define ARM_CMP_I(rn, imm)	_AL3_I(ARM_INST_CMP, 0, rn, imm)

#define ARM_LDR_I(rt, rn, off)	(ARM_INST_LDR_I | (rt) << 12 | (rn) << 16 \
				 | (off))
#define ARM_LDRB_I(rt, rn, off)	(ARM_INST_LDRB_I | (rt) << 12 | (rn) << 16 \
				 | (off))
#define ARM_LDRB_R(rt, rn, rm)	(ARM_INST_LDRB_R | (rt) << 12 | (rn) << 16 \
				 | (rm))
/* LDRH uses a split 8-bit immediate: high nibble in [11:8], low in [3:0] */
#define ARM_LDRH_I(rt, rn, off)	(ARM_INST_LDRH_I | (rt) << 12 | (rn) << 16 \
				 | (((off) & 0xf0) << 4) | ((off) & 0xf))

#define ARM_LDM(rn, regs)	(ARM_INST_LDM | (rn) << 16 | (regs))

#define ARM_LSL_R(rd, rn, rm)	(_AL3_R(ARM_INST_LSL, rd, 0, rn) | (rm) << 8)
#define ARM_LSL_I(rd, rn, imm)	(_AL3_I(ARM_INST_LSL, rd, 0, rn) | (imm) << 7)

#define ARM_LSR_R(rd, rn, rm)	(_AL3_R(ARM_INST_LSR, rd, 0, rn) | (rm) << 8)
#define ARM_LSR_I(rd, rn, imm)	(_AL3_I(ARM_INST_LSR, rd, 0, rn) | (imm) << 7)

#define ARM_MOV_R(rd, rm)	_AL3_R(ARM_INST_MOV, rd, 0, rm)
#define ARM_MOV_I(rd, imm)	_AL3_I(ARM_INST_MOV, rd, 0, imm)

/* MOVW/MOVT take a 16-bit immediate: top 4 bits in [19:16], rest in [11:0] */
#define ARM_MOVW(rd, imm)	\
	(ARM_INST_MOVW | ((imm) >> 12) << 16 | (rd) << 12 | ((imm) & 0x0fff))

#define ARM_MOVT(rd, imm)	\
	(ARM_INST_MOVT | ((imm) >> 12) << 16 | (rd) << 12 | ((imm) & 0x0fff))

#define ARM_MUL(rd, rm, rn)	(ARM_INST_MUL | (rd) << 16 | (rm) << 8 | (rn))

#define ARM_POP(regs)		(ARM_INST_POP | (regs))
#define ARM_PUSH(regs)		(ARM_INST_PUSH | (regs))

#define ARM_ORR_R(rd, rn, rm)	_AL3_R(ARM_INST_ORR, rd, rn, rm)
#define ARM_ORR_I(rd, rn, imm)	_AL3_I(ARM_INST_ORR, rd, rn, imm)
#define ARM_ORR_S(rd, rn, rm, type, rs)	\
	(ARM_ORR_R(rd, rn, rm) | (type) << 5 | (rs) << 7)

#define ARM_REV(rd, rm)		(ARM_INST_REV | (rd) << 12 | (rm))
#define ARM_REV16(rd, rm)	(ARM_INST_REV16 | (rd) << 12 | (rm))

#define ARM_RSB_I(rd, rn, imm)	_AL3_I(ARM_INST_RSB, rd, rn, imm)

#define ARM_SUB_R(rd, rn, rm)	_AL3_R(ARM_INST_SUB, rd, rn, rm)
#define ARM_SUB_I(rd, rn, imm)	_AL3_I(ARM_INST_SUB, rd, rn, imm)

#define ARM_STR_I(rt, rn, off)	(ARM_INST_STR_I | (rt) << 12 | (rn) << 16 \
				 | (off))

#define ARM_TST_R(rn, rm)	_AL3_R(ARM_INST_TST, 0, rn, rm)
#define ARM_TST_I(rn, imm)	_AL3_I(ARM_INST_TST, 0, rn, imm)

#define ARM_UDIV(rd, rn, rm)	(ARM_INST_UDIV | (rd) << 16 | (rn) | (rm) << 8)

/* RdLo in [15:12], RdHi in [19:16]; (rn) parenthesized for macro hygiene */
#define ARM_UMULL(rd_lo, rd_hi, rn, rm)	(ARM_INST_UMULL | (rd_hi) << 16 \
					 | (rd_lo) << 12 | (rm) << 8 | (rn))

#endif /* PFILTER_OPCODES_ARM_H */
diff --git a/arch/arm/plat-versatile/Kconfig b/arch/arm/plat-versatile/Kconfig index 52353beb369d..aa63f38f2848 100644 --- a/arch/arm/plat-versatile/Kconfig +++ b/arch/arm/plat-versatile/Kconfig | |||
@@ -11,7 +11,7 @@ config PLAT_VERSATILE_LEDS | |||
11 | depends on ARCH_REALVIEW || ARCH_VERSATILE | 11 | depends on ARCH_REALVIEW || ARCH_VERSATILE |
12 | 12 | ||
13 | config PLAT_VERSATILE_SCHED_CLOCK | 13 | config PLAT_VERSATILE_SCHED_CLOCK |
14 | def_bool y if !ARCH_INTEGRATOR_AP | 14 | def_bool y |
15 | select HAVE_SCHED_CLOCK | 15 | select HAVE_SCHED_CLOCK |
16 | 16 | ||
17 | endif | 17 | endif |
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index 8f3ccddbdafd..d89068f6d6e5 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/smp.h> | 18 | #include <linux/smp.h> |
19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
20 | 20 | ||
21 | #include <asm/cp15.h> | ||
21 | #include <asm/cputype.h> | 22 | #include <asm/cputype.h> |
22 | #include <asm/thread_notify.h> | 23 | #include <asm/thread_notify.h> |
23 | #include <asm/vfp.h> | 24 | #include <asm/vfp.h> |