Diffstat (limited to 'arch'): 182 files changed, 1936 insertions, 1050 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 49d993cee512..5a326f935858 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -175,6 +175,9 @@ config ARCH_HAS_CPUFREQ
 	  and that the relevant menu configurations are displayed for
 	  it.
 
+config ARCH_HAS_BANDGAP
+	bool
+
 config GENERIC_HWEIGHT
 	bool
 	default y
@@ -1087,6 +1090,20 @@ if !MMU
 source "arch/arm/Kconfig-nommu"
 endif
 
+config PJ4B_ERRATA_4742
+	bool "PJ4B Errata 4742: IDLE Wake Up Commands can Cause the CPU Core to Cease Operation"
+	depends on CPU_PJ4B && MACH_ARMADA_370
+	default y
+	help
+	  When coming out of either a Wait for Interrupt (WFI) or a Wait for
+	  Event (WFE) IDLE states, a specific timing sensitivity exists between
+	  the retiring WFI/WFE instructions and the newly issued subsequent
+	  instructions. This sensitivity can result in a CPU hang scenario.
+	  Workaround:
+	  The software must insert either a Data Synchronization Barrier (DSB)
+	  or Data Memory Barrier (DMB) command immediately after the WFI/WFE
+	  instruction
+
 config ARM_ERRATA_326103
 	bool "ARM errata: FSR write bit incorrect on a SWP to read-only memory"
 	depends on CPU_V6
@@ -1189,6 +1206,16 @@ config PL310_ERRATA_588369
 	  is not correctly implemented in PL310 as clean lines are not
 	  invalidated as a result of these operations.
 
+config ARM_ERRATA_643719
+	bool "ARM errata: LoUIS bit field in CLIDR register is incorrect"
+	depends on CPU_V7 && SMP
+	help
+	  This option enables the workaround for the 643719 Cortex-A9 (prior to
+	  r1p0) erratum. On affected cores the LoUIS bit field of the CLIDR
+	  register returns zero when it should return one. The workaround
+	  corrects this value, ensuring cache maintenance operations which use
+	  it behave as intended and avoiding data corruption.
+
 config ARM_ERRATA_720789
 	bool "ARM errata: TLBIASIDIS and TLBIMVAIS operations can broadcast a faulty ASID"
 	depends on CPU_V7
@@ -2006,7 +2033,7 @@ config XIP_PHYS_ADDR
 
 config KEXEC
 	bool "Kexec system call (EXPERIMENTAL)"
-	depends on (!SMP || HOTPLUG_CPU)
+	depends on (!SMP || PM_SLEEP_SMP)
 	help
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel. It is like a reboot
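The PJ4B_ERRATA_4742 help text above prescribes a DSB or DMB immediately after the retiring WFI/WFE instruction. A minimal sketch of that idle sequence in C with GCC inline assembly follows; the function name is made up for illustration, and the series applies the actual barrier in the PJ4B processor support code rather than in Kconfig.

	/*
	 * Hypothetical illustration of the PJ4B 4742 workaround described in
	 * the Kconfig help text above: a barrier must directly follow the
	 * WFI/WFE so the core cannot hang on wake-up. Not code from this patch.
	 */
	static inline void pj4b_do_idle(void)
	{
		asm volatile("wfi");			/* enter the IDLE state */
		asm volatile("dsb" ::: "memory");	/* barrier immediately after WFI */
	}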
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 1ba358ba16b8..de4e1cb2f14f 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -59,37 +59,43 @@ comma = ,
 # Note that GCC does not numerically define an architecture version
 # macro, but instead defines a whole series of macros which makes
 # testing for a specific architecture or later rather impossible.
-arch-$(CONFIG_CPU_32v7) :=-D__LINUX_ARM_ARCH__=7 $(call cc-option,-march=armv7-a,-march=armv5t -Wa$(comma)-march=armv7-a)
-arch-$(CONFIG_CPU_32v6) :=-D__LINUX_ARM_ARCH__=6 $(call cc-option,-march=armv6,-march=armv5t -Wa$(comma)-march=armv6)
+arch-$(CONFIG_CPU_32v7) =-D__LINUX_ARM_ARCH__=7 $(call cc-option,-march=armv7-a,-march=armv5t -Wa$(comma)-march=armv7-a)
+arch-$(CONFIG_CPU_32v6) =-D__LINUX_ARM_ARCH__=6 $(call cc-option,-march=armv6,-march=armv5t -Wa$(comma)-march=armv6)
 # Only override the compiler option if ARMv6. The ARMv6K extensions are
 # always available in ARMv7
 ifeq ($(CONFIG_CPU_32v6),y)
-arch-$(CONFIG_CPU_32v6K) :=-D__LINUX_ARM_ARCH__=6 $(call cc-option,-march=armv6k,-march=armv5t -Wa$(comma)-march=armv6k)
+arch-$(CONFIG_CPU_32v6K) =-D__LINUX_ARM_ARCH__=6 $(call cc-option,-march=armv6k,-march=armv5t -Wa$(comma)-march=armv6k)
 endif
-arch-$(CONFIG_CPU_32v5) :=-D__LINUX_ARM_ARCH__=5 $(call cc-option,-march=armv5te,-march=armv4t)
-arch-$(CONFIG_CPU_32v4T) :=-D__LINUX_ARM_ARCH__=4 -march=armv4t
-arch-$(CONFIG_CPU_32v4) :=-D__LINUX_ARM_ARCH__=4 -march=armv4
-arch-$(CONFIG_CPU_32v3) :=-D__LINUX_ARM_ARCH__=3 -march=armv3
+arch-$(CONFIG_CPU_32v5) =-D__LINUX_ARM_ARCH__=5 $(call cc-option,-march=armv5te,-march=armv4t)
+arch-$(CONFIG_CPU_32v4T) =-D__LINUX_ARM_ARCH__=4 -march=armv4t
+arch-$(CONFIG_CPU_32v4) =-D__LINUX_ARM_ARCH__=4 -march=armv4
+arch-$(CONFIG_CPU_32v3) =-D__LINUX_ARM_ARCH__=3 -march=armv3
+
+# Evaluate arch cc-option calls now
+arch-y := $(arch-y)
 
 # This selects how we optimise for the processor.
-tune-$(CONFIG_CPU_ARM7TDMI) :=-mtune=arm7tdmi
-tune-$(CONFIG_CPU_ARM720T) :=-mtune=arm7tdmi
-tune-$(CONFIG_CPU_ARM740T) :=-mtune=arm7tdmi
-tune-$(CONFIG_CPU_ARM9TDMI) :=-mtune=arm9tdmi
-tune-$(CONFIG_CPU_ARM940T) :=-mtune=arm9tdmi
-tune-$(CONFIG_CPU_ARM946E) :=$(call cc-option,-mtune=arm9e,-mtune=arm9tdmi)
-tune-$(CONFIG_CPU_ARM920T) :=-mtune=arm9tdmi
-tune-$(CONFIG_CPU_ARM922T) :=-mtune=arm9tdmi
-tune-$(CONFIG_CPU_ARM925T) :=-mtune=arm9tdmi
-tune-$(CONFIG_CPU_ARM926T) :=-mtune=arm9tdmi
-tune-$(CONFIG_CPU_FA526) :=-mtune=arm9tdmi
-tune-$(CONFIG_CPU_SA110) :=-mtune=strongarm110
-tune-$(CONFIG_CPU_SA1100) :=-mtune=strongarm1100
-tune-$(CONFIG_CPU_XSCALE) :=$(call cc-option,-mtune=xscale,-mtune=strongarm110) -Wa,-mcpu=xscale
-tune-$(CONFIG_CPU_XSC3) :=$(call cc-option,-mtune=xscale,-mtune=strongarm110) -Wa,-mcpu=xscale
-tune-$(CONFIG_CPU_FEROCEON) :=$(call cc-option,-mtune=marvell-f,-mtune=xscale)
-tune-$(CONFIG_CPU_V6) :=$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm)
-tune-$(CONFIG_CPU_V6K) :=$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm)
+tune-$(CONFIG_CPU_ARM7TDMI) =-mtune=arm7tdmi
+tune-$(CONFIG_CPU_ARM720T) =-mtune=arm7tdmi
+tune-$(CONFIG_CPU_ARM740T) =-mtune=arm7tdmi
+tune-$(CONFIG_CPU_ARM9TDMI) =-mtune=arm9tdmi
+tune-$(CONFIG_CPU_ARM940T) =-mtune=arm9tdmi
+tune-$(CONFIG_CPU_ARM946E) =$(call cc-option,-mtune=arm9e,-mtune=arm9tdmi)
+tune-$(CONFIG_CPU_ARM920T) =-mtune=arm9tdmi
+tune-$(CONFIG_CPU_ARM922T) =-mtune=arm9tdmi
+tune-$(CONFIG_CPU_ARM925T) =-mtune=arm9tdmi
+tune-$(CONFIG_CPU_ARM926T) =-mtune=arm9tdmi
+tune-$(CONFIG_CPU_FA526) =-mtune=arm9tdmi
+tune-$(CONFIG_CPU_SA110) =-mtune=strongarm110
+tune-$(CONFIG_CPU_SA1100) =-mtune=strongarm1100
+tune-$(CONFIG_CPU_XSCALE) =$(call cc-option,-mtune=xscale,-mtune=strongarm110) -Wa,-mcpu=xscale
+tune-$(CONFIG_CPU_XSC3) =$(call cc-option,-mtune=xscale,-mtune=strongarm110) -Wa,-mcpu=xscale
+tune-$(CONFIG_CPU_FEROCEON) =$(call cc-option,-mtune=marvell-f,-mtune=xscale)
+tune-$(CONFIG_CPU_V6) =$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm)
+tune-$(CONFIG_CPU_V6K) =$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm)
+
+# Evaluate tune cc-option calls now
+tune-y := $(tune-y)
 
 ifeq ($(CONFIG_AEABI),y)
 CFLAGS_ABI :=-mabi=aapcs-linux -mno-thumb-interwork
@@ -289,9 +295,10 @@ zImage Image xipImage bootpImage uImage: vmlinux
 zinstall uinstall install: vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
 
-%.dtb: scripts
+%.dtb: | scripts
 	$(Q)$(MAKE) $(build)=$(boot)/dts MACHINE=$(MACHINE) $(boot)/dts/$@
 
+PHONY += dtbs
 dtbs: scripts
 	$(Q)$(MAKE) $(build)=$(boot)/dts MACHINE=$(MACHINE) dtbs
 
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 3580d57ea218..120b83bfde20 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -116,7 +116,8 @@ targets := vmlinux vmlinux.lds \
 
 # Make sure files are removed during clean
 extra-y += piggy.gzip piggy.lzo piggy.lzma piggy.xzkern \
-	   lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs)
+	   lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs) \
+	   hyp-stub.S
 
 ifeq ($(CONFIG_FUNCTION_TRACER),y)
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
@@ -124,7 +125,7 @@ KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
 endif
 
 ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
-asflags-y := -Wa,-march=all -DZIMAGE
+asflags-y := -DZIMAGE
 
 # Supply kernel BSS size to the decompressor via a linker symbol.
 KBSS_SZ = $(shell $(CROSS_COMPILE)size $(obj)/../../../../vmlinux | \
diff --git a/arch/arm/boot/compressed/atags_to_fdt.c b/arch/arm/boot/compressed/atags_to_fdt.c
index aabc02a68482..d1153c8a765a 100644
--- a/arch/arm/boot/compressed/atags_to_fdt.c
+++ b/arch/arm/boot/compressed/atags_to_fdt.c
@@ -53,6 +53,17 @@ static const void *getprop(const void *fdt, const char *node_path,
 	return fdt_getprop(fdt, offset, property, len);
 }
 
+static uint32_t get_cell_size(const void *fdt)
+{
+	int len;
+	uint32_t cell_size = 1;
+	const uint32_t *size_len = getprop(fdt, "/", "#size-cells", &len);
+
+	if (size_len)
+		cell_size = fdt32_to_cpu(*size_len);
+	return cell_size;
+}
+
 static void merge_fdt_bootargs(void *fdt, const char *fdt_cmdline)
 {
 	char cmdline[COMMAND_LINE_SIZE];
@@ -95,9 +106,11 @@ static void merge_fdt_bootargs(void *fdt, const char *fdt_cmdline)
 int atags_to_fdt(void *atag_list, void *fdt, int total_space)
 {
 	struct tag *atag = atag_list;
-	uint32_t mem_reg_property[2 * NR_BANKS];
+	/* In the case of 64 bits memory size, need to reserve 2 cells for
+	 * address and size for each bank */
+	uint32_t mem_reg_property[2 * 2 * NR_BANKS];
 	int memcount = 0;
-	int ret;
+	int ret, memsize;
 
 	/* make sure we've got an aligned pointer */
 	if ((u32)atag_list & 0x3)
@@ -137,8 +150,25 @@ int atags_to_fdt(void *atag_list, void *fdt, int total_space)
 			continue;
 		if (!atag->u.mem.size)
 			continue;
-		mem_reg_property[memcount++] = cpu_to_fdt32(atag->u.mem.start);
-		mem_reg_property[memcount++] = cpu_to_fdt32(atag->u.mem.size);
+		memsize = get_cell_size(fdt);
+
+		if (memsize == 2) {
+			/* if memsize is 2, that means that
+			 * each data needs 2 cells of 32 bits,
+			 * so the data are 64 bits */
+			uint64_t *mem_reg_prop64 =
+				(uint64_t *)mem_reg_property;
+			mem_reg_prop64[memcount++] =
+				cpu_to_fdt64(atag->u.mem.start);
+			mem_reg_prop64[memcount++] =
+				cpu_to_fdt64(atag->u.mem.size);
+		} else {
+			mem_reg_property[memcount++] =
+				cpu_to_fdt32(atag->u.mem.start);
+			mem_reg_property[memcount++] =
+				cpu_to_fdt32(atag->u.mem.size);
+		}
+
 	} else if (atag->hdr.tag == ATAG_INITRD2) {
 		uint32_t initrd_start, initrd_size;
 		initrd_start = atag->u.initrd.start;
@@ -150,8 +180,10 @@ int atags_to_fdt(void *atag_list, void *fdt, int total_space)
 		}
 	}
 
-	if (memcount)
-		setprop(fdt, "/memory", "reg", mem_reg_property, 4*memcount);
+	if (memcount) {
+		setprop(fdt, "/memory", "reg", mem_reg_property,
+			4 * memcount * memsize);
+	}
 
 	return fdt_pack(fdt);
 }
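The atags_to_fdt() hunks above pick the element size of the /memory "reg" property from the root node's #size-cells: one 32-bit cell per value when it is 1, a two-cell big-endian 64-bit value when it is 2, and the final setprop() is sized as 4 * memcount * memsize bytes. A self-contained sketch of that packing rule, with hand-rolled byte swapping standing in for libfdt's cpu_to_fdt32()/cpu_to_fdt64() (the helper names below are assumptions, not code from the patch):

	#include <stdint.h>

	/*
	 * Illustrative stand-ins for libfdt's cpu_to_fdt32()/cpu_to_fdt64();
	 * they assume a little-endian host, as the ARM decompressor is.
	 */
	static uint32_t to_be32(uint32_t v)
	{
		return ((v & 0xffu) << 24) | ((v & 0xff00u) << 8) |
		       ((v >> 8) & 0xff00u) | (v >> 24);
	}

	static uint64_t to_be64(uint64_t v)
	{
		return ((uint64_t)to_be32((uint32_t)v) << 32) |
		       to_be32((uint32_t)(v >> 32));
	}

	/*
	 * Append one memory bank to a "reg" value buffer using the rule from
	 * the hunks above: with #size-cells == 2 each address/size occupies
	 * two 32-bit cells (a big-endian 64-bit number), otherwise one cell.
	 * Returns the new number of cells used.
	 */
	static int append_bank(uint32_t *reg, int cells, int size_cells,
			       uint64_t start, uint64_t size)
	{
		if (size_cells == 2) {
			uint64_t *reg64 = (uint64_t *)reg;

			reg64[cells / 2] = to_be64(start);
			reg64[cells / 2 + 1] = to_be64(size);
			return cells + 4;
		}
		reg[cells] = to_be32((uint32_t)start);
		reg[cells + 1] = to_be32((uint32_t)size);
		return cells + 2;
	}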
diff --git a/arch/arm/boot/compressed/debug.S b/arch/arm/boot/compressed/debug.S
index 6e8382d5b7a4..5392ee63338f 100644
--- a/arch/arm/boot/compressed/debug.S
+++ b/arch/arm/boot/compressed/debug.S
@@ -1,6 +1,8 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 
+#ifndef CONFIG_DEBUG_SEMIHOSTING
+
 #include CONFIG_DEBUG_LL_INCLUDE
 
 ENTRY(putc)
@@ -10,3 +12,29 @@ ENTRY(putc)
 	busyuart r3, r1
 	mov pc, lr
 ENDPROC(putc)
+
+#else
+
+ENTRY(putc)
+	adr r1, 1f
+	ldmia r1, {r2, r3}
+	add r2, r2, r1
+	ldr r1, [r2, r3]
+	strb r0, [r1]
+	mov r0, #0x03 @ SYS_WRITEC
+ ARM( svc #0x123456 )
+ THUMB( svc #0xab )
+	mov pc, lr
+	.align 2
+1:	.word _GLOBAL_OFFSET_TABLE_ - .
+	.word semi_writec_buf(GOT)
+ENDPROC(putc)
+
+	.bss
+	.global semi_writec_buf
+	.type semi_writec_buf, %object
+semi_writec_buf:
+	.space 4
+	.size semi_writec_buf, 4
+
+#endif
diff --git a/arch/arm/boot/compressed/head-sa1100.S b/arch/arm/boot/compressed/head-sa1100.S
index 6179d94dd5c6..3115e313d9f6 100644
--- a/arch/arm/boot/compressed/head-sa1100.S
+++ b/arch/arm/boot/compressed/head-sa1100.S
@@ -11,6 +11,7 @@
 #include <asm/mach-types.h>
 
 	.section ".start", "ax"
+	.arch armv4
 
 __SA1100_start:
 
diff --git a/arch/arm/boot/compressed/head-shark.S b/arch/arm/boot/compressed/head-shark.S
index 089c560e07f1..92b56897ed64 100644
--- a/arch/arm/boot/compressed/head-shark.S
+++ b/arch/arm/boot/compressed/head-shark.S
@@ -18,6 +18,7 @@
 
 	.section ".start", "ax"
 
+	.arch armv4
 	b __beginning
 
 __ofw_data:	.long 0 @ the number of memory blocks
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index fe4d9c3ad761..75189f13cf54 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -11,6 +11,7 @@
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 
+	.arch armv7-a
 /*
  * Debugging stuff
 *
@@ -141,7 +142,6 @@ start:
 	mov r7, r1 @ save architecture ID
 	mov r8, r2 @ save atags pointer
 
-#ifndef __ARM_ARCH_2__
 /*
  * Booting from Angel - need to enter SVC mode and disable
  * FIQs/IRQs (numeric definitions from angel arm.h source).
@@ -157,10 +157,6 @@ not_angel:
 	safe_svcmode_maskall r0
 	msr spsr_cxsf, r9 @ Save the CPU boot mode in
			  @ SPSR
-#else
-	teqp pc, #0x0c000003 @ turn off interrupts
-#endif
-
 /*
  * Note that some cache flushing and other stuff may
  * be needed here - is there an Angel SWI call for this?
@@ -182,7 +178,19 @@ not_angel:
 	ldr r4, =zreladdr
 #endif
 
-	bl cache_on
+	/*
+	 * Set up a page table only if it won't overwrite ourself.
+	 * That means r4 < pc && r4 - 16k page directory > &_end.
+	 * Given that r4 > &_end is most unfrequent, we add a rough
+	 * additional 1MB of room for a possible appended DTB.
+	 */
+	mov r0, pc
+	cmp r0, r4
+	ldrcc r0, LC0+32
+	addcc r0, r0, pc
+	cmpcc r4, r0
+	orrcc r4, r4, #1 @ remember we skipped cache_on
+	blcs cache_on
 
 restart:	adr r0, LC0
 	ldmia r0, {r1, r2, r3, r6, r10, r11, r12}
@@ -228,7 +236,7 @@ restart: adr r0, LC0
  * r0 = delta
  * r2 = BSS start
  * r3 = BSS end
- * r4 = final kernel address
+ * r4 = final kernel address (possibly with LSB set)
  * r5 = appended dtb size (still unknown)
  * r6 = _edata
 * r7 = architecture ID
@@ -276,6 +284,7 @@ restart: adr r0, LC0
 */
 	cmp r0, #1
 	sub r0, r4, #TEXT_OFFSET
+	bic r0, r0, #1
 	add r0, r0, #0x100
 	mov r1, r6
 	sub r2, sp, r6
@@ -322,12 +331,13 @@ dtb_check_done:
 
 /*
 * Check to see if we will overwrite ourselves.
- * r4 = final kernel address
+ * r4 = final kernel address (possibly with LSB set)
 * r9 = size of decompressed image
 * r10 = end of this image, including bss/stack/malloc space if non XIP
 * We basically want:
 *   r4 - 16k page directory >= r10 -> OK
 *   r4 + image length <= address of wont_overwrite -> OK
+ * Note: the possible LSB in r4 is harmless here.
 */
 	add r10, r10, #16384
 	cmp r4, r10
@@ -389,7 +399,8 @@ dtb_check_done:
 	add sp, sp, r6
 #endif
 
-	bl cache_clean_flush
+	tst r4, #1
+	bleq cache_clean_flush
 
 	adr r0, BSYM(restart)
 	add r0, r0, r6
@@ -401,7 +412,7 @@ wont_overwrite:
 * r0 = delta
 * r2 = BSS start
 * r3 = BSS end
- * r4 = kernel execution address
+ * r4 = kernel execution address (possibly with LSB set)
 * r5 = appended dtb size (0 if not present)
 * r7 = architecture ID
 * r8 = atags pointer
@@ -464,6 +475,15 @@ not_relocated: mov r0, #0
 	cmp r2, r3
 	blo 1b
 
+/*
+ * Did we skip the cache setup earlier?
+ * That is indicated by the LSB in r4.
+ * Do it now if so.
+ */
+	tst r4, #1
+	bic r4, r4, #1
+	blne cache_on
+
 /*
 * The C runtime environment should now be setup sufficiently.
 * Set up some pointers, and start decompressing.
@@ -512,6 +532,7 @@ LC0: .word LC0 @ r1
 	.word _got_start @ r11
 	.word _got_end @ ip
 	.word .L_user_stack_end @ sp
+	.word _end - restart + 16384 + 1024*1024
 	.size LC0, . - LC0
 
 #ifdef CONFIG_ARCH_RPC
@@ -805,8 +826,8 @@ call_cache_fn: adr r12, proc_types
 	.align 2
 	.type proc_types,#object
 proc_types:
-	.word 0x00000000 @ old ARM ID
-	.word 0x0000f000
+	.word 0x41000000 @ old ARM ID
+	.word 0xff00f000
 	mov pc, lr
 THUMB( nop )
 	mov pc, lr
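The head.S hunks above remember that cache_on was skipped by setting bit 0 of r4, which is safe because the final kernel address is word aligned; the flag is set with orrcc, tested with tst and stripped with bic before the address is used. The same low-bit tagging idea in plain C, illustrative only and not code from the patch:

	#include <stdint.h>

	/* Stash a one-bit flag in bit 0 of an aligned address, as the
	 * decompressor does with r4 to remember that cache_on was deferred. */
	static inline uintptr_t tag_addr(uintptr_t addr, int flag)
	{
		return addr | (flag ? 1u : 0u);		/* mirrors "orrcc r4, r4, #1" */
	}

	static inline int addr_is_tagged(uintptr_t addr)
	{
		return (int)(addr & 1u);		/* mirrors "tst r4, #1" */
	}

	static inline uintptr_t untag_addr(uintptr_t addr)
	{
		return addr & ~(uintptr_t)1;		/* mirrors "bic r4, r4, #1" */
	}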
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 1460d9b88adf..8e1248f01fab 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -409,8 +409,8 @@
 			ti,hwmods = "gpmc";
 			reg = <0x50000000 0x2000>;
 			interrupts = <100>;
-			num-cs = <7>;
-			num-waitpins = <2>;
+			gpmc,num-cs = <7>;
+			gpmc,num-waitpins = <2>;
 			#address-cells = <2>;
 			#size-cells = <1>;
 			status = "disabled";
diff --git a/arch/arm/boot/dts/armada-xp-gp.dts b/arch/arm/boot/dts/armada-xp-gp.dts
index 3ee63d128e27..76db557adbe7 100644
--- a/arch/arm/boot/dts/armada-xp-gp.dts
+++ b/arch/arm/boot/dts/armada-xp-gp.dts
@@ -39,8 +39,9 @@
 	};
 
 	soc {
-		ranges = <0 0 0xd0000000 0x100000
-			  0xf0000000 0 0xf0000000 0x1000000>;
+		ranges = <0 0 0xd0000000 0x100000	/* Internal registers 1MiB */
+			  0xe0000000 0 0xe0000000 0x8100000	/* PCIe */
+			  0xf0000000 0 0xf0000000 0x1000000	/* Device Bus, NOR 16MiB */>;
 
 		internal-regs {
 			serial@12000 {
diff --git a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
index 46b785064dd8..fdea75c73411 100644
--- a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
+++ b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
@@ -27,8 +27,9 @@
 	};
 
 	soc {
-		ranges = <0 0 0xd0000000 0x100000
-			  0xf0000000 0 0xf0000000 0x8000000>;
+		ranges = <0 0 0xd0000000 0x100000	/* Internal registers 1MiB */
+			  0xe0000000 0 0xe0000000 0x8100000	/* PCIe */
+			  0xf0000000 0 0xf0000000 0x8000000	/* Device Bus, NOR 128MiB */>;
 
 		internal-regs {
 			serial@12000 {
diff --git a/arch/arm/boot/dts/bcm11351.dtsi b/arch/arm/boot/dts/bcm11351.dtsi
index 41b2c6c33f09..5e48c85abc2f 100644
--- a/arch/arm/boot/dts/bcm11351.dtsi
+++ b/arch/arm/boot/dts/bcm11351.dtsi
@@ -47,10 +47,10 @@
 	};
 
 	L2: l2-cache {
-		compatible = "arm,pl310-cache";
+		compatible = "bcm,bcm11351-a2-pl310-cache";
 		reg = <0x3ff20000 0x1000>;
 		cache-unified;
 		cache-level = <2>;
 	};
 
 	timer@35006000 {
diff --git a/arch/arm/boot/dts/bcm2835.dtsi b/arch/arm/boot/dts/bcm2835.dtsi
index f0052dccf9a8..1e12aeff403b 100644
--- a/arch/arm/boot/dts/bcm2835.dtsi
+++ b/arch/arm/boot/dts/bcm2835.dtsi
@@ -44,6 +44,7 @@
 			reg = <0x7e201000 0x1000>;
 			interrupts = <2 25>;
 			clock-frequency = <3000000>;
+			arm,primecell-periphid = <0x00241011>;
 		};
 
 		gpio: gpio {
diff --git a/arch/arm/boot/dts/exynos5250-pinctrl.dtsi b/arch/arm/boot/dts/exynos5250-pinctrl.dtsi
index d1650fb34c0a..ded558bb0f3b 100644
--- a/arch/arm/boot/dts/exynos5250-pinctrl.dtsi
+++ b/arch/arm/boot/dts/exynos5250-pinctrl.dtsi
@@ -763,7 +763,7 @@
 		};
 	};
 
-	pinctrl@03680000 {
+	pinctrl@03860000 {
 		gpz: gpz {
 			gpio-controller;
 			#gpio-cells = <2>;
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index 0673524238a6..fc9fb3d526e2 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -161,9 +161,9 @@
 		interrupts = <0 50 0>;
 	};
 
-	pinctrl_3: pinctrl@03680000 {
+	pinctrl_3: pinctrl@03860000 {
 		compatible = "samsung,exynos5250-pinctrl";
-		reg = <0x0368000 0x1000>;
+		reg = <0x03860000 0x1000>;
 		interrupts = <0 47 0>;
 	};
 
diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
index d2550e0bca24..701153992c69 100644
--- a/arch/arm/boot/dts/imx25.dtsi
+++ b/arch/arm/boot/dts/imx25.dtsi
@@ -141,8 +141,8 @@
 			#size-cells = <0>;
 			compatible = "fsl,imx25-cspi", "fsl,imx35-cspi";
 			reg = <0x43fa4000 0x4000>;
-			clocks = <&clks 62>;
-			clock-names = "ipg";
+			clocks = <&clks 62>, <&clks 62>;
+			clock-names = "ipg", "per";
 			interrupts = <14>;
 			status = "disabled";
 		};
@@ -182,8 +182,8 @@
 			compatible = "fsl,imx25-cspi", "fsl,imx35-cspi";
 			reg = <0x50004000 0x4000>;
 			interrupts = <0>;
-			clocks = <&clks 80>;
-			clock-names = "ipg";
+			clocks = <&clks 80>, <&clks 80>;
+			clock-names = "ipg", "per";
 			status = "disabled";
 		};
 
@@ -210,8 +210,8 @@
 			#size-cells = <0>;
 			compatible = "fsl,imx25-cspi", "fsl,imx35-cspi";
 			reg = <0x50010000 0x4000>;
-			clocks = <&clks 79>;
-			clock-names = "ipg";
+			clocks = <&clks 79>, <&clks 79>;
+			clock-names = "ipg", "per";
 			interrupts = <13>;
 			status = "disabled";
 		};
diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
index ff4bd4873edf..75bd11386516 100644
--- a/arch/arm/boot/dts/imx27.dtsi
+++ b/arch/arm/boot/dts/imx27.dtsi
@@ -131,7 +131,7 @@
 			compatible = "fsl,imx27-cspi";
 			reg = <0x1000e000 0x1000>;
 			interrupts = <16>;
-			clocks = <&clks 53>, <&clks 0>;
+			clocks = <&clks 53>, <&clks 53>;
 			clock-names = "ipg", "per";
 			status = "disabled";
 		};
@@ -142,7 +142,7 @@
 			compatible = "fsl,imx27-cspi";
 			reg = <0x1000f000 0x1000>;
 			interrupts = <15>;
-			clocks = <&clks 52>, <&clks 0>;
+			clocks = <&clks 52>, <&clks 52>;
 			clock-names = "ipg", "per";
 			status = "disabled";
 		};
@@ -223,7 +223,7 @@
 			compatible = "fsl,imx27-cspi";
 			reg = <0x10017000 0x1000>;
 			interrupts = <6>;
-			clocks = <&clks 51>, <&clks 0>;
+			clocks = <&clks 51>, <&clks 51>;
 			clock-names = "ipg", "per";
 			status = "disabled";
 		};
diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi
index 21bb786c5b31..53fdde69bbf4 100644
--- a/arch/arm/boot/dts/imx51.dtsi
+++ b/arch/arm/boot/dts/imx51.dtsi
@@ -631,7 +631,7 @@
 			compatible = "fsl,imx51-cspi", "fsl,imx35-cspi";
 			reg = <0x83fc0000 0x4000>;
 			interrupts = <38>;
-			clocks = <&clks 55>, <&clks 0>;
+			clocks = <&clks 55>, <&clks 55>;
 			clock-names = "ipg", "per";
 			status = "disabled";
 		};
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index 845982eaac22..eb83aa039b8b 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -714,7 +714,7 @@
 			compatible = "fsl,imx53-cspi", "fsl,imx35-cspi";
 			reg = <0x63fc0000 0x4000>;
 			interrupts = <38>;
-			clocks = <&clks 55>, <&clks 0>;
+			clocks = <&clks 55>, <&clks 55>;
 			clock-names = "ipg", "per";
 			status = "disabled";
 		};
diff --git a/arch/arm/boot/dts/omap4-panda-common.dtsi b/arch/arm/boot/dts/omap4-panda-common.dtsi
index 03bd60deb52b..eeb734e25709 100644
--- a/arch/arm/boot/dts/omap4-panda-common.dtsi
+++ b/arch/arm/boot/dts/omap4-panda-common.dtsi
@@ -56,9 +56,23 @@
 	};
 };
 
+&omap4_pmx_wkup {
+	pinctrl-names = "default";
+	pinctrl-0 = <
+			&twl6030_wkup_pins
+	>;
+
+	twl6030_wkup_pins: pinmux_twl6030_wkup_pins {
+		pinctrl-single,pins = <
+			0x14 0x2	/* fref_clk0_out.sys_drm_msecure OUTPUT | MODE2 */
+		>;
+	};
+};
+
 &omap4_pmx_core {
 	pinctrl-names = "default";
 	pinctrl-0 = <
+			&twl6030_pins
 			&twl6040_pins
 			&mcpdm_pins
 			&mcbsp1_pins
@@ -66,6 +80,12 @@
 			&tpd12s015_pins
 	>;
 
+	twl6030_pins: pinmux_twl6030_pins {
+		pinctrl-single,pins = <
+			0x15e 0x4118	/* sys_nirq1.sys_nirq1 OMAP_WAKEUP_EN | INPUT_PULLUP | MODE0 */
+		>;
+	};
+
 	twl6040_pins: pinmux_twl6040_pins {
 		pinctrl-single,pins = <
 			0xe0 0x3	/* hdq_sio.gpio_127 OUTPUT | MODE3 */
diff --git a/arch/arm/boot/dts/omap4-sdp.dts b/arch/arm/boot/dts/omap4-sdp.dts
index a35d9cd58063..98505a2ef162 100644
--- a/arch/arm/boot/dts/omap4-sdp.dts
+++ b/arch/arm/boot/dts/omap4-sdp.dts
@@ -142,9 +142,23 @@
 	};
 };
 
+&omap4_pmx_wkup {
+	pinctrl-names = "default";
+	pinctrl-0 = <
+			&twl6030_wkup_pins
+	>;
+
+	twl6030_wkup_pins: pinmux_twl6030_wkup_pins {
+		pinctrl-single,pins = <
+			0x14 0x2	/* fref_clk0_out.sys_drm_msecure OUTPUT | MODE2 */
+		>;
+	};
+};
+
 &omap4_pmx_core {
 	pinctrl-names = "default";
 	pinctrl-0 = <
+			&twl6030_pins
 			&twl6040_pins
 			&mcpdm_pins
 			&dmic_pins
@@ -179,6 +193,12 @@
 	>;
 };
 
+	twl6030_pins: pinmux_twl6030_pins {
+		pinctrl-single,pins = <
+			0x15e 0x4118	/* sys_nirq1.sys_nirq1 OMAP_WAKEUP_EN | INPUT_PULLUP | MODE0 */
+		>;
+	};
+
 	twl6040_pins: pinmux_twl6040_pins {
 		pinctrl-single,pins = <
 			0xe0 0x3	/* hdq_sio.gpio_127 OUTPUT | MODE3 */
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 3dd7ff825828..635cae283011 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -538,6 +538,7 @@
 			interrupts = <0 41 0x4>;
 			ti,hwmods = "timer5";
 			ti,timer-dsp;
+			ti,timer-pwm;
 		};
 
 		timer6: timer@4013a000 {
@@ -574,6 +575,7 @@
 			reg = <0x4803e000 0x80>;
 			interrupts = <0 45 0x4>;
 			ti,hwmods = "timer9";
+			ti,timer-pwm;
 		};
 
 		timer10: timer@48086000 {
@@ -581,6 +583,7 @@
 			reg = <0x48086000 0x80>;
 			interrupts = <0 46 0x4>;
 			ti,hwmods = "timer10";
+			ti,timer-pwm;
 		};
 
 		timer11: timer@48088000 {
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index bff71388e72a..17d0ae8672fa 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -320,9 +320,7 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
 }
 
 #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
-static inline void flush_kernel_dcache_page(struct page *page)
-{
-}
+extern void flush_kernel_dcache_page(struct page *);
 
 #define flush_dcache_mmap_lock(mapping) \
 	spin_lock_irq(&(mapping)->tree_lock)
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 7652712d1d14..dba62cb1ad08 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -32,6 +32,8 @@
 
 #define MPIDR_HWID_BITMASK 0xFFFFFF
 
+#define MPIDR_INVALID (~MPIDR_HWID_BITMASK)
+
 #define MPIDR_LEVEL_BITS 8
 #define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
 
diff --git a/arch/arm/include/asm/div64.h b/arch/arm/include/asm/div64.h
index fe92ccf1d0b0..191ada6e4d2d 100644
--- a/arch/arm/include/asm/div64.h
+++ b/arch/arm/include/asm/div64.h
@@ -46,7 +46,7 @@
 	__rem;							\
 })
 
-#if __GNUC__ < 4
+#if __GNUC__ < 4 || !defined(CONFIG_AEABI)
 
 /*
  * gcc versions earlier than 4.0 are simply too problematic for the
diff --git a/arch/arm/include/asm/glue-proc.h b/arch/arm/include/asm/glue-proc.h
index ac1dd54724b6..8017e94acc5e 100644
--- a/arch/arm/include/asm/glue-proc.h
+++ b/arch/arm/include/asm/glue-proc.h
@@ -230,6 +230,15 @@
 # endif
 #endif
 
+#ifdef CONFIG_CPU_PJ4B
+# ifdef CPU_NAME
+#  undef MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_pj4b
+# endif
+#endif
+
 #ifndef MULTI_CPU
 #define cpu_proc_init __glue(CPU_NAME,_proc_init)
 #define cpu_proc_fin __glue(CPU_NAME,_proc_fin)
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 652b56086de7..d070741b2b37 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -130,16 +130,16 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
 */
 extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long,
 	size_t, unsigned int, void *);
-extern void __iomem *__arm_ioremap_caller(unsigned long, size_t, unsigned int,
+extern void __iomem *__arm_ioremap_caller(phys_addr_t, size_t, unsigned int,
 	void *);
 
 extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
-extern void __iomem *__arm_ioremap(unsigned long, size_t, unsigned int);
-extern void __iomem *__arm_ioremap_exec(unsigned long, size_t, bool cached);
+extern void __iomem *__arm_ioremap(phys_addr_t, size_t, unsigned int);
+extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached);
 extern void __iounmap(volatile void __iomem *addr);
 extern void __arm_iounmap(volatile void __iomem *addr);
 
-extern void __iomem * (*arch_ioremap_caller)(unsigned long, size_t,
+extern void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
 	unsigned int, void *);
 extern void (*arch_iounmap)(volatile void __iomem *);
 
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index a7b85e0d0cc1..b5792b7fd8d3 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -18,6 +18,7 @@
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
 #include <asm/proc-fns.h>
+#include <asm/smp_plat.h>
 #include <asm-generic/mm_hooks.h>
 
 void __check_vmalloc_seq(struct mm_struct *mm);
@@ -27,7 +28,15 @@ void __check_vmalloc_seq(struct mm_struct *mm);
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
 #define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; })
 
-DECLARE_PER_CPU(atomic64_t, active_asids);
+#ifdef CONFIG_ARM_ERRATA_798181
+void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
+			     cpumask_t *mask);
+#else /* !CONFIG_ARM_ERRATA_798181 */
+static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
+					   cpumask_t *mask)
+{
+}
+#endif /* CONFIG_ARM_ERRATA_798181 */
 
 #else	/* !CONFIG_CPU_HAS_ASID */
 
@@ -98,12 +107,16 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 #ifdef CONFIG_MMU
 	unsigned int cpu = smp_processor_id();
 
-#ifdef CONFIG_SMP
-	/* check for possible thread migration */
-	if (!cpumask_empty(mm_cpumask(next)) &&
+	/*
+	 * __sync_icache_dcache doesn't broadcast the I-cache invalidation,
+	 * so check for possible thread migration and invalidate the I-cache
+	 * if we're new to this CPU.
+	 */
+	if (cache_ops_need_broadcast() &&
+	    !cpumask_empty(mm_cpumask(next)) &&
 	    !cpumask_test_cpu(cpu, mm_cpumask(next)))
 		__flush_icache_all();
-#endif
+
 	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
 		check_and_switch_context(next, tsk);
 		if (cache_is_vivt())
diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h
index 968c0a14e0a3..209e6504922e 100644
--- a/arch/arm/include/asm/percpu.h
+++ b/arch/arm/include/asm/percpu.h
@@ -30,8 +30,15 @@ static inline void set_my_cpu_offset(unsigned long off)
 static inline unsigned long __my_cpu_offset(void)
 {
 	unsigned long off;
-	/* Read TPIDRPRW */
-	asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : : "memory");
+	register unsigned long *sp asm ("sp");
+
+	/*
+	 * Read TPIDRPRW.
+	 * We want to allow caching the value, so avoid using volatile and
+	 * instead use a fake stack read to hazard against barrier().
+	 */
+	asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : "Q" (*sp));
+
 	return off;
 }
 #define __my_cpu_offset __my_cpu_offset()
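The new __my_cpu_offset() above drops the volatile qualifier and the "memory" clobber, and instead passes a dummy memory operand on the stack pointer: the TPIDRPRW read may now be cached and reused by the compiler, but it still cannot be hoisted across barrier(), which clobbers memory. A reduced sketch of that constraint trick (GCC targeting ARM is assumed; "Q" is GCC's single-base-register memory constraint):

	/*
	 * Sketch of the "fake dependency" pattern used in __my_cpu_offset():
	 * the asm is not volatile, so it can be CSE'd, but the "Q"(*sp) input
	 * makes it look like a stack read, which barrier() is seen to clobber.
	 */
	static inline unsigned long read_tpidrprw(void)
	{
		unsigned long off;
		register unsigned long *sp asm("sp");

		asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : "Q" (*sp));
		return off;
	}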
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index aaa61b6f50ff..e78983202737 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -49,7 +49,7 @@ static inline int cache_ops_need_broadcast(void)
 /*
  * Logical CPU mapping.
  */
-extern int __cpu_logical_map[];
+extern u32 __cpu_logical_map[];
 #define cpu_logical_map(cpu) __cpu_logical_map[cpu]
 /*
  * Retrieve logical cpu index corresponding to a given MPIDR[23:0]
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 6220e9fdf4c7..f8b8965666e9 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -97,19 +97,22 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 
 static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
-	unsigned long tmp;
+	unsigned long contended, res;
 	u32 slock;
 
-	__asm__ __volatile__(
-"	ldrex	%0, [%2]\n"
-"	subs	%1, %0, %0, ror #16\n"
-"	addeq	%0, %0, %3\n"
-"	strexeq	%1, %0, [%2]"
-	: "=&r" (slock), "=&r" (tmp)
-	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
-	: "cc");
-
-	if (tmp == 0) {
+	do {
+		__asm__ __volatile__(
+		"	ldrex	%0, [%3]\n"
+		"	mov	%2, #0\n"
+		"	subs	%1, %0, %0, ror #16\n"
+		"	addeq	%0, %0, %4\n"
+		"	strexeq	%2, %0, [%3]"
+		: "=&r" (slock), "=&r" (contended), "=r" (res)
+		: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
+		: "cc");
+	} while (res);
+
+	if (!contended) {
 		smp_mb();
 		return 1;
 	} else {
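The arch_spin_trylock() rewrite above wraps the ldrex/strex sequence in a do/while so that a failed store-exclusive (res != 0) retries the whole read-modify-write instead of being misreported as contention. The shape of that retry loop, expressed with generic C11 atomics rather than ARM exclusives (purely illustrative; the struct and helper below are assumptions, not the kernel's arch_spinlock_t):

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdint.h>

	/* A 16-bit owner/next ticket pair packed into one 32-bit word,
	 * loosely mirroring the ARM ticket lock layout (TICKET_SHIFT == 16). */
	struct ticket_lock {
		_Atomic uint32_t slock;
	};

	static bool ticket_trylock(struct ticket_lock *lock)
	{
		for (;;) {
			uint32_t old = atomic_load_explicit(&lock->slock,
							    memory_order_relaxed);
			uint16_t owner = old & 0xffff;
			uint16_t next = old >> 16;

			if (owner != next)
				return false;	/* genuinely contended */

			/* Take the next ticket. Retrying only when the CAS
			 * itself fails is the analogue of looping while strex
			 * reports a failed store-exclusive in the hunk above. */
			if (atomic_compare_exchange_weak_explicit(&lock->slock,
					&old, old + (1u << 16),
					memory_order_acquire,
					memory_order_relaxed))
				return true;
		}
	}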
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 1995d1a84060..214d4158089a 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -58,7 +58,7 @@ struct thread_info {
 	struct cpu_context_save cpu_context;	/* cpu context */
 	__u32 syscall;		/* syscall number */
 	__u8 used_cp[16];	/* thread used copro */
-	unsigned long tp_value;
+	unsigned long tp_value[2];	/* TLS registers */
 #ifdef CONFIG_CRUNCH
 	struct crunch_state crunchstate;
 #endif
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 99a19512ee26..bdf2b8458ec1 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -33,18 +33,6 @@
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 
-/*
- * We need to delay page freeing for SMP as other CPUs can access pages
- * which have been removed but not yet had their TLB entries invalidated.
- * Also, as ARMv7 speculative prefetch can drag new entries into the TLB,
- * we need to apply this same delaying tactic to ensure correct operation.
- */
-#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7)
-#define tlb_fast_mode(tlb)	0
-#else
-#define tlb_fast_mode(tlb)	1
-#endif
-
 #define MMU_GATHER_BUNDLE 8
 
 /*
@@ -112,12 +100,10 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb)
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
 	tlb_flush(tlb);
-	if (!tlb_fast_mode(tlb)) {
-		free_pages_and_swap_cache(tlb->pages, tlb->nr);
-		tlb->nr = 0;
-		if (tlb->pages == tlb->local)
-			__tlb_alloc_page(tlb);
-	}
+	free_pages_and_swap_cache(tlb->pages, tlb->nr);
+	tlb->nr = 0;
+	if (tlb->pages == tlb->local)
+		__tlb_alloc_page(tlb);
 }
 
 static inline void
@@ -178,11 +164,6 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 
 static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
-	if (tlb_fast_mode(tlb)) {
-		free_page_and_swap_cache(page);
-		return 1; /* avoid calling tlb_flush_mmu */
-	}
-
 	tlb->pages[tlb->nr++] = page;
 	VM_BUG_ON(tlb->nr > tlb->max);
 	return tlb->max - tlb->nr;
diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
index 73409e6c0251..83259b873333 100644
--- a/arch/arm/include/asm/tls.h
+++ b/arch/arm/include/asm/tls.h
@@ -2,27 +2,30 @@
 #define __ASMARM_TLS_H
 
 #ifdef __ASSEMBLY__
-	.macro set_tls_none, tp, tmp1, tmp2
+#include <asm/asm-offsets.h>
+	.macro switch_tls_none, base, tp, tpuser, tmp1, tmp2
 	.endm
 
-	.macro set_tls_v6k, tp, tmp1, tmp2
+	.macro switch_tls_v6k, base, tp, tpuser, tmp1, tmp2
+	mrc p15, 0, \tmp2, c13, c0, 2 @ get the user r/w register
 	mcr p15, 0, \tp, c13, c0, 3 @ set TLS register
-	mov \tmp1, #0
-	mcr p15, 0, \tmp1, c13, c0, 2 @ clear user r/w TLS register
+	mcr p15, 0, \tpuser, c13, c0, 2 @ and the user r/w register
+	str \tmp2, [\base, #TI_TP_VALUE + 4] @ save it
 	.endm
 
-	.macro set_tls_v6, tp, tmp1, tmp2
+	.macro switch_tls_v6, base, tp, tpuser, tmp1, tmp2
 	ldr \tmp1, =elf_hwcap
 	ldr \tmp1, [\tmp1, #0]
 	mov \tmp2, #0xffff0fff
 	tst \tmp1, #HWCAP_TLS @ hardware TLS available?
-	mcrne p15, 0, \tp, c13, c0, 3 @ yes, set TLS register
-	movne \tmp1, #0
-	mcrne p15, 0, \tmp1, c13, c0, 2 @ clear user r/w TLS register
 	streq \tp, [\tmp2, #-15] @ set TLS value at 0xffff0ff0
+	mrcne p15, 0, \tmp2, c13, c0, 2 @ get the user r/w register
+	mcrne p15, 0, \tp, c13, c0, 3 @ yes, set TLS register
+	mcrne p15, 0, \tpuser, c13, c0, 2 @ set user r/w register
+	strne \tmp2, [\base, #TI_TP_VALUE + 4] @ save it
 	.endm
 
-	.macro set_tls_software, tp, tmp1, tmp2
+	.macro switch_tls_software, base, tp, tpuser, tmp1, tmp2
 	mov \tmp1, #0xffff0fff
 	str \tp, [\tmp1, #-15] @ set TLS value at 0xffff0ff0
 	.endm
@@ -31,19 +34,30 @@
 #ifdef CONFIG_TLS_REG_EMUL
 #define tls_emu 1
 #define has_tls_reg 1
-#define set_tls set_tls_none
+#define switch_tls switch_tls_none
 #elif defined(CONFIG_CPU_V6)
 #define tls_emu 0
 #define has_tls_reg (elf_hwcap & HWCAP_TLS)
-#define set_tls set_tls_v6
+#define switch_tls switch_tls_v6
 #elif defined(CONFIG_CPU_32v6K)
 #define tls_emu 0
 #define has_tls_reg 1
-#define set_tls set_tls_v6k
+#define switch_tls switch_tls_v6k
 #else
 #define tls_emu 0
 #define has_tls_reg 0
-#define set_tls set_tls_software
+#define switch_tls switch_tls_software
 #endif
 
+#ifndef __ASSEMBLY__
+static inline unsigned long get_tpuser(void)
+{
+	unsigned long reg = 0;
+
+	if (has_tls_reg && !tls_emu)
+		__asm__("mrc p15, 0, %0, c13, c0, 2" : "=r" (reg));
+
+	return reg;
+}
+#endif
 #endif	/* __ASMARM_TLS_H */
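The tls.h and __switch_to changes above start saving and restoring the user read/write TLS register (TPIDRURW, c13/c0/2) alongside the read-only one (TPIDRURO, c13/c0/3), which is why thread_info's tp_value grows into a two-entry array. A small sketch of matching C accessors, modelled on the get_tpuser() helper the patch adds; the writer below is an assumed companion for illustration, not code from the patch:

	/* Read the user read/write TLS register (TPIDRURW), as get_tpuser() does. */
	static inline unsigned long read_tpidrurw(void)
	{
		unsigned long reg;

		asm("mrc p15, 0, %0, c13, c0, 2" : "=r" (reg));
		return reg;
	}

	/* Assumed companion writer: restore a task's saved TPIDRURW value,
	 * which is what switch_tls_v6k does with its \tpuser argument. */
	static inline void write_tpidrurw(unsigned long val)
	{
		asm volatile("mcr p15, 0, %0, c13, c0, 2" : : "r" (val));
	}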
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index 5af04f6daa33..0905502bee15 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -82,7 +82,7 @@ void __init arm_dt_init_cpu_maps(void)
 	u32 i, j, cpuidx = 1;
 	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
 
-	u32 tmp_map[NR_CPUS] = { [0 ... NR_CPUS-1] = UINT_MAX };
+	u32 tmp_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
 	bool bootcpu_valid = false;
 	cpus = of_find_node_by_path("/cpus");
 
@@ -92,6 +92,9 @@ void __init arm_dt_init_cpu_maps(void)
 	for_each_child_of_node(cpus, cpu) {
 		u32 hwid;
 
+		if (of_node_cmp(cpu->type, "cpu"))
+			continue;
+
 		pr_debug(" * %s...\n", cpu->full_name);
 		/*
 		 * A device tree containing CPU nodes with missing "reg"
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 582b405befc5..a39cfc2a1f90 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -685,15 +685,16 @@ ENTRY(__switch_to)
 UNWIND(.fnstart )
 UNWIND(.cantunwind )
 	add ip, r1, #TI_CPU_SAVE
-	ldr r3, [r2, #TI_TP_VALUE]
 ARM(	stmia ip!, {r4 - sl, fp, sp, lr} ) @ Store most regs on stack
 THUMB(	stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack
 THUMB(	str sp, [ip], #4 )
 THUMB(	str lr, [ip], #4 )
+	ldr r4, [r2, #TI_TP_VALUE]
+	ldr r5, [r2, #TI_TP_VALUE + 4]
 #ifdef CONFIG_CPU_USE_DOMAINS
 	ldr r6, [r2, #TI_CPU_DOMAIN]
 #endif
-	set_tls r3, r4, r5
+	switch_tls r1, r4, r5, r3, r7
 #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
 	ldr r7, [r2, #TI_TASK]
 	ldr r8, =__stack_chk_guard
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S index bc5bc0a97131..4bc816a74a2e 100644 --- a/arch/arm/kernel/entry-common.S +++ b/arch/arm/kernel/entry-common.S | |||
@@ -362,6 +362,16 @@ ENTRY(vector_swi) | |||
362 | str r0, [sp, #S_OLD_R0] @ Save OLD_R0 | 362 | str r0, [sp, #S_OLD_R0] @ Save OLD_R0 |
363 | zero_fp | 363 | zero_fp |
364 | 364 | ||
365 | #ifdef CONFIG_ALIGNMENT_TRAP | ||
366 | ldr ip, __cr_alignment | ||
367 | ldr ip, [ip] | ||
368 | mcr p15, 0, ip, c1, c0 @ update control register | ||
369 | #endif | ||
370 | |||
371 | enable_irq | ||
372 | ct_user_exit | ||
373 | get_thread_info tsk | ||
374 | |||
365 | /* | 375 | /* |
366 | * Get the system call number. | 376 | * Get the system call number. |
367 | */ | 377 | */ |
@@ -375,9 +385,9 @@ ENTRY(vector_swi) | |||
375 | #ifdef CONFIG_ARM_THUMB | 385 | #ifdef CONFIG_ARM_THUMB |
376 | tst r8, #PSR_T_BIT | 386 | tst r8, #PSR_T_BIT |
377 | movne r10, #0 @ no thumb OABI emulation | 387 | movne r10, #0 @ no thumb OABI emulation |
378 | ldreq r10, [lr, #-4] @ get SWI instruction | 388 | USER( ldreq r10, [lr, #-4] ) @ get SWI instruction |
379 | #else | 389 | #else |
380 | ldr r10, [lr, #-4] @ get SWI instruction | 390 | USER( ldr r10, [lr, #-4] ) @ get SWI instruction |
381 | #endif | 391 | #endif |
382 | #ifdef CONFIG_CPU_ENDIAN_BE8 | 392 | #ifdef CONFIG_CPU_ENDIAN_BE8 |
383 | rev r10, r10 @ little endian instruction | 393 | rev r10, r10 @ little endian instruction |
@@ -392,22 +402,13 @@ ENTRY(vector_swi) | |||
392 | /* Legacy ABI only, possibly thumb mode. */ | 402 | /* Legacy ABI only, possibly thumb mode. */ |
393 | tst r8, #PSR_T_BIT @ this is SPSR from save_user_regs | 403 | tst r8, #PSR_T_BIT @ this is SPSR from save_user_regs |
394 | addne scno, r7, #__NR_SYSCALL_BASE @ put OS number in | 404 | addne scno, r7, #__NR_SYSCALL_BASE @ put OS number in |
395 | ldreq scno, [lr, #-4] | 405 | USER( ldreq scno, [lr, #-4] ) |
396 | 406 | ||
397 | #else | 407 | #else |
398 | /* Legacy ABI only. */ | 408 | /* Legacy ABI only. */ |
399 | ldr scno, [lr, #-4] @ get SWI instruction | 409 | USER( ldr scno, [lr, #-4] ) @ get SWI instruction |
400 | #endif | 410 | #endif |
401 | 411 | ||
402 | #ifdef CONFIG_ALIGNMENT_TRAP | ||
403 | ldr ip, __cr_alignment | ||
404 | ldr ip, [ip] | ||
405 | mcr p15, 0, ip, c1, c0 @ update control register | ||
406 | #endif | ||
407 | enable_irq | ||
408 | ct_user_exit | ||
409 | |||
410 | get_thread_info tsk | ||
411 | adr tbl, sys_call_table @ load syscall table pointer | 412 | adr tbl, sys_call_table @ load syscall table pointer |
412 | 413 | ||
413 | #if defined(CONFIG_OABI_COMPAT) | 414 | #if defined(CONFIG_OABI_COMPAT) |
@@ -442,6 +443,21 @@ local_restart: | |||
442 | eor r0, scno, #__NR_SYSCALL_BASE @ put OS number back | 443 | eor r0, scno, #__NR_SYSCALL_BASE @ put OS number back |
443 | bcs arm_syscall | 444 | bcs arm_syscall |
444 | b sys_ni_syscall @ not private func | 445 | b sys_ni_syscall @ not private func |
446 | |||
447 | #if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI) | ||
448 | /* | ||
449 | * We failed to handle a fault trying to access the page | ||
450 | * containing the swi instruction, but we're not really in a | ||
451 | * position to return -EFAULT. Instead, return back to the | ||
452 | * instruction and re-enter the user fault handling path trying | ||
453 | * to page it in. This will likely result in sending SEGV to the | ||
454 | * current task. | ||
455 | */ | ||
456 | 9001: | ||
457 | sub lr, lr, #4 | ||
458 | str lr, [sp, #S_PC] | ||
459 | b ret_fast_syscall | ||
460 | #endif | ||
445 | ENDPROC(vector_swi) | 461 | ENDPROC(vector_swi) |
446 | 462 | ||
447 | /* | 463 | /* |
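The USER() wrappers above hook the loads of the SWI word into the exception table, with 9001 as the fixup label. In rough C terms the fixup amounts to the following; this is a sketch only, using the generic get_user() interface rather than the real assembly path:

/* Conceptual C rendering of the new 9001 fixup: if fetching the SWI
 * word from the user PC faults, rewind the saved PC so the instruction
 * is retried from userspace and the usual user fault handling (likely
 * ending in SIGSEGV) runs there instead. */
#include <linux/uaccess.h>
#include <asm/ptrace.h>

static int fetch_swi_word(struct pt_regs *regs, u32 *insn)
{
	if (get_user(*insn, (u32 __user *)(regs->ARM_pc - 4))) {
		regs->ARM_pc -= 4;	/* re-execute the SWI on return */
		return -EFAULT;
	}
	return 0;
}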
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c index 8ef8c9337809..4fb074c446bf 100644 --- a/arch/arm/kernel/machine_kexec.c +++ b/arch/arm/kernel/machine_kexec.c | |||
@@ -134,6 +134,10 @@ void machine_kexec(struct kimage *image) | |||
134 | unsigned long reboot_code_buffer_phys; | 134 | unsigned long reboot_code_buffer_phys; |
135 | void *reboot_code_buffer; | 135 | void *reboot_code_buffer; |
136 | 136 | ||
137 | if (num_online_cpus() > 1) { | ||
138 | pr_err("kexec: error: multiple CPUs still online\n"); | ||
139 | return; | ||
140 | } | ||
137 | 141 | ||
138 | page_list = image->head & PAGE_MASK; | 142 | page_list = image->head & PAGE_MASK; |
139 | 143 | ||
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 8c3094d0f7b7..d9f5cd4e533f 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c | |||
@@ -569,6 +569,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) | |||
569 | return; | 569 | return; |
570 | } | 570 | } |
571 | 571 | ||
572 | perf_callchain_store(entry, regs->ARM_pc); | ||
572 | tail = (struct frame_tail __user *)regs->ARM_fp - 1; | 573 | tail = (struct frame_tail __user *)regs->ARM_fp - 1; |
573 | 574 | ||
574 | while ((entry->nr < PERF_MAX_STACK_DEPTH) && | 575 | while ((entry->nr < PERF_MAX_STACK_DEPTH) && |
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 282de4826abb..7f1efcd4a6e9 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <asm/thread_notify.h> | 39 | #include <asm/thread_notify.h> |
40 | #include <asm/stacktrace.h> | 40 | #include <asm/stacktrace.h> |
41 | #include <asm/mach/time.h> | 41 | #include <asm/mach/time.h> |
42 | #include <asm/tls.h> | ||
42 | 43 | ||
43 | #ifdef CONFIG_CC_STACKPROTECTOR | 44 | #ifdef CONFIG_CC_STACKPROTECTOR |
44 | #include <linux/stackprotector.h> | 45 | #include <linux/stackprotector.h> |
@@ -184,30 +185,61 @@ int __init reboot_setup(char *str) | |||
184 | 185 | ||
185 | __setup("reboot=", reboot_setup); | 186 | __setup("reboot=", reboot_setup); |
186 | 187 | ||
188 | /* | ||
189 | * Called by kexec, immediately prior to machine_kexec(). | ||
190 | * | ||
191 | * This must completely disable all secondary CPUs; simply causing those CPUs | ||
192 | * to execute e.g. a RAM-based pin loop is not sufficient. This allows the | ||
193 | * kexec'd kernel to use any and all RAM as it sees fit, without having to | ||
194 | * avoid any code or data used by any SW CPU pin loop. The CPU hotplug | ||
195 | * functionality embodied in disable_nonboot_cpus() to achieve this. | ||
196 | */ | ||
187 | void machine_shutdown(void) | 197 | void machine_shutdown(void) |
188 | { | 198 | { |
189 | #ifdef CONFIG_SMP | 199 | disable_nonboot_cpus(); |
190 | smp_send_stop(); | ||
191 | #endif | ||
192 | } | 200 | } |
193 | 201 | ||
202 | /* | ||
203 | * Halting simply requires that the secondary CPUs stop performing any | ||
204 | * activity (executing tasks, handling interrupts). smp_send_stop() | ||
205 | * achieves this. | ||
206 | */ | ||
194 | void machine_halt(void) | 207 | void machine_halt(void) |
195 | { | 208 | { |
196 | machine_shutdown(); | 209 | smp_send_stop(); |
210 | |||
197 | local_irq_disable(); | 211 | local_irq_disable(); |
198 | while (1); | 212 | while (1); |
199 | } | 213 | } |
200 | 214 | ||
215 | /* | ||
216 | * Power-off simply requires that the secondary CPUs stop performing any | ||
217 | * activity (executing tasks, handling interrupts). smp_send_stop() | ||
218 | * achieves this. When the system power is turned off, it will take all CPUs | ||
219 | * with it. | ||
220 | */ | ||
201 | void machine_power_off(void) | 221 | void machine_power_off(void) |
202 | { | 222 | { |
203 | machine_shutdown(); | 223 | smp_send_stop(); |
224 | |||
204 | if (pm_power_off) | 225 | if (pm_power_off) |
205 | pm_power_off(); | 226 | pm_power_off(); |
206 | } | 227 | } |
207 | 228 | ||
229 | /* | ||
230 | * Restart requires that the secondary CPUs stop performing any activity | ||
231 | * while the primary CPU resets the system. Systems with a single CPU can | ||
232 | * use soft_restart() as their machine descriptor's .restart hook, since that | ||
233 | * will cause the only available CPU to reset. Systems with multiple CPUs must | ||
234 | * provide a HW restart implementation, to ensure that all CPUs reset at once. | ||
235 | * This is required so that any code running after reset on the primary CPU | ||
236 | * doesn't have to co-ordinate with other CPUs to ensure they aren't still | ||
237 | * executing pre-reset code, and using RAM that the primary CPU's code wishes | ||
238 | * to use. Implementing such co-ordination would be essentially impossible. | ||
239 | */ | ||
208 | void machine_restart(char *cmd) | 240 | void machine_restart(char *cmd) |
209 | { | 241 | { |
210 | machine_shutdown(); | 242 | smp_send_stop(); |
211 | 243 | ||
212 | arm_pm_restart(reboot_mode, cmd); | 244 | arm_pm_restart(reboot_mode, cmd); |
213 | 245 | ||
@@ -343,7 +375,8 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start, | |||
343 | clear_ptrace_hw_breakpoint(p); | 375 | clear_ptrace_hw_breakpoint(p); |
344 | 376 | ||
345 | if (clone_flags & CLONE_SETTLS) | 377 | if (clone_flags & CLONE_SETTLS) |
346 | thread->tp_value = childregs->ARM_r3; | 378 | thread->tp_value[0] = childregs->ARM_r3; |
379 | thread->tp_value[1] = get_tpuser(); | ||
347 | 380 | ||
348 | thread_notify(THREAD_NOTIFY_COPY, thread); | 381 | thread_notify(THREAD_NOTIFY_COPY, thread); |
349 | 382 | ||
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index 03deeffd9f6d..2bc1514d6dbe 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c | |||
@@ -849,7 +849,7 @@ long arch_ptrace(struct task_struct *child, long request, | |||
849 | #endif | 849 | #endif |
850 | 850 | ||
851 | case PTRACE_GET_THREAD_AREA: | 851 | case PTRACE_GET_THREAD_AREA: |
852 | ret = put_user(task_thread_info(child)->tp_value, | 852 | ret = put_user(task_thread_info(child)->tp_value[0], |
853 | datap); | 853 | datap); |
854 | break; | 854 | break; |
855 | 855 | ||
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 1522c7ae31b0..0cde326f5542 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c | |||
@@ -444,7 +444,7 @@ void notrace cpu_init(void) | |||
444 | : "r14"); | 444 | : "r14"); |
445 | } | 445 | } |
446 | 446 | ||
447 | int __cpu_logical_map[NR_CPUS]; | 447 | u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID }; |
448 | 448 | ||
449 | void __init smp_setup_processor_id(void) | 449 | void __init smp_setup_processor_id(void) |
450 | { | 450 | { |
@@ -456,6 +456,13 @@ void __init smp_setup_processor_id(void) | |||
456 | for (i = 1; i < nr_cpu_ids; ++i) | 456 | for (i = 1; i < nr_cpu_ids; ++i) |
457 | cpu_logical_map(i) = i == cpu ? 0 : i; | 457 | cpu_logical_map(i) = i == cpu ? 0 : i; |
458 | 458 | ||
459 | /* | ||
460 | * Clear __my_cpu_offset on the boot CPU to avoid a hang caused by | ||
461 | * using per-cpu variables too early; for example, lockdep | ||
462 | * accesses per-cpu variables inside lock_release(). | ||
463 | */ | ||
464 | set_my_cpu_offset(0); | ||
465 | |||
459 | printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr); | 466 | printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr); |
460 | } | 467 | } |
461 | 468 | ||
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 550d63cef68e..5919eb451bb9 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c | |||
@@ -651,17 +651,6 @@ void smp_send_reschedule(int cpu) | |||
651 | smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE); | 651 | smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE); |
652 | } | 652 | } |
653 | 653 | ||
654 | #ifdef CONFIG_HOTPLUG_CPU | ||
655 | static void smp_kill_cpus(cpumask_t *mask) | ||
656 | { | ||
657 | unsigned int cpu; | ||
658 | for_each_cpu(cpu, mask) | ||
659 | platform_cpu_kill(cpu); | ||
660 | } | ||
661 | #else | ||
662 | static void smp_kill_cpus(cpumask_t *mask) { } | ||
663 | #endif | ||
664 | |||
665 | void smp_send_stop(void) | 654 | void smp_send_stop(void) |
666 | { | 655 | { |
667 | unsigned long timeout; | 656 | unsigned long timeout; |
@@ -679,8 +668,6 @@ void smp_send_stop(void) | |||
679 | 668 | ||
680 | if (num_online_cpus() > 1) | 669 | if (num_online_cpus() > 1) |
681 | pr_warning("SMP: failed to stop secondary CPUs\n"); | 670 | pr_warning("SMP: failed to stop secondary CPUs\n"); |
682 | |||
683 | smp_kill_cpus(&mask); | ||
684 | } | 671 | } |
685 | 672 | ||
686 | /* | 673 | /* |
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c index 9a52a07aa40e..a98b62dca2fa 100644 --- a/arch/arm/kernel/smp_tlb.c +++ b/arch/arm/kernel/smp_tlb.c | |||
@@ -103,7 +103,7 @@ static void broadcast_tlb_a15_erratum(void) | |||
103 | 103 | ||
104 | static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm) | 104 | static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm) |
105 | { | 105 | { |
106 | int cpu, this_cpu; | 106 | int this_cpu; |
107 | cpumask_t mask = { CPU_BITS_NONE }; | 107 | cpumask_t mask = { CPU_BITS_NONE }; |
108 | 108 | ||
109 | if (!erratum_a15_798181()) | 109 | if (!erratum_a15_798181()) |
@@ -111,21 +111,7 @@ static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm) | |||
111 | 111 | ||
112 | dummy_flush_tlb_a15_erratum(); | 112 | dummy_flush_tlb_a15_erratum(); |
113 | this_cpu = get_cpu(); | 113 | this_cpu = get_cpu(); |
114 | for_each_online_cpu(cpu) { | 114 | a15_erratum_get_cpumask(this_cpu, mm, &mask); |
115 | if (cpu == this_cpu) | ||
116 | continue; | ||
117 | /* | ||
118 | * We only need to send an IPI if the other CPUs are running | ||
119 | * the same ASID as the one being invalidated. There is no | ||
120 | * need for locking around the active_asids check since the | ||
121 | * switch_mm() function has at least one dmb() (as required by | ||
122 | * this workaround) in case a context switch happens on | ||
123 | * another CPU after the condition below. | ||
124 | */ | ||
125 | if (atomic64_read(&mm->context.id) == | ||
126 | atomic64_read(&per_cpu(active_asids, cpu))) | ||
127 | cpumask_set_cpu(cpu, &mask); | ||
128 | } | ||
129 | smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1); | 115 | smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1); |
130 | put_cpu(); | 116 | put_cpu(); |
131 | } | 117 | } |
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c index f10316b4ecdc..c5a59546a256 100644 --- a/arch/arm/kernel/topology.c +++ b/arch/arm/kernel/topology.c | |||
@@ -13,6 +13,7 @@ | |||
13 | 13 | ||
14 | #include <linux/cpu.h> | 14 | #include <linux/cpu.h> |
15 | #include <linux/cpumask.h> | 15 | #include <linux/cpumask.h> |
16 | #include <linux/export.h> | ||
16 | #include <linux/init.h> | 17 | #include <linux/init.h> |
17 | #include <linux/percpu.h> | 18 | #include <linux/percpu.h> |
18 | #include <linux/node.h> | 19 | #include <linux/node.h> |
@@ -200,6 +201,7 @@ static inline void update_cpu_power(unsigned int cpuid, unsigned int mpidr) {} | |||
200 | * cpu topology table | 201 | * cpu topology table |
201 | */ | 202 | */ |
202 | struct cputopo_arm cpu_topology[NR_CPUS]; | 203 | struct cputopo_arm cpu_topology[NR_CPUS]; |
204 | EXPORT_SYMBOL_GPL(cpu_topology); | ||
203 | 205 | ||
204 | const struct cpumask *cpu_coregroup_mask(int cpu) | 206 | const struct cpumask *cpu_coregroup_mask(int cpu) |
205 | { | 207 | { |
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 18b32e8e4497..517bfd4da1c9 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c | |||
@@ -581,7 +581,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs) | |||
581 | return regs->ARM_r0; | 581 | return regs->ARM_r0; |
582 | 582 | ||
583 | case NR(set_tls): | 583 | case NR(set_tls): |
584 | thread->tp_value = regs->ARM_r0; | 584 | thread->tp_value[0] = regs->ARM_r0; |
585 | if (tls_emu) | 585 | if (tls_emu) |
586 | return 0; | 586 | return 0; |
587 | if (has_tls_reg) { | 587 | if (has_tls_reg) { |
@@ -699,7 +699,7 @@ static int get_tp_trap(struct pt_regs *regs, unsigned int instr) | |||
699 | int reg = (instr >> 12) & 15; | 699 | int reg = (instr >> 12) & 15; |
700 | if (reg == 15) | 700 | if (reg == 15) |
701 | return 1; | 701 | return 1; |
702 | regs->uregs[reg] = current_thread_info()->tp_value; | 702 | regs->uregs[reg] = current_thread_info()->tp_value[0]; |
703 | regs->ARM_pc += 4; | 703 | regs->ARM_pc += 4; |
704 | return 0; | 704 | return 0; |
705 | } | 705 | } |
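Userspace normally obtains this value through the get_tls kuser helper rather than a raw mrc; on cores without the hardware register, a raw mrc read is what get_tp_trap() above emulates from tp_value[0]. A sketch of the documented helper calling convention, assuming kuser helpers are present in the vector page:

/* Call the get_tls kuser helper at its fixed vector-page address. */
#include <stdio.h>

typedef void *(__kuser_get_tls_t)(void);
#define __kuser_get_tls (*(__kuser_get_tls_t *)0xffff0fe0)

int main(void)
{
	printf("TLS pointer = %p\n", __kuser_get_tls());
	return 0;
}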
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 37d216d814cd..ef1703b9587b 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c | |||
@@ -492,6 +492,11 @@ static void vcpu_pause(struct kvm_vcpu *vcpu) | |||
492 | wait_event_interruptible(*wq, !vcpu->arch.pause); | 492 | wait_event_interruptible(*wq, !vcpu->arch.pause); |
493 | } | 493 | } |
494 | 494 | ||
495 | static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) | ||
496 | { | ||
497 | return vcpu->arch.target >= 0; | ||
498 | } | ||
499 | |||
495 | /** | 500 | /** |
496 | * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code | 501 | * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code |
497 | * @vcpu: The VCPU pointer | 502 | * @vcpu: The VCPU pointer |
@@ -508,8 +513,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
508 | int ret; | 513 | int ret; |
509 | sigset_t sigsaved; | 514 | sigset_t sigsaved; |
510 | 515 | ||
511 | /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ | 516 | if (unlikely(!kvm_vcpu_initialized(vcpu))) |
512 | if (unlikely(vcpu->arch.target < 0)) | ||
513 | return -ENOEXEC; | 517 | return -ENOEXEC; |
514 | 518 | ||
515 | ret = kvm_vcpu_first_run_init(vcpu); | 519 | ret = kvm_vcpu_first_run_init(vcpu); |
@@ -710,6 +714,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp, | |||
710 | case KVM_SET_ONE_REG: | 714 | case KVM_SET_ONE_REG: |
711 | case KVM_GET_ONE_REG: { | 715 | case KVM_GET_ONE_REG: { |
712 | struct kvm_one_reg reg; | 716 | struct kvm_one_reg reg; |
717 | |||
718 | if (unlikely(!kvm_vcpu_initialized(vcpu))) | ||
719 | return -ENOEXEC; | ||
720 | |||
713 | if (copy_from_user(®, argp, sizeof(reg))) | 721 | if (copy_from_user(®, argp, sizeof(reg))) |
714 | return -EFAULT; | 722 | return -EFAULT; |
715 | if (ioctl == KVM_SET_ONE_REG) | 723 | if (ioctl == KVM_SET_ONE_REG) |
@@ -722,6 +730,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, | |||
722 | struct kvm_reg_list reg_list; | 730 | struct kvm_reg_list reg_list; |
723 | unsigned n; | 731 | unsigned n; |
724 | 732 | ||
733 | if (unlikely(!kvm_vcpu_initialized(vcpu))) | ||
734 | return -ENOEXEC; | ||
735 | |||
725 | if (copy_from_user(®_list, user_list, sizeof(reg_list))) | 736 | if (copy_from_user(®_list, user_list, sizeof(reg_list))) |
726 | return -EFAULT; | 737 | return -EFAULT; |
727 | n = reg_list.n; | 738 | n = reg_list.n; |
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 965706578f13..84ba67b982c0 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c | |||
@@ -43,7 +43,14 @@ static phys_addr_t hyp_idmap_vector; | |||
43 | 43 | ||
44 | static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) | 44 | static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) |
45 | { | 45 | { |
46 | kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa); | 46 | /* |
47 | * This function also gets called when dealing with HYP page | ||
48 | * tables. As HYP doesn't have an associated struct kvm (and | ||
49 | * the HYP page tables are fairly static), we don't do | ||
50 | * anything there. | ||
51 | */ | ||
52 | if (kvm) | ||
53 | kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa); | ||
47 | } | 54 | } |
48 | 55 | ||
49 | static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, | 56 | static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, |
@@ -78,18 +85,20 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc) | |||
78 | return p; | 85 | return p; |
79 | } | 86 | } |
80 | 87 | ||
81 | static void clear_pud_entry(pud_t *pud) | 88 | static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr) |
82 | { | 89 | { |
83 | pmd_t *pmd_table = pmd_offset(pud, 0); | 90 | pmd_t *pmd_table = pmd_offset(pud, 0); |
84 | pud_clear(pud); | 91 | pud_clear(pud); |
92 | kvm_tlb_flush_vmid_ipa(kvm, addr); | ||
85 | pmd_free(NULL, pmd_table); | 93 | pmd_free(NULL, pmd_table); |
86 | put_page(virt_to_page(pud)); | 94 | put_page(virt_to_page(pud)); |
87 | } | 95 | } |
88 | 96 | ||
89 | static void clear_pmd_entry(pmd_t *pmd) | 97 | static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr) |
90 | { | 98 | { |
91 | pte_t *pte_table = pte_offset_kernel(pmd, 0); | 99 | pte_t *pte_table = pte_offset_kernel(pmd, 0); |
92 | pmd_clear(pmd); | 100 | pmd_clear(pmd); |
101 | kvm_tlb_flush_vmid_ipa(kvm, addr); | ||
93 | pte_free_kernel(NULL, pte_table); | 102 | pte_free_kernel(NULL, pte_table); |
94 | put_page(virt_to_page(pmd)); | 103 | put_page(virt_to_page(pmd)); |
95 | } | 104 | } |
@@ -100,11 +109,12 @@ static bool pmd_empty(pmd_t *pmd) | |||
100 | return page_count(pmd_page) == 1; | 109 | return page_count(pmd_page) == 1; |
101 | } | 110 | } |
102 | 111 | ||
103 | static void clear_pte_entry(pte_t *pte) | 112 | static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr) |
104 | { | 113 | { |
105 | if (pte_present(*pte)) { | 114 | if (pte_present(*pte)) { |
106 | kvm_set_pte(pte, __pte(0)); | 115 | kvm_set_pte(pte, __pte(0)); |
107 | put_page(virt_to_page(pte)); | 116 | put_page(virt_to_page(pte)); |
117 | kvm_tlb_flush_vmid_ipa(kvm, addr); | ||
108 | } | 118 | } |
109 | } | 119 | } |
110 | 120 | ||
@@ -114,7 +124,8 @@ static bool pte_empty(pte_t *pte) | |||
114 | return page_count(pte_page) == 1; | 124 | return page_count(pte_page) == 1; |
115 | } | 125 | } |
116 | 126 | ||
117 | static void unmap_range(pgd_t *pgdp, unsigned long long start, u64 size) | 127 | static void unmap_range(struct kvm *kvm, pgd_t *pgdp, |
128 | unsigned long long start, u64 size) | ||
118 | { | 129 | { |
119 | pgd_t *pgd; | 130 | pgd_t *pgd; |
120 | pud_t *pud; | 131 | pud_t *pud; |
@@ -138,15 +149,15 @@ static void unmap_range(pgd_t *pgdp, unsigned long long start, u64 size) | |||
138 | } | 149 | } |
139 | 150 | ||
140 | pte = pte_offset_kernel(pmd, addr); | 151 | pte = pte_offset_kernel(pmd, addr); |
141 | clear_pte_entry(pte); | 152 | clear_pte_entry(kvm, pte, addr); |
142 | range = PAGE_SIZE; | 153 | range = PAGE_SIZE; |
143 | 154 | ||
144 | /* If we emptied the pte, walk back up the ladder */ | 155 | /* If we emptied the pte, walk back up the ladder */ |
145 | if (pte_empty(pte)) { | 156 | if (pte_empty(pte)) { |
146 | clear_pmd_entry(pmd); | 157 | clear_pmd_entry(kvm, pmd, addr); |
147 | range = PMD_SIZE; | 158 | range = PMD_SIZE; |
148 | if (pmd_empty(pmd)) { | 159 | if (pmd_empty(pmd)) { |
149 | clear_pud_entry(pud); | 160 | clear_pud_entry(kvm, pud, addr); |
150 | range = PUD_SIZE; | 161 | range = PUD_SIZE; |
151 | } | 162 | } |
152 | } | 163 | } |
@@ -165,14 +176,14 @@ void free_boot_hyp_pgd(void) | |||
165 | mutex_lock(&kvm_hyp_pgd_mutex); | 176 | mutex_lock(&kvm_hyp_pgd_mutex); |
166 | 177 | ||
167 | if (boot_hyp_pgd) { | 178 | if (boot_hyp_pgd) { |
168 | unmap_range(boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE); | 179 | unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE); |
169 | unmap_range(boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); | 180 | unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); |
170 | kfree(boot_hyp_pgd); | 181 | kfree(boot_hyp_pgd); |
171 | boot_hyp_pgd = NULL; | 182 | boot_hyp_pgd = NULL; |
172 | } | 183 | } |
173 | 184 | ||
174 | if (hyp_pgd) | 185 | if (hyp_pgd) |
175 | unmap_range(hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); | 186 | unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE); |
176 | 187 | ||
177 | kfree(init_bounce_page); | 188 | kfree(init_bounce_page); |
178 | init_bounce_page = NULL; | 189 | init_bounce_page = NULL; |
@@ -200,9 +211,10 @@ void free_hyp_pgds(void) | |||
200 | 211 | ||
201 | if (hyp_pgd) { | 212 | if (hyp_pgd) { |
202 | for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE) | 213 | for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE) |
203 | unmap_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE); | 214 | unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE); |
204 | for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE) | 215 | for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE) |
205 | unmap_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE); | 216 | unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE); |
217 | |||
206 | kfree(hyp_pgd); | 218 | kfree(hyp_pgd); |
207 | hyp_pgd = NULL; | 219 | hyp_pgd = NULL; |
208 | } | 220 | } |
@@ -393,7 +405,7 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm) | |||
393 | */ | 405 | */ |
394 | static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) | 406 | static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) |
395 | { | 407 | { |
396 | unmap_range(kvm->arch.pgd, start, size); | 408 | unmap_range(kvm, kvm->arch.pgd, start, size); |
397 | } | 409 | } |
398 | 410 | ||
399 | /** | 411 | /** |
@@ -675,7 +687,6 @@ static void handle_hva_to_gpa(struct kvm *kvm, | |||
675 | static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data) | 687 | static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data) |
676 | { | 688 | { |
677 | unmap_stage2_range(kvm, gpa, PAGE_SIZE); | 689 | unmap_stage2_range(kvm, gpa, PAGE_SIZE); |
678 | kvm_tlb_flush_vmid_ipa(kvm, gpa); | ||
679 | } | 690 | } |
680 | 691 | ||
681 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) | 692 | int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) |
diff --git a/arch/arm/mach-ebsa110/core.c b/arch/arm/mach-ebsa110/core.c index b13cc74114db..8a53f346cdb3 100644 --- a/arch/arm/mach-ebsa110/core.c +++ b/arch/arm/mach-ebsa110/core.c | |||
@@ -116,7 +116,7 @@ static void __init ebsa110_map_io(void) | |||
116 | iotable_init(ebsa110_io_desc, ARRAY_SIZE(ebsa110_io_desc)); | 116 | iotable_init(ebsa110_io_desc, ARRAY_SIZE(ebsa110_io_desc)); |
117 | } | 117 | } |
118 | 118 | ||
119 | static void __iomem *ebsa110_ioremap_caller(unsigned long cookie, size_t size, | 119 | static void __iomem *ebsa110_ioremap_caller(phys_addr_t cookie, size_t size, |
120 | unsigned int flags, void *caller) | 120 | unsigned int flags, void *caller) |
121 | { | 121 | { |
122 | return (void __iomem *)cookie; | 122 | return (void __iomem *)cookie; |
diff --git a/arch/arm/mach-exynos/common.c b/arch/arm/mach-exynos/common.c index 027c9e7f0d13..f7e504b7874d 100644 --- a/arch/arm/mach-exynos/common.c +++ b/arch/arm/mach-exynos/common.c | |||
@@ -386,6 +386,8 @@ int __init exynos_fdt_map_chipid(unsigned long node, const char *uname, | |||
386 | 386 | ||
387 | void __init exynos_init_io(struct map_desc *mach_desc, int size) | 387 | void __init exynos_init_io(struct map_desc *mach_desc, int size) |
388 | { | 388 | { |
389 | debug_ll_io_init(); | ||
390 | |||
389 | #ifdef CONFIG_OF | 391 | #ifdef CONFIG_OF |
390 | if (initial_boot_params) | 392 | if (initial_boot_params) |
391 | of_scan_flat_dt(exynos_fdt_map_chipid, NULL); | 393 | of_scan_flat_dt(exynos_fdt_map_chipid, NULL); |
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c index dda9a2bd3acb..4e3148ce852d 100644 --- a/arch/arm/mach-imx/clk-imx6q.c +++ b/arch/arm/mach-imx/clk-imx6q.c | |||
@@ -181,14 +181,14 @@ static const char *periph_clk2_sels[] = { "pll3_usb_otg", "osc", "osc", "dummy", | |||
181 | static const char *periph2_clk2_sels[] = { "pll3_usb_otg", "pll2_bus", }; | 181 | static const char *periph2_clk2_sels[] = { "pll3_usb_otg", "pll2_bus", }; |
182 | static const char *periph_sels[] = { "periph_pre", "periph_clk2", }; | 182 | static const char *periph_sels[] = { "periph_pre", "periph_clk2", }; |
183 | static const char *periph2_sels[] = { "periph2_pre", "periph2_clk2", }; | 183 | static const char *periph2_sels[] = { "periph2_pre", "periph2_clk2", }; |
184 | static const char *axi_sels[] = { "periph", "pll2_pfd2_396m", "pll3_pfd1_540m", }; | 184 | static const char *axi_sels[] = { "periph", "pll2_pfd2_396m", "periph", "pll3_pfd1_540m", }; |
185 | static const char *audio_sels[] = { "pll4_post_div", "pll3_pfd2_508m", "pll3_pfd3_454m", "pll3_usb_otg", }; | 185 | static const char *audio_sels[] = { "pll4_post_div", "pll3_pfd2_508m", "pll3_pfd3_454m", "pll3_usb_otg", }; |
186 | static const char *gpu_axi_sels[] = { "axi", "ahb", }; | 186 | static const char *gpu_axi_sels[] = { "axi", "ahb", }; |
187 | static const char *gpu2d_core_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd0_352m", "pll2_pfd2_396m", }; | 187 | static const char *gpu2d_core_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd0_352m", "pll2_pfd2_396m", }; |
188 | static const char *gpu3d_core_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", }; | 188 | static const char *gpu3d_core_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", }; |
189 | static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll3_pfd0_720m", }; | 189 | static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll3_pfd0_720m", }; |
190 | static const char *ipu_sels[] = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", }; | 190 | static const char *ipu_sels[] = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", }; |
191 | static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_usb_otg", }; | 191 | static const char *ldb_di_sels[] = { "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_usb_otg", }; |
192 | static const char *ipu_di_pre_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", }; | 192 | static const char *ipu_di_pre_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", }; |
193 | static const char *ipu1_di0_sels[] = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", }; | 193 | static const char *ipu1_di0_sels[] = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", }; |
194 | static const char *ipu1_di1_sels[] = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", }; | 194 | static const char *ipu1_di1_sels[] = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", }; |
diff --git a/arch/arm/mach-imx/mm-imx3.c b/arch/arm/mach-imx/mm-imx3.c index e0e69a682174..eed32ca0b8ab 100644 --- a/arch/arm/mach-imx/mm-imx3.c +++ b/arch/arm/mach-imx/mm-imx3.c | |||
@@ -65,7 +65,7 @@ static void imx3_idle(void) | |||
65 | : "=r" (reg)); | 65 | : "=r" (reg)); |
66 | } | 66 | } |
67 | 67 | ||
68 | static void __iomem *imx3_ioremap_caller(unsigned long phys_addr, size_t size, | 68 | static void __iomem *imx3_ioremap_caller(phys_addr_t phys_addr, size_t size, |
69 | unsigned int mtype, void *caller) | 69 | unsigned int mtype, void *caller) |
70 | { | 70 | { |
71 | if (mtype == MT_DEVICE) { | 71 | if (mtype == MT_DEVICE) { |
diff --git a/arch/arm/mach-iop13xx/io.c b/arch/arm/mach-iop13xx/io.c index 183dc8b5511b..faaf7d4482c5 100644 --- a/arch/arm/mach-iop13xx/io.c +++ b/arch/arm/mach-iop13xx/io.c | |||
@@ -23,7 +23,7 @@ | |||
23 | 23 | ||
24 | #include "pci.h" | 24 | #include "pci.h" |
25 | 25 | ||
26 | static void __iomem *__iop13xx_ioremap_caller(unsigned long cookie, | 26 | static void __iomem *__iop13xx_ioremap_caller(phys_addr_t cookie, |
27 | size_t size, unsigned int mtype, void *caller) | 27 | size_t size, unsigned int mtype, void *caller) |
28 | { | 28 | { |
29 | void __iomem * retval; | 29 | void __iomem * retval; |
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c index 6600cff6bd92..d7223b3b81f3 100644 --- a/arch/arm/mach-ixp4xx/common.c +++ b/arch/arm/mach-ixp4xx/common.c | |||
@@ -559,7 +559,7 @@ void ixp4xx_restart(char mode, const char *cmd) | |||
559 | * fallback to the default. | 559 | * fallback to the default. |
560 | */ | 560 | */ |
561 | 561 | ||
562 | static void __iomem *ixp4xx_ioremap_caller(unsigned long addr, size_t size, | 562 | static void __iomem *ixp4xx_ioremap_caller(phys_addr_t addr, size_t size, |
563 | unsigned int mtype, void *caller) | 563 | unsigned int mtype, void *caller) |
564 | { | 564 | { |
565 | if (!is_pci_memory(addr)) | 565 | if (!is_pci_memory(addr)) |
diff --git a/arch/arm/mach-kirkwood/board-ts219.c b/arch/arm/mach-kirkwood/board-ts219.c index acb0187c7ee1..4695d5f35fc9 100644 --- a/arch/arm/mach-kirkwood/board-ts219.c +++ b/arch/arm/mach-kirkwood/board-ts219.c | |||
@@ -41,13 +41,3 @@ void __init qnap_dt_ts219_init(void) | |||
41 | 41 | ||
42 | pm_power_off = qnap_tsx1x_power_off; | 42 | pm_power_off = qnap_tsx1x_power_off; |
43 | } | 43 | } |
44 | |||
45 | /* FIXME: Will not work with DT. Maybe use MPP40_GPIO? */ | ||
46 | static int __init ts219_pci_init(void) | ||
47 | { | ||
48 | if (machine_is_ts219()) | ||
49 | kirkwood_pcie_init(KW_PCIE0); | ||
50 | |||
51 | return 0; | ||
52 | } | ||
53 | subsys_initcall(ts219_pci_init); | ||
diff --git a/arch/arm/mach-kirkwood/mpp.c b/arch/arm/mach-kirkwood/mpp.c index 827cde42414f..e96fd71abd76 100644 --- a/arch/arm/mach-kirkwood/mpp.c +++ b/arch/arm/mach-kirkwood/mpp.c | |||
@@ -22,9 +22,10 @@ static unsigned int __init kirkwood_variant(void) | |||
22 | 22 | ||
23 | kirkwood_pcie_id(&dev, &rev); | 23 | kirkwood_pcie_id(&dev, &rev); |
24 | 24 | ||
25 | if ((dev == MV88F6281_DEV_ID && rev >= MV88F6281_REV_A0) || | 25 | if (dev == MV88F6281_DEV_ID && rev >= MV88F6281_REV_A0) |
26 | (dev == MV88F6282_DEV_ID)) | ||
27 | return MPP_F6281_MASK; | 26 | return MPP_F6281_MASK; |
27 | if (dev == MV88F6282_DEV_ID) | ||
28 | return MPP_F6282_MASK; | ||
28 | if (dev == MV88F6192_DEV_ID && rev >= MV88F6192_REV_A0) | 29 | if (dev == MV88F6192_DEV_ID && rev >= MV88F6192_REV_A0) |
29 | return MPP_F6192_MASK; | 30 | return MPP_F6192_MASK; |
30 | if (dev == MV88F6180_DEV_ID) | 31 | if (dev == MV88F6180_DEV_ID) |
diff --git a/arch/arm/mach-msm/common.h b/arch/arm/mach-msm/common.h index ce8215a269e5..421cf7751a80 100644 --- a/arch/arm/mach-msm/common.h +++ b/arch/arm/mach-msm/common.h | |||
@@ -23,7 +23,7 @@ extern void msm_map_msm8x60_io(void); | |||
23 | extern void msm_map_msm8960_io(void); | 23 | extern void msm_map_msm8960_io(void); |
24 | extern void msm_map_qsd8x50_io(void); | 24 | extern void msm_map_qsd8x50_io(void); |
25 | 25 | ||
26 | extern void __iomem *__msm_ioremap_caller(unsigned long phys_addr, size_t size, | 26 | extern void __iomem *__msm_ioremap_caller(phys_addr_t phys_addr, size_t size, |
27 | unsigned int mtype, void *caller); | 27 | unsigned int mtype, void *caller); |
28 | 28 | ||
29 | extern struct smp_operations msm_smp_ops; | 29 | extern struct smp_operations msm_smp_ops; |
diff --git a/arch/arm/mach-msm/io.c b/arch/arm/mach-msm/io.c index 123ef9cbce1b..fd65b6d42cde 100644 --- a/arch/arm/mach-msm/io.c +++ b/arch/arm/mach-msm/io.c | |||
@@ -172,7 +172,7 @@ void __init msm_map_msm7x30_io(void) | |||
172 | } | 172 | } |
173 | #endif /* CONFIG_ARCH_MSM7X30 */ | 173 | #endif /* CONFIG_ARCH_MSM7X30 */ |
174 | 174 | ||
175 | void __iomem *__msm_ioremap_caller(unsigned long phys_addr, size_t size, | 175 | void __iomem *__msm_ioremap_caller(phys_addr_t phys_addr, size_t size, |
176 | unsigned int mtype, void *caller) | 176 | unsigned int mtype, void *caller) |
177 | { | 177 | { |
178 | if (mtype == MT_DEVICE) { | 178 | if (mtype == MT_DEVICE) { |
diff --git a/arch/arm/mach-mvebu/coherency_ll.S b/arch/arm/mach-mvebu/coherency_ll.S index 53e8391192cd..5476669ba905 100644 --- a/arch/arm/mach-mvebu/coherency_ll.S +++ b/arch/arm/mach-mvebu/coherency_ll.S | |||
@@ -32,15 +32,21 @@ ENTRY(ll_set_cpu_coherent) | |||
32 | 32 | ||
33 | /* Add CPU to SMP group - Atomic */ | 33 | /* Add CPU to SMP group - Atomic */ |
34 | add r3, r0, #ARMADA_XP_CFB_CTL_REG_OFFSET | 34 | add r3, r0, #ARMADA_XP_CFB_CTL_REG_OFFSET |
35 | ldr r2, [r3] | 35 | 1: |
36 | ldrex r2, [r3] | ||
36 | orr r2, r2, r1 | 37 | orr r2, r2, r1 |
37 | str r2, [r3] | 38 | strex r0, r2, [r3] |
39 | cmp r0, #0 | ||
40 | bne 1b | ||
38 | 41 | ||
39 | /* Enable coherency on CPU - Atomic */ | 42 | /* Enable coherency on CPU - Atomic */ |
40 | add r3, r0, #ARMADA_XP_CFB_CFG_REG_OFFSET | 43 | add r3, r3, #ARMADA_XP_CFB_CFG_REG_OFFSET |
41 | ldr r2, [r3] | 44 | 1: |
45 | ldrex r2, [r3] | ||
42 | orr r2, r2, r1 | 46 | orr r2, r2, r1 |
43 | str r2, [r3] | 47 | strex r0, r2, [r3] |
48 | cmp r0, #0 | ||
49 | bne 1b | ||
44 | 50 | ||
45 | dsb | 51 | dsb |
46 | 52 | ||
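The change above turns a plain read-modify-write into the standard ARM exclusive-monitor retry loop, so concurrent CPUs updating the same fabric register cannot lose each other's bits. The equivalent pattern in C, shown here as a sketch with made-up names, is an atomic fetch-or, which GCC lowers to the same ldrex/orr/strex loop on ARMv7:

#include <stdint.h>

/* Atomically set 'mask' bits in a shared 32-bit control word; the
 * compiler emits an ldrex/orr/strex retry loop on ARMv7, mirroring
 * the hand-written assembly above. */
static inline void set_bits_atomic(volatile uint32_t *word, uint32_t mask)
{
	__atomic_fetch_or(word, mask, __ATOMIC_RELAXED);
}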
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index f49cd51e162a..8620ab52a4de 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig | |||
@@ -4,6 +4,7 @@ config ARCH_OMAP | |||
4 | config ARCH_OMAP2PLUS | 4 | config ARCH_OMAP2PLUS |
5 | bool "TI OMAP2/3/4/5 SoCs with device tree support" if (ARCH_MULTI_V6 || ARCH_MULTI_V7) | 5 | bool "TI OMAP2/3/4/5 SoCs with device tree support" if (ARCH_MULTI_V6 || ARCH_MULTI_V7) |
6 | select ARCH_HAS_CPUFREQ | 6 | select ARCH_HAS_CPUFREQ |
7 | select ARCH_HAS_BANDGAP | ||
7 | select ARCH_HAS_HOLES_MEMORYMODEL | 8 | select ARCH_HAS_HOLES_MEMORYMODEL |
8 | select ARCH_OMAP | 9 | select ARCH_OMAP |
9 | select ARCH_REQUIRE_GPIOLIB | 10 | select ARCH_REQUIRE_GPIOLIB |
diff --git a/arch/arm/mach-omap2/clock36xx.c b/arch/arm/mach-omap2/clock36xx.c index 8f3bf4e50908..bbd6a3f717e6 100644 --- a/arch/arm/mach-omap2/clock36xx.c +++ b/arch/arm/mach-omap2/clock36xx.c | |||
@@ -20,11 +20,12 @@ | |||
20 | 20 | ||
21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
22 | #include <linux/clk.h> | 22 | #include <linux/clk.h> |
23 | #include <linux/clk-provider.h> | ||
23 | #include <linux/io.h> | 24 | #include <linux/io.h> |
24 | 25 | ||
25 | #include "clock.h" | 26 | #include "clock.h" |
26 | #include "clock36xx.h" | 27 | #include "clock36xx.h" |
27 | 28 | #define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw) | |
28 | 29 | ||
29 | /** | 30 | /** |
30 | * omap36xx_pwrdn_clk_enable_with_hsdiv_restore - enable clocks suffering | 31 | * omap36xx_pwrdn_clk_enable_with_hsdiv_restore - enable clocks suffering |
@@ -39,29 +40,28 @@ | |||
39 | */ | 40 | */ |
40 | int omap36xx_pwrdn_clk_enable_with_hsdiv_restore(struct clk_hw *clk) | 41 | int omap36xx_pwrdn_clk_enable_with_hsdiv_restore(struct clk_hw *clk) |
41 | { | 42 | { |
42 | struct clk_hw_omap *parent; | 43 | struct clk_divider *parent; |
43 | struct clk_hw *parent_hw; | 44 | struct clk_hw *parent_hw; |
44 | u32 dummy_v, orig_v, clksel_shift; | 45 | u32 dummy_v, orig_v; |
45 | int ret; | 46 | int ret; |
46 | 47 | ||
47 | /* Clear PWRDN bit of HSDIVIDER */ | 48 | /* Clear PWRDN bit of HSDIVIDER */ |
48 | ret = omap2_dflt_clk_enable(clk); | 49 | ret = omap2_dflt_clk_enable(clk); |
49 | 50 | ||
50 | parent_hw = __clk_get_hw(__clk_get_parent(clk->clk)); | 51 | parent_hw = __clk_get_hw(__clk_get_parent(clk->clk)); |
51 | parent = to_clk_hw_omap(parent_hw); | 52 | parent = to_clk_divider(parent_hw); |
52 | 53 | ||
53 | /* Restore the dividers */ | 54 | /* Restore the dividers */ |
54 | if (!ret) { | 55 | if (!ret) { |
55 | clksel_shift = __ffs(parent->clksel_mask); | 56 | orig_v = __raw_readl(parent->reg); |
56 | orig_v = __raw_readl(parent->clksel_reg); | ||
57 | dummy_v = orig_v; | 57 | dummy_v = orig_v; |
58 | 58 | ||
59 | /* Write any other value different from the Read value */ | 59 | /* Write any other value different from the Read value */ |
60 | dummy_v ^= (1 << clksel_shift); | 60 | dummy_v ^= (1 << parent->shift); |
61 | __raw_writel(dummy_v, parent->clksel_reg); | 61 | __raw_writel(dummy_v, parent->reg); |
62 | 62 | ||
63 | /* Write the original divider */ | 63 | /* Write the original divider */ |
64 | __raw_writel(orig_v, parent->clksel_reg); | 64 | __raw_writel(orig_v, parent->reg); |
65 | } | 65 | } |
66 | 66 | ||
67 | return ret; | 67 | return ret; |
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c index 075f7cc51026..69337af748cc 100644 --- a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c | |||
@@ -2007,6 +2007,13 @@ static struct omap_hwmod am33xx_uart1_hwmod = { | |||
2007 | }, | 2007 | }, |
2008 | }; | 2008 | }; |
2009 | 2009 | ||
2010 | /* uart2 */ | ||
2011 | static struct omap_hwmod_dma_info uart2_edma_reqs[] = { | ||
2012 | { .name = "tx", .dma_req = 28, }, | ||
2013 | { .name = "rx", .dma_req = 29, }, | ||
2014 | { .dma_req = -1 } | ||
2015 | }; | ||
2016 | |||
2010 | static struct omap_hwmod_irq_info am33xx_uart2_irqs[] = { | 2017 | static struct omap_hwmod_irq_info am33xx_uart2_irqs[] = { |
2011 | { .irq = 73 + OMAP_INTC_START, }, | 2018 | { .irq = 73 + OMAP_INTC_START, }, |
2012 | { .irq = -1 }, | 2019 | { .irq = -1 }, |
@@ -2018,7 +2025,7 @@ static struct omap_hwmod am33xx_uart2_hwmod = { | |||
2018 | .clkdm_name = "l4ls_clkdm", | 2025 | .clkdm_name = "l4ls_clkdm", |
2019 | .flags = HWMOD_SWSUP_SIDLE_ACT, | 2026 | .flags = HWMOD_SWSUP_SIDLE_ACT, |
2020 | .mpu_irqs = am33xx_uart2_irqs, | 2027 | .mpu_irqs = am33xx_uart2_irqs, |
2021 | .sdma_reqs = uart1_edma_reqs, | 2028 | .sdma_reqs = uart2_edma_reqs, |
2022 | .main_clk = "dpll_per_m2_div4_ck", | 2029 | .main_clk = "dpll_per_m2_div4_ck", |
2023 | .prcm = { | 2030 | .prcm = { |
2024 | .omap4 = { | 2031 | .omap4 = { |
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c index c01859398b54..5a2d8034c8de 100644 --- a/arch/arm/mach-omap2/pm34xx.c +++ b/arch/arm/mach-omap2/pm34xx.c | |||
@@ -546,8 +546,10 @@ static void __init prcm_setup_regs(void) | |||
546 | /* Clear any pending PRCM interrupts */ | 546 | /* Clear any pending PRCM interrupts */ |
547 | omap2_prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET); | 547 | omap2_prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET); |
548 | 548 | ||
549 | if (omap3_has_iva()) | 549 | /* |
550 | omap3_iva_idle(); | 550 | * We need to idle iva2_pwrdm even on am3703 with no iva2. |
551 | */ | ||
552 | omap3_iva_idle(); | ||
551 | 553 | ||
552 | omap3_d2d_idle(); | 554 | omap3_d2d_idle(); |
553 | } | 555 | } |
diff --git a/arch/arm/mach-prima2/pm.c b/arch/arm/mach-prima2/pm.c index 9936c180bf01..8f595c0cc8d9 100644 --- a/arch/arm/mach-prima2/pm.c +++ b/arch/arm/mach-prima2/pm.c | |||
@@ -101,8 +101,10 @@ static int __init sirfsoc_of_pwrc_init(void) | |||
101 | struct device_node *np; | 101 | struct device_node *np; |
102 | 102 | ||
103 | np = of_find_matching_node(NULL, pwrc_ids); | 103 | np = of_find_matching_node(NULL, pwrc_ids); |
104 | if (!np) | 104 | if (!np) { |
105 | panic("unable to find compatible pwrc node in dtb\n"); | 105 | pr_err("unable to find compatible sirf pwrc node in dtb\n"); |
106 | return -ENOENT; | ||
107 | } | ||
106 | 108 | ||
107 | /* | 109 | /* |
108 | * pwrc behind rtciobrg is not located in memory space | 110 | * pwrc behind rtciobrg is not located in memory space |
diff --git a/arch/arm/mach-prima2/rstc.c b/arch/arm/mach-prima2/rstc.c index 435019ca0a48..d5e0cbc934c0 100644 --- a/arch/arm/mach-prima2/rstc.c +++ b/arch/arm/mach-prima2/rstc.c | |||
@@ -28,8 +28,10 @@ static int __init sirfsoc_of_rstc_init(void) | |||
28 | struct device_node *np; | 28 | struct device_node *np; |
29 | 29 | ||
30 | np = of_find_matching_node(NULL, rstc_ids); | 30 | np = of_find_matching_node(NULL, rstc_ids); |
31 | if (!np) | 31 | if (!np) { |
32 | panic("unable to find compatible rstc node in dtb\n"); | 32 | pr_err("unable to find compatible sirf rstc node in dtb\n"); |
33 | return -ENOENT; | ||
34 | } | ||
33 | 35 | ||
34 | sirfsoc_rstc_base = of_iomap(np, 0); | 36 | sirfsoc_rstc_base = of_iomap(np, 0); |
35 | if (!sirfsoc_rstc_base) | 37 | if (!sirfsoc_rstc_base) |
diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c index fdf3894b1cc3..9696f3646864 100644 --- a/arch/arm/mach-shmobile/setup-sh73a0.c +++ b/arch/arm/mach-shmobile/setup-sh73a0.c | |||
@@ -252,7 +252,7 @@ static struct sh_timer_config cmt10_platform_data = { | |||
252 | .name = "CMT10", | 252 | .name = "CMT10", |
253 | .channel_offset = 0x10, | 253 | .channel_offset = 0x10, |
254 | .timer_bit = 0, | 254 | .timer_bit = 0, |
255 | .clockevent_rating = 125, | 255 | .clockevent_rating = 80, |
256 | .clocksource_rating = 125, | 256 | .clocksource_rating = 125, |
257 | }; | 257 | }; |
258 | 258 | ||
diff --git a/arch/arm/mach-ux500/board-mop500-regulators.c b/arch/arm/mach-ux500/board-mop500-regulators.c index 33c353bc1c4a..d6b7c8556fa1 100644 --- a/arch/arm/mach-ux500/board-mop500-regulators.c +++ b/arch/arm/mach-ux500/board-mop500-regulators.c | |||
@@ -374,6 +374,7 @@ static struct ab8500_regulator_reg_init ab8500_reg_init[] = { | |||
374 | static struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = { | 374 | static struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = { |
375 | /* supplies to the display/camera */ | 375 | /* supplies to the display/camera */ |
376 | [AB8500_LDO_AUX1] = { | 376 | [AB8500_LDO_AUX1] = { |
377 | .supply_regulator = "ab8500-ext-supply3", | ||
377 | .constraints = { | 378 | .constraints = { |
378 | .name = "V-DISPLAY", | 379 | .name = "V-DISPLAY", |
379 | .min_uV = 2800000, | 380 | .min_uV = 2800000, |
@@ -387,6 +388,7 @@ static struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = { | |||
387 | }, | 388 | }, |
388 | /* supplies to the on-board eMMC */ | 389 | /* supplies to the on-board eMMC */ |
389 | [AB8500_LDO_AUX2] = { | 390 | [AB8500_LDO_AUX2] = { |
391 | .supply_regulator = "ab8500-ext-supply3", | ||
390 | .constraints = { | 392 | .constraints = { |
391 | .name = "V-eMMC1", | 393 | .name = "V-eMMC1", |
392 | .min_uV = 1100000, | 394 | .min_uV = 1100000, |
@@ -402,6 +404,7 @@ static struct regulator_init_data ab8500_regulators[AB8500_NUM_REGULATORS] = { | |||
402 | }, | 404 | }, |
403 | /* supply for VAUX3, supplies to SDcard slots */ | 405 | /* supply for VAUX3, supplies to SDcard slots */ |
404 | [AB8500_LDO_AUX3] = { | 406 | [AB8500_LDO_AUX3] = { |
407 | .supply_regulator = "ab8500-ext-supply3", | ||
405 | .constraints = { | 408 | .constraints = { |
406 | .name = "V-MMC-SD", | 409 | .name = "V-MMC-SD", |
407 | .min_uV = 1100000, | 410 | .min_uV = 1100000, |
diff --git a/arch/arm/mach-ux500/cpuidle.c b/arch/arm/mach-ux500/cpuidle.c index 317a2be129fb..a45dd09daed9 100644 --- a/arch/arm/mach-ux500/cpuidle.c +++ b/arch/arm/mach-ux500/cpuidle.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <asm/proc-fns.h> | 21 | #include <asm/proc-fns.h> |
22 | 22 | ||
23 | #include "db8500-regs.h" | 23 | #include "db8500-regs.h" |
24 | #include "id.h" | ||
24 | 25 | ||
25 | static atomic_t master = ATOMIC_INIT(0); | 26 | static atomic_t master = ATOMIC_INIT(0); |
26 | static DEFINE_SPINLOCK(master_lock); | 27 | static DEFINE_SPINLOCK(master_lock); |
@@ -114,6 +115,9 @@ static struct cpuidle_driver ux500_idle_driver = { | |||
114 | 115 | ||
115 | int __init ux500_idle_init(void) | 116 | int __init ux500_idle_init(void) |
116 | { | 117 | { |
118 | if (!(cpu_is_u8500_family() || cpu_is_ux540_family())) | ||
119 | return -ENODEV; | ||
120 | |||
117 | /* Configure wake up reasons */ | 121 | /* Configure wake up reasons */ |
118 | prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) | | 122 | prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) | |
119 | PRCMU_WAKEUP(ABB)); | 123 | PRCMU_WAKEUP(ABB)); |
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index c465faca51b0..d70e0aba0c9d 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c | |||
@@ -523,6 +523,147 @@ static void aurora_flush_range(unsigned long start, unsigned long end) | |||
523 | } | 523 | } |
524 | } | 524 | } |
525 | 525 | ||
526 | /* | ||
527 | * For certain Broadcom SoCs, depending on the address range, different offsets | ||
528 | * need to be added to the address before passing it to L2 for | ||
529 | * invalidation/clean/flush | ||
530 | * | ||
531 | * Section Address Range Offset EMI | ||
532 | * 1 0x00000000 - 0x3FFFFFFF 0x80000000 VC | ||
533 | * 2 0x40000000 - 0xBFFFFFFF 0x40000000 SYS | ||
534 | * 3 0xC0000000 - 0xFFFFFFFF 0x80000000 VC | ||
535 | * | ||
536 | * When the start and end addresses have crossed two different sections, we | ||
537 | * need to break the L2 operation into two, each within its own section. | ||
538 | * For example, if we need to invalidate an address range that starts at | ||
539 | * 0xBFFF0000 and ends at 0xC0001000, we need to invalidate 1) 0xBFFF0000 - | ||
540 | * 0xBFFFFFFF and 2) 0xC0000000 - 0xC0001000 | ||
541 | * | ||
542 | * Note 1: | ||
543 | * By breaking a single L2 operation into two, we may potentially suffer some | ||
544 | * performance hit, but keep in mind the cross section case is very rare | ||
545 | * | ||
546 | * Note 2: | ||
547 | * We do not need to handle the case when the start address is in | ||
548 | * Section 1 and the end address is in Section 3, since it is not a valid use | ||
549 | * case | ||
550 | * | ||
551 | * Note 3: | ||
552 | * Section 1 in practical terms can no longer be used on rev A2. Because of | ||
553 | * that the code does not need to handle section 1 at all. | ||
554 | * | ||
555 | */ | ||
556 | #define BCM_SYS_EMI_START_ADDR 0x40000000UL | ||
557 | #define BCM_VC_EMI_SEC3_START_ADDR 0xC0000000UL | ||
558 | |||
559 | #define BCM_SYS_EMI_OFFSET 0x40000000UL | ||
560 | #define BCM_VC_EMI_OFFSET 0x80000000UL | ||
561 | |||
562 | static inline int bcm_addr_is_sys_emi(unsigned long addr) | ||
563 | { | ||
564 | return (addr >= BCM_SYS_EMI_START_ADDR) && | ||
565 | (addr < BCM_VC_EMI_SEC3_START_ADDR); | ||
566 | } | ||
567 | |||
568 | static inline unsigned long bcm_l2_phys_addr(unsigned long addr) | ||
569 | { | ||
570 | if (bcm_addr_is_sys_emi(addr)) | ||
571 | return addr + BCM_SYS_EMI_OFFSET; | ||
572 | else | ||
573 | return addr + BCM_VC_EMI_OFFSET; | ||
574 | } | ||
575 | |||
576 | static void bcm_inv_range(unsigned long start, unsigned long end) | ||
577 | { | ||
578 | unsigned long new_start, new_end; | ||
579 | |||
580 | BUG_ON(start < BCM_SYS_EMI_START_ADDR); | ||
581 | |||
582 | if (unlikely(end <= start)) | ||
583 | return; | ||
584 | |||
585 | new_start = bcm_l2_phys_addr(start); | ||
586 | new_end = bcm_l2_phys_addr(end); | ||
587 | |||
588 | /* normal case, no cross section between start and end */ | ||
589 | if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { | ||
590 | l2x0_inv_range(new_start, new_end); | ||
591 | return; | ||
592 | } | ||
593 | |||
594 | /* They cross sections, so it can only be a cross from section | ||
595 | * 2 to section 3 | ||
596 | */ | ||
597 | l2x0_inv_range(new_start, | ||
598 | bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); | ||
599 | l2x0_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), | ||
600 | new_end); | ||
601 | } | ||
602 | |||
603 | static void bcm_clean_range(unsigned long start, unsigned long end) | ||
604 | { | ||
605 | unsigned long new_start, new_end; | ||
606 | |||
607 | BUG_ON(start < BCM_SYS_EMI_START_ADDR); | ||
608 | |||
609 | if (unlikely(end <= start)) | ||
610 | return; | ||
611 | |||
612 | if ((end - start) >= l2x0_size) { | ||
613 | l2x0_clean_all(); | ||
614 | return; | ||
615 | } | ||
616 | |||
617 | new_start = bcm_l2_phys_addr(start); | ||
618 | new_end = bcm_l2_phys_addr(end); | ||
619 | |||
620 | /* normal case, no cross section between start and end */ | ||
621 | if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { | ||
622 | l2x0_clean_range(new_start, new_end); | ||
623 | return; | ||
624 | } | ||
625 | |||
626 | /* They cross sections, so it can only be a cross from section | ||
627 | * 2 to section 3 | ||
628 | */ | ||
629 | l2x0_clean_range(new_start, | ||
630 | bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); | ||
631 | l2x0_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), | ||
632 | new_end); | ||
633 | } | ||
634 | |||
635 | static void bcm_flush_range(unsigned long start, unsigned long end) | ||
636 | { | ||
637 | unsigned long new_start, new_end; | ||
638 | |||
639 | BUG_ON(start < BCM_SYS_EMI_START_ADDR); | ||
640 | |||
641 | if (unlikely(end <= start)) | ||
642 | return; | ||
643 | |||
644 | if ((end - start) >= l2x0_size) { | ||
645 | l2x0_flush_all(); | ||
646 | return; | ||
647 | } | ||
648 | |||
649 | new_start = bcm_l2_phys_addr(start); | ||
650 | new_end = bcm_l2_phys_addr(end); | ||
651 | |||
652 | /* normal case, no cross section between start and end */ | ||
653 | if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { | ||
654 | l2x0_flush_range(new_start, new_end); | ||
655 | return; | ||
656 | } | ||
657 | |||
658 | /* They cross sections, so it can only be a cross from section | ||
659 | * 2 to section 3 | ||
660 | */ | ||
661 | l2x0_flush_range(new_start, | ||
662 | bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); | ||
663 | l2x0_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), | ||
664 | new_end); | ||
665 | } | ||
666 | |||
526 | static void __init l2x0_of_setup(const struct device_node *np, | 667 | static void __init l2x0_of_setup(const struct device_node *np, |
527 | u32 *aux_val, u32 *aux_mask) | 668 | u32 *aux_val, u32 *aux_mask) |
528 | { | 669 | { |
@@ -765,6 +906,21 @@ static const struct l2x0_of_data aurora_no_outer_data = { | |||
765 | }, | 906 | }, |
766 | }; | 907 | }; |
767 | 908 | ||
909 | static const struct l2x0_of_data bcm_l2x0_data = { | ||
910 | .setup = pl310_of_setup, | ||
911 | .save = pl310_save, | ||
912 | .outer_cache = { | ||
913 | .resume = pl310_resume, | ||
914 | .inv_range = bcm_inv_range, | ||
915 | .clean_range = bcm_clean_range, | ||
916 | .flush_range = bcm_flush_range, | ||
917 | .sync = l2x0_cache_sync, | ||
918 | .flush_all = l2x0_flush_all, | ||
919 | .inv_all = l2x0_inv_all, | ||
920 | .disable = l2x0_disable, | ||
921 | }, | ||
922 | }; | ||
923 | |||
768 | static const struct of_device_id l2x0_ids[] __initconst = { | 924 | static const struct of_device_id l2x0_ids[] __initconst = { |
769 | { .compatible = "arm,pl310-cache", .data = (void *)&pl310_data }, | 925 | { .compatible = "arm,pl310-cache", .data = (void *)&pl310_data }, |
770 | { .compatible = "arm,l220-cache", .data = (void *)&l2x0_data }, | 926 | { .compatible = "arm,l220-cache", .data = (void *)&l2x0_data }, |
@@ -773,6 +929,8 @@ static const struct of_device_id l2x0_ids[] __initconst = { | |||
773 | .data = (void *)&aurora_no_outer_data}, | 929 | .data = (void *)&aurora_no_outer_data}, |
774 | { .compatible = "marvell,aurora-outer-cache", | 930 | { .compatible = "marvell,aurora-outer-cache", |
775 | .data = (void *)&aurora_with_outer_data}, | 931 | .data = (void *)&aurora_with_outer_data}, |
932 | { .compatible = "bcm,bcm11351-a2-pl310-cache", | ||
933 | .data = (void *)&bcm_l2x0_data}, | ||
776 | {} | 934 | {} |
777 | }; | 935 | }; |
778 | 936 | ||
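To make the Broadcom section table above concrete, here is a standalone worked example of the address remapping (names simplified from the bcm_* helpers above). The range 0xBFFF0000-0xC0001000 straddles the section 2/3 boundary and is exactly the case the new bcm_*_range functions split into two L2 operations:

#include <stdio.h>

#define SYS_EMI_START   0x40000000u
#define VC_SEC3_START   0xC0000000u
#define SYS_EMI_OFFSET  0x40000000u
#define VC_EMI_OFFSET   0x80000000u

static int is_sys_emi(unsigned int addr)
{
	return addr >= SYS_EMI_START && addr < VC_SEC3_START;
}

static unsigned int l2_alias(unsigned int addr)
{
	/* 32-bit arithmetic wraps, as it does for unsigned long on ARM */
	return addr + (is_sys_emi(addr) ? SYS_EMI_OFFSET : VC_EMI_OFFSET);
}

int main(void)
{
	/* 0xBFFF0000 is in SYS EMI (section 2), 0xC0001000 in VC EMI
	 * (section 3): a range spanning both must be split in two. */
	printf("%#x -> %#x\n", 0xBFFF0000u, l2_alias(0xBFFF0000u));
	printf("%#x -> %#x\n", 0xC0001000u, l2_alias(0xC0001000u));
	return 0;
}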
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index 15451ee4acc8..515b00064da8 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S | |||
@@ -92,6 +92,14 @@ ENTRY(v7_flush_dcache_louis) | |||
92 | mrc p15, 1, r0, c0, c0, 1 @ read clidr, r0 = clidr | 92 | mrc p15, 1, r0, c0, c0, 1 @ read clidr, r0 = clidr |
93 | ALT_SMP(ands r3, r0, #(7 << 21)) @ extract LoUIS from clidr | 93 | ALT_SMP(ands r3, r0, #(7 << 21)) @ extract LoUIS from clidr |
94 | ALT_UP(ands r3, r0, #(7 << 27)) @ extract LoUU from clidr | 94 | ALT_UP(ands r3, r0, #(7 << 27)) @ extract LoUU from clidr |
95 | #ifdef CONFIG_ARM_ERRATA_643719 | ||
96 | ALT_SMP(mrceq p15, 0, r2, c0, c0, 0) @ read main ID register | ||
97 | ALT_UP(moveq pc, lr) @ LoUU is zero, so nothing to do | ||
98 | ldreq r1, =0x410fc090 @ ID of ARM Cortex A9 r0p? | ||
99 | biceq r2, r2, #0x0000000f @ clear minor revision number | ||
100 | teqeq r2, r1 @ test for errata affected core and if so... | ||
101 | orreqs r3, #(1 << 21) @ fix LoUIS value (and set flags state to 'ne') | ||
102 | #endif | ||
95 | ALT_SMP(mov r3, r3, lsr #20) @ r3 = LoUIS * 2 | 103 | ALT_SMP(mov r3, r3, lsr #20) @ r3 = LoUIS * 2 |
96 | ALT_UP(mov r3, r3, lsr #26) @ r3 = LoUU * 2 | 104 | ALT_UP(mov r3, r3, lsr #26) @ r3 = LoUU * 2 |
97 | moveq pc, lr @ return if level == 0 | 105 | moveq pc, lr @ return if level == 0 |
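The errata hunk above identifies the affected parts purely from the main ID register: Cortex-A9 r0pX reports MIDR 0x410fc09X, so clearing the minor-revision nibble and comparing against 0x410fc090 is enough. As a sketch in C:

#include <stdbool.h>
#include <stdint.h>

/* True for Cortex-A9 r0p* parts, the cores whose CLIDR reports a
 * LoUIS of zero and therefore need the fix-up above. */
static bool midr_needs_643719_fixup(uint32_t midr)
{
	return (midr & ~UINT32_C(0xf)) == UINT32_C(0x410fc090);
}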
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c index 2ac37372ef52..eeab06ebd06e 100644 --- a/arch/arm/mm/context.c +++ b/arch/arm/mm/context.c | |||
@@ -39,19 +39,43 @@ | |||
39 | * non 64-bit operations. | 39 | * non 64-bit operations. |
40 | */ | 40 | */ |
41 | #define ASID_FIRST_VERSION (1ULL << ASID_BITS) | 41 | #define ASID_FIRST_VERSION (1ULL << ASID_BITS) |
42 | #define NUM_USER_ASIDS (ASID_FIRST_VERSION - 1) | 42 | #define NUM_USER_ASIDS ASID_FIRST_VERSION |
43 | |||
44 | #define ASID_TO_IDX(asid) ((asid & ~ASID_MASK) - 1) | ||
45 | #define IDX_TO_ASID(idx) ((idx + 1) & ~ASID_MASK) | ||
46 | 43 | ||
47 | static DEFINE_RAW_SPINLOCK(cpu_asid_lock); | 44 | static DEFINE_RAW_SPINLOCK(cpu_asid_lock); |
48 | static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION); | 45 | static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION); |
49 | static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS); | 46 | static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS); |
50 | 47 | ||
51 | DEFINE_PER_CPU(atomic64_t, active_asids); | 48 | static DEFINE_PER_CPU(atomic64_t, active_asids); |
52 | static DEFINE_PER_CPU(u64, reserved_asids); | 49 | static DEFINE_PER_CPU(u64, reserved_asids); |
53 | static cpumask_t tlb_flush_pending; | 50 | static cpumask_t tlb_flush_pending; |
54 | 51 | ||
52 | #ifdef CONFIG_ARM_ERRATA_798181 | ||
53 | void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm, | ||
54 | cpumask_t *mask) | ||
55 | { | ||
56 | int cpu; | ||
57 | unsigned long flags; | ||
58 | u64 context_id, asid; | ||
59 | |||
60 | raw_spin_lock_irqsave(&cpu_asid_lock, flags); | ||
61 | context_id = mm->context.id.counter; | ||
62 | for_each_online_cpu(cpu) { | ||
63 | if (cpu == this_cpu) | ||
64 | continue; | ||
65 | /* | ||
66 | * We only need to send an IPI if the other CPUs are | ||
67 | * running the same ASID as the one being invalidated. | ||
68 | */ | ||
69 | asid = per_cpu(active_asids, cpu).counter; | ||
70 | if (asid == 0) | ||
71 | asid = per_cpu(reserved_asids, cpu); | ||
72 | if (context_id == asid) | ||
73 | cpumask_set_cpu(cpu, mask); | ||
74 | } | ||
75 | raw_spin_unlock_irqrestore(&cpu_asid_lock, flags); | ||
76 | } | ||
77 | #endif | ||
78 | |||
55 | #ifdef CONFIG_ARM_LPAE | 79 | #ifdef CONFIG_ARM_LPAE |
56 | static void cpu_set_reserved_ttbr0(void) | 80 | static void cpu_set_reserved_ttbr0(void) |
57 | { | 81 | { |
@@ -128,7 +152,16 @@ static void flush_context(unsigned int cpu) | |||
128 | asid = 0; | 152 | asid = 0; |
129 | } else { | 153 | } else { |
130 | asid = atomic64_xchg(&per_cpu(active_asids, i), 0); | 154 | asid = atomic64_xchg(&per_cpu(active_asids, i), 0); |
131 | __set_bit(ASID_TO_IDX(asid), asid_map); | 155 | /* |
156 | * If this CPU has already been through a | ||
157 | * rollover, but hasn't run another task in | ||
158 | * the meantime, we must preserve its reserved | ||
159 | * ASID, as this is the only trace we have of | ||
160 | * the process it is still running. | ||
161 | */ | ||
162 | if (asid == 0) | ||
163 | asid = per_cpu(reserved_asids, i); | ||
164 | __set_bit(asid & ~ASID_MASK, asid_map); | ||
132 | } | 165 | } |
133 | per_cpu(reserved_asids, i) = asid; | 166 | per_cpu(reserved_asids, i) = asid; |
134 | } | 167 | } |
@@ -167,17 +200,19 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu) | |||
167 | /* | 200 | /* |
168 | * Allocate a free ASID. If we can't find one, take a | 201 | * Allocate a free ASID. If we can't find one, take a |
169 | * note of the currently active ASIDs and mark the TLBs | 202 | * note of the currently active ASIDs and mark the TLBs |
170 | * as requiring flushes. | 203 | * as requiring flushes. We always count from ASID #1, |
204 | * as we reserve ASID #0 to switch via TTBR0 and indicate | ||
205 | * rollover events. | ||
171 | */ | 206 | */ |
172 | asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS); | 207 | asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1); |
173 | if (asid == NUM_USER_ASIDS) { | 208 | if (asid == NUM_USER_ASIDS) { |
174 | generation = atomic64_add_return(ASID_FIRST_VERSION, | 209 | generation = atomic64_add_return(ASID_FIRST_VERSION, |
175 | &asid_generation); | 210 | &asid_generation); |
176 | flush_context(cpu); | 211 | flush_context(cpu); |
177 | asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS); | 212 | asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1); |
178 | } | 213 | } |
179 | __set_bit(asid, asid_map); | 214 | __set_bit(asid, asid_map); |
180 | asid = generation | IDX_TO_ASID(asid); | 215 | asid |= generation; |
181 | cpumask_clear(mm_cpumask(mm)); | 216 | cpumask_clear(mm_cpumask(mm)); |
182 | } | 217 | } |
183 | 218 | ||
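
With ASID_TO_IDX()/IDX_TO_ASID() gone, a bitmap index now is the hardware ASID and the generation is simply ORed on top, with index 0 permanently reserved for TTBR0 switching and rollover. A small standalone illustration of the resulting layout, assuming ASID_BITS is 8 as on non-LPAE ARM:

	#include <stdint.h>
	#include <stdio.h>

	#define ASID_BITS		8
	#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
	#define ASID_MASK		(~0ULL << ASID_BITS)

	int main(void)
	{
		uint64_t generation = 3 * ASID_FIRST_VERSION;	/* after three rollovers */
		uint64_t idx = 42;				/* bitmap slot, never 0 */
		uint64_t context_id = idx | generation;

		/* prints "context id 0x32a, hardware ASID 42" */
		printf("context id %#llx, hardware ASID %llu\n",
		       (unsigned long long)context_id,
		       (unsigned long long)(context_id & ~ASID_MASK));
		return 0;
	}
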
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index ef3e0f3aac96..c038ec0738ac 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -880,10 +880,24 @@ static void __dma_page_dev_to_cpu(struct page *page, unsigned long off, | |||
880 | dma_cache_maint_page(page, off, size, dir, dmac_unmap_area); | 880 | dma_cache_maint_page(page, off, size, dir, dmac_unmap_area); |
881 | 881 | ||
882 | /* | 882 | /* |
883 | * Mark the D-cache clean for this page to avoid extra flushing. | 883 | * Mark the D-cache clean for these pages to avoid extra flushing. |
884 | */ | 884 | */ |
885 | if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE) | 885 | if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) { |
886 | set_bit(PG_dcache_clean, &page->flags); | 886 | unsigned long pfn; |
887 | size_t left = size; | ||
888 | |||
889 | pfn = page_to_pfn(page) + off / PAGE_SIZE; | ||
890 | off %= PAGE_SIZE; | ||
891 | if (off) { | ||
892 | pfn++; | ||
893 | left -= PAGE_SIZE - off; | ||
894 | } | ||
895 | while (left >= PAGE_SIZE) { | ||
896 | page = pfn_to_page(pfn++); | ||
897 | set_bit(PG_dcache_clean, &page->flags); | ||
898 | left -= PAGE_SIZE; | ||
899 | } | ||
900 | } | ||
887 | } | 901 | } |
888 | 902 | ||
889 | /** | 903 | /** |
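
The replacement loop above marks only the pages that the unmapped buffer covers completely, skipping a partial first page. The arithmetic can be seen in isolation with a small standalone model; PAGE_SIZE is fixed at 4 KiB for the example, and as in the kernel the path is only entered when size >= PAGE_SIZE:

	#include <stdio.h>
	#include <stddef.h>

	#define PAGE_SIZE 4096UL

	/* Model of the loop: a buffer starting 'off' bytes into the page at
	 * 'first_pfn' and 'size' bytes long marks only fully covered pages. */
	static void mark_clean_pages(unsigned long first_pfn, size_t off, size_t size)
	{
		unsigned long pfn = first_pfn + off / PAGE_SIZE;
		size_t left = size;

		off %= PAGE_SIZE;
		if (off) {			/* partial first page: skip it */
			pfn++;
			left -= PAGE_SIZE - off;
		}
		while (left >= PAGE_SIZE) {
			printf("mark pfn %lu clean\n", pfn++);
			left -= PAGE_SIZE;
		}
	}

	int main(void)
	{
		/* buffer of 3 pages starting 512 bytes into pfn 100: marks 101, 102 */
		mark_clean_pages(100, 512, 3 * PAGE_SIZE);
		return 0;
	}
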
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c index 0d473cce501c..e4ac5d8278e1 100644 --- a/arch/arm/mm/flush.c +++ b/arch/arm/mm/flush.c | |||
@@ -287,7 +287,7 @@ void flush_dcache_page(struct page *page) | |||
287 | mapping = page_mapping(page); | 287 | mapping = page_mapping(page); |
288 | 288 | ||
289 | if (!cache_ops_need_broadcast() && | 289 | if (!cache_ops_need_broadcast() && |
290 | mapping && !mapping_mapped(mapping)) | 290 | mapping && !page_mapped(page)) |
291 | clear_bit(PG_dcache_clean, &page->flags); | 291 | clear_bit(PG_dcache_clean, &page->flags); |
292 | else { | 292 | else { |
293 | __flush_dcache_page(mapping, page); | 293 | __flush_dcache_page(mapping, page); |
@@ -301,6 +301,39 @@ void flush_dcache_page(struct page *page) | |||
301 | EXPORT_SYMBOL(flush_dcache_page); | 301 | EXPORT_SYMBOL(flush_dcache_page); |
302 | 302 | ||
303 | /* | 303 | /* |
304 | * Ensure cache coherency for the kernel mapping of this page. We can | ||
305 | * assume that the page is pinned via kmap. | ||
306 | * | ||
307 | * If the page only exists in the page cache and there are no user | ||
308 | * space mappings, this is a no-op since the page was already marked | ||
309 | * dirty at creation. Otherwise, we need to flush the dirty kernel | ||
310 | * cache lines directly. | ||
311 | */ | ||
312 | void flush_kernel_dcache_page(struct page *page) | ||
313 | { | ||
314 | if (cache_is_vivt() || cache_is_vipt_aliasing()) { | ||
315 | struct address_space *mapping; | ||
316 | |||
317 | mapping = page_mapping(page); | ||
318 | |||
319 | if (!mapping || mapping_mapped(mapping)) { | ||
320 | void *addr; | ||
321 | |||
322 | addr = page_address(page); | ||
323 | /* | ||
324 | * kmap_atomic() doesn't set the page virtual | ||
325 | * address for highmem pages, and | ||
326 | * kunmap_atomic() takes care of cache | ||
327 | * flushing already. | ||
328 | */ | ||
329 | if (!IS_ENABLED(CONFIG_HIGHMEM) || addr) | ||
330 | __cpuc_flush_dcache_area(addr, PAGE_SIZE); | ||
331 | } | ||
332 | } | ||
333 | } | ||
334 | EXPORT_SYMBOL(flush_kernel_dcache_page); | ||
335 | |||
336 | /* | ||
304 | * Flush an anonymous page so that users of get_user_pages() | 337 | * Flush an anonymous page so that users of get_user_pages() |
305 | * can safely access the data. The expected sequence is: | 338 | * can safely access the data. The expected sequence is: |
306 | * | 339 | * |
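
The usual caller pattern for the new helper is a driver that writes a page through its kernel mapping before handing it back; on non-aliasing caches the call reduces to the cheap checks above. The fragment below is an illustrative kernel-context sketch, not part of the patch:

	#include <linux/highmem.h>
	#include <linux/string.h>
	#include <linux/types.h>

	/* Illustrative only: fill a page via its kernel mapping, then push any
	 * dirty cache lines so other mappings see the data. */
	static void fill_page_pattern(struct page *page, u8 pattern)
	{
		void *addr = kmap_atomic(page);

		memset(addr, pattern, PAGE_SIZE);
		flush_kernel_dcache_page(page);	/* before dropping the mapping */
		kunmap_atomic(addr);
	}
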
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index 04d9006eab1f..f123d6eb074b 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c | |||
@@ -331,10 +331,10 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, | |||
331 | return (void __iomem *) (offset + addr); | 331 | return (void __iomem *) (offset + addr); |
332 | } | 332 | } |
333 | 333 | ||
334 | void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size, | 334 | void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size, |
335 | unsigned int mtype, void *caller) | 335 | unsigned int mtype, void *caller) |
336 | { | 336 | { |
337 | unsigned long last_addr; | 337 | phys_addr_t last_addr; |
338 | unsigned long offset = phys_addr & ~PAGE_MASK; | 338 | unsigned long offset = phys_addr & ~PAGE_MASK; |
339 | unsigned long pfn = __phys_to_pfn(phys_addr); | 339 | unsigned long pfn = __phys_to_pfn(phys_addr); |
340 | 340 | ||
@@ -367,12 +367,12 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size, | |||
367 | } | 367 | } |
368 | EXPORT_SYMBOL(__arm_ioremap_pfn); | 368 | EXPORT_SYMBOL(__arm_ioremap_pfn); |
369 | 369 | ||
370 | void __iomem * (*arch_ioremap_caller)(unsigned long, size_t, | 370 | void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, |
371 | unsigned int, void *) = | 371 | unsigned int, void *) = |
372 | __arm_ioremap_caller; | 372 | __arm_ioremap_caller; |
373 | 373 | ||
374 | void __iomem * | 374 | void __iomem * |
375 | __arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype) | 375 | __arm_ioremap(phys_addr_t phys_addr, size_t size, unsigned int mtype) |
376 | { | 376 | { |
377 | return arch_ioremap_caller(phys_addr, size, mtype, | 377 | return arch_ioremap_caller(phys_addr, size, mtype, |
378 | __builtin_return_address(0)); | 378 | __builtin_return_address(0)); |
@@ -387,7 +387,7 @@ EXPORT_SYMBOL(__arm_ioremap); | |||
387 | * CONFIG_GENERIC_ALLOCATOR for allocating external memory. | 387 | * CONFIG_GENERIC_ALLOCATOR for allocating external memory. |
388 | */ | 388 | */ |
389 | void __iomem * | 389 | void __iomem * |
390 | __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached) | 390 | __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached) |
391 | { | 391 | { |
392 | unsigned int mtype; | 392 | unsigned int mtype; |
393 | 393 | ||
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index e0d8565671a6..4d409e6a552d 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c | |||
@@ -616,10 +616,12 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr, | |||
616 | } while (pte++, addr += PAGE_SIZE, addr != end); | 616 | } while (pte++, addr += PAGE_SIZE, addr != end); |
617 | } | 617 | } |
618 | 618 | ||
619 | static void __init map_init_section(pmd_t *pmd, unsigned long addr, | 619 | static void __init __map_init_section(pmd_t *pmd, unsigned long addr, |
620 | unsigned long end, phys_addr_t phys, | 620 | unsigned long end, phys_addr_t phys, |
621 | const struct mem_type *type) | 621 | const struct mem_type *type) |
622 | { | 622 | { |
623 | pmd_t *p = pmd; | ||
624 | |||
623 | #ifndef CONFIG_ARM_LPAE | 625 | #ifndef CONFIG_ARM_LPAE |
624 | /* | 626 | /* |
625 | * In classic MMU format, puds and pmds are folded in to | 627 | * In classic MMU format, puds and pmds are folded in to |
@@ -638,7 +640,7 @@ static void __init map_init_section(pmd_t *pmd, unsigned long addr, | |||
638 | phys += SECTION_SIZE; | 640 | phys += SECTION_SIZE; |
639 | } while (pmd++, addr += SECTION_SIZE, addr != end); | 641 | } while (pmd++, addr += SECTION_SIZE, addr != end); |
640 | 642 | ||
641 | flush_pmd_entry(pmd); | 643 | flush_pmd_entry(p); |
642 | } | 644 | } |
643 | 645 | ||
644 | static void __init alloc_init_pmd(pud_t *pud, unsigned long addr, | 646 | static void __init alloc_init_pmd(pud_t *pud, unsigned long addr, |
@@ -661,7 +663,7 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr, | |||
661 | */ | 663 | */ |
662 | if (type->prot_sect && | 664 | if (type->prot_sect && |
663 | ((addr | next | phys) & ~SECTION_MASK) == 0) { | 665 | ((addr | next | phys) & ~SECTION_MASK) == 0) { |
664 | map_init_section(pmd, addr, next, phys, type); | 666 | __map_init_section(pmd, addr, next, phys, type); |
665 | } else { | 667 | } else { |
666 | alloc_init_pte(pmd, addr, next, | 668 | alloc_init_pte(pmd, addr, next, |
667 | __phys_to_pfn(phys), type); | 669 | __phys_to_pfn(phys), type); |
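
The rename to __map_init_section is cosmetic; the real fix is flushing through the saved pointer 'p', because the do/while loop leaves 'pmd' pointing past the entries it just wrote. A tiny standalone illustration of that pointer behaviour:

	#include <stdio.h>

	int main(void)
	{
		unsigned long pmds[4] = { 0 };
		unsigned long *pmd = pmds;
		unsigned long *p = pmd;		/* saved before the loop, as in the fix */
		int i;

		for (i = 0; i < 2; i++)
			*pmd++ = 0x2;		/* stand-in for a section entry */

		/* prints "loop ended at index 2, flush must start at index 0" */
		printf("loop ended at index %td, flush must start at index %td\n",
		       pmd - pmds, p - pmds);
		return 0;
	}
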
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index d51225f90ae2..7fe0524a5449 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c | |||
@@ -57,6 +57,12 @@ void flush_dcache_page(struct page *page) | |||
57 | } | 57 | } |
58 | EXPORT_SYMBOL(flush_dcache_page); | 58 | EXPORT_SYMBOL(flush_dcache_page); |
59 | 59 | ||
60 | void flush_kernel_dcache_page(struct page *page) | ||
61 | { | ||
62 | __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE); | ||
63 | } | ||
64 | EXPORT_SYMBOL(flush_kernel_dcache_page); | ||
65 | |||
60 | void copy_to_user_page(struct vm_area_struct *vma, struct page *page, | 66 | void copy_to_user_page(struct vm_area_struct *vma, struct page *page, |
61 | unsigned long uaddr, void *dst, const void *src, | 67 | unsigned long uaddr, void *dst, const void *src, |
62 | unsigned long len) | 68 | unsigned long len) |
@@ -81,16 +87,16 @@ void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset, | |||
81 | return __arm_ioremap_pfn(pfn, offset, size, mtype); | 87 | return __arm_ioremap_pfn(pfn, offset, size, mtype); |
82 | } | 88 | } |
83 | 89 | ||
84 | void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size, | 90 | void __iomem *__arm_ioremap(phys_addr_t phys_addr, size_t size, |
85 | unsigned int mtype) | 91 | unsigned int mtype) |
86 | { | 92 | { |
87 | return (void __iomem *)phys_addr; | 93 | return (void __iomem *)phys_addr; |
88 | } | 94 | } |
89 | EXPORT_SYMBOL(__arm_ioremap); | 95 | EXPORT_SYMBOL(__arm_ioremap); |
90 | 96 | ||
91 | void __iomem * (*arch_ioremap_caller)(unsigned long, size_t, unsigned int, void *); | 97 | void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *); |
92 | 98 | ||
93 | void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size, | 99 | void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size, |
94 | unsigned int mtype, void *caller) | 100 | unsigned int mtype, void *caller) |
95 | { | 101 | { |
96 | return __arm_ioremap(phys_addr, size, mtype); | 102 | return __arm_ioremap(phys_addr, size, mtype); |
diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S index d217e9795d74..aaeb6c127c7a 100644 --- a/arch/arm/mm/proc-fa526.S +++ b/arch/arm/mm/proc-fa526.S | |||
@@ -81,7 +81,6 @@ ENDPROC(cpu_fa526_reset) | |||
81 | */ | 81 | */ |
82 | .align 4 | 82 | .align 4 |
83 | ENTRY(cpu_fa526_do_idle) | 83 | ENTRY(cpu_fa526_do_idle) |
84 | mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt | ||
85 | mov pc, lr | 84 | mov pc, lr |
86 | 85 | ||
87 | 86 | ||
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S index f9a0aa725ea9..e3c48a3fe063 100644 --- a/arch/arm/mm/proc-macros.S +++ b/arch/arm/mm/proc-macros.S | |||
@@ -333,3 +333,8 @@ ENTRY(\name\()_tlb_fns) | |||
333 | .endif | 333 | .endif |
334 | .size \name\()_tlb_fns, . - \name\()_tlb_fns | 334 | .size \name\()_tlb_fns, . - \name\()_tlb_fns |
335 | .endm | 335 | .endm |
336 | |||
337 | .macro globl_equ x, y | ||
338 | .globl \x | ||
339 | .equ \x, \y | ||
340 | .endm | ||
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 2c73a7301ff7..e35fec34453e 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S | |||
@@ -140,6 +140,29 @@ ENTRY(cpu_v7_do_resume) | |||
140 | ENDPROC(cpu_v7_do_resume) | 140 | ENDPROC(cpu_v7_do_resume) |
141 | #endif | 141 | #endif |
142 | 142 | ||
143 | #ifdef CONFIG_CPU_PJ4B | ||
144 | globl_equ cpu_pj4b_switch_mm, cpu_v7_switch_mm | ||
145 | globl_equ cpu_pj4b_set_pte_ext, cpu_v7_set_pte_ext | ||
146 | globl_equ cpu_pj4b_proc_init, cpu_v7_proc_init | ||
147 | globl_equ cpu_pj4b_proc_fin, cpu_v7_proc_fin | ||
148 | globl_equ cpu_pj4b_reset, cpu_v7_reset | ||
149 | #ifdef CONFIG_PJ4B_ERRATA_4742 | ||
150 | ENTRY(cpu_pj4b_do_idle) | ||
151 | dsb @ WFI may enter a low-power mode | ||
152 | wfi | ||
153 | dsb @barrier | ||
154 | mov pc, lr | ||
155 | ENDPROC(cpu_pj4b_do_idle) | ||
156 | #else | ||
157 | globl_equ cpu_pj4b_do_idle, cpu_v7_do_idle | ||
158 | #endif | ||
159 | globl_equ cpu_pj4b_dcache_clean_area, cpu_v7_dcache_clean_area | ||
160 | globl_equ cpu_pj4b_do_suspend, cpu_v7_do_suspend | ||
161 | globl_equ cpu_pj4b_do_resume, cpu_v7_do_resume | ||
162 | globl_equ cpu_pj4b_suspend_size, cpu_v7_suspend_size | ||
163 | |||
164 | #endif | ||
165 | |||
143 | __CPUINIT | 166 | __CPUINIT |
144 | 167 | ||
145 | /* | 168 | /* |
@@ -350,6 +373,9 @@ __v7_setup_stack: | |||
350 | 373 | ||
351 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) | 374 | @ define struct processor (see <asm/proc-fns.h> and proc-macros.S) |
352 | define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1 | 375 | define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1 |
376 | #ifdef CONFIG_CPU_PJ4B | ||
377 | define_processor_functions pj4b, dabort=v7_early_abort, pabort=v7_pabort, suspend=1 | ||
378 | #endif | ||
353 | 379 | ||
354 | .section ".rodata" | 380 | .section ".rodata" |
355 | 381 | ||
@@ -362,7 +388,7 @@ __v7_setup_stack: | |||
362 | /* | 388 | /* |
363 | * Standard v7 proc info content | 389 | * Standard v7 proc info content |
364 | */ | 390 | */ |
365 | .macro __v7_proc initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0 | 391 | .macro __v7_proc initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0, proc_fns = v7_processor_functions |
366 | ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \ | 392 | ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \ |
367 | PMD_SECT_AF | PMD_FLAGS_SMP | \mm_mmuflags) | 393 | PMD_SECT_AF | PMD_FLAGS_SMP | \mm_mmuflags) |
368 | ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \ | 394 | ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \ |
@@ -375,7 +401,7 @@ __v7_setup_stack: | |||
375 | .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_FAST_MULT | \ | 401 | .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_FAST_MULT | \ |
376 | HWCAP_EDSP | HWCAP_TLS | \hwcaps | 402 | HWCAP_EDSP | HWCAP_TLS | \hwcaps |
377 | .long cpu_v7_name | 403 | .long cpu_v7_name |
378 | .long v7_processor_functions | 404 | .long \proc_fns |
379 | .long v7wbi_tlb_fns | 405 | .long v7wbi_tlb_fns |
380 | .long v6_user_fns | 406 | .long v6_user_fns |
381 | .long v7_cache_fns | 407 | .long v7_cache_fns |
@@ -407,12 +433,14 @@ __v7_ca9mp_proc_info: | |||
407 | /* | 433 | /* |
408 | * Marvell PJ4B processor. | 434 | * Marvell PJ4B processor. |
409 | */ | 435 | */ |
436 | #ifdef CONFIG_CPU_PJ4B | ||
410 | .type __v7_pj4b_proc_info, #object | 437 | .type __v7_pj4b_proc_info, #object |
411 | __v7_pj4b_proc_info: | 438 | __v7_pj4b_proc_info: |
412 | .long 0x562f5840 | 439 | .long 0x560f5800 |
413 | .long 0xfffffff0 | 440 | .long 0xff0fff00 |
414 | __v7_proc __v7_pj4b_setup | 441 | __v7_proc __v7_pj4b_setup, proc_fns = pj4b_processor_functions |
415 | .size __v7_pj4b_proc_info, . - __v7_pj4b_proc_info | 442 | .size __v7_pj4b_proc_info, . - __v7_pj4b_proc_info |
443 | #endif | ||
416 | 444 | ||
417 | /* | 445 | /* |
418 | * ARM Ltd. Cortex A7 processor. | 446 | * ARM Ltd. Cortex A7 processor. |
diff --git a/arch/arm/plat-samsung/include/plat/uncompress.h b/arch/arm/plat-samsung/include/plat/uncompress.h index 438b24846e7f..02b66d723d1a 100644 --- a/arch/arm/plat-samsung/include/plat/uncompress.h +++ b/arch/arm/plat-samsung/include/plat/uncompress.h | |||
@@ -66,6 +66,9 @@ uart_rd(unsigned int reg) | |||
66 | 66 | ||
67 | static void putc(int ch) | 67 | static void putc(int ch) |
68 | { | 68 | { |
69 | if (!config_enabled(CONFIG_DEBUG_LL)) | ||
70 | return; | ||
71 | |||
69 | if (uart_rd(S3C2410_UFCON) & S3C2410_UFCON_FIFOMODE) { | 72 | if (uart_rd(S3C2410_UFCON) & S3C2410_UFCON_FIFOMODE) { |
70 | int level; | 73 | int level; |
71 | 74 | ||
@@ -118,7 +121,12 @@ static void arch_decomp_error(const char *x) | |||
118 | #ifdef CONFIG_S3C_BOOT_UART_FORCE_FIFO | 121 | #ifdef CONFIG_S3C_BOOT_UART_FORCE_FIFO |
119 | static inline void arch_enable_uart_fifo(void) | 122 | static inline void arch_enable_uart_fifo(void) |
120 | { | 123 | { |
121 | u32 fifocon = uart_rd(S3C2410_UFCON); | 124 | u32 fifocon; |
125 | |||
126 | if (!config_enabled(CONFIG_DEBUG_LL)) | ||
127 | return; | ||
128 | |||
129 | fifocon = uart_rd(S3C2410_UFCON); | ||
122 | 130 | ||
123 | if (!(fifocon & S3C2410_UFCON_FIFOMODE)) { | 131 | if (!(fifocon & S3C2410_UFCON_FIFOMODE)) { |
124 | fifocon |= S3C2410_UFCON_RESETBOTH; | 132 | fifocon |= S3C2410_UFCON_RESETBOTH; |
diff --git a/arch/arm/plat-samsung/pm.c b/arch/arm/plat-samsung/pm.c index 53210ec4e8ec..bd7124c87fea 100644 --- a/arch/arm/plat-samsung/pm.c +++ b/arch/arm/plat-samsung/pm.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/suspend.h> | 16 | #include <linux/suspend.h> |
17 | #include <linux/errno.h> | 17 | #include <linux/errno.h> |
18 | #include <linux/delay.h> | 18 | #include <linux/delay.h> |
19 | #include <linux/of.h> | ||
19 | #include <linux/serial_core.h> | 20 | #include <linux/serial_core.h> |
20 | #include <linux/io.h> | 21 | #include <linux/io.h> |
21 | 22 | ||
@@ -261,7 +262,8 @@ static int s3c_pm_enter(suspend_state_t state) | |||
261 | * require a full power-cycle) | 262 | * require a full power-cycle) |
262 | */ | 263 | */ |
263 | 264 | ||
264 | if (!any_allowed(s3c_irqwake_intmask, s3c_irqwake_intallow) && | 265 | if (!of_have_populated_dt() && |
266 | !any_allowed(s3c_irqwake_intmask, s3c_irqwake_intallow) && | ||
265 | !any_allowed(s3c_irqwake_eintmask, s3c_irqwake_eintallow)) { | 267 | !any_allowed(s3c_irqwake_eintmask, s3c_irqwake_eintallow)) { |
266 | printk(KERN_ERR "%s: No wake-up sources!\n", __func__); | 268 | printk(KERN_ERR "%s: No wake-up sources!\n", __func__); |
267 | printk(KERN_ERR "%s: Aborting sleep\n", __func__); | 269 | printk(KERN_ERR "%s: Aborting sleep\n", __func__); |
@@ -270,8 +272,11 @@ static int s3c_pm_enter(suspend_state_t state) | |||
270 | 272 | ||
271 | /* save all necessary core registers not covered by the drivers */ | 273 | /* save all necessary core registers not covered by the drivers */ |
272 | 274 | ||
273 | samsung_pm_save_gpios(); | 275 | if (!of_have_populated_dt()) { |
274 | samsung_pm_saved_gpios(); | 276 | samsung_pm_save_gpios(); |
277 | samsung_pm_saved_gpios(); | ||
278 | } | ||
279 | |||
275 | s3c_pm_save_uarts(); | 280 | s3c_pm_save_uarts(); |
276 | s3c_pm_save_core(); | 281 | s3c_pm_save_core(); |
277 | 282 | ||
@@ -310,8 +315,11 @@ static int s3c_pm_enter(suspend_state_t state) | |||
310 | 315 | ||
311 | s3c_pm_restore_core(); | 316 | s3c_pm_restore_core(); |
312 | s3c_pm_restore_uarts(); | 317 | s3c_pm_restore_uarts(); |
313 | samsung_pm_restore_gpios(); | 318 | |
314 | s3c_pm_restored_gpios(); | 319 | if (!of_have_populated_dt()) { |
320 | samsung_pm_restore_gpios(); | ||
321 | s3c_pm_restored_gpios(); | ||
322 | } | ||
315 | 323 | ||
316 | s3c_pm_debug_init(); | 324 | s3c_pm_debug_init(); |
317 | 325 | ||
diff --git a/arch/arm/plat-versatile/headsmp.S b/arch/arm/plat-versatile/headsmp.S index b178d44e9eaa..2677bc3762d7 100644 --- a/arch/arm/plat-versatile/headsmp.S +++ b/arch/arm/plat-versatile/headsmp.S | |||
@@ -11,8 +11,6 @@ | |||
11 | #include <linux/linkage.h> | 11 | #include <linux/linkage.h> |
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | 13 | ||
14 | __INIT | ||
15 | |||
16 | /* | 14 | /* |
17 | * Realview/Versatile Express specific entry point for secondary CPUs. | 15 | * Realview/Versatile Express specific entry point for secondary CPUs. |
18 | * This provides a "holding pen" into which all secondary cores are held | 16 | * This provides a "holding pen" into which all secondary cores are held |
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 1e49e5eb81e9..9ba33c40cdf8 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c | |||
@@ -1336,6 +1336,7 @@ void perf_callchain_user(struct perf_callchain_entry *entry, | |||
1336 | return; | 1336 | return; |
1337 | } | 1337 | } |
1338 | 1338 | ||
1339 | perf_callchain_store(entry, regs->pc); | ||
1339 | tail = (struct frame_tail __user *)regs->regs[29]; | 1340 | tail = (struct frame_tail __user *)regs->regs[29]; |
1340 | 1341 | ||
1341 | while (entry->nr < PERF_MAX_STACK_DEPTH && | 1342 | while (entry->nr < PERF_MAX_STACK_DEPTH && |
diff --git a/arch/ia64/include/asm/irqflags.h b/arch/ia64/include/asm/irqflags.h index 1bf2cf2f4ab4..cec6c06b52c0 100644 --- a/arch/ia64/include/asm/irqflags.h +++ b/arch/ia64/include/asm/irqflags.h | |||
@@ -11,6 +11,7 @@ | |||
11 | #define _ASM_IA64_IRQFLAGS_H | 11 | #define _ASM_IA64_IRQFLAGS_H |
12 | 12 | ||
13 | #include <asm/pal.h> | 13 | #include <asm/pal.h> |
14 | #include <asm/kregs.h> | ||
14 | 15 | ||
15 | #ifdef CONFIG_IA64_DEBUG_IRQ | 16 | #ifdef CONFIG_IA64_DEBUG_IRQ |
16 | extern unsigned long last_cli_ip; | 17 | extern unsigned long last_cli_ip; |
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h index c3ffe3e54edc..ef3a9de01954 100644 --- a/arch/ia64/include/asm/tlb.h +++ b/arch/ia64/include/asm/tlb.h | |||
@@ -46,12 +46,6 @@ | |||
46 | #include <asm/tlbflush.h> | 46 | #include <asm/tlbflush.h> |
47 | #include <asm/machvec.h> | 47 | #include <asm/machvec.h> |
48 | 48 | ||
49 | #ifdef CONFIG_SMP | ||
50 | # define tlb_fast_mode(tlb) ((tlb)->nr == ~0U) | ||
51 | #else | ||
52 | # define tlb_fast_mode(tlb) (1) | ||
53 | #endif | ||
54 | |||
55 | /* | 49 | /* |
56 | * If we can't allocate a page to make a big batch of page pointers | 50 | * If we can't allocate a page to make a big batch of page pointers |
57 | * to work on, then just handle a few from the on-stack structure. | 51 | * to work on, then just handle a few from the on-stack structure. |
@@ -60,7 +54,7 @@ | |||
60 | 54 | ||
61 | struct mmu_gather { | 55 | struct mmu_gather { |
62 | struct mm_struct *mm; | 56 | struct mm_struct *mm; |
63 | unsigned int nr; /* == ~0U => fast mode */ | 57 | unsigned int nr; |
64 | unsigned int max; | 58 | unsigned int max; |
65 | unsigned char fullmm; /* non-zero means full mm flush */ | 59 | unsigned char fullmm; /* non-zero means full mm flush */ |
66 | unsigned char need_flush; /* really unmapped some PTEs? */ | 60 | unsigned char need_flush; /* really unmapped some PTEs? */ |
@@ -103,6 +97,7 @@ extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS]; | |||
103 | static inline void | 97 | static inline void |
104 | ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end) | 98 | ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long end) |
105 | { | 99 | { |
100 | unsigned long i; | ||
106 | unsigned int nr; | 101 | unsigned int nr; |
107 | 102 | ||
108 | if (!tlb->need_flush) | 103 | if (!tlb->need_flush) |
@@ -141,13 +136,11 @@ ia64_tlb_flush_mmu (struct mmu_gather *tlb, unsigned long start, unsigned long e | |||
141 | 136 | ||
142 | /* lastly, release the freed pages */ | 137 | /* lastly, release the freed pages */ |
143 | nr = tlb->nr; | 138 | nr = tlb->nr; |
144 | if (!tlb_fast_mode(tlb)) { | 139 | |
145 | unsigned long i; | 140 | tlb->nr = 0; |
146 | tlb->nr = 0; | 141 | tlb->start_addr = ~0UL; |
147 | tlb->start_addr = ~0UL; | 142 | for (i = 0; i < nr; ++i) |
148 | for (i = 0; i < nr; ++i) | 143 | free_page_and_swap_cache(tlb->pages[i]); |
149 | free_page_and_swap_cache(tlb->pages[i]); | ||
150 | } | ||
151 | } | 144 | } |
152 | 145 | ||
153 | static inline void __tlb_alloc_page(struct mmu_gather *tlb) | 146 | static inline void __tlb_alloc_page(struct mmu_gather *tlb) |
@@ -167,20 +160,7 @@ tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_m | |||
167 | tlb->mm = mm; | 160 | tlb->mm = mm; |
168 | tlb->max = ARRAY_SIZE(tlb->local); | 161 | tlb->max = ARRAY_SIZE(tlb->local); |
169 | tlb->pages = tlb->local; | 162 | tlb->pages = tlb->local; |
170 | /* | 163 | tlb->nr = 0; |
171 | * Use fast mode if only 1 CPU is online. | ||
172 | * | ||
173 | * It would be tempting to turn on fast-mode for full_mm_flush as well. But this | ||
174 | * doesn't work because of speculative accesses and software prefetching: the page | ||
175 | * table of "mm" may (and usually is) the currently active page table and even | ||
176 | * though the kernel won't do any user-space accesses during the TLB shoot down, a | ||
177 | * compiler might use speculation or lfetch.fault on what happens to be a valid | ||
178 | * user-space address. This in turn could trigger a TLB miss fault (or a VHPT | ||
179 | * walk) and re-insert a TLB entry we just removed. Slow mode avoids such | ||
180 | * problems. (We could make fast-mode work by switching the current task to a | ||
181 | * different "mm" during the shootdown.) --davidm 08/02/2002 | ||
182 | */ | ||
183 | tlb->nr = (num_online_cpus() == 1) ? ~0U : 0; | ||
184 | tlb->fullmm = full_mm_flush; | 164 | tlb->fullmm = full_mm_flush; |
185 | tlb->start_addr = ~0UL; | 165 | tlb->start_addr = ~0UL; |
186 | } | 166 | } |
@@ -214,11 +194,6 @@ static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page) | |||
214 | { | 194 | { |
215 | tlb->need_flush = 1; | 195 | tlb->need_flush = 1; |
216 | 196 | ||
217 | if (tlb_fast_mode(tlb)) { | ||
218 | free_page_and_swap_cache(page); | ||
219 | return 1; /* avoid calling tlb_flush_mmu */ | ||
220 | } | ||
221 | |||
222 | if (!tlb->nr && tlb->pages == tlb->local) | 197 | if (!tlb->nr && tlb->pages == tlb->local) |
223 | __tlb_alloc_page(tlb); | 198 | __tlb_alloc_page(tlb); |
224 | 199 | ||
diff --git a/arch/m68k/include/asm/gpio.h b/arch/m68k/include/asm/gpio.h index 8cc83431805b..2f6eec1e34b4 100644 --- a/arch/m68k/include/asm/gpio.h +++ b/arch/m68k/include/asm/gpio.h | |||
@@ -86,6 +86,7 @@ static inline int gpio_cansleep(unsigned gpio) | |||
86 | return gpio < MCFGPIO_PIN_MAX ? 0 : __gpio_cansleep(gpio); | 86 | return gpio < MCFGPIO_PIN_MAX ? 0 : __gpio_cansleep(gpio); |
87 | } | 87 | } |
88 | 88 | ||
89 | #ifndef CONFIG_GPIOLIB | ||
89 | static inline int gpio_request_one(unsigned gpio, unsigned long flags, const char *label) | 90 | static inline int gpio_request_one(unsigned gpio, unsigned long flags, const char *label) |
90 | { | 91 | { |
91 | int err; | 92 | int err; |
@@ -105,5 +106,5 @@ static inline int gpio_request_one(unsigned gpio, unsigned long flags, const cha | |||
105 | 106 | ||
106 | return err; | 107 | return err; |
107 | } | 108 | } |
108 | 109 | #endif /* !CONFIG_GPIOLIB */ | |
109 | #endif | 110 | #endif |
diff --git a/arch/m68k/kernel/head.S b/arch/m68k/kernel/head.S index d197e7ff62c5..ac85f16534af 100644 --- a/arch/m68k/kernel/head.S +++ b/arch/m68k/kernel/head.S | |||
@@ -2752,11 +2752,9 @@ func_return get_new_page | |||
2752 | #ifdef CONFIG_MAC | 2752 | #ifdef CONFIG_MAC |
2753 | 2753 | ||
2754 | L(scc_initable_mac): | 2754 | L(scc_initable_mac): |
2755 | .byte 9,12 /* Reset */ | ||
2756 | .byte 4,0x44 /* x16, 1 stopbit, no parity */ | 2755 | .byte 4,0x44 /* x16, 1 stopbit, no parity */ |
2757 | .byte 3,0xc0 /* receiver: 8 bpc */ | 2756 | .byte 3,0xc0 /* receiver: 8 bpc */ |
2758 | .byte 5,0xe2 /* transmitter: 8 bpc, assert dtr/rts */ | 2757 | .byte 5,0xe2 /* transmitter: 8 bpc, assert dtr/rts */ |
2759 | .byte 9,0 /* no interrupts */ | ||
2760 | .byte 10,0 /* NRZ */ | 2758 | .byte 10,0 /* NRZ */ |
2761 | .byte 11,0x50 /* use baud rate generator */ | 2759 | .byte 11,0x50 /* use baud rate generator */ |
2762 | .byte 12,1,13,0 /* 38400 baud */ | 2760 | .byte 12,1,13,0 /* 38400 baud */ |
@@ -2899,6 +2897,7 @@ func_start serial_init,%d0/%d1/%a0/%a1 | |||
2899 | is_not_mac(L(serial_init_not_mac)) | 2897 | is_not_mac(L(serial_init_not_mac)) |
2900 | 2898 | ||
2901 | #ifdef SERIAL_DEBUG | 2899 | #ifdef SERIAL_DEBUG |
2900 | |||
2902 | /* You may define either or both of these. */ | 2901 | /* You may define either or both of these. */ |
2903 | #define MAC_USE_SCC_A /* Modem port */ | 2902 | #define MAC_USE_SCC_A /* Modem port */ |
2904 | #define MAC_USE_SCC_B /* Printer port */ | 2903 | #define MAC_USE_SCC_B /* Printer port */ |
@@ -2908,9 +2907,21 @@ func_start serial_init,%d0/%d1/%a0/%a1 | |||
2908 | #define mac_scc_cha_b_data_offset 0x4 | 2907 | #define mac_scc_cha_b_data_offset 0x4 |
2909 | #define mac_scc_cha_a_data_offset 0x6 | 2908 | #define mac_scc_cha_a_data_offset 0x6 |
2910 | 2909 | ||
2910 | #if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B) | ||
2911 | movel %pc@(L(mac_sccbase)),%a0 | ||
2912 | /* Reset SCC device */ | ||
2913 | moveb #9,%a0@(mac_scc_cha_a_ctrl_offset) | ||
2914 | moveb #0xc0,%a0@(mac_scc_cha_a_ctrl_offset) | ||
2915 | /* Wait for 5 PCLK cycles, which is about 68 CPU cycles */ | ||
2916 | /* 5 / 3.6864 MHz = approx. 1.36 us = 68 / 50 MHz */ | ||
2917 | movel #35,%d0 | ||
2918 | 5: | ||
2919 | subq #1,%d0 | ||
2920 | jne 5b | ||
2921 | #endif | ||
2922 | |||
2911 | #ifdef MAC_USE_SCC_A | 2923 | #ifdef MAC_USE_SCC_A |
2912 | /* Initialize channel A */ | 2924 | /* Initialize channel A */ |
2913 | movel %pc@(L(mac_sccbase)),%a0 | ||
2914 | lea %pc@(L(scc_initable_mac)),%a1 | 2925 | lea %pc@(L(scc_initable_mac)),%a1 |
2915 | 5: moveb %a1@+,%d0 | 2926 | 5: moveb %a1@+,%d0 |
2916 | jmi 6f | 2927 | jmi 6f |
@@ -2922,9 +2933,6 @@ func_start serial_init,%d0/%d1/%a0/%a1 | |||
2922 | 2933 | ||
2923 | #ifdef MAC_USE_SCC_B | 2934 | #ifdef MAC_USE_SCC_B |
2924 | /* Initialize channel B */ | 2935 | /* Initialize channel B */ |
2925 | #ifndef MAC_USE_SCC_A /* Load mac_sccbase only if needed */ | ||
2926 | movel %pc@(L(mac_sccbase)),%a0 | ||
2927 | #endif /* MAC_USE_SCC_A */ | ||
2928 | lea %pc@(L(scc_initable_mac)),%a1 | 2936 | lea %pc@(L(scc_initable_mac)),%a1 |
2929 | 7: moveb %a1@+,%d0 | 2937 | 7: moveb %a1@+,%d0 |
2930 | jmi 8f | 2938 | jmi 8f |
@@ -2933,6 +2941,7 @@ func_start serial_init,%d0/%d1/%a0/%a1 | |||
2933 | jra 7b | 2941 | jra 7b |
2934 | 8: | 2942 | 8: |
2935 | #endif /* MAC_USE_SCC_B */ | 2943 | #endif /* MAC_USE_SCC_B */ |
2944 | |||
2936 | #endif /* SERIAL_DEBUG */ | 2945 | #endif /* SERIAL_DEBUG */ |
2937 | 2946 | ||
2938 | jra L(serial_init_done) | 2947 | jra L(serial_init_done) |
@@ -3006,17 +3015,17 @@ func_start serial_putc,%d0/%d1/%a0/%a1 | |||
3006 | 3015 | ||
3007 | #ifdef SERIAL_DEBUG | 3016 | #ifdef SERIAL_DEBUG |
3008 | 3017 | ||
3009 | #ifdef MAC_USE_SCC_A | 3018 | #if defined(MAC_USE_SCC_A) || defined(MAC_USE_SCC_B) |
3010 | movel %pc@(L(mac_sccbase)),%a1 | 3019 | movel %pc@(L(mac_sccbase)),%a1 |
3020 | #endif | ||
3021 | |||
3022 | #ifdef MAC_USE_SCC_A | ||
3011 | 3: btst #2,%a1@(mac_scc_cha_a_ctrl_offset) | 3023 | 3: btst #2,%a1@(mac_scc_cha_a_ctrl_offset) |
3012 | jeq 3b | 3024 | jeq 3b |
3013 | moveb %d0,%a1@(mac_scc_cha_a_data_offset) | 3025 | moveb %d0,%a1@(mac_scc_cha_a_data_offset) |
3014 | #endif /* MAC_USE_SCC_A */ | 3026 | #endif /* MAC_USE_SCC_A */ |
3015 | 3027 | ||
3016 | #ifdef MAC_USE_SCC_B | 3028 | #ifdef MAC_USE_SCC_B |
3017 | #ifndef MAC_USE_SCC_A /* Load mac_sccbase only if needed */ | ||
3018 | movel %pc@(L(mac_sccbase)),%a1 | ||
3019 | #endif /* MAC_USE_SCC_A */ | ||
3020 | 4: btst #2,%a1@(mac_scc_cha_b_ctrl_offset) | 3029 | 4: btst #2,%a1@(mac_scc_cha_b_ctrl_offset) |
3021 | jeq 4b | 3030 | jeq 4b |
3022 | moveb %d0,%a1@(mac_scc_cha_b_data_offset) | 3031 | moveb %d0,%a1@(mac_scc_cha_b_data_offset) |
diff --git a/arch/metag/include/asm/hugetlb.h b/arch/metag/include/asm/hugetlb.h index f545477e61f3..471f481e67f3 100644 --- a/arch/metag/include/asm/hugetlb.h +++ b/arch/metag/include/asm/hugetlb.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #define _ASM_METAG_HUGETLB_H | 2 | #define _ASM_METAG_HUGETLB_H |
3 | 3 | ||
4 | #include <asm/page.h> | 4 | #include <asm/page.h> |
5 | #include <asm-generic/hugetlb.h> | ||
5 | 6 | ||
6 | 7 | ||
7 | static inline int is_hugepage_only_range(struct mm_struct *mm, | 8 | static inline int is_hugepage_only_range(struct mm_struct *mm, |
diff --git a/arch/microblaze/include/asm/cacheflush.h b/arch/microblaze/include/asm/cacheflush.h index 0f553bc009a0..ffea82a16d2c 100644 --- a/arch/microblaze/include/asm/cacheflush.h +++ b/arch/microblaze/include/asm/cacheflush.h | |||
@@ -102,21 +102,23 @@ do { \ | |||
102 | 102 | ||
103 | #define flush_cache_range(vma, start, len) do { } while (0) | 103 | #define flush_cache_range(vma, start, len) do { } while (0) |
104 | 104 | ||
105 | #define copy_to_user_page(vma, page, vaddr, dst, src, len) \ | 105 | static inline void copy_to_user_page(struct vm_area_struct *vma, |
106 | do { \ | 106 | struct page *page, unsigned long vaddr, |
107 | u32 addr = virt_to_phys(dst); \ | 107 | void *dst, void *src, int len) |
108 | memcpy((dst), (src), (len)); \ | 108 | { |
109 | if (vma->vm_flags & VM_EXEC) { \ | 109 | u32 addr = virt_to_phys(dst); |
110 | invalidate_icache_range((unsigned) (addr), \ | 110 | memcpy(dst, src, len); |
111 | (unsigned) (addr) + PAGE_SIZE); \ | 111 | if (vma->vm_flags & VM_EXEC) { |
112 | flush_dcache_range((unsigned) (addr), \ | 112 | invalidate_icache_range(addr, addr + PAGE_SIZE); |
113 | (unsigned) (addr) + PAGE_SIZE); \ | 113 | flush_dcache_range(addr, addr + PAGE_SIZE); |
114 | } \ | 114 | } |
115 | } while (0) | 115 | } |
116 | 116 | ||
117 | #define copy_from_user_page(vma, page, vaddr, dst, src, len) \ | 117 | static inline void copy_from_user_page(struct vm_area_struct *vma, |
118 | do { \ | 118 | struct page *page, unsigned long vaddr, |
119 | memcpy((dst), (src), (len)); \ | 119 | void *dst, void *src, int len) |
120 | } while (0) | 120 | { |
121 | memcpy(dst, src, len); | ||
122 | } | ||
121 | 123 | ||
122 | #endif /* _ASM_MICROBLAZE_CACHEFLUSH_H */ | 124 | #endif /* _ASM_MICROBLAZE_CACHEFLUSH_H */ |
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h index efe59d881789..04e49553bdf9 100644 --- a/arch/microblaze/include/asm/uaccess.h +++ b/arch/microblaze/include/asm/uaccess.h | |||
@@ -99,13 +99,13 @@ static inline int access_ok(int type, const void __user *addr, | |||
99 | if ((get_fs().seg < ((unsigned long)addr)) || | 99 | if ((get_fs().seg < ((unsigned long)addr)) || |
100 | (get_fs().seg < ((unsigned long)addr + size - 1))) { | 100 | (get_fs().seg < ((unsigned long)addr + size - 1))) { |
101 | pr_debug("ACCESS fail: %s at 0x%08x (size 0x%x), seg 0x%08x\n", | 101 | pr_debug("ACCESS fail: %s at 0x%08x (size 0x%x), seg 0x%08x\n", |
102 | type ? "WRITE" : "READ ", (u32)addr, (u32)size, | 102 | type ? "WRITE" : "READ ", (__force u32)addr, (u32)size, |
103 | (u32)get_fs().seg); | 103 | (u32)get_fs().seg); |
104 | return 0; | 104 | return 0; |
105 | } | 105 | } |
106 | ok: | 106 | ok: |
107 | pr_debug("ACCESS OK: %s at 0x%08x (size 0x%x), seg 0x%08x\n", | 107 | pr_debug("ACCESS OK: %s at 0x%08x (size 0x%x), seg 0x%08x\n", |
108 | type ? "WRITE" : "READ ", (u32)addr, (u32)size, | 108 | type ? "WRITE" : "READ ", (__force u32)addr, (u32)size, |
109 | (u32)get_fs().seg); | 109 | (u32)get_fs().seg); |
110 | return 1; | 110 | return 1; |
111 | } | 111 | } |
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index b0baa299f899..01b1b3f94feb 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c | |||
@@ -428,13 +428,16 @@ static void octeon_restart(char *command) | |||
428 | */ | 428 | */ |
429 | static void octeon_kill_core(void *arg) | 429 | static void octeon_kill_core(void *arg) |
430 | { | 430 | { |
431 | mb(); | 431 | if (octeon_is_simulation()) |
432 | if (octeon_is_simulation()) { | ||
433 | /* The simulator needs the watchdog to stop for dead cores */ | ||
434 | cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0); | ||
435 | /* A break instruction causes the simulator to stop a core */ | 432 | /* A break instruction causes the simulator to stop a core */ |
436 | asm volatile ("sync\nbreak"); | 433 | asm volatile ("break" ::: "memory"); |
437 | } | 434 | |
435 | local_irq_disable(); | ||
436 | /* Disable watchdog on this core. */ | ||
437 | cvmx_write_csr(CVMX_CIU_WDOGX(cvmx_get_core_num()), 0); | ||
438 | /* Spin in a low power mode. */ | ||
439 | while (true) | ||
440 | asm volatile ("wait" ::: "memory"); | ||
438 | } | 441 | } |
439 | 442 | ||
440 | 443 | ||
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index 143875c6c95a..4d6fa0bf1305 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h | |||
@@ -496,10 +496,6 @@ struct kvm_mips_callbacks { | |||
496 | uint32_t cause); | 496 | uint32_t cause); |
497 | int (*irq_clear) (struct kvm_vcpu *vcpu, unsigned int priority, | 497 | int (*irq_clear) (struct kvm_vcpu *vcpu, unsigned int priority, |
498 | uint32_t cause); | 498 | uint32_t cause); |
499 | int (*vcpu_ioctl_get_regs) (struct kvm_vcpu *vcpu, | ||
500 | struct kvm_regs *regs); | ||
501 | int (*vcpu_ioctl_set_regs) (struct kvm_vcpu *vcpu, | ||
502 | struct kvm_regs *regs); | ||
503 | }; | 499 | }; |
504 | extern struct kvm_mips_callbacks *kvm_mips_callbacks; | 500 | extern struct kvm_mips_callbacks *kvm_mips_callbacks; |
505 | int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks); | 501 | int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks); |
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h index 820116067c10..516e6e9a5594 100644 --- a/arch/mips/include/asm/mmu_context.h +++ b/arch/mips/include/asm/mmu_context.h | |||
@@ -117,7 +117,7 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) | |||
117 | if (! ((asid += ASID_INC) & ASID_MASK) ) { | 117 | if (! ((asid += ASID_INC) & ASID_MASK) ) { |
118 | if (cpu_has_vtag_icache) | 118 | if (cpu_has_vtag_icache) |
119 | flush_icache_all(); | 119 | flush_icache_all(); |
120 | #ifdef CONFIG_VIRTUALIZATION | 120 | #ifdef CONFIG_KVM |
121 | kvm_local_flush_tlb_all(); /* start new asid cycle */ | 121 | kvm_local_flush_tlb_all(); /* start new asid cycle */ |
122 | #else | 122 | #else |
123 | local_flush_tlb_all(); /* start new asid cycle */ | 123 | local_flush_tlb_all(); /* start new asid cycle */ |
diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h index a3186f2bb8a0..5e6cd0947393 100644 --- a/arch/mips/include/asm/ptrace.h +++ b/arch/mips/include/asm/ptrace.h | |||
@@ -16,6 +16,38 @@ | |||
16 | #include <asm/isadep.h> | 16 | #include <asm/isadep.h> |
17 | #include <uapi/asm/ptrace.h> | 17 | #include <uapi/asm/ptrace.h> |
18 | 18 | ||
19 | /* | ||
20 | * This struct defines the way the registers are stored on the stack during a | ||
21 | * system call/exception. As usual the registers k0/k1 aren't being saved. | ||
22 | */ | ||
23 | struct pt_regs { | ||
24 | #ifdef CONFIG_32BIT | ||
25 | /* Pad bytes for argument save space on the stack. */ | ||
26 | unsigned long pad0[6]; | ||
27 | #endif | ||
28 | |||
29 | /* Saved main processor registers. */ | ||
30 | unsigned long regs[32]; | ||
31 | |||
32 | /* Saved special registers. */ | ||
33 | unsigned long cp0_status; | ||
34 | unsigned long hi; | ||
35 | unsigned long lo; | ||
36 | #ifdef CONFIG_CPU_HAS_SMARTMIPS | ||
37 | unsigned long acx; | ||
38 | #endif | ||
39 | unsigned long cp0_badvaddr; | ||
40 | unsigned long cp0_cause; | ||
41 | unsigned long cp0_epc; | ||
42 | #ifdef CONFIG_MIPS_MT_SMTC | ||
43 | unsigned long cp0_tcstatus; | ||
44 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
45 | #ifdef CONFIG_CPU_CAVIUM_OCTEON | ||
46 | unsigned long long mpl[3]; /* MTM{0,1,2} */ | ||
47 | unsigned long long mtp[3]; /* MTP{0,1,2} */ | ||
48 | #endif | ||
49 | } __aligned(8); | ||
50 | |||
19 | struct task_struct; | 51 | struct task_struct; |
20 | 52 | ||
21 | extern int ptrace_getregs(struct task_struct *child, __s64 __user *data); | 53 | extern int ptrace_getregs(struct task_struct *child, __s64 __user *data); |
diff --git a/arch/mips/include/uapi/asm/kvm.h b/arch/mips/include/uapi/asm/kvm.h index 85789eacbf18..f09ff5ae2059 100644 --- a/arch/mips/include/uapi/asm/kvm.h +++ b/arch/mips/include/uapi/asm/kvm.h | |||
@@ -1,55 +1,135 @@ | |||
1 | /* | 1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | 2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | 6 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. |
7 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | 7 | * Copyright (C) 2013 Cavium, Inc. |
8 | */ | 8 | * Authors: Sanjay Lal <sanjayl@kymasys.com> |
9 | */ | ||
9 | 10 | ||
10 | #ifndef __LINUX_KVM_MIPS_H | 11 | #ifndef __LINUX_KVM_MIPS_H |
11 | #define __LINUX_KVM_MIPS_H | 12 | #define __LINUX_KVM_MIPS_H |
12 | 13 | ||
13 | #include <linux/types.h> | 14 | #include <linux/types.h> |
14 | 15 | ||
15 | #define __KVM_MIPS | 16 | /* |
16 | 17 | * KVM MIPS specific structures and definitions. | |
17 | #define N_MIPS_COPROC_REGS 32 | 18 | * |
18 | #define N_MIPS_COPROC_SEL 8 | 19 | * Some parts derived from the x86 version of this file. |
20 | */ | ||
19 | 21 | ||
20 | /* for KVM_GET_REGS and KVM_SET_REGS */ | 22 | /* |
23 | * for KVM_GET_REGS and KVM_SET_REGS | ||
24 | * | ||
25 | * If Config[AT] is zero (32-bit CPU), the register contents are | ||
26 | * stored in the lower 32-bits of the struct kvm_regs fields and sign | ||
27 | * extended to 64-bits. | ||
28 | */ | ||
21 | struct kvm_regs { | 29 | struct kvm_regs { |
22 | __u32 gprs[32]; | 30 | /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */ |
23 | __u32 hi; | 31 | __u64 gpr[32]; |
24 | __u32 lo; | 32 | __u64 hi; |
25 | __u32 pc; | 33 | __u64 lo; |
26 | 34 | __u64 pc; | |
27 | __u32 cp0reg[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL]; | ||
28 | }; | ||
29 | |||
30 | /* for KVM_GET_SREGS and KVM_SET_SREGS */ | ||
31 | struct kvm_sregs { | ||
32 | }; | 35 | }; |
33 | 36 | ||
34 | /* for KVM_GET_FPU and KVM_SET_FPU */ | 37 | /* |
38 | * for KVM_GET_FPU and KVM_SET_FPU | ||
39 | * | ||
40 | * If Status[FR] is zero (32-bit FPU), the upper 32-bits of the FPRs | ||
41 | * are zero filled. | ||
42 | */ | ||
35 | struct kvm_fpu { | 43 | struct kvm_fpu { |
44 | __u64 fpr[32]; | ||
45 | __u32 fir; | ||
46 | __u32 fccr; | ||
47 | __u32 fexr; | ||
48 | __u32 fenr; | ||
49 | __u32 fcsr; | ||
50 | __u32 pad; | ||
36 | }; | 51 | }; |
37 | 52 | ||
53 | |||
54 | /* | ||
55 | * For MIPS, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access CP0 | ||
56 | * registers. The id field is broken down as follows: | ||
57 | * | ||
58 | * bits[2..0] - Register 'sel' index. | ||
59 | * bits[7..3] - Register 'rd' index. | ||
60 | * bits[15..8] - Must be zero. | ||
61 | * bits[31..16] - 1 -> CP0 registers. | ||
62 | * bits[51..32] - Must be zero. | ||
63 | * bits[63..52] - As per linux/kvm.h | ||
64 | * | ||
65 | * Other register sets may be added in the future. Each set would | ||
66 | * have its own identifier in bits[31..16]. | ||
67 | * | ||
68 | * The registers defined in struct kvm_regs are also accessible, the | ||
69 | * id values for these are below. | ||
70 | */ | ||
71 | |||
72 | #define KVM_REG_MIPS_R0 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0) | ||
73 | #define KVM_REG_MIPS_R1 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 1) | ||
74 | #define KVM_REG_MIPS_R2 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 2) | ||
75 | #define KVM_REG_MIPS_R3 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 3) | ||
76 | #define KVM_REG_MIPS_R4 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 4) | ||
77 | #define KVM_REG_MIPS_R5 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 5) | ||
78 | #define KVM_REG_MIPS_R6 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 6) | ||
79 | #define KVM_REG_MIPS_R7 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 7) | ||
80 | #define KVM_REG_MIPS_R8 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 8) | ||
81 | #define KVM_REG_MIPS_R9 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 9) | ||
82 | #define KVM_REG_MIPS_R10 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 10) | ||
83 | #define KVM_REG_MIPS_R11 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 11) | ||
84 | #define KVM_REG_MIPS_R12 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 12) | ||
85 | #define KVM_REG_MIPS_R13 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 13) | ||
86 | #define KVM_REG_MIPS_R14 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 14) | ||
87 | #define KVM_REG_MIPS_R15 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 15) | ||
88 | #define KVM_REG_MIPS_R16 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 16) | ||
89 | #define KVM_REG_MIPS_R17 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 17) | ||
90 | #define KVM_REG_MIPS_R18 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 18) | ||
91 | #define KVM_REG_MIPS_R19 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 19) | ||
92 | #define KVM_REG_MIPS_R20 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 20) | ||
93 | #define KVM_REG_MIPS_R21 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 21) | ||
94 | #define KVM_REG_MIPS_R22 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 22) | ||
95 | #define KVM_REG_MIPS_R23 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 23) | ||
96 | #define KVM_REG_MIPS_R24 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 24) | ||
97 | #define KVM_REG_MIPS_R25 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 25) | ||
98 | #define KVM_REG_MIPS_R26 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 26) | ||
99 | #define KVM_REG_MIPS_R27 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 27) | ||
100 | #define KVM_REG_MIPS_R28 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 28) | ||
101 | #define KVM_REG_MIPS_R29 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 29) | ||
102 | #define KVM_REG_MIPS_R30 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 30) | ||
103 | #define KVM_REG_MIPS_R31 (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 31) | ||
104 | |||
105 | #define KVM_REG_MIPS_HI (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 32) | ||
106 | #define KVM_REG_MIPS_LO (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 33) | ||
107 | #define KVM_REG_MIPS_PC (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 34) | ||
108 | |||
109 | /* | ||
110 | * KVM MIPS specific structures and definitions | ||
111 | * | ||
112 | */ | ||
38 | struct kvm_debug_exit_arch { | 113 | struct kvm_debug_exit_arch { |
114 | __u64 epc; | ||
39 | }; | 115 | }; |
40 | 116 | ||
41 | /* for KVM_SET_GUEST_DEBUG */ | 117 | /* for KVM_SET_GUEST_DEBUG */ |
42 | struct kvm_guest_debug_arch { | 118 | struct kvm_guest_debug_arch { |
43 | }; | 119 | }; |
44 | 120 | ||
121 | /* definition of registers in kvm_run */ | ||
122 | struct kvm_sync_regs { | ||
123 | }; | ||
124 | |||
125 | /* dummy definition */ | ||
126 | struct kvm_sregs { | ||
127 | }; | ||
128 | |||
45 | struct kvm_mips_interrupt { | 129 | struct kvm_mips_interrupt { |
46 | /* in */ | 130 | /* in */ |
47 | __u32 cpu; | 131 | __u32 cpu; |
48 | __u32 irq; | 132 | __u32 irq; |
49 | }; | 133 | }; |
50 | 134 | ||
51 | /* definition of registers in kvm_run */ | ||
52 | struct kvm_sync_regs { | ||
53 | }; | ||
54 | |||
55 | #endif /* __LINUX_KVM_MIPS_H */ | 135 | #endif /* __LINUX_KVM_MIPS_H */ |
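
Register access now goes through the generic one-reg interface rather than the dropped KVM_GET_REGS/KVM_SET_REGS callbacks, using the id layout documented above. A minimal userspace sketch of reading the guest PC with the id defined in this header; it assumes a MIPS host with these headers installed and an already created vcpu file descriptor, and error handling is trimmed:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Illustrative helper: fetch the guest program counter via ONE_REG. */
	static int read_guest_pc(int vcpu_fd, uint64_t *pc)
	{
		struct kvm_one_reg reg = {
			.id   = KVM_REG_MIPS_PC,
			.addr = (uintptr_t)pc,
		};

		return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
	}
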
diff --git a/arch/mips/include/uapi/asm/ptrace.h b/arch/mips/include/uapi/asm/ptrace.h index 4d58d8468705..b26f7e317279 100644 --- a/arch/mips/include/uapi/asm/ptrace.h +++ b/arch/mips/include/uapi/asm/ptrace.h | |||
@@ -22,16 +22,12 @@ | |||
22 | #define DSP_CONTROL 77 | 22 | #define DSP_CONTROL 77 |
23 | #define ACX 78 | 23 | #define ACX 78 |
24 | 24 | ||
25 | #ifndef __KERNEL__ | ||
25 | /* | 26 | /* |
26 | * This struct defines the way the registers are stored on the stack during a | 27 | * This struct defines the way the registers are stored on the stack during a |
27 | * system call/exception. As usual the registers k0/k1 aren't being saved. | 28 | * system call/exception. As usual the registers k0/k1 aren't being saved. |
28 | */ | 29 | */ |
29 | struct pt_regs { | 30 | struct pt_regs { |
30 | #ifdef CONFIG_32BIT | ||
31 | /* Pad bytes for argument save space on the stack. */ | ||
32 | unsigned long pad0[6]; | ||
33 | #endif | ||
34 | |||
35 | /* Saved main processor registers. */ | 31 | /* Saved main processor registers. */ |
36 | unsigned long regs[32]; | 32 | unsigned long regs[32]; |
37 | 33 | ||
@@ -39,20 +35,11 @@ struct pt_regs { | |||
39 | unsigned long cp0_status; | 35 | unsigned long cp0_status; |
40 | unsigned long hi; | 36 | unsigned long hi; |
41 | unsigned long lo; | 37 | unsigned long lo; |
42 | #ifdef CONFIG_CPU_HAS_SMARTMIPS | ||
43 | unsigned long acx; | ||
44 | #endif | ||
45 | unsigned long cp0_badvaddr; | 38 | unsigned long cp0_badvaddr; |
46 | unsigned long cp0_cause; | 39 | unsigned long cp0_cause; |
47 | unsigned long cp0_epc; | 40 | unsigned long cp0_epc; |
48 | #ifdef CONFIG_MIPS_MT_SMTC | ||
49 | unsigned long cp0_tcstatus; | ||
50 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
51 | #ifdef CONFIG_CPU_CAVIUM_OCTEON | ||
52 | unsigned long long mpl[3]; /* MTM{0,1,2} */ | ||
53 | unsigned long long mtp[3]; /* MTP{0,1,2} */ | ||
54 | #endif | ||
55 | } __attribute__ ((aligned (8))); | 41 | } __attribute__ ((aligned (8))); |
42 | #endif /* __KERNEL__ */ | ||
56 | 43 | ||
57 | /* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */ | 44 | /* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */ |
58 | #define PTRACE_GETREGS 12 | 45 | #define PTRACE_GETREGS 12 |
diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c index e06f777e9c49..1188e00bb120 100644 --- a/arch/mips/kernel/binfmt_elfn32.c +++ b/arch/mips/kernel/binfmt_elfn32.c | |||
@@ -119,4 +119,15 @@ MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)"); | |||
119 | #undef TASK_SIZE | 119 | #undef TASK_SIZE |
120 | #define TASK_SIZE TASK_SIZE32 | 120 | #define TASK_SIZE TASK_SIZE32 |
121 | 121 | ||
122 | #undef cputime_to_timeval | ||
123 | #define cputime_to_timeval cputime_to_compat_timeval | ||
124 | static __inline__ void | ||
125 | cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value) | ||
126 | { | ||
127 | unsigned long jiffies = cputime_to_jiffies(cputime); | ||
128 | |||
129 | value->tv_usec = (jiffies % HZ) * (1000000L / HZ); | ||
130 | value->tv_sec = jiffies / HZ; | ||
131 | } | ||
132 | |||
122 | #include "../../../fs/binfmt_elf.c" | 133 | #include "../../../fs/binfmt_elf.c" |
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c index 97c5a1668e53..202e581e6096 100644 --- a/arch/mips/kernel/binfmt_elfo32.c +++ b/arch/mips/kernel/binfmt_elfo32.c | |||
@@ -162,4 +162,15 @@ MODULE_AUTHOR("Ralf Baechle (ralf@linux-mips.org)"); | |||
162 | #undef TASK_SIZE | 162 | #undef TASK_SIZE |
163 | #define TASK_SIZE TASK_SIZE32 | 163 | #define TASK_SIZE TASK_SIZE32 |
164 | 164 | ||
165 | #undef cputime_to_timeval | ||
166 | #define cputime_to_timeval cputime_to_compat_timeval | ||
167 | static __inline__ void | ||
168 | cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value) | ||
169 | { | ||
170 | unsigned long jiffies = cputime_to_jiffies(cputime); | ||
171 | |||
172 | value->tv_usec = (jiffies % HZ) * (1000000L / HZ); | ||
173 | value->tv_sec = jiffies / HZ; | ||
174 | } | ||
175 | |||
165 | #include "../../../fs/binfmt_elf.c" | 176 | #include "../../../fs/binfmt_elf.c" |
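
Both compat ELF loaders gain the same jiffies-based conversion, which is picked up by the shared fs/binfmt_elf.c code they include. The arithmetic in isolation, as a standalone example with HZ assumed to be 100:

	#include <stdio.h>

	#define HZ 100

	int main(void)
	{
		unsigned long jiffies = 250;		/* 2.5 s worth of ticks at HZ=100 */
		long tv_sec  = jiffies / HZ;
		long tv_usec = (jiffies % HZ) * (1000000L / HZ);

		printf("%ld s %ld us\n", tv_sec, tv_usec);	/* "2 s 500000 us" */
		return 0;
	}
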
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index cf5509f13dd5..dba90ec0dc38 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c | |||
@@ -25,12 +25,16 @@ | |||
25 | #define MCOUNT_OFFSET_INSNS 4 | 25 | #define MCOUNT_OFFSET_INSNS 4 |
26 | #endif | 26 | #endif |
27 | 27 | ||
28 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
29 | |||
28 | /* Arch override because MIPS doesn't need to run this from stop_machine() */ | 30 | /* Arch override because MIPS doesn't need to run this from stop_machine() */ |
29 | void arch_ftrace_update_code(int command) | 31 | void arch_ftrace_update_code(int command) |
30 | { | 32 | { |
31 | ftrace_modify_all_code(command); | 33 | ftrace_modify_all_code(command); |
32 | } | 34 | } |
33 | 35 | ||
36 | #endif | ||
37 | |||
34 | /* | 38 | /* |
35 | * Check if the address is in kernel space | 39 | * Check if the address is in kernel space |
36 | * | 40 | * |
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c index 3b09b888afa9..0c655deeea4a 100644 --- a/arch/mips/kernel/idle.c +++ b/arch/mips/kernel/idle.c | |||
@@ -93,26 +93,27 @@ static void rm7k_wait_irqoff(void) | |||
93 | } | 93 | } |
94 | 94 | ||
95 | /* | 95 | /* |
96 | * The Au1xxx wait is available only if using 32khz counter or | 96 | * Au1 'wait' is only useful when the 32kHz counter is used as timer, |
97 | * external timer source, but specifically not CP0 Counter. | 97 | * since coreclock (and the cp0 counter) stops upon executing it. Only an |
98 | * alchemy/common/time.c may override cpu_wait! | 98 | * interrupt can wake it, so they must be enabled before entering idle modes. |
99 | */ | 99 | */ |
100 | static void au1k_wait(void) | 100 | static void au1k_wait(void) |
101 | { | 101 | { |
102 | unsigned long c0status = read_c0_status() | 1; /* irqs on */ | ||
103 | |||
102 | __asm__( | 104 | __asm__( |
103 | " .set mips3 \n" | 105 | " .set mips3 \n" |
104 | " cache 0x14, 0(%0) \n" | 106 | " cache 0x14, 0(%0) \n" |
105 | " cache 0x14, 32(%0) \n" | 107 | " cache 0x14, 32(%0) \n" |
106 | " sync \n" | 108 | " sync \n" |
107 | " nop \n" | 109 | " mtc0 %1, $12 \n" /* wr c0status */ |
108 | " wait \n" | 110 | " wait \n" |
109 | " nop \n" | 111 | " nop \n" |
110 | " nop \n" | 112 | " nop \n" |
111 | " nop \n" | 113 | " nop \n" |
112 | " nop \n" | 114 | " nop \n" |
113 | " .set mips0 \n" | 115 | " .set mips0 \n" |
114 | : : "r" (au1k_wait)); | 116 | : : "r" (au1k_wait), "r" (c0status)); |
115 | local_irq_enable(); | ||
116 | } | 117 | } |
117 | 118 | ||
118 | static int __initdata nowait; | 119 | static int __initdata nowait; |
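The au1k_wait() change closes a wake-up race: interrupts are now enabled by the mtc0 that immediately precedes the wait instruction, instead of calling local_irq_enable() after wait returns. A minimal sketch of that pattern, assuming a MIPS toolchain and the usual read_c0_status() accessor; the Au1-specific cache-prefetch lines are omitted here.

/* Enable interrupts and idle in one back-to-back sequence, so no
 * interrupt can be taken between "IRQs on" and "sleep". */
static void idle_with_irqs_on(void)
{
	unsigned long c0status = read_c0_status() | 1;	/* set Status.IE */

	__asm__ __volatile__(
	"	.set	mips3		\n"
	"	mtc0	%0, $12		\n"	/* write CP0 Status: IRQs on */
	"	wait			\n"	/* sleep until an interrupt fires */
	"	nop			\n"
	"	.set	mips0		\n"
	: : "r" (c0status));
}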
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c index 93c070b41b0d..6fa198db8999 100644 --- a/arch/mips/kernel/rtlx.c +++ b/arch/mips/kernel/rtlx.c | |||
@@ -40,6 +40,7 @@ | |||
40 | #include <asm/processor.h> | 40 | #include <asm/processor.h> |
41 | #include <asm/vpe.h> | 41 | #include <asm/vpe.h> |
42 | #include <asm/rtlx.h> | 42 | #include <asm/rtlx.h> |
43 | #include <asm/setup.h> | ||
43 | 44 | ||
44 | static struct rtlx_info *rtlx; | 45 | static struct rtlx_info *rtlx; |
45 | static int major; | 46 | static int major; |
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index e3be67012d78..a75ae40184aa 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c | |||
@@ -897,22 +897,24 @@ out_sigsegv: | |||
897 | 897 | ||
898 | asmlinkage void do_tr(struct pt_regs *regs) | 898 | asmlinkage void do_tr(struct pt_regs *regs) |
899 | { | 899 | { |
900 | unsigned int opcode, tcode = 0; | 900 | u32 opcode, tcode = 0; |
901 | u16 instr[2]; | 901 | u16 instr[2]; |
902 | unsigned long epc = exception_epc(regs); | 902 | unsigned long epc = msk_isa16_mode(exception_epc(regs)); |
903 | 903 | ||
904 | if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc))) || | 904 | if (get_isa16_mode(regs->cp0_epc)) { |
905 | (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2)))) | 905 | if (__get_user(instr[0], (u16 __user *)(epc + 0)) || |
906 | __get_user(instr[1], (u16 __user *)(epc + 2))) | ||
906 | goto out_sigsegv; | 907 | goto out_sigsegv; |
907 | opcode = (instr[0] << 16) | instr[1]; | 908 | opcode = (instr[0] << 16) | instr[1]; |
908 | 909 | /* Immediate versions don't provide a code. */ | |
909 | /* Immediate versions don't provide a code. */ | 910 | if (!(opcode & OPCODE)) |
910 | if (!(opcode & OPCODE)) { | 911 | tcode = (opcode >> 12) & ((1 << 4) - 1); |
911 | if (get_isa16_mode(regs->cp0_epc)) | 912 | } else { |
912 | /* microMIPS */ | 913 | if (__get_user(opcode, (u32 __user *)epc)) |
913 | tcode = (opcode >> 12) & 0x1f; | 914 | goto out_sigsegv; |
914 | else | 915 | /* Immediate versions don't provide a code. */ |
915 | tcode = ((opcode >> 6) & ((1 << 10) - 1)); | 916 | if (!(opcode & OPCODE)) |
917 | tcode = (opcode >> 6) & ((1 << 10) - 1); | ||
916 | } | 918 | } |
917 | 919 | ||
918 | do_trap_or_bp(regs, tcode, "Trap"); | 920 | do_trap_or_bp(regs, tcode, "Trap"); |
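The reworked do_tr() fetches a 32-bit opcode on classic MIPS and a 16-bit halfword pair on microMIPS, then extracts the trap code from the correct field of each encoding: 4 bits at [15:12] for microMIPS, 10 bits at [15:6] for classic MIPS (the old code masked 5 bits and always used the 16-bit fetch path). The extraction itself, shown stand-alone as an illustration:

#include <stdint.h>
#include <stdio.h>

/* microMIPS trap: 4-bit code field in bits 15..12 of the assembled pair */
static unsigned micromips_tcode(uint32_t opcode)
{
	return (opcode >> 12) & ((1 << 4) - 1);
}

/* classic MIPS trap (register form): 10-bit code field in bits 15..6 */
static unsigned mips_tcode(uint32_t opcode)
{
	return (opcode >> 6) & ((1 << 10) - 1);
}

int main(void)
{
	/* 0x004301f4 encodes "teq $2, $3, 7" in the classic MIPS encoding */
	printf("%u\n", mips_tcode(0x004301f4));		/* prints 7 */
	/* arbitrary value whose bits 15..12 are 0xd */
	printf("%u\n", micromips_tcode(0x0000d000));	/* prints 13 */
	return 0;
}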
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c index e0dad0289797..dd203e59e6fd 100644 --- a/arch/mips/kvm/kvm_mips.c +++ b/arch/mips/kvm/kvm_mips.c | |||
@@ -195,7 +195,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm) | |||
195 | long | 195 | long |
196 | kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) | 196 | kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) |
197 | { | 197 | { |
198 | return -EINVAL; | 198 | return -ENOIOCTLCMD; |
199 | } | 199 | } |
200 | 200 | ||
201 | void kvm_arch_free_memslot(struct kvm_memory_slot *free, | 201 | void kvm_arch_free_memslot(struct kvm_memory_slot *free, |
@@ -401,7 +401,7 @@ int | |||
401 | kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, | 401 | kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, |
402 | struct kvm_guest_debug *dbg) | 402 | struct kvm_guest_debug *dbg) |
403 | { | 403 | { |
404 | return -EINVAL; | 404 | return -ENOIOCTLCMD; |
405 | } | 405 | } |
406 | 406 | ||
407 | int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) | 407 | int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) |
@@ -475,14 +475,248 @@ int | |||
475 | kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, | 475 | kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, |
476 | struct kvm_mp_state *mp_state) | 476 | struct kvm_mp_state *mp_state) |
477 | { | 477 | { |
478 | return -EINVAL; | 478 | return -ENOIOCTLCMD; |
479 | } | 479 | } |
480 | 480 | ||
481 | int | 481 | int |
482 | kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, | 482 | kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, |
483 | struct kvm_mp_state *mp_state) | 483 | struct kvm_mp_state *mp_state) |
484 | { | 484 | { |
485 | return -EINVAL; | 485 | return -ENOIOCTLCMD; |
486 | } | ||
487 | |||
488 | #define MIPS_CP0_32(_R, _S) \ | ||
489 | (KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * (_R) + (_S))) | ||
490 | |||
491 | #define MIPS_CP0_64(_R, _S) \ | ||
492 | (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 0x10000 | (8 * (_R) + (_S))) | ||
493 | |||
494 | #define KVM_REG_MIPS_CP0_INDEX MIPS_CP0_32(0, 0) | ||
495 | #define KVM_REG_MIPS_CP0_ENTRYLO0 MIPS_CP0_64(2, 0) | ||
496 | #define KVM_REG_MIPS_CP0_ENTRYLO1 MIPS_CP0_64(3, 0) | ||
497 | #define KVM_REG_MIPS_CP0_CONTEXT MIPS_CP0_64(4, 0) | ||
498 | #define KVM_REG_MIPS_CP0_USERLOCAL MIPS_CP0_64(4, 2) | ||
499 | #define KVM_REG_MIPS_CP0_PAGEMASK MIPS_CP0_32(5, 0) | ||
500 | #define KVM_REG_MIPS_CP0_PAGEGRAIN MIPS_CP0_32(5, 1) | ||
501 | #define KVM_REG_MIPS_CP0_WIRED MIPS_CP0_32(6, 0) | ||
502 | #define KVM_REG_MIPS_CP0_HWRENA MIPS_CP0_32(7, 0) | ||
503 | #define KVM_REG_MIPS_CP0_BADVADDR MIPS_CP0_64(8, 0) | ||
504 | #define KVM_REG_MIPS_CP0_COUNT MIPS_CP0_32(9, 0) | ||
505 | #define KVM_REG_MIPS_CP0_ENTRYHI MIPS_CP0_64(10, 0) | ||
506 | #define KVM_REG_MIPS_CP0_COMPARE MIPS_CP0_32(11, 0) | ||
507 | #define KVM_REG_MIPS_CP0_STATUS MIPS_CP0_32(12, 0) | ||
508 | #define KVM_REG_MIPS_CP0_CAUSE MIPS_CP0_32(13, 0) | ||
509 | #define KVM_REG_MIPS_CP0_EBASE MIPS_CP0_64(15, 1) | ||
510 | #define KVM_REG_MIPS_CP0_CONFIG MIPS_CP0_32(16, 0) | ||
511 | #define KVM_REG_MIPS_CP0_CONFIG1 MIPS_CP0_32(16, 1) | ||
512 | #define KVM_REG_MIPS_CP0_CONFIG2 MIPS_CP0_32(16, 2) | ||
513 | #define KVM_REG_MIPS_CP0_CONFIG3 MIPS_CP0_32(16, 3) | ||
514 | #define KVM_REG_MIPS_CP0_CONFIG7 MIPS_CP0_32(16, 7) | ||
515 | #define KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0) | ||
516 | #define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0) | ||
517 | |||
518 | static u64 kvm_mips_get_one_regs[] = { | ||
519 | KVM_REG_MIPS_R0, | ||
520 | KVM_REG_MIPS_R1, | ||
521 | KVM_REG_MIPS_R2, | ||
522 | KVM_REG_MIPS_R3, | ||
523 | KVM_REG_MIPS_R4, | ||
524 | KVM_REG_MIPS_R5, | ||
525 | KVM_REG_MIPS_R6, | ||
526 | KVM_REG_MIPS_R7, | ||
527 | KVM_REG_MIPS_R8, | ||
528 | KVM_REG_MIPS_R9, | ||
529 | KVM_REG_MIPS_R10, | ||
530 | KVM_REG_MIPS_R11, | ||
531 | KVM_REG_MIPS_R12, | ||
532 | KVM_REG_MIPS_R13, | ||
533 | KVM_REG_MIPS_R14, | ||
534 | KVM_REG_MIPS_R15, | ||
535 | KVM_REG_MIPS_R16, | ||
536 | KVM_REG_MIPS_R17, | ||
537 | KVM_REG_MIPS_R18, | ||
538 | KVM_REG_MIPS_R19, | ||
539 | KVM_REG_MIPS_R20, | ||
540 | KVM_REG_MIPS_R21, | ||
541 | KVM_REG_MIPS_R22, | ||
542 | KVM_REG_MIPS_R23, | ||
543 | KVM_REG_MIPS_R24, | ||
544 | KVM_REG_MIPS_R25, | ||
545 | KVM_REG_MIPS_R26, | ||
546 | KVM_REG_MIPS_R27, | ||
547 | KVM_REG_MIPS_R28, | ||
548 | KVM_REG_MIPS_R29, | ||
549 | KVM_REG_MIPS_R30, | ||
550 | KVM_REG_MIPS_R31, | ||
551 | |||
552 | KVM_REG_MIPS_HI, | ||
553 | KVM_REG_MIPS_LO, | ||
554 | KVM_REG_MIPS_PC, | ||
555 | |||
556 | KVM_REG_MIPS_CP0_INDEX, | ||
557 | KVM_REG_MIPS_CP0_CONTEXT, | ||
558 | KVM_REG_MIPS_CP0_PAGEMASK, | ||
559 | KVM_REG_MIPS_CP0_WIRED, | ||
560 | KVM_REG_MIPS_CP0_BADVADDR, | ||
561 | KVM_REG_MIPS_CP0_ENTRYHI, | ||
562 | KVM_REG_MIPS_CP0_STATUS, | ||
563 | KVM_REG_MIPS_CP0_CAUSE, | ||
564 | /* EPC set via kvm_regs, et al. */ | ||
565 | KVM_REG_MIPS_CP0_CONFIG, | ||
566 | KVM_REG_MIPS_CP0_CONFIG1, | ||
567 | KVM_REG_MIPS_CP0_CONFIG2, | ||
568 | KVM_REG_MIPS_CP0_CONFIG3, | ||
569 | KVM_REG_MIPS_CP0_CONFIG7, | ||
570 | KVM_REG_MIPS_CP0_ERROREPC | ||
571 | }; | ||
572 | |||
573 | static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, | ||
574 | const struct kvm_one_reg *reg) | ||
575 | { | ||
576 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
577 | s64 v; | ||
578 | |||
579 | switch (reg->id) { | ||
580 | case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31: | ||
581 | v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0]; | ||
582 | break; | ||
583 | case KVM_REG_MIPS_HI: | ||
584 | v = (long)vcpu->arch.hi; | ||
585 | break; | ||
586 | case KVM_REG_MIPS_LO: | ||
587 | v = (long)vcpu->arch.lo; | ||
588 | break; | ||
589 | case KVM_REG_MIPS_PC: | ||
590 | v = (long)vcpu->arch.pc; | ||
591 | break; | ||
592 | |||
593 | case KVM_REG_MIPS_CP0_INDEX: | ||
594 | v = (long)kvm_read_c0_guest_index(cop0); | ||
595 | break; | ||
596 | case KVM_REG_MIPS_CP0_CONTEXT: | ||
597 | v = (long)kvm_read_c0_guest_context(cop0); | ||
598 | break; | ||
599 | case KVM_REG_MIPS_CP0_PAGEMASK: | ||
600 | v = (long)kvm_read_c0_guest_pagemask(cop0); | ||
601 | break; | ||
602 | case KVM_REG_MIPS_CP0_WIRED: | ||
603 | v = (long)kvm_read_c0_guest_wired(cop0); | ||
604 | break; | ||
605 | case KVM_REG_MIPS_CP0_BADVADDR: | ||
606 | v = (long)kvm_read_c0_guest_badvaddr(cop0); | ||
607 | break; | ||
608 | case KVM_REG_MIPS_CP0_ENTRYHI: | ||
609 | v = (long)kvm_read_c0_guest_entryhi(cop0); | ||
610 | break; | ||
611 | case KVM_REG_MIPS_CP0_STATUS: | ||
612 | v = (long)kvm_read_c0_guest_status(cop0); | ||
613 | break; | ||
614 | case KVM_REG_MIPS_CP0_CAUSE: | ||
615 | v = (long)kvm_read_c0_guest_cause(cop0); | ||
616 | break; | ||
617 | case KVM_REG_MIPS_CP0_ERROREPC: | ||
618 | v = (long)kvm_read_c0_guest_errorepc(cop0); | ||
619 | break; | ||
620 | case KVM_REG_MIPS_CP0_CONFIG: | ||
621 | v = (long)kvm_read_c0_guest_config(cop0); | ||
622 | break; | ||
623 | case KVM_REG_MIPS_CP0_CONFIG1: | ||
624 | v = (long)kvm_read_c0_guest_config1(cop0); | ||
625 | break; | ||
626 | case KVM_REG_MIPS_CP0_CONFIG2: | ||
627 | v = (long)kvm_read_c0_guest_config2(cop0); | ||
628 | break; | ||
629 | case KVM_REG_MIPS_CP0_CONFIG3: | ||
630 | v = (long)kvm_read_c0_guest_config3(cop0); | ||
631 | break; | ||
632 | case KVM_REG_MIPS_CP0_CONFIG7: | ||
633 | v = (long)kvm_read_c0_guest_config7(cop0); | ||
634 | break; | ||
635 | default: | ||
636 | return -EINVAL; | ||
637 | } | ||
638 | if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) { | ||
639 | u64 __user *uaddr64 = (u64 __user *)(long)reg->addr; | ||
640 | return put_user(v, uaddr64); | ||
641 | } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) { | ||
642 | u32 __user *uaddr32 = (u32 __user *)(long)reg->addr; | ||
643 | u32 v32 = (u32)v; | ||
644 | return put_user(v32, uaddr32); | ||
645 | } else { | ||
646 | return -EINVAL; | ||
647 | } | ||
648 | } | ||
649 | |||
650 | static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, | ||
651 | const struct kvm_one_reg *reg) | ||
652 | { | ||
653 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
654 | u64 v; | ||
655 | |||
656 | if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) { | ||
657 | u64 __user *uaddr64 = (u64 __user *)(long)reg->addr; | ||
658 | |||
659 | if (get_user(v, uaddr64) != 0) | ||
660 | return -EFAULT; | ||
661 | } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) { | ||
662 | u32 __user *uaddr32 = (u32 __user *)(long)reg->addr; | ||
663 | s32 v32; | ||
664 | |||
665 | if (get_user(v32, uaddr32) != 0) | ||
666 | return -EFAULT; | ||
667 | v = (s64)v32; | ||
668 | } else { | ||
669 | return -EINVAL; | ||
670 | } | ||
671 | |||
672 | switch (reg->id) { | ||
673 | case KVM_REG_MIPS_R0: | ||
674 | /* Silently ignore requests to set $0 */ | ||
675 | break; | ||
676 | case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31: | ||
677 | vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v; | ||
678 | break; | ||
679 | case KVM_REG_MIPS_HI: | ||
680 | vcpu->arch.hi = v; | ||
681 | break; | ||
682 | case KVM_REG_MIPS_LO: | ||
683 | vcpu->arch.lo = v; | ||
684 | break; | ||
685 | case KVM_REG_MIPS_PC: | ||
686 | vcpu->arch.pc = v; | ||
687 | break; | ||
688 | |||
689 | case KVM_REG_MIPS_CP0_INDEX: | ||
690 | kvm_write_c0_guest_index(cop0, v); | ||
691 | break; | ||
692 | case KVM_REG_MIPS_CP0_CONTEXT: | ||
693 | kvm_write_c0_guest_context(cop0, v); | ||
694 | break; | ||
695 | case KVM_REG_MIPS_CP0_PAGEMASK: | ||
696 | kvm_write_c0_guest_pagemask(cop0, v); | ||
697 | break; | ||
698 | case KVM_REG_MIPS_CP0_WIRED: | ||
699 | kvm_write_c0_guest_wired(cop0, v); | ||
700 | break; | ||
701 | case KVM_REG_MIPS_CP0_BADVADDR: | ||
702 | kvm_write_c0_guest_badvaddr(cop0, v); | ||
703 | break; | ||
704 | case KVM_REG_MIPS_CP0_ENTRYHI: | ||
705 | kvm_write_c0_guest_entryhi(cop0, v); | ||
706 | break; | ||
707 | case KVM_REG_MIPS_CP0_STATUS: | ||
708 | kvm_write_c0_guest_status(cop0, v); | ||
709 | break; | ||
710 | case KVM_REG_MIPS_CP0_CAUSE: | ||
711 | kvm_write_c0_guest_cause(cop0, v); | ||
712 | break; | ||
713 | case KVM_REG_MIPS_CP0_ERROREPC: | ||
714 | kvm_write_c0_guest_errorepc(cop0, v); | ||
715 | break; | ||
716 | default: | ||
717 | return -EINVAL; | ||
718 | } | ||
719 | return 0; | ||
486 | } | 720 | } |
487 | 721 | ||
488 | long | 722 | long |
@@ -491,9 +725,38 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) | |||
491 | struct kvm_vcpu *vcpu = filp->private_data; | 725 | struct kvm_vcpu *vcpu = filp->private_data; |
492 | void __user *argp = (void __user *)arg; | 726 | void __user *argp = (void __user *)arg; |
493 | long r; | 727 | long r; |
494 | int intr; | ||
495 | 728 | ||
496 | switch (ioctl) { | 729 | switch (ioctl) { |
730 | case KVM_SET_ONE_REG: | ||
731 | case KVM_GET_ONE_REG: { | ||
732 | struct kvm_one_reg reg; | ||
733 | if (copy_from_user(®, argp, sizeof(reg))) | ||
734 | return -EFAULT; | ||
735 | if (ioctl == KVM_SET_ONE_REG) | ||
736 | return kvm_mips_set_reg(vcpu, ®); | ||
737 | else | ||
738 | return kvm_mips_get_reg(vcpu, ®); | ||
739 | } | ||
740 | case KVM_GET_REG_LIST: { | ||
741 | struct kvm_reg_list __user *user_list = argp; | ||
742 | u64 __user *reg_dest; | ||
743 | struct kvm_reg_list reg_list; | ||
744 | unsigned n; | ||
745 | |||
746 | if (copy_from_user(®_list, user_list, sizeof(reg_list))) | ||
747 | return -EFAULT; | ||
748 | n = reg_list.n; | ||
749 | reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs); | ||
750 | if (copy_to_user(user_list, ®_list, sizeof(reg_list))) | ||
751 | return -EFAULT; | ||
752 | if (n < reg_list.n) | ||
753 | return -E2BIG; | ||
754 | reg_dest = user_list->reg; | ||
755 | if (copy_to_user(reg_dest, kvm_mips_get_one_regs, | ||
756 | sizeof(kvm_mips_get_one_regs))) | ||
757 | return -EFAULT; | ||
758 | return 0; | ||
759 | } | ||
497 | case KVM_NMI: | 760 | case KVM_NMI: |
498 | /* Treat the NMI as a CPU reset */ | 761 | /* Treat the NMI as a CPU reset */ |
499 | r = kvm_mips_reset_vcpu(vcpu); | 762 | r = kvm_mips_reset_vcpu(vcpu); |
@@ -505,8 +768,6 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) | |||
505 | if (copy_from_user(&irq, argp, sizeof(irq))) | 768 | if (copy_from_user(&irq, argp, sizeof(irq))) |
506 | goto out; | 769 | goto out; |
507 | 770 | ||
508 | intr = (int)irq.irq; | ||
509 | |||
510 | kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, | 771 | kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, |
511 | irq.irq); | 772 | irq.irq); |
512 | 773 | ||
@@ -514,7 +775,7 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) | |||
514 | break; | 775 | break; |
515 | } | 776 | } |
516 | default: | 777 | default: |
517 | r = -EINVAL; | 778 | r = -ENOIOCTLCMD; |
518 | } | 779 | } |
519 | 780 | ||
520 | out: | 781 | out: |
@@ -565,7 +826,7 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) | |||
565 | 826 | ||
566 | switch (ioctl) { | 827 | switch (ioctl) { |
567 | default: | 828 | default: |
568 | r = -EINVAL; | 829 | r = -ENOIOCTLCMD; |
569 | } | 830 | } |
570 | 831 | ||
571 | return r; | 832 | return r; |
@@ -593,13 +854,13 @@ void kvm_arch_exit(void) | |||
593 | int | 854 | int |
594 | kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | 855 | kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) |
595 | { | 856 | { |
596 | return -ENOTSUPP; | 857 | return -ENOIOCTLCMD; |
597 | } | 858 | } |
598 | 859 | ||
599 | int | 860 | int |
600 | kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | 861 | kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) |
601 | { | 862 | { |
602 | return -ENOTSUPP; | 863 | return -ENOIOCTLCMD; |
603 | } | 864 | } |
604 | 865 | ||
605 | int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) | 866 | int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) |
@@ -609,12 +870,12 @@ int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) | |||
609 | 870 | ||
610 | int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) | 871 | int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) |
611 | { | 872 | { |
612 | return -ENOTSUPP; | 873 | return -ENOIOCTLCMD; |
613 | } | 874 | } |
614 | 875 | ||
615 | int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) | 876 | int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) |
616 | { | 877 | { |
617 | return -ENOTSUPP; | 878 | return -ENOIOCTLCMD; |
618 | } | 879 | } |
619 | 880 | ||
620 | int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) | 881 | int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) |
@@ -627,6 +888,9 @@ int kvm_dev_ioctl_check_extension(long ext) | |||
627 | int r; | 888 | int r; |
628 | 889 | ||
629 | switch (ext) { | 890 | switch (ext) { |
891 | case KVM_CAP_ONE_REG: | ||
892 | r = 1; | ||
893 | break; | ||
630 | case KVM_CAP_COALESCED_MMIO: | 894 | case KVM_CAP_COALESCED_MMIO: |
631 | r = KVM_COALESCED_MMIO_PAGE_OFFSET; | 895 | r = KVM_COALESCED_MMIO_PAGE_OFFSET; |
632 | break; | 896 | break; |
@@ -635,7 +899,6 @@ int kvm_dev_ioctl_check_extension(long ext) | |||
635 | break; | 899 | break; |
636 | } | 900 | } |
637 | return r; | 901 | return r; |
638 | |||
639 | } | 902 | } |
640 | 903 | ||
641 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) | 904 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) |
@@ -677,28 +940,28 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
677 | { | 940 | { |
678 | int i; | 941 | int i; |
679 | 942 | ||
680 | for (i = 0; i < 32; i++) | 943 | for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++) |
681 | vcpu->arch.gprs[i] = regs->gprs[i]; | 944 | vcpu->arch.gprs[i] = regs->gpr[i]; |
682 | 945 | vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */ | |
683 | vcpu->arch.hi = regs->hi; | 946 | vcpu->arch.hi = regs->hi; |
684 | vcpu->arch.lo = regs->lo; | 947 | vcpu->arch.lo = regs->lo; |
685 | vcpu->arch.pc = regs->pc; | 948 | vcpu->arch.pc = regs->pc; |
686 | 949 | ||
687 | return kvm_mips_callbacks->vcpu_ioctl_set_regs(vcpu, regs); | 950 | return 0; |
688 | } | 951 | } |
689 | 952 | ||
690 | int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | 953 | int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) |
691 | { | 954 | { |
692 | int i; | 955 | int i; |
693 | 956 | ||
694 | for (i = 0; i < 32; i++) | 957 | for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++) |
695 | regs->gprs[i] = vcpu->arch.gprs[i]; | 958 | regs->gpr[i] = vcpu->arch.gprs[i]; |
696 | 959 | ||
697 | regs->hi = vcpu->arch.hi; | 960 | regs->hi = vcpu->arch.hi; |
698 | regs->lo = vcpu->arch.lo; | 961 | regs->lo = vcpu->arch.lo; |
699 | regs->pc = vcpu->arch.pc; | 962 | regs->pc = vcpu->arch.pc; |
700 | 963 | ||
701 | return kvm_mips_callbacks->vcpu_ioctl_get_regs(vcpu, regs); | 964 | return 0; |
702 | } | 965 | } |
703 | 966 | ||
704 | void kvm_mips_comparecount_func(unsigned long data) | 967 | void kvm_mips_comparecount_func(unsigned long data) |
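The new KVM_GET_ONE_REG/KVM_SET_ONE_REG path replaces the old bulk CP0 access through kvm_regs. From user space, a register is read by building an ID with the same layout as the MIPS_CP0_32()/MIPS_CP0_64() macros above and passing it to the generic one_reg ioctl. A sketch, assuming vcpu_fd refers to an already-created MIPS VCPU and that <linux/kvm.h> provides KVM_REG_MIPS and the size flags:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Read guest CP0 Status (reg 12, sel 0) through the one_reg interface. */
static int read_guest_cp0_status(int vcpu_fd, uint32_t *status)
{
	/* Same layout as MIPS_CP0_32(): arch | size | 0x10000 | (8*reg + sel) */
	uint64_t id = KVM_REG_MIPS | KVM_REG_SIZE_U32 | 0x10000 | (8 * 12 + 0);
	struct kvm_one_reg reg;

	memset(&reg, 0, sizeof(reg));
	reg.id = id;
	reg.addr = (uintptr_t)status;

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);	/* 0 on success */
}

KVM_GET_REG_LIST can be issued first to discover which IDs the kernel exposes; as the handler above shows, the kernel rewrites the count and returns -E2BIG when the caller's buffer is too small.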
diff --git a/arch/mips/kvm/kvm_trap_emul.c b/arch/mips/kvm/kvm_trap_emul.c index 466aeef044bd..30d725321db1 100644 --- a/arch/mips/kvm/kvm_trap_emul.c +++ b/arch/mips/kvm/kvm_trap_emul.c | |||
@@ -345,54 +345,6 @@ static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu) | |||
345 | return ret; | 345 | return ret; |
346 | } | 346 | } |
347 | 347 | ||
348 | static int | ||
349 | kvm_trap_emul_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | ||
350 | { | ||
351 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
352 | |||
353 | kvm_write_c0_guest_index(cop0, regs->cp0reg[MIPS_CP0_TLB_INDEX][0]); | ||
354 | kvm_write_c0_guest_context(cop0, regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0]); | ||
355 | kvm_write_c0_guest_badvaddr(cop0, regs->cp0reg[MIPS_CP0_BAD_VADDR][0]); | ||
356 | kvm_write_c0_guest_entryhi(cop0, regs->cp0reg[MIPS_CP0_TLB_HI][0]); | ||
357 | kvm_write_c0_guest_epc(cop0, regs->cp0reg[MIPS_CP0_EXC_PC][0]); | ||
358 | |||
359 | kvm_write_c0_guest_status(cop0, regs->cp0reg[MIPS_CP0_STATUS][0]); | ||
360 | kvm_write_c0_guest_cause(cop0, regs->cp0reg[MIPS_CP0_CAUSE][0]); | ||
361 | kvm_write_c0_guest_pagemask(cop0, | ||
362 | regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0]); | ||
363 | kvm_write_c0_guest_wired(cop0, regs->cp0reg[MIPS_CP0_TLB_WIRED][0]); | ||
364 | kvm_write_c0_guest_errorepc(cop0, regs->cp0reg[MIPS_CP0_ERROR_PC][0]); | ||
365 | |||
366 | return 0; | ||
367 | } | ||
368 | |||
369 | static int | ||
370 | kvm_trap_emul_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | ||
371 | { | ||
372 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
373 | |||
374 | regs->cp0reg[MIPS_CP0_TLB_INDEX][0] = kvm_read_c0_guest_index(cop0); | ||
375 | regs->cp0reg[MIPS_CP0_TLB_CONTEXT][0] = kvm_read_c0_guest_context(cop0); | ||
376 | regs->cp0reg[MIPS_CP0_BAD_VADDR][0] = kvm_read_c0_guest_badvaddr(cop0); | ||
377 | regs->cp0reg[MIPS_CP0_TLB_HI][0] = kvm_read_c0_guest_entryhi(cop0); | ||
378 | regs->cp0reg[MIPS_CP0_EXC_PC][0] = kvm_read_c0_guest_epc(cop0); | ||
379 | |||
380 | regs->cp0reg[MIPS_CP0_STATUS][0] = kvm_read_c0_guest_status(cop0); | ||
381 | regs->cp0reg[MIPS_CP0_CAUSE][0] = kvm_read_c0_guest_cause(cop0); | ||
382 | regs->cp0reg[MIPS_CP0_TLB_PG_MASK][0] = | ||
383 | kvm_read_c0_guest_pagemask(cop0); | ||
384 | regs->cp0reg[MIPS_CP0_TLB_WIRED][0] = kvm_read_c0_guest_wired(cop0); | ||
385 | regs->cp0reg[MIPS_CP0_ERROR_PC][0] = kvm_read_c0_guest_errorepc(cop0); | ||
386 | |||
387 | regs->cp0reg[MIPS_CP0_CONFIG][0] = kvm_read_c0_guest_config(cop0); | ||
388 | regs->cp0reg[MIPS_CP0_CONFIG][1] = kvm_read_c0_guest_config1(cop0); | ||
389 | regs->cp0reg[MIPS_CP0_CONFIG][2] = kvm_read_c0_guest_config2(cop0); | ||
390 | regs->cp0reg[MIPS_CP0_CONFIG][3] = kvm_read_c0_guest_config3(cop0); | ||
391 | regs->cp0reg[MIPS_CP0_CONFIG][7] = kvm_read_c0_guest_config7(cop0); | ||
392 | |||
393 | return 0; | ||
394 | } | ||
395 | |||
396 | static int kvm_trap_emul_vm_init(struct kvm *kvm) | 348 | static int kvm_trap_emul_vm_init(struct kvm *kvm) |
397 | { | 349 | { |
398 | return 0; | 350 | return 0; |
@@ -471,8 +423,6 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = { | |||
471 | .dequeue_io_int = kvm_mips_dequeue_io_int_cb, | 423 | .dequeue_io_int = kvm_mips_dequeue_io_int_cb, |
472 | .irq_deliver = kvm_mips_irq_deliver_cb, | 424 | .irq_deliver = kvm_mips_irq_deliver_cb, |
473 | .irq_clear = kvm_mips_irq_clear_cb, | 425 | .irq_clear = kvm_mips_irq_clear_cb, |
474 | .vcpu_ioctl_get_regs = kvm_trap_emul_ioctl_get_regs, | ||
475 | .vcpu_ioctl_set_regs = kvm_trap_emul_ioctl_set_regs, | ||
476 | }; | 426 | }; |
477 | 427 | ||
478 | int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks) | 428 | int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks) |
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index ce9818eef7d3..afeef93f81a7 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c | |||
@@ -301,10 +301,6 @@ static u32 tlb_handler[128] __cpuinitdata; | |||
301 | static struct uasm_label labels[128] __cpuinitdata; | 301 | static struct uasm_label labels[128] __cpuinitdata; |
302 | static struct uasm_reloc relocs[128] __cpuinitdata; | 302 | static struct uasm_reloc relocs[128] __cpuinitdata; |
303 | 303 | ||
304 | #ifdef CONFIG_64BIT | ||
305 | static int check_for_high_segbits __cpuinitdata; | ||
306 | #endif | ||
307 | |||
308 | static int check_for_high_segbits __cpuinitdata; | 304 | static int check_for_high_segbits __cpuinitdata; |
309 | 305 | ||
310 | static unsigned int kscratch_used_mask __cpuinitdata; | 306 | static unsigned int kscratch_used_mask __cpuinitdata; |
diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c index fb1569580def..6b5f3406f414 100644 --- a/arch/mips/ralink/of.c +++ b/arch/mips/ralink/of.c | |||
@@ -88,7 +88,7 @@ void __init plat_mem_setup(void) | |||
88 | __dt_setup_arch(&__dtb_start); | 88 | __dt_setup_arch(&__dtb_start); |
89 | 89 | ||
90 | if (soc_info.mem_size) | 90 | if (soc_info.mem_size) |
91 | add_memory_region(soc_info.mem_base, soc_info.mem_size, | 91 | add_memory_region(soc_info.mem_base, soc_info.mem_size * SZ_1M, |
92 | BOOT_MEM_RAM); | 92 | BOOT_MEM_RAM); |
93 | else | 93 | else |
94 | detect_memory_region(soc_info.mem_base, | 94 | detect_memory_region(soc_info.mem_base, |
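soc_info.mem_size is recorded in megabytes, while add_memory_region() expects a size in bytes, hence the "* SZ_1M" in the fix above. A trivial check of the conversion, with SZ_1M reproduced here for illustration:

#include <stdio.h>

#define SZ_1M 0x00100000UL

int main(void)
{
	unsigned long mem_size_mb = 32;	/* hypothetical SoC with 32 MB of RAM */

	printf("%lu\n", mem_size_mb * SZ_1M);	/* prints 33554432 */
	return 0;
}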
diff --git a/arch/mn10300/include/asm/irqflags.h b/arch/mn10300/include/asm/irqflags.h index 678f68d5f37b..8730c0a3c37d 100644 --- a/arch/mn10300/include/asm/irqflags.h +++ b/arch/mn10300/include/asm/irqflags.h | |||
@@ -13,9 +13,8 @@ | |||
13 | #define _ASM_IRQFLAGS_H | 13 | #define _ASM_IRQFLAGS_H |
14 | 14 | ||
15 | #include <asm/cpu-regs.h> | 15 | #include <asm/cpu-regs.h> |
16 | #ifndef __ASSEMBLY__ | 16 | /* linux/smp.h <- linux/irqflags.h needs asm/smp.h first */ |
17 | #include <linux/smp.h> | 17 | #include <asm/smp.h> |
18 | #endif | ||
19 | 18 | ||
20 | /* | 19 | /* |
21 | * interrupt control | 20 | * interrupt control |
diff --git a/arch/mn10300/include/asm/smp.h b/arch/mn10300/include/asm/smp.h index 6745dbe64944..56c42417d428 100644 --- a/arch/mn10300/include/asm/smp.h +++ b/arch/mn10300/include/asm/smp.h | |||
@@ -24,6 +24,7 @@ | |||
24 | #ifndef __ASSEMBLY__ | 24 | #ifndef __ASSEMBLY__ |
25 | #include <linux/threads.h> | 25 | #include <linux/threads.h> |
26 | #include <linux/cpumask.h> | 26 | #include <linux/cpumask.h> |
27 | #include <linux/thread_info.h> | ||
27 | #endif | 28 | #endif |
28 | 29 | ||
29 | #ifdef CONFIG_SMP | 30 | #ifdef CONFIG_SMP |
@@ -85,7 +86,7 @@ extern cpumask_t cpu_boot_map; | |||
85 | extern void smp_init_cpus(void); | 86 | extern void smp_init_cpus(void); |
86 | extern void smp_cache_interrupt(void); | 87 | extern void smp_cache_interrupt(void); |
87 | extern void send_IPI_allbutself(int irq); | 88 | extern void send_IPI_allbutself(int irq); |
88 | extern int smp_nmi_call_function(smp_call_func_t func, void *info, int wait); | 89 | extern int smp_nmi_call_function(void (*func)(void *), void *info, int wait); |
89 | 90 | ||
90 | extern void arch_send_call_function_single_ipi(int cpu); | 91 | extern void arch_send_call_function_single_ipi(int cpu); |
91 | extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); | 92 | extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); |
@@ -100,6 +101,7 @@ extern void __cpu_die(unsigned int cpu); | |||
100 | #ifndef __ASSEMBLY__ | 101 | #ifndef __ASSEMBLY__ |
101 | 102 | ||
102 | static inline void smp_init_cpus(void) {} | 103 | static inline void smp_init_cpus(void) {} |
104 | #define raw_smp_processor_id() 0 | ||
103 | 105 | ||
104 | #endif /* __ASSEMBLY__ */ | 106 | #endif /* __ASSEMBLY__ */ |
105 | #endif /* CONFIG_SMP */ | 107 | #endif /* CONFIG_SMP */ |
diff --git a/arch/parisc/include/asm/mmzone.h b/arch/parisc/include/asm/mmzone.h index cc50d33b7b88..b6b34a0987e7 100644 --- a/arch/parisc/include/asm/mmzone.h +++ b/arch/parisc/include/asm/mmzone.h | |||
@@ -27,7 +27,7 @@ extern struct node_map_data node_data[]; | |||
27 | 27 | ||
28 | #define PFNNID_SHIFT (30 - PAGE_SHIFT) | 28 | #define PFNNID_SHIFT (30 - PAGE_SHIFT) |
29 | #define PFNNID_MAP_MAX 512 /* support 512GB */ | 29 | #define PFNNID_MAP_MAX 512 /* support 512GB */ |
30 | extern unsigned char pfnnid_map[PFNNID_MAP_MAX]; | 30 | extern signed char pfnnid_map[PFNNID_MAP_MAX]; |
31 | 31 | ||
32 | #ifndef CONFIG_64BIT | 32 | #ifndef CONFIG_64BIT |
33 | #define pfn_is_io(pfn) ((pfn & (0xf0000000UL >> PAGE_SHIFT)) == (0xf0000000UL >> PAGE_SHIFT)) | 33 | #define pfn_is_io(pfn) ((pfn & (0xf0000000UL >> PAGE_SHIFT)) == (0xf0000000UL >> PAGE_SHIFT)) |
@@ -46,7 +46,7 @@ static inline int pfn_to_nid(unsigned long pfn) | |||
46 | i = pfn >> PFNNID_SHIFT; | 46 | i = pfn >> PFNNID_SHIFT; |
47 | BUG_ON(i >= ARRAY_SIZE(pfnnid_map)); | 47 | BUG_ON(i >= ARRAY_SIZE(pfnnid_map)); |
48 | 48 | ||
49 | return (int)pfnnid_map[i]; | 49 | return pfnnid_map[i]; |
50 | } | 50 | } |
51 | 51 | ||
52 | static inline int pfn_valid(int pfn) | 52 | static inline int pfn_valid(int pfn) |
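pfnnid_map entries use -1 to mean "no node", so the array must be signed: with unsigned char the stored -1 reads back as 255 once widened to int, and pfn_to_nid() reports a bogus node number instead of -1. The sign-extension difference in isolation (where the parisc code initialises the map to -1 is outside this hunk; the sketch only shows why the type change matters):

#include <stdio.h>

int main(void)
{
	unsigned char u = -1;	/* stored as 255 */
	signed char   s = -1;	/* stays -1 when widened back to int */

	printf("%d %d\n", (int)u, (int)s);	/* prints "255 -1" */
	return 0;
}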
diff --git a/arch/parisc/include/asm/pci.h b/arch/parisc/include/asm/pci.h index 3234f492d575..465154076d23 100644 --- a/arch/parisc/include/asm/pci.h +++ b/arch/parisc/include/asm/pci.h | |||
@@ -225,4 +225,9 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) | |||
225 | return channel ? 15 : 14; | 225 | return channel ? 15 : 14; |
226 | } | 226 | } |
227 | 227 | ||
228 | #define HAVE_PCI_MMAP | ||
229 | |||
230 | extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | ||
231 | enum pci_mmap_state mmap_state, int write_combine); | ||
232 | |||
228 | #endif /* __ASM_PARISC_PCI_H */ | 233 | #endif /* __ASM_PARISC_PCI_H */ |
diff --git a/arch/parisc/kernel/hardware.c b/arch/parisc/kernel/hardware.c index 9e2d2e408529..872275659d98 100644 --- a/arch/parisc/kernel/hardware.c +++ b/arch/parisc/kernel/hardware.c | |||
@@ -1205,6 +1205,7 @@ static struct hp_hardware hp_hardware_list[] = { | |||
1205 | {HPHW_FIO, 0x004, 0x00320, 0x0, "Metheus Frame Buffer"}, | 1205 | {HPHW_FIO, 0x004, 0x00320, 0x0, "Metheus Frame Buffer"}, |
1206 | {HPHW_FIO, 0x004, 0x00340, 0x0, "BARCO CX4500 VME Grphx Cnsl"}, | 1206 | {HPHW_FIO, 0x004, 0x00340, 0x0, "BARCO CX4500 VME Grphx Cnsl"}, |
1207 | {HPHW_FIO, 0x004, 0x00360, 0x0, "Hughes TOG VME FDDI"}, | 1207 | {HPHW_FIO, 0x004, 0x00360, 0x0, "Hughes TOG VME FDDI"}, |
1208 | {HPHW_FIO, 0x076, 0x000AD, 0x00, "Crestone Peak RS-232"}, | ||
1208 | {HPHW_IOA, 0x185, 0x0000B, 0x00, "Java BC Summit Port"}, | 1209 | {HPHW_IOA, 0x185, 0x0000B, 0x00, "Java BC Summit Port"}, |
1209 | {HPHW_IOA, 0x1FF, 0x0000B, 0x00, "Hitachi Ghostview Summit Port"}, | 1210 | {HPHW_IOA, 0x1FF, 0x0000B, 0x00, "Hitachi Ghostview Summit Port"}, |
1210 | {HPHW_IOA, 0x580, 0x0000B, 0x10, "U2-IOA BC Runway Port"}, | 1211 | {HPHW_IOA, 0x580, 0x0000B, 0x10, "U2-IOA BC Runway Port"}, |
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S index 36d7f402e48e..b743a80eaba0 100644 --- a/arch/parisc/kernel/pacache.S +++ b/arch/parisc/kernel/pacache.S | |||
@@ -860,7 +860,7 @@ ENTRY(flush_dcache_page_asm) | |||
860 | #endif | 860 | #endif |
861 | 861 | ||
862 | ldil L%dcache_stride, %r1 | 862 | ldil L%dcache_stride, %r1 |
863 | ldw R%dcache_stride(%r1), %r1 | 863 | ldw R%dcache_stride(%r1), r31 |
864 | 864 | ||
865 | #ifdef CONFIG_64BIT | 865 | #ifdef CONFIG_64BIT |
866 | depdi,z 1, 63-PAGE_SHIFT,1, %r25 | 866 | depdi,z 1, 63-PAGE_SHIFT,1, %r25 |
@@ -868,26 +868,26 @@ ENTRY(flush_dcache_page_asm) | |||
868 | depwi,z 1, 31-PAGE_SHIFT,1, %r25 | 868 | depwi,z 1, 31-PAGE_SHIFT,1, %r25 |
869 | #endif | 869 | #endif |
870 | add %r28, %r25, %r25 | 870 | add %r28, %r25, %r25 |
871 | sub %r25, %r1, %r25 | 871 | sub %r25, r31, %r25 |
872 | 872 | ||
873 | 873 | ||
874 | 1: fdc,m %r1(%r28) | 874 | 1: fdc,m r31(%r28) |
875 | fdc,m %r1(%r28) | 875 | fdc,m r31(%r28) |
876 | fdc,m %r1(%r28) | 876 | fdc,m r31(%r28) |
877 | fdc,m %r1(%r28) | 877 | fdc,m r31(%r28) |
878 | fdc,m %r1(%r28) | 878 | fdc,m r31(%r28) |
879 | fdc,m %r1(%r28) | 879 | fdc,m r31(%r28) |
880 | fdc,m %r1(%r28) | 880 | fdc,m r31(%r28) |
881 | fdc,m %r1(%r28) | 881 | fdc,m r31(%r28) |
882 | fdc,m %r1(%r28) | 882 | fdc,m r31(%r28) |
883 | fdc,m %r1(%r28) | 883 | fdc,m r31(%r28) |
884 | fdc,m %r1(%r28) | 884 | fdc,m r31(%r28) |
885 | fdc,m %r1(%r28) | 885 | fdc,m r31(%r28) |
886 | fdc,m %r1(%r28) | 886 | fdc,m r31(%r28) |
887 | fdc,m %r1(%r28) | 887 | fdc,m r31(%r28) |
888 | fdc,m %r1(%r28) | 888 | fdc,m r31(%r28) |
889 | cmpb,COND(<<) %r28, %r25,1b | 889 | cmpb,COND(<<) %r28, %r25,1b |
890 | fdc,m %r1(%r28) | 890 | fdc,m r31(%r28) |
891 | 891 | ||
892 | sync | 892 | sync |
893 | 893 | ||
@@ -936,7 +936,7 @@ ENTRY(flush_icache_page_asm) | |||
936 | #endif | 936 | #endif |
937 | 937 | ||
938 | ldil L%icache_stride, %r1 | 938 | ldil L%icache_stride, %r1 |
939 | ldw R%icache_stride(%r1), %r1 | 939 | ldw R%icache_stride(%r1), %r31 |
940 | 940 | ||
941 | #ifdef CONFIG_64BIT | 941 | #ifdef CONFIG_64BIT |
942 | depdi,z 1, 63-PAGE_SHIFT,1, %r25 | 942 | depdi,z 1, 63-PAGE_SHIFT,1, %r25 |
@@ -944,28 +944,28 @@ ENTRY(flush_icache_page_asm) | |||
944 | depwi,z 1, 31-PAGE_SHIFT,1, %r25 | 944 | depwi,z 1, 31-PAGE_SHIFT,1, %r25 |
945 | #endif | 945 | #endif |
946 | add %r28, %r25, %r25 | 946 | add %r28, %r25, %r25 |
947 | sub %r25, %r1, %r25 | 947 | sub %r25, %r31, %r25 |
948 | 948 | ||
949 | 949 | ||
950 | /* fic only has the type 26 form on PA1.1, requiring an | 950 | /* fic only has the type 26 form on PA1.1, requiring an |
951 | * explicit space specification, so use %sr4 */ | 951 | * explicit space specification, so use %sr4 */ |
952 | 1: fic,m %r1(%sr4,%r28) | 952 | 1: fic,m %r31(%sr4,%r28) |
953 | fic,m %r1(%sr4,%r28) | 953 | fic,m %r31(%sr4,%r28) |
954 | fic,m %r1(%sr4,%r28) | 954 | fic,m %r31(%sr4,%r28) |
955 | fic,m %r1(%sr4,%r28) | 955 | fic,m %r31(%sr4,%r28) |
956 | fic,m %r1(%sr4,%r28) | 956 | fic,m %r31(%sr4,%r28) |
957 | fic,m %r1(%sr4,%r28) | 957 | fic,m %r31(%sr4,%r28) |
958 | fic,m %r1(%sr4,%r28) | 958 | fic,m %r31(%sr4,%r28) |
959 | fic,m %r1(%sr4,%r28) | 959 | fic,m %r31(%sr4,%r28) |
960 | fic,m %r1(%sr4,%r28) | 960 | fic,m %r31(%sr4,%r28) |
961 | fic,m %r1(%sr4,%r28) | 961 | fic,m %r31(%sr4,%r28) |
962 | fic,m %r1(%sr4,%r28) | 962 | fic,m %r31(%sr4,%r28) |
963 | fic,m %r1(%sr4,%r28) | 963 | fic,m %r31(%sr4,%r28) |
964 | fic,m %r1(%sr4,%r28) | 964 | fic,m %r31(%sr4,%r28) |
965 | fic,m %r1(%sr4,%r28) | 965 | fic,m %r31(%sr4,%r28) |
966 | fic,m %r1(%sr4,%r28) | 966 | fic,m %r31(%sr4,%r28) |
967 | cmpb,COND(<<) %r28, %r25,1b | 967 | cmpb,COND(<<) %r28, %r25,1b |
968 | fic,m %r1(%sr4,%r28) | 968 | fic,m %r31(%sr4,%r28) |
969 | 969 | ||
970 | sync | 970 | sync |
971 | 971 | ||
diff --git a/arch/parisc/kernel/pci.c b/arch/parisc/kernel/pci.c index 60309051875e..64f2764a8cef 100644 --- a/arch/parisc/kernel/pci.c +++ b/arch/parisc/kernel/pci.c | |||
@@ -220,6 +220,33 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res, | |||
220 | } | 220 | } |
221 | 221 | ||
222 | 222 | ||
223 | int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | ||
224 | enum pci_mmap_state mmap_state, int write_combine) | ||
225 | { | ||
226 | unsigned long prot; | ||
227 | |||
228 | /* | ||
229 | * I/O space can be accessed via normal processor loads and stores on | ||
230 | * this platform but for now we elect not to do this and portable | ||
231 | * drivers should not do this anyway. | ||
232 | */ | ||
233 | if (mmap_state == pci_mmap_io) | ||
234 | return -EINVAL; | ||
235 | |||
236 | if (write_combine) | ||
237 | return -EINVAL; | ||
238 | |||
239 | /* | ||
240 | * Ignore write-combine; for now only return uncached mappings. | ||
241 | */ | ||
242 | prot = pgprot_val(vma->vm_page_prot); | ||
243 | prot |= _PAGE_NO_CACHE; | ||
244 | vma->vm_page_prot = __pgprot(prot); | ||
245 | |||
246 | return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, | ||
247 | vma->vm_end - vma->vm_start, vma->vm_page_prot); | ||
248 | } | ||
249 | |||
223 | /* | 250 | /* |
224 | * A driver is enabling the device. We make sure that all the appropriate | 251 | * A driver is enabling the device. We make sure that all the appropriate |
225 | * bits are set to allow the device to operate as the driver is expecting. | 252 | * bits are set to allow the device to operate as the driver is expecting. |
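With HAVE_PCI_MMAP defined and pci_mmap_page_range() implemented, the generic PCI core exposes mmap()-able resource files for each device, and the parisc helper above maps them uncached (_PAGE_NO_CACHE). A user-space sketch; the device path and BAR size are placeholders, not taken from this commit:

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/bus/pci/devices/0000:00:01.0/resource0";
	size_t len = 4096;		/* assume the BAR spans at least one page */
	void *bar;
	int fd = open(path, O_RDWR);

	if (fd < 0)
		return 1;

	bar = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	close(fd);
	if (bar == MAP_FAILED)
		return 1;

	/* Accesses here go straight to the device (the mapping is uncached). */
	munmap(bar, len);
	return 0;
}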
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 1c965642068b..505b56c6b9b9 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c | |||
@@ -47,7 +47,7 @@ pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pt | |||
47 | 47 | ||
48 | #ifdef CONFIG_DISCONTIGMEM | 48 | #ifdef CONFIG_DISCONTIGMEM |
49 | struct node_map_data node_data[MAX_NUMNODES] __read_mostly; | 49 | struct node_map_data node_data[MAX_NUMNODES] __read_mostly; |
50 | unsigned char pfnnid_map[PFNNID_MAP_MAX] __read_mostly; | 50 | signed char pfnnid_map[PFNNID_MAP_MAX] __read_mostly; |
51 | #endif | 51 | #endif |
52 | 52 | ||
53 | static struct resource data_resource = { | 53 | static struct resource data_resource = { |
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index 26807e5aff51..6f3887d884d2 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h | |||
@@ -176,6 +176,7 @@ extern const char *powerpc_base_platform; | |||
176 | #define CPU_FTR_CFAR LONG_ASM_CONST(0x0100000000000000) | 176 | #define CPU_FTR_CFAR LONG_ASM_CONST(0x0100000000000000) |
177 | #define CPU_FTR_HAS_PPR LONG_ASM_CONST(0x0200000000000000) | 177 | #define CPU_FTR_HAS_PPR LONG_ASM_CONST(0x0200000000000000) |
178 | #define CPU_FTR_DAWR LONG_ASM_CONST(0x0400000000000000) | 178 | #define CPU_FTR_DAWR LONG_ASM_CONST(0x0400000000000000) |
179 | #define CPU_FTR_DABRX LONG_ASM_CONST(0x0800000000000000) | ||
179 | 180 | ||
180 | #ifndef __ASSEMBLY__ | 181 | #ifndef __ASSEMBLY__ |
181 | 182 | ||
@@ -394,19 +395,20 @@ extern const char *powerpc_base_platform; | |||
394 | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_201 | \ | 395 | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_201 | \ |
395 | CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \ | 396 | CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \ |
396 | CPU_FTR_CP_USE_DCBTZ | CPU_FTR_STCX_CHECKS_ADDRESS | \ | 397 | CPU_FTR_CP_USE_DCBTZ | CPU_FTR_STCX_CHECKS_ADDRESS | \ |
397 | CPU_FTR_HVMODE) | 398 | CPU_FTR_HVMODE | CPU_FTR_DABRX) |
398 | #define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ | 399 | #define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ |
399 | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ | 400 | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ |
400 | CPU_FTR_MMCRA | CPU_FTR_SMT | \ | 401 | CPU_FTR_MMCRA | CPU_FTR_SMT | \ |
401 | CPU_FTR_COHERENT_ICACHE | CPU_FTR_PURR | \ | 402 | CPU_FTR_COHERENT_ICACHE | CPU_FTR_PURR | \ |
402 | CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB) | 403 | CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_DABRX) |
403 | #define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ | 404 | #define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ |
404 | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ | 405 | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ |
405 | CPU_FTR_MMCRA | CPU_FTR_SMT | \ | 406 | CPU_FTR_MMCRA | CPU_FTR_SMT | \ |
406 | CPU_FTR_COHERENT_ICACHE | \ | 407 | CPU_FTR_COHERENT_ICACHE | \ |
407 | CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ | 408 | CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ |
408 | CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \ | 409 | CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \ |
409 | CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR) | 410 | CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR | \ |
411 | CPU_FTR_DABRX) | ||
410 | #define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ | 412 | #define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ |
411 | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\ | 413 | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\ |
412 | CPU_FTR_MMCRA | CPU_FTR_SMT | \ | 414 | CPU_FTR_MMCRA | CPU_FTR_SMT | \ |
@@ -415,7 +417,7 @@ extern const char *powerpc_base_platform; | |||
415 | CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \ | 417 | CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \ |
416 | CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ | 418 | CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ |
417 | CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | \ | 419 | CPU_FTR_ICSWX | CPU_FTR_CFAR | CPU_FTR_HVMODE | \ |
418 | CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR) | 420 | CPU_FTR_VMX_COPY | CPU_FTR_HAS_PPR | CPU_FTR_DABRX) |
419 | #define CPU_FTRS_POWER8 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ | 421 | #define CPU_FTRS_POWER8 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ |
420 | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\ | 422 | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_ARCH_206 |\ |
421 | CPU_FTR_MMCRA | CPU_FTR_SMT | \ | 423 | CPU_FTR_MMCRA | CPU_FTR_SMT | \ |
@@ -430,14 +432,15 @@ extern const char *powerpc_base_platform; | |||
430 | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ | 432 | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ |
431 | CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ | 433 | CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ |
432 | CPU_FTR_PAUSE_ZERO | CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ | \ | 434 | CPU_FTR_PAUSE_ZERO | CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ | \ |
433 | CPU_FTR_UNALIGNED_LD_STD) | 435 | CPU_FTR_UNALIGNED_LD_STD | CPU_FTR_DABRX) |
434 | #define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ | 436 | #define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ |
435 | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP | \ | 437 | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP | \ |
436 | CPU_FTR_PURR | CPU_FTR_REAL_LE) | 438 | CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_DABRX) |
437 | #define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2) | 439 | #define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2) |
438 | 440 | ||
439 | #define CPU_FTRS_A2 (CPU_FTR_USE_TB | CPU_FTR_SMT | CPU_FTR_DBELL | \ | 441 | #define CPU_FTRS_A2 (CPU_FTR_USE_TB | CPU_FTR_SMT | CPU_FTR_DBELL | \ |
440 | CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN | CPU_FTR_ICSWX) | 442 | CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN | \ |
443 | CPU_FTR_ICSWX | CPU_FTR_DABRX ) | ||
441 | 444 | ||
442 | #ifdef __powerpc64__ | 445 | #ifdef __powerpc64__ |
443 | #ifdef CONFIG_PPC_BOOK3E | 446 | #ifdef CONFIG_PPC_BOOK3E |
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index 8e5fae8beaf6..46793b58a761 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h | |||
@@ -513,7 +513,7 @@ label##_common: \ | |||
513 | */ | 513 | */ |
514 | #define STD_EXCEPTION_COMMON_ASYNC(trap, label, hdlr) \ | 514 | #define STD_EXCEPTION_COMMON_ASYNC(trap, label, hdlr) \ |
515 | EXCEPTION_COMMON(trap, label, hdlr, ret_from_except_lite, \ | 515 | EXCEPTION_COMMON(trap, label, hdlr, ret_from_except_lite, \ |
516 | FINISH_NAP;RUNLATCH_ON;DISABLE_INTS) | 516 | FINISH_NAP;DISABLE_INTS;RUNLATCH_ON) |
517 | 517 | ||
518 | /* | 518 | /* |
519 | * When the idle code in power4_idle puts the CPU into NAP mode, | 519 | * When the idle code in power4_idle puts the CPU into NAP mode, |
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h index b9dd382cb349..851bac7afa4b 100644 --- a/arch/powerpc/include/asm/kvm_asm.h +++ b/arch/powerpc/include/asm/kvm_asm.h | |||
@@ -54,8 +54,16 @@ | |||
54 | #define BOOKE_INTERRUPT_DEBUG 15 | 54 | #define BOOKE_INTERRUPT_DEBUG 15 |
55 | 55 | ||
56 | /* E500 */ | 56 | /* E500 */ |
57 | #define BOOKE_INTERRUPT_SPE_UNAVAIL 32 | 57 | #define BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL 32 |
58 | #define BOOKE_INTERRUPT_SPE_FP_DATA 33 | 58 | #define BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST 33 |
59 | /* | ||
60 | * TODO: Unify 32-bit and 64-bit kernel exception handlers to use same defines | ||
61 | */ | ||
62 | #define BOOKE_INTERRUPT_SPE_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL | ||
63 | #define BOOKE_INTERRUPT_SPE_FP_DATA BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST | ||
64 | #define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL BOOKE_INTERRUPT_SPE_ALTIVEC_UNAVAIL | ||
65 | #define BOOKE_INTERRUPT_ALTIVEC_ASSIST \ | ||
66 | BOOKE_INTERRUPT_SPE_FP_DATA_ALTIVEC_ASSIST | ||
59 | #define BOOKE_INTERRUPT_SPE_FP_ROUND 34 | 67 | #define BOOKE_INTERRUPT_SPE_FP_ROUND 34 |
60 | #define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35 | 68 | #define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35 |
61 | #define BOOKE_INTERRUPT_DOORBELL 36 | 69 | #define BOOKE_INTERRUPT_DOORBELL 36 |
@@ -67,10 +75,6 @@ | |||
67 | #define BOOKE_INTERRUPT_HV_SYSCALL 40 | 75 | #define BOOKE_INTERRUPT_HV_SYSCALL 40 |
68 | #define BOOKE_INTERRUPT_HV_PRIV 41 | 76 | #define BOOKE_INTERRUPT_HV_PRIV 41 |
69 | 77 | ||
70 | /* altivec */ | ||
71 | #define BOOKE_INTERRUPT_ALTIVEC_UNAVAIL 42 | ||
72 | #define BOOKE_INTERRUPT_ALTIVEC_ASSIST 43 | ||
73 | |||
74 | /* book3s */ | 78 | /* book3s */ |
75 | 79 | ||
76 | #define BOOK3S_INTERRUPT_SYSTEM_RESET 0x100 | 80 | #define BOOK3S_INTERRUPT_SYSTEM_RESET 0x100 |
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 1f0937d7d4b5..2a45d0f04385 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c | |||
@@ -452,8 +452,8 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
452 | .mmu_features = MMU_FTRS_POWER8, | 452 | .mmu_features = MMU_FTRS_POWER8, |
453 | .icache_bsize = 128, | 453 | .icache_bsize = 128, |
454 | .dcache_bsize = 128, | 454 | .dcache_bsize = 128, |
455 | .oprofile_type = PPC_OPROFILE_POWER4, | 455 | .oprofile_type = PPC_OPROFILE_INVALID, |
456 | .oprofile_cpu_type = 0, | 456 | .oprofile_cpu_type = "ppc64/ibm-compat-v1", |
457 | .cpu_setup = __setup_cpu_power8, | 457 | .cpu_setup = __setup_cpu_power8, |
458 | .cpu_restore = __restore_cpu_power8, | 458 | .cpu_restore = __restore_cpu_power8, |
459 | .platform = "power8", | 459 | .platform = "power8", |
@@ -506,8 +506,8 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
506 | .dcache_bsize = 128, | 506 | .dcache_bsize = 128, |
507 | .num_pmcs = 6, | 507 | .num_pmcs = 6, |
508 | .pmc_type = PPC_PMC_IBM, | 508 | .pmc_type = PPC_PMC_IBM, |
509 | .oprofile_cpu_type = 0, | 509 | .oprofile_cpu_type = "ppc64/power8", |
510 | .oprofile_type = PPC_OPROFILE_POWER4, | 510 | .oprofile_type = PPC_OPROFILE_INVALID, |
511 | .cpu_setup = __setup_cpu_power8, | 511 | .cpu_setup = __setup_cpu_power8, |
512 | .cpu_restore = __restore_cpu_power8, | 512 | .cpu_restore = __restore_cpu_power8, |
513 | .platform = "power8", | 513 | .platform = "power8", |
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 246b11c4fe7e..8741c854e03d 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S | |||
@@ -465,20 +465,6 @@ BEGIN_FTR_SECTION | |||
465 | std r0, THREAD_EBBHR(r3) | 465 | std r0, THREAD_EBBHR(r3) |
466 | mfspr r0, SPRN_EBBRR | 466 | mfspr r0, SPRN_EBBRR |
467 | std r0, THREAD_EBBRR(r3) | 467 | std r0, THREAD_EBBRR(r3) |
468 | |||
469 | /* PMU registers made user read/(write) by EBB */ | ||
470 | mfspr r0, SPRN_SIAR | ||
471 | std r0, THREAD_SIAR(r3) | ||
472 | mfspr r0, SPRN_SDAR | ||
473 | std r0, THREAD_SDAR(r3) | ||
474 | mfspr r0, SPRN_SIER | ||
475 | std r0, THREAD_SIER(r3) | ||
476 | mfspr r0, SPRN_MMCR0 | ||
477 | std r0, THREAD_MMCR0(r3) | ||
478 | mfspr r0, SPRN_MMCR2 | ||
479 | std r0, THREAD_MMCR2(r3) | ||
480 | mfspr r0, SPRN_MMCRA | ||
481 | std r0, THREAD_MMCRA(r3) | ||
482 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) | 468 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) |
483 | #endif | 469 | #endif |
484 | 470 | ||
@@ -581,20 +567,6 @@ BEGIN_FTR_SECTION | |||
581 | ld r0, THREAD_EBBRR(r4) | 567 | ld r0, THREAD_EBBRR(r4) |
582 | mtspr SPRN_EBBRR, r0 | 568 | mtspr SPRN_EBBRR, r0 |
583 | 569 | ||
584 | /* PMU registers made user read/(write) by EBB */ | ||
585 | ld r0, THREAD_SIAR(r4) | ||
586 | mtspr SPRN_SIAR, r0 | ||
587 | ld r0, THREAD_SDAR(r4) | ||
588 | mtspr SPRN_SDAR, r0 | ||
589 | ld r0, THREAD_SIER(r4) | ||
590 | mtspr SPRN_SIER, r0 | ||
591 | ld r0, THREAD_MMCR0(r4) | ||
592 | mtspr SPRN_MMCR0, r0 | ||
593 | ld r0, THREAD_MMCR2(r4) | ||
594 | mtspr SPRN_MMCR2, r0 | ||
595 | ld r0, THREAD_MMCRA(r4) | ||
596 | mtspr SPRN_MMCRA, r0 | ||
597 | |||
598 | ld r0,THREAD_TAR(r4) | 570 | ld r0,THREAD_TAR(r4) |
599 | mtspr SPRN_TAR,r0 | 571 | mtspr SPRN_TAR,r0 |
600 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) | 572 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) |
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index e6eba1bf61ad..40e4a17c8ba0 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S | |||
@@ -454,38 +454,14 @@ BEGIN_FTR_SECTION | |||
454 | xori r10,r10,(MSR_FE0|MSR_FE1) | 454 | xori r10,r10,(MSR_FE0|MSR_FE1) |
455 | mtmsrd r10 | 455 | mtmsrd r10 |
456 | sync | 456 | sync |
457 | fmr 0,0 | 457 | |
458 | fmr 1,1 | 458 | #define FMR2(n) fmr (n), (n) ; fmr n+1, n+1 |
459 | fmr 2,2 | 459 | #define FMR4(n) FMR2(n) ; FMR2(n+2) |
460 | fmr 3,3 | 460 | #define FMR8(n) FMR4(n) ; FMR4(n+4) |
461 | fmr 4,4 | 461 | #define FMR16(n) FMR8(n) ; FMR8(n+8) |
462 | fmr 5,5 | 462 | #define FMR32(n) FMR16(n) ; FMR16(n+16) |
463 | fmr 6,6 | 463 | FMR32(0) |
464 | fmr 7,7 | 464 | |
465 | fmr 8,8 | ||
466 | fmr 9,9 | ||
467 | fmr 10,10 | ||
468 | fmr 11,11 | ||
469 | fmr 12,12 | ||
470 | fmr 13,13 | ||
471 | fmr 14,14 | ||
472 | fmr 15,15 | ||
473 | fmr 16,16 | ||
474 | fmr 17,17 | ||
475 | fmr 18,18 | ||
476 | fmr 19,19 | ||
477 | fmr 20,20 | ||
478 | fmr 21,21 | ||
479 | fmr 22,22 | ||
480 | fmr 23,23 | ||
481 | fmr 24,24 | ||
482 | fmr 25,25 | ||
483 | fmr 26,26 | ||
484 | fmr 27,27 | ||
485 | fmr 28,28 | ||
486 | fmr 29,29 | ||
487 | fmr 30,30 | ||
488 | fmr 31,31 | ||
489 | FTR_SECTION_ELSE | 465 | FTR_SECTION_ELSE |
490 | /* | 466 | /* |
491 | * To denormalise we need to move a copy of the register to itself. | 467 | * To denormalise we need to move a copy of the register to itself. |
@@ -495,39 +471,25 @@ FTR_SECTION_ELSE | |||
495 | oris r10,r10,MSR_VSX@h | 471 | oris r10,r10,MSR_VSX@h |
496 | mtmsrd r10 | 472 | mtmsrd r10 |
497 | sync | 473 | sync |
498 | XVCPSGNDP(0,0,0) | 474 | |
499 | XVCPSGNDP(1,1,1) | 475 | #define XVCPSGNDP2(n) XVCPSGNDP(n,n,n) ; XVCPSGNDP(n+1,n+1,n+1) |
500 | XVCPSGNDP(2,2,2) | 476 | #define XVCPSGNDP4(n) XVCPSGNDP2(n) ; XVCPSGNDP2(n+2) |
501 | XVCPSGNDP(3,3,3) | 477 | #define XVCPSGNDP8(n) XVCPSGNDP4(n) ; XVCPSGNDP4(n+4) |
502 | XVCPSGNDP(4,4,4) | 478 | #define XVCPSGNDP16(n) XVCPSGNDP8(n) ; XVCPSGNDP8(n+8) |
503 | XVCPSGNDP(5,5,5) | 479 | #define XVCPSGNDP32(n) XVCPSGNDP16(n) ; XVCPSGNDP16(n+16) |
504 | XVCPSGNDP(6,6,6) | 480 | XVCPSGNDP32(0) |
505 | XVCPSGNDP(7,7,7) | 481 | |
506 | XVCPSGNDP(8,8,8) | ||
507 | XVCPSGNDP(9,9,9) | ||
508 | XVCPSGNDP(10,10,10) | ||
509 | XVCPSGNDP(11,11,11) | ||
510 | XVCPSGNDP(12,12,12) | ||
511 | XVCPSGNDP(13,13,13) | ||
512 | XVCPSGNDP(14,14,14) | ||
513 | XVCPSGNDP(15,15,15) | ||
514 | XVCPSGNDP(16,16,16) | ||
515 | XVCPSGNDP(17,17,17) | ||
516 | XVCPSGNDP(18,18,18) | ||
517 | XVCPSGNDP(19,19,19) | ||
518 | XVCPSGNDP(20,20,20) | ||
519 | XVCPSGNDP(21,21,21) | ||
520 | XVCPSGNDP(22,22,22) | ||
521 | XVCPSGNDP(23,23,23) | ||
522 | XVCPSGNDP(24,24,24) | ||
523 | XVCPSGNDP(25,25,25) | ||
524 | XVCPSGNDP(26,26,26) | ||
525 | XVCPSGNDP(27,27,27) | ||
526 | XVCPSGNDP(28,28,28) | ||
527 | XVCPSGNDP(29,29,29) | ||
528 | XVCPSGNDP(30,30,30) | ||
529 | XVCPSGNDP(31,31,31) | ||
530 | ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206) | 482 | ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206) |
483 | |||
484 | BEGIN_FTR_SECTION | ||
485 | b denorm_done | ||
486 | END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S) | ||
487 | /* | ||
488 | * To denormalise we need to move a copy of the register to itself. | ||
489 | * For POWER8 we need to do that for all 64 VSX registers | ||
490 | */ | ||
491 | XVCPSGNDP32(32) | ||
492 | denorm_done: | ||
531 | mtspr SPRN_HSRR0,r11 | 493 | mtspr SPRN_HSRR0,r11 |
532 | mtcrf 0x80,r9 | 494 | mtcrf 0x80,r9 |
533 | ld r9,PACA_EXGEN+EX_R9(r13) | 495 | ld r9,PACA_EXGEN+EX_R9(r13) |
@@ -721,7 +683,7 @@ machine_check_common: | |||
721 | STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception) | 683 | STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception) |
722 | STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception) | 684 | STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception) |
723 | STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception) | 685 | STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception) |
724 | STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception) | 686 | STD_EXCEPTION_COMMON(0xe40, emulation_assist, .emulation_assist_interrupt) |
725 | STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception) | 687 | STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception) |
726 | #ifdef CONFIG_PPC_DOORBELL | 688 | #ifdef CONFIG_PPC_DOORBELL |
727 | STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .doorbell_exception) | 689 | STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .doorbell_exception) |
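The long runs of fmr/XVCPSGNDP instructions are replaced by assembler macros that double themselves up to 32, and on POWER8 a second XVCPSGNDP32(32) covers VSX registers 32-63. The doubling trick, restated as a C-preprocessor illustration rather than the kernel's assembler macros:

#include <stdio.h>

#define OP(n)	printf("fmr %d,%d\n", (n), (n))
#define OP2(n)	OP(n); OP((n) + 1)
#define OP4(n)	OP2(n); OP2((n) + 2)
#define OP8(n)	OP4(n); OP4((n) + 4)
#define OP16(n)	OP8(n); OP8((n) + 8)
#define OP32(n)	OP16(n); OP16((n) + 16)

int main(void)
{
	OP32(0);	/* expands to 32 statements: fmr 0,0 ... fmr 31,31 */
	return 0;
}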
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 5cbcf4d5a808..ea185e0b3cae 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c | |||
@@ -162,7 +162,7 @@ notrace unsigned int __check_irq_replay(void) | |||
162 | * in case we also had a rollover while hard disabled | 162 | * in case we also had a rollover while hard disabled |
163 | */ | 163 | */ |
164 | local_paca->irq_happened &= ~PACA_IRQ_DEC; | 164 | local_paca->irq_happened &= ~PACA_IRQ_DEC; |
165 | if (decrementer_check_overflow()) | 165 | if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow()) |
166 | return 0x900; | 166 | return 0x900; |
167 | 167 | ||
168 | /* Finally check if an external interrupt happened */ | 168 | /* Finally check if an external interrupt happened */ |
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index 7f2273cc3c7d..eabeec991016 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c | |||
@@ -827,6 +827,7 @@ static void pcibios_fixup_resources(struct pci_dev *dev) | |||
827 | } | 827 | } |
828 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { | 828 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { |
829 | struct resource *res = dev->resource + i; | 829 | struct resource *res = dev->resource + i; |
830 | struct pci_bus_region reg; | ||
830 | if (!res->flags) | 831 | if (!res->flags) |
831 | continue; | 832 | continue; |
832 | 833 | ||
@@ -835,8 +836,9 @@ static void pcibios_fixup_resources(struct pci_dev *dev) | |||
835 | * at 0 as unset as well, except if PCI_PROBE_ONLY is also set | 836 | * at 0 as unset as well, except if PCI_PROBE_ONLY is also set |
836 | * since in that case, we don't want to re-assign anything | 837 | * since in that case, we don't want to re-assign anything |
837 | */ | 838 | */ |
839 | pcibios_resource_to_bus(dev, ®, res); | ||
838 | if (pci_has_flag(PCI_REASSIGN_ALL_RSRC) || | 840 | if (pci_has_flag(PCI_REASSIGN_ALL_RSRC) || |
839 | (res->start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) { | 841 | (reg.start == 0 && !pci_has_flag(PCI_PROBE_ONLY))) { |
840 | /* Only print message if not re-assigning */ | 842 | /* Only print message if not re-assigning */ |
841 | if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC)) | 843 | if (!pci_has_flag(PCI_REASSIGN_ALL_RSRC)) |
842 | pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] " | 844 | pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] " |
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index a902723fdc69..076d1242507a 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
@@ -399,7 +399,8 @@ static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) | |||
399 | static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) | 399 | static inline int __set_dabr(unsigned long dabr, unsigned long dabrx) |
400 | { | 400 | { |
401 | mtspr(SPRN_DABR, dabr); | 401 | mtspr(SPRN_DABR, dabr); |
402 | mtspr(SPRN_DABRX, dabrx); | 402 | if (cpu_has_feature(CPU_FTR_DABRX)) |
403 | mtspr(SPRN_DABRX, dabrx); | ||
403 | return 0; | 404 | return 0; |
404 | } | 405 | } |
405 | #else | 406 | #else |
@@ -1368,7 +1369,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) | |||
1368 | 1369 | ||
1369 | #ifdef CONFIG_PPC64 | 1370 | #ifdef CONFIG_PPC64 |
1370 | /* Called with hard IRQs off */ | 1371 | /* Called with hard IRQs off */ |
1371 | void __ppc64_runlatch_on(void) | 1372 | void notrace __ppc64_runlatch_on(void) |
1372 | { | 1373 | { |
1373 | struct thread_info *ti = current_thread_info(); | 1374 | struct thread_info *ti = current_thread_info(); |
1374 | unsigned long ctrl; | 1375 | unsigned long ctrl; |
@@ -1381,7 +1382,7 @@ void __ppc64_runlatch_on(void) | |||
1381 | } | 1382 | } |
1382 | 1383 | ||
1383 | /* Called with hard IRQs off */ | 1384 | /* Called with hard IRQs off */ |
1384 | void __ppc64_runlatch_off(void) | 1385 | void notrace __ppc64_runlatch_off(void) |
1385 | { | 1386 | { |
1386 | struct thread_info *ti = current_thread_info(); | 1387 | struct thread_info *ti = current_thread_info(); |
1387 | unsigned long ctrl; | 1388 | unsigned long ctrl; |
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index f18c79c324ef..c0e5caf8ccc7 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c | |||
@@ -1165,6 +1165,16 @@ bail: | |||
1165 | exception_exit(prev_state); | 1165 | exception_exit(prev_state); |
1166 | } | 1166 | } |
1167 | 1167 | ||
1168 | /* | ||
1169 | * This occurs when running in hypervisor mode on POWER6 or later | ||
1170 | * and an illegal instruction is encountered. | ||
1171 | */ | ||
1172 | void __kprobes emulation_assist_interrupt(struct pt_regs *regs) | ||
1173 | { | ||
1174 | regs->msr |= REASON_ILLEGAL; | ||
1175 | program_check_exception(regs); | ||
1176 | } | ||
1177 | |||
1168 | void alignment_exception(struct pt_regs *regs) | 1178 | void alignment_exception(struct pt_regs *regs) |
1169 | { | 1179 | { |
1170 | enum ctx_state prev_state = exception_enter(); | 1180 | enum ctx_state prev_state = exception_enter(); |
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c index 5dd3ab469976..ed0385448148 100644 --- a/arch/powerpc/kvm/44x_tlb.c +++ b/arch/powerpc/kvm/44x_tlb.c | |||
@@ -441,6 +441,7 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws) | |||
441 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); | 441 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); |
442 | struct kvmppc_44x_tlbe *tlbe; | 442 | struct kvmppc_44x_tlbe *tlbe; |
443 | unsigned int gtlb_index; | 443 | unsigned int gtlb_index; |
444 | int idx; | ||
444 | 445 | ||
445 | gtlb_index = kvmppc_get_gpr(vcpu, ra); | 446 | gtlb_index = kvmppc_get_gpr(vcpu, ra); |
446 | if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) { | 447 | if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) { |
@@ -473,6 +474,8 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws) | |||
473 | return EMULATE_FAIL; | 474 | return EMULATE_FAIL; |
474 | } | 475 | } |
475 | 476 | ||
477 | idx = srcu_read_lock(&vcpu->kvm->srcu); | ||
478 | |||
476 | if (tlbe_is_host_safe(vcpu, tlbe)) { | 479 | if (tlbe_is_host_safe(vcpu, tlbe)) { |
477 | gva_t eaddr; | 480 | gva_t eaddr; |
478 | gpa_t gpaddr; | 481 | gpa_t gpaddr; |
@@ -489,6 +492,8 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws) | |||
489 | kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index); | 492 | kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index); |
490 | } | 493 | } |
491 | 494 | ||
495 | srcu_read_unlock(&vcpu->kvm->srcu, idx); | ||
496 | |||
492 | trace_kvm_gtlb_write(gtlb_index, tlbe->tid, tlbe->word0, tlbe->word1, | 497 | trace_kvm_gtlb_write(gtlb_index, tlbe->tid, tlbe->word0, tlbe->word1, |
493 | tlbe->word2); | 498 | tlbe->word2); |
494 | 499 | ||
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 1020119226db..1a1b51189773 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c | |||
@@ -673,7 +673,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |||
673 | ret = s; | 673 | ret = s; |
674 | goto out; | 674 | goto out; |
675 | } | 675 | } |
676 | kvmppc_lazy_ee_enable(); | ||
677 | 676 | ||
678 | kvm_guest_enter(); | 677 | kvm_guest_enter(); |
679 | 678 | ||
@@ -699,6 +698,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |||
699 | kvmppc_load_guest_fp(vcpu); | 698 | kvmppc_load_guest_fp(vcpu); |
700 | #endif | 699 | #endif |
701 | 700 | ||
701 | kvmppc_lazy_ee_enable(); | ||
702 | |||
702 | ret = __kvmppc_vcpu_run(kvm_run, vcpu); | 703 | ret = __kvmppc_vcpu_run(kvm_run, vcpu); |
703 | 704 | ||
704 | /* No need for kvm_guest_exit. It's done in handle_exit. | 705 | /* No need for kvm_guest_exit. It's done in handle_exit. |
@@ -832,6 +833,18 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
832 | { | 833 | { |
833 | int r = RESUME_HOST; | 834 | int r = RESUME_HOST; |
834 | int s; | 835 | int s; |
836 | int idx; | ||
837 | |||
838 | #ifdef CONFIG_PPC64 | ||
839 | WARN_ON(local_paca->irq_happened != 0); | ||
840 | #endif | ||
841 | |||
842 | /* | ||
843 | * We enter with interrupts disabled in hardware, but | ||
844 | * we need to call hard_irq_disable anyway to ensure that | ||
845 | * the software state is kept in sync. | ||
846 | */ | ||
847 | hard_irq_disable(); | ||
835 | 848 | ||
836 | /* update before a new last_exit_type is rewritten */ | 849 | /* update before a new last_exit_type is rewritten */ |
837 | kvmppc_update_timing_stats(vcpu); | 850 | kvmppc_update_timing_stats(vcpu); |
@@ -1053,6 +1066,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
1053 | break; | 1066 | break; |
1054 | } | 1067 | } |
1055 | 1068 | ||
1069 | idx = srcu_read_lock(&vcpu->kvm->srcu); | ||
1070 | |||
1056 | gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr); | 1071 | gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr); |
1057 | gfn = gpaddr >> PAGE_SHIFT; | 1072 | gfn = gpaddr >> PAGE_SHIFT; |
1058 | 1073 | ||
@@ -1075,6 +1090,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
1075 | kvmppc_account_exit(vcpu, MMIO_EXITS); | 1090 | kvmppc_account_exit(vcpu, MMIO_EXITS); |
1076 | } | 1091 | } |
1077 | 1092 | ||
1093 | srcu_read_unlock(&vcpu->kvm->srcu, idx); | ||
1078 | break; | 1094 | break; |
1079 | } | 1095 | } |
1080 | 1096 | ||
@@ -1098,6 +1114,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
1098 | 1114 | ||
1099 | kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS); | 1115 | kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS); |
1100 | 1116 | ||
1117 | idx = srcu_read_lock(&vcpu->kvm->srcu); | ||
1118 | |||
1101 | gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr); | 1119 | gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr); |
1102 | gfn = gpaddr >> PAGE_SHIFT; | 1120 | gfn = gpaddr >> PAGE_SHIFT; |
1103 | 1121 | ||
@@ -1114,6 +1132,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
1114 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK); | 1132 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK); |
1115 | } | 1133 | } |
1116 | 1134 | ||
1135 | srcu_read_unlock(&vcpu->kvm->srcu, idx); | ||
1117 | break; | 1136 | break; |
1118 | } | 1137 | } |
1119 | 1138 | ||
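The idx/srcu_read_lock additions follow the usual KVM rule: any code that walks the memslots (here via kvmppc_mmu_xlate and the gfn lookups) must run inside an SRCU read-side critical section so that a concurrent memslot update cannot free the array underneath it. The generic pattern:

    int idx;

    idx = srcu_read_lock(&vcpu->kvm->srcu);
    /* ... translate guest addresses / look up memslots ... */
    srcu_read_unlock(&vcpu->kvm->srcu, idx);
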
diff --git a/arch/powerpc/kvm/e500_mmu.c b/arch/powerpc/kvm/e500_mmu.c index c41a5a96b558..6d6f153b6c1d 100644 --- a/arch/powerpc/kvm/e500_mmu.c +++ b/arch/powerpc/kvm/e500_mmu.c | |||
@@ -396,6 +396,7 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) | |||
396 | struct kvm_book3e_206_tlb_entry *gtlbe; | 396 | struct kvm_book3e_206_tlb_entry *gtlbe; |
397 | int tlbsel, esel; | 397 | int tlbsel, esel; |
398 | int recal = 0; | 398 | int recal = 0; |
399 | int idx; | ||
399 | 400 | ||
400 | tlbsel = get_tlb_tlbsel(vcpu); | 401 | tlbsel = get_tlb_tlbsel(vcpu); |
401 | esel = get_tlb_esel(vcpu, tlbsel); | 402 | esel = get_tlb_esel(vcpu, tlbsel); |
@@ -430,6 +431,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) | |||
430 | kvmppc_set_tlb1map_range(vcpu, gtlbe); | 431 | kvmppc_set_tlb1map_range(vcpu, gtlbe); |
431 | } | 432 | } |
432 | 433 | ||
434 | idx = srcu_read_lock(&vcpu->kvm->srcu); | ||
435 | |||
433 | /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */ | 436 | /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */ |
434 | if (tlbe_is_host_safe(vcpu, gtlbe)) { | 437 | if (tlbe_is_host_safe(vcpu, gtlbe)) { |
435 | u64 eaddr = get_tlb_eaddr(gtlbe); | 438 | u64 eaddr = get_tlb_eaddr(gtlbe); |
@@ -444,6 +447,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) | |||
444 | kvmppc_mmu_map(vcpu, eaddr, raddr, index_of(tlbsel, esel)); | 447 | kvmppc_mmu_map(vcpu, eaddr, raddr, index_of(tlbsel, esel)); |
445 | } | 448 | } |
446 | 449 | ||
450 | srcu_read_unlock(&vcpu->kvm->srcu, idx); | ||
451 | |||
447 | kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS); | 452 | kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS); |
448 | return EMULATE_DONE; | 453 | return EMULATE_DONE; |
449 | } | 454 | } |
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c index 753cc99eff2b..19c8379575f7 100644 --- a/arch/powerpc/kvm/e500mc.c +++ b/arch/powerpc/kvm/e500mc.c | |||
@@ -177,8 +177,6 @@ int kvmppc_core_check_processor_compat(void) | |||
177 | r = 0; | 177 | r = 0; |
178 | else if (strcmp(cur_cpu_spec->cpu_name, "e5500") == 0) | 178 | else if (strcmp(cur_cpu_spec->cpu_name, "e5500") == 0) |
179 | r = 0; | 179 | r = 0; |
180 | else if (strcmp(cur_cpu_spec->cpu_name, "e6500") == 0) | ||
181 | r = 0; | ||
182 | else | 180 | else |
183 | r = -ENOTSUPP; | 181 | r = -ENOTSUPP; |
184 | 182 | ||
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 237c8e5f2640..77fdd2cef33b 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c | |||
@@ -592,8 +592,14 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud, | |||
592 | do { | 592 | do { |
593 | pmd = pmd_offset(pud, addr); | 593 | pmd = pmd_offset(pud, addr); |
594 | next = pmd_addr_end(addr, end); | 594 | next = pmd_addr_end(addr, end); |
595 | if (pmd_none_or_clear_bad(pmd)) | 595 | if (!is_hugepd(pmd)) { |
596 | /* | ||
597 | * If it is not a hugepd pointer, we should have | ||
598 | * already found it cleared. | ||
599 | */ | ||
600 | WARN_ON(!pmd_none_or_clear_bad(pmd)); | ||
596 | continue; | 601 | continue; |
602 | } | ||
597 | #ifdef CONFIG_PPC_FSL_BOOK3E | 603 | #ifdef CONFIG_PPC_FSL_BOOK3E |
598 | /* | 604 | /* |
599 | * Increment next by the size of the huge mapping since | 605 | * Increment next by the size of the huge mapping since |
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 845c867444e6..29c6482890c8 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c | |||
@@ -1758,7 +1758,7 @@ static void perf_event_interrupt(struct pt_regs *regs) | |||
1758 | } | 1758 | } |
1759 | } | 1759 | } |
1760 | } | 1760 | } |
1761 | if ((!found) && printk_ratelimit()) | 1761 | if (!found && !nmi && printk_ratelimit()) |
1762 | printk(KERN_WARNING "Can't find PMC that caused IRQ\n"); | 1762 | printk(KERN_WARNING "Can't find PMC that caused IRQ\n"); |
1763 | 1763 | ||
1764 | /* | 1764 | /* |
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c index 19506f935737..b456b157d33d 100644 --- a/arch/powerpc/platforms/pseries/eeh_pseries.c +++ b/arch/powerpc/platforms/pseries/eeh_pseries.c | |||
@@ -83,7 +83,11 @@ static int pseries_eeh_init(void) | |||
83 | ibm_configure_pe = rtas_token("ibm,configure-pe"); | 83 | ibm_configure_pe = rtas_token("ibm,configure-pe"); |
84 | ibm_configure_bridge = rtas_token("ibm,configure-bridge"); | 84 | ibm_configure_bridge = rtas_token("ibm,configure-bridge"); |
85 | 85 | ||
86 | /* necessary sanity check */ | 86 | /* |
87 | * Necessary sanity check. We needn't check "get-config-addr-info" | ||
88 | * and its variant since old firmware probably supports addressing | ||
89 | * by domain/bus/slot/function for EEH RTAS operations. | ||
90 | */ | ||
87 | if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE) { | 91 | if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE) { |
88 | pr_warning("%s: RTAS service <ibm,set-eeh-option> invalid\n", | 92 | pr_warning("%s: RTAS service <ibm,set-eeh-option> invalid\n", |
89 | __func__); | 93 | __func__); |
@@ -102,12 +106,6 @@ static int pseries_eeh_init(void) | |||
102 | pr_warning("%s: RTAS service <ibm,slot-error-detail> invalid\n", | 106 | pr_warning("%s: RTAS service <ibm,slot-error-detail> invalid\n", |
103 | __func__); | 107 | __func__); |
104 | return -EINVAL; | 108 | return -EINVAL; |
105 | } else if (ibm_get_config_addr_info2 == RTAS_UNKNOWN_SERVICE && | ||
106 | ibm_get_config_addr_info == RTAS_UNKNOWN_SERVICE) { | ||
107 | pr_warning("%s: RTAS service <ibm,get-config-addr-info2> and " | ||
108 | "<ibm,get-config-addr-info> invalid\n", | ||
109 | __func__); | ||
110 | return -EINVAL; | ||
111 | } else if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE && | 109 | } else if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE && |
112 | ibm_configure_bridge == RTAS_UNKNOWN_SERVICE) { | 110 | ibm_configure_bridge == RTAS_UNKNOWN_SERVICE) { |
113 | pr_warning("%s: RTAS service <ibm,configure-pe> and " | 111 | pr_warning("%s: RTAS service <ibm,configure-pe> and " |
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c index bae0f402bf2a..87a22092b68f 100644 --- a/arch/s390/appldata/appldata_base.c +++ b/arch/s390/appldata/appldata_base.c | |||
@@ -212,7 +212,9 @@ appldata_timer_handler(ctl_table *ctl, int write, | |||
212 | return 0; | 212 | return 0; |
213 | } | 213 | } |
214 | if (!write) { | 214 | if (!write) { |
215 | len = sprintf(buf, appldata_timer_active ? "1\n" : "0\n"); | 215 | strncpy(buf, appldata_timer_active ? "1\n" : "0\n", |
216 | ARRAY_SIZE(buf)); | ||
217 | len = strnlen(buf, ARRAY_SIZE(buf)); | ||
216 | if (len > *lenp) | 218 | if (len > *lenp) |
217 | len = *lenp; | 219 | len = *lenp; |
218 | if (copy_to_user(buffer, buf, len)) | 220 | if (copy_to_user(buffer, buf, len)) |
@@ -317,7 +319,8 @@ appldata_generic_handler(ctl_table *ctl, int write, | |||
317 | return 0; | 319 | return 0; |
318 | } | 320 | } |
319 | if (!write) { | 321 | if (!write) { |
320 | len = sprintf(buf, ops->active ? "1\n" : "0\n"); | 322 | strncpy(buf, ops->active ? "1\n" : "0\n", ARRAY_SIZE(buf)); |
323 | len = strnlen(buf, ARRAY_SIZE(buf)); | ||
321 | if (len > *lenp) | 324 | if (len > *lenp) |
322 | len = *lenp; | 325 | len = *lenp; |
323 | if (copy_to_user(buffer, buf, len)) { | 326 | if (copy_to_user(buffer, buf, len)) { |
diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h index 9411db653bac..886ac7d4937a 100644 --- a/arch/s390/include/asm/dma-mapping.h +++ b/arch/s390/include/asm/dma-mapping.h | |||
@@ -71,8 +71,8 @@ static inline void dma_free_coherent(struct device *dev, size_t size, | |||
71 | { | 71 | { |
72 | struct dma_map_ops *dma_ops = get_dma_ops(dev); | 72 | struct dma_map_ops *dma_ops = get_dma_ops(dev); |
73 | 73 | ||
74 | dma_ops->free(dev, size, cpu_addr, dma_handle, NULL); | ||
75 | debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); | 74 | debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); |
75 | dma_ops->free(dev, size, cpu_addr, dma_handle, NULL); | ||
76 | } | 76 | } |
77 | 77 | ||
78 | #endif /* _ASM_S390_DMA_MAPPING_H */ | 78 | #endif /* _ASM_S390_DMA_MAPPING_H */ |
diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h index 379d96e2105e..fd9be010f9b2 100644 --- a/arch/s390/include/asm/io.h +++ b/arch/s390/include/asm/io.h | |||
@@ -36,6 +36,7 @@ static inline void * phys_to_virt(unsigned long address) | |||
36 | } | 36 | } |
37 | 37 | ||
38 | void *xlate_dev_mem_ptr(unsigned long phys); | 38 | void *xlate_dev_mem_ptr(unsigned long phys); |
39 | #define xlate_dev_mem_ptr xlate_dev_mem_ptr | ||
39 | void unxlate_dev_mem_ptr(unsigned long phys, void *addr); | 40 | void unxlate_dev_mem_ptr(unsigned long phys, void *addr); |
40 | 41 | ||
41 | /* | 42 | /* |
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 0f0de30e3e3f..e8b6e5b8932c 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h | |||
@@ -623,7 +623,7 @@ static inline pgste_t pgste_get_lock(pte_t *ptep) | |||
623 | " csg %0,%1,%2\n" | 623 | " csg %0,%1,%2\n" |
624 | " jl 0b\n" | 624 | " jl 0b\n" |
625 | : "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE]) | 625 | : "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE]) |
626 | : "Q" (ptep[PTRS_PER_PTE]) : "cc"); | 626 | : "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory"); |
627 | #endif | 627 | #endif |
628 | return __pgste(new); | 628 | return __pgste(new); |
629 | } | 629 | } |
@@ -635,18 +635,26 @@ static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste) | |||
635 | " nihh %1,0xff7f\n" /* clear RCP_PCL_BIT */ | 635 | " nihh %1,0xff7f\n" /* clear RCP_PCL_BIT */ |
636 | " stg %1,%0\n" | 636 | " stg %1,%0\n" |
637 | : "=Q" (ptep[PTRS_PER_PTE]) | 637 | : "=Q" (ptep[PTRS_PER_PTE]) |
638 | : "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE]) : "cc"); | 638 | : "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE]) |
639 | : "cc", "memory"); | ||
639 | preempt_enable(); | 640 | preempt_enable(); |
640 | #endif | 641 | #endif |
641 | } | 642 | } |
642 | 643 | ||
644 | static inline void pgste_set(pte_t *ptep, pgste_t pgste) | ||
645 | { | ||
646 | #ifdef CONFIG_PGSTE | ||
647 | *(pgste_t *)(ptep + PTRS_PER_PTE) = pgste; | ||
648 | #endif | ||
649 | } | ||
650 | |||
643 | static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste) | 651 | static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste) |
644 | { | 652 | { |
645 | #ifdef CONFIG_PGSTE | 653 | #ifdef CONFIG_PGSTE |
646 | unsigned long address, bits; | 654 | unsigned long address, bits; |
647 | unsigned char skey; | 655 | unsigned char skey; |
648 | 656 | ||
649 | if (!pte_present(*ptep)) | 657 | if (pte_val(*ptep) & _PAGE_INVALID) |
650 | return pgste; | 658 | return pgste; |
651 | address = pte_val(*ptep) & PAGE_MASK; | 659 | address = pte_val(*ptep) & PAGE_MASK; |
652 | skey = page_get_storage_key(address); | 660 | skey = page_get_storage_key(address); |
@@ -680,7 +688,7 @@ static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste) | |||
680 | #ifdef CONFIG_PGSTE | 688 | #ifdef CONFIG_PGSTE |
681 | int young; | 689 | int young; |
682 | 690 | ||
683 | if (!pte_present(*ptep)) | 691 | if (pte_val(*ptep) & _PAGE_INVALID) |
684 | return pgste; | 692 | return pgste; |
685 | /* Get referenced bit from storage key */ | 693 | /* Get referenced bit from storage key */ |
686 | young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK); | 694 | young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK); |
@@ -704,17 +712,19 @@ static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry) | |||
704 | { | 712 | { |
705 | #ifdef CONFIG_PGSTE | 713 | #ifdef CONFIG_PGSTE |
706 | unsigned long address; | 714 | unsigned long address; |
707 | unsigned long okey, nkey; | 715 | unsigned long nkey; |
708 | 716 | ||
709 | if (!pte_present(entry)) | 717 | if (pte_val(entry) & _PAGE_INVALID) |
710 | return; | 718 | return; |
719 | VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID)); | ||
711 | address = pte_val(entry) & PAGE_MASK; | 720 | address = pte_val(entry) & PAGE_MASK; |
712 | okey = nkey = page_get_storage_key(address); | 721 | /* |
713 | nkey &= ~(_PAGE_ACC_BITS | _PAGE_FP_BIT); | 722 | * Set page access key and fetch protection bit from pgste. |
714 | /* Set page access key and fetch protection bit from pgste */ | 723 | * The guest C/R information is still in the PGSTE, set real |
715 | nkey |= (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56; | 724 | * key C/R to 0. |
716 | if (okey != nkey) | 725 | */ |
717 | page_set_storage_key(address, nkey, 0); | 726 | nkey = (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56; |
727 | page_set_storage_key(address, nkey, 0); | ||
718 | #endif | 728 | #endif |
719 | } | 729 | } |
720 | 730 | ||
@@ -1098,6 +1108,11 @@ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, | |||
1098 | pte = *ptep; | 1108 | pte = *ptep; |
1099 | if (!mm_exclusive(mm)) | 1109 | if (!mm_exclusive(mm)) |
1100 | __ptep_ipte(address, ptep); | 1110 | __ptep_ipte(address, ptep); |
1111 | |||
1112 | if (mm_has_pgste(mm)) { | ||
1113 | pgste = pgste_update_all(&pte, pgste); | ||
1114 | pgste_set(ptep, pgste); | ||
1115 | } | ||
1101 | return pte; | 1116 | return pte; |
1102 | } | 1117 | } |
1103 | 1118 | ||
@@ -1105,9 +1120,13 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm, | |||
1105 | unsigned long address, | 1120 | unsigned long address, |
1106 | pte_t *ptep, pte_t pte) | 1121 | pte_t *ptep, pte_t pte) |
1107 | { | 1122 | { |
1123 | pgste_t pgste; | ||
1124 | |||
1108 | if (mm_has_pgste(mm)) { | 1125 | if (mm_has_pgste(mm)) { |
1126 | pgste = *(pgste_t *)(ptep + PTRS_PER_PTE); | ||
1127 | pgste_set_key(ptep, pgste, pte); | ||
1109 | pgste_set_pte(ptep, pte); | 1128 | pgste_set_pte(ptep, pte); |
1110 | pgste_set_unlock(ptep, *(pgste_t *)(ptep + PTRS_PER_PTE)); | 1129 | pgste_set_unlock(ptep, pgste); |
1111 | } else | 1130 | } else |
1112 | *ptep = pte; | 1131 | *ptep = pte; |
1113 | } | 1132 | } |
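For reference, callers treat the start/commit pair as a small transaction on a live PTE; the s390 change above only makes sure the guest key state captured in the PGSTE at start time is still available at commit time. A sketch of the generic caller protocol (API names as in the kernel, the surrounding page-table locking is assumed):

    pte_t old;

    old = ptep_modify_prot_start(mm, addr, ptep);   /* fetch and invalidate atomically */
    old = pte_modify(old, newprot);                 /* adjust protections */
    ptep_modify_prot_commit(mm, addr, ptep, old);   /* install the updated PTE */
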
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c index 298297477257..87acc38f73c6 100644 --- a/arch/s390/kernel/dumpstack.c +++ b/arch/s390/kernel/dumpstack.c | |||
@@ -74,6 +74,8 @@ __show_trace(unsigned long sp, unsigned long low, unsigned long high) | |||
74 | 74 | ||
75 | static void show_trace(struct task_struct *task, unsigned long *stack) | 75 | static void show_trace(struct task_struct *task, unsigned long *stack) |
76 | { | 76 | { |
77 | const unsigned long frame_size = | ||
78 | STACK_FRAME_OVERHEAD + sizeof(struct pt_regs); | ||
77 | register unsigned long __r15 asm ("15"); | 79 | register unsigned long __r15 asm ("15"); |
78 | unsigned long sp; | 80 | unsigned long sp; |
79 | 81 | ||
@@ -82,11 +84,13 @@ static void show_trace(struct task_struct *task, unsigned long *stack) | |||
82 | sp = task ? task->thread.ksp : __r15; | 84 | sp = task ? task->thread.ksp : __r15; |
83 | printk("Call Trace:\n"); | 85 | printk("Call Trace:\n"); |
84 | #ifdef CONFIG_CHECK_STACK | 86 | #ifdef CONFIG_CHECK_STACK |
85 | sp = __show_trace(sp, S390_lowcore.panic_stack - 4096, | 87 | sp = __show_trace(sp, |
86 | S390_lowcore.panic_stack); | 88 | S390_lowcore.panic_stack + frame_size - 4096, |
89 | S390_lowcore.panic_stack + frame_size); | ||
87 | #endif | 90 | #endif |
88 | sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE, | 91 | sp = __show_trace(sp, |
89 | S390_lowcore.async_stack); | 92 | S390_lowcore.async_stack + frame_size - ASYNC_SIZE, |
93 | S390_lowcore.async_stack + frame_size); | ||
90 | if (task) | 94 | if (task) |
91 | __show_trace(sp, (unsigned long) task_stack_page(task), | 95 | __show_trace(sp, (unsigned long) task_stack_page(task), |
92 | (unsigned long) task_stack_page(task) + THREAD_SIZE); | 96 | (unsigned long) task_stack_page(task) + THREAD_SIZE); |
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c index f7fb58903f6a..408e866ae548 100644 --- a/arch/s390/kernel/irq.c +++ b/arch/s390/kernel/irq.c | |||
@@ -311,3 +311,67 @@ void measurement_alert_subclass_unregister(void) | |||
311 | spin_unlock(&ma_subclass_lock); | 311 | spin_unlock(&ma_subclass_lock); |
312 | } | 312 | } |
313 | EXPORT_SYMBOL(measurement_alert_subclass_unregister); | 313 | EXPORT_SYMBOL(measurement_alert_subclass_unregister); |
314 | |||
315 | void synchronize_irq(unsigned int irq) | ||
316 | { | ||
317 | /* | ||
318 | * Not needed, the handler is protected by a lock and IRQs that occur | ||
319 | * after the handler is deleted are just NOPs. | ||
320 | */ | ||
321 | } | ||
322 | EXPORT_SYMBOL_GPL(synchronize_irq); | ||
323 | |||
324 | #ifndef CONFIG_PCI | ||
325 | |||
326 | /* Only PCI devices have dynamically-defined IRQ handlers */ | ||
327 | |||
328 | int request_irq(unsigned int irq, irq_handler_t handler, | ||
329 | unsigned long irqflags, const char *devname, void *dev_id) | ||
330 | { | ||
331 | return -EINVAL; | ||
332 | } | ||
333 | EXPORT_SYMBOL_GPL(request_irq); | ||
334 | |||
335 | void free_irq(unsigned int irq, void *dev_id) | ||
336 | { | ||
337 | WARN_ON(1); | ||
338 | } | ||
339 | EXPORT_SYMBOL_GPL(free_irq); | ||
340 | |||
341 | void enable_irq(unsigned int irq) | ||
342 | { | ||
343 | WARN_ON(1); | ||
344 | } | ||
345 | EXPORT_SYMBOL_GPL(enable_irq); | ||
346 | |||
347 | void disable_irq(unsigned int irq) | ||
348 | { | ||
349 | WARN_ON(1); | ||
350 | } | ||
351 | EXPORT_SYMBOL_GPL(disable_irq); | ||
352 | |||
353 | #endif /* !CONFIG_PCI */ | ||
354 | |||
355 | void disable_irq_nosync(unsigned int irq) | ||
356 | { | ||
357 | disable_irq(irq); | ||
358 | } | ||
359 | EXPORT_SYMBOL_GPL(disable_irq_nosync); | ||
360 | |||
361 | unsigned long probe_irq_on(void) | ||
362 | { | ||
363 | return 0; | ||
364 | } | ||
365 | EXPORT_SYMBOL_GPL(probe_irq_on); | ||
366 | |||
367 | int probe_irq_off(unsigned long val) | ||
368 | { | ||
369 | return 0; | ||
370 | } | ||
371 | EXPORT_SYMBOL_GPL(probe_irq_off); | ||
372 | |||
373 | unsigned int probe_irq_mask(unsigned long val) | ||
374 | { | ||
375 | return val; | ||
376 | } | ||
377 | EXPORT_SYMBOL_GPL(probe_irq_mask); | ||
diff --git a/arch/s390/kernel/sclp.S b/arch/s390/kernel/sclp.S index b6506ee32a36..29bd7bec4176 100644 --- a/arch/s390/kernel/sclp.S +++ b/arch/s390/kernel/sclp.S | |||
@@ -225,7 +225,7 @@ _sclp_print: | |||
225 | ahi %r2,1 | 225 | ahi %r2,1 |
226 | ltr %r0,%r0 # end of string? | 226 | ltr %r0,%r0 # end of string? |
227 | jz .LfinalizemtoS4 | 227 | jz .LfinalizemtoS4 |
228 | chi %r0,0x15 # end of line (NL)? | 228 | chi %r0,0x0a # end of line (NL)? |
229 | jz .LfinalizemtoS4 | 229 | jz .LfinalizemtoS4 |
230 | stc %r0,0(%r6,%r7) # copy to mto | 230 | stc %r0,0(%r6,%r7) # copy to mto |
231 | la %r11,0(%r6,%r7) | 231 | la %r11,0(%r6,%r7) |
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 05674b669001..4f977d0d25c2 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -428,34 +428,27 @@ void smp_stop_cpu(void) | |||
428 | * This is the main routine where commands issued by other | 428 | * This is the main routine where commands issued by other |
429 | * cpus are handled. | 429 | * cpus are handled. |
430 | */ | 430 | */ |
431 | static void do_ext_call_interrupt(struct ext_code ext_code, | 431 | static void smp_handle_ext_call(void) |
432 | unsigned int param32, unsigned long param64) | ||
433 | { | 432 | { |
434 | unsigned long bits; | 433 | unsigned long bits; |
435 | int cpu; | ||
436 | |||
437 | cpu = smp_processor_id(); | ||
438 | if (ext_code.code == 0x1202) | ||
439 | inc_irq_stat(IRQEXT_EXC); | ||
440 | else | ||
441 | inc_irq_stat(IRQEXT_EMS); | ||
442 | /* | ||
443 | * handle bit signal external calls | ||
444 | */ | ||
445 | bits = xchg(&pcpu_devices[cpu].ec_mask, 0); | ||
446 | 434 | ||
435 | /* handle bit signal external calls */ | ||
436 | bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0); | ||
447 | if (test_bit(ec_stop_cpu, &bits)) | 437 | if (test_bit(ec_stop_cpu, &bits)) |
448 | smp_stop_cpu(); | 438 | smp_stop_cpu(); |
449 | |||
450 | if (test_bit(ec_schedule, &bits)) | 439 | if (test_bit(ec_schedule, &bits)) |
451 | scheduler_ipi(); | 440 | scheduler_ipi(); |
452 | |||
453 | if (test_bit(ec_call_function, &bits)) | 441 | if (test_bit(ec_call_function, &bits)) |
454 | generic_smp_call_function_interrupt(); | 442 | generic_smp_call_function_interrupt(); |
455 | |||
456 | if (test_bit(ec_call_function_single, &bits)) | 443 | if (test_bit(ec_call_function_single, &bits)) |
457 | generic_smp_call_function_single_interrupt(); | 444 | generic_smp_call_function_single_interrupt(); |
445 | } | ||
458 | 446 | ||
447 | static void do_ext_call_interrupt(struct ext_code ext_code, | ||
448 | unsigned int param32, unsigned long param64) | ||
449 | { | ||
450 | inc_irq_stat(ext_code.code == 0x1202 ? IRQEXT_EXC : IRQEXT_EMS); | ||
451 | smp_handle_ext_call(); | ||
459 | } | 452 | } |
460 | 453 | ||
461 | void arch_send_call_function_ipi_mask(const struct cpumask *mask) | 454 | void arch_send_call_function_ipi_mask(const struct cpumask *mask) |
@@ -760,6 +753,8 @@ int __cpu_disable(void) | |||
760 | { | 753 | { |
761 | unsigned long cregs[16]; | 754 | unsigned long cregs[16]; |
762 | 755 | ||
756 | /* Handle possible pending IPIs */ | ||
757 | smp_handle_ext_call(); | ||
763 | set_cpu_online(smp_processor_id(), false); | 758 | set_cpu_online(smp_processor_id(), false); |
764 | /* Disable pseudo page faults on this cpu. */ | 759 | /* Disable pseudo page faults on this cpu. */ |
765 | pfault_fini(); | 760 | pfault_fini(); |
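Factoring the bit handling into smp_handle_ext_call() lets the hot-unplug path drain IPIs that were signalled but not yet delivered before the CPU is marked offline; a condensed sketch of the resulting __cpu_disable() shape:

    int __cpu_disable(void)
    {
            /* handle possible pending IPIs before going offline */
            smp_handle_ext_call();
            set_cpu_online(smp_processor_id(), false);
            /* ... control-register and pfault teardown as above ... */
            return 0;   /* sketch: the real function continues */
    }
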
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 18dc417aaf79..a938b548f07e 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c | |||
@@ -492,7 +492,7 @@ static int gmap_connect_pgtable(unsigned long address, unsigned long segment, | |||
492 | mp = (struct gmap_pgtable *) page->index; | 492 | mp = (struct gmap_pgtable *) page->index; |
493 | rmap->gmap = gmap; | 493 | rmap->gmap = gmap; |
494 | rmap->entry = segment_ptr; | 494 | rmap->entry = segment_ptr; |
495 | rmap->vmaddr = address; | 495 | rmap->vmaddr = address & PMD_MASK; |
496 | spin_lock(&mm->page_table_lock); | 496 | spin_lock(&mm->page_table_lock); |
497 | if (*segment_ptr == segment) { | 497 | if (*segment_ptr == segment) { |
498 | list_add(&rmap->list, &mp->mapper); | 498 | list_add(&rmap->list, &mp->mapper); |
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index e6f15b5d8b7d..f1e5be85d592 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c | |||
@@ -302,15 +302,6 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len) | |||
302 | return rc; | 302 | return rc; |
303 | } | 303 | } |
304 | 304 | ||
305 | void synchronize_irq(unsigned int irq) | ||
306 | { | ||
307 | /* | ||
308 | * Not needed, the handler is protected by a lock and IRQs that occur | ||
309 | * after the handler is deleted are just NOPs. | ||
310 | */ | ||
311 | } | ||
312 | EXPORT_SYMBOL_GPL(synchronize_irq); | ||
313 | |||
314 | void enable_irq(unsigned int irq) | 305 | void enable_irq(unsigned int irq) |
315 | { | 306 | { |
316 | struct msi_desc *msi = irq_get_msi_desc(irq); | 307 | struct msi_desc *msi = irq_get_msi_desc(irq); |
@@ -327,30 +318,6 @@ void disable_irq(unsigned int irq) | |||
327 | } | 318 | } |
328 | EXPORT_SYMBOL_GPL(disable_irq); | 319 | EXPORT_SYMBOL_GPL(disable_irq); |
329 | 320 | ||
330 | void disable_irq_nosync(unsigned int irq) | ||
331 | { | ||
332 | disable_irq(irq); | ||
333 | } | ||
334 | EXPORT_SYMBOL_GPL(disable_irq_nosync); | ||
335 | |||
336 | unsigned long probe_irq_on(void) | ||
337 | { | ||
338 | return 0; | ||
339 | } | ||
340 | EXPORT_SYMBOL_GPL(probe_irq_on); | ||
341 | |||
342 | int probe_irq_off(unsigned long val) | ||
343 | { | ||
344 | return 0; | ||
345 | } | ||
346 | EXPORT_SYMBOL_GPL(probe_irq_off); | ||
347 | |||
348 | unsigned int probe_irq_mask(unsigned long val) | ||
349 | { | ||
350 | return val; | ||
351 | } | ||
352 | EXPORT_SYMBOL_GPL(probe_irq_mask); | ||
353 | |||
354 | void pcibios_fixup_bus(struct pci_bus *bus) | 321 | void pcibios_fixup_bus(struct pci_bus *bus) |
355 | { | 322 | { |
356 | } | 323 | } |
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild index ff18e3cfb6b1..7e4a97fbded4 100644 --- a/arch/sparc/include/asm/Kbuild +++ b/arch/sparc/include/asm/Kbuild | |||
@@ -6,6 +6,7 @@ generic-y += cputime.h | |||
6 | generic-y += div64.h | 6 | generic-y += div64.h |
7 | generic-y += emergency-restart.h | 7 | generic-y += emergency-restart.h |
8 | generic-y += exec.h | 8 | generic-y += exec.h |
9 | generic-y += linkage.h | ||
9 | generic-y += local64.h | 10 | generic-y += local64.h |
10 | generic-y += mutex.h | 11 | generic-y += mutex.h |
11 | generic-y += irq_regs.h | 12 | generic-y += irq_regs.h |
diff --git a/arch/sparc/include/asm/leon.h b/arch/sparc/include/asm/leon.h index 15a716934e4d..b836e9297f2a 100644 --- a/arch/sparc/include/asm/leon.h +++ b/arch/sparc/include/asm/leon.h | |||
@@ -135,7 +135,7 @@ static inline int sparc_leon3_cpuid(void) | |||
135 | 135 | ||
136 | #ifdef CONFIG_SMP | 136 | #ifdef CONFIG_SMP |
137 | # define LEON3_IRQ_IPI_DEFAULT 13 | 137 | # define LEON3_IRQ_IPI_DEFAULT 13 |
138 | # define LEON3_IRQ_TICKER (leon3_ticker_irq) | 138 | # define LEON3_IRQ_TICKER (leon3_gptimer_irq) |
139 | # define LEON3_IRQ_CROSS_CALL 15 | 139 | # define LEON3_IRQ_CROSS_CALL 15 |
140 | #endif | 140 | #endif |
141 | 141 | ||
diff --git a/arch/sparc/include/asm/leon_amba.h b/arch/sparc/include/asm/leon_amba.h index f3034eddf468..24ec48c3ff90 100644 --- a/arch/sparc/include/asm/leon_amba.h +++ b/arch/sparc/include/asm/leon_amba.h | |||
@@ -47,6 +47,7 @@ struct amba_prom_registers { | |||
47 | #define LEON3_GPTIMER_LD 4 | 47 | #define LEON3_GPTIMER_LD 4 |
48 | #define LEON3_GPTIMER_IRQEN 8 | 48 | #define LEON3_GPTIMER_IRQEN 8 |
49 | #define LEON3_GPTIMER_SEPIRQ 8 | 49 | #define LEON3_GPTIMER_SEPIRQ 8 |
50 | #define LEON3_GPTIMER_TIMERS 0x7 | ||
50 | 51 | ||
51 | #define LEON23_REG_TIMER_CONTROL_EN 0x00000001 /* 1 = enable counting */ | 52 | #define LEON23_REG_TIMER_CONTROL_EN 0x00000001 /* 1 = enable counting */ |
52 | /* 0 = hold scalar and counter */ | 53 | /* 0 = hold scalar and counter */ |
diff --git a/arch/sparc/include/asm/linkage.h b/arch/sparc/include/asm/linkage.h deleted file mode 100644 index 291c2d01c44f..000000000000 --- a/arch/sparc/include/asm/linkage.h +++ /dev/null | |||
@@ -1,6 +0,0 @@ | |||
1 | #ifndef __ASM_LINKAGE_H | ||
2 | #define __ASM_LINKAGE_H | ||
3 | |||
4 | /* Nothing to see here... */ | ||
5 | |||
6 | #endif | ||
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c index 75bb608c423e..5ef48dab5636 100644 --- a/arch/sparc/kernel/ds.c +++ b/arch/sparc/kernel/ds.c | |||
@@ -843,7 +843,8 @@ void ldom_reboot(const char *boot_command) | |||
843 | unsigned long len; | 843 | unsigned long len; |
844 | 844 | ||
845 | strcpy(full_boot_str, "boot "); | 845 | strcpy(full_boot_str, "boot "); |
846 | strcpy(full_boot_str + strlen("boot "), boot_command); | 846 | strlcpy(full_boot_str + strlen("boot "), boot_command, |
847 | sizeof(full_boot_str) - strlen("boot ")); |
847 | len = strlen(full_boot_str); | 848 | len = strlen(full_boot_str); |
848 | 849 | ||
849 | if (reboot_data_supported) { | 850 | if (reboot_data_supported) { |
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c index 7c0231dabe44..b7c68976cbc7 100644 --- a/arch/sparc/kernel/leon_kernel.c +++ b/arch/sparc/kernel/leon_kernel.c | |||
@@ -38,7 +38,6 @@ static DEFINE_SPINLOCK(leon_irq_lock); | |||
38 | 38 | ||
39 | unsigned long leon3_gptimer_irq; /* interrupt controller irq number */ | 39 | unsigned long leon3_gptimer_irq; /* interrupt controller irq number */ |
40 | unsigned long leon3_gptimer_idx; /* Timer Index (0..6) within Timer Core */ | 40 | unsigned long leon3_gptimer_idx; /* Timer Index (0..6) within Timer Core */ |
41 | int leon3_ticker_irq; /* Timer ticker IRQ */ | ||
42 | unsigned int sparc_leon_eirq; | 41 | unsigned int sparc_leon_eirq; |
43 | #define LEON_IMASK(cpu) (&leon3_irqctrl_regs->mask[cpu]) | 42 | #define LEON_IMASK(cpu) (&leon3_irqctrl_regs->mask[cpu]) |
44 | #define LEON_IACK (&leon3_irqctrl_regs->iclear) | 43 | #define LEON_IACK (&leon3_irqctrl_regs->iclear) |
@@ -278,6 +277,9 @@ irqreturn_t leon_percpu_timer_ce_interrupt(int irq, void *unused) | |||
278 | 277 | ||
279 | leon_clear_profile_irq(cpu); | 278 | leon_clear_profile_irq(cpu); |
280 | 279 | ||
280 | if (cpu == boot_cpu_id) | ||
281 | timer_interrupt(irq, NULL); | ||
282 | |||
281 | ce = &per_cpu(sparc32_clockevent, cpu); | 283 | ce = &per_cpu(sparc32_clockevent, cpu); |
282 | 284 | ||
283 | irq_enter(); | 285 | irq_enter(); |
@@ -299,6 +301,7 @@ void __init leon_init_timers(void) | |||
299 | int icsel; | 301 | int icsel; |
300 | int ampopts; | 302 | int ampopts; |
301 | int err; | 303 | int err; |
304 | u32 config; | ||
302 | 305 | ||
303 | sparc_config.get_cycles_offset = leon_cycles_offset; | 306 | sparc_config.get_cycles_offset = leon_cycles_offset; |
304 | sparc_config.cs_period = 1000000 / HZ; | 307 | sparc_config.cs_period = 1000000 / HZ; |
@@ -377,23 +380,6 @@ void __init leon_init_timers(void) | |||
377 | LEON3_BYPASS_STORE_PA( | 380 | LEON3_BYPASS_STORE_PA( |
378 | &leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, 0); | 381 | &leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, 0); |
379 | 382 | ||
380 | #ifdef CONFIG_SMP | ||
381 | leon3_ticker_irq = leon3_gptimer_irq + 1 + leon3_gptimer_idx; | ||
382 | |||
383 | if (!(LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config) & | ||
384 | (1<<LEON3_GPTIMER_SEPIRQ))) { | ||
385 | printk(KERN_ERR "timer not configured with separate irqs\n"); | ||
386 | BUG(); | ||
387 | } | ||
388 | |||
389 | LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].val, | ||
390 | 0); | ||
391 | LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].rld, | ||
392 | (((1000000/HZ) - 1))); | ||
393 | LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].ctrl, | ||
394 | 0); | ||
395 | #endif | ||
396 | |||
397 | /* | 383 | /* |
398 | * The IRQ controller may (if implemented) consist of multiple | 384 | * The IRQ controller may (if implemented) consist of multiple |
399 | * IRQ controllers, each mapped on a 4Kb boundary. | 385 | * IRQ controllers, each mapped on a 4Kb boundary. |
@@ -416,13 +402,6 @@ void __init leon_init_timers(void) | |||
416 | if (eirq != 0) | 402 | if (eirq != 0) |
417 | leon_eirq_setup(eirq); | 403 | leon_eirq_setup(eirq); |
418 | 404 | ||
419 | irq = _leon_build_device_irq(NULL, leon3_gptimer_irq+leon3_gptimer_idx); | ||
420 | err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL); | ||
421 | if (err) { | ||
422 | printk(KERN_ERR "unable to attach timer IRQ%d\n", irq); | ||
423 | prom_halt(); | ||
424 | } | ||
425 | |||
426 | #ifdef CONFIG_SMP | 405 | #ifdef CONFIG_SMP |
427 | { | 406 | { |
428 | unsigned long flags; | 407 | unsigned long flags; |
@@ -439,30 +418,31 @@ void __init leon_init_timers(void) | |||
439 | } | 418 | } |
440 | #endif | 419 | #endif |
441 | 420 | ||
442 | LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, | 421 | config = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config); |
443 | LEON3_GPTIMER_EN | | 422 | if (config & (1 << LEON3_GPTIMER_SEPIRQ)) |
444 | LEON3_GPTIMER_RL | | 423 | leon3_gptimer_irq += leon3_gptimer_idx; |
445 | LEON3_GPTIMER_LD | | 424 | else if ((config & LEON3_GPTIMER_TIMERS) > 1) |
446 | LEON3_GPTIMER_IRQEN); | 425 | pr_warn("GPTIMER uses shared irqs, using other timers of the same core will fail.\n"); |
447 | 426 | ||
448 | #ifdef CONFIG_SMP | 427 | #ifdef CONFIG_SMP |
449 | /* Install per-cpu IRQ handler for broadcasted ticker */ | 428 | /* Install per-cpu IRQ handler for broadcasted ticker */ |
450 | irq = leon_build_device_irq(leon3_ticker_irq, handle_percpu_irq, | 429 | irq = leon_build_device_irq(leon3_gptimer_irq, handle_percpu_irq, |
451 | "per-cpu", 0); | 430 | "per-cpu", 0); |
452 | err = request_irq(irq, leon_percpu_timer_ce_interrupt, | 431 | err = request_irq(irq, leon_percpu_timer_ce_interrupt, |
453 | IRQF_PERCPU | IRQF_TIMER, "ticker", | 432 | IRQF_PERCPU | IRQF_TIMER, "timer", NULL); |
454 | NULL); | 433 | #else |
434 | irq = _leon_build_device_irq(NULL, leon3_gptimer_irq); | ||
435 | err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL); | ||
436 | #endif | ||
455 | if (err) { | 437 | if (err) { |
456 | printk(KERN_ERR "unable to attach ticker IRQ%d\n", irq); | 438 | pr_err("Unable to attach timer IRQ%d\n", irq); |
457 | prom_halt(); | 439 | prom_halt(); |
458 | } | 440 | } |
459 | 441 | LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, | |
460 | LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].ctrl, | ||
461 | LEON3_GPTIMER_EN | | 442 | LEON3_GPTIMER_EN | |
462 | LEON3_GPTIMER_RL | | 443 | LEON3_GPTIMER_RL | |
463 | LEON3_GPTIMER_LD | | 444 | LEON3_GPTIMER_LD | |
464 | LEON3_GPTIMER_IRQEN); | 445 | LEON3_GPTIMER_IRQEN); |
465 | #endif | ||
466 | return; | 446 | return; |
467 | bad: | 447 | bad: |
468 | printk(KERN_ERR "No Timer/irqctrl found\n"); | 448 | printk(KERN_ERR "No Timer/irqctrl found\n"); |
diff --git a/arch/sparc/kernel/leon_pci_grpci1.c b/arch/sparc/kernel/leon_pci_grpci1.c index 7739a54315e2..6df26e37f879 100644 --- a/arch/sparc/kernel/leon_pci_grpci1.c +++ b/arch/sparc/kernel/leon_pci_grpci1.c | |||
@@ -536,11 +536,9 @@ static int grpci1_of_probe(struct platform_device *ofdev) | |||
536 | 536 | ||
537 | /* find device register base address */ | 537 | /* find device register base address */ |
538 | res = platform_get_resource(ofdev, IORESOURCE_MEM, 0); | 538 | res = platform_get_resource(ofdev, IORESOURCE_MEM, 0); |
539 | regs = devm_request_and_ioremap(&ofdev->dev, res); | 539 | regs = devm_ioremap_resource(&ofdev->dev, res); |
540 | if (!regs) { | 540 | if (IS_ERR(regs)) |
541 | dev_err(&ofdev->dev, "io-regs mapping failed\n"); | 541 | return PTR_ERR(regs); |
542 | return -EADDRNOTAVAIL; | ||
543 | } | ||
544 | 542 | ||
545 | /* | 543 | /* |
546 | * check that we're in Host Slot and that we can act as a Host Bridge | 544 | * check that we're in Host Slot and that we can act as a Host Bridge |
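devm_ioremap_resource() folds resource checking, request_mem_region(), ioremap() and error reporting into one managed call and returns an ERR_PTR on failure, which is why the explicit dev_err()/-EADDRNOTAVAIL branch can go away. The usual probe-time idiom:

    struct resource *res;
    void __iomem *regs;

    res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
    regs = devm_ioremap_resource(&ofdev->dev, res);  /* logs its own error */
    if (IS_ERR(regs))
            return PTR_ERR(regs);
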
diff --git a/arch/sparc/kernel/leon_pmc.c b/arch/sparc/kernel/leon_pmc.c index bdf53d9a8d46..b0b3967a2dd2 100644 --- a/arch/sparc/kernel/leon_pmc.c +++ b/arch/sparc/kernel/leon_pmc.c | |||
@@ -47,6 +47,10 @@ void pmc_leon_idle_fixup(void) | |||
47 | * MMU does not get a TLB miss here by using the MMU BYPASS ASI. | 47 | * MMU does not get a TLB miss here by using the MMU BYPASS ASI. |
48 | */ | 48 | */ |
49 | register unsigned int address = (unsigned int)leon3_irqctrl_regs; | 49 | register unsigned int address = (unsigned int)leon3_irqctrl_regs; |
50 | |||
51 | /* Interrupts need to be enabled to not hang the CPU */ | ||
52 | local_irq_enable(); | ||
53 | |||
50 | __asm__ __volatile__ ( | 54 | __asm__ __volatile__ ( |
51 | "wr %%g0, %%asr19\n" | 55 | "wr %%g0, %%asr19\n" |
52 | "lda [%0] %1, %%g0\n" | 56 | "lda [%0] %1, %%g0\n" |
@@ -60,6 +64,9 @@ void pmc_leon_idle_fixup(void) | |||
60 | */ | 64 | */ |
61 | void pmc_leon_idle(void) | 65 | void pmc_leon_idle(void) |
62 | { | 66 | { |
67 | /* Interrupts need to be enabled to not hang the CPU */ | ||
68 | local_irq_enable(); | ||
69 | |||
63 | /* For systems without power-down, this will be no-op */ | 70 | /* For systems without power-down, this will be no-op */ |
64 | __asm__ __volatile__ ("wr %g0, %asr19\n\t"); | 71 | __asm__ __volatile__ ("wr %g0, %asr19\n\t"); |
65 | } | 72 | } |
diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c index 9f20566b0773..79cc0d1a477d 100644 --- a/arch/sparc/kernel/prom_common.c +++ b/arch/sparc/kernel/prom_common.c | |||
@@ -54,6 +54,7 @@ EXPORT_SYMBOL(of_set_property_mutex); | |||
54 | int of_set_property(struct device_node *dp, const char *name, void *val, int len) | 54 | int of_set_property(struct device_node *dp, const char *name, void *val, int len) |
55 | { | 55 | { |
56 | struct property **prevp; | 56 | struct property **prevp; |
57 | unsigned long flags; | ||
57 | void *new_val; | 58 | void *new_val; |
58 | int err; | 59 | int err; |
59 | 60 | ||
@@ -64,7 +65,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len | |||
64 | err = -ENODEV; | 65 | err = -ENODEV; |
65 | 66 | ||
66 | mutex_lock(&of_set_property_mutex); | 67 | mutex_lock(&of_set_property_mutex); |
67 | raw_spin_lock(&devtree_lock); | 68 | raw_spin_lock_irqsave(&devtree_lock, flags); |
68 | prevp = &dp->properties; | 69 | prevp = &dp->properties; |
69 | while (*prevp) { | 70 | while (*prevp) { |
70 | struct property *prop = *prevp; | 71 | struct property *prop = *prevp; |
@@ -91,7 +92,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len | |||
91 | } | 92 | } |
92 | prevp = &(*prevp)->next; | 93 | prevp = &(*prevp)->next; |
93 | } | 94 | } |
94 | raw_spin_unlock(&devtree_lock); | 95 | raw_spin_unlock_irqrestore(&devtree_lock, flags); |
95 | mutex_unlock(&of_set_property_mutex); | 96 | mutex_unlock(&of_set_property_mutex); |
96 | 97 | ||
97 | /* XXX Update procfs if necessary... */ | 98 | /* XXX Update procfs if necessary... */ |
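The irqsave variant is used here presumably because devtree_lock is also taken from contexts with interrupts disabled (or from interrupt context) elsewhere in the OF code, so a plain raw_spin_lock() could deadlock on the same CPU. Standard pattern:

    unsigned long flags;

    raw_spin_lock_irqsave(&devtree_lock, flags);     /* disable local IRQs, save state */
    /* ... walk or modify the property list ... */
    raw_spin_unlock_irqrestore(&devtree_lock, flags);
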
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c index 38bf80a22f02..1434526970a6 100644 --- a/arch/sparc/kernel/setup_32.c +++ b/arch/sparc/kernel/setup_32.c | |||
@@ -304,7 +304,7 @@ void __init setup_arch(char **cmdline_p) | |||
304 | 304 | ||
305 | /* Initialize PROM console and command line. */ | 305 | /* Initialize PROM console and command line. */ |
306 | *cmdline_p = prom_getbootargs(); | 306 | *cmdline_p = prom_getbootargs(); |
307 | strcpy(boot_command_line, *cmdline_p); | 307 | strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE); |
308 | parse_early_param(); | 308 | parse_early_param(); |
309 | 309 | ||
310 | boot_flags_init(*cmdline_p); | 310 | boot_flags_init(*cmdline_p); |
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c index 88a127b9c69e..13785547e435 100644 --- a/arch/sparc/kernel/setup_64.c +++ b/arch/sparc/kernel/setup_64.c | |||
@@ -555,7 +555,7 @@ void __init setup_arch(char **cmdline_p) | |||
555 | { | 555 | { |
556 | /* Initialize PROM console and command line. */ | 556 | /* Initialize PROM console and command line. */ |
557 | *cmdline_p = prom_getbootargs(); | 557 | *cmdline_p = prom_getbootargs(); |
558 | strcpy(boot_command_line, *cmdline_p); | 558 | strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE); |
559 | parse_early_param(); | 559 | parse_early_param(); |
560 | 560 | ||
561 | boot_flags_init(*cmdline_p); | 561 | boot_flags_init(*cmdline_p); |
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index a7171997adfd..04fd55a6e461 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c | |||
@@ -1098,7 +1098,14 @@ static int __init grab_mblocks(struct mdesc_handle *md) | |||
1098 | m->size = *val; | 1098 | m->size = *val; |
1099 | val = mdesc_get_property(md, node, | 1099 | val = mdesc_get_property(md, node, |
1100 | "address-congruence-offset", NULL); | 1100 | "address-congruence-offset", NULL); |
1101 | m->offset = *val; | 1101 | |
1102 | /* The address-congruence-offset property is optional. | ||
1103 | * Explicitly zero it to identify this. | ||
1104 | */ | ||
1105 | if (val) | ||
1106 | m->offset = *val; | ||
1107 | else | ||
1108 | m->offset = 0UL; | ||
1102 | 1109 | ||
1103 | numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n", | 1110 | numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n", |
1104 | count - 1, m->base, m->size, m->offset); | 1111 | count - 1, m->base, m->size, m->offset); |
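mdesc_get_property() returns NULL when a node lacks the requested property, so an optional property has to be NULL-checked before dereferencing; condensed, the fix amounts to:

    val = mdesc_get_property(md, node, "address-congruence-offset", NULL);
    /* the property is optional; fall back to 0 when absent */
    m->offset = val ? *val : 0UL;
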
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c index 83d89bcb44af..37e7bc4c95b3 100644 --- a/arch/sparc/mm/tlb.c +++ b/arch/sparc/mm/tlb.c | |||
@@ -85,8 +85,8 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr, | |||
85 | } | 85 | } |
86 | 86 | ||
87 | if (!tb->active) { | 87 | if (!tb->active) { |
88 | global_flush_tlb_page(mm, vaddr); | ||
89 | flush_tsb_user_page(mm, vaddr); | 88 | flush_tsb_user_page(mm, vaddr); |
89 | global_flush_tlb_page(mm, vaddr); | ||
90 | goto out; | 90 | goto out; |
91 | } | 91 | } |
92 | 92 | ||
diff --git a/arch/sparc/prom/bootstr_32.c b/arch/sparc/prom/bootstr_32.c index f5ec32e0d419..d2b49d2365e7 100644 --- a/arch/sparc/prom/bootstr_32.c +++ b/arch/sparc/prom/bootstr_32.c | |||
@@ -23,23 +23,25 @@ prom_getbootargs(void) | |||
23 | return barg_buf; | 23 | return barg_buf; |
24 | } | 24 | } |
25 | 25 | ||
26 | switch(prom_vers) { | 26 | switch (prom_vers) { |
27 | case PROM_V0: | 27 | case PROM_V0: |
28 | cp = barg_buf; | 28 | cp = barg_buf; |
29 | /* Start from 1 and go over fd(0,0,0)kernel */ | 29 | /* Start from 1 and go over fd(0,0,0)kernel */ |
30 | for(iter = 1; iter < 8; iter++) { | 30 | for (iter = 1; iter < 8; iter++) { |
31 | arg = (*(romvec->pv_v0bootargs))->argv[iter]; | 31 | arg = (*(romvec->pv_v0bootargs))->argv[iter]; |
32 | if (arg == NULL) | 32 | if (arg == NULL) |
33 | break; | 33 | break; |
34 | while(*arg != 0) { | 34 | while (*arg != 0) { |
35 | /* Leave place for space and null. */ | 35 | /* Leave place for space and null. */ |
36 | if(cp >= barg_buf + BARG_LEN-2){ | 36 | if (cp >= barg_buf + BARG_LEN - 2) |
37 | /* We might issue a warning here. */ | 37 | /* We might issue a warning here. */ |
38 | break; | 38 | break; |
39 | } | ||
40 | *cp++ = *arg++; | 39 | *cp++ = *arg++; |
41 | } | 40 | } |
42 | *cp++ = ' '; | 41 | *cp++ = ' '; |
42 | if (cp >= barg_buf + BARG_LEN - 1) | ||
43 | /* We might issue a warning here. */ | ||
44 | break; | ||
43 | } | 45 | } |
44 | *cp = 0; | 46 | *cp = 0; |
45 | break; | 47 | break; |
diff --git a/arch/sparc/prom/tree_64.c b/arch/sparc/prom/tree_64.c index 92204c3800b5..bd1b2a3ac34e 100644 --- a/arch/sparc/prom/tree_64.c +++ b/arch/sparc/prom/tree_64.c | |||
@@ -39,7 +39,7 @@ inline phandle __prom_getchild(phandle node) | |||
39 | return prom_node_to_node("child", node); | 39 | return prom_node_to_node("child", node); |
40 | } | 40 | } |
41 | 41 | ||
42 | inline phandle prom_getchild(phandle node) | 42 | phandle prom_getchild(phandle node) |
43 | { | 43 | { |
44 | phandle cnode; | 44 | phandle cnode; |
45 | 45 | ||
@@ -72,7 +72,7 @@ inline phandle __prom_getsibling(phandle node) | |||
72 | return prom_node_to_node(prom_peer_name, node); | 72 | return prom_node_to_node(prom_peer_name, node); |
73 | } | 73 | } |
74 | 74 | ||
75 | inline phandle prom_getsibling(phandle node) | 75 | phandle prom_getsibling(phandle node) |
76 | { | 76 | { |
77 | phandle sibnode; | 77 | phandle sibnode; |
78 | 78 | ||
@@ -89,7 +89,7 @@ EXPORT_SYMBOL(prom_getsibling); | |||
89 | /* Return the length in bytes of property 'prop' at node 'node'. | 89 | /* Return the length in bytes of property 'prop' at node 'node'. |
90 | * Return -1 on error. | 90 | * Return -1 on error. |
91 | */ | 91 | */ |
92 | inline int prom_getproplen(phandle node, const char *prop) | 92 | int prom_getproplen(phandle node, const char *prop) |
93 | { | 93 | { |
94 | unsigned long args[6]; | 94 | unsigned long args[6]; |
95 | 95 | ||
@@ -113,8 +113,8 @@ EXPORT_SYMBOL(prom_getproplen); | |||
113 | * 'buffer' which has a size of 'bufsize'. If the acquisition | 113 | * 'buffer' which has a size of 'bufsize'. If the acquisition |
114 | * was successful the length will be returned, else -1 is returned. | 114 | * was successful the length will be returned, else -1 is returned. |
115 | */ | 115 | */ |
116 | inline int prom_getproperty(phandle node, const char *prop, | 116 | int prom_getproperty(phandle node, const char *prop, |
117 | char *buffer, int bufsize) | 117 | char *buffer, int bufsize) |
118 | { | 118 | { |
119 | unsigned long args[8]; | 119 | unsigned long args[8]; |
120 | int plen; | 120 | int plen; |
@@ -141,7 +141,7 @@ EXPORT_SYMBOL(prom_getproperty); | |||
141 | /* Acquire an integer property and return its value. Returns -1 | 141 | /* Acquire an integer property and return its value. Returns -1 |
142 | * on failure. | 142 | * on failure. |
143 | */ | 143 | */ |
144 | inline int prom_getint(phandle node, const char *prop) | 144 | int prom_getint(phandle node, const char *prop) |
145 | { | 145 | { |
146 | int intprop; | 146 | int intprop; |
147 | 147 | ||
@@ -235,7 +235,7 @@ static const char *prom_nextprop_name = "nextprop"; | |||
235 | /* Return the first property type for node 'node'. | 235 | /* Return the first property type for node 'node'. |
236 | * buffer should be at least 32B in length | 236 | * buffer should be at least 32B in length |
237 | */ | 237 | */ |
238 | inline char *prom_firstprop(phandle node, char *buffer) | 238 | char *prom_firstprop(phandle node, char *buffer) |
239 | { | 239 | { |
240 | unsigned long args[7]; | 240 | unsigned long args[7]; |
241 | 241 | ||
@@ -261,7 +261,7 @@ EXPORT_SYMBOL(prom_firstprop); | |||
261 | * at node 'node' . Returns NULL string if no more | 261 | * at node 'node' . Returns NULL string if no more |
262 | * property types for this node. | 262 | * property types for this node. |
263 | */ | 263 | */ |
264 | inline char *prom_nextprop(phandle node, const char *oprop, char *buffer) | 264 | char *prom_nextprop(phandle node, const char *oprop, char *buffer) |
265 | { | 265 | { |
266 | unsigned long args[7]; | 266 | unsigned long args[7]; |
267 | char buf[32]; | 267 | char buf[32]; |
diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c index 4385cb6fa00a..a93b02a25222 100644 --- a/arch/tile/lib/exports.c +++ b/arch/tile/lib/exports.c | |||
@@ -84,4 +84,6 @@ uint64_t __ashrdi3(uint64_t, unsigned int); | |||
84 | EXPORT_SYMBOL(__ashrdi3); | 84 | EXPORT_SYMBOL(__ashrdi3); |
85 | uint64_t __ashldi3(uint64_t, unsigned int); | 85 | uint64_t __ashldi3(uint64_t, unsigned int); |
86 | EXPORT_SYMBOL(__ashldi3); | 86 | EXPORT_SYMBOL(__ashldi3); |
87 | int __ffsdi2(uint64_t); | ||
88 | EXPORT_SYMBOL(__ffsdi2); | ||
87 | #endif | 89 | #endif |
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c index d7d21851e60c..3df3bd544492 100644 --- a/arch/um/drivers/mconsole_kern.c +++ b/arch/um/drivers/mconsole_kern.c | |||
@@ -147,7 +147,7 @@ void mconsole_proc(struct mc_request *req) | |||
147 | } | 147 | } |
148 | 148 | ||
149 | do { | 149 | do { |
150 | loff_t pos; | 150 | loff_t pos = file->f_pos; |
151 | mm_segment_t old_fs = get_fs(); | 151 | mm_segment_t old_fs = get_fs(); |
152 | set_fs(KERNEL_DS); | 152 | set_fs(KERNEL_DS); |
153 | len = vfs_read(file, buf, PAGE_SIZE - 1, &pos); | 153 | len = vfs_read(file, buf, PAGE_SIZE - 1, &pos); |
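The bug here is that `pos` was used uninitialized; vfs_read() takes the offset by pointer and advances it, so it has to start from the file's current position. A hedged sketch of the read step (buffer setup elided):

    loff_t pos = file->f_pos;              /* start at the current file position */
    mm_segment_t old_fs = get_fs();

    set_fs(KERNEL_DS);                     /* buf is a kernel pointer */
    len = vfs_read(file, buf, PAGE_SIZE - 1, &pos);   /* advances pos */
    set_fs(old_fs);
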
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 685692c94f05..fe120da25625 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -2265,6 +2265,7 @@ source "fs/Kconfig.binfmt" | |||
2265 | config IA32_EMULATION | 2265 | config IA32_EMULATION |
2266 | bool "IA32 Emulation" | 2266 | bool "IA32 Emulation" |
2267 | depends on X86_64 | 2267 | depends on X86_64 |
2268 | select BINFMT_ELF | ||
2268 | select COMPAT_BINFMT_ELF | 2269 | select COMPAT_BINFMT_ELF |
2269 | select HAVE_UID16 | 2270 | select HAVE_UID16 |
2270 | ---help--- | 2271 | ---help--- |
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index 35ee62fccf98..c205035a6b96 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c | |||
@@ -251,51 +251,6 @@ static void find_bits(unsigned long mask, u8 *pos, u8 *size) | |||
251 | *size = len; | 251 | *size = len; |
252 | } | 252 | } |
253 | 253 | ||
254 | static efi_status_t setup_efi_vars(struct boot_params *params) | ||
255 | { | ||
256 | struct setup_data *data; | ||
257 | struct efi_var_bootdata *efidata; | ||
258 | u64 store_size, remaining_size, var_size; | ||
259 | efi_status_t status; | ||
260 | |||
261 | if (sys_table->runtime->hdr.revision < EFI_2_00_SYSTEM_TABLE_REVISION) | ||
262 | return EFI_UNSUPPORTED; | ||
263 | |||
264 | data = (struct setup_data *)(unsigned long)params->hdr.setup_data; | ||
265 | |||
266 | while (data && data->next) | ||
267 | data = (struct setup_data *)(unsigned long)data->next; | ||
268 | |||
269 | status = efi_call_phys4((void *)sys_table->runtime->query_variable_info, | ||
270 | EFI_VARIABLE_NON_VOLATILE | | ||
271 | EFI_VARIABLE_BOOTSERVICE_ACCESS | | ||
272 | EFI_VARIABLE_RUNTIME_ACCESS, &store_size, | ||
273 | &remaining_size, &var_size); | ||
274 | |||
275 | if (status != EFI_SUCCESS) | ||
276 | return status; | ||
277 | |||
278 | status = efi_call_phys3(sys_table->boottime->allocate_pool, | ||
279 | EFI_LOADER_DATA, sizeof(*efidata), &efidata); | ||
280 | |||
281 | if (status != EFI_SUCCESS) | ||
282 | return status; | ||
283 | |||
284 | efidata->data.type = SETUP_EFI_VARS; | ||
285 | efidata->data.len = sizeof(struct efi_var_bootdata) - | ||
286 | sizeof(struct setup_data); | ||
287 | efidata->data.next = 0; | ||
288 | efidata->store_size = store_size; | ||
289 | efidata->remaining_size = remaining_size; | ||
290 | efidata->max_var_size = var_size; | ||
291 | |||
292 | if (data) | ||
293 | data->next = (unsigned long)efidata; | ||
294 | else | ||
295 | params->hdr.setup_data = (unsigned long)efidata; | ||
296 | |||
297 | } | ||
298 | |||
299 | static efi_status_t setup_efi_pci(struct boot_params *params) | 254 | static efi_status_t setup_efi_pci(struct boot_params *params) |
300 | { | 255 | { |
301 | efi_pci_io_protocol *pci; | 256 | efi_pci_io_protocol *pci; |
@@ -1202,8 +1157,6 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table, | |||
1202 | 1157 | ||
1203 | setup_graphics(boot_params); | 1158 | setup_graphics(boot_params); |
1204 | 1159 | ||
1205 | setup_efi_vars(boot_params); | ||
1206 | |||
1207 | setup_efi_pci(boot_params); | 1160 | setup_efi_pci(boot_params); |
1208 | 1161 | ||
1209 | status = efi_call_phys3(sys_table->boottime->allocate_pool, | 1162 | status = efi_call_phys3(sys_table->boottime->allocate_pool, |
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S index 62fe22cd4cba..477e9d75149b 100644 --- a/arch/x86/crypto/aesni-intel_asm.S +++ b/arch/x86/crypto/aesni-intel_asm.S | |||
@@ -2681,56 +2681,68 @@ ENTRY(aesni_xts_crypt8) | |||
2681 | addq %rcx, KEYP | 2681 | addq %rcx, KEYP |
2682 | 2682 | ||
2683 | movdqa IV, STATE1 | 2683 | movdqa IV, STATE1 |
2684 | pxor 0x00(INP), STATE1 | 2684 | movdqu 0x00(INP), INC |
2685 | pxor INC, STATE1 | ||
2685 | movdqu IV, 0x00(OUTP) | 2686 | movdqu IV, 0x00(OUTP) |
2686 | 2687 | ||
2687 | _aesni_gf128mul_x_ble() | 2688 | _aesni_gf128mul_x_ble() |
2688 | movdqa IV, STATE2 | 2689 | movdqa IV, STATE2 |
2689 | pxor 0x10(INP), STATE2 | 2690 | movdqu 0x10(INP), INC |
2691 | pxor INC, STATE2 | ||
2690 | movdqu IV, 0x10(OUTP) | 2692 | movdqu IV, 0x10(OUTP) |
2691 | 2693 | ||
2692 | _aesni_gf128mul_x_ble() | 2694 | _aesni_gf128mul_x_ble() |
2693 | movdqa IV, STATE3 | 2695 | movdqa IV, STATE3 |
2694 | pxor 0x20(INP), STATE3 | 2696 | movdqu 0x20(INP), INC |
2697 | pxor INC, STATE3 | ||
2695 | movdqu IV, 0x20(OUTP) | 2698 | movdqu IV, 0x20(OUTP) |
2696 | 2699 | ||
2697 | _aesni_gf128mul_x_ble() | 2700 | _aesni_gf128mul_x_ble() |
2698 | movdqa IV, STATE4 | 2701 | movdqa IV, STATE4 |
2699 | pxor 0x30(INP), STATE4 | 2702 | movdqu 0x30(INP), INC |
2703 | pxor INC, STATE4 | ||
2700 | movdqu IV, 0x30(OUTP) | 2704 | movdqu IV, 0x30(OUTP) |
2701 | 2705 | ||
2702 | call *%r11 | 2706 | call *%r11 |
2703 | 2707 | ||
2704 | pxor 0x00(OUTP), STATE1 | 2708 | movdqu 0x00(OUTP), INC |
2709 | pxor INC, STATE1 | ||
2705 | movdqu STATE1, 0x00(OUTP) | 2710 | movdqu STATE1, 0x00(OUTP) |
2706 | 2711 | ||
2707 | _aesni_gf128mul_x_ble() | 2712 | _aesni_gf128mul_x_ble() |
2708 | movdqa IV, STATE1 | 2713 | movdqa IV, STATE1 |
2709 | pxor 0x40(INP), STATE1 | 2714 | movdqu 0x40(INP), INC |
2715 | pxor INC, STATE1 | ||
2710 | movdqu IV, 0x40(OUTP) | 2716 | movdqu IV, 0x40(OUTP) |
2711 | 2717 | ||
2712 | pxor 0x10(OUTP), STATE2 | 2718 | movdqu 0x10(OUTP), INC |
2719 | pxor INC, STATE2 | ||
2713 | movdqu STATE2, 0x10(OUTP) | 2720 | movdqu STATE2, 0x10(OUTP) |
2714 | 2721 | ||
2715 | _aesni_gf128mul_x_ble() | 2722 | _aesni_gf128mul_x_ble() |
2716 | movdqa IV, STATE2 | 2723 | movdqa IV, STATE2 |
2717 | pxor 0x50(INP), STATE2 | 2724 | movdqu 0x50(INP), INC |
2725 | pxor INC, STATE2 | ||
2718 | movdqu IV, 0x50(OUTP) | 2726 | movdqu IV, 0x50(OUTP) |
2719 | 2727 | ||
2720 | pxor 0x20(OUTP), STATE3 | 2728 | movdqu 0x20(OUTP), INC |
2729 | pxor INC, STATE3 | ||
2721 | movdqu STATE3, 0x20(OUTP) | 2730 | movdqu STATE3, 0x20(OUTP) |
2722 | 2731 | ||
2723 | _aesni_gf128mul_x_ble() | 2732 | _aesni_gf128mul_x_ble() |
2724 | movdqa IV, STATE3 | 2733 | movdqa IV, STATE3 |
2725 | pxor 0x60(INP), STATE3 | 2734 | movdqu 0x60(INP), INC |
2735 | pxor INC, STATE3 | ||
2726 | movdqu IV, 0x60(OUTP) | 2736 | movdqu IV, 0x60(OUTP) |
2727 | 2737 | ||
2728 | pxor 0x30(OUTP), STATE4 | 2738 | movdqu 0x30(OUTP), INC |
2739 | pxor INC, STATE4 | ||
2729 | movdqu STATE4, 0x30(OUTP) | 2740 | movdqu STATE4, 0x30(OUTP) |
2730 | 2741 | ||
2731 | _aesni_gf128mul_x_ble() | 2742 | _aesni_gf128mul_x_ble() |
2732 | movdqa IV, STATE4 | 2743 | movdqa IV, STATE4 |
2733 | pxor 0x70(INP), STATE4 | 2744 | movdqu 0x70(INP), INC |
2745 | pxor INC, STATE4 | ||
2734 | movdqu IV, 0x70(OUTP) | 2746 | movdqu IV, 0x70(OUTP) |
2735 | 2747 | ||
2736 | _aesni_gf128mul_x_ble() | 2748 | _aesni_gf128mul_x_ble() |
@@ -2738,16 +2750,20 @@ ENTRY(aesni_xts_crypt8) | |||
2738 | 2750 | ||
2739 | call *%r11 | 2751 | call *%r11 |
2740 | 2752 | ||
2741 | pxor 0x40(OUTP), STATE1 | 2753 | movdqu 0x40(OUTP), INC |
2754 | pxor INC, STATE1 | ||
2742 | movdqu STATE1, 0x40(OUTP) | 2755 | movdqu STATE1, 0x40(OUTP) |
2743 | 2756 | ||
2744 | pxor 0x50(OUTP), STATE2 | 2757 | movdqu 0x50(OUTP), INC |
2758 | pxor INC, STATE2 | ||
2745 | movdqu STATE2, 0x50(OUTP) | 2759 | movdqu STATE2, 0x50(OUTP) |
2746 | 2760 | ||
2747 | pxor 0x60(OUTP), STATE3 | 2761 | movdqu 0x60(OUTP), INC |
2762 | pxor INC, STATE3 | ||
2748 | movdqu STATE3, 0x60(OUTP) | 2763 | movdqu STATE3, 0x60(OUTP) |
2749 | 2764 | ||
2750 | pxor 0x70(OUTP), STATE4 | 2765 | movdqu 0x70(OUTP), INC |
2766 | pxor INC, STATE4 | ||
2751 | movdqu STATE4, 0x70(OUTP) | 2767 | movdqu STATE4, 0x70(OUTP) |
2752 | 2768 | ||
2753 | ret | 2769 | ret |
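
The aesni_xts_crypt8 hunk above replaces every pxor that read its source operand straight from memory with a movdqu into the spare INC register followed by a register-register pxor. Legacy SSE instructions with a 128-bit memory operand fault (#GP) unless the operand is 16-byte aligned, while movdqu accepts unaligned addresses, so the eight-block XTS path stops faulting on unaligned input/output buffers. A minimal userspace sketch of the same idea, assuming SSE2 intrinsics (illustration only, not the kernel code):

    #include <emmintrin.h>
    #include <stdint.h>

    /* XOR one 16-byte block against a tweak without assuming the source
     * or destination pointers are 16-byte aligned. */
    static void xts_xor_block(uint8_t *dst, const uint8_t *src, __m128i tweak)
    {
        __m128i in  = _mm_loadu_si128((const __m128i *)src); /* movdqu load  */
        __m128i out = _mm_xor_si128(tweak, in);              /* pxor reg,reg */
        _mm_storeu_si128((__m128i *)dst, out);               /* movdqu store */
    }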
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c index 805078e08013..52ff81cce008 100644 --- a/arch/x86/ia32/ia32_aout.c +++ b/arch/x86/ia32/ia32_aout.c | |||
@@ -192,7 +192,7 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, | |||
192 | /* struct user */ | 192 | /* struct user */ |
193 | DUMP_WRITE(&dump, sizeof(dump)); | 193 | DUMP_WRITE(&dump, sizeof(dump)); |
194 | /* Now dump all of the user data. Include malloced stuff as well */ | 194 | /* Now dump all of the user data. Include malloced stuff as well */ |
195 | DUMP_SEEK(PAGE_SIZE); | 195 | DUMP_SEEK(PAGE_SIZE - sizeof(dump)); |
196 | /* now we start writing out the user space info */ | 196 | /* now we start writing out the user space info */ |
197 | set_fs(USER_DS); | 197 | set_fs(USER_DS); |
198 | /* Dump the data area */ | 198 | /* Dump the data area */ |
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 2fb5d5884e23..60c89f30c727 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h | |||
@@ -102,13 +102,6 @@ extern void efi_call_phys_epilog(void); | |||
102 | extern void efi_unmap_memmap(void); | 102 | extern void efi_unmap_memmap(void); |
103 | extern void efi_memory_uc(u64 addr, unsigned long size); | 103 | extern void efi_memory_uc(u64 addr, unsigned long size); |
104 | 104 | ||
105 | struct efi_var_bootdata { | ||
106 | struct setup_data data; | ||
107 | u64 store_size; | ||
108 | u64 remaining_size; | ||
109 | u64 max_var_size; | ||
110 | }; | ||
111 | |||
112 | #ifdef CONFIG_EFI | 105 | #ifdef CONFIG_EFI |
113 | 106 | ||
114 | static inline bool efi_is_native(void) | 107 | static inline bool efi_is_native(void) |
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h index ba870bb6dd8e..57873beb3292 100644 --- a/arch/x86/include/asm/irq.h +++ b/arch/x86/include/asm/irq.h | |||
@@ -41,4 +41,9 @@ extern int vector_used_by_percpu_irq(unsigned int vector); | |||
41 | 41 | ||
42 | extern void init_ISA_irqs(void); | 42 | extern void init_ISA_irqs(void); |
43 | 43 | ||
44 | #ifdef CONFIG_X86_LOCAL_APIC | ||
45 | void arch_trigger_all_cpu_backtrace(void); | ||
46 | #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace | ||
47 | #endif | ||
48 | |||
44 | #endif /* _ASM_X86_IRQ_H */ | 49 | #endif /* _ASM_X86_IRQ_H */ |
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index 6825e2efd1b4..6bc3985ee473 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h | |||
@@ -60,11 +60,11 @@ static inline void __exit exit_amd_microcode(void) {} | |||
60 | #ifdef CONFIG_MICROCODE_EARLY | 60 | #ifdef CONFIG_MICROCODE_EARLY |
61 | #define MAX_UCODE_COUNT 128 | 61 | #define MAX_UCODE_COUNT 128 |
62 | extern void __init load_ucode_bsp(void); | 62 | extern void __init load_ucode_bsp(void); |
63 | extern __init void load_ucode_ap(void); | 63 | extern void __cpuinit load_ucode_ap(void); |
64 | extern int __init save_microcode_in_initrd(void); | 64 | extern int __init save_microcode_in_initrd(void); |
65 | #else | 65 | #else |
66 | static inline void __init load_ucode_bsp(void) {} | 66 | static inline void __init load_ucode_bsp(void) {} |
67 | static inline __init void load_ucode_ap(void) {} | 67 | static inline void __cpuinit load_ucode_ap(void) {} |
68 | static inline int __init save_microcode_in_initrd(void) | 68 | static inline int __init save_microcode_in_initrd(void) |
69 | { | 69 | { |
70 | return 0; | 70 | return 0; |
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h index c0fa356e90de..86f9301903c8 100644 --- a/arch/x86/include/asm/nmi.h +++ b/arch/x86/include/asm/nmi.h | |||
@@ -18,9 +18,7 @@ extern int proc_nmi_enabled(struct ctl_table *, int , | |||
18 | void __user *, size_t *, loff_t *); | 18 | void __user *, size_t *, loff_t *); |
19 | extern int unknown_nmi_panic; | 19 | extern int unknown_nmi_panic; |
20 | 20 | ||
21 | void arch_trigger_all_cpu_backtrace(void); | 21 | #endif /* CONFIG_X86_LOCAL_APIC */ |
22 | #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace | ||
23 | #endif | ||
24 | 22 | ||
25 | #define NMI_FLAG_FIRST 1 | 23 | #define NMI_FLAG_FIRST 1 |
26 | 24 | ||
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h index 08744242b8d2..c15ddaf90710 100644 --- a/arch/x86/include/uapi/asm/bootparam.h +++ b/arch/x86/include/uapi/asm/bootparam.h | |||
@@ -6,7 +6,6 @@ | |||
6 | #define SETUP_E820_EXT 1 | 6 | #define SETUP_E820_EXT 1 |
7 | #define SETUP_DTB 2 | 7 | #define SETUP_DTB 2 |
8 | #define SETUP_PCI 3 | 8 | #define SETUP_PCI 3 |
9 | #define SETUP_EFI_VARS 4 | ||
10 | 9 | ||
11 | /* ram_size flags */ | 10 | /* ram_size flags */ |
12 | #define RAMDISK_IMAGE_START_MASK 0x07FF | 11 | #define RAMDISK_IMAGE_START_MASK 0x07FF |
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c index 31cb9ae992b7..a698d7165c96 100644 --- a/arch/x86/kernel/apic/hw_nmi.c +++ b/arch/x86/kernel/apic/hw_nmi.c | |||
@@ -9,6 +9,7 @@ | |||
9 | * | 9 | * |
10 | */ | 10 | */ |
11 | #include <asm/apic.h> | 11 | #include <asm/apic.h> |
12 | #include <asm/nmi.h> | ||
12 | 13 | ||
13 | #include <linux/cpumask.h> | 14 | #include <linux/cpumask.h> |
14 | #include <linux/kdebug.h> | 15 | #include <linux/kdebug.h> |
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c index 35ffda5d0727..5f90b85ff22e 100644 --- a/arch/x86/kernel/cpu/mtrr/cleanup.c +++ b/arch/x86/kernel/cpu/mtrr/cleanup.c | |||
@@ -714,15 +714,15 @@ int __init mtrr_cleanup(unsigned address_bits) | |||
714 | if (mtrr_tom2) | 714 | if (mtrr_tom2) |
715 | x_remove_size = (mtrr_tom2 >> PAGE_SHIFT) - x_remove_base; | 715 | x_remove_size = (mtrr_tom2 >> PAGE_SHIFT) - x_remove_base; |
716 | 716 | ||
717 | nr_range = x86_get_mtrr_mem_range(range, 0, x_remove_base, x_remove_size); | ||
718 | /* | 717 | /* |
719 | * [0, 1M) should always be covered by var mtrr with WB | 718 | * [0, 1M) should always be covered by var mtrr with WB |
720 | * and fixed mtrrs should take effect before var mtrr for it: | 719 | * and fixed mtrrs should take effect before var mtrr for it: |
721 | */ | 720 | */ |
722 | nr_range = add_range_with_merge(range, RANGE_NUM, nr_range, 0, | 721 | nr_range = add_range_with_merge(range, RANGE_NUM, 0, 0, |
723 | 1ULL<<(20 - PAGE_SHIFT)); | 722 | 1ULL<<(20 - PAGE_SHIFT)); |
724 | /* Sort the ranges: */ | 723 | /* add from var mtrr at last */ |
725 | sort_range(range, nr_range); | 724 | nr_range = x86_get_mtrr_mem_range(range, nr_range, |
725 | x_remove_base, x_remove_size); | ||
726 | 726 | ||
727 | range_sums = sum_ranges(range, nr_range); | 727 | range_sums = sum_ranges(range, nr_range); |
728 | printk(KERN_INFO "total RAM covered: %ldM\n", | 728 | printk(KERN_INFO "total RAM covered: %ldM\n", |
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index f60d41ff9a97..a9e22073bd56 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
@@ -165,13 +165,13 @@ static struct extra_reg intel_snb_extra_regs[] __read_mostly = { | |||
165 | INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0), | 165 | INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0), |
166 | INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1), | 166 | INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1), |
167 | INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), | 167 | INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), |
168 | INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), | ||
169 | EVENT_EXTRA_END | 168 | EVENT_EXTRA_END |
170 | }; | 169 | }; |
171 | 170 | ||
172 | static struct extra_reg intel_snbep_extra_regs[] __read_mostly = { | 171 | static struct extra_reg intel_snbep_extra_regs[] __read_mostly = { |
173 | INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0), | 172 | INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0), |
174 | INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1), | 173 | INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1), |
174 | INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), | ||
175 | EVENT_EXTRA_END | 175 | EVENT_EXTRA_END |
176 | }; | 176 | }; |
177 | 177 | ||
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index d2c381280e3c..3dd37ebd591b 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c | |||
@@ -242,6 +242,7 @@ void __init kvmclock_init(void) | |||
242 | if (!mem) | 242 | if (!mem) |
243 | return; | 243 | return; |
244 | hv_clock = __va(mem); | 244 | hv_clock = __va(mem); |
245 | memset(hv_clock, 0, size); | ||
245 | 246 | ||
246 | if (kvm_register_clock("boot clock")) { | 247 | if (kvm_register_clock("boot clock")) { |
247 | hv_clock = NULL; | 248 | hv_clock = NULL; |
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 4e7a37ff03ab..81a5f5e8f142 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
@@ -277,18 +277,6 @@ void exit_idle(void) | |||
277 | } | 277 | } |
278 | #endif | 278 | #endif |
279 | 279 | ||
280 | void arch_cpu_idle_prepare(void) | ||
281 | { | ||
282 | /* | ||
283 | * If we're the non-boot CPU, nothing set the stack canary up | ||
284 | * for us. CPU0 already has it initialized but no harm in | ||
285 | * doing it again. This is a good place for updating it, as | ||
286 | * we wont ever return from this function (so the invalid | ||
287 | * canaries already on the stack wont ever trigger). | ||
288 | */ | ||
289 | boot_init_stack_canary(); | ||
290 | } | ||
291 | |||
292 | void arch_cpu_idle_enter(void) | 280 | void arch_cpu_idle_enter(void) |
293 | { | 281 | { |
294 | local_touch_nmi(); | 282 | local_touch_nmi(); |
diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S index 7a6f3b3be3cf..f2bb9c96720a 100644 --- a/arch/x86/kernel/relocate_kernel_64.S +++ b/arch/x86/kernel/relocate_kernel_64.S | |||
@@ -160,7 +160,7 @@ identity_mapped: | |||
160 | xorq %rbp, %rbp | 160 | xorq %rbp, %rbp |
161 | xorq %r8, %r8 | 161 | xorq %r8, %r8 |
162 | xorq %r9, %r9 | 162 | xorq %r9, %r9 |
163 | xorq %r10, %r9 | 163 | xorq %r10, %r10 |
164 | xorq %r11, %r11 | 164 | xorq %r11, %r11 |
165 | xorq %r12, %r12 | 165 | xorq %r12, %r12 |
166 | xorq %r13, %r13 | 166 | xorq %r13, %r13 |
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 9c73b51817e4..bfd348e99369 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -372,15 +372,15 @@ static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) | |||
372 | 372 | ||
373 | void __cpuinit set_cpu_sibling_map(int cpu) | 373 | void __cpuinit set_cpu_sibling_map(int cpu) |
374 | { | 374 | { |
375 | bool has_mc = boot_cpu_data.x86_max_cores > 1; | ||
376 | bool has_smt = smp_num_siblings > 1; | 375 | bool has_smt = smp_num_siblings > 1; |
376 | bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1; | ||
377 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 377 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
378 | struct cpuinfo_x86 *o; | 378 | struct cpuinfo_x86 *o; |
379 | int i; | 379 | int i; |
380 | 380 | ||
381 | cpumask_set_cpu(cpu, cpu_sibling_setup_mask); | 381 | cpumask_set_cpu(cpu, cpu_sibling_setup_mask); |
382 | 382 | ||
383 | if (!has_smt && !has_mc) { | 383 | if (!has_mp) { |
384 | cpumask_set_cpu(cpu, cpu_sibling_mask(cpu)); | 384 | cpumask_set_cpu(cpu, cpu_sibling_mask(cpu)); |
385 | cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu)); | 385 | cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu)); |
386 | cpumask_set_cpu(cpu, cpu_core_mask(cpu)); | 386 | cpumask_set_cpu(cpu, cpu_core_mask(cpu)); |
@@ -394,7 +394,7 @@ void __cpuinit set_cpu_sibling_map(int cpu) | |||
394 | if ((i == cpu) || (has_smt && match_smt(c, o))) | 394 | if ((i == cpu) || (has_smt && match_smt(c, o))) |
395 | link_mask(sibling, cpu, i); | 395 | link_mask(sibling, cpu, i); |
396 | 396 | ||
397 | if ((i == cpu) || (has_mc && match_llc(c, o))) | 397 | if ((i == cpu) || (has_mp && match_llc(c, o))) |
398 | link_mask(llc_shared, cpu, i); | 398 | link_mask(llc_shared, cpu, i); |
399 | 399 | ||
400 | } | 400 | } |
@@ -406,7 +406,7 @@ void __cpuinit set_cpu_sibling_map(int cpu) | |||
406 | for_each_cpu(i, cpu_sibling_setup_mask) { | 406 | for_each_cpu(i, cpu_sibling_setup_mask) { |
407 | o = &cpu_data(i); | 407 | o = &cpu_data(i); |
408 | 408 | ||
409 | if ((i == cpu) || (has_mc && match_mc(c, o))) { | 409 | if ((i == cpu) || (has_mp && match_mc(c, o))) { |
410 | link_mask(core, cpu, i); | 410 | link_mask(core, cpu, i); |
411 | 411 | ||
412 | /* | 412 | /* |
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 8db0010ed150..5953dcea752d 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c | |||
@@ -1240,9 +1240,12 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt, | |||
1240 | ctxt->modrm_seg = VCPU_SREG_DS; | 1240 | ctxt->modrm_seg = VCPU_SREG_DS; |
1241 | 1241 | ||
1242 | if (ctxt->modrm_mod == 3) { | 1242 | if (ctxt->modrm_mod == 3) { |
1243 | int highbyte_regs = ctxt->rex_prefix == 0; | ||
1244 | |||
1243 | op->type = OP_REG; | 1245 | op->type = OP_REG; |
1244 | op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; | 1246 | op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; |
1245 | op->addr.reg = decode_register(ctxt, ctxt->modrm_rm, ctxt->d & ByteOp); | 1247 | op->addr.reg = decode_register(ctxt, ctxt->modrm_rm, |
1248 | highbyte_regs && (ctxt->d & ByteOp)); | ||
1246 | if (ctxt->d & Sse) { | 1249 | if (ctxt->d & Sse) { |
1247 | op->type = OP_XMM; | 1250 | op->type = OP_XMM; |
1248 | op->bytes = 16; | 1251 | op->bytes = 16; |
@@ -3997,7 +4000,8 @@ static const struct opcode twobyte_table[256] = { | |||
3997 | DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N, | 4000 | DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N, |
3998 | N, D(ImplicitOps | ModRM), N, N, | 4001 | N, D(ImplicitOps | ModRM), N, N, |
3999 | /* 0x10 - 0x1F */ | 4002 | /* 0x10 - 0x1F */ |
4000 | N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N, | 4003 | N, N, N, N, N, N, N, N, |
4004 | D(ImplicitOps | ModRM), N, N, N, N, N, N, D(ImplicitOps | ModRM), | ||
4001 | /* 0x20 - 0x2F */ | 4005 | /* 0x20 - 0x2F */ |
4002 | DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read), | 4006 | DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read), |
4003 | DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read), | 4007 | DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read), |
@@ -4836,6 +4840,7 @@ twobyte_insn: | |||
4836 | case 0x08: /* invd */ | 4840 | case 0x08: /* invd */ |
4837 | case 0x0d: /* GrpP (prefetch) */ | 4841 | case 0x0d: /* GrpP (prefetch) */ |
4838 | case 0x18: /* Grp16 (prefetch/nop) */ | 4842 | case 0x18: /* Grp16 (prefetch/nop) */ |
4843 | case 0x1f: /* nop */ | ||
4839 | break; | 4844 | break; |
4840 | case 0x20: /* mov cr, reg */ | 4845 | case 0x20: /* mov cr, reg */ |
4841 | ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg); | 4846 | ctxt->dst.val = ops->get_cr(ctxt, ctxt->modrm_reg); |
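
The highbyte_regs change above hinges on an x86 decoding rule: for 8-bit operands, ModRM register encodings 4-7 name the legacy high-byte registers AH/CH/DH/BH only when no REX prefix is present; with any REX prefix they name SPL/BPL/SIL/DIL instead, which is why the emulator now passes the high-byte flag only when ctxt->rex_prefix == 0. A small illustrative decoder (hypothetical helper, REX.B extension bit ignored for brevity):

    /* Map an 8-bit ModRM.rm encoding to a register name. */
    static const char *reg8_name(unsigned int modrm_rm, int rex_prefix)
    {
        static const char *const legacy[] = { "al", "cl", "dl", "bl",
                                              "ah", "ch", "dh", "bh" };
        static const char *const rexset[] = { "al",  "cl",  "dl",  "bl",
                                              "spl", "bpl", "sil", "dil" };
        return rex_prefix ? rexset[modrm_rm & 7] : legacy[modrm_rm & 7];
    }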
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index e1adbb4aca75..0eee2c8b64d1 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c | |||
@@ -1861,11 +1861,14 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu) | |||
1861 | { | 1861 | { |
1862 | struct kvm_lapic *apic = vcpu->arch.apic; | 1862 | struct kvm_lapic *apic = vcpu->arch.apic; |
1863 | unsigned int sipi_vector; | 1863 | unsigned int sipi_vector; |
1864 | unsigned long pe; | ||
1864 | 1865 | ||
1865 | if (!kvm_vcpu_has_lapic(vcpu)) | 1866 | if (!kvm_vcpu_has_lapic(vcpu) || !apic->pending_events) |
1866 | return; | 1867 | return; |
1867 | 1868 | ||
1868 | if (test_and_clear_bit(KVM_APIC_INIT, &apic->pending_events)) { | 1869 | pe = xchg(&apic->pending_events, 0); |
1870 | |||
1871 | if (test_bit(KVM_APIC_INIT, &pe)) { | ||
1869 | kvm_lapic_reset(vcpu); | 1872 | kvm_lapic_reset(vcpu); |
1870 | kvm_vcpu_reset(vcpu); | 1873 | kvm_vcpu_reset(vcpu); |
1871 | if (kvm_vcpu_is_bsp(apic->vcpu)) | 1874 | if (kvm_vcpu_is_bsp(apic->vcpu)) |
@@ -1873,7 +1876,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu) | |||
1873 | else | 1876 | else |
1874 | vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; | 1877 | vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED; |
1875 | } | 1878 | } |
1876 | if (test_and_clear_bit(KVM_APIC_SIPI, &apic->pending_events) && | 1879 | if (test_bit(KVM_APIC_SIPI, &pe) && |
1877 | vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { | 1880 | vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) { |
1878 | /* evaluate pending_events before reading the vector */ | 1881 | /* evaluate pending_events before reading the vector */ |
1879 | smp_rmb(); | 1882 | smp_rmb(); |
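
The lapic.c hunk above swaps two separate test_and_clear_bit() calls for a single xchg() that atomically snapshots and clears all pending events at once, so the INIT and SIPI bits are evaluated against one consistent copy rather than a word that other CPUs can modify between the two tests. A self-contained C11 sketch of the same snapshot-and-clear pattern (hypothetical event names, not the KVM structures):

    #include <stdatomic.h>
    #include <stdint.h>

    enum { EVT_INIT = 0, EVT_SIPI = 1 };

    static void accept_events(_Atomic uint64_t *pending)
    {
        /* Take every pending bit in one atomic exchange, then act on
         * the local copy; bits set concurrently stay latched for the
         * next pass instead of being half-cleared. */
        uint64_t pe = atomic_exchange(pending, 0);

        if (pe & (1ull << EVT_INIT)) {
            /* handle INIT */
        }
        if (pe & (1ull << EVT_SIPI)) {
            /* handle SIPI */
        }
    }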
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 094b5d96ab14..e8ba99c34180 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -582,8 +582,6 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) | |||
582 | if (index != XCR_XFEATURE_ENABLED_MASK) | 582 | if (index != XCR_XFEATURE_ENABLED_MASK) |
583 | return 1; | 583 | return 1; |
584 | xcr0 = xcr; | 584 | xcr0 = xcr; |
585 | if (kvm_x86_ops->get_cpl(vcpu) != 0) | ||
586 | return 1; | ||
587 | if (!(xcr0 & XSTATE_FP)) | 585 | if (!(xcr0 & XSTATE_FP)) |
588 | return 1; | 586 | return 1; |
589 | if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE)) | 587 | if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE)) |
@@ -597,7 +595,8 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) | |||
597 | 595 | ||
598 | int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) | 596 | int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) |
599 | { | 597 | { |
600 | if (__kvm_set_xcr(vcpu, index, xcr)) { | 598 | if (kvm_x86_ops->get_cpl(vcpu) != 0 || |
599 | __kvm_set_xcr(vcpu, index, xcr)) { | ||
601 | kvm_inject_gp(vcpu, 0); | 600 | kvm_inject_gp(vcpu, 0); |
602 | return 1; | 601 | return 1; |
603 | } | 602 | } |
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index eaac1743def7..1f34e9219775 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c | |||
@@ -277,6 +277,9 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range, | |||
277 | end_pfn = limit_pfn; | 277 | end_pfn = limit_pfn; |
278 | nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0); | 278 | nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0); |
279 | 279 | ||
280 | if (!after_bootmem) | ||
281 | adjust_range_page_size_mask(mr, nr_range); | ||
282 | |||
280 | /* try to merge same page size and continuous */ | 283 | /* try to merge same page size and continuous */ |
281 | for (i = 0; nr_range > 1 && i < nr_range - 1; i++) { | 284 | for (i = 0; nr_range > 1 && i < nr_range - 1; i++) { |
282 | unsigned long old_start; | 285 | unsigned long old_start; |
@@ -291,9 +294,6 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range, | |||
291 | nr_range--; | 294 | nr_range--; |
292 | } | 295 | } |
293 | 296 | ||
294 | if (!after_bootmem) | ||
295 | adjust_range_page_size_mask(mr, nr_range); | ||
296 | |||
297 | for (i = 0; i < nr_range; i++) | 297 | for (i = 0; i < nr_range; i++) |
298 | printk(KERN_DEBUG " [mem %#010lx-%#010lx] page %s\n", | 298 | printk(KERN_DEBUG " [mem %#010lx-%#010lx] page %s\n", |
299 | mr[i].start, mr[i].end - 1, | 299 | mr[i].start, mr[i].end - 1, |
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c index 305c68b8d538..981c2dbd72cc 100644 --- a/arch/x86/pci/common.c +++ b/arch/x86/pci/common.c | |||
@@ -628,7 +628,9 @@ int pcibios_add_device(struct pci_dev *dev) | |||
628 | 628 | ||
629 | pa_data = boot_params.hdr.setup_data; | 629 | pa_data = boot_params.hdr.setup_data; |
630 | while (pa_data) { | 630 | while (pa_data) { |
631 | data = phys_to_virt(pa_data); | 631 | data = ioremap(pa_data, sizeof(*rom)); |
632 | if (!data) | ||
633 | return -ENOMEM; | ||
632 | 634 | ||
633 | if (data->type == SETUP_PCI) { | 635 | if (data->type == SETUP_PCI) { |
634 | rom = (struct pci_setup_rom *)data; | 636 | rom = (struct pci_setup_rom *)data; |
@@ -645,6 +647,7 @@ int pcibios_add_device(struct pci_dev *dev) | |||
645 | } | 647 | } |
646 | } | 648 | } |
647 | pa_data = data->next; | 649 | pa_data = data->next; |
650 | iounmap(data); | ||
648 | } | 651 | } |
649 | return 0; | 652 | return 0; |
650 | } | 653 | } |
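
pcibios_add_device() above switches from phys_to_virt() to a temporary ioremap()/iounmap() pair because the firmware-provided setup_data entries may live in memory the kernel's direct mapping does not cover. A hedged kernel-style sketch of reading just the chained header that way (assumed helper name, not the kernel's):

    #include <linux/io.h>
    #include <linux/types.h>
    #include <asm/bootparam.h>      /* struct setup_data */

    /* Read the 'next' physical pointer of one setup_data node through a
     * short-lived mapping instead of assuming a direct-map address. */
    static u64 setup_data_next(u64 pa_data)
    {
        struct setup_data *data;
        u64 next;

        data = ioremap(pa_data, sizeof(*data));
        if (!data)
            return 0;
        next = data->next;
        iounmap(data);
        return next;
    }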
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 82089d8b1954..d2fbcedcf6ea 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c | |||
@@ -42,7 +42,6 @@ | |||
42 | #include <linux/io.h> | 42 | #include <linux/io.h> |
43 | #include <linux/reboot.h> | 43 | #include <linux/reboot.h> |
44 | #include <linux/bcd.h> | 44 | #include <linux/bcd.h> |
45 | #include <linux/ucs2_string.h> | ||
46 | 45 | ||
47 | #include <asm/setup.h> | 46 | #include <asm/setup.h> |
48 | #include <asm/efi.h> | 47 | #include <asm/efi.h> |
@@ -54,12 +53,12 @@ | |||
54 | 53 | ||
55 | #define EFI_DEBUG 1 | 54 | #define EFI_DEBUG 1 |
56 | 55 | ||
57 | /* | 56 | #define EFI_MIN_RESERVE 5120 |
58 | * There's some additional metadata associated with each | 57 | |
59 | * variable. Intel's reference implementation is 60 bytes - bump that | 58 | #define EFI_DUMMY_GUID \ |
60 | * to account for potential alignment constraints | 59 | EFI_GUID(0x4424ac57, 0xbe4b, 0x47dd, 0x9e, 0x97, 0xed, 0x50, 0xf0, 0x9f, 0x92, 0xa9) |
61 | */ | 60 | |
62 | #define VAR_METADATA_SIZE 64 | 61 | static efi_char16_t efi_dummy_name[6] = { 'D', 'U', 'M', 'M', 'Y', 0 }; |
63 | 62 | ||
64 | struct efi __read_mostly efi = { | 63 | struct efi __read_mostly efi = { |
65 | .mps = EFI_INVALID_TABLE_ADDR, | 64 | .mps = EFI_INVALID_TABLE_ADDR, |
@@ -79,13 +78,6 @@ struct efi_memory_map memmap; | |||
79 | static struct efi efi_phys __initdata; | 78 | static struct efi efi_phys __initdata; |
80 | static efi_system_table_t efi_systab __initdata; | 79 | static efi_system_table_t efi_systab __initdata; |
81 | 80 | ||
82 | static u64 efi_var_store_size; | ||
83 | static u64 efi_var_remaining_size; | ||
84 | static u64 efi_var_max_var_size; | ||
85 | static u64 boot_used_size; | ||
86 | static u64 boot_var_size; | ||
87 | static u64 active_size; | ||
88 | |||
89 | unsigned long x86_efi_facility; | 81 | unsigned long x86_efi_facility; |
90 | 82 | ||
91 | /* | 83 | /* |
@@ -188,53 +180,8 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size, | |||
188 | efi_char16_t *name, | 180 | efi_char16_t *name, |
189 | efi_guid_t *vendor) | 181 | efi_guid_t *vendor) |
190 | { | 182 | { |
191 | efi_status_t status; | 183 | return efi_call_virt3(get_next_variable, |
192 | static bool finished = false; | 184 | name_size, name, vendor); |
193 | static u64 var_size; | ||
194 | |||
195 | status = efi_call_virt3(get_next_variable, | ||
196 | name_size, name, vendor); | ||
197 | |||
198 | if (status == EFI_NOT_FOUND) { | ||
199 | finished = true; | ||
200 | if (var_size < boot_used_size) { | ||
201 | boot_var_size = boot_used_size - var_size; | ||
202 | active_size += boot_var_size; | ||
203 | } else { | ||
204 | printk(KERN_WARNING FW_BUG "efi: Inconsistent initial sizes\n"); | ||
205 | } | ||
206 | } | ||
207 | |||
208 | if (boot_used_size && !finished) { | ||
209 | unsigned long size = 0; | ||
210 | u32 attr; | ||
211 | efi_status_t s; | ||
212 | void *tmp; | ||
213 | |||
214 | s = virt_efi_get_variable(name, vendor, &attr, &size, NULL); | ||
215 | |||
216 | if (s != EFI_BUFFER_TOO_SMALL || !size) | ||
217 | return status; | ||
218 | |||
219 | tmp = kmalloc(size, GFP_ATOMIC); | ||
220 | |||
221 | if (!tmp) | ||
222 | return status; | ||
223 | |||
224 | s = virt_efi_get_variable(name, vendor, &attr, &size, tmp); | ||
225 | |||
226 | if (s == EFI_SUCCESS && (attr & EFI_VARIABLE_NON_VOLATILE)) { | ||
227 | var_size += size; | ||
228 | var_size += ucs2_strsize(name, 1024); | ||
229 | active_size += size; | ||
230 | active_size += VAR_METADATA_SIZE; | ||
231 | active_size += ucs2_strsize(name, 1024); | ||
232 | } | ||
233 | |||
234 | kfree(tmp); | ||
235 | } | ||
236 | |||
237 | return status; | ||
238 | } | 185 | } |
239 | 186 | ||
240 | static efi_status_t virt_efi_set_variable(efi_char16_t *name, | 187 | static efi_status_t virt_efi_set_variable(efi_char16_t *name, |
@@ -243,34 +190,9 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name, | |||
243 | unsigned long data_size, | 190 | unsigned long data_size, |
244 | void *data) | 191 | void *data) |
245 | { | 192 | { |
246 | efi_status_t status; | 193 | return efi_call_virt5(set_variable, |
247 | u32 orig_attr = 0; | 194 | name, vendor, attr, |
248 | unsigned long orig_size = 0; | 195 | data_size, data); |
249 | |||
250 | status = virt_efi_get_variable(name, vendor, &orig_attr, &orig_size, | ||
251 | NULL); | ||
252 | |||
253 | if (status != EFI_BUFFER_TOO_SMALL) | ||
254 | orig_size = 0; | ||
255 | |||
256 | status = efi_call_virt5(set_variable, | ||
257 | name, vendor, attr, | ||
258 | data_size, data); | ||
259 | |||
260 | if (status == EFI_SUCCESS) { | ||
261 | if (orig_size) { | ||
262 | active_size -= orig_size; | ||
263 | active_size -= ucs2_strsize(name, 1024); | ||
264 | active_size -= VAR_METADATA_SIZE; | ||
265 | } | ||
266 | if (data_size) { | ||
267 | active_size += data_size; | ||
268 | active_size += ucs2_strsize(name, 1024); | ||
269 | active_size += VAR_METADATA_SIZE; | ||
270 | } | ||
271 | } | ||
272 | |||
273 | return status; | ||
274 | } | 196 | } |
275 | 197 | ||
276 | static efi_status_t virt_efi_query_variable_info(u32 attr, | 198 | static efi_status_t virt_efi_query_variable_info(u32 attr, |
@@ -786,9 +708,6 @@ void __init efi_init(void) | |||
786 | char vendor[100] = "unknown"; | 708 | char vendor[100] = "unknown"; |
787 | int i = 0; | 709 | int i = 0; |
788 | void *tmp; | 710 | void *tmp; |
789 | struct setup_data *data; | ||
790 | struct efi_var_bootdata *efi_var_data; | ||
791 | u64 pa_data; | ||
792 | 711 | ||
793 | #ifdef CONFIG_X86_32 | 712 | #ifdef CONFIG_X86_32 |
794 | if (boot_params.efi_info.efi_systab_hi || | 713 | if (boot_params.efi_info.efi_systab_hi || |
@@ -806,22 +725,6 @@ void __init efi_init(void) | |||
806 | if (efi_systab_init(efi_phys.systab)) | 725 | if (efi_systab_init(efi_phys.systab)) |
807 | return; | 726 | return; |
808 | 727 | ||
809 | pa_data = boot_params.hdr.setup_data; | ||
810 | while (pa_data) { | ||
811 | data = early_ioremap(pa_data, sizeof(*efi_var_data)); | ||
812 | if (data->type == SETUP_EFI_VARS) { | ||
813 | efi_var_data = (struct efi_var_bootdata *)data; | ||
814 | |||
815 | efi_var_store_size = efi_var_data->store_size; | ||
816 | efi_var_remaining_size = efi_var_data->remaining_size; | ||
817 | efi_var_max_var_size = efi_var_data->max_var_size; | ||
818 | } | ||
819 | pa_data = data->next; | ||
820 | early_iounmap(data, sizeof(*efi_var_data)); | ||
821 | } | ||
822 | |||
823 | boot_used_size = efi_var_store_size - efi_var_remaining_size; | ||
824 | |||
825 | set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility); | 728 | set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility); |
826 | 729 | ||
827 | /* | 730 | /* |
@@ -1085,6 +988,13 @@ void __init efi_enter_virtual_mode(void) | |||
1085 | runtime_code_page_mkexec(); | 988 | runtime_code_page_mkexec(); |
1086 | 989 | ||
1087 | kfree(new_memmap); | 990 | kfree(new_memmap); |
991 | |||
992 | /* clean DUMMY object */ | ||
993 | efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID, | ||
994 | EFI_VARIABLE_NON_VOLATILE | | ||
995 | EFI_VARIABLE_BOOTSERVICE_ACCESS | | ||
996 | EFI_VARIABLE_RUNTIME_ACCESS, | ||
997 | 0, NULL); | ||
1088 | } | 998 | } |
1089 | 999 | ||
1090 | /* | 1000 | /* |
@@ -1136,33 +1046,70 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size) | |||
1136 | efi_status_t status; | 1046 | efi_status_t status; |
1137 | u64 storage_size, remaining_size, max_size; | 1047 | u64 storage_size, remaining_size, max_size; |
1138 | 1048 | ||
1049 | if (!(attributes & EFI_VARIABLE_NON_VOLATILE)) | ||
1050 | return 0; | ||
1051 | |||
1139 | status = efi.query_variable_info(attributes, &storage_size, | 1052 | status = efi.query_variable_info(attributes, &storage_size, |
1140 | &remaining_size, &max_size); | 1053 | &remaining_size, &max_size); |
1141 | if (status != EFI_SUCCESS) | 1054 | if (status != EFI_SUCCESS) |
1142 | return status; | 1055 | return status; |
1143 | 1056 | ||
1144 | if (!max_size && remaining_size > size) | ||
1145 | printk_once(KERN_ERR FW_BUG "Broken EFI implementation" | ||
1146 | " is returning MaxVariableSize=0\n"); | ||
1147 | /* | 1057 | /* |
1148 | * Some firmware implementations refuse to boot if there's insufficient | 1058 | * Some firmware implementations refuse to boot if there's insufficient |
1149 | * space in the variable store. We account for that by refusing the | 1059 | * space in the variable store. We account for that by refusing the |
1150 | * write if permitting it would reduce the available space to under | 1060 | * write if permitting it would reduce the available space to under |
1151 | * 50%. However, some firmware won't reclaim variable space until | 1061 | * 5KB. This figure was provided by Samsung, so should be safe. |
1152 | * after the used (not merely the actively used) space drops below | ||
1153 | * a threshold. We can approximate that case with the value calculated | ||
1154 | * above. If both the firmware and our calculations indicate that the | ||
1155 | * available space would drop below 50%, refuse the write. | ||
1156 | */ | 1062 | */ |
1063 | if ((remaining_size - size < EFI_MIN_RESERVE) && | ||
1064 | !efi_no_storage_paranoia) { | ||
1065 | |||
1066 | /* | ||
1067 | * Triggering garbage collection may require that the firmware | ||
1068 | * generate a real EFI_OUT_OF_RESOURCES error. We can force | ||
1069 | * that by attempting to use more space than is available. | ||
1070 | */ | ||
1071 | unsigned long dummy_size = remaining_size + 1024; | ||
1072 | void *dummy = kzalloc(dummy_size, GFP_ATOMIC); | ||
1073 | |||
1074 | if (!dummy) | ||
1075 | return EFI_OUT_OF_RESOURCES; | ||
1076 | |||
1077 | status = efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID, | ||
1078 | EFI_VARIABLE_NON_VOLATILE | | ||
1079 | EFI_VARIABLE_BOOTSERVICE_ACCESS | | ||
1080 | EFI_VARIABLE_RUNTIME_ACCESS, | ||
1081 | dummy_size, dummy); | ||
1082 | |||
1083 | if (status == EFI_SUCCESS) { | ||
1084 | /* | ||
1085 | * This should have failed, so if it didn't make sure | ||
1086 | * that we delete it... | ||
1087 | */ | ||
1088 | efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID, | ||
1089 | EFI_VARIABLE_NON_VOLATILE | | ||
1090 | EFI_VARIABLE_BOOTSERVICE_ACCESS | | ||
1091 | EFI_VARIABLE_RUNTIME_ACCESS, | ||
1092 | 0, dummy); | ||
1093 | } | ||
1094 | |||
1095 | kfree(dummy); | ||
1157 | 1096 | ||
1158 | if (!storage_size || size > remaining_size || | 1097 | /* |
1159 | (max_size && size > max_size)) | 1098 | * The runtime code may now have triggered a garbage collection |
1160 | return EFI_OUT_OF_RESOURCES; | 1099 | * run, so check the variable info again |
1100 | */ | ||
1101 | status = efi.query_variable_info(attributes, &storage_size, | ||
1102 | &remaining_size, &max_size); | ||
1161 | 1103 | ||
1162 | if (!efi_no_storage_paranoia && | 1104 | if (status != EFI_SUCCESS) |
1163 | ((active_size + size + VAR_METADATA_SIZE > storage_size / 2) && | 1105 | return status; |
1164 | (remaining_size - size < storage_size / 2))) | 1106 | |
1165 | return EFI_OUT_OF_RESOURCES; | 1107 | /* |
1108 | * There still isn't enough room, so return an error | ||
1109 | */ | ||
1110 | if (remaining_size - size < EFI_MIN_RESERVE) | ||
1111 | return EFI_OUT_OF_RESOURCES; | ||
1112 | } | ||
1166 | 1113 | ||
1167 | return EFI_SUCCESS; | 1114 | return EFI_SUCCESS; |
1168 | } | 1115 | } |
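
The rewritten efi_query_variable_store() above drops the old 50% heuristic in favour of a fixed reserve: a non-volatile write is refused only if it would leave less than EFI_MIN_RESERVE (5120 bytes) free, and before refusing, the code tries to provoke the firmware's garbage collection by writing, and then deleting, a dummy variable larger than the reported free space and re-querying. For example, with remaining_size = 7168 bytes, a 3072-byte write would leave 4096 < 5120 and is rejected unless reclamation frees more room. A tiny standalone sketch of the final check (assumed constant name, not the kernel's types):

    #include <stdbool.h>
    #include <stdint.h>

    #define MIN_RESERVE 5120u       /* 5 KiB, mirrors EFI_MIN_RESERVE */

    /* Would writing 'size' bytes still leave the firmware its reserve? */
    static bool write_fits(uint64_t remaining_size, uint64_t size)
    {
        return remaining_size >= size &&
               remaining_size - size >= MIN_RESERVE;
    }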
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c index 590be1090892..f7bab68a4b83 100644 --- a/arch/x86/tools/relocs.c +++ b/arch/x86/tools/relocs.c | |||
@@ -42,9 +42,6 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = { | |||
42 | "^(xen_irq_disable_direct_reloc$|" | 42 | "^(xen_irq_disable_direct_reloc$|" |
43 | "xen_save_fl_direct_reloc$|" | 43 | "xen_save_fl_direct_reloc$|" |
44 | "VDSO|" | 44 | "VDSO|" |
45 | #if ELF_BITS == 64 | ||
46 | "__vvar_page|" | ||
47 | #endif | ||
48 | "__crc_)", | 45 | "__crc_)", |
49 | 46 | ||
50 | /* | 47 | /* |
@@ -72,6 +69,7 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = { | |||
72 | "__per_cpu_load|" | 69 | "__per_cpu_load|" |
73 | "init_per_cpu__.*|" | 70 | "init_per_cpu__.*|" |
74 | "__end_rodata_hpage_align|" | 71 | "__end_rodata_hpage_align|" |
72 | "__vvar_page|" | ||
75 | #endif | 73 | #endif |
76 | "_end)$" | 74 | "_end)$" |
77 | }; | 75 | }; |
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index fb44426fe931..d99cae8147d1 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
18 | #include <linux/smp.h> | 18 | #include <linux/smp.h> |
19 | #include <linux/irq_work.h> | 19 | #include <linux/irq_work.h> |
20 | #include <linux/tick.h> | ||
20 | 21 | ||
21 | #include <asm/paravirt.h> | 22 | #include <asm/paravirt.h> |
22 | #include <asm/desc.h> | 23 | #include <asm/desc.h> |
@@ -447,6 +448,13 @@ static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */ | |||
447 | play_dead_common(); | 448 | play_dead_common(); |
448 | HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL); | 449 | HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL); |
449 | cpu_bringup(); | 450 | cpu_bringup(); |
451 | /* | ||
452 | * commit 4b0c0f294 (tick: Cleanup NOHZ per cpu data on cpu down) | ||
453 | * clears certain data that the cpu_idle loop (which called us | ||
454 | * and that we return from) expects. The only way to get that | ||
455 | * data back is to call: | ||
456 | */ | ||
457 | tick_nohz_idle_enter(); | ||
450 | } | 458 | } |
451 | 459 | ||
452 | #else /* !CONFIG_HOTPLUG_CPU */ | 460 | #else /* !CONFIG_HOTPLUG_CPU */ |