Diffstat (limited to 'arch')
115 files changed, 1224 insertions, 626 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index dd0e8eb8042f..a4429bcd609e 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -213,6 +213,9 @@ config USE_GENERIC_SMP_HELPERS
 config GENERIC_SMP_IDLE_THREAD
 bool

+config GENERIC_IDLE_POLL_SETUP
+bool
+
 # Select if arch init_task initializer is different to init/init_task.c
 config ARCH_INIT_TASK
 bool
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index d423d58f938d..49d993cee512 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -38,6 +38,7 @@ config ARM
 select HAVE_GENERIC_HARDIRQS
 select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7))
 select HAVE_IDE if PCI || ISA || PCMCIA
+select HAVE_IRQ_TIME_ACCOUNTING
 select HAVE_KERNEL_GZIP
 select HAVE_KERNEL_LZMA
 select HAVE_KERNEL_LZO
@@ -488,7 +489,7 @@ config ARCH_IXP4XX
 config ARCH_DOVE
 bool "Marvell Dove"
 select ARCH_REQUIRE_GPIOLIB
-select CPU_V7
+select CPU_PJ4
 select GENERIC_CLOCKEVENTS
 select MIGHT_HAVE_PCI
 select PINCTRL
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 47374085befd..1ba358ba16b8 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -309,7 +309,7 @@ define archhelp
 echo ' Image - Uncompressed kernel image (arch/$(ARCH)/boot/Image)'
 echo '* xipImage - XIP kernel image, if configured (arch/$(ARCH)/boot/xipImage)'
 echo ' uImage - U-Boot wrapped zImage'
 echo ' bootpImage - Combined zImage and initial RAM disk'
 echo ' (supply initrd image via make variable INITRD=<path>)'
 echo '* dtbs - Build device tree blobs for enabled boards'
 echo ' install - Install uncompressed kernel'
diff --git a/arch/arm/boot/dts/armada-370-xp.dtsi b/arch/arm/boot/dts/armada-370-xp.dtsi
index 272bbc65fab0..550eb772c30e 100644
--- a/arch/arm/boot/dts/armada-370-xp.dtsi
+++ b/arch/arm/boot/dts/armada-370-xp.dtsi
@@ -33,7 +33,8 @@
 #size-cells = <1>;
 compatible = "simple-bus";
 interrupt-parent = <&mpic>;
-ranges = <0 0 0xd0000000 0x100000>;
+ranges = <0 0 0xd0000000 0x0100000 /* internal registers */
+0xe0000000 0 0xe0000000 0x8100000 /* PCIe */>;

 internal-regs {
 compatible = "simple-bus";
diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi
index b2c1b5af9749..aee2b1866ce2 100644
--- a/arch/arm/boot/dts/armada-370.dtsi
+++ b/arch/arm/boot/dts/armada-370.dtsi
@@ -29,7 +29,8 @@
 };

 soc {
-ranges = <0 0xd0000000 0x100000>;
+ranges = <0 0xd0000000 0x0100000 /* internal registers */
+0xe0000000 0xe0000000 0x8100000 /* PCIe */>;
 internal-regs {
 system-controller@18200 {
 compatible = "marvell,armada-370-xp-system-controller";
@@ -38,12 +39,12 @@

 L2: l2-cache {
 compatible = "marvell,aurora-outer-cache";
-reg = <0xd0008000 0x1000>;
+reg = <0x08000 0x1000>;
 cache-id-part = <0x100>;
 wt-override;
 };

-mpic: interrupt-controller@20000 {
+interrupt-controller@20000 {
 reg = <0x20a00 0x1d0>, <0x21870 0x58>;
 };

diff --git a/arch/arm/boot/dts/armada-xp-gp.dts b/arch/arm/boot/dts/armada-xp-gp.dts
index 26ad06fc147e..3ee63d128e27 100644
--- a/arch/arm/boot/dts/armada-xp-gp.dts
+++ b/arch/arm/boot/dts/armada-xp-gp.dts
@@ -39,6 +39,9 @@
 };

 soc {
+ranges = <0 0 0xd0000000 0x100000
+0xf0000000 0 0xf0000000 0x1000000>;
+
 internal-regs {
 serial@12000 {
 clock-frequency = <250000000>;
diff --git a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
index f14d36c46159..46b785064dd8 100644
--- a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
+++ b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
@@ -27,6 +27,9 @@
 };

 soc {
+ranges = <0 0 0xd0000000 0x100000
+0xf0000000 0 0xf0000000 0x8000000>;
+
 internal-regs {
 serial@12000 {
 clock-frequency = <250000000>;
diff --git a/arch/arm/boot/dts/armada-xp.dtsi b/arch/arm/boot/dts/armada-xp.dtsi
index bacab11c10dc..5b902f9a3af2 100644
--- a/arch/arm/boot/dts/armada-xp.dtsi
+++ b/arch/arm/boot/dts/armada-xp.dtsi
@@ -31,7 +31,7 @@
 wt-override;
 };

-mpic: interrupt-controller@20000 {
+interrupt-controller@20000 {
 reg = <0x20a00 0x2d0>, <0x21070 0x58>;
 };

diff --git a/arch/arm/boot/dts/at91sam9x25ek.dts b/arch/arm/boot/dts/at91sam9x25ek.dts
index 3b40d11d65e7..315250b4995e 100644
--- a/arch/arm/boot/dts/at91sam9x25ek.dts
+++ b/arch/arm/boot/dts/at91sam9x25ek.dts
@@ -11,7 +11,7 @@
 /include/ "at91sam9x5ek.dtsi"

 / {
-model = "Atmel AT91SAM9G25-EK";
+model = "Atmel AT91SAM9X25-EK";
 compatible = "atmel,at91sam9x25ek", "atmel,at91sam9x5ek", "atmel,at91sam9x5", "atmel,at91sam9";

 ahb {
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index 82a404da1c0d..99ba6e14ebf3 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -516,7 +516,7 @@
 usb_otg_hs: usb_otg_hs@480ab000 {
 compatible = "ti,omap3-musb";
 reg = <0x480ab000 0x1000>;
-interrupts = <0 92 0x4>, <0 93 0x4>;
+interrupts = <92>, <93>;
 interrupt-names = "mc", "dma";
 ti,hwmods = "usb_otg_hs";
 multipoint = <1>;
diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi
index 2e643ea51cce..5000e0d42849 100644
--- a/arch/arm/boot/dts/sama5d3.dtsi
+++ b/arch/arm/boot/dts/sama5d3.dtsi
@@ -75,11 +75,6 @@
 compatible = "atmel,at91sam9x5-spi";
 reg = <0xf0004000 0x100>;
 interrupts = <24 4 3>;
-cs-gpios = <&pioD 13 0
-&pioD 14 0 /* conflicts with SCK0 and CANRX0 */
-&pioD 15 0 /* conflicts with CTS0 and CANTX0 */
-&pioD 16 0 /* conflicts with RTS0 and PWMFI3 */
->;
 pinctrl-names = "default";
 pinctrl-0 = <&pinctrl_spi0>;
 status = "disabled";
@@ -156,7 +151,7 @@
 };

 macb0: ethernet@f0028000 {
-compatible = "cnds,pc302-gem", "cdns,gem";
+compatible = "cdns,pc302-gem", "cdns,gem";
 reg = <0xf0028000 0x100>;
 interrupts = <34 4 3>;
 pinctrl-names = "default";
@@ -203,11 +198,6 @@
 compatible = "atmel,at91sam9x5-spi";
 reg = <0xf8008000 0x100>;
 interrupts = <25 4 3>;
-cs-gpios = <&pioC 25 0
-&pioC 26 0 /* conflitcs with TWD1 and ISI_D11 */
-&pioC 27 0 /* conflitcs with TWCK1 and ISI_D10 */
-&pioC 28 0 /* conflitcs with PWMFI0 and ISI_D9 */
->;
 pinctrl-names = "default";
 pinctrl-0 = <&pinctrl_spi1>;
 status = "disabled";
diff --git a/arch/arm/boot/dts/sama5d3xcm.dtsi b/arch/arm/boot/dts/sama5d3xcm.dtsi
index 1f8ed404626c..b336e7787cb3 100644
--- a/arch/arm/boot/dts/sama5d3xcm.dtsi
+++ b/arch/arm/boot/dts/sama5d3xcm.dtsi
@@ -32,6 +32,10 @@

 ahb {
 apb {
+spi0: spi@f0004000 {
+cs-gpios = <&pioD 13 0>, <0>, <0>, <0>;
+};
+
 macb0: ethernet@f0028000 {
 phy-mode = "rgmii";
 };
diff --git a/arch/arm/common/mcpm_platsmp.c b/arch/arm/common/mcpm_platsmp.c
index 52b88d81b7bb..3caed0db6986 100644
--- a/arch/arm/common/mcpm_platsmp.c
+++ b/arch/arm/common/mcpm_platsmp.c
@@ -15,8 +15,6 @@
 #include <linux/smp.h>
 #include <linux/spinlock.h>

-#include <linux/irqchip/arm-gic.h>
-
 #include <asm/mcpm.h>
 #include <asm/smp.h>
 #include <asm/smp_plat.h>
@@ -49,7 +47,6 @@ static int __cpuinit mcpm_boot_secondary(unsigned int cpu, struct task_struct *i
 static void __cpuinit mcpm_secondary_init(unsigned int cpu)
 {
 mcpm_cpu_powered_up();
-gic_secondary_init(0);
 }

 #ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index c1ef64bc5abd..f59090210ec9 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -20,6 +20,7 @@ CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
 # CONFIG_BLK_DEV_BSG is not set
+CONFIG_ARCH_MULTI_V6=y
 CONFIG_ARCH_OMAP2PLUS=y
 CONFIG_OMAP_RESET_CLOCKS=y
 CONFIG_OMAP_MUX_DEBUG=y
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index 7eb18c1d8d6c..4f009c10540d 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -233,15 +233,15 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 ((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr), \
 atomic64_t, \
 counter), \
-(unsigned long)(o), \
-(unsigned long)(n)))
+(unsigned long long)(o), \
+(unsigned long long)(n)))

 #define cmpxchg64_local(ptr, o, n) \
 ((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr), \
 local64_t, \
 a), \
-(unsigned long)(o), \
-(unsigned long)(n)))
+(unsigned long long)(o), \
+(unsigned long long)(n)))

 #endif /* __LINUX_ARM_ARCH__ >= 6 */

diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c
index 2acdff4c1dfe..180b3024bec3 100644
--- a/arch/arm/mach-at91/at91rm9200_time.c
+++ b/arch/arm/mach-at91/at91rm9200_time.c
@@ -174,6 +174,7 @@ clkevt32k_next_event(unsigned long delta, struct clock_event_device *dev)
 static struct clock_event_device clkevt = {
 .name = "at91_tick",
 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+.shift = 32,
 .rating = 150,
 .set_next_event = clkevt32k_next_event,
 .set_mode = clkevt32k_mode,
@@ -264,9 +265,11 @@ void __init at91rm9200_timer_init(void)
 at91_st_write(AT91_ST_RTMR, 1);

 /* Setup timer clockevent, with minimum of two ticks (important!!) */
+clkevt.mult = div_sc(AT91_SLOW_CLOCK, NSEC_PER_SEC, clkevt.shift);
+clkevt.max_delta_ns = clockevent_delta2ns(AT91_ST_ALMV, &clkevt);
+clkevt.min_delta_ns = clockevent_delta2ns(2, &clkevt) + 1;
 clkevt.cpumask = cpumask_of(0);
-clockevents_config_and_register(&clkevt, AT91_SLOW_CLOCK,
-2, AT91_ST_ALMV);
+clockevents_register_device(&clkevt);

 /* register clocksource */
 clocksource_register_hz(&clk32k, AT91_SLOW_CLOCK);
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index c2cae69e6d2b..f38922897563 100644
--- a/arch/arm/mach-kirkwood/common.c
+++ b/arch/arm/mach-kirkwood/common.c
@@ -528,12 +528,6 @@ void __init kirkwood_init_early(void)
 {
 orion_time_set_base(TIMER_VIRT_BASE);

-/*
-* Some Kirkwood devices allocate their coherent buffers from atomic
-* context. Increase size of atomic coherent pool to make sure such
-* the allocations won't fail.
-*/
-init_dma_coherent_pool_size(SZ_1M);
 mvebu_mbus_init("marvell,kirkwood-mbus",
 BRIDGE_WINS_BASE, BRIDGE_WINS_SZ,
 DDR_WINDOW_CPU_BASE, DDR_WINDOW_CPU_SZ);
diff --git a/arch/arm/mach-kirkwood/ts219-setup.c b/arch/arm/mach-kirkwood/ts219-setup.c
index 283abff90228..e1267d6b468f 100644
--- a/arch/arm/mach-kirkwood/ts219-setup.c
+++ b/arch/arm/mach-kirkwood/ts219-setup.c
@@ -124,7 +124,7 @@ static void __init qnap_ts219_init(void)
 static int __init ts219_pci_init(void)
 {
 if (machine_is_ts219())
-kirkwood_pcie_init(KW_PCIE0);
+kirkwood_pcie_init(KW_PCIE1 | KW_PCIE0);

 return 0;
 }
diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig
index e11acbb0a46d..80a8bcacd9d5 100644
--- a/arch/arm/mach-mvebu/Kconfig
+++ b/arch/arm/mach-mvebu/Kconfig
@@ -15,6 +15,7 @@ config ARCH_MVEBU
 select MVEBU_CLK_GATING
 select MVEBU_MBUS
 select ZONE_DMA if ARM_LPAE
+select ARCH_REQUIRE_GPIOLIB

 if ARCH_MVEBU

diff --git a/arch/arm/mach-mvebu/armada-370-xp.c b/arch/arm/mach-mvebu/armada-370-xp.c
index 42a4cb3087e2..1c48890bb72b 100644
--- a/arch/arm/mach-mvebu/armada-370-xp.c
+++ b/arch/arm/mach-mvebu/armada-370-xp.c
@@ -54,13 +54,6 @@ void __init armada_370_xp_init_early(void)
 char *mbus_soc_name;

 /*
-* Some Armada 370/XP devices allocate their coherent buffers
-* from atomic context. Increase size of atomic coherent pool
-* to make sure such the allocations won't fail.
-*/
-init_dma_coherent_pool_size(SZ_1M);
-
-/*
 * This initialization will be replaced by a DT-based
 * initialization once the mvebu-mbus driver gains DT support.
 */
diff --git a/arch/arm/mach-omap1/dma.c b/arch/arm/mach-omap1/dma.c
index 68ab858e27b7..a94b3a718d1a 100644
--- a/arch/arm/mach-omap1/dma.c
+++ b/arch/arm/mach-omap1/dma.c
@@ -345,6 +345,7 @@ static int __init omap1_system_dma_init(void)
 dev_err(&pdev->dev,
 "%s: Memory allocation failed for d->chan!\n",
 __func__);
+ret = -ENOMEM;
 goto exit_release_d;
 }

diff --git a/arch/arm/mach-omap2/cclock33xx_data.c b/arch/arm/mach-omap2/cclock33xx_data.c
index 6ebc7803bc3e..af3544ce4f02 100644
--- a/arch/arm/mach-omap2/cclock33xx_data.c
+++ b/arch/arm/mach-omap2/cclock33xx_data.c
@@ -454,9 +454,29 @@ DEFINE_CLK_GATE(cefuse_fck, "sys_clkin_ck", &sys_clkin_ck, 0x0,
 */
 DEFINE_CLK_FIXED_FACTOR(clkdiv32k_ck, "clk_24mhz", &clk_24mhz, 0x0, 1, 732);

-DEFINE_CLK_GATE(clkdiv32k_ick, "clkdiv32k_ck", &clkdiv32k_ck, 0x0,
-AM33XX_CM_PER_CLKDIV32K_CLKCTRL, AM33XX_MODULEMODE_SWCTRL_SHIFT,
-0x0, NULL);
+static struct clk clkdiv32k_ick;
+
+static const char *clkdiv32k_ick_parent_names[] = {
+"clkdiv32k_ck",
+};
+
+static const struct clk_ops clkdiv32k_ick_ops = {
+.enable = &omap2_dflt_clk_enable,
+.disable = &omap2_dflt_clk_disable,
+.is_enabled = &omap2_dflt_clk_is_enabled,
+.init = &omap2_init_clk_clkdm,
+};
+
+static struct clk_hw_omap clkdiv32k_ick_hw = {
+.hw = {
+.clk = &clkdiv32k_ick,
+},
+.enable_reg = AM33XX_CM_PER_CLKDIV32K_CLKCTRL,
+.enable_bit = AM33XX_MODULEMODE_SWCTRL_SHIFT,
+.clkdm_name = "clk_24mhz_clkdm",
+};
+
+DEFINE_STRUCT_CLK(clkdiv32k_ick, clkdiv32k_ick_parent_names, clkdiv32k_ick_ops);

 /* "usbotg_fck" is an additional clock and not really a modulemode */
 DEFINE_CLK_GATE(usbotg_fck, "dpll_per_ck", &dpll_per_ck, 0x0,
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index d25a95fe9921..e3289888adfa 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -2065,7 +2065,7 @@ static int _omap4_get_context_lost(struct omap_hwmod *oh)
 * do so is present in the hwmod data, then call it and pass along the
 * return value; otherwise, return 0.
 */
-static int __init _enable_preprogram(struct omap_hwmod *oh)
+static int _enable_preprogram(struct omap_hwmod *oh)
 {
 if (!oh->class->enable_preprogram)
 return 0;
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
index b97fd672e89d..f8a6db9239bf 100644
--- a/arch/arm/mach-orion5x/common.c
+++ b/arch/arm/mach-orion5x/common.c
@@ -199,13 +199,6 @@ void __init orion5x_init_early(void)

 orion_time_set_base(TIMER_VIRT_BASE);

-/*
-* Some Orion5x devices allocate their coherent buffers from atomic
-* context. Increase size of atomic coherent pool to make sure such
-* the allocations won't fail.
-*/
-init_dma_coherent_pool_size(SZ_1M);
-
 /* Initialize the MBUS driver */
 orion5x_pcie_id(&dev, &rev);
 if (dev == MV88F5281_DEV_ID)
diff --git a/arch/arm/mach-tegra/tegra2_emc.c b/arch/arm/mach-tegra/tegra2_emc.c
index 9e8bdfa2b369..31e69a019bdd 100644
--- a/arch/arm/mach-tegra/tegra2_emc.c
+++ b/arch/arm/mach-tegra/tegra2_emc.c
@@ -307,11 +307,6 @@ static int tegra_emc_probe(struct platform_device *pdev)
 }

 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-if (!res) {
-dev_err(&pdev->dev, "missing register base\n");
-return -ENOMEM;
-}
-
 emc_regbase = devm_ioremap_resource(&pdev->dev, res);
 if (IS_ERR(emc_regbase))
 return PTR_ERR(emc_regbase);
diff --git a/arch/arm/mach-vt8500/vt8500.c b/arch/arm/mach-vt8500/vt8500.c
index 1dd281efc020..f5c33df7a597 100644
--- a/arch/arm/mach-vt8500/vt8500.c
+++ b/arch/arm/mach-vt8500/vt8500.c
@@ -173,6 +173,7 @@ static const char * const vt8500_dt_compat[] = {
 "wm,wm8505",
 "wm,wm8750",
 "wm,wm8850",
+NULL
 };

 DT_MACHINE_START(WMT_DT, "VIA/Wondermedia SoC (Device Tree Support)")
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index 251f827271e9..c019b7aaf776 100644
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -383,7 +383,7 @@ static struct resource orion_ge10_shared_resources[] = {

 static struct platform_device orion_ge10_shared = {
 .name = MV643XX_ETH_SHARED_NAME,
-.id = 1,
+.id = 2,
 .dev = {
 .platform_data = &orion_ge10_shared_data,
 },
@@ -398,8 +398,8 @@ static struct resource orion_ge10_resources[] = {

 static struct platform_device orion_ge10 = {
 .name = MV643XX_ETH_NAME,
-.id = 1,
-.num_resources = 2,
+.id = 2,
+.num_resources = 1,
 .resource = orion_ge10_resources,
 .dev = {
 .coherent_dma_mask = DMA_BIT_MASK(32),
@@ -432,7 +432,7 @@ static struct resource orion_ge11_shared_resources[] = {

 static struct platform_device orion_ge11_shared = {
 .name = MV643XX_ETH_SHARED_NAME,
-.id = 1,
+.id = 3,
 .dev = {
 .platform_data = &orion_ge11_shared_data,
 },
@@ -447,8 +447,8 @@ static struct resource orion_ge11_resources[] = {

 static struct platform_device orion_ge11 = {
 .name = MV643XX_ETH_NAME,
-.id = 1,
-.num_resources = 2,
+.id = 3,
+.num_resources = 1,
 .resource = orion_ge11_resources,
 .dev = {
 .coherent_dma_mask = DMA_BIT_MASK(32),
diff --git a/arch/arm/plat-orion/include/plat/common.h b/arch/arm/plat-orion/include/plat/common.h
index e06fc5fefa14..d9a24f605a2b 100644
--- a/arch/arm/plat-orion/include/plat/common.h
+++ b/arch/arm/plat-orion/include/plat/common.h
@@ -10,6 +10,7 @@

 #ifndef __PLAT_COMMON_H
 #include <linux/mv643xx_eth.h>
+#include <linux/platform_data/usb-ehci-orion.h>

 struct dsa_platform_data;
 struct mv_sata_platform_data;
diff --git a/arch/arm/plat-samsung/adc.c b/arch/arm/plat-samsung/adc.c
index ca07cb1b155a..79690f2f6d3f 100644
--- a/arch/arm/plat-samsung/adc.c
+++ b/arch/arm/plat-samsung/adc.c
@@ -381,11 +381,6 @@ static int s3c_adc_probe(struct platform_device *pdev)
 }

 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-if (!regs) {
-dev_err(dev, "failed to find registers\n");
-return -ENXIO;
-}
-
 adc->regs = devm_ioremap_resource(dev, regs);
 if (IS_ERR(adc->regs))
 return PTR_ERR(adc->regs);
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index d30042e39974..13609e01f4b7 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -152,11 +152,12 @@ int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
 }
 EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);

-static int __init xen_secondary_init(unsigned int cpu)
+static void __init xen_percpu_init(void *unused)
 {
 struct vcpu_register_vcpu_info info;
 struct vcpu_info *vcpup;
 int err;
+int cpu = get_cpu();

 pr_info("Xen: initializing cpu%d\n", cpu);
 vcpup = per_cpu_ptr(xen_vcpu_info, cpu);
@@ -165,14 +166,10 @@ static int __init xen_secondary_init(unsigned int cpu)
 info.offset = offset_in_page(vcpup);

 err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
-if (err) {
-pr_debug("register_vcpu_info failed: err=%d\n", err);
-} else {
-/* This cpu is using the registered vcpu info, even if
-later ones fail to. */
-per_cpu(xen_vcpu, cpu) = vcpup;
-}
-return 0;
+BUG_ON(err);
+per_cpu(xen_vcpu, cpu) = vcpup;
+
+enable_percpu_irq(xen_events_irq, 0);
 }

 static void xen_restart(char str, const char *cmd)
@@ -208,7 +205,6 @@ static int __init xen_guest_init(void)
 const char *version = NULL;
 const char *xen_prefix = "xen,xen-";
 struct resource res;
-int i;

 node = of_find_compatible_node(NULL, NULL, "xen,xen");
 if (!node) {
@@ -265,19 +261,23 @@ static int __init xen_guest_init(void)
 sizeof(struct vcpu_info));
 if (xen_vcpu_info == NULL)
 return -ENOMEM;
-for_each_online_cpu(i)
-xen_secondary_init(i);

 gnttab_init();
 if (!xen_initial_domain())
 xenbus_probe(NULL);

+return 0;
+}
+core_initcall(xen_guest_init);
+
+static int __init xen_pm_init(void)
+{
 pm_power_off = xen_power_off;
 arm_pm_restart = xen_restart;

 return 0;
 }
-core_initcall(xen_guest_init);
+subsys_initcall(xen_pm_init);

 static irqreturn_t xen_arm_callback(int irq, void *arg)
 {
@@ -285,11 +285,6 @@ static irqreturn_t xen_arm_callback(int irq, void *arg)
 return IRQ_HANDLED;
 }

-static __init void xen_percpu_enable_events(void *unused)
-{
-enable_percpu_irq(xen_events_irq, 0);
-}
-
 static int __init xen_init_events(void)
 {
 if (!xen_domain() || xen_events_irq < 0)
@@ -303,7 +298,7 @@ static int __init xen_init_events(void)
 return -EINVAL;
 }

-on_each_cpu(xen_percpu_enable_events, NULL, 0);
+on_each_cpu(xen_percpu_init, NULL, 0);

 return 0;
 }
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 48347dcf0566..56b3f6d447ae 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -122,8 +122,6 @@ endmenu

 menu "Kernel Features"

-source "kernel/time/Kconfig"
-
 config ARM64_64K_PAGES
 bool "Enable 64KB pages support"
 help
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index c8eedc604984..5aceb83b3f5c 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -82,7 +82,7 @@

 .macro enable_dbg_if_not_stepping, tmp
 mrs \tmp, mdscr_el1
-tbnz \tmp, #1, 9990f
+tbnz \tmp, #0, 9990f
 enable_dbg
 9990:
 .endm
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 0c3ba9f51376..f4726dc054b3 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -136,8 +136,6 @@ void disable_debug_monitors(enum debug_el el)
 */
 static void clear_os_lock(void *unused)
 {
-asm volatile("msr mdscr_el1, %0" : : "r" (0));
-isb();
 asm volatile("msr oslar_el1, %0" : : "r" (0));
 isb();
 }
diff --git a/arch/arm64/kernel/early_printk.c b/arch/arm64/kernel/early_printk.c
index ac974f48a7a2..fbb6e1843659 100644
--- a/arch/arm64/kernel/early_printk.c
+++ b/arch/arm64/kernel/early_printk.c
@@ -95,7 +95,7 @@ static void early_write(struct console *con, const char *s, unsigned n)
 }
 }

-static struct console early_console = {
+static struct console early_console_dev = {
 .name = "earlycon",
 .write = early_write,
 .flags = CON_PRINTBUFFER | CON_BOOT,
@@ -145,7 +145,8 @@ static int __init setup_early_printk(char *buf)
 early_base = early_io_map(paddr, EARLYCON_IOBASE);

 printch = match->printch;
-register_console(&early_console);
+early_console = &early_console_dev;
+register_console(&early_console_dev);

 return 0;
 }
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 6a9a53292590..add6ea616843 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -282,12 +282,13 @@ void __init setup_arch(char **cmdline_p)
 #endif
 }

-static int __init arm64_of_clk_init(void)
+static int __init arm64_device_init(void)
 {
 of_clk_init(NULL);
+of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 return 0;
 }
-arch_initcall(arm64_of_clk_init);
+arch_initcall(arm64_device_init);

 static DEFINE_PER_CPU(struct cpu, cpu_data);

@@ -305,13 +306,6 @@ static int __init topology_init(void)
 }
 subsys_initcall(topology_init);

-static int __init arm64_device_probe(void)
-{
-of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
-return 0;
-}
-device_initcall(arm64_device_probe);
-
 static const char *hwcap_str[] = {
 "fp",
 "asimd",
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index abe69b80cf7f..48a386094fa3 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -52,7 +52,7 @@ loop1:
 add x2, x2, #4 // add 4 (line length offset)
 mov x4, #0x3ff
 and x4, x4, x1, lsr #3 // find maximum number on the way size
-clz x5, x4 // find bit position of way size increment
+clz w5, w4 // find bit position of way size increment
 mov x7, #0x7fff
 and x7, x7, x1, lsr #13 // extract max number of the index size
 loop2:
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index f1d8b9bbfdad..a82ae8868077 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -119,8 +119,7 @@ ENTRY(__cpu_setup)

 mov x0, #3 << 20
 msr cpacr_el1, x0 // Enable FP/ASIMD
-mov x0, #1
-msr oslar_el1, x0 // Set the debug OS lock
+msr mdscr_el1, xzr // Reset mdscr_el1
 tlbi vmalle1is // invalidate I + D TLBs
 /*
 * Memory region attributes for LPAE:
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 1be13727323f..b7e59853fd33 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -118,7 +118,7 @@ static inline void set_io_port_base(unsigned long base)
 */
 static inline unsigned long virt_to_phys(volatile const void *address)
 {
-return (unsigned long)address - PAGE_OFFSET + PHYS_OFFSET;
+return __pa(address);
 }

 /*
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index e68781e18387..143875c6c95a 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -336,7 +336,7 @@ enum emulation_result {
 #define VPN2_MASK 0xffffe000
 #define TLB_IS_GLOBAL(x) (((x).tlb_lo0 & MIPS3_PG_G) && ((x).tlb_lo1 & MIPS3_PG_G))
 #define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK)
-#define TLB_ASID(x) (ASID_MASK((x).tlb_hi))
+#define TLB_ASID(x) ((x).tlb_hi & ASID_MASK)
 #define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) ? ((x).tlb_lo1 & MIPS3_PG_V) : ((x).tlb_lo0 & MIPS3_PG_V))

 struct kvm_mips_tlb {
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index 1554721e4808..820116067c10 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -67,68 +67,45 @@ extern unsigned long pgd_current[];
 TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
 #endif
 #endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

-#define ASID_INC(asid) \
-({ \
-unsigned long __asid = asid; \
-__asm__("1:\taddiu\t%0,1\t\t\t\t# patched\n\t" \
-".section\t__asid_inc,\"a\"\n\t" \
-".word\t1b\n\t" \
-".previous" \
-:"=r" (__asid) \
-:"0" (__asid)); \
-__asid; \
-})
-#define ASID_MASK(asid) \
-({ \
-unsigned long __asid = asid; \
-__asm__("1:\tandi\t%0,%1,0xfc0\t\t\t# patched\n\t" \
-".section\t__asid_mask,\"a\"\n\t" \
-".word\t1b\n\t" \
-".previous" \
-:"=r" (__asid) \
-:"r" (__asid)); \
-__asid; \
-})
-#define ASID_VERSION_MASK \
-({ \
-unsigned long __asid; \
-__asm__("1:\taddiu\t%0,$0,0xff00\t\t\t\t# patched\n\t" \
-".section\t__asid_version_mask,\"a\"\n\t" \
-".word\t1b\n\t" \
-".previous" \
-:"=r" (__asid)); \
-__asid; \
-})
-#define ASID_FIRST_VERSION \
-({ \
-unsigned long __asid = asid; \
-__asm__("1:\tli\t%0,0x100\t\t\t\t# patched\n\t" \
-".section\t__asid_first_version,\"a\"\n\t" \
-".word\t1b\n\t" \
-".previous" \
-:"=r" (__asid)); \
-__asid; \
-})
-
-#define ASID_FIRST_VERSION_R3000 0x1000
-#define ASID_FIRST_VERSION_R4000 0x100
-#define ASID_FIRST_VERSION_R8000 0x1000
-#define ASID_FIRST_VERSION_RM9000 0x1000
+#define ASID_INC 0x40
+#define ASID_MASK 0xfc0
+
+#elif defined(CONFIG_CPU_R8000)
+
+#define ASID_INC 0x10
+#define ASID_MASK 0xff0
+
+#elif defined(CONFIG_MIPS_MT_SMTC)
+
+#define ASID_INC 0x1
+extern unsigned long smtc_asid_mask;
+#define ASID_MASK (smtc_asid_mask)
+#define HW_ASID_MASK 0xff
+/* End SMTC/34K debug hack */
+#else /* FIXME: not correct for R6000 */
+
+#define ASID_INC 0x1
+#define ASID_MASK 0xff

-#ifdef CONFIG_MIPS_MT_SMTC
-#define SMTC_HW_ASID_MASK 0xff
-extern unsigned int smtc_asid_mask;
 #endif

 #define cpu_context(cpu, mm) ((mm)->context.asid[cpu])
-#define cpu_asid(cpu, mm) ASID_MASK(cpu_context((cpu), (mm)))
+#define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & ASID_MASK)
 #define asid_cache(cpu) (cpu_data[cpu].asid_cache)

 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
 {
 }

+/*
+* All unused by hardware upper bits will be considered
+* as a software asid extension.
+*/
+#define ASID_VERSION_MASK ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
+#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)
+
 #ifndef CONFIG_MIPS_MT_SMTC
 /* Normal, classic MIPS get_new_mmu_context */
 static inline void
@@ -137,7 +114,7 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
 extern void kvm_local_flush_tlb_all(void);
 unsigned long asid = asid_cache(cpu);

-if (!ASID_MASK((asid = ASID_INC(asid)))) {
+if (! ((asid += ASID_INC) & ASID_MASK) ) {
 if (cpu_has_vtag_icache)
 flush_icache_all();
 #ifdef CONFIG_VIRTUALIZATION
@@ -200,7 +177,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 * free up the ASID value for use and flush any old
 * instances of it from the TLB.
 */
-oldasid = ASID_MASK(read_c0_entryhi());
+oldasid = (read_c0_entryhi() & ASID_MASK);
 if(smtc_live_asid[mytlb][oldasid]) {
 smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
 if(smtc_live_asid[mytlb][oldasid] == 0)
@@ -211,7 +188,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 * having ASID_MASK smaller than the hardware maximum,
 * make sure no "soft" bits become "hard"...
 */
-write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) |
+write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
 cpu_asid(cpu, next));
 ehb(); /* Make sure it propagates to TCStatus */
 evpe(mtflags);
@@ -264,15 +241,15 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
 #ifdef CONFIG_MIPS_MT_SMTC
 /* See comments for similar code above */
 mtflags = dvpe();
-oldasid = ASID_MASK(read_c0_entryhi());
+oldasid = read_c0_entryhi() & ASID_MASK;
 if(smtc_live_asid[mytlb][oldasid]) {
 smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
 if(smtc_live_asid[mytlb][oldasid] == 0)
 smtc_flush_tlb_asid(oldasid);
 }
 /* See comments for similar code above */
-write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) |
+write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
 cpu_asid(cpu, next));
 ehb(); /* Make sure it propagates to TCStatus */
 evpe(mtflags);
 #else
@@ -309,14 +286,14 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
 #ifdef CONFIG_MIPS_MT_SMTC
 /* See comments for similar code above */
 prevvpe = dvpe();
-oldasid = ASID_MASK(read_c0_entryhi());
+oldasid = (read_c0_entryhi() & ASID_MASK);
 if (smtc_live_asid[mytlb][oldasid]) {
 smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
 if(smtc_live_asid[mytlb][oldasid] == 0)
 smtc_flush_tlb_asid(oldasid);
 }
 /* See comments for similar code above */
-write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK)
+write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK)
 | cpu_asid(cpu, mm));
 ehb(); /* Make sure it propagates to TCStatus */
 evpe(prevvpe);
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index eab99e536b5c..ec1ca537fbc1 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -46,7 +46,6 @@
 #endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */

 #include <linux/pfn.h>
-#include <asm/io.h>

 extern void build_clear_page(void);
 extern void build_copy_page(void);
@@ -151,6 +150,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
 ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)
 #endif
 #define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
+#include <asm/io.h>

 /*
 * RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad
diff --git a/arch/mips/kernel/crash_dump.c b/arch/mips/kernel/crash_dump.c
index 35bed0d2342c..3be9e7bb30ff 100644
--- a/arch/mips/kernel/crash_dump.c
+++ b/arch/mips/kernel/crash_dump.c
@@ -2,6 +2,7 @@
 #include <linux/bootmem.h>
 #include <linux/crash_dump.h>
 #include <asm/uaccess.h>
+#include <linux/slab.h>

 static int __init parse_savemaxmem(char *p)
 {
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 5c2ba9f08a80..9098829bfcb0 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -493,7 +493,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
 .set noreorder
 /* check if TLB contains a entry for EPC */
 MFC0 k1, CP0_ENTRYHI
-andi k1, 0xff /* ASID_MASK patched at run-time!! */
+andi k1, 0xff /* ASID_MASK */
 MFC0 k0, CP0_EPC
 PTR_SRL k0, _PAGE_SHIFT + 1
 PTR_SLL k0, _PAGE_SHIFT + 1
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index eb902c1f0cad..a682a87bcc04 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -224,6 +224,9 @@ struct mips_frame_info {
 int pc_offset;
 };

+#define J_TARGET(pc,target) \
+(((unsigned long)(pc) & 0xf0000000) | ((target) << 2))
+
 static inline int is_ra_save_ins(union mips_instruction *ip)
 {
 #ifdef CONFIG_CPU_MICROMIPS
@@ -264,7 +267,7 @@ static inline int is_ra_save_ins(union mips_instruction *ip)
 #endif
 }

-static inline int is_jal_jalr_jr_ins(union mips_instruction *ip)
+static inline int is_jump_ins(union mips_instruction *ip)
 {
 #ifdef CONFIG_CPU_MICROMIPS
 /*
@@ -288,6 +291,8 @@ static inline int is_jal_jalr_jr_ins(union mips_instruction *ip)
 return 0;
 return (((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op);
 #else
+if (ip->j_format.opcode == j_op)
+return 1;
 if (ip->j_format.opcode == jal_op)
 return 1;
 if (ip->r_format.opcode != spec_op)
@@ -350,7 +355,7 @@ static int get_frame_info(struct mips_frame_info *info)

 for (i = 0; i < max_insns; i++, ip++) {

-if (is_jal_jalr_jr_ins(ip))
+if (is_jump_ins(ip))
 break;
 if (!info->frame_size) {
 if (is_sp_move_ins(ip))
@@ -393,15 +398,42 @@ err:

 static struct mips_frame_info schedule_mfi __read_mostly;

+#ifdef CONFIG_KALLSYMS
+static unsigned long get___schedule_addr(void)
+{
+return kallsyms_lookup_name("__schedule");
+}
+#else
+static unsigned long get___schedule_addr(void)
+{
+union mips_instruction *ip = (void *)schedule;
+int max_insns = 8;
+int i;
+
+for (i = 0; i < max_insns; i++, ip++) {
+if (ip->j_format.opcode == j_op)
+return J_TARGET(ip, ip->j_format.target);
+}
+return 0;
+}
+#endif
+
 static int __init frame_info_init(void)
 {
 unsigned long size = 0;
 #ifdef CONFIG_KALLSYMS
 unsigned long ofs;
+#endif
+unsigned long addr;

-kallsyms_lookup_size_offset((unsigned long)schedule, &size, &ofs);
+addr = get___schedule_addr();
+if (!addr)
+addr = (unsigned long)schedule;
+
+#ifdef CONFIG_KALLSYMS
+kallsyms_lookup_size_offset(addr, &size, &ofs);
 #endif
-schedule_mfi.func = schedule;
+schedule_mfi.func = (void *)addr;
 schedule_mfi.func_size = size;

 get_frame_info(&schedule_mfi);
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index 31d22f3121c9..7186222dc5bb 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c | |||
@@ -111,7 +111,7 @@ static int vpe0limit; | |||
111 | static int ipibuffers; | 111 | static int ipibuffers; |
112 | static int nostlb; | 112 | static int nostlb; |
113 | static int asidmask; | 113 | static int asidmask; |
114 | unsigned int smtc_asid_mask = 0xff; | 114 | unsigned long smtc_asid_mask = 0xff; |
115 | 115 | ||
116 | static int __init vpe0tcs(char *str) | 116 | static int __init vpe0tcs(char *str) |
117 | { | 117 | { |
@@ -1395,7 +1395,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) | |||
1395 | asid = asid_cache(cpu); | 1395 | asid = asid_cache(cpu); |
1396 | 1396 | ||
1397 | do { | 1397 | do { |
1398 | if (!ASID_MASK(ASID_INC(asid))) { | 1398 | if (!((asid += ASID_INC) & ASID_MASK) ) { |
1399 | if (cpu_has_vtag_icache) | 1399 | if (cpu_has_vtag_icache) |
1400 | flush_icache_all(); | 1400 | flush_icache_all(); |
1401 | /* Traverse all online CPUs (hack requires contiguous range) */ | 1401 | /* Traverse all online CPUs (hack requires contiguous range) */ |
@@ -1414,7 +1414,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) | |||
1414 | mips_ihb(); | 1414 | mips_ihb(); |
1415 | } | 1415 | } |
1416 | tcstat = read_tc_c0_tcstatus(); | 1416 | tcstat = read_tc_c0_tcstatus(); |
1417 | smtc_live_asid[tlb][ASID_MASK(tcstat)] |= (asiduse)(0x1 << i); | 1417 | smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i); |
1418 | if (!prevhalt) | 1418 | if (!prevhalt) |
1419 | write_tc_c0_tchalt(0); | 1419 | write_tc_c0_tchalt(0); |
1420 | } | 1420 | } |
@@ -1423,7 +1423,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu) | |||
1423 | asid = ASID_FIRST_VERSION; | 1423 | asid = ASID_FIRST_VERSION; |
1424 | local_flush_tlb_all(); /* start new asid cycle */ | 1424 | local_flush_tlb_all(); /* start new asid cycle */ |
1425 | } | 1425 | } |
1426 | } while (smtc_live_asid[tlb][ASID_MASK(asid)]); | 1426 | } while (smtc_live_asid[tlb][(asid & ASID_MASK)]); |
1427 | 1427 | ||
1428 | /* | 1428 | /* |
1429 | * SMTC shares the TLB within VPEs and possibly across all VPEs. | 1429 | * SMTC shares the TLB within VPEs and possibly across all VPEs. |
@@ -1461,7 +1461,7 @@ void smtc_flush_tlb_asid(unsigned long asid) | |||
1461 | tlb_read(); | 1461 | tlb_read(); |
1462 | ehb(); | 1462 | ehb(); |
1463 | ehi = read_c0_entryhi(); | 1463 | ehi = read_c0_entryhi(); |
1464 | if (ASID_MASK(ehi) == asid) { | 1464 | if ((ehi & ASID_MASK) == asid) { |
1465 | /* | 1465 | /* |
1466 | * Invalidate only entries with specified ASID, | 1466 | * Invalidate only entries with specified ASID, |
1467 | * making sure all entries differ. | 1467 | * making sure all entries differ. |
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 77cff1f6d050..cb14db3c5764 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c | |||
@@ -1656,7 +1656,6 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu) | |||
1656 | unsigned int cpu = smp_processor_id(); | 1656 | unsigned int cpu = smp_processor_id(); |
1657 | unsigned int status_set = ST0_CU0; | 1657 | unsigned int status_set = ST0_CU0; |
1658 | unsigned int hwrena = cpu_hwrena_impl_bits; | 1658 | unsigned int hwrena = cpu_hwrena_impl_bits; |
1659 | unsigned long asid = 0; | ||
1660 | #ifdef CONFIG_MIPS_MT_SMTC | 1659 | #ifdef CONFIG_MIPS_MT_SMTC |
1661 | int secondaryTC = 0; | 1660 | int secondaryTC = 0; |
1662 | int bootTC = (cpu == 0); | 1661 | int bootTC = (cpu == 0); |
@@ -1740,9 +1739,8 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu) | |||
1740 | } | 1739 | } |
1741 | #endif /* CONFIG_MIPS_MT_SMTC */ | 1740 | #endif /* CONFIG_MIPS_MT_SMTC */ |
1742 | 1741 | ||
1743 | asid = ASID_FIRST_VERSION; | 1742 | if (!cpu_data[cpu].asid_cache) |
1744 | cpu_data[cpu].asid_cache = asid; | 1743 | cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; |
1745 | TLBMISS_HANDLER_SETUP(); | ||
1746 | 1744 | ||
1747 | atomic_inc(&init_mm.mm_count); | 1745 | atomic_inc(&init_mm.mm_count); |
1748 | current->active_mm = &init_mm; | 1746 | current->active_mm = &init_mm; |
diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c index 2b2bac9a40aa..4b6274b47f33 100644 --- a/arch/mips/kvm/kvm_mips_emul.c +++ b/arch/mips/kvm/kvm_mips_emul.c | |||
@@ -525,16 +525,18 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, | |||
525 | printk("MTCz, cop0->reg[EBASE]: %#lx\n", | 525 | printk("MTCz, cop0->reg[EBASE]: %#lx\n", |
526 | kvm_read_c0_guest_ebase(cop0)); | 526 | kvm_read_c0_guest_ebase(cop0)); |
527 | } else if (rd == MIPS_CP0_TLB_HI && sel == 0) { | 527 | } else if (rd == MIPS_CP0_TLB_HI && sel == 0) { |
528 | uint32_t nasid = ASID_MASK(vcpu->arch.gprs[rt]); | 528 | uint32_t nasid = |
529 | vcpu->arch.gprs[rt] & ASID_MASK; | ||
529 | if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) | 530 | if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) |
530 | && | 531 | && |
531 | (ASID_MASK(kvm_read_c0_guest_entryhi(cop0)) | 532 | ((kvm_read_c0_guest_entryhi(cop0) & |
532 | != nasid)) { | 533 | ASID_MASK) != nasid)) { |
533 | 534 | ||
534 | kvm_debug | 535 | kvm_debug |
535 | ("MTCz, change ASID from %#lx to %#lx\n", | 536 | ("MTCz, change ASID from %#lx to %#lx\n", |
536 | ASID_MASK(kvm_read_c0_guest_entryhi(cop0)), | 537 | kvm_read_c0_guest_entryhi(cop0) & |
537 | ASID_MASK(vcpu->arch.gprs[rt])); | 538 | ASID_MASK, |
539 | vcpu->arch.gprs[rt] & ASID_MASK); | ||
538 | 540 | ||
539 | /* Blow away the shadow host TLBs */ | 541 | /* Blow away the shadow host TLBs */ |
540 | kvm_mips_flush_host_tlb(1); | 542 | kvm_mips_flush_host_tlb(1); |
@@ -986,7 +988,8 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause, | |||
986 | * resulting handler will do the right thing | 988 | * resulting handler will do the right thing |
987 | */ | 989 | */ |
988 | index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) | | 990 | index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) | |
989 | ASID_MASK(kvm_read_c0_guest_entryhi(cop0))); | 991 | (kvm_read_c0_guest_entryhi |
992 | (cop0) & ASID_MASK)); | ||
990 | 993 | ||
991 | if (index < 0) { | 994 | if (index < 0) { |
992 | vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK); | 995 | vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK); |
@@ -1151,7 +1154,7 @@ kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc, | |||
1151 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 1154 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
1152 | enum emulation_result er = EMULATE_DONE; | 1155 | enum emulation_result er = EMULATE_DONE; |
1153 | unsigned long entryhi = (vcpu->arch. host_cp0_badvaddr & VPN2_MASK) | | 1156 | unsigned long entryhi = (vcpu->arch. host_cp0_badvaddr & VPN2_MASK) | |
1154 | ASID_MASK(kvm_read_c0_guest_entryhi(cop0)); | 1157 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); |
1155 | 1158 | ||
1156 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | 1159 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { |
1157 | /* save old pc */ | 1160 | /* save old pc */ |
@@ -1198,7 +1201,7 @@ kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc, | |||
1198 | enum emulation_result er = EMULATE_DONE; | 1201 | enum emulation_result er = EMULATE_DONE; |
1199 | unsigned long entryhi = | 1202 | unsigned long entryhi = |
1200 | (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | | 1203 | (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | |
1201 | ASID_MASK(kvm_read_c0_guest_entryhi(cop0)); | 1204 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); |
1202 | 1205 | ||
1203 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | 1206 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { |
1204 | /* save old pc */ | 1207 | /* save old pc */ |
@@ -1243,7 +1246,7 @@ kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc, | |||
1243 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 1246 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
1244 | enum emulation_result er = EMULATE_DONE; | 1247 | enum emulation_result er = EMULATE_DONE; |
1245 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | | 1248 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | |
1246 | ASID_MASK(kvm_read_c0_guest_entryhi(cop0)); | 1249 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); |
1247 | 1250 | ||
1248 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | 1251 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { |
1249 | /* save old pc */ | 1252 | /* save old pc */ |
@@ -1287,7 +1290,7 @@ kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc, | |||
1287 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 1290 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
1288 | enum emulation_result er = EMULATE_DONE; | 1291 | enum emulation_result er = EMULATE_DONE; |
1289 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | | 1292 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | |
1290 | ASID_MASK(kvm_read_c0_guest_entryhi(cop0)); | 1293 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); |
1291 | 1294 | ||
1292 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | 1295 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { |
1293 | /* save old pc */ | 1296 | /* save old pc */ |
@@ -1356,7 +1359,7 @@ kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc, | |||
1356 | { | 1359 | { |
1357 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 1360 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
1358 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | | 1361 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | |
1359 | ASID_MASK(kvm_read_c0_guest_entryhi(cop0)); | 1362 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); |
1360 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 1363 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
1361 | enum emulation_result er = EMULATE_DONE; | 1364 | enum emulation_result er = EMULATE_DONE; |
1362 | 1365 | ||
@@ -1783,8 +1786,8 @@ kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc, | |||
1783 | */ | 1786 | */ |
1784 | index = kvm_mips_guest_tlb_lookup(vcpu, | 1787 | index = kvm_mips_guest_tlb_lookup(vcpu, |
1785 | (va & VPN2_MASK) | | 1788 | (va & VPN2_MASK) | |
1786 | ASID_MASK(kvm_read_c0_guest_entryhi | 1789 | (kvm_read_c0_guest_entryhi |
1787 | (vcpu->arch.cop0))); | 1790 | (vcpu->arch.cop0) & ASID_MASK)); |
1788 | if (index < 0) { | 1791 | if (index < 0) { |
1789 | if (exccode == T_TLB_LD_MISS) { | 1792 | if (exccode == T_TLB_LD_MISS) { |
1790 | er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu); | 1793 | er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu); |
diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/kvm_tlb.c index 89511a9258d3..e3f0d9b8b6c5 100644 --- a/arch/mips/kvm/kvm_tlb.c +++ b/arch/mips/kvm/kvm_tlb.c | |||
@@ -51,13 +51,13 @@ EXPORT_SYMBOL(kvm_mips_is_error_pfn); | |||
51 | 51 | ||
52 | uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu) | 52 | uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu) |
53 | { | 53 | { |
54 | return ASID_MASK(vcpu->arch.guest_kernel_asid[smp_processor_id()]); | 54 | return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK; |
55 | } | 55 | } |
56 | 56 | ||
57 | 57 | ||
58 | uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu) | 58 | uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu) |
59 | { | 59 | { |
60 | return ASID_MASK(vcpu->arch.guest_user_asid[smp_processor_id()]); | 60 | return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK; |
61 | } | 61 | } |
62 | 62 | ||
63 | inline uint32_t kvm_mips_get_commpage_asid (struct kvm_vcpu *vcpu) | 63 | inline uint32_t kvm_mips_get_commpage_asid (struct kvm_vcpu *vcpu) |
@@ -84,7 +84,7 @@ void kvm_mips_dump_host_tlbs(void) | |||
84 | old_pagemask = read_c0_pagemask(); | 84 | old_pagemask = read_c0_pagemask(); |
85 | 85 | ||
86 | printk("HOST TLBs:\n"); | 86 | printk("HOST TLBs:\n"); |
87 | printk("ASID: %#lx\n", ASID_MASK(read_c0_entryhi())); | 87 | printk("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK); |
88 | 88 | ||
89 | for (i = 0; i < current_cpu_data.tlbsize; i++) { | 89 | for (i = 0; i < current_cpu_data.tlbsize; i++) { |
90 | write_c0_index(i); | 90 | write_c0_index(i); |
@@ -428,7 +428,7 @@ int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi) | |||
428 | 428 | ||
429 | for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) { | 429 | for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) { |
430 | if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) == ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) && | 430 | if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) == ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) && |
431 | (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == ASID_MASK(entryhi)))) { | 431 | (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == (entryhi & ASID_MASK)))) { |
432 | index = i; | 432 | index = i; |
433 | break; | 433 | break; |
434 | } | 434 | } |
@@ -626,7 +626,7 @@ kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu, | |||
626 | { | 626 | { |
627 | unsigned long asid = asid_cache(cpu); | 627 | unsigned long asid = asid_cache(cpu); |
628 | 628 | ||
629 | if (!(ASID_MASK(ASID_INC(asid)))) { | 629 | if (!((asid += ASID_INC) & ASID_MASK)) { |
630 | if (cpu_has_vtag_icache) { | 630 | if (cpu_has_vtag_icache) { |
631 | flush_icache_all(); | 631 | flush_icache_all(); |
632 | } | 632 | } |
@@ -804,7 +804,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
804 | if (!newasid) { | 804 | if (!newasid) { |
805 | /* If we preempted while the guest was executing, then reload the pre-empted ASID */ | 805 | /* If we preempted while the guest was executing, then reload the pre-empted ASID */ |
806 | if (current->flags & PF_VCPU) { | 806 | if (current->flags & PF_VCPU) { |
807 | write_c0_entryhi(ASID_MASK(vcpu->arch.preempt_entryhi)); | 807 | write_c0_entryhi(vcpu->arch. |
808 | preempt_entryhi & ASID_MASK); | ||
808 | ehb(); | 809 | ehb(); |
809 | } | 810 | } |
810 | } else { | 811 | } else { |
@@ -816,11 +817,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
816 | */ | 817 | */ |
817 | if (current->flags & PF_VCPU) { | 818 | if (current->flags & PF_VCPU) { |
818 | if (KVM_GUEST_KERNEL_MODE(vcpu)) | 819 | if (KVM_GUEST_KERNEL_MODE(vcpu)) |
819 | write_c0_entryhi(ASID_MASK(vcpu->arch. | 820 | write_c0_entryhi(vcpu->arch. |
820 | guest_kernel_asid[cpu])); | 821 | guest_kernel_asid[cpu] & |
822 | ASID_MASK); | ||
821 | else | 823 | else |
822 | write_c0_entryhi(ASID_MASK(vcpu->arch. | 824 | write_c0_entryhi(vcpu->arch. |
823 | guest_user_asid[cpu])); | 825 | guest_user_asid[cpu] & |
826 | ASID_MASK); | ||
824 | ehb(); | 827 | ehb(); |
825 | } | 828 | } |
826 | } | 829 | } |
@@ -879,7 +882,8 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu) | |||
879 | kvm_mips_guest_tlb_lookup(vcpu, | 882 | kvm_mips_guest_tlb_lookup(vcpu, |
880 | ((unsigned long) opc & VPN2_MASK) | 883 | ((unsigned long) opc & VPN2_MASK) |
881 | | | 884 | | |
882 | ASID_MASK(kvm_read_c0_guest_entryhi(cop0))); | 885 | (kvm_read_c0_guest_entryhi |
886 | (cop0) & ASID_MASK)); | ||
883 | if (index < 0) { | 887 | if (index < 0) { |
884 | kvm_err | 888 | kvm_err |
885 | ("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n", | 889 | ("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n", |
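The MIPS hunks in this commit all perform the same mechanical conversion: the function-style ASID_MASK(x) and ASID_INC(x) helpers are dropped in favour of plain constants, so call sites read x & ASID_MASK and asid += ASID_INC. A minimal, compilable sketch of the resulting pattern is below; the 0xff and 0x1 values mirror the R4000-style defaults visible elsewhere in the diff and are assumptions here, not definitions taken from mmu_context.h.

#include <stdio.h>

#define ASID_MASK 0xffUL		/* plain constant instead of ASID_MASK(x) */
#define ASID_INC  0x1UL			/* plain constant instead of ASID_INC(x) */

int main(void)
{
	unsigned long entryhi = 0x12345642;	/* made-up EntryHi value */
	unsigned long asid_cache = 0x1ff;	/* made-up per-CPU ASID cache */

	/* Old style: ASID_MASK(entryhi); new style: entryhi & ASID_MASK. */
	printf("current asid = %#lx\n", entryhi & ASID_MASK);

	/* The allocation loop pattern from smtc.c and kvm_tlb.c: bump the
	 * cache and detect wrap-around of the low ASID bits. */
	if (!((asid_cache += ASID_INC) & ASID_MASK))
		printf("ASID space wrapped, flush caches and start a new cycle\n");

	printf("next asid = %#lx\n", asid_cache & ASID_MASK);
	return 0;
}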
diff --git a/arch/mips/lantiq/xway/gptu.c b/arch/mips/lantiq/xway/gptu.c index 9861c8669fab..850821df924c 100644 --- a/arch/mips/lantiq/xway/gptu.c +++ b/arch/mips/lantiq/xway/gptu.c | |||
@@ -144,10 +144,6 @@ static int gptu_probe(struct platform_device *pdev) | |||
144 | } | 144 | } |
145 | 145 | ||
146 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 146 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
147 | if (!res) { | ||
148 | dev_err(&pdev->dev, "Failed to get resource\n"); | ||
149 | return -ENOMEM; | ||
150 | } | ||
151 | 147 | ||
152 | /* remap gptu register range */ | 148 | /* remap gptu register range */ |
153 | gptu_membase = devm_ioremap_resource(&pdev->dev, res); | 149 | gptu_membase = devm_ioremap_resource(&pdev->dev, res); |
@@ -169,6 +165,8 @@ static int gptu_probe(struct platform_device *pdev) | |||
169 | if (((gptu_r32(GPTU_ID) >> 8) & 0xff) != GPTU_MAGIC) { | 165 | if (((gptu_r32(GPTU_ID) >> 8) & 0xff) != GPTU_MAGIC) { |
170 | dev_err(&pdev->dev, "Failed to find magic\n"); | 166 | dev_err(&pdev->dev, "Failed to find magic\n"); |
171 | gptu_hwexit(); | 167 | gptu_hwexit(); |
168 | clk_disable(clk); | ||
169 | clk_put(clk); | ||
172 | return -ENAVAIL; | 170 | return -ENAVAIL; |
173 | } | 171 | } |
174 | 172 | ||
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c index 8a12d00908e0..32b9f21bfd85 100644 --- a/arch/mips/lib/dump_tlb.c +++ b/arch/mips/lib/dump_tlb.c | |||
@@ -11,7 +11,6 @@ | |||
11 | #include <asm/page.h> | 11 | #include <asm/page.h> |
12 | #include <asm/pgtable.h> | 12 | #include <asm/pgtable.h> |
13 | #include <asm/tlbdebug.h> | 13 | #include <asm/tlbdebug.h> |
14 | #include <asm/mmu_context.h> | ||
15 | 14 | ||
16 | static inline const char *msk2str(unsigned int mask) | 15 | static inline const char *msk2str(unsigned int mask) |
17 | { | 16 | { |
@@ -56,7 +55,7 @@ static void dump_tlb(int first, int last) | |||
56 | s_pagemask = read_c0_pagemask(); | 55 | s_pagemask = read_c0_pagemask(); |
57 | s_entryhi = read_c0_entryhi(); | 56 | s_entryhi = read_c0_entryhi(); |
58 | s_index = read_c0_index(); | 57 | s_index = read_c0_index(); |
59 | asid = ASID_MASK(s_entryhi); | 58 | asid = s_entryhi & 0xff; |
60 | 59 | ||
61 | for (i = first; i <= last; i++) { | 60 | for (i = first; i <= last; i++) { |
62 | write_c0_index(i); | 61 | write_c0_index(i); |
@@ -86,7 +85,7 @@ static void dump_tlb(int first, int last) | |||
86 | 85 | ||
87 | printk("va=%0*lx asid=%02lx\n", | 86 | printk("va=%0*lx asid=%02lx\n", |
88 | width, (entryhi & ~0x1fffUL), | 87 | width, (entryhi & ~0x1fffUL), |
89 | ASID_MASK(entryhi)); | 88 | entryhi & 0xff); |
90 | printk("\t[pa=%0*llx c=%d d=%d v=%d g=%d] ", | 89 | printk("\t[pa=%0*llx c=%d d=%d v=%d g=%d] ", |
91 | width, | 90 | width, |
92 | (entrylo0 << 6) & PAGE_MASK, c0, | 91 | (entrylo0 << 6) & PAGE_MASK, c0, |
diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c index 8327698b9937..91615c2ef0cf 100644 --- a/arch/mips/lib/r3k_dump_tlb.c +++ b/arch/mips/lib/r3k_dump_tlb.c | |||
@@ -9,7 +9,6 @@ | |||
9 | #include <linux/mm.h> | 9 | #include <linux/mm.h> |
10 | 10 | ||
11 | #include <asm/mipsregs.h> | 11 | #include <asm/mipsregs.h> |
12 | #include <asm/mmu_context.h> | ||
13 | #include <asm/page.h> | 12 | #include <asm/page.h> |
14 | #include <asm/pgtable.h> | 13 | #include <asm/pgtable.h> |
15 | #include <asm/tlbdebug.h> | 14 | #include <asm/tlbdebug.h> |
@@ -22,7 +21,7 @@ static void dump_tlb(int first, int last) | |||
22 | unsigned int asid; | 21 | unsigned int asid; |
23 | unsigned long entryhi, entrylo0; | 22 | unsigned long entryhi, entrylo0; |
24 | 23 | ||
25 | asid = ASID_MASK(read_c0_entryhi()); | 24 | asid = read_c0_entryhi() & 0xfc0; |
26 | 25 | ||
27 | for (i = first; i <= last; i++) { | 26 | for (i = first; i <= last; i++) { |
28 | write_c0_index(i<<8); | 27 | write_c0_index(i<<8); |
@@ -36,7 +35,7 @@ static void dump_tlb(int first, int last) | |||
36 | 35 | ||
37 | /* Unused entries have a virtual address of KSEG0. */ | 36 | /* Unused entries have a virtual address of KSEG0. */ |
38 | if ((entryhi & 0xffffe000) != 0x80000000 | 37 | if ((entryhi & 0xffffe000) != 0x80000000 |
39 | && (ASID_MASK(entryhi) == asid)) { | 38 | && (entryhi & 0xfc0) == asid) { |
40 | /* | 39 | /* |
41 | * Only print entries in use | 40 | * Only print entries in use |
42 | */ | 41 | */ |
@@ -45,7 +44,7 @@ static void dump_tlb(int first, int last) | |||
45 | printk("va=%08lx asid=%08lx" | 44 | printk("va=%08lx asid=%08lx" |
46 | " [pa=%06lx n=%d d=%d v=%d g=%d]", | 45 | " [pa=%06lx n=%d d=%d v=%d g=%d]", |
47 | (entryhi & 0xffffe000), | 46 | (entryhi & 0xffffe000), |
48 | ASID_MASK(entryhi), | 47 | entryhi & 0xfc0, |
49 | entrylo0 & PAGE_MASK, | 48 | entrylo0 & PAGE_MASK, |
50 | (entrylo0 & (1 << 11)) ? 1 : 0, | 49 | (entrylo0 & (1 << 11)) ? 1 : 0, |
51 | (entrylo0 & (1 << 10)) ? 1 : 0, | 50 | (entrylo0 & (1 << 10)) ? 1 : 0, |
diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c index 4a13c150f31b..a63d1ed0827f 100644 --- a/arch/mips/mm/tlb-r3k.c +++ b/arch/mips/mm/tlb-r3k.c | |||
@@ -51,7 +51,7 @@ void local_flush_tlb_all(void) | |||
51 | #endif | 51 | #endif |
52 | 52 | ||
53 | local_irq_save(flags); | 53 | local_irq_save(flags); |
54 | old_ctx = ASID_MASK(read_c0_entryhi()); | 54 | old_ctx = read_c0_entryhi() & ASID_MASK; |
55 | write_c0_entrylo0(0); | 55 | write_c0_entrylo0(0); |
56 | entry = r3k_have_wired_reg ? read_c0_wired() : 8; | 56 | entry = r3k_have_wired_reg ? read_c0_wired() : 8; |
57 | for (; entry < current_cpu_data.tlbsize; entry++) { | 57 | for (; entry < current_cpu_data.tlbsize; entry++) { |
@@ -87,13 +87,13 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, | |||
87 | 87 | ||
88 | #ifdef DEBUG_TLB | 88 | #ifdef DEBUG_TLB |
89 | printk("[tlbrange<%lu,0x%08lx,0x%08lx>]", | 89 | printk("[tlbrange<%lu,0x%08lx,0x%08lx>]", |
90 | ASID_MASK(cpu_context(cpu, mm)), start, end); | 90 | cpu_context(cpu, mm) & ASID_MASK, start, end); |
91 | #endif | 91 | #endif |
92 | local_irq_save(flags); | 92 | local_irq_save(flags); |
93 | size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; | 93 | size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; |
94 | if (size <= current_cpu_data.tlbsize) { | 94 | if (size <= current_cpu_data.tlbsize) { |
95 | int oldpid = ASID_MASK(read_c0_entryhi()); | 95 | int oldpid = read_c0_entryhi() & ASID_MASK; |
96 | int newpid = ASID_MASK(cpu_context(cpu, mm)); | 96 | int newpid = cpu_context(cpu, mm) & ASID_MASK; |
97 | 97 | ||
98 | start &= PAGE_MASK; | 98 | start &= PAGE_MASK; |
99 | end += PAGE_SIZE - 1; | 99 | end += PAGE_SIZE - 1; |
@@ -166,10 +166,10 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) | |||
166 | #ifdef DEBUG_TLB | 166 | #ifdef DEBUG_TLB |
167 | printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page); | 167 | printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page); |
168 | #endif | 168 | #endif |
169 | newpid = ASID_MASK(cpu_context(cpu, vma->vm_mm)); | 169 | newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK; |
170 | page &= PAGE_MASK; | 170 | page &= PAGE_MASK; |
171 | local_irq_save(flags); | 171 | local_irq_save(flags); |
172 | oldpid = ASID_MASK(read_c0_entryhi()); | 172 | oldpid = read_c0_entryhi() & ASID_MASK; |
173 | write_c0_entryhi(page | newpid); | 173 | write_c0_entryhi(page | newpid); |
174 | BARRIER; | 174 | BARRIER; |
175 | tlb_probe(); | 175 | tlb_probe(); |
@@ -197,10 +197,10 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte) | |||
197 | if (current->active_mm != vma->vm_mm) | 197 | if (current->active_mm != vma->vm_mm) |
198 | return; | 198 | return; |
199 | 199 | ||
200 | pid = ASID_MASK(read_c0_entryhi()); | 200 | pid = read_c0_entryhi() & ASID_MASK; |
201 | 201 | ||
202 | #ifdef DEBUG_TLB | 202 | #ifdef DEBUG_TLB |
203 | if ((pid != ASID_MASK(cpu_context(cpu, vma->vm_mm))) || (cpu_context(cpu, vma->vm_mm) == 0)) { | 203 | if ((pid != (cpu_context(cpu, vma->vm_mm) & ASID_MASK)) || (cpu_context(cpu, vma->vm_mm) == 0)) { |
204 | printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n", | 204 | printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n", |
205 | (cpu_context(cpu, vma->vm_mm)), pid); | 205 | (cpu_context(cpu, vma->vm_mm)), pid); |
206 | } | 206 | } |
@@ -241,7 +241,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, | |||
241 | 241 | ||
242 | local_irq_save(flags); | 242 | local_irq_save(flags); |
243 | /* Save old context and create impossible VPN2 value */ | 243 | /* Save old context and create impossible VPN2 value */ |
244 | old_ctx = ASID_MASK(read_c0_entryhi()); | 244 | old_ctx = read_c0_entryhi() & ASID_MASK; |
245 | old_pagemask = read_c0_pagemask(); | 245 | old_pagemask = read_c0_pagemask(); |
246 | w = read_c0_wired(); | 246 | w = read_c0_wired(); |
247 | write_c0_wired(w + 1); | 247 | write_c0_wired(w + 1); |
@@ -264,7 +264,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, | |||
264 | #endif | 264 | #endif |
265 | 265 | ||
266 | local_irq_save(flags); | 266 | local_irq_save(flags); |
267 | old_ctx = ASID_MASK(read_c0_entryhi()); | 267 | old_ctx = read_c0_entryhi() & ASID_MASK; |
268 | write_c0_entrylo0(entrylo0); | 268 | write_c0_entrylo0(entrylo0); |
269 | write_c0_entryhi(entryhi); | 269 | write_c0_entryhi(entryhi); |
270 | write_c0_index(wired); | 270 | write_c0_index(wired); |
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index 09653b290d53..c643de4c473a 100644 --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c | |||
@@ -287,7 +287,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) | |||
287 | 287 | ||
288 | ENTER_CRITICAL(flags); | 288 | ENTER_CRITICAL(flags); |
289 | 289 | ||
290 | pid = ASID_MASK(read_c0_entryhi()); | 290 | pid = read_c0_entryhi() & ASID_MASK; |
291 | address &= (PAGE_MASK << 1); | 291 | address &= (PAGE_MASK << 1); |
292 | write_c0_entryhi(address | pid); | 292 | write_c0_entryhi(address | pid); |
293 | pgdp = pgd_offset(vma->vm_mm, address); | 293 | pgdp = pgd_offset(vma->vm_mm, address); |
diff --git a/arch/mips/mm/tlb-r8k.c b/arch/mips/mm/tlb-r8k.c index 122f9207f49e..91c2499f806a 100644 --- a/arch/mips/mm/tlb-r8k.c +++ b/arch/mips/mm/tlb-r8k.c | |||
@@ -195,7 +195,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) | |||
195 | if (current->active_mm != vma->vm_mm) | 195 | if (current->active_mm != vma->vm_mm) |
196 | return; | 196 | return; |
197 | 197 | ||
198 | pid = ASID_MASK(read_c0_entryhi()); | 198 | pid = read_c0_entryhi() & ASID_MASK; |
199 | 199 | ||
200 | local_irq_save(flags); | 200 | local_irq_save(flags); |
201 | address &= PAGE_MASK; | 201 | address &= PAGE_MASK; |
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 4d46d3787576..ce9818eef7d3 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c | |||
@@ -29,7 +29,6 @@ | |||
29 | #include <linux/init.h> | 29 | #include <linux/init.h> |
30 | #include <linux/cache.h> | 30 | #include <linux/cache.h> |
31 | 31 | ||
32 | #include <asm/mmu_context.h> | ||
33 | #include <asm/cacheflush.h> | 32 | #include <asm/cacheflush.h> |
34 | #include <asm/pgtable.h> | 33 | #include <asm/pgtable.h> |
35 | #include <asm/war.h> | 34 | #include <asm/war.h> |
@@ -306,78 +305,6 @@ static struct uasm_reloc relocs[128] __cpuinitdata; | |||
306 | static int check_for_high_segbits __cpuinitdata; | 305 | static int check_for_high_segbits __cpuinitdata; |
307 | #endif | 306 | #endif |
308 | 307 | ||
309 | static void __cpuinit insn_fixup(unsigned int **start, unsigned int **stop, | ||
310 | unsigned int i_const) | ||
311 | { | ||
312 | unsigned int **p; | ||
313 | |||
314 | for (p = start; p < stop; p++) { | ||
315 | #ifndef CONFIG_CPU_MICROMIPS | ||
316 | unsigned int *ip; | ||
317 | |||
318 | ip = *p; | ||
319 | *ip = (*ip & 0xffff0000) | i_const; | ||
320 | #else | ||
321 | unsigned short *ip; | ||
322 | |||
323 | ip = ((unsigned short *)((unsigned int)*p - 1)); | ||
324 | if ((*ip & 0xf000) == 0x4000) { | ||
325 | *ip &= 0xfff1; | ||
326 | *ip |= (i_const << 1); | ||
327 | } else if ((*ip & 0xf000) == 0x6000) { | ||
328 | *ip &= 0xfff1; | ||
329 | *ip |= ((i_const >> 2) << 1); | ||
330 | } else { | ||
331 | ip++; | ||
332 | *ip = i_const; | ||
333 | } | ||
334 | #endif | ||
335 | local_flush_icache_range((unsigned long)ip, | ||
336 | (unsigned long)ip + sizeof(*ip)); | ||
337 | } | ||
338 | } | ||
339 | |||
340 | #define asid_insn_fixup(section, const) \ | ||
341 | do { \ | ||
342 | extern unsigned int *__start_ ## section; \ | ||
343 | extern unsigned int *__stop_ ## section; \ | ||
344 | insn_fixup(&__start_ ## section, &__stop_ ## section, const); \ | ||
345 | } while(0) | ||
346 | |||
347 | /* | ||
348 | * Caller is assumed to flush the caches before the first context switch. | ||
349 | */ | ||
350 | static void __cpuinit setup_asid(unsigned int inc, unsigned int mask, | ||
351 | unsigned int version_mask, | ||
352 | unsigned int first_version) | ||
353 | { | ||
354 | extern asmlinkage void handle_ri_rdhwr_vivt(void); | ||
355 | unsigned long *vivt_exc; | ||
356 | |||
357 | #ifdef CONFIG_CPU_MICROMIPS | ||
358 | /* | ||
359 | * Worst case optimised microMIPS addiu instructions support | ||
360 | * only a 3-bit immediate value. | ||
361 | */ | ||
362 | if(inc > 7) | ||
363 | panic("Invalid ASID increment value!"); | ||
364 | #endif | ||
365 | asid_insn_fixup(__asid_inc, inc); | ||
366 | asid_insn_fixup(__asid_mask, mask); | ||
367 | asid_insn_fixup(__asid_version_mask, version_mask); | ||
368 | asid_insn_fixup(__asid_first_version, first_version); | ||
369 | |||
370 | /* Patch up the 'handle_ri_rdhwr_vivt' handler. */ | ||
371 | vivt_exc = (unsigned long *) &handle_ri_rdhwr_vivt; | ||
372 | #ifdef CONFIG_CPU_MICROMIPS | ||
373 | vivt_exc = (unsigned long *)((unsigned long) vivt_exc - 1); | ||
374 | #endif | ||
375 | vivt_exc++; | ||
376 | *vivt_exc = (*vivt_exc & ~mask) | mask; | ||
377 | |||
378 | current_cpu_data.asid_cache = first_version; | ||
379 | } | ||
380 | |||
381 | static int check_for_high_segbits __cpuinitdata; | 308 | static int check_for_high_segbits __cpuinitdata; |
382 | 309 | ||
383 | static unsigned int kscratch_used_mask __cpuinitdata; | 310 | static unsigned int kscratch_used_mask __cpuinitdata; |
@@ -2256,7 +2183,6 @@ void __cpuinit build_tlb_refill_handler(void) | |||
2256 | case CPU_TX3922: | 2183 | case CPU_TX3922: |
2257 | case CPU_TX3927: | 2184 | case CPU_TX3927: |
2258 | #ifndef CONFIG_MIPS_PGD_C0_CONTEXT | 2185 | #ifndef CONFIG_MIPS_PGD_C0_CONTEXT |
2259 | setup_asid(0x40, 0xfc0, 0xf000, ASID_FIRST_VERSION_R3000); | ||
2260 | if (cpu_has_local_ebase) | 2186 | if (cpu_has_local_ebase) |
2261 | build_r3000_tlb_refill_handler(); | 2187 | build_r3000_tlb_refill_handler(); |
2262 | if (!run_once) { | 2188 | if (!run_once) { |
@@ -2282,11 +2208,6 @@ void __cpuinit build_tlb_refill_handler(void) | |||
2282 | break; | 2208 | break; |
2283 | 2209 | ||
2284 | default: | 2210 | default: |
2285 | #ifndef CONFIG_MIPS_MT_SMTC | ||
2286 | setup_asid(0x1, 0xff, 0xff00, ASID_FIRST_VERSION_R4000); | ||
2287 | #else | ||
2288 | setup_asid(0x1, smtc_asid_mask, 0xff00, ASID_FIRST_VERSION_R4000); | ||
2289 | #endif | ||
2290 | if (!run_once) { | 2211 | if (!run_once) { |
2291 | scratch_reg = allocate_kscratch(); | 2212 | scratch_reg = allocate_kscratch(); |
2292 | #ifdef CONFIG_MIPS_PGD_C0_CONTEXT | 2213 | #ifdef CONFIG_MIPS_PGD_C0_CONTEXT |
diff --git a/arch/mips/pmcs-msp71xx/msp_prom.c b/arch/mips/pmcs-msp71xx/msp_prom.c index 0edb89a63516..1c9897531660 100644 --- a/arch/mips/pmcs-msp71xx/msp_prom.c +++ b/arch/mips/pmcs-msp71xx/msp_prom.c | |||
@@ -83,7 +83,7 @@ static inline unsigned char str2hexnum(unsigned char c) | |||
83 | return 0; /* foo */ | 83 | return 0; /* foo */ |
84 | } | 84 | } |
85 | 85 | ||
86 | static inline int str2eaddr(unsigned char *ea, unsigned char *str) | 86 | int str2eaddr(unsigned char *ea, unsigned char *str) |
87 | { | 87 | { |
88 | int index = 0; | 88 | int index = 0; |
89 | unsigned char num = 0; | 89 | unsigned char num = 0; |
diff --git a/arch/mips/ralink/dts/rt3050.dtsi b/arch/mips/ralink/dts/rt3050.dtsi index ef7da1e227e6..e3203d414fee 100644 --- a/arch/mips/ralink/dts/rt3050.dtsi +++ b/arch/mips/ralink/dts/rt3050.dtsi | |||
@@ -55,4 +55,14 @@ | |||
55 | reg-shift = <2>; | 55 | reg-shift = <2>; |
56 | }; | 56 | }; |
57 | }; | 57 | }; |
58 | |||
59 | usb@101c0000 { | ||
60 | compatible = "ralink,rt3050-usb", "snps,dwc2"; | ||
61 | reg = <0x101c0000 40000>; | ||
62 | |||
63 | interrupt-parent = <&intc>; | ||
64 | interrupts = <18>; | ||
65 | |||
66 | status = "disabled"; | ||
67 | }; | ||
58 | }; | 68 | }; |
diff --git a/arch/mips/ralink/dts/rt3052_eval.dts b/arch/mips/ralink/dts/rt3052_eval.dts index c18c9a84f4c4..0ac73ea28198 100644 --- a/arch/mips/ralink/dts/rt3052_eval.dts +++ b/arch/mips/ralink/dts/rt3052_eval.dts | |||
@@ -43,4 +43,8 @@ | |||
43 | reg = <0x50000 0x7b0000>; | 43 | reg = <0x50000 0x7b0000>; |
44 | }; | 44 | }; |
45 | }; | 45 | }; |
46 | |||
47 | usb@101c0000 { | ||
48 | status = "ok"; | ||
49 | }; | ||
46 | }; | 50 | }; |
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index cad060f288cf..6507dabdd5dd 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig | |||
@@ -245,7 +245,7 @@ config SMP | |||
245 | 245 | ||
246 | config IRQSTACKS | 246 | config IRQSTACKS |
247 | bool "Use separate kernel stacks when processing interrupts" | 247 | bool "Use separate kernel stacks when processing interrupts" |
248 | default n | 248 | default y |
249 | help | 249 | help |
250 | If you say Y here the kernel will use separate kernel stacks | 250 | If you say Y here the kernel will use separate kernel stacks |
251 | for handling hard and soft interrupts. This can help avoid | 251 | for handling hard and soft interrupts. This can help avoid |
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index 2f967cc6649e..197690068f88 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile | |||
@@ -23,24 +23,21 @@ NM = sh $(srctree)/arch/parisc/nm | |||
23 | CHECKFLAGS += -D__hppa__=1 | 23 | CHECKFLAGS += -D__hppa__=1 |
24 | LIBGCC = $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) | 24 | LIBGCC = $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) |
25 | 25 | ||
26 | MACHINE := $(shell uname -m) | ||
27 | NATIVE := $(if $(filter parisc%,$(MACHINE)),1,0) | ||
28 | |||
29 | ifdef CONFIG_64BIT | 26 | ifdef CONFIG_64BIT |
30 | UTS_MACHINE := parisc64 | 27 | UTS_MACHINE := parisc64 |
31 | CHECKFLAGS += -D__LP64__=1 -m64 | 28 | CHECKFLAGS += -D__LP64__=1 -m64 |
32 | WIDTH := 64 | 29 | CC_ARCHES = hppa64 |
33 | else # 32-bit | 30 | else # 32-bit |
34 | WIDTH := | 31 | CC_ARCHES = hppa hppa2.0 hppa1.1 |
35 | endif | 32 | endif |
36 | 33 | ||
37 | # attempt to help out folks who are cross-compiling | 34 | ifneq ($(SUBARCH),$(UTS_MACHINE)) |
38 | ifeq ($(NATIVE),1) | 35 | ifeq ($(CROSS_COMPILE),) |
39 | CROSS_COMPILE := hppa$(WIDTH)-linux- | 36 | CC_SUFFIXES = linux linux-gnu unknown-linux-gnu |
40 | else | 37 | CROSS_COMPILE := $(call cc-cross-prefix, \ |
41 | ifeq ($(CROSS_COMPILE),) | 38 | $(foreach a,$(CC_ARCHES), \ |
42 | CROSS_COMPILE := hppa$(WIDTH)-linux-gnu- | 39 | $(foreach s,$(CC_SUFFIXES),$(a)-$(s)-))) |
43 | endif | 40 | endif |
44 | endif | 41 | endif |
45 | 42 | ||
46 | OBJCOPY_FLAGS =-O binary -R .note -R .comment -S | 43 | OBJCOPY_FLAGS =-O binary -R .note -R .comment -S |
diff --git a/arch/parisc/include/asm/hardirq.h b/arch/parisc/include/asm/hardirq.h index 12373c4dabab..c19f7138ba48 100644 --- a/arch/parisc/include/asm/hardirq.h +++ b/arch/parisc/include/asm/hardirq.h | |||
@@ -11,10 +11,18 @@ | |||
11 | #include <linux/threads.h> | 11 | #include <linux/threads.h> |
12 | #include <linux/irq.h> | 12 | #include <linux/irq.h> |
13 | 13 | ||
14 | #ifdef CONFIG_IRQSTACKS | ||
15 | #define __ARCH_HAS_DO_SOFTIRQ | ||
16 | #endif | ||
17 | |||
14 | typedef struct { | 18 | typedef struct { |
15 | unsigned int __softirq_pending; | 19 | unsigned int __softirq_pending; |
16 | #ifdef CONFIG_DEBUG_STACKOVERFLOW | 20 | #ifdef CONFIG_DEBUG_STACKOVERFLOW |
17 | unsigned int kernel_stack_usage; | 21 | unsigned int kernel_stack_usage; |
22 | #ifdef CONFIG_IRQSTACKS | ||
23 | unsigned int irq_stack_usage; | ||
24 | unsigned int irq_stack_counter; | ||
25 | #endif | ||
18 | #endif | 26 | #endif |
19 | #ifdef CONFIG_SMP | 27 | #ifdef CONFIG_SMP |
20 | unsigned int irq_resched_count; | 28 | unsigned int irq_resched_count; |
@@ -28,6 +36,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); | |||
28 | #define __ARCH_IRQ_STAT | 36 | #define __ARCH_IRQ_STAT |
29 | #define __IRQ_STAT(cpu, member) (irq_stat[cpu].member) | 37 | #define __IRQ_STAT(cpu, member) (irq_stat[cpu].member) |
30 | #define inc_irq_stat(member) this_cpu_inc(irq_stat.member) | 38 | #define inc_irq_stat(member) this_cpu_inc(irq_stat.member) |
39 | #define __inc_irq_stat(member) __this_cpu_inc(irq_stat.member) | ||
31 | #define local_softirq_pending() this_cpu_read(irq_stat.__softirq_pending) | 40 | #define local_softirq_pending() this_cpu_read(irq_stat.__softirq_pending) |
32 | 41 | ||
33 | #define __ARCH_SET_SOFTIRQ_PENDING | 42 | #define __ARCH_SET_SOFTIRQ_PENDING |
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h index 064015547d1e..cfbc43929cf6 100644 --- a/arch/parisc/include/asm/processor.h +++ b/arch/parisc/include/asm/processor.h | |||
@@ -63,10 +63,13 @@ | |||
63 | */ | 63 | */ |
64 | #ifdef __KERNEL__ | 64 | #ifdef __KERNEL__ |
65 | 65 | ||
66 | #include <linux/spinlock_types.h> | ||
67 | |||
66 | #define IRQ_STACK_SIZE (4096 << 2) /* 16k irq stack size */ | 68 | #define IRQ_STACK_SIZE (4096 << 2) /* 16k irq stack size */ |
67 | 69 | ||
68 | union irq_stack_union { | 70 | union irq_stack_union { |
69 | unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)]; | 71 | unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)]; |
72 | raw_spinlock_t lock; | ||
70 | }; | 73 | }; |
71 | 74 | ||
72 | DECLARE_PER_CPU(union irq_stack_union, irq_stack_union); | 75 | DECLARE_PER_CPU(union irq_stack_union, irq_stack_union); |
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index 4bb96ad9b0b1..ae27cb6ce19a 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S | |||
@@ -452,9 +452,41 @@ | |||
452 | L2_ptep \pgd,\pte,\index,\va,\fault | 452 | L2_ptep \pgd,\pte,\index,\va,\fault |
453 | .endm | 453 | .endm |
454 | 454 | ||
455 | /* Acquire pa_dbit_lock lock. */ | ||
456 | .macro dbit_lock spc,tmp,tmp1 | ||
457 | #ifdef CONFIG_SMP | ||
458 | cmpib,COND(=),n 0,\spc,2f | ||
459 | load32 PA(pa_dbit_lock),\tmp | ||
460 | 1: LDCW 0(\tmp),\tmp1 | ||
461 | cmpib,COND(=) 0,\tmp1,1b | ||
462 | nop | ||
463 | 2: | ||
464 | #endif | ||
465 | .endm | ||
466 | |||
467 | /* Release pa_dbit_lock lock without reloading lock address. */ | ||
468 | .macro dbit_unlock0 spc,tmp | ||
469 | #ifdef CONFIG_SMP | ||
470 | or,COND(=) %r0,\spc,%r0 | ||
471 | stw \spc,0(\tmp) | ||
472 | #endif | ||
473 | .endm | ||
474 | |||
475 | /* Release pa_dbit_lock lock. */ | ||
476 | .macro dbit_unlock1 spc,tmp | ||
477 | #ifdef CONFIG_SMP | ||
478 | load32 PA(pa_dbit_lock),\tmp | ||
479 | dbit_unlock0 \spc,\tmp | ||
480 | #endif | ||
481 | .endm | ||
482 | |||
455 | /* Set the _PAGE_ACCESSED bit of the PTE. Be clever and | 483 | /* Set the _PAGE_ACCESSED bit of the PTE. Be clever and |
456 | * don't needlessly dirty the cache line if it was already set */ | 484 | * don't needlessly dirty the cache line if it was already set */ |
457 | .macro update_ptep ptep,pte,tmp,tmp1 | 485 | .macro update_ptep spc,ptep,pte,tmp,tmp1 |
486 | #ifdef CONFIG_SMP | ||
487 | or,COND(=) %r0,\spc,%r0 | ||
488 | LDREG 0(\ptep),\pte | ||
489 | #endif | ||
458 | ldi _PAGE_ACCESSED,\tmp1 | 490 | ldi _PAGE_ACCESSED,\tmp1 |
459 | or \tmp1,\pte,\tmp | 491 | or \tmp1,\pte,\tmp |
460 | and,COND(<>) \tmp1,\pte,%r0 | 492 | and,COND(<>) \tmp1,\pte,%r0 |
@@ -463,7 +495,11 @@ | |||
463 | 495 | ||
464 | /* Set the dirty bit (and accessed bit). No need to be | 496 | /* Set the dirty bit (and accessed bit). No need to be |
465 | * clever, this is only used from the dirty fault */ | 497 | * clever, this is only used from the dirty fault */ |
466 | .macro update_dirty ptep,pte,tmp | 498 | .macro update_dirty spc,ptep,pte,tmp |
499 | #ifdef CONFIG_SMP | ||
500 | or,COND(=) %r0,\spc,%r0 | ||
501 | LDREG 0(\ptep),\pte | ||
502 | #endif | ||
467 | ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp | 503 | ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp |
468 | or \tmp,\pte,\pte | 504 | or \tmp,\pte,\pte |
469 | STREG \pte,0(\ptep) | 505 | STREG \pte,0(\ptep) |
@@ -1111,11 +1147,13 @@ dtlb_miss_20w: | |||
1111 | 1147 | ||
1112 | L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w | 1148 | L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w |
1113 | 1149 | ||
1114 | update_ptep ptp,pte,t0,t1 | 1150 | dbit_lock spc,t0,t1 |
1151 | update_ptep spc,ptp,pte,t0,t1 | ||
1115 | 1152 | ||
1116 | make_insert_tlb spc,pte,prot | 1153 | make_insert_tlb spc,pte,prot |
1117 | 1154 | ||
1118 | idtlbt pte,prot | 1155 | idtlbt pte,prot |
1156 | dbit_unlock1 spc,t0 | ||
1119 | 1157 | ||
1120 | rfir | 1158 | rfir |
1121 | nop | 1159 | nop |
@@ -1135,11 +1173,13 @@ nadtlb_miss_20w: | |||
1135 | 1173 | ||
1136 | L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w | 1174 | L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w |
1137 | 1175 | ||
1138 | update_ptep ptp,pte,t0,t1 | 1176 | dbit_lock spc,t0,t1 |
1177 | update_ptep spc,ptp,pte,t0,t1 | ||
1139 | 1178 | ||
1140 | make_insert_tlb spc,pte,prot | 1179 | make_insert_tlb spc,pte,prot |
1141 | 1180 | ||
1142 | idtlbt pte,prot | 1181 | idtlbt pte,prot |
1182 | dbit_unlock1 spc,t0 | ||
1143 | 1183 | ||
1144 | rfir | 1184 | rfir |
1145 | nop | 1185 | nop |
@@ -1161,7 +1201,8 @@ dtlb_miss_11: | |||
1161 | 1201 | ||
1162 | L2_ptep ptp,pte,t0,va,dtlb_check_alias_11 | 1202 | L2_ptep ptp,pte,t0,va,dtlb_check_alias_11 |
1163 | 1203 | ||
1164 | update_ptep ptp,pte,t0,t1 | 1204 | dbit_lock spc,t0,t1 |
1205 | update_ptep spc,ptp,pte,t0,t1 | ||
1165 | 1206 | ||
1166 | make_insert_tlb_11 spc,pte,prot | 1207 | make_insert_tlb_11 spc,pte,prot |
1167 | 1208 | ||
@@ -1172,6 +1213,7 @@ dtlb_miss_11: | |||
1172 | idtlbp prot,(%sr1,va) | 1213 | idtlbp prot,(%sr1,va) |
1173 | 1214 | ||
1174 | mtsp t0, %sr1 /* Restore sr1 */ | 1215 | mtsp t0, %sr1 /* Restore sr1 */ |
1216 | dbit_unlock1 spc,t0 | ||
1175 | 1217 | ||
1176 | rfir | 1218 | rfir |
1177 | nop | 1219 | nop |
@@ -1192,7 +1234,8 @@ nadtlb_miss_11: | |||
1192 | 1234 | ||
1193 | L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11 | 1235 | L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11 |
1194 | 1236 | ||
1195 | update_ptep ptp,pte,t0,t1 | 1237 | dbit_lock spc,t0,t1 |
1238 | update_ptep spc,ptp,pte,t0,t1 | ||
1196 | 1239 | ||
1197 | make_insert_tlb_11 spc,pte,prot | 1240 | make_insert_tlb_11 spc,pte,prot |
1198 | 1241 | ||
@@ -1204,6 +1247,7 @@ nadtlb_miss_11: | |||
1204 | idtlbp prot,(%sr1,va) | 1247 | idtlbp prot,(%sr1,va) |
1205 | 1248 | ||
1206 | mtsp t0, %sr1 /* Restore sr1 */ | 1249 | mtsp t0, %sr1 /* Restore sr1 */ |
1250 | dbit_unlock1 spc,t0 | ||
1207 | 1251 | ||
1208 | rfir | 1252 | rfir |
1209 | nop | 1253 | nop |
@@ -1224,13 +1268,15 @@ dtlb_miss_20: | |||
1224 | 1268 | ||
1225 | L2_ptep ptp,pte,t0,va,dtlb_check_alias_20 | 1269 | L2_ptep ptp,pte,t0,va,dtlb_check_alias_20 |
1226 | 1270 | ||
1227 | update_ptep ptp,pte,t0,t1 | 1271 | dbit_lock spc,t0,t1 |
1272 | update_ptep spc,ptp,pte,t0,t1 | ||
1228 | 1273 | ||
1229 | make_insert_tlb spc,pte,prot | 1274 | make_insert_tlb spc,pte,prot |
1230 | 1275 | ||
1231 | f_extend pte,t0 | 1276 | f_extend pte,t0 |
1232 | 1277 | ||
1233 | idtlbt pte,prot | 1278 | idtlbt pte,prot |
1279 | dbit_unlock1 spc,t0 | ||
1234 | 1280 | ||
1235 | rfir | 1281 | rfir |
1236 | nop | 1282 | nop |
@@ -1250,13 +1296,15 @@ nadtlb_miss_20: | |||
1250 | 1296 | ||
1251 | L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20 | 1297 | L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20 |
1252 | 1298 | ||
1253 | update_ptep ptp,pte,t0,t1 | 1299 | dbit_lock spc,t0,t1 |
1300 | update_ptep spc,ptp,pte,t0,t1 | ||
1254 | 1301 | ||
1255 | make_insert_tlb spc,pte,prot | 1302 | make_insert_tlb spc,pte,prot |
1256 | 1303 | ||
1257 | f_extend pte,t0 | 1304 | f_extend pte,t0 |
1258 | 1305 | ||
1259 | idtlbt pte,prot | 1306 | idtlbt pte,prot |
1307 | dbit_unlock1 spc,t0 | ||
1260 | 1308 | ||
1261 | rfir | 1309 | rfir |
1262 | nop | 1310 | nop |
@@ -1357,11 +1405,13 @@ itlb_miss_20w: | |||
1357 | 1405 | ||
1358 | L3_ptep ptp,pte,t0,va,itlb_fault | 1406 | L3_ptep ptp,pte,t0,va,itlb_fault |
1359 | 1407 | ||
1360 | update_ptep ptp,pte,t0,t1 | 1408 | dbit_lock spc,t0,t1 |
1409 | update_ptep spc,ptp,pte,t0,t1 | ||
1361 | 1410 | ||
1362 | make_insert_tlb spc,pte,prot | 1411 | make_insert_tlb spc,pte,prot |
1363 | 1412 | ||
1364 | iitlbt pte,prot | 1413 | iitlbt pte,prot |
1414 | dbit_unlock1 spc,t0 | ||
1365 | 1415 | ||
1366 | rfir | 1416 | rfir |
1367 | nop | 1417 | nop |
@@ -1379,11 +1429,13 @@ naitlb_miss_20w: | |||
1379 | 1429 | ||
1380 | L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w | 1430 | L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w |
1381 | 1431 | ||
1382 | update_ptep ptp,pte,t0,t1 | 1432 | dbit_lock spc,t0,t1 |
1433 | update_ptep spc,ptp,pte,t0,t1 | ||
1383 | 1434 | ||
1384 | make_insert_tlb spc,pte,prot | 1435 | make_insert_tlb spc,pte,prot |
1385 | 1436 | ||
1386 | iitlbt pte,prot | 1437 | iitlbt pte,prot |
1438 | dbit_unlock1 spc,t0 | ||
1387 | 1439 | ||
1388 | rfir | 1440 | rfir |
1389 | nop | 1441 | nop |
@@ -1405,7 +1457,8 @@ itlb_miss_11: | |||
1405 | 1457 | ||
1406 | L2_ptep ptp,pte,t0,va,itlb_fault | 1458 | L2_ptep ptp,pte,t0,va,itlb_fault |
1407 | 1459 | ||
1408 | update_ptep ptp,pte,t0,t1 | 1460 | dbit_lock spc,t0,t1 |
1461 | update_ptep spc,ptp,pte,t0,t1 | ||
1409 | 1462 | ||
1410 | make_insert_tlb_11 spc,pte,prot | 1463 | make_insert_tlb_11 spc,pte,prot |
1411 | 1464 | ||
@@ -1416,6 +1469,7 @@ itlb_miss_11: | |||
1416 | iitlbp prot,(%sr1,va) | 1469 | iitlbp prot,(%sr1,va) |
1417 | 1470 | ||
1418 | mtsp t0, %sr1 /* Restore sr1 */ | 1471 | mtsp t0, %sr1 /* Restore sr1 */ |
1472 | dbit_unlock1 spc,t0 | ||
1419 | 1473 | ||
1420 | rfir | 1474 | rfir |
1421 | nop | 1475 | nop |
@@ -1427,7 +1481,8 @@ naitlb_miss_11: | |||
1427 | 1481 | ||
1428 | L2_ptep ptp,pte,t0,va,naitlb_check_alias_11 | 1482 | L2_ptep ptp,pte,t0,va,naitlb_check_alias_11 |
1429 | 1483 | ||
1430 | update_ptep ptp,pte,t0,t1 | 1484 | dbit_lock spc,t0,t1 |
1485 | update_ptep spc,ptp,pte,t0,t1 | ||
1431 | 1486 | ||
1432 | make_insert_tlb_11 spc,pte,prot | 1487 | make_insert_tlb_11 spc,pte,prot |
1433 | 1488 | ||
@@ -1438,6 +1493,7 @@ naitlb_miss_11: | |||
1438 | iitlbp prot,(%sr1,va) | 1493 | iitlbp prot,(%sr1,va) |
1439 | 1494 | ||
1440 | mtsp t0, %sr1 /* Restore sr1 */ | 1495 | mtsp t0, %sr1 /* Restore sr1 */ |
1496 | dbit_unlock1 spc,t0 | ||
1441 | 1497 | ||
1442 | rfir | 1498 | rfir |
1443 | nop | 1499 | nop |
@@ -1459,13 +1515,15 @@ itlb_miss_20: | |||
1459 | 1515 | ||
1460 | L2_ptep ptp,pte,t0,va,itlb_fault | 1516 | L2_ptep ptp,pte,t0,va,itlb_fault |
1461 | 1517 | ||
1462 | update_ptep ptp,pte,t0,t1 | 1518 | dbit_lock spc,t0,t1 |
1519 | update_ptep spc,ptp,pte,t0,t1 | ||
1463 | 1520 | ||
1464 | make_insert_tlb spc,pte,prot | 1521 | make_insert_tlb spc,pte,prot |
1465 | 1522 | ||
1466 | f_extend pte,t0 | 1523 | f_extend pte,t0 |
1467 | 1524 | ||
1468 | iitlbt pte,prot | 1525 | iitlbt pte,prot |
1526 | dbit_unlock1 spc,t0 | ||
1469 | 1527 | ||
1470 | rfir | 1528 | rfir |
1471 | nop | 1529 | nop |
@@ -1477,13 +1535,15 @@ naitlb_miss_20: | |||
1477 | 1535 | ||
1478 | L2_ptep ptp,pte,t0,va,naitlb_check_alias_20 | 1536 | L2_ptep ptp,pte,t0,va,naitlb_check_alias_20 |
1479 | 1537 | ||
1480 | update_ptep ptp,pte,t0,t1 | 1538 | dbit_lock spc,t0,t1 |
1539 | update_ptep spc,ptp,pte,t0,t1 | ||
1481 | 1540 | ||
1482 | make_insert_tlb spc,pte,prot | 1541 | make_insert_tlb spc,pte,prot |
1483 | 1542 | ||
1484 | f_extend pte,t0 | 1543 | f_extend pte,t0 |
1485 | 1544 | ||
1486 | iitlbt pte,prot | 1545 | iitlbt pte,prot |
1546 | dbit_unlock1 spc,t0 | ||
1487 | 1547 | ||
1488 | rfir | 1548 | rfir |
1489 | nop | 1549 | nop |
@@ -1507,29 +1567,13 @@ dbit_trap_20w: | |||
1507 | 1567 | ||
1508 | L3_ptep ptp,pte,t0,va,dbit_fault | 1568 | L3_ptep ptp,pte,t0,va,dbit_fault |
1509 | 1569 | ||
1510 | #ifdef CONFIG_SMP | 1570 | dbit_lock spc,t0,t1 |
1511 | cmpib,COND(=),n 0,spc,dbit_nolock_20w | 1571 | update_dirty spc,ptp,pte,t1 |
1512 | load32 PA(pa_dbit_lock),t0 | ||
1513 | |||
1514 | dbit_spin_20w: | ||
1515 | LDCW 0(t0),t1 | ||
1516 | cmpib,COND(=) 0,t1,dbit_spin_20w | ||
1517 | nop | ||
1518 | |||
1519 | dbit_nolock_20w: | ||
1520 | #endif | ||
1521 | update_dirty ptp,pte,t1 | ||
1522 | 1572 | ||
1523 | make_insert_tlb spc,pte,prot | 1573 | make_insert_tlb spc,pte,prot |
1524 | 1574 | ||
1525 | idtlbt pte,prot | 1575 | idtlbt pte,prot |
1526 | #ifdef CONFIG_SMP | 1576 | dbit_unlock0 spc,t0 |
1527 | cmpib,COND(=),n 0,spc,dbit_nounlock_20w | ||
1528 | ldi 1,t1 | ||
1529 | stw t1,0(t0) | ||
1530 | |||
1531 | dbit_nounlock_20w: | ||
1532 | #endif | ||
1533 | 1577 | ||
1534 | rfir | 1578 | rfir |
1535 | nop | 1579 | nop |
@@ -1543,18 +1587,8 @@ dbit_trap_11: | |||
1543 | 1587 | ||
1544 | L2_ptep ptp,pte,t0,va,dbit_fault | 1588 | L2_ptep ptp,pte,t0,va,dbit_fault |
1545 | 1589 | ||
1546 | #ifdef CONFIG_SMP | 1590 | dbit_lock spc,t0,t1 |
1547 | cmpib,COND(=),n 0,spc,dbit_nolock_11 | 1591 | update_dirty spc,ptp,pte,t1 |
1548 | load32 PA(pa_dbit_lock),t0 | ||
1549 | |||
1550 | dbit_spin_11: | ||
1551 | LDCW 0(t0),t1 | ||
1552 | cmpib,= 0,t1,dbit_spin_11 | ||
1553 | nop | ||
1554 | |||
1555 | dbit_nolock_11: | ||
1556 | #endif | ||
1557 | update_dirty ptp,pte,t1 | ||
1558 | 1592 | ||
1559 | make_insert_tlb_11 spc,pte,prot | 1593 | make_insert_tlb_11 spc,pte,prot |
1560 | 1594 | ||
@@ -1565,13 +1599,7 @@ dbit_nolock_11: | |||
1565 | idtlbp prot,(%sr1,va) | 1599 | idtlbp prot,(%sr1,va) |
1566 | 1600 | ||
1567 | mtsp t1, %sr1 /* Restore sr1 */ | 1601 | mtsp t1, %sr1 /* Restore sr1 */ |
1568 | #ifdef CONFIG_SMP | 1602 | dbit_unlock0 spc,t0 |
1569 | cmpib,COND(=),n 0,spc,dbit_nounlock_11 | ||
1570 | ldi 1,t1 | ||
1571 | stw t1,0(t0) | ||
1572 | |||
1573 | dbit_nounlock_11: | ||
1574 | #endif | ||
1575 | 1603 | ||
1576 | rfir | 1604 | rfir |
1577 | nop | 1605 | nop |
@@ -1583,32 +1611,15 @@ dbit_trap_20: | |||
1583 | 1611 | ||
1584 | L2_ptep ptp,pte,t0,va,dbit_fault | 1612 | L2_ptep ptp,pte,t0,va,dbit_fault |
1585 | 1613 | ||
1586 | #ifdef CONFIG_SMP | 1614 | dbit_lock spc,t0,t1 |
1587 | cmpib,COND(=),n 0,spc,dbit_nolock_20 | 1615 | update_dirty spc,ptp,pte,t1 |
1588 | load32 PA(pa_dbit_lock),t0 | ||
1589 | |||
1590 | dbit_spin_20: | ||
1591 | LDCW 0(t0),t1 | ||
1592 | cmpib,= 0,t1,dbit_spin_20 | ||
1593 | nop | ||
1594 | |||
1595 | dbit_nolock_20: | ||
1596 | #endif | ||
1597 | update_dirty ptp,pte,t1 | ||
1598 | 1616 | ||
1599 | make_insert_tlb spc,pte,prot | 1617 | make_insert_tlb spc,pte,prot |
1600 | 1618 | ||
1601 | f_extend pte,t1 | 1619 | f_extend pte,t1 |
1602 | 1620 | ||
1603 | idtlbt pte,prot | 1621 | idtlbt pte,prot |
1604 | 1622 | dbit_unlock0 spc,t0 | |
1605 | #ifdef CONFIG_SMP | ||
1606 | cmpib,COND(=),n 0,spc,dbit_nounlock_20 | ||
1607 | ldi 1,t1 | ||
1608 | stw t1,0(t0) | ||
1609 | |||
1610 | dbit_nounlock_20: | ||
1611 | #endif | ||
1612 | 1623 | ||
1613 | rfir | 1624 | rfir |
1614 | nop | 1625 | nop |
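The entry.S changes above fold the previously open-coded pa_dbit_lock spin loops into shared dbit_lock, dbit_unlock0 and dbit_unlock1 assembler macros, taken on SMP only when spc is non-zero (a user-space fault). For readers who do not speak PA-RISC assembly, the C11 sketch below is a rough rendering of that ldcw-style protocol under the stated assumptions; the names, types and the value stored on unlock are illustrative, not the kernel's.

#include <stdatomic.h>
#include <stdio.h>

/* ldcw convention: non-zero means "free"; loading the word also zeroes it. */
static atomic_ulong pa_dbit_lock_sketch = 1;

static void dbit_lock_sketch(unsigned long spc)
{
	if (!spc)			/* kernel-space faults skip the lock */
		return;
	/* Emulate LDCW with an atomic exchange: spin while the old value was 0. */
	while (atomic_exchange(&pa_dbit_lock_sketch, 0) == 0)
		;
}

static void dbit_unlock_sketch(unsigned long spc)
{
	if (!spc)
		return;
	atomic_store(&pa_dbit_lock_sketch, spc);	/* any non-zero store releases */
}

int main(void)
{
	unsigned long spc = 0x10;	/* pretend user-space space register */

	dbit_lock_sketch(spc);
	/* ... update the dirty/accessed PTE bits and insert the TLB entry ... */
	dbit_unlock_sketch(spc);
	puts("dirty-bit update done under the sketch lock");
	return 0;
}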
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c index e255db0bb761..55237a70e197 100644 --- a/arch/parisc/kernel/irq.c +++ b/arch/parisc/kernel/irq.c | |||
@@ -166,22 +166,32 @@ int arch_show_interrupts(struct seq_file *p, int prec) | |||
166 | seq_printf(p, "%*s: ", prec, "STK"); | 166 | seq_printf(p, "%*s: ", prec, "STK"); |
167 | for_each_online_cpu(j) | 167 | for_each_online_cpu(j) |
168 | seq_printf(p, "%10u ", irq_stats(j)->kernel_stack_usage); | 168 | seq_printf(p, "%10u ", irq_stats(j)->kernel_stack_usage); |
169 | seq_printf(p, " Kernel stack usage\n"); | 169 | seq_puts(p, " Kernel stack usage\n"); |
170 | # ifdef CONFIG_IRQSTACKS | ||
171 | seq_printf(p, "%*s: ", prec, "IST"); | ||
172 | for_each_online_cpu(j) | ||
173 | seq_printf(p, "%10u ", irq_stats(j)->irq_stack_usage); | ||
174 | seq_puts(p, " Interrupt stack usage\n"); | ||
175 | seq_printf(p, "%*s: ", prec, "ISC"); | ||
176 | for_each_online_cpu(j) | ||
177 | seq_printf(p, "%10u ", irq_stats(j)->irq_stack_counter); | ||
178 | seq_puts(p, " Interrupt stack usage counter\n"); | ||
179 | # endif | ||
170 | #endif | 180 | #endif |
171 | #ifdef CONFIG_SMP | 181 | #ifdef CONFIG_SMP |
172 | seq_printf(p, "%*s: ", prec, "RES"); | 182 | seq_printf(p, "%*s: ", prec, "RES"); |
173 | for_each_online_cpu(j) | 183 | for_each_online_cpu(j) |
174 | seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count); | 184 | seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count); |
175 | seq_printf(p, " Rescheduling interrupts\n"); | 185 | seq_puts(p, " Rescheduling interrupts\n"); |
176 | seq_printf(p, "%*s: ", prec, "CAL"); | 186 | seq_printf(p, "%*s: ", prec, "CAL"); |
177 | for_each_online_cpu(j) | 187 | for_each_online_cpu(j) |
178 | seq_printf(p, "%10u ", irq_stats(j)->irq_call_count); | 188 | seq_printf(p, "%10u ", irq_stats(j)->irq_call_count); |
179 | seq_printf(p, " Function call interrupts\n"); | 189 | seq_puts(p, " Function call interrupts\n"); |
180 | #endif | 190 | #endif |
181 | seq_printf(p, "%*s: ", prec, "TLB"); | 191 | seq_printf(p, "%*s: ", prec, "TLB"); |
182 | for_each_online_cpu(j) | 192 | for_each_online_cpu(j) |
183 | seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count); | 193 | seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count); |
184 | seq_printf(p, " TLB shootdowns\n"); | 194 | seq_puts(p, " TLB shootdowns\n"); |
185 | return 0; | 195 | return 0; |
186 | } | 196 | } |
187 | 197 | ||
@@ -378,6 +388,7 @@ static inline void stack_overflow_check(struct pt_regs *regs) | |||
378 | unsigned long sp = regs->gr[30]; | 388 | unsigned long sp = regs->gr[30]; |
379 | unsigned long stack_usage; | 389 | unsigned long stack_usage; |
380 | unsigned int *last_usage; | 390 | unsigned int *last_usage; |
391 | int cpu = smp_processor_id(); | ||
381 | 392 | ||
382 | /* if sr7 != 0, we interrupted a userspace process which we do not want | 393 | /* if sr7 != 0, we interrupted a userspace process which we do not want |
383 | * to check for stack overflow. We will only check the kernel stack. */ | 394 | * to check for stack overflow. We will only check the kernel stack. */ |
@@ -386,7 +397,31 @@ static inline void stack_overflow_check(struct pt_regs *regs) | |||
386 | 397 | ||
387 | /* calculate kernel stack usage */ | 398 | /* calculate kernel stack usage */ |
388 | stack_usage = sp - stack_start; | 399 | stack_usage = sp - stack_start; |
389 | last_usage = &per_cpu(irq_stat.kernel_stack_usage, smp_processor_id()); | 400 | #ifdef CONFIG_IRQSTACKS |
401 | if (likely(stack_usage <= THREAD_SIZE)) | ||
402 | goto check_kernel_stack; /* found kernel stack */ | ||
403 | |||
404 | /* check irq stack usage */ | ||
405 | stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack; | ||
406 | stack_usage = sp - stack_start; | ||
407 | |||
408 | last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu); | ||
409 | if (unlikely(stack_usage > *last_usage)) | ||
410 | *last_usage = stack_usage; | ||
411 | |||
412 | if (likely(stack_usage < (IRQ_STACK_SIZE - STACK_MARGIN))) | ||
413 | return; | ||
414 | |||
415 | pr_emerg("stackcheck: %s will most likely overflow irq stack " | ||
416 | "(sp:%lx, stk bottom-top:%lx-%lx)\n", | ||
417 | current->comm, sp, stack_start, stack_start + IRQ_STACK_SIZE); | ||
418 | goto panic_check; | ||
419 | |||
420 | check_kernel_stack: | ||
421 | #endif | ||
422 | |||
423 | /* check kernel stack usage */ | ||
424 | last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu); | ||
390 | 425 | ||
391 | if (unlikely(stack_usage > *last_usage)) | 426 | if (unlikely(stack_usage > *last_usage)) |
392 | *last_usage = stack_usage; | 427 | *last_usage = stack_usage; |
@@ -398,31 +433,69 @@ static inline void stack_overflow_check(struct pt_regs *regs) | |||
398 | "(sp:%lx, stk bottom-top:%lx-%lx)\n", | 433 | "(sp:%lx, stk bottom-top:%lx-%lx)\n", |
399 | current->comm, sp, stack_start, stack_start + THREAD_SIZE); | 434 | current->comm, sp, stack_start, stack_start + THREAD_SIZE); |
400 | 435 | ||
436 | #ifdef CONFIG_IRQSTACKS | ||
437 | panic_check: | ||
438 | #endif | ||
401 | if (sysctl_panic_on_stackoverflow) | 439 | if (sysctl_panic_on_stackoverflow) |
402 | panic("low stack detected by irq handler - check messages\n"); | 440 | panic("low stack detected by irq handler - check messages\n"); |
403 | #endif | 441 | #endif |
404 | } | 442 | } |
405 | 443 | ||
406 | #ifdef CONFIG_IRQSTACKS | 444 | #ifdef CONFIG_IRQSTACKS |
407 | DEFINE_PER_CPU(union irq_stack_union, irq_stack_union); | 445 | DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = { |
446 | .lock = __RAW_SPIN_LOCK_UNLOCKED((irq_stack_union).lock) | ||
447 | }; | ||
408 | 448 | ||
409 | static void execute_on_irq_stack(void *func, unsigned long param1) | 449 | static void execute_on_irq_stack(void *func, unsigned long param1) |
410 | { | 450 | { |
411 | unsigned long *irq_stack_start; | 451 | union irq_stack_union *union_ptr; |
412 | unsigned long irq_stack; | 452 | unsigned long irq_stack; |
413 | int cpu = smp_processor_id(); | 453 | raw_spinlock_t *irq_stack_in_use; |
414 | 454 | ||
415 | irq_stack_start = &per_cpu(irq_stack_union, cpu).stack[0]; | 455 | union_ptr = &per_cpu(irq_stack_union, smp_processor_id()); |
416 | irq_stack = (unsigned long) irq_stack_start; | 456 | irq_stack = (unsigned long) &union_ptr->stack; |
417 | irq_stack = ALIGN(irq_stack, 16); /* align for stack frame usage */ | 457 | irq_stack = ALIGN(irq_stack + sizeof(irq_stack_union.lock), |
458 | 64); /* align for stack frame usage */ | ||
418 | 459 | ||
419 | BUG_ON(*irq_stack_start); /* report bug if we were called recursively. */ | 460 | /* We may be called recursively. If we are already using the irq stack, |
420 | *irq_stack_start = 1; | 461 | * just continue to use it. Use spinlocks to serialize |
462 | * the irq stack usage. | ||
463 | */ | ||
464 | irq_stack_in_use = &union_ptr->lock; | ||
465 | if (!raw_spin_trylock(irq_stack_in_use)) { | ||
466 | void (*direct_call)(unsigned long p1) = func; | ||
467 | |||
468 | /* We are using the IRQ stack already. | ||
469 | * Do direct call on current stack. */ | ||
470 | direct_call(param1); | ||
471 | return; | ||
472 | } | ||
421 | 473 | ||
422 | /* This is where we switch to the IRQ stack. */ | 474 | /* This is where we switch to the IRQ stack. */ |
423 | call_on_stack(param1, func, irq_stack); | 475 | call_on_stack(param1, func, irq_stack); |
424 | 476 | ||
425 | *irq_stack_start = 0; | 477 | __inc_irq_stat(irq_stack_counter); |
478 | |||
479 | /* free up irq stack usage. */ | ||
480 | do_raw_spin_unlock(irq_stack_in_use); | ||
481 | } | ||
482 | |||
483 | asmlinkage void do_softirq(void) | ||
484 | { | ||
485 | __u32 pending; | ||
486 | unsigned long flags; | ||
487 | |||
488 | if (in_interrupt()) | ||
489 | return; | ||
490 | |||
491 | local_irq_save(flags); | ||
492 | |||
493 | pending = local_softirq_pending(); | ||
494 | |||
495 | if (pending) | ||
496 | execute_on_irq_stack(__do_softirq, 0); | ||
497 | |||
498 | local_irq_restore(flags); | ||
426 | } | 499 | } |
427 | #endif /* CONFIG_IRQSTACKS */ | 500 | #endif /* CONFIG_IRQSTACKS */ |
428 | 501 | ||
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index ce939ac8622b..1c965642068b 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c | |||
@@ -1069,7 +1069,7 @@ void flush_tlb_all(void) | |||
1069 | { | 1069 | { |
1070 | int do_recycle; | 1070 | int do_recycle; |
1071 | 1071 | ||
1072 | inc_irq_stat(irq_tlb_count); | 1072 | __inc_irq_stat(irq_tlb_count); |
1073 | do_recycle = 0; | 1073 | do_recycle = 0; |
1074 | spin_lock(&sid_lock); | 1074 | spin_lock(&sid_lock); |
1075 | if (dirty_space_ids > RECYCLE_THRESHOLD) { | 1075 | if (dirty_space_ids > RECYCLE_THRESHOLD) { |
@@ -1090,7 +1090,7 @@ void flush_tlb_all(void) | |||
1090 | #else | 1090 | #else |
1091 | void flush_tlb_all(void) | 1091 | void flush_tlb_all(void) |
1092 | { | 1092 | { |
1093 | inc_irq_stat(irq_tlb_count); | 1093 | __inc_irq_stat(irq_tlb_count); |
1094 | spin_lock(&sid_lock); | 1094 | spin_lock(&sid_lock); |
1095 | flush_tlb_all_local(NULL); | 1095 | flush_tlb_all_local(NULL); |
1096 | recycle_sids(); | 1096 | recycle_sids(); |
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug index 5416e28a7538..863d877e0b5f 100644 --- a/arch/powerpc/Kconfig.debug +++ b/arch/powerpc/Kconfig.debug | |||
@@ -262,8 +262,31 @@ config PPC_EARLY_DEBUG_OPAL_HVSI | |||
262 | Select this to enable early debugging for the PowerNV platform | 262 | Select this to enable early debugging for the PowerNV platform |
263 | using an "hvsi" console | 263 | using an "hvsi" console |
264 | 264 | ||
265 | config PPC_EARLY_DEBUG_MEMCONS | ||
266 | bool "In memory console" | ||
267 | help | ||
268 | Select this to enable early debugging using an in memory console. | ||
269 | This console provides input and output buffers stored within the | ||
270 | kernel BSS and should be safe to select on any system. A debugger | ||
271 | can then be used to read kernel output or send input to the console. | ||
265 | endchoice | 272 | endchoice |
266 | 273 | ||
274 | config PPC_MEMCONS_OUTPUT_SIZE | ||
275 | int "In memory console output buffer size" | ||
276 | depends on PPC_EARLY_DEBUG_MEMCONS | ||
277 | default 4096 | ||
278 | help | ||
279 | Selects the size of the output buffer (in bytes) of the in memory | ||
280 | console. | ||
281 | |||
282 | config PPC_MEMCONS_INPUT_SIZE | ||
283 | int "In memory console input buffer size" | ||
284 | depends on PPC_EARLY_DEBUG_MEMCONS | ||
285 | default 128 | ||
286 | help | ||
287 | Selects the size of the input buffer (in bytes) of the in memory | ||
288 | console. | ||
289 | |||
267 | config PPC_EARLY_DEBUG_OPAL | 290 | config PPC_EARLY_DEBUG_OPAL |
268 | def_bool y | 291 | def_bool y |
269 | depends on PPC_EARLY_DEBUG_OPAL_RAW || PPC_EARLY_DEBUG_OPAL_HVSI | 292 | depends on PPC_EARLY_DEBUG_OPAL_RAW || PPC_EARLY_DEBUG_OPAL_HVSI |
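Editor's note: the help text above describes an early console whose input and output buffers live in kernel BSS so a debugger can read output or inject characters. A rough sketch of the idea, using the default sizes from the new options; the structure and names here are assumptions, not the actual udbg_memcons implementation:

#include <stddef.h>

#define MEMCONS_OUTPUT_SIZE 4096        /* PPC_MEMCONS_OUTPUT_SIZE default */
#define MEMCONS_INPUT_SIZE  128         /* PPC_MEMCONS_INPUT_SIZE default */

/* Statically allocated, so the buffers land in BSS at addresses a
 * debugger can locate from the symbol table. */
static char memcons_output[MEMCONS_OUTPUT_SIZE];
static char memcons_input[MEMCONS_INPUT_SIZE];
static size_t out_pos, in_pos;

static void memcons_putc(char c)
{
        memcons_output[out_pos] = c;
        out_pos = (out_pos + 1) % MEMCONS_OUTPUT_SIZE;  /* wrap when full */
}

static int memcons_getc(void)
{
        char c = memcons_input[in_pos];

        if (c == 0)
                return -1;              /* nothing queued by the debugger */
        memcons_input[in_pos] = 0;
        in_pos = (in_pos + 1) % MEMCONS_INPUT_SIZE;
        return c;
}

int main(void)
{
        memcons_putc('x');
        return memcons_getc() == -1 ? 0 : 1;
}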
diff --git a/arch/powerpc/include/asm/context_tracking.h b/arch/powerpc/include/asm/context_tracking.h new file mode 100644 index 000000000000..b6f5a33b8ee2 --- /dev/null +++ b/arch/powerpc/include/asm/context_tracking.h | |||
@@ -0,0 +1,10 @@ | |||
1 | #ifndef _ASM_POWERPC_CONTEXT_TRACKING_H | ||
2 | #define _ASM_POWERPC_CONTEXT_TRACKING_H | ||
3 | |||
4 | #ifdef CONFIG_CONTEXT_TRACKING | ||
5 | #define SCHEDULE_USER bl .schedule_user | ||
6 | #else | ||
7 | #define SCHEDULE_USER bl .schedule | ||
8 | #endif | ||
9 | |||
10 | #endif | ||
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h index 0df54646f968..681bc0314b6b 100644 --- a/arch/powerpc/include/asm/firmware.h +++ b/arch/powerpc/include/asm/firmware.h | |||
@@ -52,6 +52,7 @@ | |||
52 | #define FW_FEATURE_BEST_ENERGY ASM_CONST(0x0000000080000000) | 52 | #define FW_FEATURE_BEST_ENERGY ASM_CONST(0x0000000080000000) |
53 | #define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000) | 53 | #define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000) |
54 | #define FW_FEATURE_PRRN ASM_CONST(0x0000000200000000) | 54 | #define FW_FEATURE_PRRN ASM_CONST(0x0000000200000000) |
55 | #define FW_FEATURE_OPALv3 ASM_CONST(0x0000000400000000) | ||
55 | 56 | ||
56 | #ifndef __ASSEMBLY__ | 57 | #ifndef __ASSEMBLY__ |
57 | 58 | ||
@@ -69,7 +70,8 @@ enum { | |||
69 | FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY | | 70 | FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY | |
70 | FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN, | 71 | FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN, |
71 | FW_FEATURE_PSERIES_ALWAYS = 0, | 72 | FW_FEATURE_PSERIES_ALWAYS = 0, |
72 | FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2, | 73 | FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2 | |
74 | FW_FEATURE_OPALv3, | ||
73 | FW_FEATURE_POWERNV_ALWAYS = 0, | 75 | FW_FEATURE_POWERNV_ALWAYS = 0, |
74 | FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1, | 76 | FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1, |
75 | FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1, | 77 | FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1, |
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index d615b28dda82..ba713f166fa5 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h | |||
@@ -96,11 +96,12 @@ static inline bool arch_irqs_disabled(void) | |||
96 | #endif | 96 | #endif |
97 | 97 | ||
98 | #define hard_irq_disable() do { \ | 98 | #define hard_irq_disable() do { \ |
99 | u8 _was_enabled = get_paca()->soft_enabled; \ | ||
99 | __hard_irq_disable(); \ | 100 | __hard_irq_disable(); \ |
100 | if (local_paca->soft_enabled) \ | ||
101 | trace_hardirqs_off(); \ | ||
102 | get_paca()->soft_enabled = 0; \ | 101 | get_paca()->soft_enabled = 0; \ |
103 | get_paca()->irq_happened |= PACA_IRQ_HARD_DIS; \ | 102 | get_paca()->irq_happened |= PACA_IRQ_HARD_DIS; \ |
103 | if (_was_enabled) \ | ||
104 | trace_hardirqs_off(); \ | ||
104 | } while(0) | 105 | } while(0) |
105 | 106 | ||
106 | static inline bool lazy_irq_pending(void) | 107 | static inline bool lazy_irq_pending(void) |
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h index b6c8b58b1d76..cbb9305ab15a 100644 --- a/arch/powerpc/include/asm/opal.h +++ b/arch/powerpc/include/asm/opal.h | |||
@@ -243,7 +243,8 @@ enum OpalMCE_TlbErrorType { | |||
243 | 243 | ||
244 | enum OpalThreadStatus { | 244 | enum OpalThreadStatus { |
245 | OPAL_THREAD_INACTIVE = 0x0, | 245 | OPAL_THREAD_INACTIVE = 0x0, |
246 | OPAL_THREAD_STARTED = 0x1 | 246 | OPAL_THREAD_STARTED = 0x1, |
247 | OPAL_THREAD_UNAVAILABLE = 0x2 /* opal-v3 */ | ||
247 | }; | 248 | }; |
248 | 249 | ||
249 | enum OpalPciBusCompare { | 250 | enum OpalPciBusCompare { |
@@ -563,6 +564,8 @@ extern void opal_nvram_init(void); | |||
563 | 564 | ||
564 | extern int opal_machine_check(struct pt_regs *regs); | 565 | extern int opal_machine_check(struct pt_regs *regs); |
565 | 566 | ||
567 | extern void opal_shutdown(void); | ||
568 | |||
566 | #endif /* __ASSEMBLY__ */ | 569 | #endif /* __ASSEMBLY__ */ |
567 | 570 | ||
568 | #endif /* __OPAL_H */ | 571 | #endif /* __OPAL_H */ |
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h index 91acb12bac92..b66ae722a8e9 100644 --- a/arch/powerpc/include/asm/pgalloc-64.h +++ b/arch/powerpc/include/asm/pgalloc-64.h | |||
@@ -186,7 +186,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, | |||
186 | 186 | ||
187 | static inline pgtable_t pmd_pgtable(pmd_t pmd) | 187 | static inline pgtable_t pmd_pgtable(pmd_t pmd) |
188 | { | 188 | { |
189 | return (pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE); | 189 | return (pgtable_t)(pmd_val(pmd) & ~PMD_MASKED_BITS); |
190 | } | 190 | } |
191 | 191 | ||
192 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, | 192 | static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, |
diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h index 3e13e23e4fdf..d836d945068d 100644 --- a/arch/powerpc/include/asm/pte-hash64-64k.h +++ b/arch/powerpc/include/asm/pte-hash64-64k.h | |||
@@ -47,7 +47,7 @@ | |||
47 | * generic accessors and iterators here | 47 | * generic accessors and iterators here |
48 | */ | 48 | */ |
49 | #define __real_pte(e,p) ((real_pte_t) { \ | 49 | #define __real_pte(e,p) ((real_pte_t) { \ |
50 | (e), ((e) & _PAGE_COMBO) ? \ | 50 | (e), (pte_val(e) & _PAGE_COMBO) ? \ |
51 | (pte_val(*((p) + PTRS_PER_PTE))) : 0 }) | 51 | (pte_val(*((p) + PTRS_PER_PTE))) : 0 }) |
52 | #define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? \ | 52 | #define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? \ |
53 | (((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf)) | 53 | (((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf)) |
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index a8bc2bb4adc9..34fd70488d83 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h | |||
@@ -264,6 +264,8 @@ extern void rtas_progress(char *s, unsigned short hex); | |||
264 | extern void rtas_initialize(void); | 264 | extern void rtas_initialize(void); |
265 | extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data); | 265 | extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data); |
266 | extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data); | 266 | extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data); |
267 | extern int rtas_online_cpus_mask(cpumask_var_t cpus); | ||
268 | extern int rtas_offline_cpus_mask(cpumask_var_t cpus); | ||
267 | extern int rtas_ibm_suspend_me(struct rtas_args *); | 269 | extern int rtas_ibm_suspend_me(struct rtas_args *); |
268 | 270 | ||
269 | struct rtc_time; | 271 | struct rtc_time; |
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index 8ceea14d6fe4..ba7b1973866e 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h | |||
@@ -97,7 +97,7 @@ static inline struct thread_info *current_thread_info(void) | |||
97 | #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */ | 97 | #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */ |
98 | #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ | 98 | #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ |
99 | #define TIF_SINGLESTEP 8 /* singlestepping active */ | 99 | #define TIF_SINGLESTEP 8 /* singlestepping active */ |
100 | #define TIF_MEMDIE 9 /* is terminating due to OOM killer */ | 100 | #define TIF_NOHZ 9 /* in adaptive nohz mode */ |
101 | #define TIF_SECCOMP 10 /* secure computing */ | 101 | #define TIF_SECCOMP 10 /* secure computing */ |
102 | #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */ | 102 | #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */ |
103 | #define TIF_NOERROR 12 /* Force successful syscall return */ | 103 | #define TIF_NOERROR 12 /* Force successful syscall return */ |
@@ -106,6 +106,7 @@ static inline struct thread_info *current_thread_info(void) | |||
106 | #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */ | 106 | #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */ |
107 | #define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation | 107 | #define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation |
108 | for stack store? */ | 108 | for stack store? */ |
109 | #define TIF_MEMDIE 17 /* is terminating due to OOM killer */ | ||
109 | 110 | ||
110 | /* as above, but as bit values */ | 111 | /* as above, but as bit values */ |
111 | #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) | 112 | #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) |
@@ -124,8 +125,10 @@ static inline struct thread_info *current_thread_info(void) | |||
124 | #define _TIF_UPROBE (1<<TIF_UPROBE) | 125 | #define _TIF_UPROBE (1<<TIF_UPROBE) |
125 | #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) | 126 | #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) |
126 | #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE) | 127 | #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE) |
128 | #define _TIF_NOHZ (1<<TIF_NOHZ) | ||
127 | #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ | 129 | #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ |
128 | _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT) | 130 | _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \ |
131 | _TIF_NOHZ) | ||
129 | 132 | ||
130 | #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ | 133 | #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ |
131 | _TIF_NOTIFY_RESUME | _TIF_UPROBE) | 134 | _TIF_NOTIFY_RESUME | _TIF_UPROBE) |
diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h index 5a7510e9d09d..dc590919f8eb 100644 --- a/arch/powerpc/include/asm/udbg.h +++ b/arch/powerpc/include/asm/udbg.h | |||
@@ -52,6 +52,7 @@ extern void __init udbg_init_40x_realmode(void); | |||
52 | extern void __init udbg_init_cpm(void); | 52 | extern void __init udbg_init_cpm(void); |
53 | extern void __init udbg_init_usbgecko(void); | 53 | extern void __init udbg_init_usbgecko(void); |
54 | extern void __init udbg_init_wsp(void); | 54 | extern void __init udbg_init_wsp(void); |
55 | extern void __init udbg_init_memcons(void); | ||
55 | extern void __init udbg_init_ehv_bc(void); | 56 | extern void __init udbg_init_ehv_bc(void); |
56 | extern void __init udbg_init_ps3gelic(void); | 57 | extern void __init udbg_init_ps3gelic(void); |
57 | extern void __init udbg_init_debug_opal_raw(void); | 58 | extern void __init udbg_init_debug_opal_raw(void); |
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index e514de57a125..d22e73e4618b 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S | |||
@@ -439,8 +439,6 @@ ret_from_fork: | |||
439 | ret_from_kernel_thread: | 439 | ret_from_kernel_thread: |
440 | REST_NVGPRS(r1) | 440 | REST_NVGPRS(r1) |
441 | bl schedule_tail | 441 | bl schedule_tail |
442 | li r3,0 | ||
443 | stw r3,0(r1) | ||
444 | mtlr r14 | 442 | mtlr r14 |
445 | mr r3,r15 | 443 | mr r3,r15 |
446 | PPC440EP_ERR42 | 444 | PPC440EP_ERR42 |
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 915fbb4fc2fe..51cfb8fc301f 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <asm/irqflags.h> | 33 | #include <asm/irqflags.h> |
34 | #include <asm/ftrace.h> | 34 | #include <asm/ftrace.h> |
35 | #include <asm/hw_irq.h> | 35 | #include <asm/hw_irq.h> |
36 | #include <asm/context_tracking.h> | ||
36 | 37 | ||
37 | /* | 38 | /* |
38 | * System calls. | 39 | * System calls. |
@@ -376,8 +377,6 @@ _GLOBAL(ret_from_fork) | |||
376 | _GLOBAL(ret_from_kernel_thread) | 377 | _GLOBAL(ret_from_kernel_thread) |
377 | bl .schedule_tail | 378 | bl .schedule_tail |
378 | REST_NVGPRS(r1) | 379 | REST_NVGPRS(r1) |
379 | li r3,0 | ||
380 | std r3,0(r1) | ||
381 | ld r14, 0(r14) | 380 | ld r14, 0(r14) |
382 | mtlr r14 | 381 | mtlr r14 |
383 | mr r3,r15 | 382 | mr r3,r15 |
@@ -634,7 +633,7 @@ _GLOBAL(ret_from_except_lite) | |||
634 | andi. r0,r4,_TIF_NEED_RESCHED | 633 | andi. r0,r4,_TIF_NEED_RESCHED |
635 | beq 1f | 634 | beq 1f |
636 | bl .restore_interrupts | 635 | bl .restore_interrupts |
637 | bl .schedule | 636 | SCHEDULE_USER |
638 | b .ret_from_except_lite | 637 | b .ret_from_except_lite |
639 | 638 | ||
640 | 1: bl .save_nvgprs | 639 | 1: bl .save_nvgprs |
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index 42a756eec9ff..645170a07ada 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S | |||
@@ -489,7 +489,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | |||
489 | */ | 489 | */ |
490 | 490 | ||
491 | mfspr r14,SPRN_DBSR /* check single-step/branch taken */ | 491 | mfspr r14,SPRN_DBSR /* check single-step/branch taken */ |
492 | andis. r15,r14,DBSR_IC@h | 492 | andis. r15,r14,(DBSR_IC|DBSR_BT)@h |
493 | beq+ 1f | 493 | beq+ 1f |
494 | 494 | ||
495 | LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e) | 495 | LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e) |
@@ -500,7 +500,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | |||
500 | bge+ cr1,1f | 500 | bge+ cr1,1f |
501 | 501 | ||
502 | /* here it looks like we got an inappropriate debug exception. */ | 502 | /* here it looks like we got an inappropriate debug exception. */ |
503 | lis r14,DBSR_IC@h /* clear the IC event */ | 503 | lis r14,(DBSR_IC|DBSR_BT)@h /* clear the event */ |
504 | rlwinm r11,r11,0,~MSR_DE /* clear DE in the CSRR1 value */ | 504 | rlwinm r11,r11,0,~MSR_DE /* clear DE in the CSRR1 value */ |
505 | mtspr SPRN_DBSR,r14 | 505 | mtspr SPRN_DBSR,r14 |
506 | mtspr SPRN_CSRR1,r11 | 506 | mtspr SPRN_CSRR1,r11 |
@@ -555,7 +555,7 @@ kernel_dbg_exc: | |||
555 | */ | 555 | */ |
556 | 556 | ||
557 | mfspr r14,SPRN_DBSR /* check single-step/branch taken */ | 557 | mfspr r14,SPRN_DBSR /* check single-step/branch taken */ |
558 | andis. r15,r14,DBSR_IC@h | 558 | andis. r15,r14,(DBSR_IC|DBSR_BT)@h |
559 | beq+ 1f | 559 | beq+ 1f |
560 | 560 | ||
561 | LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e) | 561 | LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e) |
@@ -566,7 +566,7 @@ kernel_dbg_exc: | |||
566 | bge+ cr1,1f | 566 | bge+ cr1,1f |
567 | 567 | ||
568 | /* here it looks like we got an inappropriate debug exception. */ | 568 | /* here it looks like we got an inappropriate debug exception. */ |
569 | lis r14,DBSR_IC@h /* clear the IC event */ | 569 | lis r14,(DBSR_IC|DBSR_BT)@h /* clear the event */ |
570 | rlwinm r11,r11,0,~MSR_DE /* clear DE in the DSRR1 value */ | 570 | rlwinm r11,r11,0,~MSR_DE /* clear DE in the DSRR1 value */ |
571 | mtspr SPRN_DBSR,r14 | 571 | mtspr SPRN_DBSR,r14 |
572 | mtspr SPRN_DSRR1,r11 | 572 | mtspr SPRN_DSRR1,r11 |
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c index 466a2908bb63..611acdf30096 100644 --- a/arch/powerpc/kernel/machine_kexec_64.c +++ b/arch/powerpc/kernel/machine_kexec_64.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/errno.h> | 17 | #include <linux/errno.h> |
18 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
19 | #include <linux/cpu.h> | 19 | #include <linux/cpu.h> |
20 | #include <linux/hardirq.h> | ||
20 | 21 | ||
21 | #include <asm/page.h> | 22 | #include <asm/page.h> |
22 | #include <asm/current.h> | 23 | #include <asm/current.h> |
@@ -335,10 +336,13 @@ void default_machine_kexec(struct kimage *image) | |||
335 | pr_debug("kexec: Starting switchover sequence.\n"); | 336 | pr_debug("kexec: Starting switchover sequence.\n"); |
336 | 337 | ||
337 | /* switch to a statically allocated stack. Based on irq stack code. | 338 | /* switch to a statically allocated stack. Based on irq stack code. |
339 | * We set up preempt_count to avoid using VMX in memcpy. | ||
338 | * XXX: the task struct will likely be invalid once we do the copy! | 340 | * XXX: the task struct will likely be invalid once we do the copy! |
339 | */ | 341 | */ |
340 | kexec_stack.thread_info.task = current_thread_info()->task; | 342 | kexec_stack.thread_info.task = current_thread_info()->task; |
341 | kexec_stack.thread_info.flags = 0; | 343 | kexec_stack.thread_info.flags = 0; |
344 | kexec_stack.thread_info.preempt_count = HARDIRQ_OFFSET; | ||
345 | kexec_stack.thread_info.cpu = current_thread_info()->cpu; | ||
342 | 346 | ||
343 | /* We need a static PACA, too; copy this CPU's PACA over and switch to | 347 | /* We need a static PACA, too; copy this CPU's PACA over and switch to |
344 | * it. Also poison per_cpu_offset to catch anyone using non-static | 348 | * it. Also poison per_cpu_offset to catch anyone using non-static |
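Editor's note: the new comment above explains that preempt_count is raised so the VMX-optimized memcpy path is not taken once the task struct may be invalid. A toy illustration of how bumping preempt_count by HARDIRQ_OFFSET flips in_interrupt() and forces the plain path; the helpers and mask below are simplified assumptions, not the kernel definitions:

#include <stdio.h>

#define HARDIRQ_OFFSET 0x10000          /* 1 << HARDIRQ_SHIFT, as in the kernel */

static unsigned int preempt_count;

/* Simplified: anything above the preempt bits counts as interrupt context. */
static int in_interrupt(void)
{
        return preempt_count & ~0xffU;
}

static void copy_image(void)
{
        if (in_interrupt())
                printf("plain memcpy, no VMX state touched\n");
        else
                printf("VMX-accelerated memcpy\n");
}

int main(void)
{
        copy_image();                    /* normal context: fast path */
        preempt_count += HARDIRQ_OFFSET; /* what the kexec stack setup does */
        copy_image();                    /* now forced onto the plain path */
        return 0;
}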
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index 19e096bd0e73..e469f30e6eeb 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S | |||
@@ -657,6 +657,17 @@ _GLOBAL(__ucmpdi2) | |||
657 | li r3,2 | 657 | li r3,2 |
658 | blr | 658 | blr |
659 | 659 | ||
660 | _GLOBAL(__bswapdi2) | ||
661 | rotlwi r9,r4,8 | ||
662 | rotlwi r10,r3,8 | ||
663 | rlwimi r9,r4,24,0,7 | ||
664 | rlwimi r10,r3,24,0,7 | ||
665 | rlwimi r9,r4,24,16,23 | ||
666 | rlwimi r10,r3,24,16,23 | ||
667 | mr r3,r9 | ||
668 | mr r4,r10 | ||
669 | blr | ||
670 | |||
660 | _GLOBAL(abs) | 671 | _GLOBAL(abs) |
661 | srawi r4,r3,31 | 672 | srawi r4,r3,31 |
662 | xor r3,r3,r4 | 673 | xor r3,r3,r4 |
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index 5cfa8008693b..6820e45f557b 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S | |||
@@ -234,6 +234,17 @@ _GLOBAL(__flush_dcache_icache) | |||
234 | isync | 234 | isync |
235 | blr | 235 | blr |
236 | 236 | ||
237 | _GLOBAL(__bswapdi2) | ||
238 | srdi r8,r3,32 | ||
239 | rlwinm r7,r3,8,0xffffffff | ||
240 | rlwimi r7,r3,24,0,7 | ||
241 | rlwinm r9,r8,8,0xffffffff | ||
242 | rlwimi r7,r3,24,16,23 | ||
243 | rlwimi r9,r8,24,0,7 | ||
244 | rlwimi r9,r8,24,16,23 | ||
245 | sldi r7,r7,32 | ||
246 | or r3,r7,r9 | ||
247 | blr | ||
237 | 248 | ||
238 | #if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) | 249 | #if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) |
239 | /* | 250 | /* |
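Editor's note: the two assembly routines added above (misc_32.S and misc_64.S) provide __bswapdi2, the libgcc-style helper some compilers emit for 64-bit byte swaps. A plain C sketch of the operation they implement, for illustration only — the kernel uses the assembly above:

#include <stdint.h>
#include <stdio.h>

static uint64_t bswapdi2(uint64_t x)
{
        uint32_t hi = (uint32_t)(x >> 32);
        uint32_t lo = (uint32_t)x;

        /* Byte-reverse each 32-bit half, as the rotlwi/rlwimi sequences do. */
        hi = (hi >> 24) | ((hi >> 8) & 0x0000ff00) |
             ((hi << 8) & 0x00ff0000) | (hi << 24);
        lo = (lo >> 24) | ((lo >> 8) & 0x0000ff00) |
             ((lo << 8) & 0x00ff0000) | (lo << 24);

        /* The swapped halves trade places, like r3/r4 in the 32-bit version. */
        return ((uint64_t)lo << 32) | hi;
}

int main(void)
{
        /* Prints 0807060504030201. */
        printf("%016llx\n", (unsigned long long)bswapdi2(0x0102030405060708ULL));
        return 0;
}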
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index f5c5c90799a7..6053f037ef0a 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c | |||
@@ -359,7 +359,6 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp, | |||
359 | enum pci_mmap_state mmap_state, | 359 | enum pci_mmap_state mmap_state, |
360 | int write_combine) | 360 | int write_combine) |
361 | { | 361 | { |
362 | unsigned long prot = pgprot_val(protection); | ||
363 | 362 | ||
364 | /* Write combine is always 0 on non-memory space mappings. On | 363 | /* Write combine is always 0 on non-memory space mappings. On |
365 | * memory space, if the user didn't pass 1, we check for a | 364 | * memory space, if the user didn't pass 1, we check for a |
@@ -376,9 +375,9 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp, | |||
376 | 375 | ||
377 | /* XXX would be nice to have a way to ask for write-through */ | 376 | /* XXX would be nice to have a way to ask for write-through */ |
378 | if (write_combine) | 377 | if (write_combine) |
379 | return pgprot_noncached_wc(prot); | 378 | return pgprot_noncached_wc(protection); |
380 | else | 379 | else |
381 | return pgprot_noncached(prot); | 380 | return pgprot_noncached(protection); |
382 | } | 381 | } |
383 | 382 | ||
384 | /* | 383 | /* |
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c index 78b8766fd79e..c29666586998 100644 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ b/arch/powerpc/kernel/ppc_ksyms.c | |||
@@ -143,7 +143,8 @@ EXPORT_SYMBOL(__lshrdi3); | |||
143 | int __ucmpdi2(unsigned long long, unsigned long long); | 143 | int __ucmpdi2(unsigned long long, unsigned long long); |
144 | EXPORT_SYMBOL(__ucmpdi2); | 144 | EXPORT_SYMBOL(__ucmpdi2); |
145 | #endif | 145 | #endif |
146 | 146 | long long __bswapdi2(long long); | |
147 | EXPORT_SYMBOL(__bswapdi2); | ||
147 | EXPORT_SYMBOL(memcpy); | 148 | EXPORT_SYMBOL(memcpy); |
148 | EXPORT_SYMBOL(memset); | 149 | EXPORT_SYMBOL(memset); |
149 | EXPORT_SYMBOL(memmove); | 150 | EXPORT_SYMBOL(memmove); |
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index ceb4e7b62cf4..a902723fdc69 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
@@ -339,6 +339,13 @@ static void set_debug_reg_defaults(struct thread_struct *thread) | |||
339 | 339 | ||
340 | static void prime_debug_regs(struct thread_struct *thread) | 340 | static void prime_debug_regs(struct thread_struct *thread) |
341 | { | 341 | { |
342 | /* | ||
343 | * We could have inherited MSR_DE from userspace, since | ||
344 | * it doesn't get cleared on exception entry. Make sure | ||
345 | * MSR_DE is clear before we enable any debug events. | ||
346 | */ | ||
347 | mtmsr(mfmsr() & ~MSR_DE); | ||
348 | |||
342 | mtspr(SPRN_IAC1, thread->iac1); | 349 | mtspr(SPRN_IAC1, thread->iac1); |
343 | mtspr(SPRN_IAC2, thread->iac2); | 350 | mtspr(SPRN_IAC2, thread->iac2); |
344 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 | 351 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 |
@@ -971,6 +978,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, | |||
971 | * do some house keeping and then return from the fork or clone | 978 | * do some house keeping and then return from the fork or clone |
972 | * system call, using the stack frame created above. | 979 | * system call, using the stack frame created above. |
973 | */ | 980 | */ |
981 | ((unsigned long *)sp)[0] = 0; | ||
974 | sp -= sizeof(struct pt_regs); | 982 | sp -= sizeof(struct pt_regs); |
975 | kregs = (struct pt_regs *) sp; | 983 | kregs = (struct pt_regs *) sp; |
976 | sp -= STACK_FRAME_OVERHEAD; | 984 | sp -= STACK_FRAME_OVERHEAD; |
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 3b14d320e69f..98c2fc198712 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <trace/syscall.h> | 32 | #include <trace/syscall.h> |
33 | #include <linux/hw_breakpoint.h> | 33 | #include <linux/hw_breakpoint.h> |
34 | #include <linux/perf_event.h> | 34 | #include <linux/perf_event.h> |
35 | #include <linux/context_tracking.h> | ||
35 | 36 | ||
36 | #include <asm/uaccess.h> | 37 | #include <asm/uaccess.h> |
37 | #include <asm/page.h> | 38 | #include <asm/page.h> |
@@ -1788,6 +1789,8 @@ long do_syscall_trace_enter(struct pt_regs *regs) | |||
1788 | { | 1789 | { |
1789 | long ret = 0; | 1790 | long ret = 0; |
1790 | 1791 | ||
1792 | user_exit(); | ||
1793 | |||
1791 | secure_computing_strict(regs->gpr[0]); | 1794 | secure_computing_strict(regs->gpr[0]); |
1792 | 1795 | ||
1793 | if (test_thread_flag(TIF_SYSCALL_TRACE) && | 1796 | if (test_thread_flag(TIF_SYSCALL_TRACE) && |
@@ -1832,4 +1835,6 @@ void do_syscall_trace_leave(struct pt_regs *regs) | |||
1832 | step = test_thread_flag(TIF_SINGLESTEP); | 1835 | step = test_thread_flag(TIF_SINGLESTEP); |
1833 | if (step || test_thread_flag(TIF_SYSCALL_TRACE)) | 1836 | if (step || test_thread_flag(TIF_SYSCALL_TRACE)) |
1834 | tracehook_report_syscall_exit(regs, step); | 1837 | tracehook_report_syscall_exit(regs, step); |
1838 | |||
1839 | user_enter(); | ||
1835 | } | 1840 | } |
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 1fd6e7b2f390..52add6f3e201 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/init.h> | 19 | #include <linux/init.h> |
20 | #include <linux/capability.h> | 20 | #include <linux/capability.h> |
21 | #include <linux/delay.h> | 21 | #include <linux/delay.h> |
22 | #include <linux/cpu.h> | ||
22 | #include <linux/smp.h> | 23 | #include <linux/smp.h> |
23 | #include <linux/completion.h> | 24 | #include <linux/completion.h> |
24 | #include <linux/cpumask.h> | 25 | #include <linux/cpumask.h> |
@@ -807,6 +808,95 @@ static void rtas_percpu_suspend_me(void *info) | |||
807 | __rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1); | 808 | __rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1); |
808 | } | 809 | } |
809 | 810 | ||
811 | enum rtas_cpu_state { | ||
812 | DOWN, | ||
813 | UP, | ||
814 | }; | ||
815 | |||
816 | #ifndef CONFIG_SMP | ||
817 | static int rtas_cpu_state_change_mask(enum rtas_cpu_state state, | ||
818 | cpumask_var_t cpus) | ||
819 | { | ||
820 | if (!cpumask_empty(cpus)) { | ||
821 | cpumask_clear(cpus); | ||
822 | return -EINVAL; | ||
823 | } else | ||
824 | return 0; | ||
825 | } | ||
826 | #else | ||
827 | /* On return, cpumask will be altered to indicate which CPUs changed. | ||
828 | * CPUs whose state changed will be set in the mask; | ||
829 | * CPUs whose state is unchanged will be cleared in the mask. */ | ||
830 | static int rtas_cpu_state_change_mask(enum rtas_cpu_state state, | ||
831 | cpumask_var_t cpus) | ||
832 | { | ||
833 | int cpu; | ||
834 | int cpuret = 0; | ||
835 | int ret = 0; | ||
836 | |||
837 | if (cpumask_empty(cpus)) | ||
838 | return 0; | ||
839 | |||
840 | for_each_cpu(cpu, cpus) { | ||
841 | switch (state) { | ||
842 | case DOWN: | ||
843 | cpuret = cpu_down(cpu); | ||
844 | break; | ||
845 | case UP: | ||
846 | cpuret = cpu_up(cpu); | ||
847 | break; | ||
848 | } | ||
849 | if (cpuret) { | ||
850 | pr_debug("%s: cpu_%s for cpu#%d returned %d.\n", | ||
851 | __func__, | ||
852 | ((state == UP) ? "up" : "down"), | ||
853 | cpu, cpuret); | ||
854 | if (!ret) | ||
855 | ret = cpuret; | ||
856 | if (state == UP) { | ||
857 | /* clear bits for unchanged cpus, return */ | ||
858 | cpumask_shift_right(cpus, cpus, cpu); | ||
859 | cpumask_shift_left(cpus, cpus, cpu); | ||
860 | break; | ||
861 | } else { | ||
862 | /* clear bit for unchanged cpu, continue */ | ||
863 | cpumask_clear_cpu(cpu, cpus); | ||
864 | } | ||
865 | } | ||
866 | } | ||
867 | |||
868 | return ret; | ||
869 | } | ||
870 | #endif | ||
871 | |||
872 | int rtas_online_cpus_mask(cpumask_var_t cpus) | ||
873 | { | ||
874 | int ret; | ||
875 | |||
876 | ret = rtas_cpu_state_change_mask(UP, cpus); | ||
877 | |||
878 | if (ret) { | ||
879 | cpumask_var_t tmp_mask; | ||
880 | |||
881 | if (!alloc_cpumask_var(&tmp_mask, GFP_TEMPORARY)) | ||
882 | return ret; | ||
883 | |||
884 | /* Use tmp_mask to preserve cpus mask from first failure */ | ||
885 | cpumask_copy(tmp_mask, cpus); | ||
886 | rtas_offline_cpus_mask(tmp_mask); | ||
887 | free_cpumask_var(tmp_mask); | ||
888 | } | ||
889 | |||
890 | return ret; | ||
891 | } | ||
892 | EXPORT_SYMBOL(rtas_online_cpus_mask); | ||
893 | |||
894 | int rtas_offline_cpus_mask(cpumask_var_t cpus) | ||
895 | { | ||
896 | return rtas_cpu_state_change_mask(DOWN, cpus); | ||
897 | } | ||
898 | EXPORT_SYMBOL(rtas_offline_cpus_mask); | ||
899 | |||
810 | int rtas_ibm_suspend_me(struct rtas_args *args) | 900 | int rtas_ibm_suspend_me(struct rtas_args *args) |
811 | { | 901 | { |
812 | long state; | 902 | long state; |
@@ -814,6 +904,8 @@ int rtas_ibm_suspend_me(struct rtas_args *args) | |||
814 | unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; | 904 | unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; |
815 | struct rtas_suspend_me_data data; | 905 | struct rtas_suspend_me_data data; |
816 | DECLARE_COMPLETION_ONSTACK(done); | 906 | DECLARE_COMPLETION_ONSTACK(done); |
907 | cpumask_var_t offline_mask; | ||
908 | int cpuret; | ||
817 | 909 | ||
818 | if (!rtas_service_present("ibm,suspend-me")) | 910 | if (!rtas_service_present("ibm,suspend-me")) |
819 | return -ENOSYS; | 911 | return -ENOSYS; |
@@ -837,11 +929,24 @@ int rtas_ibm_suspend_me(struct rtas_args *args) | |||
837 | return 0; | 929 | return 0; |
838 | } | 930 | } |
839 | 931 | ||
932 | if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY)) | ||
933 | return -ENOMEM; | ||
934 | |||
840 | atomic_set(&data.working, 0); | 935 | atomic_set(&data.working, 0); |
841 | atomic_set(&data.done, 0); | 936 | atomic_set(&data.done, 0); |
842 | atomic_set(&data.error, 0); | 937 | atomic_set(&data.error, 0); |
843 | data.token = rtas_token("ibm,suspend-me"); | 938 | data.token = rtas_token("ibm,suspend-me"); |
844 | data.complete = &done; | 939 | data.complete = &done; |
940 | |||
941 | /* All present CPUs must be online */ | ||
942 | cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask); | ||
943 | cpuret = rtas_online_cpus_mask(offline_mask); | ||
944 | if (cpuret) { | ||
945 | pr_err("%s: Could not bring present CPUs online.\n", __func__); | ||
946 | atomic_set(&data.error, cpuret); | ||
947 | goto out; | ||
948 | } | ||
949 | |||
845 | stop_topology_update(); | 950 | stop_topology_update(); |
846 | 951 | ||
847 | /* Call function on all CPUs. One of us will make the | 952 | /* Call function on all CPUs. One of us will make the |
@@ -857,6 +962,14 @@ int rtas_ibm_suspend_me(struct rtas_args *args) | |||
857 | 962 | ||
858 | start_topology_update(); | 963 | start_topology_update(); |
859 | 964 | ||
965 | /* Take down CPUs not online prior to suspend */ | ||
966 | cpuret = rtas_offline_cpus_mask(offline_mask); | ||
967 | if (cpuret) | ||
968 | pr_warn("%s: Could not restore CPUs to offline state.\n", | ||
969 | __func__); | ||
970 | |||
971 | out: | ||
972 | free_cpumask_var(offline_mask); | ||
860 | return atomic_read(&data.error); | 973 | return atomic_read(&data.error); |
861 | } | 974 | } |
862 | #else /* CONFIG_PPC_PSERIES */ | 975 | #else /* CONFIG_PPC_PSERIES */ |
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c index 5b3022470126..2f3cdb01506d 100644 --- a/arch/powerpc/kernel/rtas_flash.c +++ b/arch/powerpc/kernel/rtas_flash.c | |||
@@ -89,6 +89,7 @@ | |||
89 | 89 | ||
90 | /* Array sizes */ | 90 | /* Array sizes */ |
91 | #define VALIDATE_BUF_SIZE 4096 | 91 | #define VALIDATE_BUF_SIZE 4096 |
92 | #define VALIDATE_MSG_LEN 256 | ||
92 | #define RTAS_MSG_MAXLEN 64 | 93 | #define RTAS_MSG_MAXLEN 64 |
93 | 94 | ||
94 | /* Quirk - RTAS requires 4k list length and block size */ | 95 | /* Quirk - RTAS requires 4k list length and block size */ |
@@ -466,7 +467,7 @@ static void validate_flash(struct rtas_validate_flash_t *args_buf) | |||
466 | } | 467 | } |
467 | 468 | ||
468 | static int get_validate_flash_msg(struct rtas_validate_flash_t *args_buf, | 469 | static int get_validate_flash_msg(struct rtas_validate_flash_t *args_buf, |
469 | char *msg) | 470 | char *msg, int msglen) |
470 | { | 471 | { |
471 | int n; | 472 | int n; |
472 | 473 | ||
@@ -474,7 +475,8 @@ static int get_validate_flash_msg(struct rtas_validate_flash_t *args_buf, | |||
474 | n = sprintf(msg, "%d\n", args_buf->update_results); | 475 | n = sprintf(msg, "%d\n", args_buf->update_results); |
475 | if ((args_buf->update_results >= VALIDATE_CUR_UNKNOWN) || | 476 | if ((args_buf->update_results >= VALIDATE_CUR_UNKNOWN) || |
476 | (args_buf->update_results == VALIDATE_TMP_UPDATE)) | 477 | (args_buf->update_results == VALIDATE_TMP_UPDATE)) |
477 | n += sprintf(msg + n, "%s\n", args_buf->buf); | 478 | n += snprintf(msg + n, msglen - n, "%s\n", |
479 | args_buf->buf); | ||
478 | } else { | 480 | } else { |
479 | n = sprintf(msg, "%d\n", args_buf->status); | 481 | n = sprintf(msg, "%d\n", args_buf->status); |
480 | } | 482 | } |
@@ -486,11 +488,11 @@ static ssize_t validate_flash_read(struct file *file, char __user *buf, | |||
486 | { | 488 | { |
487 | struct rtas_validate_flash_t *const args_buf = | 489 | struct rtas_validate_flash_t *const args_buf = |
488 | &rtas_validate_flash_data; | 490 | &rtas_validate_flash_data; |
489 | char msg[RTAS_MSG_MAXLEN]; | 491 | char msg[VALIDATE_MSG_LEN]; |
490 | int msglen; | 492 | int msglen; |
491 | 493 | ||
492 | mutex_lock(&rtas_validate_flash_mutex); | 494 | mutex_lock(&rtas_validate_flash_mutex); |
493 | msglen = get_validate_flash_msg(args_buf, msg); | 495 | msglen = get_validate_flash_msg(args_buf, msg, VALIDATE_MSG_LEN); |
494 | mutex_unlock(&rtas_validate_flash_mutex); | 496 | mutex_unlock(&rtas_validate_flash_mutex); |
495 | 497 | ||
496 | return simple_read_from_buffer(buf, count, ppos, msg, msglen); | 498 | return simple_read_from_buffer(buf, count, ppos, msg, msglen); |
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c index cf12eae02de5..577a8aa69c6e 100644 --- a/arch/powerpc/kernel/signal.c +++ b/arch/powerpc/kernel/signal.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/signal.h> | 13 | #include <linux/signal.h> |
14 | #include <linux/uprobes.h> | 14 | #include <linux/uprobes.h> |
15 | #include <linux/key.h> | 15 | #include <linux/key.h> |
16 | #include <linux/context_tracking.h> | ||
16 | #include <asm/hw_breakpoint.h> | 17 | #include <asm/hw_breakpoint.h> |
17 | #include <asm/uaccess.h> | 18 | #include <asm/uaccess.h> |
18 | #include <asm/unistd.h> | 19 | #include <asm/unistd.h> |
@@ -24,7 +25,7 @@ | |||
24 | * through debug.exception-trace sysctl. | 25 | * through debug.exception-trace sysctl. |
25 | */ | 26 | */ |
26 | 27 | ||
27 | int show_unhandled_signals = 0; | 28 | int show_unhandled_signals = 1; |
28 | 29 | ||
29 | /* | 30 | /* |
30 | * Allocate space for the signal frame | 31 | * Allocate space for the signal frame |
@@ -159,6 +160,8 @@ static int do_signal(struct pt_regs *regs) | |||
159 | 160 | ||
160 | void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) | 161 | void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) |
161 | { | 162 | { |
163 | user_exit(); | ||
164 | |||
162 | if (thread_info_flags & _TIF_UPROBE) | 165 | if (thread_info_flags & _TIF_UPROBE) |
163 | uprobe_notify_resume(regs); | 166 | uprobe_notify_resume(regs); |
164 | 167 | ||
@@ -169,4 +172,6 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) | |||
169 | clear_thread_flag(TIF_NOTIFY_RESUME); | 172 | clear_thread_flag(TIF_NOTIFY_RESUME); |
170 | tracehook_notify_resume(regs); | 173 | tracehook_notify_resume(regs); |
171 | } | 174 | } |
175 | |||
176 | user_enter(); | ||
172 | } | 177 | } |
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 83efa2f7d926..a7a648f6b750 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/kdebug.h> | 35 | #include <linux/kdebug.h> |
36 | #include <linux/debugfs.h> | 36 | #include <linux/debugfs.h> |
37 | #include <linux/ratelimit.h> | 37 | #include <linux/ratelimit.h> |
38 | #include <linux/context_tracking.h> | ||
38 | 39 | ||
39 | #include <asm/emulated_ops.h> | 40 | #include <asm/emulated_ops.h> |
40 | #include <asm/pgtable.h> | 41 | #include <asm/pgtable.h> |
@@ -667,6 +668,7 @@ int machine_check_generic(struct pt_regs *regs) | |||
667 | 668 | ||
668 | void machine_check_exception(struct pt_regs *regs) | 669 | void machine_check_exception(struct pt_regs *regs) |
669 | { | 670 | { |
671 | enum ctx_state prev_state = exception_enter(); | ||
670 | int recover = 0; | 672 | int recover = 0; |
671 | 673 | ||
672 | __get_cpu_var(irq_stat).mce_exceptions++; | 674 | __get_cpu_var(irq_stat).mce_exceptions++; |
@@ -683,7 +685,7 @@ void machine_check_exception(struct pt_regs *regs) | |||
683 | recover = cur_cpu_spec->machine_check(regs); | 685 | recover = cur_cpu_spec->machine_check(regs); |
684 | 686 | ||
685 | if (recover > 0) | 687 | if (recover > 0) |
686 | return; | 688 | goto bail; |
687 | 689 | ||
688 | #if defined(CONFIG_8xx) && defined(CONFIG_PCI) | 690 | #if defined(CONFIG_8xx) && defined(CONFIG_PCI) |
689 | /* the qspan pci read routines can cause machine checks -- Cort | 691 | /* the qspan pci read routines can cause machine checks -- Cort |
@@ -693,20 +695,23 @@ void machine_check_exception(struct pt_regs *regs) | |||
693 | * -- BenH | 695 | * -- BenH |
694 | */ | 696 | */ |
695 | bad_page_fault(regs, regs->dar, SIGBUS); | 697 | bad_page_fault(regs, regs->dar, SIGBUS); |
696 | return; | 698 | goto bail; |
697 | #endif | 699 | #endif |
698 | 700 | ||
699 | if (debugger_fault_handler(regs)) | 701 | if (debugger_fault_handler(regs)) |
700 | return; | 702 | goto bail; |
701 | 703 | ||
702 | if (check_io_access(regs)) | 704 | if (check_io_access(regs)) |
703 | return; | 705 | goto bail; |
704 | 706 | ||
705 | die("Machine check", regs, SIGBUS); | 707 | die("Machine check", regs, SIGBUS); |
706 | 708 | ||
707 | /* Must die if the interrupt is not recoverable */ | 709 | /* Must die if the interrupt is not recoverable */ |
708 | if (!(regs->msr & MSR_RI)) | 710 | if (!(regs->msr & MSR_RI)) |
709 | panic("Unrecoverable Machine check"); | 711 | panic("Unrecoverable Machine check"); |
712 | |||
713 | bail: | ||
714 | exception_exit(prev_state); | ||
710 | } | 715 | } |
711 | 716 | ||
712 | void SMIException(struct pt_regs *regs) | 717 | void SMIException(struct pt_regs *regs) |
@@ -716,20 +721,29 @@ void SMIException(struct pt_regs *regs) | |||
716 | 721 | ||
717 | void unknown_exception(struct pt_regs *regs) | 722 | void unknown_exception(struct pt_regs *regs) |
718 | { | 723 | { |
724 | enum ctx_state prev_state = exception_enter(); | ||
725 | |||
719 | printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n", | 726 | printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n", |
720 | regs->nip, regs->msr, regs->trap); | 727 | regs->nip, regs->msr, regs->trap); |
721 | 728 | ||
722 | _exception(SIGTRAP, regs, 0, 0); | 729 | _exception(SIGTRAP, regs, 0, 0); |
730 | |||
731 | exception_exit(prev_state); | ||
723 | } | 732 | } |
724 | 733 | ||
725 | void instruction_breakpoint_exception(struct pt_regs *regs) | 734 | void instruction_breakpoint_exception(struct pt_regs *regs) |
726 | { | 735 | { |
736 | enum ctx_state prev_state = exception_enter(); | ||
737 | |||
727 | if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5, | 738 | if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5, |
728 | 5, SIGTRAP) == NOTIFY_STOP) | 739 | 5, SIGTRAP) == NOTIFY_STOP) |
729 | return; | 740 | goto bail; |
730 | if (debugger_iabr_match(regs)) | 741 | if (debugger_iabr_match(regs)) |
731 | return; | 742 | goto bail; |
732 | _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); | 743 | _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); |
744 | |||
745 | bail: | ||
746 | exception_exit(prev_state); | ||
733 | } | 747 | } |
734 | 748 | ||
735 | void RunModeException(struct pt_regs *regs) | 749 | void RunModeException(struct pt_regs *regs) |
@@ -739,15 +753,20 @@ void RunModeException(struct pt_regs *regs) | |||
739 | 753 | ||
740 | void __kprobes single_step_exception(struct pt_regs *regs) | 754 | void __kprobes single_step_exception(struct pt_regs *regs) |
741 | { | 755 | { |
756 | enum ctx_state prev_state = exception_enter(); | ||
757 | |||
742 | clear_single_step(regs); | 758 | clear_single_step(regs); |
743 | 759 | ||
744 | if (notify_die(DIE_SSTEP, "single_step", regs, 5, | 760 | if (notify_die(DIE_SSTEP, "single_step", regs, 5, |
745 | 5, SIGTRAP) == NOTIFY_STOP) | 761 | 5, SIGTRAP) == NOTIFY_STOP) |
746 | return; | 762 | goto bail; |
747 | if (debugger_sstep(regs)) | 763 | if (debugger_sstep(regs)) |
748 | return; | 764 | goto bail; |
749 | 765 | ||
750 | _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); | 766 | _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); |
767 | |||
768 | bail: | ||
769 | exception_exit(prev_state); | ||
751 | } | 770 | } |
752 | 771 | ||
753 | /* | 772 | /* |
@@ -1005,6 +1024,7 @@ int is_valid_bugaddr(unsigned long addr) | |||
1005 | 1024 | ||
1006 | void __kprobes program_check_exception(struct pt_regs *regs) | 1025 | void __kprobes program_check_exception(struct pt_regs *regs) |
1007 | { | 1026 | { |
1027 | enum ctx_state prev_state = exception_enter(); | ||
1008 | unsigned int reason = get_reason(regs); | 1028 | unsigned int reason = get_reason(regs); |
1009 | extern int do_mathemu(struct pt_regs *regs); | 1029 | extern int do_mathemu(struct pt_regs *regs); |
1010 | 1030 | ||
@@ -1014,26 +1034,26 @@ void __kprobes program_check_exception(struct pt_regs *regs) | |||
1014 | if (reason & REASON_FP) { | 1034 | if (reason & REASON_FP) { |
1015 | /* IEEE FP exception */ | 1035 | /* IEEE FP exception */ |
1016 | parse_fpe(regs); | 1036 | parse_fpe(regs); |
1017 | return; | 1037 | goto bail; |
1018 | } | 1038 | } |
1019 | if (reason & REASON_TRAP) { | 1039 | if (reason & REASON_TRAP) { |
1020 | /* Debugger is first in line to stop recursive faults in | 1040 | /* Debugger is first in line to stop recursive faults in |
1021 | * rcu_lock, notify_die, or atomic_notifier_call_chain */ | 1041 | * rcu_lock, notify_die, or atomic_notifier_call_chain */ |
1022 | if (debugger_bpt(regs)) | 1042 | if (debugger_bpt(regs)) |
1023 | return; | 1043 | goto bail; |
1024 | 1044 | ||
1025 | /* trap exception */ | 1045 | /* trap exception */ |
1026 | if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP) | 1046 | if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP) |
1027 | == NOTIFY_STOP) | 1047 | == NOTIFY_STOP) |
1028 | return; | 1048 | goto bail; |
1029 | 1049 | ||
1030 | if (!(regs->msr & MSR_PR) && /* not user-mode */ | 1050 | if (!(regs->msr & MSR_PR) && /* not user-mode */ |
1031 | report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) { | 1051 | report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) { |
1032 | regs->nip += 4; | 1052 | regs->nip += 4; |
1033 | return; | 1053 | goto bail; |
1034 | } | 1054 | } |
1035 | _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); | 1055 | _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); |
1036 | return; | 1056 | goto bail; |
1037 | } | 1057 | } |
1038 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | 1058 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
1039 | if (reason & REASON_TM) { | 1059 | if (reason & REASON_TM) { |
@@ -1049,7 +1069,7 @@ void __kprobes program_check_exception(struct pt_regs *regs) | |||
1049 | if (!user_mode(regs) && | 1069 | if (!user_mode(regs) && |
1050 | report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) { | 1070 | report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) { |
1051 | regs->nip += 4; | 1071 | regs->nip += 4; |
1052 | return; | 1072 | goto bail; |
1053 | } | 1073 | } |
1054 | /* If usermode caused this, it's done something illegal and | 1074 | /* If usermode caused this, it's done something illegal and |
1055 | * gets a SIGILL slap on the wrist. We call it an illegal | 1075 | * gets a SIGILL slap on the wrist. We call it an illegal |
@@ -1059,7 +1079,7 @@ void __kprobes program_check_exception(struct pt_regs *regs) | |||
1059 | */ | 1079 | */ |
1060 | if (user_mode(regs)) { | 1080 | if (user_mode(regs)) { |
1061 | _exception(SIGILL, regs, ILL_ILLOPN, regs->nip); | 1081 | _exception(SIGILL, regs, ILL_ILLOPN, regs->nip); |
1062 | return; | 1082 | goto bail; |
1063 | } else { | 1083 | } else { |
1064 | printk(KERN_EMERG "Unexpected TM Bad Thing exception " | 1084 | printk(KERN_EMERG "Unexpected TM Bad Thing exception " |
1065 | "at %lx (msr 0x%x)\n", regs->nip, reason); | 1085 | "at %lx (msr 0x%x)\n", regs->nip, reason); |
@@ -1083,16 +1103,16 @@ void __kprobes program_check_exception(struct pt_regs *regs) | |||
1083 | switch (do_mathemu(regs)) { | 1103 | switch (do_mathemu(regs)) { |
1084 | case 0: | 1104 | case 0: |
1085 | emulate_single_step(regs); | 1105 | emulate_single_step(regs); |
1086 | return; | 1106 | goto bail; |
1087 | case 1: { | 1107 | case 1: { |
1088 | int code = 0; | 1108 | int code = 0; |
1089 | code = __parse_fpscr(current->thread.fpscr.val); | 1109 | code = __parse_fpscr(current->thread.fpscr.val); |
1090 | _exception(SIGFPE, regs, code, regs->nip); | 1110 | _exception(SIGFPE, regs, code, regs->nip); |
1091 | return; | 1111 | goto bail; |
1092 | } | 1112 | } |
1093 | case -EFAULT: | 1113 | case -EFAULT: |
1094 | _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); | 1114 | _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); |
1095 | return; | 1115 | goto bail; |
1096 | } | 1116 | } |
1097 | /* fall through on any other errors */ | 1117 | /* fall through on any other errors */ |
1098 | #endif /* CONFIG_MATH_EMULATION */ | 1118 | #endif /* CONFIG_MATH_EMULATION */ |
@@ -1103,10 +1123,10 @@ void __kprobes program_check_exception(struct pt_regs *regs) | |||
1103 | case 0: | 1123 | case 0: |
1104 | regs->nip += 4; | 1124 | regs->nip += 4; |
1105 | emulate_single_step(regs); | 1125 | emulate_single_step(regs); |
1106 | return; | 1126 | goto bail; |
1107 | case -EFAULT: | 1127 | case -EFAULT: |
1108 | _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); | 1128 | _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); |
1109 | return; | 1129 | goto bail; |
1110 | } | 1130 | } |
1111 | } | 1131 | } |
1112 | 1132 | ||
@@ -1114,10 +1134,14 @@ void __kprobes program_check_exception(struct pt_regs *regs) | |||
1114 | _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); | 1134 | _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); |
1115 | else | 1135 | else |
1116 | _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); | 1136 | _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); |
1137 | |||
1138 | bail: | ||
1139 | exception_exit(prev_state); | ||
1117 | } | 1140 | } |
1118 | 1141 | ||
1119 | void alignment_exception(struct pt_regs *regs) | 1142 | void alignment_exception(struct pt_regs *regs) |
1120 | { | 1143 | { |
1144 | enum ctx_state prev_state = exception_enter(); | ||
1121 | int sig, code, fixed = 0; | 1145 | int sig, code, fixed = 0; |
1122 | 1146 | ||
1123 | /* We restore the interrupt state now */ | 1147 | /* We restore the interrupt state now */ |
@@ -1131,7 +1155,7 @@ void alignment_exception(struct pt_regs *regs) | |||
1131 | if (fixed == 1) { | 1155 | if (fixed == 1) { |
1132 | regs->nip += 4; /* skip over emulated instruction */ | 1156 | regs->nip += 4; /* skip over emulated instruction */ |
1133 | emulate_single_step(regs); | 1157 | emulate_single_step(regs); |
1134 | return; | 1158 | goto bail; |
1135 | } | 1159 | } |
1136 | 1160 | ||
1137 | /* Operand address was bad */ | 1161 | /* Operand address was bad */ |
@@ -1146,6 +1170,9 @@ void alignment_exception(struct pt_regs *regs) | |||
1146 | _exception(sig, regs, code, regs->dar); | 1170 | _exception(sig, regs, code, regs->dar); |
1147 | else | 1171 | else |
1148 | bad_page_fault(regs, regs->dar, sig); | 1172 | bad_page_fault(regs, regs->dar, sig); |
1173 | |||
1174 | bail: | ||
1175 | exception_exit(prev_state); | ||
1149 | } | 1176 | } |
1150 | 1177 | ||
1151 | void StackOverflow(struct pt_regs *regs) | 1178 | void StackOverflow(struct pt_regs *regs) |
@@ -1174,23 +1201,32 @@ void trace_syscall(struct pt_regs *regs) | |||
1174 | 1201 | ||
1175 | void kernel_fp_unavailable_exception(struct pt_regs *regs) | 1202 | void kernel_fp_unavailable_exception(struct pt_regs *regs) |
1176 | { | 1203 | { |
1204 | enum ctx_state prev_state = exception_enter(); | ||
1205 | |||
1177 | printk(KERN_EMERG "Unrecoverable FP Unavailable Exception " | 1206 | printk(KERN_EMERG "Unrecoverable FP Unavailable Exception " |
1178 | "%lx at %lx\n", regs->trap, regs->nip); | 1207 | "%lx at %lx\n", regs->trap, regs->nip); |
1179 | die("Unrecoverable FP Unavailable Exception", regs, SIGABRT); | 1208 | die("Unrecoverable FP Unavailable Exception", regs, SIGABRT); |
1209 | |||
1210 | exception_exit(prev_state); | ||
1180 | } | 1211 | } |
1181 | 1212 | ||
1182 | void altivec_unavailable_exception(struct pt_regs *regs) | 1213 | void altivec_unavailable_exception(struct pt_regs *regs) |
1183 | { | 1214 | { |
1215 | enum ctx_state prev_state = exception_enter(); | ||
1216 | |||
1184 | if (user_mode(regs)) { | 1217 | if (user_mode(regs)) { |
1185 | /* A user program has executed an altivec instruction, | 1218 | /* A user program has executed an altivec instruction, |
1186 | but this kernel doesn't support altivec. */ | 1219 | but this kernel doesn't support altivec. */ |
1187 | _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); | 1220 | _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); |
1188 | return; | 1221 | goto bail; |
1189 | } | 1222 | } |
1190 | 1223 | ||
1191 | printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception " | 1224 | printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception " |
1192 | "%lx at %lx\n", regs->trap, regs->nip); | 1225 | "%lx at %lx\n", regs->trap, regs->nip); |
1193 | die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT); | 1226 | die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT); |
1227 | |||
1228 | bail: | ||
1229 | exception_exit(prev_state); | ||
1194 | } | 1230 | } |
1195 | 1231 | ||
1196 | void vsx_unavailable_exception(struct pt_regs *regs) | 1232 | void vsx_unavailable_exception(struct pt_regs *regs) |
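Editor's note: the traps.c hunks above wrap each handler in exception_enter()/exception_exit() for context tracking, which is why every early return becomes "goto bail" — the exit call must run on every path. A stripped-down illustration of the pattern; the ctx_state bookkeeping here is a stand-in, not the kernel's context-tracking implementation:

#include <stdio.h>

enum ctx_state { CTX_KERNEL, CTX_USER };        /* stand-in for the kernel enum */

static enum ctx_state cur_state = CTX_USER;

static enum ctx_state exception_enter(void)
{
        enum ctx_state prev = cur_state;

        cur_state = CTX_KERNEL;         /* note that we left user context */
        return prev;
}

static void exception_exit(enum ctx_state prev)
{
        cur_state = prev;               /* restore the interrupted state */
}

static void example_exception(int recovered)
{
        enum ctx_state prev_state = exception_enter();

        if (recovered)
                goto bail;              /* not "return": the exit below must run */

        printf("unrecovered case handled\n");

bail:
        exception_exit(prev_state);
}

int main(void)
{
        example_exception(1);
        example_exception(0);
        return 0;
}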
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c index 13b867093499..9d3fdcd66290 100644 --- a/arch/powerpc/kernel/udbg.c +++ b/arch/powerpc/kernel/udbg.c | |||
@@ -64,6 +64,9 @@ void __init udbg_early_init(void) | |||
64 | udbg_init_usbgecko(); | 64 | udbg_init_usbgecko(); |
65 | #elif defined(CONFIG_PPC_EARLY_DEBUG_WSP) | 65 | #elif defined(CONFIG_PPC_EARLY_DEBUG_WSP) |
66 | udbg_init_wsp(); | 66 | udbg_init_wsp(); |
67 | #elif defined(CONFIG_PPC_EARLY_DEBUG_MEMCONS) | ||
68 | /* In memory console */ | ||
69 | udbg_init_memcons(); | ||
67 | #elif defined(CONFIG_PPC_EARLY_DEBUG_EHV_BC) | 70 | #elif defined(CONFIG_PPC_EARLY_DEBUG_EHV_BC) |
68 | udbg_init_ehv_bc(); | 71 | udbg_init_ehv_bc(); |
69 | #elif defined(CONFIG_PPC_EARLY_DEBUG_PS3GELIC) | 72 | #elif defined(CONFIG_PPC_EARLY_DEBUG_PS3GELIC) |
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 229951ffc351..8726779e1409 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/perf_event.h> | 32 | #include <linux/perf_event.h> |
33 | #include <linux/magic.h> | 33 | #include <linux/magic.h> |
34 | #include <linux/ratelimit.h> | 34 | #include <linux/ratelimit.h> |
35 | #include <linux/context_tracking.h> | ||
35 | 36 | ||
36 | #include <asm/firmware.h> | 37 | #include <asm/firmware.h> |
37 | #include <asm/page.h> | 38 | #include <asm/page.h> |
@@ -196,6 +197,7 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault) | |||
196 | int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, | 197 | int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, |
197 | unsigned long error_code) | 198 | unsigned long error_code) |
198 | { | 199 | { |
200 | enum ctx_state prev_state = exception_enter(); | ||
199 | struct vm_area_struct * vma; | 201 | struct vm_area_struct * vma; |
200 | struct mm_struct *mm = current->mm; | 202 | struct mm_struct *mm = current->mm; |
201 | unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; | 203 | unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; |
@@ -204,6 +206,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, | |||
204 | int trap = TRAP(regs); | 206 | int trap = TRAP(regs); |
205 | int is_exec = trap == 0x400; | 207 | int is_exec = trap == 0x400; |
206 | int fault; | 208 | int fault; |
209 | int rc = 0; | ||
207 | 210 | ||
208 | #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE)) | 211 | #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE)) |
209 | /* | 212 | /* |
@@ -230,28 +233,30 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, | |||
230 | * look at it | 233 | * look at it |
231 | */ | 234 | */ |
232 | if (error_code & ICSWX_DSI_UCT) { | 235 | if (error_code & ICSWX_DSI_UCT) { |
233 | int rc = acop_handle_fault(regs, address, error_code); | 236 | rc = acop_handle_fault(regs, address, error_code); |
234 | if (rc) | 237 | if (rc) |
235 | return rc; | 238 | goto bail; |
236 | } | 239 | } |
237 | #endif /* CONFIG_PPC_ICSWX */ | 240 | #endif /* CONFIG_PPC_ICSWX */ |
238 | 241 | ||
239 | if (notify_page_fault(regs)) | 242 | if (notify_page_fault(regs)) |
240 | return 0; | 243 | goto bail; |
241 | 244 | ||
242 | if (unlikely(debugger_fault_handler(regs))) | 245 | if (unlikely(debugger_fault_handler(regs))) |
243 | return 0; | 246 | goto bail; |
244 | 247 | ||
245 | /* On a kernel SLB miss we can only check for a valid exception entry */ | 248 | /* On a kernel SLB miss we can only check for a valid exception entry */ |
246 | if (!user_mode(regs) && (address >= TASK_SIZE)) | 249 | if (!user_mode(regs) && (address >= TASK_SIZE)) { |
247 | return SIGSEGV; | 250 | rc = SIGSEGV; |
251 | goto bail; | ||
252 | } | ||
248 | 253 | ||
249 | #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \ | 254 | #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \ |
250 | defined(CONFIG_PPC_BOOK3S_64)) | 255 | defined(CONFIG_PPC_BOOK3S_64)) |
251 | if (error_code & DSISR_DABRMATCH) { | 256 | if (error_code & DSISR_DABRMATCH) { |
252 | /* breakpoint match */ | 257 | /* breakpoint match */ |
253 | do_break(regs, address, error_code); | 258 | do_break(regs, address, error_code); |
254 | return 0; | 259 | goto bail; |
255 | } | 260 | } |
256 | #endif | 261 | #endif |
257 | 262 | ||
@@ -260,8 +265,10 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, | |||
260 | local_irq_enable(); | 265 | local_irq_enable(); |
261 | 266 | ||
262 | if (in_atomic() || mm == NULL) { | 267 | if (in_atomic() || mm == NULL) { |
263 | if (!user_mode(regs)) | 268 | if (!user_mode(regs)) { |
264 | return SIGSEGV; | 269 | rc = SIGSEGV; |
270 | goto bail; | ||
271 | } | ||
265 | /* in_atomic() in user mode is really bad, | 272 | /* in_atomic() in user mode is really bad, |
266 | as is current->mm == NULL. */ | 273 | as is current->mm == NULL. */ |
267 | printk(KERN_EMERG "Page fault in user mode with " | 274 | printk(KERN_EMERG "Page fault in user mode with " |
@@ -417,9 +424,11 @@ good_area: | |||
417 | */ | 424 | */ |
418 | fault = handle_mm_fault(mm, vma, address, flags); | 425 | fault = handle_mm_fault(mm, vma, address, flags); |
419 | if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) { | 426 | if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) { |
420 | int rc = mm_fault_error(regs, address, fault); | 427 | rc = mm_fault_error(regs, address, fault); |
421 | if (rc >= MM_FAULT_RETURN) | 428 | if (rc >= MM_FAULT_RETURN) |
422 | return rc; | 429 | goto bail; |
430 | else | ||
431 | rc = 0; | ||
423 | } | 432 | } |
424 | 433 | ||
425 | /* | 434 | /* |
@@ -454,7 +463,7 @@ good_area: | |||
454 | } | 463 | } |
455 | 464 | ||
456 | up_read(&mm->mmap_sem); | 465 | up_read(&mm->mmap_sem); |
457 | return 0; | 466 | goto bail; |
458 | 467 | ||
459 | bad_area: | 468 | bad_area: |
460 | up_read(&mm->mmap_sem); | 469 | up_read(&mm->mmap_sem); |
@@ -463,7 +472,7 @@ bad_area_nosemaphore: | |||
463 | /* User mode accesses cause a SIGSEGV */ | 472 | /* User mode accesses cause a SIGSEGV */ |
464 | if (user_mode(regs)) { | 473 | if (user_mode(regs)) { |
465 | _exception(SIGSEGV, regs, code, address); | 474 | _exception(SIGSEGV, regs, code, address); |
466 | return 0; | 475 | goto bail; |
467 | } | 476 | } |
468 | 477 | ||
469 | if (is_exec && (error_code & DSISR_PROTFAULT)) | 478 | if (is_exec && (error_code & DSISR_PROTFAULT)) |
@@ -471,7 +480,11 @@ bad_area_nosemaphore: | |||
471 | " page (%lx) - exploit attempt? (uid: %d)\n", | 480 | " page (%lx) - exploit attempt? (uid: %d)\n", |
472 | address, from_kuid(&init_user_ns, current_uid())); | 481 | address, from_kuid(&init_user_ns, current_uid())); |
473 | 482 | ||
474 | return SIGSEGV; | 483 | rc = SIGSEGV; |
484 | |||
485 | bail: | ||
486 | exception_exit(prev_state); | ||
487 | return rc; | ||
475 | 488 | ||
476 | } | 489 | } |
477 | 490 | ||
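The do_page_fault() hunks above replace every early return with a jump to a single bail label so that the exception_exit() added at the bottom always pairs with the exception_enter() taken near the top of the handler. A minimal userspace sketch of that single-exit shape, with stand-in enter/exit functions rather than the kernel's real context-tracking API, might look like this:

#include <stdio.h>

/* Stand-ins for the kernel's exception_enter()/exception_exit(); the real
 * helpers switch the context-tracking state and hand back the previous
 * state so it can be restored on the way out. */
static int exception_enter(void)
{
        puts("enter: save previous context-tracking state");
        return 1;
}

static void exception_exit(int prev_state)
{
        printf("exit: restore state %d\n", prev_state);
}

static int handle_fault(int error_code)
{
        int prev_state = exception_enter();
        int rc = 0;

        if (error_code < 0) {   /* early failure path */
                rc = -1;
                goto bail;
        }
        if (error_code == 0)    /* nothing to do */
                goto bail;

        rc = 42;                /* normal completion */

bail:
        /* Every path funnels through here, so enter/exit always pair up. */
        exception_exit(prev_state);
        return rc;
}

int main(void)
{
        printf("rc=%d\n", handle_fault(7));
        printf("rc=%d\n", handle_fault(-3));
        return 0;
}

The benefit of the shape is that any new failure path only needs a goto, and the paired exit call can never be skipped.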
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 88ac0eeaadde..e303a6d74e3a 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/init.h> | 33 | #include <linux/init.h> |
34 | #include <linux/signal.h> | 34 | #include <linux/signal.h> |
35 | #include <linux/memblock.h> | 35 | #include <linux/memblock.h> |
36 | #include <linux/context_tracking.h> | ||
36 | 37 | ||
37 | #include <asm/processor.h> | 38 | #include <asm/processor.h> |
38 | #include <asm/pgtable.h> | 39 | #include <asm/pgtable.h> |
@@ -954,6 +955,7 @@ void hash_failure_debug(unsigned long ea, unsigned long access, | |||
954 | */ | 955 | */ |
955 | int hash_page(unsigned long ea, unsigned long access, unsigned long trap) | 956 | int hash_page(unsigned long ea, unsigned long access, unsigned long trap) |
956 | { | 957 | { |
958 | enum ctx_state prev_state = exception_enter(); | ||
957 | pgd_t *pgdir; | 959 | pgd_t *pgdir; |
958 | unsigned long vsid; | 960 | unsigned long vsid; |
959 | struct mm_struct *mm; | 961 | struct mm_struct *mm; |
@@ -973,7 +975,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) | |||
973 | mm = current->mm; | 975 | mm = current->mm; |
974 | if (! mm) { | 976 | if (! mm) { |
975 | DBG_LOW(" user region with no mm !\n"); | 977 | DBG_LOW(" user region with no mm !\n"); |
976 | return 1; | 978 | rc = 1; |
979 | goto bail; | ||
977 | } | 980 | } |
978 | psize = get_slice_psize(mm, ea); | 981 | psize = get_slice_psize(mm, ea); |
979 | ssize = user_segment_size(ea); | 982 | ssize = user_segment_size(ea); |
@@ -992,19 +995,23 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) | |||
992 | /* Not a valid range | 995 | /* Not a valid range |
993 | * Send the problem up to do_page_fault | 996 | * Send the problem up to do_page_fault |
994 | */ | 997 | */ |
995 | return 1; | 998 | rc = 1; |
999 | goto bail; | ||
996 | } | 1000 | } |
997 | DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid); | 1001 | DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid); |
998 | 1002 | ||
999 | /* Bad address. */ | 1003 | /* Bad address. */ |
1000 | if (!vsid) { | 1004 | if (!vsid) { |
1001 | DBG_LOW("Bad address!\n"); | 1005 | DBG_LOW("Bad address!\n"); |
1002 | return 1; | 1006 | rc = 1; |
1007 | goto bail; | ||
1003 | } | 1008 | } |
1004 | /* Get pgdir */ | 1009 | /* Get pgdir */ |
1005 | pgdir = mm->pgd; | 1010 | pgdir = mm->pgd; |
1006 | if (pgdir == NULL) | 1011 | if (pgdir == NULL) { |
1007 | return 1; | 1012 | rc = 1; |
1013 | goto bail; | ||
1014 | } | ||
1008 | 1015 | ||
1009 | /* Check CPU locality */ | 1016 | /* Check CPU locality */ |
1010 | tmp = cpumask_of(smp_processor_id()); | 1017 | tmp = cpumask_of(smp_processor_id()); |
@@ -1027,7 +1034,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) | |||
1027 | ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugeshift); | 1034 | ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugeshift); |
1028 | if (ptep == NULL || !pte_present(*ptep)) { | 1035 | if (ptep == NULL || !pte_present(*ptep)) { |
1029 | DBG_LOW(" no PTE !\n"); | 1036 | DBG_LOW(" no PTE !\n"); |
1030 | return 1; | 1037 | rc = 1; |
1038 | goto bail; | ||
1031 | } | 1039 | } |
1032 | 1040 | ||
1033 | /* Add _PAGE_PRESENT to the required access perm */ | 1041 | /* Add _PAGE_PRESENT to the required access perm */ |
@@ -1038,13 +1046,16 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) | |||
1038 | */ | 1046 | */ |
1039 | if (access & ~pte_val(*ptep)) { | 1047 | if (access & ~pte_val(*ptep)) { |
1040 | DBG_LOW(" no access !\n"); | 1048 | DBG_LOW(" no access !\n"); |
1041 | return 1; | 1049 | rc = 1; |
1050 | goto bail; | ||
1042 | } | 1051 | } |
1043 | 1052 | ||
1044 | #ifdef CONFIG_HUGETLB_PAGE | 1053 | #ifdef CONFIG_HUGETLB_PAGE |
1045 | if (hugeshift) | 1054 | if (hugeshift) { |
1046 | return __hash_page_huge(ea, access, vsid, ptep, trap, local, | 1055 | rc = __hash_page_huge(ea, access, vsid, ptep, trap, local, |
1047 | ssize, hugeshift, psize); | 1056 | ssize, hugeshift, psize); |
1057 | goto bail; | ||
1058 | } | ||
1048 | #endif /* CONFIG_HUGETLB_PAGE */ | 1059 | #endif /* CONFIG_HUGETLB_PAGE */ |
1049 | 1060 | ||
1050 | #ifndef CONFIG_PPC_64K_PAGES | 1061 | #ifndef CONFIG_PPC_64K_PAGES |
@@ -1124,6 +1135,9 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) | |||
1124 | pte_val(*(ptep + PTRS_PER_PTE))); | 1135 | pte_val(*(ptep + PTRS_PER_PTE))); |
1125 | #endif | 1136 | #endif |
1126 | DBG_LOW(" -> rc=%d\n", rc); | 1137 | DBG_LOW(" -> rc=%d\n", rc); |
1138 | |||
1139 | bail: | ||
1140 | exception_exit(prev_state); | ||
1127 | return rc; | 1141 | return rc; |
1128 | } | 1142 | } |
1129 | EXPORT_SYMBOL_GPL(hash_page); | 1143 | EXPORT_SYMBOL_GPL(hash_page); |
@@ -1259,6 +1273,8 @@ void flush_hash_range(unsigned long number, int local) | |||
1259 | */ | 1273 | */ |
1260 | void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc) | 1274 | void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc) |
1261 | { | 1275 | { |
1276 | enum ctx_state prev_state = exception_enter(); | ||
1277 | |||
1262 | if (user_mode(regs)) { | 1278 | if (user_mode(regs)) { |
1263 | #ifdef CONFIG_PPC_SUBPAGE_PROT | 1279 | #ifdef CONFIG_PPC_SUBPAGE_PROT |
1264 | if (rc == -2) | 1280 | if (rc == -2) |
@@ -1268,6 +1284,8 @@ void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc) | |||
1268 | _exception(SIGBUS, regs, BUS_ADRERR, address); | 1284 | _exception(SIGBUS, regs, BUS_ADRERR, address); |
1269 | } else | 1285 | } else |
1270 | bad_page_fault(regs, address, SIGBUS); | 1286 | bad_page_fault(regs, address, SIGBUS); |
1287 | |||
1288 | exception_exit(prev_state); | ||
1271 | } | 1289 | } |
1272 | 1290 | ||
1273 | long hpte_insert_repeating(unsigned long hash, unsigned long vpn, | 1291 | long hpte_insert_repeating(unsigned long hash, unsigned long vpn, |
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index c2787bf779ca..a90b9c458990 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c | |||
@@ -215,7 +215,8 @@ static void __meminit vmemmap_create_mapping(unsigned long start, | |||
215 | unsigned long phys) | 215 | unsigned long phys) |
216 | { | 216 | { |
217 | int mapped = htab_bolt_mapping(start, start + page_size, phys, | 217 | int mapped = htab_bolt_mapping(start, start + page_size, phys, |
218 | PAGE_KERNEL, mmu_vmemmap_psize, | 218 | pgprot_val(PAGE_KERNEL), |
219 | mmu_vmemmap_psize, | ||
219 | mmu_kernel_ssize); | 220 | mmu_kernel_ssize); |
220 | BUG_ON(mapped < 0); | 221 | BUG_ON(mapped < 0); |
221 | } | 222 | } |
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index c627843c5b2e..426180b84978 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c | |||
@@ -13,11 +13,13 @@ | |||
13 | #include <linux/perf_event.h> | 13 | #include <linux/perf_event.h> |
14 | #include <linux/percpu.h> | 14 | #include <linux/percpu.h> |
15 | #include <linux/hardirq.h> | 15 | #include <linux/hardirq.h> |
16 | #include <linux/uaccess.h> | ||
16 | #include <asm/reg.h> | 17 | #include <asm/reg.h> |
17 | #include <asm/pmc.h> | 18 | #include <asm/pmc.h> |
18 | #include <asm/machdep.h> | 19 | #include <asm/machdep.h> |
19 | #include <asm/firmware.h> | 20 | #include <asm/firmware.h> |
20 | #include <asm/ptrace.h> | 21 | #include <asm/ptrace.h> |
22 | #include <asm/code-patching.h> | ||
21 | 23 | ||
22 | #define BHRB_MAX_ENTRIES 32 | 24 | #define BHRB_MAX_ENTRIES 32 |
23 | #define BHRB_TARGET 0x0000000000000002 | 25 | #define BHRB_TARGET 0x0000000000000002 |
@@ -100,6 +102,10 @@ static inline int siar_valid(struct pt_regs *regs) | |||
100 | return 1; | 102 | return 1; |
101 | } | 103 | } |
102 | 104 | ||
105 | static inline void power_pmu_bhrb_enable(struct perf_event *event) {} | ||
106 | static inline void power_pmu_bhrb_disable(struct perf_event *event) {} | ||
107 | void power_pmu_flush_branch_stack(void) {} | ||
108 | static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {} | ||
103 | #endif /* CONFIG_PPC32 */ | 109 | #endif /* CONFIG_PPC32 */ |
104 | 110 | ||
105 | static bool regs_use_siar(struct pt_regs *regs) | 111 | static bool regs_use_siar(struct pt_regs *regs) |
@@ -308,6 +314,159 @@ static inline int siar_valid(struct pt_regs *regs) | |||
308 | return 1; | 314 | return 1; |
309 | } | 315 | } |
310 | 316 | ||
317 | |||
318 | /* Reset all possible BHRB entries */ | ||
319 | static void power_pmu_bhrb_reset(void) | ||
320 | { | ||
321 | asm volatile(PPC_CLRBHRB); | ||
322 | } | ||
323 | |||
324 | static void power_pmu_bhrb_enable(struct perf_event *event) | ||
325 | { | ||
326 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | ||
327 | |||
328 | if (!ppmu->bhrb_nr) | ||
329 | return; | ||
330 | |||
331 | /* Clear BHRB if we changed task context to avoid data leaks */ | ||
332 | if (event->ctx->task && cpuhw->bhrb_context != event->ctx) { | ||
333 | power_pmu_bhrb_reset(); | ||
334 | cpuhw->bhrb_context = event->ctx; | ||
335 | } | ||
336 | cpuhw->bhrb_users++; | ||
337 | } | ||
338 | |||
339 | static void power_pmu_bhrb_disable(struct perf_event *event) | ||
340 | { | ||
341 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | ||
342 | |||
343 | if (!ppmu->bhrb_nr) | ||
344 | return; | ||
345 | |||
346 | cpuhw->bhrb_users--; | ||
347 | WARN_ON_ONCE(cpuhw->bhrb_users < 0); | ||
348 | |||
349 | if (!cpuhw->disabled && !cpuhw->bhrb_users) { | ||
350 | /* BHRB cannot be turned off when other | ||
351 | * events are active on the PMU. | ||
352 | */ | ||
353 | |||
354 | /* avoid stale pointer */ | ||
355 | cpuhw->bhrb_context = NULL; | ||
356 | } | ||
357 | } | ||
358 | |||
359 | /* Called from ctxsw to prevent one process's branch entries from | ||
360 | * mingling with the other process's entries during context switch. | ||
361 | */ | ||
362 | void power_pmu_flush_branch_stack(void) | ||
363 | { | ||
364 | if (ppmu->bhrb_nr) | ||
365 | power_pmu_bhrb_reset(); | ||
366 | } | ||
367 | /* Calculate the to address for a branch */ | ||
368 | static __u64 power_pmu_bhrb_to(u64 addr) | ||
369 | { | ||
370 | unsigned int instr; | ||
371 | int ret; | ||
372 | __u64 target; | ||
373 | |||
374 | if (is_kernel_addr(addr)) | ||
375 | return branch_target((unsigned int *)addr); | ||
376 | |||
377 | /* Userspace: need to copy the instruction here, then translate it */ | ||
378 | pagefault_disable(); | ||
379 | ret = __get_user_inatomic(instr, (unsigned int __user *)addr); | ||
380 | if (ret) { | ||
381 | pagefault_enable(); | ||
382 | return 0; | ||
383 | } | ||
384 | pagefault_enable(); | ||
385 | |||
386 | target = branch_target(&instr); | ||
387 | if ((!target) || (instr & BRANCH_ABSOLUTE)) | ||
388 | return target; | ||
389 | |||
390 | /* Translate relative branch target from kernel to user address */ | ||
391 | return target - (unsigned long)&instr + addr; | ||
392 | } | ||
393 | |||
394 | /* Processing BHRB entries */ | ||
395 | void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) | ||
396 | { | ||
397 | u64 val; | ||
398 | u64 addr; | ||
399 | int r_index, u_index, pred; | ||
400 | |||
401 | r_index = 0; | ||
402 | u_index = 0; | ||
403 | while (r_index < ppmu->bhrb_nr) { | ||
404 | /* Assembly read function */ | ||
405 | val = read_bhrb(r_index++); | ||
406 | if (!val) | ||
407 | /* Terminal marker: End of valid BHRB entries */ | ||
408 | break; | ||
409 | else { | ||
410 | addr = val & BHRB_EA; | ||
411 | pred = val & BHRB_PREDICTION; | ||
412 | |||
413 | if (!addr) | ||
414 | /* invalid entry */ | ||
415 | continue; | ||
416 | |||
417 | /* Branches are read most recent first (ie. mfbhrb 0 is | ||
418 | * the most recent branch). | ||
419 | * There are two types of valid entries: | ||
420 | * 1) a target entry which is the to address of a | ||
421 | * computed goto like a blr,bctr,btar. The next | ||
422 | * entry read from the bhrb will be branch | ||
423 | * corresponding to this target (ie. the actual | ||
424 | * blr/bctr/btar instruction). | ||
425 | * 2) a from address which is an actual branch. If a | ||
426 | * target entry precedes this, then this is the | ||
427 | * matching branch for that target. If this is not | ||
428 | * following a target entry, then this is a branch | ||
429 | * where the target is given as an immediate field | ||
430 | * in the instruction (ie. an i or b form branch). | ||
431 | * In this case we need to read the instruction from | ||
432 | * memory to determine the target/to address. | ||
433 | */ | ||
434 | |||
435 | if (val & BHRB_TARGET) { | ||
436 | /* Target branches use two entries | ||
437 | * (ie. computed gotos/XL form) | ||
438 | */ | ||
439 | cpuhw->bhrb_entries[u_index].to = addr; | ||
440 | cpuhw->bhrb_entries[u_index].mispred = pred; | ||
441 | cpuhw->bhrb_entries[u_index].predicted = ~pred; | ||
442 | |||
443 | /* Get from address in next entry */ | ||
444 | val = read_bhrb(r_index++); | ||
445 | addr = val & BHRB_EA; | ||
446 | if (val & BHRB_TARGET) { | ||
447 | /* Shouldn't have two targets in a | ||
448 | row. Reset index and try again */ | ||
449 | r_index--; | ||
450 | addr = 0; | ||
451 | } | ||
452 | cpuhw->bhrb_entries[u_index].from = addr; | ||
453 | } else { | ||
454 | /* Branches to immediate field | ||
455 | (ie I or B form) */ | ||
456 | cpuhw->bhrb_entries[u_index].from = addr; | ||
457 | cpuhw->bhrb_entries[u_index].to = | ||
458 | power_pmu_bhrb_to(addr); | ||
459 | cpuhw->bhrb_entries[u_index].mispred = pred; | ||
460 | cpuhw->bhrb_entries[u_index].predicted = ~pred; | ||
461 | } | ||
462 | u_index++; | ||
463 | |||
464 | } | ||
465 | } | ||
466 | cpuhw->bhrb_stack.nr = u_index; | ||
467 | return; | ||
468 | } | ||
469 | |||
311 | #endif /* CONFIG_PPC64 */ | 470 | #endif /* CONFIG_PPC64 */ |
312 | 471 | ||
313 | static void perf_event_interrupt(struct pt_regs *regs); | 472 | static void perf_event_interrupt(struct pt_regs *regs); |
@@ -904,47 +1063,6 @@ static int collect_events(struct perf_event *group, int max_count, | |||
904 | return n; | 1063 | return n; |
905 | } | 1064 | } |
906 | 1065 | ||
907 | /* Reset all possible BHRB entries */ | ||
908 | static void power_pmu_bhrb_reset(void) | ||
909 | { | ||
910 | asm volatile(PPC_CLRBHRB); | ||
911 | } | ||
912 | |||
913 | void power_pmu_bhrb_enable(struct perf_event *event) | ||
914 | { | ||
915 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | ||
916 | |||
917 | if (!ppmu->bhrb_nr) | ||
918 | return; | ||
919 | |||
920 | /* Clear BHRB if we changed task context to avoid data leaks */ | ||
921 | if (event->ctx->task && cpuhw->bhrb_context != event->ctx) { | ||
922 | power_pmu_bhrb_reset(); | ||
923 | cpuhw->bhrb_context = event->ctx; | ||
924 | } | ||
925 | cpuhw->bhrb_users++; | ||
926 | } | ||
927 | |||
928 | void power_pmu_bhrb_disable(struct perf_event *event) | ||
929 | { | ||
930 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | ||
931 | |||
932 | if (!ppmu->bhrb_nr) | ||
933 | return; | ||
934 | |||
935 | cpuhw->bhrb_users--; | ||
936 | WARN_ON_ONCE(cpuhw->bhrb_users < 0); | ||
937 | |||
938 | if (!cpuhw->disabled && !cpuhw->bhrb_users) { | ||
939 | /* BHRB cannot be turned off when other | ||
940 | * events are active on the PMU. | ||
941 | */ | ||
942 | |||
943 | /* avoid stale pointer */ | ||
944 | cpuhw->bhrb_context = NULL; | ||
945 | } | ||
946 | } | ||
947 | |||
948 | /* | 1066 | /* |
949 | * Add an event to the PMU. | 1067 | * Add an event to the PMU. |
950 | * If all events are not already frozen, then we disable and | 1068 | * If all events are not already frozen, then we disable and |
@@ -1180,15 +1298,6 @@ int power_pmu_commit_txn(struct pmu *pmu) | |||
1180 | return 0; | 1298 | return 0; |
1181 | } | 1299 | } |
1182 | 1300 | ||
1183 | /* Called from ctxsw to prevent one process's branch entries to | ||
1184 | * mingle with the other process's entries during context switch. | ||
1185 | */ | ||
1186 | void power_pmu_flush_branch_stack(void) | ||
1187 | { | ||
1188 | if (ppmu->bhrb_nr) | ||
1189 | power_pmu_bhrb_reset(); | ||
1190 | } | ||
1191 | |||
1192 | /* | 1301 | /* |
1193 | * Return 1 if we might be able to put event on a limited PMC, | 1302 | * Return 1 if we might be able to put event on a limited PMC, |
1194 | * or 0 if not. | 1303 | * or 0 if not. |
@@ -1458,77 +1567,6 @@ struct pmu power_pmu = { | |||
1458 | .flush_branch_stack = power_pmu_flush_branch_stack, | 1567 | .flush_branch_stack = power_pmu_flush_branch_stack, |
1459 | }; | 1568 | }; |
1460 | 1569 | ||
1461 | /* Processing BHRB entries */ | ||
1462 | void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) | ||
1463 | { | ||
1464 | u64 val; | ||
1465 | u64 addr; | ||
1466 | int r_index, u_index, target, pred; | ||
1467 | |||
1468 | r_index = 0; | ||
1469 | u_index = 0; | ||
1470 | while (r_index < ppmu->bhrb_nr) { | ||
1471 | /* Assembly read function */ | ||
1472 | val = read_bhrb(r_index); | ||
1473 | |||
1474 | /* Terminal marker: End of valid BHRB entries */ | ||
1475 | if (val == 0) { | ||
1476 | break; | ||
1477 | } else { | ||
1478 | /* BHRB field break up */ | ||
1479 | addr = val & BHRB_EA; | ||
1480 | pred = val & BHRB_PREDICTION; | ||
1481 | target = val & BHRB_TARGET; | ||
1482 | |||
1483 | /* Probable Missed entry: Not applicable for POWER8 */ | ||
1484 | if ((addr == 0) && (target == 0) && (pred == 1)) { | ||
1485 | r_index++; | ||
1486 | continue; | ||
1487 | } | ||
1488 | |||
1489 | /* Real Missed entry: Power8 based missed entry */ | ||
1490 | if ((addr == 0) && (target == 1) && (pred == 1)) { | ||
1491 | r_index++; | ||
1492 | continue; | ||
1493 | } | ||
1494 | |||
1495 | /* Reserved condition: Not a valid entry */ | ||
1496 | if ((addr == 0) && (target == 1) && (pred == 0)) { | ||
1497 | r_index++; | ||
1498 | continue; | ||
1499 | } | ||
1500 | |||
1501 | /* Is a target address */ | ||
1502 | if (val & BHRB_TARGET) { | ||
1503 | /* First address cannot be a target address */ | ||
1504 | if (r_index == 0) { | ||
1505 | r_index++; | ||
1506 | continue; | ||
1507 | } | ||
1508 | |||
1509 | /* Update target address for the previous entry */ | ||
1510 | cpuhw->bhrb_entries[u_index - 1].to = addr; | ||
1511 | cpuhw->bhrb_entries[u_index - 1].mispred = pred; | ||
1512 | cpuhw->bhrb_entries[u_index - 1].predicted = ~pred; | ||
1513 | |||
1514 | /* Don't increment u_index */ | ||
1515 | r_index++; | ||
1516 | } else { | ||
1517 | /* Update address, flags for current entry */ | ||
1518 | cpuhw->bhrb_entries[u_index].from = addr; | ||
1519 | cpuhw->bhrb_entries[u_index].mispred = pred; | ||
1520 | cpuhw->bhrb_entries[u_index].predicted = ~pred; | ||
1521 | |||
1522 | /* Successfully populated one entry */ | ||
1523 | u_index++; | ||
1524 | r_index++; | ||
1525 | } | ||
1526 | } | ||
1527 | } | ||
1528 | cpuhw->bhrb_stack.nr = u_index; | ||
1529 | return; | ||
1530 | } | ||
1531 | |||
1532 | /* | 1570 | /* |
1533 | * A counter has overflowed; update its count and record | 1571 | * A counter has overflowed; update its count and record |
1534 | * things if requested. Note that interrupts are hard-disabled | 1572 | * things if requested. Note that interrupts are hard-disabled |
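The comments in the new power_pmu_bhrb_read() describe how the branch history is walked most-recent-first, how a "target" entry is paired with the "from" entry that follows it, and how a plain "from" entry needs the branch instruction itself to be read to recover its target. A standalone toy model of that pairing logic is sketched below; the flag bits, addresses and toy_read() buffer are invented for illustration and are not the real BHRB register encoding:

#include <stdio.h>
#include <stdint.h>

#define TOY_TARGET 0x2ULL   /* illustrative flag bits, not the */
#define TOY_PRED   0x1ULL   /* real BHRB field layout          */
#define TOY_EA     (~0x3ULL)

struct branch { uint64_t from, to; int mispred; };

/* Simulated history, most recent first: a target entry followed by the
 * branch that produced it, then a plain immediate-form branch, then the
 * terminating zero. */
static const uint64_t toy_bhrb[] = {
        0x2000 | TOY_TARGET,    /* "to" address of a computed goto     */
        0x1000,                 /* the blr/bctr itself (its "from")    */
        0x3000 | TOY_PRED,      /* immediate-form branch, mispredicted */
        0,
};

static uint64_t toy_read(int i)
{
        return toy_bhrb[i];
}

int main(void)
{
        struct branch out[8];
        int r = 0, u = 0, i;
        uint64_t val;

        while ((val = toy_read(r++)) != 0) {
                uint64_t addr = val & TOY_EA;

                if (val & TOY_TARGET) {
                        /* Target entry: the next entry carries its "from". */
                        out[u].to = addr;
                        out[u].from = toy_read(r++) & TOY_EA;
                } else {
                        /* Plain "from" entry; the real code reads the branch
                         * instruction itself to compute the "to" address. */
                        out[u].from = addr;
                        out[u].to = 0;
                }
                out[u].mispred = !!(val & TOY_PRED);
                u++;
        }

        for (i = 0; i < u; i++)
                printf("from=%#llx to=%#llx mispred=%d\n",
                       (unsigned long long)out[i].from,
                       (unsigned long long)out[i].to, out[i].mispred);
        return 0;
}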
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig index a881232a3cce..b62aab3e22ec 100644 --- a/arch/powerpc/platforms/Kconfig +++ b/arch/powerpc/platforms/Kconfig | |||
@@ -128,7 +128,7 @@ config PPC_RTAS_DAEMON | |||
128 | 128 | ||
129 | config RTAS_PROC | 129 | config RTAS_PROC |
130 | bool "Proc interface to RTAS" | 130 | bool "Proc interface to RTAS" |
131 | depends on PPC_RTAS | 131 | depends on PPC_RTAS && PROC_FS |
132 | default y | 132 | default y |
133 | 133 | ||
134 | config RTAS_FLASH | 134 | config RTAS_FLASH |
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index ade4463226c6..628c564ceadb 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/of.h> | 15 | #include <linux/of.h> |
16 | #include <linux/of_platform.h> | 16 | #include <linux/of_platform.h> |
17 | #include <linux/interrupt.h> | 17 | #include <linux/interrupt.h> |
18 | #include <linux/slab.h> | ||
18 | #include <asm/opal.h> | 19 | #include <asm/opal.h> |
19 | #include <asm/firmware.h> | 20 | #include <asm/firmware.h> |
20 | 21 | ||
@@ -28,6 +29,8 @@ struct opal { | |||
28 | static struct device_node *opal_node; | 29 | static struct device_node *opal_node; |
29 | static DEFINE_SPINLOCK(opal_write_lock); | 30 | static DEFINE_SPINLOCK(opal_write_lock); |
30 | extern u64 opal_mc_secondary_handler[]; | 31 | extern u64 opal_mc_secondary_handler[]; |
32 | static unsigned int *opal_irqs; | ||
33 | static unsigned int opal_irq_count; | ||
31 | 34 | ||
32 | int __init early_init_dt_scan_opal(unsigned long node, | 35 | int __init early_init_dt_scan_opal(unsigned long node, |
33 | const char *uname, int depth, void *data) | 36 | const char *uname, int depth, void *data) |
@@ -53,7 +56,11 @@ int __init early_init_dt_scan_opal(unsigned long node, | |||
53 | opal.entry, entryp, entrysz); | 56 | opal.entry, entryp, entrysz); |
54 | 57 | ||
55 | powerpc_firmware_features |= FW_FEATURE_OPAL; | 58 | powerpc_firmware_features |= FW_FEATURE_OPAL; |
56 | if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) { | 59 | if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) { |
60 | powerpc_firmware_features |= FW_FEATURE_OPALv2; | ||
61 | powerpc_firmware_features |= FW_FEATURE_OPALv3; | ||
62 | printk("OPAL V3 detected !\n"); | ||
63 | } else if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) { | ||
57 | powerpc_firmware_features |= FW_FEATURE_OPALv2; | 64 | powerpc_firmware_features |= FW_FEATURE_OPALv2; |
58 | printk("OPAL V2 detected !\n"); | 65 | printk("OPAL V2 detected !\n"); |
59 | } else { | 66 | } else { |
@@ -144,6 +151,13 @@ int opal_put_chars(uint32_t vtermno, const char *data, int total_len) | |||
144 | rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) { | 151 | rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) { |
145 | len = total_len; | 152 | len = total_len; |
146 | rc = opal_console_write(vtermno, &len, data); | 153 | rc = opal_console_write(vtermno, &len, data); |
154 | |||
155 | /* Closed or other error: drop */ | ||
156 | if (rc != OPAL_SUCCESS && rc != OPAL_BUSY && | ||
157 | rc != OPAL_BUSY_EVENT) { | ||
158 | written = total_len; | ||
159 | break; | ||
160 | } | ||
147 | if (rc == OPAL_SUCCESS) { | 161 | if (rc == OPAL_SUCCESS) { |
148 | total_len -= len; | 162 | total_len -= len; |
149 | data += len; | 163 | data += len; |
@@ -316,6 +330,8 @@ static int __init opal_init(void) | |||
316 | irqs = of_get_property(opal_node, "opal-interrupts", &irqlen); | 330 | irqs = of_get_property(opal_node, "opal-interrupts", &irqlen); |
317 | pr_debug("opal: Found %d interrupts reserved for OPAL\n", | 331 | pr_debug("opal: Found %d interrupts reserved for OPAL\n", |
318 | irqs ? (irqlen / 4) : 0); | 332 | irqs ? (irqlen / 4) : 0); |
333 | opal_irq_count = irqlen / 4; | ||
334 | opal_irqs = kzalloc(opal_irq_count * sizeof(unsigned int), GFP_KERNEL); | ||
319 | for (i = 0; irqs && i < (irqlen / 4); i++, irqs++) { | 335 | for (i = 0; irqs && i < (irqlen / 4); i++, irqs++) { |
320 | unsigned int hwirq = be32_to_cpup(irqs); | 336 | unsigned int hwirq = be32_to_cpup(irqs); |
321 | unsigned int irq = irq_create_mapping(NULL, hwirq); | 337 | unsigned int irq = irq_create_mapping(NULL, hwirq); |
@@ -327,7 +343,19 @@ static int __init opal_init(void) | |||
327 | if (rc) | 343 | if (rc) |
328 | pr_warning("opal: Error %d requesting irq %d" | 344 | pr_warning("opal: Error %d requesting irq %d" |
329 | " (0x%x)\n", rc, irq, hwirq); | 345 | " (0x%x)\n", rc, irq, hwirq); |
346 | opal_irqs[i] = irq; | ||
330 | } | 347 | } |
331 | return 0; | 348 | return 0; |
332 | } | 349 | } |
333 | subsys_initcall(opal_init); | 350 | subsys_initcall(opal_init); |
351 | |||
352 | void opal_shutdown(void) | ||
353 | { | ||
354 | unsigned int i; | ||
355 | |||
356 | for (i = 0; i < opal_irq_count; i++) { | ||
357 | if (opal_irqs[i]) | ||
358 | free_irq(opal_irqs[i], 0); | ||
359 | opal_irqs[i] = 0; | ||
360 | } | ||
361 | } | ||
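opal_init() now records every IRQ it maps in the new opal_irqs[] array purely so that opal_shutdown() can release them again before a kexec. The record-then-release bookkeeping, reduced to a userspace sketch with toy request/free helpers standing in for the kernel IRQ API, is roughly:

#include <stdio.h>
#include <stdlib.h>

static unsigned int *irqs;
static unsigned int irq_count;

/* Toy stand-ins for request_irq()/free_irq(). */
static int request_toy_irq(unsigned int irq) { printf("request irq %u\n", irq); return 0; }
static void free_toy_irq(unsigned int irq)   { printf("free irq %u\n", irq); }

static void toy_init(const unsigned int *hwirqs, unsigned int n)
{
        unsigned int i;

        irq_count = n;
        irqs = calloc(irq_count, sizeof(*irqs));  /* zeroed, like kzalloc() */
        if (!irqs)
                return;
        for (i = 0; i < n; i++)
                if (request_toy_irq(hwirqs[i]) == 0)
                        irqs[i] = hwirqs[i];      /* remember it for shutdown */
}

static void toy_shutdown(void)
{
        unsigned int i;

        if (!irqs)
                return;
        for (i = 0; i < irq_count; i++) {
                if (irqs[i])
                        free_toy_irq(irqs[i]);
                irqs[i] = 0;
        }
}

int main(void)
{
        const unsigned int hw[] = { 17, 23, 42 };

        toy_init(hw, 3);
        toy_shutdown();   /* e.g. just before kexec'ing into a new kernel */
        free(irqs);
        return 0;
}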
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 1da578b7c1bf..3937aaae5bc4 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c | |||
@@ -1048,6 +1048,12 @@ static u32 pnv_ioda_bdfn_to_pe(struct pnv_phb *phb, struct pci_bus *bus, | |||
1048 | return phb->ioda.pe_rmap[(bus->number << 8) | devfn]; | 1048 | return phb->ioda.pe_rmap[(bus->number << 8) | devfn]; |
1049 | } | 1049 | } |
1050 | 1050 | ||
1051 | static void pnv_pci_ioda_shutdown(struct pnv_phb *phb) | ||
1052 | { | ||
1053 | opal_pci_reset(phb->opal_id, OPAL_PCI_IODA_TABLE_RESET, | ||
1054 | OPAL_ASSERT_RESET); | ||
1055 | } | ||
1056 | |||
1051 | void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type) | 1057 | void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type) |
1052 | { | 1058 | { |
1053 | struct pci_controller *hose; | 1059 | struct pci_controller *hose; |
@@ -1178,6 +1184,9 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type) | |||
1178 | /* Setup TCEs */ | 1184 | /* Setup TCEs */ |
1179 | phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup; | 1185 | phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup; |
1180 | 1186 | ||
1187 | /* Setup shutdown function for kexec */ | ||
1188 | phb->shutdown = pnv_pci_ioda_shutdown; | ||
1189 | |||
1181 | /* Setup MSI support */ | 1190 | /* Setup MSI support */ |
1182 | pnv_pci_init_ioda_msis(phb); | 1191 | pnv_pci_init_ioda_msis(phb); |
1183 | 1192 | ||
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index 55dfca844ddf..163bd7422f1c 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c | |||
@@ -450,6 +450,18 @@ static void pnv_pci_dma_dev_setup(struct pci_dev *pdev) | |||
450 | pnv_pci_dma_fallback_setup(hose, pdev); | 450 | pnv_pci_dma_fallback_setup(hose, pdev); |
451 | } | 451 | } |
452 | 452 | ||
453 | void pnv_pci_shutdown(void) | ||
454 | { | ||
455 | struct pci_controller *hose; | ||
456 | |||
457 | list_for_each_entry(hose, &hose_list, list_node) { | ||
458 | struct pnv_phb *phb = hose->private_data; | ||
459 | |||
460 | if (phb && phb->shutdown) | ||
461 | phb->shutdown(phb); | ||
462 | } | ||
463 | } | ||
464 | |||
453 | /* Fixup wrong class code in p7ioc and p8 root complex */ | 465 | /* Fixup wrong class code in p7ioc and p8 root complex */ |
454 | static void pnv_p7ioc_rc_quirk(struct pci_dev *dev) | 466 | static void pnv_p7ioc_rc_quirk(struct pci_dev *dev) |
455 | { | 467 | { |
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index 48dc4bb856a1..25d76c4df50b 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h | |||
@@ -86,6 +86,7 @@ struct pnv_phb { | |||
86 | void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev); | 86 | void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev); |
87 | void (*fixup_phb)(struct pci_controller *hose); | 87 | void (*fixup_phb)(struct pci_controller *hose); |
88 | u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn); | 88 | u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn); |
89 | void (*shutdown)(struct pnv_phb *phb); | ||
89 | 90 | ||
90 | union { | 91 | union { |
91 | struct { | 92 | struct { |
@@ -158,4 +159,5 @@ extern void pnv_pci_init_ioda_hub(struct device_node *np); | |||
158 | extern void pnv_pci_init_ioda2_phb(struct device_node *np); | 159 | extern void pnv_pci_init_ioda2_phb(struct device_node *np); |
159 | extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl, | 160 | extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl, |
160 | u64 *startp, u64 *endp); | 161 | u64 *startp, u64 *endp); |
162 | |||
161 | #endif /* __POWERNV_PCI_H */ | 163 | #endif /* __POWERNV_PCI_H */ |
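Across pci.h, pci.c and pci-ioda.c the patch adds an optional per-PHB shutdown() hook and a pnv_pci_shutdown() walker that invokes it on every controller that registered one. A plain C sketch of that callback-per-controller shape, with a made-up struct phb and a printout in place of the real OPAL reset call, could read:

#include <stdio.h>
#include <stddef.h>

struct phb {
        const char *name;
        void (*shutdown)(struct phb *phb);  /* optional per-controller hook */
};

static void ioda_shutdown(struct phb *phb)
{
        /* The real hook issues an OPAL_PCI_IODA_TABLE_RESET for this PHB. */
        printf("resetting IODA tables on %s\n", phb->name);
}

int main(void)
{
        struct phb controllers[] = {
                { .name = "phb0", .shutdown = ioda_shutdown },
                { .name = "phb1", .shutdown = NULL },  /* no hook: skipped */
        };
        size_t i;

        /* Mirrors the shape of pnv_pci_shutdown(): call the hook only
         * on controllers that set one. */
        for (i = 0; i < sizeof(controllers) / sizeof(controllers[0]); i++)
                if (controllers[i].shutdown)
                        controllers[i].shutdown(&controllers[i]);
        return 0;
}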
diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h index 8a9df7f9667e..a1c6f83fc391 100644 --- a/arch/powerpc/platforms/powernv/powernv.h +++ b/arch/powerpc/platforms/powernv/powernv.h | |||
@@ -9,8 +9,10 @@ static inline void pnv_smp_init(void) { } | |||
9 | 9 | ||
10 | #ifdef CONFIG_PCI | 10 | #ifdef CONFIG_PCI |
11 | extern void pnv_pci_init(void); | 11 | extern void pnv_pci_init(void); |
12 | extern void pnv_pci_shutdown(void); | ||
12 | #else | 13 | #else |
13 | static inline void pnv_pci_init(void) { } | 14 | static inline void pnv_pci_init(void) { } |
15 | static inline void pnv_pci_shutdown(void) { } | ||
14 | #endif | 16 | #endif |
15 | 17 | ||
16 | #endif /* _POWERNV_H */ | 18 | #endif /* _POWERNV_H */ |
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index db1ad1c8f68f..d4459bfc92f7 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c | |||
@@ -78,7 +78,9 @@ static void pnv_show_cpuinfo(struct seq_file *m) | |||
78 | if (root) | 78 | if (root) |
79 | model = of_get_property(root, "model", NULL); | 79 | model = of_get_property(root, "model", NULL); |
80 | seq_printf(m, "machine\t\t: PowerNV %s\n", model); | 80 | seq_printf(m, "machine\t\t: PowerNV %s\n", model); |
81 | if (firmware_has_feature(FW_FEATURE_OPALv2)) | 81 | if (firmware_has_feature(FW_FEATURE_OPALv3)) |
82 | seq_printf(m, "firmware\t: OPAL v3\n"); | ||
83 | else if (firmware_has_feature(FW_FEATURE_OPALv2)) | ||
82 | seq_printf(m, "firmware\t: OPAL v2\n"); | 84 | seq_printf(m, "firmware\t: OPAL v2\n"); |
83 | else if (firmware_has_feature(FW_FEATURE_OPAL)) | 85 | else if (firmware_has_feature(FW_FEATURE_OPAL)) |
84 | seq_printf(m, "firmware\t: OPAL v1\n"); | 86 | seq_printf(m, "firmware\t: OPAL v1\n"); |
@@ -126,6 +128,17 @@ static void pnv_progress(char *s, unsigned short hex) | |||
126 | { | 128 | { |
127 | } | 129 | } |
128 | 130 | ||
131 | static void pnv_shutdown(void) | ||
132 | { | ||
133 | /* Let the PCI code clear up IODA tables */ | ||
134 | pnv_pci_shutdown(); | ||
135 | |||
136 | /* And unregister all OPAL interrupts so they don't fire | ||
137 | * up while we kexec | ||
138 | */ | ||
139 | opal_shutdown(); | ||
140 | } | ||
141 | |||
129 | #ifdef CONFIG_KEXEC | 142 | #ifdef CONFIG_KEXEC |
130 | static void pnv_kexec_cpu_down(int crash_shutdown, int secondary) | 143 | static void pnv_kexec_cpu_down(int crash_shutdown, int secondary) |
131 | { | 144 | { |
@@ -187,6 +200,7 @@ define_machine(powernv) { | |||
187 | .init_IRQ = pnv_init_IRQ, | 200 | .init_IRQ = pnv_init_IRQ, |
188 | .show_cpuinfo = pnv_show_cpuinfo, | 201 | .show_cpuinfo = pnv_show_cpuinfo, |
189 | .progress = pnv_progress, | 202 | .progress = pnv_progress, |
203 | .machine_shutdown = pnv_shutdown, | ||
190 | .power_save = power7_idle, | 204 | .power_save = power7_idle, |
191 | .calibrate_decr = generic_calibrate_decr, | 205 | .calibrate_decr = generic_calibrate_decr, |
192 | #ifdef CONFIG_KEXEC | 206 | #ifdef CONFIG_KEXEC |
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c index 6a3ecca5b725..88c9459c3e07 100644 --- a/arch/powerpc/platforms/powernv/smp.c +++ b/arch/powerpc/platforms/powernv/smp.c | |||
@@ -71,18 +71,68 @@ int pnv_smp_kick_cpu(int nr) | |||
71 | 71 | ||
72 | BUG_ON(nr < 0 || nr >= NR_CPUS); | 72 | BUG_ON(nr < 0 || nr >= NR_CPUS); |
73 | 73 | ||
74 | /* On OPAL v2 the CPU are still spinning inside OPAL itself, | 74 | /* |
75 | * get them back now | 75 | * If we already started or OPALv2 is not supported, we just |
76 | * kick the CPU via the PACA | ||
76 | */ | 77 | */ |
77 | if (!paca[nr].cpu_start && firmware_has_feature(FW_FEATURE_OPALv2)) { | 78 | if (paca[nr].cpu_start || !firmware_has_feature(FW_FEATURE_OPALv2)) |
78 | pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n", nr, pcpu); | 79 | goto kick; |
79 | rc = opal_start_cpu(pcpu, start_here); | 80 | |
81 | /* | ||
82 | * At this point, the CPU can either be spinning on the way in | ||
83 | * from kexec or be inside OPAL waiting to be started for the | ||
84 | * first time. OPAL v3 allows us to query OPAL to know if it | ||
85 | * has the CPUs, so we do that | ||
86 | */ | ||
87 | if (firmware_has_feature(FW_FEATURE_OPALv3)) { | ||
88 | uint8_t status; | ||
89 | |||
90 | rc = opal_query_cpu_status(pcpu, &status); | ||
80 | if (rc != OPAL_SUCCESS) { | 91 | if (rc != OPAL_SUCCESS) { |
81 | pr_warn("OPAL Error %ld starting CPU %d\n", | 92 | pr_warn("OPAL Error %ld querying CPU %d state\n", |
82 | rc, nr); | 93 | rc, nr); |
83 | return -ENODEV; | 94 | return -ENODEV; |
84 | } | 95 | } |
96 | |||
97 | /* | ||
98 | * Already started, just kick it, probably coming from | ||
99 | * kexec and spinning | ||
100 | */ | ||
101 | if (status == OPAL_THREAD_STARTED) | ||
102 | goto kick; | ||
103 | |||
104 | /* | ||
105 | * Available/inactive, let's kick it | ||
106 | */ | ||
107 | if (status == OPAL_THREAD_INACTIVE) { | ||
108 | pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n", | ||
109 | nr, pcpu); | ||
110 | rc = opal_start_cpu(pcpu, start_here); | ||
111 | if (rc != OPAL_SUCCESS) { | ||
112 | pr_warn("OPAL Error %ld starting CPU %d\n", | ||
113 | rc, nr); | ||
114 | return -ENODEV; | ||
115 | } | ||
116 | } else { | ||
117 | /* | ||
118 | * An unavailable CPU (or any other unknown status) | ||
119 | * shouldn't be started. It should also | ||
120 | * not be in the possible map but currently it can | ||
121 | * happen | ||
122 | */ | ||
123 | pr_devel("OPAL: CPU %d (HW 0x%x) is unavailable" | ||
124 | " (status %d)...\n", nr, pcpu, status); | ||
125 | return -ENODEV; | ||
126 | } | ||
127 | } else { | ||
128 | /* | ||
129 | * On OPAL v2, we just kick it and hope for the best, | ||
130 | * we must not test the error from opal_start_cpu() or | ||
131 | * we would fail to get CPUs from kexec. | ||
132 | */ | ||
133 | opal_start_cpu(pcpu, start_here); | ||
85 | } | 134 | } |
135 | kick: | ||
86 | return smp_generic_kick_cpu(nr); | 136 | return smp_generic_kick_cpu(nr); |
87 | } | 137 | } |
88 | 138 | ||
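The reworked pnv_smp_kick_cpu() becomes a small decision tree: on OPAL v3 it queries the thread state first, starts the CPU only when it is inactive, merely kicks it when it is already started (typically after kexec), and refuses anything else; on OPAL v2 it starts the CPU blindly and ignores the result. A compact sketch of that flow, using invented status codes and stub calls in place of the OPAL API, might be:

#include <stdio.h>

enum toy_status { TOY_INACTIVE, TOY_STARTED, TOY_UNAVAILABLE };

/* Stubs standing in for opal_query_cpu_status()/opal_start_cpu(). */
static enum toy_status query_status(int cpu)
{
        if (cpu == 0)
                return TOY_INACTIVE;
        if (cpu == 1)
                return TOY_STARTED;
        return TOY_UNAVAILABLE;
}

static int start_cpu(int cpu) { printf("starting cpu %d\n", cpu); return 0; }
static int kick_cpu(int cpu)  { printf("kicking cpu %d\n", cpu);  return 0; }

static int toy_kick(int cpu, int have_v3)
{
        if (!have_v3) {
                /* "v2" fallback: start blindly, ignore the result. */
                start_cpu(cpu);
        } else {
                switch (query_status(cpu)) {
                case TOY_STARTED:    /* already spinning, e.g. after kexec */
                        break;
                case TOY_INACTIVE:   /* still held in firmware: release it */
                        if (start_cpu(cpu))
                                return -1;
                        break;
                default:             /* unavailable or unknown: refuse */
                        return -1;
                }
        }
        return kick_cpu(cpu);
}

int main(void)
{
        printf("rc=%d\n", toy_kick(0, 1));  /* inactive    -> start + kick */
        printf("rc=%d\n", toy_kick(1, 1));  /* started     -> kick only    */
        printf("rc=%d\n", toy_kick(2, 1));  /* unavailable -> refused      */
        printf("rc=%d\n", toy_kick(3, 0));  /* no v3       -> start + kick */
        return 0;
}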
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig index 9a0941bc4d31..023b288f895b 100644 --- a/arch/powerpc/platforms/pseries/Kconfig +++ b/arch/powerpc/platforms/pseries/Kconfig | |||
@@ -18,6 +18,7 @@ config PPC_PSERIES | |||
18 | select PPC_PCI_CHOICE if EXPERT | 18 | select PPC_PCI_CHOICE if EXPERT |
19 | select ZLIB_DEFLATE | 19 | select ZLIB_DEFLATE |
20 | select PPC_DOORBELL | 20 | select PPC_DOORBELL |
21 | select HAVE_CONTEXT_TRACKING | ||
21 | default y | 22 | default y |
22 | 23 | ||
23 | config PPC_SPLPAR | 24 | config PPC_SPLPAR |
diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c index 47226e04126d..5f997e79d570 100644 --- a/arch/powerpc/platforms/pseries/suspend.c +++ b/arch/powerpc/platforms/pseries/suspend.c | |||
@@ -16,6 +16,7 @@ | |||
16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 16 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
17 | */ | 17 | */ |
18 | 18 | ||
19 | #include <linux/cpu.h> | ||
19 | #include <linux/delay.h> | 20 | #include <linux/delay.h> |
20 | #include <linux/suspend.h> | 21 | #include <linux/suspend.h> |
21 | #include <linux/stat.h> | 22 | #include <linux/stat.h> |
@@ -126,11 +127,15 @@ static ssize_t store_hibernate(struct device *dev, | |||
126 | struct device_attribute *attr, | 127 | struct device_attribute *attr, |
127 | const char *buf, size_t count) | 128 | const char *buf, size_t count) |
128 | { | 129 | { |
130 | cpumask_var_t offline_mask; | ||
129 | int rc; | 131 | int rc; |
130 | 132 | ||
131 | if (!capable(CAP_SYS_ADMIN)) | 133 | if (!capable(CAP_SYS_ADMIN)) |
132 | return -EPERM; | 134 | return -EPERM; |
133 | 135 | ||
136 | if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY)) | ||
137 | return -ENOMEM; | ||
138 | |||
134 | stream_id = simple_strtoul(buf, NULL, 16); | 139 | stream_id = simple_strtoul(buf, NULL, 16); |
135 | 140 | ||
136 | do { | 141 | do { |
@@ -140,15 +145,32 @@ static ssize_t store_hibernate(struct device *dev, | |||
140 | } while (rc == -EAGAIN); | 145 | } while (rc == -EAGAIN); |
141 | 146 | ||
142 | if (!rc) { | 147 | if (!rc) { |
148 | /* All present CPUs must be online */ | ||
149 | cpumask_andnot(offline_mask, cpu_present_mask, | ||
150 | cpu_online_mask); | ||
151 | rc = rtas_online_cpus_mask(offline_mask); | ||
152 | if (rc) { | ||
153 | pr_err("%s: Could not bring present CPUs online.\n", | ||
154 | __func__); | ||
155 | goto out; | ||
156 | } | ||
157 | |||
143 | stop_topology_update(); | 158 | stop_topology_update(); |
144 | rc = pm_suspend(PM_SUSPEND_MEM); | 159 | rc = pm_suspend(PM_SUSPEND_MEM); |
145 | start_topology_update(); | 160 | start_topology_update(); |
161 | |||
162 | /* Take down CPUs not online prior to suspend */ | ||
163 | if (!rtas_offline_cpus_mask(offline_mask)) | ||
164 | pr_warn("%s: Could not restore CPUs to offline " | ||
165 | "state.\n", __func__); | ||
146 | } | 166 | } |
147 | 167 | ||
148 | stream_id = 0; | 168 | stream_id = 0; |
149 | 169 | ||
150 | if (!rc) | 170 | if (!rc) |
151 | rc = count; | 171 | rc = count; |
172 | out: | ||
173 | free_cpumask_var(offline_mask); | ||
152 | return rc; | 174 | return rc; |
153 | } | 175 | } |
154 | 176 | ||
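store_hibernate() now computes "present but not online" into offline_mask, onlines those CPUs for the duration of the suspend, and offlines them again afterwards. The same save-and-restore of a set difference, modelled with an ordinary bitmask instead of the kernel's cpumask API, looks like:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t present = 0x0f;  /* CPUs 0-3 exist               */
        uint32_t online  = 0x05;  /* only CPUs 0 and 2 are online */

        /* Equivalent of cpumask_andnot(offline_mask, present, online). */
        uint32_t offline_mask = present & ~online;

        online |= offline_mask;   /* bring every present CPU online */
        printf("during suspend: online=%#x\n", online);

        /* ...pm_suspend() would run here with all CPUs available... */

        online &= ~offline_mask;  /* put the saved set offline again */
        printf("after suspend:  online=%#x\n", online);
        return 0;
}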
diff --git a/arch/powerpc/platforms/wsp/ics.c b/arch/powerpc/platforms/wsp/ics.c index 97fe82ee8633..2d3b1dd9571d 100644 --- a/arch/powerpc/platforms/wsp/ics.c +++ b/arch/powerpc/platforms/wsp/ics.c | |||
@@ -361,7 +361,7 @@ static int wsp_chip_set_affinity(struct irq_data *d, | |||
361 | xive = xive_set_server(xive, get_irq_server(ics, hw_irq)); | 361 | xive = xive_set_server(xive, get_irq_server(ics, hw_irq)); |
362 | wsp_ics_set_xive(ics, hw_irq, xive); | 362 | wsp_ics_set_xive(ics, hw_irq, xive); |
363 | 363 | ||
364 | return 0; | 364 | return IRQ_SET_MASK_OK; |
365 | } | 365 | } |
366 | 366 | ||
367 | static struct irq_chip wsp_irq_chip = { | 367 | static struct irq_chip wsp_irq_chip = { |
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile index b0a518e97599..99464a7bdb3b 100644 --- a/arch/powerpc/sysdev/Makefile +++ b/arch/powerpc/sysdev/Makefile | |||
@@ -64,6 +64,8 @@ endif | |||
64 | 64 | ||
65 | obj-$(CONFIG_PPC_SCOM) += scom.o | 65 | obj-$(CONFIG_PPC_SCOM) += scom.o |
66 | 66 | ||
67 | obj-$(CONFIG_PPC_EARLY_DEBUG_MEMCONS) += udbg_memcons.o | ||
68 | |||
67 | subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror | 69 | subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror |
68 | 70 | ||
69 | obj-$(CONFIG_PPC_XICS) += xics/ | 71 | obj-$(CONFIG_PPC_XICS) += xics/ |
diff --git a/arch/powerpc/sysdev/ehv_pic.c b/arch/powerpc/sysdev/ehv_pic.c index 6e0e1005227f..9cd0e60716fe 100644 --- a/arch/powerpc/sysdev/ehv_pic.c +++ b/arch/powerpc/sysdev/ehv_pic.c | |||
@@ -81,7 +81,7 @@ int ehv_pic_set_affinity(struct irq_data *d, const struct cpumask *dest, | |||
81 | ev_int_set_config(src, config, prio, cpuid); | 81 | ev_int_set_config(src, config, prio, cpuid); |
82 | spin_unlock_irqrestore(&ehv_pic_lock, flags); | 82 | spin_unlock_irqrestore(&ehv_pic_lock, flags); |
83 | 83 | ||
84 | return 0; | 84 | return IRQ_SET_MASK_OK; |
85 | } | 85 | } |
86 | 86 | ||
87 | static unsigned int ehv_pic_type_to_vecpri(unsigned int type) | 87 | static unsigned int ehv_pic_type_to_vecpri(unsigned int type) |
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index ee21b5e71aec..0a13ecb270c7 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c | |||
@@ -836,7 +836,7 @@ int mpic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, | |||
836 | mpic_physmask(mask)); | 836 | mpic_physmask(mask)); |
837 | } | 837 | } |
838 | 838 | ||
839 | return 0; | 839 | return IRQ_SET_MASK_OK; |
840 | } | 840 | } |
841 | 841 | ||
842 | static unsigned int mpic_type_to_vecpri(struct mpic *mpic, unsigned int type) | 842 | static unsigned int mpic_type_to_vecpri(struct mpic *mpic, unsigned int type) |
diff --git a/arch/powerpc/sysdev/udbg_memcons.c b/arch/powerpc/sysdev/udbg_memcons.c new file mode 100644 index 000000000000..ce5a7b489e4b --- /dev/null +++ b/arch/powerpc/sysdev/udbg_memcons.c | |||
@@ -0,0 +1,105 @@ | |||
1 | /* | ||
2 | * A udbg backend which logs messages and reads input from in memory | ||
3 | * buffers. | ||
4 | * | ||
5 | * The console output can be read from memcons_output which is a | ||
6 | * circular buffer whose next write position is stored in memcons.output_pos. | ||
7 | * | ||
8 | * Input may be passed by writing into the memcons_input buffer when it is | ||
9 | * empty. The input buffer is empty when both input_pos == input_start and | ||
10 | * *input_start == '\0'. | ||
11 | * | ||
12 | * Copyright (C) 2003-2005 Anton Blanchard and Milton Miller, IBM Corp | ||
13 | * Copyright (C) 2013 Alistair Popple, IBM Corp | ||
14 | * | ||
15 | * This program is free software; you can redistribute it and/or | ||
16 | * modify it under the terms of the GNU General Public License | ||
17 | * as published by the Free Software Foundation; either version | ||
18 | * 2 of the License, or (at your option) any later version. | ||
19 | */ | ||
20 | |||
21 | #include <linux/init.h> | ||
22 | #include <linux/kernel.h> | ||
23 | #include <asm/barrier.h> | ||
24 | #include <asm/page.h> | ||
25 | #include <asm/processor.h> | ||
26 | #include <asm/udbg.h> | ||
27 | |||
28 | struct memcons { | ||
29 | char *output_start; | ||
30 | char *output_pos; | ||
31 | char *output_end; | ||
32 | char *input_start; | ||
33 | char *input_pos; | ||
34 | char *input_end; | ||
35 | }; | ||
36 | |||
37 | static char memcons_output[CONFIG_PPC_MEMCONS_OUTPUT_SIZE]; | ||
38 | static char memcons_input[CONFIG_PPC_MEMCONS_INPUT_SIZE]; | ||
39 | |||
40 | struct memcons memcons = { | ||
41 | .output_start = memcons_output, | ||
42 | .output_pos = memcons_output, | ||
43 | .output_end = &memcons_output[CONFIG_PPC_MEMCONS_OUTPUT_SIZE], | ||
44 | .input_start = memcons_input, | ||
45 | .input_pos = memcons_input, | ||
46 | .input_end = &memcons_input[CONFIG_PPC_MEMCONS_INPUT_SIZE], | ||
47 | }; | ||
48 | |||
49 | void memcons_putc(char c) | ||
50 | { | ||
51 | char *new_output_pos; | ||
52 | |||
53 | *memcons.output_pos = c; | ||
54 | wmb(); | ||
55 | new_output_pos = memcons.output_pos + 1; | ||
56 | if (new_output_pos >= memcons.output_end) | ||
57 | new_output_pos = memcons.output_start; | ||
58 | |||
59 | memcons.output_pos = new_output_pos; | ||
60 | } | ||
61 | |||
62 | int memcons_getc_poll(void) | ||
63 | { | ||
64 | char c; | ||
65 | char *new_input_pos; | ||
66 | |||
67 | if (*memcons.input_pos) { | ||
68 | c = *memcons.input_pos; | ||
69 | |||
70 | new_input_pos = memcons.input_pos + 1; | ||
71 | if (new_input_pos >= memcons.input_end) | ||
72 | new_input_pos = memcons.input_start; | ||
73 | else if (*new_input_pos == '\0') | ||
74 | new_input_pos = memcons.input_start; | ||
75 | |||
76 | *memcons.input_pos = '\0'; | ||
77 | wmb(); | ||
78 | memcons.input_pos = new_input_pos; | ||
79 | return c; | ||
80 | } | ||
81 | |||
82 | return -1; | ||
83 | } | ||
84 | |||
85 | int memcons_getc(void) | ||
86 | { | ||
87 | int c; | ||
88 | |||
89 | while (1) { | ||
90 | c = memcons_getc_poll(); | ||
91 | if (c == -1) | ||
92 | cpu_relax(); | ||
93 | else | ||
94 | break; | ||
95 | } | ||
96 | |||
97 | return c; | ||
98 | } | ||
99 | |||
100 | void udbg_init_memcons(void) | ||
101 | { | ||
102 | udbg_putc = memcons_putc; | ||
103 | udbg_getc = memcons_getc; | ||
104 | udbg_getc_poll = memcons_getc_poll; | ||
105 | } | ||
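The header comment of udbg_memcons.c spells out the protocol the new backend implements: output is appended to a circular buffer whose next write position is published in output_pos, and input is accepted only while the input buffer is empty, i.e. after the console has consumed and zeroed it. A self-contained userspace simulation of the same put/get discipline (tiny fixed buffers, no memory barriers, names invented here) is sketched below:

#include <stdio.h>

#define OUT_SIZE 8
#define IN_SIZE  4

static char out_buf[OUT_SIZE];
static char in_buf[IN_SIZE];
static char *out_pos = out_buf;
static char *in_pos = in_buf;

static void putc_mem(char c)
{
        *out_pos = c;
        if (++out_pos >= out_buf + OUT_SIZE)  /* wrap the circular buffer */
                out_pos = out_buf;
}

static int getc_poll_mem(void)
{
        char c;

        if (!*in_pos)
                return -1;        /* buffer is empty */
        c = *in_pos;
        *in_pos = '\0';           /* consume and mark the slot free */
        if (++in_pos >= in_buf + IN_SIZE || !*in_pos)
                in_pos = in_buf;  /* wrap, or rewind once drained */
        return c;
}

int main(void)
{
        const char *msg = "hello memcons";
        const char *p;
        int c;

        for (p = msg; *p; p++)
                putc_mem(*p);
        printf("output ring: %.8s (next write at offset %ld)\n",
               out_buf, (long)(out_pos - out_buf));

        /* A "host" injects input by writing into the empty buffer... */
        in_buf[0] = 'o';
        in_buf[1] = 'k';

        /* ...and the console side drains it one character at a time. */
        while ((c = getc_poll_mem()) != -1)
                printf("read '%c'\n", c);
        return 0;
}

An external debugger or simulator reading the target's memory would follow output_pos the same way, and would inject new input only after seeing the input buffer drained back to NULs.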
diff --git a/arch/powerpc/sysdev/xics/ics-opal.c b/arch/powerpc/sysdev/xics/ics-opal.c index f7e8609df0d5..39d72212655e 100644 --- a/arch/powerpc/sysdev/xics/ics-opal.c +++ b/arch/powerpc/sysdev/xics/ics-opal.c | |||
@@ -148,7 +148,7 @@ static int ics_opal_set_affinity(struct irq_data *d, | |||
148 | __func__, d->irq, hw_irq, server, rc); | 148 | __func__, d->irq, hw_irq, server, rc); |
149 | return -1; | 149 | return -1; |
150 | } | 150 | } |
151 | return 0; | 151 | return IRQ_SET_MASK_OK; |
152 | } | 152 | } |
153 | 153 | ||
154 | static struct irq_chip ics_opal_irq_chip = { | 154 | static struct irq_chip ics_opal_irq_chip = { |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 6a154a91c7e7..685692c94f05 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -108,7 +108,6 @@ config X86 | |||
108 | select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC) | 108 | select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC) |
109 | select GENERIC_TIME_VSYSCALL if X86_64 | 109 | select GENERIC_TIME_VSYSCALL if X86_64 |
110 | select KTIME_SCALAR if X86_32 | 110 | select KTIME_SCALAR if X86_32 |
111 | select ALWAYS_USE_PERSISTENT_CLOCK | ||
112 | select GENERIC_STRNCPY_FROM_USER | 111 | select GENERIC_STRNCPY_FROM_USER |
113 | select GENERIC_STRNLEN_USER | 112 | select GENERIC_STRNLEN_USER |
114 | select HAVE_CONTEXT_TRACKING if X86_64 | 113 | select HAVE_CONTEXT_TRACKING if X86_64 |
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index dab95a85f7f8..55b67614ed94 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c | |||
@@ -34,7 +34,7 @@ | |||
34 | extern pgd_t early_level4_pgt[PTRS_PER_PGD]; | 34 | extern pgd_t early_level4_pgt[PTRS_PER_PGD]; |
35 | extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD]; | 35 | extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD]; |
36 | static unsigned int __initdata next_early_pgt = 2; | 36 | static unsigned int __initdata next_early_pgt = 2; |
37 | pmdval_t __initdata early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX); | 37 | pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX); |
38 | 38 | ||
39 | /* Wipe all early page tables except for the kernel symbol map */ | 39 | /* Wipe all early page tables except for the kernel symbol map */ |
40 | static void __init reset_early_page_tables(void) | 40 | static void __init reset_early_page_tables(void) |
diff --git a/arch/x86/kernel/microcode_intel_early.c b/arch/x86/kernel/microcode_intel_early.c index d893e8ed8ac9..2e9e12871c2b 100644 --- a/arch/x86/kernel/microcode_intel_early.c +++ b/arch/x86/kernel/microcode_intel_early.c | |||
@@ -487,6 +487,7 @@ static inline void show_saved_mc(void) | |||
487 | #endif | 487 | #endif |
488 | 488 | ||
489 | #if defined(CONFIG_MICROCODE_INTEL_EARLY) && defined(CONFIG_HOTPLUG_CPU) | 489 | #if defined(CONFIG_MICROCODE_INTEL_EARLY) && defined(CONFIG_HOTPLUG_CPU) |
490 | static DEFINE_MUTEX(x86_cpu_microcode_mutex); | ||
490 | /* | 491 | /* |
491 | * Save this mc into mc_saved_data. So it will be loaded early when a CPU is | 492 | * Save this mc into mc_saved_data. So it will be loaded early when a CPU is |
492 | * hot added or resumes. | 493 | * hot added or resumes. |
@@ -507,7 +508,7 @@ int save_mc_for_early(u8 *mc) | |||
507 | * Hold hotplug lock so mc_saved_data is not accessed by a CPU in | 508 | * Hold hotplug lock so mc_saved_data is not accessed by a CPU in |
508 | * hotplug. | 509 | * hotplug. |
509 | */ | 510 | */ |
510 | cpu_hotplug_driver_lock(); | 511 | mutex_lock(&x86_cpu_microcode_mutex); |
511 | 512 | ||
512 | mc_saved_count_init = mc_saved_data.mc_saved_count; | 513 | mc_saved_count_init = mc_saved_data.mc_saved_count; |
513 | mc_saved_count = mc_saved_data.mc_saved_count; | 514 | mc_saved_count = mc_saved_data.mc_saved_count; |
@@ -544,7 +545,7 @@ int save_mc_for_early(u8 *mc) | |||
544 | } | 545 | } |
545 | 546 | ||
546 | out: | 547 | out: |
547 | cpu_hotplug_driver_unlock(); | 548 | mutex_unlock(&x86_cpu_microcode_mutex); |
548 | 549 | ||
549 | return ret; | 550 | return ret; |
550 | } | 551 | } |
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 607af0d4d5ef..4e7a37ff03ab 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
@@ -312,6 +312,8 @@ void arch_cpu_idle(void) | |||
312 | { | 312 | { |
313 | if (cpuidle_idle_call()) | 313 | if (cpuidle_idle_call()) |
314 | x86_idle(); | 314 | x86_idle(); |
315 | else | ||
316 | local_irq_enable(); | ||
315 | } | 317 | } |
316 | 318 | ||
317 | /* | 319 | /* |
@@ -368,9 +370,6 @@ void amd_e400_remove_cpu(int cpu) | |||
368 | */ | 370 | */ |
369 | static void amd_e400_idle(void) | 371 | static void amd_e400_idle(void) |
370 | { | 372 | { |
371 | if (need_resched()) | ||
372 | return; | ||
373 | |||
374 | if (!amd_e400_c1e_detected) { | 373 | if (!amd_e400_c1e_detected) { |
375 | u32 lo, hi; | 374 | u32 lo, hi; |
376 | 375 | ||
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index fdc5dca14fb3..eaac1743def7 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c | |||
@@ -359,7 +359,17 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, | |||
359 | } | 359 | } |
360 | 360 | ||
361 | /* | 361 | /* |
362 | * would have hole in the middle or ends, and only ram parts will be mapped. | 362 | * We need to iterate through the E820 memory map and create direct mappings |
363 | * for only E820_RAM and E820_KERN_RESERVED regions. We cannot simply | ||
364 | * create direct mappings for all pfns from [0 to max_low_pfn) and | ||
365 | * [4GB to max_pfn) because of possible memory holes in high addresses | ||
366 | * that cannot be marked as UC by fixed/variable range MTRRs. | ||
367 | * Depending on the alignment of E820 ranges, this may possibly result | ||
368 | * in using smaller size (i.e. 4K instead of 2M or 1G) page tables. | ||
369 | * | ||
370 | * init_mem_mapping() calls init_range_memory_mapping() with big range. | ||
371 | * That range would have hole in the middle or ends, and only ram parts | ||
372 | * will be mapped in init_range_memory_mapping(). | ||
363 | */ | 373 | */ |
364 | static unsigned long __init init_range_memory_mapping( | 374 | static unsigned long __init init_range_memory_mapping( |
365 | unsigned long r_start, | 375 | unsigned long r_start, |
@@ -419,6 +429,13 @@ void __init init_mem_mapping(void) | |||
419 | max_pfn_mapped = 0; /* will get exact value next */ | 429 | max_pfn_mapped = 0; /* will get exact value next */ |
420 | min_pfn_mapped = real_end >> PAGE_SHIFT; | 430 | min_pfn_mapped = real_end >> PAGE_SHIFT; |
421 | last_start = start = real_end; | 431 | last_start = start = real_end; |
432 | |||
433 | /* | ||
434 | * We start from the top (end of memory) and go to the bottom. | ||
435 | * The memblock_find_in_range() gets us a block of RAM from the | ||
436 | * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages | ||
437 | * for page table. | ||
438 | */ | ||
422 | while (last_start > ISA_END_ADDRESS) { | 439 | while (last_start > ISA_END_ADDRESS) { |
423 | if (last_start > step_size) { | 440 | if (last_start > step_size) { |
424 | start = round_down(last_start - 1, step_size); | 441 | start = round_down(last_start - 1, step_size); |