author     Ingo Molnar <mingo@kernel.org>	2014-01-16 03:33:30 -0500
committer  Ingo Molnar <mingo@kernel.org>	2014-01-16 03:33:30 -0500
commit     860fc2f2640ec348b9520ca4649b1bfd23d91bc2 (patch)
tree       73d90d6ef86893c89bb70e78a2b63295d531f371 /arch
parent     197749981e539c1eb5863f417de6dd4e2c02b76c (diff)
parent     bee09ed91cacdbffdbcd3b05de8409c77ec9fcd6 (diff)
Merge branch 'perf/urgent' into perf/core
Pick up the latest fixes, refresh the development tree.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch')
96 files changed, 601 insertions, 379 deletions
diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h
index 68125dd766c6..39e58d1cdf90 100644
--- a/arch/arc/include/uapi/asm/unistd.h
+++ b/arch/arc/include/uapi/asm/unistd.h
@@ -8,7 +8,11 @@
 
 /******** no-legacy-syscalls-ABI *******/
 
-#ifndef _UAPI_ASM_ARC_UNISTD_H
+/*
+ * Non-typical guard macro to enable inclusion twice in ARCH sys.c
+ * That is how the Generic syscall wrapper generator works
+ */
+#if !defined(_UAPI_ASM_ARC_UNISTD_H) || defined(__SYSCALL)
 #define _UAPI_ASM_ARC_UNISTD_H
 
 #define __ARCH_WANT_SYS_EXECVE
@@ -36,4 +40,6 @@ __SYSCALL(__NR_arc_gettls, sys_arc_gettls)
 #define __NR_sysfs (__NR_arch_specific_syscall + 3)
 __SYSCALL(__NR_sysfs, sys_sysfs)
 
+#undef __SYSCALL
+
 #endif
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index 9db5047812f3..177becde7a26 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -559,7 +559,7 @@
 		compatible = "arm,pl330", "arm,primecell";
 		reg = <0x10800000 0x1000>;
 		interrupts = <0 33 0>;
-		clocks = <&clock 271>;
+		clocks = <&clock 346>;
 		clock-names = "apb_pclk";
 		#dma-cells = <1>;
 		#dma-channels = <8>;
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
index ee845fad939b..9987dd0e9c59 100644
--- a/arch/arm/boot/dts/r8a7790.dtsi
+++ b/arch/arm/boot/dts/r8a7790.dtsi
@@ -87,9 +87,9 @@
 		interrupts = <1 9 0xf04>;
 	};
 
-	gpio0: gpio@ffc40000 {
+	gpio0: gpio@e6050000 {
 		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc40000 0 0x2c>;
+		reg = <0 0xe6050000 0 0x50>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 4 0x4>;
 		#gpio-cells = <2>;
@@ -99,9 +99,9 @@
 		interrupt-controller;
 	};
 
-	gpio1: gpio@ffc41000 {
+	gpio1: gpio@e6051000 {
 		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc41000 0 0x2c>;
+		reg = <0 0xe6051000 0 0x50>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 5 0x4>;
 		#gpio-cells = <2>;
@@ -111,9 +111,9 @@
 		interrupt-controller;
 	};
 
-	gpio2: gpio@ffc42000 {
+	gpio2: gpio@e6052000 {
 		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc42000 0 0x2c>;
+		reg = <0 0xe6052000 0 0x50>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 6 0x4>;
 		#gpio-cells = <2>;
@@ -123,9 +123,9 @@
 		interrupt-controller;
 	};
 
-	gpio3: gpio@ffc43000 {
+	gpio3: gpio@e6053000 {
 		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc43000 0 0x2c>;
+		reg = <0 0xe6053000 0 0x50>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 7 0x4>;
 		#gpio-cells = <2>;
@@ -135,9 +135,9 @@
 		interrupt-controller;
 	};
 
-	gpio4: gpio@ffc44000 {
+	gpio4: gpio@e6054000 {
 		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc44000 0 0x2c>;
+		reg = <0 0xe6054000 0 0x50>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 8 0x4>;
 		#gpio-cells = <2>;
@@ -147,9 +147,9 @@
 		interrupt-controller;
 	};
 
-	gpio5: gpio@ffc45000 {
+	gpio5: gpio@e6055000 {
 		compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
-		reg = <0 0xffc45000 0 0x2c>;
+		reg = <0 0xe6055000 0 0x50>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 9 0x4>;
 		#gpio-cells = <2>;
@@ -241,7 +241,7 @@
 
 	sdhi0: sdhi@ee100000 {
 		compatible = "renesas,sdhi-r8a7790";
-		reg = <0 0xee100000 0 0x100>;
+		reg = <0 0xee100000 0 0x200>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 165 4>;
 		cap-sd-highspeed;
@@ -250,7 +250,7 @@
 
 	sdhi1: sdhi@ee120000 {
 		compatible = "renesas,sdhi-r8a7790";
-		reg = <0 0xee120000 0 0x100>;
+		reg = <0 0xee120000 0 0x200>;
 		interrupt-parent = <&gic>;
 		interrupts = <0 166 4>;
 		cap-sd-highspeed;
diff --git a/arch/arm/crypto/aesbs-core.S_shipped b/arch/arm/crypto/aesbs-core.S_shipped
index 64205d453260..71e5fc7cfb18 100644
--- a/arch/arm/crypto/aesbs-core.S_shipped
+++ b/arch/arm/crypto/aesbs-core.S_shipped
@@ -58,7 +58,7 @@
 # define VFP_ABI_FRAME	0
 # define BSAES_ASM_EXTENDED_KEY
 # define XTS_CHAIN_TWEAK
-# define __ARM_ARCH__	__LINUX_ARM_ARCH__
+# define __ARM_ARCH__	7
 #endif
 
 #ifdef __thumb__
diff --git a/arch/arm/crypto/bsaes-armv7.pl b/arch/arm/crypto/bsaes-armv7.pl
index f3d96d932573..be068db960ee 100644
--- a/arch/arm/crypto/bsaes-armv7.pl
+++ b/arch/arm/crypto/bsaes-armv7.pl
@@ -701,7 +701,7 @@ $code.=<<___;
 # define VFP_ABI_FRAME	0
 # define BSAES_ASM_EXTENDED_KEY
 # define XTS_CHAIN_TWEAK
-# define __ARM_ARCH__	__LINUX_ARM_ARCH__
+# define __ARM_ARCH__	7
 #endif
 
 #ifdef __thumb__
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 3c597c222ef2..fbeb39c869e9 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -329,7 +329,7 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
  */
 #define ioremap(cookie,size)		__arm_ioremap((cookie), (size), MT_DEVICE)
 #define ioremap_nocache(cookie,size)	__arm_ioremap((cookie), (size), MT_DEVICE)
-#define ioremap_cached(cookie,size)	__arm_ioremap((cookie), (size), MT_DEVICE_CACHED)
+#define ioremap_cache(cookie,size)	__arm_ioremap((cookie), (size), MT_DEVICE_CACHED)
 #define ioremap_wc(cookie,size)		__arm_ioremap((cookie), (size), MT_DEVICE_WC)
 #define iounmap				__arm_iounmap
 
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 6976b03e5213..8756e4bcdba0 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -347,7 +347,8 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
 #define ARCH_PFN_OFFSET		PHYS_PFN_OFFSET
 
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define virt_addr_valid(kaddr)	((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
+#define virt_addr_valid(kaddr)	(((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \
+					&& pfn_valid(__pa(kaddr) >> PAGE_SHIFT) )
 
 #endif
 
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 75579a9d6f76..3759cacdd7f8 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -117,6 +117,6 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 	return __set_phys_to_machine(pfn, mfn);
 }
 
-#define xen_remap(cookie, size) ioremap_cached((cookie), (size));
+#define xen_remap(cookie, size) ioremap_cache((cookie), (size));
 
 #endif /* _ASM_ARM_XEN_PAGE_H */
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index 739c3dfc1da2..34d5fd585bbb 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -171,7 +171,7 @@ void __init arm_dt_init_cpu_maps(void)
 
 bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
 {
-	return (phys_id & MPIDR_HWID_BITMASK) == cpu_logical_map(cpu);
+	return phys_id == cpu_logical_map(cpu);
 }
 
 static const void * __init arch_get_next_mach(const char *const **match)
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index d85055cd24ba..20d553c9f5e2 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -254,7 +254,7 @@ static int probe_current_pmu(struct arm_pmu *pmu)
 static int cpu_pmu_device_probe(struct platform_device *pdev)
 {
 	const struct of_device_id *of_id;
-	int (*init_fn)(struct arm_pmu *);
+	const int (*init_fn)(struct arm_pmu *);
 	struct device_node *node = pdev->dev.of_node;
 	struct arm_pmu *pmu;
 	int ret = -ENODEV;
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 7940241f0576..4636d56af2db 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -36,7 +36,13 @@
 #include <asm/system_misc.h>
 #include <asm/opcodes.h>
 
-static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
+static const char *handler[]= {
+	"prefetch abort",
+	"data abort",
+	"address exception",
+	"interrupt",
+	"undefined instruction",
+};
 
 void *vectors_page;
 
@@ -425,9 +431,10 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
 			instr2 = __mem_to_opcode_thumb16(instr2);
 			instr = __opcode_thumb32_compose(instr, instr2);
 		}
-	} else if (get_user(instr, (u32 __user *)pc)) {
+	} else {
+		if (get_user(instr, (u32 __user *)pc))
+			goto die_sig;
 		instr = __mem_to_opcode_arm(instr);
-		goto die_sig;
 	}
 
 	if (call_undef_hook(regs, instr) == 0)
diff --git a/arch/arm/mach-footbridge/dc21285-timer.c b/arch/arm/mach-footbridge/dc21285-timer.c
index 9ee78f7b4990..782f6c71fa0a 100644
--- a/arch/arm/mach-footbridge/dc21285-timer.c
+++ b/arch/arm/mach-footbridge/dc21285-timer.c
@@ -96,11 +96,12 @@ static struct irqaction footbridge_timer_irq = {
 void __init footbridge_timer_init(void)
 {
 	struct clock_event_device *ce = &ckevt_dc21285;
+	unsigned rate = DIV_ROUND_CLOSEST(mem_fclk_21285, 16);
 
-	clocksource_register_hz(&cksrc_dc21285, (mem_fclk_21285 + 8) / 16);
+	clocksource_register_hz(&cksrc_dc21285, rate);
 
 	setup_irq(ce->irq, &footbridge_timer_irq);
 
 	ce->cpumask = cpumask_of(smp_processor_id());
-	clockevents_config_and_register(ce, mem_fclk_21285, 0x4, 0xffffff);
+	clockevents_config_and_register(ce, rate, 0x4, 0xffffff);
 }
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index bd3bf66ce344..c7de89b263dd 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -53,6 +53,7 @@ static void __init highbank_scu_map_io(void)
 
 static void highbank_l2x0_disable(void)
 {
+	outer_flush_all();
 	/* Disable PL310 L2 Cache controller */
 	highbank_smc1(0x102, 0x0);
 }
diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c
index 4ec8d82b0492..44a59c3abfb0 100644
--- a/arch/arm/mach-omap2/board-ldp.c
+++ b/arch/arm/mach-omap2/board-ldp.c
@@ -242,12 +242,18 @@ static void __init ldp_display_init(void)
 
 static int ldp_twl_gpio_setup(struct device *dev, unsigned gpio, unsigned ngpio)
 {
+	int res;
+
 	/* LCD enable GPIO */
 	ldp_lcd_pdata.enable_gpio = gpio + 7;
 
 	/* Backlight enable GPIO */
 	ldp_lcd_pdata.backlight_gpio = gpio + 15;
 
+	res = platform_device_register(&ldp_lcd_device);
+	if (res)
+		pr_err("Unable to register LCD: %d\n", res);
+
 	return 0;
 }
 
@@ -346,7 +352,6 @@ static struct omap2_hsmmc_info mmc[] __initdata = {
 
 static struct platform_device *ldp_devices[] __initdata = {
 	&ldp_gpio_keys_device,
-	&ldp_lcd_device,
 };
 
 #ifdef CONFIG_OMAP_MUX
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index 58347bb874a0..4cf165502b35 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -101,13 +101,51 @@ static const struct omap_dss_hwmod_data omap4_dss_hwmod_data[] __initconst = {
 	{ "dss_hdmi", "omapdss_hdmi", -1 },
 };
 
+static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
+{
+	u32 enable_mask, enable_shift;
+	u32 pipd_mask, pipd_shift;
+	u32 reg;
+
+	if (dsi_id == 0) {
+		enable_mask = OMAP4_DSI1_LANEENABLE_MASK;
+		enable_shift = OMAP4_DSI1_LANEENABLE_SHIFT;
+		pipd_mask = OMAP4_DSI1_PIPD_MASK;
+		pipd_shift = OMAP4_DSI1_PIPD_SHIFT;
+	} else if (dsi_id == 1) {
+		enable_mask = OMAP4_DSI2_LANEENABLE_MASK;
+		enable_shift = OMAP4_DSI2_LANEENABLE_SHIFT;
+		pipd_mask = OMAP4_DSI2_PIPD_MASK;
+		pipd_shift = OMAP4_DSI2_PIPD_SHIFT;
+	} else {
+		return -ENODEV;
+	}
+
+	reg = omap4_ctrl_pad_readl(OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_DSIPHY);
+
+	reg &= ~enable_mask;
+	reg &= ~pipd_mask;
+
+	reg |= (lanes << enable_shift) & enable_mask;
+	reg |= (lanes << pipd_shift) & pipd_mask;
+
+	omap4_ctrl_pad_writel(reg, OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_DSIPHY);
+
+	return 0;
+}
+
 static int omap_dsi_enable_pads(int dsi_id, unsigned lane_mask)
 {
+	if (cpu_is_omap44xx())
+		return omap4_dsi_mux_pads(dsi_id, lane_mask);
+
 	return 0;
 }
 
 static void omap_dsi_disable_pads(int dsi_id, unsigned lane_mask)
 {
+	if (cpu_is_omap44xx())
+		omap4_dsi_mux_pads(dsi_id, 0);
 }
 
 static int omap_dss_set_min_bus_tput(struct device *dev, unsigned long tput)
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index b39efd46abf9..c0ab9b26be3d 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -162,6 +162,7 @@ void __iomem *omap4_get_l2cache_base(void)
 
 static void omap4_l2x0_disable(void)
 {
+	outer_flush_all();
 	/* Disable PL310 L2 Cache controller */
 	omap_smc1(0x102, 0x0);
 }
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
index 56cebb05509e..d23c77fadb31 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
@@ -796,7 +796,7 @@ struct omap_hwmod omap2xxx_counter_32k_hwmod = {
 
 /* gpmc */
 static struct omap_hwmod_irq_info omap2xxx_gpmc_irqs[] = {
-	{ .irq = 20 },
+	{ .irq = 20 + OMAP_INTC_START, },
 	{ .irq = -1 }
 };
 
@@ -841,7 +841,7 @@ static struct omap_hwmod_class omap2_rng_hwmod_class = {
 };
 
 static struct omap_hwmod_irq_info omap2_rng_mpu_irqs[] = {
-	{ .irq = 52 },
+	{ .irq = 52 + OMAP_INTC_START, },
 	{ .irq = -1 }
 };
 
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index d33742908f97..4c3b1e6df508 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -2165,7 +2165,7 @@ static struct omap_hwmod_class omap3xxx_gpmc_hwmod_class = {
 };
 
 static struct omap_hwmod_irq_info omap3xxx_gpmc_irqs[] = {
-	{ .irq = 20 },
+	{ .irq = 20 + OMAP_INTC_START, },
 	{ .irq = -1 }
 };
 
@@ -2999,7 +2999,7 @@ static struct omap_mmu_dev_attr mmu_isp_dev_attr = {
 
 static struct omap_hwmod omap3xxx_mmu_isp_hwmod;
 static struct omap_hwmod_irq_info omap3xxx_mmu_isp_irqs[] = {
-	{ .irq = 24 },
+	{ .irq = 24 + OMAP_INTC_START, },
 	{ .irq = -1 }
 };
 
@@ -3041,7 +3041,7 @@ static struct omap_mmu_dev_attr mmu_iva_dev_attr = {
 
 static struct omap_hwmod omap3xxx_mmu_iva_hwmod;
 static struct omap_hwmod_irq_info omap3xxx_mmu_iva_irqs[] = {
-	{ .irq = 28 },
+	{ .irq = 28 + OMAP_INTC_START, },
 	{ .irq = -1 }
 };
 
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index db32d5380b11..18f333c440db 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -1637,7 +1637,7 @@ static struct omap_hwmod dra7xx_uart1_hwmod = {
 	.class		= &dra7xx_uart_hwmod_class,
 	.clkdm_name	= "l4per_clkdm",
 	.main_clk	= "uart1_gfclk_mux",
-	.flags		= HWMOD_SWSUP_SIDLE_ACT,
+	.flags		= HWMOD_SWSUP_SIDLE_ACT | DEBUG_OMAP2UART1_FLAGS,
 	.prcm = {
 		.omap4 = {
 			.clkctrl_offs = DRA7XX_CM_L4PER_UART1_CLKCTRL_OFFSET,
diff --git a/arch/arm/mach-pxa/include/mach/lubbock.h b/arch/arm/mach-pxa/include/mach/lubbock.h
index 2a086e8373eb..958cd6af9384 100644
--- a/arch/arm/mach-pxa/include/mach/lubbock.h
+++ b/arch/arm/mach-pxa/include/mach/lubbock.h
@@ -10,6 +10,8 @@
  * published by the Free Software Foundation.
  */
 
+#include <mach/irqs.h>
+
 #define LUBBOCK_ETH_PHYS	PXA_CS3_PHYS
 
 #define LUBBOCK_FPGA_PHYS	PXA_CS2_PHYS
diff --git a/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c b/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c
index 7eb9a10fc1af..2fddf38192df 100644
--- a/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c
+++ b/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c
@@ -8,8 +8,6 @@
  * published by the Free Software Foundation.
  */
 
-#include <linux/clk-provider.h>
-#include <linux/irqchip.h>
 #include <linux/of_platform.h>
 
 #include <asm/mach/arch.h>
@@ -48,15 +46,9 @@ static void __init s3c64xx_dt_map_io(void)
 		panic("SoC is not S3C64xx!");
 }
 
-static void __init s3c64xx_dt_init_irq(void)
-{
-	of_clk_init(NULL);
-	samsung_wdt_reset_of_init();
-	irqchip_init();
-};
-
 static void __init s3c64xx_dt_init_machine(void)
 {
+	samsung_wdt_reset_of_init();
 	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 }
 
@@ -79,7 +71,6 @@ DT_MACHINE_START(S3C6400_DT, "Samsung S3C64xx (Flattened Device Tree)")
 	/* Maintainer: Tomasz Figa <tomasz.figa@gmail.com> */
 	.dt_compat	= s3c64xx_dt_compat,
 	.map_io		= s3c64xx_dt_map_io,
-	.init_irq	= s3c64xx_dt_init_irq,
 	.init_machine	= s3c64xx_dt_init_machine,
 	.restart	= s3c64xx_dt_restart,
 MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index 958e3cbf0ac2..8ea87bd45c33 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -483,7 +483,7 @@ static struct platform_device lcdc0_device = {
 	.id		= 0,
 	.dev	= {
 		.platform_data	= &lcdc0_info,
-		.coherent_dma_mask = ~0,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 	},
 };
 
@@ -580,7 +580,7 @@ static struct platform_device hdmi_lcdc_device = {
 	.id		= 1,
 	.dev	= {
 		.platform_data	= &hdmi_lcdc_info,
-		.coherent_dma_mask = ~0,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 	},
 };
 
@@ -614,6 +614,11 @@ static struct regulator_consumer_supply fixed3v3_power_consumers[] = {
 	REGULATOR_SUPPLY("vqmmc", "sh_mmcif"),
 };
 
+/* Fixed 3.3V regulator used by LCD backlight */
+static struct regulator_consumer_supply fixed5v0_power_consumers[] = {
+	REGULATOR_SUPPLY("power", "pwm-backlight.0"),
+};
+
 /* Fixed 3.3V regulator to be used by SDHI0 */
 static struct regulator_consumer_supply vcc_sdhi0_consumers[] = {
 	REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"),
@@ -1196,6 +1201,8 @@ static void __init eva_init(void)
 
 	regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers,
 				     ARRAY_SIZE(fixed3v3_power_consumers), 3300000);
+	regulator_register_always_on(3, "fixed-5.0V", fixed5v0_power_consumers,
+				     ARRAY_SIZE(fixed5v0_power_consumers), 5000000);
 
 	pinctrl_register_mappings(eva_pinctrl_map, ARRAY_SIZE(eva_pinctrl_map));
 	pwm_add_table(pwm_lookup, ARRAY_SIZE(pwm_lookup));
diff --git a/arch/arm/mach-shmobile/board-bockw.c b/arch/arm/mach-shmobile/board-bockw.c
index 38611526fe9a..3c4995aebd22 100644
--- a/arch/arm/mach-shmobile/board-bockw.c
+++ b/arch/arm/mach-shmobile/board-bockw.c
@@ -679,7 +679,7 @@ static void __init bockw_init(void)
 			.id		= i,
 			.data		= &rsnd_card_info[i],
 			.size_data	= sizeof(struct asoc_simple_card_info),
-			.dma_mask	= ~0,
+			.dma_mask	= DMA_BIT_MASK(32),
 		};
 
 		platform_device_register_full(&cardinfo);
diff --git a/arch/arm/mach-shmobile/board-kzm9g.c b/arch/arm/mach-shmobile/board-kzm9g.c
index fe689b7fdc9e..bc40b853ffd3 100644
--- a/arch/arm/mach-shmobile/board-kzm9g.c
+++ b/arch/arm/mach-shmobile/board-kzm9g.c
@@ -334,7 +334,7 @@ static struct platform_device lcdc_device = {
 	.resource	= lcdc_resources,
 	.dev	= {
 		.platform_data	= &lcdc_info,
-		.coherent_dma_mask = ~0,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 	},
 };
 
diff --git a/arch/arm/mach-shmobile/board-lager.c b/arch/arm/mach-shmobile/board-lager.c
index a8d3ce646fb9..e0406fd37390 100644
--- a/arch/arm/mach-shmobile/board-lager.c
+++ b/arch/arm/mach-shmobile/board-lager.c
@@ -245,7 +245,9 @@ static void __init lager_init(void)
 {
 	lager_add_standard_devices();
 
-	phy_register_fixup_for_id("r8a7790-ether-ff:01", lager_ksz8041_fixup);
+	if (IS_ENABLED(CONFIG_PHYLIB))
+		phy_register_fixup_for_id("r8a7790-ether-ff:01",
+					  lager_ksz8041_fixup);
 }
 
 static const char * const lager_boards_compat_dt[] __initconst = {
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index af06753eb809..e721d2ccceae 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -409,7 +409,7 @@ static struct platform_device lcdc_device = {
 	.resource	= lcdc_resources,
 	.dev	= {
 		.platform_data	= &lcdc_info,
-		.coherent_dma_mask = ~0,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 	},
 };
 
@@ -499,7 +499,7 @@ static struct platform_device hdmi_lcdc_device = {
 	.id = 1,
 	.dev = {
 		.platform_data = &hdmi_lcdc_info,
-		.coherent_dma_mask = ~0,
+		.coherent_dma_mask = DMA_BIT_MASK(32),
 	},
 };
 
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 6d5ba9afb16a..3387e60e4ea3 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -175,16 +175,16 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 		unsigned long i;
 		if (cache_is_vipt_nonaliasing()) {
 			for (i = 0; i < (1 << compound_order(page)); i++) {
-				void *addr = kmap_atomic(page);
+				void *addr = kmap_atomic(page + i);
 				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
 				kunmap_atomic(addr);
 			}
 		} else {
 			for (i = 0; i < (1 << compound_order(page)); i++) {
-				void *addr = kmap_high_get(page);
+				void *addr = kmap_high_get(page + i);
 				if (addr) {
 					__cpuc_flush_dcache_area(addr, PAGE_SIZE);
-					kunmap_high(page);
+					kunmap_high(page + i);
 				}
 			}
 		}
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 1f7b19a47060..3e8f106ee5fe 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -229,7 +229,7 @@ void __init setup_dma_zone(const struct machine_desc *mdesc)
 #ifdef CONFIG_ZONE_DMA
 	if (mdesc->dma_zone_size) {
 		arm_dma_zone_size = mdesc->dma_zone_size;
-		arm_dma_limit = __pv_phys_offset + arm_dma_zone_size - 1;
+		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
 	} else
 		arm_dma_limit = 0xffffffff;
 	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 83e4f959ee47..85501238b425 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -96,7 +96,7 @@ static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
 	struct remap_data *info = data;
 	struct page *page = info->pages[info->index++];
 	unsigned long pfn = page_to_pfn(page);
-	pte_t pte = pfn_pte(pfn, info->prot);
+	pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot));
 
 	if (map_foreign_page(pfn, info->fgmfn, info->domid))
 		return -EFAULT;
@@ -224,10 +224,10 @@ static int __init xen_guest_init(void)
 	}
 	if (of_address_to_resource(node, GRANT_TABLE_PHYSADDR, &res))
 		return 0;
-	xen_hvm_resume_frames = res.start >> PAGE_SHIFT;
+	xen_hvm_resume_frames = res.start;
 	xen_events_irq = irq_of_parse_and_map(node, 0);
 	pr_info("Xen %s support found, events_irq=%d gnttab_frame_pfn=%lx\n",
-		version, xen_events_irq, xen_hvm_resume_frames);
+		version, xen_events_irq, (xen_hvm_resume_frames >> PAGE_SHIFT));
 	xen_domain_type = XEN_HVM_DOMAIN;
 
 	xen_setup_features();
diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h
index 2820f1a6eebe..dde3fc9c49f0 100644
--- a/arch/arm64/include/asm/xen/page-coherent.h
+++ b/arch/arm64/include/asm/xen/page-coherent.h
@@ -23,25 +23,21 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
 	     unsigned long offset, size_t size, enum dma_data_direction dir,
 	     struct dma_attrs *attrs)
 {
-	__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
 }
 
 static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir,
 		struct dma_attrs *attrs)
 {
-	__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
 }
 
 static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
-	__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
 }
 
 static inline void xen_dma_sync_single_for_device(struct device *hwdev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
-	__generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
 }
 #endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 6777a2192b83..6a8928bba03c 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -214,31 +214,29 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
 {
 	int err, len, type, disabled = !ctrl.enabled;
 
-	if (disabled) {
-		len = 0;
-		type = HW_BREAKPOINT_EMPTY;
-	} else {
-		err = arch_bp_generic_fields(ctrl, &len, &type);
-		if (err)
-			return err;
+	attr->disabled = disabled;
+	if (disabled)
+		return 0;
+
+	err = arch_bp_generic_fields(ctrl, &len, &type);
+	if (err)
+		return err;
 
-		switch (note_type) {
-		case NT_ARM_HW_BREAK:
-			if ((type & HW_BREAKPOINT_X) != type)
-				return -EINVAL;
-			break;
-		case NT_ARM_HW_WATCH:
-			if ((type & HW_BREAKPOINT_RW) != type)
-				return -EINVAL;
-			break;
-		default:
+	switch (note_type) {
+	case NT_ARM_HW_BREAK:
+		if ((type & HW_BREAKPOINT_X) != type)
 			return -EINVAL;
-		}
+		break;
+	case NT_ARM_HW_WATCH:
+		if ((type & HW_BREAKPOINT_RW) != type)
+			return -EINVAL;
+		break;
+	default:
+		return -EINVAL;
 	}
 
 	attr->bp_len = len;
 	attr->bp_type = type;
-	attr->disabled = disabled;
 
 	return 0;
 }
diff --git a/arch/mips/include/asm/cacheops.h b/arch/mips/include/asm/cacheops.h
index c75025f27c20..06b9bc7ea14b 100644
--- a/arch/mips/include/asm/cacheops.h
+++ b/arch/mips/include/asm/cacheops.h
@@ -83,6 +83,6 @@
 /*
  * Loongson2-specific cacheops
  */
-#define Hit_Invalidate_I_Loongson23	0x00
+#define Hit_Invalidate_I_Loongson2	0x00
 
 #endif	/* __ASM_CACHEOPS_H */
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index 34d1a1917125..c84caddb8bde 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -165,7 +165,7 @@ static inline void flush_icache_line(unsigned long addr)
 	__iflush_prologue
 	switch (boot_cpu_type()) {
 	case CPU_LOONGSON2:
-		cache_op(Hit_Invalidate_I_Loongson23, addr);
+		cache_op(Hit_Invalidate_I_Loongson2, addr);
 		break;
 
 	default:
@@ -219,7 +219,7 @@ static inline void protected_flush_icache_line(unsigned long addr)
 {
 	switch (boot_cpu_type()) {
 	case CPU_LOONGSON2:
-		protected_cache_op(Hit_Invalidate_I_Loongson23, addr);
+		protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
 		break;
 
 	default:
@@ -357,8 +357,8 @@ static inline void invalidate_tcache_page(unsigned long addr)
 		"i" (op));
 
 /* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
-#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize) \
-static inline void blast_##pfx##cache##lsize(void) \
+#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra) \
+static inline void extra##blast_##pfx##cache##lsize(void) \
 { \
 	unsigned long start = INDEX_BASE; \
 	unsigned long end = start + current_cpu_data.desc.waysize; \
@@ -376,7 +376,7 @@ static inline void blast_##pfx##cache##lsize(void) \
 	__##pfx##flush_epilogue \
 } \
 \
-static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \
+static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
 { \
 	unsigned long start = page; \
 	unsigned long end = page + PAGE_SIZE; \
@@ -391,7 +391,7 @@ static inline void blast_##pfx##cache##lsize##_page(unsigned long page) \
 	__##pfx##flush_epilogue \
 } \
 \
-static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
+static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
 { \
 	unsigned long indexmask = current_cpu_data.desc.waysize - 1; \
 	unsigned long start = INDEX_BASE + (page & indexmask); \
@@ -410,23 +410,24 @@ static inline void blast_##pfx##cache##lsize##_page_indexed(unsigned long page)
 	__##pfx##flush_epilogue \
 }
 
-__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16)
-__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
-__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16)
-__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32)
-__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
-__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32)
-__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64)
-__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)
-__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64)
-__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128)
-
-__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16)
-__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32)
-__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16)
-__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32)
-__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64)
-__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128)
+__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
+__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
+__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
+__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
+__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )
+
+__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
+__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
+__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )
 
 /* build blast_xxx_range, protected_blast_xxx_range */
 #define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra) \
@@ -452,8 +453,8 @@ static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start,
 __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
 __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
 __BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )
-__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson23, \
-	protected_, loongson23_)
+__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
+	protected_, loongson2_)
 __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
 __BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
 /* blast_inv_dcache_range */
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 62ffd20ea869..49e572d879e1 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -237,6 +237,8 @@ static void r4k_blast_icache_page_setup(void)
 		r4k_blast_icache_page = (void *)cache_noop;
 	else if (ic_lsize == 16)
 		r4k_blast_icache_page = blast_icache16_page;
+	else if (ic_lsize == 32 && current_cpu_type() == CPU_LOONGSON2)
+		r4k_blast_icache_page = loongson2_blast_icache32_page;
 	else if (ic_lsize == 32)
 		r4k_blast_icache_page = blast_icache32_page;
 	else if (ic_lsize == 64)
@@ -261,6 +263,9 @@ static void r4k_blast_icache_page_indexed_setup(void)
 		else if (TX49XX_ICACHE_INDEX_INV_WAR)
 			r4k_blast_icache_page_indexed =
 				tx49_blast_icache32_page_indexed;
+		else if (current_cpu_type() == CPU_LOONGSON2)
+			r4k_blast_icache_page_indexed =
+				loongson2_blast_icache32_page_indexed;
 		else
 			r4k_blast_icache_page_indexed =
 				blast_icache32_page_indexed;
@@ -284,6 +289,8 @@ static void r4k_blast_icache_setup(void)
 			r4k_blast_icache = blast_r4600_v1_icache32;
 		else if (TX49XX_ICACHE_INDEX_INV_WAR)
 			r4k_blast_icache = tx49_blast_icache32;
+		else if (current_cpu_type() == CPU_LOONGSON2)
+			r4k_blast_icache = loongson2_blast_icache32;
 		else
 			r4k_blast_icache = blast_icache32;
 	} else if (ic_lsize == 64)
@@ -580,11 +587,11 @@ static inline void local_r4k_flush_icache_range(unsigned long start, unsigned lo
 	else {
 		switch (boot_cpu_type()) {
 		case CPU_LOONGSON2:
-			protected_blast_icache_range(start, end);
+			protected_loongson2_blast_icache_range(start, end);
 			break;
 
 		default:
-			protected_loongson23_blast_icache_range(start, end);
+			protected_blast_icache_range(start, end);
 			break;
 		}
 	}
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index f0e2784e7cca..2f9b751878ba 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -125,42 +125,38 @@ flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vma
 void mark_rodata_ro(void);
 #endif
 
-#ifdef CONFIG_PA8X00
-/* Only pa8800, pa8900 needs this */
-
 #include <asm/kmap_types.h>
 
 #define ARCH_HAS_KMAP
 
-void kunmap_parisc(void *addr);
-
 static inline void *kmap(struct page *page)
 {
 	might_sleep();
+	flush_dcache_page(page);
 	return page_address(page);
 }
 
 static inline void kunmap(struct page *page)
 {
-	kunmap_parisc(page_address(page));
+	flush_kernel_dcache_page_addr(page_address(page));
 }
 
 static inline void *kmap_atomic(struct page *page)
 {
 	pagefault_disable();
+	flush_dcache_page(page);
 	return page_address(page);
 }
 
 static inline void __kunmap_atomic(void *addr)
 {
-	kunmap_parisc(addr);
+	flush_kernel_dcache_page_addr(addr);
 	pagefault_enable();
 }
 
 #define kmap_atomic_prot(page, prot)	kmap_atomic(page)
 #define kmap_atomic_pfn(pfn)	kmap_atomic(pfn_to_page(pfn))
 #define kmap_atomic_to_page(ptr)	virt_to_page(ptr)
-#endif
 
 #endif /* _PARISC_CACHEFLUSH_H */
 
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
index b7adb2ac049c..c53fc63149e8 100644
--- a/arch/parisc/include/asm/page.h
+++ b/arch/parisc/include/asm/page.h
@@ -28,9 +28,8 @@ struct page;
 
 void clear_page_asm(void *page);
 void copy_page_asm(void *to, void *from);
-void clear_user_page(void *vto, unsigned long vaddr, struct page *pg);
-void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
-		struct page *pg);
+#define clear_user_page(vto, vaddr, page) clear_page_asm(vto)
+#define copy_user_page(vto, vfrom, vaddr, page) copy_page_asm(vto, vfrom)
 
 /* #define CONFIG_PARISC_TMPALIAS */
 
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index c035673209f7..a72545554a31 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -388,41 +388,6 @@ void flush_kernel_dcache_page_addr(void *addr)
 }
 EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
 
-void clear_user_page(void *vto, unsigned long vaddr, struct page *page)
-{
-	clear_page_asm(vto);
-	if (!parisc_requires_coherency())
-		flush_kernel_dcache_page_asm(vto);
-}
-EXPORT_SYMBOL(clear_user_page);
-
-void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
-	struct page *pg)
-{
-	/* Copy using kernel mapping. No coherency is needed
-	   (all in kmap/kunmap) on machines that don't support
-	   non-equivalent aliasing. However, the `from' page
-	   needs to be flushed before it can be accessed through
-	   the kernel mapping. */
-	preempt_disable();
-	flush_dcache_page_asm(__pa(vfrom), vaddr);
-	preempt_enable();
-	copy_page_asm(vto, vfrom);
-	if (!parisc_requires_coherency())
-		flush_kernel_dcache_page_asm(vto);
-}
-EXPORT_SYMBOL(copy_user_page);
-
-#ifdef CONFIG_PA8X00
-
-void kunmap_parisc(void *addr)
-{
-	if (parisc_requires_coherency())
-		flush_kernel_dcache_page_addr(addr);
-}
-EXPORT_SYMBOL(kunmap_parisc);
-#endif
-
 void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 {
 	unsigned long flags;
diff --git a/arch/powerpc/boot/dts/mpc5125twr.dts b/arch/powerpc/boot/dts/mpc5125twr.dts
index 4177b62240c2..a618dfc13e4c 100644
--- a/arch/powerpc/boot/dts/mpc5125twr.dts
+++ b/arch/powerpc/boot/dts/mpc5125twr.dts
@@ -58,7 +58,6 @@
 		compatible = "fsl,mpc5121-immr";
 		#address-cells = <1>;
 		#size-cells = <1>;
-		#interrupt-cells = <2>;
 		ranges = <0x0 0x80000000 0x400000>;
 		reg = <0x80000000 0x400000>;
 		bus-frequency = <66000000>;	// 66 MHz ips bus
@@ -189,6 +188,10 @@
 			reg = <0xA000 0x1000>;
 		};
 
+		// disable USB1 port
+		// TODO:
+		// correct pinmux config and fix USB3320 ulpi dependency
+		// before re-enabling it
 		usb@3000 {
 			compatible = "fsl,mpc5121-usb2-dr";
 			reg = <0x3000 0x400>;
@@ -197,6 +200,7 @@
 			interrupts = <43 0x8>;
 			dr_mode = "host";
 			phy_type = "ulpi";
+			status = "disabled";
 		};
 
 		// 5125 PSCs are not 52xx or 5121 PSC compatible
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index 894662a5d4d5..243ce69ad685 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h | |||
@@ -284,7 +284,7 @@ do_kvm_##n: \ | |||
284 | subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \ | 284 | subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \ |
285 | beq- 1f; \ | 285 | beq- 1f; \ |
286 | ld r1,PACAKSAVE(r13); /* kernel stack to use */ \ | 286 | ld r1,PACAKSAVE(r13); /* kernel stack to use */ \ |
287 | 1: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \ | 287 | 1: cmpdi cr1,r1,-INT_FRAME_SIZE; /* check if r1 is in userspace */ \ |
288 | blt+ cr1,3f; /* abort if it is */ \ | 288 | blt+ cr1,3f; /* abort if it is */ \ |
289 | li r1,(n); /* will be reloaded later */ \ | 289 | li r1,(n); /* will be reloaded later */ \ |
290 | sth r1,PACA_TRAP_SAVE(r13); \ | 290 | sth r1,PACA_TRAP_SAVE(r13); \ |
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index 4a594b76674d..bc23b1ba7980 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h | |||
@@ -192,6 +192,10 @@ extern void kvmppc_load_up_vsx(void); | |||
192 | extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst); | 192 | extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst); |
193 | extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst); | 193 | extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst); |
194 | extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd); | 194 | extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd); |
195 | extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu, | ||
196 | struct kvm_vcpu *vcpu); | ||
197 | extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu, | ||
198 | struct kvmppc_book3s_shadow_vcpu *svcpu); | ||
195 | 199 | ||
196 | static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu) | 200 | static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu) |
197 | { | 201 | { |
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h index 0bd9348a4db9..192917d2239c 100644 --- a/arch/powerpc/include/asm/kvm_book3s_asm.h +++ b/arch/powerpc/include/asm/kvm_book3s_asm.h | |||
@@ -79,6 +79,7 @@ struct kvmppc_host_state { | |||
79 | ulong vmhandler; | 79 | ulong vmhandler; |
80 | ulong scratch0; | 80 | ulong scratch0; |
81 | ulong scratch1; | 81 | ulong scratch1; |
82 | ulong scratch2; | ||
82 | u8 in_guest; | 83 | u8 in_guest; |
83 | u8 restore_hid5; | 84 | u8 restore_hid5; |
84 | u8 napping; | 85 | u8 napping; |
@@ -106,6 +107,7 @@ struct kvmppc_host_state { | |||
106 | }; | 107 | }; |
107 | 108 | ||
108 | struct kvmppc_book3s_shadow_vcpu { | 109 | struct kvmppc_book3s_shadow_vcpu { |
110 | bool in_use; | ||
109 | ulong gpr[14]; | 111 | ulong gpr[14]; |
110 | u32 cr; | 112 | u32 cr; |
111 | u32 xer; | 113 | u32 xer; |
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h index 033c06be1d84..7bdcf340016c 100644 --- a/arch/powerpc/include/asm/opal.h +++ b/arch/powerpc/include/asm/opal.h | |||
@@ -720,13 +720,13 @@ int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe, | |||
720 | int64_t opal_pci_poll(uint64_t phb_id); | 720 | int64_t opal_pci_poll(uint64_t phb_id); |
721 | int64_t opal_return_cpu(void); | 721 | int64_t opal_return_cpu(void); |
722 | 722 | ||
723 | int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, uint64_t *val); | 723 | int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, __be64 *val); |
724 | int64_t opal_xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val); | 724 | int64_t opal_xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val); |
725 | 725 | ||
726 | int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type, | 726 | int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type, |
727 | uint32_t addr, uint32_t data, uint32_t sz); | 727 | uint32_t addr, uint32_t data, uint32_t sz); |
728 | int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type, | 728 | int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type, |
729 | uint32_t addr, uint32_t *data, uint32_t sz); | 729 | uint32_t addr, __be32 *data, uint32_t sz); |
730 | int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result); | 730 | int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result); |
731 | int64_t opal_manage_flash(uint8_t op); | 731 | int64_t opal_manage_flash(uint8_t op); |
732 | int64_t opal_update_flash(uint64_t blk_list); | 732 | int64_t opal_update_flash(uint64_t blk_list); |
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h index 9ee12610af02..aace90547614 100644 --- a/arch/powerpc/include/asm/switch_to.h +++ b/arch/powerpc/include/asm/switch_to.h | |||
@@ -35,7 +35,7 @@ extern void giveup_vsx(struct task_struct *); | |||
35 | extern void enable_kernel_spe(void); | 35 | extern void enable_kernel_spe(void); |
36 | extern void giveup_spe(struct task_struct *); | 36 | extern void giveup_spe(struct task_struct *); |
37 | extern void load_up_spe(struct task_struct *); | 37 | extern void load_up_spe(struct task_struct *); |
38 | extern void switch_booke_debug_regs(struct thread_struct *new_thread); | 38 | extern void switch_booke_debug_regs(struct debug_reg *new_debug); |
39 | 39 | ||
40 | #ifndef CONFIG_SMP | 40 | #ifndef CONFIG_SMP |
41 | extern void discard_lazy_cpu_state(void); | 41 | extern void discard_lazy_cpu_state(void); |
diff --git a/arch/powerpc/include/asm/unaligned.h b/arch/powerpc/include/asm/unaligned.h index 5f1b1e3c2137..8296381ae432 100644 --- a/arch/powerpc/include/asm/unaligned.h +++ b/arch/powerpc/include/asm/unaligned.h | |||
@@ -4,13 +4,18 @@ | |||
4 | #ifdef __KERNEL__ | 4 | #ifdef __KERNEL__ |
5 | 5 | ||
6 | /* | 6 | /* |
7 | * The PowerPC can do unaligned accesses itself in big endian mode. | 7 | * The PowerPC can do unaligned accesses itself based on its endian mode. |
8 | */ | 8 | */ |
9 | #include <linux/unaligned/access_ok.h> | 9 | #include <linux/unaligned/access_ok.h> |
10 | #include <linux/unaligned/generic.h> | 10 | #include <linux/unaligned/generic.h> |
11 | 11 | ||
12 | #ifdef __LITTLE_ENDIAN__ | ||
13 | #define get_unaligned __get_unaligned_le | ||
14 | #define put_unaligned __put_unaligned_le | ||
15 | #else | ||
12 | #define get_unaligned __get_unaligned_be | 16 | #define get_unaligned __get_unaligned_be |
13 | #define put_unaligned __put_unaligned_be | 17 | #define put_unaligned __put_unaligned_be |
18 | #endif | ||
14 | 19 | ||
15 | #endif /* __KERNEL__ */ | 20 | #endif /* __KERNEL__ */ |
16 | #endif /* _ASM_POWERPC_UNALIGNED_H */ | 21 | #endif /* _ASM_POWERPC_UNALIGNED_H */ |
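The hunk above makes get_unaligned()/put_unaligned() follow the build's byte order instead of always assuming big-endian. A minimal userspace sketch of the same idea, assuming nothing beyond standard C: byte-wise accessors work at any alignment, and the native one is chosen by the host byte order. The helper names (load_le32/load_be32/load_native32) are invented for the example.

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t load_le32(const void *p)
    {
        const uint8_t *b = p;
        return (uint32_t)b[0] | (uint32_t)b[1] << 8 |
               (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
    }

    static uint32_t load_be32(const void *p)
    {
        const uint8_t *b = p;
        return (uint32_t)b[0] << 24 | (uint32_t)b[1] << 16 |
               (uint32_t)b[2] << 8 | (uint32_t)b[3];
    }

    /* Pick the accessor matching the host byte order, as the #ifdef above does. */
    static uint32_t load_native32(const void *p)
    {
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        return load_le32(p);
    #else
        return load_be32(p);
    #endif
    }

    int main(void)
    {
        uint8_t buf[8] = { 0xaa, 0x11, 0x22, 0x33, 0x44 };  /* value starts at offset 1 */

        printf("le=%08x be=%08x native=%08x\n",
               (unsigned)load_le32(buf + 1), (unsigned)load_be32(buf + 1),
               (unsigned)load_native32(buf + 1));
        return 0;
    }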
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 2ea5cc033ec8..d3de01066f7d 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c | |||
@@ -576,6 +576,7 @@ int main(void) | |||
576 | HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler); | 576 | HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler); |
577 | HSTATE_FIELD(HSTATE_SCRATCH0, scratch0); | 577 | HSTATE_FIELD(HSTATE_SCRATCH0, scratch0); |
578 | HSTATE_FIELD(HSTATE_SCRATCH1, scratch1); | 578 | HSTATE_FIELD(HSTATE_SCRATCH1, scratch1); |
579 | HSTATE_FIELD(HSTATE_SCRATCH2, scratch2); | ||
579 | HSTATE_FIELD(HSTATE_IN_GUEST, in_guest); | 580 | HSTATE_FIELD(HSTATE_IN_GUEST, in_guest); |
580 | HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5); | 581 | HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5); |
581 | HSTATE_FIELD(HSTATE_NAPPING, napping); | 582 | HSTATE_FIELD(HSTATE_NAPPING, napping); |
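HSTATE_SCRATCH2 joins the other HSTATE_FIELD entries so that assembly can address the new scratch2 field through a generated constant. Roughly how such constants come about, sketched in plain C with an invented struct; the real offsets are emitted by the asm-offsets machinery at build time, not by a program like this.

    #include <stddef.h>
    #include <stdio.h>

    struct host_state {
        unsigned long vmhandler;
        unsigned long scratch0;
        unsigned long scratch1;
        unsigned long scratch2;   /* the field this series adds */
    };

    int main(void)
    {
        /* Assembly then uses the emitted constant to form "offset(r13)" operands. */
        printf("#define HSTATE_SCRATCH2 %zu\n",
               offsetof(struct host_state, scratch2));
        return 0;
    }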
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c index 779a78c26435..11c1d069d920 100644 --- a/arch/powerpc/kernel/crash_dump.c +++ b/arch/powerpc/kernel/crash_dump.c | |||
@@ -124,15 +124,15 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, | |||
124 | void crash_free_reserved_phys_range(unsigned long begin, unsigned long end) | 124 | void crash_free_reserved_phys_range(unsigned long begin, unsigned long end) |
125 | { | 125 | { |
126 | unsigned long addr; | 126 | unsigned long addr; |
127 | const u32 *basep, *sizep; | 127 | const __be32 *basep, *sizep; |
128 | unsigned int rtas_start = 0, rtas_end = 0; | 128 | unsigned int rtas_start = 0, rtas_end = 0; |
129 | 129 | ||
130 | basep = of_get_property(rtas.dev, "linux,rtas-base", NULL); | 130 | basep = of_get_property(rtas.dev, "linux,rtas-base", NULL); |
131 | sizep = of_get_property(rtas.dev, "rtas-size", NULL); | 131 | sizep = of_get_property(rtas.dev, "rtas-size", NULL); |
132 | 132 | ||
133 | if (basep && sizep) { | 133 | if (basep && sizep) { |
134 | rtas_start = *basep; | 134 | rtas_start = be32_to_cpup(basep); |
135 | rtas_end = *basep + *sizep; | 135 | rtas_end = rtas_start + be32_to_cpup(sizep); |
136 | } | 136 | } |
137 | 137 | ||
138 | for (addr = begin; addr < end; addr += PAGE_SIZE) { | 138 | for (addr = begin; addr < end; addr += PAGE_SIZE) { |
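The crash_dump.c hunk treats the RTAS properties as __be32 because flattened-device-tree values are stored big-endian, so a little-endian kernel must convert them before use. A self-contained sketch of that conversion, with be32_to_host() standing in for be32_to_cpup() and made-up property bytes:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t be32_to_host(const uint8_t *p)
    {
        return (uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
               (uint32_t)p[2] << 8  | (uint32_t)p[3];
    }

    int main(void)
    {
        /* e.g. "linux,rtas-base" = 0x0e000000, "rtas-size" = 0x00200000 */
        const uint8_t base[4] = { 0x0e, 0x00, 0x00, 0x00 };
        const uint8_t size[4] = { 0x00, 0x20, 0x00, 0x00 };
        uint32_t rtas_start = be32_to_host(base);
        uint32_t rtas_end   = rtas_start + be32_to_host(size);

        printf("rtas: 0x%08x - 0x%08x\n", (unsigned)rtas_start, (unsigned)rtas_end);
        return 0;
    }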
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 2ae41aba4053..4f0946de2d5c 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S | |||
@@ -80,6 +80,7 @@ END_FTR_SECTION(0, 1) | |||
80 | * of the function that the cpu should jump to to continue | 80 | * of the function that the cpu should jump to to continue |
81 | * initialization. | 81 | * initialization. |
82 | */ | 82 | */ |
83 | .balign 8 | ||
83 | .globl __secondary_hold_spinloop | 84 | .globl __secondary_hold_spinloop |
84 | __secondary_hold_spinloop: | 85 | __secondary_hold_spinloop: |
85 | .llong 0x0 | 86 | .llong 0x0 |
@@ -470,6 +471,7 @@ _STATIC(__after_prom_start) | |||
470 | mtctr r8 | 471 | mtctr r8 |
471 | bctr | 472 | bctr |
472 | 473 | ||
474 | .balign 8 | ||
473 | p_end: .llong _end - _stext | 475 | p_end: .llong _end - _stext |
474 | 476 | ||
475 | 4: /* Now copy the rest of the kernel up to _end */ | 477 | 4: /* Now copy the rest of the kernel up to _end */ |
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 3386d8ab7eb0..4a96556fd2d4 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
@@ -339,7 +339,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread) | |||
339 | #endif | 339 | #endif |
340 | } | 340 | } |
341 | 341 | ||
342 | static void prime_debug_regs(struct thread_struct *thread) | 342 | static void prime_debug_regs(struct debug_reg *debug) |
343 | { | 343 | { |
344 | /* | 344 | /* |
345 | * We could have inherited MSR_DE from userspace, since | 345 | * We could have inherited MSR_DE from userspace, since |
@@ -348,22 +348,22 @@ static void prime_debug_regs(struct thread_struct *thread) | |||
348 | */ | 348 | */ |
349 | mtmsr(mfmsr() & ~MSR_DE); | 349 | mtmsr(mfmsr() & ~MSR_DE); |
350 | 350 | ||
351 | mtspr(SPRN_IAC1, thread->debug.iac1); | 351 | mtspr(SPRN_IAC1, debug->iac1); |
352 | mtspr(SPRN_IAC2, thread->debug.iac2); | 352 | mtspr(SPRN_IAC2, debug->iac2); |
353 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 | 353 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 |
354 | mtspr(SPRN_IAC3, thread->debug.iac3); | 354 | mtspr(SPRN_IAC3, debug->iac3); |
355 | mtspr(SPRN_IAC4, thread->debug.iac4); | 355 | mtspr(SPRN_IAC4, debug->iac4); |
356 | #endif | 356 | #endif |
357 | mtspr(SPRN_DAC1, thread->debug.dac1); | 357 | mtspr(SPRN_DAC1, debug->dac1); |
358 | mtspr(SPRN_DAC2, thread->debug.dac2); | 358 | mtspr(SPRN_DAC2, debug->dac2); |
359 | #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 | 359 | #if CONFIG_PPC_ADV_DEBUG_DVCS > 0 |
360 | mtspr(SPRN_DVC1, thread->debug.dvc1); | 360 | mtspr(SPRN_DVC1, debug->dvc1); |
361 | mtspr(SPRN_DVC2, thread->debug.dvc2); | 361 | mtspr(SPRN_DVC2, debug->dvc2); |
362 | #endif | 362 | #endif |
363 | mtspr(SPRN_DBCR0, thread->debug.dbcr0); | 363 | mtspr(SPRN_DBCR0, debug->dbcr0); |
364 | mtspr(SPRN_DBCR1, thread->debug.dbcr1); | 364 | mtspr(SPRN_DBCR1, debug->dbcr1); |
365 | #ifdef CONFIG_BOOKE | 365 | #ifdef CONFIG_BOOKE |
366 | mtspr(SPRN_DBCR2, thread->debug.dbcr2); | 366 | mtspr(SPRN_DBCR2, debug->dbcr2); |
367 | #endif | 367 | #endif |
368 | } | 368 | } |
369 | /* | 369 | /* |
@@ -371,11 +371,11 @@ static void prime_debug_regs(struct thread_struct *thread) | |||
371 | * debug registers, set the debug registers from the values | 371 | * debug registers, set the debug registers from the values |
372 | * stored in the new thread. | 372 | * stored in the new thread. |
373 | */ | 373 | */ |
374 | void switch_booke_debug_regs(struct thread_struct *new_thread) | 374 | void switch_booke_debug_regs(struct debug_reg *new_debug) |
375 | { | 375 | { |
376 | if ((current->thread.debug.dbcr0 & DBCR0_IDM) | 376 | if ((current->thread.debug.dbcr0 & DBCR0_IDM) |
377 | || (new_thread->debug.dbcr0 & DBCR0_IDM)) | 377 | || (new_debug->dbcr0 & DBCR0_IDM)) |
378 | prime_debug_regs(new_thread); | 378 | prime_debug_regs(new_debug); |
379 | } | 379 | } |
380 | EXPORT_SYMBOL_GPL(switch_booke_debug_regs); | 380 | EXPORT_SYMBOL_GPL(switch_booke_debug_regs); |
381 | #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ | 381 | #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ |
@@ -683,7 +683,7 @@ struct task_struct *__switch_to(struct task_struct *prev, | |||
683 | #endif /* CONFIG_SMP */ | 683 | #endif /* CONFIG_SMP */ |
684 | 684 | ||
685 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | 685 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
686 | switch_booke_debug_regs(&new->thread); | 686 | switch_booke_debug_regs(&new->thread.debug); |
687 | #else | 687 | #else |
688 | /* | 688 | /* |
689 | * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would | 689 | * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would |
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index cb64a6e1dc51..078145acf7fb 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c | |||
@@ -1986,19 +1986,23 @@ static void __init prom_init_stdout(void) | |||
1986 | /* Get the full OF pathname of the stdout device */ | 1986 | /* Get the full OF pathname of the stdout device */ |
1987 | memset(path, 0, 256); | 1987 | memset(path, 0, 256); |
1988 | call_prom("instance-to-path", 3, 1, prom.stdout, path, 255); | 1988 | call_prom("instance-to-path", 3, 1, prom.stdout, path, 255); |
1989 | stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout); | ||
1990 | val = cpu_to_be32(stdout_node); | ||
1991 | prom_setprop(prom.chosen, "/chosen", "linux,stdout-package", | ||
1992 | &val, sizeof(val)); | ||
1993 | prom_printf("OF stdout device is: %s\n", of_stdout_device); | 1989 | prom_printf("OF stdout device is: %s\n", of_stdout_device); |
1994 | prom_setprop(prom.chosen, "/chosen", "linux,stdout-path", | 1990 | prom_setprop(prom.chosen, "/chosen", "linux,stdout-path", |
1995 | path, strlen(path) + 1); | 1991 | path, strlen(path) + 1); |
1996 | 1992 | ||
1997 | /* If it's a display, note it */ | 1993 | /* instance-to-package fails on PA-Semi */ |
1998 | memset(type, 0, sizeof(type)); | 1994 | stdout_node = call_prom("instance-to-package", 1, 1, prom.stdout); |
1999 | prom_getprop(stdout_node, "device_type", type, sizeof(type)); | 1995 | if (stdout_node != PROM_ERROR) { |
2000 | if (strcmp(type, "display") == 0) | 1996 | val = cpu_to_be32(stdout_node); |
2001 | prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0); | 1997 | prom_setprop(prom.chosen, "/chosen", "linux,stdout-package", |
1998 | &val, sizeof(val)); | ||
1999 | |||
2000 | /* If it's a display, note it */ | ||
2001 | memset(type, 0, sizeof(type)); | ||
2002 | prom_getprop(stdout_node, "device_type", type, sizeof(type)); | ||
2003 | if (strcmp(type, "display") == 0) | ||
2004 | prom_setprop(stdout_node, path, "linux,boot-display", NULL, 0); | ||
2005 | } | ||
2002 | } | 2006 | } |
2003 | 2007 | ||
2004 | static int __init prom_find_machine_type(void) | 2008 | static int __init prom_find_machine_type(void) |
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 75fb40498b41..2e3d2bf536c5 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c | |||
@@ -1555,7 +1555,7 @@ long arch_ptrace(struct task_struct *child, long request, | |||
1555 | 1555 | ||
1556 | flush_fp_to_thread(child); | 1556 | flush_fp_to_thread(child); |
1557 | if (fpidx < (PT_FPSCR - PT_FPR0)) | 1557 | if (fpidx < (PT_FPSCR - PT_FPR0)) |
1558 | memcpy(&tmp, &child->thread.fp_state.fpr, | 1558 | memcpy(&tmp, &child->thread.TS_FPR(fpidx), |
1559 | sizeof(long)); | 1559 | sizeof(long)); |
1560 | else | 1560 | else |
1561 | tmp = child->thread.fp_state.fpscr; | 1561 | tmp = child->thread.fp_state.fpscr; |
@@ -1588,7 +1588,7 @@ long arch_ptrace(struct task_struct *child, long request, | |||
1588 | 1588 | ||
1589 | flush_fp_to_thread(child); | 1589 | flush_fp_to_thread(child); |
1590 | if (fpidx < (PT_FPSCR - PT_FPR0)) | 1590 | if (fpidx < (PT_FPSCR - PT_FPR0)) |
1591 | memcpy(&child->thread.fp_state.fpr, &data, | 1591 | memcpy(&child->thread.TS_FPR(fpidx), &data, |
1592 | sizeof(long)); | 1592 | sizeof(long)); |
1593 | else | 1593 | else |
1594 | child->thread.fp_state.fpscr = data; | 1594 | child->thread.fp_state.fpscr = data; |
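The ptrace hunks fix an indexing bug: the memcpy previously copied from the start of the FP register array, so every request touched register 0 instead of register fpidx. A small sketch of the bug class, with an invented fpr[] array:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned long fpr[32], tmp;
        int fpidx = 3;

        for (int i = 0; i < 32; i++)
            fpr[i] = 0x1000 + i;

        memcpy(&tmp, &fpr, sizeof(long));        /* always reads fpr[0] */
        printf("wrong: %#lx\n", tmp);
        memcpy(&tmp, &fpr[fpidx], sizeof(long)); /* reads the element asked for */
        printf("right: %#lx\n", tmp);
        return 0;
    }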
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index febc80445d25..bc76cc6b419c 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c | |||
@@ -479,7 +479,7 @@ void __init smp_setup_cpu_maps(void) | |||
479 | if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR) && | 479 | if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR) && |
480 | (dn = of_find_node_by_path("/rtas"))) { | 480 | (dn = of_find_node_by_path("/rtas"))) { |
481 | int num_addr_cell, num_size_cell, maxcpus; | 481 | int num_addr_cell, num_size_cell, maxcpus; |
482 | const unsigned int *ireg; | 482 | const __be32 *ireg; |
483 | 483 | ||
484 | num_addr_cell = of_n_addr_cells(dn); | 484 | num_addr_cell = of_n_addr_cells(dn); |
485 | num_size_cell = of_n_size_cells(dn); | 485 | num_size_cell = of_n_size_cells(dn); |
@@ -489,7 +489,7 @@ void __init smp_setup_cpu_maps(void) | |||
489 | if (!ireg) | 489 | if (!ireg) |
490 | goto out; | 490 | goto out; |
491 | 491 | ||
492 | maxcpus = ireg[num_addr_cell + num_size_cell]; | 492 | maxcpus = be32_to_cpup(ireg + num_addr_cell + num_size_cell); |
493 | 493 | ||
494 | /* Double maxcpus for processors which have SMT capability */ | 494 | /* Double maxcpus for processors which have SMT capability */ |
495 | if (cpu_has_feature(CPU_FTR_SMT)) | 495 | if (cpu_has_feature(CPU_FTR_SMT)) |
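Here maxcpus is read out of a device-tree cell array, skipping num_addr_cell + num_size_cell big-endian cells first. A sketch of that cell arithmetic with illustrative data; the real property name and layout come from the surrounding code, not from this hunk.

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t cell_to_host(const uint8_t *p)
    {
        return (uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
               (uint32_t)p[2] << 8  | (uint32_t)p[3];
    }

    int main(void)
    {
        /* 2 address cells + 2 size cells, then a maxcpus cell of 8 */
        const uint8_t prop[] = {
            0, 0, 0, 0,  0x10, 0, 0, 0,   /* address cells */
            0, 0, 0, 0,  0x01, 0, 0, 0,   /* size cells    */
            0, 0, 0, 8,                   /* maxcpus       */
        };
        int num_addr_cell = 2, num_size_cell = 2;
        uint32_t maxcpus = cell_to_host(prop + 4 * (num_addr_cell + num_size_cell));

        printf("maxcpus = %u\n", (unsigned)maxcpus);
        return 0;
    }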
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index a3b64f3bf9a2..c1cf4a1522d9 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c | |||
@@ -580,7 +580,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) | |||
580 | int cpu_to_core_id(int cpu) | 580 | int cpu_to_core_id(int cpu) |
581 | { | 581 | { |
582 | struct device_node *np; | 582 | struct device_node *np; |
583 | const int *reg; | 583 | const __be32 *reg; |
584 | int id = -1; | 584 | int id = -1; |
585 | 585 | ||
586 | np = of_get_cpu_node(cpu, NULL); | 586 | np = of_get_cpu_node(cpu, NULL); |
@@ -591,7 +591,7 @@ int cpu_to_core_id(int cpu) | |||
591 | if (!reg) | 591 | if (!reg) |
592 | goto out; | 592 | goto out; |
593 | 593 | ||
594 | id = *reg; | 594 | id = be32_to_cpup(reg); |
595 | out: | 595 | out: |
596 | of_node_put(np); | 596 | of_node_put(np); |
597 | return id; | 597 | return id; |
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index f3ff587a8b7d..c5d148434c08 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c | |||
@@ -469,11 +469,14 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, | |||
469 | slb_v = vcpu->kvm->arch.vrma_slb_v; | 469 | slb_v = vcpu->kvm->arch.vrma_slb_v; |
470 | } | 470 | } |
471 | 471 | ||
472 | preempt_disable(); | ||
472 | /* Find the HPTE in the hash table */ | 473 | /* Find the HPTE in the hash table */ |
473 | index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v, | 474 | index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v, |
474 | HPTE_V_VALID | HPTE_V_ABSENT); | 475 | HPTE_V_VALID | HPTE_V_ABSENT); |
475 | if (index < 0) | 476 | if (index < 0) { |
477 | preempt_enable(); | ||
476 | return -ENOENT; | 478 | return -ENOENT; |
479 | } | ||
477 | hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4)); | 480 | hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4)); |
478 | v = hptep[0] & ~HPTE_V_HVLOCK; | 481 | v = hptep[0] & ~HPTE_V_HVLOCK; |
479 | gr = kvm->arch.revmap[index].guest_rpte; | 482 | gr = kvm->arch.revmap[index].guest_rpte; |
@@ -481,6 +484,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, | |||
481 | /* Unlock the HPTE */ | 484 | /* Unlock the HPTE */ |
482 | asm volatile("lwsync" : : : "memory"); | 485 | asm volatile("lwsync" : : : "memory"); |
483 | hptep[0] = v; | 486 | hptep[0] = v; |
487 | preempt_enable(); | ||
484 | 488 | ||
485 | gpte->eaddr = eaddr; | 489 | gpte->eaddr = eaddr; |
486 | gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff); | 490 | gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff); |
@@ -665,6 +669,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
665 | return -EFAULT; | 669 | return -EFAULT; |
666 | } else { | 670 | } else { |
667 | page = pages[0]; | 671 | page = pages[0]; |
672 | pfn = page_to_pfn(page); | ||
668 | if (PageHuge(page)) { | 673 | if (PageHuge(page)) { |
669 | page = compound_head(page); | 674 | page = compound_head(page); |
670 | pte_size <<= compound_order(page); | 675 | pte_size <<= compound_order(page); |
@@ -689,7 +694,6 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
689 | } | 694 | } |
690 | rcu_read_unlock_sched(); | 695 | rcu_read_unlock_sched(); |
691 | } | 696 | } |
692 | pfn = page_to_pfn(page); | ||
693 | } | 697 | } |
694 | 698 | ||
695 | ret = -EFAULT; | 699 | ret = -EFAULT; |
@@ -707,8 +711,14 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
707 | r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M; | 711 | r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M; |
708 | } | 712 | } |
709 | 713 | ||
710 | /* Set the HPTE to point to pfn */ | 714 | /* |
711 | r = (r & ~(HPTE_R_PP0 - pte_size)) | (pfn << PAGE_SHIFT); | 715 | * Set the HPTE to point to pfn. |
716 | * Since the pfn is at PAGE_SIZE granularity, make sure we | ||
717 | * don't mask out lower-order bits if psize < PAGE_SIZE. | ||
718 | */ | ||
719 | if (psize < PAGE_SIZE) | ||
720 | psize = PAGE_SIZE; | ||
721 | r = (r & ~(HPTE_R_PP0 - psize)) | ((pfn << PAGE_SHIFT) & ~(psize - 1)); | ||
712 | if (hpte_is_writable(r) && !write_ok) | 722 | if (hpte_is_writable(r) && !write_ok) |
713 | r = hpte_make_readonly(r); | 723 | r = hpte_make_readonly(r); |
714 | ret = RESUME_GUEST; | 724 | ret = RESUME_GUEST; |
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 072287f1c3bc..b51d5db78068 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c | |||
@@ -131,8 +131,9 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu) | |||
131 | static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu) | 131 | static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu) |
132 | { | 132 | { |
133 | struct kvmppc_vcore *vc = vcpu->arch.vcore; | 133 | struct kvmppc_vcore *vc = vcpu->arch.vcore; |
134 | unsigned long flags; | ||
134 | 135 | ||
135 | spin_lock(&vcpu->arch.tbacct_lock); | 136 | spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); |
136 | if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE && | 137 | if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE && |
137 | vc->preempt_tb != TB_NIL) { | 138 | vc->preempt_tb != TB_NIL) { |
138 | vc->stolen_tb += mftb() - vc->preempt_tb; | 139 | vc->stolen_tb += mftb() - vc->preempt_tb; |
@@ -143,19 +144,20 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu) | |||
143 | vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt; | 144 | vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt; |
144 | vcpu->arch.busy_preempt = TB_NIL; | 145 | vcpu->arch.busy_preempt = TB_NIL; |
145 | } | 146 | } |
146 | spin_unlock(&vcpu->arch.tbacct_lock); | 147 | spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); |
147 | } | 148 | } |
148 | 149 | ||
149 | static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu) | 150 | static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu) |
150 | { | 151 | { |
151 | struct kvmppc_vcore *vc = vcpu->arch.vcore; | 152 | struct kvmppc_vcore *vc = vcpu->arch.vcore; |
153 | unsigned long flags; | ||
152 | 154 | ||
153 | spin_lock(&vcpu->arch.tbacct_lock); | 155 | spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); |
154 | if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE) | 156 | if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE) |
155 | vc->preempt_tb = mftb(); | 157 | vc->preempt_tb = mftb(); |
156 | if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST) | 158 | if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST) |
157 | vcpu->arch.busy_preempt = mftb(); | 159 | vcpu->arch.busy_preempt = mftb(); |
158 | spin_unlock(&vcpu->arch.tbacct_lock); | 160 | spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); |
159 | } | 161 | } |
160 | 162 | ||
161 | static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr) | 163 | static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr) |
@@ -486,11 +488,11 @@ static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now) | |||
486 | */ | 488 | */ |
487 | if (vc->vcore_state != VCORE_INACTIVE && | 489 | if (vc->vcore_state != VCORE_INACTIVE && |
488 | vc->runner->arch.run_task != current) { | 490 | vc->runner->arch.run_task != current) { |
489 | spin_lock(&vc->runner->arch.tbacct_lock); | 491 | spin_lock_irq(&vc->runner->arch.tbacct_lock); |
490 | p = vc->stolen_tb; | 492 | p = vc->stolen_tb; |
491 | if (vc->preempt_tb != TB_NIL) | 493 | if (vc->preempt_tb != TB_NIL) |
492 | p += now - vc->preempt_tb; | 494 | p += now - vc->preempt_tb; |
493 | spin_unlock(&vc->runner->arch.tbacct_lock); | 495 | spin_unlock_irq(&vc->runner->arch.tbacct_lock); |
494 | } else { | 496 | } else { |
495 | p = vc->stolen_tb; | 497 | p = vc->stolen_tb; |
496 | } | 498 | } |
@@ -512,10 +514,10 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu, | |||
512 | core_stolen = vcore_stolen_time(vc, now); | 514 | core_stolen = vcore_stolen_time(vc, now); |
513 | stolen = core_stolen - vcpu->arch.stolen_logged; | 515 | stolen = core_stolen - vcpu->arch.stolen_logged; |
514 | vcpu->arch.stolen_logged = core_stolen; | 516 | vcpu->arch.stolen_logged = core_stolen; |
515 | spin_lock(&vcpu->arch.tbacct_lock); | 517 | spin_lock_irq(&vcpu->arch.tbacct_lock); |
516 | stolen += vcpu->arch.busy_stolen; | 518 | stolen += vcpu->arch.busy_stolen; |
517 | vcpu->arch.busy_stolen = 0; | 519 | vcpu->arch.busy_stolen = 0; |
518 | spin_unlock(&vcpu->arch.tbacct_lock); | 520 | spin_unlock_irq(&vcpu->arch.tbacct_lock); |
519 | if (!dt || !vpa) | 521 | if (!dt || !vpa) |
520 | return; | 522 | return; |
521 | memset(dt, 0, sizeof(struct dtl_entry)); | 523 | memset(dt, 0, sizeof(struct dtl_entry)); |
@@ -589,7 +591,9 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) | |||
589 | if (list_empty(&vcpu->kvm->arch.rtas_tokens)) | 591 | if (list_empty(&vcpu->kvm->arch.rtas_tokens)) |
590 | return RESUME_HOST; | 592 | return RESUME_HOST; |
591 | 593 | ||
594 | idx = srcu_read_lock(&vcpu->kvm->srcu); | ||
592 | rc = kvmppc_rtas_hcall(vcpu); | 595 | rc = kvmppc_rtas_hcall(vcpu); |
596 | srcu_read_unlock(&vcpu->kvm->srcu, idx); | ||
593 | 597 | ||
594 | if (rc == -ENOENT) | 598 | if (rc == -ENOENT) |
595 | return RESUME_HOST; | 599 | return RESUME_HOST; |
@@ -1115,13 +1119,13 @@ static void kvmppc_remove_runnable(struct kvmppc_vcore *vc, | |||
1115 | 1119 | ||
1116 | if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) | 1120 | if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) |
1117 | return; | 1121 | return; |
1118 | spin_lock(&vcpu->arch.tbacct_lock); | 1122 | spin_lock_irq(&vcpu->arch.tbacct_lock); |
1119 | now = mftb(); | 1123 | now = mftb(); |
1120 | vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) - | 1124 | vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) - |
1121 | vcpu->arch.stolen_logged; | 1125 | vcpu->arch.stolen_logged; |
1122 | vcpu->arch.busy_preempt = now; | 1126 | vcpu->arch.busy_preempt = now; |
1123 | vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; | 1127 | vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; |
1124 | spin_unlock(&vcpu->arch.tbacct_lock); | 1128 | spin_unlock_irq(&vcpu->arch.tbacct_lock); |
1125 | --vc->n_runnable; | 1129 | --vc->n_runnable; |
1126 | list_del(&vcpu->arch.run_list); | 1130 | list_del(&vcpu->arch.run_list); |
1127 | } | 1131 | } |
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index 9c515440ad1a..8689e2e30857 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c | |||
@@ -225,6 +225,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, | |||
225 | is_io = pa & (HPTE_R_I | HPTE_R_W); | 225 | is_io = pa & (HPTE_R_I | HPTE_R_W); |
226 | pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK); | 226 | pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK); |
227 | pa &= PAGE_MASK; | 227 | pa &= PAGE_MASK; |
228 | pa |= gpa & ~PAGE_MASK; | ||
228 | } else { | 229 | } else { |
229 | /* Translate to host virtual address */ | 230 | /* Translate to host virtual address */ |
230 | hva = __gfn_to_hva_memslot(memslot, gfn); | 231 | hva = __gfn_to_hva_memslot(memslot, gfn); |
@@ -238,13 +239,13 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, | |||
238 | ptel = hpte_make_readonly(ptel); | 239 | ptel = hpte_make_readonly(ptel); |
239 | is_io = hpte_cache_bits(pte_val(pte)); | 240 | is_io = hpte_cache_bits(pte_val(pte)); |
240 | pa = pte_pfn(pte) << PAGE_SHIFT; | 241 | pa = pte_pfn(pte) << PAGE_SHIFT; |
242 | pa |= hva & (pte_size - 1); | ||
243 | pa |= gpa & ~PAGE_MASK; | ||
241 | } | 244 | } |
242 | } | 245 | } |
243 | 246 | ||
244 | if (pte_size < psize) | 247 | if (pte_size < psize) |
245 | return H_PARAMETER; | 248 | return H_PARAMETER; |
246 | if (pa && pte_size > psize) | ||
247 | pa |= gpa & (pte_size - 1); | ||
248 | 249 | ||
249 | ptel &= ~(HPTE_R_PP0 - psize); | 250 | ptel &= ~(HPTE_R_PP0 - psize); |
250 | ptel |= pa; | 251 | ptel |= pa; |
@@ -749,6 +750,10 @@ static int slb_base_page_shift[4] = { | |||
749 | 20, /* 1M, unsupported */ | 750 | 20, /* 1M, unsupported */ |
750 | }; | 751 | }; |
751 | 752 | ||
753 | /* When called from virtmode, this func should be protected by | ||
753 | /* When called from virtmode, this function should be protected by | ||
754 | * preempt_disable(); otherwise, holding HPTE_V_HVLOCK | ||
755 | * can lead to a deadlock. | ||
756 | */ | ||
752 | long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v, | 757 | long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v, |
753 | unsigned long valid) | 758 | unsigned long valid) |
754 | { | 759 | { |
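The real-mode H_ENTER hunks keep the sub-page offset bits when building the host real address: the page-aligned part comes from the PTE, and the low bits are put back from the guest physical address (or the HVA) instead of being masked away. The underlying mask arithmetic, sketched with illustrative numbers:

    #include <stdio.h>

    #define PAGE_SIZE 0x1000UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
        unsigned long pa  = 0x12345678UL;   /* host physical address          */
        unsigned long gpa = 0x00000abcUL;   /* guest physical, offset 0xabc   */

        unsigned long base   = pa & PAGE_MASK;     /* align down: 0x12345000   */
        unsigned long offset = gpa & ~PAGE_MASK;   /* keep offset: 0x00000abc  */

        printf("base=%#lx offset=%#lx combined=%#lx\n",
               base, offset, base | offset);
        return 0;
    }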
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index bc8de75b1925..be4fa04a37c9 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S | |||
@@ -153,7 +153,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) | |||
153 | 153 | ||
154 | 13: b machine_check_fwnmi | 154 | 13: b machine_check_fwnmi |
155 | 155 | ||
156 | |||
157 | /* | 156 | /* |
158 | * We come in here when wakened from nap mode on a secondary hw thread. | 157 | * We come in here when wakened from nap mode on a secondary hw thread. |
159 | * Relocation is off and most register values are lost. | 158 | * Relocation is off and most register values are lost. |
@@ -224,6 +223,11 @@ kvm_start_guest: | |||
224 | /* Clear our vcpu pointer so we don't come back in early */ | 223 | /* Clear our vcpu pointer so we don't come back in early */ |
225 | li r0, 0 | 224 | li r0, 0 |
226 | std r0, HSTATE_KVM_VCPU(r13) | 225 | std r0, HSTATE_KVM_VCPU(r13) |
226 | /* | ||
227 | * Make sure we clear HSTATE_KVM_VCPU(r13) before incrementing | ||
228 | * the nap_count, because once the increment to nap_count is | ||
229 | * visible we could be given another vcpu. | ||
230 | */ | ||
227 | lwsync | 231 | lwsync |
228 | /* Clear any pending IPI - we're an offline thread */ | 232 | /* Clear any pending IPI - we're an offline thread */ |
229 | ld r5, HSTATE_XICS_PHYS(r13) | 233 | ld r5, HSTATE_XICS_PHYS(r13) |
@@ -241,7 +245,6 @@ kvm_start_guest: | |||
241 | /* increment the nap count and then go to nap mode */ | 245 | /* increment the nap count and then go to nap mode */ |
242 | ld r4, HSTATE_KVM_VCORE(r13) | 246 | ld r4, HSTATE_KVM_VCORE(r13) |
243 | addi r4, r4, VCORE_NAP_COUNT | 247 | addi r4, r4, VCORE_NAP_COUNT |
244 | lwsync /* make previous updates visible */ | ||
245 | 51: lwarx r3, 0, r4 | 248 | 51: lwarx r3, 0, r4 |
246 | addi r3, r3, 1 | 249 | addi r3, r3, 1 |
247 | stwcx. r3, 0, r4 | 250 | stwcx. r3, 0, r4 |
@@ -751,15 +754,14 @@ kvmppc_interrupt_hv: | |||
751 | * guest CR, R12 saved in shadow VCPU SCRATCH1/0 | 754 | * guest CR, R12 saved in shadow VCPU SCRATCH1/0 |
752 | * guest R13 saved in SPRN_SCRATCH0 | 755 | * guest R13 saved in SPRN_SCRATCH0 |
753 | */ | 756 | */ |
754 | /* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */ | 757 | std r9, HSTATE_SCRATCH2(r13) |
755 | std r9, HSTATE_HOST_R2(r13) | ||
756 | 758 | ||
757 | lbz r9, HSTATE_IN_GUEST(r13) | 759 | lbz r9, HSTATE_IN_GUEST(r13) |
758 | cmpwi r9, KVM_GUEST_MODE_HOST_HV | 760 | cmpwi r9, KVM_GUEST_MODE_HOST_HV |
759 | beq kvmppc_bad_host_intr | 761 | beq kvmppc_bad_host_intr |
760 | #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE | 762 | #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE |
761 | cmpwi r9, KVM_GUEST_MODE_GUEST | 763 | cmpwi r9, KVM_GUEST_MODE_GUEST |
762 | ld r9, HSTATE_HOST_R2(r13) | 764 | ld r9, HSTATE_SCRATCH2(r13) |
763 | beq kvmppc_interrupt_pr | 765 | beq kvmppc_interrupt_pr |
764 | #endif | 766 | #endif |
765 | /* We're now back in the host but in guest MMU context */ | 767 | /* We're now back in the host but in guest MMU context */ |
@@ -779,7 +781,7 @@ kvmppc_interrupt_hv: | |||
779 | std r6, VCPU_GPR(R6)(r9) | 781 | std r6, VCPU_GPR(R6)(r9) |
780 | std r7, VCPU_GPR(R7)(r9) | 782 | std r7, VCPU_GPR(R7)(r9) |
781 | std r8, VCPU_GPR(R8)(r9) | 783 | std r8, VCPU_GPR(R8)(r9) |
782 | ld r0, HSTATE_HOST_R2(r13) | 784 | ld r0, HSTATE_SCRATCH2(r13) |
783 | std r0, VCPU_GPR(R9)(r9) | 785 | std r0, VCPU_GPR(R9)(r9) |
784 | std r10, VCPU_GPR(R10)(r9) | 786 | std r10, VCPU_GPR(R10)(r9) |
785 | std r11, VCPU_GPR(R11)(r9) | 787 | std r11, VCPU_GPR(R11)(r9) |
@@ -990,14 +992,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) | |||
990 | */ | 992 | */ |
991 | /* Increment the threads-exiting-guest count in the 0xff00 | 993 | /* Increment the threads-exiting-guest count in the 0xff00 |
992 | bits of vcore->entry_exit_count */ | 994 | bits of vcore->entry_exit_count */ |
993 | lwsync | ||
994 | ld r5,HSTATE_KVM_VCORE(r13) | 995 | ld r5,HSTATE_KVM_VCORE(r13) |
995 | addi r6,r5,VCORE_ENTRY_EXIT | 996 | addi r6,r5,VCORE_ENTRY_EXIT |
996 | 41: lwarx r3,0,r6 | 997 | 41: lwarx r3,0,r6 |
997 | addi r0,r3,0x100 | 998 | addi r0,r3,0x100 |
998 | stwcx. r0,0,r6 | 999 | stwcx. r0,0,r6 |
999 | bne 41b | 1000 | bne 41b |
1000 | lwsync | 1001 | isync /* order stwcx. vs. reading napping_threads */ |
1001 | 1002 | ||
1002 | /* | 1003 | /* |
1003 | * At this point we have an interrupt that we have to pass | 1004 | * At this point we have an interrupt that we have to pass |
@@ -1030,6 +1031,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) | |||
1030 | sld r0,r0,r4 | 1031 | sld r0,r0,r4 |
1031 | andc. r3,r3,r0 /* no sense IPI'ing ourselves */ | 1032 | andc. r3,r3,r0 /* no sense IPI'ing ourselves */ |
1032 | beq 43f | 1033 | beq 43f |
1034 | /* Order entry/exit update vs. IPIs */ | ||
1035 | sync | ||
1033 | mulli r4,r4,PACA_SIZE /* get paca for thread 0 */ | 1036 | mulli r4,r4,PACA_SIZE /* get paca for thread 0 */ |
1034 | subf r6,r4,r13 | 1037 | subf r6,r4,r13 |
1035 | 42: andi. r0,r3,1 | 1038 | 42: andi. r0,r3,1 |
@@ -1638,10 +1641,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206) | |||
1638 | bge kvm_cede_exit | 1641 | bge kvm_cede_exit |
1639 | stwcx. r4,0,r6 | 1642 | stwcx. r4,0,r6 |
1640 | bne 31b | 1643 | bne 31b |
1644 | /* order napping_threads update vs testing entry_exit_count */ | ||
1645 | isync | ||
1641 | li r0,1 | 1646 | li r0,1 |
1642 | stb r0,HSTATE_NAPPING(r13) | 1647 | stb r0,HSTATE_NAPPING(r13) |
1643 | /* order napping_threads update vs testing entry_exit_count */ | ||
1644 | lwsync | ||
1645 | mr r4,r3 | 1648 | mr r4,r3 |
1646 | lwz r7,VCORE_ENTRY_EXIT(r5) | 1649 | lwz r7,VCORE_ENTRY_EXIT(r5) |
1647 | cmpwi r7,0x100 | 1650 | cmpwi r7,0x100 |
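Several of the rmhandlers changes are about ordering: the vcpu pointer must be cleared before the nap count increment becomes visible, and the entry/exit count update must be ordered against testing napping_threads, which is why lwsync is replaced by sync or isync where a full or execution barrier is needed. The PowerPC code uses explicit barriers; as a rough analogy only, here is the vcpu/nap_count rule expressed with C11 release/acquire atomics, with all names invented.

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdio.h>

    struct core_state {
        _Atomic(void *) vcpu;      /* owned work item, if any                */
        atomic_int nap_count;      /* how many threads have gone idle        */
    };

    static void go_idle(struct core_state *c)
    {
        atomic_store_explicit(&c->vcpu, NULL, memory_order_relaxed);
        /* Release: the NULL store above is visible before the new count is. */
        atomic_fetch_add_explicit(&c->nap_count, 1, memory_order_release);
    }

    static int idle_and_free(struct core_state *c)
    {
        /* Acquire pairs with the release increment in go_idle(); in the real
         * code this check runs on another hardware thread. */
        int n = atomic_load_explicit(&c->nap_count, memory_order_acquire);
        return n > 0 &&
               atomic_load_explicit(&c->vcpu, memory_order_relaxed) == NULL;
    }

    int main(void)
    {
        struct core_state c = { .vcpu = NULL, .nap_count = 0 };

        go_idle(&c);
        printf("idle_and_free: %d\n", idle_and_free(&c));
        return 0;
    }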
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S index f4dd041c14ea..f779450cb07c 100644 --- a/arch/powerpc/kvm/book3s_interrupts.S +++ b/arch/powerpc/kvm/book3s_interrupts.S | |||
@@ -129,29 +129,32 @@ kvm_start_lightweight: | |||
129 | * R12 = exit handler id | 129 | * R12 = exit handler id |
130 | * R13 = PACA | 130 | * R13 = PACA |
131 | * SVCPU.* = guest * | 131 | * SVCPU.* = guest * |
132 | * MSR.EE = 1 | ||
132 | * | 133 | * |
133 | */ | 134 | */ |
134 | 135 | ||
136 | PPC_LL r3, GPR4(r1) /* vcpu pointer */ | ||
137 | |||
138 | /* | ||
139 | * kvmppc_copy_from_svcpu can clobber volatile registers, save | ||
140 | * the exit handler id to the vcpu and restore it from there later. | ||
141 | */ | ||
142 | stw r12, VCPU_TRAP(r3) | ||
143 | |||
135 | /* Transfer reg values from shadow vcpu back to vcpu struct */ | 144 | /* Transfer reg values from shadow vcpu back to vcpu struct */ |
136 | /* On 64-bit, interrupts are still off at this point */ | 145 | /* On 64-bit, interrupts are still off at this point */ |
137 | PPC_LL r3, GPR4(r1) /* vcpu pointer */ | 146 | |
138 | GET_SHADOW_VCPU(r4) | 147 | GET_SHADOW_VCPU(r4) |
139 | bl FUNC(kvmppc_copy_from_svcpu) | 148 | bl FUNC(kvmppc_copy_from_svcpu) |
140 | nop | 149 | nop |
141 | 150 | ||
142 | #ifdef CONFIG_PPC_BOOK3S_64 | 151 | #ifdef CONFIG_PPC_BOOK3S_64 |
143 | /* Re-enable interrupts */ | ||
144 | ld r3, HSTATE_HOST_MSR(r13) | ||
145 | ori r3, r3, MSR_EE | ||
146 | MTMSR_EERI(r3) | ||
147 | |||
148 | /* | 152 | /* |
149 | * Reload kernel SPRG3 value. | 153 | * Reload kernel SPRG3 value. |
150 | * No need to save guest value as usermode can't modify SPRG3. | 154 | * No need to save guest value as usermode can't modify SPRG3. |
151 | */ | 155 | */ |
152 | ld r3, PACA_SPRG3(r13) | 156 | ld r3, PACA_SPRG3(r13) |
153 | mtspr SPRN_SPRG3, r3 | 157 | mtspr SPRN_SPRG3, r3 |
154 | |||
155 | #endif /* CONFIG_PPC_BOOK3S_64 */ | 158 | #endif /* CONFIG_PPC_BOOK3S_64 */ |
156 | 159 | ||
157 | /* R7 = vcpu */ | 160 | /* R7 = vcpu */ |
@@ -177,7 +180,7 @@ kvm_start_lightweight: | |||
177 | PPC_STL r31, VCPU_GPR(R31)(r7) | 180 | PPC_STL r31, VCPU_GPR(R31)(r7) |
178 | 181 | ||
179 | /* Pass the exit number as 3rd argument to kvmppc_handle_exit */ | 182 | /* Pass the exit number as 3rd argument to kvmppc_handle_exit */ |
180 | mr r5, r12 | 183 | lwz r5, VCPU_TRAP(r7) |
181 | 184 | ||
182 | /* Restore r3 (kvm_run) and r4 (vcpu) */ | 185 | /* Restore r3 (kvm_run) and r4 (vcpu) */ |
183 | REST_2GPRS(3, r1) | 186 | REST_2GPRS(3, r1) |
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index fe14ca3dd171..5b9e9063cfaf 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c | |||
@@ -66,6 +66,7 @@ static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu) | |||
66 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); | 66 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); |
67 | memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb)); | 67 | memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb)); |
68 | svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max; | 68 | svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max; |
69 | svcpu->in_use = 0; | ||
69 | svcpu_put(svcpu); | 70 | svcpu_put(svcpu); |
70 | #endif | 71 | #endif |
71 | vcpu->cpu = smp_processor_id(); | 72 | vcpu->cpu = smp_processor_id(); |
@@ -78,6 +79,9 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu) | |||
78 | { | 79 | { |
79 | #ifdef CONFIG_PPC_BOOK3S_64 | 80 | #ifdef CONFIG_PPC_BOOK3S_64 |
80 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); | 81 | struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu); |
82 | if (svcpu->in_use) { | ||
83 | kvmppc_copy_from_svcpu(vcpu, svcpu); | ||
84 | } | ||
81 | memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb)); | 85 | memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb)); |
82 | to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max; | 86 | to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max; |
83 | svcpu_put(svcpu); | 87 | svcpu_put(svcpu); |
@@ -110,12 +114,26 @@ void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu, | |||
110 | svcpu->ctr = vcpu->arch.ctr; | 114 | svcpu->ctr = vcpu->arch.ctr; |
111 | svcpu->lr = vcpu->arch.lr; | 115 | svcpu->lr = vcpu->arch.lr; |
112 | svcpu->pc = vcpu->arch.pc; | 116 | svcpu->pc = vcpu->arch.pc; |
117 | svcpu->in_use = true; | ||
113 | } | 118 | } |
114 | 119 | ||
115 | /* Copy data touched by real-mode code from shadow vcpu back to vcpu */ | 120 | /* Copy data touched by real-mode code from shadow vcpu back to vcpu */ |
116 | void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu, | 121 | void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu, |
117 | struct kvmppc_book3s_shadow_vcpu *svcpu) | 122 | struct kvmppc_book3s_shadow_vcpu *svcpu) |
118 | { | 123 | { |
124 | /* | ||
125 | * vcpu_put would just call us again because in_use hasn't | ||
126 | * been updated yet. | ||
127 | */ | ||
128 | preempt_disable(); | ||
129 | |||
130 | /* | ||
131 | * Maybe we were already preempted and synced the svcpu from | ||
132 | * our preempt notifiers. Don't bother touching this svcpu then. | ||
133 | */ | ||
134 | if (!svcpu->in_use) | ||
135 | goto out; | ||
136 | |||
119 | vcpu->arch.gpr[0] = svcpu->gpr[0]; | 137 | vcpu->arch.gpr[0] = svcpu->gpr[0]; |
120 | vcpu->arch.gpr[1] = svcpu->gpr[1]; | 138 | vcpu->arch.gpr[1] = svcpu->gpr[1]; |
121 | vcpu->arch.gpr[2] = svcpu->gpr[2]; | 139 | vcpu->arch.gpr[2] = svcpu->gpr[2]; |
@@ -139,6 +157,10 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu, | |||
139 | vcpu->arch.fault_dar = svcpu->fault_dar; | 157 | vcpu->arch.fault_dar = svcpu->fault_dar; |
140 | vcpu->arch.fault_dsisr = svcpu->fault_dsisr; | 158 | vcpu->arch.fault_dsisr = svcpu->fault_dsisr; |
141 | vcpu->arch.last_inst = svcpu->last_inst; | 159 | vcpu->arch.last_inst = svcpu->last_inst; |
160 | svcpu->in_use = false; | ||
161 | |||
162 | out: | ||
163 | preempt_enable(); | ||
142 | } | 164 | } |
143 | 165 | ||
144 | static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu) | 166 | static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu) |
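The in_use flag makes the svcpu sync idempotent: kvmppc_copy_to_svcpu() marks the shadow state live, kvmppc_copy_from_svcpu() copies it back only while it is still live, and a second sync (for instance vcpu_put running after a preempt notifier already synced) becomes a no-op. A compact sketch of that handshake with simplified, invented types:

    #include <stdbool.h>
    #include <stdio.h>

    struct shadow { int regs[4]; bool in_use; };
    struct vcpu   { int regs[4]; };

    static void copy_to_shadow(struct shadow *s, const struct vcpu *v)
    {
        for (int i = 0; i < 4; i++)
            s->regs[i] = v->regs[i];
        s->in_use = true;
    }

    static void copy_from_shadow(struct vcpu *v, struct shadow *s)
    {
        if (!s->in_use)          /* already synced, or never used: nothing to do */
            return;
        for (int i = 0; i < 4; i++)
            v->regs[i] = s->regs[i];
        s->in_use = false;
    }

    int main(void)
    {
        struct vcpu v = { { 1, 2, 3, 4 } };
        struct shadow s = { .in_use = false };

        copy_to_shadow(&s, &v);
        s.regs[0] = 42;           /* real-mode code modified guest state */
        copy_from_shadow(&v, &s);
        copy_from_shadow(&v, &s); /* second call is harmless */
        printf("v.regs[0] = %d\n", v.regs[0]);
        return 0;
    }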
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S index a38c4c9edab8..c3c5231adade 100644 --- a/arch/powerpc/kvm/book3s_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_rmhandlers.S | |||
@@ -153,15 +153,11 @@ _GLOBAL(kvmppc_entry_trampoline) | |||
153 | 153 | ||
154 | li r6, MSR_IR | MSR_DR | 154 | li r6, MSR_IR | MSR_DR |
155 | andc r6, r5, r6 /* Clear DR and IR in MSR value */ | 155 | andc r6, r5, r6 /* Clear DR and IR in MSR value */ |
156 | #ifdef CONFIG_PPC_BOOK3S_32 | ||
157 | /* | 156 | /* |
158 | * Set EE in HOST_MSR so that it's enabled when we get into our | 157 | * Set EE in HOST_MSR so that it's enabled when we get into our |
159 | * C exit handler function. On 64-bit we delay enabling | 158 | * C exit handler function. |
160 | * interrupts until we have finished transferring stuff | ||
161 | * to or from the PACA. | ||
162 | */ | 159 | */ |
163 | ori r5, r5, MSR_EE | 160 | ori r5, r5, MSR_EE |
164 | #endif | ||
165 | mtsrr0 r7 | 161 | mtsrr0 r7 |
166 | mtsrr1 r6 | 162 | mtsrr1 r6 |
167 | RFI | 163 | RFI |
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 53e65a210b9a..0591e05db74b 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c | |||
@@ -681,7 +681,7 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu) | |||
681 | int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | 681 | int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) |
682 | { | 682 | { |
683 | int ret, s; | 683 | int ret, s; |
684 | struct thread_struct thread; | 684 | struct debug_reg debug; |
685 | #ifdef CONFIG_PPC_FPU | 685 | #ifdef CONFIG_PPC_FPU |
686 | struct thread_fp_state fp; | 686 | struct thread_fp_state fp; |
687 | int fpexc_mode; | 687 | int fpexc_mode; |
@@ -723,9 +723,9 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |||
723 | #endif | 723 | #endif |
724 | 724 | ||
725 | /* Switch to guest debug context */ | 725 | /* Switch to guest debug context */ |
726 | thread.debug = vcpu->arch.shadow_dbg_reg; | 726 | debug = vcpu->arch.shadow_dbg_reg; |
727 | switch_booke_debug_regs(&thread); | 727 | switch_booke_debug_regs(&debug); |
728 | thread.debug = current->thread.debug; | 728 | debug = current->thread.debug; |
729 | current->thread.debug = vcpu->arch.shadow_dbg_reg; | 729 | current->thread.debug = vcpu->arch.shadow_dbg_reg; |
730 | 730 | ||
731 | kvmppc_fix_ee_before_entry(); | 731 | kvmppc_fix_ee_before_entry(); |
@@ -736,8 +736,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |||
736 | We also get here with interrupts enabled. */ | 736 | We also get here with interrupts enabled. */ |
737 | 737 | ||
738 | /* Switch back to user space debug context */ | 738 | /* Switch back to user space debug context */ |
739 | switch_booke_debug_regs(&thread); | 739 | switch_booke_debug_regs(&debug); |
740 | current->thread.debug = thread.debug; | 740 | current->thread.debug = debug; |
741 | 741 | ||
742 | #ifdef CONFIG_PPC_FPU | 742 | #ifdef CONFIG_PPC_FPU |
743 | kvmppc_save_guest_fp(vcpu); | 743 | kvmppc_save_guest_fp(vcpu); |
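switch_booke_debug_regs() now takes a struct debug_reg * rather than a whole thread_struct *, so booke.c can keep a small local debug_reg for the guest context instead of a full thread_struct on the stack. A sketch of that interface shape with simplified stand-in types:

    #include <stdio.h>

    struct debug_reg { unsigned long dbcr0, iac1; };
    struct thread_struct { struct debug_reg debug; /* ...many other fields... */ };

    static void switch_debug_regs(const struct debug_reg *dbg)
    {
        printf("loading dbcr0=%#lx iac1=%#lx\n", dbg->dbcr0, dbg->iac1);
    }

    int main(void)
    {
        struct thread_struct thread = { .debug = { .dbcr0 = 1, .iac1 = 0x100 } };
        struct debug_reg shadow = { .dbcr0 = 2, .iac1 = 0x200 };

        switch_debug_regs(&thread.debug);  /* context-switch path */
        switch_debug_regs(&shadow);        /* guest-entry path    */
        return 0;
    }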
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S index d73a59014900..596a285c0755 100644 --- a/arch/powerpc/lib/copyuser_64.S +++ b/arch/powerpc/lib/copyuser_64.S | |||
@@ -9,6 +9,14 @@ | |||
9 | #include <asm/processor.h> | 9 | #include <asm/processor.h> |
10 | #include <asm/ppc_asm.h> | 10 | #include <asm/ppc_asm.h> |
11 | 11 | ||
12 | #ifdef __BIG_ENDIAN__ | ||
13 | #define sLd sld /* Shift towards low-numbered address. */ | ||
14 | #define sHd srd /* Shift towards high-numbered address. */ | ||
15 | #else | ||
16 | #define sLd srd /* Shift towards low-numbered address. */ | ||
17 | #define sHd sld /* Shift towards high-numbered address. */ | ||
18 | #endif | ||
19 | |||
12 | .align 7 | 20 | .align 7 |
13 | _GLOBAL(__copy_tofrom_user) | 21 | _GLOBAL(__copy_tofrom_user) |
14 | BEGIN_FTR_SECTION | 22 | BEGIN_FTR_SECTION |
@@ -118,10 +126,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) | |||
118 | 126 | ||
119 | 24: ld r9,0(r4) /* 3+2n loads, 2+2n stores */ | 127 | 24: ld r9,0(r4) /* 3+2n loads, 2+2n stores */ |
120 | 25: ld r0,8(r4) | 128 | 25: ld r0,8(r4) |
121 | sld r6,r9,r10 | 129 | sLd r6,r9,r10 |
122 | 26: ldu r9,16(r4) | 130 | 26: ldu r9,16(r4) |
123 | srd r7,r0,r11 | 131 | sHd r7,r0,r11 |
124 | sld r8,r0,r10 | 132 | sLd r8,r0,r10 |
125 | or r7,r7,r6 | 133 | or r7,r7,r6 |
126 | blt cr6,79f | 134 | blt cr6,79f |
127 | 27: ld r0,8(r4) | 135 | 27: ld r0,8(r4) |
@@ -129,35 +137,35 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) | |||
129 | 137 | ||
130 | 28: ld r0,0(r4) /* 4+2n loads, 3+2n stores */ | 138 | 28: ld r0,0(r4) /* 4+2n loads, 3+2n stores */ |
131 | 29: ldu r9,8(r4) | 139 | 29: ldu r9,8(r4) |
132 | sld r8,r0,r10 | 140 | sLd r8,r0,r10 |
133 | addi r3,r3,-8 | 141 | addi r3,r3,-8 |
134 | blt cr6,5f | 142 | blt cr6,5f |
135 | 30: ld r0,8(r4) | 143 | 30: ld r0,8(r4) |
136 | srd r12,r9,r11 | 144 | sHd r12,r9,r11 |
137 | sld r6,r9,r10 | 145 | sLd r6,r9,r10 |
138 | 31: ldu r9,16(r4) | 146 | 31: ldu r9,16(r4) |
139 | or r12,r8,r12 | 147 | or r12,r8,r12 |
140 | srd r7,r0,r11 | 148 | sHd r7,r0,r11 |
141 | sld r8,r0,r10 | 149 | sLd r8,r0,r10 |
142 | addi r3,r3,16 | 150 | addi r3,r3,16 |
143 | beq cr6,78f | 151 | beq cr6,78f |
144 | 152 | ||
145 | 1: or r7,r7,r6 | 153 | 1: or r7,r7,r6 |
146 | 32: ld r0,8(r4) | 154 | 32: ld r0,8(r4) |
147 | 76: std r12,8(r3) | 155 | 76: std r12,8(r3) |
148 | 2: srd r12,r9,r11 | 156 | 2: sHd r12,r9,r11 |
149 | sld r6,r9,r10 | 157 | sLd r6,r9,r10 |
150 | 33: ldu r9,16(r4) | 158 | 33: ldu r9,16(r4) |
151 | or r12,r8,r12 | 159 | or r12,r8,r12 |
152 | 77: stdu r7,16(r3) | 160 | 77: stdu r7,16(r3) |
153 | srd r7,r0,r11 | 161 | sHd r7,r0,r11 |
154 | sld r8,r0,r10 | 162 | sLd r8,r0,r10 |
155 | bdnz 1b | 163 | bdnz 1b |
156 | 164 | ||
157 | 78: std r12,8(r3) | 165 | 78: std r12,8(r3) |
158 | or r7,r7,r6 | 166 | or r7,r7,r6 |
159 | 79: std r7,16(r3) | 167 | 79: std r7,16(r3) |
160 | 5: srd r12,r9,r11 | 168 | 5: sHd r12,r9,r11 |
161 | or r12,r8,r12 | 169 | or r12,r8,r12 |
162 | 80: std r12,24(r3) | 170 | 80: std r12,24(r3) |
163 | bne 6f | 171 | bne 6f |
@@ -165,23 +173,38 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) | |||
165 | blr | 173 | blr |
166 | 6: cmpwi cr1,r5,8 | 174 | 6: cmpwi cr1,r5,8 |
167 | addi r3,r3,32 | 175 | addi r3,r3,32 |
168 | sld r9,r9,r10 | 176 | sLd r9,r9,r10 |
169 | ble cr1,7f | 177 | ble cr1,7f |
170 | 34: ld r0,8(r4) | 178 | 34: ld r0,8(r4) |
171 | srd r7,r0,r11 | 179 | sHd r7,r0,r11 |
172 | or r9,r7,r9 | 180 | or r9,r7,r9 |
173 | 7: | 181 | 7: |
174 | bf cr7*4+1,1f | 182 | bf cr7*4+1,1f |
183 | #ifdef __BIG_ENDIAN__ | ||
175 | rotldi r9,r9,32 | 184 | rotldi r9,r9,32 |
185 | #endif | ||
176 | 94: stw r9,0(r3) | 186 | 94: stw r9,0(r3) |
187 | #ifdef __LITTLE_ENDIAN__ | ||
188 | rotrdi r9,r9,32 | ||
189 | #endif | ||
177 | addi r3,r3,4 | 190 | addi r3,r3,4 |
178 | 1: bf cr7*4+2,2f | 191 | 1: bf cr7*4+2,2f |
192 | #ifdef __BIG_ENDIAN__ | ||
179 | rotldi r9,r9,16 | 193 | rotldi r9,r9,16 |
194 | #endif | ||
180 | 95: sth r9,0(r3) | 195 | 95: sth r9,0(r3) |
196 | #ifdef __LITTLE_ENDIAN__ | ||
197 | rotrdi r9,r9,16 | ||
198 | #endif | ||
181 | addi r3,r3,2 | 199 | addi r3,r3,2 |
182 | 2: bf cr7*4+3,3f | 200 | 2: bf cr7*4+3,3f |
201 | #ifdef __BIG_ENDIAN__ | ||
183 | rotldi r9,r9,8 | 202 | rotldi r9,r9,8 |
203 | #endif | ||
184 | 96: stb r9,0(r3) | 204 | 96: stb r9,0(r3) |
205 | #ifdef __LITTLE_ENDIAN__ | ||
206 | rotrdi r9,r9,8 | ||
207 | #endif | ||
185 | 3: li r3,0 | 208 | 3: li r3,0 |
186 | blr | 209 | blr |
187 | 210 | ||
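The sLd/sHd macros capture that "shift towards the low-numbered address" is a left shift on big-endian but a right shift on little-endian, because the byte at the lowest address sits at opposite ends of the register. A self-contained demonstration of the two-word combine the unaligned copy loop performs; combine() is an illustrative helper, not the kernel routine.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Merge two aligned 8-byte words so the result holds the 8 bytes at offset off. */
    static uint64_t combine(uint64_t lo_word, uint64_t hi_word, unsigned off)
    {
        unsigned s = 8 * off;

        if (off == 0)
            return lo_word;
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        return (lo_word << s) | (hi_word >> (64 - s));   /* sLd = sld, sHd = srd */
    #else
        return (lo_word >> s) | (hi_word << (64 - s));   /* sLd = srd, sHd = sld */
    #endif
    }

    int main(void)
    {
        uint8_t src[16], expect[8];
        uint64_t w0, w1, got;
        unsigned off = 3;

        for (int i = 0; i < 16; i++)
            src[i] = 0x10 + i;

        memcpy(&w0, src, 8);        /* aligned load of bytes 0..7  */
        memcpy(&w1, src + 8, 8);    /* aligned load of bytes 8..15 */
        memcpy(expect, src + off, 8);

        got = combine(w0, w1, off);
        printf("match: %s\n", memcmp(&got, expect, 8) == 0 ? "yes" : "no");
        return 0;
    }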
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c index 02245cee7818..d7ddcee7feb8 100644 --- a/arch/powerpc/platforms/powernv/eeh-ioda.c +++ b/arch/powerpc/platforms/powernv/eeh-ioda.c | |||
@@ -36,7 +36,6 @@ | |||
36 | #include "powernv.h" | 36 | #include "powernv.h" |
37 | #include "pci.h" | 37 | #include "pci.h" |
38 | 38 | ||
39 | static char *hub_diag = NULL; | ||
40 | static int ioda_eeh_nb_init = 0; | 39 | static int ioda_eeh_nb_init = 0; |
41 | 40 | ||
42 | static int ioda_eeh_event(struct notifier_block *nb, | 41 | static int ioda_eeh_event(struct notifier_block *nb, |
@@ -140,15 +139,6 @@ static int ioda_eeh_post_init(struct pci_controller *hose) | |||
140 | ioda_eeh_nb_init = 1; | 139 | ioda_eeh_nb_init = 1; |
141 | } | 140 | } |
142 | 141 | ||
143 | /* We needn't HUB diag-data on PHB3 */ | ||
144 | if (phb->type == PNV_PHB_IODA1 && !hub_diag) { | ||
145 | hub_diag = (char *)__get_free_page(GFP_KERNEL | __GFP_ZERO); | ||
146 | if (!hub_diag) { | ||
147 | pr_err("%s: Out of memory !\n", __func__); | ||
148 | return -ENOMEM; | ||
149 | } | ||
150 | } | ||
151 | |||
152 | #ifdef CONFIG_DEBUG_FS | 142 | #ifdef CONFIG_DEBUG_FS |
153 | if (phb->dbgfs) { | 143 | if (phb->dbgfs) { |
154 | debugfs_create_file("err_injct_outbound", 0600, | 144 | debugfs_create_file("err_injct_outbound", 0600, |
@@ -633,11 +623,10 @@ static void ioda_eeh_hub_diag_common(struct OpalIoP7IOCErrorData *data) | |||
633 | static void ioda_eeh_hub_diag(struct pci_controller *hose) | 623 | static void ioda_eeh_hub_diag(struct pci_controller *hose) |
634 | { | 624 | { |
635 | struct pnv_phb *phb = hose->private_data; | 625 | struct pnv_phb *phb = hose->private_data; |
636 | struct OpalIoP7IOCErrorData *data; | 626 | struct OpalIoP7IOCErrorData *data = &phb->diag.hub_diag; |
637 | long rc; | 627 | long rc; |
638 | 628 | ||
639 | data = (struct OpalIoP7IOCErrorData *)ioda_eeh_hub_diag; | 629 | rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data)); |
640 | rc = opal_pci_get_hub_diag_data(phb->hub_id, data, PAGE_SIZE); | ||
641 | if (rc != OPAL_SUCCESS) { | 630 | if (rc != OPAL_SUCCESS) { |
642 | pr_warning("%s: Failed to get HUB#%llx diag-data (%ld)\n", | 631 | pr_warning("%s: Failed to get HUB#%llx diag-data (%ld)\n", |
643 | __func__, phb->hub_id, rc); | 632 | __func__, phb->hub_id, rc); |
@@ -820,14 +809,15 @@ static void ioda_eeh_phb_diag(struct pci_controller *hose) | |||
820 | struct OpalIoPhbErrorCommon *common; | 809 | struct OpalIoPhbErrorCommon *common; |
821 | long rc; | 810 | long rc; |
822 | 811 | ||
823 | common = (struct OpalIoPhbErrorCommon *)phb->diag.blob; | 812 | rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob, |
824 | rc = opal_pci_get_phb_diag_data2(phb->opal_id, common, PAGE_SIZE); | 813 | PNV_PCI_DIAG_BUF_SIZE); |
825 | if (rc != OPAL_SUCCESS) { | 814 | if (rc != OPAL_SUCCESS) { |
826 | pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n", | 815 | pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n", |
827 | __func__, hose->global_number, rc); | 816 | __func__, hose->global_number, rc); |
828 | return; | 817 | return; |
829 | } | 818 | } |
830 | 819 | ||
820 | common = (struct OpalIoPhbErrorCommon *)phb->diag.blob; | ||
831 | switch (common->ioType) { | 821 | switch (common->ioType) { |
832 | case OPAL_PHB_ERROR_DATA_TYPE_P7IOC: | 822 | case OPAL_PHB_ERROR_DATA_TYPE_P7IOC: |
833 | ioda_eeh_p7ioc_phb_diag(hose, common); | 823 | ioda_eeh_p7ioc_phb_diag(hose, common); |
diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c index e7e59e4f9892..79d83cad3d67 100644 --- a/arch/powerpc/platforms/powernv/opal-lpc.c +++ b/arch/powerpc/platforms/powernv/opal-lpc.c | |||
@@ -24,25 +24,25 @@ static int opal_lpc_chip_id = -1; | |||
24 | static u8 opal_lpc_inb(unsigned long port) | 24 | static u8 opal_lpc_inb(unsigned long port) |
25 | { | 25 | { |
26 | int64_t rc; | 26 | int64_t rc; |
27 | uint32_t data; | 27 | __be32 data; |
28 | 28 | ||
29 | if (opal_lpc_chip_id < 0 || port > 0xffff) | 29 | if (opal_lpc_chip_id < 0 || port > 0xffff) |
30 | return 0xff; | 30 | return 0xff; |
31 | rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 1); | 31 | rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 1); |
32 | return rc ? 0xff : data; | 32 | return rc ? 0xff : be32_to_cpu(data); |
33 | } | 33 | } |
34 | 34 | ||
35 | static __le16 __opal_lpc_inw(unsigned long port) | 35 | static __le16 __opal_lpc_inw(unsigned long port) |
36 | { | 36 | { |
37 | int64_t rc; | 37 | int64_t rc; |
38 | uint32_t data; | 38 | __be32 data; |
39 | 39 | ||
40 | if (opal_lpc_chip_id < 0 || port > 0xfffe) | 40 | if (opal_lpc_chip_id < 0 || port > 0xfffe) |
41 | return 0xffff; | 41 | return 0xffff; |
42 | if (port & 1) | 42 | if (port & 1) |
43 | return (__le16)opal_lpc_inb(port) << 8 | opal_lpc_inb(port + 1); | 43 | return (__le16)opal_lpc_inb(port) << 8 | opal_lpc_inb(port + 1); |
44 | rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 2); | 44 | rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 2); |
45 | return rc ? 0xffff : data; | 45 | return rc ? 0xffff : be32_to_cpu(data); |
46 | } | 46 | } |
47 | static u16 opal_lpc_inw(unsigned long port) | 47 | static u16 opal_lpc_inw(unsigned long port) |
48 | { | 48 | { |
@@ -52,7 +52,7 @@ static u16 opal_lpc_inw(unsigned long port) | |||
52 | static __le32 __opal_lpc_inl(unsigned long port) | 52 | static __le32 __opal_lpc_inl(unsigned long port) |
53 | { | 53 | { |
54 | int64_t rc; | 54 | int64_t rc; |
55 | uint32_t data; | 55 | __be32 data; |
56 | 56 | ||
57 | if (opal_lpc_chip_id < 0 || port > 0xfffc) | 57 | if (opal_lpc_chip_id < 0 || port > 0xfffc) |
58 | return 0xffffffff; | 58 | return 0xffffffff; |
@@ -62,7 +62,7 @@ static __le32 __opal_lpc_inl(unsigned long port) | |||
62 | (__le32)opal_lpc_inb(port + 2) << 8 | | 62 | (__le32)opal_lpc_inb(port + 2) << 8 | |
63 | opal_lpc_inb(port + 3); | 63 | opal_lpc_inb(port + 3); |
64 | rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 4); | 64 | rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 4); |
65 | return rc ? 0xffffffff : data; | 65 | return rc ? 0xffffffff : be32_to_cpu(data); |
66 | } | 66 | } |
67 | 67 | ||
68 | static u32 opal_lpc_inl(unsigned long port) | 68 | static u32 opal_lpc_inl(unsigned long port) |
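The opal-lpc.c hunks above change the buffer the firmware fills from uint32_t to __be32 and convert it with be32_to_cpu() before returning, since OPAL hands back big-endian data regardless of the host's byte order. As a rough, self-contained illustration in plain C (load_be32() below is a hypothetical stand-in for be32_to_cpu(), not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's be32_to_cpu(): rebuild a 32-bit
 * value from four big-endian bytes, independent of the host byte order. */
static uint32_t load_be32(const uint8_t *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(void)
{
	/* Pretend the firmware wrote 0x0000abcd in big-endian byte order. */
	const uint8_t buf[4] = { 0x00, 0x00, 0xab, 0xcd };

	/* Reinterpreting the bytes in host order is only correct on a
	 * big-endian CPU; the explicit conversion gives 0x0000abcd everywhere. */
	printf("value = 0x%08lx\n", (unsigned long)load_be32(buf));
	return 0;
}

The same pattern recurs in the lparcfg, msi and nvram hunks further down: keep the raw on-the-wire value in a __be16/__be32/__be64 and convert exactly once at the boundary.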
diff --git a/arch/powerpc/platforms/powernv/opal-xscom.c b/arch/powerpc/platforms/powernv/opal-xscom.c index 4d99a8fd55ac..4fbf276ac99e 100644 --- a/arch/powerpc/platforms/powernv/opal-xscom.c +++ b/arch/powerpc/platforms/powernv/opal-xscom.c | |||
@@ -96,9 +96,11 @@ static int opal_scom_read(scom_map_t map, u64 reg, u64 *value) | |||
96 | { | 96 | { |
97 | struct opal_scom_map *m = map; | 97 | struct opal_scom_map *m = map; |
98 | int64_t rc; | 98 | int64_t rc; |
99 | __be64 v; | ||
99 | 100 | ||
100 | reg = opal_scom_unmangle(reg); | 101 | reg = opal_scom_unmangle(reg); |
101 | rc = opal_xscom_read(m->chip, m->addr + reg, (uint64_t *)__pa(value)); | 102 | rc = opal_xscom_read(m->chip, m->addr + reg, (__be64 *)__pa(&v)); |
103 | *value = be64_to_cpu(v); | ||
102 | return opal_xscom_err_xlate(rc); | 104 | return opal_xscom_err_xlate(rc); |
103 | } | 105 | } |
104 | 106 | ||
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h index 911c24ef033e..1ed8d5f40f5a 100644 --- a/arch/powerpc/platforms/powernv/pci.h +++ b/arch/powerpc/platforms/powernv/pci.h | |||
@@ -172,11 +172,13 @@ struct pnv_phb { | |||
172 | } ioda; | 172 | } ioda; |
173 | }; | 173 | }; |
174 | 174 | ||
175 | /* PHB status structure */ | 175 | /* PHB and hub status structure */ |
176 | union { | 176 | union { |
177 | unsigned char blob[PNV_PCI_DIAG_BUF_SIZE]; | 177 | unsigned char blob[PNV_PCI_DIAG_BUF_SIZE]; |
178 | struct OpalIoP7IOCPhbErrorData p7ioc; | 178 | struct OpalIoP7IOCPhbErrorData p7ioc; |
179 | struct OpalIoP7IOCErrorData hub_diag; | ||
179 | } diag; | 180 | } diag; |
181 | |||
180 | }; | 182 | }; |
181 | 183 | ||
182 | extern struct pci_ops pnv_pci_ops; | 184 | extern struct pci_ops pnv_pci_ops; |
diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c index e738007eae64..c9fecf09b8fa 100644 --- a/arch/powerpc/platforms/pseries/lparcfg.c +++ b/arch/powerpc/platforms/pseries/lparcfg.c | |||
@@ -157,7 +157,7 @@ static void parse_ppp_data(struct seq_file *m) | |||
157 | { | 157 | { |
158 | struct hvcall_ppp_data ppp_data; | 158 | struct hvcall_ppp_data ppp_data; |
159 | struct device_node *root; | 159 | struct device_node *root; |
160 | const int *perf_level; | 160 | const __be32 *perf_level; |
161 | int rc; | 161 | int rc; |
162 | 162 | ||
163 | rc = h_get_ppp(&ppp_data); | 163 | rc = h_get_ppp(&ppp_data); |
@@ -201,7 +201,7 @@ static void parse_ppp_data(struct seq_file *m) | |||
201 | perf_level = of_get_property(root, | 201 | perf_level = of_get_property(root, |
202 | "ibm,partition-performance-parameters-level", | 202 | "ibm,partition-performance-parameters-level", |
203 | NULL); | 203 | NULL); |
204 | if (perf_level && (*perf_level >= 1)) { | 204 | if (perf_level && (be32_to_cpup(perf_level) >= 1)) { |
205 | seq_printf(m, | 205 | seq_printf(m, |
206 | "physical_procs_allocated_to_virtualization=%d\n", | 206 | "physical_procs_allocated_to_virtualization=%d\n", |
207 | ppp_data.phys_platform_procs); | 207 | ppp_data.phys_platform_procs); |
@@ -435,7 +435,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v) | |||
435 | int partition_potential_processors; | 435 | int partition_potential_processors; |
436 | int partition_active_processors; | 436 | int partition_active_processors; |
437 | struct device_node *rtas_node; | 437 | struct device_node *rtas_node; |
438 | const int *lrdrp = NULL; | 438 | const __be32 *lrdrp = NULL; |
439 | 439 | ||
440 | rtas_node = of_find_node_by_path("/rtas"); | 440 | rtas_node = of_find_node_by_path("/rtas"); |
441 | if (rtas_node) | 441 | if (rtas_node) |
@@ -444,7 +444,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v) | |||
444 | if (lrdrp == NULL) { | 444 | if (lrdrp == NULL) { |
445 | partition_potential_processors = vdso_data->processorCount; | 445 | partition_potential_processors = vdso_data->processorCount; |
446 | } else { | 446 | } else { |
447 | partition_potential_processors = *(lrdrp + 4); | 447 | partition_potential_processors = be32_to_cpup(lrdrp + 4); |
448 | } | 448 | } |
449 | of_node_put(rtas_node); | 449 | of_node_put(rtas_node); |
450 | 450 | ||
@@ -654,7 +654,7 @@ static int lparcfg_data(struct seq_file *m, void *v) | |||
654 | const char *model = ""; | 654 | const char *model = ""; |
655 | const char *system_id = ""; | 655 | const char *system_id = ""; |
656 | const char *tmp; | 656 | const char *tmp; |
657 | const unsigned int *lp_index_ptr; | 657 | const __be32 *lp_index_ptr; |
658 | unsigned int lp_index = 0; | 658 | unsigned int lp_index = 0; |
659 | 659 | ||
660 | seq_printf(m, "%s %s\n", MODULE_NAME, MODULE_VERS); | 660 | seq_printf(m, "%s %s\n", MODULE_NAME, MODULE_VERS); |
@@ -670,7 +670,7 @@ static int lparcfg_data(struct seq_file *m, void *v) | |||
670 | lp_index_ptr = of_get_property(rootdn, "ibm,partition-no", | 670 | lp_index_ptr = of_get_property(rootdn, "ibm,partition-no", |
671 | NULL); | 671 | NULL); |
672 | if (lp_index_ptr) | 672 | if (lp_index_ptr) |
673 | lp_index = *lp_index_ptr; | 673 | lp_index = be32_to_cpup(lp_index_ptr); |
674 | of_node_put(rootdn); | 674 | of_node_put(rootdn); |
675 | } | 675 | } |
676 | seq_printf(m, "serial_number=%s\n", system_id); | 676 | seq_printf(m, "serial_number=%s\n", system_id); |
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c index 6d2f0abce6fa..0c882e83c4ce 100644 --- a/arch/powerpc/platforms/pseries/msi.c +++ b/arch/powerpc/platforms/pseries/msi.c | |||
@@ -130,7 +130,8 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name) | |||
130 | { | 130 | { |
131 | struct device_node *dn; | 131 | struct device_node *dn; |
132 | struct pci_dn *pdn; | 132 | struct pci_dn *pdn; |
133 | const u32 *req_msi; | 133 | const __be32 *p; |
134 | u32 req_msi; | ||
134 | 135 | ||
135 | pdn = pci_get_pdn(pdev); | 136 | pdn = pci_get_pdn(pdev); |
136 | if (!pdn) | 137 | if (!pdn) |
@@ -138,19 +139,20 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name) | |||
138 | 139 | ||
139 | dn = pdn->node; | 140 | dn = pdn->node; |
140 | 141 | ||
141 | req_msi = of_get_property(dn, prop_name, NULL); | 142 | p = of_get_property(dn, prop_name, NULL); |
142 | if (!req_msi) { | 143 | if (!p) { |
143 | pr_debug("rtas_msi: No %s on %s\n", prop_name, dn->full_name); | 144 | pr_debug("rtas_msi: No %s on %s\n", prop_name, dn->full_name); |
144 | return -ENOENT; | 145 | return -ENOENT; |
145 | } | 146 | } |
146 | 147 | ||
147 | if (*req_msi < nvec) { | 148 | req_msi = be32_to_cpup(p); |
149 | if (req_msi < nvec) { | ||
148 | pr_debug("rtas_msi: %s requests < %d MSIs\n", prop_name, nvec); | 150 | pr_debug("rtas_msi: %s requests < %d MSIs\n", prop_name, nvec); |
149 | 151 | ||
150 | if (*req_msi == 0) /* Be paranoid */ | 152 | if (req_msi == 0) /* Be paranoid */ |
151 | return -ENOSPC; | 153 | return -ENOSPC; |
152 | 154 | ||
153 | return *req_msi; | 155 | return req_msi; |
154 | } | 156 | } |
155 | 157 | ||
156 | return 0; | 158 | return 0; |
@@ -171,7 +173,7 @@ static int check_req_msix(struct pci_dev *pdev, int nvec) | |||
171 | static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total) | 173 | static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total) |
172 | { | 174 | { |
173 | struct device_node *dn; | 175 | struct device_node *dn; |
174 | const u32 *p; | 176 | const __be32 *p; |
175 | 177 | ||
176 | dn = of_node_get(pci_device_to_OF_node(dev)); | 178 | dn = of_node_get(pci_device_to_OF_node(dev)); |
177 | while (dn) { | 179 | while (dn) { |
@@ -179,7 +181,7 @@ static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total) | |||
179 | if (p) { | 181 | if (p) { |
180 | pr_debug("rtas_msi: found prop on dn %s\n", | 182 | pr_debug("rtas_msi: found prop on dn %s\n", |
181 | dn->full_name); | 183 | dn->full_name); |
182 | *total = *p; | 184 | *total = be32_to_cpup(p); |
183 | return dn; | 185 | return dn; |
184 | } | 186 | } |
185 | 187 | ||
@@ -232,13 +234,13 @@ struct msi_counts { | |||
232 | static void *count_non_bridge_devices(struct device_node *dn, void *data) | 234 | static void *count_non_bridge_devices(struct device_node *dn, void *data) |
233 | { | 235 | { |
234 | struct msi_counts *counts = data; | 236 | struct msi_counts *counts = data; |
235 | const u32 *p; | 237 | const __be32 *p; |
236 | u32 class; | 238 | u32 class; |
237 | 239 | ||
238 | pr_debug("rtas_msi: counting %s\n", dn->full_name); | 240 | pr_debug("rtas_msi: counting %s\n", dn->full_name); |
239 | 241 | ||
240 | p = of_get_property(dn, "class-code", NULL); | 242 | p = of_get_property(dn, "class-code", NULL); |
241 | class = p ? *p : 0; | 243 | class = p ? be32_to_cpup(p) : 0; |
242 | 244 | ||
243 | if ((class >> 8) != PCI_CLASS_BRIDGE_PCI) | 245 | if ((class >> 8) != PCI_CLASS_BRIDGE_PCI) |
244 | counts->num_devices++; | 246 | counts->num_devices++; |
@@ -249,7 +251,7 @@ static void *count_non_bridge_devices(struct device_node *dn, void *data) | |||
249 | static void *count_spare_msis(struct device_node *dn, void *data) | 251 | static void *count_spare_msis(struct device_node *dn, void *data) |
250 | { | 252 | { |
251 | struct msi_counts *counts = data; | 253 | struct msi_counts *counts = data; |
252 | const u32 *p; | 254 | const __be32 *p; |
253 | int req; | 255 | int req; |
254 | 256 | ||
255 | if (dn == counts->requestor) | 257 | if (dn == counts->requestor) |
@@ -260,11 +262,11 @@ static void *count_spare_msis(struct device_node *dn, void *data) | |||
260 | req = 0; | 262 | req = 0; |
261 | p = of_get_property(dn, "ibm,req#msi", NULL); | 263 | p = of_get_property(dn, "ibm,req#msi", NULL); |
262 | if (p) | 264 | if (p) |
263 | req = *p; | 265 | req = be32_to_cpup(p); |
264 | 266 | ||
265 | p = of_get_property(dn, "ibm,req#msi-x", NULL); | 267 | p = of_get_property(dn, "ibm,req#msi-x", NULL); |
266 | if (p) | 268 | if (p) |
267 | req = max(req, (int)*p); | 269 | req = max(req, (int)be32_to_cpup(p)); |
268 | } | 270 | } |
269 | 271 | ||
270 | if (req < counts->quota) | 272 | if (req < counts->quota) |
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c index 7bfaf58d4664..d7096f2f7751 100644 --- a/arch/powerpc/platforms/pseries/nvram.c +++ b/arch/powerpc/platforms/pseries/nvram.c | |||
@@ -43,8 +43,8 @@ static char nvram_buf[NVRW_CNT]; /* assume this is in the first 4GB */ | |||
43 | static DEFINE_SPINLOCK(nvram_lock); | 43 | static DEFINE_SPINLOCK(nvram_lock); |
44 | 44 | ||
45 | struct err_log_info { | 45 | struct err_log_info { |
46 | int error_type; | 46 | __be32 error_type; |
47 | unsigned int seq_num; | 47 | __be32 seq_num; |
48 | }; | 48 | }; |
49 | 49 | ||
50 | struct nvram_os_partition { | 50 | struct nvram_os_partition { |
@@ -79,9 +79,9 @@ static const char *pseries_nvram_os_partitions[] = { | |||
79 | }; | 79 | }; |
80 | 80 | ||
81 | struct oops_log_info { | 81 | struct oops_log_info { |
82 | u16 version; | 82 | __be16 version; |
83 | u16 report_length; | 83 | __be16 report_length; |
84 | u64 timestamp; | 84 | __be64 timestamp; |
85 | } __attribute__((packed)); | 85 | } __attribute__((packed)); |
86 | 86 | ||
87 | static void oops_to_nvram(struct kmsg_dumper *dumper, | 87 | static void oops_to_nvram(struct kmsg_dumper *dumper, |
@@ -291,8 +291,8 @@ int nvram_write_os_partition(struct nvram_os_partition *part, char * buff, | |||
291 | length = part->size; | 291 | length = part->size; |
292 | } | 292 | } |
293 | 293 | ||
294 | info.error_type = err_type; | 294 | info.error_type = cpu_to_be32(err_type); |
295 | info.seq_num = error_log_cnt; | 295 | info.seq_num = cpu_to_be32(error_log_cnt); |
296 | 296 | ||
297 | tmp_index = part->index; | 297 | tmp_index = part->index; |
298 | 298 | ||
@@ -364,8 +364,8 @@ int nvram_read_partition(struct nvram_os_partition *part, char *buff, | |||
364 | } | 364 | } |
365 | 365 | ||
366 | if (part->os_partition) { | 366 | if (part->os_partition) { |
367 | *error_log_cnt = info.seq_num; | 367 | *error_log_cnt = be32_to_cpu(info.seq_num); |
368 | *err_type = info.error_type; | 368 | *err_type = be32_to_cpu(info.error_type); |
369 | } | 369 | } |
370 | 370 | ||
371 | return 0; | 371 | return 0; |
@@ -529,9 +529,9 @@ static int zip_oops(size_t text_len) | |||
529 | pr_err("nvram: logging uncompressed oops/panic report\n"); | 529 | pr_err("nvram: logging uncompressed oops/panic report\n"); |
530 | return -1; | 530 | return -1; |
531 | } | 531 | } |
532 | oops_hdr->version = OOPS_HDR_VERSION; | 532 | oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION); |
533 | oops_hdr->report_length = (u16) zipped_len; | 533 | oops_hdr->report_length = cpu_to_be16(zipped_len); |
534 | oops_hdr->timestamp = get_seconds(); | 534 | oops_hdr->timestamp = cpu_to_be64(get_seconds()); |
535 | return 0; | 535 | return 0; |
536 | } | 536 | } |
537 | 537 | ||
@@ -574,9 +574,9 @@ static int nvram_pstore_write(enum pstore_type_id type, | |||
574 | clobbering_unread_rtas_event()) | 574 | clobbering_unread_rtas_event()) |
575 | return -1; | 575 | return -1; |
576 | 576 | ||
577 | oops_hdr->version = OOPS_HDR_VERSION; | 577 | oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION); |
578 | oops_hdr->report_length = (u16) size; | 578 | oops_hdr->report_length = cpu_to_be16(size); |
579 | oops_hdr->timestamp = get_seconds(); | 579 | oops_hdr->timestamp = cpu_to_be64(get_seconds()); |
580 | 580 | ||
581 | if (compressed) | 581 | if (compressed) |
582 | err_type = ERR_TYPE_KERNEL_PANIC_GZ; | 582 | err_type = ERR_TYPE_KERNEL_PANIC_GZ; |
@@ -670,16 +670,16 @@ static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type, | |||
670 | size_t length, hdr_size; | 670 | size_t length, hdr_size; |
671 | 671 | ||
672 | oops_hdr = (struct oops_log_info *)buff; | 672 | oops_hdr = (struct oops_log_info *)buff; |
673 | if (oops_hdr->version < OOPS_HDR_VERSION) { | 673 | if (be16_to_cpu(oops_hdr->version) < OOPS_HDR_VERSION) { |
674 | /* Old format oops header had 2-byte record size */ | 674 | /* Old format oops header had 2-byte record size */ |
675 | hdr_size = sizeof(u16); | 675 | hdr_size = sizeof(u16); |
676 | length = oops_hdr->version; | 676 | length = be16_to_cpu(oops_hdr->version); |
677 | time->tv_sec = 0; | 677 | time->tv_sec = 0; |
678 | time->tv_nsec = 0; | 678 | time->tv_nsec = 0; |
679 | } else { | 679 | } else { |
680 | hdr_size = sizeof(*oops_hdr); | 680 | hdr_size = sizeof(*oops_hdr); |
681 | length = oops_hdr->report_length; | 681 | length = be16_to_cpu(oops_hdr->report_length); |
682 | time->tv_sec = oops_hdr->timestamp; | 682 | time->tv_sec = be64_to_cpu(oops_hdr->timestamp); |
683 | time->tv_nsec = 0; | 683 | time->tv_nsec = 0; |
684 | } | 684 | } |
685 | *buf = kmalloc(length, GFP_KERNEL); | 685 | *buf = kmalloc(length, GFP_KERNEL); |
@@ -889,13 +889,13 @@ static void oops_to_nvram(struct kmsg_dumper *dumper, | |||
889 | kmsg_dump_get_buffer(dumper, false, | 889 | kmsg_dump_get_buffer(dumper, false, |
890 | oops_data, oops_data_sz, &text_len); | 890 | oops_data, oops_data_sz, &text_len); |
891 | err_type = ERR_TYPE_KERNEL_PANIC; | 891 | err_type = ERR_TYPE_KERNEL_PANIC; |
892 | oops_hdr->version = OOPS_HDR_VERSION; | 892 | oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION); |
893 | oops_hdr->report_length = (u16) text_len; | 893 | oops_hdr->report_length = cpu_to_be16(text_len); |
894 | oops_hdr->timestamp = get_seconds(); | 894 | oops_hdr->timestamp = cpu_to_be64(get_seconds()); |
895 | } | 895 | } |
896 | 896 | ||
897 | (void) nvram_write_os_partition(&oops_log_partition, oops_buf, | 897 | (void) nvram_write_os_partition(&oops_log_partition, oops_buf, |
898 | (int) (sizeof(*oops_hdr) + oops_hdr->report_length), err_type, | 898 | (int) (sizeof(*oops_hdr) + text_len), err_type, |
899 | ++oops_count); | 899 | ++oops_count); |
900 | 900 | ||
901 | spin_unlock_irqrestore(&lock, flags); | 901 | spin_unlock_irqrestore(&lock, flags); |
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c index 5f93856cdf47..70670a2d9cf2 100644 --- a/arch/powerpc/platforms/pseries/pci.c +++ b/arch/powerpc/platforms/pseries/pci.c | |||
@@ -113,7 +113,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) | |||
113 | { | 113 | { |
114 | struct device_node *dn, *pdn; | 114 | struct device_node *dn, *pdn; |
115 | struct pci_bus *bus; | 115 | struct pci_bus *bus; |
116 | const uint32_t *pcie_link_speed_stats; | 116 | const __be32 *pcie_link_speed_stats; |
117 | 117 | ||
118 | bus = bridge->bus; | 118 | bus = bridge->bus; |
119 | 119 | ||
@@ -122,7 +122,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) | |||
122 | return 0; | 122 | return 0; |
123 | 123 | ||
124 | for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) { | 124 | for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) { |
125 | pcie_link_speed_stats = (const uint32_t *) of_get_property(pdn, | 125 | pcie_link_speed_stats = of_get_property(pdn, |
126 | "ibm,pcie-link-speed-stats", NULL); | 126 | "ibm,pcie-link-speed-stats", NULL); |
127 | if (pcie_link_speed_stats) | 127 | if (pcie_link_speed_stats) |
128 | break; | 128 | break; |
@@ -135,7 +135,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) | |||
135 | return 0; | 135 | return 0; |
136 | } | 136 | } |
137 | 137 | ||
138 | switch (pcie_link_speed_stats[0]) { | 138 | switch (be32_to_cpup(pcie_link_speed_stats)) { |
139 | case 0x01: | 139 | case 0x01: |
140 | bus->max_bus_speed = PCIE_SPEED_2_5GT; | 140 | bus->max_bus_speed = PCIE_SPEED_2_5GT; |
141 | break; | 141 | break; |
@@ -147,7 +147,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge) | |||
147 | break; | 147 | break; |
148 | } | 148 | } |
149 | 149 | ||
150 | switch (pcie_link_speed_stats[1]) { | 150 | switch (be32_to_cpup(pcie_link_speed_stats + 1)) { |
151 | case 0x01: | 151 | case 0x01: |
152 | bus->cur_bus_speed = PCIE_SPEED_2_5GT; | 152 | bus->cur_bus_speed = PCIE_SPEED_2_5GT; |
153 | break; | 153 | break; |
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 1e1a03d2d19f..e9f312532526 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -135,7 +135,6 @@ config S390 | |||
135 | select HAVE_SYSCALL_TRACEPOINTS | 135 | select HAVE_SYSCALL_TRACEPOINTS |
136 | select HAVE_UID16 if 32BIT | 136 | select HAVE_UID16 if 32BIT |
137 | select HAVE_VIRT_CPU_ACCOUNTING | 137 | select HAVE_VIRT_CPU_ACCOUNTING |
138 | select INIT_ALL_POSSIBLE | ||
139 | select KTIME_SCALAR if 32BIT | 138 | select KTIME_SCALAR if 32BIT |
140 | select MODULES_USE_ELF_RELA | 139 | select MODULES_USE_ELF_RELA |
141 | select OLD_SIGACTION | 140 | select OLD_SIGACTION |
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h index ac9bed8e103f..160779394096 100644 --- a/arch/s390/include/asm/smp.h +++ b/arch/s390/include/asm/smp.h | |||
@@ -31,6 +31,7 @@ extern void smp_yield(void); | |||
31 | extern void smp_stop_cpu(void); | 31 | extern void smp_stop_cpu(void); |
32 | extern void smp_cpu_set_polarization(int cpu, int val); | 32 | extern void smp_cpu_set_polarization(int cpu, int val); |
33 | extern int smp_cpu_get_polarization(int cpu); | 33 | extern int smp_cpu_get_polarization(int cpu); |
34 | extern void smp_fill_possible_mask(void); | ||
34 | 35 | ||
35 | #else /* CONFIG_SMP */ | 36 | #else /* CONFIG_SMP */ |
36 | 37 | ||
@@ -50,6 +51,7 @@ static inline int smp_vcpu_scheduled(int cpu) { return 1; } | |||
50 | static inline void smp_yield_cpu(int cpu) { } | 51 | static inline void smp_yield_cpu(int cpu) { } |
51 | static inline void smp_yield(void) { } | 52 | static inline void smp_yield(void) { } |
52 | static inline void smp_stop_cpu(void) { } | 53 | static inline void smp_stop_cpu(void) { } |
54 | static inline void smp_fill_possible_mask(void) { } | ||
53 | 55 | ||
54 | #endif /* CONFIG_SMP */ | 56 | #endif /* CONFIG_SMP */ |
55 | 57 | ||
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 4444875266ee..0f3d44ecbfc6 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
@@ -1023,6 +1023,7 @@ void __init setup_arch(char **cmdline_p) | |||
1023 | setup_vmcoreinfo(); | 1023 | setup_vmcoreinfo(); |
1024 | setup_lowcore(); | 1024 | setup_lowcore(); |
1025 | 1025 | ||
1026 | smp_fill_possible_mask(); | ||
1026 | cpu_init(); | 1027 | cpu_init(); |
1027 | s390_init_cpu_topology(); | 1028 | s390_init_cpu_topology(); |
1028 | 1029 | ||
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index dc4a53465060..958704798f4a 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -721,18 +721,14 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle) | |||
721 | return 0; | 721 | return 0; |
722 | } | 722 | } |
723 | 723 | ||
724 | static int __init setup_possible_cpus(char *s) | 724 | static unsigned int setup_possible_cpus __initdata; |
725 | { | ||
726 | int max, cpu; | ||
727 | 725 | ||
728 | if (kstrtoint(s, 0, &max) < 0) | 726 | static int __init _setup_possible_cpus(char *s) |
729 | return 0; | 727 | { |
730 | init_cpu_possible(cpumask_of(0)); | 728 | get_option(&s, &setup_possible_cpus); |
731 | for (cpu = 1; cpu < max && cpu < nr_cpu_ids; cpu++) | ||
732 | set_cpu_possible(cpu, true); | ||
733 | return 0; | 729 | return 0; |
734 | } | 730 | } |
735 | early_param("possible_cpus", setup_possible_cpus); | 731 | early_param("possible_cpus", _setup_possible_cpus); |
736 | 732 | ||
737 | #ifdef CONFIG_HOTPLUG_CPU | 733 | #ifdef CONFIG_HOTPLUG_CPU |
738 | 734 | ||
@@ -775,6 +771,17 @@ void __noreturn cpu_die(void) | |||
775 | 771 | ||
776 | #endif /* CONFIG_HOTPLUG_CPU */ | 772 | #endif /* CONFIG_HOTPLUG_CPU */ |
777 | 773 | ||
774 | void __init smp_fill_possible_mask(void) | ||
775 | { | ||
776 | unsigned int possible, cpu; | ||
777 | |||
778 | possible = setup_possible_cpus; | ||
779 | if (!possible) | ||
780 | possible = MACHINE_IS_VM ? 64 : nr_cpu_ids; | ||
781 | for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++) | ||
782 | set_cpu_possible(cpu, true); | ||
783 | } | ||
784 | |||
778 | void __init smp_prepare_cpus(unsigned int max_cpus) | 785 | void __init smp_prepare_cpus(unsigned int max_cpus) |
779 | { | 786 | { |
780 | /* request the 0x1201 emergency signal external interrupt */ | 787 | /* request the 0x1201 emergency signal external interrupt */ |
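The smp.c hunk above stops building the possible-CPU mask at early-param time and instead just records the possible_cpus= value, letting smp_fill_possible_mask() (called from setup_arch(), per the setup.c hunk) mark CPUs possible up to that count, falling back to 64 on z/VM or nr_cpu_ids otherwise. A small user-space sketch of that fill loop, with illustrative types and limits rather than the kernel's cpumask API:

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 64			/* illustrative stand-in for nr_cpu_ids */

static bool cpu_possible[NR_CPUS];

/* Honour an explicit "possible_cpus=" value if one was parsed,
 * otherwise fall back to a platform default. */
static void fill_possible_mask(unsigned int setup_possible_cpus,
			       unsigned int platform_default)
{
	unsigned int possible, cpu;

	possible = setup_possible_cpus ? setup_possible_cpus : platform_default;
	for (cpu = 0; cpu < possible && cpu < NR_CPUS; cpu++)
		cpu_possible[cpu] = true;
}

int main(void)
{
	fill_possible_mask(0, 16);	/* no parameter given: default to 16 */
	printf("cpu 15 possible: %d, cpu 16 possible: %d\n",
	       cpu_possible[15], cpu_possible[16]);
	return 0;
}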
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c index 800f064b0da7..069607209a30 100644 --- a/arch/s390/pci/pci_event.c +++ b/arch/s390/pci/pci_event.c | |||
@@ -75,6 +75,7 @@ void zpci_event_availability(void *data) | |||
75 | if (!zdev || zdev->state == ZPCI_FN_STATE_CONFIGURED) | 75 | if (!zdev || zdev->state == ZPCI_FN_STATE_CONFIGURED) |
76 | break; | 76 | break; |
77 | zdev->state = ZPCI_FN_STATE_CONFIGURED; | 77 | zdev->state = ZPCI_FN_STATE_CONFIGURED; |
78 | zdev->fh = ccdf->fh; | ||
78 | ret = zpci_enable_device(zdev); | 79 | ret = zpci_enable_device(zdev); |
79 | if (ret) | 80 | if (ret) |
80 | break; | 81 | break; |
@@ -101,6 +102,7 @@ void zpci_event_availability(void *data) | |||
101 | if (pdev) | 102 | if (pdev) |
102 | pci_stop_and_remove_bus_device(pdev); | 103 | pci_stop_and_remove_bus_device(pdev); |
103 | 104 | ||
105 | zdev->fh = ccdf->fh; | ||
104 | zpci_disable_device(zdev); | 106 | zpci_disable_device(zdev); |
105 | zdev->state = ZPCI_FN_STATE_STANDBY; | 107 | zdev->state = ZPCI_FN_STATE_STANDBY; |
106 | break; | 108 | break; |
diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c index 2a0a596ebf67..d77f2f6c7ff0 100644 --- a/arch/sh/kernel/sh_ksyms_32.c +++ b/arch/sh/kernel/sh_ksyms_32.c | |||
@@ -20,6 +20,11 @@ EXPORT_SYMBOL(csum_partial_copy_generic); | |||
20 | EXPORT_SYMBOL(copy_page); | 20 | EXPORT_SYMBOL(copy_page); |
21 | EXPORT_SYMBOL(__clear_user); | 21 | EXPORT_SYMBOL(__clear_user); |
22 | EXPORT_SYMBOL(empty_zero_page); | 22 | EXPORT_SYMBOL(empty_zero_page); |
23 | #ifdef CONFIG_FLATMEM | ||
24 | /* need in pfn_valid macro */ | ||
25 | EXPORT_SYMBOL(min_low_pfn); | ||
26 | EXPORT_SYMBOL(max_low_pfn); | ||
27 | #endif | ||
23 | 28 | ||
24 | #define DECLARE_EXPORT(name) \ | 29 | #define DECLARE_EXPORT(name) \ |
25 | extern void name(void);EXPORT_SYMBOL(name) | 30 | extern void name(void);EXPORT_SYMBOL(name) |
diff --git a/arch/sh/lib/Makefile b/arch/sh/lib/Makefile index 7b95f29e3174..3baff31e58cf 100644 --- a/arch/sh/lib/Makefile +++ b/arch/sh/lib/Makefile | |||
@@ -6,7 +6,7 @@ lib-y = delay.o memmove.o memchr.o \ | |||
6 | checksum.o strlen.o div64.o div64-generic.o | 6 | checksum.o strlen.o div64.o div64-generic.o |
7 | 7 | ||
8 | # Extracted from libgcc | 8 | # Extracted from libgcc |
9 | lib-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \ | 9 | obj-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \ |
10 | ashlsi3.o ashrsi3.o ashiftrt.o lshrsi3.o \ | 10 | ashlsi3.o ashrsi3.o ashiftrt.o lshrsi3.o \ |
11 | udiv_qrnnd.o | 11 | udiv_qrnnd.o |
12 | 12 | ||
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 8358dc144959..0f9e94537eee 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h | |||
@@ -619,7 +619,7 @@ static inline unsigned long pte_present(pte_t pte) | |||
619 | } | 619 | } |
620 | 620 | ||
621 | #define pte_accessible pte_accessible | 621 | #define pte_accessible pte_accessible |
622 | static inline unsigned long pte_accessible(pte_t a) | 622 | static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a) |
623 | { | 623 | { |
624 | return pte_val(a) & _PAGE_VALID; | 624 | return pte_val(a) & _PAGE_VALID; |
625 | } | 625 | } |
@@ -847,7 +847,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, | |||
847 | * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U | 847 | * SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U |
848 | * and SUN4V pte layout, so this inline test is fine. | 848 | * and SUN4V pte layout, so this inline test is fine. |
849 | */ | 849 | */ |
850 | if (likely(mm != &init_mm) && pte_accessible(orig)) | 850 | if (likely(mm != &init_mm) && pte_accessible(mm, orig)) |
851 | tlb_batch_add(mm, addr, ptep, orig, fullmm); | 851 | tlb_batch_add(mm, addr, ptep, orig, fullmm); |
852 | } | 852 | } |
853 | 853 | ||
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h index e562d3caee57..ad7e178337f1 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h | |||
@@ -262,8 +262,8 @@ extern unsigned long __must_check __clear_user(void __user *, unsigned long); | |||
262 | extern __must_check long strlen_user(const char __user *str); | 262 | extern __must_check long strlen_user(const char __user *str); |
263 | extern __must_check long strnlen_user(const char __user *str, long n); | 263 | extern __must_check long strnlen_user(const char __user *str, long n); |
264 | 264 | ||
265 | #define __copy_to_user_inatomic ___copy_to_user | 265 | #define __copy_to_user_inatomic __copy_to_user |
266 | #define __copy_from_user_inatomic ___copy_from_user | 266 | #define __copy_from_user_inatomic __copy_from_user |
267 | 267 | ||
268 | struct pt_regs; | 268 | struct pt_regs; |
269 | extern unsigned long compute_effective_address(struct pt_regs *, | 269 | extern unsigned long compute_effective_address(struct pt_regs *, |
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index 070ed141aac7..76663b019eb5 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c | |||
@@ -854,7 +854,7 @@ int dma_supported(struct device *dev, u64 device_mask) | |||
854 | return 1; | 854 | return 1; |
855 | 855 | ||
856 | #ifdef CONFIG_PCI | 856 | #ifdef CONFIG_PCI |
857 | if (dev->bus == &pci_bus_type) | 857 | if (dev_is_pci(dev)) |
858 | return pci64_dma_supported(to_pci_dev(dev), device_mask); | 858 | return pci64_dma_supported(to_pci_dev(dev), device_mask); |
859 | #endif | 859 | #endif |
860 | 860 | ||
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c index 2096468de9b2..e7e215dfa866 100644 --- a/arch/sparc/kernel/ioport.c +++ b/arch/sparc/kernel/ioport.c | |||
@@ -666,10 +666,9 @@ EXPORT_SYMBOL(dma_ops); | |||
666 | */ | 666 | */ |
667 | int dma_supported(struct device *dev, u64 mask) | 667 | int dma_supported(struct device *dev, u64 mask) |
668 | { | 668 | { |
669 | #ifdef CONFIG_PCI | 669 | if (dev_is_pci(dev)) |
670 | if (dev->bus == &pci_bus_type) | ||
671 | return 1; | 670 | return 1; |
672 | #endif | 671 | |
673 | return 0; | 672 | return 0; |
674 | } | 673 | } |
675 | EXPORT_SYMBOL(dma_supported); | 674 | EXPORT_SYMBOL(dma_supported); |
diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c index 60b19f50c80a..b45fe3fb4d2c 100644 --- a/arch/sparc/kernel/kgdb_64.c +++ b/arch/sparc/kernel/kgdb_64.c | |||
@@ -6,6 +6,7 @@ | |||
6 | #include <linux/kgdb.h> | 6 | #include <linux/kgdb.h> |
7 | #include <linux/kdebug.h> | 7 | #include <linux/kdebug.h> |
8 | #include <linux/ftrace.h> | 8 | #include <linux/ftrace.h> |
9 | #include <linux/context_tracking.h> | ||
9 | 10 | ||
10 | #include <asm/cacheflush.h> | 11 | #include <asm/cacheflush.h> |
11 | #include <asm/kdebug.h> | 12 | #include <asm/kdebug.h> |
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index b66a5338231e..b085311dcd0e 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c | |||
@@ -123,11 +123,12 @@ void smp_callin(void) | |||
123 | rmb(); | 123 | rmb(); |
124 | 124 | ||
125 | set_cpu_online(cpuid, true); | 125 | set_cpu_online(cpuid, true); |
126 | local_irq_enable(); | ||
127 | 126 | ||
128 | /* idle thread is expected to have preempt disabled */ | 127 | /* idle thread is expected to have preempt disabled */ |
129 | preempt_disable(); | 128 | preempt_disable(); |
130 | 129 | ||
130 | local_irq_enable(); | ||
131 | |||
131 | cpu_startup_entry(CPUHP_ONLINE); | 132 | cpu_startup_entry(CPUHP_ONLINE); |
132 | } | 133 | } |
133 | 134 | ||
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index e903c71f7e69..0952ecd60eca 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -26,6 +26,7 @@ config X86 | |||
26 | select HAVE_AOUT if X86_32 | 26 | select HAVE_AOUT if X86_32 |
27 | select HAVE_UNSTABLE_SCHED_CLOCK | 27 | select HAVE_UNSTABLE_SCHED_CLOCK |
28 | select ARCH_SUPPORTS_NUMA_BALANCING | 28 | select ARCH_SUPPORTS_NUMA_BALANCING |
29 | select ARCH_SUPPORTS_INT128 if X86_64 | ||
29 | select ARCH_WANTS_PROT_NUMA_PROT_NONE | 30 | select ARCH_WANTS_PROT_NUMA_PROT_NONE |
30 | select HAVE_IDE | 31 | select HAVE_IDE |
31 | select HAVE_OPROFILE | 32 | select HAVE_OPROFILE |
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h index c49a613c6452..cea1c76d49bf 100644 --- a/arch/x86/include/asm/fpu-internal.h +++ b/arch/x86/include/asm/fpu-internal.h | |||
@@ -293,12 +293,13 @@ static inline int restore_fpu_checking(struct task_struct *tsk) | |||
293 | /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception | 293 | /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception |
294 | is pending. Clear the x87 state here by setting it to fixed | 294 | is pending. Clear the x87 state here by setting it to fixed |
295 | values. "m" is a random variable that should be in L1 */ | 295 | values. "m" is a random variable that should be in L1 */ |
296 | alternative_input( | 296 | if (unlikely(static_cpu_has(X86_FEATURE_FXSAVE_LEAK))) { |
297 | ASM_NOP8 ASM_NOP2, | 297 | asm volatile( |
298 | "emms\n\t" /* clear stack tags */ | 298 | "fnclex\n\t" |
299 | "fildl %P[addr]", /* set F?P to defined value */ | 299 | "emms\n\t" |
300 | X86_FEATURE_FXSAVE_LEAK, | 300 | "fildl %P[addr]" /* set F?P to defined value */ |
301 | [addr] "m" (tsk->thread.fpu.has_fpu)); | 301 | : : [addr] "m" (tsk->thread.fpu.has_fpu)); |
302 | } | ||
302 | 303 | ||
303 | return fpu_restore_checking(&tsk->thread.fpu); | 304 | return fpu_restore_checking(&tsk->thread.fpu); |
304 | } | 305 | } |
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 3d1999458709..bbc8b12fa443 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h | |||
@@ -452,9 +452,16 @@ static inline int pte_present(pte_t a) | |||
452 | } | 452 | } |
453 | 453 | ||
454 | #define pte_accessible pte_accessible | 454 | #define pte_accessible pte_accessible |
455 | static inline int pte_accessible(pte_t a) | 455 | static inline bool pte_accessible(struct mm_struct *mm, pte_t a) |
456 | { | 456 | { |
457 | return pte_flags(a) & _PAGE_PRESENT; | 457 | if (pte_flags(a) & _PAGE_PRESENT) |
458 | return true; | ||
459 | |||
460 | if ((pte_flags(a) & (_PAGE_PROTNONE | _PAGE_NUMA)) && | ||
461 | mm_tlb_flush_pending(mm)) | ||
462 | return true; | ||
463 | |||
464 | return false; | ||
458 | } | 465 | } |
459 | 466 | ||
460 | static inline int pte_hidden(pte_t pte) | 467 | static inline int pte_hidden(pte_t pte) |
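The pgtable.h hunk above widens pte_accessible() to take the mm and to report a PROT_NONE/NUMA pte as accessible while mm_tlb_flush_pending() is true, the idea being that other CPUs may still hold the old, accessible translation until the pending flush completes. Reduced to plain C with made-up flag names (not the kernel's pte or flag types):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_PRESENT	0x1		/* illustrative flag values */
#define PAGE_PROTNONE	0x2
#define PAGE_NUMA	0x4

struct mm {
	bool tlb_flush_pending;		/* stands in for mm_tlb_flush_pending() */
};

static bool pte_accessible(const struct mm *mm, unsigned int pte_flags)
{
	if (pte_flags & PAGE_PRESENT)
		return true;

	/* Not present, but remote CPUs may still see the old translation
	 * until the pending TLB flush completes, so treat it as accessible. */
	if ((pte_flags & (PAGE_PROTNONE | PAGE_NUMA)) && mm->tlb_flush_pending)
		return true;

	return false;
}

int main(void)
{
	struct mm mm = { .tlb_flush_pending = true };

	printf("%d %d\n",
	       pte_accessible(&mm, PAGE_NUMA),	/* 1: flush still pending */
	       pte_accessible(&mm, 0));		/* 0: plain non-present pte */
	return 0;
}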
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h index 8729723636fd..c8b051933b1b 100644 --- a/arch/x86/include/asm/preempt.h +++ b/arch/x86/include/asm/preempt.h | |||
@@ -8,6 +8,12 @@ | |||
8 | DECLARE_PER_CPU(int, __preempt_count); | 8 | DECLARE_PER_CPU(int, __preempt_count); |
9 | 9 | ||
10 | /* | 10 | /* |
11 | * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such | ||
12 | * that a decrement hitting 0 means we can and should reschedule. | ||
13 | */ | ||
14 | #define PREEMPT_ENABLED (0 + PREEMPT_NEED_RESCHED) | ||
15 | |||
16 | /* | ||
11 | * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users | 17 | * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users |
12 | * that think a non-zero value indicates we cannot preempt. | 18 | * that think a non-zero value indicates we cannot preempt. |
13 | */ | 19 | */ |
@@ -74,6 +80,11 @@ static __always_inline void __preempt_count_sub(int val) | |||
74 | __this_cpu_add_4(__preempt_count, -val); | 80 | __this_cpu_add_4(__preempt_count, -val); |
75 | } | 81 | } |
76 | 82 | ||
83 | /* | ||
84 | * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to reschedule | ||
85 | * a decrement which hits zero means we have no preempt_count and should | ||
86 | * reschedule. | ||
87 | */ | ||
77 | static __always_inline bool __preempt_count_dec_and_test(void) | 88 | static __always_inline bool __preempt_count_dec_and_test(void) |
78 | { | 89 | { |
79 | GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e"); | 90 | GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e"); |
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index dc1ec0dff939..ea04b342c026 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
@@ -387,7 +387,8 @@ static void init_intel(struct cpuinfo_x86 *c) | |||
387 | set_cpu_cap(c, X86_FEATURE_PEBS); | 387 | set_cpu_cap(c, X86_FEATURE_PEBS); |
388 | } | 388 | } |
389 | 389 | ||
390 | if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush) | 390 | if (c->x86 == 6 && cpu_has_clflush && |
391 | (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47)) | ||
391 | set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR); | 392 | set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR); |
392 | 393 | ||
393 | #ifdef CONFIG_X86_64 | 394 | #ifdef CONFIG_X86_64 |
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index fd00bb29425d..c1a861829d81 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h | |||
@@ -262,11 +262,20 @@ struct cpu_hw_events { | |||
262 | __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \ | 262 | __EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \ |
263 | HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW) | 263 | HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW) |
264 | 264 | ||
265 | #define EVENT_CONSTRAINT_END \ | 265 | /* |
266 | EVENT_CONSTRAINT(0, 0, 0) | 266 | * We define the end marker as having a weight of -1 |
267 | * to enable blacklisting of events using a counter bitmask | ||
268 | * of zero and thus a weight of zero. | ||
269 | * The end marker has a weight that cannot possibly be | ||
270 | * obtained from counting the bits in the bitmask. | ||
271 | */ | ||
272 | #define EVENT_CONSTRAINT_END { .weight = -1 } | ||
267 | 273 | ||
274 | /* | ||
275 | * Check for end marker with weight == -1 | ||
276 | */ | ||
268 | #define for_each_event_constraint(e, c) \ | 277 | #define for_each_event_constraint(e, c) \ |
269 | for ((e) = (c); (e)->weight; (e)++) | 278 | for ((e) = (c); (e)->weight != -1; (e)++) |
270 | 279 | ||
271 | /* | 280 | /* |
272 | * Extra registers for specific events. | 281 | * Extra registers for specific events. |
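The perf_event.h hunk above changes the constraint-table terminator from a zero-weight entry to one with weight -1, so that a genuine constraint whose counter bitmask is empty (and whose weight is therefore 0, i.e. a blacklisted event) is no longer mistaken for the end of the table. The same sentinel pattern, boiled down to plain C with an illustrative struct:

#include <stdio.h>

struct constraint {
	unsigned long idxmsk;	/* allowed-counter bitmask */
	int weight;		/* number of bits set in idxmsk */
};

#define CONSTRAINT(mask, w)	{ .idxmsk = (mask), .weight = (w) }
#define CONSTRAINT_END		{ .weight = -1 }	/* impossible weight */

static const struct constraint table[] = {
	CONSTRAINT(0x3, 2),
	CONSTRAINT(0x0, 0),	/* blacklisted event: no counters allowed */
	CONSTRAINT_END,
};

int main(void)
{
	const struct constraint *c;

	/* Iterate until the -1 sentinel; the weight-0 entry is still visited,
	 * whereas the old loop stopped at the first zero weight and would
	 * have treated it as the end of the table. */
	for (c = table; c->weight != -1; c++)
		printf("mask=%#lx weight=%d\n", c->idxmsk, c->weight);
	return 0;
}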
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c index e09f0bfb7b8f..4b8e4d3cd6ea 100644 --- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c +++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/pci.h> | 11 | #include <linux/pci.h> |
12 | #include <linux/ptrace.h> | 12 | #include <linux/ptrace.h> |
13 | #include <linux/syscore_ops.h> | ||
13 | 14 | ||
14 | #include <asm/apic.h> | 15 | #include <asm/apic.h> |
15 | 16 | ||
@@ -816,6 +817,18 @@ out: | |||
816 | return ret; | 817 | return ret; |
817 | } | 818 | } |
818 | 819 | ||
820 | static void ibs_eilvt_setup(void) | ||
821 | { | ||
822 | /* | ||
823 | * Force LVT offset assignment for family 10h: The offsets are | ||
824 | * not assigned by the BIOS for this family, so the OS is | ||
825 | * responsible for doing it. If the OS assignment fails, fall | ||
826 | * back to BIOS settings and try to setup this. | ||
827 | */ | ||
828 | if (boot_cpu_data.x86 == 0x10) | ||
829 | force_ibs_eilvt_setup(); | ||
830 | } | ||
831 | |||
819 | static inline int get_ibs_lvt_offset(void) | 832 | static inline int get_ibs_lvt_offset(void) |
820 | { | 833 | { |
821 | u64 val; | 834 | u64 val; |
@@ -851,6 +864,36 @@ static void clear_APIC_ibs(void *dummy) | |||
851 | setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1); | 864 | setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1); |
852 | } | 865 | } |
853 | 866 | ||
867 | #ifdef CONFIG_PM | ||
868 | |||
869 | static int perf_ibs_suspend(void) | ||
870 | { | ||
871 | clear_APIC_ibs(NULL); | ||
872 | return 0; | ||
873 | } | ||
874 | |||
875 | static void perf_ibs_resume(void) | ||
876 | { | ||
877 | ibs_eilvt_setup(); | ||
878 | setup_APIC_ibs(NULL); | ||
879 | } | ||
880 | |||
881 | static struct syscore_ops perf_ibs_syscore_ops = { | ||
882 | .resume = perf_ibs_resume, | ||
883 | .suspend = perf_ibs_suspend, | ||
884 | }; | ||
885 | |||
886 | static void perf_ibs_pm_init(void) | ||
887 | { | ||
888 | register_syscore_ops(&perf_ibs_syscore_ops); | ||
889 | } | ||
890 | |||
891 | #else | ||
892 | |||
893 | static inline void perf_ibs_pm_init(void) { } | ||
894 | |||
895 | #endif | ||
896 | |||
854 | static int | 897 | static int |
855 | perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) | 898 | perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) |
856 | { | 899 | { |
@@ -877,18 +920,12 @@ static __init int amd_ibs_init(void) | |||
877 | if (!caps) | 920 | if (!caps) |
878 | return -ENODEV; /* ibs not supported by the cpu */ | 921 | return -ENODEV; /* ibs not supported by the cpu */ |
879 | 922 | ||
880 | /* | 923 | ibs_eilvt_setup(); |
881 | * Force LVT offset assignment for family 10h: The offsets are | ||
882 | * not assigned by the BIOS for this family, so the OS is | ||
883 | * responsible for doing it. If the OS assignment fails, fall | ||
884 | * back to BIOS settings and try to setup this. | ||
885 | */ | ||
886 | if (boot_cpu_data.x86 == 0x10) | ||
887 | force_ibs_eilvt_setup(); | ||
888 | 924 | ||
889 | if (!ibs_eilvt_valid()) | 925 | if (!ibs_eilvt_valid()) |
890 | goto out; | 926 | goto out; |
891 | 927 | ||
928 | perf_ibs_pm_init(); | ||
892 | get_online_cpus(); | 929 | get_online_cpus(); |
893 | ibs_caps = caps; | 930 | ibs_caps = caps; |
894 | /* make ibs_caps visible to other cpus: */ | 931 | /* make ibs_caps visible to other cpus: */ |
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 51e2988c5728..a2a4f4697889 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S | |||
@@ -1082,7 +1082,7 @@ ENTRY(ftrace_caller) | |||
1082 | pushl $0 /* Pass NULL as regs pointer */ | 1082 | pushl $0 /* Pass NULL as regs pointer */ |
1083 | movl 4*4(%esp), %eax | 1083 | movl 4*4(%esp), %eax |
1084 | movl 0x4(%ebp), %edx | 1084 | movl 0x4(%ebp), %edx |
1085 | leal function_trace_op, %ecx | 1085 | movl function_trace_op, %ecx |
1086 | subl $MCOUNT_INSN_SIZE, %eax | 1086 | subl $MCOUNT_INSN_SIZE, %eax |
1087 | 1087 | ||
1088 | .globl ftrace_call | 1088 | .globl ftrace_call |
@@ -1140,7 +1140,7 @@ ENTRY(ftrace_regs_caller) | |||
1140 | movl 12*4(%esp), %eax /* Load ip (1st parameter) */ | 1140 | movl 12*4(%esp), %eax /* Load ip (1st parameter) */ |
1141 | subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */ | 1141 | subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */ |
1142 | movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */ | 1142 | movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */ |
1143 | leal function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */ | 1143 | movl function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */ |
1144 | pushl %esp /* Save pt_regs as 4th parameter */ | 1144 | pushl %esp /* Save pt_regs as 4th parameter */ |
1145 | 1145 | ||
1146 | GLOBAL(ftrace_regs_call) | 1146 | GLOBAL(ftrace_regs_call) |
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index e21b0785a85b..1e96c3628bf2 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
@@ -88,7 +88,7 @@ END(function_hook) | |||
88 | MCOUNT_SAVE_FRAME \skip | 88 | MCOUNT_SAVE_FRAME \skip |
89 | 89 | ||
90 | /* Load the ftrace_ops into the 3rd parameter */ | 90 | /* Load the ftrace_ops into the 3rd parameter */ |
91 | leaq function_trace_op, %rdx | 91 | movq function_trace_op(%rip), %rdx |
92 | 92 | ||
93 | /* Load ip into the first parameter */ | 93 | /* Load ip into the first parameter */ |
94 | movq RIP(%rsp), %rdi | 94 | movq RIP(%rsp), %rdi |
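The entry_32.S/entry_64.S hunks above switch leal/leaq to movl/movq because function_trace_op is a global pointer variable: the tracer callback wants the pointer's current value (a struct ftrace_ops *), whereas lea would pass the address of the variable itself. The same distinction in C, with illustrative names:

#include <stdio.h>

struct ops { const char *name; };

static struct ops list_ops = { "list" };
static struct ops *function_trace_op = &list_ops;	/* updated at runtime */

static void callback(struct ops *op)
{
	printf("called with ops '%s'\n", op->name);
}

int main(void)
{
	/* movq function_trace_op(%rip), %rdx  ~  pass the pointer's value */
	callback(function_trace_op);

	/* leaq function_trace_op, %rdx would instead pass &function_trace_op,
	 * i.e. a struct ops **, which the callback would misinterpret. */
	return 0;
}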
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index dec48bfaddb8..1673940cf9c3 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c | |||
@@ -1350,6 +1350,10 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value) | |||
1350 | return; | 1350 | return; |
1351 | } | 1351 | } |
1352 | 1352 | ||
1353 | if (!kvm_vcpu_is_bsp(apic->vcpu)) | ||
1354 | value &= ~MSR_IA32_APICBASE_BSP; | ||
1355 | vcpu->arch.apic_base = value; | ||
1356 | |||
1353 | /* update jump label if enable bit changes */ | 1357 | /* update jump label if enable bit changes */ |
1354 | if ((vcpu->arch.apic_base ^ value) & MSR_IA32_APICBASE_ENABLE) { | 1358 | if ((vcpu->arch.apic_base ^ value) & MSR_IA32_APICBASE_ENABLE) { |
1355 | if (value & MSR_IA32_APICBASE_ENABLE) | 1359 | if (value & MSR_IA32_APICBASE_ENABLE) |
@@ -1359,10 +1363,6 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value) | |||
1359 | recalculate_apic_map(vcpu->kvm); | 1363 | recalculate_apic_map(vcpu->kvm); |
1360 | } | 1364 | } |
1361 | 1365 | ||
1362 | if (!kvm_vcpu_is_bsp(apic->vcpu)) | ||
1363 | value &= ~MSR_IA32_APICBASE_BSP; | ||
1364 | |||
1365 | vcpu->arch.apic_base = value; | ||
1366 | if ((old_value ^ value) & X2APIC_ENABLE) { | 1366 | if ((old_value ^ value) & X2APIC_ENABLE) { |
1367 | if (value & X2APIC_ENABLE) { | 1367 | if (value & X2APIC_ENABLE) { |
1368 | u32 id = kvm_apic_id(apic); | 1368 | u32 id = kvm_apic_id(apic); |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index b2fe1c252f35..da7837e1349d 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -8283,8 +8283,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, | |||
8283 | vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); | 8283 | vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); |
8284 | kvm_set_cr4(vcpu, vmcs12->host_cr4); | 8284 | kvm_set_cr4(vcpu, vmcs12->host_cr4); |
8285 | 8285 | ||
8286 | if (nested_cpu_has_ept(vmcs12)) | 8286 | nested_ept_uninit_mmu_context(vcpu); |
8287 | nested_ept_uninit_mmu_context(vcpu); | ||
8288 | 8287 | ||
8289 | kvm_set_cr3(vcpu, vmcs12->host_cr3); | 8288 | kvm_set_cr3(vcpu, vmcs12->host_cr3); |
8290 | kvm_mmu_reset_context(vcpu); | 8289 | kvm_mmu_reset_context(vcpu); |
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 9ff85bb8dd69..9d591c895803 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
@@ -641,6 +641,20 @@ no_context(struct pt_regs *regs, unsigned long error_code, | |||
641 | 641 | ||
642 | /* Are we prepared to handle this kernel fault? */ | 642 | /* Are we prepared to handle this kernel fault? */ |
643 | if (fixup_exception(regs)) { | 643 | if (fixup_exception(regs)) { |
644 | /* | ||
645 | * Any interrupt that takes a fault gets the fixup. This makes | ||
646 | * the below recursive fault logic only apply to faults from | ||
647 | * task context. | ||
648 | */ | ||
649 | if (in_interrupt()) | ||
650 | return; | ||
651 | |||
652 | /* | ||
653 | * Per the above we're !in_interrupt(), aka. task context. | ||
654 | * | ||
655 | * In this case we need to make sure we're not recursively | ||
656 | * faulting through the emulate_vsyscall() logic. | ||
657 | */ | ||
644 | if (current_thread_info()->sig_on_uaccess_error && signal) { | 658 | if (current_thread_info()->sig_on_uaccess_error && signal) { |
645 | tsk->thread.trap_nr = X86_TRAP_PF; | 659 | tsk->thread.trap_nr = X86_TRAP_PF; |
646 | tsk->thread.error_code = error_code | PF_USER; | 660 | tsk->thread.error_code = error_code | PF_USER; |
@@ -649,6 +663,10 @@ no_context(struct pt_regs *regs, unsigned long error_code, | |||
649 | /* XXX: hwpoison faults will set the wrong code. */ | 663 | /* XXX: hwpoison faults will set the wrong code. */ |
650 | force_sig_info_fault(signal, si_code, address, tsk, 0); | 664 | force_sig_info_fault(signal, si_code, address, tsk, 0); |
651 | } | 665 | } |
666 | |||
667 | /* | ||
668 | * Barring that, we can do the fixup and be happy. | ||
669 | */ | ||
652 | return; | 670 | return; |
653 | } | 671 | } |
654 | 672 | ||
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c index dd74e46828c0..0596e8e0cc19 100644 --- a/arch/x86/mm/gup.c +++ b/arch/x86/mm/gup.c | |||
@@ -83,6 +83,12 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, | |||
83 | pte_t pte = gup_get_pte(ptep); | 83 | pte_t pte = gup_get_pte(ptep); |
84 | struct page *page; | 84 | struct page *page; |
85 | 85 | ||
86 | /* Similar to the PMD case, NUMA hinting must take slow path */ | ||
87 | if (pte_numa(pte)) { | ||
88 | pte_unmap(ptep); | ||
89 | return 0; | ||
90 | } | ||
91 | |||
86 | if ((pte_flags(pte) & (mask | _PAGE_SPECIAL)) != mask) { | 92 | if ((pte_flags(pte) & (mask | _PAGE_SPECIAL)) != mask) { |
87 | pte_unmap(ptep); | 93 | pte_unmap(ptep); |
88 | return 0; | 94 | return 0; |
@@ -167,6 +173,13 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, | |||
167 | if (pmd_none(pmd) || pmd_trans_splitting(pmd)) | 173 | if (pmd_none(pmd) || pmd_trans_splitting(pmd)) |
168 | return 0; | 174 | return 0; |
169 | if (unlikely(pmd_large(pmd))) { | 175 | if (unlikely(pmd_large(pmd))) { |
176 | /* | ||
177 | * NUMA hinting faults need to be handled in the GUP | ||
178 | * slowpath for accounting purposes and so that they | ||
179 | * can be serialised against THP migration. | ||
180 | */ | ||
181 | if (pmd_numa(pmd)) | ||
182 | return 0; | ||
170 | if (!gup_huge_pmd(pmd, addr, next, write, pages, nr)) | 183 | if (!gup_huge_pmd(pmd, addr, next, write, pages, nr)) |
171 | return 0; | 184 | return 0; |
172 | } else { | 185 | } else { |
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c index 2ada505067cc..eb5d7a56f8d4 100644 --- a/arch/x86/vdso/vclock_gettime.c +++ b/arch/x86/vdso/vclock_gettime.c | |||
@@ -178,7 +178,7 @@ notrace static int __always_inline do_realtime(struct timespec *ts) | |||
178 | 178 | ||
179 | ts->tv_nsec = 0; | 179 | ts->tv_nsec = 0; |
180 | do { | 180 | do { |
181 | seq = read_seqcount_begin_no_lockdep(&gtod->seq); | 181 | seq = raw_read_seqcount_begin(&gtod->seq); |
182 | mode = gtod->clock.vclock_mode; | 182 | mode = gtod->clock.vclock_mode; |
183 | ts->tv_sec = gtod->wall_time_sec; | 183 | ts->tv_sec = gtod->wall_time_sec; |
184 | ns = gtod->wall_time_snsec; | 184 | ns = gtod->wall_time_snsec; |
@@ -198,7 +198,7 @@ notrace static int do_monotonic(struct timespec *ts) | |||
198 | 198 | ||
199 | ts->tv_nsec = 0; | 199 | ts->tv_nsec = 0; |
200 | do { | 200 | do { |
201 | seq = read_seqcount_begin_no_lockdep(&gtod->seq); | 201 | seq = raw_read_seqcount_begin(&gtod->seq); |
202 | mode = gtod->clock.vclock_mode; | 202 | mode = gtod->clock.vclock_mode; |
203 | ts->tv_sec = gtod->monotonic_time_sec; | 203 | ts->tv_sec = gtod->monotonic_time_sec; |
204 | ns = gtod->monotonic_time_snsec; | 204 | ns = gtod->monotonic_time_snsec; |
@@ -214,7 +214,7 @@ notrace static int do_realtime_coarse(struct timespec *ts) | |||
214 | { | 214 | { |
215 | unsigned long seq; | 215 | unsigned long seq; |
216 | do { | 216 | do { |
217 | seq = read_seqcount_begin_no_lockdep(&gtod->seq); | 217 | seq = raw_read_seqcount_begin(&gtod->seq); |
218 | ts->tv_sec = gtod->wall_time_coarse.tv_sec; | 218 | ts->tv_sec = gtod->wall_time_coarse.tv_sec; |
219 | ts->tv_nsec = gtod->wall_time_coarse.tv_nsec; | 219 | ts->tv_nsec = gtod->wall_time_coarse.tv_nsec; |
220 | } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); | 220 | } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); |
@@ -225,7 +225,7 @@ notrace static int do_monotonic_coarse(struct timespec *ts) | |||
225 | { | 225 | { |
226 | unsigned long seq; | 226 | unsigned long seq; |
227 | do { | 227 | do { |
228 | seq = read_seqcount_begin_no_lockdep(&gtod->seq); | 228 | seq = raw_read_seqcount_begin(&gtod->seq); |
229 | ts->tv_sec = gtod->monotonic_time_coarse.tv_sec; | 229 | ts->tv_sec = gtod->monotonic_time_coarse.tv_sec; |
230 | ts->tv_nsec = gtod->monotonic_time_coarse.tv_nsec; | 230 | ts->tv_nsec = gtod->monotonic_time_coarse.tv_nsec; |
231 | } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); | 231 | } while (unlikely(read_seqcount_retry(&gtod->seq, seq))); |
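The vclock_gettime.c hunks above switch the vDSO to the raw_ variant of the seqcount read-begin primitive; the surrounding retry loop is the classic seqcount reader pattern. A single-threaded sketch of that loop shape in plain C11 (names and the fence choice are illustrative, not the kernel's seqlock implementation):

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned int seq;		/* even: stable, odd: writer active */
static unsigned long wall_sec, wall_nsec;	/* data protected by seq */

static unsigned int read_begin(void)
{
	unsigned int s;

	/* Wait for an even (stable) sequence value before reading the data. */
	while ((s = atomic_load_explicit(&seq, memory_order_acquire)) & 1)
		;
	return s;
}

static int read_retry(unsigned int start)
{
	/* Rough stand-in for the kernel's smp_rmb() before the re-check. */
	atomic_thread_fence(memory_order_acquire);
	return atomic_load_explicit(&seq, memory_order_relaxed) != start;
}

int main(void)
{
	unsigned long sec, nsec;
	unsigned int s;

	do {
		s = read_begin();
		sec  = wall_sec;	/* snapshot the protected data */
		nsec = wall_nsec;
	} while (read_retry(s));	/* a writer raced with us: try again */

	printf("%lu.%09lu\n", sec, nsec);
	return 0;
}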