author     Ingo Molnar <mingo@kernel.org>  2015-04-02 11:17:46 -0400
committer  Ingo Molnar <mingo@kernel.org>  2015-04-02 11:17:46 -0400
commit     c2b078e78ace39710356a7bb6b984177d942a699 (patch)
tree       e9617aec87d9720e902ce1fca983a955a6104d40
parent     8062382c8dbe2dc11d37e7f0b139508cf10de9d4 (diff)
parent     c420f19b9cdc59662dbb56677417487efc1729ec (diff)
Merge branch 'perf/urgent' into perf/core, before applying dependent patches
Signed-off-by: Ingo Molnar <mingo@kernel.org>
37 files changed, 269 insertions, 162 deletions
diff --git a/MAINTAINERS b/MAINTAINERS index 88c09ca2584f..1de6afa8ee51 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -1362,6 +1362,7 @@ F: drivers/i2c/busses/i2c-rk3x.c | |||
1362 | F: drivers/*/*rockchip* | 1362 | F: drivers/*/*rockchip* |
1363 | F: drivers/*/*/*rockchip* | 1363 | F: drivers/*/*/*rockchip* |
1364 | F: sound/soc/rockchip/ | 1364 | F: sound/soc/rockchip/ |
1365 | N: rockchip | ||
1365 | 1366 | ||
1366 | ARM/SAMSUNG EXYNOS ARM ARCHITECTURES | 1367 | ARM/SAMSUNG EXYNOS ARM ARCHITECTURES |
1367 | M: Kukjin Kim <kgene@kernel.org> | 1368 | M: Kukjin Kim <kgene@kernel.org> |
@@ -1,7 +1,7 @@ | |||
1 | VERSION = 4 | 1 | VERSION = 4 |
2 | PATCHLEVEL = 0 | 2 | PATCHLEVEL = 0 |
3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
4 | EXTRAVERSION = -rc5 | 4 | EXTRAVERSION = -rc6 |
5 | NAME = Hurr durr I'ma sheep | 5 | NAME = Hurr durr I'ma sheep |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c index 114234e83caa..edda76fae83f 100644 --- a/arch/arc/kernel/signal.c +++ b/arch/arc/kernel/signal.c | |||
@@ -67,7 +67,7 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs, | |||
67 | sigset_t *set) | 67 | sigset_t *set) |
68 | { | 68 | { |
69 | int err; | 69 | int err; |
70 | err = __copy_to_user(&(sf->uc.uc_mcontext.regs), regs, | 70 | err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), regs, |
71 | sizeof(sf->uc.uc_mcontext.regs.scratch)); | 71 | sizeof(sf->uc.uc_mcontext.regs.scratch)); |
72 | err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t)); | 72 | err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t)); |
73 | 73 | ||
@@ -83,7 +83,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf) | |||
83 | if (!err) | 83 | if (!err) |
84 | set_current_blocked(&set); | 84 | set_current_blocked(&set); |
85 | 85 | ||
86 | err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs), | 86 | err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs.scratch), |
87 | sizeof(sf->uc.uc_mcontext.regs.scratch)); | 87 | sizeof(sf->uc.uc_mcontext.regs.scratch)); |
88 | 88 | ||
89 | return err; | 89 | return err; |
@@ -131,6 +131,15 @@ SYSCALL_DEFINE0(rt_sigreturn) | |||
131 | /* Don't restart from sigreturn */ | 131 | /* Don't restart from sigreturn */ |
132 | syscall_wont_restart(regs); | 132 | syscall_wont_restart(regs); |
133 | 133 | ||
134 | /* | ||
135 | * Ensure that sigreturn always returns to user mode (in case the | ||
136 | * regs saved on user stack got fudged between save and sigreturn) | ||
137 | * Otherwise it is easy to panic the kernel with a custom | ||
138 | * signal handler and/or restorer which clobberes the status32/ret | ||
139 | * to return to a bogus location in kernel mode. | ||
140 | */ | ||
141 | regs->status32 |= STATUS_U_MASK; | ||
142 | |||
134 | return regs->r0; | 143 | return regs->r0; |
135 | 144 | ||
136 | badframe: | 145 | badframe: |
@@ -229,8 +238,11 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) | |||
229 | 238 | ||
230 | /* | 239 | /* |
231 | * handler returns using sigreturn stub provided already by userpsace | 240 | * handler returns using sigreturn stub provided already by userpsace |
241 | * If not, nuke the process right away | ||
232 | */ | 242 | */ |
233 | BUG_ON(!(ksig->ka.sa.sa_flags & SA_RESTORER)); | 243 | if(!(ksig->ka.sa.sa_flags & SA_RESTORER)) |
244 | return 1; | ||
245 | |||
234 | regs->blink = (unsigned long)ksig->ka.sa.sa_restorer; | 246 | regs->blink = (unsigned long)ksig->ka.sa.sa_restorer; |
235 | 247 | ||
236 | /* User Stack for signal handler will be above the frame just carved */ | 248 | /* User Stack for signal handler will be above the frame just carved */ |
@@ -296,12 +308,12 @@ static void | |||
296 | handle_signal(struct ksignal *ksig, struct pt_regs *regs) | 308 | handle_signal(struct ksignal *ksig, struct pt_regs *regs) |
297 | { | 309 | { |
298 | sigset_t *oldset = sigmask_to_save(); | 310 | sigset_t *oldset = sigmask_to_save(); |
299 | int ret; | 311 | int failed; |
300 | 312 | ||
301 | /* Set up the stack frame */ | 313 | /* Set up the stack frame */ |
302 | ret = setup_rt_frame(ksig, oldset, regs); | 314 | failed = setup_rt_frame(ksig, oldset, regs); |
303 | 315 | ||
304 | signal_setup_done(ret, ksig, 0); | 316 | signal_setup_done(failed, ksig, 0); |
305 | } | 317 | } |
306 | 318 | ||
307 | void do_signal(struct pt_regs *regs) | 319 | void do_signal(struct pt_regs *regs) |
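
Note on the arch/arc/kernel/signal.c hunks above: the copy fix makes the destination and the sizeof() operand name the same member (regs.scratch), where the old code copied into the enclosing regs struct using the inner member's size, and the new STATUS_U_MASK write guarantees sigreturn can only resume in user mode even if the saved frame was tampered with. A minimal userspace sketch of the copy pattern, with made-up struct names rather than the real ARC layout:

    #include <string.h>

    struct scratch  { unsigned long r0, r1, sp, status32; };
    struct mcontext { struct { struct scratch scratch; unsigned long callee[8]; } regs; };

    /* Buggy shape: destination names the outer object, sizeof() the inner member.
     * It only "works" while scratch happens to be the first field of regs. */
    static void save_buggy(struct mcontext *sf, const struct scratch *live)
    {
        memcpy(&sf->regs, live, sizeof(sf->regs.scratch));
    }

    /* Fixed shape: destination and sizeof() refer to the same member. */
    static void save_fixed(struct mcontext *sf, const struct scratch *live)
    {
        memcpy(&sf->regs.scratch, live, sizeof(sf->regs.scratch));
    }
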
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 9f1f09a2bc9b..cf4c0c99aa25 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
@@ -619,6 +619,7 @@ config ARCH_PXA | |||
619 | select GENERIC_CLOCKEVENTS | 619 | select GENERIC_CLOCKEVENTS |
620 | select GPIO_PXA | 620 | select GPIO_PXA |
621 | select HAVE_IDE | 621 | select HAVE_IDE |
622 | select IRQ_DOMAIN | ||
622 | select MULTI_IRQ_HANDLER | 623 | select MULTI_IRQ_HANDLER |
623 | select PLAT_PXA | 624 | select PLAT_PXA |
624 | select SPARSE_IRQ | 625 | select SPARSE_IRQ |
diff --git a/arch/arm/boot/dts/dm8168-evm.dts b/arch/arm/boot/dts/dm8168-evm.dts index d3a29c1b8417..afe678f6d2e9 100644 --- a/arch/arm/boot/dts/dm8168-evm.dts +++ b/arch/arm/boot/dts/dm8168-evm.dts | |||
@@ -36,6 +36,20 @@ | |||
36 | >; | 36 | >; |
37 | }; | 37 | }; |
38 | 38 | ||
39 | mmc_pins: pinmux_mmc_pins { | ||
40 | pinctrl-single,pins = < | ||
41 | DM816X_IOPAD(0x0a70, MUX_MODE0) /* SD_POW */ | ||
42 | DM816X_IOPAD(0x0a74, MUX_MODE0) /* SD_CLK */ | ||
43 | DM816X_IOPAD(0x0a78, MUX_MODE0) /* SD_CMD */ | ||
44 | DM816X_IOPAD(0x0a7C, MUX_MODE0) /* SD_DAT0 */ | ||
45 | DM816X_IOPAD(0x0a80, MUX_MODE0) /* SD_DAT1 */ | ||
46 | DM816X_IOPAD(0x0a84, MUX_MODE0) /* SD_DAT2 */ | ||
47 | DM816X_IOPAD(0x0a88, MUX_MODE0) /* SD_DAT2 */ | ||
48 | DM816X_IOPAD(0x0a8c, MUX_MODE2) /* GP1[7] */ | ||
49 | DM816X_IOPAD(0x0a90, MUX_MODE2) /* GP1[8] */ | ||
50 | >; | ||
51 | }; | ||
52 | |||
39 | usb0_pins: pinmux_usb0_pins { | 53 | usb0_pins: pinmux_usb0_pins { |
40 | pinctrl-single,pins = < | 54 | pinctrl-single,pins = < |
41 | DM816X_IOPAD(0x0d00, MUX_MODE0) /* USB0_DRVVBUS */ | 55 | DM816X_IOPAD(0x0d00, MUX_MODE0) /* USB0_DRVVBUS */ |
@@ -137,7 +151,12 @@ | |||
137 | }; | 151 | }; |
138 | 152 | ||
139 | &mmc1 { | 153 | &mmc1 { |
154 | pinctrl-names = "default"; | ||
155 | pinctrl-0 = <&mmc_pins>; | ||
140 | vmmc-supply = <&vmmcsd_fixed>; | 156 | vmmc-supply = <&vmmcsd_fixed>; |
157 | bus-width = <4>; | ||
158 | cd-gpios = <&gpio2 7 GPIO_ACTIVE_LOW>; | ||
159 | wp-gpios = <&gpio2 8 GPIO_ACTIVE_LOW>; | ||
141 | }; | 160 | }; |
142 | 161 | ||
143 | /* At least dm8168-evm rev c won't support multipoint, later may */ | 162 | /* At least dm8168-evm rev c won't support multipoint, later may */ |
diff --git a/arch/arm/boot/dts/dm816x.dtsi b/arch/arm/boot/dts/dm816x.dtsi index 3c97b5f2addc..f35715bc6992 100644 --- a/arch/arm/boot/dts/dm816x.dtsi +++ b/arch/arm/boot/dts/dm816x.dtsi | |||
@@ -150,17 +150,27 @@ | |||
150 | }; | 150 | }; |
151 | 151 | ||
152 | gpio1: gpio@48032000 { | 152 | gpio1: gpio@48032000 { |
153 | compatible = "ti,omap3-gpio"; | 153 | compatible = "ti,omap4-gpio"; |
154 | ti,hwmods = "gpio1"; | 154 | ti,hwmods = "gpio1"; |
155 | ti,gpio-always-on; | ||
155 | reg = <0x48032000 0x1000>; | 156 | reg = <0x48032000 0x1000>; |
156 | interrupts = <97>; | 157 | interrupts = <96>; |
158 | gpio-controller; | ||
159 | #gpio-cells = <2>; | ||
160 | interrupt-controller; | ||
161 | #interrupt-cells = <2>; | ||
157 | }; | 162 | }; |
158 | 163 | ||
159 | gpio2: gpio@4804c000 { | 164 | gpio2: gpio@4804c000 { |
160 | compatible = "ti,omap3-gpio"; | 165 | compatible = "ti,omap4-gpio"; |
161 | ti,hwmods = "gpio2"; | 166 | ti,hwmods = "gpio2"; |
167 | ti,gpio-always-on; | ||
162 | reg = <0x4804c000 0x1000>; | 168 | reg = <0x4804c000 0x1000>; |
163 | interrupts = <99>; | 169 | interrupts = <98>; |
170 | gpio-controller; | ||
171 | #gpio-cells = <2>; | ||
172 | interrupt-controller; | ||
173 | #interrupt-cells = <2>; | ||
164 | }; | 174 | }; |
165 | 175 | ||
166 | gpmc: gpmc@50000000 { | 176 | gpmc: gpmc@50000000 { |
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index 127608d79033..c4659a979c41 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi | |||
@@ -1111,7 +1111,6 @@ | |||
1111 | "wkupclk", "refclk", | 1111 | "wkupclk", "refclk", |
1112 | "div-clk", "phy-div"; | 1112 | "div-clk", "phy-div"; |
1113 | #phy-cells = <0>; | 1113 | #phy-cells = <0>; |
1114 | ti,hwmods = "pcie1-phy"; | ||
1115 | }; | 1114 | }; |
1116 | 1115 | ||
1117 | pcie2_phy: pciephy@4a095000 { | 1116 | pcie2_phy: pciephy@4a095000 { |
@@ -1130,7 +1129,6 @@ | |||
1130 | "wkupclk", "refclk", | 1129 | "wkupclk", "refclk", |
1131 | "div-clk", "phy-div"; | 1130 | "div-clk", "phy-div"; |
1132 | #phy-cells = <0>; | 1131 | #phy-cells = <0>; |
1133 | ti,hwmods = "pcie2-phy"; | ||
1134 | status = "disabled"; | 1132 | status = "disabled"; |
1135 | }; | 1133 | }; |
1136 | }; | 1134 | }; |
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi index f4f78c40b564..3fdc84fddb70 100644 --- a/arch/arm/boot/dts/omap3.dtsi +++ b/arch/arm/boot/dts/omap3.dtsi | |||
@@ -92,6 +92,8 @@ | |||
92 | ti,hwmods = "aes"; | 92 | ti,hwmods = "aes"; |
93 | reg = <0x480c5000 0x50>; | 93 | reg = <0x480c5000 0x50>; |
94 | interrupts = <0>; | 94 | interrupts = <0>; |
95 | dmas = <&sdma 65 &sdma 66>; | ||
96 | dma-names = "tx", "rx"; | ||
95 | }; | 97 | }; |
96 | 98 | ||
97 | prm: prm@48306000 { | 99 | prm: prm@48306000 { |
@@ -550,6 +552,8 @@ | |||
550 | ti,hwmods = "sham"; | 552 | ti,hwmods = "sham"; |
551 | reg = <0x480c3000 0x64>; | 553 | reg = <0x480c3000 0x64>; |
552 | interrupts = <49>; | 554 | interrupts = <49>; |
555 | dmas = <&sdma 69>; | ||
556 | dma-names = "rx"; | ||
553 | }; | 557 | }; |
554 | 558 | ||
555 | smartreflex_core: smartreflex@480cb000 { | 559 | smartreflex_core: smartreflex@480cb000 { |
diff --git a/arch/arm/boot/dts/rk3288.dtsi b/arch/arm/boot/dts/rk3288.dtsi index d771f687a13b..eccc78d3220b 100644 --- a/arch/arm/boot/dts/rk3288.dtsi +++ b/arch/arm/boot/dts/rk3288.dtsi | |||
@@ -411,6 +411,7 @@ | |||
411 | "mac_clk_rx", "mac_clk_tx", | 411 | "mac_clk_rx", "mac_clk_tx", |
412 | "clk_mac_ref", "clk_mac_refout", | 412 | "clk_mac_ref", "clk_mac_refout", |
413 | "aclk_mac", "pclk_mac"; | 413 | "aclk_mac", "pclk_mac"; |
414 | status = "disabled"; | ||
414 | }; | 415 | }; |
415 | 416 | ||
416 | usb_host0_ehci: usb@ff500000 { | 417 | usb_host0_ehci: usb@ff500000 { |
diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi index 9d8760956752..d9176e606173 100644 --- a/arch/arm/boot/dts/socfpga.dtsi +++ b/arch/arm/boot/dts/socfpga.dtsi | |||
@@ -660,7 +660,7 @@ | |||
660 | #address-cells = <1>; | 660 | #address-cells = <1>; |
661 | #size-cells = <0>; | 661 | #size-cells = <0>; |
662 | reg = <0xfff01000 0x1000>; | 662 | reg = <0xfff01000 0x1000>; |
663 | interrupts = <0 156 4>; | 663 | interrupts = <0 155 4>; |
664 | num-cs = <4>; | 664 | num-cs = <4>; |
665 | clocks = <&spi_m_clk>; | 665 | clocks = <&spi_m_clk>; |
666 | status = "disabled"; | 666 | status = "disabled"; |
diff --git a/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts b/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts index ab7891c43231..75742f8f96f3 100644 --- a/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts +++ b/arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts | |||
@@ -56,6 +56,22 @@ | |||
56 | model = "Olimex A10-OLinuXino-LIME"; | 56 | model = "Olimex A10-OLinuXino-LIME"; |
57 | compatible = "olimex,a10-olinuxino-lime", "allwinner,sun4i-a10"; | 57 | compatible = "olimex,a10-olinuxino-lime", "allwinner,sun4i-a10"; |
58 | 58 | ||
59 | cpus { | ||
60 | cpu0: cpu@0 { | ||
61 | /* | ||
62 | * The A10-Lime is known to be unstable | ||
63 | * when running at 1008 MHz | ||
64 | */ | ||
65 | operating-points = < | ||
66 | /* kHz uV */ | ||
67 | 912000 1350000 | ||
68 | 864000 1300000 | ||
69 | 624000 1250000 | ||
70 | >; | ||
71 | cooling-max-level = <2>; | ||
72 | }; | ||
73 | }; | ||
74 | |||
59 | soc@01c00000 { | 75 | soc@01c00000 { |
60 | emac: ethernet@01c0b000 { | 76 | emac: ethernet@01c0b000 { |
61 | pinctrl-names = "default"; | 77 | pinctrl-names = "default"; |
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi index 5c2925831f20..eebb7853e00b 100644 --- a/arch/arm/boot/dts/sun4i-a10.dtsi +++ b/arch/arm/boot/dts/sun4i-a10.dtsi | |||
@@ -75,7 +75,6 @@ | |||
75 | clock-latency = <244144>; /* 8 32k periods */ | 75 | clock-latency = <244144>; /* 8 32k periods */ |
76 | operating-points = < | 76 | operating-points = < |
77 | /* kHz uV */ | 77 | /* kHz uV */ |
78 | 1056000 1500000 | ||
79 | 1008000 1400000 | 78 | 1008000 1400000 |
80 | 912000 1350000 | 79 | 912000 1350000 |
81 | 864000 1300000 | 80 | 864000 1300000 |
@@ -83,7 +82,7 @@ | |||
83 | >; | 82 | >; |
84 | #cooling-cells = <2>; | 83 | #cooling-cells = <2>; |
85 | cooling-min-level = <0>; | 84 | cooling-min-level = <0>; |
86 | cooling-max-level = <4>; | 85 | cooling-max-level = <3>; |
87 | }; | 86 | }; |
88 | }; | 87 | }; |
89 | 88 | ||
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi index f8818f1edbbe..883cb4873688 100644 --- a/arch/arm/boot/dts/sun5i-a13.dtsi +++ b/arch/arm/boot/dts/sun5i-a13.dtsi | |||
@@ -47,7 +47,6 @@ | |||
47 | clock-latency = <244144>; /* 8 32k periods */ | 47 | clock-latency = <244144>; /* 8 32k periods */ |
48 | operating-points = < | 48 | operating-points = < |
49 | /* kHz uV */ | 49 | /* kHz uV */ |
50 | 1104000 1500000 | ||
51 | 1008000 1400000 | 50 | 1008000 1400000 |
52 | 912000 1350000 | 51 | 912000 1350000 |
53 | 864000 1300000 | 52 | 864000 1300000 |
@@ -57,7 +56,7 @@ | |||
57 | >; | 56 | >; |
58 | #cooling-cells = <2>; | 57 | #cooling-cells = <2>; |
59 | cooling-min-level = <0>; | 58 | cooling-min-level = <0>; |
60 | cooling-max-level = <6>; | 59 | cooling-max-level = <5>; |
61 | }; | 60 | }; |
62 | }; | 61 | }; |
63 | 62 | ||
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi index 3a8530b79f1c..fdd181792b4b 100644 --- a/arch/arm/boot/dts/sun7i-a20.dtsi +++ b/arch/arm/boot/dts/sun7i-a20.dtsi | |||
@@ -105,7 +105,6 @@ | |||
105 | clock-latency = <244144>; /* 8 32k periods */ | 105 | clock-latency = <244144>; /* 8 32k periods */ |
106 | operating-points = < | 106 | operating-points = < |
107 | /* kHz uV */ | 107 | /* kHz uV */ |
108 | 1008000 1450000 | ||
109 | 960000 1400000 | 108 | 960000 1400000 |
110 | 912000 1400000 | 109 | 912000 1400000 |
111 | 864000 1300000 | 110 | 864000 1300000 |
@@ -116,7 +115,7 @@ | |||
116 | >; | 115 | >; |
117 | #cooling-cells = <2>; | 116 | #cooling-cells = <2>; |
118 | cooling-min-level = <0>; | 117 | cooling-min-level = <0>; |
119 | cooling-max-level = <7>; | 118 | cooling-max-level = <6>; |
120 | }; | 119 | }; |
121 | 120 | ||
122 | cpu@1 { | 121 | cpu@1 { |
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c index 2a2f4d56e4c8..25f1beea453e 100644 --- a/arch/arm/mach-omap2/id.c +++ b/arch/arm/mach-omap2/id.c | |||
@@ -720,6 +720,8 @@ static const char * __init omap_get_family(void) | |||
720 | return kasprintf(GFP_KERNEL, "OMAP4"); | 720 | return kasprintf(GFP_KERNEL, "OMAP4"); |
721 | else if (soc_is_omap54xx()) | 721 | else if (soc_is_omap54xx()) |
722 | return kasprintf(GFP_KERNEL, "OMAP5"); | 722 | return kasprintf(GFP_KERNEL, "OMAP5"); |
723 | else if (soc_is_am33xx() || soc_is_am335x()) | ||
724 | return kasprintf(GFP_KERNEL, "AM33xx"); | ||
723 | else if (soc_is_am43xx()) | 725 | else if (soc_is_am43xx()) |
724 | return kasprintf(GFP_KERNEL, "AM43xx"); | 726 | return kasprintf(GFP_KERNEL, "AM43xx"); |
725 | else if (soc_is_dra7xx()) | 727 | else if (soc_is_dra7xx()) |
diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c index 0eecd83c624e..89a7c06570d3 100644 --- a/arch/arm/mach-pxa/irq.c +++ b/arch/arm/mach-pxa/irq.c | |||
@@ -11,6 +11,7 @@ | |||
11 | * it under the terms of the GNU General Public License version 2 as | 11 | * it under the terms of the GNU General Public License version 2 as |
12 | * published by the Free Software Foundation. | 12 | * published by the Free Software Foundation. |
13 | */ | 13 | */ |
14 | #include <linux/bitops.h> | ||
14 | #include <linux/init.h> | 15 | #include <linux/init.h> |
15 | #include <linux/module.h> | 16 | #include <linux/module.h> |
16 | #include <linux/interrupt.h> | 17 | #include <linux/interrupt.h> |
@@ -40,7 +41,6 @@ | |||
40 | #define ICHP_VAL_IRQ (1 << 31) | 41 | #define ICHP_VAL_IRQ (1 << 31) |
41 | #define ICHP_IRQ(i) (((i) >> 16) & 0x7fff) | 42 | #define ICHP_IRQ(i) (((i) >> 16) & 0x7fff) |
42 | #define IPR_VALID (1 << 31) | 43 | #define IPR_VALID (1 << 31) |
43 | #define IRQ_BIT(n) (((n) - PXA_IRQ(0)) & 0x1f) | ||
44 | 44 | ||
45 | #define MAX_INTERNAL_IRQS 128 | 45 | #define MAX_INTERNAL_IRQS 128 |
46 | 46 | ||
@@ -51,6 +51,7 @@ | |||
51 | static void __iomem *pxa_irq_base; | 51 | static void __iomem *pxa_irq_base; |
52 | static int pxa_internal_irq_nr; | 52 | static int pxa_internal_irq_nr; |
53 | static bool cpu_has_ipr; | 53 | static bool cpu_has_ipr; |
54 | static struct irq_domain *pxa_irq_domain; | ||
54 | 55 | ||
55 | static inline void __iomem *irq_base(int i) | 56 | static inline void __iomem *irq_base(int i) |
56 | { | 57 | { |
@@ -66,18 +67,20 @@ static inline void __iomem *irq_base(int i) | |||
66 | void pxa_mask_irq(struct irq_data *d) | 67 | void pxa_mask_irq(struct irq_data *d) |
67 | { | 68 | { |
68 | void __iomem *base = irq_data_get_irq_chip_data(d); | 69 | void __iomem *base = irq_data_get_irq_chip_data(d); |
70 | irq_hw_number_t irq = irqd_to_hwirq(d); | ||
69 | uint32_t icmr = __raw_readl(base + ICMR); | 71 | uint32_t icmr = __raw_readl(base + ICMR); |
70 | 72 | ||
71 | icmr &= ~(1 << IRQ_BIT(d->irq)); | 73 | icmr &= ~BIT(irq & 0x1f); |
72 | __raw_writel(icmr, base + ICMR); | 74 | __raw_writel(icmr, base + ICMR); |
73 | } | 75 | } |
74 | 76 | ||
75 | void pxa_unmask_irq(struct irq_data *d) | 77 | void pxa_unmask_irq(struct irq_data *d) |
76 | { | 78 | { |
77 | void __iomem *base = irq_data_get_irq_chip_data(d); | 79 | void __iomem *base = irq_data_get_irq_chip_data(d); |
80 | irq_hw_number_t irq = irqd_to_hwirq(d); | ||
78 | uint32_t icmr = __raw_readl(base + ICMR); | 81 | uint32_t icmr = __raw_readl(base + ICMR); |
79 | 82 | ||
80 | icmr |= 1 << IRQ_BIT(d->irq); | 83 | icmr |= BIT(irq & 0x1f); |
81 | __raw_writel(icmr, base + ICMR); | 84 | __raw_writel(icmr, base + ICMR); |
82 | } | 85 | } |
83 | 86 | ||
@@ -118,40 +121,63 @@ asmlinkage void __exception_irq_entry ichp_handle_irq(struct pt_regs *regs) | |||
118 | } while (1); | 121 | } while (1); |
119 | } | 122 | } |
120 | 123 | ||
121 | void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int)) | 124 | static int pxa_irq_map(struct irq_domain *h, unsigned int virq, |
125 | irq_hw_number_t hw) | ||
122 | { | 126 | { |
123 | int irq, i, n; | 127 | void __iomem *base = irq_base(hw / 32); |
124 | 128 | ||
125 | BUG_ON(irq_nr > MAX_INTERNAL_IRQS); | 129 | /* initialize interrupt priority */ |
130 | if (cpu_has_ipr) | ||
131 | __raw_writel(hw | IPR_VALID, pxa_irq_base + IPR(hw)); | ||
132 | |||
133 | irq_set_chip_and_handler(virq, &pxa_internal_irq_chip, | ||
134 | handle_level_irq); | ||
135 | irq_set_chip_data(virq, base); | ||
136 | set_irq_flags(virq, IRQF_VALID); | ||
137 | |||
138 | return 0; | ||
139 | } | ||
140 | |||
141 | static struct irq_domain_ops pxa_irq_ops = { | ||
142 | .map = pxa_irq_map, | ||
143 | .xlate = irq_domain_xlate_onecell, | ||
144 | }; | ||
145 | |||
146 | static __init void | ||
147 | pxa_init_irq_common(struct device_node *node, int irq_nr, | ||
148 | int (*fn)(struct irq_data *, unsigned int)) | ||
149 | { | ||
150 | int n; | ||
126 | 151 | ||
127 | pxa_internal_irq_nr = irq_nr; | 152 | pxa_internal_irq_nr = irq_nr; |
128 | cpu_has_ipr = !cpu_is_pxa25x(); | 153 | pxa_irq_domain = irq_domain_add_legacy(node, irq_nr, |
129 | pxa_irq_base = io_p2v(0x40d00000); | 154 | PXA_IRQ(0), 0, |
155 | &pxa_irq_ops, NULL); | ||
156 | if (!pxa_irq_domain) | ||
157 | panic("Unable to add PXA IRQ domain\n"); | ||
158 | irq_set_default_host(pxa_irq_domain); | ||
130 | 159 | ||
131 | for (n = 0; n < irq_nr; n += 32) { | 160 | for (n = 0; n < irq_nr; n += 32) { |
132 | void __iomem *base = irq_base(n >> 5); | 161 | void __iomem *base = irq_base(n >> 5); |
133 | 162 | ||
134 | __raw_writel(0, base + ICMR); /* disable all IRQs */ | 163 | __raw_writel(0, base + ICMR); /* disable all IRQs */ |
135 | __raw_writel(0, base + ICLR); /* all IRQs are IRQ, not FIQ */ | 164 | __raw_writel(0, base + ICLR); /* all IRQs are IRQ, not FIQ */ |
136 | for (i = n; (i < (n + 32)) && (i < irq_nr); i++) { | ||
137 | /* initialize interrupt priority */ | ||
138 | if (cpu_has_ipr) | ||
139 | __raw_writel(i | IPR_VALID, pxa_irq_base + IPR(i)); | ||
140 | |||
141 | irq = PXA_IRQ(i); | ||
142 | irq_set_chip_and_handler(irq, &pxa_internal_irq_chip, | ||
143 | handle_level_irq); | ||
144 | irq_set_chip_data(irq, base); | ||
145 | set_irq_flags(irq, IRQF_VALID); | ||
146 | } | ||
147 | } | 165 | } |
148 | |||
149 | /* only unmasked interrupts kick us out of idle */ | 166 | /* only unmasked interrupts kick us out of idle */ |
150 | __raw_writel(1, irq_base(0) + ICCR); | 167 | __raw_writel(1, irq_base(0) + ICCR); |
151 | 168 | ||
152 | pxa_internal_irq_chip.irq_set_wake = fn; | 169 | pxa_internal_irq_chip.irq_set_wake = fn; |
153 | } | 170 | } |
154 | 171 | ||
172 | void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int)) | ||
173 | { | ||
174 | BUG_ON(irq_nr > MAX_INTERNAL_IRQS); | ||
175 | |||
176 | pxa_irq_base = io_p2v(0x40d00000); | ||
177 | cpu_has_ipr = !cpu_is_pxa25x(); | ||
178 | pxa_init_irq_common(NULL, irq_nr, fn); | ||
179 | } | ||
180 | |||
155 | #ifdef CONFIG_PM | 181 | #ifdef CONFIG_PM |
156 | static unsigned long saved_icmr[MAX_INTERNAL_IRQS/32]; | 182 | static unsigned long saved_icmr[MAX_INTERNAL_IRQS/32]; |
157 | static unsigned long saved_ipr[MAX_INTERNAL_IRQS]; | 183 | static unsigned long saved_ipr[MAX_INTERNAL_IRQS]; |
@@ -203,30 +229,6 @@ struct syscore_ops pxa_irq_syscore_ops = { | |||
203 | }; | 229 | }; |
204 | 230 | ||
205 | #ifdef CONFIG_OF | 231 | #ifdef CONFIG_OF |
206 | static struct irq_domain *pxa_irq_domain; | ||
207 | |||
208 | static int pxa_irq_map(struct irq_domain *h, unsigned int virq, | ||
209 | irq_hw_number_t hw) | ||
210 | { | ||
211 | void __iomem *base = irq_base(hw / 32); | ||
212 | |||
213 | /* initialize interrupt priority */ | ||
214 | if (cpu_has_ipr) | ||
215 | __raw_writel(hw | IPR_VALID, pxa_irq_base + IPR(hw)); | ||
216 | |||
217 | irq_set_chip_and_handler(hw, &pxa_internal_irq_chip, | ||
218 | handle_level_irq); | ||
219 | irq_set_chip_data(hw, base); | ||
220 | set_irq_flags(hw, IRQF_VALID); | ||
221 | |||
222 | return 0; | ||
223 | } | ||
224 | |||
225 | static struct irq_domain_ops pxa_irq_ops = { | ||
226 | .map = pxa_irq_map, | ||
227 | .xlate = irq_domain_xlate_onecell, | ||
228 | }; | ||
229 | |||
230 | static const struct of_device_id intc_ids[] __initconst = { | 232 | static const struct of_device_id intc_ids[] __initconst = { |
231 | { .compatible = "marvell,pxa-intc", }, | 233 | { .compatible = "marvell,pxa-intc", }, |
232 | {} | 234 | {} |
@@ -236,7 +238,7 @@ void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int)) | |||
236 | { | 238 | { |
237 | struct device_node *node; | 239 | struct device_node *node; |
238 | struct resource res; | 240 | struct resource res; |
239 | int n, ret; | 241 | int ret; |
240 | 242 | ||
241 | node = of_find_matching_node(NULL, intc_ids); | 243 | node = of_find_matching_node(NULL, intc_ids); |
242 | if (!node) { | 244 | if (!node) { |
@@ -267,23 +269,6 @@ void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int)) | |||
267 | return; | 269 | return; |
268 | } | 270 | } |
269 | 271 | ||
270 | pxa_irq_domain = irq_domain_add_legacy(node, pxa_internal_irq_nr, 0, 0, | 272 | pxa_init_irq_common(node, pxa_internal_irq_nr, fn); |
271 | &pxa_irq_ops, NULL); | ||
272 | if (!pxa_irq_domain) | ||
273 | panic("Unable to add PXA IRQ domain\n"); | ||
274 | |||
275 | irq_set_default_host(pxa_irq_domain); | ||
276 | |||
277 | for (n = 0; n < pxa_internal_irq_nr; n += 32) { | ||
278 | void __iomem *base = irq_base(n >> 5); | ||
279 | |||
280 | __raw_writel(0, base + ICMR); /* disable all IRQs */ | ||
281 | __raw_writel(0, base + ICLR); /* all IRQs are IRQ, not FIQ */ | ||
282 | } | ||
283 | |||
284 | /* only unmasked interrupts kick us out of idle */ | ||
285 | __raw_writel(1, irq_base(0) + ICCR); | ||
286 | |||
287 | pxa_internal_irq_chip.irq_set_wake = fn; | ||
288 | } | 273 | } |
289 | #endif /* CONFIG_OF */ | 274 | #endif /* CONFIG_OF */ |
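
The mach-pxa/irq.c rework above folds the legacy and DT init paths into pxa_init_irq_common() and drops the IRQ_BIT() macro: mask/unmask now take the hardware IRQ number from irqd_to_hwirq() and select the bit with BIT(hw & 0x1f), so the register math no longer assumes virq == PXA_IRQ(hw). A small standalone sketch of that per-bank arithmetic (names are illustrative, not the mach-pxa API):

    #include <stdio.h>
    #include <stdint.h>

    /* Each ICMR/ICLR bank covers 32 interrupts: hwirq N lives in bank N / 32,
     * bit N % 32 of that bank's register. */
    static unsigned bank(unsigned hwirq) { return hwirq >> 5; }
    static uint32_t mask(unsigned hwirq) { return 1u << (hwirq & 0x1f); }

    int main(void)
    {
        unsigned hw = 53; /* hypothetical hardware IRQ number */
        printf("hwirq %u -> bank %u, mask bit 0x%08x\n", hw, bank(hw), (unsigned)mask(hw));
        return 0;
    }
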
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c index 205f9bf3821e..ac2ae5c71ab4 100644 --- a/arch/arm/mach-pxa/zeus.c +++ b/arch/arm/mach-pxa/zeus.c | |||
@@ -412,7 +412,7 @@ static struct fixed_voltage_config can_regulator_pdata = { | |||
412 | }; | 412 | }; |
413 | 413 | ||
414 | static struct platform_device can_regulator_device = { | 414 | static struct platform_device can_regulator_device = { |
415 | .name = "reg-fixed-volage", | 415 | .name = "reg-fixed-voltage", |
416 | .id = 0, | 416 | .id = 0, |
417 | .dev = { | 417 | .dev = { |
418 | .platform_data = &can_regulator_pdata, | 418 | .platform_data = &can_regulator_pdata, |
diff --git a/arch/arm/mach-sunxi/Kconfig b/arch/arm/mach-sunxi/Kconfig index a77604fbaf25..81502b90dd91 100644 --- a/arch/arm/mach-sunxi/Kconfig +++ b/arch/arm/mach-sunxi/Kconfig | |||
@@ -1,10 +1,12 @@ | |||
1 | menuconfig ARCH_SUNXI | 1 | menuconfig ARCH_SUNXI |
2 | bool "Allwinner SoCs" if ARCH_MULTI_V7 | 2 | bool "Allwinner SoCs" if ARCH_MULTI_V7 |
3 | select ARCH_REQUIRE_GPIOLIB | 3 | select ARCH_REQUIRE_GPIOLIB |
4 | select ARCH_HAS_RESET_CONTROLLER | ||
4 | select CLKSRC_MMIO | 5 | select CLKSRC_MMIO |
5 | select GENERIC_IRQ_CHIP | 6 | select GENERIC_IRQ_CHIP |
6 | select PINCTRL | 7 | select PINCTRL |
7 | select SUN4I_TIMER | 8 | select SUN4I_TIMER |
9 | select RESET_CONTROLLER | ||
8 | 10 | ||
9 | if ARCH_SUNXI | 11 | if ARCH_SUNXI |
10 | 12 | ||
@@ -20,10 +22,8 @@ config MACH_SUN5I | |||
20 | config MACH_SUN6I | 22 | config MACH_SUN6I |
21 | bool "Allwinner A31 (sun6i) SoCs support" | 23 | bool "Allwinner A31 (sun6i) SoCs support" |
22 | default ARCH_SUNXI | 24 | default ARCH_SUNXI |
23 | select ARCH_HAS_RESET_CONTROLLER | ||
24 | select ARM_GIC | 25 | select ARM_GIC |
25 | select MFD_SUN6I_PRCM | 26 | select MFD_SUN6I_PRCM |
26 | select RESET_CONTROLLER | ||
27 | select SUN5I_HSTIMER | 27 | select SUN5I_HSTIMER |
28 | 28 | ||
29 | config MACH_SUN7I | 29 | config MACH_SUN7I |
@@ -37,16 +37,12 @@ config MACH_SUN7I | |||
37 | config MACH_SUN8I | 37 | config MACH_SUN8I |
38 | bool "Allwinner A23 (sun8i) SoCs support" | 38 | bool "Allwinner A23 (sun8i) SoCs support" |
39 | default ARCH_SUNXI | 39 | default ARCH_SUNXI |
40 | select ARCH_HAS_RESET_CONTROLLER | ||
41 | select ARM_GIC | 40 | select ARM_GIC |
42 | select MFD_SUN6I_PRCM | 41 | select MFD_SUN6I_PRCM |
43 | select RESET_CONTROLLER | ||
44 | 42 | ||
45 | config MACH_SUN9I | 43 | config MACH_SUN9I |
46 | bool "Allwinner (sun9i) SoCs support" | 44 | bool "Allwinner (sun9i) SoCs support" |
47 | default ARCH_SUNXI | 45 | default ARCH_SUNXI |
48 | select ARCH_HAS_RESET_CONTROLLER | ||
49 | select ARM_GIC | 46 | select ARM_GIC |
50 | select RESET_CONTROLLER | ||
51 | 47 | ||
52 | endif | 48 | endif |
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c index db10169a08de..8ca94d379bc3 100644 --- a/arch/arm/plat-omap/dmtimer.c +++ b/arch/arm/plat-omap/dmtimer.c | |||
@@ -799,6 +799,7 @@ static int omap_dm_timer_probe(struct platform_device *pdev) | |||
799 | struct device *dev = &pdev->dev; | 799 | struct device *dev = &pdev->dev; |
800 | const struct of_device_id *match; | 800 | const struct of_device_id *match; |
801 | const struct dmtimer_platform_data *pdata; | 801 | const struct dmtimer_platform_data *pdata; |
802 | int ret; | ||
802 | 803 | ||
803 | match = of_match_device(of_match_ptr(omap_timer_match), dev); | 804 | match = of_match_device(of_match_ptr(omap_timer_match), dev); |
804 | pdata = match ? match->data : dev->platform_data; | 805 | pdata = match ? match->data : dev->platform_data; |
@@ -860,7 +861,12 @@ static int omap_dm_timer_probe(struct platform_device *pdev) | |||
860 | } | 861 | } |
861 | 862 | ||
862 | if (!timer->reserved) { | 863 | if (!timer->reserved) { |
863 | pm_runtime_get_sync(dev); | 864 | ret = pm_runtime_get_sync(dev); |
865 | if (ret < 0) { | ||
866 | dev_err(dev, "%s: pm_runtime_get_sync failed!\n", | ||
867 | __func__); | ||
868 | goto err_get_sync; | ||
869 | } | ||
864 | __omap_dm_timer_init_regs(timer); | 870 | __omap_dm_timer_init_regs(timer); |
865 | pm_runtime_put(dev); | 871 | pm_runtime_put(dev); |
866 | } | 872 | } |
@@ -873,6 +879,11 @@ static int omap_dm_timer_probe(struct platform_device *pdev) | |||
873 | dev_dbg(dev, "Device Probed.\n"); | 879 | dev_dbg(dev, "Device Probed.\n"); |
874 | 880 | ||
875 | return 0; | 881 | return 0; |
882 | |||
883 | err_get_sync: | ||
884 | pm_runtime_put_noidle(dev); | ||
885 | pm_runtime_disable(dev); | ||
886 | return ret; | ||
876 | } | 887 | } |
877 | 888 | ||
878 | /** | 889 | /** |
@@ -899,6 +910,8 @@ static int omap_dm_timer_remove(struct platform_device *pdev) | |||
899 | } | 910 | } |
900 | spin_unlock_irqrestore(&dm_timer_lock, flags); | 911 | spin_unlock_irqrestore(&dm_timer_lock, flags); |
901 | 912 | ||
913 | pm_runtime_disable(&pdev->dev); | ||
914 | |||
902 | return ret; | 915 | return ret; |
903 | } | 916 | } |
904 | 917 | ||
diff --git a/arch/arm64/boot/dts/arm/juno-clocks.dtsi b/arch/arm64/boot/dts/arm/juno-clocks.dtsi index ea2b5666a16f..c9b89efe0f56 100644 --- a/arch/arm64/boot/dts/arm/juno-clocks.dtsi +++ b/arch/arm64/boot/dts/arm/juno-clocks.dtsi | |||
@@ -8,7 +8,7 @@ | |||
8 | */ | 8 | */ |
9 | 9 | ||
10 | /* SoC fixed clocks */ | 10 | /* SoC fixed clocks */ |
11 | soc_uartclk: refclk72738khz { | 11 | soc_uartclk: refclk7273800hz { |
12 | compatible = "fixed-clock"; | 12 | compatible = "fixed-clock"; |
13 | #clock-cells = <0>; | 13 | #clock-cells = <0>; |
14 | clock-frequency = <7273800>; | 14 | clock-frequency = <7273800>; |
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h index f213f5b4c423..d17437238a2c 100644 --- a/arch/parisc/include/asm/pgalloc.h +++ b/arch/parisc/include/asm/pgalloc.h | |||
@@ -26,7 +26,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm) | |||
26 | 26 | ||
27 | if (likely(pgd != NULL)) { | 27 | if (likely(pgd != NULL)) { |
28 | memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER); | 28 | memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER); |
29 | #ifdef CONFIG_64BIT | 29 | #if PT_NLEVELS == 3 |
30 | actual_pgd += PTRS_PER_PGD; | 30 | actual_pgd += PTRS_PER_PGD; |
31 | /* Populate first pmd with allocated memory. We mark it | 31 | /* Populate first pmd with allocated memory. We mark it |
32 | * with PxD_FLAG_ATTACHED as a signal to the system that this | 32 | * with PxD_FLAG_ATTACHED as a signal to the system that this |
@@ -45,7 +45,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm) | |||
45 | 45 | ||
46 | static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) | 46 | static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) |
47 | { | 47 | { |
48 | #ifdef CONFIG_64BIT | 48 | #if PT_NLEVELS == 3 |
49 | pgd -= PTRS_PER_PGD; | 49 | pgd -= PTRS_PER_PGD; |
50 | #endif | 50 | #endif |
51 | free_pages((unsigned long)pgd, PGD_ALLOC_ORDER); | 51 | free_pages((unsigned long)pgd, PGD_ALLOC_ORDER); |
@@ -72,12 +72,15 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) | |||
72 | 72 | ||
73 | static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) | 73 | static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) |
74 | { | 74 | { |
75 | #ifdef CONFIG_64BIT | ||
76 | if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED) | 75 | if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED) |
77 | /* This is the permanent pmd attached to the pgd; | 76 | /* |
78 | * cannot free it */ | 77 | * This is the permanent pmd attached to the pgd; |
78 | * cannot free it. | ||
79 | * Increment the counter to compensate for the decrement | ||
80 | * done by generic mm code. | ||
81 | */ | ||
82 | mm_inc_nr_pmds(mm); | ||
79 | return; | 83 | return; |
80 | #endif | ||
81 | free_pages((unsigned long)pmd, PMD_ORDER); | 84 | free_pages((unsigned long)pmd, PMD_ORDER); |
82 | } | 85 | } |
83 | 86 | ||
@@ -99,7 +102,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) | |||
99 | static inline void | 102 | static inline void |
100 | pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) | 103 | pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) |
101 | { | 104 | { |
102 | #ifdef CONFIG_64BIT | 105 | #if PT_NLEVELS == 3 |
103 | /* preserve the gateway marker if this is the beginning of | 106 | /* preserve the gateway marker if this is the beginning of |
104 | * the permanent pmd */ | 107 | * the permanent pmd */ |
105 | if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED) | 108 | if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED) |
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index 5a8997d63899..8eefb12d1d33 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S | |||
@@ -55,8 +55,8 @@ | |||
55 | #define ENTRY_COMP(_name_) .word sys_##_name_ | 55 | #define ENTRY_COMP(_name_) .word sys_##_name_ |
56 | #endif | 56 | #endif |
57 | 57 | ||
58 | ENTRY_SAME(restart_syscall) /* 0 */ | 58 | 90: ENTRY_SAME(restart_syscall) /* 0 */ |
59 | ENTRY_SAME(exit) | 59 | 91: ENTRY_SAME(exit) |
60 | ENTRY_SAME(fork_wrapper) | 60 | ENTRY_SAME(fork_wrapper) |
61 | ENTRY_SAME(read) | 61 | ENTRY_SAME(read) |
62 | ENTRY_SAME(write) | 62 | ENTRY_SAME(write) |
@@ -439,7 +439,10 @@ | |||
439 | ENTRY_SAME(bpf) | 439 | ENTRY_SAME(bpf) |
440 | ENTRY_COMP(execveat) | 440 | ENTRY_COMP(execveat) |
441 | 441 | ||
442 | /* Nothing yet */ | 442 | |
443 | .ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b)) | ||
444 | .error "size of syscall table does not fit value of __NR_Linux_syscalls" | ||
445 | .endif | ||
443 | 446 | ||
444 | #undef ENTRY_SAME | 447 | #undef ENTRY_SAME |
445 | #undef ENTRY_DIFF | 448 | #undef ENTRY_DIFF |
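
The 90:/91: labels and the .ifne block added above turn the table's length into a build-time check: (91b - 90b) is the size of one entry, (. - 90b) the size of the whole table, and the build fails if they disagree with __NR_Linux_syscalls. Roughly the same idea in C, as a hypothetical static assertion rather than the parisc assembler:

    /* Hypothetical illustration, not kernel code: fail the build if the table
     * length and the declared syscall count ever drift apart. */
    #define NR_SYSCALLS 4

    static const void *sys_call_table[] = { 0, 0, 0, 0 };   /* placeholder entries */

    _Static_assert(sizeof(sys_call_table) / sizeof(sys_call_table[0]) == NR_SYSCALLS,
                   "size of syscall table does not fit value of NR_SYSCALLS");
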
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index de4018a1bc4b..de747563d29d 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c | |||
@@ -636,7 +636,7 @@ static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu) | |||
636 | spin_lock(&vcpu->arch.vpa_update_lock); | 636 | spin_lock(&vcpu->arch.vpa_update_lock); |
637 | lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr; | 637 | lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr; |
638 | if (lppaca) | 638 | if (lppaca) |
639 | yield_count = lppaca->yield_count; | 639 | yield_count = be32_to_cpu(lppaca->yield_count); |
640 | spin_unlock(&vcpu->arch.vpa_update_lock); | 640 | spin_unlock(&vcpu->arch.vpa_update_lock); |
641 | return yield_count; | 641 | return yield_count; |
642 | } | 642 | } |
@@ -942,20 +942,20 @@ static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu, | |||
942 | static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, | 942 | static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, |
943 | bool preserve_top32) | 943 | bool preserve_top32) |
944 | { | 944 | { |
945 | struct kvm *kvm = vcpu->kvm; | ||
945 | struct kvmppc_vcore *vc = vcpu->arch.vcore; | 946 | struct kvmppc_vcore *vc = vcpu->arch.vcore; |
946 | u64 mask; | 947 | u64 mask; |
947 | 948 | ||
949 | mutex_lock(&kvm->lock); | ||
948 | spin_lock(&vc->lock); | 950 | spin_lock(&vc->lock); |
949 | /* | 951 | /* |
950 | * If ILE (interrupt little-endian) has changed, update the | 952 | * If ILE (interrupt little-endian) has changed, update the |
951 | * MSR_LE bit in the intr_msr for each vcpu in this vcore. | 953 | * MSR_LE bit in the intr_msr for each vcpu in this vcore. |
952 | */ | 954 | */ |
953 | if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) { | 955 | if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) { |
954 | struct kvm *kvm = vcpu->kvm; | ||
955 | struct kvm_vcpu *vcpu; | 956 | struct kvm_vcpu *vcpu; |
956 | int i; | 957 | int i; |
957 | 958 | ||
958 | mutex_lock(&kvm->lock); | ||
959 | kvm_for_each_vcpu(i, vcpu, kvm) { | 959 | kvm_for_each_vcpu(i, vcpu, kvm) { |
960 | if (vcpu->arch.vcore != vc) | 960 | if (vcpu->arch.vcore != vc) |
961 | continue; | 961 | continue; |
@@ -964,7 +964,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, | |||
964 | else | 964 | else |
965 | vcpu->arch.intr_msr &= ~MSR_LE; | 965 | vcpu->arch.intr_msr &= ~MSR_LE; |
966 | } | 966 | } |
967 | mutex_unlock(&kvm->lock); | ||
968 | } | 967 | } |
969 | 968 | ||
970 | /* | 969 | /* |
@@ -981,6 +980,7 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, | |||
981 | mask &= 0xFFFFFFFF; | 980 | mask &= 0xFFFFFFFF; |
982 | vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask); | 981 | vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask); |
983 | spin_unlock(&vc->lock); | 982 | spin_unlock(&vc->lock); |
983 | mutex_unlock(&kvm->lock); | ||
984 | } | 984 | } |
985 | 985 | ||
986 | static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, | 986 | static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, |
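
Two independent fixes in the book3s_hv.c hunks above: kvmppc_set_lpcr() now takes kvm->lock (a mutex) before vc->lock (a spinlock) instead of acquiring the mutex while the spinlock was held, and kvmppc_get_yield_count() byte-swaps lppaca->yield_count, which is stored big-endian, before using it on a little-endian host. A self-contained sketch of what be32_to_cpu() does with such a field (helper name is made up):

    #include <stdint.h>
    #include <stdio.h>

    /* Read a 32-bit big-endian field from memory regardless of host endianness. */
    static uint32_t be32_to_host(const uint8_t *p)
    {
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
    }

    int main(void)
    {
        uint8_t raw[4] = { 0x00, 0x00, 0x00, 0x2a };   /* big-endian 42 */
        printf("yield_count = %u\n", be32_to_host(raw));
        return 0;
    }
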
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index bb94e6f20c81..6cbf1630cb70 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S | |||
@@ -1005,6 +1005,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) | |||
1005 | /* Save HEIR (HV emulation assist reg) in emul_inst | 1005 | /* Save HEIR (HV emulation assist reg) in emul_inst |
1006 | if this is an HEI (HV emulation interrupt, e40) */ | 1006 | if this is an HEI (HV emulation interrupt, e40) */ |
1007 | li r3,KVM_INST_FETCH_FAILED | 1007 | li r3,KVM_INST_FETCH_FAILED |
1008 | stw r3,VCPU_LAST_INST(r9) | ||
1008 | cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST | 1009 | cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST |
1009 | bne 11f | 1010 | bne 11f |
1010 | mfspr r3,SPRN_HEIR | 1011 | mfspr r3,SPRN_HEIR |
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index b9861e19cd3d..1c78f44f4f93 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
@@ -212,11 +212,11 @@ static struct event_constraint intel_hsw_event_constraints[] = { | |||
212 | INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ | 212 | INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */ |
213 | INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ | 213 | INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */ |
214 | /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */ | 214 | /* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */ |
215 | INTEL_EVENT_CONSTRAINT(0x08a3, 0x4), | 215 | INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4), |
216 | /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */ | 216 | /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */ |
217 | INTEL_EVENT_CONSTRAINT(0x0ca3, 0x4), | 217 | INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4), |
218 | /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */ | 218 | /* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */ |
219 | INTEL_EVENT_CONSTRAINT(0x04a3, 0xf), | 219 | INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf), |
220 | EVENT_CONSTRAINT_END | 220 | EVENT_CONSTRAINT_END |
221 | }; | 221 | }; |
222 | 222 | ||
@@ -1852,11 +1852,11 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event | |||
1852 | if (c) | 1852 | if (c) |
1853 | return c; | 1853 | return c; |
1854 | 1854 | ||
1855 | c = intel_pebs_constraints(event); | 1855 | c = intel_shared_regs_constraints(cpuc, event); |
1856 | if (c) | 1856 | if (c) |
1857 | return c; | 1857 | return c; |
1858 | 1858 | ||
1859 | c = intel_shared_regs_constraints(cpuc, event); | 1859 | c = intel_pebs_constraints(event); |
1860 | if (c) | 1860 | if (c) |
1861 | return c; | 1861 | return c; |
1862 | 1862 | ||
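
In the constraint table above, the CYCLE_ACTIVITY entries switch from INTEL_EVENT_CONSTRAINT to INTEL_UEVENT_CONSTRAINT because all three sub-events share event code 0xa3 and differ only in the unit mask, so a match on the event code alone cannot tell them apart (the second hunk additionally checks the shared-regs constraints before the PEBS ones). A small sketch of the two matching rules, assuming the usual event-in-bits-0-7 / umask-in-bits-8-15 layout:

    #include <stdint.h>
    #include <stdio.h>

    #define EVENT_MASK 0x00ffULL   /* bits 0-7: event code */
    #define UMASK_MASK 0xff00ULL   /* bits 8-15: unit mask */

    static int matches(uint64_t config, uint64_t code, uint64_t cmask)
    {
        return (config & cmask) == code;
    }

    int main(void)
    {
        uint64_t stalls_l1d = 0x0ca3;   /* CYCLE_ACTIVITY.STALLS_L1D_PENDING */

        /* Matching the event code alone treats every 0xa3 sub-event the same... */
        printf("%d\n", matches(stalls_l1d, 0x00a3, EVENT_MASK));
        /* ...matching event + umask distinguishes 0x08a3 from 0x0ca3. */
        printf("%d\n", matches(stalls_l1d, 0x08a3, EVENT_MASK | UMASK_MASK));
        return 0;
    }
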
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 1d74d161687c..2babb393915e 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
@@ -364,12 +364,21 @@ system_call_fastpath: | |||
364 | * Has incomplete stack frame and undefined top of stack. | 364 | * Has incomplete stack frame and undefined top of stack. |
365 | */ | 365 | */ |
366 | ret_from_sys_call: | 366 | ret_from_sys_call: |
367 | testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) | ||
368 | jnz int_ret_from_sys_call_fixup /* Go the the slow path */ | ||
369 | |||
370 | LOCKDEP_SYS_EXIT | 367 | LOCKDEP_SYS_EXIT |
371 | DISABLE_INTERRUPTS(CLBR_NONE) | 368 | DISABLE_INTERRUPTS(CLBR_NONE) |
372 | TRACE_IRQS_OFF | 369 | TRACE_IRQS_OFF |
370 | |||
371 | /* | ||
372 | * We must check ti flags with interrupts (or at least preemption) | ||
373 | * off because we must *never* return to userspace without | ||
374 | * processing exit work that is enqueued if we're preempted here. | ||
375 | * In particular, returning to userspace with any of the one-shot | ||
376 | * flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is | ||
377 | * very bad. | ||
378 | */ | ||
379 | testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) | ||
380 | jnz int_ret_from_sys_call_fixup /* Go the the slow path */ | ||
381 | |||
373 | CFI_REMEMBER_STATE | 382 | CFI_REMEMBER_STATE |
374 | /* | 383 | /* |
375 | * sysretq will re-enable interrupts: | 384 | * sysretq will re-enable interrupts: |
@@ -386,7 +395,7 @@ ret_from_sys_call: | |||
386 | 395 | ||
387 | int_ret_from_sys_call_fixup: | 396 | int_ret_from_sys_call_fixup: |
388 | FIXUP_TOP_OF_STACK %r11, -ARGOFFSET | 397 | FIXUP_TOP_OF_STACK %r11, -ARGOFFSET |
389 | jmp int_ret_from_sys_call | 398 | jmp int_ret_from_sys_call_irqs_off |
390 | 399 | ||
391 | /* Do syscall tracing */ | 400 | /* Do syscall tracing */ |
392 | tracesys: | 401 | tracesys: |
@@ -432,6 +441,7 @@ tracesys_phase2: | |||
432 | GLOBAL(int_ret_from_sys_call) | 441 | GLOBAL(int_ret_from_sys_call) |
433 | DISABLE_INTERRUPTS(CLBR_NONE) | 442 | DISABLE_INTERRUPTS(CLBR_NONE) |
434 | TRACE_IRQS_OFF | 443 | TRACE_IRQS_OFF |
444 | int_ret_from_sys_call_irqs_off: | ||
435 | movl $_TIF_ALLWORK_MASK,%edi | 445 | movl $_TIF_ALLWORK_MASK,%edi |
436 | /* edi: mask to check */ | 446 | /* edi: mask to check */ |
437 | GLOBAL(int_with_check) | 447 | GLOBAL(int_with_check) |
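
The entry_64.S change above moves the _TIF_ALLWORK_MASK test to after interrupts are disabled: if the flags are sampled while interrupts are still enabled, exit work queued by a late interrupt or preemption can be missed on the way back to user space. A rough C sketch of the ordering being enforced (all names and the flag value are invented; only the check-after-irqs-off ordering matters):

    #include <stdio.h>

    #define TIF_ALLWORK_MASK 0xffUL          /* hypothetical work-pending mask */

    static unsigned long ti_flags;           /* would be set from interrupt context */

    static void irq_disable(void)        { /* stop new exit work from being queued */ }
    static void exit_work_slowpath(void) { puts("handle signals, resched, notifiers"); }
    static void sysret_fast_return(void) { puts("fast sysret back to user mode"); }

    static void ret_from_sys_call_sketch(void)
    {
        irq_disable();                       /* 1: close the window first     */
        if (ti_flags & TIF_ALLWORK_MASK)     /* 2: only then sample the flags */
            exit_work_slowpath();
        else
            sysret_fast_return();
    }

    int main(void)
    {
        ret_from_sys_call_sketch();
        return 0;
    }
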
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 68161f7a07d6..a0b036ccb118 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig | |||
@@ -192,6 +192,7 @@ config SYS_SUPPORTS_EM_STI | |||
192 | config SH_TIMER_CMT | 192 | config SH_TIMER_CMT |
193 | bool "Renesas CMT timer driver" if COMPILE_TEST | 193 | bool "Renesas CMT timer driver" if COMPILE_TEST |
194 | depends on GENERIC_CLOCKEVENTS | 194 | depends on GENERIC_CLOCKEVENTS |
195 | depends on HAS_IOMEM | ||
195 | default SYS_SUPPORTS_SH_CMT | 196 | default SYS_SUPPORTS_SH_CMT |
196 | help | 197 | help |
197 | This enables build of a clocksource and clockevent driver for | 198 | This enables build of a clocksource and clockevent driver for |
@@ -201,6 +202,7 @@ config SH_TIMER_CMT | |||
201 | config SH_TIMER_MTU2 | 202 | config SH_TIMER_MTU2 |
202 | bool "Renesas MTU2 timer driver" if COMPILE_TEST | 203 | bool "Renesas MTU2 timer driver" if COMPILE_TEST |
203 | depends on GENERIC_CLOCKEVENTS | 204 | depends on GENERIC_CLOCKEVENTS |
205 | depends on HAS_IOMEM | ||
204 | default SYS_SUPPORTS_SH_MTU2 | 206 | default SYS_SUPPORTS_SH_MTU2 |
205 | help | 207 | help |
206 | This enables build of a clockevent driver for the Multi-Function | 208 | This enables build of a clockevent driver for the Multi-Function |
@@ -210,6 +212,7 @@ config SH_TIMER_MTU2 | |||
210 | config SH_TIMER_TMU | 212 | config SH_TIMER_TMU |
211 | bool "Renesas TMU timer driver" if COMPILE_TEST | 213 | bool "Renesas TMU timer driver" if COMPILE_TEST |
212 | depends on GENERIC_CLOCKEVENTS | 214 | depends on GENERIC_CLOCKEVENTS |
215 | depends on HAS_IOMEM | ||
213 | default SYS_SUPPORTS_SH_TMU | 216 | default SYS_SUPPORTS_SH_TMU |
214 | help | 217 | help |
215 | This enables build of a clocksource and clockevent driver for | 218 | This enables build of a clocksource and clockevent driver for |
diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c index 5dcbf90b8015..58597fbcc046 100644 --- a/drivers/clocksource/timer-sun5i.c +++ b/drivers/clocksource/timer-sun5i.c | |||
@@ -17,7 +17,6 @@ | |||
17 | #include <linux/irq.h> | 17 | #include <linux/irq.h> |
18 | #include <linux/irqreturn.h> | 18 | #include <linux/irqreturn.h> |
19 | #include <linux/reset.h> | 19 | #include <linux/reset.h> |
20 | #include <linux/sched_clock.h> | ||
21 | #include <linux/of.h> | 20 | #include <linux/of.h> |
22 | #include <linux/of_address.h> | 21 | #include <linux/of_address.h> |
23 | #include <linux/of_irq.h> | 22 | #include <linux/of_irq.h> |
@@ -137,11 +136,6 @@ static struct irqaction sun5i_timer_irq = { | |||
137 | .dev_id = &sun5i_clockevent, | 136 | .dev_id = &sun5i_clockevent, |
138 | }; | 137 | }; |
139 | 138 | ||
140 | static u64 sun5i_timer_sched_read(void) | ||
141 | { | ||
142 | return ~readl(timer_base + TIMER_CNTVAL_LO_REG(1)); | ||
143 | } | ||
144 | |||
145 | static void __init sun5i_timer_init(struct device_node *node) | 139 | static void __init sun5i_timer_init(struct device_node *node) |
146 | { | 140 | { |
147 | struct reset_control *rstc; | 141 | struct reset_control *rstc; |
@@ -172,7 +166,6 @@ static void __init sun5i_timer_init(struct device_node *node) | |||
172 | writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD, | 166 | writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD, |
173 | timer_base + TIMER_CTL_REG(1)); | 167 | timer_base + TIMER_CTL_REG(1)); |
174 | 168 | ||
175 | sched_clock_register(sun5i_timer_sched_read, 32, rate); | ||
176 | clocksource_mmio_init(timer_base + TIMER_CNTVAL_LO_REG(1), node->name, | 169 | clocksource_mmio_init(timer_base + TIMER_CNTVAL_LO_REG(1), node->name, |
177 | rate, 340, 32, clocksource_mmio_readl_down); | 170 | rate, 340, 32, clocksource_mmio_readl_down); |
178 | 171 | ||
diff --git a/drivers/watchdog/imgpdc_wdt.c b/drivers/watchdog/imgpdc_wdt.c index c8def68d9e4c..0deaa4f971f5 100644 --- a/drivers/watchdog/imgpdc_wdt.c +++ b/drivers/watchdog/imgpdc_wdt.c | |||
@@ -42,10 +42,10 @@ | |||
42 | #define PDC_WDT_MIN_TIMEOUT 1 | 42 | #define PDC_WDT_MIN_TIMEOUT 1 |
43 | #define PDC_WDT_DEF_TIMEOUT 64 | 43 | #define PDC_WDT_DEF_TIMEOUT 64 |
44 | 44 | ||
45 | static int heartbeat; | 45 | static int heartbeat = PDC_WDT_DEF_TIMEOUT; |
46 | module_param(heartbeat, int, 0); | 46 | module_param(heartbeat, int, 0); |
47 | MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds. " | 47 | MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds " |
48 | "(default = " __MODULE_STRING(PDC_WDT_DEF_TIMEOUT) ")"); | 48 | "(default=" __MODULE_STRING(PDC_WDT_DEF_TIMEOUT) ")"); |
49 | 49 | ||
50 | static bool nowayout = WATCHDOG_NOWAYOUT; | 50 | static bool nowayout = WATCHDOG_NOWAYOUT; |
51 | module_param(nowayout, bool, 0); | 51 | module_param(nowayout, bool, 0); |
@@ -191,6 +191,7 @@ static int pdc_wdt_probe(struct platform_device *pdev) | |||
191 | pdc_wdt->wdt_dev.ops = &pdc_wdt_ops; | 191 | pdc_wdt->wdt_dev.ops = &pdc_wdt_ops; |
192 | pdc_wdt->wdt_dev.max_timeout = 1 << PDC_WDT_CONFIG_DELAY_MASK; | 192 | pdc_wdt->wdt_dev.max_timeout = 1 << PDC_WDT_CONFIG_DELAY_MASK; |
193 | pdc_wdt->wdt_dev.parent = &pdev->dev; | 193 | pdc_wdt->wdt_dev.parent = &pdev->dev; |
194 | watchdog_set_drvdata(&pdc_wdt->wdt_dev, pdc_wdt); | ||
194 | 195 | ||
195 | ret = watchdog_init_timeout(&pdc_wdt->wdt_dev, heartbeat, &pdev->dev); | 196 | ret = watchdog_init_timeout(&pdc_wdt->wdt_dev, heartbeat, &pdev->dev); |
196 | if (ret < 0) { | 197 | if (ret < 0) { |
@@ -232,7 +233,6 @@ static int pdc_wdt_probe(struct platform_device *pdev) | |||
232 | watchdog_set_nowayout(&pdc_wdt->wdt_dev, nowayout); | 233 | watchdog_set_nowayout(&pdc_wdt->wdt_dev, nowayout); |
233 | 234 | ||
234 | platform_set_drvdata(pdev, pdc_wdt); | 235 | platform_set_drvdata(pdev, pdc_wdt); |
235 | watchdog_set_drvdata(&pdc_wdt->wdt_dev, pdc_wdt); | ||
236 | 236 | ||
237 | ret = watchdog_register_device(&pdc_wdt->wdt_dev); | 237 | ret = watchdog_register_device(&pdc_wdt->wdt_dev); |
238 | if (ret) | 238 | if (ret) |
diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c index a87f6df6e85f..938b987de551 100644 --- a/drivers/watchdog/mtk_wdt.c +++ b/drivers/watchdog/mtk_wdt.c | |||
@@ -133,7 +133,7 @@ static int mtk_wdt_start(struct watchdog_device *wdt_dev) | |||
133 | u32 reg; | 133 | u32 reg; |
134 | struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev); | 134 | struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev); |
135 | void __iomem *wdt_base = mtk_wdt->wdt_base; | 135 | void __iomem *wdt_base = mtk_wdt->wdt_base; |
136 | u32 ret; | 136 | int ret; |
137 | 137 | ||
138 | ret = mtk_wdt_set_timeout(wdt_dev, wdt_dev->timeout); | 138 | ret = mtk_wdt_set_timeout(wdt_dev, wdt_dev->timeout); |
139 | if (ret < 0) | 139 | if (ret < 0) |
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 88d0d4420ad2..ba77ab5f64dd 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c | |||
@@ -633,7 +633,7 @@ static int count_matching_names(struct lock_class *new_class) | |||
633 | if (!new_class->name) | 633 | if (!new_class->name) |
634 | return 0; | 634 | return 0; |
635 | 635 | ||
636 | list_for_each_entry(class, &all_lock_classes, lock_entry) { | 636 | list_for_each_entry_rcu(class, &all_lock_classes, lock_entry) { |
637 | if (new_class->key - new_class->subclass == class->key) | 637 | if (new_class->key - new_class->subclass == class->key) |
638 | return class->name_version; | 638 | return class->name_version; |
639 | if (class->name && !strcmp(class->name, new_class->name)) | 639 | if (class->name && !strcmp(class->name, new_class->name)) |
@@ -700,10 +700,12 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass) | |||
700 | hash_head = classhashentry(key); | 700 | hash_head = classhashentry(key); |
701 | 701 | ||
702 | /* | 702 | /* |
703 | * We can walk the hash lockfree, because the hash only | 703 | * We do an RCU walk of the hash, see lockdep_free_key_range(). |
704 | * grows, and we are careful when adding entries to the end: | ||
705 | */ | 704 | */ |
706 | list_for_each_entry(class, hash_head, hash_entry) { | 705 | if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) |
706 | return NULL; | ||
707 | |||
708 | list_for_each_entry_rcu(class, hash_head, hash_entry) { | ||
707 | if (class->key == key) { | 709 | if (class->key == key) { |
708 | /* | 710 | /* |
709 | * Huh! same key, different name? Did someone trample | 711 | * Huh! same key, different name? Did someone trample |
@@ -728,7 +730,8 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) | |||
728 | struct lockdep_subclass_key *key; | 730 | struct lockdep_subclass_key *key; |
729 | struct list_head *hash_head; | 731 | struct list_head *hash_head; |
730 | struct lock_class *class; | 732 | struct lock_class *class; |
731 | unsigned long flags; | 733 | |
734 | DEBUG_LOCKS_WARN_ON(!irqs_disabled()); | ||
732 | 735 | ||
733 | class = look_up_lock_class(lock, subclass); | 736 | class = look_up_lock_class(lock, subclass); |
734 | if (likely(class)) | 737 | if (likely(class)) |
@@ -750,28 +753,26 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) | |||
750 | key = lock->key->subkeys + subclass; | 753 | key = lock->key->subkeys + subclass; |
751 | hash_head = classhashentry(key); | 754 | hash_head = classhashentry(key); |
752 | 755 | ||
753 | raw_local_irq_save(flags); | ||
754 | if (!graph_lock()) { | 756 | if (!graph_lock()) { |
755 | raw_local_irq_restore(flags); | ||
756 | return NULL; | 757 | return NULL; |
757 | } | 758 | } |
758 | /* | 759 | /* |
759 | * We have to do the hash-walk again, to avoid races | 760 | * We have to do the hash-walk again, to avoid races |
760 | * with another CPU: | 761 | * with another CPU: |
761 | */ | 762 | */ |
762 | list_for_each_entry(class, hash_head, hash_entry) | 763 | list_for_each_entry_rcu(class, hash_head, hash_entry) { |
763 | if (class->key == key) | 764 | if (class->key == key) |
764 | goto out_unlock_set; | 765 | goto out_unlock_set; |
766 | } | ||
767 | |||
765 | /* | 768 | /* |
766 | * Allocate a new key from the static array, and add it to | 769 | * Allocate a new key from the static array, and add it to |
767 | * the hash: | 770 | * the hash: |
768 | */ | 771 | */ |
769 | if (nr_lock_classes >= MAX_LOCKDEP_KEYS) { | 772 | if (nr_lock_classes >= MAX_LOCKDEP_KEYS) { |
770 | if (!debug_locks_off_graph_unlock()) { | 773 | if (!debug_locks_off_graph_unlock()) { |
771 | raw_local_irq_restore(flags); | ||
772 | return NULL; | 774 | return NULL; |
773 | } | 775 | } |
774 | raw_local_irq_restore(flags); | ||
775 | 776 | ||
776 | print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!"); | 777 | print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!"); |
777 | dump_stack(); | 778 | dump_stack(); |
@@ -798,7 +799,6 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) | |||
798 | 799 | ||
799 | if (verbose(class)) { | 800 | if (verbose(class)) { |
800 | graph_unlock(); | 801 | graph_unlock(); |
801 | raw_local_irq_restore(flags); | ||
802 | 802 | ||
803 | printk("\nnew class %p: %s", class->key, class->name); | 803 | printk("\nnew class %p: %s", class->key, class->name); |
804 | if (class->name_version > 1) | 804 | if (class->name_version > 1) |
@@ -806,15 +806,12 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) | |||
806 | printk("\n"); | 806 | printk("\n"); |
807 | dump_stack(); | 807 | dump_stack(); |
808 | 808 | ||
809 | raw_local_irq_save(flags); | ||
810 | if (!graph_lock()) { | 809 | if (!graph_lock()) { |
811 | raw_local_irq_restore(flags); | ||
812 | return NULL; | 810 | return NULL; |
813 | } | 811 | } |
814 | } | 812 | } |
815 | out_unlock_set: | 813 | out_unlock_set: |
816 | graph_unlock(); | 814 | graph_unlock(); |
817 | raw_local_irq_restore(flags); | ||
818 | 815 | ||
819 | out_set_class_cache: | 816 | out_set_class_cache: |
820 | if (!subclass || force) | 817 | if (!subclass || force) |
@@ -870,11 +867,9 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this, | |||
870 | entry->distance = distance; | 867 | entry->distance = distance; |
871 | entry->trace = *trace; | 868 | entry->trace = *trace; |
872 | /* | 869 | /* |
873 | * Since we never remove from the dependency list, the list can | 870 | * Both allocation and removal are done under the graph lock; but |
874 | * be walked lockless by other CPUs, it's only allocation | 871 | * iteration is under RCU-sched; see look_up_lock_class() and |
875 | * that must be protected by the spinlock. But this also means | 872 | * lockdep_free_key_range(). |
876 | * we must make new entries visible only once writes to the | ||
877 | * entry become visible - hence the RCU op: | ||
878 | */ | 873 | */ |
879 | list_add_tail_rcu(&entry->entry, head); | 874 | list_add_tail_rcu(&entry->entry, head); |
880 | 875 | ||
@@ -1025,7 +1020,9 @@ static int __bfs(struct lock_list *source_entry, | |||
1025 | else | 1020 | else |
1026 | head = &lock->class->locks_before; | 1021 | head = &lock->class->locks_before; |
1027 | 1022 | ||
1028 | list_for_each_entry(entry, head, entry) { | 1023 | DEBUG_LOCKS_WARN_ON(!irqs_disabled()); |
1024 | |||
1025 | list_for_each_entry_rcu(entry, head, entry) { | ||
1029 | if (!lock_accessed(entry)) { | 1026 | if (!lock_accessed(entry)) { |
1030 | unsigned int cq_depth; | 1027 | unsigned int cq_depth; |
1031 | mark_lock_accessed(entry, lock); | 1028 | mark_lock_accessed(entry, lock); |
@@ -2022,7 +2019,7 @@ static inline int lookup_chain_cache(struct task_struct *curr, | |||
2022 | * We can walk it lock-free, because entries only get added | 2019 | * We can walk it lock-free, because entries only get added |
2023 | * to the hash: | 2020 | * to the hash: |
2024 | */ | 2021 | */ |
2025 | list_for_each_entry(chain, hash_head, entry) { | 2022 | list_for_each_entry_rcu(chain, hash_head, entry) { |
2026 | if (chain->chain_key == chain_key) { | 2023 | if (chain->chain_key == chain_key) { |
2027 | cache_hit: | 2024 | cache_hit: |
2028 | debug_atomic_inc(chain_lookup_hits); | 2025 | debug_atomic_inc(chain_lookup_hits); |
@@ -2996,8 +2993,18 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name, | |||
2996 | if (unlikely(!debug_locks)) | 2993 | if (unlikely(!debug_locks)) |
2997 | return; | 2994 | return; |
2998 | 2995 | ||
2999 | if (subclass) | 2996 | if (subclass) { |
2997 | unsigned long flags; | ||
2998 | |||
2999 | if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion)) | ||
3000 | return; | ||
3001 | |||
3002 | raw_local_irq_save(flags); | ||
3003 | current->lockdep_recursion = 1; | ||
3000 | register_lock_class(lock, subclass, 1); | 3004 | register_lock_class(lock, subclass, 1); |
3005 | current->lockdep_recursion = 0; | ||
3006 | raw_local_irq_restore(flags); | ||
3007 | } | ||
3001 | } | 3008 | } |
3002 | EXPORT_SYMBOL_GPL(lockdep_init_map); | 3009 | EXPORT_SYMBOL_GPL(lockdep_init_map); |
3003 | 3010 | ||
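The lockdep_init_map() hunk above brackets the register_lock_class() call with a per-task recursion flag and disabled interrupts, so that anything the registration does internally cannot re-enter lockdep and so that an interrupt cannot sneak in while the flag is set. The shape of that guard, restated in isolation (guarded_register() and do_register() are hypothetical names; current->lockdep_recursion is lockdep's own field, reused here only to mirror the hunk):

#include <linux/debug_locks.h>
#include <linux/irqflags.h>
#include <linux/sched.h>

static void do_register(void)
{
	/* stands in for register_lock_class(lock, subclass, 1) */
}

static void guarded_register(void)
{
	unsigned long flags;

	/* Already inside the facility: bail out instead of recursing. */
	if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
		return;

	raw_local_irq_save(flags);
	current->lockdep_recursion = 1;
	do_register();
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);
}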
@@ -3887,9 +3894,17 @@ static inline int within(const void *addr, void *start, unsigned long size) | |||
3887 | return addr >= start && addr < start + size; | 3894 | return addr >= start && addr < start + size; |
3888 | } | 3895 | } |
3889 | 3896 | ||
3897 | /* | ||
3898 | * Used in module.c to remove lock classes from memory that is going to be | ||
3899 | * freed; and possibly re-used by other modules. | ||
3900 | * | ||
3901 | * We will have had one sync_sched() before getting here, so we're guaranteed | ||
3902 | * nobody will look up these exact classes -- they're properly dead but still | ||
3903 | * allocated. | ||
3904 | */ | ||
3890 | void lockdep_free_key_range(void *start, unsigned long size) | 3905 | void lockdep_free_key_range(void *start, unsigned long size) |
3891 | { | 3906 | { |
3892 | struct lock_class *class, *next; | 3907 | struct lock_class *class; |
3893 | struct list_head *head; | 3908 | struct list_head *head; |
3894 | unsigned long flags; | 3909 | unsigned long flags; |
3895 | int i; | 3910 | int i; |
@@ -3905,7 +3920,7 @@ void lockdep_free_key_range(void *start, unsigned long size) | |||
3905 | head = classhash_table + i; | 3920 | head = classhash_table + i; |
3906 | if (list_empty(head)) | 3921 | if (list_empty(head)) |
3907 | continue; | 3922 | continue; |
3908 | list_for_each_entry_safe(class, next, head, hash_entry) { | 3923 | list_for_each_entry_rcu(class, head, hash_entry) { |
3909 | if (within(class->key, start, size)) | 3924 | if (within(class->key, start, size)) |
3910 | zap_class(class); | 3925 | zap_class(class); |
3911 | else if (within(class->name, start, size)) | 3926 | else if (within(class->name, start, size)) |
@@ -3916,11 +3931,25 @@ void lockdep_free_key_range(void *start, unsigned long size) | |||
3916 | if (locked) | 3931 | if (locked) |
3917 | graph_unlock(); | 3932 | graph_unlock(); |
3918 | raw_local_irq_restore(flags); | 3933 | raw_local_irq_restore(flags); |
3934 | |||
3935 | /* | ||
3936 | * Wait for any possible iterators from look_up_lock_class() to pass | ||
3937 | * before continuing to free the memory they refer to. | ||
3938 | * | ||
3939 | * sync_sched() is sufficient because the read-side is IRQ disabled. | ||
3940 | */ | ||
3941 | synchronize_sched(); | ||
3942 | |||
3943 | /* | ||
3944 | * XXX at this point we could return the resources to the pool; | ||
3945 | * instead we leak them. We would need to change to bitmap allocators | ||
3946 | * instead of the linear allocators we have now. | ||
3947 | */ | ||
3919 | } | 3948 | } |
3920 | 3949 | ||
3921 | void lockdep_reset_lock(struct lockdep_map *lock) | 3950 | void lockdep_reset_lock(struct lockdep_map *lock) |
3922 | { | 3951 | { |
3923 | struct lock_class *class, *next; | 3952 | struct lock_class *class; |
3924 | struct list_head *head; | 3953 | struct list_head *head; |
3925 | unsigned long flags; | 3954 | unsigned long flags; |
3926 | int i, j; | 3955 | int i, j; |
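The comments added to lockdep_free_key_range() above describe the free path as a two-step dance: unhash the affected classes under the graph lock, then wait a grace period so every in-flight lockless walker (an IRQs-off, i.e. RCU-sched, reader) drains before the backing module memory is reused. A stand-alone sketch of that "unlink, wait, then free" pattern (hypothetical names klass, graph_lock_sketch, zap_and_free_sketch; plain synchronize_rcu() shown where lockdep uses synchronize_sched()):

#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct klass {
	void *key;
	struct list_head hash_entry;
};

static DEFINE_SPINLOCK(graph_lock_sketch);	/* stands in for graph_lock() */

static void zap_and_free_sketch(struct klass *class)
{
	/* 1) Unhash under the writer-side lock; readers may still hold a pointer. */
	spin_lock(&graph_lock_sketch);
	list_del_rcu(&class->hash_entry);
	spin_unlock(&graph_lock_sketch);

	/*
	 * 2) Wait for every pre-existing lockless walker to finish.  lockdep
	 *    uses synchronize_sched() because its readers run with IRQs
	 *    disabled; synchronize_rcu() is the general-purpose equivalent.
	 */
	synchronize_rcu();

	/*
	 * 3) Only now may the memory be freed or reused.  lockdep itself
	 *    cannot hand its linear allocations back (hence the XXX comment
	 *    above) and leaks them instead.
	 */
	kfree(class);
}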
@@ -3948,7 +3977,7 @@ void lockdep_reset_lock(struct lockdep_map *lock) | |||
3948 | head = classhash_table + i; | 3977 | head = classhash_table + i; |
3949 | if (list_empty(head)) | 3978 | if (list_empty(head)) |
3950 | continue; | 3979 | continue; |
3951 | list_for_each_entry_safe(class, next, head, hash_entry) { | 3980 | list_for_each_entry_rcu(class, head, hash_entry) { |
3952 | int match = 0; | 3981 | int match = 0; |
3953 | 3982 | ||
3954 | for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++) | 3983 | for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++) |
diff --git a/kernel/module.c b/kernel/module.c index b3d634ed06c9..99fdf94efce8 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
@@ -1865,7 +1865,7 @@ static void free_module(struct module *mod) | |||
1865 | kfree(mod->args); | 1865 | kfree(mod->args); |
1866 | percpu_modfree(mod); | 1866 | percpu_modfree(mod); |
1867 | 1867 | ||
1868 | /* Free lock-classes: */ | 1868 | /* Free lock-classes; relies on the preceding sync_rcu(). */ |
1869 | lockdep_free_key_range(mod->module_core, mod->core_size); | 1869 | lockdep_free_key_range(mod->module_core, mod->core_size); |
1870 | 1870 | ||
1871 | /* Finally, free the core (containing the module structure) */ | 1871 | /* Finally, free the core (containing the module structure) */ |
@@ -3349,9 +3349,6 @@ static int load_module(struct load_info *info, const char __user *uargs, | |||
3349 | module_bug_cleanup(mod); | 3349 | module_bug_cleanup(mod); |
3350 | mutex_unlock(&module_mutex); | 3350 | mutex_unlock(&module_mutex); |
3351 | 3351 | ||
3352 | /* Free lock-classes: */ | ||
3353 | lockdep_free_key_range(mod->module_core, mod->core_size); | ||
3354 | |||
3355 | /* we can't deallocate the module until we clear memory protection */ | 3352 | /* we can't deallocate the module until we clear memory protection */ |
3356 | unset_module_init_ro_nx(mod); | 3353 | unset_module_init_ro_nx(mod); |
3357 | unset_module_core_ro_nx(mod); | 3354 | unset_module_core_ro_nx(mod); |
@@ -3375,6 +3372,9 @@ static int load_module(struct load_info *info, const char __user *uargs, | |||
3375 | synchronize_rcu(); | 3372 | synchronize_rcu(); |
3376 | mutex_unlock(&module_mutex); | 3373 | mutex_unlock(&module_mutex); |
3377 | free_module: | 3374 | free_module: |
3375 | /* Free lock-classes; relies on the preceding sync_rcu() */ | ||
3376 | lockdep_free_key_range(mod->module_core, mod->core_size); | ||
3377 | |||
3378 | module_deallocate(mod, info); | 3378 | module_deallocate(mod, info); |
3379 | free_copy: | 3379 | free_copy: |
3380 | free_copy(info); | 3380 | free_copy(info); |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index f0f831e8a345..62671f53202a 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -3034,6 +3034,8 @@ void rt_mutex_setprio(struct task_struct *p, int prio) | |||
3034 | } else { | 3034 | } else { |
3035 | if (dl_prio(oldprio)) | 3035 | if (dl_prio(oldprio)) |
3036 | p->dl.dl_boosted = 0; | 3036 | p->dl.dl_boosted = 0; |
3037 | if (rt_prio(oldprio)) | ||
3038 | p->rt.timeout = 0; | ||
3037 | p->sched_class = &fair_sched_class; | 3039 | p->sched_class = &fair_sched_class; |
3038 | } | 3040 | } |
3039 | 3041 | ||
diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c index eb682d5c697c..6aac4beedbbe 100644 --- a/kernel/time/tick-broadcast-hrtimer.c +++ b/kernel/time/tick-broadcast-hrtimer.c | |||
@@ -49,6 +49,7 @@ static void bc_set_mode(enum clock_event_mode mode, | |||
49 | */ | 49 | */ |
50 | static int bc_set_next(ktime_t expires, struct clock_event_device *bc) | 50 | static int bc_set_next(ktime_t expires, struct clock_event_device *bc) |
51 | { | 51 | { |
52 | int bc_moved; | ||
52 | /* | 53 | /* |
53 | * We try to cancel the timer first. If the callback is in | 54 | * We try to cancel the timer first. If the callback is in |
54 | * flight on some other cpu then we let it handle it. If we | 55 | * flight on some other cpu then we let it handle it. If we |
@@ -60,9 +61,15 @@ static int bc_set_next(ktime_t expires, struct clock_event_device *bc) | |||
60 | * restart the timer because we are in the callback, but we | 61 | * restart the timer because we are in the callback, but we |
61 | * can set the expiry time and let the callback return | 62 | * can set the expiry time and let the callback return |
62 | * HRTIMER_RESTART. | 63 | * HRTIMER_RESTART. |
64 | * | ||
65 | * Since we are in the idle loop at this point and because | ||
66 | * hrtimer_{start/cancel} functions call into tracing, | ||
67 | * calls to these functions must be bound within RCU_NONIDLE. | ||
63 | */ | 68 | */ |
64 | if (hrtimer_try_to_cancel(&bctimer) >= 0) { | 69 | RCU_NONIDLE(bc_moved = (hrtimer_try_to_cancel(&bctimer) >= 0) ? |
65 | hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED); | 70 | !hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED) : |
71 | 0); | ||
72 | if (bc_moved) { | ||
66 | /* Bind the "device" to the cpu */ | 73 | /* Bind the "device" to the cpu */ |
67 | bc->bound_on = smp_processor_id(); | 74 | bc->bound_on = smp_processor_id(); |
68 | } else if (bc->bound_on == smp_processor_id()) { | 75 | } else if (bc->bound_on == smp_processor_id()) { |
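The new comment in bc_set_next() above explains the constraint: this runs from the idle loop, where RCU treats the CPU as idle, yet hrtimer_try_to_cancel()/hrtimer_start() can hit tracepoints that are RCU readers, so the calls have to sit inside RCU_NONIDLE(). A hedged restatement of the fixed sequence in isolation (arm_broadcast_timer() and bctimer_sketch are hypothetical names; this assumes a kernel of this era, where hrtimer_start() still returns int):

#include <linux/hrtimer.h>
#include <linux/rcupdate.h>

static struct hrtimer bctimer_sketch;

static int arm_broadcast_timer(ktime_t expires)
{
	int moved;

	/*
	 * RCU_NONIDLE() marks the CPU non-idle around the statement, so any
	 * RCU usage reached from the hrtimer calls (e.g. tracepoints) is safe
	 * even though we are called from the idle loop.
	 */
	RCU_NONIDLE(moved = (hrtimer_try_to_cancel(&bctimer_sketch) >= 0) ?
			    !hrtimer_start(&bctimer_sketch, expires,
					   HRTIMER_MODE_ABS_PINNED) :
			    0);
	return moved;	/* non-zero: the timer is now armed on this CPU */
}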
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index 1684bcc78b34..5fde34326dcf 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c | |||
@@ -152,7 +152,7 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf, | |||
152 | goto out; | 152 | goto out; |
153 | 153 | ||
154 | /* No partial writes. */ | 154 | /* No partial writes. */ |
155 | length = EINVAL; | 155 | length = -EINVAL; |
156 | if (*ppos != 0) | 156 | if (*ppos != 0) |
157 | goto out; | 157 | goto out; |
158 | 158 | ||
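The one-character selinuxfs fix above matters because kernel read/write handlers report errors as negative errno values: "length = EINVAL" would have made a rejected write look like a successful 22-byte one. A tiny sketch of the convention (example_write() is a hypothetical handler, not the selinuxfs code):

#include <linux/errno.h>
#include <linux/fs.h>

static ssize_t example_write(const char __user *buf, size_t count, loff_t *ppos)
{
	ssize_t length;

	/* No partial writes: reject a non-zero offset. */
	length = -EINVAL;		/* negative errno, not EINVAL (22) */
	if (*ppos != 0)
		goto out;

	length = count;			/* success: number of bytes consumed */
out:
	return length;
}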
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 4ca3d5d02436..a8a1e14272a1 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
@@ -1989,7 +1989,7 @@ static const struct pci_device_id azx_ids[] = { | |||
1989 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, | 1989 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, |
1990 | /* Sunrise Point */ | 1990 | /* Sunrise Point */ |
1991 | { PCI_DEVICE(0x8086, 0xa170), | 1991 | { PCI_DEVICE(0x8086, 0xa170), |
1992 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, | 1992 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, |
1993 | /* Sunrise Point-LP */ | 1993 | /* Sunrise Point-LP */ |
1994 | { PCI_DEVICE(0x8086, 0x9d70), | 1994 | { PCI_DEVICE(0x8086, 0x9d70), |
1995 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, | 1995 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE }, |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 526398a4a442..74382137b9f5 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -396,7 +396,7 @@ static void alc_auto_setup_eapd(struct hda_codec *codec, bool on) | |||
396 | { | 396 | { |
397 | /* We currently only handle front, HP */ | 397 | /* We currently only handle front, HP */ |
398 | static hda_nid_t pins[] = { | 398 | static hda_nid_t pins[] = { |
399 | 0x0f, 0x10, 0x14, 0x15, 0 | 399 | 0x0f, 0x10, 0x14, 0x15, 0x17, 0 |
400 | }; | 400 | }; |
401 | hda_nid_t *p; | 401 | hda_nid_t *p; |
402 | for (p = pins; *p; p++) | 402 | for (p = pins; *p; p++) |
@@ -5036,6 +5036,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { | |||
5036 | SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC), | 5036 | SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC), |
5037 | SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK), | 5037 | SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK), |
5038 | SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), | 5038 | SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), |
5039 | SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK), | ||
5039 | SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), | 5040 | SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), |
5040 | SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), | 5041 | SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K), |
5041 | SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), | 5042 | SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD), |