Diffstat (limited to 'arch')
254 files changed, 3758 insertions, 2741 deletions
diff --git a/arch/Kconfig b/arch/Kconfig
index 8d2ae24b9f4a..1feb169274fe 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -407,6 +407,12 @@ config CLONE_BACKWARDS2
 	help
 	  Architecture has the first two arguments of clone(2) swapped.
 
+config CLONE_BACKWARDS3
+	bool
+	help
+	  Architecture has tls passed as the 3rd argument of clone(2),
+	  not the 5th one.
+
 config ODD_RT_SIGACTION
 	bool
 	help
diff --git a/arch/arc/lib/strchr-700.S b/arch/arc/lib/strchr-700.S
index 99c10475d477..9c548c7cf001 100644
--- a/arch/arc/lib/strchr-700.S
+++ b/arch/arc/lib/strchr-700.S
@@ -39,9 +39,18 @@ ARC_ENTRY strchr
 	ld.a	r2,[r0,4]
 	sub	r12,r6,r7
 	bic	r12,r12,r6
+#ifdef __LITTLE_ENDIAN__
 	and	r7,r12,r4
 	breq	r7,0,.Loop ; For speed, we want this branch to be unaligned.
 	b	.Lfound_char ; Likewise this one.
+#else
+	and	r12,r12,r4
+	breq	r12,0,.Loop ; For speed, we want this branch to be unaligned.
+	lsr_s	r12,r12,7
+	bic	r2,r7,r6
+	b.d	.Lfound_char_b
+	and_s	r2,r2,r12
+#endif
 ; /* We require this code address to be unaligned for speed... */
 .Laligned:
 	ld_s	r2,[r0]
@@ -95,6 +104,7 @@ ARC_ENTRY strchr
 	lsr	r7,r7,7
 
 	bic	r2,r7,r6
+.Lfound_char_b:
 	norm	r2,r2
 	sub_s	r0,r0,4
 	asr_s	r2,r2,3
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 43594d5116ef..cd5c1c97b043 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -2064,8 +2064,7 @@ config KEXEC
 
 	  It is an ongoing process to be certain the hardware in a machine
 	  is properly shutdown, so do not be surprised if this code does not
-	  initially work for you. It may help to enable device hotplugging
-	  support.
+	  initially work for you.
 
 config ATAGS_PROC
 	bool "Export atags in procfs"
diff --git a/arch/arm/boot/dts/am335x-bone.dts b/arch/arm/boot/dts/am335x-bone.dts
index 444b4ede0d60..d318987d44a1 100644
--- a/arch/arm/boot/dts/am335x-bone.dts
+++ b/arch/arm/boot/dts/am335x-bone.dts
@@ -120,6 +120,35 @@
 			status = "okay";
 		};
 
+		musb: usb@47400000 {
+			status = "okay";
+
+			control@44e10000 {
+				status = "okay";
+			};
+
+			usb-phy@47401300 {
+				status = "okay";
+			};
+
+			usb-phy@47401b00 {
+				status = "okay";
+			};
+
+			usb@47401000 {
+				status = "okay";
+			};
+
+			usb@47401800 {
+				status = "okay";
+				dr_mode = "host";
+			};
+
+			dma-controller@07402000 {
+				status = "okay";
+			};
+		};
+
 		i2c0: i2c@44e0b000 {
 			pinctrl-names = "default";
 			pinctrl-0 = <&i2c0_pins>;
diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
index 3aee1a43782d..e8ec8756e498 100644
--- a/arch/arm/boot/dts/am335x-evm.dts
+++ b/arch/arm/boot/dts/am335x-evm.dts
@@ -171,6 +171,35 @@
 			};
 		};
 
+		musb: usb@47400000 {
+			status = "okay";
+
+			control@44e10000 {
+				status = "okay";
+			};
+
+			usb-phy@47401300 {
+				status = "okay";
+			};
+
+			usb-phy@47401b00 {
+				status = "okay";
+			};
+
+			usb@47401000 {
+				status = "okay";
+			};
+
+			usb@47401800 {
+				status = "okay";
+				dr_mode = "host";
+			};
+
+			dma-controller@07402000 {
+				status = "okay";
+			};
+		};
+
 		i2c1: i2c@4802a000 {
 			pinctrl-names = "default";
 			pinctrl-0 = <&i2c1_pins>;
diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
index 0c8ad173d2b0..4f339fa91c57 100644
--- a/arch/arm/boot/dts/am335x-evmsk.dts
+++ b/arch/arm/boot/dts/am335x-evmsk.dts
@@ -14,6 +14,7 @@
 /dts-v1/;
 
 #include "am33xx.dtsi"
+#include <dt-bindings/pwm/pwm.h>
 
 / {
 	model = "TI AM335x EVM-SK";
@@ -207,6 +208,22 @@
 			};
 		};
 
+		musb: usb@47400000 {
+			status = "okay";
+
+			control@44e10000 {
+				status = "okay";
+			};
+
+			usb-phy@47401300 {
+				status = "okay";
+			};
+
+			usb@47401000 {
+				status = "okay";
+			};
+		};
+
 		epwmss2: epwmss@48304000 {
 			status = "okay";
 
@@ -298,7 +315,7 @@
 
 	backlight {
 		compatible = "pwm-backlight";
-		pwms = <&ecap2 0 50000 1>;
+		pwms = <&ecap2 0 50000 PWM_POLARITY_INVERTED>;
 		brightness-levels = <0 58 61 66 75 90 125 170 255>;
 		default-brightness-level = <8>;
 	};
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 38b446ba1ce1..f9c5da9c7fe1 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -26,6 +26,10 @@
 		serial5 = &uart5;
 		d_can0 = &dcan0;
 		d_can1 = &dcan1;
+		usb0 = &usb0;
+		usb1 = &usb1;
+		phy0 = &usb0_phy;
+		phy1 = &usb1_phy;
 	};
 
 	cpus {
@@ -333,21 +337,132 @@
 			status = "disabled";
 		};
 
-		usb@47400000 {
-			compatible = "ti,musb-am33xx";
-			reg = <0x47400000 0x1000	/* usbss */
-			       0x47401000 0x800		/* musb instance 0 */
-			       0x47401800 0x800>;	/* musb instance 1 */
-			interrupts = <17		/* usbss */
-				      18		/* musb instance 0 */
-				      19>;		/* musb instance 1 */
-			multipoint = <1>;
-			num-eps = <16>;
-			ram-bits = <12>;
-			port0-mode = <3>;
-			port1-mode = <3>;
-			power = <250>;
+		usb: usb@47400000 {
+			compatible = "ti,am33xx-usb";
+			reg = <0x47400000 0x1000>;
+			ranges;
+			#address-cells = <1>;
+			#size-cells = <1>;
 			ti,hwmods = "usb_otg_hs";
+			status = "disabled";
+
+			ctrl_mod: control@44e10000 {
+				compatible = "ti,am335x-usb-ctrl-module";
+				reg = <0x44e10620 0x10
+					0x44e10648 0x4>;
+				reg-names = "phy_ctrl", "wakeup";
+				status = "disabled";
+			};
+
+			usb0_phy: usb-phy@47401300 {
+				compatible = "ti,am335x-usb-phy";
+				reg = <0x47401300 0x100>;
+				reg-names = "phy";
+				status = "disabled";
+				ti,ctrl_mod = <&ctrl_mod>;
+			};
+
+			usb0: usb@47401000 {
+				compatible = "ti,musb-am33xx";
+				status = "disabled";
+				reg = <0x47401400 0x400
+					0x47401000 0x200>;
+				reg-names = "mc", "control";
+
+				interrupts = <18>;
+				interrupt-names = "mc";
+				dr_mode = "otg";
+				mentor,multipoint = <1>;
+				mentor,num-eps = <16>;
+				mentor,ram-bits = <12>;
+				mentor,power = <500>;
+				phys = <&usb0_phy>;
+
+				dmas = <&cppi41dma 0 0 &cppi41dma 1 0
+					&cppi41dma 2 0 &cppi41dma 3 0
+					&cppi41dma 4 0 &cppi41dma 5 0
+					&cppi41dma 6 0 &cppi41dma 7 0
+					&cppi41dma 8 0 &cppi41dma 9 0
+					&cppi41dma 10 0 &cppi41dma 11 0
+					&cppi41dma 12 0 &cppi41dma 13 0
+					&cppi41dma 14 0 &cppi41dma 0 1
+					&cppi41dma 1 1 &cppi41dma 2 1
+					&cppi41dma 3 1 &cppi41dma 4 1
+					&cppi41dma 5 1 &cppi41dma 6 1
+					&cppi41dma 7 1 &cppi41dma 8 1
+					&cppi41dma 9 1 &cppi41dma 10 1
+					&cppi41dma 11 1 &cppi41dma 12 1
+					&cppi41dma 13 1 &cppi41dma 14 1>;
+				dma-names =
+					"rx1", "rx2", "rx3", "rx4", "rx5", "rx6", "rx7",
+					"rx8", "rx9", "rx10", "rx11", "rx12", "rx13",
+					"rx14", "rx15",
+					"tx1", "tx2", "tx3", "tx4", "tx5", "tx6", "tx7",
+					"tx8", "tx9", "tx10", "tx11", "tx12", "tx13",
+					"tx14", "tx15";
+			};
+
+			usb1_phy: usb-phy@47401b00 {
+				compatible = "ti,am335x-usb-phy";
+				reg = <0x47401b00 0x100>;
+				reg-names = "phy";
+				status = "disabled";
+				ti,ctrl_mod = <&ctrl_mod>;
+			};
+
+			usb1: usb@47401800 {
+				compatible = "ti,musb-am33xx";
+				status = "disabled";
+				reg = <0x47401c00 0x400
+					0x47401800 0x200>;
+				reg-names = "mc", "control";
+				interrupts = <19>;
+				interrupt-names = "mc";
+				dr_mode = "otg";
+				mentor,multipoint = <1>;
+				mentor,num-eps = <16>;
+				mentor,ram-bits = <12>;
+				mentor,power = <500>;
+				phys = <&usb1_phy>;
+
+				dmas = <&cppi41dma 15 0 &cppi41dma 16 0
+					&cppi41dma 17 0 &cppi41dma 18 0
+					&cppi41dma 19 0 &cppi41dma 20 0
+					&cppi41dma 21 0 &cppi41dma 22 0
+					&cppi41dma 23 0 &cppi41dma 24 0
+					&cppi41dma 25 0 &cppi41dma 26 0
+					&cppi41dma 27 0 &cppi41dma 28 0
+					&cppi41dma 29 0 &cppi41dma 15 1
+					&cppi41dma 16 1 &cppi41dma 17 1
+					&cppi41dma 18 1 &cppi41dma 19 1
+					&cppi41dma 20 1 &cppi41dma 21 1
+					&cppi41dma 22 1 &cppi41dma 23 1
+					&cppi41dma 24 1 &cppi41dma 25 1
+					&cppi41dma 26 1 &cppi41dma 27 1
+					&cppi41dma 28 1 &cppi41dma 29 1>;
+				dma-names =
+					"rx1", "rx2", "rx3", "rx4", "rx5", "rx6", "rx7",
+					"rx8", "rx9", "rx10", "rx11", "rx12", "rx13",
+					"rx14", "rx15",
+					"tx1", "tx2", "tx3", "tx4", "tx5", "tx6", "tx7",
+					"tx8", "tx9", "tx10", "tx11", "tx12", "tx13",
+					"tx14", "tx15";
+			};
+
+			cppi41dma: dma-controller@07402000 {
+				compatible = "ti,am3359-cppi41";
+				reg = <0x47400000 0x1000
+					0x47402000 0x1000
+					0x47403000 0x1000
+					0x47404000 0x4000>;
+				reg-names = "glue", "controller", "scheduler", "queuemgr";
+				interrupts = <17>;
+				interrupt-names = "glue";
+				#dma-cells = <2>;
+				#dma-channels = <30>;
+				#dma-requests = <256>;
+				status = "disabled";
+			};
 		};
 
 		epwmss0: epwmss@48300000 {
diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts
index d59b70c6a6a0..3d77dbe406f4 100644
--- a/arch/arm/boot/dts/at91sam9n12ek.dts
+++ b/arch/arm/boot/dts/at91sam9n12ek.dts
@@ -14,11 +14,11 @@
 	compatible = "atmel,at91sam9n12ek", "atmel,at91sam9n12", "atmel,at91sam9";
 
 	chosen {
-		bootargs = "mem=128M console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2";
+		bootargs = "console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=jffs2";
 	};
 
 	memory {
-		reg = <0x20000000 0x10000000>;
+		reg = <0x20000000 0x8000000>;
 	};
 
 	clocks {
diff --git a/arch/arm/boot/dts/at91sam9x5ek.dtsi b/arch/arm/boot/dts/at91sam9x5ek.dtsi
index b753855b2058..49e3c45818c2 100644
--- a/arch/arm/boot/dts/at91sam9x5ek.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5ek.dtsi
@@ -94,8 +94,9 @@
 
 		usb0: ohci@00600000 {
 			status = "okay";
-			num-ports = <2>;
-			atmel,vbus-gpio = <&pioD 19 GPIO_ACTIVE_LOW
+			num-ports = <3>;
+			atmel,vbus-gpio = <0 /* &pioD 18 GPIO_ACTIVE_LOW *//* Activate to have access to port A */
+					   &pioD 19 GPIO_ACTIVE_LOW
 					   &pioD 20 GPIO_ACTIVE_LOW
 					  >;
 		};
diff --git a/arch/arm/boot/dts/exynos5440.dtsi b/arch/arm/boot/dts/exynos5440.dtsi
index ff7f5d855845..586134e2a382 100644
--- a/arch/arm/boot/dts/exynos5440.dtsi
+++ b/arch/arm/boot/dts/exynos5440.dtsi
@@ -248,6 +248,7 @@
 		#interrupt-cells = <1>;
 		interrupt-map-mask = <0 0 0 0>;
 		interrupt-map = <0x0 0 &gic 53>;
+		num-lanes = <4>;
 	};
 
 	pcie@2a0000 {
@@ -267,5 +268,6 @@
 		#interrupt-cells = <1>;
 		interrupt-map-mask = <0 0 0 0>;
 		interrupt-map = <0x0 0 &gic 56>;
+		num-lanes = <4>;
 	};
 };
diff --git a/arch/arm/boot/dts/imx28-evk.dts b/arch/arm/boot/dts/imx28-evk.dts
index e035f4664b97..15715d921d14 100644
--- a/arch/arm/boot/dts/imx28-evk.dts
+++ b/arch/arm/boot/dts/imx28-evk.dts
@@ -220,6 +220,7 @@
 		auart0: serial@8006a000 {
 			pinctrl-names = "default";
 			pinctrl-0 = <&auart0_pins_a>;
+			fsl,uart-has-rtscts;
 			status = "okay";
 		};
 
diff --git a/arch/arm/boot/dts/msm8660-surf.dts b/arch/arm/boot/dts/msm8660-surf.dts
index cdc010e0f93e..386d42870215 100644
--- a/arch/arm/boot/dts/msm8660-surf.dts
+++ b/arch/arm/boot/dts/msm8660-surf.dts
@@ -38,7 +38,7 @@
 	};
 
 	serial@19c40000 {
-		compatible = "qcom,msm-hsuart", "qcom,msm-uart";
+		compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
 		reg = <0x19c40000 0x1000>,
 		      <0x19c00000 0x1000>;
 		interrupts = <0 195 0x0>;
diff --git a/arch/arm/boot/dts/msm8960-cdp.dts b/arch/arm/boot/dts/msm8960-cdp.dts
index db2060c46540..93e9f7e0b7ad 100644
--- a/arch/arm/boot/dts/msm8960-cdp.dts
+++ b/arch/arm/boot/dts/msm8960-cdp.dts
@@ -26,7 +26,7 @@
 		cpu-offset = <0x80000>;
 	};
 
-	msmgpio: gpio@fd510000 {
+	msmgpio: gpio@800000 {
 		compatible = "qcom,msm-gpio";
 		gpio-controller;
 		#gpio-cells = <2>;
@@ -34,11 +34,11 @@
 		interrupts = <0 32 0x4>;
 		interrupt-controller;
 		#interrupt-cells = <2>;
-		reg = <0xfd510000 0x4000>;
+		reg = <0x800000 0x4000>;
 	};
 
 	serial@16440000 {
-		compatible = "qcom,msm-hsuart", "qcom,msm-uart";
+		compatible = "qcom,msm-uartdm-v1.3", "qcom,msm-uartdm";
 		reg = <0x16440000 0x1000>,
 		      <0x16400000 0x1000>;
 		interrupts = <0 154 0x0>;
diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts
index 08b72678abff..65d7b601651c 100644
--- a/arch/arm/boot/dts/omap5-uevm.dts
+++ b/arch/arm/boot/dts/omap5-uevm.dts
@@ -235,7 +235,7 @@
 };
 
 &mmc1 {
-	vmmc-supply = <&vmmcsd_fixed>;
+	vmmc-supply = <&ldo9_reg>;
 	bus-width = <4>;
 };
 
@@ -282,6 +282,7 @@
 
 		regulators {
 			smps123_reg: smps123 {
+				/* VDD_OPP_MPU */
 				regulator-name = "smps123";
 				regulator-min-microvolt = < 600000>;
 				regulator-max-microvolt = <1500000>;
@@ -290,6 +291,7 @@
 			};
 
 			smps45_reg: smps45 {
+				/* VDD_OPP_MM */
 				regulator-name = "smps45";
 				regulator-min-microvolt = < 600000>;
 				regulator-max-microvolt = <1310000>;
@@ -298,6 +300,7 @@
 			};
 
 			smps6_reg: smps6 {
+				/* VDD_DDR3 - over VDD_SMPS6 */
 				regulator-name = "smps6";
 				regulator-min-microvolt = <1200000>;
 				regulator-max-microvolt = <1200000>;
@@ -306,6 +309,7 @@
 			};
 
 			smps7_reg: smps7 {
+				/* VDDS_1v8_OMAP over VDDS_1v8_MAIN */
 				regulator-name = "smps7";
 				regulator-min-microvolt = <1800000>;
 				regulator-max-microvolt = <1800000>;
@@ -314,6 +318,7 @@
 			};
 
 			smps8_reg: smps8 {
+				/* VDD_OPP_CORE */
 				regulator-name = "smps8";
 				regulator-min-microvolt = < 600000>;
 				regulator-max-microvolt = <1310000>;
@@ -322,15 +327,15 @@
 			};
 
 			smps9_reg: smps9 {
+				/* VDDA_2v1_AUD over VDD_2v1 */
 				regulator-name = "smps9";
 				regulator-min-microvolt = <2100000>;
 				regulator-max-microvolt = <2100000>;
-				regulator-always-on;
-				regulator-boot-on;
 				ti,smps-range = <0x80>;
 			};
 
 			smps10_reg: smps10 {
+				/* VBUS_5V_OTG */
 				regulator-name = "smps10";
 				regulator-min-microvolt = <5000000>;
 				regulator-max-microvolt = <5000000>;
@@ -339,38 +344,40 @@
 			};
 
 			ldo1_reg: ldo1 {
+				/* VDDAPHY_CAM: vdda_csiport */
 				regulator-name = "ldo1";
-				regulator-min-microvolt = <2800000>;
-				regulator-max-microvolt = <2800000>;
-				regulator-always-on;
-				regulator-boot-on;
+				regulator-min-microvolt = <1500000>;
+				regulator-max-microvolt = <1800000>;
 			};
 
 			ldo2_reg: ldo2 {
+				/* VCC_2V8_DISP: Does not go anywhere */
 				regulator-name = "ldo2";
-				regulator-min-microvolt = <2900000>;
-				regulator-max-microvolt = <2900000>;
-				regulator-always-on;
-				regulator-boot-on;
+				regulator-min-microvolt = <2800000>;
+				regulator-max-microvolt = <2800000>;
+				/* Unused */
+				status = "disabled";
 			};
 
 			ldo3_reg: ldo3 {
+				/* VDDAPHY_MDM: vdda_lli */
 				regulator-name = "ldo3";
-				regulator-min-microvolt = <3000000>;
-				regulator-max-microvolt = <3000000>;
-				regulator-always-on;
+				regulator-min-microvolt = <1500000>;
+				regulator-max-microvolt = <1500000>;
 				regulator-boot-on;
+				/* Only if Modem is used */
+				status = "disabled";
 			};
 
 			ldo4_reg: ldo4 {
+				/* VDDAPHY_DISP: vdda_dsiport/hdmi */
 				regulator-name = "ldo4";
-				regulator-min-microvolt = <2200000>;
-				regulator-max-microvolt = <2200000>;
-				regulator-always-on;
-				regulator-boot-on;
+				regulator-min-microvolt = <1500000>;
+				regulator-max-microvolt = <1800000>;
 			};
 
 			ldo5_reg: ldo5 {
+				/* VDDA_1V8_PHY: usb/sata/hdmi.. */
 				regulator-name = "ldo5";
 				regulator-min-microvolt = <1800000>;
 				regulator-max-microvolt = <1800000>;
@@ -379,38 +386,43 @@
 			};
 
 			ldo6_reg: ldo6 {
+				/* VDDS_1V2_WKUP: hsic/ldo_emu_wkup */
 				regulator-name = "ldo6";
-				regulator-min-microvolt = <1500000>;
-				regulator-max-microvolt = <1500000>;
+				regulator-min-microvolt = <1200000>;
+				regulator-max-microvolt = <1200000>;
 				regulator-always-on;
 				regulator-boot-on;
 			};
 
 			ldo7_reg: ldo7 {
+				/* VDD_VPP: vpp1 */
 				regulator-name = "ldo7";
-				regulator-min-microvolt = <1500000>;
-				regulator-max-microvolt = <1500000>;
-				regulator-always-on;
-				regulator-boot-on;
+				regulator-min-microvolt = <2000000>;
+				regulator-max-microvolt = <2000000>;
+				/* Only for efuse reprograming! */
+				status = "disabled";
 			};
 
 			ldo8_reg: ldo8 {
+				/* VDD_3v0: Does not go anywhere */
 				regulator-name = "ldo8";
-				regulator-min-microvolt = <1500000>;
-				regulator-max-microvolt = <1500000>;
-				regulator-always-on;
+				regulator-min-microvolt = <3000000>;
+				regulator-max-microvolt = <3000000>;
 				regulator-boot-on;
+				/* Unused */
+				status = "disabled";
 			};
 
 			ldo9_reg: ldo9 {
+				/* VCC_DV_SDIO: vdds_sdcard */
 				regulator-name = "ldo9";
 				regulator-min-microvolt = <1800000>;
-				regulator-max-microvolt = <3300000>;
-				regulator-always-on;
+				regulator-max-microvolt = <3000000>;
 				regulator-boot-on;
 			};
 
 			ldoln_reg: ldoln {
+				/* VDDA_1v8_REF: vdds_osc/mm_l4per.. */
 				regulator-name = "ldoln";
 				regulator-min-microvolt = <1800000>;
 				regulator-max-microvolt = <1800000>;
@@ -419,12 +431,20 @@
 			};
 
 			ldousb_reg: ldousb {
+				/* VDDA_3V_USB: VDDA_USBHS33 */
 				regulator-name = "ldousb";
 				regulator-min-microvolt = <3250000>;
 				regulator-max-microvolt = <3250000>;
 				regulator-always-on;
 				regulator-boot-on;
 			};
+
+			regen3_reg: regen3 {
+				/* REGEN3 controls LDO9 supply to card */
+				regulator-name = "regen3";
+				regulator-always-on;
+				regulator-boot-on;
+			};
 		};
 	};
 };
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index e643620417a9..07be2cd7b318 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -644,7 +644,7 @@
 			utmi-mode = <2>;
 			ranges;
 			dwc3@4a030000 {
-				compatible = "synopsys,dwc3";
+				compatible = "snps,dwc3";
 				reg = <0x4a030000 0x1000>;
 				interrupts = <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>;
 				usb-phy = <&usb2_phy>, <&usb3_phy>;
diff --git a/arch/arm/boot/dts/stih41x.dtsi b/arch/arm/boot/dts/stih41x.dtsi
index 7321403cab8a..f5b9898d9c6e 100644
--- a/arch/arm/boot/dts/stih41x.dtsi
+++ b/arch/arm/boot/dts/stih41x.dtsi
@@ -6,10 +6,12 @@
 		#address-cells = <1>;
 		#size-cells = <0>;
 		cpu@0 {
+			device_type = "cpu";
 			compatible = "arm,cortex-a9";
 			reg = <0>;
 		};
 		cpu@1 {
+			device_type = "cpu";
 			compatible = "arm,cortex-a9";
 			reg = <1>;
 		};
diff --git a/arch/arm/boot/dts/tegra20-colibri-512.dtsi b/arch/arm/boot/dts/tegra20-colibri-512.dtsi
index 2fcb3f2ca160..5592be6f2f7a 100644
--- a/arch/arm/boot/dts/tegra20-colibri-512.dtsi
+++ b/arch/arm/boot/dts/tegra20-colibri-512.dtsi
@@ -457,6 +457,7 @@
 	};
 
 	usb-phy@c5004000 {
+		status = "okay";
 		nvidia,phy-reset-gpio = <&gpio TEGRA_GPIO(V, 1)
 			GPIO_ACTIVE_LOW>;
 	};
diff --git a/arch/arm/boot/dts/tegra20-seaboard.dts b/arch/arm/boot/dts/tegra20-seaboard.dts
index 365760b33a26..c8242533268f 100644
--- a/arch/arm/boot/dts/tegra20-seaboard.dts
+++ b/arch/arm/boot/dts/tegra20-seaboard.dts
@@ -566,7 +566,6 @@
 
 	usb@c5000000 {
 		status = "okay";
-		nvidia,vbus-gpio = <&gpio TEGRA_GPIO(D, 0) GPIO_ACTIVE_HIGH>;
 		dr_mode = "otg";
 	};
 
@@ -830,6 +829,8 @@
 			regulator-max-microvolt = <5000000>;
 			enable-active-high;
 			gpio = <&gpio 24 0>; /* PD0 */
+			regulator-always-on;
+			regulator-boot-on;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/tegra20-trimslice.dts b/arch/arm/boot/dts/tegra20-trimslice.dts
index ed4b901b0227..1e9d33adb925 100644
--- a/arch/arm/boot/dts/tegra20-trimslice.dts
+++ b/arch/arm/boot/dts/tegra20-trimslice.dts
@@ -312,7 +312,6 @@
 
 	usb@c5000000 {
 		status = "okay";
-		nvidia,vbus-gpio = <&gpio TEGRA_GPIO(V, 2) GPIO_ACTIVE_HIGH>;
 	};
 
 	usb-phy@c5000000 {
@@ -412,6 +411,8 @@
 			regulator-max-microvolt = <5000000>;
 			enable-active-high;
 			gpio = <&gpio 170 0>; /* PV2 */
+			regulator-always-on;
+			regulator-boot-on;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/tegra20-whistler.dts b/arch/arm/boot/dts/tegra20-whistler.dts
index ab67c94db280..c703197dca6e 100644
--- a/arch/arm/boot/dts/tegra20-whistler.dts
+++ b/arch/arm/boot/dts/tegra20-whistler.dts
@@ -509,7 +509,6 @@
 
 	usb@c5000000 {
 		status = "okay";
-		nvidia,vbus-gpio = <&tca6416 0 GPIO_ACTIVE_HIGH>;
 	};
 
 	usb-phy@c5000000 {
@@ -519,7 +518,6 @@
 
 	usb@c5008000 {
 		status = "okay";
-		nvidia,vbus-gpio = <&tca6416 1 GPIO_ACTIVE_HIGH>;
 	};
 
 	usb-phy@c5008000 {
@@ -588,6 +586,8 @@
 			regulator-max-microvolt = <5000000>;
 			enable-active-high;
 			gpio = <&tca6416 0 0>; /* GPIO_PMU0 */
+			regulator-always-on;
+			regulator-boot-on;
 		};
 
 		vbus3_reg: regulator@3 {
@@ -598,6 +598,8 @@
 			regulator-max-microvolt = <5000000>;
 			enable-active-high;
 			gpio = <&tca6416 1 0>; /* GPIO_PMU1 */
+			regulator-always-on;
+			regulator-boot-on;
 		};
 	};
 
diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi
index 9653fd8288d2..e4570834512e 100644
--- a/arch/arm/boot/dts/tegra20.dtsi
+++ b/arch/arm/boot/dts/tegra20.dtsi
@@ -477,13 +477,13 @@
 			 <&tegra_car TEGRA20_CLK_USBD>;
 		clock-names = "reg", "pll_u", "timer", "utmi-pads";
 		nvidia,has-legacy-mode;
-		hssync_start_delay = <9>;
-		idle_wait_delay = <17>;
-		elastic_limit = <16>;
-		term_range_adj = <6>;
-		xcvr_setup = <9>;
-		xcvr_lsfslew = <1>;
-		xcvr_lsrslew = <1>;
+		nvidia,hssync-start-delay = <9>;
+		nvidia,idle-wait-delay = <17>;
+		nvidia,elastic-limit = <16>;
+		nvidia,term-range-adj = <6>;
+		nvidia,xcvr-setup = <9>;
+		nvidia,xcvr-lsfslew = <1>;
+		nvidia,xcvr-lsrslew = <1>;
 		status = "disabled";
 	};
 
@@ -527,13 +527,13 @@
 			 <&tegra_car TEGRA20_CLK_CLK_M>,
 			 <&tegra_car TEGRA20_CLK_USBD>;
 		clock-names = "reg", "pll_u", "timer", "utmi-pads";
-		hssync_start_delay = <9>;
-		idle_wait_delay = <17>;
-		elastic_limit = <16>;
-		term_range_adj = <6>;
-		xcvr_setup = <9>;
-		xcvr_lsfslew = <2>;
-		xcvr_lsrslew = <2>;
+		nvidia,hssync-start-delay = <9>;
+		nvidia,idle-wait-delay = <17>;
+		nvidia,elastic-limit = <16>;
+		nvidia,term-range-adj = <6>;
+		nvidia,xcvr-setup = <9>;
+		nvidia,xcvr-lsfslew = <2>;
+		nvidia,xcvr-lsrslew = <2>;
 		status = "disabled";
 	};
 
diff --git a/arch/arm/boot/dts/wm8850-w70v2.dts b/arch/arm/boot/dts/wm8850-w70v2.dts
index 90e913fb64be..7a563d2523b0 100644
--- a/arch/arm/boot/dts/wm8850-w70v2.dts
+++ b/arch/arm/boot/dts/wm8850-w70v2.dts
@@ -11,13 +11,14 @@
 
 /dts-v1/;
 /include/ "wm8850.dtsi"
+#include <dt-bindings/pwm/pwm.h>
 
 / {
 	model = "Wondermedia WM8850-W70v2 Tablet";
 
 	backlight {
 		compatible = "pwm-backlight";
-		pwms = <&pwm 0 50000 1>;	/* duty inverted */
+		pwms = <&pwm 0 50000 PWM_POLARITY_INVERTED>;
 
 		brightness-levels = <0 40 60 80 100 130 190 255>;
 		default-brightness-level = <5>;
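Note: both backlight changes above (here and in am335x-evmsk.dts) replace the bare flags value 1 with the named constant pulled in via the new #include. A minimal sketch of what <dt-bindings/pwm/pwm.h> is assumed to provide (the include-guard name here is illustrative; check the in-tree header for the authoritative definition):

/* Sketch of the dt-bindings PWM flag assumed by the pwms entries above. */
#ifndef _DT_BINDINGS_PWM_PWM_H
#define _DT_BINDINGS_PWM_PWM_H

#define PWM_POLARITY_INVERTED	(1 << 0)	/* bit 0 of the flags cell */

#endif

The third cell of the pwms specifier is the period in nanoseconds and the last cell carries these flags, so <&pwm 0 50000 PWM_POLARITY_INVERTED> keeps the previous behaviour while naming the inverted duty cycle explicitly.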
diff --git a/arch/arm/include/asm/arch_timer.h b/arch/arm/include/asm/arch_timer.h
index e406d575c94f..5665134bfa3e 100644
--- a/arch/arm/include/asm/arch_timer.h
+++ b/arch/arm/include/asm/arch_timer.h
@@ -17,7 +17,8 @@ int arch_timer_arch_init(void);
  * nicely work out which register we want, and chuck away the rest of
  * the code. At least it does so with a recent GCC (4.6.3).
  */
-static inline void arch_timer_reg_write(const int access, const int reg, u32 val)
+static __always_inline
+void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val)
 {
 	if (access == ARCH_TIMER_PHYS_ACCESS) {
 		switch (reg) {
@@ -28,9 +29,7 @@ static inline void arch_timer_reg_write(const int access, const int reg, u32 val
 			asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
 			break;
 		}
-	}
-
-	if (access == ARCH_TIMER_VIRT_ACCESS) {
+	} else if (access == ARCH_TIMER_VIRT_ACCESS) {
 		switch (reg) {
 		case ARCH_TIMER_REG_CTRL:
 			asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val));
@@ -44,7 +43,8 @@ static inline void arch_timer_reg_write(const int access, const int reg, u32 val
 	isb();
 }
 
-static inline u32 arch_timer_reg_read(const int access, const int reg)
+static __always_inline
+u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg)
 {
 	u32 val = 0;
 
@@ -57,9 +57,7 @@ static inline u32 arch_timer_reg_read(const int access, const int reg)
 			asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
 			break;
 		}
-	}
-
-	if (access == ARCH_TIMER_VIRT_ACCESS) {
+	} else if (access == ARCH_TIMER_VIRT_ACCESS) {
 		switch (reg) {
 		case ARCH_TIMER_REG_CTRL:
 			asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
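The switch to __always_inline matters because, as the comment in the header says, every caller passes compile-time-constant access/reg values, so the compiler can fold each accessor down to a single mcr/mrc. A hedged sketch of a typical caller (the function name and the enable-bit assumption are illustrative, not taken from the arm_arch_timer driver):

/* Illustrative caller: with constant arguments the switch statements
 * collapse and only the one matching coprocessor access is emitted. */
static inline void timer_disable_phys_sketch(void)
{
	u32 ctrl = arch_timer_reg_read_cp15(ARCH_TIMER_PHYS_ACCESS,
					    ARCH_TIMER_REG_CTRL);
	ctrl &= ~1U;	/* assumption: bit 0 is the enable bit */
	arch_timer_reg_write_cp15(ARCH_TIMER_PHYS_ACCESS,
				  ARCH_TIMER_REG_CTRL, ctrl);
}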
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index 6462a721ebd4..a252c0bfacf5 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -88,4 +88,7 @@ static inline u32 mpidr_hash_size(void)
 {
 	return 1 << mpidr_hash.bits;
 }
+
+extern int platform_can_cpu_hotplug(void);
+
 #endif
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index f8b8965666e9..b07c09e5a0ac 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -107,7 +107,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 		"	subs	%1, %0, %0, ror #16\n"
 		"	addeq	%0, %0, %4\n"
 		"	strexeq	%2, %0, [%3]"
-		: "=&r" (slock), "=&r" (contended), "=r" (res)
+		: "=&r" (slock), "=&r" (contended), "=&r" (res)
 		: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
 		: "cc");
 	} while (res);
@@ -168,17 +168,20 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 
 static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-	unsigned long tmp;
+	unsigned long contended, res;
 
-	__asm__ __volatile__(
-"	ldrex	%0, [%1]\n"
-"	teq	%0, #0\n"
-"	strexeq	%0, %2, [%1]"
-	: "=&r" (tmp)
-	: "r" (&rw->lock), "r" (0x80000000)
-	: "cc");
+	do {
+		__asm__ __volatile__(
+		"	ldrex	%0, [%2]\n"
+		"	mov	%1, #0\n"
+		"	teq	%0, #0\n"
+		"	strexeq	%1, %3, [%2]"
+		: "=&r" (contended), "=&r" (res)
+		: "r" (&rw->lock), "r" (0x80000000)
+		: "cc");
+	} while (res);
 
-	if (tmp == 0) {
+	if (!contended) {
 		smp_mb();
 		return 1;
 	} else {
@@ -254,18 +257,26 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 
 static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
-	unsigned long tmp, tmp2 = 1;
+	unsigned long contended, res;
 
-	__asm__ __volatile__(
-"	ldrex	%0, [%2]\n"
-"	adds	%0, %0, #1\n"
-"	strexpl	%1, %0, [%2]\n"
-	: "=&r" (tmp), "+r" (tmp2)
-	: "r" (&rw->lock)
-	: "cc");
+	do {
+		__asm__ __volatile__(
+		"	ldrex	%0, [%2]\n"
+		"	mov	%1, #0\n"
+		"	adds	%0, %0, #1\n"
+		"	strexpl	%1, %0, [%2]"
+		: "=&r" (contended), "=&r" (res)
+		: "r" (&rw->lock)
+		: "cc");
+	} while (res);
 
-	smp_mb();
-	return tmp2 == 0;
+	/* If the lock is negative, then it is already held for write. */
+	if (contended < 0x80000000) {
+		smp_mb();
+		return 1;
+	} else {
+		return 0;
+	}
 }
 
 /* read_can_lock - would read_trylock() succeed? */
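The common thread in these trylock changes is that a failed strex (the exclusive store losing its reservation) is not the same thing as the lock being contended, so the exclusive sequence is retried until strex itself reports success, and only then does the observed value decide the return code. A rough C rendering of that shape, with ldrex()/strex() as hypothetical helpers standing in for the inline assembly (not real kernel APIs):

/* Pseudo-C sketch of the retry pattern; ldrex() returns the current value,
 * strex() returns 0 on success or 1 if the exclusive reservation was lost. */
static int write_trylock_sketch(unsigned long *lock)
{
	unsigned long contended, res;

	do {
		contended = ldrex(lock);		/* observe current value */
		res = 0;
		if (contended == 0)
			res = strex(lock, 0x80000000);	/* claim for write */
	} while (res);					/* retry only on strex failure */

	if (!contended) {
		smp_mb();				/* acquire barrier */
		return 1;
	}
	return 0;
}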
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 46e7cfb3e721..0baf7f0d9394 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -43,6 +43,7 @@ struct mmu_gather {
 	struct mm_struct	*mm;
 	unsigned int		fullmm;
 	struct vm_area_struct	*vma;
+	unsigned long		start, end;
 	unsigned long		range_start;
 	unsigned long		range_end;
 	unsigned int		nr;
@@ -107,10 +108,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
 	tlb->mm = mm;
-	tlb->fullmm = fullmm;
+	tlb->fullmm = !(start | (end+1));
+	tlb->start = start;
+	tlb->end = end;
 	tlb->vma = NULL;
 	tlb->max = ARRAY_SIZE(tlb->local);
 	tlb->pages = tlb->local;
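The new fullmm computation encodes "whole address space" without an extra flag: !(start | (end + 1)) is 1 only when start == 0 and end == ~0UL, the values a full-mm teardown passes, and 0 for any bounded range. A tiny standalone check of that identity:

#include <assert.h>

int main(void)
{
	unsigned long start = 0, end = ~0UL;

	/* full-mm teardown: start == 0 and end == ~0UL => fullmm == 1 */
	assert(!(start | (end + 1)) == 1);

	/* any bounded range leaves fullmm == 0 */
	start = 0x1000;
	end = 0x2000;
	assert(!(start | (end + 1)) == 0);
	return 0;
}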
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index 261fcc826169..88e14d74b6de 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -525,11 +525,6 @@ void pci_common_init_dev(struct device *parent, struct hw_pci *hw)
 		 * Assign resources.
 		 */
 		pci_bus_assign_resources(bus);
-
-		/*
-		 * Enable bridges
-		 */
-		pci_enable_bridges(bus);
 	}
 
 	/*
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index 5859c8bc727c..2ee8a17d2b01 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -169,6 +169,11 @@ void __init arm_dt_init_cpu_maps(void)
 	}
 }
 
+bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
+{
+	return (phys_id & MPIDR_HWID_BITMASK) == cpu_logical_map(cpu);
+}
+
 /**
  * setup_machine_fdt - Machine setup when an dtb was passed to the kernel
  * @dt_phys: physical address of dt blob
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index d40d0ef389db..9cbe70c8b0ef 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -357,7 +357,8 @@ ENDPROC(__pabt_svc)
 	.endm
 
 	.macro	kuser_cmpxchg_check
-#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
+    !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
 #ifndef CONFIG_MMU
 #warning "NPTL on non MMU needs fixing"
 #else
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index 25442f451148..918875d96d5d 100644
--- a/arch/arm/kernel/fiq.c
+++ b/arch/arm/kernel/fiq.c
@@ -84,17 +84,14 @@ int show_fiq_list(struct seq_file *p, int prec)
 
 void set_fiq_handler(void *start, unsigned int length)
 {
-#if defined(CONFIG_CPU_USE_DOMAINS)
-	void *base = (void *)0xffff0000;
-#else
 	void *base = vectors_page;
-#endif
 	unsigned offset = FIQ_OFFSET;
 
 	memcpy(base + offset, start, length);
+	if (!cache_is_vipt_nonaliasing())
+		flush_icache_range((unsigned long)base + offset, offset +
+				   length);
 	flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
-	if (!vectors_high())
-		flush_icache_range(offset, offset + length);
 }
 
 int claim_fiq(struct fiq_handler *f)
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 4fb074c446bf..57221e349a7c 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -15,6 +15,7 @@
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 #include <asm/mach-types.h>
+#include <asm/smp_plat.h>
 #include <asm/system_misc.h>
 
 extern const unsigned char relocate_new_kernel[];
@@ -39,6 +40,14 @@ int machine_kexec_prepare(struct kimage *image)
 	int i, err;
 
 	/*
+	 * Validate that if the current HW supports SMP, then the SW supports
+	 * and implements CPU hotplug for the current HW. If not, we won't be
+	 * able to kexec reliably, so fail the prepare operation.
+	 */
+	if (num_possible_cpus() > 1 && !platform_can_cpu_hotplug())
+		return -EINVAL;
+
+	/*
 	 * No segment at default ATAGs address. try to locate
 	 * a dtb using magic.
 	 */
@@ -73,6 +82,7 @@ void machine_crash_nonpanic_core(void *unused)
 	crash_save_cpu(&regs, smp_processor_id());
 	flush_cache_all();
 
+	set_cpu_online(smp_processor_id(), false);
 	atomic_dec(&waiting_for_crash_ipi);
 	while (1)
 		cpu_relax();
@@ -134,10 +144,13 @@ void machine_kexec(struct kimage *image)
 	unsigned long reboot_code_buffer_phys;
 	void *reboot_code_buffer;
 
-	if (num_online_cpus() > 1) {
-		pr_err("kexec: error: multiple CPUs still online\n");
-		return;
-	}
+	/*
+	 * This can only happen if machine_shutdown() failed to disable some
+	 * CPU, and that can only happen if the checks in
+	 * machine_kexec_prepare() were not correct. If this fails, we can't
+	 * reliably kexec anyway, so BUG_ON is appropriate.
+	 */
+	BUG_ON(num_online_cpus() > 1);
 
 	page_list = image->head & PAGE_MASK;
 
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index d9f5cd4e533f..e186ee1e63f6 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -53,7 +53,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
 static int
 armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
 {
-	int mapping = (*event_map)[config];
+	int mapping;
+
+	if (config >= PERF_COUNT_HW_MAX)
+		return -EINVAL;
+
+	mapping = (*event_map)[config];
 	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
 }
 
@@ -253,6 +258,9 @@ validate_event(struct pmu_hw_events *hw_events,
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	struct pmu *leader_pmu = event->group_leader->pmu;
 
+	if (is_software_event(event))
+		return 1;
+
 	if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
 		return 1;
 
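The first hunk guards the table lookup because config arrives from userspace as a raw u64 and was previously used as an array index unchecked. A reduced sketch of the pattern, with an illustrative table name and size rather than the driver's own:

#include <errno.h>

/* Illustrative bounds-check-before-index pattern; not the real event table. */
#define NR_HW_EVENTS 10

static const unsigned hw_event_map[NR_HW_EVENTS] = { 0 };

static int map_hw_event(unsigned long long config)
{
	if (config >= NR_HW_EVENTS)	/* reject out-of-range input early */
		return -EINVAL;

	return hw_event_map[config];	/* now a safe array index */
}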
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 536c85fe72a8..94f6b05f9e24 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -462,7 +462,7 @@ int in_gate_area_no_mm(unsigned long addr)
 {
 	return in_gate_area(NULL, addr);
 }
-#define is_gate_vma(vma)	((vma) = &gate_vma)
+#define is_gate_vma(vma)	((vma) == &gate_vma)
 #else
 #define is_gate_vma(vma)	0
 #endif
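The one-character fix above is the classic assignment-versus-comparison bug: with =, the macro silently overwrote its argument and evaluated as true whenever &gate_vma was non-NULL. A tiny userspace demonstration of the same shape (names here are illustrative, not kernel code):

#include <stdio.h>

static int gate_obj;
#define is_gate_buggy(p)	((p) = &gate_obj)	/* assigns, effectively always true */
#define is_gate_fixed(p)	((p) == &gate_obj)	/* compares */

int main(void)
{
	int other;
	int *vma = &other;

	if (is_gate_fixed(vma))
		puts("fixed: unexpectedly true");
	else
		puts("fixed: false for a non-gate pointer, as intended");

	if (is_gate_buggy(vma))	/* clobbers vma and takes the branch */
		printf("buggy: taken, vma now %p\n", (void *)vma);
	return 0;
}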
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index c2b4f8f0be9a..2dc19349eb19 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -145,6 +145,16 @@ int boot_secondary(unsigned int cpu, struct task_struct *idle)
 	return -ENOSYS;
 }
 
+int platform_can_cpu_hotplug(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+	if (smp_ops.cpu_kill)
+		return 1;
+#endif
+
+	return 0;
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 static void percpu_timer_stop(void);
 
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c index c5a59546a256..85a87370f144 100644 --- a/arch/arm/kernel/topology.c +++ b/arch/arm/kernel/topology.c | |||
@@ -74,12 +74,8 @@ struct cpu_efficiency table_efficiency[] = { | |||
74 | {NULL, }, | 74 | {NULL, }, |
75 | }; | 75 | }; |
76 | 76 | ||
77 | struct cpu_capacity { | 77 | unsigned long *__cpu_capacity; |
78 | unsigned long hwid; | 78 | #define cpu_capacity(cpu) __cpu_capacity[cpu] |
79 | unsigned long capacity; | ||
80 | }; | ||
81 | |||
82 | struct cpu_capacity *cpu_capacity; | ||
83 | 79 | ||
84 | unsigned long middle_capacity = 1; | 80 | unsigned long middle_capacity = 1; |
85 | 81 | ||
@@ -100,15 +96,19 @@ static void __init parse_dt_topology(void) | |||
100 | unsigned long capacity = 0; | 96 | unsigned long capacity = 0; |
101 | int alloc_size, cpu = 0; | 97 | int alloc_size, cpu = 0; |
102 | 98 | ||
103 | alloc_size = nr_cpu_ids * sizeof(struct cpu_capacity); | 99 | alloc_size = nr_cpu_ids * sizeof(*__cpu_capacity); |
104 | cpu_capacity = kzalloc(alloc_size, GFP_NOWAIT); | 100 | __cpu_capacity = kzalloc(alloc_size, GFP_NOWAIT); |
105 | 101 | ||
106 | while ((cn = of_find_node_by_type(cn, "cpu"))) { | 102 | for_each_possible_cpu(cpu) { |
107 | const u32 *rate, *reg; | 103 | const u32 *rate; |
108 | int len; | 104 | int len; |
109 | 105 | ||
110 | if (cpu >= num_possible_cpus()) | 106 | /* too early to use cpu->of_node */ |
111 | break; | 107 | cn = of_get_cpu_node(cpu, NULL); |
108 | if (!cn) { | ||
109 | pr_err("missing device node for CPU %d\n", cpu); | ||
110 | continue; | ||
111 | } | ||
112 | 112 | ||
113 | for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++) | 113 | for (cpu_eff = table_efficiency; cpu_eff->compatible; cpu_eff++) |
114 | if (of_device_is_compatible(cn, cpu_eff->compatible)) | 114 | if (of_device_is_compatible(cn, cpu_eff->compatible)) |
@@ -124,12 +124,6 @@ static void __init parse_dt_topology(void) | |||
124 | continue; | 124 | continue; |
125 | } | 125 | } |
126 | 126 | ||
127 | reg = of_get_property(cn, "reg", &len); | ||
128 | if (!reg || len != 4) { | ||
129 | pr_err("%s missing reg property\n", cn->full_name); | ||
130 | continue; | ||
131 | } | ||
132 | |||
133 | capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency; | 127 | capacity = ((be32_to_cpup(rate)) >> 20) * cpu_eff->efficiency; |
134 | 128 | ||
135 | /* Save min capacity of the system */ | 129 | /* Save min capacity of the system */ |
@@ -140,13 +134,9 @@ static void __init parse_dt_topology(void) | |||
140 | if (capacity > max_capacity) | 134 | if (capacity > max_capacity) |
141 | max_capacity = capacity; | 135 | max_capacity = capacity; |
142 | 136 | ||
143 | cpu_capacity[cpu].capacity = capacity; | 137 | cpu_capacity(cpu) = capacity; |
144 | cpu_capacity[cpu++].hwid = be32_to_cpup(reg); | ||
145 | } | 138 | } |
146 | 139 | ||
147 | if (cpu < num_possible_cpus()) | ||
148 | cpu_capacity[cpu].hwid = (unsigned long)(-1); | ||
149 | |||
150 | /* If min and max capacities are equals, we bypass the update of the | 140 | /* If min and max capacities are equals, we bypass the update of the |
151 | * cpu_scale because all CPUs have the same capacity. Otherwise, we | 141 | * cpu_scale because all CPUs have the same capacity. Otherwise, we |
152 | * compute a middle_capacity factor that will ensure that the capacity | 142 | * compute a middle_capacity factor that will ensure that the capacity |
@@ -154,9 +144,7 @@ static void __init parse_dt_topology(void) | |||
154 | * SCHED_POWER_SCALE, which is the default value, but with the | 144 | * SCHED_POWER_SCALE, which is the default value, but with the |
155 | * constraint explained near table_efficiency[]. | 145 | * constraint explained near table_efficiency[]. |
156 | */ | 146 | */ |
157 | if (min_capacity == max_capacity) | 147 | if (4*max_capacity < (3*(max_capacity + min_capacity))) |
158 | cpu_capacity[0].hwid = (unsigned long)(-1); | ||
159 | else if (4*max_capacity < (3*(max_capacity + min_capacity))) | ||
160 | middle_capacity = (min_capacity + max_capacity) | 148 | middle_capacity = (min_capacity + max_capacity) |
161 | >> (SCHED_POWER_SHIFT+1); | 149 | >> (SCHED_POWER_SHIFT+1); |
162 | else | 150 | else |
@@ -170,23 +158,12 @@ static void __init parse_dt_topology(void) | |||
170 | * boot. The update of all CPUs is in O(n^2) for heteregeneous system but the | 158 | * boot. The update of all CPUs is in O(n^2) for heteregeneous system but the |
171 | * function returns directly for SMP system. | 159 | * function returns directly for SMP system. |
172 | */ | 160 | */ |
173 | void update_cpu_power(unsigned int cpu, unsigned long hwid) | 161 | void update_cpu_power(unsigned int cpu) |
174 | { | 162 | { |
175 | unsigned int idx = 0; | 163 | if (!cpu_capacity(cpu)) |
176 | |||
177 | /* look for the cpu's hwid in the cpu capacity table */ | ||
178 | for (idx = 0; idx < num_possible_cpus(); idx++) { | ||
179 | if (cpu_capacity[idx].hwid == hwid) | ||
180 | break; | ||
181 | |||
182 | if (cpu_capacity[idx].hwid == -1) | ||
183 | return; | ||
184 | } | ||
185 | |||
186 | if (idx == num_possible_cpus()) | ||
187 | return; | 164 | return; |
188 | 165 | ||
189 | set_power_scale(cpu, cpu_capacity[idx].capacity / middle_capacity); | 166 | set_power_scale(cpu, cpu_capacity(cpu) / middle_capacity); |
190 | 167 | ||
191 | printk(KERN_INFO "CPU%u: update cpu_power %lu\n", | 168 | printk(KERN_INFO "CPU%u: update cpu_power %lu\n", |
192 | cpu, arch_scale_freq_power(NULL, cpu)); | 169 | cpu, arch_scale_freq_power(NULL, cpu)); |
@@ -194,7 +171,7 @@ void update_cpu_power(unsigned int cpu, unsigned long hwid) | |||
194 | 171 | ||
195 | #else | 172 | #else |
196 | static inline void parse_dt_topology(void) {} | 173 | static inline void parse_dt_topology(void) {} |
197 | static inline void update_cpu_power(unsigned int cpuid, unsigned int mpidr) {} | 174 | static inline void update_cpu_power(unsigned int cpuid) {} |
198 | #endif | 175 | #endif |
199 | 176 | ||
200 | /* | 177 | /* |
@@ -281,7 +258,7 @@ void store_cpu_topology(unsigned int cpuid) | |||
281 | 258 | ||
282 | update_siblings_masks(cpuid); | 259 | update_siblings_masks(cpuid); |
283 | 260 | ||
284 | update_cpu_power(cpuid, mpidr & MPIDR_HWID_BITMASK); | 261 | update_cpu_power(cpuid); |
285 | 262 | ||
286 | printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n", | 263 | printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n", |
287 | cpuid, cpu_topology[cpuid].thread_id, | 264 | cpuid, cpu_topology[cpuid].thread_id, |
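The topology rework above drops the MPIDR-keyed capacity table, so update_cpu_power() becomes a direct lookup on the logical CPU number instead of a scan for a matching hwid. A standalone sketch of the resulting flow follows; cpu_capacity_tbl and the sample values are hypothetical, only the shape of the lookup mirrors the patch.

#include <stdio.h>

#define NR_CPUS 4

static unsigned long cpu_capacity_tbl[NR_CPUS];      /* hypothetical storage */
#define cpu_capacity(cpu)  cpu_capacity_tbl[cpu]     /* direct index, no hwid scan */

static unsigned long middle_capacity = 1;

static void update_cpu_power(unsigned int cpu)
{
	if (!cpu_capacity(cpu))          /* no DT efficiency data: keep the default scale */
		return;
	printf("CPU%u: update cpu_power %lu\n",
	       cpu, cpu_capacity(cpu) / middle_capacity);
}

int main(void)
{
	cpu_capacity_tbl[0] = 1024;      /* pretend DT provided a capacity for CPU0 */
	update_cpu_power(0);             /* prints a scaled power value */
	update_cpu_power(1);             /* nothing recorded: returns quietly */
	return 0;
}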
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index 4a5199070430..db9cf692d4dd 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c | |||
@@ -146,7 +146,11 @@ static bool pm_fake(struct kvm_vcpu *vcpu, | |||
146 | #define access_pmintenclr pm_fake | 146 | #define access_pmintenclr pm_fake |
147 | 147 | ||
148 | /* Architected CP15 registers. | 148 | /* Architected CP15 registers. |
149 | * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 | 149 | * CRn denotes the primary register number, but is copied to the CRm in the |
150 | * user space API for 64-bit register access in line with the terminology used | ||
151 | * in the ARM ARM. | ||
152 | * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit | ||
153 | * registers preceding 32-bit ones. | ||
150 | */ | 154 | */ |
151 | static const struct coproc_reg cp15_regs[] = { | 155 | static const struct coproc_reg cp15_regs[] = { |
152 | /* CSSELR: swapped by interrupt.S. */ | 156 | /* CSSELR: swapped by interrupt.S. */ |
@@ -154,8 +158,8 @@ static const struct coproc_reg cp15_regs[] = { | |||
154 | NULL, reset_unknown, c0_CSSELR }, | 158 | NULL, reset_unknown, c0_CSSELR }, |
155 | 159 | ||
156 | /* TTBR0/TTBR1: swapped by interrupt.S. */ | 160 | /* TTBR0/TTBR1: swapped by interrupt.S. */ |
157 | { CRm( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 }, | 161 | { CRm64( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 }, |
158 | { CRm( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 }, | 162 | { CRm64( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 }, |
159 | 163 | ||
160 | /* TTBCR: swapped by interrupt.S. */ | 164 | /* TTBCR: swapped by interrupt.S. */ |
161 | { CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32, | 165 | { CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32, |
@@ -182,7 +186,7 @@ static const struct coproc_reg cp15_regs[] = { | |||
182 | NULL, reset_unknown, c6_IFAR }, | 186 | NULL, reset_unknown, c6_IFAR }, |
183 | 187 | ||
184 | /* PAR swapped by interrupt.S */ | 188 | /* PAR swapped by interrupt.S */ |
185 | { CRn( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR }, | 189 | { CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR }, |
186 | 190 | ||
187 | /* | 191 | /* |
188 | * DC{C,I,CI}SW operations: | 192 | * DC{C,I,CI}SW operations: |
@@ -399,12 +403,13 @@ static bool index_to_params(u64 id, struct coproc_params *params) | |||
399 | | KVM_REG_ARM_OPC1_MASK)) | 403 | | KVM_REG_ARM_OPC1_MASK)) |
400 | return false; | 404 | return false; |
401 | params->is_64bit = true; | 405 | params->is_64bit = true; |
402 | params->CRm = ((id & KVM_REG_ARM_CRM_MASK) | 406 | /* CRm to CRn: see cp15_to_index for details */ |
407 | params->CRn = ((id & KVM_REG_ARM_CRM_MASK) | ||
403 | >> KVM_REG_ARM_CRM_SHIFT); | 408 | >> KVM_REG_ARM_CRM_SHIFT); |
404 | params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK) | 409 | params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK) |
405 | >> KVM_REG_ARM_OPC1_SHIFT); | 410 | >> KVM_REG_ARM_OPC1_SHIFT); |
406 | params->Op2 = 0; | 411 | params->Op2 = 0; |
407 | params->CRn = 0; | 412 | params->CRm = 0; |
408 | return true; | 413 | return true; |
409 | default: | 414 | default: |
410 | return false; | 415 | return false; |
@@ -898,7 +903,14 @@ static u64 cp15_to_index(const struct coproc_reg *reg) | |||
898 | if (reg->is_64) { | 903 | if (reg->is_64) { |
899 | val |= KVM_REG_SIZE_U64; | 904 | val |= KVM_REG_SIZE_U64; |
900 | val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); | 905 | val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); |
901 | val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT); | 906 | /* |
907 | * CRn always denotes the primary coproc. reg. nr. for the | ||
908 | * in-kernel representation, but the user space API uses the | ||
909 | * CRm for the encoding, because it is modelled after the | ||
910 | * MRRC/MCRR instructions: see the ARM ARM rev. c page | ||
911 | * B3-1445 | ||
912 | */ | ||
913 | val |= (reg->CRn << KVM_REG_ARM_CRM_SHIFT); | ||
902 | } else { | 914 | } else { |
903 | val |= KVM_REG_SIZE_U32; | 915 | val |= KVM_REG_SIZE_U32; |
904 | val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); | 916 | val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); |
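For the 64-bit accessors above, user space encodes the primary register number in the CRm field of the register id while the in-kernel table keys on CRn. The standalone model below shows that swap on both the decode and encode paths; the field widths and shift values are placeholders, the real masks live in the uapi kvm.h header.

#include <stdint.h>
#include <stdio.h>

#define CRM_SHIFT   7                           /* placeholder layout, not the ABI values */
#define CRM_MASK    (0xfULL << CRM_SHIFT)
#define OPC1_SHIFT  3
#define OPC1_MASK   (0xfULL << OPC1_SHIFT)

struct coproc_params {
	unsigned int CRn, CRm, Op1, Op2;
	int is_64bit;
};

/* user space CRm -> in-kernel CRn, mirroring index_to_params() above */
static void decode64(uint64_t id, struct coproc_params *p)
{
	p->is_64bit = 1;
	p->CRn = (id & CRM_MASK) >> CRM_SHIFT;
	p->Op1 = (id & OPC1_MASK) >> OPC1_SHIFT;
	p->Op2 = 0;
	p->CRm = 0;
}

/* in-kernel CRn -> user space CRm, mirroring cp15_to_index() above */
static uint64_t encode64(unsigned int crn, unsigned int op1)
{
	return ((uint64_t)op1 << OPC1_SHIFT) | ((uint64_t)crn << CRM_SHIFT);
}

int main(void)
{
	struct coproc_params p;

	decode64(encode64(2, 1), &p);            /* TTBR1: primary register 2, Op1 1 */
	printf("CRn=%u Op1=%u CRm=%u\n", p.CRn, p.Op1, p.CRm);
	return 0;
}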
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h index b7301d3e4799..0461d5c8d3de 100644 --- a/arch/arm/kvm/coproc.h +++ b/arch/arm/kvm/coproc.h | |||
@@ -135,6 +135,8 @@ static inline int cmp_reg(const struct coproc_reg *i1, | |||
135 | return -1; | 135 | return -1; |
136 | if (i1->CRn != i2->CRn) | 136 | if (i1->CRn != i2->CRn) |
137 | return i1->CRn - i2->CRn; | 137 | return i1->CRn - i2->CRn; |
138 | if (i1->is_64 != i2->is_64) | ||
139 | return i2->is_64 - i1->is_64; | ||
138 | if (i1->CRm != i2->CRm) | 140 | if (i1->CRm != i2->CRm) |
139 | return i1->CRm - i2->CRm; | 141 | return i1->CRm - i2->CRm; |
140 | if (i1->Op1 != i2->Op1) | 142 | if (i1->Op1 != i2->Op1) |
@@ -145,6 +147,7 @@ static inline int cmp_reg(const struct coproc_reg *i1, | |||
145 | 147 | ||
146 | #define CRn(_x) .CRn = _x | 148 | #define CRn(_x) .CRn = _x |
147 | #define CRm(_x) .CRm = _x | 149 | #define CRm(_x) .CRm = _x |
150 | #define CRm64(_x) .CRn = _x, .CRm = 0 | ||
148 | #define Op1(_x) .Op1 = _x | 151 | #define Op1(_x) .Op1 = _x |
149 | #define Op2(_x) .Op2 = _x | 152 | #define Op2(_x) .Op2 = _x |
150 | #define is64 .is_64 = true | 153 | #define is64 .is_64 = true |
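The extra is_64 comparison in cmp_reg() is what lets the CRm64() entries sit ahead of 32-bit registers that share the same primary number. A compilable toy version of the ordering rule, with a two-entry example matching the TTBR0/TTBCR case above:

#include <stdbool.h>
#include <stdio.h>

struct reg {
	int CRn, CRm, Op1, Op2;
	bool is_64;
};

static int cmp_reg(const struct reg *i1, const struct reg *i2)
{
	if (i1->CRn != i2->CRn)
		return i1->CRn - i2->CRn;
	if (i1->is_64 != i2->is_64)
		return i2->is_64 - i1->is_64;    /* 64-bit entries sort first */
	if (i1->CRm != i2->CRm)
		return i1->CRm - i2->CRm;
	if (i1->Op1 != i2->Op1)
		return i1->Op1 - i2->Op1;
	return i1->Op2 - i2->Op2;
}

int main(void)
{
	struct reg ttbr0 = { .CRn = 2, .is_64 = true };            /* CRm64(2): CRn=2, CRm=0 */
	struct reg ttbcr = { .CRn = 2, .Op2 = 2, .is_64 = false }; /* 32-bit, same primary number */

	printf("cmp_reg(TTBR0, TTBCR) = %d\n", cmp_reg(&ttbr0, &ttbcr));   /* negative: TTBR0 first */
	return 0;
}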
diff --git a/arch/arm/kvm/coproc_a15.c b/arch/arm/kvm/coproc_a15.c index 685063a6d0cf..cf93472b9dd6 100644 --- a/arch/arm/kvm/coproc_a15.c +++ b/arch/arm/kvm/coproc_a15.c | |||
@@ -114,7 +114,11 @@ static bool access_l2ectlr(struct kvm_vcpu *vcpu, | |||
114 | 114 | ||
115 | /* | 115 | /* |
116 | * A15-specific CP15 registers. | 116 | * A15-specific CP15 registers. |
117 | * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 | 117 | * CRn denotes the primary register number, but is copied to the CRm in the |
118 | * user space API for 64-bit register access in line with the terminology used | ||
119 | * in the ARM ARM. | ||
120 | * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit | ||
121 | * registers preceding 32-bit ones. | ||
118 | */ | 122 | */ |
119 | static const struct coproc_reg a15_regs[] = { | 123 | static const struct coproc_reg a15_regs[] = { |
120 | /* MPIDR: we use VMPIDR for guest access. */ | 124 | /* MPIDR: we use VMPIDR for guest access. */ |
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c index b8e06b7a2833..0c25d9487d53 100644 --- a/arch/arm/kvm/mmio.c +++ b/arch/arm/kvm/mmio.c | |||
@@ -63,7 +63,8 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
63 | static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, | 63 | static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, |
64 | struct kvm_exit_mmio *mmio) | 64 | struct kvm_exit_mmio *mmio) |
65 | { | 65 | { |
66 | unsigned long rt, len; | 66 | unsigned long rt; |
67 | int len; | ||
67 | bool is_write, sign_extend; | 68 | bool is_write, sign_extend; |
68 | 69 | ||
69 | if (kvm_vcpu_dabt_isextabt(vcpu)) { | 70 | if (kvm_vcpu_dabt_isextabt(vcpu)) { |
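Splitting len out as a signed int presumably lets a negative error code from the access-size decode actually test as negative later on; stored in an unsigned variable, such an error can never trip a "len < 0" check. A standalone illustration of the pitfall (helper name and error value are made up):

#include <stdio.h>

static int decode_access_size(void)
{
	return -14;                                      /* pretend the decode failed, e.g. -EFAULT */
}

int main(void)
{
	unsigned long old_len = decode_access_size();    /* previous type */
	int new_len = decode_access_size();              /* type after the change */

	/* the unsigned comparison below is always false by definition */
	printf("unsigned check: %s\n", old_len < 0 ? "caught" : "missed");
	printf("signed check:   %s\n", new_len < 0 ? "caught" : "missed");
	return 0;
}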
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index ca6bea4859b4..0988d9e04dd4 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c | |||
@@ -85,6 +85,12 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc) | |||
85 | return p; | 85 | return p; |
86 | } | 86 | } |
87 | 87 | ||
88 | static bool page_empty(void *ptr) | ||
89 | { | ||
90 | struct page *ptr_page = virt_to_page(ptr); | ||
91 | return page_count(ptr_page) == 1; | ||
92 | } | ||
93 | |||
88 | static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr) | 94 | static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr) |
89 | { | 95 | { |
90 | pmd_t *pmd_table = pmd_offset(pud, 0); | 96 | pmd_t *pmd_table = pmd_offset(pud, 0); |
@@ -103,12 +109,6 @@ static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr) | |||
103 | put_page(virt_to_page(pmd)); | 109 | put_page(virt_to_page(pmd)); |
104 | } | 110 | } |
105 | 111 | ||
106 | static bool pmd_empty(pmd_t *pmd) | ||
107 | { | ||
108 | struct page *pmd_page = virt_to_page(pmd); | ||
109 | return page_count(pmd_page) == 1; | ||
110 | } | ||
111 | |||
112 | static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr) | 112 | static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr) |
113 | { | 113 | { |
114 | if (pte_present(*pte)) { | 114 | if (pte_present(*pte)) { |
@@ -118,12 +118,6 @@ static void clear_pte_entry(struct kvm *kvm, pte_t *pte, phys_addr_t addr) | |||
118 | } | 118 | } |
119 | } | 119 | } |
120 | 120 | ||
121 | static bool pte_empty(pte_t *pte) | ||
122 | { | ||
123 | struct page *pte_page = virt_to_page(pte); | ||
124 | return page_count(pte_page) == 1; | ||
125 | } | ||
126 | |||
127 | static void unmap_range(struct kvm *kvm, pgd_t *pgdp, | 121 | static void unmap_range(struct kvm *kvm, pgd_t *pgdp, |
128 | unsigned long long start, u64 size) | 122 | unsigned long long start, u64 size) |
129 | { | 123 | { |
@@ -132,37 +126,37 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp, | |||
132 | pmd_t *pmd; | 126 | pmd_t *pmd; |
133 | pte_t *pte; | 127 | pte_t *pte; |
134 | unsigned long long addr = start, end = start + size; | 128 | unsigned long long addr = start, end = start + size; |
135 | u64 range; | 129 | u64 next; |
136 | 130 | ||
137 | while (addr < end) { | 131 | while (addr < end) { |
138 | pgd = pgdp + pgd_index(addr); | 132 | pgd = pgdp + pgd_index(addr); |
139 | pud = pud_offset(pgd, addr); | 133 | pud = pud_offset(pgd, addr); |
140 | if (pud_none(*pud)) { | 134 | if (pud_none(*pud)) { |
141 | addr += PUD_SIZE; | 135 | addr = pud_addr_end(addr, end); |
142 | continue; | 136 | continue; |
143 | } | 137 | } |
144 | 138 | ||
145 | pmd = pmd_offset(pud, addr); | 139 | pmd = pmd_offset(pud, addr); |
146 | if (pmd_none(*pmd)) { | 140 | if (pmd_none(*pmd)) { |
147 | addr += PMD_SIZE; | 141 | addr = pmd_addr_end(addr, end); |
148 | continue; | 142 | continue; |
149 | } | 143 | } |
150 | 144 | ||
151 | pte = pte_offset_kernel(pmd, addr); | 145 | pte = pte_offset_kernel(pmd, addr); |
152 | clear_pte_entry(kvm, pte, addr); | 146 | clear_pte_entry(kvm, pte, addr); |
153 | range = PAGE_SIZE; | 147 | next = addr + PAGE_SIZE; |
154 | 148 | ||
155 | /* If we emptied the pte, walk back up the ladder */ | 149 | /* If we emptied the pte, walk back up the ladder */ |
156 | if (pte_empty(pte)) { | 150 | if (page_empty(pte)) { |
157 | clear_pmd_entry(kvm, pmd, addr); | 151 | clear_pmd_entry(kvm, pmd, addr); |
158 | range = PMD_SIZE; | 152 | next = pmd_addr_end(addr, end); |
159 | if (pmd_empty(pmd)) { | 153 | if (page_empty(pmd) && !page_empty(pud)) { |
160 | clear_pud_entry(kvm, pud, addr); | 154 | clear_pud_entry(kvm, pud, addr); |
161 | range = PUD_SIZE; | 155 | next = pud_addr_end(addr, end); |
162 | } | 156 | } |
163 | } | 157 | } |
164 | 158 | ||
165 | addr += range; | 159 | addr = next; |
166 | } | 160 | } |
167 | } | 161 | } |
168 | 162 | ||
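The unmap walk above now advances with pud_addr_end()/pmd_addr_end() instead of adding a fixed PUD_SIZE/PMD_SIZE, which keeps the cursor clamped to the end of the requested range. A minimal userspace model of that clamping, using an illustrative 2 MiB section size rather than the kernel macro itself:

#include <stdio.h>

#define PMD_SIZE  (2UL << 20)                  /* illustrative 2 MiB section */
#define PMD_MASK  (~(PMD_SIZE - 1))

/* next PMD boundary after addr, clamped so it never passes 'end' */
static unsigned long pmd_addr_end(unsigned long addr, unsigned long end)
{
	unsigned long boundary = (addr + PMD_SIZE) & PMD_MASK;

	return boundary < end ? boundary : end;
}

int main(void)
{
	unsigned long addr = 0x200000, end = 0x201000;

	/* the old 'addr += PMD_SIZE' would jump to 0x400000, well past 'end' */
	printf("next = %#lx\n", pmd_addr_end(addr, end));   /* prints 0x201000 */
	return 0;
}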
diff --git a/arch/arm/mach-at91/at91sam9x5.c b/arch/arm/mach-at91/at91sam9x5.c index 2abee6626aac..916e5a142917 100644 --- a/arch/arm/mach-at91/at91sam9x5.c +++ b/arch/arm/mach-at91/at91sam9x5.c | |||
@@ -227,6 +227,8 @@ static struct clk_lookup periph_clocks_lookups[] = { | |||
227 | CLKDEV_CON_DEV_ID("usart", "f8020000.serial", &usart1_clk), | 227 | CLKDEV_CON_DEV_ID("usart", "f8020000.serial", &usart1_clk), |
228 | CLKDEV_CON_DEV_ID("usart", "f8024000.serial", &usart2_clk), | 228 | CLKDEV_CON_DEV_ID("usart", "f8024000.serial", &usart2_clk), |
229 | CLKDEV_CON_DEV_ID("usart", "f8028000.serial", &usart3_clk), | 229 | CLKDEV_CON_DEV_ID("usart", "f8028000.serial", &usart3_clk), |
230 | CLKDEV_CON_DEV_ID("usart", "f8040000.serial", &uart0_clk), | ||
231 | CLKDEV_CON_DEV_ID("usart", "f8044000.serial", &uart1_clk), | ||
230 | CLKDEV_CON_DEV_ID("t0_clk", "f8008000.timer", &tcb0_clk), | 232 | CLKDEV_CON_DEV_ID("t0_clk", "f8008000.timer", &tcb0_clk), |
231 | CLKDEV_CON_DEV_ID("t0_clk", "f800c000.timer", &tcb0_clk), | 233 | CLKDEV_CON_DEV_ID("t0_clk", "f800c000.timer", &tcb0_clk), |
232 | CLKDEV_CON_DEV_ID("mci_clk", "f0008000.mmc", &mmc0_clk), | 234 | CLKDEV_CON_DEV_ID("mci_clk", "f0008000.mmc", &mmc0_clk), |
diff --git a/arch/arm/mach-at91/include/mach/at91_adc.h b/arch/arm/mach-at91/include/mach/at91_adc.h index 8e7ed5c90817..048a57f76bd3 100644 --- a/arch/arm/mach-at91/include/mach/at91_adc.h +++ b/arch/arm/mach-at91/include/mach/at91_adc.h | |||
@@ -28,9 +28,12 @@ | |||
28 | #define AT91_ADC_TRGSEL_EXTERNAL (6 << 1) | 28 | #define AT91_ADC_TRGSEL_EXTERNAL (6 << 1) |
29 | #define AT91_ADC_LOWRES (1 << 4) /* Low Resolution */ | 29 | #define AT91_ADC_LOWRES (1 << 4) /* Low Resolution */ |
30 | #define AT91_ADC_SLEEP (1 << 5) /* Sleep Mode */ | 30 | #define AT91_ADC_SLEEP (1 << 5) /* Sleep Mode */ |
31 | #define AT91_ADC_PRESCAL (0x3f << 8) /* Prescalar Rate Selection */ | 31 | #define AT91_ADC_PRESCAL_9260 (0x3f << 8) /* Prescalar Rate Selection */ |
32 | #define AT91_ADC_PRESCAL_9G45 (0xff << 8) | ||
32 | #define AT91_ADC_PRESCAL_(x) ((x) << 8) | 33 | #define AT91_ADC_PRESCAL_(x) ((x) << 8) |
33 | #define AT91_ADC_STARTUP (0x1f << 16) /* Startup Up Time */ | 34 | #define AT91_ADC_STARTUP_9260 (0x1f << 16) /* Startup Up Time */ |
35 | #define AT91_ADC_STARTUP_9G45 (0x7f << 16) | ||
36 | #define AT91_ADC_STARTUP_9X5 (0xf << 16) | ||
34 | #define AT91_ADC_STARTUP_(x) ((x) << 16) | 37 | #define AT91_ADC_STARTUP_(x) ((x) << 16) |
35 | #define AT91_ADC_SHTIM (0xf << 24) /* Sample & Hold Time */ | 38 | #define AT91_ADC_SHTIM (0xf << 24) /* Sample & Hold Time */ |
36 | #define AT91_ADC_SHTIM_(x) ((x) << 24) | 39 | #define AT91_ADC_SHTIM_(x) ((x) << 24) |
@@ -48,6 +51,9 @@ | |||
48 | #define AT91_ADC_ENDRX (1 << 18) /* End of RX Buffer */ | 51 | #define AT91_ADC_ENDRX (1 << 18) /* End of RX Buffer */ |
49 | #define AT91_ADC_RXFUFF (1 << 19) /* RX Buffer Full */ | 52 | #define AT91_ADC_RXFUFF (1 << 19) /* RX Buffer Full */ |
50 | 53 | ||
54 | #define AT91_ADC_SR_9X5 0x30 /* Status Register for 9x5 */ | ||
55 | #define AT91_ADC_SR_DRDY_9X5 (1 << 24) /* Data Ready */ | ||
56 | |||
51 | #define AT91_ADC_LCDR 0x20 /* Last Converted Data Register */ | 57 | #define AT91_ADC_LCDR 0x20 /* Last Converted Data Register */ |
52 | #define AT91_ADC_LDATA (0x3ff) | 58 | #define AT91_ADC_LDATA (0x3ff) |
53 | 59 | ||
@@ -58,4 +64,10 @@ | |||
58 | #define AT91_ADC_CHR(n) (0x30 + ((n) * 4)) /* Channel Data Register N */ | 64 | #define AT91_ADC_CHR(n) (0x30 + ((n) * 4)) /* Channel Data Register N */ |
59 | #define AT91_ADC_DATA (0x3ff) | 65 | #define AT91_ADC_DATA (0x3ff) |
60 | 66 | ||
67 | #define AT91_ADC_CDR0_9X5 (0x50) /* Channel Data Register 0 for 9X5 */ | ||
68 | |||
69 | #define AT91_ADC_TRGR_9260 AT91_ADC_MR | ||
70 | #define AT91_ADC_TRGR_9G45 0x08 | ||
71 | #define AT91_ADC_TRGR_9X5 0xC0 | ||
72 | |||
61 | #endif | 73 | #endif |
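The header now carries per-SoC field masks and trigger-register offsets (9260, 9G45, 9X5 variants). One way a driver might consume them is through a small capabilities table selected at probe time; the sketch below is hypothetical and not the at91_adc driver itself, with mask values taken from the defines above and the 9260 trigger offset marked as an assumption.

#include <stdio.h>

struct at91_adc_caps {
	unsigned int prescal_mask;       /* e.g. AT91_ADC_PRESCAL_9260 vs _9G45 */
	unsigned int startup_mask;       /* e.g. AT91_ADC_STARTUP_9260 vs _9X5 */
	unsigned int trgr_offset;        /* e.g. AT91_ADC_TRGR_9X5 */
};

static const struct at91_adc_caps caps_9260 = {
	.prescal_mask = 0x3f << 8,
	.startup_mask = 0x1f << 16,
	.trgr_offset  = 0x04,            /* assumed: 9260 triggers via the mode register */
};

static const struct at91_adc_caps caps_9x5 = {
	.prescal_mask = 0xff << 8,       /* assumed to share the wider 9G45 prescaler field */
	.startup_mask = 0x0f << 16,
	.trgr_offset  = 0xC0,
};

int main(void)
{
	const struct at91_adc_caps *caps = &caps_9x5;    /* normally picked from match data */

	printf("TRGR at %#x, startup mask %#x\n",
	       caps->trgr_offset, caps->startup_mask);
	return 0;
}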
diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c index dff4ddc5ef81..139e42da25f0 100644 --- a/arch/arm/mach-davinci/board-dm355-leopard.c +++ b/arch/arm/mach-davinci/board-dm355-leopard.c | |||
@@ -75,6 +75,7 @@ static struct davinci_nand_pdata davinci_nand_data = { | |||
75 | .parts = davinci_nand_partitions, | 75 | .parts = davinci_nand_partitions, |
76 | .nr_parts = ARRAY_SIZE(davinci_nand_partitions), | 76 | .nr_parts = ARRAY_SIZE(davinci_nand_partitions), |
77 | .ecc_mode = NAND_ECC_HW_SYNDROME, | 77 | .ecc_mode = NAND_ECC_HW_SYNDROME, |
78 | .ecc_bits = 4, | ||
78 | .bbt_options = NAND_BBT_USE_FLASH, | 79 | .bbt_options = NAND_BBT_USE_FLASH, |
79 | }; | 80 | }; |
80 | 81 | ||
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c index a33686a6fbb2..fa4bfaf952d8 100644 --- a/arch/arm/mach-davinci/board-dm644x-evm.c +++ b/arch/arm/mach-davinci/board-dm644x-evm.c | |||
@@ -153,6 +153,7 @@ static struct davinci_nand_pdata davinci_evm_nandflash_data = { | |||
153 | .parts = davinci_evm_nandflash_partition, | 153 | .parts = davinci_evm_nandflash_partition, |
154 | .nr_parts = ARRAY_SIZE(davinci_evm_nandflash_partition), | 154 | .nr_parts = ARRAY_SIZE(davinci_evm_nandflash_partition), |
155 | .ecc_mode = NAND_ECC_HW, | 155 | .ecc_mode = NAND_ECC_HW, |
156 | .ecc_bits = 1, | ||
156 | .bbt_options = NAND_BBT_USE_FLASH, | 157 | .bbt_options = NAND_BBT_USE_FLASH, |
157 | .timing = &davinci_evm_nandflash_timing, | 158 | .timing = &davinci_evm_nandflash_timing, |
158 | }; | 159 | }; |
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c index fbb8e5ab1dc1..0c005e876cac 100644 --- a/arch/arm/mach-davinci/board-dm646x-evm.c +++ b/arch/arm/mach-davinci/board-dm646x-evm.c | |||
@@ -90,6 +90,7 @@ static struct davinci_nand_pdata davinci_nand_data = { | |||
90 | .parts = davinci_nand_partitions, | 90 | .parts = davinci_nand_partitions, |
91 | .nr_parts = ARRAY_SIZE(davinci_nand_partitions), | 91 | .nr_parts = ARRAY_SIZE(davinci_nand_partitions), |
92 | .ecc_mode = NAND_ECC_HW, | 92 | .ecc_mode = NAND_ECC_HW, |
93 | .ecc_bits = 1, | ||
93 | .options = 0, | 94 | .options = 0, |
94 | }; | 95 | }; |
95 | 96 | ||
diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c index 2bc112adf565..808233b60e3d 100644 --- a/arch/arm/mach-davinci/board-neuros-osd2.c +++ b/arch/arm/mach-davinci/board-neuros-osd2.c | |||
@@ -88,6 +88,7 @@ static struct davinci_nand_pdata davinci_ntosd2_nandflash_data = { | |||
88 | .parts = davinci_ntosd2_nandflash_partition, | 88 | .parts = davinci_ntosd2_nandflash_partition, |
89 | .nr_parts = ARRAY_SIZE(davinci_ntosd2_nandflash_partition), | 89 | .nr_parts = ARRAY_SIZE(davinci_ntosd2_nandflash_partition), |
90 | .ecc_mode = NAND_ECC_HW, | 90 | .ecc_mode = NAND_ECC_HW, |
91 | .ecc_bits = 1, | ||
91 | .bbt_options = NAND_BBT_USE_FLASH, | 92 | .bbt_options = NAND_BBT_USE_FLASH, |
92 | }; | 93 | }; |
93 | 94 | ||
diff --git a/arch/arm/mach-davinci/cpuidle.c b/arch/arm/mach-davinci/cpuidle.c index 36aef3a7dedb..f1ac1c94ac0f 100644 --- a/arch/arm/mach-davinci/cpuidle.c +++ b/arch/arm/mach-davinci/cpuidle.c | |||
@@ -65,7 +65,7 @@ static struct cpuidle_driver davinci_idle_driver = { | |||
65 | .states[1] = { | 65 | .states[1] = { |
66 | .enter = davinci_enter_idle, | 66 | .enter = davinci_enter_idle, |
67 | .exit_latency = 10, | 67 | .exit_latency = 10, |
68 | .target_residency = 100000, | 68 | .target_residency = 10000, |
69 | .flags = CPUIDLE_FLAG_TIME_VALID, | 69 | .flags = CPUIDLE_FLAG_TIME_VALID, |
70 | .name = "DDR SR", | 70 | .name = "DDR SR", |
71 | .desc = "WFI and DDR Self Refresh", | 71 | .desc = "WFI and DDR Self Refresh", |
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c index 7be13f8e69a0..a02f275a198d 100644 --- a/arch/arm/mach-imx/mach-imx6q.c +++ b/arch/arm/mach-imx/mach-imx6q.c | |||
@@ -254,13 +254,12 @@ static void __init imx6q_opp_init(struct device *cpu_dev) | |||
254 | { | 254 | { |
255 | struct device_node *np; | 255 | struct device_node *np; |
256 | 256 | ||
257 | np = of_find_node_by_path("/cpus/cpu@0"); | 257 | np = of_node_get(cpu_dev->of_node); |
258 | if (!np) { | 258 | if (!np) { |
259 | pr_warn("failed to find cpu0 node\n"); | 259 | pr_warn("failed to find cpu0 node\n"); |
260 | return; | 260 | return; |
261 | } | 261 | } |
262 | 262 | ||
263 | cpu_dev->of_node = np; | ||
264 | if (of_init_opp_table(cpu_dev)) { | 263 | if (of_init_opp_table(cpu_dev)) { |
265 | pr_warn("failed to init OPP table\n"); | 264 | pr_warn("failed to init OPP table\n"); |
266 | goto put_node; | 265 | goto put_node; |
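The imx6q change above reuses the of_node already attached to the CPU device instead of re-resolving "/cpus/cpu@0" by path, and of_node_get() keeps the later of_node_put() on the error path balanced. A toy refcount model of why the explicit get still matters (this is not the OF API, just an illustration of the pairing):

#include <stdio.h>

struct node {
	int refcount;
};

static struct node *node_get(struct node *n)
{
	if (n)
		n->refcount++;
	return n;
}

static void node_put(struct node *n)
{
	if (n)
		n->refcount--;
}

int main(void)
{
	struct node cpu0 = { .refcount = 1 };    /* reference held by the device itself */
	struct node *np = node_get(&cpu0);       /* take our own reference */

	/* ... use np, e.g. to build the OPP table ... */
	node_put(np);                            /* cleanup drops only our reference */

	printf("device still holds %d reference(s)\n", cpu0.refcount);   /* 1 */
	return 0;
}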
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig index 614e41e7881b..905efc8cac79 100644 --- a/arch/arm/mach-msm/Kconfig +++ b/arch/arm/mach-msm/Kconfig | |||
@@ -121,8 +121,7 @@ config MSM_SMD | |||
121 | bool | 121 | bool |
122 | 122 | ||
123 | config MSM_GPIOMUX | 123 | config MSM_GPIOMUX |
124 | depends on !(ARCH_MSM8X60 || ARCH_MSM8960) | 124 | bool |
125 | bool "MSM V1 TLMM GPIOMUX architecture" | ||
126 | help | 125 | help |
127 | Support for MSM V1 TLMM GPIOMUX architecture. | 126 | Support for MSM V1 TLMM GPIOMUX architecture. |
128 | 127 | ||
diff --git a/arch/arm/mach-msm/devices-msm7x00.c b/arch/arm/mach-msm/devices-msm7x00.c index 6d50fb964863..d83404d4b328 100644 --- a/arch/arm/mach-msm/devices-msm7x00.c +++ b/arch/arm/mach-msm/devices-msm7x00.c | |||
@@ -456,9 +456,9 @@ static struct clk_pcom_desc msm_clocks_7x01a[] = { | |||
456 | CLK_PCOM("tsif_ref_clk", TSIF_REF_CLK, NULL, 0), | 456 | CLK_PCOM("tsif_ref_clk", TSIF_REF_CLK, NULL, 0), |
457 | CLK_PCOM("tv_dac_clk", TV_DAC_CLK, NULL, 0), | 457 | CLK_PCOM("tv_dac_clk", TV_DAC_CLK, NULL, 0), |
458 | CLK_PCOM("tv_enc_clk", TV_ENC_CLK, NULL, 0), | 458 | CLK_PCOM("tv_enc_clk", TV_ENC_CLK, NULL, 0), |
459 | CLK_PCOM("uart_clk", UART1_CLK, "msm_serial.0", OFF), | 459 | CLK_PCOM("core", UART1_CLK, "msm_serial.0", OFF), |
460 | CLK_PCOM("uart_clk", UART2_CLK, "msm_serial.1", 0), | 460 | CLK_PCOM("core", UART2_CLK, "msm_serial.1", 0), |
461 | CLK_PCOM("uart_clk", UART3_CLK, "msm_serial.2", OFF), | 461 | CLK_PCOM("core", UART3_CLK, "msm_serial.2", OFF), |
462 | CLK_PCOM("uart1dm_clk", UART1DM_CLK, NULL, OFF), | 462 | CLK_PCOM("uart1dm_clk", UART1DM_CLK, NULL, OFF), |
463 | CLK_PCOM("uart2dm_clk", UART2DM_CLK, NULL, 0), | 463 | CLK_PCOM("uart2dm_clk", UART2DM_CLK, NULL, 0), |
464 | CLK_PCOM("usb_hs_clk", USB_HS_CLK, "msm_hsusb", OFF), | 464 | CLK_PCOM("usb_hs_clk", USB_HS_CLK, "msm_hsusb", OFF), |
diff --git a/arch/arm/mach-msm/devices-msm7x30.c b/arch/arm/mach-msm/devices-msm7x30.c index d4db75acff56..14e286948f69 100644 --- a/arch/arm/mach-msm/devices-msm7x30.c +++ b/arch/arm/mach-msm/devices-msm7x30.c | |||
@@ -211,7 +211,7 @@ static struct clk_pcom_desc msm_clocks_7x30[] = { | |||
211 | CLK_PCOM("spi_pclk", SPI_P_CLK, NULL, 0), | 211 | CLK_PCOM("spi_pclk", SPI_P_CLK, NULL, 0), |
212 | CLK_PCOM("tv_dac_clk", TV_DAC_CLK, NULL, 0), | 212 | CLK_PCOM("tv_dac_clk", TV_DAC_CLK, NULL, 0), |
213 | CLK_PCOM("tv_enc_clk", TV_ENC_CLK, NULL, 0), | 213 | CLK_PCOM("tv_enc_clk", TV_ENC_CLK, NULL, 0), |
214 | CLK_PCOM("uart_clk", UART2_CLK, "msm_serial.1", 0), | 214 | CLK_PCOM("core", UART2_CLK, "msm_serial.1", 0), |
215 | CLK_PCOM("usb_phy_clk", USB_PHY_CLK, NULL, 0), | 215 | CLK_PCOM("usb_phy_clk", USB_PHY_CLK, NULL, 0), |
216 | CLK_PCOM("usb_hs_clk", USB_HS_CLK, NULL, OFF), | 216 | CLK_PCOM("usb_hs_clk", USB_HS_CLK, NULL, OFF), |
217 | CLK_PCOM("usb_hs_pclk", USB_HS_P_CLK, NULL, OFF), | 217 | CLK_PCOM("usb_hs_pclk", USB_HS_P_CLK, NULL, OFF), |
diff --git a/arch/arm/mach-msm/devices-qsd8x50.c b/arch/arm/mach-msm/devices-qsd8x50.c index f5518112284b..2ed89b25d304 100644 --- a/arch/arm/mach-msm/devices-qsd8x50.c +++ b/arch/arm/mach-msm/devices-qsd8x50.c | |||
@@ -358,9 +358,9 @@ static struct clk_pcom_desc msm_clocks_8x50[] = { | |||
358 | CLK_PCOM("tsif_ref_clk", TSIF_REF_CLK, NULL, 0), | 358 | CLK_PCOM("tsif_ref_clk", TSIF_REF_CLK, NULL, 0), |
359 | CLK_PCOM("tv_dac_clk", TV_DAC_CLK, NULL, 0), | 359 | CLK_PCOM("tv_dac_clk", TV_DAC_CLK, NULL, 0), |
360 | CLK_PCOM("tv_enc_clk", TV_ENC_CLK, NULL, 0), | 360 | CLK_PCOM("tv_enc_clk", TV_ENC_CLK, NULL, 0), |
361 | CLK_PCOM("uart_clk", UART1_CLK, NULL, OFF), | 361 | CLK_PCOM("core", UART1_CLK, NULL, OFF), |
362 | CLK_PCOM("uart_clk", UART2_CLK, NULL, 0), | 362 | CLK_PCOM("core", UART2_CLK, NULL, 0), |
363 | CLK_PCOM("uart_clk", UART3_CLK, "msm_serial.2", OFF), | 363 | CLK_PCOM("core", UART3_CLK, "msm_serial.2", OFF), |
364 | CLK_PCOM("uartdm_clk", UART1DM_CLK, NULL, OFF), | 364 | CLK_PCOM("uartdm_clk", UART1DM_CLK, NULL, OFF), |
365 | CLK_PCOM("uartdm_clk", UART2DM_CLK, NULL, 0), | 365 | CLK_PCOM("uartdm_clk", UART2DM_CLK, NULL, 0), |
366 | CLK_PCOM("usb_hs_clk", USB_HS_CLK, NULL, OFF), | 366 | CLK_PCOM("usb_hs_clk", USB_HS_CLK, NULL, OFF), |
diff --git a/arch/arm/mach-msm/gpiomux-v1.c b/arch/arm/mach-msm/gpiomux-v1.c deleted file mode 100644 index 27de2abd7144..000000000000 --- a/arch/arm/mach-msm/gpiomux-v1.c +++ /dev/null | |||
@@ -1,33 +0,0 @@ | |||
1 | /* Copyright (c) 2010, Code Aurora Forum. All rights reserved. | ||
2 | * | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License version 2 and | ||
5 | * only version 2 as published by the Free Software Foundation. | ||
6 | * | ||
7 | * This program is distributed in the hope that it will be useful, | ||
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
10 | * GNU General Public License for more details. | ||
11 | * | ||
12 | * You should have received a copy of the GNU General Public License | ||
13 | * along with this program; if not, write to the Free Software | ||
14 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA | ||
15 | * 02110-1301, USA. | ||
16 | */ | ||
17 | #include <linux/kernel.h> | ||
18 | #include "gpiomux.h" | ||
19 | #include "proc_comm.h" | ||
20 | |||
21 | void __msm_gpiomux_write(unsigned gpio, gpiomux_config_t val) | ||
22 | { | ||
23 | unsigned tlmm_config = (val & ~GPIOMUX_CTL_MASK) | | ||
24 | ((gpio & 0x3ff) << 4); | ||
25 | unsigned tlmm_disable = 0; | ||
26 | int rc; | ||
27 | |||
28 | rc = msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, | ||
29 | &tlmm_config, &tlmm_disable); | ||
30 | if (rc) | ||
31 | pr_err("%s: unexpected proc_comm failure %d: %08x %08x\n", | ||
32 | __func__, rc, tlmm_config, tlmm_disable); | ||
33 | } | ||
diff --git a/arch/arm/mach-msm/gpiomux.h b/arch/arm/mach-msm/gpiomux.h index 8e82f41a8923..4410d7766f93 100644 --- a/arch/arm/mach-msm/gpiomux.h +++ b/arch/arm/mach-msm/gpiomux.h | |||
@@ -73,16 +73,6 @@ extern struct msm_gpiomux_config msm_gpiomux_configs[GPIOMUX_NGPIOS]; | |||
73 | int msm_gpiomux_write(unsigned gpio, | 73 | int msm_gpiomux_write(unsigned gpio, |
74 | gpiomux_config_t active, | 74 | gpiomux_config_t active, |
75 | gpiomux_config_t suspended); | 75 | gpiomux_config_t suspended); |
76 | |||
77 | /* Architecture-internal function for use by the framework only. | ||
78 | * This function can assume the following: | ||
79 | * - the gpio value has passed a bounds-check | ||
80 | * - the gpiomux spinlock has been obtained | ||
81 | * | ||
82 | * This function is not for public consumption. External users | ||
83 | * should use msm_gpiomux_write. | ||
84 | */ | ||
85 | void __msm_gpiomux_write(unsigned gpio, gpiomux_config_t val); | ||
86 | #else | 76 | #else |
87 | static inline int msm_gpiomux_write(unsigned gpio, | 77 | static inline int msm_gpiomux_write(unsigned gpio, |
88 | gpiomux_config_t active, | 78 | gpiomux_config_t active, |
diff --git a/arch/arm/mach-mvebu/platsmp.c b/arch/arm/mach-mvebu/platsmp.c index ce81d3031405..594b63db4215 100644 --- a/arch/arm/mach-mvebu/platsmp.c +++ b/arch/arm/mach-mvebu/platsmp.c | |||
@@ -29,45 +29,40 @@ | |||
29 | #include "pmsu.h" | 29 | #include "pmsu.h" |
30 | #include "coherency.h" | 30 | #include "coherency.h" |
31 | 31 | ||
32 | static struct clk *__init get_cpu_clk(int cpu) | ||
33 | { | ||
34 | struct clk *cpu_clk; | ||
35 | struct device_node *np = of_get_cpu_node(cpu, NULL); | ||
36 | |||
37 | if (WARN(!np, "missing cpu node\n")) | ||
38 | return NULL; | ||
39 | cpu_clk = of_clk_get(np, 0); | ||
40 | if (WARN_ON(IS_ERR(cpu_clk))) | ||
41 | return NULL; | ||
42 | return cpu_clk; | ||
43 | } | ||
44 | |||
32 | void __init set_secondary_cpus_clock(void) | 45 | void __init set_secondary_cpus_clock(void) |
33 | { | 46 | { |
34 | int thiscpu; | 47 | int thiscpu, cpu; |
35 | unsigned long rate; | 48 | unsigned long rate; |
36 | struct clk *cpu_clk = NULL; | 49 | struct clk *cpu_clk; |
37 | struct device_node *np = NULL; | ||
38 | 50 | ||
39 | thiscpu = smp_processor_id(); | 51 | thiscpu = smp_processor_id(); |
40 | for_each_node_by_type(np, "cpu") { | 52 | cpu_clk = get_cpu_clk(thiscpu); |
41 | int err; | 53 | if (!cpu_clk) |
42 | int cpu; | ||
43 | |||
44 | err = of_property_read_u32(np, "reg", &cpu); | ||
45 | if (WARN_ON(err)) | ||
46 | return; | ||
47 | |||
48 | if (cpu == thiscpu) { | ||
49 | cpu_clk = of_clk_get(np, 0); | ||
50 | break; | ||
51 | } | ||
52 | } | ||
53 | if (WARN_ON(IS_ERR(cpu_clk))) | ||
54 | return; | 54 | return; |
55 | clk_prepare_enable(cpu_clk); | 55 | clk_prepare_enable(cpu_clk); |
56 | rate = clk_get_rate(cpu_clk); | 56 | rate = clk_get_rate(cpu_clk); |
57 | 57 | ||
58 | /* set all the other CPU clk to the same rate as the boot CPU */ | 58 | /* set all the other CPU clk to the same rate as the boot CPU */ |
59 | for_each_node_by_type(np, "cpu") { | 59 | for_each_possible_cpu(cpu) { |
60 | int err; | 60 | if (cpu == thiscpu) |
61 | int cpu; | 61 | continue; |
62 | 62 | cpu_clk = get_cpu_clk(cpu); | |
63 | err = of_property_read_u32(np, "reg", &cpu); | 63 | if (!cpu_clk) |
64 | if (WARN_ON(err)) | ||
65 | return; | 64 | return; |
66 | 65 | clk_set_rate(cpu_clk, rate); | |
67 | if (cpu != thiscpu) { | ||
68 | cpu_clk = of_clk_get(np, 0); | ||
69 | clk_set_rate(cpu_clk, rate); | ||
70 | } | ||
71 | } | 66 | } |
72 | } | 67 | } |
73 | 68 | ||
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c index f6eeb87e4e95..827d15009a86 100644 --- a/arch/arm/mach-omap2/board-n8x0.c +++ b/arch/arm/mach-omap2/board-n8x0.c | |||
@@ -122,11 +122,7 @@ static struct musb_hdrc_config musb_config = { | |||
122 | }; | 122 | }; |
123 | 123 | ||
124 | static struct musb_hdrc_platform_data tusb_data = { | 124 | static struct musb_hdrc_platform_data tusb_data = { |
125 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
126 | .mode = MUSB_OTG, | 125 | .mode = MUSB_OTG, |
127 | #else | ||
128 | .mode = MUSB_HOST, | ||
129 | #endif | ||
130 | .set_power = tusb_set_power, | 126 | .set_power = tusb_set_power, |
131 | .min_power = 25, /* x2 = 50 mA drawn from VBUS as peripheral */ | 127 | .min_power = 25, /* x2 = 50 mA drawn from VBUS as peripheral */ |
132 | .power = 100, /* Max 100 mA VBUS for host mode */ | 128 | .power = 100, /* Max 100 mA VBUS for host mode */ |
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c index 04c116555412..1c6ae5f5bae7 100644 --- a/arch/arm/mach-omap2/board-omap3beagle.c +++ b/arch/arm/mach-omap2/board-omap3beagle.c | |||
@@ -33,7 +33,7 @@ | |||
33 | #include <linux/mtd/nand.h> | 33 | #include <linux/mtd/nand.h> |
34 | #include <linux/mmc/host.h> | 34 | #include <linux/mmc/host.h> |
35 | #include <linux/usb/phy.h> | 35 | #include <linux/usb/phy.h> |
36 | #include <linux/usb/nop-usb-xceiv.h> | 36 | #include <linux/usb/usb_phy_gen_xceiv.h> |
37 | 37 | ||
38 | #include <linux/regulator/machine.h> | 38 | #include <linux/regulator/machine.h> |
39 | #include <linux/i2c/twl.h> | 39 | #include <linux/i2c/twl.h> |
@@ -279,7 +279,7 @@ static struct regulator_consumer_supply beagle_vsim_supply[] = { | |||
279 | static struct gpio_led gpio_leds[]; | 279 | static struct gpio_led gpio_leds[]; |
280 | 280 | ||
281 | /* PHY's VCC regulator might be added later, so flag that we need it */ | 281 | /* PHY's VCC regulator might be added later, so flag that we need it */ |
282 | static struct nop_usb_xceiv_platform_data hsusb2_phy_data = { | 282 | static struct usb_phy_gen_xceiv_platform_data hsusb2_phy_data = { |
283 | .needs_vcc = true, | 283 | .needs_vcc = true, |
284 | }; | 284 | }; |
285 | 285 | ||
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c index 8c026269baca..52bdddd41e0e 100644 --- a/arch/arm/mach-omap2/board-omap3evm.c +++ b/arch/arm/mach-omap2/board-omap3evm.c | |||
@@ -33,7 +33,7 @@ | |||
33 | #include <linux/i2c/twl.h> | 33 | #include <linux/i2c/twl.h> |
34 | #include <linux/usb/otg.h> | 34 | #include <linux/usb/otg.h> |
35 | #include <linux/usb/musb.h> | 35 | #include <linux/usb/musb.h> |
36 | #include <linux/usb/nop-usb-xceiv.h> | 36 | #include <linux/usb/usb_phy_gen_xceiv.h> |
37 | #include <linux/smsc911x.h> | 37 | #include <linux/smsc911x.h> |
38 | 38 | ||
39 | #include <linux/wl12xx.h> | 39 | #include <linux/wl12xx.h> |
@@ -468,7 +468,7 @@ struct wl12xx_platform_data omap3evm_wlan_data __initdata = { | |||
468 | static struct regulator_consumer_supply omap3evm_vaux2_supplies[] = { | 468 | static struct regulator_consumer_supply omap3evm_vaux2_supplies[] = { |
469 | REGULATOR_SUPPLY("VDD_CSIPHY1", "omap3isp"), /* OMAP ISP */ | 469 | REGULATOR_SUPPLY("VDD_CSIPHY1", "omap3isp"), /* OMAP ISP */ |
470 | REGULATOR_SUPPLY("VDD_CSIPHY2", "omap3isp"), /* OMAP ISP */ | 470 | REGULATOR_SUPPLY("VDD_CSIPHY2", "omap3isp"), /* OMAP ISP */ |
471 | REGULATOR_SUPPLY("vcc", "nop_usb_xceiv.2"), /* hsusb port 2 */ | 471 | REGULATOR_SUPPLY("vcc", "usb_phy_gen_xceiv.2"), /* hsusb port 2 */ |
472 | REGULATOR_SUPPLY("vaux2", NULL), | 472 | REGULATOR_SUPPLY("vaux2", NULL), |
473 | }; | 473 | }; |
474 | 474 | ||
diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c index b1547a0edfcd..d2b455e70486 100644 --- a/arch/arm/mach-omap2/board-omap3pandora.c +++ b/arch/arm/mach-omap2/board-omap3pandora.c | |||
@@ -352,7 +352,7 @@ static struct regulator_consumer_supply pandora_vcc_lcd_supply[] = { | |||
352 | }; | 352 | }; |
353 | 353 | ||
354 | static struct regulator_consumer_supply pandora_usb_phy_supply[] = { | 354 | static struct regulator_consumer_supply pandora_usb_phy_supply[] = { |
355 | REGULATOR_SUPPLY("vcc", "nop_usb_xceiv.2"), /* hsusb port 2 */ | 355 | REGULATOR_SUPPLY("vcc", "usb_phy_gen_xceiv.2"), /* hsusb port 2 */ |
356 | }; | 356 | }; |
357 | 357 | ||
358 | /* ads7846 on SPI and 2 nub controllers on I2C */ | 358 | /* ads7846 on SPI and 2 nub controllers on I2C */ |
diff --git a/arch/arm/mach-omap2/board-rx51.c b/arch/arm/mach-omap2/board-rx51.c index d2ea68ea678a..7735105561d8 100644 --- a/arch/arm/mach-omap2/board-rx51.c +++ b/arch/arm/mach-omap2/board-rx51.c | |||
@@ -85,7 +85,7 @@ static struct omap_board_mux board_mux[] __initdata = { | |||
85 | 85 | ||
86 | static struct omap_musb_board_data musb_board_data = { | 86 | static struct omap_musb_board_data musb_board_data = { |
87 | .interface_type = MUSB_INTERFACE_ULPI, | 87 | .interface_type = MUSB_INTERFACE_ULPI, |
88 | .mode = MUSB_PERIPHERAL, | 88 | .mode = MUSB_OTG, |
89 | .power = 0, | 89 | .power = 0, |
90 | }; | 90 | }; |
91 | 91 | ||
diff --git a/arch/arm/mach-omap2/dss-common.c b/arch/arm/mach-omap2/dss-common.c index 393aeefaebb0..043e5705f2a6 100644 --- a/arch/arm/mach-omap2/dss-common.c +++ b/arch/arm/mach-omap2/dss-common.c | |||
@@ -42,7 +42,7 @@ | |||
42 | 42 | ||
43 | /* Using generic display panel */ | 43 | /* Using generic display panel */ |
44 | static struct tfp410_platform_data omap4_dvi_panel = { | 44 | static struct tfp410_platform_data omap4_dvi_panel = { |
45 | .i2c_bus_num = 3, | 45 | .i2c_bus_num = 2, |
46 | .power_down_gpio = PANDA_DVI_TFP410_POWER_DOWN_GPIO, | 46 | .power_down_gpio = PANDA_DVI_TFP410_POWER_DOWN_GPIO, |
47 | }; | 47 | }; |
48 | 48 | ||
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c index 5cc92874be7e..f99f68e1e85b 100644 --- a/arch/arm/mach-omap2/omap_device.c +++ b/arch/arm/mach-omap2/omap_device.c | |||
@@ -129,6 +129,7 @@ static int omap_device_build_from_dt(struct platform_device *pdev) | |||
129 | struct device_node *node = pdev->dev.of_node; | 129 | struct device_node *node = pdev->dev.of_node; |
130 | const char *oh_name; | 130 | const char *oh_name; |
131 | int oh_cnt, i, ret = 0; | 131 | int oh_cnt, i, ret = 0; |
132 | bool device_active = false; | ||
132 | 133 | ||
133 | oh_cnt = of_property_count_strings(node, "ti,hwmods"); | 134 | oh_cnt = of_property_count_strings(node, "ti,hwmods"); |
134 | if (oh_cnt <= 0) { | 135 | if (oh_cnt <= 0) { |
@@ -152,6 +153,8 @@ static int omap_device_build_from_dt(struct platform_device *pdev) | |||
152 | goto odbfd_exit1; | 153 | goto odbfd_exit1; |
153 | } | 154 | } |
154 | hwmods[i] = oh; | 155 | hwmods[i] = oh; |
156 | if (oh->flags & HWMOD_INIT_NO_IDLE) | ||
157 | device_active = true; | ||
155 | } | 158 | } |
156 | 159 | ||
157 | od = omap_device_alloc(pdev, hwmods, oh_cnt); | 160 | od = omap_device_alloc(pdev, hwmods, oh_cnt); |
@@ -172,6 +175,11 @@ static int omap_device_build_from_dt(struct platform_device *pdev) | |||
172 | 175 | ||
173 | pdev->dev.pm_domain = &omap_device_pm_domain; | 176 | pdev->dev.pm_domain = &omap_device_pm_domain; |
174 | 177 | ||
178 | if (device_active) { | ||
179 | omap_device_enable(pdev); | ||
180 | pm_runtime_set_active(&pdev->dev); | ||
181 | } | ||
182 | |||
175 | odbfd_exit1: | 183 | odbfd_exit1: |
176 | kfree(hwmods); | 184 | kfree(hwmods); |
177 | odbfd_exit: | 185 | odbfd_exit: |
@@ -842,6 +850,7 @@ static int __init omap_device_late_idle(struct device *dev, void *data) | |||
842 | { | 850 | { |
843 | struct platform_device *pdev = to_platform_device(dev); | 851 | struct platform_device *pdev = to_platform_device(dev); |
844 | struct omap_device *od = to_omap_device(pdev); | 852 | struct omap_device *od = to_omap_device(pdev); |
853 | int i; | ||
845 | 854 | ||
846 | if (!od) | 855 | if (!od) |
847 | return 0; | 856 | return 0; |
@@ -850,6 +859,15 @@ static int __init omap_device_late_idle(struct device *dev, void *data) | |||
850 | * If omap_device state is enabled, but has no driver bound, | 859 | * If omap_device state is enabled, but has no driver bound, |
851 | * idle it. | 860 | * idle it. |
852 | */ | 861 | */ |
862 | |||
863 | /* | ||
864 | * Some devices (like memory controllers) are always kept | ||
865 | * enabled, and should not be idled even with no drivers. | ||
866 | */ | ||
867 | for (i = 0; i < od->hwmods_cnt; i++) | ||
868 | if (od->hwmods[i]->flags & HWMOD_INIT_NO_IDLE) | ||
869 | return 0; | ||
870 | |||
853 | if (od->_driver_status != BUS_NOTIFY_BOUND_DRIVER) { | 871 | if (od->_driver_status != BUS_NOTIFY_BOUND_DRIVER) { |
854 | if (od->_state == OMAP_DEVICE_STATE_ENABLED) { | 872 | if (od->_state == OMAP_DEVICE_STATE_ENABLED) { |
855 | dev_warn(dev, "%s: enabled but no driver. Idling\n", | 873 | dev_warn(dev, "%s: enabled but no driver. Idling\n", |
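omap_device now honours HWMOD_INIT_NO_IDLE for DT-built devices: if any backing hwmod carries the flag, the device is marked active at build time and skipped by the late-idle pass. A simplified standalone model of that policy (placeholder flag value, not the kernel structures):

#include <stdbool.h>
#include <stdio.h>

#define HWMOD_INIT_NO_IDLE	(1 << 0)	/* placeholder bit value */

/* returns true when the late-idle pass may idle the device */
static bool may_late_idle(const unsigned int *hwmod_flags, int cnt, bool driver_bound)
{
	for (int i = 0; i < cnt; i++)
		if (hwmod_flags[i] & HWMOD_INIT_NO_IDLE)
			return false;		/* e.g. the earlyprintk UART or a memory controller */
	return !driver_bound;			/* enabled but unclaimed: idle it */
}

int main(void)
{
	unsigned int console_uart[] = { HWMOD_INIT_NO_IDLE };
	unsigned int plain_uart[]   = { 0 };

	printf("console uart idled: %d\n", may_late_idle(console_uart, 1, false));	/* 0 */
	printf("plain uart idled:   %d\n", may_late_idle(plain_uart, 1, false));	/* 1 */
	return 0;
}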
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index 7341eff63f56..7f4db12b1459 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c | |||
@@ -2386,7 +2386,7 @@ static void __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data) | |||
2386 | 2386 | ||
2387 | np = of_dev_hwmod_lookup(of_find_node_by_name(NULL, "ocp"), oh); | 2387 | np = of_dev_hwmod_lookup(of_find_node_by_name(NULL, "ocp"), oh); |
2388 | if (np) | 2388 | if (np) |
2389 | va_start = of_iomap(np, 0); | 2389 | va_start = of_iomap(np, oh->mpu_rt_idx); |
2390 | } else { | 2390 | } else { |
2391 | va_start = ioremap(mem->pa_start, mem->pa_end - mem->pa_start); | 2391 | va_start = ioremap(mem->pa_start, mem->pa_end - mem->pa_start); |
2392 | } | 2392 | } |
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h index aab33fd814c0..e1482a9b3bc2 100644 --- a/arch/arm/mach-omap2/omap_hwmod.h +++ b/arch/arm/mach-omap2/omap_hwmod.h | |||
@@ -95,6 +95,54 @@ extern struct omap_hwmod_sysc_fields omap_hwmod_sysc_type3; | |||
95 | #define MODULEMODE_HWCTRL 1 | 95 | #define MODULEMODE_HWCTRL 1 |
96 | #define MODULEMODE_SWCTRL 2 | 96 | #define MODULEMODE_SWCTRL 2 |
97 | 97 | ||
98 | #define DEBUG_OMAP2UART1_FLAGS 0 | ||
99 | #define DEBUG_OMAP2UART2_FLAGS 0 | ||
100 | #define DEBUG_OMAP2UART3_FLAGS 0 | ||
101 | #define DEBUG_OMAP3UART3_FLAGS 0 | ||
102 | #define DEBUG_OMAP3UART4_FLAGS 0 | ||
103 | #define DEBUG_OMAP4UART3_FLAGS 0 | ||
104 | #define DEBUG_OMAP4UART4_FLAGS 0 | ||
105 | #define DEBUG_TI81XXUART1_FLAGS 0 | ||
106 | #define DEBUG_TI81XXUART2_FLAGS 0 | ||
107 | #define DEBUG_TI81XXUART3_FLAGS 0 | ||
108 | #define DEBUG_AM33XXUART1_FLAGS 0 | ||
109 | |||
110 | #define DEBUG_OMAPUART_FLAGS (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET) | ||
111 | |||
112 | #if defined(CONFIG_DEBUG_OMAP2UART1) | ||
113 | #undef DEBUG_OMAP2UART1_FLAGS | ||
114 | #define DEBUG_OMAP2UART1_FLAGS DEBUG_OMAPUART_FLAGS | ||
115 | #elif defined(CONFIG_DEBUG_OMAP2UART2) | ||
116 | #undef DEBUG_OMAP2UART2_FLAGS | ||
117 | #define DEBUG_OMAP2UART2_FLAGS DEBUG_OMAPUART_FLAGS | ||
118 | #elif defined(CONFIG_DEBUG_OMAP2UART3) | ||
119 | #undef DEBUG_OMAP2UART3_FLAGS | ||
120 | #define DEBUG_OMAP2UART3_FLAGS DEBUG_OMAPUART_FLAGS | ||
121 | #elif defined(CONFIG_DEBUG_OMAP3UART3) | ||
122 | #undef DEBUG_OMAP3UART3_FLAGS | ||
123 | #define DEBUG_OMAP3UART3_FLAGS DEBUG_OMAPUART_FLAGS | ||
124 | #elif defined(CONFIG_DEBUG_OMAP3UART4) | ||
125 | #undef DEBUG_OMAP3UART4_FLAGS | ||
126 | #define DEBUG_OMAP3UART4_FLAGS DEBUG_OMAPUART_FLAGS | ||
127 | #elif defined(CONFIG_DEBUG_OMAP4UART3) | ||
128 | #undef DEBUG_OMAP4UART3_FLAGS | ||
129 | #define DEBUG_OMAP4UART3_FLAGS DEBUG_OMAPUART_FLAGS | ||
130 | #elif defined(CONFIG_DEBUG_OMAP4UART4) | ||
131 | #undef DEBUG_OMAP4UART4_FLAGS | ||
132 | #define DEBUG_OMAP4UART4_FLAGS DEBUG_OMAPUART_FLAGS | ||
133 | #elif defined(CONFIG_DEBUG_TI81XXUART1) | ||
134 | #undef DEBUG_TI81XXUART1_FLAGS | ||
135 | #define DEBUG_TI81XXUART1_FLAGS DEBUG_OMAPUART_FLAGS | ||
136 | #elif defined(CONFIG_DEBUG_TI81XXUART2) | ||
137 | #undef DEBUG_TI81XXUART2_FLAGS | ||
138 | #define DEBUG_TI81XXUART2_FLAGS DEBUG_OMAPUART_FLAGS | ||
139 | #elif defined(CONFIG_DEBUG_TI81XXUART3) | ||
140 | #undef DEBUG_TI81XXUART3_FLAGS | ||
141 | #define DEBUG_TI81XXUART3_FLAGS DEBUG_OMAPUART_FLAGS | ||
142 | #elif defined(CONFIG_DEBUG_AM33XXUART1) | ||
143 | #undef DEBUG_AM33XXUART1_FLAGS | ||
144 | #define DEBUG_AM33XXUART1_FLAGS DEBUG_OMAPUART_FLAGS | ||
145 | #endif | ||
98 | 146 | ||
99 | /** | 147 | /** |
100 | * struct omap_hwmod_mux_info - hwmod specific mux configuration | 148 | * struct omap_hwmod_mux_info - hwmod specific mux configuration |
@@ -568,6 +616,7 @@ struct omap_hwmod_link { | |||
568 | * @voltdm: pointer to voltage domain (filled in at runtime) | 616 | * @voltdm: pointer to voltage domain (filled in at runtime) |
569 | * @dev_attr: arbitrary device attributes that can be passed to the driver | 617 | * @dev_attr: arbitrary device attributes that can be passed to the driver |
570 | * @_sysc_cache: internal-use hwmod flags | 618 | * @_sysc_cache: internal-use hwmod flags |
619 | * @mpu_rt_idx: index of device address space for register target (for DT boot) | ||
571 | * @_mpu_rt_va: cached register target start address (internal use) | 620 | * @_mpu_rt_va: cached register target start address (internal use) |
572 | * @_mpu_port: cached MPU register target slave (internal use) | 621 | * @_mpu_port: cached MPU register target slave (internal use) |
573 | * @opt_clks_cnt: number of @opt_clks | 622 | * @opt_clks_cnt: number of @opt_clks |
@@ -617,6 +666,7 @@ struct omap_hwmod { | |||
617 | struct list_head node; | 666 | struct list_head node; |
618 | struct omap_hwmod_ocp_if *_mpu_port; | 667 | struct omap_hwmod_ocp_if *_mpu_port; |
619 | u16 flags; | 668 | u16 flags; |
669 | u8 mpu_rt_idx; | ||
620 | u8 response_lat; | 670 | u8 response_lat; |
621 | u8 rst_lines_cnt; | 671 | u8 rst_lines_cnt; |
622 | u8 opt_clks_cnt; | 672 | u8 opt_clks_cnt; |
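The DEBUG_*UART*_FLAGS block above lets every UART hwmod OR in a per-port macro unconditionally; only the port selected as the DEBUG_LL console expands to the no-idle/no-reset flags, everything else stays 0. A standalone preprocessor demo of the same pattern (the bit values and the CONFIG define are placeholders):

#include <stdio.h>

#define HWMOD_INIT_NO_IDLE	(1 << 0)	/* placeholder bit values */
#define HWMOD_INIT_NO_RESET	(1 << 1)
#define HWMOD_SWSUP_SIDLE_ACT	(1 << 2)

#define CONFIG_DEBUG_OMAP2UART1	1		/* pretend this console was selected */

#define DEBUG_OMAP2UART1_FLAGS	0
#define DEBUG_OMAP2UART2_FLAGS	0
#define DEBUG_OMAPUART_FLAGS	(HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET)

#if defined(CONFIG_DEBUG_OMAP2UART1)
#undef DEBUG_OMAP2UART1_FLAGS
#define DEBUG_OMAP2UART1_FLAGS	DEBUG_OMAPUART_FLAGS
#endif

int main(void)
{
	unsigned int uart1_flags = DEBUG_OMAP2UART1_FLAGS | HWMOD_SWSUP_SIDLE_ACT;
	unsigned int uart2_flags = DEBUG_OMAP2UART2_FLAGS | HWMOD_SWSUP_SIDLE_ACT;

	printf("uart1 flags %#x, uart2 flags %#x\n", uart1_flags, uart2_flags);
	return 0;
}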
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c index d05fc7b54567..56cebb05509e 100644 --- a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c | |||
@@ -512,7 +512,7 @@ struct omap_hwmod omap2xxx_uart1_hwmod = { | |||
512 | .mpu_irqs = omap2_uart1_mpu_irqs, | 512 | .mpu_irqs = omap2_uart1_mpu_irqs, |
513 | .sdma_reqs = omap2_uart1_sdma_reqs, | 513 | .sdma_reqs = omap2_uart1_sdma_reqs, |
514 | .main_clk = "uart1_fck", | 514 | .main_clk = "uart1_fck", |
515 | .flags = HWMOD_SWSUP_SIDLE_ACT, | 515 | .flags = DEBUG_OMAP2UART1_FLAGS | HWMOD_SWSUP_SIDLE_ACT, |
516 | .prcm = { | 516 | .prcm = { |
517 | .omap2 = { | 517 | .omap2 = { |
518 | .module_offs = CORE_MOD, | 518 | .module_offs = CORE_MOD, |
@@ -532,7 +532,7 @@ struct omap_hwmod omap2xxx_uart2_hwmod = { | |||
532 | .mpu_irqs = omap2_uart2_mpu_irqs, | 532 | .mpu_irqs = omap2_uart2_mpu_irqs, |
533 | .sdma_reqs = omap2_uart2_sdma_reqs, | 533 | .sdma_reqs = omap2_uart2_sdma_reqs, |
534 | .main_clk = "uart2_fck", | 534 | .main_clk = "uart2_fck", |
535 | .flags = HWMOD_SWSUP_SIDLE_ACT, | 535 | .flags = DEBUG_OMAP2UART2_FLAGS | HWMOD_SWSUP_SIDLE_ACT, |
536 | .prcm = { | 536 | .prcm = { |
537 | .omap2 = { | 537 | .omap2 = { |
538 | .module_offs = CORE_MOD, | 538 | .module_offs = CORE_MOD, |
@@ -552,7 +552,7 @@ struct omap_hwmod omap2xxx_uart3_hwmod = { | |||
552 | .mpu_irqs = omap2_uart3_mpu_irqs, | 552 | .mpu_irqs = omap2_uart3_mpu_irqs, |
553 | .sdma_reqs = omap2_uart3_sdma_reqs, | 553 | .sdma_reqs = omap2_uart3_sdma_reqs, |
554 | .main_clk = "uart3_fck", | 554 | .main_clk = "uart3_fck", |
555 | .flags = HWMOD_SWSUP_SIDLE_ACT, | 555 | .flags = DEBUG_OMAP2UART3_FLAGS | HWMOD_SWSUP_SIDLE_ACT, |
556 | .prcm = { | 556 | .prcm = { |
557 | .omap2 = { | 557 | .omap2 = { |
558 | .module_offs = CORE_MOD, | 558 | .module_offs = CORE_MOD, |
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c index 28bbd56346a9..eb2f3b93b51c 100644 --- a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c | |||
@@ -562,6 +562,7 @@ static struct omap_hwmod am33xx_cpgmac0_hwmod = { | |||
562 | .clkdm_name = "cpsw_125mhz_clkdm", | 562 | .clkdm_name = "cpsw_125mhz_clkdm", |
563 | .flags = (HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY), | 563 | .flags = (HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY), |
564 | .main_clk = "cpsw_125mhz_gclk", | 564 | .main_clk = "cpsw_125mhz_gclk", |
565 | .mpu_rt_idx = 1, | ||
565 | .prcm = { | 566 | .prcm = { |
566 | .omap4 = { | 567 | .omap4 = { |
567 | .clkctrl_offs = AM33XX_CM_PER_CPGMAC0_CLKCTRL_OFFSET, | 568 | .clkctrl_offs = AM33XX_CM_PER_CPGMAC0_CLKCTRL_OFFSET, |
@@ -1512,7 +1513,7 @@ static struct omap_hwmod am33xx_uart1_hwmod = { | |||
1512 | .name = "uart1", | 1513 | .name = "uart1", |
1513 | .class = &uart_class, | 1514 | .class = &uart_class, |
1514 | .clkdm_name = "l4_wkup_clkdm", | 1515 | .clkdm_name = "l4_wkup_clkdm", |
1515 | .flags = HWMOD_SWSUP_SIDLE_ACT, | 1516 | .flags = DEBUG_AM33XXUART1_FLAGS | HWMOD_SWSUP_SIDLE_ACT, |
1516 | .main_clk = "dpll_per_m2_div4_wkupdm_ck", | 1517 | .main_clk = "dpll_per_m2_div4_wkupdm_ck", |
1517 | .prcm = { | 1518 | .prcm = { |
1518 | .omap4 = { | 1519 | .omap4 = { |
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index f7a3df2fb579..0c3a427da544 100644 --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c | |||
@@ -490,7 +490,7 @@ static struct omap_hwmod omap3xxx_uart1_hwmod = { | |||
490 | .mpu_irqs = omap2_uart1_mpu_irqs, | 490 | .mpu_irqs = omap2_uart1_mpu_irqs, |
491 | .sdma_reqs = omap2_uart1_sdma_reqs, | 491 | .sdma_reqs = omap2_uart1_sdma_reqs, |
492 | .main_clk = "uart1_fck", | 492 | .main_clk = "uart1_fck", |
493 | .flags = HWMOD_SWSUP_SIDLE_ACT, | 493 | .flags = DEBUG_TI81XXUART1_FLAGS | HWMOD_SWSUP_SIDLE_ACT, |
494 | .prcm = { | 494 | .prcm = { |
495 | .omap2 = { | 495 | .omap2 = { |
496 | .module_offs = CORE_MOD, | 496 | .module_offs = CORE_MOD, |
@@ -509,7 +509,7 @@ static struct omap_hwmod omap3xxx_uart2_hwmod = { | |||
509 | .mpu_irqs = omap2_uart2_mpu_irqs, | 509 | .mpu_irqs = omap2_uart2_mpu_irqs, |
510 | .sdma_reqs = omap2_uart2_sdma_reqs, | 510 | .sdma_reqs = omap2_uart2_sdma_reqs, |
511 | .main_clk = "uart2_fck", | 511 | .main_clk = "uart2_fck", |
512 | .flags = HWMOD_SWSUP_SIDLE_ACT, | 512 | .flags = DEBUG_TI81XXUART2_FLAGS | HWMOD_SWSUP_SIDLE_ACT, |
513 | .prcm = { | 513 | .prcm = { |
514 | .omap2 = { | 514 | .omap2 = { |
515 | .module_offs = CORE_MOD, | 515 | .module_offs = CORE_MOD, |
@@ -528,7 +528,8 @@ static struct omap_hwmod omap3xxx_uart3_hwmod = { | |||
528 | .mpu_irqs = omap2_uart3_mpu_irqs, | 528 | .mpu_irqs = omap2_uart3_mpu_irqs, |
529 | .sdma_reqs = omap2_uart3_sdma_reqs, | 529 | .sdma_reqs = omap2_uart3_sdma_reqs, |
530 | .main_clk = "uart3_fck", | 530 | .main_clk = "uart3_fck", |
531 | .flags = HWMOD_SWSUP_SIDLE_ACT, | 531 | .flags = DEBUG_OMAP3UART3_FLAGS | DEBUG_TI81XXUART3_FLAGS | |
532 | HWMOD_SWSUP_SIDLE_ACT, | ||
532 | .prcm = { | 533 | .prcm = { |
533 | .omap2 = { | 534 | .omap2 = { |
534 | .module_offs = OMAP3430_PER_MOD, | 535 | .module_offs = OMAP3430_PER_MOD, |
@@ -558,7 +559,7 @@ static struct omap_hwmod omap36xx_uart4_hwmod = { | |||
558 | .mpu_irqs = uart4_mpu_irqs, | 559 | .mpu_irqs = uart4_mpu_irqs, |
559 | .sdma_reqs = uart4_sdma_reqs, | 560 | .sdma_reqs = uart4_sdma_reqs, |
560 | .main_clk = "uart4_fck", | 561 | .main_clk = "uart4_fck", |
561 | .flags = HWMOD_SWSUP_SIDLE_ACT, | 562 | .flags = DEBUG_OMAP3UART4_FLAGS | HWMOD_SWSUP_SIDLE_ACT, |
562 | .prcm = { | 563 | .prcm = { |
563 | .omap2 = { | 564 | .omap2 = { |
564 | .module_offs = OMAP3430_PER_MOD, | 565 | .module_offs = OMAP3430_PER_MOD, |
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c index d04b5e60fdbe..9c3b504477d7 100644 --- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c | |||
@@ -2858,8 +2858,7 @@ static struct omap_hwmod omap44xx_uart3_hwmod = { | |||
2858 | .name = "uart3", | 2858 | .name = "uart3", |
2859 | .class = &omap44xx_uart_hwmod_class, | 2859 | .class = &omap44xx_uart_hwmod_class, |
2860 | .clkdm_name = "l4_per_clkdm", | 2860 | .clkdm_name = "l4_per_clkdm", |
2861 | .flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET | | 2861 | .flags = DEBUG_OMAP4UART3_FLAGS | HWMOD_SWSUP_SIDLE_ACT, |
2862 | HWMOD_SWSUP_SIDLE_ACT, | ||
2863 | .main_clk = "func_48m_fclk", | 2862 | .main_clk = "func_48m_fclk", |
2864 | .prcm = { | 2863 | .prcm = { |
2865 | .omap4 = { | 2864 | .omap4 = { |
@@ -2875,7 +2874,7 @@ static struct omap_hwmod omap44xx_uart4_hwmod = { | |||
2875 | .name = "uart4", | 2874 | .name = "uart4", |
2876 | .class = &omap44xx_uart_hwmod_class, | 2875 | .class = &omap44xx_uart_hwmod_class, |
2877 | .clkdm_name = "l4_per_clkdm", | 2876 | .clkdm_name = "l4_per_clkdm", |
2878 | .flags = HWMOD_SWSUP_SIDLE_ACT, | 2877 | .flags = DEBUG_OMAP4UART4_FLAGS | HWMOD_SWSUP_SIDLE_ACT, |
2879 | .main_clk = "func_48m_fclk", | 2878 | .main_clk = "func_48m_fclk", |
2880 | .prcm = { | 2879 | .prcm = { |
2881 | .omap4 = { | 2880 | .omap4 = { |
diff --git a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c index f37ae96b70a1..3c70f5c1860f 100644 --- a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c | |||
@@ -1375,7 +1375,7 @@ static struct omap_hwmod omap54xx_uart3_hwmod = { | |||
1375 | .name = "uart3", | 1375 | .name = "uart3", |
1376 | .class = &omap54xx_uart_hwmod_class, | 1376 | .class = &omap54xx_uart_hwmod_class, |
1377 | .clkdm_name = "l4per_clkdm", | 1377 | .clkdm_name = "l4per_clkdm", |
1378 | .flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET, | 1378 | .flags = DEBUG_OMAP4UART3_FLAGS, |
1379 | .main_clk = "func_48m_fclk", | 1379 | .main_clk = "func_48m_fclk", |
1380 | .prcm = { | 1380 | .prcm = { |
1381 | .omap4 = { | 1381 | .omap4 = { |
@@ -1391,6 +1391,7 @@ static struct omap_hwmod omap54xx_uart4_hwmod = { | |||
1391 | .name = "uart4", | 1391 | .name = "uart4", |
1392 | .class = &omap54xx_uart_hwmod_class, | 1392 | .class = &omap54xx_uart_hwmod_class, |
1393 | .clkdm_name = "l4per_clkdm", | 1393 | .clkdm_name = "l4per_clkdm", |
1394 | .flags = DEBUG_OMAP4UART4_FLAGS, | ||
1394 | .main_clk = "func_48m_fclk", | 1395 | .main_clk = "func_48m_fclk", |
1395 | .prcm = { | 1396 | .prcm = { |
1396 | .omap4 = { | 1397 | .omap4 = { |
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c index 3a674de6cb63..a388f8c1bcb3 100644 --- a/arch/arm/mach-omap2/serial.c +++ b/arch/arm/mach-omap2/serial.c | |||
@@ -208,17 +208,6 @@ static int __init omap_serial_early_init(void) | |||
208 | pr_info("%s used as console in debug mode: uart%d clocks will not be gated", | 208 | pr_info("%s used as console in debug mode: uart%d clocks will not be gated", |
209 | uart_name, uart->num); | 209 | uart_name, uart->num); |
210 | } | 210 | } |
211 | |||
212 | /* | ||
213 | * omap-uart can be used for earlyprintk logs | ||
214 | * So if omap-uart is used as console then prevent | ||
215 | * uart reset and idle to get logs from omap-uart | ||
216 | * until uart console driver is available to take | ||
217 | * care for console messages. | ||
218 | * Idling or resetting omap-uart while printing logs | ||
219 | * early boot logs can stall the boot-up. | ||
220 | */ | ||
221 | oh->flags |= HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET; | ||
222 | } | 211 | } |
223 | } while (1); | 212 | } while (1); |
224 | 213 | ||
diff --git a/arch/arm/mach-omap2/usb-host.c b/arch/arm/mach-omap2/usb-host.c index 2eb19d4d0aa1..e83a6a4b184a 100644 --- a/arch/arm/mach-omap2/usb-host.c +++ b/arch/arm/mach-omap2/usb-host.c | |||
@@ -28,7 +28,7 @@ | |||
28 | #include <linux/io.h> | 28 | #include <linux/io.h> |
29 | #include <linux/gpio.h> | 29 | #include <linux/gpio.h> |
30 | #include <linux/usb/phy.h> | 30 | #include <linux/usb/phy.h> |
31 | #include <linux/usb/nop-usb-xceiv.h> | 31 | #include <linux/usb/usb_phy_gen_xceiv.h> |
32 | 32 | ||
33 | #include "soc.h" | 33 | #include "soc.h" |
34 | #include "omap_device.h" | 34 | #include "omap_device.h" |
@@ -349,7 +349,7 @@ static struct fixed_voltage_config hsusb_reg_config = { | |||
349 | /* .init_data filled later */ | 349 | /* .init_data filled later */ |
350 | }; | 350 | }; |
351 | 351 | ||
352 | static const char *nop_name = "nop_usb_xceiv"; /* NOP PHY driver */ | 352 | static const char *nop_name = "usb_phy_gen_xceiv"; /* NOP PHY driver */ |
353 | static const char *reg_name = "reg-fixed-voltage"; /* Regulator driver */ | 353 | static const char *reg_name = "reg-fixed-voltage"; /* Regulator driver */ |
354 | 354 | ||
355 | /** | 355 | /** |
@@ -460,9 +460,9 @@ int usbhs_init_phys(struct usbhs_phy_data *phy, int num_phys) | |||
460 | pdevinfo.name = nop_name; | 460 | pdevinfo.name = nop_name; |
461 | pdevinfo.id = phy->port; | 461 | pdevinfo.id = phy->port; |
462 | pdevinfo.data = phy->platform_data; | 462 | pdevinfo.data = phy->platform_data; |
463 | pdevinfo.size_data = sizeof(struct nop_usb_xceiv_platform_data); | 463 | pdevinfo.size_data = |
464 | 464 | sizeof(struct usb_phy_gen_xceiv_platform_data); | |
465 | scnprintf(phy_id, MAX_STR, "nop_usb_xceiv.%d", | 465 | scnprintf(phy_id, MAX_STR, "usb_phy_gen_xceiv.%d", |
466 | phy->port); | 466 | phy->port); |
467 | pdev = platform_device_register_full(&pdevinfo); | 467 | pdev = platform_device_register_full(&pdevinfo); |
468 | if (IS_ERR(pdev)) { | 468 | if (IS_ERR(pdev)) { |
diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c index 8c4de2708cf2..bc897231bd10 100644 --- a/arch/arm/mach-omap2/usb-musb.c +++ b/arch/arm/mach-omap2/usb-musb.c | |||
@@ -38,11 +38,8 @@ static struct musb_hdrc_config musb_config = { | |||
38 | }; | 38 | }; |
39 | 39 | ||
40 | static struct musb_hdrc_platform_data musb_plat = { | 40 | static struct musb_hdrc_platform_data musb_plat = { |
41 | #ifdef CONFIG_USB_GADGET_MUSB_HDRC | ||
42 | .mode = MUSB_OTG, | 41 | .mode = MUSB_OTG, |
43 | #else | 42 | |
44 | .mode = MUSB_HOST, | ||
45 | #endif | ||
46 | /* .clock is set dynamically */ | 43 | /* .clock is set dynamically */ |
47 | .config = &musb_config, | 44 | .config = &musb_config, |
48 | 45 | ||
diff --git a/arch/arm/mach-prima2/common.c b/arch/arm/mach-prima2/common.c index 2c70f74fed5d..e110b6d4ae8c 100644 --- a/arch/arm/mach-prima2/common.c +++ b/arch/arm/mach-prima2/common.c | |||
@@ -42,7 +42,6 @@ static const char *atlas6_dt_match[] __initdata = { | |||
42 | 42 | ||
43 | DT_MACHINE_START(ATLAS6_DT, "Generic ATLAS6 (Flattened Device Tree)") | 43 | DT_MACHINE_START(ATLAS6_DT, "Generic ATLAS6 (Flattened Device Tree)") |
44 | /* Maintainer: Barry Song <baohua.song@csr.com> */ | 44 | /* Maintainer: Barry Song <baohua.song@csr.com> */ |
45 | .nr_irqs = 128, | ||
46 | .map_io = sirfsoc_map_io, | 45 | .map_io = sirfsoc_map_io, |
47 | .init_time = sirfsoc_init_time, | 46 | .init_time = sirfsoc_init_time, |
48 | .init_late = sirfsoc_init_late, | 47 | .init_late = sirfsoc_init_late, |
@@ -59,7 +58,6 @@ static const char *prima2_dt_match[] __initdata = { | |||
59 | 58 | ||
60 | DT_MACHINE_START(PRIMA2_DT, "Generic PRIMA2 (Flattened Device Tree)") | 59 | DT_MACHINE_START(PRIMA2_DT, "Generic PRIMA2 (Flattened Device Tree)") |
61 | /* Maintainer: Barry Song <baohua.song@csr.com> */ | 60 | /* Maintainer: Barry Song <baohua.song@csr.com> */ |
62 | .nr_irqs = 128, | ||
63 | .map_io = sirfsoc_map_io, | 61 | .map_io = sirfsoc_map_io, |
64 | .init_time = sirfsoc_init_time, | 62 | .init_time = sirfsoc_init_time, |
65 | .dma_zone_size = SZ_256M, | 63 | .dma_zone_size = SZ_256M, |
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c index e115f6742107..c5be60d85e4b 100644 --- a/arch/arm/mach-shmobile/board-armadillo800eva.c +++ b/arch/arm/mach-shmobile/board-armadillo800eva.c | |||
@@ -1162,9 +1162,6 @@ static void __init eva_init(void) | |||
1162 | gpio_request_one(61, GPIOF_OUT_INIT_HIGH, NULL); /* LCDDON */ | 1162 | gpio_request_one(61, GPIOF_OUT_INIT_HIGH, NULL); /* LCDDON */ |
1163 | gpio_request_one(202, GPIOF_OUT_INIT_LOW, NULL); /* LCD0_LED_CONT */ | 1163 | gpio_request_one(202, GPIOF_OUT_INIT_LOW, NULL); /* LCD0_LED_CONT */ |
1164 | 1164 | ||
1165 | /* Touchscreen */ | ||
1166 | gpio_request_one(166, GPIOF_OUT_INIT_HIGH, NULL); /* TP_RST_B */ | ||
1167 | |||
1168 | /* GETHER */ | 1165 | /* GETHER */ |
1169 | gpio_request_one(18, GPIOF_OUT_INIT_HIGH, NULL); /* PHY_RST */ | 1166 | gpio_request_one(18, GPIOF_OUT_INIT_HIGH, NULL); /* PHY_RST */ |
1170 | 1167 | ||
diff --git a/arch/arm/mach-shmobile/board-bockw.c b/arch/arm/mach-shmobile/board-bockw.c index d5554646916c..3354a85c90f7 100644 --- a/arch/arm/mach-shmobile/board-bockw.c +++ b/arch/arm/mach-shmobile/board-bockw.c | |||
@@ -167,7 +167,13 @@ static const struct pinctrl_map bockw_pinctrl_map[] = { | |||
167 | "usb1", "usb1"), | 167 | "usb1", "usb1"), |
168 | /* SDHI0 */ | 168 | /* SDHI0 */ |
169 | PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778", | 169 | PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778", |
170 | "sdhi0", "sdhi0"), | 170 | "sdhi0_data4", "sdhi0"), |
171 | PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778", | ||
172 | "sdhi0_ctrl", "sdhi0"), | ||
173 | PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778", | ||
174 | "sdhi0_cd", "sdhi0"), | ||
175 | PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778", | ||
176 | "sdhi0_wp", "sdhi0"), | ||
171 | }; | 177 | }; |
172 | 178 | ||
173 | #define FPGA 0x18200000 | 179 | #define FPGA 0x18200000 |
diff --git a/arch/arm/mach-shmobile/board-lager.c b/arch/arm/mach-shmobile/board-lager.c index d73e21d3ea8a..8d6bd5c5efb9 100644 --- a/arch/arm/mach-shmobile/board-lager.c +++ b/arch/arm/mach-shmobile/board-lager.c | |||
@@ -59,7 +59,7 @@ static __initdata struct gpio_led_platform_data lager_leds_pdata = { | |||
59 | #define GPIO_KEY(c, g, d, ...) \ | 59 | #define GPIO_KEY(c, g, d, ...) \ |
60 | { .code = c, .gpio = g, .desc = d, .active_low = 1 } | 60 | { .code = c, .gpio = g, .desc = d, .active_low = 1 } |
61 | 61 | ||
62 | static __initdata struct gpio_keys_button gpio_buttons[] = { | 62 | static struct gpio_keys_button gpio_buttons[] = { |
63 | GPIO_KEY(KEY_4, RCAR_GP_PIN(1, 28), "SW2-pin4"), | 63 | GPIO_KEY(KEY_4, RCAR_GP_PIN(1, 28), "SW2-pin4"), |
64 | GPIO_KEY(KEY_3, RCAR_GP_PIN(1, 26), "SW2-pin3"), | 64 | GPIO_KEY(KEY_3, RCAR_GP_PIN(1, 26), "SW2-pin3"), |
65 | GPIO_KEY(KEY_2, RCAR_GP_PIN(1, 24), "SW2-pin2"), | 65 | GPIO_KEY(KEY_2, RCAR_GP_PIN(1, 24), "SW2-pin2"), |
diff --git a/arch/arm/mach-sti/headsmp.S b/arch/arm/mach-sti/headsmp.S index 78ebc7559f53..4c09bae86edf 100644 --- a/arch/arm/mach-sti/headsmp.S +++ b/arch/arm/mach-sti/headsmp.S | |||
@@ -16,8 +16,6 @@ | |||
16 | #include <linux/linkage.h> | 16 | #include <linux/linkage.h> |
17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
18 | 18 | ||
19 | __INIT | ||
20 | |||
21 | /* | 19 | /* |
22 | * ST specific entry point for secondary CPUs. This provides | 20 | * ST specific entry point for secondary CPUs. This provides |
23 | * a "holding pen" into which all secondary cores are held until we're | 21 | * a "holding pen" into which all secondary cores are held until we're |
diff --git a/arch/arm/mach-tegra/tegra.c b/arch/arm/mach-tegra/tegra.c index 0d1e4128d460..fc97cfd52769 100644 --- a/arch/arm/mach-tegra/tegra.c +++ b/arch/arm/mach-tegra/tegra.c | |||
@@ -29,7 +29,6 @@ | |||
29 | #include <linux/of_fdt.h> | 29 | #include <linux/of_fdt.h> |
30 | #include <linux/of_platform.h> | 30 | #include <linux/of_platform.h> |
31 | #include <linux/pda_power.h> | 31 | #include <linux/pda_power.h> |
32 | #include <linux/platform_data/tegra_usb.h> | ||
33 | #include <linux/io.h> | 32 | #include <linux/io.h> |
34 | #include <linux/slab.h> | 33 | #include <linux/slab.h> |
35 | #include <linux/sys_soc.h> | 34 | #include <linux/sys_soc.h> |
@@ -46,40 +45,6 @@ | |||
46 | #include "fuse.h" | 45 | #include "fuse.h" |
47 | #include "iomap.h" | 46 | #include "iomap.h" |
48 | 47 | ||
49 | static struct tegra_ehci_platform_data tegra_ehci1_pdata = { | ||
50 | .operating_mode = TEGRA_USB_OTG, | ||
51 | .power_down_on_bus_suspend = 1, | ||
52 | .vbus_gpio = -1, | ||
53 | }; | ||
54 | |||
55 | static struct tegra_ulpi_config tegra_ehci2_ulpi_phy_config = { | ||
56 | .reset_gpio = -1, | ||
57 | .clk = "cdev2", | ||
58 | }; | ||
59 | |||
60 | static struct tegra_ehci_platform_data tegra_ehci2_pdata = { | ||
61 | .phy_config = &tegra_ehci2_ulpi_phy_config, | ||
62 | .operating_mode = TEGRA_USB_HOST, | ||
63 | .power_down_on_bus_suspend = 1, | ||
64 | .vbus_gpio = -1, | ||
65 | }; | ||
66 | |||
67 | static struct tegra_ehci_platform_data tegra_ehci3_pdata = { | ||
68 | .operating_mode = TEGRA_USB_HOST, | ||
69 | .power_down_on_bus_suspend = 1, | ||
70 | .vbus_gpio = -1, | ||
71 | }; | ||
72 | |||
73 | static struct of_dev_auxdata tegra20_auxdata_lookup[] __initdata = { | ||
74 | OF_DEV_AUXDATA("nvidia,tegra20-ehci", 0xC5000000, "tegra-ehci.0", | ||
75 | &tegra_ehci1_pdata), | ||
76 | OF_DEV_AUXDATA("nvidia,tegra20-ehci", 0xC5004000, "tegra-ehci.1", | ||
77 | &tegra_ehci2_pdata), | ||
78 | OF_DEV_AUXDATA("nvidia,tegra20-ehci", 0xC5008000, "tegra-ehci.2", | ||
79 | &tegra_ehci3_pdata), | ||
80 | {} | ||
81 | }; | ||
82 | |||
83 | static void __init tegra_dt_init(void) | 48 | static void __init tegra_dt_init(void) |
84 | { | 49 | { |
85 | struct soc_device_attribute *soc_dev_attr; | 50 | struct soc_device_attribute *soc_dev_attr; |
@@ -112,8 +77,7 @@ static void __init tegra_dt_init(void) | |||
112 | * devices | 77 | * devices |
113 | */ | 78 | */ |
114 | out: | 79 | out: |
115 | of_platform_populate(NULL, of_default_bus_match_table, | 80 | of_platform_populate(NULL, of_default_bus_match_table, NULL, parent); |
116 | tegra20_auxdata_lookup, parent); | ||
117 | } | 81 | } |
118 | 82 | ||
119 | static void __init trimslice_init(void) | 83 | static void __init trimslice_init(void) |
diff --git a/arch/arm/mach-ux500/Makefile b/arch/arm/mach-ux500/Makefile index bf9b6be5b180..fe1f3e26b88b 100644 --- a/arch/arm/mach-ux500/Makefile +++ b/arch/arm/mach-ux500/Makefile | |||
@@ -4,7 +4,6 @@ | |||
4 | 4 | ||
5 | obj-y := cpu.o devices.o devices-common.o \ | 5 | obj-y := cpu.o devices.o devices-common.o \ |
6 | id.o usb.o timer.o pm.o | 6 | id.o usb.o timer.o pm.o |
7 | obj-$(CONFIG_CPU_IDLE) += cpuidle.o | ||
8 | obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o | 7 | obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o |
9 | obj-$(CONFIG_UX500_SOC_DB8500) += cpu-db8500.o devices-db8500.o | 8 | obj-$(CONFIG_UX500_SOC_DB8500) += cpu-db8500.o devices-db8500.o |
10 | obj-$(CONFIG_MACH_MOP500) += board-mop500.o board-mop500-sdi.o \ | 9 | obj-$(CONFIG_MACH_MOP500) += board-mop500.o board-mop500-sdi.o \ |
diff --git a/arch/arm/mach-ux500/cpuidle.c b/arch/arm/mach-ux500/cpuidle.c deleted file mode 100644 index a45dd09daed9..000000000000 --- a/arch/arm/mach-ux500/cpuidle.c +++ /dev/null | |||
@@ -1,128 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2012 Linaro : Daniel Lezcano <daniel.lezcano@linaro.org> (IBM) | ||
3 | * | ||
4 | * Based on the work of Rickard Andersson <rickard.andersson@stericsson.com> | ||
5 | * and Jonas Aaberg <jonas.aberg@stericsson.com>. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/cpuidle.h> | ||
14 | #include <linux/spinlock.h> | ||
15 | #include <linux/atomic.h> | ||
16 | #include <linux/smp.h> | ||
17 | #include <linux/mfd/dbx500-prcmu.h> | ||
18 | #include <linux/platform_data/arm-ux500-pm.h> | ||
19 | |||
20 | #include <asm/cpuidle.h> | ||
21 | #include <asm/proc-fns.h> | ||
22 | |||
23 | #include "db8500-regs.h" | ||
24 | #include "id.h" | ||
25 | |||
26 | static atomic_t master = ATOMIC_INIT(0); | ||
27 | static DEFINE_SPINLOCK(master_lock); | ||
28 | |||
29 | static inline int ux500_enter_idle(struct cpuidle_device *dev, | ||
30 | struct cpuidle_driver *drv, int index) | ||
31 | { | ||
32 | int this_cpu = smp_processor_id(); | ||
33 | bool recouple = false; | ||
34 | |||
35 | if (atomic_inc_return(&master) == num_online_cpus()) { | ||
36 | |||
37 | /* With this lock, we prevent the other cpu to exit and enter | ||
38 | * this function again and become the master */ | ||
39 | if (!spin_trylock(&master_lock)) | ||
40 | goto wfi; | ||
41 | |||
42 | /* decouple the gic from the A9 cores */ | ||
43 | if (prcmu_gic_decouple()) { | ||
44 | spin_unlock(&master_lock); | ||
45 | goto out; | ||
46 | } | ||
47 | |||
48 | /* If an error occur, we will have to recouple the gic | ||
49 | * manually */ | ||
50 | recouple = true; | ||
51 | |||
52 | /* At this state, as the gic is decoupled, if the other | ||
53 | * cpu is in WFI, we have the guarantee it won't be wake | ||
54 | * up, so we can safely go to retention */ | ||
55 | if (!prcmu_is_cpu_in_wfi(this_cpu ? 0 : 1)) | ||
56 | goto out; | ||
57 | |||
58 | /* The prcmu will be in charge of watching the interrupts | ||
59 | * and wake up the cpus */ | ||
60 | if (prcmu_copy_gic_settings()) | ||
61 | goto out; | ||
62 | |||
63 | /* Check in the meantime an interrupt did | ||
64 | * not occur on the gic ... */ | ||
65 | if (prcmu_gic_pending_irq()) | ||
66 | goto out; | ||
67 | |||
68 | /* ... and the prcmu */ | ||
69 | if (prcmu_pending_irq()) | ||
70 | goto out; | ||
71 | |||
72 | /* Go to the retention state, the prcmu will wait for the | ||
73 | * cpu to go WFI and this is what happens after exiting this | ||
74 | * 'master' critical section */ | ||
75 | if (prcmu_set_power_state(PRCMU_AP_IDLE, true, true)) | ||
76 | goto out; | ||
77 | |||
78 | /* When we switch to retention, the prcmu is in charge | ||
79 | * of recoupling the gic automatically */ | ||
80 | recouple = false; | ||
81 | |||
82 | spin_unlock(&master_lock); | ||
83 | } | ||
84 | wfi: | ||
85 | cpu_do_idle(); | ||
86 | out: | ||
87 | atomic_dec(&master); | ||
88 | |||
89 | if (recouple) { | ||
90 | prcmu_gic_recouple(); | ||
91 | spin_unlock(&master_lock); | ||
92 | } | ||
93 | |||
94 | return index; | ||
95 | } | ||
96 | |||
97 | static struct cpuidle_driver ux500_idle_driver = { | ||
98 | .name = "ux500_idle", | ||
99 | .owner = THIS_MODULE, | ||
100 | .states = { | ||
101 | ARM_CPUIDLE_WFI_STATE, | ||
102 | { | ||
103 | .enter = ux500_enter_idle, | ||
104 | .exit_latency = 70, | ||
105 | .target_residency = 260, | ||
106 | .flags = CPUIDLE_FLAG_TIME_VALID | | ||
107 | CPUIDLE_FLAG_TIMER_STOP, | ||
108 | .name = "ApIdle", | ||
109 | .desc = "ARM Retention", | ||
110 | }, | ||
111 | }, | ||
112 | .safe_state_index = 0, | ||
113 | .state_count = 2, | ||
114 | }; | ||
115 | |||
116 | int __init ux500_idle_init(void) | ||
117 | { | ||
118 | if (!(cpu_is_u8500_family() || cpu_is_ux540_family())) | ||
119 | return -ENODEV; | ||
120 | |||
121 | /* Configure wake up reasons */ | ||
122 | prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) | | ||
123 | PRCMU_WAKEUP(ABB)); | ||
124 | |||
125 | return cpuidle_register(&ux500_idle_driver, NULL); | ||
126 | } | ||
127 | |||
128 | device_initcall(ux500_idle_init); | ||
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index db5c2cab8fda..cd2c88e7a8f7 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig | |||
@@ -809,15 +809,18 @@ config KUSER_HELPERS | |||
809 | the CPU type fitted to the system. This permits binaries to be | 809 | the CPU type fitted to the system. This permits binaries to be |
810 | run on ARMv4 through to ARMv7 without modification. | 810 | run on ARMv4 through to ARMv7 without modification. |
811 | 811 | ||
812 | See Documentation/arm/kernel_user_helpers.txt for details. | ||
813 | |||
812 | However, the fixed address nature of these helpers can be used | 814 | However, the fixed address nature of these helpers can be used |
813 | by ROP (return orientated programming) authors when creating | 815 | by ROP (return orientated programming) authors when creating |
814 | exploits. | 816 | exploits. |
815 | 817 | ||
816 | If all of the binaries and libraries which run on your platform | 818 | If all of the binaries and libraries which run on your platform |
817 | are built specifically for your platform, and make no use of | 819 | are built specifically for your platform, and make no use of |
818 | these helpers, then you can turn this option off. However, | 820 | these helpers, then you can turn this option off to hinder |
819 | when such an binary or library is run, it will receive a SIGILL | 821 | such exploits. However, in that case, if a binary or library |
820 | signal, which will terminate the program. | 822 | relying on those helpers is run, it will receive a SIGILL signal, |
823 | which will terminate the program. | ||
821 | 824 | ||
822 | Say N here only if you are absolutely certain that you do not | 825 | Say N here only if you are absolutely certain that you do not |
823 | need these helpers; otherwise, the safe option is to say Y. | 826 | need these helpers; otherwise, the safe option is to say Y. |
diff --git a/arch/arm/plat-samsung/init.c b/arch/arm/plat-samsung/init.c index 3e5c4619caa5..50a3ea0037db 100644 --- a/arch/arm/plat-samsung/init.c +++ b/arch/arm/plat-samsung/init.c | |||
@@ -55,12 +55,13 @@ void __init s3c_init_cpu(unsigned long idcode, | |||
55 | 55 | ||
56 | printk("CPU %s (id 0x%08lx)\n", cpu->name, idcode); | 56 | printk("CPU %s (id 0x%08lx)\n", cpu->name, idcode); |
57 | 57 | ||
58 | if (cpu->map_io == NULL || cpu->init == NULL) { | 58 | if (cpu->init == NULL) { |
59 | printk(KERN_ERR "CPU %s support not enabled\n", cpu->name); | 59 | printk(KERN_ERR "CPU %s support not enabled\n", cpu->name); |
60 | panic("Unsupported Samsung CPU"); | 60 | panic("Unsupported Samsung CPU"); |
61 | } | 61 | } |
62 | 62 | ||
63 | cpu->map_io(); | 63 | if (cpu->map_io) |
64 | cpu->map_io(); | ||
64 | } | 65 | } |
65 | 66 | ||
66 | /* s3c24xx_init_clocks | 67 | /* s3c24xx_init_clocks |
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c index c9770ba5c7df..8a6295c86209 100644 --- a/arch/arm/xen/enlighten.c +++ b/arch/arm/xen/enlighten.c | |||
@@ -170,6 +170,7 @@ static void __init xen_percpu_init(void *unused) | |||
170 | per_cpu(xen_vcpu, cpu) = vcpup; | 170 | per_cpu(xen_vcpu, cpu) = vcpup; |
171 | 171 | ||
172 | enable_percpu_irq(xen_events_irq, 0); | 172 | enable_percpu_irq(xen_events_irq, 0); |
173 | put_cpu(); | ||
173 | } | 174 | } |
174 | 175 | ||
175 | static void xen_restart(enum reboot_mode reboot_mode, const char *cmd) | 176 | static void xen_restart(enum reboot_mode reboot_mode, const char *cmd) |
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h index 98abd476992d..c9f1d2816c2b 100644 --- a/arch/arm64/include/asm/arch_timer.h +++ b/arch/arm64/include/asm/arch_timer.h | |||
@@ -26,7 +26,13 @@ | |||
26 | 26 | ||
27 | #include <clocksource/arm_arch_timer.h> | 27 | #include <clocksource/arm_arch_timer.h> |
28 | 28 | ||
29 | static inline void arch_timer_reg_write(int access, int reg, u32 val) | 29 | /* |
30 | * These register accessors are marked inline so the compiler can | ||
31 | * nicely work out which register we want, and chuck away the rest of | ||
32 | * the code. | ||
33 | */ | ||
34 | static __always_inline | ||
35 | void arch_timer_reg_write_cp15(int access, enum arch_timer_reg reg, u32 val) | ||
30 | { | 36 | { |
31 | if (access == ARCH_TIMER_PHYS_ACCESS) { | 37 | if (access == ARCH_TIMER_PHYS_ACCESS) { |
32 | switch (reg) { | 38 | switch (reg) { |
@@ -36,8 +42,6 @@ static inline void arch_timer_reg_write(int access, int reg, u32 val) | |||
36 | case ARCH_TIMER_REG_TVAL: | 42 | case ARCH_TIMER_REG_TVAL: |
37 | asm volatile("msr cntp_tval_el0, %0" : : "r" (val)); | 43 | asm volatile("msr cntp_tval_el0, %0" : : "r" (val)); |
38 | break; | 44 | break; |
39 | default: | ||
40 | BUILD_BUG(); | ||
41 | } | 45 | } |
42 | } else if (access == ARCH_TIMER_VIRT_ACCESS) { | 46 | } else if (access == ARCH_TIMER_VIRT_ACCESS) { |
43 | switch (reg) { | 47 | switch (reg) { |
@@ -47,17 +51,14 @@ static inline void arch_timer_reg_write(int access, int reg, u32 val) | |||
47 | case ARCH_TIMER_REG_TVAL: | 51 | case ARCH_TIMER_REG_TVAL: |
48 | asm volatile("msr cntv_tval_el0, %0" : : "r" (val)); | 52 | asm volatile("msr cntv_tval_el0, %0" : : "r" (val)); |
49 | break; | 53 | break; |
50 | default: | ||
51 | BUILD_BUG(); | ||
52 | } | 54 | } |
53 | } else { | ||
54 | BUILD_BUG(); | ||
55 | } | 55 | } |
56 | 56 | ||
57 | isb(); | 57 | isb(); |
58 | } | 58 | } |
59 | 59 | ||
60 | static inline u32 arch_timer_reg_read(int access, int reg) | 60 | static __always_inline |
61 | u32 arch_timer_reg_read_cp15(int access, enum arch_timer_reg reg) | ||
61 | { | 62 | { |
62 | u32 val; | 63 | u32 val; |
63 | 64 | ||
@@ -69,8 +70,6 @@ static inline u32 arch_timer_reg_read(int access, int reg) | |||
69 | case ARCH_TIMER_REG_TVAL: | 70 | case ARCH_TIMER_REG_TVAL: |
70 | asm volatile("mrs %0, cntp_tval_el0" : "=r" (val)); | 71 | asm volatile("mrs %0, cntp_tval_el0" : "=r" (val)); |
71 | break; | 72 | break; |
72 | default: | ||
73 | BUILD_BUG(); | ||
74 | } | 73 | } |
75 | } else if (access == ARCH_TIMER_VIRT_ACCESS) { | 74 | } else if (access == ARCH_TIMER_VIRT_ACCESS) { |
76 | switch (reg) { | 75 | switch (reg) { |
@@ -80,11 +79,7 @@ static inline u32 arch_timer_reg_read(int access, int reg) | |||
80 | case ARCH_TIMER_REG_TVAL: | 79 | case ARCH_TIMER_REG_TVAL: |
81 | asm volatile("mrs %0, cntv_tval_el0" : "=r" (val)); | 80 | asm volatile("mrs %0, cntv_tval_el0" : "=r" (val)); |
82 | break; | 81 | break; |
83 | default: | ||
84 | BUILD_BUG(); | ||
85 | } | 82 | } |
86 | } else { | ||
87 | BUILD_BUG(); | ||
88 | } | 83 | } |
89 | 84 | ||
90 | return val; | 85 | return val; |
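The arch_timer comment above leans on __always_inline plus compile-time-constant access/reg arguments: once the accessor is inlined at a call site with constants, the compiler resolves the switch and discards every other branch, leaving a single mrs/msr. A minimal standalone C sketch of the same pattern (hypothetical names, ordinary memory in place of system registers, not the kernel accessors):

    #include <stdint.h>
    #include <stdio.h>

    enum timer_reg { REG_CTRL, REG_TVAL };

    static uint32_t fake_regs[2];          /* stand-in for the system registers */

    /* Forced inlining lets a constant 'reg' collapse the switch at compile time. */
    static inline __attribute__((always_inline))
    uint32_t reg_read(enum timer_reg reg)
    {
            switch (reg) {
            case REG_CTRL:
                    return fake_regs[REG_CTRL];   /* would be an mrs in the real code */
            case REG_TVAL:
                    return fake_regs[REG_TVAL];
            }
            return 0;
    }

    int main(void)
    {
            fake_regs[REG_TVAL] = 42;
            /* REG_TVAL is a constant here, so only this branch survives inlining. */
            printf("%u\n", reg_read(REG_TVAL));
            return 0;
    }

With optimisation enabled, the generated code keeps only the selected access, which is what the hunk's comment describes.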
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index c92de4163eba..b25763bc0ec4 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h | |||
@@ -42,14 +42,15 @@ | |||
42 | #define TPIDR_EL1 18 /* Thread ID, Privileged */ | 42 | #define TPIDR_EL1 18 /* Thread ID, Privileged */ |
43 | #define AMAIR_EL1 19 /* Aux Memory Attribute Indirection Register */ | 43 | #define AMAIR_EL1 19 /* Aux Memory Attribute Indirection Register */ |
44 | #define CNTKCTL_EL1 20 /* Timer Control Register (EL1) */ | 44 | #define CNTKCTL_EL1 20 /* Timer Control Register (EL1) */ |
45 | #define PAR_EL1 21 /* Physical Address Register */ | ||
45 | /* 32bit specific registers. Keep them at the end of the range */ | 46 | /* 32bit specific registers. Keep them at the end of the range */ |
46 | #define DACR32_EL2 21 /* Domain Access Control Register */ | 47 | #define DACR32_EL2 22 /* Domain Access Control Register */ |
47 | #define IFSR32_EL2 22 /* Instruction Fault Status Register */ | 48 | #define IFSR32_EL2 23 /* Instruction Fault Status Register */ |
48 | #define FPEXC32_EL2 23 /* Floating-Point Exception Control Register */ | 49 | #define FPEXC32_EL2 24 /* Floating-Point Exception Control Register */ |
49 | #define DBGVCR32_EL2 24 /* Debug Vector Catch Register */ | 50 | #define DBGVCR32_EL2 25 /* Debug Vector Catch Register */ |
50 | #define TEECR32_EL1 25 /* ThumbEE Configuration Register */ | 51 | #define TEECR32_EL1 26 /* ThumbEE Configuration Register */ |
51 | #define TEEHBR32_EL1 26 /* ThumbEE Handler Base Register */ | 52 | #define TEEHBR32_EL1 27 /* ThumbEE Handler Base Register */ |
52 | #define NR_SYS_REGS 27 | 53 | #define NR_SYS_REGS 28 |
53 | 54 | ||
54 | /* 32bit mapping */ | 55 | /* 32bit mapping */ |
55 | #define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */ | 56 | #define c0_MPIDR (MPIDR_EL1 * 2) /* MultiProcessor ID Register */ |
@@ -69,6 +70,8 @@ | |||
69 | #define c5_AIFSR (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */ | 70 | #define c5_AIFSR (AFSR1_EL1 * 2) /* Auxiliary Instr Fault Status R */ |
70 | #define c6_DFAR (FAR_EL1 * 2) /* Data Fault Address Register */ | 71 | #define c6_DFAR (FAR_EL1 * 2) /* Data Fault Address Register */ |
71 | #define c6_IFAR (c6_DFAR + 1) /* Instruction Fault Address Register */ | 72 | #define c6_IFAR (c6_DFAR + 1) /* Instruction Fault Address Register */ |
73 | #define c7_PAR (PAR_EL1 * 2) /* Physical Address Register */ | ||
74 | #define c7_PAR_high (c7_PAR + 1) /* PAR top 32 bits */ | ||
72 | #define c10_PRRR (MAIR_EL1 * 2) /* Primary Region Remap Register */ | 75 | #define c10_PRRR (MAIR_EL1 * 2) /* Primary Region Remap Register */ |
73 | #define c10_NMRR (c10_PRRR + 1) /* Normal Memory Remap Register */ | 76 | #define c10_NMRR (c10_PRRR + 1) /* Normal Memory Remap Register */ |
74 | #define c12_VBAR (VBAR_EL1 * 2) /* Vector Base Address Register */ | 77 | #define c12_VBAR (VBAR_EL1 * 2) /* Vector Base Address Register */ |
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 644d73956864..0859a4ddd1e7 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h | |||
@@ -129,7 +129,7 @@ struct kvm_vcpu_arch { | |||
129 | struct kvm_mmu_memory_cache mmu_page_cache; | 129 | struct kvm_mmu_memory_cache mmu_page_cache; |
130 | 130 | ||
131 | /* Target CPU and feature flags */ | 131 | /* Target CPU and feature flags */ |
132 | u32 target; | 132 | int target; |
133 | DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES); | 133 | DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES); |
134 | 134 | ||
135 | /* Detect first run of a vcpu */ | 135 | /* Detect first run of a vcpu */ |
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h index 46b3beb4b773..717031a762c2 100644 --- a/arch/arm64/include/asm/tlb.h +++ b/arch/arm64/include/asm/tlb.h | |||
@@ -35,6 +35,7 @@ struct mmu_gather { | |||
35 | struct mm_struct *mm; | 35 | struct mm_struct *mm; |
36 | unsigned int fullmm; | 36 | unsigned int fullmm; |
37 | struct vm_area_struct *vma; | 37 | struct vm_area_struct *vma; |
38 | unsigned long start, end; | ||
38 | unsigned long range_start; | 39 | unsigned long range_start; |
39 | unsigned long range_end; | 40 | unsigned long range_end; |
40 | unsigned int nr; | 41 | unsigned int nr; |
@@ -97,10 +98,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb) | |||
97 | } | 98 | } |
98 | 99 | ||
99 | static inline void | 100 | static inline void |
100 | tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm) | 101 | tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) |
101 | { | 102 | { |
102 | tlb->mm = mm; | 103 | tlb->mm = mm; |
103 | tlb->fullmm = fullmm; | 104 | tlb->fullmm = !(start | (end+1)); |
105 | tlb->start = start; | ||
106 | tlb->end = end; | ||
104 | tlb->vma = NULL; | 107 | tlb->vma = NULL; |
105 | tlb->max = ARRAY_SIZE(tlb->local); | 108 | tlb->max = ARRAY_SIZE(tlb->local); |
106 | tlb->pages = tlb->local; | 109 | tlb->pages = tlb->local; |
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 9ba33c40cdf8..12e6ccb88691 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c | |||
@@ -107,7 +107,12 @@ armpmu_map_cache_event(const unsigned (*cache_map) | |||
107 | static int | 107 | static int |
108 | armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) | 108 | armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) |
109 | { | 109 | { |
110 | int mapping = (*event_map)[config]; | 110 | int mapping; |
111 | |||
112 | if (config >= PERF_COUNT_HW_MAX) | ||
113 | return -EINVAL; | ||
114 | |||
115 | mapping = (*event_map)[config]; | ||
111 | return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping; | 116 | return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping; |
112 | } | 117 | } |
113 | 118 | ||
@@ -317,6 +322,9 @@ validate_event(struct pmu_hw_events *hw_events, | |||
317 | struct hw_perf_event fake_event = event->hw; | 322 | struct hw_perf_event fake_event = event->hw; |
318 | struct pmu *leader_pmu = event->group_leader->pmu; | 323 | struct pmu *leader_pmu = event->group_leader->pmu; |
319 | 324 | ||
325 | if (is_software_event(event)) | ||
326 | return 1; | ||
327 | |||
320 | if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF) | 328 | if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF) |
321 | return 1; | 329 | return 1; |
322 | 330 | ||
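The perf_event.c hunk above adds a range check on the user-supplied config value before it is used as an index into the event map; without it, a config of PERF_COUNT_HW_MAX or more would read past the end of the table. A small sketch of the same guard (hypothetical table and constants, not the kernel's armpmu code):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    #define HW_MAX          4
    #define HW_UNSUPPORTED  0xffff

    static const unsigned int event_map[HW_MAX] = { 0x11, HW_UNSUPPORTED, 0x13, 0x14 };

    /* Validate the untrusted index before the lookup, as the hunk now does. */
    static int map_event(uint64_t config)
    {
            unsigned int mapping;

            if (config >= HW_MAX)
                    return -EINVAL;

            mapping = event_map[config];
            return mapping == HW_UNSUPPORTED ? -ENOENT : (int)mapping;
    }

    int main(void)
    {
            printf("%d %d %d\n", map_event(0), map_event(1), map_event(99));
            return 0;
    }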
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S index ff985e3d8b72..1ac0bbbdddb2 100644 --- a/arch/arm64/kvm/hyp.S +++ b/arch/arm64/kvm/hyp.S | |||
@@ -214,6 +214,7 @@ __kvm_hyp_code_start: | |||
214 | mrs x21, tpidr_el1 | 214 | mrs x21, tpidr_el1 |
215 | mrs x22, amair_el1 | 215 | mrs x22, amair_el1 |
216 | mrs x23, cntkctl_el1 | 216 | mrs x23, cntkctl_el1 |
217 | mrs x24, par_el1 | ||
217 | 218 | ||
218 | stp x4, x5, [x3] | 219 | stp x4, x5, [x3] |
219 | stp x6, x7, [x3, #16] | 220 | stp x6, x7, [x3, #16] |
@@ -225,6 +226,7 @@ __kvm_hyp_code_start: | |||
225 | stp x18, x19, [x3, #112] | 226 | stp x18, x19, [x3, #112] |
226 | stp x20, x21, [x3, #128] | 227 | stp x20, x21, [x3, #128] |
227 | stp x22, x23, [x3, #144] | 228 | stp x22, x23, [x3, #144] |
229 | str x24, [x3, #160] | ||
228 | .endm | 230 | .endm |
229 | 231 | ||
230 | .macro restore_sysregs | 232 | .macro restore_sysregs |
@@ -243,6 +245,7 @@ __kvm_hyp_code_start: | |||
243 | ldp x18, x19, [x3, #112] | 245 | ldp x18, x19, [x3, #112] |
244 | ldp x20, x21, [x3, #128] | 246 | ldp x20, x21, [x3, #128] |
245 | ldp x22, x23, [x3, #144] | 247 | ldp x22, x23, [x3, #144] |
248 | ldr x24, [x3, #160] | ||
246 | 249 | ||
247 | msr vmpidr_el2, x4 | 250 | msr vmpidr_el2, x4 |
248 | msr csselr_el1, x5 | 251 | msr csselr_el1, x5 |
@@ -264,6 +267,7 @@ __kvm_hyp_code_start: | |||
264 | msr tpidr_el1, x21 | 267 | msr tpidr_el1, x21 |
265 | msr amair_el1, x22 | 268 | msr amair_el1, x22 |
266 | msr cntkctl_el1, x23 | 269 | msr cntkctl_el1, x23 |
270 | msr par_el1, x24 | ||
267 | .endm | 271 | .endm |
268 | 272 | ||
269 | .macro skip_32bit_state tmp, target | 273 | .macro skip_32bit_state tmp, target |
@@ -600,6 +604,8 @@ END(__kvm_vcpu_run) | |||
600 | 604 | ||
601 | // void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); | 605 | // void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); |
602 | ENTRY(__kvm_tlb_flush_vmid_ipa) | 606 | ENTRY(__kvm_tlb_flush_vmid_ipa) |
607 | dsb ishst | ||
608 | |||
603 | kern_hyp_va x0 | 609 | kern_hyp_va x0 |
604 | ldr x2, [x0, #KVM_VTTBR] | 610 | ldr x2, [x0, #KVM_VTTBR] |
605 | msr vttbr_el2, x2 | 611 | msr vttbr_el2, x2 |
@@ -621,6 +627,7 @@ ENTRY(__kvm_tlb_flush_vmid_ipa) | |||
621 | ENDPROC(__kvm_tlb_flush_vmid_ipa) | 627 | ENDPROC(__kvm_tlb_flush_vmid_ipa) |
622 | 628 | ||
623 | ENTRY(__kvm_flush_vm_context) | 629 | ENTRY(__kvm_flush_vm_context) |
630 | dsb ishst | ||
624 | tlbi alle1is | 631 | tlbi alle1is |
625 | ic ialluis | 632 | ic ialluis |
626 | dsb sy | 633 | dsb sy |
@@ -753,6 +760,10 @@ el1_trap: | |||
753 | */ | 760 | */ |
754 | tbnz x1, #7, 1f // S1PTW is set | 761 | tbnz x1, #7, 1f // S1PTW is set |
755 | 762 | ||
763 | /* Preserve PAR_EL1 */ | ||
764 | mrs x3, par_el1 | ||
765 | push x3, xzr | ||
766 | |||
756 | /* | 767 | /* |
757 | * Permission fault, HPFAR_EL2 is invalid. | 768 | * Permission fault, HPFAR_EL2 is invalid. |
758 | * Resolve the IPA the hard way using the guest VA. | 769 | * Resolve the IPA the hard way using the guest VA. |
@@ -766,6 +777,8 @@ el1_trap: | |||
766 | 777 | ||
767 | /* Read result */ | 778 | /* Read result */ |
768 | mrs x3, par_el1 | 779 | mrs x3, par_el1 |
780 | pop x0, xzr // Restore PAR_EL1 from the stack | ||
781 | msr par_el1, x0 | ||
769 | tbnz x3, #0, 3f // Bail out if we failed the translation | 782 | tbnz x3, #0, 3f // Bail out if we failed the translation |
770 | ubfx x3, x3, #12, #36 // Extract IPA | 783 | ubfx x3, x3, #12, #36 // Extract IPA |
771 | lsl x3, x3, #4 // and present it like HPFAR | 784 | lsl x3, x3, #4 // and present it like HPFAR |
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 94923609753b..02e9d09e1d80 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c | |||
@@ -211,6 +211,9 @@ static const struct sys_reg_desc sys_reg_descs[] = { | |||
211 | /* FAR_EL1 */ | 211 | /* FAR_EL1 */ |
212 | { Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000), | 212 | { Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000), |
213 | NULL, reset_unknown, FAR_EL1 }, | 213 | NULL, reset_unknown, FAR_EL1 }, |
214 | /* PAR_EL1 */ | ||
215 | { Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000), | ||
216 | NULL, reset_unknown, PAR_EL1 }, | ||
214 | 217 | ||
215 | /* PMINTENSET_EL1 */ | 218 | /* PMINTENSET_EL1 */ |
216 | { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001), | 219 | { Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001), |
diff --git a/arch/avr32/boards/atngw100/mrmt.c b/arch/avr32/boards/atngw100/mrmt.c index f91431963452..7de083d19b7e 100644 --- a/arch/avr32/boards/atngw100/mrmt.c +++ b/arch/avr32/boards/atngw100/mrmt.c | |||
@@ -150,7 +150,6 @@ static struct ac97c_platform_data __initdata ac97c0_data = { | |||
150 | static struct platform_device rmt_ts_device = { | 150 | static struct platform_device rmt_ts_device = { |
151 | .name = "ucb1400_ts", | 151 | .name = "ucb1400_ts", |
152 | .id = -1, | 152 | .id = -1, |
153 | } | ||
154 | }; | 153 | }; |
155 | #endif | 154 | #endif |
156 | 155 | ||
diff --git a/arch/frv/mb93090-mb00/pci-vdk.c b/arch/frv/mb93090-mb00/pci-vdk.c index 0aa35f0eb0db..deb67843693c 100644 --- a/arch/frv/mb93090-mb00/pci-vdk.c +++ b/arch/frv/mb93090-mb00/pci-vdk.c | |||
@@ -320,7 +320,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases); | |||
320 | * are examined. | 320 | * are examined. |
321 | */ | 321 | */ |
322 | 322 | ||
323 | void __init pcibios_fixup_bus(struct pci_bus *bus) | 323 | void pcibios_fixup_bus(struct pci_bus *bus) |
324 | { | 324 | { |
325 | #if 0 | 325 | #if 0 |
326 | printk("### PCIBIOS_FIXUP_BUS(%d)\n",bus->number); | 326 | printk("### PCIBIOS_FIXUP_BUS(%d)\n",bus->number); |
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig index 33a97929d055..77d442ab28c8 100644 --- a/arch/hexagon/Kconfig +++ b/arch/hexagon/Kconfig | |||
@@ -158,6 +158,7 @@ source "kernel/Kconfig.hz" | |||
158 | endmenu | 158 | endmenu |
159 | 159 | ||
160 | source "init/Kconfig" | 160 | source "init/Kconfig" |
161 | source "kernel/Kconfig.freezer" | ||
161 | source "drivers/Kconfig" | 162 | source "drivers/Kconfig" |
162 | source "fs/Kconfig" | 163 | source "fs/Kconfig" |
163 | 164 | ||
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 5a768ad8e893..b36370d3eab9 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig | |||
@@ -565,9 +565,9 @@ config KEXEC | |||
565 | 565 | ||
566 | It is an ongoing process to be certain the hardware in a machine | 566 | It is an ongoing process to be certain the hardware in a machine |
567 | is properly shutdown, so do not be surprised if this code does not | 567 | is properly shutdown, so do not be surprised if this code does not |
568 | initially work for you. It may help to enable device hotplugging | 568 | initially work for you. As of this writing the exact hardware |
569 | support. As of this writing the exact hardware interface is | 569 | interface is strongly in flux, so no good recommendation can be |
570 | strongly in flux, so no good recommendation can be made. | 570 | made. |
571 | 571 | ||
572 | config CRASH_DUMP | 572 | config CRASH_DUMP |
573 | bool "kernel crash dumps" | 573 | bool "kernel crash dumps" |
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h index ef3a9de01954..bc5efc7c3f3f 100644 --- a/arch/ia64/include/asm/tlb.h +++ b/arch/ia64/include/asm/tlb.h | |||
@@ -22,7 +22,7 @@ | |||
22 | * unmapping a portion of the virtual address space, these hooks are called according to | 22 | * unmapping a portion of the virtual address space, these hooks are called according to |
23 | * the following template: | 23 | * the following template: |
24 | * | 24 | * |
25 | * tlb <- tlb_gather_mmu(mm, full_mm_flush); // start unmap for address space MM | 25 | * tlb <- tlb_gather_mmu(mm, start, end); // start unmap for address space MM |
26 | * { | 26 | * { |
27 | * for each vma that needs a shootdown do { | 27 | * for each vma that needs a shootdown do { |
28 | * tlb_start_vma(tlb, vma); | 28 | * tlb_start_vma(tlb, vma); |
@@ -58,6 +58,7 @@ struct mmu_gather { | |||
58 | unsigned int max; | 58 | unsigned int max; |
59 | unsigned char fullmm; /* non-zero means full mm flush */ | 59 | unsigned char fullmm; /* non-zero means full mm flush */ |
60 | unsigned char need_flush; /* really unmapped some PTEs? */ | 60 | unsigned char need_flush; /* really unmapped some PTEs? */ |
61 | unsigned long start, end; | ||
61 | unsigned long start_addr; | 62 | unsigned long start_addr; |
62 | unsigned long end_addr; | 63 | unsigned long end_addr; |
63 | struct page **pages; | 64 | struct page **pages; |
@@ -155,13 +156,15 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb) | |||
155 | 156 | ||
156 | 157 | ||
157 | static inline void | 158 | static inline void |
158 | tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush) | 159 | tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) |
159 | { | 160 | { |
160 | tlb->mm = mm; | 161 | tlb->mm = mm; |
161 | tlb->max = ARRAY_SIZE(tlb->local); | 162 | tlb->max = ARRAY_SIZE(tlb->local); |
162 | tlb->pages = tlb->local; | 163 | tlb->pages = tlb->local; |
163 | tlb->nr = 0; | 164 | tlb->nr = 0; |
164 | tlb->fullmm = full_mm_flush; | 165 | tlb->fullmm = !(start | (end+1)); |
166 | tlb->start = start; | ||
167 | tlb->end = end; | ||
165 | tlb->start_addr = ~0UL; | 168 | tlb->start_addr = ~0UL; |
166 | } | 169 | } |
167 | 170 | ||
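Both tlb.h hunks above (arm64 and ia64) replace the old full_mm_flush flag with a (start, end) range and recover the flag as !(start | (end + 1)). A full address-space teardown is conventionally passed as start = 0 and end = ~0UL, so end + 1 wraps to 0 and the OR is zero only in that case; any bounded range yields fullmm = 0. A tiny standalone check of that encoding (not kernel code):

    #include <assert.h>
    #include <stdio.h>

    /* Mirrors tlb->fullmm = !(start | (end + 1)) from the hunks above. */
    static int is_full_mm_flush(unsigned long start, unsigned long end)
    {
            return !(start | (end + 1));
    }

    int main(void)
    {
            assert(is_full_mm_flush(0, ~0UL) == 1);        /* full teardown */
            assert(is_full_mm_flush(0x1000, 0x2000) == 0); /* partial unmap */
            printf("fullmm encoding ok\n");
            return 0;
    }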
diff --git a/arch/m68k/emu/natfeat.c b/arch/m68k/emu/natfeat.c index 2291a7d69d49..121a6660ad4e 100644 --- a/arch/m68k/emu/natfeat.c +++ b/arch/m68k/emu/natfeat.c | |||
@@ -18,9 +18,11 @@ | |||
18 | #include <asm/machdep.h> | 18 | #include <asm/machdep.h> |
19 | #include <asm/natfeat.h> | 19 | #include <asm/natfeat.h> |
20 | 20 | ||
21 | extern long nf_get_id_phys(unsigned long feature_name); | ||
22 | |||
21 | asm("\n" | 23 | asm("\n" |
22 | " .global nf_get_id,nf_call\n" | 24 | " .global nf_get_id_phys,nf_call\n" |
23 | "nf_get_id:\n" | 25 | "nf_get_id_phys:\n" |
24 | " .short 0x7300\n" | 26 | " .short 0x7300\n" |
25 | " rts\n" | 27 | " rts\n" |
26 | "nf_call:\n" | 28 | "nf_call:\n" |
@@ -29,12 +31,25 @@ asm("\n" | |||
29 | "1: moveq.l #0,%d0\n" | 31 | "1: moveq.l #0,%d0\n" |
30 | " rts\n" | 32 | " rts\n" |
31 | " .section __ex_table,\"a\"\n" | 33 | " .section __ex_table,\"a\"\n" |
32 | " .long nf_get_id,1b\n" | 34 | " .long nf_get_id_phys,1b\n" |
33 | " .long nf_call,1b\n" | 35 | " .long nf_call,1b\n" |
34 | " .previous"); | 36 | " .previous"); |
35 | EXPORT_SYMBOL_GPL(nf_get_id); | ||
36 | EXPORT_SYMBOL_GPL(nf_call); | 37 | EXPORT_SYMBOL_GPL(nf_call); |
37 | 38 | ||
39 | long nf_get_id(const char *feature_name) | ||
40 | { | ||
41 | /* feature_name may be in vmalloc()ed memory, so make a copy */ | ||
42 | char name_copy[32]; | ||
43 | size_t n; | ||
44 | |||
45 | n = strlcpy(name_copy, feature_name, sizeof(name_copy)); | ||
46 | if (n >= sizeof(name_copy)) | ||
47 | return 0; | ||
48 | |||
49 | return nf_get_id_phys(virt_to_phys(name_copy)); | ||
50 | } | ||
51 | EXPORT_SYMBOL_GPL(nf_get_id); | ||
52 | |||
38 | void nfprint(const char *fmt, ...) | 53 | void nfprint(const char *fmt, ...) |
39 | { | 54 | { |
40 | static char buf[256]; | 55 | static char buf[256]; |
@@ -43,7 +58,7 @@ void nfprint(const char *fmt, ...) | |||
43 | 58 | ||
44 | va_start(ap, fmt); | 59 | va_start(ap, fmt); |
45 | n = vsnprintf(buf, 256, fmt, ap); | 60 | n = vsnprintf(buf, 256, fmt, ap); |
46 | nf_call(nf_get_id("NF_STDERR"), buf); | 61 | nf_call(nf_get_id("NF_STDERR"), virt_to_phys(buf)); |
47 | va_end(ap); | 62 | va_end(ap); |
48 | } | 63 | } |
49 | 64 | ||
@@ -68,7 +83,7 @@ void nf_init(void) | |||
68 | id = nf_get_id("NF_NAME"); | 83 | id = nf_get_id("NF_NAME"); |
69 | if (!id) | 84 | if (!id) |
70 | return; | 85 | return; |
71 | nf_call(id, buf, 256); | 86 | nf_call(id, virt_to_phys(buf), 256); |
72 | buf[255] = 0; | 87 | buf[255] = 0; |
73 | 88 | ||
74 | pr_info("NatFeats found (%s, %lu.%lu)\n", buf, version >> 16, | 89 | pr_info("NatFeats found (%s, %lu.%lu)\n", buf, version >> 16, |
diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c index e3011338ab40..0721858fbd1e 100644 --- a/arch/m68k/emu/nfblock.c +++ b/arch/m68k/emu/nfblock.c | |||
@@ -41,8 +41,8 @@ static inline s32 nfhd_read_write(u32 major, u32 minor, u32 rwflag, u32 recno, | |||
41 | static inline s32 nfhd_get_capacity(u32 major, u32 minor, u32 *blocks, | 41 | static inline s32 nfhd_get_capacity(u32 major, u32 minor, u32 *blocks, |
42 | u32 *blocksize) | 42 | u32 *blocksize) |
43 | { | 43 | { |
44 | return nf_call(nfhd_id + NFHD_GET_CAPACITY, major, minor, blocks, | 44 | return nf_call(nfhd_id + NFHD_GET_CAPACITY, major, minor, |
45 | blocksize); | 45 | virt_to_phys(blocks), virt_to_phys(blocksize)); |
46 | } | 46 | } |
47 | 47 | ||
48 | static LIST_HEAD(nfhd_list); | 48 | static LIST_HEAD(nfhd_list); |
diff --git a/arch/m68k/emu/nfcon.c b/arch/m68k/emu/nfcon.c index 6685bf45c2c3..57e8c8fb5eba 100644 --- a/arch/m68k/emu/nfcon.c +++ b/arch/m68k/emu/nfcon.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | #include <linux/err.h> | 16 | #include <linux/err.h> |
17 | #include <linux/uaccess.h> | 17 | #include <linux/uaccess.h> |
18 | #include <linux/io.h> | ||
18 | 19 | ||
19 | #include <asm/natfeat.h> | 20 | #include <asm/natfeat.h> |
20 | 21 | ||
@@ -25,17 +26,18 @@ static struct tty_driver *nfcon_tty_driver; | |||
25 | static void nfputs(const char *str, unsigned int count) | 26 | static void nfputs(const char *str, unsigned int count) |
26 | { | 27 | { |
27 | char buf[68]; | 28 | char buf[68]; |
29 | unsigned long phys = virt_to_phys(buf); | ||
28 | 30 | ||
29 | buf[64] = 0; | 31 | buf[64] = 0; |
30 | while (count > 64) { | 32 | while (count > 64) { |
31 | memcpy(buf, str, 64); | 33 | memcpy(buf, str, 64); |
32 | nf_call(stderr_id, buf); | 34 | nf_call(stderr_id, phys); |
33 | str += 64; | 35 | str += 64; |
34 | count -= 64; | 36 | count -= 64; |
35 | } | 37 | } |
36 | memcpy(buf, str, count); | 38 | memcpy(buf, str, count); |
37 | buf[count] = 0; | 39 | buf[count] = 0; |
38 | nf_call(stderr_id, buf); | 40 | nf_call(stderr_id, phys); |
39 | } | 41 | } |
40 | 42 | ||
41 | static void nfcon_write(struct console *con, const char *str, | 43 | static void nfcon_write(struct console *con, const char *str, |
@@ -79,7 +81,7 @@ static int nfcon_tty_put_char(struct tty_struct *tty, unsigned char ch) | |||
79 | { | 81 | { |
80 | char temp[2] = { ch, 0 }; | 82 | char temp[2] = { ch, 0 }; |
81 | 83 | ||
82 | nf_call(stderr_id, temp); | 84 | nf_call(stderr_id, virt_to_phys(temp)); |
83 | return 1; | 85 | return 1; |
84 | } | 86 | } |
85 | 87 | ||
diff --git a/arch/m68k/emu/nfeth.c b/arch/m68k/emu/nfeth.c index 695cd737a42e..a0985fd088d1 100644 --- a/arch/m68k/emu/nfeth.c +++ b/arch/m68k/emu/nfeth.c | |||
@@ -195,7 +195,8 @@ static struct net_device * __init nfeth_probe(int unit) | |||
195 | char mac[ETH_ALEN], host_ip[32], local_ip[32]; | 195 | char mac[ETH_ALEN], host_ip[32], local_ip[32]; |
196 | int err; | 196 | int err; |
197 | 197 | ||
198 | if (!nf_call(nfEtherID + XIF_GET_MAC, unit, mac, ETH_ALEN)) | 198 | if (!nf_call(nfEtherID + XIF_GET_MAC, unit, virt_to_phys(mac), |
199 | ETH_ALEN)) | ||
199 | return NULL; | 200 | return NULL; |
200 | 201 | ||
201 | dev = alloc_etherdev(sizeof(struct nfeth_private)); | 202 | dev = alloc_etherdev(sizeof(struct nfeth_private)); |
@@ -217,9 +218,9 @@ static struct net_device * __init nfeth_probe(int unit) | |||
217 | } | 218 | } |
218 | 219 | ||
219 | nf_call(nfEtherID + XIF_GET_IPHOST, unit, | 220 | nf_call(nfEtherID + XIF_GET_IPHOST, unit, |
220 | host_ip, sizeof(host_ip)); | 221 | virt_to_phys(host_ip), sizeof(host_ip)); |
221 | nf_call(nfEtherID + XIF_GET_IPATARI, unit, | 222 | nf_call(nfEtherID + XIF_GET_IPATARI, unit, |
222 | local_ip, sizeof(local_ip)); | 223 | virt_to_phys(local_ip), sizeof(local_ip)); |
223 | 224 | ||
224 | netdev_info(dev, KBUILD_MODNAME " addr:%s (%s) HWaddr:%pM\n", host_ip, | 225 | netdev_info(dev, KBUILD_MODNAME " addr:%s (%s) HWaddr:%pM\n", host_ip, |
225 | local_ip, mac); | 226 | local_ip, mac); |
diff --git a/arch/m68k/include/asm/div64.h b/arch/m68k/include/asm/div64.h index 444ea8a09e9f..ef881cfbbca9 100644 --- a/arch/m68k/include/asm/div64.h +++ b/arch/m68k/include/asm/div64.h | |||
@@ -15,16 +15,17 @@ | |||
15 | unsigned long long n64; \ | 15 | unsigned long long n64; \ |
16 | } __n; \ | 16 | } __n; \ |
17 | unsigned long __rem, __upper; \ | 17 | unsigned long __rem, __upper; \ |
18 | unsigned long __base = (base); \ | ||
18 | \ | 19 | \ |
19 | __n.n64 = (n); \ | 20 | __n.n64 = (n); \ |
20 | if ((__upper = __n.n32[0])) { \ | 21 | if ((__upper = __n.n32[0])) { \ |
21 | asm ("divul.l %2,%1:%0" \ | 22 | asm ("divul.l %2,%1:%0" \ |
22 | : "=d" (__n.n32[0]), "=d" (__upper) \ | 23 | : "=d" (__n.n32[0]), "=d" (__upper) \ |
23 | : "d" (base), "0" (__n.n32[0])); \ | 24 | : "d" (__base), "0" (__n.n32[0])); \ |
24 | } \ | 25 | } \ |
25 | asm ("divu.l %2,%1:%0" \ | 26 | asm ("divu.l %2,%1:%0" \ |
26 | : "=d" (__n.n32[1]), "=d" (__rem) \ | 27 | : "=d" (__n.n32[1]), "=d" (__rem) \ |
27 | : "d" (base), "1" (__upper), "0" (__n.n32[1])); \ | 28 | : "d" (__base), "1" (__upper), "0" (__n.n32[1])); \ |
28 | (n) = __n.n64; \ | 29 | (n) = __n.n64; \ |
29 | __rem; \ | 30 | __rem; \ |
30 | }) | 31 | }) |
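The div64.h hunk above copies the macro argument into a local (__base) so it is evaluated exactly once rather than once per asm statement; with the old form, an argument that has side effects would run twice. A short, architecture-independent sketch of the hazard and the fix, using a GCC statement expression as the kernel does (hypothetical macro names, not the real do_div):

    #include <stdio.h>

    /* Buggy: 'base' appears twice, so it is evaluated twice. */
    #define REM_TWICE(n, base)  (((n) % (base)) + 0 * ((n) / (base)))

    /* Fixed: evaluate 'base' once into a local, as the patch does with __base. */
    #define REM_ONCE(n, base) ({                    \
            unsigned long __base = (base);          \
            (n) % __base;                           \
    })

    static int calls;
    static unsigned long next_base(void)
    {
            calls++;                /* side effect: count evaluations */
            return 10;
    }

    int main(void)
    {
            calls = 0;
            (void)REM_TWICE(42UL, next_base());
            printf("old-style macro evaluated base %d times\n", calls);  /* prints 2 */

            calls = 0;
            (void)REM_ONCE(42UL, next_base());
            printf("fixed macro evaluated base %d times\n", calls);      /* prints 1 */
            return 0;
    }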
diff --git a/arch/m68k/include/asm/irqflags.h b/arch/m68k/include/asm/irqflags.h index 7ef4115b8c4a..5053092b369f 100644 --- a/arch/m68k/include/asm/irqflags.h +++ b/arch/m68k/include/asm/irqflags.h | |||
@@ -67,6 +67,10 @@ static inline void arch_local_irq_restore(unsigned long flags) | |||
67 | 67 | ||
68 | static inline bool arch_irqs_disabled_flags(unsigned long flags) | 68 | static inline bool arch_irqs_disabled_flags(unsigned long flags) |
69 | { | 69 | { |
70 | if (MACH_IS_ATARI) { | ||
71 | /* Ignore HSYNC = ipl 2 on Atari */ | ||
72 | return (flags & ~(ALLOWINT | 0x200)) != 0; | ||
73 | } | ||
70 | return (flags & ~ALLOWINT) != 0; | 74 | return (flags & ~ALLOWINT) != 0; |
71 | } | 75 | } |
72 | 76 | ||
diff --git a/arch/m68k/platform/coldfire/pci.c b/arch/m68k/platform/coldfire/pci.c index b33f97a13e6d..df9679238b6d 100644 --- a/arch/m68k/platform/coldfire/pci.c +++ b/arch/m68k/platform/coldfire/pci.c | |||
@@ -319,7 +319,6 @@ static int __init mcf_pci_init(void) | |||
319 | pci_fixup_irqs(pci_common_swizzle, mcf_pci_map_irq); | 319 | pci_fixup_irqs(pci_common_swizzle, mcf_pci_map_irq); |
320 | pci_bus_size_bridges(rootbus); | 320 | pci_bus_size_bridges(rootbus); |
321 | pci_bus_assign_resources(rootbus); | 321 | pci_bus_assign_resources(rootbus); |
322 | pci_enable_bridges(rootbus); | ||
323 | return 0; | 322 | return 0; |
324 | } | 323 | } |
325 | 324 | ||
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index d22a4ecffff4..4fab52294d98 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig | |||
@@ -28,7 +28,7 @@ config MICROBLAZE | |||
28 | select GENERIC_CLOCKEVENTS | 28 | select GENERIC_CLOCKEVENTS |
29 | select GENERIC_IDLE_POLL_SETUP | 29 | select GENERIC_IDLE_POLL_SETUP |
30 | select MODULES_USE_ELF_RELA | 30 | select MODULES_USE_ELF_RELA |
31 | select CLONE_BACKWARDS | 31 | select CLONE_BACKWARDS3 |
32 | 32 | ||
33 | config SWAP | 33 | config SWAP |
34 | def_bool n | 34 | def_bool n |
diff --git a/arch/microblaze/include/asm/prom.h b/arch/microblaze/include/asm/prom.h index 20c5e8e5121b..9977816c5ad3 100644 --- a/arch/microblaze/include/asm/prom.h +++ b/arch/microblaze/include/asm/prom.h | |||
@@ -50,9 +50,6 @@ void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop, | |||
50 | 50 | ||
51 | extern void kdump_move_device_tree(void); | 51 | extern void kdump_move_device_tree(void); |
52 | 52 | ||
53 | /* CPU OF node matching */ | ||
54 | struct device_node *of_get_cpu_node(int cpu, unsigned int *thread); | ||
55 | |||
56 | #endif /* __ASSEMBLY__ */ | 53 | #endif /* __ASSEMBLY__ */ |
57 | #endif /* __KERNEL__ */ | 54 | #endif /* __KERNEL__ */ |
58 | 55 | ||
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index e12764c2a9d0..dccd7cec442d 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig | |||
@@ -2305,9 +2305,9 @@ config KEXEC | |||
2305 | 2305 | ||
2306 | It is an ongoing process to be certain the hardware in a machine | 2306 | It is an ongoing process to be certain the hardware in a machine |
2307 | is properly shutdown, so do not be surprised if this code does not | 2307 | is properly shutdown, so do not be surprised if this code does not |
2308 | initially work for you. It may help to enable device hotplugging | 2308 | initially work for you. As of this writing the exact hardware |
2309 | support. As of this writing the exact hardware interface is | 2309 | interface is strongly in flux, so no good recommendation can be |
2310 | strongly in flux, so no good recommendation can be made. | 2310 | made. |
2311 | 2311 | ||
2312 | config CRASH_DUMP | 2312 | config CRASH_DUMP |
2313 | bool "Kernel crash dumps" | 2313 | bool "Kernel crash dumps" |
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h index 1dc086087a72..fa44f3ec5302 100644 --- a/arch/mips/include/asm/cpu-features.h +++ b/arch/mips/include/asm/cpu-features.h | |||
@@ -17,6 +17,8 @@ | |||
17 | #define current_cpu_type() current_cpu_data.cputype | 17 | #define current_cpu_type() current_cpu_data.cputype |
18 | #endif | 18 | #endif |
19 | 19 | ||
20 | #define boot_cpu_type() cpu_data[0].cputype | ||
21 | |||
20 | /* | 22 | /* |
21 | * SMP assumption: Options of CPU 0 are a superset of all processors. | 23 | * SMP assumption: Options of CPU 0 are a superset of all processors. |
22 | * This is true for all known MIPS systems. | 24 | * This is true for all known MIPS systems. |
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index 159abc8842d2..126da74d4c55 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c | |||
@@ -66,6 +66,8 @@ static void __init bmips_smp_setup(void) | |||
66 | int i, cpu = 1, boot_cpu = 0; | 66 | int i, cpu = 1, boot_cpu = 0; |
67 | 67 | ||
68 | #if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380) | 68 | #if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380) |
69 | int cpu_hw_intr; | ||
70 | |||
69 | /* arbitration priority */ | 71 | /* arbitration priority */ |
70 | clear_c0_brcm_cmt_ctrl(0x30); | 72 | clear_c0_brcm_cmt_ctrl(0x30); |
71 | 73 | ||
@@ -80,8 +82,12 @@ static void __init bmips_smp_setup(void) | |||
80 | * MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output | 82 | * MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output |
81 | * MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output | 83 | * MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output |
82 | */ | 84 | */ |
83 | change_c0_brcm_cmt_intr(0xf8018000, | 85 | if (boot_cpu == 0) |
84 | (0x02 << 27) | (0x03 << 15)); | 86 | cpu_hw_intr = 0x02; |
87 | else | ||
88 | cpu_hw_intr = 0x1d; | ||
89 | |||
90 | change_c0_brcm_cmt_intr(0xf8018000, (cpu_hw_intr << 27) | (0x03 << 15)); | ||
85 | 91 | ||
86 | /* single core, 2 threads (2 pipelines) */ | 92 | /* single core, 2 threads (2 pipelines) */ |
87 | max_cpus = 2; | 93 | max_cpus = 2; |
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c index 1765bab000a0..faf84c5f2629 100644 --- a/arch/mips/kernel/vpe.c +++ b/arch/mips/kernel/vpe.c | |||
@@ -1335,8 +1335,9 @@ static ssize_t store_kill(struct device *dev, struct device_attribute *attr, | |||
1335 | 1335 | ||
1336 | return len; | 1336 | return len; |
1337 | } | 1337 | } |
1338 | static DEVICE_ATTR(kill, S_IWUSR, NULL, store_kill); | ||
1338 | 1339 | ||
1339 | static ssize_t show_ntcs(struct device *cd, struct device_attribute *attr, | 1340 | static ssize_t ntcs_show(struct device *cd, struct device_attribute *attr, |
1340 | char *buf) | 1341 | char *buf) |
1341 | { | 1342 | { |
1342 | struct vpe *vpe = get_vpe(tclimit); | 1343 | struct vpe *vpe = get_vpe(tclimit); |
@@ -1344,7 +1345,7 @@ static ssize_t show_ntcs(struct device *cd, struct device_attribute *attr, | |||
1344 | return sprintf(buf, "%d\n", vpe->ntcs); | 1345 | return sprintf(buf, "%d\n", vpe->ntcs); |
1345 | } | 1346 | } |
1346 | 1347 | ||
1347 | static ssize_t store_ntcs(struct device *dev, struct device_attribute *attr, | 1348 | static ssize_t ntcs_store(struct device *dev, struct device_attribute *attr, |
1348 | const char *buf, size_t len) | 1349 | const char *buf, size_t len) |
1349 | { | 1350 | { |
1350 | struct vpe *vpe = get_vpe(tclimit); | 1351 | struct vpe *vpe = get_vpe(tclimit); |
@@ -1365,12 +1366,14 @@ static ssize_t store_ntcs(struct device *dev, struct device_attribute *attr, | |||
1365 | out_einval: | 1366 | out_einval: |
1366 | return -EINVAL; | 1367 | return -EINVAL; |
1367 | } | 1368 | } |
1369 | static DEVICE_ATTR_RW(ntcs); | ||
1368 | 1370 | ||
1369 | static struct device_attribute vpe_class_attributes[] = { | 1371 | static struct attribute vpe_attrs[] = { |
1370 | __ATTR(kill, S_IWUSR, NULL, store_kill), | 1372 | &dev_attr_kill.attr, |
1371 | __ATTR(ntcs, S_IRUGO | S_IWUSR, show_ntcs, store_ntcs), | 1373 | &dev_attr_ntcs.attr, |
1372 | {} | 1374 | NULL, |
1373 | }; | 1375 | }; |
1376 | ATTRIBUTE_GROUPS(vpe); | ||
1374 | 1377 | ||
1375 | static void vpe_device_release(struct device *cd) | 1378 | static void vpe_device_release(struct device *cd) |
1376 | { | 1379 | { |
@@ -1381,7 +1384,7 @@ struct class vpe_class = { | |||
1381 | .name = "vpe", | 1384 | .name = "vpe", |
1382 | .owner = THIS_MODULE, | 1385 | .owner = THIS_MODULE, |
1383 | .dev_release = vpe_device_release, | 1386 | .dev_release = vpe_device_release, |
1384 | .dev_attrs = vpe_class_attributes, | 1387 | .dev_groups = vpe_groups, |
1385 | }; | 1388 | }; |
1386 | 1389 | ||
1387 | struct device vpe_device; | 1390 | struct device vpe_device; |
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index e773659ccf9f..46048d24328c 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c | |||
@@ -803,6 +803,32 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, | |||
803 | dec_insn.next_pc_inc; | 803 | dec_insn.next_pc_inc; |
804 | return 1; | 804 | return 1; |
805 | break; | 805 | break; |
806 | #ifdef CONFIG_CPU_CAVIUM_OCTEON | ||
807 | case lwc2_op: /* This is bbit0 on Octeon */ | ||
808 | if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) == 0) | ||
809 | *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2); | ||
810 | else | ||
811 | *contpc = regs->cp0_epc + 8; | ||
812 | return 1; | ||
813 | case ldc2_op: /* This is bbit032 on Octeon */ | ||
814 | if ((regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32))) == 0) | ||
815 | *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2); | ||
816 | else | ||
817 | *contpc = regs->cp0_epc + 8; | ||
818 | return 1; | ||
819 | case swc2_op: /* This is bbit1 on Octeon */ | ||
820 | if (regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) | ||
821 | *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2); | ||
822 | else | ||
823 | *contpc = regs->cp0_epc + 8; | ||
824 | return 1; | ||
825 | case sdc2_op: /* This is bbit132 on Octeon */ | ||
826 | if (regs->regs[insn.i_format.rs] & (1ull<<(insn.i_format.rt + 32))) | ||
827 | *contpc = regs->cp0_epc + 4 + (insn.i_format.simmediate << 2); | ||
828 | else | ||
829 | *contpc = regs->cp0_epc + 8; | ||
830 | return 1; | ||
831 | #endif | ||
806 | case cop0_op: | 832 | case cop0_op: |
807 | case cop1_op: | 833 | case cop1_op: |
808 | case cop2_op: | 834 | case cop2_op: |
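The Octeon hunk above emulates the bbit0/bbit1 branch family: it tests one bit of a source register and, when the branch is taken, continues at epc + 4 plus the sign-extended immediate scaled to bytes; when it is not taken, execution resumes past the delay slot at epc + 8. A small standalone sketch of that target arithmetic (simplified field handling, hypothetical helper name, not the kernel decoder):

    #include <inttypes.h>
    #include <stdio.h>

    /* Continuation PC for a "branch on bit clear" (bbit0-style) instruction. */
    static uint64_t bbit0_contpc(uint64_t epc, uint64_t rs_val,
                                 unsigned int bit, int16_t simm)
    {
            if ((rs_val & (1ull << bit)) == 0)
                    return epc + 4 + (int64_t)simm * 4;  /* taken: word offset from the delay slot */
            return epc + 8;                              /* not taken: skip the delay slot */
    }

    int main(void)
    {
            /* Bit 3 of rs is clear, so the branch is taken with a +0x10-word offset. */
            printf("taken:     0x%" PRIx64 "\n", bbit0_contpc(0x1000, 0x0, 3, 0x10));
            /* Bit 3 is set: fall through past the delay slot. */
            printf("not taken: 0x%" PRIx64 "\n", bbit0_contpc(0x1000, 0x8, 3, 0x10));
            return 0;
    }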
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c index e4b1140cdae0..3a2b6e9f25cf 100644 --- a/arch/mips/oprofile/op_model_mipsxx.c +++ b/arch/mips/oprofile/op_model_mipsxx.c | |||
@@ -166,7 +166,7 @@ static void mipsxx_reg_setup(struct op_counter_config *ctr) | |||
166 | reg.control[i] |= M_PERFCTL_USER; | 166 | reg.control[i] |= M_PERFCTL_USER; |
167 | if (ctr[i].exl) | 167 | if (ctr[i].exl) |
168 | reg.control[i] |= M_PERFCTL_EXL; | 168 | reg.control[i] |= M_PERFCTL_EXL; |
169 | if (current_cpu_type() == CPU_XLR) | 169 | if (boot_cpu_type() == CPU_XLR) |
170 | reg.control[i] |= M_PERFCTL_COUNT_ALL_THREADS; | 170 | reg.control[i] |= M_PERFCTL_COUNT_ALL_THREADS; |
171 | reg.counter[i] = 0x80000000 - ctr[i].count; | 171 | reg.counter[i] = 0x80000000 - ctr[i].count; |
172 | } | 172 | } |
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c index 594e60d6a43b..33e7aa52d9c4 100644 --- a/arch/mips/pci/pci.c +++ b/arch/mips/pci/pci.c | |||
@@ -113,7 +113,6 @@ static void pcibios_scanbus(struct pci_controller *hose) | |||
113 | if (!pci_has_flag(PCI_PROBE_ONLY)) { | 113 | if (!pci_has_flag(PCI_PROBE_ONLY)) { |
114 | pci_bus_size_bridges(bus); | 114 | pci_bus_size_bridges(bus); |
115 | pci_bus_assign_resources(bus); | 115 | pci_bus_assign_resources(bus); |
116 | pci_enable_bridges(bus); | ||
117 | } | 116 | } |
118 | } | 117 | } |
119 | } | 118 | } |
diff --git a/arch/mips/pnx833x/common/platform.c b/arch/mips/pnx833x/common/platform.c index d22dc0d6f289..2b7e837dc2e2 100644 --- a/arch/mips/pnx833x/common/platform.c +++ b/arch/mips/pnx833x/common/platform.c | |||
@@ -206,11 +206,13 @@ static struct resource pnx833x_ethernet_resources[] = { | |||
206 | .end = PNX8335_IP3902_PORTS_END, | 206 | .end = PNX8335_IP3902_PORTS_END, |
207 | .flags = IORESOURCE_MEM, | 207 | .flags = IORESOURCE_MEM, |
208 | }, | 208 | }, |
209 | #ifdef CONFIG_SOC_PNX8335 | ||
209 | [1] = { | 210 | [1] = { |
210 | .start = PNX8335_PIC_ETHERNET_INT, | 211 | .start = PNX8335_PIC_ETHERNET_INT, |
211 | .end = PNX8335_PIC_ETHERNET_INT, | 212 | .end = PNX8335_PIC_ETHERNET_INT, |
212 | .flags = IORESOURCE_IRQ, | 213 | .flags = IORESOURCE_IRQ, |
213 | }, | 214 | }, |
215 | #endif | ||
214 | }; | 216 | }; |
215 | 217 | ||
216 | static struct platform_device pnx833x_ethernet_device = { | 218 | static struct platform_device pnx833x_ethernet_device = { |
diff --git a/arch/mips/sni/a20r.c b/arch/mips/sni/a20r.c index dd0ab982d77e..f9407e170476 100644 --- a/arch/mips/sni/a20r.c +++ b/arch/mips/sni/a20r.c | |||
@@ -122,7 +122,6 @@ static struct resource sc26xx_rsrc[] = { | |||
122 | 122 | ||
123 | static struct sccnxp_pdata sccnxp_data = { | 123 | static struct sccnxp_pdata sccnxp_data = { |
124 | .reg_shift = 2, | 124 | .reg_shift = 2, |
125 | .frequency = 3686400, | ||
126 | .mctrl_cfg[0] = MCTRL_SIG(DTR_OP, LINE_OP7) | | 125 | .mctrl_cfg[0] = MCTRL_SIG(DTR_OP, LINE_OP7) | |
127 | MCTRL_SIG(RTS_OP, LINE_OP3) | | 126 | MCTRL_SIG(RTS_OP, LINE_OP3) | |
128 | MCTRL_SIG(DSR_IP, LINE_IP5) | | 127 | MCTRL_SIG(DSR_IP, LINE_IP5) | |
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig index 99dbab1c59ac..d60bf98fa5cf 100644 --- a/arch/openrisc/Kconfig +++ b/arch/openrisc/Kconfig | |||
@@ -55,6 +55,7 @@ config GENERIC_CSUM | |||
55 | 55 | ||
56 | source "init/Kconfig" | 56 | source "init/Kconfig" |
57 | 57 | ||
58 | source "kernel/Kconfig.freezer" | ||
58 | 59 | ||
59 | menu "Processor type and features" | 60 | menu "Processor type and features" |
60 | 61 | ||
diff --git a/arch/openrisc/include/asm/prom.h b/arch/openrisc/include/asm/prom.h index bbb34e5343a2..eb59bfe23e85 100644 --- a/arch/openrisc/include/asm/prom.h +++ b/arch/openrisc/include/asm/prom.h | |||
@@ -44,9 +44,6 @@ void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop, | |||
44 | 44 | ||
45 | extern void kdump_move_device_tree(void); | 45 | extern void kdump_move_device_tree(void); |
46 | 46 | ||
47 | /* CPU OF node matching */ | ||
48 | struct device_node *of_get_cpu_node(int cpu, unsigned int *thread); | ||
49 | |||
50 | /* Get the MAC address */ | 47 | /* Get the MAC address */ |
51 | extern const void *of_get_mac_address(struct device_node *np); | 48 | extern const void *of_get_mac_address(struct device_node *np); |
52 | 49 | ||
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 3bf72cd2c8fc..5aecda05e0da 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig | |||
@@ -369,9 +369,9 @@ config KEXEC | |||
369 | 369 | ||
370 | It is an ongoing process to be certain the hardware in a machine | 370 | It is an ongoing process to be certain the hardware in a machine |
371 | is properly shutdown, so do not be surprised if this code does not | 371 | is properly shutdown, so do not be surprised if this code does not |
372 | initially work for you. It may help to enable device hotplugging | 372 | initially work for you. As of this writing the exact hardware |
373 | support. As of this writing the exact hardware interface is | 373 | interface is strongly in flux, so no good recommendation can be |
374 | strongly in flux, so no good recommendation can be made. | 374 | made. |
375 | 375 | ||
376 | config CRASH_DUMP | 376 | config CRASH_DUMP |
377 | bool "Build a kdump crash kernel" | 377 | bool "Build a kdump crash kernel" |
@@ -566,7 +566,7 @@ config SCHED_SMT | |||
566 | config PPC_DENORMALISATION | 566 | config PPC_DENORMALISATION |
567 | bool "PowerPC denormalisation exception handling" | 567 | bool "PowerPC denormalisation exception handling" |
568 | depends on PPC_BOOK3S_64 | 568 | depends on PPC_BOOK3S_64 |
569 | default "n" | 569 | default "y" if PPC_POWERNV |
570 | ---help--- | 570 | ---help--- |
571 | Add support for handling denormalisation of single precision | 571 | Add support for handling denormalisation of single precision |
572 | values. Useful for bare metal only. If unsure say Y here. | 572 | values. Useful for bare metal only. If unsure say Y here. |
@@ -979,6 +979,7 @@ config RELOCATABLE | |||
979 | must live at a different physical address than the primary | 979 | must live at a different physical address than the primary |
980 | kernel. | 980 | kernel. |
981 | 981 | ||
982 | # This value must have zeroes in the bottom 60 bits otherwise lots will break | ||
982 | config PAGE_OFFSET | 983 | config PAGE_OFFSET |
983 | hex | 984 | hex |
984 | default "0xc000000000000000" | 985 | default "0xc000000000000000" |
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index 988c812aab5b..b9f426212d3a 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h | |||
@@ -211,9 +211,19 @@ extern long long virt_phys_offset; | |||
211 | #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET)) | 211 | #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET)) |
212 | #define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET) | 212 | #define __pa(x) ((unsigned long)(x) - VIRT_PHYS_OFFSET) |
213 | #else | 213 | #else |
214 | #ifdef CONFIG_PPC64 | ||
215 | /* | ||
216 | * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET | ||
217 | * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit. | ||
218 | */ | ||
219 | #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET)) | ||
220 | #define __pa(x) ((unsigned long)(x) & 0x0fffffffffffffffUL) | ||
221 | |||
222 | #else /* 32-bit, non book E */ | ||
214 | #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START)) | 223 | #define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START)) |
215 | #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START) | 224 | #define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START) |
216 | #endif | 225 | #endif |
226 | #endif | ||
217 | 227 | ||
218 | /* | 228 | /* |
219 | * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI, | 229 | * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI, |
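The 64-bit __va()/__pa() rework above trades + and - for | and &, which is only valid because PAGE_OFFSET (0xc000000000000000, per the Kconfig comment earlier in this series) has zeroes in its bottom 60 bits and kernel physical addresses fit within those 60 bits. A minimal userspace sketch of that equivalence follows; the constants mirror the patch, but the test program itself is purely illustrative and not kernel code.

	/*
	 * Illustration only: with no overlapping bits, OR behaves like ADD
	 * and AND-with-the-low-bits behaves like SUB of PAGE_OFFSET.
	 */
	#include <assert.h>
	#include <stdint.h>

	#define PAGE_OFFSET 0xc000000000000000ULL
	#define PA_MASK     0x0fffffffffffffffULL

	int main(void)
	{
		uint64_t pa = 0x0000000012345678ULL;	/* physical address < 2^60 */
		uint64_t va = pa | PAGE_OFFSET;		/* new __va(): OR instead of + */

		assert(va == pa + PAGE_OFFSET);		/* no carry, so OR == ADD */
		assert((va & PA_MASK) == va - PAGE_OFFSET);	/* AND == SUB back to pa */
		return 0;
	}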
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h index 8b2492644754..3fd2f1b6f906 100644 --- a/arch/powerpc/include/asm/perf_event_server.h +++ b/arch/powerpc/include/asm/perf_event_server.h | |||
@@ -138,11 +138,11 @@ extern ssize_t power_events_sysfs_show(struct device *dev, | |||
138 | #define EVENT_PTR(_id, _suffix) &EVENT_VAR(_id, _suffix).attr.attr | 138 | #define EVENT_PTR(_id, _suffix) &EVENT_VAR(_id, _suffix).attr.attr |
139 | 139 | ||
140 | #define EVENT_ATTR(_name, _id, _suffix) \ | 140 | #define EVENT_ATTR(_name, _id, _suffix) \ |
141 | PMU_EVENT_ATTR(_name, EVENT_VAR(_id, _suffix), PME_PM_##_id, \ | 141 | PMU_EVENT_ATTR(_name, EVENT_VAR(_id, _suffix), PME_##_id, \ |
142 | power_events_sysfs_show) | 142 | power_events_sysfs_show) |
143 | 143 | ||
144 | #define GENERIC_EVENT_ATTR(_name, _id) EVENT_ATTR(_name, _id, _g) | 144 | #define GENERIC_EVENT_ATTR(_name, _id) EVENT_ATTR(_name, _id, _g) |
145 | #define GENERIC_EVENT_PTR(_id) EVENT_PTR(_id, _g) | 145 | #define GENERIC_EVENT_PTR(_id) EVENT_PTR(_id, _g) |
146 | 146 | ||
147 | #define POWER_EVENT_ATTR(_name, _id) EVENT_ATTR(PM_##_name, _id, _p) | 147 | #define POWER_EVENT_ATTR(_name, _id) EVENT_ATTR(_name, _id, _p) |
148 | #define POWER_EVENT_PTR(_id) EVENT_PTR(_id, _p) | 148 | #define POWER_EVENT_PTR(_id) EVENT_PTR(_id, _p) |
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 47a35b08b963..e378cccfca55 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h | |||
@@ -247,6 +247,10 @@ struct thread_struct { | |||
247 | unsigned long tm_orig_msr; /* Thread's MSR on ctx switch */ | 247 | unsigned long tm_orig_msr; /* Thread's MSR on ctx switch */ |
248 | struct pt_regs ckpt_regs; /* Checkpointed registers */ | 248 | struct pt_regs ckpt_regs; /* Checkpointed registers */ |
249 | 249 | ||
250 | unsigned long tm_tar; | ||
251 | unsigned long tm_ppr; | ||
252 | unsigned long tm_dscr; | ||
253 | |||
250 | /* | 254 | /* |
251 | * Transactional FP and VSX 0-31 register set. | 255 | * Transactional FP and VSX 0-31 register set. |
252 | * NOTE: the sense of these is the opposite of the integer ckpt_regs! | 256 | * NOTE: the sense of these is the opposite of the integer ckpt_regs! |
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h index bc2da154f68b..ac204e022922 100644 --- a/arch/powerpc/include/asm/prom.h +++ b/arch/powerpc/include/asm/prom.h | |||
@@ -43,9 +43,6 @@ void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop, | |||
43 | 43 | ||
44 | extern void kdump_move_device_tree(void); | 44 | extern void kdump_move_device_tree(void); |
45 | 45 | ||
46 | /* CPU OF node matching */ | ||
47 | struct device_node *of_get_cpu_node(int cpu, unsigned int *thread); | ||
48 | |||
49 | /* cache lookup */ | 46 | /* cache lookup */ |
50 | struct device_node *of_find_next_cache_node(struct device_node *np); | 47 | struct device_node *of_find_next_cache_node(struct device_node *np); |
51 | 48 | ||
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index a6840e4e24f7..99222e27f173 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h | |||
@@ -254,19 +254,28 @@ | |||
254 | #define SPRN_HRMOR 0x139 /* Real mode offset register */ | 254 | #define SPRN_HRMOR 0x139 /* Real mode offset register */ |
255 | #define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */ | 255 | #define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */ |
256 | #define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */ | 256 | #define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */ |
257 | /* HFSCR and FSCR bit numbers are the same */ | ||
258 | #define FSCR_TAR_LG 8 /* Enable Target Address Register */ | ||
259 | #define FSCR_EBB_LG 7 /* Enable Event Based Branching */ | ||
260 | #define FSCR_TM_LG 5 /* Enable Transactional Memory */ | ||
261 | #define FSCR_PM_LG 4 /* Enable prob/priv access to PMU SPRs */ | ||
262 | #define FSCR_BHRB_LG 3 /* Enable Branch History Rolling Buffer*/ | ||
263 | #define FSCR_DSCR_LG 2 /* Enable Data Stream Control Register */ | ||
264 | #define FSCR_VECVSX_LG 1 /* Enable VMX/VSX */ | ||
265 | #define FSCR_FP_LG 0 /* Enable Floating Point */ | ||
257 | #define SPRN_FSCR 0x099 /* Facility Status & Control Register */ | 266 | #define SPRN_FSCR 0x099 /* Facility Status & Control Register */ |
258 | #define FSCR_TAR (1 << (63-55)) /* Enable Target Address Register */ | 267 | #define FSCR_TAR __MASK(FSCR_TAR_LG) |
259 | #define FSCR_EBB (1 << (63-56)) /* Enable Event Based Branching */ | 268 | #define FSCR_EBB __MASK(FSCR_EBB_LG) |
260 | #define FSCR_DSCR (1 << (63-61)) /* Enable Data Stream Control Register */ | 269 | #define FSCR_DSCR __MASK(FSCR_DSCR_LG) |
261 | #define SPRN_HFSCR 0xbe /* HV=1 Facility Status & Control Register */ | 270 | #define SPRN_HFSCR 0xbe /* HV=1 Facility Status & Control Register */ |
262 | #define HFSCR_TAR (1 << (63-55)) /* Enable Target Address Register */ | 271 | #define HFSCR_TAR __MASK(FSCR_TAR_LG) |
263 | #define HFSCR_EBB (1 << (63-56)) /* Enable Event Based Branching */ | 272 | #define HFSCR_EBB __MASK(FSCR_EBB_LG) |
264 | #define HFSCR_TM (1 << (63-58)) /* Enable Transactional Memory */ | 273 | #define HFSCR_TM __MASK(FSCR_TM_LG) |
265 | #define HFSCR_PM (1 << (63-60)) /* Enable prob/priv access to PMU SPRs */ | 274 | #define HFSCR_PM __MASK(FSCR_PM_LG) |
266 | #define HFSCR_BHRB (1 << (63-59)) /* Enable Branch History Rolling Buffer*/ | 275 | #define HFSCR_BHRB __MASK(FSCR_BHRB_LG) |
267 | #define HFSCR_DSCR (1 << (63-61)) /* Enable Data Stream Control Register */ | 276 | #define HFSCR_DSCR __MASK(FSCR_DSCR_LG) |
268 | #define HFSCR_VECVSX (1 << (63-62)) /* Enable VMX/VSX */ | 277 | #define HFSCR_VECVSX __MASK(FSCR_VECVSX_LG) |
269 | #define HFSCR_FP (1 << (63-63)) /* Enable Floating Point */ | 278 | #define HFSCR_FP __MASK(FSCR_FP_LG) |
270 | #define SPRN_TAR 0x32f /* Target Address Register */ | 279 | #define SPRN_TAR 0x32f /* Target Address Register */ |
271 | #define SPRN_LPCR 0x13E /* LPAR Control Register */ | 280 | #define SPRN_LPCR 0x13E /* LPAR Control Register */ |
272 | #define LPCR_VPM0 (1ul << (63-0)) | 281 | #define LPCR_VPM0 (1ul << (63-0)) |
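The reg.h hunk above replaces the hand-counted (1 << (63-n)) forms with __MASK() built from the new *_LG bit numbers, so FSCR and HFSCR now share one set of bit definitions. A small standalone check of that equivalence, assuming __MASK(X) expands to (1UL << (X)) as it does elsewhere in reg.h:

	#include <assert.h>

	#define __MASK(X)   (1UL << (X))	/* assumed to match reg.h */
	#define FSCR_TAR_LG 8			/* IBM bit 55 == bit 8 counted from the LSB */
	#define FSCR_TAR    __MASK(FSCR_TAR_LG)

	int main(void)
	{
		/* Old encoding counted down from the MSB: 1 << (63 - 55) == 1 << 8. */
		assert(FSCR_TAR == (1UL << (63 - 55)));
		return 0;
	}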
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h index 49a13e0ef234..294c2cedcf7a 100644 --- a/arch/powerpc/include/asm/switch_to.h +++ b/arch/powerpc/include/asm/switch_to.h | |||
@@ -15,6 +15,15 @@ extern struct task_struct *__switch_to(struct task_struct *, | |||
15 | struct thread_struct; | 15 | struct thread_struct; |
16 | extern struct task_struct *_switch(struct thread_struct *prev, | 16 | extern struct task_struct *_switch(struct thread_struct *prev, |
17 | struct thread_struct *next); | 17 | struct thread_struct *next); |
18 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
19 | static inline void save_tar(struct thread_struct *prev) | ||
20 | { | ||
21 | if (cpu_has_feature(CPU_FTR_ARCH_207S)) | ||
22 | prev->tar = mfspr(SPRN_TAR); | ||
23 | } | ||
24 | #else | ||
25 | static inline void save_tar(struct thread_struct *prev) {} | ||
26 | #endif | ||
18 | 27 | ||
19 | extern void giveup_fpu(struct task_struct *); | 28 | extern void giveup_fpu(struct task_struct *); |
20 | extern void load_up_fpu(void); | 29 | extern void load_up_fpu(void); |
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index c7e8afc2ead0..8207459efe56 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c | |||
@@ -138,6 +138,9 @@ int main(void) | |||
138 | DEFINE(THREAD_TM_TFHAR, offsetof(struct thread_struct, tm_tfhar)); | 138 | DEFINE(THREAD_TM_TFHAR, offsetof(struct thread_struct, tm_tfhar)); |
139 | DEFINE(THREAD_TM_TEXASR, offsetof(struct thread_struct, tm_texasr)); | 139 | DEFINE(THREAD_TM_TEXASR, offsetof(struct thread_struct, tm_texasr)); |
140 | DEFINE(THREAD_TM_TFIAR, offsetof(struct thread_struct, tm_tfiar)); | 140 | DEFINE(THREAD_TM_TFIAR, offsetof(struct thread_struct, tm_tfiar)); |
141 | DEFINE(THREAD_TM_TAR, offsetof(struct thread_struct, tm_tar)); | ||
142 | DEFINE(THREAD_TM_PPR, offsetof(struct thread_struct, tm_ppr)); | ||
143 | DEFINE(THREAD_TM_DSCR, offsetof(struct thread_struct, tm_dscr)); | ||
141 | DEFINE(PT_CKPT_REGS, offsetof(struct thread_struct, ckpt_regs)); | 144 | DEFINE(PT_CKPT_REGS, offsetof(struct thread_struct, ckpt_regs)); |
142 | DEFINE(THREAD_TRANSACT_VR0, offsetof(struct thread_struct, | 145 | DEFINE(THREAD_TRANSACT_VR0, offsetof(struct thread_struct, |
143 | transact_vr[0])); | 146 | transact_vr[0])); |
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index ea9414c8088d..55593ee2d5aa 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c | |||
@@ -1061,7 +1061,7 @@ static const struct file_operations proc_eeh_operations = { | |||
1061 | 1061 | ||
1062 | static int __init eeh_init_proc(void) | 1062 | static int __init eeh_init_proc(void) |
1063 | { | 1063 | { |
1064 | if (machine_is(pseries)) | 1064 | if (machine_is(pseries) || machine_is(powernv)) |
1065 | proc_create("powerpc/eeh", 0, NULL, &proc_eeh_operations); | 1065 | proc_create("powerpc/eeh", 0, NULL, &proc_eeh_operations); |
1066 | return 0; | 1066 | return 0; |
1067 | } | 1067 | } |
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index ab15b8d057ad..2bd0b885b0fe 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S | |||
@@ -449,15 +449,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_DSCR) | |||
449 | 449 | ||
450 | #ifdef CONFIG_PPC_BOOK3S_64 | 450 | #ifdef CONFIG_PPC_BOOK3S_64 |
451 | BEGIN_FTR_SECTION | 451 | BEGIN_FTR_SECTION |
452 | /* | ||
453 | * Back up the TAR across context switches. Note that the TAR is not | ||
454 | * available for use in the kernel. (To provide this, the TAR should | ||
455 | * be backed up/restored on exception entry/exit instead, and be in | ||
456 | * pt_regs. FIXME, this should be in pt_regs anyway (for debug).) | ||
457 | */ | ||
458 | mfspr r0,SPRN_TAR | ||
459 | std r0,THREAD_TAR(r3) | ||
460 | |||
461 | /* Event based branch registers */ | 452 | /* Event based branch registers */ |
462 | mfspr r0, SPRN_BESCR | 453 | mfspr r0, SPRN_BESCR |
463 | std r0, THREAD_BESCR(r3) | 454 | std r0, THREAD_BESCR(r3) |
@@ -584,9 +575,34 @@ BEGIN_FTR_SECTION | |||
584 | ld r7,DSCR_DEFAULT@toc(2) | 575 | ld r7,DSCR_DEFAULT@toc(2) |
585 | ld r0,THREAD_DSCR(r4) | 576 | ld r0,THREAD_DSCR(r4) |
586 | cmpwi r6,0 | 577 | cmpwi r6,0 |
578 | li r8, FSCR_DSCR | ||
587 | bne 1f | 579 | bne 1f |
588 | ld r0,0(r7) | 580 | ld r0,0(r7) |
589 | 1: cmpd r0,r25 | 581 | b 3f |
582 | 1: | ||
583 | BEGIN_FTR_SECTION_NESTED(70) | ||
584 | mfspr r6, SPRN_FSCR | ||
585 | or r6, r6, r8 | ||
586 | mtspr SPRN_FSCR, r6 | ||
587 | BEGIN_FTR_SECTION_NESTED(69) | ||
588 | mfspr r6, SPRN_HFSCR | ||
589 | or r6, r6, r8 | ||
590 | mtspr SPRN_HFSCR, r6 | ||
591 | END_FTR_SECTION_NESTED(CPU_FTR_HVMODE, CPU_FTR_HVMODE, 69) | ||
592 | b 4f | ||
593 | END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70) | ||
594 | 3: | ||
595 | BEGIN_FTR_SECTION_NESTED(70) | ||
596 | mfspr r6, SPRN_FSCR | ||
597 | andc r6, r6, r8 | ||
598 | mtspr SPRN_FSCR, r6 | ||
599 | BEGIN_FTR_SECTION_NESTED(69) | ||
600 | mfspr r6, SPRN_HFSCR | ||
601 | andc r6, r6, r8 | ||
602 | mtspr SPRN_HFSCR, r6 | ||
603 | END_FTR_SECTION_NESTED(CPU_FTR_HVMODE, CPU_FTR_HVMODE, 69) | ||
604 | END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70) | ||
605 | 4: cmpd r0,r25 | ||
590 | beq 2f | 606 | beq 2f |
591 | mtspr SPRN_DSCR,r0 | 607 | mtspr SPRN_DSCR,r0 |
592 | 2: | 608 | 2: |
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 4e00d223b2e3..902ca3c6b4b6 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S | |||
@@ -848,7 +848,7 @@ hv_facility_unavailable_relon_trampoline: | |||
848 | . = 0x4f80 | 848 | . = 0x4f80 |
849 | SET_SCRATCH0(r13) | 849 | SET_SCRATCH0(r13) |
850 | EXCEPTION_PROLOG_0(PACA_EXGEN) | 850 | EXCEPTION_PROLOG_0(PACA_EXGEN) |
851 | b facility_unavailable_relon_hv | 851 | b hv_facility_unavailable_relon_hv |
852 | 852 | ||
853 | STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint) | 853 | STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint) |
854 | #ifdef CONFIG_PPC_DENORMALISATION | 854 | #ifdef CONFIG_PPC_DENORMALISATION |
@@ -1175,6 +1175,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX) | |||
1175 | b .ret_from_except | 1175 | b .ret_from_except |
1176 | 1176 | ||
1177 | STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception) | 1177 | STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception) |
1178 | STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, .facility_unavailable_exception) | ||
1178 | 1179 | ||
1179 | .align 7 | 1180 | .align 7 |
1180 | .globl __end_handlers | 1181 | .globl __end_handlers |
@@ -1188,7 +1189,7 @@ __end_handlers: | |||
1188 | STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable) | 1189 | STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable) |
1189 | STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable) | 1190 | STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable) |
1190 | STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable) | 1191 | STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable) |
1191 | STD_RELON_EXCEPTION_HV_OOL(0xf80, facility_unavailable) | 1192 | STD_RELON_EXCEPTION_HV_OOL(0xf80, hv_facility_unavailable) |
1192 | 1193 | ||
1193 | #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) | 1194 | #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) |
1194 | /* | 1195 | /* |
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c index d92f3871e9cf..e2a0a162299b 100644 --- a/arch/powerpc/kernel/lparcfg.c +++ b/arch/powerpc/kernel/lparcfg.c | |||
@@ -35,7 +35,13 @@ | |||
35 | #include <asm/vdso_datapage.h> | 35 | #include <asm/vdso_datapage.h> |
36 | #include <asm/vio.h> | 36 | #include <asm/vio.h> |
37 | #include <asm/mmu.h> | 37 | #include <asm/mmu.h> |
38 | #include <asm/machdep.h> | ||
38 | 39 | ||
40 | |||
41 | /* | ||
42 | * This isn't a module but we expose that to userspace | ||
43 | * via /proc so leave the definitions here | ||
44 | */ | ||
39 | #define MODULE_VERS "1.9" | 45 | #define MODULE_VERS "1.9" |
40 | #define MODULE_NAME "lparcfg" | 46 | #define MODULE_NAME "lparcfg" |
41 | 47 | ||
@@ -418,7 +424,8 @@ static void parse_em_data(struct seq_file *m) | |||
418 | { | 424 | { |
419 | unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; | 425 | unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; |
420 | 426 | ||
421 | if (plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS) | 427 | if (firmware_has_feature(FW_FEATURE_LPAR) && |
428 | plpar_hcall(H_GET_EM_PARMS, retbuf) == H_SUCCESS) | ||
422 | seq_printf(m, "power_mode_data=%016lx\n", retbuf[0]); | 429 | seq_printf(m, "power_mode_data=%016lx\n", retbuf[0]); |
423 | } | 430 | } |
424 | 431 | ||
@@ -677,7 +684,6 @@ static int lparcfg_open(struct inode *inode, struct file *file) | |||
677 | } | 684 | } |
678 | 685 | ||
679 | static const struct file_operations lparcfg_fops = { | 686 | static const struct file_operations lparcfg_fops = { |
680 | .owner = THIS_MODULE, | ||
681 | .read = seq_read, | 687 | .read = seq_read, |
682 | .write = lparcfg_write, | 688 | .write = lparcfg_write, |
683 | .open = lparcfg_open, | 689 | .open = lparcfg_open, |
@@ -699,14 +705,4 @@ static int __init lparcfg_init(void) | |||
699 | } | 705 | } |
700 | return 0; | 706 | return 0; |
701 | } | 707 | } |
702 | 708 | machine_device_initcall(pseries, lparcfg_init); | |
703 | static void __exit lparcfg_cleanup(void) | ||
704 | { | ||
705 | remove_proc_subtree("powerpc/lparcfg", NULL); | ||
706 | } | ||
707 | |||
708 | module_init(lparcfg_init); | ||
709 | module_exit(lparcfg_cleanup); | ||
710 | MODULE_DESCRIPTION("Interface for LPAR configuration data"); | ||
711 | MODULE_AUTHOR("Dave Engebretsen"); | ||
712 | MODULE_LICENSE("GPL"); | ||
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index 7d22a675fe1a..2b4a9a4db7d9 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c | |||
@@ -1674,12 +1674,8 @@ void pcibios_scan_phb(struct pci_controller *hose) | |||
1674 | /* Configure PCI Express settings */ | 1674 | /* Configure PCI Express settings */ |
1675 | if (bus && !pci_has_flag(PCI_PROBE_ONLY)) { | 1675 | if (bus && !pci_has_flag(PCI_PROBE_ONLY)) { |
1676 | struct pci_bus *child; | 1676 | struct pci_bus *child; |
1677 | list_for_each_entry(child, &bus->children, node) { | 1677 | list_for_each_entry(child, &bus->children, node) |
1678 | struct pci_dev *self = child->self; | 1678 | pcie_bus_configure_settings(child); |
1679 | if (!self) | ||
1680 | continue; | ||
1681 | pcie_bus_configure_settings(child, self->pcie_mpss); | ||
1682 | } | ||
1683 | } | 1679 | } |
1684 | } | 1680 | } |
1685 | 1681 | ||
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index c517dbe705fd..8083be20fe5e 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
@@ -600,6 +600,16 @@ struct task_struct *__switch_to(struct task_struct *prev, | |||
600 | struct ppc64_tlb_batch *batch; | 600 | struct ppc64_tlb_batch *batch; |
601 | #endif | 601 | #endif |
602 | 602 | ||
603 | /* Back up the TAR across context switches. | ||
604 | * Note that the TAR is not available for use in the kernel. (To | ||
605 | * provide this, the TAR should be backed up/restored on exception | ||
606 | * entry/exit instead, and be in pt_regs. FIXME, this should be in | ||
607 | * pt_regs anyway (for debug).) | ||
608 | * Save the TAR here before we do treclaim/trecheckpoint as these | ||
609 | * will change the TAR. | ||
610 | */ | ||
611 | save_tar(&prev->thread); | ||
612 | |||
603 | __switch_to_tm(prev); | 613 | __switch_to_tm(prev); |
604 | 614 | ||
605 | #ifdef CONFIG_SMP | 615 | #ifdef CONFIG_SMP |
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index eb23ac92abb9..1c14cd4a5e05 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c | |||
@@ -865,49 +865,10 @@ static int __init prom_reconfig_setup(void) | |||
865 | __initcall(prom_reconfig_setup); | 865 | __initcall(prom_reconfig_setup); |
866 | #endif | 866 | #endif |
867 | 867 | ||
868 | /* Find the device node for a given logical cpu number, also returns the cpu | 868 | bool arch_match_cpu_phys_id(int cpu, u64 phys_id) |
869 | * local thread number (index in ibm,interrupt-server#s) if relevant and | ||
870 | * asked for (non NULL) | ||
871 | */ | ||
872 | struct device_node *of_get_cpu_node(int cpu, unsigned int *thread) | ||
873 | { | 869 | { |
874 | int hardid; | 870 | return (int)phys_id == get_hard_smp_processor_id(cpu); |
875 | struct device_node *np; | ||
876 | |||
877 | hardid = get_hard_smp_processor_id(cpu); | ||
878 | |||
879 | for_each_node_by_type(np, "cpu") { | ||
880 | const u32 *intserv; | ||
881 | unsigned int plen, t; | ||
882 | |||
883 | /* Check for ibm,ppc-interrupt-server#s. If it doesn't exist | ||
884 | * fallback to "reg" property and assume no threads | ||
885 | */ | ||
886 | intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", | ||
887 | &plen); | ||
888 | if (intserv == NULL) { | ||
889 | const u32 *reg = of_get_property(np, "reg", NULL); | ||
890 | if (reg == NULL) | ||
891 | continue; | ||
892 | if (*reg == hardid) { | ||
893 | if (thread) | ||
894 | *thread = 0; | ||
895 | return np; | ||
896 | } | ||
897 | } else { | ||
898 | plen /= sizeof(u32); | ||
899 | for (t = 0; t < plen; t++) { | ||
900 | if (hardid == intserv[t]) { | ||
901 | if (thread) | ||
902 | *thread = t; | ||
903 | return np; | ||
904 | } | ||
905 | } | ||
906 | } | ||
907 | } | ||
908 | return NULL; | ||
909 | } | 871 | } |
910 | EXPORT_SYMBOL(of_get_cpu_node); | ||
911 | 872 | ||
912 | #if defined(CONFIG_DEBUG_FS) && defined(DEBUG) | 873 | #if defined(CONFIG_DEBUG_FS) && defined(DEBUG) |
913 | static struct debugfs_blob_wrapper flat_dt_blob; | 874 | static struct debugfs_blob_wrapper flat_dt_blob; |
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S index 51be8fb24803..0554d1f6d70d 100644 --- a/arch/powerpc/kernel/tm.S +++ b/arch/powerpc/kernel/tm.S | |||
@@ -233,6 +233,16 @@ dont_backup_fp: | |||
233 | std r5, _CCR(r7) | 233 | std r5, _CCR(r7) |
234 | std r6, _XER(r7) | 234 | std r6, _XER(r7) |
235 | 235 | ||
236 | |||
237 | /* ******************** TAR, PPR, DSCR ********** */ | ||
238 | mfspr r3, SPRN_TAR | ||
239 | mfspr r4, SPRN_PPR | ||
240 | mfspr r5, SPRN_DSCR | ||
241 | |||
242 | std r3, THREAD_TM_TAR(r12) | ||
243 | std r4, THREAD_TM_PPR(r12) | ||
244 | std r5, THREAD_TM_DSCR(r12) | ||
245 | |||
236 | /* MSR and flags: We don't change CRs, and we don't need to alter | 246 | /* MSR and flags: We don't change CRs, and we don't need to alter |
237 | * MSR. | 247 | * MSR. |
238 | */ | 248 | */ |
@@ -347,6 +357,16 @@ dont_restore_fp: | |||
347 | mtmsr r6 /* FP/Vec off again! */ | 357 | mtmsr r6 /* FP/Vec off again! */ |
348 | 358 | ||
349 | restore_gprs: | 359 | restore_gprs: |
360 | |||
361 | /* ******************** TAR, PPR, DSCR ********** */ | ||
362 | ld r4, THREAD_TM_TAR(r3) | ||
363 | ld r5, THREAD_TM_PPR(r3) | ||
364 | ld r6, THREAD_TM_DSCR(r3) | ||
365 | |||
366 | mtspr SPRN_TAR, r4 | ||
367 | mtspr SPRN_PPR, r5 | ||
368 | mtspr SPRN_DSCR, r6 | ||
369 | |||
350 | /* ******************** CR,LR,CCR,MSR ********** */ | 370 | /* ******************** CR,LR,CCR,MSR ********** */ |
351 | ld r3, _CTR(r7) | 371 | ld r3, _CTR(r7) |
352 | ld r4, _LINK(r7) | 372 | ld r4, _LINK(r7) |
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index bf33c22e38a4..e435bc089ea3 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c | |||
@@ -44,9 +44,7 @@ | |||
44 | #include <asm/machdep.h> | 44 | #include <asm/machdep.h> |
45 | #include <asm/rtas.h> | 45 | #include <asm/rtas.h> |
46 | #include <asm/pmc.h> | 46 | #include <asm/pmc.h> |
47 | #ifdef CONFIG_PPC32 | ||
48 | #include <asm/reg.h> | 47 | #include <asm/reg.h> |
49 | #endif | ||
50 | #ifdef CONFIG_PMAC_BACKLIGHT | 48 | #ifdef CONFIG_PMAC_BACKLIGHT |
51 | #include <asm/backlight.h> | 49 | #include <asm/backlight.h> |
52 | #endif | 50 | #endif |
@@ -1296,43 +1294,54 @@ void vsx_unavailable_exception(struct pt_regs *regs) | |||
1296 | die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT); | 1294 | die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT); |
1297 | } | 1295 | } |
1298 | 1296 | ||
1297 | #ifdef CONFIG_PPC64 | ||
1299 | void facility_unavailable_exception(struct pt_regs *regs) | 1298 | void facility_unavailable_exception(struct pt_regs *regs) |
1300 | { | 1299 | { |
1301 | static char *facility_strings[] = { | 1300 | static char *facility_strings[] = { |
1302 | "FPU", | 1301 | [FSCR_FP_LG] = "FPU", |
1303 | "VMX/VSX", | 1302 | [FSCR_VECVSX_LG] = "VMX/VSX", |
1304 | "DSCR", | 1303 | [FSCR_DSCR_LG] = "DSCR", |
1305 | "PMU SPRs", | 1304 | [FSCR_PM_LG] = "PMU SPRs", |
1306 | "BHRB", | 1305 | [FSCR_BHRB_LG] = "BHRB", |
1307 | "TM", | 1306 | [FSCR_TM_LG] = "TM", |
1308 | "AT", | 1307 | [FSCR_EBB_LG] = "EBB", |
1309 | "EBB", | 1308 | [FSCR_TAR_LG] = "TAR", |
1310 | "TAR", | ||
1311 | }; | 1309 | }; |
1312 | char *facility, *prefix; | 1310 | char *facility = "unknown"; |
1313 | u64 value; | 1311 | u64 value; |
1312 | u8 status; | ||
1313 | bool hv; | ||
1314 | 1314 | ||
1315 | if (regs->trap == 0xf60) { | 1315 | hv = (regs->trap == 0xf80); |
1316 | value = mfspr(SPRN_FSCR); | 1316 | if (hv) |
1317 | prefix = ""; | ||
1318 | } else { | ||
1319 | value = mfspr(SPRN_HFSCR); | 1317 | value = mfspr(SPRN_HFSCR); |
1320 | prefix = "Hypervisor "; | 1318 | else |
1319 | value = mfspr(SPRN_FSCR); | ||
1320 | |||
1321 | status = value >> 56; | ||
1322 | if (status == FSCR_DSCR_LG) { | ||
1323 | /* User is acessing the DSCR. Set the inherit bit and allow | ||
1324 | * the user to set it directly in future by setting via the | ||
1325 | * H/FSCR DSCR bit. | ||
1326 | */ | ||
1327 | current->thread.dscr_inherit = 1; | ||
1328 | if (hv) | ||
1329 | mtspr(SPRN_HFSCR, value | HFSCR_DSCR); | ||
1330 | else | ||
1331 | mtspr(SPRN_FSCR, value | FSCR_DSCR); | ||
1332 | return; | ||
1321 | } | 1333 | } |
1322 | 1334 | ||
1323 | value = value >> 56; | 1335 | if ((status < ARRAY_SIZE(facility_strings)) && |
1336 | facility_strings[status]) | ||
1337 | facility = facility_strings[status]; | ||
1324 | 1338 | ||
1325 | /* We restore the interrupt state now */ | 1339 | /* We restore the interrupt state now */ |
1326 | if (!arch_irq_disabled_regs(regs)) | 1340 | if (!arch_irq_disabled_regs(regs)) |
1327 | local_irq_enable(); | 1341 | local_irq_enable(); |
1328 | 1342 | ||
1329 | if (value < ARRAY_SIZE(facility_strings)) | ||
1330 | facility = facility_strings[value]; | ||
1331 | else | ||
1332 | facility = "unknown"; | ||
1333 | |||
1334 | pr_err("%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n", | 1343 | pr_err("%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n", |
1335 | prefix, facility, regs->nip, regs->msr); | 1344 | hv ? "Hypervisor " : "", facility, regs->nip, regs->msr); |
1336 | 1345 | ||
1337 | if (user_mode(regs)) { | 1346 | if (user_mode(regs)) { |
1338 | _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); | 1347 | _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); |
@@ -1341,6 +1350,7 @@ void facility_unavailable_exception(struct pt_regs *regs) | |||
1341 | 1350 | ||
1342 | die("Unexpected facility unavailable exception", regs, SIGABRT); | 1351 | die("Unexpected facility unavailable exception", regs, SIGABRT); |
1343 | } | 1352 | } |
1353 | #endif | ||
1344 | 1354 | ||
1345 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | 1355 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
1346 | 1356 | ||
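The reworked facility_unavailable_exception() above pulls the interruption-cause field out of the top byte of the (H)FSCR and uses it to index a sparse, designated-initializer string table. A compact illustrative sketch of that lookup pattern, using made-up values rather than the kernel's helpers:

	#include <stdint.h>
	#include <stdio.h>

	#define FSCR_DSCR_LG 2
	#define FSCR_TAR_LG  8

	int main(void)
	{
		static const char *facility_strings[] = {
			[FSCR_DSCR_LG] = "DSCR",
			[FSCR_TAR_LG]  = "TAR",
		};
		/* The cause field lives in the top byte of the (H)FSCR value. */
		uint64_t value = (uint64_t)FSCR_TAR_LG << 56;
		uint8_t status = value >> 56;
		const char *facility = "unknown";

		if (status < sizeof(facility_strings) / sizeof(facility_strings[0]) &&
		    facility_strings[status])
			facility = facility_strings[status];

		printf("Facility '%s' unavailable\n", facility);
		return 0;
	}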
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 2efa9dde741a..7629cd3eb91a 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c | |||
@@ -1809,7 +1809,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) | |||
1809 | rma_size <<= PAGE_SHIFT; | 1809 | rma_size <<= PAGE_SHIFT; |
1810 | rmls = lpcr_rmls(rma_size); | 1810 | rmls = lpcr_rmls(rma_size); |
1811 | err = -EINVAL; | 1811 | err = -EINVAL; |
1812 | if (rmls < 0) { | 1812 | if ((long)rmls < 0) { |
1813 | pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size); | 1813 | pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size); |
1814 | goto out_srcu; | 1814 | goto out_srcu; |
1815 | } | 1815 | } |
@@ -1874,7 +1874,7 @@ int kvmppc_core_init_vm(struct kvm *kvm) | |||
1874 | /* Allocate the guest's logical partition ID */ | 1874 | /* Allocate the guest's logical partition ID */ |
1875 | 1875 | ||
1876 | lpid = kvmppc_alloc_lpid(); | 1876 | lpid = kvmppc_alloc_lpid(); |
1877 | if (lpid < 0) | 1877 | if ((long)lpid < 0) |
1878 | return -ENOMEM; | 1878 | return -ENOMEM; |
1879 | kvm->arch.lpid = lpid; | 1879 | kvm->arch.lpid = lpid; |
1880 | 1880 | ||
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 19498a567a81..c6e13d9a9e15 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c | |||
@@ -1047,11 +1047,12 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) | |||
1047 | if (err) | 1047 | if (err) |
1048 | goto free_shadow_vcpu; | 1048 | goto free_shadow_vcpu; |
1049 | 1049 | ||
1050 | err = -ENOMEM; | ||
1050 | p = __get_free_page(GFP_KERNEL|__GFP_ZERO); | 1051 | p = __get_free_page(GFP_KERNEL|__GFP_ZERO); |
1051 | /* the real shared page fills the last 4k of our page */ | ||
1052 | vcpu->arch.shared = (void*)(p + PAGE_SIZE - 4096); | ||
1053 | if (!p) | 1052 | if (!p) |
1054 | goto uninit_vcpu; | 1053 | goto uninit_vcpu; |
1054 | /* the real shared page fills the last 4k of our page */ | ||
1055 | vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096); | ||
1055 | 1056 | ||
1056 | #ifdef CONFIG_PPC_BOOK3S_64 | 1057 | #ifdef CONFIG_PPC_BOOK3S_64 |
1057 | /* default to book3s_64 (970fx) */ | 1058 | /* default to book3s_64 (970fx) */ |
diff --git a/arch/powerpc/perf/power7-events-list.h b/arch/powerpc/perf/power7-events-list.h new file mode 100644 index 000000000000..687790a2c0b8 --- /dev/null +++ b/arch/powerpc/perf/power7-events-list.h | |||
@@ -0,0 +1,548 @@ | |||
1 | /* | ||
2 | * Performance counter support for POWER7 processors. | ||
3 | * | ||
4 | * Copyright 2013 Runzhen Wang, IBM Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | EVENT(PM_IC_DEMAND_L2_BR_ALL, 0x04898) | ||
13 | EVENT(PM_GCT_UTIL_7_TO_10_SLOTS, 0x020a0) | ||
14 | EVENT(PM_PMC2_SAVED, 0x10022) | ||
15 | EVENT(PM_CMPLU_STALL_DFU, 0x2003c) | ||
16 | EVENT(PM_VSU0_16FLOP, 0x0a0a4) | ||
17 | EVENT(PM_MRK_LSU_DERAT_MISS, 0x3d05a) | ||
18 | EVENT(PM_MRK_ST_CMPL, 0x10034) | ||
19 | EVENT(PM_NEST_PAIR3_ADD, 0x40881) | ||
20 | EVENT(PM_L2_ST_DISP, 0x46180) | ||
21 | EVENT(PM_L2_CASTOUT_MOD, 0x16180) | ||
22 | EVENT(PM_ISEG, 0x020a4) | ||
23 | EVENT(PM_MRK_INST_TIMEO, 0x40034) | ||
24 | EVENT(PM_L2_RCST_DISP_FAIL_ADDR, 0x36282) | ||
25 | EVENT(PM_LSU1_DC_PREF_STREAM_CONFIRM, 0x0d0b6) | ||
26 | EVENT(PM_IERAT_WR_64K, 0x040be) | ||
27 | EVENT(PM_MRK_DTLB_MISS_16M, 0x4d05e) | ||
28 | EVENT(PM_IERAT_MISS, 0x100f6) | ||
29 | EVENT(PM_MRK_PTEG_FROM_LMEM, 0x4d052) | ||
30 | EVENT(PM_FLOP, 0x100f4) | ||
31 | EVENT(PM_THRD_PRIO_4_5_CYC, 0x040b4) | ||
32 | EVENT(PM_BR_PRED_TA, 0x040aa) | ||
33 | EVENT(PM_CMPLU_STALL_FXU, 0x20014) | ||
34 | EVENT(PM_EXT_INT, 0x200f8) | ||
35 | EVENT(PM_VSU_FSQRT_FDIV, 0x0a888) | ||
36 | EVENT(PM_MRK_LD_MISS_EXPOSED_CYC, 0x1003e) | ||
37 | EVENT(PM_LSU1_LDF, 0x0c086) | ||
38 | EVENT(PM_IC_WRITE_ALL, 0x0488c) | ||
39 | EVENT(PM_LSU0_SRQ_STFWD, 0x0c0a0) | ||
40 | EVENT(PM_PTEG_FROM_RL2L3_MOD, 0x1c052) | ||
41 | EVENT(PM_MRK_DATA_FROM_L31_SHR, 0x1d04e) | ||
42 | EVENT(PM_DATA_FROM_L21_MOD, 0x3c046) | ||
43 | EVENT(PM_VSU1_SCAL_DOUBLE_ISSUED, 0x0b08a) | ||
44 | EVENT(PM_VSU0_8FLOP, 0x0a0a0) | ||
45 | EVENT(PM_POWER_EVENT1, 0x1006e) | ||
46 | EVENT(PM_DISP_CLB_HELD_BAL, 0x02092) | ||
47 | EVENT(PM_VSU1_2FLOP, 0x0a09a) | ||
48 | EVENT(PM_LWSYNC_HELD, 0x0209a) | ||
49 | EVENT(PM_PTEG_FROM_DL2L3_SHR, 0x3c054) | ||
50 | EVENT(PM_INST_FROM_L21_MOD, 0x34046) | ||
51 | EVENT(PM_IERAT_XLATE_WR_16MPLUS, 0x040bc) | ||
52 | EVENT(PM_IC_REQ_ALL, 0x04888) | ||
53 | EVENT(PM_DSLB_MISS, 0x0d090) | ||
54 | EVENT(PM_L3_MISS, 0x1f082) | ||
55 | EVENT(PM_LSU0_L1_PREF, 0x0d0b8) | ||
56 | EVENT(PM_VSU_SCALAR_SINGLE_ISSUED, 0x0b884) | ||
57 | EVENT(PM_LSU1_DC_PREF_STREAM_CONFIRM_STRIDE, 0x0d0be) | ||
58 | EVENT(PM_L2_INST, 0x36080) | ||
59 | EVENT(PM_VSU0_FRSP, 0x0a0b4) | ||
60 | EVENT(PM_FLUSH_DISP, 0x02082) | ||
61 | EVENT(PM_PTEG_FROM_L2MISS, 0x4c058) | ||
62 | EVENT(PM_VSU1_DQ_ISSUED, 0x0b09a) | ||
63 | EVENT(PM_CMPLU_STALL_LSU, 0x20012) | ||
64 | EVENT(PM_MRK_DATA_FROM_DMEM, 0x1d04a) | ||
65 | EVENT(PM_LSU_FLUSH_ULD, 0x0c8b0) | ||
66 | EVENT(PM_PTEG_FROM_LMEM, 0x4c052) | ||
67 | EVENT(PM_MRK_DERAT_MISS_16M, 0x3d05c) | ||
68 | EVENT(PM_THRD_ALL_RUN_CYC, 0x2000c) | ||
69 | EVENT(PM_MEM0_PREFETCH_DISP, 0x20083) | ||
70 | EVENT(PM_MRK_STALL_CMPLU_CYC_COUNT, 0x3003f) | ||
71 | EVENT(PM_DATA_FROM_DL2L3_MOD, 0x3c04c) | ||
72 | EVENT(PM_VSU_FRSP, 0x0a8b4) | ||
73 | EVENT(PM_MRK_DATA_FROM_L21_MOD, 0x3d046) | ||
74 | EVENT(PM_PMC1_OVERFLOW, 0x20010) | ||
75 | EVENT(PM_VSU0_SINGLE, 0x0a0a8) | ||
76 | EVENT(PM_MRK_PTEG_FROM_L3MISS, 0x2d058) | ||
77 | EVENT(PM_MRK_PTEG_FROM_L31_SHR, 0x2d056) | ||
78 | EVENT(PM_VSU0_VECTOR_SP_ISSUED, 0x0b090) | ||
79 | EVENT(PM_VSU1_FEST, 0x0a0ba) | ||
80 | EVENT(PM_MRK_INST_DISP, 0x20030) | ||
81 | EVENT(PM_VSU0_COMPLEX_ISSUED, 0x0b096) | ||
82 | EVENT(PM_LSU1_FLUSH_UST, 0x0c0b6) | ||
83 | EVENT(PM_INST_CMPL, 0x00002) | ||
84 | EVENT(PM_FXU_IDLE, 0x1000e) | ||
85 | EVENT(PM_LSU0_FLUSH_ULD, 0x0c0b0) | ||
86 | EVENT(PM_MRK_DATA_FROM_DL2L3_MOD, 0x3d04c) | ||
87 | EVENT(PM_LSU_LMQ_SRQ_EMPTY_ALL_CYC, 0x3001c) | ||
88 | EVENT(PM_LSU1_REJECT_LMQ_FULL, 0x0c0a6) | ||
89 | EVENT(PM_INST_PTEG_FROM_L21_MOD, 0x3e056) | ||
90 | EVENT(PM_INST_FROM_RL2L3_MOD, 0x14042) | ||
91 | EVENT(PM_SHL_CREATED, 0x05082) | ||
92 | EVENT(PM_L2_ST_HIT, 0x46182) | ||
93 | EVENT(PM_DATA_FROM_DMEM, 0x1c04a) | ||
94 | EVENT(PM_L3_LD_MISS, 0x2f082) | ||
95 | EVENT(PM_FXU1_BUSY_FXU0_IDLE, 0x4000e) | ||
96 | EVENT(PM_DISP_CLB_HELD_RES, 0x02094) | ||
97 | EVENT(PM_L2_SN_SX_I_DONE, 0x36382) | ||
98 | EVENT(PM_GRP_CMPL, 0x30004) | ||
99 | EVENT(PM_STCX_CMPL, 0x0c098) | ||
100 | EVENT(PM_VSU0_2FLOP, 0x0a098) | ||
101 | EVENT(PM_L3_PREF_MISS, 0x3f082) | ||
102 | EVENT(PM_LSU_SRQ_SYNC_CYC, 0x0d096) | ||
103 | EVENT(PM_LSU_REJECT_ERAT_MISS, 0x20064) | ||
104 | EVENT(PM_L1_ICACHE_MISS, 0x200fc) | ||
105 | EVENT(PM_LSU1_FLUSH_SRQ, 0x0c0be) | ||
106 | EVENT(PM_LD_REF_L1_LSU0, 0x0c080) | ||
107 | EVENT(PM_VSU0_FEST, 0x0a0b8) | ||
108 | EVENT(PM_VSU_VECTOR_SINGLE_ISSUED, 0x0b890) | ||
109 | EVENT(PM_FREQ_UP, 0x4000c) | ||
110 | EVENT(PM_DATA_FROM_LMEM, 0x3c04a) | ||
111 | EVENT(PM_LSU1_LDX, 0x0c08a) | ||
112 | EVENT(PM_PMC3_OVERFLOW, 0x40010) | ||
113 | EVENT(PM_MRK_BR_MPRED, 0x30036) | ||
114 | EVENT(PM_SHL_MATCH, 0x05086) | ||
115 | EVENT(PM_MRK_BR_TAKEN, 0x10036) | ||
116 | EVENT(PM_CMPLU_STALL_BRU, 0x4004e) | ||
117 | EVENT(PM_ISLB_MISS, 0x0d092) | ||
118 | EVENT(PM_CYC, 0x0001e) | ||
119 | EVENT(PM_DISP_HELD_THERMAL, 0x30006) | ||
120 | EVENT(PM_INST_PTEG_FROM_RL2L3_SHR, 0x2e054) | ||
121 | EVENT(PM_LSU1_SRQ_STFWD, 0x0c0a2) | ||
122 | EVENT(PM_GCT_NOSLOT_BR_MPRED, 0x4001a) | ||
123 | EVENT(PM_1PLUS_PPC_CMPL, 0x100f2) | ||
124 | EVENT(PM_PTEG_FROM_DMEM, 0x2c052) | ||
125 | EVENT(PM_VSU_2FLOP, 0x0a898) | ||
126 | EVENT(PM_GCT_FULL_CYC, 0x04086) | ||
127 | EVENT(PM_MRK_DATA_FROM_L3_CYC, 0x40020) | ||
128 | EVENT(PM_LSU_SRQ_S0_ALLOC, 0x0d09d) | ||
129 | EVENT(PM_MRK_DERAT_MISS_4K, 0x1d05c) | ||
130 | EVENT(PM_BR_MPRED_TA, 0x040ae) | ||
131 | EVENT(PM_INST_PTEG_FROM_L2MISS, 0x4e058) | ||
132 | EVENT(PM_DPU_HELD_POWER, 0x20006) | ||
133 | EVENT(PM_RUN_INST_CMPL, 0x400fa) | ||
134 | EVENT(PM_MRK_VSU_FIN, 0x30032) | ||
135 | EVENT(PM_LSU_SRQ_S0_VALID, 0x0d09c) | ||
136 | EVENT(PM_GCT_EMPTY_CYC, 0x20008) | ||
137 | EVENT(PM_IOPS_DISP, 0x30014) | ||
138 | EVENT(PM_RUN_SPURR, 0x10008) | ||
139 | EVENT(PM_PTEG_FROM_L21_MOD, 0x3c056) | ||
140 | EVENT(PM_VSU0_1FLOP, 0x0a080) | ||
141 | EVENT(PM_SNOOP_TLBIE, 0x0d0b2) | ||
142 | EVENT(PM_DATA_FROM_L3MISS, 0x2c048) | ||
143 | EVENT(PM_VSU_SINGLE, 0x0a8a8) | ||
144 | EVENT(PM_DTLB_MISS_16G, 0x1c05e) | ||
145 | EVENT(PM_CMPLU_STALL_VECTOR, 0x2001c) | ||
146 | EVENT(PM_FLUSH, 0x400f8) | ||
147 | EVENT(PM_L2_LD_HIT, 0x36182) | ||
148 | EVENT(PM_NEST_PAIR2_AND, 0x30883) | ||
149 | EVENT(PM_VSU1_1FLOP, 0x0a082) | ||
150 | EVENT(PM_IC_PREF_REQ, 0x0408a) | ||
151 | EVENT(PM_L3_LD_HIT, 0x2f080) | ||
152 | EVENT(PM_GCT_NOSLOT_IC_MISS, 0x2001a) | ||
153 | EVENT(PM_DISP_HELD, 0x10006) | ||
154 | EVENT(PM_L2_LD, 0x16080) | ||
155 | EVENT(PM_LSU_FLUSH_SRQ, 0x0c8bc) | ||
156 | EVENT(PM_BC_PLUS_8_CONV, 0x040b8) | ||
157 | EVENT(PM_MRK_DATA_FROM_L31_MOD_CYC, 0x40026) | ||
158 | EVENT(PM_CMPLU_STALL_VECTOR_LONG, 0x4004a) | ||
159 | EVENT(PM_L2_RCST_BUSY_RC_FULL, 0x26282) | ||
160 | EVENT(PM_TB_BIT_TRANS, 0x300f8) | ||
161 | EVENT(PM_THERMAL_MAX, 0x40006) | ||
162 | EVENT(PM_LSU1_FLUSH_ULD, 0x0c0b2) | ||
163 | EVENT(PM_LSU1_REJECT_LHS, 0x0c0ae) | ||
164 | EVENT(PM_LSU_LRQ_S0_ALLOC, 0x0d09f) | ||
165 | EVENT(PM_L3_CO_L31, 0x4f080) | ||
166 | EVENT(PM_POWER_EVENT4, 0x4006e) | ||
167 | EVENT(PM_DATA_FROM_L31_SHR, 0x1c04e) | ||
168 | EVENT(PM_BR_UNCOND, 0x0409e) | ||
169 | EVENT(PM_LSU1_DC_PREF_STREAM_ALLOC, 0x0d0aa) | ||
170 | EVENT(PM_PMC4_REWIND, 0x10020) | ||
171 | EVENT(PM_L2_RCLD_DISP, 0x16280) | ||
172 | EVENT(PM_THRD_PRIO_2_3_CYC, 0x040b2) | ||
173 | EVENT(PM_MRK_PTEG_FROM_L2MISS, 0x4d058) | ||
174 | EVENT(PM_IC_DEMAND_L2_BHT_REDIRECT, 0x04098) | ||
175 | EVENT(PM_LSU_DERAT_MISS, 0x200f6) | ||
176 | EVENT(PM_IC_PREF_CANCEL_L2, 0x04094) | ||
177 | EVENT(PM_MRK_FIN_STALL_CYC_COUNT, 0x1003d) | ||
178 | EVENT(PM_BR_PRED_CCACHE, 0x040a0) | ||
179 | EVENT(PM_GCT_UTIL_1_TO_2_SLOTS, 0x0209c) | ||
180 | EVENT(PM_MRK_ST_CMPL_INT, 0x30034) | ||
181 | EVENT(PM_LSU_TWO_TABLEWALK_CYC, 0x0d0a6) | ||
182 | EVENT(PM_MRK_DATA_FROM_L3MISS, 0x2d048) | ||
183 | EVENT(PM_GCT_NOSLOT_CYC, 0x100f8) | ||
184 | EVENT(PM_LSU_SET_MPRED, 0x0c0a8) | ||
185 | EVENT(PM_FLUSH_DISP_TLBIE, 0x0208a) | ||
186 | EVENT(PM_VSU1_FCONV, 0x0a0b2) | ||
187 | EVENT(PM_DERAT_MISS_16G, 0x4c05c) | ||
188 | EVENT(PM_INST_FROM_LMEM, 0x3404a) | ||
189 | EVENT(PM_IC_DEMAND_L2_BR_REDIRECT, 0x0409a) | ||
190 | EVENT(PM_CMPLU_STALL_SCALAR_LONG, 0x20018) | ||
191 | EVENT(PM_INST_PTEG_FROM_L2, 0x1e050) | ||
192 | EVENT(PM_PTEG_FROM_L2, 0x1c050) | ||
193 | EVENT(PM_MRK_DATA_FROM_L21_SHR_CYC, 0x20024) | ||
194 | EVENT(PM_MRK_DTLB_MISS_4K, 0x2d05a) | ||
195 | EVENT(PM_VSU0_FPSCR, 0x0b09c) | ||
196 | EVENT(PM_VSU1_VECT_DOUBLE_ISSUED, 0x0b082) | ||
197 | EVENT(PM_MRK_PTEG_FROM_RL2L3_MOD, 0x1d052) | ||
198 | EVENT(PM_MEM0_RQ_DISP, 0x10083) | ||
199 | EVENT(PM_L2_LD_MISS, 0x26080) | ||
200 | EVENT(PM_VMX_RESULT_SAT_1, 0x0b0a0) | ||
201 | EVENT(PM_L1_PREF, 0x0d8b8) | ||
202 | EVENT(PM_MRK_DATA_FROM_LMEM_CYC, 0x2002c) | ||
203 | EVENT(PM_GRP_IC_MISS_NONSPEC, 0x1000c) | ||
204 | EVENT(PM_PB_NODE_PUMP, 0x10081) | ||
205 | EVENT(PM_SHL_MERGED, 0x05084) | ||
206 | EVENT(PM_NEST_PAIR1_ADD, 0x20881) | ||
207 | EVENT(PM_DATA_FROM_L3, 0x1c048) | ||
208 | EVENT(PM_LSU_FLUSH, 0x0208e) | ||
209 | EVENT(PM_LSU_SRQ_SYNC_COUNT, 0x0d097) | ||
210 | EVENT(PM_PMC2_OVERFLOW, 0x30010) | ||
211 | EVENT(PM_LSU_LDF, 0x0c884) | ||
212 | EVENT(PM_POWER_EVENT3, 0x3006e) | ||
213 | EVENT(PM_DISP_WT, 0x30008) | ||
214 | EVENT(PM_CMPLU_STALL_REJECT, 0x40016) | ||
215 | EVENT(PM_IC_BANK_CONFLICT, 0x04082) | ||
216 | EVENT(PM_BR_MPRED_CR_TA, 0x048ae) | ||
217 | EVENT(PM_L2_INST_MISS, 0x36082) | ||
218 | EVENT(PM_CMPLU_STALL_ERAT_MISS, 0x40018) | ||
219 | EVENT(PM_NEST_PAIR2_ADD, 0x30881) | ||
220 | EVENT(PM_MRK_LSU_FLUSH, 0x0d08c) | ||
221 | EVENT(PM_L2_LDST, 0x16880) | ||
222 | EVENT(PM_INST_FROM_L31_SHR, 0x1404e) | ||
223 | EVENT(PM_VSU0_FIN, 0x0a0bc) | ||
224 | EVENT(PM_LARX_LSU, 0x0c894) | ||
225 | EVENT(PM_INST_FROM_RMEM, 0x34042) | ||
226 | EVENT(PM_DISP_CLB_HELD_TLBIE, 0x02096) | ||
227 | EVENT(PM_MRK_DATA_FROM_DMEM_CYC, 0x2002e) | ||
228 | EVENT(PM_BR_PRED_CR, 0x040a8) | ||
229 | EVENT(PM_LSU_REJECT, 0x10064) | ||
230 | EVENT(PM_GCT_UTIL_3_TO_6_SLOTS, 0x0209e) | ||
231 | EVENT(PM_CMPLU_STALL_END_GCT_NOSLOT, 0x10028) | ||
232 | EVENT(PM_LSU0_REJECT_LMQ_FULL, 0x0c0a4) | ||
233 | EVENT(PM_VSU_FEST, 0x0a8b8) | ||
234 | EVENT(PM_NEST_PAIR0_AND, 0x10883) | ||
235 | EVENT(PM_PTEG_FROM_L3, 0x2c050) | ||
236 | EVENT(PM_POWER_EVENT2, 0x2006e) | ||
237 | EVENT(PM_IC_PREF_CANCEL_PAGE, 0x04090) | ||
238 | EVENT(PM_VSU0_FSQRT_FDIV, 0x0a088) | ||
239 | EVENT(PM_MRK_GRP_CMPL, 0x40030) | ||
240 | EVENT(PM_VSU0_SCAL_DOUBLE_ISSUED, 0x0b088) | ||
241 | EVENT(PM_GRP_DISP, 0x3000a) | ||
242 | EVENT(PM_LSU0_LDX, 0x0c088) | ||
243 | EVENT(PM_DATA_FROM_L2, 0x1c040) | ||
244 | EVENT(PM_MRK_DATA_FROM_RL2L3_MOD, 0x1d042) | ||
245 | EVENT(PM_LD_REF_L1, 0x0c880) | ||
246 | EVENT(PM_VSU0_VECT_DOUBLE_ISSUED, 0x0b080) | ||
247 | EVENT(PM_VSU1_2FLOP_DOUBLE, 0x0a08e) | ||
248 | EVENT(PM_THRD_PRIO_6_7_CYC, 0x040b6) | ||
249 | EVENT(PM_BC_PLUS_8_RSLV_TAKEN, 0x040ba) | ||
250 | EVENT(PM_BR_MPRED_CR, 0x040ac) | ||
251 | EVENT(PM_L3_CO_MEM, 0x4f082) | ||
252 | EVENT(PM_LD_MISS_L1, 0x400f0) | ||
253 | EVENT(PM_DATA_FROM_RL2L3_MOD, 0x1c042) | ||
254 | EVENT(PM_LSU_SRQ_FULL_CYC, 0x1001a) | ||
255 | EVENT(PM_TABLEWALK_CYC, 0x10026) | ||
256 | EVENT(PM_MRK_PTEG_FROM_RMEM, 0x3d052) | ||
257 | EVENT(PM_LSU_SRQ_STFWD, 0x0c8a0) | ||
258 | EVENT(PM_INST_PTEG_FROM_RMEM, 0x3e052) | ||
259 | EVENT(PM_FXU0_FIN, 0x10004) | ||
260 | EVENT(PM_LSU1_L1_SW_PREF, 0x0c09e) | ||
261 | EVENT(PM_PTEG_FROM_L31_MOD, 0x1c054) | ||
262 | EVENT(PM_PMC5_OVERFLOW, 0x10024) | ||
263 | EVENT(PM_LD_REF_L1_LSU1, 0x0c082) | ||
264 | EVENT(PM_INST_PTEG_FROM_L21_SHR, 0x4e056) | ||
265 | EVENT(PM_CMPLU_STALL_THRD, 0x1001c) | ||
266 | EVENT(PM_DATA_FROM_RMEM, 0x3c042) | ||
267 | EVENT(PM_VSU0_SCAL_SINGLE_ISSUED, 0x0b084) | ||
268 | EVENT(PM_BR_MPRED_LSTACK, 0x040a6) | ||
269 | EVENT(PM_MRK_DATA_FROM_RL2L3_MOD_CYC, 0x40028) | ||
270 | EVENT(PM_LSU0_FLUSH_UST, 0x0c0b4) | ||
271 | EVENT(PM_LSU_NCST, 0x0c090) | ||
272 | EVENT(PM_BR_TAKEN, 0x20004) | ||
273 | EVENT(PM_INST_PTEG_FROM_LMEM, 0x4e052) | ||
274 | EVENT(PM_GCT_NOSLOT_BR_MPRED_IC_MISS, 0x4001c) | ||
275 | EVENT(PM_DTLB_MISS_4K, 0x2c05a) | ||
276 | EVENT(PM_PMC4_SAVED, 0x30022) | ||
277 | EVENT(PM_VSU1_PERMUTE_ISSUED, 0x0b092) | ||
278 | EVENT(PM_SLB_MISS, 0x0d890) | ||
279 | EVENT(PM_LSU1_FLUSH_LRQ, 0x0c0ba) | ||
280 | EVENT(PM_DTLB_MISS, 0x300fc) | ||
281 | EVENT(PM_VSU1_FRSP, 0x0a0b6) | ||
282 | EVENT(PM_VSU_VECTOR_DOUBLE_ISSUED, 0x0b880) | ||
283 | EVENT(PM_L2_CASTOUT_SHR, 0x16182) | ||
284 | EVENT(PM_DATA_FROM_DL2L3_SHR, 0x3c044) | ||
285 | EVENT(PM_VSU1_STF, 0x0b08e) | ||
286 | EVENT(PM_ST_FIN, 0x200f0) | ||
287 | EVENT(PM_PTEG_FROM_L21_SHR, 0x4c056) | ||
288 | EVENT(PM_L2_LOC_GUESS_WRONG, 0x26480) | ||
289 | EVENT(PM_MRK_STCX_FAIL, 0x0d08e) | ||
290 | EVENT(PM_LSU0_REJECT_LHS, 0x0c0ac) | ||
291 | EVENT(PM_IC_PREF_CANCEL_HIT, 0x04092) | ||
292 | EVENT(PM_L3_PREF_BUSY, 0x4f080) | ||
293 | EVENT(PM_MRK_BRU_FIN, 0x2003a) | ||
294 | EVENT(PM_LSU1_NCLD, 0x0c08e) | ||
295 | EVENT(PM_INST_PTEG_FROM_L31_MOD, 0x1e054) | ||
296 | EVENT(PM_LSU_NCLD, 0x0c88c) | ||
297 | EVENT(PM_LSU_LDX, 0x0c888) | ||
298 | EVENT(PM_L2_LOC_GUESS_CORRECT, 0x16480) | ||
299 | EVENT(PM_THRESH_TIMEO, 0x10038) | ||
300 | EVENT(PM_L3_PREF_ST, 0x0d0ae) | ||
301 | EVENT(PM_DISP_CLB_HELD_SYNC, 0x02098) | ||
302 | EVENT(PM_VSU_SIMPLE_ISSUED, 0x0b894) | ||
303 | EVENT(PM_VSU1_SINGLE, 0x0a0aa) | ||
304 | EVENT(PM_DATA_TABLEWALK_CYC, 0x3001a) | ||
305 | EVENT(PM_L2_RC_ST_DONE, 0x36380) | ||
306 | EVENT(PM_MRK_PTEG_FROM_L21_MOD, 0x3d056) | ||
307 | EVENT(PM_LARX_LSU1, 0x0c096) | ||
308 | EVENT(PM_MRK_DATA_FROM_RMEM, 0x3d042) | ||
309 | EVENT(PM_DISP_CLB_HELD, 0x02090) | ||
310 | EVENT(PM_DERAT_MISS_4K, 0x1c05c) | ||
311 | EVENT(PM_L2_RCLD_DISP_FAIL_ADDR, 0x16282) | ||
312 | EVENT(PM_SEG_EXCEPTION, 0x028a4) | ||
313 | EVENT(PM_FLUSH_DISP_SB, 0x0208c) | ||
314 | EVENT(PM_L2_DC_INV, 0x26182) | ||
315 | EVENT(PM_PTEG_FROM_DL2L3_MOD, 0x4c054) | ||
316 | EVENT(PM_DSEG, 0x020a6) | ||
317 | EVENT(PM_BR_PRED_LSTACK, 0x040a2) | ||
318 | EVENT(PM_VSU0_STF, 0x0b08c) | ||
319 | EVENT(PM_LSU_FX_FIN, 0x10066) | ||
320 | EVENT(PM_DERAT_MISS_16M, 0x3c05c) | ||
321 | EVENT(PM_MRK_PTEG_FROM_DL2L3_MOD, 0x4d054) | ||
322 | EVENT(PM_GCT_UTIL_11_PLUS_SLOTS, 0x020a2) | ||
323 | EVENT(PM_INST_FROM_L3, 0x14048) | ||
324 | EVENT(PM_MRK_IFU_FIN, 0x3003a) | ||
325 | EVENT(PM_ITLB_MISS, 0x400fc) | ||
326 | EVENT(PM_VSU_STF, 0x0b88c) | ||
327 | EVENT(PM_LSU_FLUSH_UST, 0x0c8b4) | ||
328 | EVENT(PM_L2_LDST_MISS, 0x26880) | ||
329 | EVENT(PM_FXU1_FIN, 0x40004) | ||
330 | EVENT(PM_SHL_DEALLOCATED, 0x05080) | ||
331 | EVENT(PM_L2_SN_M_WR_DONE, 0x46382) | ||
332 | EVENT(PM_LSU_REJECT_SET_MPRED, 0x0c8a8) | ||
333 | EVENT(PM_L3_PREF_LD, 0x0d0ac) | ||
334 | EVENT(PM_L2_SN_M_RD_DONE, 0x46380) | ||
335 | EVENT(PM_MRK_DERAT_MISS_16G, 0x4d05c) | ||
336 | EVENT(PM_VSU_FCONV, 0x0a8b0) | ||
337 | EVENT(PM_ANY_THRD_RUN_CYC, 0x100fa) | ||
338 | EVENT(PM_LSU_LMQ_FULL_CYC, 0x0d0a4) | ||
339 | EVENT(PM_MRK_LSU_REJECT_LHS, 0x0d082) | ||
340 | EVENT(PM_MRK_LD_MISS_L1_CYC, 0x4003e) | ||
341 | EVENT(PM_MRK_DATA_FROM_L2_CYC, 0x20020) | ||
342 | EVENT(PM_INST_IMC_MATCH_DISP, 0x30016) | ||
343 | EVENT(PM_MRK_DATA_FROM_RMEM_CYC, 0x4002c) | ||
344 | EVENT(PM_VSU0_SIMPLE_ISSUED, 0x0b094) | ||
345 | EVENT(PM_CMPLU_STALL_DIV, 0x40014) | ||
346 | EVENT(PM_MRK_PTEG_FROM_RL2L3_SHR, 0x2d054) | ||
347 | EVENT(PM_VSU_FMA_DOUBLE, 0x0a890) | ||
348 | EVENT(PM_VSU_4FLOP, 0x0a89c) | ||
349 | EVENT(PM_VSU1_FIN, 0x0a0be) | ||
350 | EVENT(PM_NEST_PAIR1_AND, 0x20883) | ||
351 | EVENT(PM_INST_PTEG_FROM_RL2L3_MOD, 0x1e052) | ||
352 | EVENT(PM_RUN_CYC, 0x200f4) | ||
353 | EVENT(PM_PTEG_FROM_RMEM, 0x3c052) | ||
354 | EVENT(PM_LSU_LRQ_S0_VALID, 0x0d09e) | ||
355 | EVENT(PM_LSU0_LDF, 0x0c084) | ||
356 | EVENT(PM_FLUSH_COMPLETION, 0x30012) | ||
357 | EVENT(PM_ST_MISS_L1, 0x300f0) | ||
358 | EVENT(PM_L2_NODE_PUMP, 0x36480) | ||
359 | EVENT(PM_INST_FROM_DL2L3_SHR, 0x34044) | ||
360 | EVENT(PM_MRK_STALL_CMPLU_CYC, 0x3003e) | ||
361 | EVENT(PM_VSU1_DENORM, 0x0a0ae) | ||
362 | EVENT(PM_MRK_DATA_FROM_L31_SHR_CYC, 0x20026) | ||
363 | EVENT(PM_NEST_PAIR0_ADD, 0x10881) | ||
364 | EVENT(PM_INST_FROM_L3MISS, 0x24048) | ||
365 | EVENT(PM_EE_OFF_EXT_INT, 0x02080) | ||
366 | EVENT(PM_INST_PTEG_FROM_DMEM, 0x2e052) | ||
367 | EVENT(PM_INST_FROM_DL2L3_MOD, 0x3404c) | ||
368 | EVENT(PM_PMC6_OVERFLOW, 0x30024) | ||
369 | EVENT(PM_VSU_2FLOP_DOUBLE, 0x0a88c) | ||
370 | EVENT(PM_TLB_MISS, 0x20066) | ||
371 | EVENT(PM_FXU_BUSY, 0x2000e) | ||
372 | EVENT(PM_L2_RCLD_DISP_FAIL_OTHER, 0x26280) | ||
373 | EVENT(PM_LSU_REJECT_LMQ_FULL, 0x0c8a4) | ||
374 | EVENT(PM_IC_RELOAD_SHR, 0x04096) | ||
375 | EVENT(PM_GRP_MRK, 0x10031) | ||
376 | EVENT(PM_MRK_ST_NEST, 0x20034) | ||
377 | EVENT(PM_VSU1_FSQRT_FDIV, 0x0a08a) | ||
378 | EVENT(PM_LSU0_FLUSH_LRQ, 0x0c0b8) | ||
379 | EVENT(PM_LARX_LSU0, 0x0c094) | ||
380 | EVENT(PM_IBUF_FULL_CYC, 0x04084) | ||
381 | EVENT(PM_MRK_DATA_FROM_DL2L3_SHR_CYC, 0x2002a) | ||
382 | EVENT(PM_LSU_DC_PREF_STREAM_ALLOC, 0x0d8a8) | ||
383 | EVENT(PM_GRP_MRK_CYC, 0x10030) | ||
384 | EVENT(PM_MRK_DATA_FROM_RL2L3_SHR_CYC, 0x20028) | ||
385 | EVENT(PM_L2_GLOB_GUESS_CORRECT, 0x16482) | ||
386 | EVENT(PM_LSU_REJECT_LHS, 0x0c8ac) | ||
387 | EVENT(PM_MRK_DATA_FROM_LMEM, 0x3d04a) | ||
388 | EVENT(PM_INST_PTEG_FROM_L3, 0x2e050) | ||
389 | EVENT(PM_FREQ_DOWN, 0x3000c) | ||
390 | EVENT(PM_PB_RETRY_NODE_PUMP, 0x30081) | ||
391 | EVENT(PM_INST_FROM_RL2L3_SHR, 0x1404c) | ||
392 | EVENT(PM_MRK_INST_ISSUED, 0x10032) | ||
393 | EVENT(PM_PTEG_FROM_L3MISS, 0x2c058) | ||
394 | EVENT(PM_RUN_PURR, 0x400f4) | ||
395 | EVENT(PM_MRK_GRP_IC_MISS, 0x40038) | ||
396 | EVENT(PM_MRK_DATA_FROM_L3, 0x1d048) | ||
397 | EVENT(PM_CMPLU_STALL_DCACHE_MISS, 0x20016) | ||
398 | EVENT(PM_PTEG_FROM_RL2L3_SHR, 0x2c054) | ||
399 | EVENT(PM_LSU_FLUSH_LRQ, 0x0c8b8) | ||
400 | EVENT(PM_MRK_DERAT_MISS_64K, 0x2d05c) | ||
401 | EVENT(PM_INST_PTEG_FROM_DL2L3_MOD, 0x4e054) | ||
402 | EVENT(PM_L2_ST_MISS, 0x26082) | ||
403 | EVENT(PM_MRK_PTEG_FROM_L21_SHR, 0x4d056) | ||
404 | EVENT(PM_LWSYNC, 0x0d094) | ||
405 | EVENT(PM_LSU0_DC_PREF_STREAM_CONFIRM_STRIDE, 0x0d0bc) | ||
406 | EVENT(PM_MRK_LSU_FLUSH_LRQ, 0x0d088) | ||
407 | EVENT(PM_INST_IMC_MATCH_CMPL, 0x100f0) | ||
408 | EVENT(PM_NEST_PAIR3_AND, 0x40883) | ||
409 | EVENT(PM_PB_RETRY_SYS_PUMP, 0x40081) | ||
410 | EVENT(PM_MRK_INST_FIN, 0x30030) | ||
411 | EVENT(PM_MRK_PTEG_FROM_DL2L3_SHR, 0x3d054) | ||
412 | EVENT(PM_INST_FROM_L31_MOD, 0x14044) | ||
413 | EVENT(PM_MRK_DTLB_MISS_64K, 0x3d05e) | ||
414 | EVENT(PM_LSU_FIN, 0x30066) | ||
415 | EVENT(PM_MRK_LSU_REJECT, 0x40064) | ||
416 | EVENT(PM_L2_CO_FAIL_BUSY, 0x16382) | ||
417 | EVENT(PM_MEM0_WQ_DISP, 0x40083) | ||
418 | EVENT(PM_DATA_FROM_L31_MOD, 0x1c044) | ||
419 | EVENT(PM_THERMAL_WARN, 0x10016) | ||
420 | EVENT(PM_VSU0_4FLOP, 0x0a09c) | ||
421 | EVENT(PM_BR_MPRED_CCACHE, 0x040a4) | ||
422 | EVENT(PM_CMPLU_STALL_IFU, 0x4004c) | ||
423 | EVENT(PM_L1_DEMAND_WRITE, 0x0408c) | ||
424 | EVENT(PM_FLUSH_BR_MPRED, 0x02084) | ||
425 | EVENT(PM_MRK_DTLB_MISS_16G, 0x1d05e) | ||
426 | EVENT(PM_MRK_PTEG_FROM_DMEM, 0x2d052) | ||
427 | EVENT(PM_L2_RCST_DISP, 0x36280) | ||
428 | EVENT(PM_CMPLU_STALL, 0x4000a) | ||
429 | EVENT(PM_LSU_PARTIAL_CDF, 0x0c0aa) | ||
430 | EVENT(PM_DISP_CLB_HELD_SB, 0x020a8) | ||
431 | EVENT(PM_VSU0_FMA_DOUBLE, 0x0a090) | ||
432 | EVENT(PM_FXU0_BUSY_FXU1_IDLE, 0x3000e) | ||
433 | EVENT(PM_IC_DEMAND_CYC, 0x10018) | ||
434 | EVENT(PM_MRK_DATA_FROM_L21_SHR, 0x3d04e) | ||
435 | EVENT(PM_MRK_LSU_FLUSH_UST, 0x0d086) | ||
436 | EVENT(PM_INST_PTEG_FROM_L3MISS, 0x2e058) | ||
437 | EVENT(PM_VSU_DENORM, 0x0a8ac) | ||
438 | EVENT(PM_MRK_LSU_PARTIAL_CDF, 0x0d080) | ||
439 | EVENT(PM_INST_FROM_L21_SHR, 0x3404e) | ||
440 | EVENT(PM_IC_PREF_WRITE, 0x0408e) | ||
441 | EVENT(PM_BR_PRED, 0x0409c) | ||
442 | EVENT(PM_INST_FROM_DMEM, 0x1404a) | ||
443 | EVENT(PM_IC_PREF_CANCEL_ALL, 0x04890) | ||
444 | EVENT(PM_LSU_DC_PREF_STREAM_CONFIRM, 0x0d8b4) | ||
445 | EVENT(PM_MRK_LSU_FLUSH_SRQ, 0x0d08a) | ||
446 | EVENT(PM_MRK_FIN_STALL_CYC, 0x1003c) | ||
447 | EVENT(PM_L2_RCST_DISP_FAIL_OTHER, 0x46280) | ||
448 | EVENT(PM_VSU1_DD_ISSUED, 0x0b098) | ||
449 | EVENT(PM_PTEG_FROM_L31_SHR, 0x2c056) | ||
450 | EVENT(PM_DATA_FROM_L21_SHR, 0x3c04e) | ||
451 | EVENT(PM_LSU0_NCLD, 0x0c08c) | ||
452 | EVENT(PM_VSU1_4FLOP, 0x0a09e) | ||
453 | EVENT(PM_VSU1_8FLOP, 0x0a0a2) | ||
454 | EVENT(PM_VSU_8FLOP, 0x0a8a0) | ||
455 | EVENT(PM_LSU_LMQ_SRQ_EMPTY_CYC, 0x2003e) | ||
456 | EVENT(PM_DTLB_MISS_64K, 0x3c05e) | ||
457 | EVENT(PM_THRD_CONC_RUN_INST, 0x300f4) | ||
458 | EVENT(PM_MRK_PTEG_FROM_L2, 0x1d050) | ||
459 | EVENT(PM_PB_SYS_PUMP, 0x20081) | ||
460 | EVENT(PM_VSU_FIN, 0x0a8bc) | ||
461 | EVENT(PM_MRK_DATA_FROM_L31_MOD, 0x1d044) | ||
462 | EVENT(PM_THRD_PRIO_0_1_CYC, 0x040b0) | ||
463 | EVENT(PM_DERAT_MISS_64K, 0x2c05c) | ||
464 | EVENT(PM_PMC2_REWIND, 0x30020) | ||
465 | EVENT(PM_INST_FROM_L2, 0x14040) | ||
466 | EVENT(PM_GRP_BR_MPRED_NONSPEC, 0x1000a) | ||
467 | EVENT(PM_INST_DISP, 0x200f2) | ||
468 | EVENT(PM_MEM0_RD_CANCEL_TOTAL, 0x30083) | ||
469 | EVENT(PM_LSU0_DC_PREF_STREAM_CONFIRM, 0x0d0b4) | ||
470 | EVENT(PM_L1_DCACHE_RELOAD_VALID, 0x300f6) | ||
471 | EVENT(PM_VSU_SCALAR_DOUBLE_ISSUED, 0x0b888) | ||
472 | EVENT(PM_L3_PREF_HIT, 0x3f080) | ||
473 | EVENT(PM_MRK_PTEG_FROM_L31_MOD, 0x1d054) | ||
474 | EVENT(PM_CMPLU_STALL_STORE, 0x2004a) | ||
475 | EVENT(PM_MRK_FXU_FIN, 0x20038) | ||
476 | EVENT(PM_PMC4_OVERFLOW, 0x10010) | ||
477 | EVENT(PM_MRK_PTEG_FROM_L3, 0x2d050) | ||
478 | EVENT(PM_LSU0_LMQ_LHR_MERGE, 0x0d098) | ||
479 | EVENT(PM_BTAC_HIT, 0x0508a) | ||
480 | EVENT(PM_L3_RD_BUSY, 0x4f082) | ||
481 | EVENT(PM_LSU0_L1_SW_PREF, 0x0c09c) | ||
482 | EVENT(PM_INST_FROM_L2MISS, 0x44048) | ||
483 | EVENT(PM_LSU0_DC_PREF_STREAM_ALLOC, 0x0d0a8) | ||
484 | EVENT(PM_L2_ST, 0x16082) | ||
485 | EVENT(PM_VSU0_DENORM, 0x0a0ac) | ||
486 | EVENT(PM_MRK_DATA_FROM_DL2L3_SHR, 0x3d044) | ||
487 | EVENT(PM_BR_PRED_CR_TA, 0x048aa) | ||
488 | EVENT(PM_VSU0_FCONV, 0x0a0b0) | ||
489 | EVENT(PM_MRK_LSU_FLUSH_ULD, 0x0d084) | ||
490 | EVENT(PM_BTAC_MISS, 0x05088) | ||
491 | EVENT(PM_MRK_LD_MISS_EXPOSED_CYC_COUNT, 0x1003f) | ||
492 | EVENT(PM_MRK_DATA_FROM_L2, 0x1d040) | ||
493 | EVENT(PM_LSU_DCACHE_RELOAD_VALID, 0x0d0a2) | ||
494 | EVENT(PM_VSU_FMA, 0x0a884) | ||
495 | EVENT(PM_LSU0_FLUSH_SRQ, 0x0c0bc) | ||
496 | EVENT(PM_LSU1_L1_PREF, 0x0d0ba) | ||
497 | EVENT(PM_IOPS_CMPL, 0x10014) | ||
498 | EVENT(PM_L2_SYS_PUMP, 0x36482) | ||
499 | EVENT(PM_L2_RCLD_BUSY_RC_FULL, 0x46282) | ||
500 | EVENT(PM_LSU_LMQ_S0_ALLOC, 0x0d0a1) | ||
501 | EVENT(PM_FLUSH_DISP_SYNC, 0x02088) | ||
502 | EVENT(PM_MRK_DATA_FROM_DL2L3_MOD_CYC, 0x4002a) | ||
503 | EVENT(PM_L2_IC_INV, 0x26180) | ||
504 | EVENT(PM_MRK_DATA_FROM_L21_MOD_CYC, 0x40024) | ||
505 | EVENT(PM_L3_PREF_LDST, 0x0d8ac) | ||
506 | EVENT(PM_LSU_SRQ_EMPTY_CYC, 0x40008) | ||
507 | EVENT(PM_LSU_LMQ_S0_VALID, 0x0d0a0) | ||
508 | EVENT(PM_FLUSH_PARTIAL, 0x02086) | ||
509 | EVENT(PM_VSU1_FMA_DOUBLE, 0x0a092) | ||
510 | EVENT(PM_1PLUS_PPC_DISP, 0x400f2) | ||
511 | EVENT(PM_DATA_FROM_L2MISS, 0x200fe) | ||
512 | EVENT(PM_SUSPENDED, 0x00000) | ||
513 | EVENT(PM_VSU0_FMA, 0x0a084) | ||
514 | EVENT(PM_CMPLU_STALL_SCALAR, 0x40012) | ||
515 | EVENT(PM_STCX_FAIL, 0x0c09a) | ||
516 | EVENT(PM_VSU0_FSQRT_FDIV_DOUBLE, 0x0a094) | ||
517 | EVENT(PM_DC_PREF_DST, 0x0d0b0) | ||
518 | EVENT(PM_VSU1_SCAL_SINGLE_ISSUED, 0x0b086) | ||
519 | EVENT(PM_L3_HIT, 0x1f080) | ||
520 | EVENT(PM_L2_GLOB_GUESS_WRONG, 0x26482) | ||
521 | EVENT(PM_MRK_DFU_FIN, 0x20032) | ||
522 | EVENT(PM_INST_FROM_L1, 0x04080) | ||
523 | EVENT(PM_BRU_FIN, 0x10068) | ||
524 | EVENT(PM_IC_DEMAND_REQ, 0x04088) | ||
525 | EVENT(PM_VSU1_FSQRT_FDIV_DOUBLE, 0x0a096) | ||
526 | EVENT(PM_VSU1_FMA, 0x0a086) | ||
527 | EVENT(PM_MRK_LD_MISS_L1, 0x20036) | ||
528 | EVENT(PM_VSU0_2FLOP_DOUBLE, 0x0a08c) | ||
529 | EVENT(PM_LSU_DC_PREF_STRIDED_STREAM_CONFIRM, 0x0d8bc) | ||
530 | EVENT(PM_INST_PTEG_FROM_L31_SHR, 0x2e056) | ||
531 | EVENT(PM_MRK_LSU_REJECT_ERAT_MISS, 0x30064) | ||
532 | EVENT(PM_MRK_DATA_FROM_L2MISS, 0x4d048) | ||
533 | EVENT(PM_DATA_FROM_RL2L3_SHR, 0x1c04c) | ||
534 | EVENT(PM_INST_FROM_PREF, 0x14046) | ||
535 | EVENT(PM_VSU1_SQ, 0x0b09e) | ||
536 | EVENT(PM_L2_LD_DISP, 0x36180) | ||
537 | EVENT(PM_L2_DISP_ALL, 0x46080) | ||
538 | EVENT(PM_THRD_GRP_CMPL_BOTH_CYC, 0x10012) | ||
539 | EVENT(PM_VSU_FSQRT_FDIV_DOUBLE, 0x0a894) | ||
540 | EVENT(PM_BR_MPRED, 0x400f6) | ||
541 | EVENT(PM_INST_PTEG_FROM_DL2L3_SHR, 0x3e054) | ||
542 | EVENT(PM_VSU_1FLOP, 0x0a880) | ||
543 | EVENT(PM_HV_CYC, 0x2000a) | ||
544 | EVENT(PM_MRK_LSU_FIN, 0x40032) | ||
545 | EVENT(PM_MRK_DATA_FROM_RL2L3_SHR, 0x1d04c) | ||
546 | EVENT(PM_DTLB_MISS_16M, 0x4c05e) | ||
547 | EVENT(PM_LSU1_LMQ_LHR_MERGE, 0x0d09a) | ||
548 | EVENT(PM_IFU_FIN, 0x40066) | ||
diff --git a/arch/powerpc/perf/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c index d1821b8bbc4c..56c67bca2f75 100644 --- a/arch/powerpc/perf/power7-pmu.c +++ b/arch/powerpc/perf/power7-pmu.c | |||
@@ -53,37 +53,13 @@ | |||
53 | /* | 53 | /* |
54 | * Power7 event codes. | 54 | * Power7 event codes. |
55 | */ | 55 | */ |
56 | #define PME_PM_CYC 0x1e | 56 | #define EVENT(_name, _code) \ |
57 | #define PME_PM_GCT_NOSLOT_CYC 0x100f8 | 57 | PME_##_name = _code, |
58 | #define PME_PM_CMPLU_STALL 0x4000a | 58 | |
59 | #define PME_PM_INST_CMPL 0x2 | 59 | enum { |
60 | #define PME_PM_LD_REF_L1 0xc880 | 60 | #include "power7-events-list.h" |
61 | #define PME_PM_LD_MISS_L1 0x400f0 | 61 | }; |
62 | #define PME_PM_BRU_FIN 0x10068 | 62 | #undef EVENT |
63 | #define PME_PM_BR_MPRED 0x400f6 | ||
64 | |||
65 | #define PME_PM_CMPLU_STALL_FXU 0x20014 | ||
66 | #define PME_PM_CMPLU_STALL_DIV 0x40014 | ||
67 | #define PME_PM_CMPLU_STALL_SCALAR 0x40012 | ||
68 | #define PME_PM_CMPLU_STALL_SCALAR_LONG 0x20018 | ||
69 | #define PME_PM_CMPLU_STALL_VECTOR 0x2001c | ||
70 | #define PME_PM_CMPLU_STALL_VECTOR_LONG 0x4004a | ||
71 | #define PME_PM_CMPLU_STALL_LSU 0x20012 | ||
72 | #define PME_PM_CMPLU_STALL_REJECT 0x40016 | ||
73 | #define PME_PM_CMPLU_STALL_ERAT_MISS 0x40018 | ||
74 | #define PME_PM_CMPLU_STALL_DCACHE_MISS 0x20016 | ||
75 | #define PME_PM_CMPLU_STALL_STORE 0x2004a | ||
76 | #define PME_PM_CMPLU_STALL_THRD 0x1001c | ||
77 | #define PME_PM_CMPLU_STALL_IFU 0x4004c | ||
78 | #define PME_PM_CMPLU_STALL_BRU 0x4004e | ||
79 | #define PME_PM_GCT_NOSLOT_IC_MISS 0x2001a | ||
80 | #define PME_PM_GCT_NOSLOT_BR_MPRED 0x4001a | ||
81 | #define PME_PM_GCT_NOSLOT_BR_MPRED_IC_MISS 0x4001c | ||
82 | #define PME_PM_GRP_CMPL 0x30004 | ||
83 | #define PME_PM_1PLUS_PPC_CMPL 0x100f2 | ||
84 | #define PME_PM_CMPLU_STALL_DFU 0x2003c | ||
85 | #define PME_PM_RUN_CYC 0x200f4 | ||
86 | #define PME_PM_RUN_INST_CMPL 0x400fa | ||
87 | 63 | ||
88 | /* | 64 | /* |
89 | * Layout of constraint bits: | 65 | * Layout of constraint bits: |
@@ -398,96 +374,36 @@ static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = { | |||
398 | }; | 374 | }; |
399 | 375 | ||
400 | 376 | ||
401 | GENERIC_EVENT_ATTR(cpu-cycles, CYC); | 377 | GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC); |
402 | GENERIC_EVENT_ATTR(stalled-cycles-frontend, GCT_NOSLOT_CYC); | 378 | GENERIC_EVENT_ATTR(stalled-cycles-frontend, PM_GCT_NOSLOT_CYC); |
403 | GENERIC_EVENT_ATTR(stalled-cycles-backend, CMPLU_STALL); | 379 | GENERIC_EVENT_ATTR(stalled-cycles-backend, PM_CMPLU_STALL); |
404 | GENERIC_EVENT_ATTR(instructions, INST_CMPL); | 380 | GENERIC_EVENT_ATTR(instructions, PM_INST_CMPL); |
405 | GENERIC_EVENT_ATTR(cache-references, LD_REF_L1); | 381 | GENERIC_EVENT_ATTR(cache-references, PM_LD_REF_L1); |
406 | GENERIC_EVENT_ATTR(cache-misses, LD_MISS_L1); | 382 | GENERIC_EVENT_ATTR(cache-misses, PM_LD_MISS_L1); |
407 | GENERIC_EVENT_ATTR(branch-instructions, BRU_FIN); | 383 | GENERIC_EVENT_ATTR(branch-instructions, PM_BRU_FIN); |
408 | GENERIC_EVENT_ATTR(branch-misses, BR_MPRED); | 384 | GENERIC_EVENT_ATTR(branch-misses, PM_BR_MPRED); |
409 | 385 | ||
410 | POWER_EVENT_ATTR(CYC, CYC); | 386 | #define EVENT(_name, _code) POWER_EVENT_ATTR(_name, _name); |
411 | POWER_EVENT_ATTR(GCT_NOSLOT_CYC, GCT_NOSLOT_CYC); | 387 | #include "power7-events-list.h" |
412 | POWER_EVENT_ATTR(CMPLU_STALL, CMPLU_STALL); | 388 | #undef EVENT |
413 | POWER_EVENT_ATTR(INST_CMPL, INST_CMPL); | 389 | |
414 | POWER_EVENT_ATTR(LD_REF_L1, LD_REF_L1); | 390 | #define EVENT(_name, _code) POWER_EVENT_PTR(_name), |
415 | POWER_EVENT_ATTR(LD_MISS_L1, LD_MISS_L1); | ||
416 | POWER_EVENT_ATTR(BRU_FIN, BRU_FIN) | ||
417 | POWER_EVENT_ATTR(BR_MPRED, BR_MPRED); | ||
418 | |||
419 | POWER_EVENT_ATTR(CMPLU_STALL_FXU, CMPLU_STALL_FXU); | ||
420 | POWER_EVENT_ATTR(CMPLU_STALL_DIV, CMPLU_STALL_DIV); | ||
421 | POWER_EVENT_ATTR(CMPLU_STALL_SCALAR, CMPLU_STALL_SCALAR); | ||
422 | POWER_EVENT_ATTR(CMPLU_STALL_SCALAR_LONG, CMPLU_STALL_SCALAR_LONG); | ||
423 | POWER_EVENT_ATTR(CMPLU_STALL_VECTOR, CMPLU_STALL_VECTOR); | ||
424 | POWER_EVENT_ATTR(CMPLU_STALL_VECTOR_LONG, CMPLU_STALL_VECTOR_LONG); | ||
425 | POWER_EVENT_ATTR(CMPLU_STALL_LSU, CMPLU_STALL_LSU); | ||
426 | POWER_EVENT_ATTR(CMPLU_STALL_REJECT, CMPLU_STALL_REJECT); | ||
427 | |||
428 | POWER_EVENT_ATTR(CMPLU_STALL_ERAT_MISS, CMPLU_STALL_ERAT_MISS); | ||
429 | POWER_EVENT_ATTR(CMPLU_STALL_DCACHE_MISS, CMPLU_STALL_DCACHE_MISS); | ||
430 | POWER_EVENT_ATTR(CMPLU_STALL_STORE, CMPLU_STALL_STORE); | ||
431 | POWER_EVENT_ATTR(CMPLU_STALL_THRD, CMPLU_STALL_THRD); | ||
432 | POWER_EVENT_ATTR(CMPLU_STALL_IFU, CMPLU_STALL_IFU); | ||
433 | POWER_EVENT_ATTR(CMPLU_STALL_BRU, CMPLU_STALL_BRU); | ||
434 | POWER_EVENT_ATTR(GCT_NOSLOT_IC_MISS, GCT_NOSLOT_IC_MISS); | ||
435 | |||
436 | POWER_EVENT_ATTR(GCT_NOSLOT_BR_MPRED, GCT_NOSLOT_BR_MPRED); | ||
437 | POWER_EVENT_ATTR(GCT_NOSLOT_BR_MPRED_IC_MISS, GCT_NOSLOT_BR_MPRED_IC_MISS); | ||
438 | POWER_EVENT_ATTR(GRP_CMPL, GRP_CMPL); | ||
439 | POWER_EVENT_ATTR(1PLUS_PPC_CMPL, 1PLUS_PPC_CMPL); | ||
440 | POWER_EVENT_ATTR(CMPLU_STALL_DFU, CMPLU_STALL_DFU); | ||
441 | POWER_EVENT_ATTR(RUN_CYC, RUN_CYC); | ||
442 | POWER_EVENT_ATTR(RUN_INST_CMPL, RUN_INST_CMPL); | ||
443 | 391 | ||
444 | static struct attribute *power7_events_attr[] = { | 392 | static struct attribute *power7_events_attr[] = { |
445 | GENERIC_EVENT_PTR(CYC), | 393 | GENERIC_EVENT_PTR(PM_CYC), |
446 | GENERIC_EVENT_PTR(GCT_NOSLOT_CYC), | 394 | GENERIC_EVENT_PTR(PM_GCT_NOSLOT_CYC), |
447 | GENERIC_EVENT_PTR(CMPLU_STALL), | 395 | GENERIC_EVENT_PTR(PM_CMPLU_STALL), |
448 | GENERIC_EVENT_PTR(INST_CMPL), | 396 | GENERIC_EVENT_PTR(PM_INST_CMPL), |
449 | GENERIC_EVENT_PTR(LD_REF_L1), | 397 | GENERIC_EVENT_PTR(PM_LD_REF_L1), |
450 | GENERIC_EVENT_PTR(LD_MISS_L1), | 398 | GENERIC_EVENT_PTR(PM_LD_MISS_L1), |
451 | GENERIC_EVENT_PTR(BRU_FIN), | 399 | GENERIC_EVENT_PTR(PM_BRU_FIN), |
452 | GENERIC_EVENT_PTR(BR_MPRED), | 400 | GENERIC_EVENT_PTR(PM_BR_MPRED), |
453 | 401 | ||
454 | POWER_EVENT_PTR(CYC), | 402 | #include "power7-events-list.h" |
455 | POWER_EVENT_PTR(GCT_NOSLOT_CYC), | 403 | #undef EVENT |
456 | POWER_EVENT_PTR(CMPLU_STALL), | ||
457 | POWER_EVENT_PTR(INST_CMPL), | ||
458 | POWER_EVENT_PTR(LD_REF_L1), | ||
459 | POWER_EVENT_PTR(LD_MISS_L1), | ||
460 | POWER_EVENT_PTR(BRU_FIN), | ||
461 | POWER_EVENT_PTR(BR_MPRED), | ||
462 | |||
463 | POWER_EVENT_PTR(CMPLU_STALL_FXU), | ||
464 | POWER_EVENT_PTR(CMPLU_STALL_DIV), | ||
465 | POWER_EVENT_PTR(CMPLU_STALL_SCALAR), | ||
466 | POWER_EVENT_PTR(CMPLU_STALL_SCALAR_LONG), | ||
467 | POWER_EVENT_PTR(CMPLU_STALL_VECTOR), | ||
468 | POWER_EVENT_PTR(CMPLU_STALL_VECTOR_LONG), | ||
469 | POWER_EVENT_PTR(CMPLU_STALL_LSU), | ||
470 | POWER_EVENT_PTR(CMPLU_STALL_REJECT), | ||
471 | |||
472 | POWER_EVENT_PTR(CMPLU_STALL_ERAT_MISS), | ||
473 | POWER_EVENT_PTR(CMPLU_STALL_DCACHE_MISS), | ||
474 | POWER_EVENT_PTR(CMPLU_STALL_STORE), | ||
475 | POWER_EVENT_PTR(CMPLU_STALL_THRD), | ||
476 | POWER_EVENT_PTR(CMPLU_STALL_IFU), | ||
477 | POWER_EVENT_PTR(CMPLU_STALL_BRU), | ||
478 | POWER_EVENT_PTR(GCT_NOSLOT_IC_MISS), | ||
479 | POWER_EVENT_PTR(GCT_NOSLOT_BR_MPRED), | ||
480 | |||
481 | POWER_EVENT_PTR(GCT_NOSLOT_BR_MPRED_IC_MISS), | ||
482 | POWER_EVENT_PTR(GRP_CMPL), | ||
483 | POWER_EVENT_PTR(1PLUS_PPC_CMPL), | ||
484 | POWER_EVENT_PTR(CMPLU_STALL_DFU), | ||
485 | POWER_EVENT_PTR(RUN_CYC), | ||
486 | POWER_EVENT_PTR(RUN_INST_CMPL), | ||
487 | NULL | 404 | NULL |
488 | }; | 405 | }; |
489 | 406 | ||
490 | |||
491 | static struct attribute_group power7_pmu_events_group = { | 407 | static struct attribute_group power7_pmu_events_group = { |
492 | .name = "events", | 408 | .name = "events", |
493 | .attrs = power7_events_attr, | 409 | .attrs = power7_events_attr, |
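
The power7-pmu.c hunk above replaces the hand-maintained PME_* defines and the parallel POWER_EVENT_ATTR/POWER_EVENT_PTR lists with a single X-macro list, power7-events-list.h, which is included several times with a different EVENT() definition each time. Below is a minimal, self-contained sketch of the same pattern; the names and the in-file EVENT_LIST macro are illustrative only (the kernel re-includes a separate header rather than using a list macro), and none of the kernel's attribute machinery is reproduced.

#include <stdio.h>

#define EVENT_LIST \
	EVENT(PM_CYC,       0x1e) \
	EVENT(PM_INST_CMPL, 0x2) \
	EVENT(PM_BRU_FIN,   0x10068)

/* Expansion 1: an enum mapping event names to their raw codes. */
#define EVENT(_name, _code) _name = _code,
enum power_event { EVENT_LIST };
#undef EVENT

/* Expansion 2: a name/code table that cannot drift out of sync with the enum. */
#define EVENT(_name, _code) { #_name, _code },
static const struct { const char *name; unsigned long code; } events[] = {
	EVENT_LIST
};
#undef EVENT

int main(void)
{
	for (size_t i = 0; i < sizeof(events) / sizeof(events[0]); i++)
		printf("%-12s 0x%lx\n", events[i].name, events[i].code);
	return 0;
}
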
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c index 9f8671a44551..d276cd3edd8f 100644 --- a/arch/powerpc/platforms/pseries/nvram.c +++ b/arch/powerpc/platforms/pseries/nvram.c | |||
@@ -539,65 +539,6 @@ static int zip_oops(size_t text_len) | |||
539 | } | 539 | } |
540 | 540 | ||
541 | #ifdef CONFIG_PSTORE | 541 | #ifdef CONFIG_PSTORE |
542 | /* Derived from logfs_uncompress */ | ||
543 | int nvram_decompress(void *in, void *out, size_t inlen, size_t outlen) | ||
544 | { | ||
545 | int err, ret; | ||
546 | |||
547 | ret = -EIO; | ||
548 | err = zlib_inflateInit(&stream); | ||
549 | if (err != Z_OK) | ||
550 | goto error; | ||
551 | |||
552 | stream.next_in = in; | ||
553 | stream.avail_in = inlen; | ||
554 | stream.total_in = 0; | ||
555 | stream.next_out = out; | ||
556 | stream.avail_out = outlen; | ||
557 | stream.total_out = 0; | ||
558 | |||
559 | err = zlib_inflate(&stream, Z_FINISH); | ||
560 | if (err != Z_STREAM_END) | ||
561 | goto error; | ||
562 | |||
563 | err = zlib_inflateEnd(&stream); | ||
564 | if (err != Z_OK) | ||
565 | goto error; | ||
566 | |||
567 | ret = stream.total_out; | ||
568 | error: | ||
569 | return ret; | ||
570 | } | ||
571 | |||
572 | static int unzip_oops(char *oops_buf, char *big_buf) | ||
573 | { | ||
574 | struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf; | ||
575 | u64 timestamp = oops_hdr->timestamp; | ||
576 | char *big_oops_data = NULL; | ||
577 | char *oops_data_buf = NULL; | ||
578 | size_t big_oops_data_sz; | ||
579 | int unzipped_len; | ||
580 | |||
581 | big_oops_data = big_buf + sizeof(struct oops_log_info); | ||
582 | big_oops_data_sz = big_oops_buf_sz - sizeof(struct oops_log_info); | ||
583 | oops_data_buf = oops_buf + sizeof(struct oops_log_info); | ||
584 | |||
585 | unzipped_len = nvram_decompress(oops_data_buf, big_oops_data, | ||
586 | oops_hdr->report_length, | ||
587 | big_oops_data_sz); | ||
588 | |||
589 | if (unzipped_len < 0) { | ||
590 | pr_err("nvram: decompression failed; returned %d\n", | ||
591 | unzipped_len); | ||
592 | return -1; | ||
593 | } | ||
594 | oops_hdr = (struct oops_log_info *)big_buf; | ||
595 | oops_hdr->version = OOPS_HDR_VERSION; | ||
596 | oops_hdr->report_length = (u16) unzipped_len; | ||
597 | oops_hdr->timestamp = timestamp; | ||
598 | return 0; | ||
599 | } | ||
600 | |||
601 | static int nvram_pstore_open(struct pstore_info *psi) | 542 | static int nvram_pstore_open(struct pstore_info *psi) |
602 | { | 543 | { |
603 | /* Reset the iterator to start reading partitions again */ | 544 | /* Reset the iterator to start reading partitions again */ |
@@ -613,7 +554,7 @@ static int nvram_pstore_open(struct pstore_info *psi) | |||
613 | * @part: pstore writes data to registered buffer in parts, | 554 | * @part: pstore writes data to registered buffer in parts, |
614 | * part number will indicate the same. | 555 | * part number will indicate the same. |
615 | * @count: Indicates oops count | 556 | * @count: Indicates oops count |
616 | * @hsize: Size of header added by pstore | 557 | * @compressed: Flag to indicate the log is compressed |
617 | * @size: number of bytes written to the registered buffer | 558 | * @size: number of bytes written to the registered buffer |
618 | * @psi: registered pstore_info structure | 559 | * @psi: registered pstore_info structure |
619 | * | 560 | * |
@@ -624,7 +565,7 @@ static int nvram_pstore_open(struct pstore_info *psi) | |||
624 | static int nvram_pstore_write(enum pstore_type_id type, | 565 | static int nvram_pstore_write(enum pstore_type_id type, |
625 | enum kmsg_dump_reason reason, | 566 | enum kmsg_dump_reason reason, |
626 | u64 *id, unsigned int part, int count, | 567 | u64 *id, unsigned int part, int count, |
627 | size_t hsize, size_t size, | 568 | bool compressed, size_t size, |
628 | struct pstore_info *psi) | 569 | struct pstore_info *psi) |
629 | { | 570 | { |
630 | int rc; | 571 | int rc; |
@@ -640,30 +581,11 @@ static int nvram_pstore_write(enum pstore_type_id type, | |||
640 | oops_hdr->report_length = (u16) size; | 581 | oops_hdr->report_length = (u16) size; |
641 | oops_hdr->timestamp = get_seconds(); | 582 | oops_hdr->timestamp = get_seconds(); |
642 | 583 | ||
643 | if (big_oops_buf) { | 584 | if (compressed) |
644 | rc = zip_oops(size); | 585 | err_type = ERR_TYPE_KERNEL_PANIC_GZ; |
645 | /* | ||
646 | * If compression fails copy recent log messages from | ||
647 | * big_oops_buf to oops_data. | ||
648 | */ | ||
649 | if (rc != 0) { | ||
650 | size_t diff = size - oops_data_sz + hsize; | ||
651 | |||
652 | if (size > oops_data_sz) { | ||
653 | memcpy(oops_data, big_oops_buf, hsize); | ||
654 | memcpy(oops_data + hsize, big_oops_buf + diff, | ||
655 | oops_data_sz - hsize); | ||
656 | |||
657 | oops_hdr->report_length = (u16) oops_data_sz; | ||
658 | } else | ||
659 | memcpy(oops_data, big_oops_buf, size); | ||
660 | } else | ||
661 | err_type = ERR_TYPE_KERNEL_PANIC_GZ; | ||
662 | } | ||
663 | 586 | ||
664 | rc = nvram_write_os_partition(&oops_log_partition, oops_buf, | 587 | rc = nvram_write_os_partition(&oops_log_partition, oops_buf, |
665 | (int) (sizeof(*oops_hdr) + oops_hdr->report_length), err_type, | 588 | (int) (sizeof(*oops_hdr) + size), err_type, count); |
666 | count); | ||
667 | 589 | ||
668 | if (rc != 0) | 590 | if (rc != 0) |
669 | return rc; | 591 | return rc; |
@@ -679,16 +601,15 @@ static int nvram_pstore_write(enum pstore_type_id type, | |||
679 | */ | 601 | */ |
680 | static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type, | 602 | static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type, |
681 | int *count, struct timespec *time, char **buf, | 603 | int *count, struct timespec *time, char **buf, |
682 | struct pstore_info *psi) | 604 | bool *compressed, struct pstore_info *psi) |
683 | { | 605 | { |
684 | struct oops_log_info *oops_hdr; | 606 | struct oops_log_info *oops_hdr; |
685 | unsigned int err_type, id_no, size = 0; | 607 | unsigned int err_type, id_no, size = 0; |
686 | struct nvram_os_partition *part = NULL; | 608 | struct nvram_os_partition *part = NULL; |
687 | char *buff = NULL, *big_buff = NULL; | 609 | char *buff = NULL; |
688 | int rc, sig = 0; | 610 | int sig = 0; |
689 | loff_t p; | 611 | loff_t p; |
690 | 612 | ||
691 | read_partition: | ||
692 | read_type++; | 613 | read_type++; |
693 | 614 | ||
694 | switch (nvram_type_ids[read_type]) { | 615 | switch (nvram_type_ids[read_type]) { |
@@ -749,30 +670,32 @@ read_partition: | |||
749 | *id = id_no; | 670 | *id = id_no; |
750 | 671 | ||
751 | if (nvram_type_ids[read_type] == PSTORE_TYPE_DMESG) { | 672 | if (nvram_type_ids[read_type] == PSTORE_TYPE_DMESG) { |
752 | oops_hdr = (struct oops_log_info *)buff; | 673 | size_t length, hdr_size; |
753 | *buf = buff + sizeof(*oops_hdr); | ||
754 | |||
755 | if (err_type == ERR_TYPE_KERNEL_PANIC_GZ) { | ||
756 | big_buff = kmalloc(big_oops_buf_sz, GFP_KERNEL); | ||
757 | if (!big_buff) | ||
758 | return -ENOMEM; | ||
759 | |||
760 | rc = unzip_oops(buff, big_buff); | ||
761 | 674 | ||
762 | if (rc != 0) { | 675 | oops_hdr = (struct oops_log_info *)buff; |
763 | kfree(buff); | 676 | if (oops_hdr->version < OOPS_HDR_VERSION) { |
764 | kfree(big_buff); | 677 | /* Old format oops header had 2-byte record size */ |
765 | goto read_partition; | 678 | hdr_size = sizeof(u16); |
766 | } | 679 | length = oops_hdr->version; |
767 | 680 | time->tv_sec = 0; | |
768 | oops_hdr = (struct oops_log_info *)big_buff; | 681 | time->tv_nsec = 0; |
769 | *buf = big_buff + sizeof(*oops_hdr); | 682 | } else { |
770 | kfree(buff); | 683 | hdr_size = sizeof(*oops_hdr); |
684 | length = oops_hdr->report_length; | ||
685 | time->tv_sec = oops_hdr->timestamp; | ||
686 | time->tv_nsec = 0; | ||
771 | } | 687 | } |
688 | *buf = kmalloc(length, GFP_KERNEL); | ||
689 | if (*buf == NULL) | ||
690 | return -ENOMEM; | ||
691 | memcpy(*buf, buff + hdr_size, length); | ||
692 | kfree(buff); | ||
772 | 693 | ||
773 | time->tv_sec = oops_hdr->timestamp; | 694 | if (err_type == ERR_TYPE_KERNEL_PANIC_GZ) |
774 | time->tv_nsec = 0; | 695 | *compressed = true; |
775 | return oops_hdr->report_length; | 696 | else |
697 | *compressed = false; | ||
698 | return length; | ||
776 | } | 699 | } |
777 | 700 | ||
778 | *buf = buff; | 701 | *buf = buff; |
@@ -791,13 +714,8 @@ static int nvram_pstore_init(void) | |||
791 | { | 714 | { |
792 | int rc = 0; | 715 | int rc = 0; |
793 | 716 | ||
794 | if (big_oops_buf) { | 717 | nvram_pstore_info.buf = oops_data; |
795 | nvram_pstore_info.buf = big_oops_buf; | 718 | nvram_pstore_info.bufsize = oops_data_sz; |
796 | nvram_pstore_info.bufsize = big_oops_buf_sz; | ||
797 | } else { | ||
798 | nvram_pstore_info.buf = oops_data; | ||
799 | nvram_pstore_info.bufsize = oops_data_sz; | ||
800 | } | ||
801 | 719 | ||
802 | rc = pstore_register(&nvram_pstore_info); | 720 | rc = pstore_register(&nvram_pstore_info); |
803 | if (rc != 0) | 721 | if (rc != 0) |
@@ -836,6 +754,11 @@ static void __init nvram_init_oops_partition(int rtas_partition_exists) | |||
836 | oops_data = oops_buf + sizeof(struct oops_log_info); | 754 | oops_data = oops_buf + sizeof(struct oops_log_info); |
837 | oops_data_sz = oops_log_partition.size - sizeof(struct oops_log_info); | 755 | oops_data_sz = oops_log_partition.size - sizeof(struct oops_log_info); |
838 | 756 | ||
757 | rc = nvram_pstore_init(); | ||
758 | |||
759 | if (!rc) | ||
760 | return; | ||
761 | |||
839 | /* | 762 | /* |
840 | * Figure compression (preceded by elimination of each line's <n> | 763 | * Figure compression (preceded by elimination of each line's <n> |
841 | * severity prefix) will reduce the oops/panic report to at most | 764 | * severity prefix) will reduce the oops/panic report to at most |
@@ -844,8 +767,8 @@ static void __init nvram_init_oops_partition(int rtas_partition_exists) | |||
844 | big_oops_buf_sz = (oops_data_sz * 100) / 45; | 767 | big_oops_buf_sz = (oops_data_sz * 100) / 45; |
845 | big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL); | 768 | big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL); |
846 | if (big_oops_buf) { | 769 | if (big_oops_buf) { |
847 | stream.workspace = kmalloc(zlib_deflate_workspacesize( | 770 | stream.workspace = kmalloc(zlib_deflate_workspacesize( |
848 | WINDOW_BITS, MEM_LEVEL), GFP_KERNEL); | 771 | WINDOW_BITS, MEM_LEVEL), GFP_KERNEL); |
849 | if (!stream.workspace) { | 772 | if (!stream.workspace) { |
850 | pr_err("nvram: No memory for compression workspace; " | 773 | pr_err("nvram: No memory for compression workspace; " |
851 | "skipping compression of %s partition data\n", | 774 | "skipping compression of %s partition data\n", |
@@ -859,11 +782,6 @@ static void __init nvram_init_oops_partition(int rtas_partition_exists) | |||
859 | stream.workspace = NULL; | 782 | stream.workspace = NULL; |
860 | } | 783 | } |
861 | 784 | ||
862 | rc = nvram_pstore_init(); | ||
863 | |||
864 | if (!rc) | ||
865 | return; | ||
866 | |||
867 | rc = kmsg_dump_register(&nvram_kmsg_dumper); | 785 | rc = kmsg_dump_register(&nvram_kmsg_dumper); |
868 | if (rc != 0) { | 786 | if (rc != 0) { |
869 | pr_err("nvram: kmsg_dump_register() failed; returned %d\n", rc); | 787 | pr_err("nvram: kmsg_dump_register() failed; returned %d\n", rc); |
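
With compression now handled by pstore itself, the interesting part left in nvram_pstore_read() is telling the two on-NVRAM oops header layouts apart: per the comment in the hunk above, the old layout began with a bare 2-byte record size where the current struct oops_log_info carries a version field. A hedged userspace sketch of that branch follows; the struct layout and the OOPS_HDR_VERSION value are assumptions made for illustration, not copied from the kernel headers.

#include <stdint.h>
#include <stddef.h>

#define OOPS_HDR_VERSION 5	/* assumed value; the real constant lives in nvram.c */

struct oops_log_info {		/* assumed layout, limited to the fields used above */
	uint16_t version;
	uint16_t report_length;
	uint64_t timestamp;
};

/*
 * Mirror of the branch in nvram_pstore_read(): return the offset of the
 * log payload and report its length and timestamp (the kernel zeroes the
 * time for old-format records).
 */
size_t oops_payload(const void *rec, size_t *length, uint64_t *tv_sec)
{
	const struct oops_log_info *hdr = rec;

	if (hdr->version < OOPS_HDR_VERSION) {
		/* Old format: the leading u16 was the record size itself. */
		*length = hdr->version;
		*tv_sec = 0;
		return sizeof(uint16_t);
	}
	*length = hdr->report_length;
	*tv_sec = hdr->timestamp;
	return sizeof(*hdr);
}
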
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 22f75b504f7f..8b7892bf6d8b 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -116,8 +116,10 @@ config S390 | |||
116 | select HAVE_FUNCTION_GRAPH_TRACER | 116 | select HAVE_FUNCTION_GRAPH_TRACER |
117 | select HAVE_FUNCTION_TRACER | 117 | select HAVE_FUNCTION_TRACER |
118 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST | 118 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST |
119 | select HAVE_GENERIC_HARDIRQS | ||
119 | select HAVE_KERNEL_BZIP2 | 120 | select HAVE_KERNEL_BZIP2 |
120 | select HAVE_KERNEL_GZIP | 121 | select HAVE_KERNEL_GZIP |
122 | select HAVE_KERNEL_LZ4 | ||
121 | select HAVE_KERNEL_LZMA | 123 | select HAVE_KERNEL_LZMA |
122 | select HAVE_KERNEL_LZO | 124 | select HAVE_KERNEL_LZO |
123 | select HAVE_KERNEL_XZ | 125 | select HAVE_KERNEL_XZ |
@@ -227,11 +229,12 @@ config MARCH_Z196 | |||
227 | not work on older machines. | 229 | not work on older machines. |
228 | 230 | ||
229 | config MARCH_ZEC12 | 231 | config MARCH_ZEC12 |
230 | bool "IBM zEC12" | 232 | bool "IBM zBC12 and zEC12" |
231 | select HAVE_MARCH_ZEC12_FEATURES if 64BIT | 233 | select HAVE_MARCH_ZEC12_FEATURES if 64BIT |
232 | help | 234 | help |
233 | Select this to enable optimizations for IBM zEC12 (2827 series). The | 235 | Select this to enable optimizations for IBM zBC12 and zEC12 (2828 and |
234 | kernel will be slightly faster but will not work on older machines. | 236 | 2827 series). The kernel will be slightly faster but will not work on |
237 | older machines. | ||
235 | 238 | ||
236 | endchoice | 239 | endchoice |
237 | 240 | ||
@@ -443,6 +446,16 @@ config PCI_NR_FUNCTIONS | |||
443 | This allows you to specify the maximum number of PCI functions which | 446 | This allows you to specify the maximum number of PCI functions which |
444 | this kernel will support. | 447 | this kernel will support. |
445 | 448 | ||
449 | config PCI_NR_MSI | ||
450 | int "Maximum number of MSI interrupts (64-32768)" | ||
451 | range 64 32768 | ||
452 | default "256" | ||
453 | help | ||
454 | This defines the number of virtual interrupts the kernel will | ||
455 | provide for MSI interrupts. If you configure your system to have | ||
456 | too few drivers will fail to allocate MSI interrupts for all | ||
457 | PCI devices. | ||
458 | |||
446 | source "drivers/pci/Kconfig" | 459 | source "drivers/pci/Kconfig" |
447 | source "drivers/pci/pcie/Kconfig" | 460 | source "drivers/pci/pcie/Kconfig" |
448 | source "drivers/pci/hotplug/Kconfig" | 461 | source "drivers/pci/hotplug/Kconfig" |
@@ -709,6 +722,7 @@ config S390_GUEST | |||
709 | def_bool y | 722 | def_bool y |
710 | prompt "s390 support for virtio devices" | 723 | prompt "s390 support for virtio devices" |
711 | depends on 64BIT | 724 | depends on 64BIT |
725 | select TTY | ||
712 | select VIRTUALIZATION | 726 | select VIRTUALIZATION |
713 | select VIRTIO | 727 | select VIRTIO |
714 | select VIRTIO_CONSOLE | 728 | select VIRTIO_CONSOLE |
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile index 3ad8f61c9985..866ecbe670e4 100644 --- a/arch/s390/boot/compressed/Makefile +++ b/arch/s390/boot/compressed/Makefile | |||
@@ -6,9 +6,9 @@ | |||
6 | 6 | ||
7 | BITS := $(if $(CONFIG_64BIT),64,31) | 7 | BITS := $(if $(CONFIG_64BIT),64,31) |
8 | 8 | ||
9 | targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 \ | 9 | targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 |
10 | vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo misc.o piggy.o \ | 10 | targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4 |
11 | sizes.h head$(BITS).o | 11 | targets += misc.o piggy.o sizes.h head$(BITS).o |
12 | 12 | ||
13 | KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 | 13 | KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 |
14 | KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING | 14 | KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING |
@@ -48,6 +48,7 @@ vmlinux.bin.all-y := $(obj)/vmlinux.bin | |||
48 | 48 | ||
49 | suffix-$(CONFIG_KERNEL_GZIP) := gz | 49 | suffix-$(CONFIG_KERNEL_GZIP) := gz |
50 | suffix-$(CONFIG_KERNEL_BZIP2) := bz2 | 50 | suffix-$(CONFIG_KERNEL_BZIP2) := bz2 |
51 | suffix-$(CONFIG_KERNEL_LZ4) := lz4 | ||
51 | suffix-$(CONFIG_KERNEL_LZMA) := lzma | 52 | suffix-$(CONFIG_KERNEL_LZMA) := lzma |
52 | suffix-$(CONFIG_KERNEL_LZO) := lzo | 53 | suffix-$(CONFIG_KERNEL_LZO) := lzo |
53 | suffix-$(CONFIG_KERNEL_XZ) := xz | 54 | suffix-$(CONFIG_KERNEL_XZ) := xz |
@@ -56,6 +57,8 @@ $(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y) | |||
56 | $(call if_changed,gzip) | 57 | $(call if_changed,gzip) |
57 | $(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y) | 58 | $(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y) |
58 | $(call if_changed,bzip2) | 59 | $(call if_changed,bzip2) |
60 | $(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y) | ||
61 | $(call if_changed,lz4) | ||
59 | $(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) | 62 | $(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) |
60 | $(call if_changed,lzma) | 63 | $(call if_changed,lzma) |
61 | $(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) | 64 | $(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) |
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c index c4c6a1cf221b..57cbaff1f397 100644 --- a/arch/s390/boot/compressed/misc.c +++ b/arch/s390/boot/compressed/misc.c | |||
@@ -47,6 +47,10 @@ static unsigned long free_mem_end_ptr; | |||
47 | #include "../../../../lib/decompress_bunzip2.c" | 47 | #include "../../../../lib/decompress_bunzip2.c" |
48 | #endif | 48 | #endif |
49 | 49 | ||
50 | #ifdef CONFIG_KERNEL_LZ4 | ||
51 | #include "../../../../lib/decompress_unlz4.c" | ||
52 | #endif | ||
53 | |||
50 | #ifdef CONFIG_KERNEL_LZMA | 54 | #ifdef CONFIG_KERNEL_LZMA |
51 | #include "../../../../lib/decompress_unlzma.c" | 55 | #include "../../../../lib/decompress_unlzma.c" |
52 | #endif | 56 | #endif |
diff --git a/arch/s390/include/asm/airq.h b/arch/s390/include/asm/airq.h index 4066cee0c2d2..4bbb5957ed1b 100644 --- a/arch/s390/include/asm/airq.h +++ b/arch/s390/include/asm/airq.h | |||
@@ -9,6 +9,8 @@ | |||
9 | #ifndef _ASM_S390_AIRQ_H | 9 | #ifndef _ASM_S390_AIRQ_H |
10 | #define _ASM_S390_AIRQ_H | 10 | #define _ASM_S390_AIRQ_H |
11 | 11 | ||
12 | #include <linux/bit_spinlock.h> | ||
13 | |||
12 | struct airq_struct { | 14 | struct airq_struct { |
13 | struct hlist_node list; /* Handler queueing. */ | 15 | struct hlist_node list; /* Handler queueing. */ |
14 | void (*handler)(struct airq_struct *); /* Thin-interrupt handler */ | 16 | void (*handler)(struct airq_struct *); /* Thin-interrupt handler */ |
@@ -23,4 +25,69 @@ struct airq_struct { | |||
23 | int register_adapter_interrupt(struct airq_struct *airq); | 25 | int register_adapter_interrupt(struct airq_struct *airq); |
24 | void unregister_adapter_interrupt(struct airq_struct *airq); | 26 | void unregister_adapter_interrupt(struct airq_struct *airq); |
25 | 27 | ||
28 | /* Adapter interrupt bit vector */ | ||
29 | struct airq_iv { | ||
30 | unsigned long *vector; /* Adapter interrupt bit vector */ | ||
31 | unsigned long *avail; /* Allocation bit mask for the bit vector */ | ||
32 | unsigned long *bitlock; /* Lock bit mask for the bit vector */ | ||
33 | unsigned long *ptr; /* Pointer associated with each bit */ | ||
34 | unsigned int *data; /* 32 bit value associated with each bit */ | ||
35 | unsigned long bits; /* Number of bits in the vector */ | ||
36 | unsigned long end; /* Number of highest allocated bit + 1 */ | ||
37 | spinlock_t lock; /* Lock to protect alloc & free */ | ||
38 | }; | ||
39 | |||
40 | #define AIRQ_IV_ALLOC 1 /* Use an allocation bit mask */ | ||
41 | #define AIRQ_IV_BITLOCK 2 /* Allocate the lock bit mask */ | ||
42 | #define AIRQ_IV_PTR 4 /* Allocate the ptr array */ | ||
43 | #define AIRQ_IV_DATA 8 /* Allocate the data array */ | ||
44 | |||
45 | struct airq_iv *airq_iv_create(unsigned long bits, unsigned long flags); | ||
46 | void airq_iv_release(struct airq_iv *iv); | ||
47 | unsigned long airq_iv_alloc_bit(struct airq_iv *iv); | ||
48 | void airq_iv_free_bit(struct airq_iv *iv, unsigned long bit); | ||
49 | unsigned long airq_iv_scan(struct airq_iv *iv, unsigned long start, | ||
50 | unsigned long end); | ||
51 | |||
52 | static inline unsigned long airq_iv_end(struct airq_iv *iv) | ||
53 | { | ||
54 | return iv->end; | ||
55 | } | ||
56 | |||
57 | static inline void airq_iv_lock(struct airq_iv *iv, unsigned long bit) | ||
58 | { | ||
59 | const unsigned long be_to_le = BITS_PER_LONG - 1; | ||
60 | bit_spin_lock(bit ^ be_to_le, iv->bitlock); | ||
61 | } | ||
62 | |||
63 | static inline void airq_iv_unlock(struct airq_iv *iv, unsigned long bit) | ||
64 | { | ||
65 | const unsigned long be_to_le = BITS_PER_LONG - 1; | ||
66 | bit_spin_unlock(bit ^ be_to_le, iv->bitlock); | ||
67 | } | ||
68 | |||
69 | static inline void airq_iv_set_data(struct airq_iv *iv, unsigned long bit, | ||
70 | unsigned int data) | ||
71 | { | ||
72 | iv->data[bit] = data; | ||
73 | } | ||
74 | |||
75 | static inline unsigned int airq_iv_get_data(struct airq_iv *iv, | ||
76 | unsigned long bit) | ||
77 | { | ||
78 | return iv->data[bit]; | ||
79 | } | ||
80 | |||
81 | static inline void airq_iv_set_ptr(struct airq_iv *iv, unsigned long bit, | ||
82 | unsigned long ptr) | ||
83 | { | ||
84 | iv->ptr[bit] = ptr; | ||
85 | } | ||
86 | |||
87 | static inline unsigned long airq_iv_get_ptr(struct airq_iv *iv, | ||
88 | unsigned long bit) | ||
89 | { | ||
90 | return iv->ptr[bit]; | ||
91 | } | ||
92 | |||
26 | #endif /* _ASM_S390_AIRQ_H */ | 93 | #endif /* _ASM_S390_AIRQ_H */ |
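
The airq_iv structure added above bundles an adapter-interrupt bit vector with optional allocation, lock, pointer and data arrays, selected by the AIRQ_IV_* flags. The sketch below is a hedged usage example based only on the declarations in this hunk; the vector size, the stored value, the -1UL failure sentinel for airq_iv_alloc_bit() and the handle_pending() helper are assumptions for illustration, not taken from an existing driver.

#include <asm/airq.h>
#include <linux/errno.h>

static struct airq_iv *iv;

static void handle_pending(unsigned int data)
{
	/* driver-specific dispatch on the stored per-bit value (assumed) */
}

static int example_setup(void)
{
	unsigned long bit;

	/* One bit per interrupt source, plus an allocation mask and a
	 * 32-bit data slot (for example an irq number) for every bit. */
	iv = airq_iv_create(256, AIRQ_IV_ALLOC | AIRQ_IV_DATA);
	if (!iv)
		return -ENOMEM;

	bit = airq_iv_alloc_bit(iv);
	if (bit == -1UL) {		/* assumed "no bit left" sentinel */
		airq_iv_release(iv);
		return -ENOSPC;
	}
	airq_iv_set_data(iv, bit, 42);	/* value the handler will need later */
	return 0;
}

static void example_handler(void)
{
	unsigned long bit, end = airq_iv_end(iv);

	/* Walk the bits that are set and dispatch on the stored data. */
	for (bit = airq_iv_scan(iv, 0, end);
	     bit < end;
	     bit = airq_iv_scan(iv, bit + 1, end))
		handle_pending(airq_iv_get_data(iv, bit));
}
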
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h index 4d8604e311f3..10135a38673c 100644 --- a/arch/s390/include/asm/bitops.h +++ b/arch/s390/include/asm/bitops.h | |||
@@ -216,7 +216,7 @@ static inline void __set_bit(unsigned long nr, volatile unsigned long *ptr) | |||
216 | addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3); | 216 | addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3); |
217 | asm volatile( | 217 | asm volatile( |
218 | " oc %O0(1,%R0),%1" | 218 | " oc %O0(1,%R0),%1" |
219 | : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" ); | 219 | : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc"); |
220 | } | 220 | } |
221 | 221 | ||
222 | static inline void | 222 | static inline void |
@@ -244,7 +244,7 @@ __clear_bit(unsigned long nr, volatile unsigned long *ptr) | |||
244 | addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3); | 244 | addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3); |
245 | asm volatile( | 245 | asm volatile( |
246 | " nc %O0(1,%R0),%1" | 246 | " nc %O0(1,%R0),%1" |
247 | : "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc" ); | 247 | : "+Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) : "cc"); |
248 | } | 248 | } |
249 | 249 | ||
250 | static inline void | 250 | static inline void |
@@ -271,7 +271,7 @@ static inline void __change_bit(unsigned long nr, volatile unsigned long *ptr) | |||
271 | addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3); | 271 | addr = (unsigned long) ptr + ((nr ^ (BITS_PER_LONG - 8)) >> 3); |
272 | asm volatile( | 272 | asm volatile( |
273 | " xc %O0(1,%R0),%1" | 273 | " xc %O0(1,%R0),%1" |
274 | : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc" ); | 274 | : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) : "cc"); |
275 | } | 275 | } |
276 | 276 | ||
277 | static inline void | 277 | static inline void |
@@ -301,7 +301,7 @@ test_and_set_bit_simple(unsigned long nr, volatile unsigned long *ptr) | |||
301 | ch = *(unsigned char *) addr; | 301 | ch = *(unsigned char *) addr; |
302 | asm volatile( | 302 | asm volatile( |
303 | " oc %O0(1,%R0),%1" | 303 | " oc %O0(1,%R0),%1" |
304 | : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) | 304 | : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) |
305 | : "cc", "memory"); | 305 | : "cc", "memory"); |
306 | return (ch >> (nr & 7)) & 1; | 306 | return (ch >> (nr & 7)) & 1; |
307 | } | 307 | } |
@@ -320,7 +320,7 @@ test_and_clear_bit_simple(unsigned long nr, volatile unsigned long *ptr) | |||
320 | ch = *(unsigned char *) addr; | 320 | ch = *(unsigned char *) addr; |
321 | asm volatile( | 321 | asm volatile( |
322 | " nc %O0(1,%R0),%1" | 322 | " nc %O0(1,%R0),%1" |
323 | : "=Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) | 323 | : "+Q" (*(char *) addr) : "Q" (_ni_bitmap[nr & 7]) |
324 | : "cc", "memory"); | 324 | : "cc", "memory"); |
325 | return (ch >> (nr & 7)) & 1; | 325 | return (ch >> (nr & 7)) & 1; |
326 | } | 326 | } |
@@ -339,7 +339,7 @@ test_and_change_bit_simple(unsigned long nr, volatile unsigned long *ptr) | |||
339 | ch = *(unsigned char *) addr; | 339 | ch = *(unsigned char *) addr; |
340 | asm volatile( | 340 | asm volatile( |
341 | " xc %O0(1,%R0),%1" | 341 | " xc %O0(1,%R0),%1" |
342 | : "=Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) | 342 | : "+Q" (*(char *) addr) : "Q" (_oi_bitmap[nr & 7]) |
343 | : "cc", "memory"); | 343 | : "cc", "memory"); |
344 | return (ch >> (nr & 7)) & 1; | 344 | return (ch >> (nr & 7)) & 1; |
345 | } | 345 | } |
@@ -693,7 +693,7 @@ static inline int find_next_bit_left(const unsigned long *addr, | |||
693 | size -= offset; | 693 | size -= offset; |
694 | p = addr + offset / BITS_PER_LONG; | 694 | p = addr + offset / BITS_PER_LONG; |
695 | if (bit) { | 695 | if (bit) { |
696 | set = __flo_word(0, *p & (~0UL << bit)); | 696 | set = __flo_word(0, *p & (~0UL >> bit)); |
697 | if (set >= size) | 697 | if (set >= size) |
698 | return size + offset; | 698 | return size + offset; |
699 | if (set < BITS_PER_LONG) | 699 | if (set < BITS_PER_LONG) |
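
Two fixes meet in this bitops.h hunk: the OC/NC/XC memory operands become "+Q" because those instructions read and modify the target byte (an output-only "=Q" lets the compiler assume the old contents are dead), and find_next_bit_left() gets its resume mask corrected. The mask change is easy to check in plain C; the sketch below uses illustrative values and assumes 64-bit longs.

#include <stdio.h>

int main(void)
{
	/*
	 * With MSB-first ("left") bit numbering, bit n lives at value bit
	 * 63 - n, so resuming a scan at 'offset' must clear the HIGH-order
	 * value bits: ~0UL >> offset. The old ~0UL << offset cleared the
	 * low-order bits and left already scanned positions visible.
	 */
	unsigned long word = (1UL << 61) | (1UL << 54);	/* MSB-first bits 2 and 9 set */
	unsigned int offset = 4;			/* resume the scan at bit 4   */

	unsigned long wrong = word & (~0UL << offset);	/* keeps bit 2 -> rescanned   */
	unsigned long right = word & (~0UL >> offset);	/* keeps only bits >= 4       */

	printf("buggy mask: %016lx\n", wrong);	/* 2040000000000000 */
	printf("fixed mask: %016lx\n", right);	/* 0040000000000000 */
	return 0;
}
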
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h index ffb898961c8d..d42625053c37 100644 --- a/arch/s390/include/asm/cio.h +++ b/arch/s390/include/asm/cio.h | |||
@@ -296,6 +296,7 @@ static inline int ccw_dev_id_is_equal(struct ccw_dev_id *dev_id1, | |||
296 | return 0; | 296 | return 0; |
297 | } | 297 | } |
298 | 298 | ||
299 | void channel_subsystem_reinit(void); | ||
299 | extern void css_schedule_reprobe(void); | 300 | extern void css_schedule_reprobe(void); |
300 | 301 | ||
301 | extern void reipl_ccw_dev(struct ccw_dev_id *id); | 302 | extern void reipl_ccw_dev(struct ccw_dev_id *id); |
diff --git a/arch/s390/include/asm/hardirq.h b/arch/s390/include/asm/hardirq.h index 0c82ba86e997..a908d2941c5d 100644 --- a/arch/s390/include/asm/hardirq.h +++ b/arch/s390/include/asm/hardirq.h | |||
@@ -20,4 +20,9 @@ | |||
20 | 20 | ||
21 | #define HARDIRQ_BITS 8 | 21 | #define HARDIRQ_BITS 8 |
22 | 22 | ||
23 | static inline void ack_bad_irq(unsigned int irq) | ||
24 | { | ||
25 | printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq); | ||
26 | } | ||
27 | |||
23 | #endif /* __ASM_HARDIRQ_H */ | 28 | #endif /* __ASM_HARDIRQ_H */ |
diff --git a/arch/s390/include/asm/hugetlb.h b/arch/s390/include/asm/hugetlb.h index bd90359d6d22..11eae5f55b70 100644 --- a/arch/s390/include/asm/hugetlb.h +++ b/arch/s390/include/asm/hugetlb.h | |||
@@ -17,6 +17,9 @@ | |||
17 | 17 | ||
18 | void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, | 18 | void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, |
19 | pte_t *ptep, pte_t pte); | 19 | pte_t *ptep, pte_t pte); |
20 | pte_t huge_ptep_get(pte_t *ptep); | ||
21 | pte_t huge_ptep_get_and_clear(struct mm_struct *mm, | ||
22 | unsigned long addr, pte_t *ptep); | ||
20 | 23 | ||
21 | /* | 24 | /* |
22 | * If the arch doesn't supply something else, assume that hugepage | 25 | * If the arch doesn't supply something else, assume that hugepage |
@@ -38,147 +41,75 @@ static inline int prepare_hugepage_range(struct file *file, | |||
38 | int arch_prepare_hugepage(struct page *page); | 41 | int arch_prepare_hugepage(struct page *page); |
39 | void arch_release_hugepage(struct page *page); | 42 | void arch_release_hugepage(struct page *page); |
40 | 43 | ||
41 | static inline pte_t huge_pte_wrprotect(pte_t pte) | 44 | static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr, |
45 | pte_t *ptep) | ||
42 | { | 46 | { |
43 | pte_val(pte) |= _PAGE_RO; | 47 | pte_val(*ptep) = _SEGMENT_ENTRY_EMPTY; |
44 | return pte; | ||
45 | } | 48 | } |
46 | 49 | ||
47 | static inline int huge_pte_none(pte_t pte) | 50 | static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, |
51 | unsigned long address, pte_t *ptep) | ||
48 | { | 52 | { |
49 | return (pte_val(pte) & _SEGMENT_ENTRY_INV) && | 53 | huge_ptep_get_and_clear(vma->vm_mm, address, ptep); |
50 | !(pte_val(pte) & _SEGMENT_ENTRY_RO); | ||
51 | } | 54 | } |
52 | 55 | ||
53 | static inline pte_t huge_ptep_get(pte_t *ptep) | 56 | static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma, |
57 | unsigned long addr, pte_t *ptep, | ||
58 | pte_t pte, int dirty) | ||
54 | { | 59 | { |
55 | pte_t pte = *ptep; | 60 | int changed = !pte_same(huge_ptep_get(ptep), pte); |
56 | unsigned long mask; | 61 | if (changed) { |
57 | 62 | huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); | |
58 | if (!MACHINE_HAS_HPAGE) { | 63 | set_huge_pte_at(vma->vm_mm, addr, ptep, pte); |
59 | ptep = (pte_t *) (pte_val(pte) & _SEGMENT_ENTRY_ORIGIN); | ||
60 | if (ptep) { | ||
61 | mask = pte_val(pte) & | ||
62 | (_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO); | ||
63 | pte = pte_mkhuge(*ptep); | ||
64 | pte_val(pte) |= mask; | ||
65 | } | ||
66 | } | 64 | } |
67 | return pte; | 65 | return changed; |
68 | } | 66 | } |
69 | 67 | ||
70 | static inline void __pmd_csp(pmd_t *pmdp) | 68 | static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, |
69 | unsigned long addr, pte_t *ptep) | ||
71 | { | 70 | { |
72 | register unsigned long reg2 asm("2") = pmd_val(*pmdp); | 71 | pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep); |
73 | register unsigned long reg3 asm("3") = pmd_val(*pmdp) | | 72 | set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte)); |
74 | _SEGMENT_ENTRY_INV; | ||
75 | register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5; | ||
76 | |||
77 | asm volatile( | ||
78 | " csp %1,%3" | ||
79 | : "=m" (*pmdp) | ||
80 | : "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc"); | ||
81 | } | 73 | } |
82 | 74 | ||
83 | static inline void huge_ptep_invalidate(struct mm_struct *mm, | 75 | static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot) |
84 | unsigned long address, pte_t *ptep) | ||
85 | { | ||
86 | pmd_t *pmdp = (pmd_t *) ptep; | ||
87 | |||
88 | if (MACHINE_HAS_IDTE) | ||
89 | __pmd_idte(address, pmdp); | ||
90 | else | ||
91 | __pmd_csp(pmdp); | ||
92 | pmd_val(*pmdp) = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY; | ||
93 | } | ||
94 | |||
95 | static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm, | ||
96 | unsigned long addr, pte_t *ptep) | ||
97 | { | ||
98 | pte_t pte = huge_ptep_get(ptep); | ||
99 | |||
100 | huge_ptep_invalidate(mm, addr, ptep); | ||
101 | return pte; | ||
102 | } | ||
103 | |||
104 | #define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \ | ||
105 | ({ \ | ||
106 | int __changed = !pte_same(huge_ptep_get(__ptep), __entry); \ | ||
107 | if (__changed) { \ | ||
108 | huge_ptep_invalidate((__vma)->vm_mm, __addr, __ptep); \ | ||
109 | set_huge_pte_at((__vma)->vm_mm, __addr, __ptep, __entry); \ | ||
110 | } \ | ||
111 | __changed; \ | ||
112 | }) | ||
113 | |||
114 | #define huge_ptep_set_wrprotect(__mm, __addr, __ptep) \ | ||
115 | ({ \ | ||
116 | pte_t __pte = huge_ptep_get(__ptep); \ | ||
117 | if (huge_pte_write(__pte)) { \ | ||
118 | huge_ptep_invalidate(__mm, __addr, __ptep); \ | ||
119 | set_huge_pte_at(__mm, __addr, __ptep, \ | ||
120 | huge_pte_wrprotect(__pte)); \ | ||
121 | } \ | ||
122 | }) | ||
123 | |||
124 | static inline void huge_ptep_clear_flush(struct vm_area_struct *vma, | ||
125 | unsigned long address, pte_t *ptep) | ||
126 | { | 76 | { |
127 | huge_ptep_invalidate(vma->vm_mm, address, ptep); | 77 | return mk_pte(page, pgprot); |
128 | } | 78 | } |
129 | 79 | ||
130 | static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot) | 80 | static inline int huge_pte_none(pte_t pte) |
131 | { | 81 | { |
132 | pte_t pte; | 82 | return pte_none(pte); |
133 | pmd_t pmd; | ||
134 | |||
135 | pmd = mk_pmd_phys(page_to_phys(page), pgprot); | ||
136 | pte_val(pte) = pmd_val(pmd); | ||
137 | return pte; | ||
138 | } | 83 | } |
139 | 84 | ||
140 | static inline int huge_pte_write(pte_t pte) | 85 | static inline int huge_pte_write(pte_t pte) |
141 | { | 86 | { |
142 | pmd_t pmd; | 87 | return pte_write(pte); |
143 | |||
144 | pmd_val(pmd) = pte_val(pte); | ||
145 | return pmd_write(pmd); | ||
146 | } | 88 | } |
147 | 89 | ||
148 | static inline int huge_pte_dirty(pte_t pte) | 90 | static inline int huge_pte_dirty(pte_t pte) |
149 | { | 91 | { |
150 | /* No dirty bit in the segment table entry. */ | 92 | return pte_dirty(pte); |
151 | return 0; | ||
152 | } | 93 | } |
153 | 94 | ||
154 | static inline pte_t huge_pte_mkwrite(pte_t pte) | 95 | static inline pte_t huge_pte_mkwrite(pte_t pte) |
155 | { | 96 | { |
156 | pmd_t pmd; | 97 | return pte_mkwrite(pte); |
157 | |||
158 | pmd_val(pmd) = pte_val(pte); | ||
159 | pte_val(pte) = pmd_val(pmd_mkwrite(pmd)); | ||
160 | return pte; | ||
161 | } | 98 | } |
162 | 99 | ||
163 | static inline pte_t huge_pte_mkdirty(pte_t pte) | 100 | static inline pte_t huge_pte_mkdirty(pte_t pte) |
164 | { | 101 | { |
165 | /* No dirty bit in the segment table entry. */ | 102 | return pte_mkdirty(pte); |
166 | return pte; | ||
167 | } | 103 | } |
168 | 104 | ||
169 | static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot) | 105 | static inline pte_t huge_pte_wrprotect(pte_t pte) |
170 | { | 106 | { |
171 | pmd_t pmd; | 107 | return pte_wrprotect(pte); |
172 | |||
173 | pmd_val(pmd) = pte_val(pte); | ||
174 | pte_val(pte) = pmd_val(pmd_modify(pmd, newprot)); | ||
175 | return pte; | ||
176 | } | 108 | } |
177 | 109 | ||
178 | static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr, | 110 | static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot) |
179 | pte_t *ptep) | ||
180 | { | 111 | { |
181 | pmd_clear((pmd_t *) ptep); | 112 | return pte_modify(pte, newprot); |
182 | } | 113 | } |
183 | 114 | ||
184 | #endif /* _ASM_S390_HUGETLB_H */ | 115 | #endif /* _ASM_S390_HUGETLB_H */ |
diff --git a/arch/s390/include/asm/hw_irq.h b/arch/s390/include/asm/hw_irq.h index 7e3d2586c1ff..ee96a8b697f9 100644 --- a/arch/s390/include/asm/hw_irq.h +++ b/arch/s390/include/asm/hw_irq.h | |||
@@ -4,19 +4,8 @@ | |||
4 | #include <linux/msi.h> | 4 | #include <linux/msi.h> |
5 | #include <linux/pci.h> | 5 | #include <linux/pci.h> |
6 | 6 | ||
7 | static inline struct msi_desc *irq_get_msi_desc(unsigned int irq) | 7 | void __init init_airq_interrupts(void); |
8 | { | 8 | void __init init_cio_interrupts(void); |
9 | return __irq_get_msi_desc(irq); | 9 | void __init init_ext_interrupts(void); |
10 | } | ||
11 | |||
12 | /* Must be called with msi map lock held */ | ||
13 | static inline int irq_set_msi_desc(unsigned int irq, struct msi_desc *msi) | ||
14 | { | ||
15 | if (!msi) | ||
16 | return -EINVAL; | ||
17 | |||
18 | msi->irq = irq; | ||
19 | return 0; | ||
20 | } | ||
21 | 10 | ||
22 | #endif | 11 | #endif |
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h index 87c17bfb2968..1eaa3625803c 100644 --- a/arch/s390/include/asm/irq.h +++ b/arch/s390/include/asm/irq.h | |||
@@ -1,17 +1,28 @@ | |||
1 | #ifndef _ASM_IRQ_H | 1 | #ifndef _ASM_IRQ_H |
2 | #define _ASM_IRQ_H | 2 | #define _ASM_IRQ_H |
3 | 3 | ||
4 | #define EXT_INTERRUPT 1 | ||
5 | #define IO_INTERRUPT 2 | ||
6 | #define THIN_INTERRUPT 3 | ||
7 | |||
8 | #define NR_IRQS_BASE 4 | ||
9 | |||
10 | #ifdef CONFIG_PCI_NR_MSI | ||
11 | # define NR_IRQS (NR_IRQS_BASE + CONFIG_PCI_NR_MSI) | ||
12 | #else | ||
13 | # define NR_IRQS NR_IRQS_BASE | ||
14 | #endif | ||
15 | |||
16 | /* This number is used when no interrupt has been assigned */ | ||
17 | #define NO_IRQ 0 | ||
18 | |||
19 | #ifndef __ASSEMBLY__ | ||
20 | |||
4 | #include <linux/hardirq.h> | 21 | #include <linux/hardirq.h> |
5 | #include <linux/percpu.h> | 22 | #include <linux/percpu.h> |
6 | #include <linux/cache.h> | 23 | #include <linux/cache.h> |
7 | #include <linux/types.h> | 24 | #include <linux/types.h> |
8 | 25 | ||
9 | enum interruption_main_class { | ||
10 | EXTERNAL_INTERRUPT, | ||
11 | IO_INTERRUPT, | ||
12 | NR_IRQS | ||
13 | }; | ||
14 | |||
15 | enum interruption_class { | 26 | enum interruption_class { |
16 | IRQEXT_CLK, | 27 | IRQEXT_CLK, |
17 | IRQEXT_EXC, | 28 | IRQEXT_EXC, |
@@ -72,14 +83,8 @@ void service_subclass_irq_unregister(void); | |||
72 | void measurement_alert_subclass_register(void); | 83 | void measurement_alert_subclass_register(void); |
73 | void measurement_alert_subclass_unregister(void); | 84 | void measurement_alert_subclass_unregister(void); |
74 | 85 | ||
75 | #ifdef CONFIG_LOCKDEP | 86 | #define irq_canonicalize(irq) (irq) |
76 | # define disable_irq_nosync_lockdep(irq) disable_irq_nosync(irq) | 87 | |
77 | # define disable_irq_nosync_lockdep_irqsave(irq, flags) \ | 88 | #endif /* __ASSEMBLY__ */ |
78 | disable_irq_nosync(irq) | ||
79 | # define disable_irq_lockdep(irq) disable_irq(irq) | ||
80 | # define enable_irq_lockdep(irq) enable_irq(irq) | ||
81 | # define enable_irq_lockdep_irqrestore(irq, flags) \ | ||
82 | enable_irq(irq) | ||
83 | #endif | ||
84 | 89 | ||
85 | #endif /* _ASM_IRQ_H */ | 90 | #endif /* _ASM_IRQ_H */ |
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index 084e7755ed9b..7b7fce4e8469 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h | |||
@@ -77,8 +77,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
77 | WARN_ON(atomic_read(&prev->context.attach_count) < 0); | 77 | WARN_ON(atomic_read(&prev->context.attach_count) < 0); |
78 | atomic_inc(&next->context.attach_count); | 78 | atomic_inc(&next->context.attach_count); |
79 | /* Check for TLBs not flushed yet */ | 79 | /* Check for TLBs not flushed yet */ |
80 | if (next->context.flush_mm) | 80 | __tlb_flush_mm_lazy(next); |
81 | __tlb_flush_mm(next); | ||
82 | } | 81 | } |
83 | 82 | ||
84 | #define enter_lazy_tlb(mm,tsk) do { } while (0) | 83 | #define enter_lazy_tlb(mm,tsk) do { } while (0) |
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h index 5d64fb7619cc..1e51f2915b2e 100644 --- a/arch/s390/include/asm/page.h +++ b/arch/s390/include/asm/page.h | |||
@@ -32,16 +32,6 @@ | |||
32 | 32 | ||
33 | void storage_key_init_range(unsigned long start, unsigned long end); | 33 | void storage_key_init_range(unsigned long start, unsigned long end); |
34 | 34 | ||
35 | static inline unsigned long pfmf(unsigned long function, unsigned long address) | ||
36 | { | ||
37 | asm volatile( | ||
38 | " .insn rre,0xb9af0000,%[function],%[address]" | ||
39 | : [address] "+a" (address) | ||
40 | : [function] "d" (function) | ||
41 | : "memory"); | ||
42 | return address; | ||
43 | } | ||
44 | |||
45 | static inline void clear_page(void *page) | 35 | static inline void clear_page(void *page) |
46 | { | 36 | { |
47 | register unsigned long reg1 asm ("1") = 0; | 37 | register unsigned long reg1 asm ("1") = 0; |
@@ -150,15 +140,6 @@ static inline int page_reset_referenced(unsigned long addr) | |||
150 | #define _PAGE_FP_BIT 0x08 /* HW fetch protection bit */ | 140 | #define _PAGE_FP_BIT 0x08 /* HW fetch protection bit */ |
151 | #define _PAGE_ACC_BITS 0xf0 /* HW access control bits */ | 141 | #define _PAGE_ACC_BITS 0xf0 /* HW access control bits */ |
152 | 142 | ||
153 | /* | ||
154 | * Test and clear referenced bit in storage key. | ||
155 | */ | ||
156 | #define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG | ||
157 | static inline int page_test_and_clear_young(unsigned long pfn) | ||
158 | { | ||
159 | return page_reset_referenced(pfn << PAGE_SHIFT); | ||
160 | } | ||
161 | |||
162 | struct page; | 143 | struct page; |
163 | void arch_free_page(struct page *page, int order); | 144 | void arch_free_page(struct page *page, int order); |
164 | void arch_alloc_page(struct page *page, int order); | 145 | void arch_alloc_page(struct page *page, int order); |
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h index 6e577ba0e5da..c290f13d1c47 100644 --- a/arch/s390/include/asm/pci.h +++ b/arch/s390/include/asm/pci.h | |||
@@ -6,6 +6,7 @@ | |||
6 | /* must be set before including pci_clp.h */ | 6 | /* must be set before including pci_clp.h */ |
7 | #define PCI_BAR_COUNT 6 | 7 | #define PCI_BAR_COUNT 6 |
8 | 8 | ||
9 | #include <linux/pci.h> | ||
9 | #include <asm-generic/pci.h> | 10 | #include <asm-generic/pci.h> |
10 | #include <asm-generic/pci-dma-compat.h> | 11 | #include <asm-generic/pci-dma-compat.h> |
11 | #include <asm/pci_clp.h> | 12 | #include <asm/pci_clp.h> |
@@ -53,14 +54,9 @@ struct zpci_fmb { | |||
53 | atomic64_t unmapped_pages; | 54 | atomic64_t unmapped_pages; |
54 | } __packed __aligned(16); | 55 | } __packed __aligned(16); |
55 | 56 | ||
56 | struct msi_map { | 57 | #define ZPCI_MSI_VEC_BITS 11 |
57 | unsigned long irq; | 58 | #define ZPCI_MSI_VEC_MAX (1 << ZPCI_MSI_VEC_BITS) |
58 | struct msi_desc *msi; | 59 | #define ZPCI_MSI_VEC_MASK (ZPCI_MSI_VEC_MAX - 1) |
59 | struct hlist_node msi_chain; | ||
60 | }; | ||
61 | |||
62 | #define ZPCI_NR_MSI_VECS 64 | ||
63 | #define ZPCI_MSI_MASK (ZPCI_NR_MSI_VECS - 1) | ||
64 | 60 | ||
65 | enum zpci_state { | 61 | enum zpci_state { |
66 | ZPCI_FN_STATE_RESERVED, | 62 | ZPCI_FN_STATE_RESERVED, |
@@ -91,8 +87,7 @@ struct zpci_dev { | |||
91 | 87 | ||
92 | /* IRQ stuff */ | 88 | /* IRQ stuff */ |
93 | u64 msi_addr; /* MSI address */ | 89 | u64 msi_addr; /* MSI address */ |
94 | struct zdev_irq_map *irq_map; | 90 | struct airq_iv *aibv; /* adapter interrupt bit vector */ |
95 | struct msi_map *msi_map[ZPCI_NR_MSI_VECS]; | ||
96 | unsigned int aisb; /* number of the summary bit */ | 91 | unsigned int aisb; /* number of the summary bit */ |
97 | 92 | ||
98 | /* DMA stuff */ | 93 | /* DMA stuff */ |
@@ -122,11 +117,6 @@ struct zpci_dev { | |||
122 | struct dentry *debugfs_perf; | 117 | struct dentry *debugfs_perf; |
123 | }; | 118 | }; |
124 | 119 | ||
125 | struct pci_hp_callback_ops { | ||
126 | int (*create_slot) (struct zpci_dev *zdev); | ||
127 | void (*remove_slot) (struct zpci_dev *zdev); | ||
128 | }; | ||
129 | |||
130 | static inline bool zdev_enabled(struct zpci_dev *zdev) | 120 | static inline bool zdev_enabled(struct zpci_dev *zdev) |
131 | { | 121 | { |
132 | return (zdev->fh & (1UL << 31)) ? true : false; | 122 | return (zdev->fh & (1UL << 31)) ? true : false; |
@@ -146,32 +136,38 @@ int zpci_register_ioat(struct zpci_dev *, u8, u64, u64, u64); | |||
146 | int zpci_unregister_ioat(struct zpci_dev *, u8); | 136 | int zpci_unregister_ioat(struct zpci_dev *, u8); |
147 | 137 | ||
148 | /* CLP */ | 138 | /* CLP */ |
149 | int clp_find_pci_devices(void); | 139 | int clp_scan_pci_devices(void); |
140 | int clp_rescan_pci_devices(void); | ||
141 | int clp_rescan_pci_devices_simple(void); | ||
150 | int clp_add_pci_device(u32, u32, int); | 142 | int clp_add_pci_device(u32, u32, int); |
151 | int clp_enable_fh(struct zpci_dev *, u8); | 143 | int clp_enable_fh(struct zpci_dev *, u8); |
152 | int clp_disable_fh(struct zpci_dev *); | 144 | int clp_disable_fh(struct zpci_dev *); |
153 | 145 | ||
154 | /* MSI */ | ||
155 | struct msi_desc *__irq_get_msi_desc(unsigned int); | ||
156 | int zpci_msi_set_mask_bits(struct msi_desc *, u32, u32); | ||
157 | int zpci_setup_msi_irq(struct zpci_dev *, struct msi_desc *, unsigned int, int); | ||
158 | void zpci_teardown_msi_irq(struct zpci_dev *, struct msi_desc *); | ||
159 | int zpci_msihash_init(void); | ||
160 | void zpci_msihash_exit(void); | ||
161 | |||
162 | #ifdef CONFIG_PCI | 146 | #ifdef CONFIG_PCI |
163 | /* Error handling and recovery */ | 147 | /* Error handling and recovery */ |
164 | void zpci_event_error(void *); | 148 | void zpci_event_error(void *); |
165 | void zpci_event_availability(void *); | 149 | void zpci_event_availability(void *); |
150 | void zpci_rescan(void); | ||
166 | #else /* CONFIG_PCI */ | 151 | #else /* CONFIG_PCI */ |
167 | static inline void zpci_event_error(void *e) {} | 152 | static inline void zpci_event_error(void *e) {} |
168 | static inline void zpci_event_availability(void *e) {} | 153 | static inline void zpci_event_availability(void *e) {} |
154 | static inline void zpci_rescan(void) {} | ||
169 | #endif /* CONFIG_PCI */ | 155 | #endif /* CONFIG_PCI */ |
170 | 156 | ||
157 | #ifdef CONFIG_HOTPLUG_PCI_S390 | ||
158 | int zpci_init_slot(struct zpci_dev *); | ||
159 | void zpci_exit_slot(struct zpci_dev *); | ||
160 | #else /* CONFIG_HOTPLUG_PCI_S390 */ | ||
161 | static inline int zpci_init_slot(struct zpci_dev *zdev) | ||
162 | { | ||
163 | return 0; | ||
164 | } | ||
165 | static inline void zpci_exit_slot(struct zpci_dev *zdev) {} | ||
166 | #endif /* CONFIG_HOTPLUG_PCI_S390 */ | ||
167 | |||
171 | /* Helpers */ | 168 | /* Helpers */ |
172 | struct zpci_dev *get_zdev(struct pci_dev *); | 169 | struct zpci_dev *get_zdev(struct pci_dev *); |
173 | struct zpci_dev *get_zdev_by_fid(u32); | 170 | struct zpci_dev *get_zdev_by_fid(u32); |
174 | bool zpci_fid_present(u32); | ||
175 | 171 | ||
176 | /* sysfs */ | 172 | /* sysfs */ |
177 | int zpci_sysfs_add_device(struct device *); | 173 | int zpci_sysfs_add_device(struct device *); |
@@ -181,14 +177,6 @@ void zpci_sysfs_remove_device(struct device *); | |||
181 | int zpci_dma_init(void); | 177 | int zpci_dma_init(void); |
182 | void zpci_dma_exit(void); | 178 | void zpci_dma_exit(void); |
183 | 179 | ||
184 | /* Hotplug */ | ||
185 | extern struct mutex zpci_list_lock; | ||
186 | extern struct list_head zpci_list; | ||
187 | extern unsigned int s390_pci_probe; | ||
188 | |||
189 | void zpci_register_hp_ops(struct pci_hp_callback_ops *); | ||
190 | void zpci_deregister_hp_ops(void); | ||
191 | |||
192 | /* FMB */ | 180 | /* FMB */ |
193 | int zpci_fmb_enable_device(struct zpci_dev *); | 181 | int zpci_fmb_enable_device(struct zpci_dev *); |
194 | int zpci_fmb_disable_device(struct zpci_dev *); | 182 | int zpci_fmb_disable_device(struct zpci_dev *); |
diff --git a/arch/s390/include/asm/pci_insn.h b/arch/s390/include/asm/pci_insn.h index e6a2bdd4d705..df6eac9f0cb4 100644 --- a/arch/s390/include/asm/pci_insn.h +++ b/arch/s390/include/asm/pci_insn.h | |||
@@ -79,11 +79,11 @@ struct zpci_fib { | |||
79 | } __packed; | 79 | } __packed; |
80 | 80 | ||
81 | 81 | ||
82 | int s390pci_mod_fc(u64 req, struct zpci_fib *fib); | 82 | int zpci_mod_fc(u64 req, struct zpci_fib *fib); |
83 | int s390pci_refresh_trans(u64 fn, u64 addr, u64 range); | 83 | int zpci_refresh_trans(u64 fn, u64 addr, u64 range); |
84 | int s390pci_load(u64 *data, u64 req, u64 offset); | 84 | int zpci_load(u64 *data, u64 req, u64 offset); |
85 | int s390pci_store(u64 data, u64 req, u64 offset); | 85 | int zpci_store(u64 data, u64 req, u64 offset); |
86 | int s390pci_store_block(const u64 *data, u64 req, u64 offset); | 86 | int zpci_store_block(const u64 *data, u64 req, u64 offset); |
87 | void set_irq_ctrl(u16 ctl, char *unused, u8 isc); | 87 | void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc); |
88 | 88 | ||
89 | #endif | 89 | #endif |
diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h index 83a9caa6ae53..d194d544d694 100644 --- a/arch/s390/include/asm/pci_io.h +++ b/arch/s390/include/asm/pci_io.h | |||
@@ -36,7 +36,7 @@ static inline RETTYPE zpci_read_##RETTYPE(const volatile void __iomem *addr) \ | |||
36 | u64 data; \ | 36 | u64 data; \ |
37 | int rc; \ | 37 | int rc; \ |
38 | \ | 38 | \ |
39 | rc = s390pci_load(&data, req, ZPCI_OFFSET(addr)); \ | 39 | rc = zpci_load(&data, req, ZPCI_OFFSET(addr)); \ |
40 | if (rc) \ | 40 | if (rc) \ |
41 | data = -1ULL; \ | 41 | data = -1ULL; \ |
42 | return (RETTYPE) data; \ | 42 | return (RETTYPE) data; \ |
@@ -50,7 +50,7 @@ static inline void zpci_write_##VALTYPE(VALTYPE val, \ | |||
50 | u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \ | 50 | u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \ |
51 | u64 data = (VALTYPE) val; \ | 51 | u64 data = (VALTYPE) val; \ |
52 | \ | 52 | \ |
53 | s390pci_store(data, req, ZPCI_OFFSET(addr)); \ | 53 | zpci_store(data, req, ZPCI_OFFSET(addr)); \ |
54 | } | 54 | } |
55 | 55 | ||
56 | zpci_read(8, u64) | 56 | zpci_read(8, u64) |
@@ -83,7 +83,7 @@ static inline int zpci_write_single(u64 req, const u64 *data, u64 offset, u8 len | |||
83 | val = 0; /* let FW report error */ | 83 | val = 0; /* let FW report error */ |
84 | break; | 84 | break; |
85 | } | 85 | } |
86 | return s390pci_store(val, req, offset); | 86 | return zpci_store(val, req, offset); |
87 | } | 87 | } |
88 | 88 | ||
89 | static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len) | 89 | static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len) |
@@ -91,7 +91,7 @@ static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len) | |||
91 | u64 data; | 91 | u64 data; |
92 | int cc; | 92 | int cc; |
93 | 93 | ||
94 | cc = s390pci_load(&data, req, offset); | 94 | cc = zpci_load(&data, req, offset); |
95 | if (cc) | 95 | if (cc) |
96 | goto out; | 96 | goto out; |
97 | 97 | ||
@@ -115,7 +115,7 @@ out: | |||
115 | 115 | ||
116 | static inline int zpci_write_block(u64 req, const u64 *data, u64 offset) | 116 | static inline int zpci_write_block(u64 req, const u64 *data, u64 offset) |
117 | { | 117 | { |
118 | return s390pci_store_block(data, req, offset); | 118 | return zpci_store_block(data, req, offset); |
119 | } | 119 | } |
120 | 120 | ||
121 | static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max) | 121 | static inline u8 zpci_get_max_write_size(u64 src, u64 dst, int len, int max) |
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 75fb726de91f..9f215b40109e 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h | |||
@@ -217,63 +217,57 @@ extern unsigned long MODULES_END; | |||
217 | 217 | ||
218 | /* Hardware bits in the page table entry */ | 218 | /* Hardware bits in the page table entry */ |
219 | #define _PAGE_CO 0x100 /* HW Change-bit override */ | 219 | #define _PAGE_CO 0x100 /* HW Change-bit override */ |
220 | #define _PAGE_RO 0x200 /* HW read-only bit */ | 220 | #define _PAGE_PROTECT 0x200 /* HW read-only bit */ |
221 | #define _PAGE_INVALID 0x400 /* HW invalid bit */ | 221 | #define _PAGE_INVALID 0x400 /* HW invalid bit */ |
222 | #define _PAGE_LARGE 0x800 /* Bit to mark a large pte */ | ||
222 | 223 | ||
223 | /* Software bits in the page table entry */ | 224 | /* Software bits in the page table entry */ |
224 | #define _PAGE_SWT 0x001 /* SW pte type bit t */ | 225 | #define _PAGE_PRESENT 0x001 /* SW pte present bit */ |
225 | #define _PAGE_SWX 0x002 /* SW pte type bit x */ | 226 | #define _PAGE_TYPE 0x002 /* SW pte type bit */ |
226 | #define _PAGE_SWC 0x004 /* SW pte changed bit */ | 227 | #define _PAGE_YOUNG 0x004 /* SW pte young bit */ |
227 | #define _PAGE_SWR 0x008 /* SW pte referenced bit */ | 228 | #define _PAGE_DIRTY 0x008 /* SW pte dirty bit */ |
228 | #define _PAGE_SWW 0x010 /* SW pte write bit */ | 229 | #define _PAGE_READ 0x010 /* SW pte read bit */ |
229 | #define _PAGE_SPECIAL 0x020 /* SW associated with special page */ | 230 | #define _PAGE_WRITE 0x020 /* SW pte write bit */ |
231 | #define _PAGE_SPECIAL 0x040 /* SW associated with special page */ | ||
230 | #define __HAVE_ARCH_PTE_SPECIAL | 232 | #define __HAVE_ARCH_PTE_SPECIAL |
231 | 233 | ||
232 | /* Set of bits not changed in pte_modify */ | 234 | /* Set of bits not changed in pte_modify */ |
233 | #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_CO | \ | 235 | #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_SPECIAL | _PAGE_CO | \ |
234 | _PAGE_SWC | _PAGE_SWR) | 236 | _PAGE_DIRTY | _PAGE_YOUNG) |
235 | |||
236 | /* Six different types of pages. */ | ||
237 | #define _PAGE_TYPE_EMPTY 0x400 | ||
238 | #define _PAGE_TYPE_NONE 0x401 | ||
239 | #define _PAGE_TYPE_SWAP 0x403 | ||
240 | #define _PAGE_TYPE_FILE 0x601 /* bit 0x002 is used for offset !! */ | ||
241 | #define _PAGE_TYPE_RO 0x200 | ||
242 | #define _PAGE_TYPE_RW 0x000 | ||
243 | 237 | ||
244 | /* | 238 | /* |
245 | * Only four types for huge pages, using the invalid bit and protection bit | 239 | * handle_pte_fault uses pte_present, pte_none and pte_file to find out the |
246 | * of a segment table entry. | 240 | * pte type WITHOUT holding the page table lock. The _PAGE_PRESENT bit |
247 | */ | 241 | * is used to distinguish present from not-present ptes. It is changed only |
248 | #define _HPAGE_TYPE_EMPTY 0x020 /* _SEGMENT_ENTRY_INV */ | 242 | * with the page table lock held. |
249 | #define _HPAGE_TYPE_NONE 0x220 | 243 | * |
250 | #define _HPAGE_TYPE_RO 0x200 /* _SEGMENT_ENTRY_RO */ | 244 | * The following table gives the different possible bit combinations for |
251 | #define _HPAGE_TYPE_RW 0x000 | 245 | * the pte hardware and software bits in the last 12 bits of a pte: |
252 | |||
253 | /* | ||
254 | * PTE type bits are rather complicated. handle_pte_fault uses pte_present, | ||
255 | * pte_none and pte_file to find out the pte type WITHOUT holding the page | ||
256 | * table lock. ptep_clear_flush on the other hand uses ptep_clear_flush to | ||
257 | * invalidate a given pte. ipte sets the hw invalid bit and clears all tlbs | ||
258 | * for the page. The page table entry is set to _PAGE_TYPE_EMPTY afterwards. | ||
259 | * This change is done while holding the lock, but the intermediate step | ||
260 | * of a previously valid pte with the hw invalid bit set can be observed by | ||
261 | * handle_pte_fault. That makes it necessary that all valid pte types with | ||
262 | * the hw invalid bit set must be distinguishable from the four pte types | ||
263 | * empty, none, swap and file. | ||
264 | * | 246 | * |
265 | * irxt ipte irxt | 247 | * 842100000000 |
266 | * _PAGE_TYPE_EMPTY 1000 -> 1000 | 248 | * 000084210000 |
267 | * _PAGE_TYPE_NONE 1001 -> 1001 | 249 | * 000000008421 |
268 | * _PAGE_TYPE_SWAP 1011 -> 1011 | 250 | * .IR...wrdytp |
269 | * _PAGE_TYPE_FILE 11?1 -> 11?1 | 251 | * empty .10...000000 |
270 | * _PAGE_TYPE_RO 0100 -> 1100 | 252 | * swap .10...xxxx10 |
271 | * _PAGE_TYPE_RW 0000 -> 1000 | 253 | * file .11...xxxxx0 |
254 | * prot-none, clean, old .11...000001 | ||
255 | * prot-none, clean, young .11...000101 | ||
256 | * prot-none, dirty, old .10...001001 | ||
257 | * prot-none, dirty, young .10...001101 | ||
258 | * read-only, clean, old .11...010001 | ||
259 | * read-only, clean, young .01...010101 | ||
260 | * read-only, dirty, old .11...011001 | ||
261 | * read-only, dirty, young .01...011101 | ||
262 | * read-write, clean, old .11...110001 | ||
263 | * read-write, clean, young .01...110101 | ||
264 | * read-write, dirty, old .10...111001 | ||
265 | * read-write, dirty, young .00...111101 | ||
272 | * | 266 | * |
273 | * pte_none is true for bits combinations 1000, 1010, 1100, 1110 | 267 | * pte_present is true for the bit pattern .xx...xxxxx1, (pte & 0x001) == 0x001 |
274 | * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001 | 268 | * pte_none is true for the bit pattern .10...xxxx00, (pte & 0x603) == 0x400 |
275 | * pte_file is true for bits combinations 1101, 1111 | 269 | * pte_file is true for the bit pattern .11...xxxxx0, (pte & 0x601) == 0x600 |
276 | * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid. | 270 | * pte_swap is true for the bit pattern .10...xxxx10, (pte & 0x603) == 0x402 |
277 | */ | 271 | */ |
278 | 272 | ||
279 | #ifndef CONFIG_64BIT | 273 | #ifndef CONFIG_64BIT |
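The bit table above is what the rest of this patch implements; as a quick cross-check, here is a small userspace rendering of the four classification rules it ends with (present, none, file, swap), using only the masks defined in this hunk and a plain unsigned long in place of pte_t.

/*
 * pte classification per the comment above:
 *   pte_present: (pte & 0x001) == 0x001
 *   pte_none:    (pte & 0x603) == 0x400
 *   pte_file:    (pte & 0x601) == 0x600
 *   pte_swap:    (pte & 0x603) == 0x402
 */
#include <assert.h>

#define _PAGE_PRESENT	0x001	/* SW pte present bit */
#define _PAGE_TYPE	0x002	/* SW pte type bit */
#define _PAGE_PROTECT	0x200	/* HW read-only bit */
#define _PAGE_INVALID	0x400	/* HW invalid bit */

static int pte_present(unsigned long pte)
{
	return (pte & _PAGE_PRESENT) != 0;
}

static int pte_none(unsigned long pte)
{
	return (pte & (_PAGE_INVALID | _PAGE_PROTECT | _PAGE_TYPE |
		       _PAGE_PRESENT)) == _PAGE_INVALID;
}

static int pte_file(unsigned long pte)
{
	return (pte & (_PAGE_INVALID | _PAGE_PROTECT | _PAGE_PRESENT)) ==
		(_PAGE_INVALID | _PAGE_PROTECT);
}

static int pte_swap(unsigned long pte)
{
	return (pte & (_PAGE_INVALID | _PAGE_PROTECT | _PAGE_TYPE |
		       _PAGE_PRESENT)) == (_PAGE_INVALID | _PAGE_TYPE);
}

int main(void)
{
	assert(pte_none(0x400));			/* empty */
	assert(pte_swap(0x402) && !pte_none(0x402));	/* swap entry */
	assert(pte_file(0x600) && !pte_swap(0x600));	/* file entry */
	assert(pte_present(0x001));			/* mapped page */
	return 0;
}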
@@ -286,14 +280,25 @@ extern unsigned long MODULES_END; | |||
286 | #define _ASCE_TABLE_LENGTH 0x7f /* 128 x 64 entries = 8k */ | 280 | #define _ASCE_TABLE_LENGTH 0x7f /* 128 x 64 entries = 8k */ |
287 | 281 | ||
288 | /* Bits in the segment table entry */ | 282 | /* Bits in the segment table entry */ |
283 | #define _SEGMENT_ENTRY_BITS 0x7fffffffUL /* Valid segment table bits */ | ||
289 | #define _SEGMENT_ENTRY_ORIGIN 0x7fffffc0UL /* page table origin */ | 284 | #define _SEGMENT_ENTRY_ORIGIN 0x7fffffc0UL /* page table origin */ |
290 | #define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */ | 285 | #define _SEGMENT_ENTRY_PROTECT 0x200 /* page protection bit */ |
291 | #define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */ | 286 | #define _SEGMENT_ENTRY_INVALID 0x20 /* invalid segment table entry */ |
292 | #define _SEGMENT_ENTRY_COMMON 0x10 /* common segment bit */ | 287 | #define _SEGMENT_ENTRY_COMMON 0x10 /* common segment bit */ |
293 | #define _SEGMENT_ENTRY_PTL 0x0f /* page table length */ | 288 | #define _SEGMENT_ENTRY_PTL 0x0f /* page table length */ |
289 | #define _SEGMENT_ENTRY_NONE _SEGMENT_ENTRY_PROTECT | ||
294 | 290 | ||
295 | #define _SEGMENT_ENTRY (_SEGMENT_ENTRY_PTL) | 291 | #define _SEGMENT_ENTRY (_SEGMENT_ENTRY_PTL) |
296 | #define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INV) | 292 | #define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INVALID) |
293 | |||
294 | /* | ||
295 | * Segment table entry encoding (I = invalid, R = read-only bit): | ||
296 | * ..R...I..... | ||
297 | * prot-none ..1...1..... | ||
298 | * read-only ..1...0..... | ||
299 | * read-write ..0...0..... | ||
300 | * empty ..0...1..... | ||
301 | */ | ||
297 | 302 | ||
298 | /* Page status table bits for virtualization */ | 303 | /* Page status table bits for virtualization */ |
299 | #define PGSTE_ACC_BITS 0xf0000000UL | 304 | #define PGSTE_ACC_BITS 0xf0000000UL |
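In the 31-bit segment-table encoding shown a few lines up, prot-none is the only state with both the protection and the invalid bit set, which is why _SEGMENT_ENTRY_NONE can simply alias _SEGMENT_ENTRY_PROTECT. A minimal decode sketch, looking only at the two bits from that comment (plain integers, constants from this hunk):

#include <stdio.h>

#define _SEGMENT_ENTRY_PROTECT	0x200	/* R */
#define _SEGMENT_ENTRY_INVALID	0x020	/* I */

static const char *segment_state(unsigned long entry)
{
	int prot = (entry & _SEGMENT_ENTRY_PROTECT) != 0;
	int inv  = (entry & _SEGMENT_ENTRY_INVALID) != 0;

	if (prot && inv)
		return "prot-none";	/* ..1...1..... */
	if (prot)
		return "read-only";	/* ..1...0..... */
	if (inv)
		return "empty";		/* ..0...1..... */
	return "read-write";		/* ..0...0..... */
}

int main(void)
{
	printf("%s\n", segment_state(_SEGMENT_ENTRY_PROTECT |
				     _SEGMENT_ENTRY_INVALID));
	printf("%s\n", segment_state(_SEGMENT_ENTRY_INVALID));
	return 0;
}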
@@ -303,9 +308,7 @@ extern unsigned long MODULES_END; | |||
303 | #define PGSTE_HC_BIT 0x00200000UL | 308 | #define PGSTE_HC_BIT 0x00200000UL |
304 | #define PGSTE_GR_BIT 0x00040000UL | 309 | #define PGSTE_GR_BIT 0x00040000UL |
305 | #define PGSTE_GC_BIT 0x00020000UL | 310 | #define PGSTE_GC_BIT 0x00020000UL |
306 | #define PGSTE_UR_BIT 0x00008000UL | 311 | #define PGSTE_IN_BIT 0x00008000UL /* IPTE notify bit */ |
307 | #define PGSTE_UC_BIT 0x00004000UL /* user dirty (migration) */ | ||
308 | #define PGSTE_IN_BIT 0x00002000UL /* IPTE notify bit */ | ||
309 | 312 | ||
310 | #else /* CONFIG_64BIT */ | 313 | #else /* CONFIG_64BIT */ |
311 | 314 | ||
@@ -324,8 +327,8 @@ extern unsigned long MODULES_END; | |||
324 | 327 | ||
325 | /* Bits in the region table entry */ | 328 | /* Bits in the region table entry */ |
326 | #define _REGION_ENTRY_ORIGIN ~0xfffUL/* region/segment table origin */ | 329 | #define _REGION_ENTRY_ORIGIN ~0xfffUL/* region/segment table origin */ |
327 | #define _REGION_ENTRY_RO 0x200 /* region protection bit */ | 330 | #define _REGION_ENTRY_PROTECT 0x200 /* region protection bit */ |
328 | #define _REGION_ENTRY_INV 0x20 /* invalid region table entry */ | 331 | #define _REGION_ENTRY_INVALID 0x20 /* invalid region table entry */ |
329 | #define _REGION_ENTRY_TYPE_MASK 0x0c /* region/segment table type mask */ | 332 | #define _REGION_ENTRY_TYPE_MASK 0x0c /* region/segment table type mask */ |
330 | #define _REGION_ENTRY_TYPE_R1 0x0c /* region first table type */ | 333 | #define _REGION_ENTRY_TYPE_R1 0x0c /* region first table type */ |
331 | #define _REGION_ENTRY_TYPE_R2 0x08 /* region second table type */ | 334 | #define _REGION_ENTRY_TYPE_R2 0x08 /* region second table type */ |
@@ -333,29 +336,47 @@ extern unsigned long MODULES_END; | |||
333 | #define _REGION_ENTRY_LENGTH 0x03 /* region third length */ | 336 | #define _REGION_ENTRY_LENGTH 0x03 /* region third length */ |
334 | 337 | ||
335 | #define _REGION1_ENTRY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH) | 338 | #define _REGION1_ENTRY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH) |
336 | #define _REGION1_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV) | 339 | #define _REGION1_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID) |
337 | #define _REGION2_ENTRY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH) | 340 | #define _REGION2_ENTRY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH) |
338 | #define _REGION2_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV) | 341 | #define _REGION2_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID) |
339 | #define _REGION3_ENTRY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH) | 342 | #define _REGION3_ENTRY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH) |
340 | #define _REGION3_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV) | 343 | #define _REGION3_ENTRY_EMPTY (_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID) |
341 | 344 | ||
342 | #define _REGION3_ENTRY_LARGE 0x400 /* RTTE-format control, large page */ | 345 | #define _REGION3_ENTRY_LARGE 0x400 /* RTTE-format control, large page */ |
343 | #define _REGION3_ENTRY_RO 0x200 /* page protection bit */ | 346 | #define _REGION3_ENTRY_RO 0x200 /* page protection bit */ |
344 | #define _REGION3_ENTRY_CO 0x100 /* change-recording override */ | 347 | #define _REGION3_ENTRY_CO 0x100 /* change-recording override */ |
345 | 348 | ||
346 | /* Bits in the segment table entry */ | 349 | /* Bits in the segment table entry */ |
350 | #define _SEGMENT_ENTRY_BITS 0xfffffffffffffe33UL | ||
351 | #define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff1ff33UL | ||
347 | #define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */ | 352 | #define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */ |
348 | #define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */ | 353 | #define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */ |
349 | #define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */ | 354 | #define _SEGMENT_ENTRY_PROTECT 0x200 /* page protection bit */ |
350 | #define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */ | 355 | #define _SEGMENT_ENTRY_INVALID 0x20 /* invalid segment table entry */ |
351 | 356 | ||
352 | #define _SEGMENT_ENTRY (0) | 357 | #define _SEGMENT_ENTRY (0) |
353 | #define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INV) | 358 | #define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INVALID) |
354 | 359 | ||
355 | #define _SEGMENT_ENTRY_LARGE 0x400 /* STE-format control, large page */ | 360 | #define _SEGMENT_ENTRY_LARGE 0x400 /* STE-format control, large page */ |
356 | #define _SEGMENT_ENTRY_CO 0x100 /* change-recording override */ | 361 | #define _SEGMENT_ENTRY_CO 0x100 /* change-recording override */ |
362 | #define _SEGMENT_ENTRY_SPLIT 0x001 /* THP splitting bit */ | ||
363 | #define _SEGMENT_ENTRY_YOUNG 0x002 /* SW segment young bit */ | ||
364 | #define _SEGMENT_ENTRY_NONE _SEGMENT_ENTRY_YOUNG | ||
365 | |||
366 | /* | ||
367 | * Segment table entry encoding (R = read-only, I = invalid, y = young bit): | ||
368 | * ..R...I...y. | ||
369 | * prot-none, old ..0...1...1. | ||
370 | * prot-none, young ..1...1...1. | ||
371 | * read-only, old ..1...1...0. | ||
372 | * read-only, young ..1...0...1. | ||
373 | * read-write, old ..0...1...0. | ||
374 | * read-write, young ..0...0...1. | ||
375 | * The segment table origin is used to distinguish empty (origin==0) from | ||
376 | * read-write, old segment table entries (origin!=0) | ||
377 | */ | ||
378 | |||
357 | #define _SEGMENT_ENTRY_SPLIT_BIT 0 /* THP splitting bit number */ | 379 | #define _SEGMENT_ENTRY_SPLIT_BIT 0 /* THP splitting bit number */ |
358 | #define _SEGMENT_ENTRY_SPLIT (1UL << _SEGMENT_ENTRY_SPLIT_BIT) | ||
359 | 380 | ||
360 | /* Set of bits not changed in pmd_modify */ | 381 | /* Set of bits not changed in pmd_modify */ |
361 | #define _SEGMENT_CHG_MASK (_SEGMENT_ENTRY_ORIGIN | _SEGMENT_ENTRY_LARGE \ | 382 | #define _SEGMENT_CHG_MASK (_SEGMENT_ENTRY_ORIGIN | _SEGMENT_ENTRY_LARGE \ |
@@ -369,9 +390,7 @@ extern unsigned long MODULES_END; | |||
369 | #define PGSTE_HC_BIT 0x0020000000000000UL | 390 | #define PGSTE_HC_BIT 0x0020000000000000UL |
370 | #define PGSTE_GR_BIT 0x0004000000000000UL | 391 | #define PGSTE_GR_BIT 0x0004000000000000UL |
371 | #define PGSTE_GC_BIT 0x0002000000000000UL | 392 | #define PGSTE_GC_BIT 0x0002000000000000UL |
372 | #define PGSTE_UR_BIT 0x0000800000000000UL | 393 | #define PGSTE_IN_BIT 0x0000800000000000UL /* IPTE notify bit */ |
373 | #define PGSTE_UC_BIT 0x0000400000000000UL /* user dirty (migration) */ | ||
374 | #define PGSTE_IN_BIT 0x0000200000000000UL /* IPTE notify bit */ | ||
375 | 394 | ||
376 | #endif /* CONFIG_64BIT */ | 395 | #endif /* CONFIG_64BIT */ |
377 | 396 | ||
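On 64 bit the segment-table encoding gains a software young bit, and the R/I/y table added above is exactly what the pmd_prot_none(), pmd_young() and pmd_write() helpers introduced later in this patch decode. A standalone model with the three constants from that hunk:

#include <stdio.h>

#define _SEGMENT_ENTRY_PROTECT	0x200	/* R */
#define _SEGMENT_ENTRY_INVALID	0x020	/* I */
#define _SEGMENT_ENTRY_YOUNG	0x002	/* y */
#define _SEGMENT_ENTRY_NONE	_SEGMENT_ENTRY_YOUNG

static int pmd_prot_none(unsigned long pmd)
{
	return (pmd & _SEGMENT_ENTRY_INVALID) && (pmd & _SEGMENT_ENTRY_NONE);
}

static int pmd_young(unsigned long pmd)
{
	if (pmd_prot_none(pmd))
		return (pmd & _SEGMENT_ENTRY_PROTECT) != 0;
	return (pmd & _SEGMENT_ENTRY_YOUNG) != 0;
}

static int pmd_write(unsigned long pmd)
{
	if (pmd_prot_none(pmd))
		return 0;
	return (pmd & _SEGMENT_ENTRY_PROTECT) == 0;
}

int main(void)
{
	unsigned long rw_young = _SEGMENT_ENTRY_YOUNG;	/* ..0...0...1. */
	unsigned long pn_young = _SEGMENT_ENTRY_PROTECT |
				 _SEGMENT_ENTRY_INVALID |
				 _SEGMENT_ENTRY_YOUNG;	/* ..1...1...1. */

	printf("rw_young: write=%d young=%d\n",
	       pmd_write(rw_young), pmd_young(rw_young));
	printf("pn_young: write=%d young=%d\n",
	       pmd_write(pn_young), pmd_young(pn_young));
	return 0;
}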
@@ -386,14 +405,18 @@ extern unsigned long MODULES_END; | |||
386 | /* | 405 | /* |
387 | * Page protection definitions. | 406 | * Page protection definitions. |
388 | */ | 407 | */ |
389 | #define PAGE_NONE __pgprot(_PAGE_TYPE_NONE) | 408 | #define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_INVALID) |
390 | #define PAGE_RO __pgprot(_PAGE_TYPE_RO) | 409 | #define PAGE_READ __pgprot(_PAGE_PRESENT | _PAGE_READ | \ |
391 | #define PAGE_RW __pgprot(_PAGE_TYPE_RO | _PAGE_SWW) | 410 | _PAGE_INVALID | _PAGE_PROTECT) |
392 | #define PAGE_RWC __pgprot(_PAGE_TYPE_RW | _PAGE_SWW | _PAGE_SWC) | 411 | #define PAGE_WRITE __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ |
393 | 412 | _PAGE_INVALID | _PAGE_PROTECT) | |
394 | #define PAGE_KERNEL PAGE_RWC | 413 | |
395 | #define PAGE_SHARED PAGE_KERNEL | 414 | #define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ |
396 | #define PAGE_COPY PAGE_RO | 415 | _PAGE_YOUNG | _PAGE_DIRTY) |
416 | #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \ | ||
417 | _PAGE_YOUNG | _PAGE_DIRTY) | ||
418 | #define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \ | ||
419 | _PAGE_PROTECT) | ||
397 | 420 | ||
398 | /* | 421 | /* |
399 | * On s390 the page table entry has an invalid bit and a read-only bit. | 422 | * On s390 the page table entry has an invalid bit and a read-only bit. |
@@ -402,35 +425,31 @@ extern unsigned long MODULES_END; | |||
402 | */ | 425 | */ |
403 | /*xwr*/ | 426 | /*xwr*/ |
404 | #define __P000 PAGE_NONE | 427 | #define __P000 PAGE_NONE |
405 | #define __P001 PAGE_RO | 428 | #define __P001 PAGE_READ |
406 | #define __P010 PAGE_RO | 429 | #define __P010 PAGE_READ |
407 | #define __P011 PAGE_RO | 430 | #define __P011 PAGE_READ |
408 | #define __P100 PAGE_RO | 431 | #define __P100 PAGE_READ |
409 | #define __P101 PAGE_RO | 432 | #define __P101 PAGE_READ |
410 | #define __P110 PAGE_RO | 433 | #define __P110 PAGE_READ |
411 | #define __P111 PAGE_RO | 434 | #define __P111 PAGE_READ |
412 | 435 | ||
413 | #define __S000 PAGE_NONE | 436 | #define __S000 PAGE_NONE |
414 | #define __S001 PAGE_RO | 437 | #define __S001 PAGE_READ |
415 | #define __S010 PAGE_RW | 438 | #define __S010 PAGE_WRITE |
416 | #define __S011 PAGE_RW | 439 | #define __S011 PAGE_WRITE |
417 | #define __S100 PAGE_RO | 440 | #define __S100 PAGE_READ |
418 | #define __S101 PAGE_RO | 441 | #define __S101 PAGE_READ |
419 | #define __S110 PAGE_RW | 442 | #define __S110 PAGE_WRITE |
420 | #define __S111 PAGE_RW | 443 | #define __S111 PAGE_WRITE |
421 | 444 | ||
422 | /* | 445 | /* |
423 | * Segment entry (large page) protection definitions. | 446 | * Segment entry (large page) protection definitions. |
424 | */ | 447 | */ |
425 | #define SEGMENT_NONE __pgprot(_HPAGE_TYPE_NONE) | 448 | #define SEGMENT_NONE __pgprot(_SEGMENT_ENTRY_INVALID | \ |
426 | #define SEGMENT_RO __pgprot(_HPAGE_TYPE_RO) | 449 | _SEGMENT_ENTRY_NONE) |
427 | #define SEGMENT_RW __pgprot(_HPAGE_TYPE_RW) | 450 | #define SEGMENT_READ __pgprot(_SEGMENT_ENTRY_INVALID | \ |
428 | 451 | _SEGMENT_ENTRY_PROTECT) | |
429 | static inline int mm_exclusive(struct mm_struct *mm) | 452 | #define SEGMENT_WRITE __pgprot(_SEGMENT_ENTRY_INVALID) |
430 | { | ||
431 | return likely(mm == current->active_mm && | ||
432 | atomic_read(&mm->context.attach_count) <= 1); | ||
433 | } | ||
434 | 453 | ||
435 | static inline int mm_has_pgste(struct mm_struct *mm) | 454 | static inline int mm_has_pgste(struct mm_struct *mm) |
436 | { | 455 | { |
@@ -467,7 +486,7 @@ static inline int pgd_none(pgd_t pgd) | |||
467 | { | 486 | { |
468 | if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2) | 487 | if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2) |
469 | return 0; | 488 | return 0; |
470 | return (pgd_val(pgd) & _REGION_ENTRY_INV) != 0UL; | 489 | return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL; |
471 | } | 490 | } |
472 | 491 | ||
473 | static inline int pgd_bad(pgd_t pgd) | 492 | static inline int pgd_bad(pgd_t pgd) |
@@ -478,7 +497,7 @@ static inline int pgd_bad(pgd_t pgd) | |||
478 | * invalid for either table entry. | 497 | * invalid for either table entry. |
479 | */ | 498 | */ |
480 | unsigned long mask = | 499 | unsigned long mask = |
481 | ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV & | 500 | ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID & |
482 | ~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH; | 501 | ~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH; |
483 | return (pgd_val(pgd) & mask) != 0; | 502 | return (pgd_val(pgd) & mask) != 0; |
484 | } | 503 | } |
@@ -494,7 +513,7 @@ static inline int pud_none(pud_t pud) | |||
494 | { | 513 | { |
495 | if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3) | 514 | if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3) |
496 | return 0; | 515 | return 0; |
497 | return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL; | 516 | return (pud_val(pud) & _REGION_ENTRY_INVALID) != 0UL; |
498 | } | 517 | } |
499 | 518 | ||
500 | static inline int pud_large(pud_t pud) | 519 | static inline int pud_large(pud_t pud) |
@@ -512,7 +531,7 @@ static inline int pud_bad(pud_t pud) | |||
512 | * invalid for either table entry. | 531 | * invalid for either table entry. |
513 | */ | 532 | */ |
514 | unsigned long mask = | 533 | unsigned long mask = |
515 | ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV & | 534 | ~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID & |
516 | ~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH; | 535 | ~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH; |
517 | return (pud_val(pud) & mask) != 0; | 536 | return (pud_val(pud) & mask) != 0; |
518 | } | 537 | } |
@@ -521,30 +540,36 @@ static inline int pud_bad(pud_t pud) | |||
521 | 540 | ||
522 | static inline int pmd_present(pmd_t pmd) | 541 | static inline int pmd_present(pmd_t pmd) |
523 | { | 542 | { |
524 | unsigned long mask = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO; | 543 | return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID; |
525 | return (pmd_val(pmd) & mask) == _HPAGE_TYPE_NONE || | ||
526 | !(pmd_val(pmd) & _SEGMENT_ENTRY_INV); | ||
527 | } | 544 | } |
528 | 545 | ||
529 | static inline int pmd_none(pmd_t pmd) | 546 | static inline int pmd_none(pmd_t pmd) |
530 | { | 547 | { |
531 | return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) && | 548 | return pmd_val(pmd) == _SEGMENT_ENTRY_INVALID; |
532 | !(pmd_val(pmd) & _SEGMENT_ENTRY_RO); | ||
533 | } | 549 | } |
534 | 550 | ||
535 | static inline int pmd_large(pmd_t pmd) | 551 | static inline int pmd_large(pmd_t pmd) |
536 | { | 552 | { |
537 | #ifdef CONFIG_64BIT | 553 | #ifdef CONFIG_64BIT |
538 | return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE); | 554 | return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0; |
539 | #else | 555 | #else |
540 | return 0; | 556 | return 0; |
541 | #endif | 557 | #endif |
542 | } | 558 | } |
543 | 559 | ||
560 | static inline int pmd_prot_none(pmd_t pmd) | ||
561 | { | ||
562 | return (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID) && | ||
563 | (pmd_val(pmd) & _SEGMENT_ENTRY_NONE); | ||
564 | } | ||
565 | |||
544 | static inline int pmd_bad(pmd_t pmd) | 566 | static inline int pmd_bad(pmd_t pmd) |
545 | { | 567 | { |
546 | unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV; | 568 | #ifdef CONFIG_64BIT |
547 | return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY; | 569 | if (pmd_large(pmd)) |
570 | return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0; | ||
571 | #endif | ||
572 | return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0; | ||
548 | } | 573 | } |
549 | 574 | ||
550 | #define __HAVE_ARCH_PMDP_SPLITTING_FLUSH | 575 | #define __HAVE_ARCH_PMDP_SPLITTING_FLUSH |
@@ -563,31 +588,40 @@ extern int pmdp_clear_flush_young(struct vm_area_struct *vma, | |||
563 | #define __HAVE_ARCH_PMD_WRITE | 588 | #define __HAVE_ARCH_PMD_WRITE |
564 | static inline int pmd_write(pmd_t pmd) | 589 | static inline int pmd_write(pmd_t pmd) |
565 | { | 590 | { |
566 | return (pmd_val(pmd) & _SEGMENT_ENTRY_RO) == 0; | 591 | if (pmd_prot_none(pmd)) |
592 | return 0; | ||
593 | return (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT) == 0; | ||
567 | } | 594 | } |
568 | 595 | ||
569 | static inline int pmd_young(pmd_t pmd) | 596 | static inline int pmd_young(pmd_t pmd) |
570 | { | 597 | { |
571 | return 0; | 598 | int young = 0; |
599 | #ifdef CONFIG_64BIT | ||
600 | if (pmd_prot_none(pmd)) | ||
601 | young = (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT) != 0; | ||
602 | else | ||
603 | young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0; | ||
604 | #endif | ||
605 | return young; | ||
572 | } | 606 | } |
573 | 607 | ||
574 | static inline int pte_none(pte_t pte) | 608 | static inline int pte_present(pte_t pte) |
575 | { | 609 | { |
576 | return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT); | 610 | /* Bit pattern: (pte & 0x001) == 0x001 */ |
611 | return (pte_val(pte) & _PAGE_PRESENT) != 0; | ||
577 | } | 612 | } |
578 | 613 | ||
579 | static inline int pte_present(pte_t pte) | 614 | static inline int pte_none(pte_t pte) |
580 | { | 615 | { |
581 | unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX; | 616 | /* Bit pattern: pte == 0x400 */ |
582 | return (pte_val(pte) & mask) == _PAGE_TYPE_NONE || | 617 | return pte_val(pte) == _PAGE_INVALID; |
583 | (!(pte_val(pte) & _PAGE_INVALID) && | ||
584 | !(pte_val(pte) & _PAGE_SWT)); | ||
585 | } | 618 | } |
586 | 619 | ||
587 | static inline int pte_file(pte_t pte) | 620 | static inline int pte_file(pte_t pte) |
588 | { | 621 | { |
589 | unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT; | 622 | /* Bit pattern: (pte & 0x601) == 0x600 */ |
590 | return (pte_val(pte) & mask) == _PAGE_TYPE_FILE; | 623 | return (pte_val(pte) & (_PAGE_INVALID | _PAGE_PROTECT | _PAGE_PRESENT)) |
624 | == (_PAGE_INVALID | _PAGE_PROTECT); | ||
591 | } | 625 | } |
592 | 626 | ||
593 | static inline int pte_special(pte_t pte) | 627 | static inline int pte_special(pte_t pte) |
@@ -634,6 +668,15 @@ static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste) | |||
634 | #endif | 668 | #endif |
635 | } | 669 | } |
636 | 670 | ||
671 | static inline pgste_t pgste_get(pte_t *ptep) | ||
672 | { | ||
673 | unsigned long pgste = 0; | ||
674 | #ifdef CONFIG_PGSTE | ||
675 | pgste = *(unsigned long *)(ptep + PTRS_PER_PTE); | ||
676 | #endif | ||
677 | return __pgste(pgste); | ||
678 | } | ||
679 | |||
637 | static inline void pgste_set(pte_t *ptep, pgste_t pgste) | 680 | static inline void pgste_set(pte_t *ptep, pgste_t pgste) |
638 | { | 681 | { |
639 | #ifdef CONFIG_PGSTE | 682 | #ifdef CONFIG_PGSTE |
@@ -644,33 +687,28 @@ static inline void pgste_set(pte_t *ptep, pgste_t pgste) | |||
644 | static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste) | 687 | static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste) |
645 | { | 688 | { |
646 | #ifdef CONFIG_PGSTE | 689 | #ifdef CONFIG_PGSTE |
647 | unsigned long address, bits; | 690 | unsigned long address, bits, skey; |
648 | unsigned char skey; | ||
649 | 691 | ||
650 | if (pte_val(*ptep) & _PAGE_INVALID) | 692 | if (pte_val(*ptep) & _PAGE_INVALID) |
651 | return pgste; | 693 | return pgste; |
652 | address = pte_val(*ptep) & PAGE_MASK; | 694 | address = pte_val(*ptep) & PAGE_MASK; |
653 | skey = page_get_storage_key(address); | 695 | skey = (unsigned long) page_get_storage_key(address); |
654 | bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED); | 696 | bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED); |
655 | /* Clear page changed & referenced bit in the storage key */ | 697 | if (!(pgste_val(pgste) & PGSTE_HC_BIT) && (bits & _PAGE_CHANGED)) { |
656 | if (bits & _PAGE_CHANGED) | 698 | /* Transfer dirty + referenced bit to host bits in pgste */ |
699 | pgste_val(pgste) |= bits << 52; | ||
657 | page_set_storage_key(address, skey ^ bits, 0); | 700 | page_set_storage_key(address, skey ^ bits, 0); |
658 | else if (bits) | 701 | } else if (!(pgste_val(pgste) & PGSTE_HR_BIT) && |
702 | (bits & _PAGE_REFERENCED)) { | ||
703 | /* Transfer referenced bit to host bit in pgste */ | ||
704 | pgste_val(pgste) |= PGSTE_HR_BIT; | ||
659 | page_reset_referenced(address); | 705 | page_reset_referenced(address); |
706 | } | ||
660 | /* Transfer page changed & referenced bit to guest bits in pgste */ | 707 | /* Transfer page changed & referenced bit to guest bits in pgste */ |
661 | pgste_val(pgste) |= bits << 48; /* GR bit & GC bit */ | 708 | pgste_val(pgste) |= bits << 48; /* GR bit & GC bit */ |
662 | /* Get host changed & referenced bits from pgste */ | ||
663 | bits |= (pgste_val(pgste) & (PGSTE_HR_BIT | PGSTE_HC_BIT)) >> 52; | ||
664 | /* Transfer page changed & referenced bit to kvm user bits */ | ||
665 | pgste_val(pgste) |= bits << 45; /* PGSTE_UR_BIT & PGSTE_UC_BIT */ | ||
666 | /* Clear relevant host bits in pgste. */ | ||
667 | pgste_val(pgste) &= ~(PGSTE_HR_BIT | PGSTE_HC_BIT); | ||
668 | pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT); | ||
669 | /* Copy page access key and fetch protection bit to pgste */ | 709 | /* Copy page access key and fetch protection bit to pgste */ |
670 | pgste_val(pgste) |= | 710 | pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT); |
671 | (unsigned long) (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56; | 711 | pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56; |
672 | /* Transfer referenced bit to pte */ | ||
673 | pte_val(*ptep) |= (bits & _PAGE_REFERENCED) << 1; | ||
674 | #endif | 712 | #endif |
675 | return pgste; | 713 | return pgste; |
676 | 714 | ||
@@ -679,24 +717,11 @@ static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste) | |||
679 | static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste) | 717 | static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste) |
680 | { | 718 | { |
681 | #ifdef CONFIG_PGSTE | 719 | #ifdef CONFIG_PGSTE |
682 | int young; | ||
683 | |||
684 | if (pte_val(*ptep) & _PAGE_INVALID) | 720 | if (pte_val(*ptep) & _PAGE_INVALID) |
685 | return pgste; | 721 | return pgste; |
686 | /* Get referenced bit from storage key */ | 722 | /* Get referenced bit from storage key */ |
687 | young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK); | 723 | if (page_reset_referenced(pte_val(*ptep) & PAGE_MASK)) |
688 | if (young) | 724 | pgste_val(pgste) |= PGSTE_HR_BIT | PGSTE_GR_BIT; |
689 | pgste_val(pgste) |= PGSTE_GR_BIT; | ||
690 | /* Get host referenced bit from pgste */ | ||
691 | if (pgste_val(pgste) & PGSTE_HR_BIT) { | ||
692 | pgste_val(pgste) &= ~PGSTE_HR_BIT; | ||
693 | young = 1; | ||
694 | } | ||
695 | /* Transfer referenced bit to kvm user bits and pte */ | ||
696 | if (young) { | ||
697 | pgste_val(pgste) |= PGSTE_UR_BIT; | ||
698 | pte_val(*ptep) |= _PAGE_SWR; | ||
699 | } | ||
700 | #endif | 725 | #endif |
701 | return pgste; | 726 | return pgste; |
702 | } | 727 | } |
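The reworked pgste_update_all() above folds the old user-dirty/user-referenced bookkeeping into the host HC/HR bits: the storage key's change and reference bits are transferred to the host bits only if they have not been recorded there yet, and are always accumulated in the guest GC/GR bits. The userspace model below mirrors that control flow only; the storage key is faked, PGSTE_HR_BIT and the change/reference bit values are assumptions (they are not shown in these hunks), and the access-key/fetch-protection copy is omitted.

#include <stdio.h>

#define _PAGE_CHANGED		0x02ULL	/* assumed storage-key change bit */
#define _PAGE_REFERENCED	0x04ULL	/* assumed storage-key reference bit */
#define PGSTE_HC_BIT		0x0020000000000000ULL
#define PGSTE_HR_BIT		0x0040000000000000ULL	/* assumed value */

static unsigned long long fake_skey = _PAGE_CHANGED | _PAGE_REFERENCED;

static unsigned long long pgste_update_all(unsigned long long pgste)
{
	unsigned long long skey = fake_skey;
	unsigned long long bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);

	if (!(pgste & PGSTE_HC_BIT) && (bits & _PAGE_CHANGED)) {
		/* dirty + referenced go to the host bits, key is reset */
		pgste |= bits << 52;
		fake_skey = skey ^ bits;
	} else if (!(pgste & PGSTE_HR_BIT) && (bits & _PAGE_REFERENCED)) {
		/* only the reference bit is transferred and reset */
		pgste |= PGSTE_HR_BIT;
		fake_skey = skey & ~_PAGE_REFERENCED;
	}
	/* guest change/reference bits are accumulated unconditionally */
	pgste |= bits << 48;
	return pgste;
}

int main(void)
{
	unsigned long long pgste = pgste_update_all(0);

	printf("pgste=%#llx skey=%#llx\n", pgste, fake_skey);
	return 0;
}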
@@ -723,13 +748,13 @@ static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry) | |||
723 | 748 | ||
724 | static inline void pgste_set_pte(pte_t *ptep, pte_t entry) | 749 | static inline void pgste_set_pte(pte_t *ptep, pte_t entry) |
725 | { | 750 | { |
726 | if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_SWW)) { | 751 | if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_WRITE)) { |
727 | /* | 752 | /* |
728 | * Without enhanced suppression-on-protection force | 753 | * Without enhanced suppression-on-protection force |
729 | * the dirty bit on for all writable ptes. | 754 | * the dirty bit on for all writable ptes. |
730 | */ | 755 | */ |
731 | pte_val(entry) |= _PAGE_SWC; | 756 | pte_val(entry) |= _PAGE_DIRTY; |
732 | pte_val(entry) &= ~_PAGE_RO; | 757 | pte_val(entry) &= ~_PAGE_PROTECT; |
733 | } | 758 | } |
734 | *ptep = entry; | 759 | *ptep = entry; |
735 | } | 760 | } |
@@ -841,21 +866,17 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, | |||
841 | */ | 866 | */ |
842 | static inline int pte_write(pte_t pte) | 867 | static inline int pte_write(pte_t pte) |
843 | { | 868 | { |
844 | return (pte_val(pte) & _PAGE_SWW) != 0; | 869 | return (pte_val(pte) & _PAGE_WRITE) != 0; |
845 | } | 870 | } |
846 | 871 | ||
847 | static inline int pte_dirty(pte_t pte) | 872 | static inline int pte_dirty(pte_t pte) |
848 | { | 873 | { |
849 | return (pte_val(pte) & _PAGE_SWC) != 0; | 874 | return (pte_val(pte) & _PAGE_DIRTY) != 0; |
850 | } | 875 | } |
851 | 876 | ||
852 | static inline int pte_young(pte_t pte) | 877 | static inline int pte_young(pte_t pte) |
853 | { | 878 | { |
854 | #ifdef CONFIG_PGSTE | 879 | return (pte_val(pte) & _PAGE_YOUNG) != 0; |
855 | if (pte_val(pte) & _PAGE_SWR) | ||
856 | return 1; | ||
857 | #endif | ||
858 | return 0; | ||
859 | } | 880 | } |
860 | 881 | ||
861 | /* | 882 | /* |
@@ -880,12 +901,12 @@ static inline void pud_clear(pud_t *pud) | |||
880 | 901 | ||
881 | static inline void pmd_clear(pmd_t *pmdp) | 902 | static inline void pmd_clear(pmd_t *pmdp) |
882 | { | 903 | { |
883 | pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY; | 904 | pmd_val(*pmdp) = _SEGMENT_ENTRY_INVALID; |
884 | } | 905 | } |
885 | 906 | ||
886 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | 907 | static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) |
887 | { | 908 | { |
888 | pte_val(*ptep) = _PAGE_TYPE_EMPTY; | 909 | pte_val(*ptep) = _PAGE_INVALID; |
889 | } | 910 | } |
890 | 911 | ||
891 | /* | 912 | /* |
@@ -896,55 +917,63 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | |||
896 | { | 917 | { |
897 | pte_val(pte) &= _PAGE_CHG_MASK; | 918 | pte_val(pte) &= _PAGE_CHG_MASK; |
898 | pte_val(pte) |= pgprot_val(newprot); | 919 | pte_val(pte) |= pgprot_val(newprot); |
899 | if ((pte_val(pte) & _PAGE_SWC) && (pte_val(pte) & _PAGE_SWW)) | 920 | /* |
900 | pte_val(pte) &= ~_PAGE_RO; | 921 | * newprot for PAGE_NONE, PAGE_READ and PAGE_WRITE has the |
922 | * invalid bit set, clear it again for readable, young pages | ||
923 | */ | ||
924 | if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ)) | ||
925 | pte_val(pte) &= ~_PAGE_INVALID; | ||
926 | /* | ||
927 | * newprot for PAGE_READ and PAGE_WRITE has the page protection | ||
928 | * bit set, clear it again for writable, dirty pages | ||
929 | */ | ||
930 | if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE)) | ||
931 | pte_val(pte) &= ~_PAGE_PROTECT; | ||
901 | return pte; | 932 | return pte; |
902 | } | 933 | } |
903 | 934 | ||
904 | static inline pte_t pte_wrprotect(pte_t pte) | 935 | static inline pte_t pte_wrprotect(pte_t pte) |
905 | { | 936 | { |
906 | pte_val(pte) &= ~_PAGE_SWW; | 937 | pte_val(pte) &= ~_PAGE_WRITE; |
907 | /* Do not clobber _PAGE_TYPE_NONE pages! */ | 938 | pte_val(pte) |= _PAGE_PROTECT; |
908 | if (!(pte_val(pte) & _PAGE_INVALID)) | ||
909 | pte_val(pte) |= _PAGE_RO; | ||
910 | return pte; | 939 | return pte; |
911 | } | 940 | } |
912 | 941 | ||
913 | static inline pte_t pte_mkwrite(pte_t pte) | 942 | static inline pte_t pte_mkwrite(pte_t pte) |
914 | { | 943 | { |
915 | pte_val(pte) |= _PAGE_SWW; | 944 | pte_val(pte) |= _PAGE_WRITE; |
916 | if (pte_val(pte) & _PAGE_SWC) | 945 | if (pte_val(pte) & _PAGE_DIRTY) |
917 | pte_val(pte) &= ~_PAGE_RO; | 946 | pte_val(pte) &= ~_PAGE_PROTECT; |
918 | return pte; | 947 | return pte; |
919 | } | 948 | } |
920 | 949 | ||
921 | static inline pte_t pte_mkclean(pte_t pte) | 950 | static inline pte_t pte_mkclean(pte_t pte) |
922 | { | 951 | { |
923 | pte_val(pte) &= ~_PAGE_SWC; | 952 | pte_val(pte) &= ~_PAGE_DIRTY; |
924 | /* Do not clobber _PAGE_TYPE_NONE pages! */ | 953 | pte_val(pte) |= _PAGE_PROTECT; |
925 | if (!(pte_val(pte) & _PAGE_INVALID)) | ||
926 | pte_val(pte) |= _PAGE_RO; | ||
927 | return pte; | 954 | return pte; |
928 | } | 955 | } |
929 | 956 | ||
930 | static inline pte_t pte_mkdirty(pte_t pte) | 957 | static inline pte_t pte_mkdirty(pte_t pte) |
931 | { | 958 | { |
932 | pte_val(pte) |= _PAGE_SWC; | 959 | pte_val(pte) |= _PAGE_DIRTY; |
933 | if (pte_val(pte) & _PAGE_SWW) | 960 | if (pte_val(pte) & _PAGE_WRITE) |
934 | pte_val(pte) &= ~_PAGE_RO; | 961 | pte_val(pte) &= ~_PAGE_PROTECT; |
935 | return pte; | 962 | return pte; |
936 | } | 963 | } |
937 | 964 | ||
938 | static inline pte_t pte_mkold(pte_t pte) | 965 | static inline pte_t pte_mkold(pte_t pte) |
939 | { | 966 | { |
940 | #ifdef CONFIG_PGSTE | 967 | pte_val(pte) &= ~_PAGE_YOUNG; |
941 | pte_val(pte) &= ~_PAGE_SWR; | 968 | pte_val(pte) |= _PAGE_INVALID; |
942 | #endif | ||
943 | return pte; | 969 | return pte; |
944 | } | 970 | } |
945 | 971 | ||
946 | static inline pte_t pte_mkyoung(pte_t pte) | 972 | static inline pte_t pte_mkyoung(pte_t pte) |
947 | { | 973 | { |
974 | pte_val(pte) |= _PAGE_YOUNG; | ||
975 | if (pte_val(pte) & _PAGE_READ) | ||
976 | pte_val(pte) &= ~_PAGE_INVALID; | ||
948 | return pte; | 977 | return pte; |
949 | } | 978 | } |
950 | 979 | ||
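A worked example of the invariant the helpers above maintain: a pte built from PAGE_WRITE starts out hardware-invalid and write-protected, pte_mkyoung() makes it readable by clearing _PAGE_INVALID (allowed because _PAGE_READ is set), and pte_mkdirty() makes it writable by clearing _PAGE_PROTECT (allowed because _PAGE_WRITE is set). Plain integers stand in for pte_t; the constants and the helper bodies are the ones from this patch.

#include <assert.h>

#define _PAGE_PRESENT	0x001
#define _PAGE_YOUNG	0x004
#define _PAGE_DIRTY	0x008
#define _PAGE_READ	0x010
#define _PAGE_WRITE	0x020
#define _PAGE_PROTECT	0x200
#define _PAGE_INVALID	0x400

/* PAGE_WRITE as defined in this patch: readable and writable, but
 * hardware-invalid and protected until referenced and dirtied. */
#define PAGE_WRITE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
		    _PAGE_INVALID | _PAGE_PROTECT)

static unsigned long pte_mkyoung(unsigned long pte)
{
	pte |= _PAGE_YOUNG;
	if (pte & _PAGE_READ)
		pte &= ~_PAGE_INVALID;	/* readable + young => HW valid */
	return pte;
}

static unsigned long pte_mkdirty(unsigned long pte)
{
	pte |= _PAGE_DIRTY;
	if (pte & _PAGE_WRITE)
		pte &= ~_PAGE_PROTECT;	/* writable + dirty => HW writable */
	return pte;
}

int main(void)
{
	unsigned long pte = PAGE_WRITE;

	assert(pte & _PAGE_INVALID);		/* faults until referenced */
	pte = pte_mkyoung(pte);
	assert(!(pte & _PAGE_INVALID));		/* readable by hardware now */
	assert(pte & _PAGE_PROTECT);		/* but still write protected */
	pte = pte_mkdirty(pte);
	assert(!(pte & _PAGE_PROTECT));		/* writable by hardware now */
	return 0;
}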
@@ -957,7 +986,7 @@ static inline pte_t pte_mkspecial(pte_t pte) | |||
957 | #ifdef CONFIG_HUGETLB_PAGE | 986 | #ifdef CONFIG_HUGETLB_PAGE |
958 | static inline pte_t pte_mkhuge(pte_t pte) | 987 | static inline pte_t pte_mkhuge(pte_t pte) |
959 | { | 988 | { |
960 | pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO); | 989 | pte_val(pte) |= _PAGE_LARGE; |
961 | return pte; | 990 | return pte; |
962 | } | 991 | } |
963 | #endif | 992 | #endif |
@@ -974,8 +1003,8 @@ static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm, | |||
974 | if (mm_has_pgste(mm)) { | 1003 | if (mm_has_pgste(mm)) { |
975 | pgste = pgste_get_lock(ptep); | 1004 | pgste = pgste_get_lock(ptep); |
976 | pgste = pgste_update_all(ptep, pgste); | 1005 | pgste = pgste_update_all(ptep, pgste); |
977 | dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT); | 1006 | dirty = !!(pgste_val(pgste) & PGSTE_HC_BIT); |
978 | pgste_val(pgste) &= ~PGSTE_UC_BIT; | 1007 | pgste_val(pgste) &= ~PGSTE_HC_BIT; |
979 | pgste_set_unlock(ptep, pgste); | 1008 | pgste_set_unlock(ptep, pgste); |
980 | return dirty; | 1009 | return dirty; |
981 | } | 1010 | } |
@@ -994,59 +1023,75 @@ static inline int ptep_test_and_clear_user_young(struct mm_struct *mm, | |||
994 | if (mm_has_pgste(mm)) { | 1023 | if (mm_has_pgste(mm)) { |
995 | pgste = pgste_get_lock(ptep); | 1024 | pgste = pgste_get_lock(ptep); |
996 | pgste = pgste_update_young(ptep, pgste); | 1025 | pgste = pgste_update_young(ptep, pgste); |
997 | young = !!(pgste_val(pgste) & PGSTE_UR_BIT); | 1026 | young = !!(pgste_val(pgste) & PGSTE_HR_BIT); |
998 | pgste_val(pgste) &= ~PGSTE_UR_BIT; | 1027 | pgste_val(pgste) &= ~PGSTE_HR_BIT; |
999 | pgste_set_unlock(ptep, pgste); | 1028 | pgste_set_unlock(ptep, pgste); |
1000 | } | 1029 | } |
1001 | return young; | 1030 | return young; |
1002 | } | 1031 | } |
1003 | 1032 | ||
1033 | static inline void __ptep_ipte(unsigned long address, pte_t *ptep) | ||
1034 | { | ||
1035 | if (!(pte_val(*ptep) & _PAGE_INVALID)) { | ||
1036 | #ifndef CONFIG_64BIT | ||
1037 | /* pto must point to the start of the segment table */ | ||
1038 | pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00); | ||
1039 | #else | ||
1040 | /* ipte in zarch mode can do the math */ | ||
1041 | pte_t *pto = ptep; | ||
1042 | #endif | ||
1043 | asm volatile( | ||
1044 | " ipte %2,%3" | ||
1045 | : "=m" (*ptep) : "m" (*ptep), | ||
1046 | "a" (pto), "a" (address)); | ||
1047 | } | ||
1048 | } | ||
1049 | |||
1050 | static inline void ptep_flush_lazy(struct mm_struct *mm, | ||
1051 | unsigned long address, pte_t *ptep) | ||
1052 | { | ||
1053 | int active = (mm == current->active_mm) ? 1 : 0; | ||
1054 | |||
1055 | if (atomic_read(&mm->context.attach_count) > active) | ||
1056 | __ptep_ipte(address, ptep); | ||
1057 | else | ||
1058 | mm->context.flush_mm = 1; | ||
1059 | } | ||
1060 | |||
1004 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG | 1061 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG |
1005 | static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, | 1062 | static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, |
1006 | unsigned long addr, pte_t *ptep) | 1063 | unsigned long addr, pte_t *ptep) |
1007 | { | 1064 | { |
1008 | pgste_t pgste; | 1065 | pgste_t pgste; |
1009 | pte_t pte; | 1066 | pte_t pte; |
1067 | int young; | ||
1010 | 1068 | ||
1011 | if (mm_has_pgste(vma->vm_mm)) { | 1069 | if (mm_has_pgste(vma->vm_mm)) { |
1012 | pgste = pgste_get_lock(ptep); | 1070 | pgste = pgste_get_lock(ptep); |
1013 | pgste = pgste_update_young(ptep, pgste); | 1071 | pgste = pgste_ipte_notify(vma->vm_mm, addr, ptep, pgste); |
1014 | pte = *ptep; | ||
1015 | *ptep = pte_mkold(pte); | ||
1016 | pgste_set_unlock(ptep, pgste); | ||
1017 | return pte_young(pte); | ||
1018 | } | 1072 | } |
1019 | return 0; | 1073 | |
1074 | pte = *ptep; | ||
1075 | __ptep_ipte(addr, ptep); | ||
1076 | young = pte_young(pte); | ||
1077 | pte = pte_mkold(pte); | ||
1078 | |||
1079 | if (mm_has_pgste(vma->vm_mm)) { | ||
1080 | pgste_set_pte(ptep, pte); | ||
1081 | pgste_set_unlock(ptep, pgste); | ||
1082 | } else | ||
1083 | *ptep = pte; | ||
1084 | |||
1085 | return young; | ||
1020 | } | 1086 | } |
1021 | 1087 | ||
1022 | #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH | 1088 | #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH |
1023 | static inline int ptep_clear_flush_young(struct vm_area_struct *vma, | 1089 | static inline int ptep_clear_flush_young(struct vm_area_struct *vma, |
1024 | unsigned long address, pte_t *ptep) | 1090 | unsigned long address, pte_t *ptep) |
1025 | { | 1091 | { |
1026 | /* No need to flush TLB | ||
1027 | * On s390 reference bits are in storage key and never in TLB | ||
1028 | * With virtualization we handle the reference bit, without we | ||
1029 | * we can simply return */ | ||
1030 | return ptep_test_and_clear_young(vma, address, ptep); | 1092 | return ptep_test_and_clear_young(vma, address, ptep); |
1031 | } | 1093 | } |
1032 | 1094 | ||
1033 | static inline void __ptep_ipte(unsigned long address, pte_t *ptep) | ||
1034 | { | ||
1035 | if (!(pte_val(*ptep) & _PAGE_INVALID)) { | ||
1036 | #ifndef CONFIG_64BIT | ||
1037 | /* pto must point to the start of the segment table */ | ||
1038 | pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00); | ||
1039 | #else | ||
1040 | /* ipte in zarch mode can do the math */ | ||
1041 | pte_t *pto = ptep; | ||
1042 | #endif | ||
1043 | asm volatile( | ||
1044 | " ipte %2,%3" | ||
1045 | : "=m" (*ptep) : "m" (*ptep), | ||
1046 | "a" (pto), "a" (address)); | ||
1047 | } | ||
1048 | } | ||
1049 | |||
1050 | /* | 1095 | /* |
1051 | * This is hard to understand. ptep_get_and_clear and ptep_clear_flush | 1096 | * This is hard to understand. ptep_get_and_clear and ptep_clear_flush |
1052 | * both clear the TLB for the unmapped pte. The reason is that | 1097 | * both clear the TLB for the unmapped pte. The reason is that |
@@ -1067,16 +1112,14 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, | |||
1067 | pgste_t pgste; | 1112 | pgste_t pgste; |
1068 | pte_t pte; | 1113 | pte_t pte; |
1069 | 1114 | ||
1070 | mm->context.flush_mm = 1; | ||
1071 | if (mm_has_pgste(mm)) { | 1115 | if (mm_has_pgste(mm)) { |
1072 | pgste = pgste_get_lock(ptep); | 1116 | pgste = pgste_get_lock(ptep); |
1073 | pgste = pgste_ipte_notify(mm, address, ptep, pgste); | 1117 | pgste = pgste_ipte_notify(mm, address, ptep, pgste); |
1074 | } | 1118 | } |
1075 | 1119 | ||
1076 | pte = *ptep; | 1120 | pte = *ptep; |
1077 | if (!mm_exclusive(mm)) | 1121 | ptep_flush_lazy(mm, address, ptep); |
1078 | __ptep_ipte(address, ptep); | 1122 | pte_val(*ptep) = _PAGE_INVALID; |
1079 | pte_val(*ptep) = _PAGE_TYPE_EMPTY; | ||
1080 | 1123 | ||
1081 | if (mm_has_pgste(mm)) { | 1124 | if (mm_has_pgste(mm)) { |
1082 | pgste = pgste_update_all(&pte, pgste); | 1125 | pgste = pgste_update_all(&pte, pgste); |
@@ -1093,15 +1136,14 @@ static inline pte_t ptep_modify_prot_start(struct mm_struct *mm, | |||
1093 | pgste_t pgste; | 1136 | pgste_t pgste; |
1094 | pte_t pte; | 1137 | pte_t pte; |
1095 | 1138 | ||
1096 | mm->context.flush_mm = 1; | ||
1097 | if (mm_has_pgste(mm)) { | 1139 | if (mm_has_pgste(mm)) { |
1098 | pgste = pgste_get_lock(ptep); | 1140 | pgste = pgste_get_lock(ptep); |
1099 | pgste_ipte_notify(mm, address, ptep, pgste); | 1141 | pgste_ipte_notify(mm, address, ptep, pgste); |
1100 | } | 1142 | } |
1101 | 1143 | ||
1102 | pte = *ptep; | 1144 | pte = *ptep; |
1103 | if (!mm_exclusive(mm)) | 1145 | ptep_flush_lazy(mm, address, ptep); |
1104 | __ptep_ipte(address, ptep); | 1146 | pte_val(*ptep) |= _PAGE_INVALID; |
1105 | 1147 | ||
1106 | if (mm_has_pgste(mm)) { | 1148 | if (mm_has_pgste(mm)) { |
1107 | pgste = pgste_update_all(&pte, pgste); | 1149 | pgste = pgste_update_all(&pte, pgste); |
@@ -1117,7 +1159,7 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm, | |||
1117 | pgste_t pgste; | 1159 | pgste_t pgste; |
1118 | 1160 | ||
1119 | if (mm_has_pgste(mm)) { | 1161 | if (mm_has_pgste(mm)) { |
1120 | pgste = *(pgste_t *)(ptep + PTRS_PER_PTE); | 1162 | pgste = pgste_get(ptep); |
1121 | pgste_set_key(ptep, pgste, pte); | 1163 | pgste_set_key(ptep, pgste, pte); |
1122 | pgste_set_pte(ptep, pte); | 1164 | pgste_set_pte(ptep, pte); |
1123 | pgste_set_unlock(ptep, pgste); | 1165 | pgste_set_unlock(ptep, pgste); |
@@ -1139,7 +1181,7 @@ static inline pte_t ptep_clear_flush(struct vm_area_struct *vma, | |||
1139 | 1181 | ||
1140 | pte = *ptep; | 1182 | pte = *ptep; |
1141 | __ptep_ipte(address, ptep); | 1183 | __ptep_ipte(address, ptep); |
1142 | pte_val(*ptep) = _PAGE_TYPE_EMPTY; | 1184 | pte_val(*ptep) = _PAGE_INVALID; |
1143 | 1185 | ||
1144 | if (mm_has_pgste(vma->vm_mm)) { | 1186 | if (mm_has_pgste(vma->vm_mm)) { |
1145 | pgste = pgste_update_all(&pte, pgste); | 1187 | pgste = pgste_update_all(&pte, pgste); |
@@ -1163,18 +1205,17 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, | |||
1163 | pgste_t pgste; | 1205 | pgste_t pgste; |
1164 | pte_t pte; | 1206 | pte_t pte; |
1165 | 1207 | ||
1166 | if (mm_has_pgste(mm)) { | 1208 | if (!full && mm_has_pgste(mm)) { |
1167 | pgste = pgste_get_lock(ptep); | 1209 | pgste = pgste_get_lock(ptep); |
1168 | if (!full) | 1210 | pgste = pgste_ipte_notify(mm, address, ptep, pgste); |
1169 | pgste = pgste_ipte_notify(mm, address, ptep, pgste); | ||
1170 | } | 1211 | } |
1171 | 1212 | ||
1172 | pte = *ptep; | 1213 | pte = *ptep; |
1173 | if (!full) | 1214 | if (!full) |
1174 | __ptep_ipte(address, ptep); | 1215 | ptep_flush_lazy(mm, address, ptep); |
1175 | pte_val(*ptep) = _PAGE_TYPE_EMPTY; | 1216 | pte_val(*ptep) = _PAGE_INVALID; |
1176 | 1217 | ||
1177 | if (mm_has_pgste(mm)) { | 1218 | if (!full && mm_has_pgste(mm)) { |
1178 | pgste = pgste_update_all(&pte, pgste); | 1219 | pgste = pgste_update_all(&pte, pgste); |
1179 | pgste_set_unlock(ptep, pgste); | 1220 | pgste_set_unlock(ptep, pgste); |
1180 | } | 1221 | } |
@@ -1189,14 +1230,12 @@ static inline pte_t ptep_set_wrprotect(struct mm_struct *mm, | |||
1189 | pte_t pte = *ptep; | 1230 | pte_t pte = *ptep; |
1190 | 1231 | ||
1191 | if (pte_write(pte)) { | 1232 | if (pte_write(pte)) { |
1192 | mm->context.flush_mm = 1; | ||
1193 | if (mm_has_pgste(mm)) { | 1233 | if (mm_has_pgste(mm)) { |
1194 | pgste = pgste_get_lock(ptep); | 1234 | pgste = pgste_get_lock(ptep); |
1195 | pgste = pgste_ipte_notify(mm, address, ptep, pgste); | 1235 | pgste = pgste_ipte_notify(mm, address, ptep, pgste); |
1196 | } | 1236 | } |
1197 | 1237 | ||
1198 | if (!mm_exclusive(mm)) | 1238 | ptep_flush_lazy(mm, address, ptep); |
1199 | __ptep_ipte(address, ptep); | ||
1200 | pte = pte_wrprotect(pte); | 1239 | pte = pte_wrprotect(pte); |
1201 | 1240 | ||
1202 | if (mm_has_pgste(mm)) { | 1241 | if (mm_has_pgste(mm)) { |
@@ -1240,7 +1279,7 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot) | |||
1240 | { | 1279 | { |
1241 | pte_t __pte; | 1280 | pte_t __pte; |
1242 | pte_val(__pte) = physpage + pgprot_val(pgprot); | 1281 | pte_val(__pte) = physpage + pgprot_val(pgprot); |
1243 | return __pte; | 1282 | return pte_mkyoung(__pte); |
1244 | } | 1283 | } |
1245 | 1284 | ||
1246 | static inline pte_t mk_pte(struct page *page, pgprot_t pgprot) | 1285 | static inline pte_t mk_pte(struct page *page, pgprot_t pgprot) |
@@ -1248,10 +1287,8 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot) | |||
1248 | unsigned long physpage = page_to_phys(page); | 1287 | unsigned long physpage = page_to_phys(page); |
1249 | pte_t __pte = mk_pte_phys(physpage, pgprot); | 1288 | pte_t __pte = mk_pte_phys(physpage, pgprot); |
1250 | 1289 | ||
1251 | if ((pte_val(__pte) & _PAGE_SWW) && PageDirty(page)) { | 1290 | if (pte_write(__pte) && PageDirty(page)) |
1252 | pte_val(__pte) |= _PAGE_SWC; | 1291 | __pte = pte_mkdirty(__pte); |
1253 | pte_val(__pte) &= ~_PAGE_RO; | ||
1254 | } | ||
1255 | return __pte; | 1292 | return __pte; |
1256 | } | 1293 | } |
1257 | 1294 | ||
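Several hunks above replace the old mm_exclusive()/flush_mm handling with the new ptep_flush_lazy() helper: the IPTE is issued immediately only when the mm may be attached on other CPUs, otherwise the mm is merely marked for a deferred flush. A userspace model of that decision; the struct below only mimics the two fields the helper inspects and is not the kernel's mm_struct.

#include <stdbool.h>
#include <stdio.h>

struct mm_model {
	int attach_count;	/* how many CPUs have this mm attached */
	bool is_active_mm;	/* is it the calling CPU's active mm? */
	bool flush_mm;		/* deferred-flush marker */
};

/* Returns true when the TLB entry must be invalidated right away. */
static bool flush_lazy(struct mm_model *mm)
{
	int active = mm->is_active_mm ? 1 : 0;

	if (mm->attach_count > active)
		return true;	/* other CPUs may use the pte: IPTE now */
	mm->flush_mm = true;	/* only the caller: defer the flush */
	return false;
}

int main(void)
{
	struct mm_model solo   = { .attach_count = 1, .is_active_mm = true };
	struct mm_model shared = { .attach_count = 2, .is_active_mm = true };

	printf("solo:   %s\n", flush_lazy(&solo)   ? "ipte" : "deferred");
	printf("shared: %s\n", flush_lazy(&shared) ? "ipte" : "deferred");
	return 0;
}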
@@ -1313,7 +1350,7 @@ static inline void __pmd_idte(unsigned long address, pmd_t *pmdp) | |||
1313 | unsigned long sto = (unsigned long) pmdp - | 1350 | unsigned long sto = (unsigned long) pmdp - |
1314 | pmd_index(address) * sizeof(pmd_t); | 1351 | pmd_index(address) * sizeof(pmd_t); |
1315 | 1352 | ||
1316 | if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INV)) { | 1353 | if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)) { |
1317 | asm volatile( | 1354 | asm volatile( |
1318 | " .insn rrf,0xb98e0000,%2,%3,0,0" | 1355 | " .insn rrf,0xb98e0000,%2,%3,0,0" |
1319 | : "=m" (*pmdp) | 1356 | : "=m" (*pmdp) |
@@ -1324,24 +1361,68 @@ static inline void __pmd_idte(unsigned long address, pmd_t *pmdp) | |||
1324 | } | 1361 | } |
1325 | } | 1362 | } |
1326 | 1363 | ||
1364 | static inline void __pmd_csp(pmd_t *pmdp) | ||
1365 | { | ||
1366 | register unsigned long reg2 asm("2") = pmd_val(*pmdp); | ||
1367 | register unsigned long reg3 asm("3") = pmd_val(*pmdp) | | ||
1368 | _SEGMENT_ENTRY_INVALID; | ||
1369 | register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5; | ||
1370 | |||
1371 | asm volatile( | ||
1372 | " csp %1,%3" | ||
1373 | : "=m" (*pmdp) | ||
1374 | : "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc"); | ||
1375 | } | ||
1376 | |||
1327 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE) | 1377 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE) |
1328 | static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot) | 1378 | static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot) |
1329 | { | 1379 | { |
1330 | /* | 1380 | /* |
1331 | * pgprot is PAGE_NONE, PAGE_RO, or PAGE_RW (see __Pxxx / __Sxxx) | 1381 | * pgprot is PAGE_NONE, PAGE_READ, or PAGE_WRITE (see __Pxxx / __Sxxx) |
1332 | * Convert to segment table entry format. | 1382 | * Convert to segment table entry format. |
1333 | */ | 1383 | */ |
1334 | if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE)) | 1384 | if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE)) |
1335 | return pgprot_val(SEGMENT_NONE); | 1385 | return pgprot_val(SEGMENT_NONE); |
1336 | if (pgprot_val(pgprot) == pgprot_val(PAGE_RO)) | 1386 | if (pgprot_val(pgprot) == pgprot_val(PAGE_READ)) |
1337 | return pgprot_val(SEGMENT_RO); | 1387 | return pgprot_val(SEGMENT_READ); |
1338 | return pgprot_val(SEGMENT_RW); | 1388 | return pgprot_val(SEGMENT_WRITE); |
1389 | } | ||
1390 | |||
1391 | static inline pmd_t pmd_mkyoung(pmd_t pmd) | ||
1392 | { | ||
1393 | #ifdef CONFIG_64BIT | ||
1394 | if (pmd_prot_none(pmd)) { | ||
1395 | pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; | ||
1396 | } else { | ||
1397 | pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG; | ||
1398 | pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID; | ||
1399 | } | ||
1400 | #endif | ||
1401 | return pmd; | ||
1402 | } | ||
1403 | |||
1404 | static inline pmd_t pmd_mkold(pmd_t pmd) | ||
1405 | { | ||
1406 | #ifdef CONFIG_64BIT | ||
1407 | if (pmd_prot_none(pmd)) { | ||
1408 | pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT; | ||
1409 | } else { | ||
1410 | pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG; | ||
1411 | pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID; | ||
1412 | } | ||
1413 | #endif | ||
1414 | return pmd; | ||
1339 | } | 1415 | } |
1340 | 1416 | ||
1341 | static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) | 1417 | static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) |
1342 | { | 1418 | { |
1419 | int young; | ||
1420 | |||
1421 | young = pmd_young(pmd); | ||
1343 | pmd_val(pmd) &= _SEGMENT_CHG_MASK; | 1422 | pmd_val(pmd) &= _SEGMENT_CHG_MASK; |
1344 | pmd_val(pmd) |= massage_pgprot_pmd(newprot); | 1423 | pmd_val(pmd) |= massage_pgprot_pmd(newprot); |
1424 | if (young) | ||
1425 | pmd = pmd_mkyoung(pmd); | ||
1345 | return pmd; | 1426 | return pmd; |
1346 | } | 1427 | } |
1347 | 1428 | ||
@@ -1349,14 +1430,14 @@ static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot) | |||
1349 | { | 1430 | { |
1350 | pmd_t __pmd; | 1431 | pmd_t __pmd; |
1351 | pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot); | 1432 | pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot); |
1352 | return __pmd; | 1433 | return pmd_mkyoung(__pmd); |
1353 | } | 1434 | } |
1354 | 1435 | ||
1355 | static inline pmd_t pmd_mkwrite(pmd_t pmd) | 1436 | static inline pmd_t pmd_mkwrite(pmd_t pmd) |
1356 | { | 1437 | { |
1357 | /* Do not clobber _HPAGE_TYPE_NONE pages! */ | 1438 | /* Do not clobber PROT_NONE segments! */ |
1358 | if (!(pmd_val(pmd) & _SEGMENT_ENTRY_INV)) | 1439 | if (!pmd_prot_none(pmd)) |
1359 | pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO; | 1440 | pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT; |
1360 | return pmd; | 1441 | return pmd; |
1361 | } | 1442 | } |
1362 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */ | 1443 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */ |
@@ -1378,7 +1459,7 @@ static inline int pmd_trans_splitting(pmd_t pmd) | |||
1378 | static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, | 1459 | static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, |
1379 | pmd_t *pmdp, pmd_t entry) | 1460 | pmd_t *pmdp, pmd_t entry) |
1380 | { | 1461 | { |
1381 | if (!(pmd_val(entry) & _SEGMENT_ENTRY_INV) && MACHINE_HAS_EDAT1) | 1462 | if (!(pmd_val(entry) & _SEGMENT_ENTRY_INVALID) && MACHINE_HAS_EDAT1) |
1382 | pmd_val(entry) |= _SEGMENT_ENTRY_CO; | 1463 | pmd_val(entry) |= _SEGMENT_ENTRY_CO; |
1383 | *pmdp = entry; | 1464 | *pmdp = entry; |
1384 | } | 1465 | } |
@@ -1391,7 +1472,9 @@ static inline pmd_t pmd_mkhuge(pmd_t pmd) | |||
1391 | 1472 | ||
1392 | static inline pmd_t pmd_wrprotect(pmd_t pmd) | 1473 | static inline pmd_t pmd_wrprotect(pmd_t pmd) |
1393 | { | 1474 | { |
1394 | pmd_val(pmd) |= _SEGMENT_ENTRY_RO; | 1475 | /* Do not clobber PROT_NONE segments! */ |
1476 | if (!pmd_prot_none(pmd)) | ||
1477 | pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; | ||
1395 | return pmd; | 1478 | return pmd; |
1396 | } | 1479 | } |
1397 | 1480 | ||
@@ -1401,50 +1484,16 @@ static inline pmd_t pmd_mkdirty(pmd_t pmd) | |||
1401 | return pmd; | 1484 | return pmd; |
1402 | } | 1485 | } |
1403 | 1486 | ||
1404 | static inline pmd_t pmd_mkold(pmd_t pmd) | ||
1405 | { | ||
1406 | /* No referenced bit in the segment table entry. */ | ||
1407 | return pmd; | ||
1408 | } | ||
1409 | |||
1410 | static inline pmd_t pmd_mkyoung(pmd_t pmd) | ||
1411 | { | ||
1412 | /* No referenced bit in the segment table entry. */ | ||
1413 | return pmd; | ||
1414 | } | ||
1415 | |||
1416 | #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG | 1487 | #define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG |
1417 | static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, | 1488 | static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, |
1418 | unsigned long address, pmd_t *pmdp) | 1489 | unsigned long address, pmd_t *pmdp) |
1419 | { | 1490 | { |
1420 | unsigned long pmd_addr = pmd_val(*pmdp) & HPAGE_MASK; | 1491 | pmd_t pmd; |
1421 | long tmp, rc; | ||
1422 | int counter; | ||
1423 | 1492 | ||
1424 | rc = 0; | 1493 | pmd = *pmdp; |
1425 | if (MACHINE_HAS_RRBM) { | 1494 | __pmd_idte(address, pmdp); |
1426 | counter = PTRS_PER_PTE >> 6; | 1495 | *pmdp = pmd_mkold(pmd); |
1427 | asm volatile( | 1496 | return pmd_young(pmd); |
1428 | "0: .insn rre,0xb9ae0000,%0,%3\n" /* rrbm */ | ||
1429 | " ogr %1,%0\n" | ||
1430 | " la %3,0(%4,%3)\n" | ||
1431 | " brct %2,0b\n" | ||
1432 | : "=&d" (tmp), "+&d" (rc), "+d" (counter), | ||
1433 | "+a" (pmd_addr) | ||
1434 | : "a" (64 * 4096UL) : "cc"); | ||
1435 | rc = !!rc; | ||
1436 | } else { | ||
1437 | counter = PTRS_PER_PTE; | ||
1438 | asm volatile( | ||
1439 | "0: rrbe 0,%2\n" | ||
1440 | " la %2,0(%3,%2)\n" | ||
1441 | " brc 12,1f\n" | ||
1442 | " lhi %0,1\n" | ||
1443 | "1: brct %1,0b\n" | ||
1444 | : "+d" (rc), "+d" (counter), "+a" (pmd_addr) | ||
1445 | : "a" (4096UL) : "cc"); | ||
1446 | } | ||
1447 | return rc; | ||
1448 | } | 1497 | } |
1449 | 1498 | ||
1450 | #define __HAVE_ARCH_PMDP_GET_AND_CLEAR | 1499 | #define __HAVE_ARCH_PMDP_GET_AND_CLEAR |
@@ -1510,10 +1559,8 @@ static inline unsigned long pmd_pfn(pmd_t pmd) | |||
1510 | * exception will occur instead of a page translation exception. The | 1559 | * exception will occur instead of a page translation exception. The |
1511 | * specifiation exception has the bad habit not to store necessary | 1560 | * specifiation exception has the bad habit not to store necessary |
1512 | * information in the lowcore. | 1561 | * information in the lowcore. |
1513 | * Bit 21 and bit 22 are the page invalid bit and the page protection | 1562 | * Bits 21, 22, 30 and 31 are used to indicate the page type. |
1514 | * bit. We set both to indicate a swapped page. | 1563 | * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402 |
1515 | * Bit 30 and 31 are used to distinguish the different page types. For | ||
1516 | * a swapped page these bits need to be zero. | ||
1517 | * This leaves the bits 1-19 and bits 24-29 to store type and offset. | 1564 | * This leaves the bits 1-19 and bits 24-29 to store type and offset. |
1518 | * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19 | 1565 | * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19 |
1519 | * plus 24 for the offset. | 1566 | * plus 24 for the offset. |
@@ -1527,10 +1574,8 @@ static inline unsigned long pmd_pfn(pmd_t pmd) | |||
1527 | * exception will occur instead of a page translation exception. The | 1574 | * exception will occur instead of a page translation exception. The |
1528 | * specification exception has the bad habit not to store necessary | 1575 | * specification exception has the bad habit not to store necessary |
1529 | * information in the lowcore. | 1576 | * information in the lowcore. |
1530 | * Bit 53 and bit 54 are the page invalid bit and the page protection | 1577 | * Bits 53, 54, 62 and 63 are used to indicate the page type. |
1531 | * bit. We set both to indicate a swapped page. | 1578 | * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402 |
1532 | * Bit 62 and 63 are used to distinguish the different page types. For | ||
1533 | * a swapped page these bits need to be zero. | ||
1534 | * This leaves the bits 0-51 and bits 56-61 to store type and offset. | 1579 | * This leaves the bits 0-51 and bits 56-61 to store type and offset. |
1535 | * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51 | 1580 | * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51 |
1536 | * plus 56 for the offset. | 1581 | * plus 56 for the offset. |
@@ -1547,7 +1592,7 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) | |||
1547 | { | 1592 | { |
1548 | pte_t pte; | 1593 | pte_t pte; |
1549 | offset &= __SWP_OFFSET_MASK; | 1594 | offset &= __SWP_OFFSET_MASK; |
1550 | pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) | | 1595 | pte_val(pte) = _PAGE_INVALID | _PAGE_TYPE | ((type & 0x1f) << 2) | |
1551 | ((offset & 1UL) << 7) | ((offset & ~1UL) << 11); | 1596 | ((offset & 1UL) << 7) | ((offset & ~1UL) << 11); |
1552 | return pte; | 1597 | return pte; |
1553 | } | 1598 | } |
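The two layout comments above describe where the swap type and offset live; mk_swap_pte() is the packing side. A minimal sketch of the reverse direction, with hypothetical helper names and only the bit layout visible in mk_swap_pte() taken as given:

static inline unsigned long swp_type_of(pte_t pte)
{
        /* type occupies bits 2-6, counting from the least significant bit */
        return (pte_val(pte) >> 2) & 0x1f;
}

static inline unsigned long swp_offset_of(pte_t pte)
{
        /* offset bit 0 sits in bit 7, the remaining offset bits start at bit 12 */
        return ((pte_val(pte) >> 11) & ~1UL) | ((pte_val(pte) >> 7) & 1);
}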
@@ -1570,7 +1615,7 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset) | |||
1570 | 1615 | ||
1571 | #define pgoff_to_pte(__off) \ | 1616 | #define pgoff_to_pte(__off) \ |
1572 | ((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \ | 1617 | ((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \ |
1573 | | _PAGE_TYPE_FILE }) | 1618 | | _PAGE_INVALID | _PAGE_PROTECT }) |
1574 | 1619 | ||
1575 | #endif /* !__ASSEMBLY__ */ | 1620 | #endif /* !__ASSEMBLY__ */ |
1576 | 1621 | ||
diff --git a/arch/s390/include/asm/serial.h b/arch/s390/include/asm/serial.h new file mode 100644 index 000000000000..5b3e48ef534b --- /dev/null +++ b/arch/s390/include/asm/serial.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef _ASM_S390_SERIAL_H | ||
2 | #define _ASM_S390_SERIAL_H | ||
3 | |||
4 | #define BASE_BAUD 0 | ||
5 | |||
6 | #endif /* _ASM_S390_SERIAL_H */ | ||
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h index 80b6f11263c4..6dbd559763c9 100644 --- a/arch/s390/include/asm/switch_to.h +++ b/arch/s390/include/asm/switch_to.h | |||
@@ -8,6 +8,7 @@ | |||
8 | #define __ASM_SWITCH_TO_H | 8 | #define __ASM_SWITCH_TO_H |
9 | 9 | ||
10 | #include <linux/thread_info.h> | 10 | #include <linux/thread_info.h> |
11 | #include <asm/ptrace.h> | ||
11 | 12 | ||
12 | extern struct task_struct *__switch_to(void *, void *); | 13 | extern struct task_struct *__switch_to(void *, void *); |
13 | extern void update_cr_regs(struct task_struct *task); | 14 | extern void update_cr_regs(struct task_struct *task); |
@@ -68,12 +69,16 @@ static inline void restore_fp_regs(s390_fp_regs *fpregs) | |||
68 | 69 | ||
69 | static inline void save_access_regs(unsigned int *acrs) | 70 | static inline void save_access_regs(unsigned int *acrs) |
70 | { | 71 | { |
71 | asm volatile("stam 0,15,%0" : "=Q" (*acrs)); | 72 | typedef struct { int _[NUM_ACRS]; } acrstype; |
73 | |||
74 | asm volatile("stam 0,15,%0" : "=Q" (*(acrstype *)acrs)); | ||
72 | } | 75 | } |
73 | 76 | ||
74 | static inline void restore_access_regs(unsigned int *acrs) | 77 | static inline void restore_access_regs(unsigned int *acrs) |
75 | { | 78 | { |
76 | asm volatile("lam 0,15,%0" : : "Q" (*acrs)); | 79 | typedef struct { int _[NUM_ACRS]; } acrstype; |
80 | |||
81 | asm volatile("lam 0,15,%0" : : "Q" (*(acrstype *)acrs)); | ||
77 | } | 82 | } |
78 | 83 | ||
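A plausible reading of the acrstype change (the patch itself does not say): a bare "=Q" (*acrs) operand describes a single unsigned int, so the compiler may assume stam/lam only touch acrs[0]; casting to a struct that spans all NUM_ACRS words widens the constraint to cover the whole array. The idiom in isolation, as a sketch:

typedef struct { unsigned int _[16]; } words16;        /* 16 == NUM_ACRS */

static inline void store_all_access_regs(unsigned int *p)
{
        /* the "=Q" operand now covers all 16 words behind p */
        asm volatile("stam 0,15,%0" : "=Q" (*(words16 *) p));
}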
79 | #define switch_to(prev,next,last) do { \ | 84 | #define switch_to(prev,next,last) do { \ |
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h index b75d7d686684..2cb846c4b37f 100644 --- a/arch/s390/include/asm/tlb.h +++ b/arch/s390/include/asm/tlb.h | |||
@@ -32,6 +32,7 @@ struct mmu_gather { | |||
32 | struct mm_struct *mm; | 32 | struct mm_struct *mm; |
33 | struct mmu_table_batch *batch; | 33 | struct mmu_table_batch *batch; |
34 | unsigned int fullmm; | 34 | unsigned int fullmm; |
35 | unsigned long start, end; | ||
35 | }; | 36 | }; |
36 | 37 | ||
37 | struct mmu_table_batch { | 38 | struct mmu_table_batch { |
@@ -48,10 +49,13 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table); | |||
48 | 49 | ||
49 | static inline void tlb_gather_mmu(struct mmu_gather *tlb, | 50 | static inline void tlb_gather_mmu(struct mmu_gather *tlb, |
50 | struct mm_struct *mm, | 51 | struct mm_struct *mm, |
51 | unsigned int full_mm_flush) | 52 | unsigned long start, |
53 | unsigned long end) | ||
52 | { | 54 | { |
53 | tlb->mm = mm; | 55 | tlb->mm = mm; |
54 | tlb->fullmm = full_mm_flush; | 56 | tlb->start = start; |
57 | tlb->end = end; | ||
58 | tlb->fullmm = !(start | (end+1)); | ||
55 | tlb->batch = NULL; | 59 | tlb->batch = NULL; |
56 | if (tlb->fullmm) | 60 | if (tlb->fullmm) |
57 | __tlb_flush_mm(mm); | 61 | __tlb_flush_mm(mm); |
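The new fullmm test relies on the convention that a full address-space teardown is requested with start = 0 and end = -1; a standalone sketch of the arithmetic:

#include <assert.h>

/* !(start | (end + 1)) is non-zero only for start == 0 and end == ~0UL */
static int is_fullmm(unsigned long start, unsigned long end)
{
        return !(start | (end + 1));
}

int main(void)
{
        assert(is_fullmm(0, ~0UL));             /* full flush                  */
        assert(!is_fullmm(0, 0x7fffUL));        /* ranged flush starting at 0  */
        assert(!is_fullmm(0x1000UL, ~0UL));     /* ranged flush ending at top  */
        return 0;
}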
@@ -59,13 +63,14 @@ static inline void tlb_gather_mmu(struct mmu_gather *tlb, | |||
59 | 63 | ||
60 | static inline void tlb_flush_mmu(struct mmu_gather *tlb) | 64 | static inline void tlb_flush_mmu(struct mmu_gather *tlb) |
61 | { | 65 | { |
66 | __tlb_flush_mm_lazy(tlb->mm); | ||
62 | tlb_table_flush(tlb); | 67 | tlb_table_flush(tlb); |
63 | } | 68 | } |
64 | 69 | ||
65 | static inline void tlb_finish_mmu(struct mmu_gather *tlb, | 70 | static inline void tlb_finish_mmu(struct mmu_gather *tlb, |
66 | unsigned long start, unsigned long end) | 71 | unsigned long start, unsigned long end) |
67 | { | 72 | { |
68 | tlb_table_flush(tlb); | 73 | tlb_flush_mmu(tlb); |
69 | } | 74 | } |
70 | 75 | ||
71 | /* | 76 | /* |
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h index 6b32af30878c..f9fef0425fee 100644 --- a/arch/s390/include/asm/tlbflush.h +++ b/arch/s390/include/asm/tlbflush.h | |||
@@ -86,7 +86,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm) | |||
86 | __tlb_flush_full(mm); | 86 | __tlb_flush_full(mm); |
87 | } | 87 | } |
88 | 88 | ||
89 | static inline void __tlb_flush_mm_cond(struct mm_struct * mm) | 89 | static inline void __tlb_flush_mm_lazy(struct mm_struct * mm) |
90 | { | 90 | { |
91 | if (mm->context.flush_mm) { | 91 | if (mm->context.flush_mm) { |
92 | __tlb_flush_mm(mm); | 92 | __tlb_flush_mm(mm); |
@@ -118,13 +118,13 @@ static inline void __tlb_flush_mm_cond(struct mm_struct * mm) | |||
118 | 118 | ||
119 | static inline void flush_tlb_mm(struct mm_struct *mm) | 119 | static inline void flush_tlb_mm(struct mm_struct *mm) |
120 | { | 120 | { |
121 | __tlb_flush_mm_cond(mm); | 121 | __tlb_flush_mm_lazy(mm); |
122 | } | 122 | } |
123 | 123 | ||
124 | static inline void flush_tlb_range(struct vm_area_struct *vma, | 124 | static inline void flush_tlb_range(struct vm_area_struct *vma, |
125 | unsigned long start, unsigned long end) | 125 | unsigned long start, unsigned long end) |
126 | { | 126 | { |
127 | __tlb_flush_mm_cond(vma->vm_mm); | 127 | __tlb_flush_mm_lazy(vma->vm_mm); |
128 | } | 128 | } |
129 | 129 | ||
130 | static inline void flush_tlb_kernel_range(unsigned long start, | 130 | static inline void flush_tlb_kernel_range(unsigned long start, |
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index be7a408be7a1..cc30d1fb000c 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <asm/unistd.h> | 18 | #include <asm/unistd.h> |
19 | #include <asm/page.h> | 19 | #include <asm/page.h> |
20 | #include <asm/sigp.h> | 20 | #include <asm/sigp.h> |
21 | #include <asm/irq.h> | ||
21 | 22 | ||
22 | __PT_R0 = __PT_GPRS | 23 | __PT_R0 = __PT_GPRS |
23 | __PT_R1 = __PT_GPRS + 4 | 24 | __PT_R1 = __PT_GPRS + 4 |
@@ -435,6 +436,11 @@ io_skip: | |||
435 | io_loop: | 436 | io_loop: |
436 | l %r1,BASED(.Ldo_IRQ) | 437 | l %r1,BASED(.Ldo_IRQ) |
437 | lr %r2,%r11 # pass pointer to pt_regs | 438 | lr %r2,%r11 # pass pointer to pt_regs |
439 | lhi %r3,IO_INTERRUPT | ||
440 | tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ? | ||
441 | jz io_call | ||
442 | lhi %r3,THIN_INTERRUPT | ||
443 | io_call: | ||
438 | basr %r14,%r1 # call do_IRQ | 444 | basr %r14,%r1 # call do_IRQ |
439 | tm __LC_MACHINE_FLAGS+2,0x10 # MACHINE_FLAG_LPAR | 445 | tm __LC_MACHINE_FLAGS+2,0x10 # MACHINE_FLAG_LPAR |
440 | jz io_return | 446 | jz io_return |
@@ -584,9 +590,10 @@ ext_skip: | |||
584 | mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR | 590 | mvc __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR |
585 | mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS | 591 | mvc __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS |
586 | TRACE_IRQS_OFF | 592 | TRACE_IRQS_OFF |
593 | l %r1,BASED(.Ldo_IRQ) | ||
587 | lr %r2,%r11 # pass pointer to pt_regs | 594 | lr %r2,%r11 # pass pointer to pt_regs |
588 | l %r1,BASED(.Ldo_extint) | 595 | lhi %r3,EXT_INTERRUPT |
589 | basr %r14,%r1 # call do_extint | 596 | basr %r14,%r1 # call do_IRQ |
590 | j io_return | 597 | j io_return |
591 | 598 | ||
592 | /* | 599 | /* |
@@ -879,13 +886,13 @@ cleanup_idle: | |||
879 | stm %r9,%r10,__LC_SYSTEM_TIMER | 886 | stm %r9,%r10,__LC_SYSTEM_TIMER |
880 | mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2) | 887 | mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2) |
881 | # prepare return psw | 888 | # prepare return psw |
882 | n %r8,BASED(cleanup_idle_wait) # clear wait state bit | 889 | n %r8,BASED(cleanup_idle_wait) # clear irq & wait state bits |
883 | l %r9,24(%r11) # return from psw_idle | 890 | l %r9,24(%r11) # return from psw_idle |
884 | br %r14 | 891 | br %r14 |
885 | cleanup_idle_insn: | 892 | cleanup_idle_insn: |
886 | .long psw_idle_lpsw + 0x80000000 | 893 | .long psw_idle_lpsw + 0x80000000 |
887 | cleanup_idle_wait: | 894 | cleanup_idle_wait: |
888 | .long 0xfffdffff | 895 | .long 0xfcfdffff |
889 | 896 | ||
890 | /* | 897 | /* |
891 | * Integer constants | 898 | * Integer constants |
@@ -902,7 +909,6 @@ cleanup_idle_wait: | |||
902 | .Ldo_machine_check: .long s390_do_machine_check | 909 | .Ldo_machine_check: .long s390_do_machine_check |
903 | .Lhandle_mcck: .long s390_handle_mcck | 910 | .Lhandle_mcck: .long s390_handle_mcck |
904 | .Ldo_IRQ: .long do_IRQ | 911 | .Ldo_IRQ: .long do_IRQ |
905 | .Ldo_extint: .long do_extint | ||
906 | .Ldo_signal: .long do_signal | 912 | .Ldo_signal: .long do_signal |
907 | .Ldo_notify_resume: .long do_notify_resume | 913 | .Ldo_notify_resume: .long do_notify_resume |
908 | .Ldo_per_trap: .long do_per_trap | 914 | .Ldo_per_trap: .long do_per_trap |
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 1c039d0c24c7..2b2188b97c6a 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <asm/unistd.h> | 19 | #include <asm/unistd.h> |
20 | #include <asm/page.h> | 20 | #include <asm/page.h> |
21 | #include <asm/sigp.h> | 21 | #include <asm/sigp.h> |
22 | #include <asm/irq.h> | ||
22 | 23 | ||
23 | __PT_R0 = __PT_GPRS | 24 | __PT_R0 = __PT_GPRS |
24 | __PT_R1 = __PT_GPRS + 8 | 25 | __PT_R1 = __PT_GPRS + 8 |
@@ -468,6 +469,11 @@ io_skip: | |||
468 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) | 469 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
469 | io_loop: | 470 | io_loop: |
470 | lgr %r2,%r11 # pass pointer to pt_regs | 471 | lgr %r2,%r11 # pass pointer to pt_regs |
472 | lghi %r3,IO_INTERRUPT | ||
473 | tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ? | ||
474 | jz io_call | ||
475 | lghi %r3,THIN_INTERRUPT | ||
476 | io_call: | ||
471 | brasl %r14,do_IRQ | 477 | brasl %r14,do_IRQ |
472 | tm __LC_MACHINE_FLAGS+6,0x10 # MACHINE_FLAG_LPAR | 478 | tm __LC_MACHINE_FLAGS+6,0x10 # MACHINE_FLAG_LPAR |
473 | jz io_return | 479 | jz io_return |
@@ -623,7 +629,8 @@ ext_skip: | |||
623 | TRACE_IRQS_OFF | 629 | TRACE_IRQS_OFF |
624 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) | 630 | xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) |
625 | lgr %r2,%r11 # pass pointer to pt_regs | 631 | lgr %r2,%r11 # pass pointer to pt_regs |
626 | brasl %r14,do_extint | 632 | lghi %r3,EXT_INTERRUPT |
633 | brasl %r14,do_IRQ | ||
627 | j io_return | 634 | j io_return |
628 | 635 | ||
629 | /* | 636 | /* |
@@ -922,7 +929,7 @@ cleanup_idle: | |||
922 | stg %r9,__LC_SYSTEM_TIMER | 929 | stg %r9,__LC_SYSTEM_TIMER |
923 | mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2) | 930 | mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2) |
924 | # prepare return psw | 931 | # prepare return psw |
925 | nihh %r8,0xfffd # clear wait state bit | 932 | nihh %r8,0xfcfd # clear irq & wait state bits |
926 | lg %r9,48(%r11) # return from psw_idle | 933 | lg %r9,48(%r11) # return from psw_idle |
927 | br %r14 | 934 | br %r14 |
928 | cleanup_idle_insn: | 935 | cleanup_idle_insn: |
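For orientation, and assuming the usual z/Architecture PSW layout (bit 6 = I/O mask, bit 7 = external mask, bit 14 = wait), the widened masks in entry.S and entry64.S clear the interrupt-enable bits in addition to the wait bit:

/* sketch: bits removed from the PSW high halfword by the old/new masks */
#define CLEARED_OLD     (~0xfffdu & 0xffffu)    /* 0x0002: wait bit only                 */
#define CLEARED_NEW     (~0xfcfdu & 0xffffu)    /* 0x0302: I/O mask, external mask, wait */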
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c index 54b0995514e8..b34ba0ea96a9 100644 --- a/arch/s390/kernel/irq.c +++ b/arch/s390/kernel/irq.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <asm/cputime.h> | 22 | #include <asm/cputime.h> |
23 | #include <asm/lowcore.h> | 23 | #include <asm/lowcore.h> |
24 | #include <asm/irq.h> | 24 | #include <asm/irq.h> |
25 | #include <asm/hw_irq.h> | ||
25 | #include "entry.h" | 26 | #include "entry.h" |
26 | 27 | ||
27 | DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat); | 28 | DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat); |
@@ -42,9 +43,10 @@ struct irq_class { | |||
42 | * Since the external and I/O interrupt fields are already sums we would end | 43 | * Since the external and I/O interrupt fields are already sums we would end |
43 | * up with having a sum which accounts each interrupt twice. | 44 | * up with having a sum which accounts each interrupt twice. |
44 | */ | 45 | */ |
45 | static const struct irq_class irqclass_main_desc[NR_IRQS] = { | 46 | static const struct irq_class irqclass_main_desc[NR_IRQS_BASE] = { |
46 | [EXTERNAL_INTERRUPT] = {.name = "EXT"}, | 47 | [EXT_INTERRUPT] = {.name = "EXT"}, |
47 | [IO_INTERRUPT] = {.name = "I/O"} | 48 | [IO_INTERRUPT] = {.name = "I/O"}, |
49 | [THIN_INTERRUPT] = {.name = "AIO"}, | ||
48 | }; | 50 | }; |
49 | 51 | ||
50 | /* | 52 | /* |
@@ -86,6 +88,28 @@ static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = { | |||
86 | [CPU_RST] = {.name = "RST", .desc = "[CPU] CPU Restart"}, | 88 | [CPU_RST] = {.name = "RST", .desc = "[CPU] CPU Restart"}, |
87 | }; | 89 | }; |
88 | 90 | ||
91 | void __init init_IRQ(void) | ||
92 | { | ||
93 | irq_reserve_irqs(0, THIN_INTERRUPT); | ||
94 | init_cio_interrupts(); | ||
95 | init_airq_interrupts(); | ||
96 | init_ext_interrupts(); | ||
97 | } | ||
98 | |||
99 | void do_IRQ(struct pt_regs *regs, int irq) | ||
100 | { | ||
101 | struct pt_regs *old_regs; | ||
102 | |||
103 | old_regs = set_irq_regs(regs); | ||
104 | irq_enter(); | ||
105 | if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) | ||
106 | /* Serve timer interrupts first. */ | ||
107 | clock_comparator_work(); | ||
108 | generic_handle_irq(irq); | ||
109 | irq_exit(); | ||
110 | set_irq_regs(old_regs); | ||
111 | } | ||
112 | |||
89 | /* | 113 | /* |
90 | * show_interrupts is needed by /proc/interrupts. | 114 | * show_interrupts is needed by /proc/interrupts. |
91 | */ | 115 | */ |
@@ -100,27 +124,36 @@ int show_interrupts(struct seq_file *p, void *v) | |||
100 | for_each_online_cpu(cpu) | 124 | for_each_online_cpu(cpu) |
101 | seq_printf(p, "CPU%d ", cpu); | 125 | seq_printf(p, "CPU%d ", cpu); |
102 | seq_putc(p, '\n'); | 126 | seq_putc(p, '\n'); |
127 | goto out; | ||
103 | } | 128 | } |
104 | if (irq < NR_IRQS) { | 129 | if (irq < NR_IRQS) { |
130 | if (irq >= NR_IRQS_BASE) | ||
131 | goto out; | ||
105 | seq_printf(p, "%s: ", irqclass_main_desc[irq].name); | 132 | seq_printf(p, "%s: ", irqclass_main_desc[irq].name); |
106 | for_each_online_cpu(cpu) | 133 | for_each_online_cpu(cpu) |
107 | seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[irq]); | 134 | seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu)); |
108 | seq_putc(p, '\n'); | 135 | seq_putc(p, '\n'); |
109 | goto skip_arch_irqs; | 136 | goto out; |
110 | } | 137 | } |
111 | for (irq = 0; irq < NR_ARCH_IRQS; irq++) { | 138 | for (irq = 0; irq < NR_ARCH_IRQS; irq++) { |
112 | seq_printf(p, "%s: ", irqclass_sub_desc[irq].name); | 139 | seq_printf(p, "%s: ", irqclass_sub_desc[irq].name); |
113 | for_each_online_cpu(cpu) | 140 | for_each_online_cpu(cpu) |
114 | seq_printf(p, "%10u ", per_cpu(irq_stat, cpu).irqs[irq]); | 141 | seq_printf(p, "%10u ", |
142 | per_cpu(irq_stat, cpu).irqs[irq]); | ||
115 | if (irqclass_sub_desc[irq].desc) | 143 | if (irqclass_sub_desc[irq].desc) |
116 | seq_printf(p, " %s", irqclass_sub_desc[irq].desc); | 144 | seq_printf(p, " %s", irqclass_sub_desc[irq].desc); |
117 | seq_putc(p, '\n'); | 145 | seq_putc(p, '\n'); |
118 | } | 146 | } |
119 | skip_arch_irqs: | 147 | out: |
120 | put_online_cpus(); | 148 | put_online_cpus(); |
121 | return 0; | 149 | return 0; |
122 | } | 150 | } |
123 | 151 | ||
152 | int arch_show_interrupts(struct seq_file *p, int prec) | ||
153 | { | ||
154 | return 0; | ||
155 | } | ||
156 | |||
124 | /* | 157 | /* |
125 | * Switch to the asynchronous interrupt stack for softirq execution. | 158 | * Switch to the asynchronous interrupt stack for softirq execution. |
126 | */ | 159 | */ |
@@ -159,14 +192,6 @@ asmlinkage void do_softirq(void) | |||
159 | local_irq_restore(flags); | 192 | local_irq_restore(flags); |
160 | } | 193 | } |
161 | 194 | ||
162 | #ifdef CONFIG_PROC_FS | ||
163 | void init_irq_proc(void) | ||
164 | { | ||
165 | if (proc_mkdir("irq", NULL)) | ||
166 | create_prof_cpu_mask(); | ||
167 | } | ||
168 | #endif | ||
169 | |||
170 | /* | 195 | /* |
171 | * ext_int_hash[index] is the list head for all external interrupts that hash | 196 | * ext_int_hash[index] is the list head for all external interrupts that hash |
172 | * to this index. | 197 | * to this index. |
@@ -183,14 +208,6 @@ struct ext_int_info { | |||
183 | /* ext_int_hash_lock protects the handler lists for external interrupts */ | 208 | /* ext_int_hash_lock protects the handler lists for external interrupts */ |
184 | DEFINE_SPINLOCK(ext_int_hash_lock); | 209 | DEFINE_SPINLOCK(ext_int_hash_lock); |
185 | 210 | ||
186 | static void __init init_external_interrupts(void) | ||
187 | { | ||
188 | int idx; | ||
189 | |||
190 | for (idx = 0; idx < ARRAY_SIZE(ext_int_hash); idx++) | ||
191 | INIT_LIST_HEAD(&ext_int_hash[idx]); | ||
192 | } | ||
193 | |||
194 | static inline int ext_hash(u16 code) | 211 | static inline int ext_hash(u16 code) |
195 | { | 212 | { |
196 | return (code + (code >> 9)) & 0xff; | 213 | return (code + (code >> 9)) & 0xff; |
@@ -234,20 +251,13 @@ int unregister_external_interrupt(u16 code, ext_int_handler_t handler) | |||
234 | } | 251 | } |
235 | EXPORT_SYMBOL(unregister_external_interrupt); | 252 | EXPORT_SYMBOL(unregister_external_interrupt); |
236 | 253 | ||
237 | void __irq_entry do_extint(struct pt_regs *regs) | 254 | static irqreturn_t do_ext_interrupt(int irq, void *dummy) |
238 | { | 255 | { |
256 | struct pt_regs *regs = get_irq_regs(); | ||
239 | struct ext_code ext_code; | 257 | struct ext_code ext_code; |
240 | struct pt_regs *old_regs; | ||
241 | struct ext_int_info *p; | 258 | struct ext_int_info *p; |
242 | int index; | 259 | int index; |
243 | 260 | ||
244 | old_regs = set_irq_regs(regs); | ||
245 | irq_enter(); | ||
246 | if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) { | ||
247 | /* Serve timer interrupts first. */ | ||
248 | clock_comparator_work(); | ||
249 | } | ||
250 | kstat_incr_irqs_this_cpu(EXTERNAL_INTERRUPT, NULL); | ||
251 | ext_code = *(struct ext_code *) ®s->int_code; | 261 | ext_code = *(struct ext_code *) ®s->int_code; |
252 | if (ext_code.code != 0x1004) | 262 | if (ext_code.code != 0x1004) |
253 | __get_cpu_var(s390_idle).nohz_delay = 1; | 263 | __get_cpu_var(s390_idle).nohz_delay = 1; |
@@ -259,13 +269,25 @@ void __irq_entry do_extint(struct pt_regs *regs) | |||
259 | p->handler(ext_code, regs->int_parm, | 269 | p->handler(ext_code, regs->int_parm, |
260 | regs->int_parm_long); | 270 | regs->int_parm_long); |
261 | rcu_read_unlock(); | 271 | rcu_read_unlock(); |
262 | irq_exit(); | 272 | |
263 | set_irq_regs(old_regs); | 273 | return IRQ_HANDLED; |
264 | } | 274 | } |
265 | 275 | ||
266 | void __init init_IRQ(void) | 276 | static struct irqaction external_interrupt = { |
277 | .name = "EXT", | ||
278 | .handler = do_ext_interrupt, | ||
279 | }; | ||
280 | |||
281 | void __init init_ext_interrupts(void) | ||
267 | { | 282 | { |
268 | init_external_interrupts(); | 283 | int idx; |
284 | |||
285 | for (idx = 0; idx < ARRAY_SIZE(ext_int_hash); idx++) | ||
286 | INIT_LIST_HEAD(&ext_int_hash[idx]); | ||
287 | |||
288 | irq_set_chip_and_handler(EXT_INTERRUPT, | ||
289 | &dummy_irq_chip, handle_percpu_irq); | ||
290 | setup_irq(EXT_INTERRUPT, &external_interrupt); | ||
269 | } | 291 | } |
270 | 292 | ||
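The per-code external interrupt handlers still hang off ext_int_hash and are now dispatched from do_ext_interrupt(). A usage sketch; the interrupt code 0x1234 is made up for illustration, and the ext_int_handler_t signature is inferred from the p->handler(ext_code, int_parm, int_parm_long) call above:

static void my_ext_handler(struct ext_code ext_code,
                           unsigned int param32, unsigned long param64)
{
        /* called from do_ext_interrupt(), i.e. in hard-irq context */
}

static int __init my_driver_init(void)
{
        /* hypothetical external interrupt code, for illustration only */
        return register_external_interrupt(0x1234, my_ext_handler);
}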
271 | static DEFINE_SPINLOCK(sc_irq_lock); | 293 | static DEFINE_SPINLOCK(sc_irq_lock); |
@@ -313,69 +335,3 @@ void measurement_alert_subclass_unregister(void) | |||
313 | spin_unlock(&ma_subclass_lock); | 335 | spin_unlock(&ma_subclass_lock); |
314 | } | 336 | } |
315 | EXPORT_SYMBOL(measurement_alert_subclass_unregister); | 337 | EXPORT_SYMBOL(measurement_alert_subclass_unregister); |
316 | |||
317 | #ifdef CONFIG_SMP | ||
318 | void synchronize_irq(unsigned int irq) | ||
319 | { | ||
320 | /* | ||
321 | * Not needed, the handler is protected by a lock and IRQs that occur | ||
322 | * after the handler is deleted are just NOPs. | ||
323 | */ | ||
324 | } | ||
325 | EXPORT_SYMBOL_GPL(synchronize_irq); | ||
326 | #endif | ||
327 | |||
328 | #ifndef CONFIG_PCI | ||
329 | |||
330 | /* Only PCI devices have dynamically-defined IRQ handlers */ | ||
331 | |||
332 | int request_irq(unsigned int irq, irq_handler_t handler, | ||
333 | unsigned long irqflags, const char *devname, void *dev_id) | ||
334 | { | ||
335 | return -EINVAL; | ||
336 | } | ||
337 | EXPORT_SYMBOL_GPL(request_irq); | ||
338 | |||
339 | void free_irq(unsigned int irq, void *dev_id) | ||
340 | { | ||
341 | WARN_ON(1); | ||
342 | } | ||
343 | EXPORT_SYMBOL_GPL(free_irq); | ||
344 | |||
345 | void enable_irq(unsigned int irq) | ||
346 | { | ||
347 | WARN_ON(1); | ||
348 | } | ||
349 | EXPORT_SYMBOL_GPL(enable_irq); | ||
350 | |||
351 | void disable_irq(unsigned int irq) | ||
352 | { | ||
353 | WARN_ON(1); | ||
354 | } | ||
355 | EXPORT_SYMBOL_GPL(disable_irq); | ||
356 | |||
357 | #endif /* !CONFIG_PCI */ | ||
358 | |||
359 | void disable_irq_nosync(unsigned int irq) | ||
360 | { | ||
361 | disable_irq(irq); | ||
362 | } | ||
363 | EXPORT_SYMBOL_GPL(disable_irq_nosync); | ||
364 | |||
365 | unsigned long probe_irq_on(void) | ||
366 | { | ||
367 | return 0; | ||
368 | } | ||
369 | EXPORT_SYMBOL_GPL(probe_irq_on); | ||
370 | |||
371 | int probe_irq_off(unsigned long val) | ||
372 | { | ||
373 | return 0; | ||
374 | } | ||
375 | EXPORT_SYMBOL_GPL(probe_irq_off); | ||
376 | |||
377 | unsigned int probe_irq_mask(unsigned long val) | ||
378 | { | ||
379 | return val; | ||
380 | } | ||
381 | EXPORT_SYMBOL_GPL(probe_irq_mask); | ||
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c index 3388b2b2a07d..adbbe7f1cb0d 100644 --- a/arch/s390/kernel/kprobes.c +++ b/arch/s390/kernel/kprobes.c | |||
@@ -105,14 +105,31 @@ static int __kprobes get_fixup_type(kprobe_opcode_t *insn) | |||
105 | fixup |= FIXUP_RETURN_REGISTER; | 105 | fixup |= FIXUP_RETURN_REGISTER; |
106 | break; | 106 | break; |
107 | case 0xeb: | 107 | case 0xeb: |
108 | if ((insn[2] & 0xff) == 0x44 || /* bxhg */ | 108 | switch (insn[2] & 0xff) { |
109 | (insn[2] & 0xff) == 0x45) /* bxleg */ | 109 | case 0x44: /* bxhg */ |
110 | case 0x45: /* bxleg */ | ||
110 | fixup = FIXUP_BRANCH_NOT_TAKEN; | 111 | fixup = FIXUP_BRANCH_NOT_TAKEN; |
112 | break; | ||
113 | } | ||
111 | break; | 114 | break; |
112 | case 0xe3: /* bctg */ | 115 | case 0xe3: /* bctg */ |
113 | if ((insn[2] & 0xff) == 0x46) | 116 | if ((insn[2] & 0xff) == 0x46) |
114 | fixup = FIXUP_BRANCH_NOT_TAKEN; | 117 | fixup = FIXUP_BRANCH_NOT_TAKEN; |
115 | break; | 118 | break; |
119 | case 0xec: | ||
120 | switch (insn[2] & 0xff) { | ||
121 | case 0xe5: /* clgrb */ | ||
122 | case 0xe6: /* cgrb */ | ||
123 | case 0xf6: /* crb */ | ||
124 | case 0xf7: /* clrb */ | ||
125 | case 0xfc: /* cgib */ | ||
126 | case 0xfd: /* cglib */ | ||
127 | case 0xfe: /* cib */ | ||
128 | case 0xff: /* clib */ | ||
129 | fixup = FIXUP_BRANCH_NOT_TAKEN; | ||
130 | break; | ||
131 | } | ||
132 | break; | ||
116 | } | 133 | } |
117 | return fixup; | 134 | return fixup; |
118 | } | 135 | } |
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c index 504175ebf8b0..c4c033819879 100644 --- a/arch/s390/kernel/nmi.c +++ b/arch/s390/kernel/nmi.c | |||
@@ -214,10 +214,7 @@ static int notrace s390_revalidate_registers(struct mci *mci) | |||
214 | : "0", "cc"); | 214 | : "0", "cc"); |
215 | #endif | 215 | #endif |
216 | /* Revalidate clock comparator register */ | 216 | /* Revalidate clock comparator register */ |
217 | if (S390_lowcore.clock_comparator == -1) | 217 | set_clock_comparator(S390_lowcore.clock_comparator); |
218 | set_clock_comparator(S390_lowcore.mcck_clock); | ||
219 | else | ||
220 | set_clock_comparator(S390_lowcore.clock_comparator); | ||
221 | /* Check if old PSW is valid */ | 218 | /* Check if old PSW is valid */ |
222 | if (!mci->wp) | 219 | if (!mci->wp) |
223 | /* | 220 | /* |
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c index a6fc037671b1..500aa1029bcb 100644 --- a/arch/s390/kernel/perf_event.c +++ b/arch/s390/kernel/perf_event.c | |||
@@ -52,12 +52,13 @@ static struct kvm_s390_sie_block *sie_block(struct pt_regs *regs) | |||
52 | 52 | ||
53 | static bool is_in_guest(struct pt_regs *regs) | 53 | static bool is_in_guest(struct pt_regs *regs) |
54 | { | 54 | { |
55 | unsigned long ip = instruction_pointer(regs); | ||
56 | |||
57 | if (user_mode(regs)) | 55 | if (user_mode(regs)) |
58 | return false; | 56 | return false; |
59 | 57 | #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) | |
60 | return ip == (unsigned long) &sie_exit; | 58 | return instruction_pointer(regs) == (unsigned long) &sie_exit; |
59 | #else | ||
60 | return false; | ||
61 | #endif | ||
61 | } | 62 | } |
62 | 63 | ||
63 | static unsigned long guest_is_user_mode(struct pt_regs *regs) | 64 | static unsigned long guest_is_user_mode(struct pt_regs *regs) |
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 2bc3eddae34a..c5dbb335716d 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c | |||
@@ -71,6 +71,7 @@ void arch_cpu_idle(void) | |||
71 | } | 71 | } |
72 | /* Halt the cpu and keep track of cpu time accounting. */ | 72 | /* Halt the cpu and keep track of cpu time accounting. */ |
73 | vtime_stop_cpu(); | 73 | vtime_stop_cpu(); |
74 | local_irq_enable(); | ||
74 | } | 75 | } |
75 | 76 | ||
76 | void arch_cpu_idle_exit(void) | 77 | void arch_cpu_idle_exit(void) |
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index e9fadb04e3c6..9556905bd3ce 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c | |||
@@ -60,11 +60,11 @@ void update_cr_regs(struct task_struct *task) | |||
60 | 60 | ||
61 | __ctl_store(cr, 0, 2); | 61 | __ctl_store(cr, 0, 2); |
62 | cr_new[1] = cr[1]; | 62 | cr_new[1] = cr[1]; |
63 | /* Set or clear transaction execution TXC/PIFO bits 8 and 9. */ | 63 | /* Set or clear transaction execution TXC bit 8. */ |
64 | if (task->thread.per_flags & PER_FLAG_NO_TE) | 64 | if (task->thread.per_flags & PER_FLAG_NO_TE) |
65 | cr_new[0] = cr[0] & ~(3UL << 54); | 65 | cr_new[0] = cr[0] & ~(1UL << 55); |
66 | else | 66 | else |
67 | cr_new[0] = cr[0] | (3UL << 54); | 67 | cr_new[0] = cr[0] | (1UL << 55); |
68 | /* Set or clear transaction execution TDC bits 62 and 63. */ | 68 | /* Set or clear transaction execution TDC bits 62 and 63. */ |
69 | cr_new[2] = cr[2] & ~3UL; | 69 | cr_new[2] = cr[2] & ~3UL; |
70 | if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) { | 70 | if (task->thread.per_flags & PER_FLAG_TE_ABORT_RAND) { |
@@ -1299,7 +1299,7 @@ int regs_query_register_offset(const char *name) | |||
1299 | 1299 | ||
1300 | if (!name || *name != 'r') | 1300 | if (!name || *name != 'r') |
1301 | return -EINVAL; | 1301 | return -EINVAL; |
1302 | if (strict_strtoul(name + 1, 10, &offset)) | 1302 | if (kstrtoul(name + 1, 10, &offset)) |
1303 | return -EINVAL; | 1303 | return -EINVAL; |
1304 | if (offset >= NUM_GPRS) | 1304 | if (offset >= NUM_GPRS) |
1305 | return -EINVAL; | 1305 | return -EINVAL; |
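kstrtoul() is used as a drop-in for strict_strtoul(): it returns 0 on success and -errno otherwise, and it rejects trailing garbage (a single trailing newline aside), so the surrounding error handling stays the same. A small sketch of the call pattern, with a hypothetical helper name:

static int parse_reg_number(const char *digits)
{
        unsigned long offset;

        if (kstrtoul(digits, 10, &offset))      /* 0 on success, -errno on error */
                return -EINVAL;
        return offset;                          /* e.g. "11" -> 11 */
}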
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 497451ec5e26..aeed8a61fa0d 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
@@ -994,6 +994,7 @@ static void __init setup_hwcaps(void) | |||
994 | strcpy(elf_platform, "z196"); | 994 | strcpy(elf_platform, "z196"); |
995 | break; | 995 | break; |
996 | case 0x2827: | 996 | case 0x2827: |
997 | case 0x2828: | ||
997 | strcpy(elf_platform, "zEC12"); | 998 | strcpy(elf_platform, "zEC12"); |
998 | break; | 999 | break; |
999 | } | 1000 | } |
diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c index c479d2f9605b..737bff38e3ee 100644 --- a/arch/s390/kernel/suspend.c +++ b/arch/s390/kernel/suspend.c | |||
@@ -10,6 +10,9 @@ | |||
10 | #include <linux/suspend.h> | 10 | #include <linux/suspend.h> |
11 | #include <linux/mm.h> | 11 | #include <linux/mm.h> |
12 | #include <asm/ctl_reg.h> | 12 | #include <asm/ctl_reg.h> |
13 | #include <asm/ipl.h> | ||
14 | #include <asm/cio.h> | ||
15 | #include <asm/pci.h> | ||
13 | 16 | ||
14 | /* | 17 | /* |
15 | * References to section boundaries | 18 | * References to section boundaries |
@@ -211,3 +214,11 @@ void restore_processor_state(void) | |||
211 | __ctl_set_bit(0,28); | 214 | __ctl_set_bit(0,28); |
212 | local_mcck_enable(); | 215 | local_mcck_enable(); |
213 | } | 216 | } |
217 | |||
218 | /* Called at the end of swsusp_arch_resume */ | ||
219 | void s390_early_resume(void) | ||
220 | { | ||
221 | lgr_info_log(); | ||
222 | channel_subsystem_reinit(); | ||
223 | zpci_rescan(); | ||
224 | } | ||
diff --git a/arch/s390/kernel/swsusp_asm64.S b/arch/s390/kernel/swsusp_asm64.S index c487be4cfc81..6b09fdffbd2f 100644 --- a/arch/s390/kernel/swsusp_asm64.S +++ b/arch/s390/kernel/swsusp_asm64.S | |||
@@ -281,11 +281,8 @@ restore_registers: | |||
281 | lghi %r2,0 | 281 | lghi %r2,0 |
282 | brasl %r14,arch_set_page_states | 282 | brasl %r14,arch_set_page_states |
283 | 283 | ||
284 | /* Log potential guest relocation */ | 284 | /* Call arch specific early resume code */ |
285 | brasl %r14,lgr_info_log | 285 | brasl %r14,s390_early_resume |
286 | |||
287 | /* Reinitialize the channel subsystem */ | ||
288 | brasl %r14,channel_subsystem_reinit | ||
289 | 286 | ||
290 | /* Return 0 */ | 287 | /* Return 0 */ |
291 | lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) | 288 | lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) |
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 876546b9cfa1..064c3082ab33 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c | |||
@@ -92,7 +92,6 @@ void clock_comparator_work(void) | |||
92 | struct clock_event_device *cd; | 92 | struct clock_event_device *cd; |
93 | 93 | ||
94 | S390_lowcore.clock_comparator = -1ULL; | 94 | S390_lowcore.clock_comparator = -1ULL; |
95 | set_clock_comparator(S390_lowcore.clock_comparator); | ||
96 | cd = &__get_cpu_var(comparators); | 95 | cd = &__get_cpu_var(comparators); |
97 | cd->event_handler(cd); | 96 | cd->event_handler(cd); |
98 | } | 97 | } |
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c index d7776281cb60..05d75c413137 100644 --- a/arch/s390/kernel/vdso.c +++ b/arch/s390/kernel/vdso.c | |||
@@ -63,7 +63,7 @@ static int __init vdso_setup(char *s) | |||
63 | else if (strncmp(s, "off", 4) == 0) | 63 | else if (strncmp(s, "off", 4) == 0) |
64 | vdso_enabled = 0; | 64 | vdso_enabled = 0; |
65 | else { | 65 | else { |
66 | rc = strict_strtoul(s, 0, &val); | 66 | rc = kstrtoul(s, 0, &val); |
67 | vdso_enabled = rc ? 0 : !!val; | 67 | vdso_enabled = rc ? 0 : !!val; |
68 | } | 68 | } |
69 | return !rc; | 69 | return !rc; |
@@ -113,11 +113,11 @@ int vdso_alloc_per_cpu(struct _lowcore *lowcore) | |||
113 | 113 | ||
114 | clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY, | 114 | clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY, |
115 | PAGE_SIZE << SEGMENT_ORDER); | 115 | PAGE_SIZE << SEGMENT_ORDER); |
116 | clear_table((unsigned long *) page_table, _PAGE_TYPE_EMPTY, | 116 | clear_table((unsigned long *) page_table, _PAGE_INVALID, |
117 | 256*sizeof(unsigned long)); | 117 | 256*sizeof(unsigned long)); |
118 | 118 | ||
119 | *(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table; | 119 | *(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table; |
120 | *(unsigned long *) page_table = _PAGE_RO + page_frame; | 120 | *(unsigned long *) page_table = _PAGE_PROTECT + page_frame; |
121 | 121 | ||
122 | psal = (u32 *) (page_table + 256*sizeof(unsigned long)); | 122 | psal = (u32 *) (page_table + 256*sizeof(unsigned long)); |
123 | aste = psal + 32; | 123 | aste = psal + 32; |
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index ba694d2ba51e..34c1c9a90be2 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c | |||
@@ -702,14 +702,25 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) | |||
702 | return rc; | 702 | return rc; |
703 | 703 | ||
704 | vcpu->arch.sie_block->icptcode = 0; | 704 | vcpu->arch.sie_block->icptcode = 0; |
705 | preempt_disable(); | ||
706 | kvm_guest_enter(); | ||
707 | preempt_enable(); | ||
708 | VCPU_EVENT(vcpu, 6, "entering sie flags %x", | 705 | VCPU_EVENT(vcpu, 6, "entering sie flags %x", |
709 | atomic_read(&vcpu->arch.sie_block->cpuflags)); | 706 | atomic_read(&vcpu->arch.sie_block->cpuflags)); |
710 | trace_kvm_s390_sie_enter(vcpu, | 707 | trace_kvm_s390_sie_enter(vcpu, |
711 | atomic_read(&vcpu->arch.sie_block->cpuflags)); | 708 | atomic_read(&vcpu->arch.sie_block->cpuflags)); |
709 | |||
710 | /* | ||
711 | * As PF_VCPU will be used in the fault handler, there must be no | ||
712 | * uaccess between guest_enter and guest_exit. | ||
713 | */ | ||
714 | preempt_disable(); | ||
715 | kvm_guest_enter(); | ||
716 | preempt_enable(); | ||
712 | rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs); | 717 | rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs); |
718 | kvm_guest_exit(); | ||
719 | |||
720 | VCPU_EVENT(vcpu, 6, "exit sie icptcode %d", | ||
721 | vcpu->arch.sie_block->icptcode); | ||
722 | trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode); | ||
723 | |||
713 | if (rc > 0) | 724 | if (rc > 0) |
714 | rc = 0; | 725 | rc = 0; |
715 | if (rc < 0) { | 726 | if (rc < 0) { |
@@ -721,10 +732,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) | |||
721 | rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | 732 | rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); |
722 | } | 733 | } |
723 | } | 734 | } |
724 | VCPU_EVENT(vcpu, 6, "exit sie icptcode %d", | ||
725 | vcpu->arch.sie_block->icptcode); | ||
726 | trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode); | ||
727 | kvm_guest_exit(); | ||
728 | 735 | ||
729 | memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16); | 736 | memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16); |
730 | return rc; | 737 | return rc; |
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 0da3e6eb6be6..4cdc54e63ebc 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/errno.h> | 16 | #include <linux/errno.h> |
17 | #include <linux/compat.h> | 17 | #include <linux/compat.h> |
18 | #include <asm/asm-offsets.h> | 18 | #include <asm/asm-offsets.h> |
19 | #include <asm/facility.h> | ||
19 | #include <asm/current.h> | 20 | #include <asm/current.h> |
20 | #include <asm/debug.h> | 21 | #include <asm/debug.h> |
21 | #include <asm/ebcdic.h> | 22 | #include <asm/ebcdic.h> |
@@ -532,8 +533,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu) | |||
532 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 533 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
533 | 534 | ||
534 | /* Only provide non-quiescing support if the host supports it */ | 535 | /* Only provide non-quiescing support if the host supports it */ |
535 | if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && | 536 | if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && !test_facility(14)) |
536 | S390_lowcore.stfl_fac_list & 0x00020000) | ||
537 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 537 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
538 | 538 | ||
539 | /* No support for conditional-SSKE */ | 539 | /* No support for conditional-SSKE */ |
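The facility check matches the old stfl bit test because facility bits are numbered from the most significant bit of the facility list: facility 14 in the first 32-bit word is 1 << (31 - 14) == 0x00020000, the literal the old code used. Note that the sense of the test is also corrected, so the specification exception is now injected when the facility is absent, as the comment intends. A sketch of the bit arithmetic, with test_facility() from <asm/facility.h> (included above) taken as given:

/* facility number n (n < 32) -> bit in the first 32-bit stfl word */
static inline unsigned int stfl_bit(unsigned int n)
{
        return 1U << (31 - n);
}
/* stfl_bit(14) == 0x00020000 */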
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c index c61b9fad43cc..57c87d7d7ede 100644 --- a/arch/s390/lib/delay.c +++ b/arch/s390/lib/delay.c | |||
@@ -44,7 +44,6 @@ static void __udelay_disabled(unsigned long long usecs) | |||
44 | do { | 44 | do { |
45 | set_clock_comparator(end); | 45 | set_clock_comparator(end); |
46 | vtime_stop_cpu(); | 46 | vtime_stop_cpu(); |
47 | local_irq_disable(); | ||
48 | } while (get_tod_clock() < end); | 47 | } while (get_tod_clock() < end); |
49 | lockdep_on(); | 48 | lockdep_on(); |
50 | __ctl_load(cr0, 0, 0); | 49 | __ctl_load(cr0, 0, 0); |
@@ -64,7 +63,6 @@ static void __udelay_enabled(unsigned long long usecs) | |||
64 | set_clock_comparator(end); | 63 | set_clock_comparator(end); |
65 | } | 64 | } |
66 | vtime_stop_cpu(); | 65 | vtime_stop_cpu(); |
67 | local_irq_disable(); | ||
68 | if (clock_saved) | 66 | if (clock_saved) |
69 | local_tick_enable(clock_saved); | 67 | local_tick_enable(clock_saved); |
70 | } while (get_tod_clock() < end); | 68 | } while (get_tod_clock() < end); |
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c index 50ea137a2d3c..1694d738b175 100644 --- a/arch/s390/lib/uaccess_pt.c +++ b/arch/s390/lib/uaccess_pt.c | |||
@@ -86,28 +86,28 @@ static unsigned long follow_table(struct mm_struct *mm, | |||
86 | switch (mm->context.asce_bits & _ASCE_TYPE_MASK) { | 86 | switch (mm->context.asce_bits & _ASCE_TYPE_MASK) { |
87 | case _ASCE_TYPE_REGION1: | 87 | case _ASCE_TYPE_REGION1: |
88 | table = table + ((address >> 53) & 0x7ff); | 88 | table = table + ((address >> 53) & 0x7ff); |
89 | if (unlikely(*table & _REGION_ENTRY_INV)) | 89 | if (unlikely(*table & _REGION_ENTRY_INVALID)) |
90 | return -0x39UL; | 90 | return -0x39UL; |
91 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | 91 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); |
92 | /* fallthrough */ | 92 | /* fallthrough */ |
93 | case _ASCE_TYPE_REGION2: | 93 | case _ASCE_TYPE_REGION2: |
94 | table = table + ((address >> 42) & 0x7ff); | 94 | table = table + ((address >> 42) & 0x7ff); |
95 | if (unlikely(*table & _REGION_ENTRY_INV)) | 95 | if (unlikely(*table & _REGION_ENTRY_INVALID)) |
96 | return -0x3aUL; | 96 | return -0x3aUL; |
97 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | 97 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); |
98 | /* fallthrough */ | 98 | /* fallthrough */ |
99 | case _ASCE_TYPE_REGION3: | 99 | case _ASCE_TYPE_REGION3: |
100 | table = table + ((address >> 31) & 0x7ff); | 100 | table = table + ((address >> 31) & 0x7ff); |
101 | if (unlikely(*table & _REGION_ENTRY_INV)) | 101 | if (unlikely(*table & _REGION_ENTRY_INVALID)) |
102 | return -0x3bUL; | 102 | return -0x3bUL; |
103 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | 103 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); |
104 | /* fallthrough */ | 104 | /* fallthrough */ |
105 | case _ASCE_TYPE_SEGMENT: | 105 | case _ASCE_TYPE_SEGMENT: |
106 | table = table + ((address >> 20) & 0x7ff); | 106 | table = table + ((address >> 20) & 0x7ff); |
107 | if (unlikely(*table & _SEGMENT_ENTRY_INV)) | 107 | if (unlikely(*table & _SEGMENT_ENTRY_INVALID)) |
108 | return -0x10UL; | 108 | return -0x10UL; |
109 | if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) { | 109 | if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) { |
110 | if (write && (*table & _SEGMENT_ENTRY_RO)) | 110 | if (write && (*table & _SEGMENT_ENTRY_PROTECT)) |
111 | return -0x04UL; | 111 | return -0x04UL; |
112 | return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) + | 112 | return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) + |
113 | (address & ~_SEGMENT_ENTRY_ORIGIN_LARGE); | 113 | (address & ~_SEGMENT_ENTRY_ORIGIN_LARGE); |
@@ -117,7 +117,7 @@ static unsigned long follow_table(struct mm_struct *mm, | |||
117 | table = table + ((address >> 12) & 0xff); | 117 | table = table + ((address >> 12) & 0xff); |
118 | if (unlikely(*table & _PAGE_INVALID)) | 118 | if (unlikely(*table & _PAGE_INVALID)) |
119 | return -0x11UL; | 119 | return -0x11UL; |
120 | if (write && (*table & _PAGE_RO)) | 120 | if (write && (*table & _PAGE_PROTECT)) |
121 | return -0x04UL; | 121 | return -0x04UL; |
122 | return (*table & PAGE_MASK) + (address & ~PAGE_MASK); | 122 | return (*table & PAGE_MASK) + (address & ~PAGE_MASK); |
123 | } | 123 | } |
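The shift/mask pairs above implement the usual z/Architecture table walk: 11 index bits per region/segment level and 8 bits for the page index (256 pages of 4 KB per 1 MB segment). A standalone sketch of the index extraction with the same constants:

struct walk_indices {
        unsigned int rfx, rsx, rtx, sx, px;
};

static struct walk_indices split_address(unsigned long addr)
{
        return (struct walk_indices) {
                .rfx = (addr >> 53) & 0x7ff,    /* region-first index  */
                .rsx = (addr >> 42) & 0x7ff,    /* region-second index */
                .rtx = (addr >> 31) & 0x7ff,    /* region-third index  */
                .sx  = (addr >> 20) & 0x7ff,    /* segment index       */
                .px  = (addr >> 12) & 0xff,     /* page index          */
        };
}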
@@ -130,13 +130,13 @@ static unsigned long follow_table(struct mm_struct *mm, | |||
130 | unsigned long *table = (unsigned long *)__pa(mm->pgd); | 130 | unsigned long *table = (unsigned long *)__pa(mm->pgd); |
131 | 131 | ||
132 | table = table + ((address >> 20) & 0x7ff); | 132 | table = table + ((address >> 20) & 0x7ff); |
133 | if (unlikely(*table & _SEGMENT_ENTRY_INV)) | 133 | if (unlikely(*table & _SEGMENT_ENTRY_INVALID)) |
134 | return -0x10UL; | 134 | return -0x10UL; |
135 | table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN); | 135 | table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN); |
136 | table = table + ((address >> 12) & 0xff); | 136 | table = table + ((address >> 12) & 0xff); |
137 | if (unlikely(*table & _PAGE_INVALID)) | 137 | if (unlikely(*table & _PAGE_INVALID)) |
138 | return -0x11UL; | 138 | return -0x11UL; |
139 | if (write && (*table & _PAGE_RO)) | 139 | if (write && (*table & _PAGE_PROTECT)) |
140 | return -0x04UL; | 140 | return -0x04UL; |
141 | return (*table & PAGE_MASK) + (address & ~PAGE_MASK); | 141 | return (*table & PAGE_MASK) + (address & ~PAGE_MASK); |
142 | } | 142 | } |
diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c index 3ad65b04ac15..46d517c3c763 100644 --- a/arch/s390/mm/dump_pagetables.c +++ b/arch/s390/mm/dump_pagetables.c | |||
@@ -53,7 +53,7 @@ static void print_prot(struct seq_file *m, unsigned int pr, int level) | |||
53 | seq_printf(m, "I\n"); | 53 | seq_printf(m, "I\n"); |
54 | return; | 54 | return; |
55 | } | 55 | } |
56 | seq_printf(m, "%s", pr & _PAGE_RO ? "RO " : "RW "); | 56 | seq_printf(m, "%s", pr & _PAGE_PROTECT ? "RO " : "RW "); |
57 | seq_printf(m, "%s", pr & _PAGE_CO ? "CO " : " "); | 57 | seq_printf(m, "%s", pr & _PAGE_CO ? "CO " : " "); |
58 | seq_putc(m, '\n'); | 58 | seq_putc(m, '\n'); |
59 | } | 59 | } |
@@ -105,12 +105,12 @@ static void note_page(struct seq_file *m, struct pg_state *st, | |||
105 | } | 105 | } |
106 | 106 | ||
107 | /* | 107 | /* |
108 | * The actual page table walker functions. In order to keep the implementation | 108 | * The actual page table walker functions. In order to keep the |
109 | * of print_prot() short, we only check and pass _PAGE_INVALID and _PAGE_RO | 109 | * implementation of print_prot() short, we only check and pass |
110 | * flags to note_page() if a region, segment or page table entry is invalid or | 110 | * _PAGE_INVALID and _PAGE_PROTECT flags to note_page() if a region, |
111 | * read-only. | 111 | * segment or page table entry is invalid or read-only. |
112 | * After all it's just a hint that the current level being walked contains an | 112 | * After all it's just a hint that the current level being walked |
113 | * invalid or read-only entry. | 113 | * contains an invalid or read-only entry. |
114 | */ | 114 | */ |
115 | static void walk_pte_level(struct seq_file *m, struct pg_state *st, | 115 | static void walk_pte_level(struct seq_file *m, struct pg_state *st, |
116 | pmd_t *pmd, unsigned long addr) | 116 | pmd_t *pmd, unsigned long addr) |
@@ -122,14 +122,14 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st, | |||
122 | for (i = 0; i < PTRS_PER_PTE && addr < max_addr; i++) { | 122 | for (i = 0; i < PTRS_PER_PTE && addr < max_addr; i++) { |
123 | st->current_address = addr; | 123 | st->current_address = addr; |
124 | pte = pte_offset_kernel(pmd, addr); | 124 | pte = pte_offset_kernel(pmd, addr); |
125 | prot = pte_val(*pte) & (_PAGE_RO | _PAGE_INVALID); | 125 | prot = pte_val(*pte) & (_PAGE_PROTECT | _PAGE_INVALID); |
126 | note_page(m, st, prot, 4); | 126 | note_page(m, st, prot, 4); |
127 | addr += PAGE_SIZE; | 127 | addr += PAGE_SIZE; |
128 | } | 128 | } |
129 | } | 129 | } |
130 | 130 | ||
131 | #ifdef CONFIG_64BIT | 131 | #ifdef CONFIG_64BIT |
132 | #define _PMD_PROT_MASK (_SEGMENT_ENTRY_RO | _SEGMENT_ENTRY_CO) | 132 | #define _PMD_PROT_MASK (_SEGMENT_ENTRY_PROTECT | _SEGMENT_ENTRY_CO) |
133 | #else | 133 | #else |
134 | #define _PMD_PROT_MASK 0 | 134 | #define _PMD_PROT_MASK 0 |
135 | #endif | 135 | #endif |
diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c index 1f5315d1215c..5d758db27bdc 100644 --- a/arch/s390/mm/gup.c +++ b/arch/s390/mm/gup.c | |||
@@ -24,7 +24,7 @@ static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr, | |||
24 | pte_t *ptep, pte; | 24 | pte_t *ptep, pte; |
25 | struct page *page; | 25 | struct page *page; |
26 | 26 | ||
27 | mask = (write ? _PAGE_RO : 0) | _PAGE_INVALID | _PAGE_SPECIAL; | 27 | mask = (write ? _PAGE_PROTECT : 0) | _PAGE_INVALID | _PAGE_SPECIAL; |
28 | 28 | ||
29 | ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr); | 29 | ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr); |
30 | do { | 30 | do { |
@@ -55,8 +55,8 @@ static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr, | |||
55 | struct page *head, *page, *tail; | 55 | struct page *head, *page, *tail; |
56 | int refs; | 56 | int refs; |
57 | 57 | ||
58 | result = write ? 0 : _SEGMENT_ENTRY_RO; | 58 | result = write ? 0 : _SEGMENT_ENTRY_PROTECT; |
59 | mask = result | _SEGMENT_ENTRY_INV; | 59 | mask = result | _SEGMENT_ENTRY_INVALID; |
60 | if ((pmd_val(pmd) & mask) != result) | 60 | if ((pmd_val(pmd) & mask) != result) |
61 | return 0; | 61 | return 0; |
62 | VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT)); | 62 | VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT)); |
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c index 121089d57802..248445f92604 100644 --- a/arch/s390/mm/hugetlbpage.c +++ b/arch/s390/mm/hugetlbpage.c | |||
@@ -8,21 +8,127 @@ | |||
8 | #include <linux/mm.h> | 8 | #include <linux/mm.h> |
9 | #include <linux/hugetlb.h> | 9 | #include <linux/hugetlb.h> |
10 | 10 | ||
11 | static inline pmd_t __pte_to_pmd(pte_t pte) | ||
12 | { | ||
13 | int none, young, prot; | ||
14 | pmd_t pmd; | ||
15 | |||
16 | /* | ||
17 | * Convert encoding pte bits pmd bits | ||
18 | * .IR...wrdytp ..R...I...y. | ||
19 | * empty .10...000000 -> ..0...1...0. | ||
20 | * prot-none, clean, old .11...000001 -> ..0...1...1. | ||
21 | * prot-none, clean, young .11...000101 -> ..1...1...1. | ||
22 | * prot-none, dirty, old .10...001001 -> ..0...1...1. | ||
23 | * prot-none, dirty, young .10...001101 -> ..1...1...1. | ||
24 | * read-only, clean, old .11...010001 -> ..1...1...0. | ||
25 | * read-only, clean, young .01...010101 -> ..1...0...1. | ||
26 | * read-only, dirty, old .11...011001 -> ..1...1...0. | ||
27 | * read-only, dirty, young .01...011101 -> ..1...0...1. | ||
28 | * read-write, clean, old .11...110001 -> ..0...1...0. | ||
29 | * read-write, clean, young .01...110101 -> ..0...0...1. | ||
30 | * read-write, dirty, old .10...111001 -> ..0...1...0. | ||
31 | * read-write, dirty, young .00...111101 -> ..0...0...1. | ||
32 | * Huge ptes are dirty by definition, a clean pte is made dirty | ||
33 | * by the conversion. | ||
34 | */ | ||
35 | if (pte_present(pte)) { | ||
36 | pmd_val(pmd) = pte_val(pte) & PAGE_MASK; | ||
37 | if (pte_val(pte) & _PAGE_INVALID) | ||
38 | pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID; | ||
39 | none = (pte_val(pte) & _PAGE_PRESENT) && | ||
40 | !(pte_val(pte) & _PAGE_READ) && | ||
41 | !(pte_val(pte) & _PAGE_WRITE); | ||
42 | prot = (pte_val(pte) & _PAGE_PROTECT) && | ||
43 | !(pte_val(pte) & _PAGE_WRITE); | ||
44 | young = pte_val(pte) & _PAGE_YOUNG; | ||
45 | if (none || young) | ||
46 | pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG; | ||
47 | if (prot || (none && young)) | ||
48 | pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT; | ||
49 | } else | ||
50 | pmd_val(pmd) = _SEGMENT_ENTRY_INVALID; | ||
51 | return pmd; | ||
52 | } | ||
53 | |||
54 | static inline pte_t __pmd_to_pte(pmd_t pmd) | ||
55 | { | ||
56 | pte_t pte; | ||
57 | |||
58 | /* | ||
59 | * Convert encoding pmd bits pte bits | ||
60 | * ..R...I...y. .IR...wrdytp | ||
61 | * empty ..0...1...0. -> .10...000000 | ||
62 | * prot-none, old ..0...1...1. -> .10...001001 | ||
63 | * prot-none, young ..1...1...1. -> .10...001101 | ||
64 | * read-only, old ..1...1...0. -> .11...011001 | ||
65 | * read-only, young ..1...0...1. -> .01...011101 | ||
66 | * read-write, old ..0...1...0. -> .10...111001 | ||
67 | * read-write, young ..0...0...1. -> .00...111101 | ||
68 | * Huge ptes are dirty by definition | ||
69 | */ | ||
70 | if (pmd_present(pmd)) { | ||
71 | pte_val(pte) = _PAGE_PRESENT | _PAGE_LARGE | _PAGE_DIRTY | | ||
72 | (pmd_val(pmd) & PAGE_MASK); | ||
73 | if (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID) | ||
74 | pte_val(pte) |= _PAGE_INVALID; | ||
75 | if (pmd_prot_none(pmd)) { | ||
76 | if (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT) | ||
77 | pte_val(pte) |= _PAGE_YOUNG; | ||
78 | } else { | ||
79 | pte_val(pte) |= _PAGE_READ; | ||
80 | if (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT) | ||
81 | pte_val(pte) |= _PAGE_PROTECT; | ||
82 | else | ||
83 | pte_val(pte) |= _PAGE_WRITE; | ||
84 | if (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) | ||
85 | pte_val(pte) |= _PAGE_YOUNG; | ||
86 | } | ||
87 | } else | ||
88 | pte_val(pte) = _PAGE_INVALID; | ||
89 | return pte; | ||
90 | } | ||
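A key to the single-letter bit columns used in the two conversion comments above, inferred from the flags the functions actually test rather than spelled out in the patch:

/*
 * pte ".IR...wrdytp":  I = _PAGE_INVALID   R = _PAGE_PROTECT
 *                      w = _PAGE_WRITE     r = _PAGE_READ
 *                      d = _PAGE_DIRTY     y = _PAGE_YOUNG
 *                      t = the _PAGE_TYPE bit used by mk_swap_pte()
 *                      p = _PAGE_PRESENT
 * pmd "..R...I...y.":  R = _SEGMENT_ENTRY_PROTECT
 *                      I = _SEGMENT_ENTRY_INVALID
 *                      y = _SEGMENT_ENTRY_YOUNG
 */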
11 | 91 | ||
12 | void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, | 92 | void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, |
13 | pte_t *pteptr, pte_t pteval) | 93 | pte_t *ptep, pte_t pte) |
14 | { | 94 | { |
15 | pmd_t *pmdp = (pmd_t *) pteptr; | 95 | pmd_t pmd; |
16 | unsigned long mask; | ||
17 | 96 | ||
97 | pmd = __pte_to_pmd(pte); | ||
18 | if (!MACHINE_HAS_HPAGE) { | 98 | if (!MACHINE_HAS_HPAGE) { |
19 | pteptr = (pte_t *) pte_page(pteval)[1].index; | 99 | pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN; |
20 | mask = pte_val(pteval) & | 100 | pmd_val(pmd) |= pte_page(pte)[1].index; |
21 | (_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO); | 101 | } else |
22 | pte_val(pteval) = (_SEGMENT_ENTRY + __pa(pteptr)) | mask; | 102 | pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO; |
103 | *(pmd_t *) ptep = pmd; | ||
104 | } | ||
105 | |||
106 | pte_t huge_ptep_get(pte_t *ptep) | ||
107 | { | ||
108 | unsigned long origin; | ||
109 | pmd_t pmd; | ||
110 | |||
111 | pmd = *(pmd_t *) ptep; | ||
112 | if (!MACHINE_HAS_HPAGE && pmd_present(pmd)) { | ||
113 | origin = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN; | ||
114 | pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN; | ||
115 | pmd_val(pmd) |= *(unsigned long *) origin; | ||
23 | } | 116 | } |
117 | return __pmd_to_pte(pmd); | ||
118 | } | ||
24 | 119 | ||
25 | pmd_val(*pmdp) = pte_val(pteval); | 120 | pte_t huge_ptep_get_and_clear(struct mm_struct *mm, |
121 | unsigned long addr, pte_t *ptep) | ||
122 | { | ||
123 | pmd_t *pmdp = (pmd_t *) ptep; | ||
124 | pte_t pte = huge_ptep_get(ptep); | ||
125 | |||
126 | if (MACHINE_HAS_IDTE) | ||
127 | __pmd_idte(addr, pmdp); | ||
128 | else | ||
129 | __pmd_csp(pmdp); | ||
130 | pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY; | ||
131 | return pte; | ||
26 | } | 132 | } |
27 | 133 | ||
28 | int arch_prepare_hugepage(struct page *page) | 134 | int arch_prepare_hugepage(struct page *page) |
@@ -58,7 +164,7 @@ void arch_release_hugepage(struct page *page) | |||
58 | ptep = (pte_t *) page[1].index; | 164 | ptep = (pte_t *) page[1].index; |
59 | if (!ptep) | 165 | if (!ptep) |
60 | return; | 166 | return; |
61 | clear_table((unsigned long *) ptep, _PAGE_TYPE_EMPTY, | 167 | clear_table((unsigned long *) ptep, _PAGE_INVALID, |
62 | PTRS_PER_PTE * sizeof(pte_t)); | 168 | PTRS_PER_PTE * sizeof(pte_t)); |
63 | page_table_free(&init_mm, (unsigned long *) ptep); | 169 | page_table_free(&init_mm, (unsigned long *) ptep); |
64 | page[1].index = 0; | 170 | page[1].index = 0; |
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index ce36ea80e4f9..ad446b0c55b6 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c | |||
@@ -69,6 +69,7 @@ static void __init setup_zero_pages(void) | |||
69 | order = 2; | 69 | order = 2; |
70 | break; | 70 | break; |
71 | case 0x2827: /* zEC12 */ | 71 | case 0x2827: /* zEC12 */ |
72 | case 0x2828: /* zEC12 */ | ||
72 | default: | 73 | default: |
73 | order = 5; | 74 | order = 5; |
74 | break; | 75 | break; |
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c index 80adfbf75065..990397420e6b 100644 --- a/arch/s390/mm/pageattr.c +++ b/arch/s390/mm/pageattr.c | |||
@@ -118,7 +118,7 @@ void kernel_map_pages(struct page *page, int numpages, int enable) | |||
118 | pte = pte_offset_kernel(pmd, address); | 118 | pte = pte_offset_kernel(pmd, address); |
119 | if (!enable) { | 119 | if (!enable) { |
120 | __ptep_ipte(address, pte); | 120 | __ptep_ipte(address, pte); |
121 | pte_val(*pte) = _PAGE_TYPE_EMPTY; | 121 | pte_val(*pte) = _PAGE_INVALID; |
122 | continue; | 122 | continue; |
123 | } | 123 | } |
124 | pte_val(*pte) = __pa(address); | 124 | pte_val(*pte) = __pa(address); |
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index a8154a1a2c94..6d16132d0850 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c | |||
@@ -161,7 +161,7 @@ static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table) | |||
161 | struct gmap_rmap *rmap; | 161 | struct gmap_rmap *rmap; |
162 | struct page *page; | 162 | struct page *page; |
163 | 163 | ||
164 | if (*table & _SEGMENT_ENTRY_INV) | 164 | if (*table & _SEGMENT_ENTRY_INVALID) |
165 | return 0; | 165 | return 0; |
166 | page = pfn_to_page(*table >> PAGE_SHIFT); | 166 | page = pfn_to_page(*table >> PAGE_SHIFT); |
167 | mp = (struct gmap_pgtable *) page->index; | 167 | mp = (struct gmap_pgtable *) page->index; |
@@ -172,7 +172,7 @@ static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table) | |||
172 | kfree(rmap); | 172 | kfree(rmap); |
173 | break; | 173 | break; |
174 | } | 174 | } |
175 | *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr; | 175 | *table = mp->vmaddr | _SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_PROTECT; |
176 | return 1; | 176 | return 1; |
177 | } | 177 | } |
178 | 178 | ||
@@ -258,7 +258,7 @@ static int gmap_alloc_table(struct gmap *gmap, | |||
258 | return -ENOMEM; | 258 | return -ENOMEM; |
259 | new = (unsigned long *) page_to_phys(page); | 259 | new = (unsigned long *) page_to_phys(page); |
260 | crst_table_init(new, init); | 260 | crst_table_init(new, init); |
261 | if (*table & _REGION_ENTRY_INV) { | 261 | if (*table & _REGION_ENTRY_INVALID) { |
262 | list_add(&page->lru, &gmap->crst_list); | 262 | list_add(&page->lru, &gmap->crst_list); |
263 | *table = (unsigned long) new | _REGION_ENTRY_LENGTH | | 263 | *table = (unsigned long) new | _REGION_ENTRY_LENGTH | |
264 | (*table & _REGION_ENTRY_TYPE_MASK); | 264 | (*table & _REGION_ENTRY_TYPE_MASK); |
@@ -292,22 +292,22 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len) | |||
292 | for (off = 0; off < len; off += PMD_SIZE) { | 292 | for (off = 0; off < len; off += PMD_SIZE) { |
293 | /* Walk the guest addr space page table */ | 293 | /* Walk the guest addr space page table */ |
294 | table = gmap->table + (((to + off) >> 53) & 0x7ff); | 294 | table = gmap->table + (((to + off) >> 53) & 0x7ff); |
295 | if (*table & _REGION_ENTRY_INV) | 295 | if (*table & _REGION_ENTRY_INVALID) |
296 | goto out; | 296 | goto out; |
297 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | 297 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); |
298 | table = table + (((to + off) >> 42) & 0x7ff); | 298 | table = table + (((to + off) >> 42) & 0x7ff); |
299 | if (*table & _REGION_ENTRY_INV) | 299 | if (*table & _REGION_ENTRY_INVALID) |
300 | goto out; | 300 | goto out; |
301 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | 301 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); |
302 | table = table + (((to + off) >> 31) & 0x7ff); | 302 | table = table + (((to + off) >> 31) & 0x7ff); |
303 | if (*table & _REGION_ENTRY_INV) | 303 | if (*table & _REGION_ENTRY_INVALID) |
304 | goto out; | 304 | goto out; |
305 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | 305 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); |
306 | table = table + (((to + off) >> 20) & 0x7ff); | 306 | table = table + (((to + off) >> 20) & 0x7ff); |
307 | 307 | ||
308 | /* Clear segment table entry in guest address space. */ | 308 | /* Clear segment table entry in guest address space. */ |
309 | flush |= gmap_unlink_segment(gmap, table); | 309 | flush |= gmap_unlink_segment(gmap, table); |
310 | *table = _SEGMENT_ENTRY_INV; | 310 | *table = _SEGMENT_ENTRY_INVALID; |
311 | } | 311 | } |
312 | out: | 312 | out: |
313 | spin_unlock(&gmap->mm->page_table_lock); | 313 | spin_unlock(&gmap->mm->page_table_lock); |
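The unmap loop above (and the map loop in the next hunk) walks a four-level guest address-space table, taking an 11-bit index at each level with the shifts 53, 42, 31 and 20. A minimal user-space sketch of just that index arithmetic follows, using an arbitrary example address; the table entries and flag bits themselves are not modelled here.

#include <stdio.h>

/* Illustration only: index extraction as used by the gmap walk above.
 * Each table level holds 2048 entries, so every index is 11 bits wide. */
static unsigned long idx(unsigned long addr, unsigned int shift)
{
        return (addr >> shift) & 0x7ff;
}

int main(void)
{
        unsigned long gaddr = 0x0123456789abc000UL;   /* example guest address */

        printf("region-1 index: %lu\n", idx(gaddr, 53));
        printf("region-2 index: %lu\n", idx(gaddr, 42));
        printf("region-3 index: %lu\n", idx(gaddr, 31));
        printf("segment  index: %lu\n", idx(gaddr, 20));
        return 0;
}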
@@ -345,17 +345,17 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from, | |||
345 | for (off = 0; off < len; off += PMD_SIZE) { | 345 | for (off = 0; off < len; off += PMD_SIZE) { |
346 | /* Walk the gmap address space page table */ | 346 | /* Walk the gmap address space page table */ |
347 | table = gmap->table + (((to + off) >> 53) & 0x7ff); | 347 | table = gmap->table + (((to + off) >> 53) & 0x7ff); |
348 | if ((*table & _REGION_ENTRY_INV) && | 348 | if ((*table & _REGION_ENTRY_INVALID) && |
349 | gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY)) | 349 | gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY)) |
350 | goto out_unmap; | 350 | goto out_unmap; |
351 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | 351 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); |
352 | table = table + (((to + off) >> 42) & 0x7ff); | 352 | table = table + (((to + off) >> 42) & 0x7ff); |
353 | if ((*table & _REGION_ENTRY_INV) && | 353 | if ((*table & _REGION_ENTRY_INVALID) && |
354 | gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY)) | 354 | gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY)) |
355 | goto out_unmap; | 355 | goto out_unmap; |
356 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | 356 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); |
357 | table = table + (((to + off) >> 31) & 0x7ff); | 357 | table = table + (((to + off) >> 31) & 0x7ff); |
358 | if ((*table & _REGION_ENTRY_INV) && | 358 | if ((*table & _REGION_ENTRY_INVALID) && |
359 | gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY)) | 359 | gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY)) |
360 | goto out_unmap; | 360 | goto out_unmap; |
361 | table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN); | 361 | table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN); |
@@ -363,7 +363,8 @@ int gmap_map_segment(struct gmap *gmap, unsigned long from, | |||
363 | 363 | ||
364 | /* Store 'from' address in an invalid segment table entry. */ | 364 | /* Store 'from' address in an invalid segment table entry. */ |
365 | flush |= gmap_unlink_segment(gmap, table); | 365 | flush |= gmap_unlink_segment(gmap, table); |
366 | *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off); | 366 | *table = (from + off) | (_SEGMENT_ENTRY_INVALID | |
367 | _SEGMENT_ENTRY_PROTECT); | ||
367 | } | 368 | } |
368 | spin_unlock(&gmap->mm->page_table_lock); | 369 | spin_unlock(&gmap->mm->page_table_lock); |
369 | up_read(&gmap->mm->mmap_sem); | 370 | up_read(&gmap->mm->mmap_sem); |
@@ -384,15 +385,15 @@ static unsigned long *gmap_table_walk(unsigned long address, struct gmap *gmap) | |||
384 | unsigned long *table; | 385 | unsigned long *table; |
385 | 386 | ||
386 | table = gmap->table + ((address >> 53) & 0x7ff); | 387 | table = gmap->table + ((address >> 53) & 0x7ff); |
387 | if (unlikely(*table & _REGION_ENTRY_INV)) | 388 | if (unlikely(*table & _REGION_ENTRY_INVALID)) |
388 | return ERR_PTR(-EFAULT); | 389 | return ERR_PTR(-EFAULT); |
389 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | 390 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); |
390 | table = table + ((address >> 42) & 0x7ff); | 391 | table = table + ((address >> 42) & 0x7ff); |
391 | if (unlikely(*table & _REGION_ENTRY_INV)) | 392 | if (unlikely(*table & _REGION_ENTRY_INVALID)) |
392 | return ERR_PTR(-EFAULT); | 393 | return ERR_PTR(-EFAULT); |
393 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | 394 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); |
394 | table = table + ((address >> 31) & 0x7ff); | 395 | table = table + ((address >> 31) & 0x7ff); |
395 | if (unlikely(*table & _REGION_ENTRY_INV)) | 396 | if (unlikely(*table & _REGION_ENTRY_INVALID)) |
396 | return ERR_PTR(-EFAULT); | 397 | return ERR_PTR(-EFAULT); |
397 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | 398 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); |
398 | table = table + ((address >> 20) & 0x7ff); | 399 | table = table + ((address >> 20) & 0x7ff); |
@@ -422,11 +423,11 @@ unsigned long __gmap_translate(unsigned long address, struct gmap *gmap) | |||
422 | return PTR_ERR(segment_ptr); | 423 | return PTR_ERR(segment_ptr); |
423 | /* Convert the gmap address to an mm address. */ | 424 | /* Convert the gmap address to an mm address. */ |
424 | segment = *segment_ptr; | 425 | segment = *segment_ptr; |
425 | if (!(segment & _SEGMENT_ENTRY_INV)) { | 426 | if (!(segment & _SEGMENT_ENTRY_INVALID)) { |
426 | page = pfn_to_page(segment >> PAGE_SHIFT); | 427 | page = pfn_to_page(segment >> PAGE_SHIFT); |
427 | mp = (struct gmap_pgtable *) page->index; | 428 | mp = (struct gmap_pgtable *) page->index; |
428 | return mp->vmaddr | (address & ~PMD_MASK); | 429 | return mp->vmaddr | (address & ~PMD_MASK); |
429 | } else if (segment & _SEGMENT_ENTRY_RO) { | 430 | } else if (segment & _SEGMENT_ENTRY_PROTECT) { |
430 | vmaddr = segment & _SEGMENT_ENTRY_ORIGIN; | 431 | vmaddr = segment & _SEGMENT_ENTRY_ORIGIN; |
431 | return vmaddr | (address & ~PMD_MASK); | 432 | return vmaddr | (address & ~PMD_MASK); |
432 | } | 433 | } |
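__gmap_translate() above distinguishes three cases from two flag bits of the segment entry: valid, invalid-but-protected (the host address is cached in the entry itself), and not mapped at all. A hedged sketch of that decision follows; the bit values and the cached_vmaddr parameter are invented for illustration, the real masks live in arch/s390/include/asm/pgtable.h.

#include <stdio.h>

/* Hypothetical bit layout for illustration; not the real s390 definitions. */
#define SEG_INVALID   0x20UL
#define SEG_PROTECT   0x200UL
#define SEG_ORIGIN    (~0x7ffUL)        /* cached host vmaddr / table origin */
#define SEG_PMD_MASK  (~0xfffffUL)      /* 1 MB segment, low 20 bits are the offset */

/* Mirrors the three cases in __gmap_translate(): a valid entry refers to a
 * host page table whose backing page records the host vmaddr (passed in here
 * as cached_vmaddr), an invalid-but-protected entry carries the vmaddr in the
 * entry itself, and anything else means nothing is mapped. */
static long translate(unsigned long segment, unsigned long gaddr,
                      unsigned long cached_vmaddr)
{
        if (!(segment & SEG_INVALID))
                return cached_vmaddr | (gaddr & ~SEG_PMD_MASK);
        if (segment & SEG_PROTECT)
                return (segment & SEG_ORIGIN) | (gaddr & ~SEG_PMD_MASK);
        return -1;      /* -EFAULT in the kernel code */
}

int main(void)
{
        unsigned long gaddr = 0x100123UL;

        printf("%#lx\n", (unsigned long) translate(0x7000000UL, gaddr, 0x5000000UL));
        printf("%#lx\n", (unsigned long) translate(0x7000000UL | SEG_INVALID | SEG_PROTECT,
                                                   gaddr, 0));
        printf("%ld\n", translate(SEG_INVALID, gaddr, 0));
        return 0;
}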
@@ -517,8 +518,8 @@ static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table) | |||
517 | page = pfn_to_page(__pa(table) >> PAGE_SHIFT); | 518 | page = pfn_to_page(__pa(table) >> PAGE_SHIFT); |
518 | mp = (struct gmap_pgtable *) page->index; | 519 | mp = (struct gmap_pgtable *) page->index; |
519 | list_for_each_entry_safe(rmap, next, &mp->mapper, list) { | 520 | list_for_each_entry_safe(rmap, next, &mp->mapper, list) { |
520 | *rmap->entry = | 521 | *rmap->entry = mp->vmaddr | (_SEGMENT_ENTRY_INVALID | |
521 | _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr; | 522 | _SEGMENT_ENTRY_PROTECT); |
522 | list_del(&rmap->list); | 523 | list_del(&rmap->list); |
523 | kfree(rmap); | 524 | kfree(rmap); |
524 | flush = 1; | 525 | flush = 1; |
@@ -545,13 +546,13 @@ unsigned long __gmap_fault(unsigned long address, struct gmap *gmap) | |||
545 | /* Convert the gmap address to an mm address. */ | 546 | /* Convert the gmap address to an mm address. */ |
546 | while (1) { | 547 | while (1) { |
547 | segment = *segment_ptr; | 548 | segment = *segment_ptr; |
548 | if (!(segment & _SEGMENT_ENTRY_INV)) { | 549 | if (!(segment & _SEGMENT_ENTRY_INVALID)) { |
549 | /* Page table is present */ | 550 | /* Page table is present */ |
550 | page = pfn_to_page(segment >> PAGE_SHIFT); | 551 | page = pfn_to_page(segment >> PAGE_SHIFT); |
551 | mp = (struct gmap_pgtable *) page->index; | 552 | mp = (struct gmap_pgtable *) page->index; |
552 | return mp->vmaddr | (address & ~PMD_MASK); | 553 | return mp->vmaddr | (address & ~PMD_MASK); |
553 | } | 554 | } |
554 | if (!(segment & _SEGMENT_ENTRY_RO)) | 555 | if (!(segment & _SEGMENT_ENTRY_PROTECT)) |
555 | /* Nothing mapped in the gmap address space. */ | 556 | /* Nothing mapped in the gmap address space. */ |
556 | break; | 557 | break; |
557 | rc = gmap_connect_pgtable(address, segment, segment_ptr, gmap); | 558 | rc = gmap_connect_pgtable(address, segment, segment_ptr, gmap); |
@@ -586,25 +587,25 @@ void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap) | |||
586 | while (address < to) { | 587 | while (address < to) { |
587 | /* Walk the gmap address space page table */ | 588 | /* Walk the gmap address space page table */ |
588 | table = gmap->table + ((address >> 53) & 0x7ff); | 589 | table = gmap->table + ((address >> 53) & 0x7ff); |
589 | if (unlikely(*table & _REGION_ENTRY_INV)) { | 590 | if (unlikely(*table & _REGION_ENTRY_INVALID)) { |
590 | address = (address + PMD_SIZE) & PMD_MASK; | 591 | address = (address + PMD_SIZE) & PMD_MASK; |
591 | continue; | 592 | continue; |
592 | } | 593 | } |
593 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | 594 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); |
594 | table = table + ((address >> 42) & 0x7ff); | 595 | table = table + ((address >> 42) & 0x7ff); |
595 | if (unlikely(*table & _REGION_ENTRY_INV)) { | 596 | if (unlikely(*table & _REGION_ENTRY_INVALID)) { |
596 | address = (address + PMD_SIZE) & PMD_MASK; | 597 | address = (address + PMD_SIZE) & PMD_MASK; |
597 | continue; | 598 | continue; |
598 | } | 599 | } |
599 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | 600 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); |
600 | table = table + ((address >> 31) & 0x7ff); | 601 | table = table + ((address >> 31) & 0x7ff); |
601 | if (unlikely(*table & _REGION_ENTRY_INV)) { | 602 | if (unlikely(*table & _REGION_ENTRY_INVALID)) { |
602 | address = (address + PMD_SIZE) & PMD_MASK; | 603 | address = (address + PMD_SIZE) & PMD_MASK; |
603 | continue; | 604 | continue; |
604 | } | 605 | } |
605 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | 606 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); |
606 | table = table + ((address >> 20) & 0x7ff); | 607 | table = table + ((address >> 20) & 0x7ff); |
607 | if (unlikely(*table & _SEGMENT_ENTRY_INV)) { | 608 | if (unlikely(*table & _SEGMENT_ENTRY_INVALID)) { |
608 | address = (address + PMD_SIZE) & PMD_MASK; | 609 | address = (address + PMD_SIZE) & PMD_MASK; |
609 | continue; | 610 | continue; |
610 | } | 611 | } |
@@ -687,7 +688,7 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long start, unsigned long len) | |||
687 | continue; | 688 | continue; |
688 | /* Set notification bit in the pgste of the pte */ | 689 | /* Set notification bit in the pgste of the pte */ |
689 | entry = *ptep; | 690 | entry = *ptep; |
690 | if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_RO)) == 0) { | 691 | if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) { |
691 | pgste = pgste_get_lock(ptep); | 692 | pgste = pgste_get_lock(ptep); |
692 | pgste_val(pgste) |= PGSTE_IN_BIT; | 693 | pgste_val(pgste) |= PGSTE_IN_BIT; |
693 | pgste_set_unlock(ptep, pgste); | 694 | pgste_set_unlock(ptep, pgste); |
@@ -752,8 +753,9 @@ static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm, | |||
752 | page->index = (unsigned long) mp; | 753 | page->index = (unsigned long) mp; |
753 | atomic_set(&page->_mapcount, 3); | 754 | atomic_set(&page->_mapcount, 3); |
754 | table = (unsigned long *) page_to_phys(page); | 755 | table = (unsigned long *) page_to_phys(page); |
755 | clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2); | 756 | clear_table(table, _PAGE_INVALID, PAGE_SIZE/2); |
756 | clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2); | 757 | clear_table(table + PTRS_PER_PTE, PGSTE_HR_BIT | PGSTE_HC_BIT, |
758 | PAGE_SIZE/2); | ||
757 | return table; | 759 | return table; |
758 | } | 760 | } |
759 | 761 | ||
@@ -791,26 +793,21 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, | |||
791 | pgste_val(new) |= (key & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48; | 793 | pgste_val(new) |= (key & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48; |
792 | pgste_val(new) |= (key & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56; | 794 | pgste_val(new) |= (key & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56; |
793 | if (!(pte_val(*ptep) & _PAGE_INVALID)) { | 795 | if (!(pte_val(*ptep) & _PAGE_INVALID)) { |
794 | unsigned long address, bits; | 796 | unsigned long address, bits, skey; |
795 | unsigned char skey; | ||
796 | 797 | ||
797 | address = pte_val(*ptep) & PAGE_MASK; | 798 | address = pte_val(*ptep) & PAGE_MASK; |
798 | skey = page_get_storage_key(address); | 799 | skey = (unsigned long) page_get_storage_key(address); |
799 | bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED); | 800 | bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED); |
801 | skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT); | ||
800 | /* Set storage key ACC and FP */ | 802 | /* Set storage key ACC and FP */ |
801 | page_set_storage_key(address, | 803 | page_set_storage_key(address, skey, !nq); |
802 | (key & (_PAGE_ACC_BITS | _PAGE_FP_BIT)), | ||
803 | !nq); | ||
804 | |||
805 | /* Merge host changed & referenced into pgste */ | 804 | /* Merge host changed & referenced into pgste */ |
806 | pgste_val(new) |= bits << 52; | 805 | pgste_val(new) |= bits << 52; |
807 | /* Transfer skey changed & referenced bit to kvm user bits */ | ||
808 | pgste_val(new) |= bits << 45; /* PGSTE_UR_BIT & PGSTE_UC_BIT */ | ||
809 | } | 806 | } |
810 | /* changing the guest storage key is considered a change of the page */ | 807 | /* changing the guest storage key is considered a change of the page */ |
811 | if ((pgste_val(new) ^ pgste_val(old)) & | 808 | if ((pgste_val(new) ^ pgste_val(old)) & |
812 | (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT)) | 809 | (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT)) |
813 | pgste_val(new) |= PGSTE_UC_BIT; | 810 | pgste_val(new) |= PGSTE_HC_BIT; |
814 | 811 | ||
815 | pgste_set_unlock(ptep, new); | 812 | pgste_set_unlock(ptep, new); |
816 | pte_unmap_unlock(*ptep, ptl); | 813 | pte_unmap_unlock(*ptep, ptl); |
@@ -878,7 +875,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr) | |||
878 | pgtable_page_ctor(page); | 875 | pgtable_page_ctor(page); |
879 | atomic_set(&page->_mapcount, 1); | 876 | atomic_set(&page->_mapcount, 1); |
880 | table = (unsigned long *) page_to_phys(page); | 877 | table = (unsigned long *) page_to_phys(page); |
881 | clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE); | 878 | clear_table(table, _PAGE_INVALID, PAGE_SIZE); |
882 | spin_lock_bh(&mm->context.list_lock); | 879 | spin_lock_bh(&mm->context.list_lock); |
883 | list_add(&page->lru, &mm->context.pgtable_list); | 880 | list_add(&page->lru, &mm->context.pgtable_list); |
884 | } else { | 881 | } else { |
@@ -1007,7 +1004,6 @@ void tlb_table_flush(struct mmu_gather *tlb) | |||
1007 | struct mmu_table_batch **batch = &tlb->batch; | 1004 | struct mmu_table_batch **batch = &tlb->batch; |
1008 | 1005 | ||
1009 | if (*batch) { | 1006 | if (*batch) { |
1010 | __tlb_flush_mm(tlb->mm); | ||
1011 | call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu); | 1007 | call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu); |
1012 | *batch = NULL; | 1008 | *batch = NULL; |
1013 | } | 1009 | } |
@@ -1017,11 +1013,12 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table) | |||
1017 | { | 1013 | { |
1018 | struct mmu_table_batch **batch = &tlb->batch; | 1014 | struct mmu_table_batch **batch = &tlb->batch; |
1019 | 1015 | ||
1016 | tlb->mm->context.flush_mm = 1; | ||
1020 | if (*batch == NULL) { | 1017 | if (*batch == NULL) { |
1021 | *batch = (struct mmu_table_batch *) | 1018 | *batch = (struct mmu_table_batch *) |
1022 | __get_free_page(GFP_NOWAIT | __GFP_NOWARN); | 1019 | __get_free_page(GFP_NOWAIT | __GFP_NOWARN); |
1023 | if (*batch == NULL) { | 1020 | if (*batch == NULL) { |
1024 | __tlb_flush_mm(tlb->mm); | 1021 | __tlb_flush_mm_lazy(tlb->mm); |
1025 | tlb_remove_table_one(table); | 1022 | tlb_remove_table_one(table); |
1026 | return; | 1023 | return; |
1027 | } | 1024 | } |
@@ -1029,7 +1026,7 @@ void tlb_remove_table(struct mmu_gather *tlb, void *table) | |||
1029 | } | 1026 | } |
1030 | (*batch)->tables[(*batch)->nr++] = table; | 1027 | (*batch)->tables[(*batch)->nr++] = table; |
1031 | if ((*batch)->nr == MAX_TABLE_BATCH) | 1028 | if ((*batch)->nr == MAX_TABLE_BATCH) |
1032 | tlb_table_flush(tlb); | 1029 | tlb_flush_mmu(tlb); |
1033 | } | 1030 | } |
1034 | 1031 | ||
1035 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 1032 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
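The tlb_remove_table() hunks above batch freed page tables and only mark the mm with a flush_mm flag; the flush happens lazily, either when the batch fills or when no batch page can be allocated. A rough user-space model of that batching shape follows, with invented names and a stubbed-out flush; it is not the kernel code path.

#include <stdio.h>
#include <stdlib.h>

#define MAX_TABLE_BATCH 4

struct table_batch {
        unsigned int nr;
        void *tables[MAX_TABLE_BATCH];
};

static int flush_pending;               /* models mm->context.flush_mm */
static struct table_batch *batch;

static void flush_now(void)             /* models the lazy flush plus batch release */
{
        if (flush_pending)
                printf("TLB flushed, releasing %u tables\n", batch ? batch->nr : 0);
        flush_pending = 0;
        free(batch);
        batch = NULL;
}

static void remove_table(void *table)
{
        flush_pending = 1;              /* mark the mm as needing a flush */
        if (!batch) {
                batch = calloc(1, sizeof(*batch));
                if (!batch) {           /* no memory for a batch: flush right away
                                         * (the kernel also frees this one table) */
                        flush_now();
                        return;
                }
        }
        batch->tables[batch->nr++] = table;
        if (batch->nr == MAX_TABLE_BATCH)
                flush_now();            /* tlb_flush_mmu() in the kernel code */
}

int main(void)
{
        int dummy[6];

        for (int i = 0; i < 6; i++)
                remove_table(&dummy[i]);
        flush_now();                    /* final flush at the end of the gather */
        return 0;
}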
@@ -1198,9 +1195,9 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp) | |||
1198 | list_del(lh); | 1195 | list_del(lh); |
1199 | } | 1196 | } |
1200 | ptep = (pte_t *) pgtable; | 1197 | ptep = (pte_t *) pgtable; |
1201 | pte_val(*ptep) = _PAGE_TYPE_EMPTY; | 1198 | pte_val(*ptep) = _PAGE_INVALID; |
1202 | ptep++; | 1199 | ptep++; |
1203 | pte_val(*ptep) = _PAGE_TYPE_EMPTY; | 1200 | pte_val(*ptep) = _PAGE_INVALID; |
1204 | return pgtable; | 1201 | return pgtable; |
1205 | } | 1202 | } |
1206 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ | 1203 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ |
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c index 8b268fcc4612..bcfb70b60be6 100644 --- a/arch/s390/mm/vmem.c +++ b/arch/s390/mm/vmem.c | |||
@@ -69,7 +69,7 @@ static pte_t __ref *vmem_pte_alloc(unsigned long address) | |||
69 | pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t)); | 69 | pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t)); |
70 | if (!pte) | 70 | if (!pte) |
71 | return NULL; | 71 | return NULL; |
72 | clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY, | 72 | clear_table((unsigned long *) pte, _PAGE_INVALID, |
73 | PTRS_PER_PTE * sizeof(pte_t)); | 73 | PTRS_PER_PTE * sizeof(pte_t)); |
74 | return pte; | 74 | return pte; |
75 | } | 75 | } |
@@ -101,7 +101,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro) | |||
101 | !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) { | 101 | !(address & ~PUD_MASK) && (address + PUD_SIZE <= end)) { |
102 | pud_val(*pu_dir) = __pa(address) | | 102 | pud_val(*pu_dir) = __pa(address) | |
103 | _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE | | 103 | _REGION_ENTRY_TYPE_R3 | _REGION3_ENTRY_LARGE | |
104 | (ro ? _REGION_ENTRY_RO : 0); | 104 | (ro ? _REGION_ENTRY_PROTECT : 0); |
105 | address += PUD_SIZE; | 105 | address += PUD_SIZE; |
106 | continue; | 106 | continue; |
107 | } | 107 | } |
@@ -118,7 +118,8 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro) | |||
118 | !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) { | 118 | !(address & ~PMD_MASK) && (address + PMD_SIZE <= end)) { |
119 | pmd_val(*pm_dir) = __pa(address) | | 119 | pmd_val(*pm_dir) = __pa(address) | |
120 | _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE | | 120 | _SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE | |
121 | (ro ? _SEGMENT_ENTRY_RO : 0); | 121 | _SEGMENT_ENTRY_YOUNG | |
122 | (ro ? _SEGMENT_ENTRY_PROTECT : 0); | ||
122 | address += PMD_SIZE; | 123 | address += PMD_SIZE; |
123 | continue; | 124 | continue; |
124 | } | 125 | } |
@@ -131,7 +132,8 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro) | |||
131 | } | 132 | } |
132 | 133 | ||
133 | pt_dir = pte_offset_kernel(pm_dir, address); | 134 | pt_dir = pte_offset_kernel(pm_dir, address); |
134 | pte_val(*pt_dir) = __pa(address) | (ro ? _PAGE_RO : 0); | 135 | pte_val(*pt_dir) = __pa(address) | |
136 | pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL); | ||
135 | address += PAGE_SIZE; | 137 | address += PAGE_SIZE; |
136 | } | 138 | } |
137 | ret = 0; | 139 | ret = 0; |
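vmem_add_mem() above always tries the largest mapping that fits: a 2 GB region-third entry when address and remaining length allow it, then a 1 MB segment entry, and only then 4 KB pages. A simplified sketch of that size-selection loop follows; the hardware-capability checks and the actual table updates are omitted.

#include <stdio.h>

#define PUD_SIZE  (1UL << 31)   /* 2 GB region-third entry */
#define PMD_SIZE  (1UL << 20)   /* 1 MB segment entry */
#define PAGE_SIZE (1UL << 12)   /* 4 KB page */

/* Map [start, start+size) choosing the largest aligned step each time. */
static void map_range(unsigned long start, unsigned long size)
{
        unsigned long addr = start, end = start + size;

        while (addr < end) {
                unsigned long step;

                if (!(addr & (PUD_SIZE - 1)) && addr + PUD_SIZE <= end)
                        step = PUD_SIZE;
                else if (!(addr & (PMD_SIZE - 1)) && addr + PMD_SIZE <= end)
                        step = PMD_SIZE;
                else
                        step = PAGE_SIZE;
                printf("map %#lx + %#lx\n", addr, step);
                addr += step;
        }
}

int main(void)
{
        /* unaligned start and tail force page-sized steps at both ends */
        map_range(0xff000UL, 2 * PMD_SIZE + 3 * PAGE_SIZE);
        return 0;
}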
@@ -154,7 +156,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size) | |||
154 | pte_t *pt_dir; | 156 | pte_t *pt_dir; |
155 | pte_t pte; | 157 | pte_t pte; |
156 | 158 | ||
157 | pte_val(pte) = _PAGE_TYPE_EMPTY; | 159 | pte_val(pte) = _PAGE_INVALID; |
158 | while (address < end) { | 160 | while (address < end) { |
159 | pg_dir = pgd_offset_k(address); | 161 | pg_dir = pgd_offset_k(address); |
160 | if (pgd_none(*pg_dir)) { | 162 | if (pgd_none(*pg_dir)) { |
@@ -255,7 +257,8 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node) | |||
255 | new_page =__pa(vmem_alloc_pages(0)); | 257 | new_page =__pa(vmem_alloc_pages(0)); |
256 | if (!new_page) | 258 | if (!new_page) |
257 | goto out; | 259 | goto out; |
258 | pte_val(*pt_dir) = __pa(new_page); | 260 | pte_val(*pt_dir) = |
261 | __pa(new_page) | pgprot_val(PAGE_KERNEL); | ||
259 | } | 262 | } |
260 | address += PAGE_SIZE; | 263 | address += PAGE_SIZE; |
261 | } | 264 | } |
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c index ffeb17ce7f31..930783d2c99b 100644 --- a/arch/s390/oprofile/init.c +++ b/arch/s390/oprofile/init.c | |||
@@ -440,7 +440,7 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops) | |||
440 | switch (id.machine) { | 440 | switch (id.machine) { |
441 | case 0x2097: case 0x2098: ops->cpu_type = "s390/z10"; break; | 441 | case 0x2097: case 0x2098: ops->cpu_type = "s390/z10"; break; |
442 | case 0x2817: case 0x2818: ops->cpu_type = "s390/z196"; break; | 442 | case 0x2817: case 0x2818: ops->cpu_type = "s390/z196"; break; |
443 | case 0x2827: ops->cpu_type = "s390/zEC12"; break; | 443 | case 0x2827: case 0x2828: ops->cpu_type = "s390/zEC12"; break; |
444 | default: return -ENODEV; | 444 | default: return -ENODEV; |
445 | } | 445 | } |
446 | } | 446 | } |
diff --git a/arch/s390/pci/Makefile b/arch/s390/pci/Makefile index 086a2e37935d..a9e1dc4ae442 100644 --- a/arch/s390/pci/Makefile +++ b/arch/s390/pci/Makefile | |||
@@ -2,5 +2,5 @@ | |||
2 | # Makefile for the s390 PCI subsystem. | 2 | # Makefile for the s390 PCI subsystem. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_msi.o pci_sysfs.o \ | 5 | obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_sysfs.o \ |
6 | pci_event.o pci_debug.o pci_insn.o | 6 | pci_event.o pci_debug.o pci_insn.o |
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index e2956ad39a4f..f17a8343e360 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c | |||
@@ -42,45 +42,26 @@ | |||
42 | #define SIC_IRQ_MODE_SINGLE 1 | 42 | #define SIC_IRQ_MODE_SINGLE 1 |
43 | 43 | ||
44 | #define ZPCI_NR_DMA_SPACES 1 | 44 | #define ZPCI_NR_DMA_SPACES 1 |
45 | #define ZPCI_MSI_VEC_BITS 6 | ||
46 | #define ZPCI_NR_DEVICES CONFIG_PCI_NR_FUNCTIONS | 45 | #define ZPCI_NR_DEVICES CONFIG_PCI_NR_FUNCTIONS |
47 | 46 | ||
48 | /* list of all detected zpci devices */ | 47 | /* list of all detected zpci devices */ |
49 | LIST_HEAD(zpci_list); | 48 | static LIST_HEAD(zpci_list); |
50 | EXPORT_SYMBOL_GPL(zpci_list); | 49 | static DEFINE_SPINLOCK(zpci_list_lock); |
51 | DEFINE_MUTEX(zpci_list_lock); | ||
52 | EXPORT_SYMBOL_GPL(zpci_list_lock); | ||
53 | 50 | ||
54 | static struct pci_hp_callback_ops *hotplug_ops; | 51 | static void zpci_enable_irq(struct irq_data *data); |
52 | static void zpci_disable_irq(struct irq_data *data); | ||
55 | 53 | ||
56 | static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES); | 54 | static struct irq_chip zpci_irq_chip = { |
57 | static DEFINE_SPINLOCK(zpci_domain_lock); | 55 | .name = "zPCI", |
58 | 56 | .irq_unmask = zpci_enable_irq, | |
59 | struct callback { | 57 | .irq_mask = zpci_disable_irq, |
60 | irq_handler_t handler; | ||
61 | void *data; | ||
62 | }; | 58 | }; |
63 | 59 | ||
64 | struct zdev_irq_map { | 60 | static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES); |
65 | unsigned long aibv; /* AI bit vector */ | 61 | static DEFINE_SPINLOCK(zpci_domain_lock); |
66 | int msi_vecs; /* consecutive MSI-vectors used */ | ||
67 | int __unused; | ||
68 | struct callback cb[ZPCI_NR_MSI_VECS]; /* callback handler array */ | ||
69 | spinlock_t lock; /* protect callbacks against de-reg */ | ||
70 | }; | ||
71 | |||
72 | struct intr_bucket { | ||
73 | /* amap of adapters, one bit per dev, corresponds to one irq nr */ | ||
74 | unsigned long *alloc; | ||
75 | /* AI summary bit, global page for all devices */ | ||
76 | unsigned long *aisb; | ||
77 | /* pointer to aibv and callback data in zdev */ | ||
78 | struct zdev_irq_map *imap[ZPCI_NR_DEVICES]; | ||
79 | /* protects the whole bucket struct */ | ||
80 | spinlock_t lock; | ||
81 | }; | ||
82 | 62 | ||
83 | static struct intr_bucket *bucket; | 63 | static struct airq_iv *zpci_aisb_iv; |
64 | static struct airq_iv *zpci_aibv[ZPCI_NR_DEVICES]; | ||
84 | 65 | ||
85 | /* Adapter interrupt definitions */ | 66 | /* Adapter interrupt definitions */ |
86 | static void zpci_irq_handler(struct airq_struct *airq); | 67 | static void zpci_irq_handler(struct airq_struct *airq); |
@@ -96,27 +77,8 @@ static DECLARE_BITMAP(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES); | |||
96 | struct zpci_iomap_entry *zpci_iomap_start; | 77 | struct zpci_iomap_entry *zpci_iomap_start; |
97 | EXPORT_SYMBOL_GPL(zpci_iomap_start); | 78 | EXPORT_SYMBOL_GPL(zpci_iomap_start); |
98 | 79 | ||
99 | /* highest irq summary bit */ | ||
100 | static int __read_mostly aisb_max; | ||
101 | |||
102 | static struct kmem_cache *zdev_irq_cache; | ||
103 | static struct kmem_cache *zdev_fmb_cache; | 80 | static struct kmem_cache *zdev_fmb_cache; |
104 | 81 | ||
105 | static inline int irq_to_msi_nr(unsigned int irq) | ||
106 | { | ||
107 | return irq & ZPCI_MSI_MASK; | ||
108 | } | ||
109 | |||
110 | static inline int irq_to_dev_nr(unsigned int irq) | ||
111 | { | ||
112 | return irq >> ZPCI_MSI_VEC_BITS; | ||
113 | } | ||
114 | |||
115 | static inline struct zdev_irq_map *get_imap(unsigned int irq) | ||
116 | { | ||
117 | return bucket->imap[irq_to_dev_nr(irq)]; | ||
118 | } | ||
119 | |||
120 | struct zpci_dev *get_zdev(struct pci_dev *pdev) | 82 | struct zpci_dev *get_zdev(struct pci_dev *pdev) |
121 | { | 83 | { |
122 | return (struct zpci_dev *) pdev->sysdata; | 84 | return (struct zpci_dev *) pdev->sysdata; |
@@ -126,22 +88,17 @@ struct zpci_dev *get_zdev_by_fid(u32 fid) | |||
126 | { | 88 | { |
127 | struct zpci_dev *tmp, *zdev = NULL; | 89 | struct zpci_dev *tmp, *zdev = NULL; |
128 | 90 | ||
129 | mutex_lock(&zpci_list_lock); | 91 | spin_lock(&zpci_list_lock); |
130 | list_for_each_entry(tmp, &zpci_list, entry) { | 92 | list_for_each_entry(tmp, &zpci_list, entry) { |
131 | if (tmp->fid == fid) { | 93 | if (tmp->fid == fid) { |
132 | zdev = tmp; | 94 | zdev = tmp; |
133 | break; | 95 | break; |
134 | } | 96 | } |
135 | } | 97 | } |
136 | mutex_unlock(&zpci_list_lock); | 98 | spin_unlock(&zpci_list_lock); |
137 | return zdev; | 99 | return zdev; |
138 | } | 100 | } |
139 | 101 | ||
140 | bool zpci_fid_present(u32 fid) | ||
141 | { | ||
142 | return (get_zdev_by_fid(fid) != NULL) ? true : false; | ||
143 | } | ||
144 | |||
145 | static struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus) | 102 | static struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus) |
146 | { | 103 | { |
147 | return (bus && bus->sysdata) ? (struct zpci_dev *) bus->sysdata : NULL; | 104 | return (bus && bus->sysdata) ? (struct zpci_dev *) bus->sysdata : NULL; |
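The lookup above now walks zpci_list under a plain spinlock rather than a mutex, presumably so it can be used from contexts that must not sleep. A user-space model of the locked lookup follows, with a pthread mutex standing in for the spinlock and a hand-rolled singly linked list instead of list_head.

#include <stdio.h>
#include <pthread.h>

struct zdev {
        unsigned int fid;
        struct zdev *next;
};

static struct zdev *zpci_list;
static pthread_mutex_t zpci_list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Walk the device list under the lock and return the match (or NULL),
 * mirroring the shape of get_zdev_by_fid() above. */
static struct zdev *get_zdev_by_fid(unsigned int fid)
{
        struct zdev *tmp, *zdev = NULL;

        pthread_mutex_lock(&zpci_list_lock);
        for (tmp = zpci_list; tmp; tmp = tmp->next)
                if (tmp->fid == fid) {
                        zdev = tmp;
                        break;
                }
        pthread_mutex_unlock(&zpci_list_lock);
        return zdev;
}

int main(void)
{
        struct zdev a = { 0x10, NULL }, b = { 0x20, &a };

        zpci_list = &b;
        printf("fid 0x10 %sfound\n", get_zdev_by_fid(0x10) ? "" : "not ");
        printf("fid 0x30 %sfound\n", get_zdev_by_fid(0x30) ? "" : "not ");
        return 0;
}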
@@ -160,8 +117,7 @@ int pci_proc_domain(struct pci_bus *bus) | |||
160 | EXPORT_SYMBOL_GPL(pci_proc_domain); | 117 | EXPORT_SYMBOL_GPL(pci_proc_domain); |
161 | 118 | ||
162 | /* Modify PCI: Register adapter interruptions */ | 119 | /* Modify PCI: Register adapter interruptions */ |
163 | static int zpci_register_airq(struct zpci_dev *zdev, unsigned int aisb, | 120 | static int zpci_set_airq(struct zpci_dev *zdev) |
164 | u64 aibv) | ||
165 | { | 121 | { |
166 | u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT); | 122 | u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT); |
167 | struct zpci_fib *fib; | 123 | struct zpci_fib *fib; |
@@ -172,14 +128,14 @@ static int zpci_register_airq(struct zpci_dev *zdev, unsigned int aisb, | |||
172 | return -ENOMEM; | 128 | return -ENOMEM; |
173 | 129 | ||
174 | fib->isc = PCI_ISC; | 130 | fib->isc = PCI_ISC; |
175 | fib->noi = zdev->irq_map->msi_vecs; | ||
176 | fib->sum = 1; /* enable summary notifications */ | 131 | fib->sum = 1; /* enable summary notifications */ |
177 | fib->aibv = aibv; | 132 | fib->noi = airq_iv_end(zdev->aibv); |
178 | fib->aibvo = 0; /* every function has its own page */ | 133 | fib->aibv = (unsigned long) zdev->aibv->vector; |
179 | fib->aisb = (u64) bucket->aisb + aisb / 8; | 134 | fib->aibvo = 0; /* each zdev has its own interrupt vector */ |
180 | fib->aisbo = aisb & ZPCI_MSI_MASK; | 135 | fib->aisb = (unsigned long) zpci_aisb_iv->vector + (zdev->aisb/64)*8; |
136 | fib->aisbo = zdev->aisb & 63; | ||
181 | 137 | ||
182 | rc = s390pci_mod_fc(req, fib); | 138 | rc = zpci_mod_fc(req, fib); |
183 | pr_debug("%s mpcifc returned noi: %d\n", __func__, fib->noi); | 139 | pr_debug("%s mpcifc returned noi: %d\n", __func__, fib->noi); |
184 | 140 | ||
185 | free_page((unsigned long) fib); | 141 | free_page((unsigned long) fib); |
@@ -209,7 +165,7 @@ static int mod_pci(struct zpci_dev *zdev, int fn, u8 dmaas, struct mod_pci_args | |||
209 | fib->iota = args->iota; | 165 | fib->iota = args->iota; |
210 | fib->fmb_addr = args->fmb_addr; | 166 | fib->fmb_addr = args->fmb_addr; |
211 | 167 | ||
212 | rc = s390pci_mod_fc(req, fib); | 168 | rc = zpci_mod_fc(req, fib); |
213 | free_page((unsigned long) fib); | 169 | free_page((unsigned long) fib); |
214 | return rc; | 170 | return rc; |
215 | } | 171 | } |
@@ -234,7 +190,7 @@ int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas) | |||
234 | } | 190 | } |
235 | 191 | ||
236 | /* Modify PCI: Unregister adapter interruptions */ | 192 | /* Modify PCI: Unregister adapter interruptions */ |
237 | static int zpci_unregister_airq(struct zpci_dev *zdev) | 193 | static int zpci_clear_airq(struct zpci_dev *zdev) |
238 | { | 194 | { |
239 | struct mod_pci_args args = { 0, 0, 0, 0 }; | 195 | struct mod_pci_args args = { 0, 0, 0, 0 }; |
240 | 196 | ||
@@ -283,7 +239,7 @@ static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len) | |||
283 | u64 data; | 239 | u64 data; |
284 | int rc; | 240 | int rc; |
285 | 241 | ||
286 | rc = s390pci_load(&data, req, offset); | 242 | rc = zpci_load(&data, req, offset); |
287 | if (!rc) { | 243 | if (!rc) { |
288 | data = data << ((8 - len) * 8); | 244 | data = data << ((8 - len) * 8); |
289 | data = le64_to_cpu(data); | 245 | data = le64_to_cpu(data); |
@@ -301,25 +257,46 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len) | |||
301 | 257 | ||
302 | data = cpu_to_le64(data); | 258 | data = cpu_to_le64(data); |
303 | data = data >> ((8 - len) * 8); | 259 | data = data >> ((8 - len) * 8); |
304 | rc = s390pci_store(data, req, offset); | 260 | rc = zpci_store(data, req, offset); |
305 | return rc; | 261 | return rc; |
306 | } | 262 | } |
307 | 263 | ||
308 | void enable_irq(unsigned int irq) | 264 | static int zpci_msi_set_mask_bits(struct msi_desc *msi, u32 mask, u32 flag) |
265 | { | ||
266 | int offset, pos; | ||
267 | u32 mask_bits; | ||
268 | |||
269 | if (msi->msi_attrib.is_msix) { | ||
270 | offset = msi->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + | ||
271 | PCI_MSIX_ENTRY_VECTOR_CTRL; | ||
272 | msi->masked = readl(msi->mask_base + offset); | ||
273 | writel(flag, msi->mask_base + offset); | ||
274 | } else if (msi->msi_attrib.maskbit) { | ||
275 | pos = (long) msi->mask_base; | ||
276 | pci_read_config_dword(msi->dev, pos, &mask_bits); | ||
277 | mask_bits &= ~(mask); | ||
278 | mask_bits |= flag & mask; | ||
279 | pci_write_config_dword(msi->dev, pos, mask_bits); | ||
280 | } else | ||
281 | return 0; | ||
282 | |||
283 | msi->msi_attrib.maskbit = !!flag; | ||
284 | return 1; | ||
285 | } | ||
286 | |||
287 | static void zpci_enable_irq(struct irq_data *data) | ||
309 | { | 288 | { |
310 | struct msi_desc *msi = irq_get_msi_desc(irq); | 289 | struct msi_desc *msi = irq_get_msi_desc(data->irq); |
311 | 290 | ||
312 | zpci_msi_set_mask_bits(msi, 1, 0); | 291 | zpci_msi_set_mask_bits(msi, 1, 0); |
313 | } | 292 | } |
314 | EXPORT_SYMBOL_GPL(enable_irq); | ||
315 | 293 | ||
316 | void disable_irq(unsigned int irq) | 294 | static void zpci_disable_irq(struct irq_data *data) |
317 | { | 295 | { |
318 | struct msi_desc *msi = irq_get_msi_desc(irq); | 296 | struct msi_desc *msi = irq_get_msi_desc(data->irq); |
319 | 297 | ||
320 | zpci_msi_set_mask_bits(msi, 1, 1); | 298 | zpci_msi_set_mask_bits(msi, 1, 1); |
321 | } | 299 | } |
322 | EXPORT_SYMBOL_GPL(disable_irq); | ||
323 | 300 | ||
324 | void pcibios_fixup_bus(struct pci_bus *bus) | 301 | void pcibios_fixup_bus(struct pci_bus *bus) |
325 | { | 302 | { |
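zpci_msi_set_mask_bits() above follows the usual MSI split: write the per-vector control word for MSI-X, or read-modify-write the function's mask register for plain MSI. The register update is simple masked bit replacement; a tiny sketch of just that step follows, with the config-space accessors reduced to a global variable.

#include <stdio.h>
#include <stdint.h>

static uint32_t msi_mask_reg = 0x0000000f;   /* pretend vectors 0-3 start out masked */

/* Replace the bits selected by 'mask' with the corresponding bits of 'flag',
 * leaving all other vectors untouched, the same expression the hunk uses. */
static void set_mask_bits(uint32_t mask, uint32_t flag)
{
        uint32_t bits = msi_mask_reg;        /* pci_read_config_dword() in the kernel */

        bits &= ~mask;
        bits |= flag & mask;
        msi_mask_reg = bits;                 /* pci_write_config_dword() in the kernel */
}

int main(void)
{
        set_mask_bits(0x1, 0x0);             /* unmask vector 0 */
        printf("mask register: %#x\n", msi_mask_reg);
        set_mask_bits(0x1, 0x1);             /* mask vector 0 again */
        printf("mask register: %#x\n", msi_mask_reg);
        return 0;
}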
@@ -404,152 +381,147 @@ static struct pci_ops pci_root_ops = { | |||
404 | .write = pci_write, | 381 | .write = pci_write, |
405 | }; | 382 | }; |
406 | 383 | ||
407 | /* store the last handled bit to implement fair scheduling of devices */ | ||
408 | static DEFINE_PER_CPU(unsigned long, next_sbit); | ||
409 | |||
410 | static void zpci_irq_handler(struct airq_struct *airq) | 384 | static void zpci_irq_handler(struct airq_struct *airq) |
411 | { | 385 | { |
412 | unsigned long sbit, mbit, last = 0, start = __get_cpu_var(next_sbit); | 386 | unsigned long si, ai; |
413 | int rescan = 0, max = aisb_max; | 387 | struct airq_iv *aibv; |
414 | struct zdev_irq_map *imap; | 388 | int irqs_on = 0; |
415 | 389 | ||
416 | inc_irq_stat(IRQIO_PCI); | 390 | inc_irq_stat(IRQIO_PCI); |
417 | sbit = start; | 391 | for (si = 0;;) { |
418 | 392 | /* Scan adapter summary indicator bit vector */ | |
419 | scan: | 393 | si = airq_iv_scan(zpci_aisb_iv, si, airq_iv_end(zpci_aisb_iv)); |
420 | /* find summary_bit */ | 394 | if (si == -1UL) { |
421 | for_each_set_bit_left_cont(sbit, bucket->aisb, max) { | 395 | if (irqs_on++) |
422 | clear_bit(63 - (sbit & 63), bucket->aisb + (sbit >> 6)); | 396 | /* End of second scan with interrupts on. */ |
423 | last = sbit; | 397 | break; |
398 | /* First scan complete, reenable interrupts. */ | ||
399 | zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC); | ||
400 | si = 0; | ||
401 | continue; | ||
402 | } | ||
424 | 403 | ||
425 | /* find vector bit */ | 404 | /* Scan the adapter interrupt vector for this device. */ |
426 | imap = bucket->imap[sbit]; | 405 | aibv = zpci_aibv[si]; |
427 | for_each_set_bit_left(mbit, &imap->aibv, imap->msi_vecs) { | 406 | for (ai = 0;;) { |
407 | ai = airq_iv_scan(aibv, ai, airq_iv_end(aibv)); | ||
408 | if (ai == -1UL) | ||
409 | break; | ||
428 | inc_irq_stat(IRQIO_MSI); | 410 | inc_irq_stat(IRQIO_MSI); |
429 | clear_bit(63 - mbit, &imap->aibv); | 411 | airq_iv_lock(aibv, ai); |
430 | 412 | generic_handle_irq(airq_iv_get_data(aibv, ai)); | |
431 | spin_lock(&imap->lock); | 413 | airq_iv_unlock(aibv, ai); |
432 | if (imap->cb[mbit].handler) | ||
433 | imap->cb[mbit].handler(mbit, | ||
434 | imap->cb[mbit].data); | ||
435 | spin_unlock(&imap->lock); | ||
436 | } | 414 | } |
437 | } | 415 | } |
438 | |||
439 | if (rescan) | ||
440 | goto out; | ||
441 | |||
442 | /* scan the skipped bits */ | ||
443 | if (start > 0) { | ||
444 | sbit = 0; | ||
445 | max = start; | ||
446 | start = 0; | ||
447 | goto scan; | ||
448 | } | ||
449 | |||
450 | /* enable interrupts again */ | ||
451 | set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC); | ||
452 | |||
453 | /* check again to not lose initiative */ | ||
454 | rmb(); | ||
455 | max = aisb_max; | ||
456 | sbit = find_first_bit_left(bucket->aisb, max); | ||
457 | if (sbit != max) { | ||
458 | rescan++; | ||
459 | goto scan; | ||
460 | } | ||
461 | out: | ||
462 | /* store next device bit to scan */ | ||
463 | __get_cpu_var(next_sbit) = (++last >= aisb_max) ? 0 : last; | ||
464 | } | 416 | } |
465 | 417 | ||
466 | /* msi_vecs - number of requested interrupts, 0 place function to error state */ | 418 | int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) |
467 | static int zpci_setup_msi(struct pci_dev *pdev, int msi_vecs) | ||
468 | { | 419 | { |
469 | struct zpci_dev *zdev = get_zdev(pdev); | 420 | struct zpci_dev *zdev = get_zdev(pdev); |
470 | unsigned int aisb, msi_nr; | 421 | unsigned int hwirq, irq, msi_vecs; |
422 | unsigned long aisb; | ||
471 | struct msi_desc *msi; | 423 | struct msi_desc *msi; |
424 | struct msi_msg msg; | ||
472 | int rc; | 425 | int rc; |
473 | 426 | ||
474 | /* store the number of used MSI vectors */ | 427 | pr_debug("%s: requesting %d MSI-X interrupts...", __func__, nvec); |
475 | zdev->irq_map->msi_vecs = min(msi_vecs, ZPCI_NR_MSI_VECS); | 428 | if (type != PCI_CAP_ID_MSIX && type != PCI_CAP_ID_MSI) |
476 | 429 | return -EINVAL; | |
477 | spin_lock(&bucket->lock); | 430 | msi_vecs = min(nvec, ZPCI_MSI_VEC_MAX); |
478 | aisb = find_first_zero_bit(bucket->alloc, PAGE_SIZE); | 431 | msi_vecs = min_t(unsigned int, msi_vecs, CONFIG_PCI_NR_MSI); |
479 | /* alloc map exhausted? */ | ||
480 | if (aisb == PAGE_SIZE) { | ||
481 | spin_unlock(&bucket->lock); | ||
482 | return -EIO; | ||
483 | } | ||
484 | set_bit(aisb, bucket->alloc); | ||
485 | spin_unlock(&bucket->lock); | ||
486 | 432 | ||
433 | /* Allocate adapter summary indicator bit */ | ||
434 | rc = -EIO; | ||
435 | aisb = airq_iv_alloc_bit(zpci_aisb_iv); | ||
436 | if (aisb == -1UL) | ||
437 | goto out; | ||
487 | zdev->aisb = aisb; | 438 | zdev->aisb = aisb; |
488 | if (aisb + 1 > aisb_max) | ||
489 | aisb_max = aisb + 1; | ||
490 | 439 | ||
491 | /* wire up IRQ shortcut pointer */ | 440 | /* Create adapter interrupt vector */ |
492 | bucket->imap[zdev->aisb] = zdev->irq_map; | 441 | rc = -ENOMEM; |
493 | pr_debug("%s: imap[%u] linked to %p\n", __func__, zdev->aisb, zdev->irq_map); | 442 | zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK); |
443 | if (!zdev->aibv) | ||
444 | goto out_si; | ||
494 | 445 | ||
495 | /* TODO: irq number 0 wont be found if we return less than requested MSIs. | 446 | /* Wire up shortcut pointer */ |
496 | * ignore it for now and fix in common code. | 447 | zpci_aibv[aisb] = zdev->aibv; |
497 | */ | ||
498 | msi_nr = aisb << ZPCI_MSI_VEC_BITS; | ||
499 | 448 | ||
449 | /* Request MSI interrupts */ | ||
450 | hwirq = 0; | ||
500 | list_for_each_entry(msi, &pdev->msi_list, list) { | 451 | list_for_each_entry(msi, &pdev->msi_list, list) { |
501 | rc = zpci_setup_msi_irq(zdev, msi, msi_nr, | 452 | rc = -EIO; |
502 | aisb << ZPCI_MSI_VEC_BITS); | 453 | irq = irq_alloc_desc(0); /* Alloc irq on node 0 */ |
454 | if (irq == NO_IRQ) | ||
455 | goto out_msi; | ||
456 | rc = irq_set_msi_desc(irq, msi); | ||
503 | if (rc) | 457 | if (rc) |
504 | return rc; | 458 | goto out_msi; |
505 | msi_nr++; | 459 | irq_set_chip_and_handler(irq, &zpci_irq_chip, |
460 | handle_simple_irq); | ||
461 | msg.data = hwirq; | ||
462 | msg.address_lo = zdev->msi_addr & 0xffffffff; | ||
463 | msg.address_hi = zdev->msi_addr >> 32; | ||
464 | write_msi_msg(irq, &msg); | ||
465 | airq_iv_set_data(zdev->aibv, hwirq, irq); | ||
466 | hwirq++; | ||
506 | } | 467 | } |
507 | 468 | ||
508 | rc = zpci_register_airq(zdev, aisb, (u64) &zdev->irq_map->aibv); | 469 | /* Enable adapter interrupts */ |
509 | if (rc) { | 470 | rc = zpci_set_airq(zdev); |
510 | clear_bit(aisb, bucket->alloc); | 471 | if (rc) |
511 | dev_err(&pdev->dev, "register MSI failed with: %d\n", rc); | 472 | goto out_msi; |
512 | return rc; | 473 | |
474 | return (msi_vecs == nvec) ? 0 : msi_vecs; | ||
475 | |||
476 | out_msi: | ||
477 | list_for_each_entry(msi, &pdev->msi_list, list) { | ||
478 | if (hwirq-- == 0) | ||
479 | break; | ||
480 | irq_set_msi_desc(msi->irq, NULL); | ||
481 | irq_free_desc(msi->irq); | ||
482 | msi->msg.address_lo = 0; | ||
483 | msi->msg.address_hi = 0; | ||
484 | msi->msg.data = 0; | ||
485 | msi->irq = 0; | ||
513 | } | 486 | } |
514 | return (zdev->irq_map->msi_vecs == msi_vecs) ? | 487 | zpci_aibv[aisb] = NULL; |
515 | 0 : zdev->irq_map->msi_vecs; | 488 | airq_iv_release(zdev->aibv); |
489 | out_si: | ||
490 | airq_iv_free_bit(zpci_aisb_iv, aisb); | ||
491 | out: | ||
492 | dev_err(&pdev->dev, "register MSI failed with: %d\n", rc); | ||
493 | return rc; | ||
516 | } | 494 | } |
517 | 495 | ||
518 | static void zpci_teardown_msi(struct pci_dev *pdev) | 496 | void arch_teardown_msi_irqs(struct pci_dev *pdev) |
519 | { | 497 | { |
520 | struct zpci_dev *zdev = get_zdev(pdev); | 498 | struct zpci_dev *zdev = get_zdev(pdev); |
521 | struct msi_desc *msi; | 499 | struct msi_desc *msi; |
522 | int aisb, rc; | 500 | int rc; |
523 | 501 | ||
524 | rc = zpci_unregister_airq(zdev); | 502 | pr_info("%s: on pdev: %p\n", __func__, pdev); |
503 | |||
504 | /* Disable adapter interrupts */ | ||
505 | rc = zpci_clear_airq(zdev); | ||
525 | if (rc) { | 506 | if (rc) { |
526 | dev_err(&pdev->dev, "deregister MSI failed with: %d\n", rc); | 507 | dev_err(&pdev->dev, "deregister MSI failed with: %d\n", rc); |
527 | return; | 508 | return; |
528 | } | 509 | } |
529 | 510 | ||
530 | msi = list_first_entry(&pdev->msi_list, struct msi_desc, list); | 511 | /* Release MSI interrupts */ |
531 | aisb = irq_to_dev_nr(msi->irq); | 512 | list_for_each_entry(msi, &pdev->msi_list, list) { |
532 | 513 | zpci_msi_set_mask_bits(msi, 1, 1); | |
533 | list_for_each_entry(msi, &pdev->msi_list, list) | 514 | irq_set_msi_desc(msi->irq, NULL); |
534 | zpci_teardown_msi_irq(zdev, msi); | 515 | irq_free_desc(msi->irq); |
535 | 516 | msi->msg.address_lo = 0; | |
536 | clear_bit(aisb, bucket->alloc); | 517 | msi->msg.address_hi = 0; |
537 | if (aisb + 1 == aisb_max) | 518 | msi->msg.data = 0; |
538 | aisb_max--; | 519 | msi->irq = 0; |
539 | } | 520 | } |
540 | |||
541 | int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) | ||
542 | { | ||
543 | pr_debug("%s: requesting %d MSI-X interrupts...", __func__, nvec); | ||
544 | if (type != PCI_CAP_ID_MSIX && type != PCI_CAP_ID_MSI) | ||
545 | return -EINVAL; | ||
546 | return zpci_setup_msi(pdev, nvec); | ||
547 | } | ||
548 | 521 | ||
549 | void arch_teardown_msi_irqs(struct pci_dev *pdev) | 522 | zpci_aibv[zdev->aisb] = NULL; |
550 | { | 523 | airq_iv_release(zdev->aibv); |
551 | pr_info("%s: on pdev: %p\n", __func__, pdev); | 524 | airq_iv_free_bit(zpci_aisb_iv, zdev->aisb); |
552 | zpci_teardown_msi(pdev); | ||
553 | } | 525 | } |
554 | 526 | ||
555 | static void zpci_map_resources(struct zpci_dev *zdev) | 527 | static void zpci_map_resources(struct zpci_dev *zdev) |
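The reworked zpci_irq_handler() above scans two levels of bit vectors: a summary vector with one bit per PCI function and a per-function vector with one bit per MSI, and it rescans once after re-enabling interrupts so a bit set during the first pass is not lost. A rough model with ordinary bitmaps follows; airq_iv_scan(), the per-bit locking and the interrupt-control call are replaced by trivial stand-ins.

#include <stdio.h>

#define NDEV 4
#define NVEC 8

static unsigned int summary;            /* one bit per device, models zpci_aisb_iv */
static unsigned int vec[NDEV];          /* one bit per MSI, models zpci_aibv[] */

static int scan(unsigned int *v, int from, int to)   /* find and clear next set bit */
{
        for (int i = from; i < to; i++)
                if (*v & (1u << i)) {
                        *v &= ~(1u << i);
                        return i;
                }
        return -1;
}

static void irq_handler(void)
{
        int irqs_on = 0;

        for (int si = 0;;) {
                si = scan(&summary, si, NDEV);
                if (si == -1) {
                        if (irqs_on++)
                                break;          /* second empty pass: done */
                        /* zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, ...) goes here */
                        si = 0;
                        continue;
                }
                for (int ai = 0;;) {
                        ai = scan(&vec[si], ai, NVEC);
                        if (ai == -1)
                                break;
                        printf("device %d, vector %d -> generic_handle_irq()\n", si, ai);
                }
        }
}

int main(void)
{
        summary = 0x5;                  /* devices 0 and 2 signalled */
        vec[0] = 0x3;                   /* vectors 0 and 1 */
        vec[2] = 0x80;                  /* vector 7 */
        irq_handler();
        return 0;
}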
@@ -564,8 +536,6 @@ static void zpci_map_resources(struct zpci_dev *zdev) | |||
564 | continue; | 536 | continue; |
565 | pdev->resource[i].start = (resource_size_t) pci_iomap(pdev, i, 0); | 537 | pdev->resource[i].start = (resource_size_t) pci_iomap(pdev, i, 0); |
566 | pdev->resource[i].end = pdev->resource[i].start + len - 1; | 538 | pdev->resource[i].end = pdev->resource[i].start + len - 1; |
567 | pr_debug("BAR%i: -> start: %Lx end: %Lx\n", | ||
568 | i, pdev->resource[i].start, pdev->resource[i].end); | ||
569 | } | 539 | } |
570 | } | 540 | } |
571 | 541 | ||
@@ -589,162 +559,47 @@ struct zpci_dev *zpci_alloc_device(void) | |||
589 | 559 | ||
590 | /* Alloc memory for our private pci device data */ | 560 | /* Alloc memory for our private pci device data */ |
591 | zdev = kzalloc(sizeof(*zdev), GFP_KERNEL); | 561 | zdev = kzalloc(sizeof(*zdev), GFP_KERNEL); |
592 | if (!zdev) | 562 | return zdev ? : ERR_PTR(-ENOMEM); |
593 | return ERR_PTR(-ENOMEM); | ||
594 | |||
595 | /* Alloc aibv & callback space */ | ||
596 | zdev->irq_map = kmem_cache_zalloc(zdev_irq_cache, GFP_KERNEL); | ||
597 | if (!zdev->irq_map) | ||
598 | goto error; | ||
599 | WARN_ON((u64) zdev->irq_map & 0xff); | ||
600 | return zdev; | ||
601 | |||
602 | error: | ||
603 | kfree(zdev); | ||
604 | return ERR_PTR(-ENOMEM); | ||
605 | } | 563 | } |
606 | 564 | ||
607 | void zpci_free_device(struct zpci_dev *zdev) | 565 | void zpci_free_device(struct zpci_dev *zdev) |
608 | { | 566 | { |
609 | kmem_cache_free(zdev_irq_cache, zdev->irq_map); | ||
610 | kfree(zdev); | 567 | kfree(zdev); |
611 | } | 568 | } |
612 | 569 | ||
613 | /* | ||
614 | * Too late for any s390 specific setup, since interrupts must be set up | ||
615 | * already which requires DMA setup too and the pci scan will access the | ||
616 | * config space, which only works if the function handle is enabled. | ||
617 | */ | ||
618 | int pcibios_enable_device(struct pci_dev *pdev, int mask) | ||
619 | { | ||
620 | struct resource *res; | ||
621 | u16 cmd; | ||
622 | int i; | ||
623 | |||
624 | pci_read_config_word(pdev, PCI_COMMAND, &cmd); | ||
625 | |||
626 | for (i = 0; i < PCI_BAR_COUNT; i++) { | ||
627 | res = &pdev->resource[i]; | ||
628 | |||
629 | if (res->flags & IORESOURCE_IO) | ||
630 | return -EINVAL; | ||
631 | |||
632 | if (res->flags & IORESOURCE_MEM) | ||
633 | cmd |= PCI_COMMAND_MEMORY; | ||
634 | } | ||
635 | pci_write_config_word(pdev, PCI_COMMAND, cmd); | ||
636 | return 0; | ||
637 | } | ||
638 | |||
639 | int pcibios_add_platform_entries(struct pci_dev *pdev) | 570 | int pcibios_add_platform_entries(struct pci_dev *pdev) |
640 | { | 571 | { |
641 | return zpci_sysfs_add_device(&pdev->dev); | 572 | return zpci_sysfs_add_device(&pdev->dev); |
642 | } | 573 | } |
643 | 574 | ||
644 | int zpci_request_irq(unsigned int irq, irq_handler_t handler, void *data) | ||
645 | { | ||
646 | int msi_nr = irq_to_msi_nr(irq); | ||
647 | struct zdev_irq_map *imap; | ||
648 | struct msi_desc *msi; | ||
649 | |||
650 | msi = irq_get_msi_desc(irq); | ||
651 | if (!msi) | ||
652 | return -EIO; | ||
653 | |||
654 | imap = get_imap(irq); | ||
655 | spin_lock_init(&imap->lock); | ||
656 | |||
657 | pr_debug("%s: register handler for IRQ:MSI %d:%d\n", __func__, irq >> 6, msi_nr); | ||
658 | imap->cb[msi_nr].handler = handler; | ||
659 | imap->cb[msi_nr].data = data; | ||
660 | |||
661 | /* | ||
662 | * The generic MSI code returns with the interrupt disabled on the | ||
663 | * card, using the MSI mask bits. Firmware doesn't appear to unmask | ||
664 | * at that level, so we do it here by hand. | ||
665 | */ | ||
666 | zpci_msi_set_mask_bits(msi, 1, 0); | ||
667 | return 0; | ||
668 | } | ||
669 | |||
670 | void zpci_free_irq(unsigned int irq) | ||
671 | { | ||
672 | struct zdev_irq_map *imap = get_imap(irq); | ||
673 | int msi_nr = irq_to_msi_nr(irq); | ||
674 | unsigned long flags; | ||
675 | |||
676 | pr_debug("%s: for irq: %d\n", __func__, irq); | ||
677 | |||
678 | spin_lock_irqsave(&imap->lock, flags); | ||
679 | imap->cb[msi_nr].handler = NULL; | ||
680 | imap->cb[msi_nr].data = NULL; | ||
681 | spin_unlock_irqrestore(&imap->lock, flags); | ||
682 | } | ||
683 | |||
684 | int request_irq(unsigned int irq, irq_handler_t handler, | ||
685 | unsigned long irqflags, const char *devname, void *dev_id) | ||
686 | { | ||
687 | pr_debug("%s: irq: %d handler: %p flags: %lx dev: %s\n", | ||
688 | __func__, irq, handler, irqflags, devname); | ||
689 | |||
690 | return zpci_request_irq(irq, handler, dev_id); | ||
691 | } | ||
692 | EXPORT_SYMBOL_GPL(request_irq); | ||
693 | |||
694 | void free_irq(unsigned int irq, void *dev_id) | ||
695 | { | ||
696 | zpci_free_irq(irq); | ||
697 | } | ||
698 | EXPORT_SYMBOL_GPL(free_irq); | ||
699 | |||
700 | static int __init zpci_irq_init(void) | 575 | static int __init zpci_irq_init(void) |
701 | { | 576 | { |
702 | int cpu, rc; | 577 | int rc; |
703 | |||
704 | bucket = kzalloc(sizeof(*bucket), GFP_KERNEL); | ||
705 | if (!bucket) | ||
706 | return -ENOMEM; | ||
707 | |||
708 | bucket->aisb = (unsigned long *) get_zeroed_page(GFP_KERNEL); | ||
709 | if (!bucket->aisb) { | ||
710 | rc = -ENOMEM; | ||
711 | goto out_aisb; | ||
712 | } | ||
713 | |||
714 | bucket->alloc = (unsigned long *) get_zeroed_page(GFP_KERNEL); | ||
715 | if (!bucket->alloc) { | ||
716 | rc = -ENOMEM; | ||
717 | goto out_alloc; | ||
718 | } | ||
719 | 578 | ||
720 | rc = register_adapter_interrupt(&zpci_airq); | 579 | rc = register_adapter_interrupt(&zpci_airq); |
721 | if (rc) | 580 | if (rc) |
722 | goto out_ai; | 581 | goto out; |
723 | /* Set summary to 1 to be called every time for the ISC. */ | 582 | /* Set summary to 1 to be called every time for the ISC. */ |
724 | *zpci_airq.lsi_ptr = 1; | 583 | *zpci_airq.lsi_ptr = 1; |
725 | 584 | ||
726 | for_each_online_cpu(cpu) | 585 | rc = -ENOMEM; |
727 | per_cpu(next_sbit, cpu) = 0; | 586 | zpci_aisb_iv = airq_iv_create(ZPCI_NR_DEVICES, AIRQ_IV_ALLOC); |
587 | if (!zpci_aisb_iv) | ||
588 | goto out_airq; | ||
728 | 589 | ||
729 | spin_lock_init(&bucket->lock); | 590 | zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC); |
730 | set_irq_ctrl(SIC_IRQ_MODE_SINGLE, NULL, PCI_ISC); | ||
731 | return 0; | 591 | return 0; |
732 | 592 | ||
733 | out_ai: | 593 | out_airq: |
734 | free_page((unsigned long) bucket->alloc); | 594 | unregister_adapter_interrupt(&zpci_airq); |
735 | out_alloc: | 595 | out: |
736 | free_page((unsigned long) bucket->aisb); | ||
737 | out_aisb: | ||
738 | kfree(bucket); | ||
739 | return rc; | 596 | return rc; |
740 | } | 597 | } |
741 | 598 | ||
742 | static void zpci_irq_exit(void) | 599 | static void zpci_irq_exit(void) |
743 | { | 600 | { |
744 | free_page((unsigned long) bucket->alloc); | 601 | airq_iv_release(zpci_aisb_iv); |
745 | free_page((unsigned long) bucket->aisb); | ||
746 | unregister_adapter_interrupt(&zpci_airq); | 602 | unregister_adapter_interrupt(&zpci_airq); |
747 | kfree(bucket); | ||
748 | } | 603 | } |
749 | 604 | ||
750 | static struct resource *zpci_alloc_bus_resource(unsigned long start, unsigned long size, | 605 | static struct resource *zpci_alloc_bus_resource(unsigned long start, unsigned long size, |
@@ -801,16 +656,49 @@ static void zpci_free_iomap(struct zpci_dev *zdev, int entry) | |||
801 | int pcibios_add_device(struct pci_dev *pdev) | 656 | int pcibios_add_device(struct pci_dev *pdev) |
802 | { | 657 | { |
803 | struct zpci_dev *zdev = get_zdev(pdev); | 658 | struct zpci_dev *zdev = get_zdev(pdev); |
659 | struct resource *res; | ||
660 | int i; | ||
661 | |||
662 | zdev->pdev = pdev; | ||
663 | zpci_map_resources(zdev); | ||
664 | |||
665 | for (i = 0; i < PCI_BAR_COUNT; i++) { | ||
666 | res = &pdev->resource[i]; | ||
667 | if (res->parent || !res->flags) | ||
668 | continue; | ||
669 | pci_claim_resource(pdev, i); | ||
670 | } | ||
671 | |||
672 | return 0; | ||
673 | } | ||
674 | |||
675 | int pcibios_enable_device(struct pci_dev *pdev, int mask) | ||
676 | { | ||
677 | struct zpci_dev *zdev = get_zdev(pdev); | ||
678 | struct resource *res; | ||
679 | u16 cmd; | ||
680 | int i; | ||
804 | 681 | ||
805 | zdev->pdev = pdev; | 682 | zdev->pdev = pdev; |
806 | zpci_debug_init_device(zdev); | 683 | zpci_debug_init_device(zdev); |
807 | zpci_fmb_enable_device(zdev); | 684 | zpci_fmb_enable_device(zdev); |
808 | zpci_map_resources(zdev); | 685 | zpci_map_resources(zdev); |
809 | 686 | ||
687 | pci_read_config_word(pdev, PCI_COMMAND, &cmd); | ||
688 | for (i = 0; i < PCI_BAR_COUNT; i++) { | ||
689 | res = &pdev->resource[i]; | ||
690 | |||
691 | if (res->flags & IORESOURCE_IO) | ||
692 | return -EINVAL; | ||
693 | |||
694 | if (res->flags & IORESOURCE_MEM) | ||
695 | cmd |= PCI_COMMAND_MEMORY; | ||
696 | } | ||
697 | pci_write_config_word(pdev, PCI_COMMAND, cmd); | ||
810 | return 0; | 698 | return 0; |
811 | } | 699 | } |
812 | 700 | ||
813 | void pcibios_release_device(struct pci_dev *pdev) | 701 | void pcibios_disable_device(struct pci_dev *pdev) |
814 | { | 702 | { |
815 | struct zpci_dev *zdev = get_zdev(pdev); | 703 | struct zpci_dev *zdev = get_zdev(pdev); |
816 | 704 | ||
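The pcibios_enable_device() variant above rejects I/O BARs (zPCI functions have none) and sets memory decoding in PCI_COMMAND when any memory BAR is present. A small sketch of just that command-word computation over a made-up BAR list follows; resource flags are reduced to two booleans.

#include <stdio.h>
#include <stdint.h>

#define PCI_COMMAND_MEMORY 0x2
#define BAR_COUNT 6

struct bar {
        int is_io;      /* stands in for IORESOURCE_IO  */
        int is_mem;     /* stands in for IORESOURCE_MEM */
};

/* Returns the new PCI_COMMAND value, or -1 if an I/O BAR is present. */
static int compute_command(uint16_t cmd, const struct bar bars[BAR_COUNT])
{
        for (int i = 0; i < BAR_COUNT; i++) {
                if (bars[i].is_io)
                        return -1;              /* -EINVAL in the kernel code */
                if (bars[i].is_mem)
                        cmd |= PCI_COMMAND_MEMORY;
        }
        return cmd;
}

int main(void)
{
        struct bar bars[BAR_COUNT] = { { 0, 1 }, { 0, 0 }, { 0, 1 } };

        printf("PCI_COMMAND -> %#x\n", compute_command(0x0, bars));
        return 0;
}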
@@ -898,6 +786,8 @@ int zpci_enable_device(struct zpci_dev *zdev) | |||
898 | rc = zpci_dma_init_device(zdev); | 786 | rc = zpci_dma_init_device(zdev); |
899 | if (rc) | 787 | if (rc) |
900 | goto out_dma; | 788 | goto out_dma; |
789 | |||
790 | zdev->state = ZPCI_FN_STATE_ONLINE; | ||
901 | return 0; | 791 | return 0; |
902 | 792 | ||
903 | out_dma: | 793 | out_dma: |
@@ -926,18 +816,16 @@ int zpci_create_device(struct zpci_dev *zdev) | |||
926 | rc = zpci_enable_device(zdev); | 816 | rc = zpci_enable_device(zdev); |
927 | if (rc) | 817 | if (rc) |
928 | goto out_free; | 818 | goto out_free; |
929 | |||
930 | zdev->state = ZPCI_FN_STATE_ONLINE; | ||
931 | } | 819 | } |
932 | rc = zpci_scan_bus(zdev); | 820 | rc = zpci_scan_bus(zdev); |
933 | if (rc) | 821 | if (rc) |
934 | goto out_disable; | 822 | goto out_disable; |
935 | 823 | ||
936 | mutex_lock(&zpci_list_lock); | 824 | spin_lock(&zpci_list_lock); |
937 | list_add_tail(&zdev->entry, &zpci_list); | 825 | list_add_tail(&zdev->entry, &zpci_list); |
938 | if (hotplug_ops) | 826 | spin_unlock(&zpci_list_lock); |
939 | hotplug_ops->create_slot(zdev); | 827 | |
940 | mutex_unlock(&zpci_list_lock); | 828 | zpci_init_slot(zdev); |
941 | 829 | ||
942 | return 0; | 830 | return 0; |
943 | 831 | ||
@@ -967,15 +855,10 @@ static inline int barsize(u8 size) | |||
967 | 855 | ||
968 | static int zpci_mem_init(void) | 856 | static int zpci_mem_init(void) |
969 | { | 857 | { |
970 | zdev_irq_cache = kmem_cache_create("PCI_IRQ_cache", sizeof(struct zdev_irq_map), | ||
971 | L1_CACHE_BYTES, SLAB_HWCACHE_ALIGN, NULL); | ||
972 | if (!zdev_irq_cache) | ||
973 | goto error_zdev; | ||
974 | |||
975 | zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb), | 858 | zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb), |
976 | 16, 0, NULL); | 859 | 16, 0, NULL); |
977 | if (!zdev_fmb_cache) | 860 | if (!zdev_fmb_cache) |
978 | goto error_fmb; | 861 | goto error_zdev; |
979 | 862 | ||
980 | /* TODO: use realloc */ | 863 | /* TODO: use realloc */ |
981 | zpci_iomap_start = kzalloc(ZPCI_IOMAP_MAX_ENTRIES * sizeof(*zpci_iomap_start), | 864 | zpci_iomap_start = kzalloc(ZPCI_IOMAP_MAX_ENTRIES * sizeof(*zpci_iomap_start), |
@@ -986,8 +869,6 @@ static int zpci_mem_init(void) | |||
986 | 869 | ||
987 | error_iomap: | 870 | error_iomap: |
988 | kmem_cache_destroy(zdev_fmb_cache); | 871 | kmem_cache_destroy(zdev_fmb_cache); |
989 | error_fmb: | ||
990 | kmem_cache_destroy(zdev_irq_cache); | ||
991 | error_zdev: | 872 | error_zdev: |
992 | return -ENOMEM; | 873 | return -ENOMEM; |
993 | } | 874 | } |
@@ -995,28 +876,10 @@ error_zdev: | |||
995 | static void zpci_mem_exit(void) | 876 | static void zpci_mem_exit(void) |
996 | { | 877 | { |
997 | kfree(zpci_iomap_start); | 878 | kfree(zpci_iomap_start); |
998 | kmem_cache_destroy(zdev_irq_cache); | ||
999 | kmem_cache_destroy(zdev_fmb_cache); | 879 | kmem_cache_destroy(zdev_fmb_cache); |
1000 | } | 880 | } |
1001 | 881 | ||
1002 | void zpci_register_hp_ops(struct pci_hp_callback_ops *ops) | 882 | static unsigned int s390_pci_probe; |
1003 | { | ||
1004 | mutex_lock(&zpci_list_lock); | ||
1005 | hotplug_ops = ops; | ||
1006 | mutex_unlock(&zpci_list_lock); | ||
1007 | } | ||
1008 | EXPORT_SYMBOL_GPL(zpci_register_hp_ops); | ||
1009 | |||
1010 | void zpci_deregister_hp_ops(void) | ||
1011 | { | ||
1012 | mutex_lock(&zpci_list_lock); | ||
1013 | hotplug_ops = NULL; | ||
1014 | mutex_unlock(&zpci_list_lock); | ||
1015 | } | ||
1016 | EXPORT_SYMBOL_GPL(zpci_deregister_hp_ops); | ||
1017 | |||
1018 | unsigned int s390_pci_probe; | ||
1019 | EXPORT_SYMBOL_GPL(s390_pci_probe); | ||
1020 | 883 | ||
1021 | char * __init pcibios_setup(char *str) | 884 | char * __init pcibios_setup(char *str) |
1022 | { | 885 | { |
@@ -1044,16 +907,12 @@ static int __init pci_base_init(void) | |||
1044 | 907 | ||
1045 | rc = zpci_debug_init(); | 908 | rc = zpci_debug_init(); |
1046 | if (rc) | 909 | if (rc) |
1047 | return rc; | 910 | goto out; |
1048 | 911 | ||
1049 | rc = zpci_mem_init(); | 912 | rc = zpci_mem_init(); |
1050 | if (rc) | 913 | if (rc) |
1051 | goto out_mem; | 914 | goto out_mem; |
1052 | 915 | ||
1053 | rc = zpci_msihash_init(); | ||
1054 | if (rc) | ||
1055 | goto out_hash; | ||
1056 | |||
1057 | rc = zpci_irq_init(); | 916 | rc = zpci_irq_init(); |
1058 | if (rc) | 917 | if (rc) |
1059 | goto out_irq; | 918 | goto out_irq; |
@@ -1062,7 +921,7 @@ static int __init pci_base_init(void) | |||
1062 | if (rc) | 921 | if (rc) |
1063 | goto out_dma; | 922 | goto out_dma; |
1064 | 923 | ||
1065 | rc = clp_find_pci_devices(); | 924 | rc = clp_scan_pci_devices(); |
1066 | if (rc) | 925 | if (rc) |
1067 | goto out_find; | 926 | goto out_find; |
1068 | 927 | ||
@@ -1073,11 +932,15 @@ out_find: | |||
1073 | out_dma: | 932 | out_dma: |
1074 | zpci_irq_exit(); | 933 | zpci_irq_exit(); |
1075 | out_irq: | 934 | out_irq: |
1076 | zpci_msihash_exit(); | ||
1077 | out_hash: | ||
1078 | zpci_mem_exit(); | 935 | zpci_mem_exit(); |
1079 | out_mem: | 936 | out_mem: |
1080 | zpci_debug_exit(); | 937 | zpci_debug_exit(); |
938 | out: | ||
1081 | return rc; | 939 | return rc; |
1082 | } | 940 | } |
1083 | subsys_initcall(pci_base_init); | 941 | subsys_initcall_sync(pci_base_init); |
942 | |||
943 | void zpci_rescan(void) | ||
944 | { | ||
945 | clp_rescan_pci_devices_simple(); | ||
946 | } | ||
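The pci_base_init() hunks above keep the common pattern of chaining init steps and unwinding the successful ones in reverse through goto labels; the change only drops the MSI-hash stage and its label. A generic sketch of that unwinding pattern follows, with placeholder steps rather than the zPCI functions.

#include <stdio.h>

/* Placeholder init/exit pairs standing in for debug, mem, irq and dma setup. */
static int  step_a_init(void) { puts("a init"); return 0; }
static void step_a_exit(void) { puts("a exit"); }
static int  step_b_init(void) { puts("b init"); return 0; }
static void step_b_exit(void) { puts("b exit"); }
static int  step_c_init(void) { puts("c init (fails)"); return -1; }

static int base_init(void)
{
        int rc;

        rc = step_a_init();
        if (rc)
                goto out;
        rc = step_b_init();
        if (rc)
                goto out_a;
        rc = step_c_init();
        if (rc)
                goto out_b;
        return 0;

out_b:
        step_b_exit();          /* undo the steps that did succeed, newest first */
out_a:
        step_a_exit();
out:
        return rc;
}

int main(void)
{
        printf("base_init() = %d\n", base_init());
        return 0;
}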
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c index 2e9539625d93..475563c3d1e4 100644 --- a/arch/s390/pci/pci_clp.c +++ b/arch/s390/pci/pci_clp.c | |||
@@ -36,9 +36,9 @@ static inline u8 clp_instr(void *data) | |||
36 | return cc; | 36 | return cc; |
37 | } | 37 | } |
38 | 38 | ||
39 | static void *clp_alloc_block(void) | 39 | static void *clp_alloc_block(gfp_t gfp_mask) |
40 | { | 40 | { |
41 | return (void *) __get_free_pages(GFP_KERNEL, get_order(CLP_BLK_SIZE)); | 41 | return (void *) __get_free_pages(gfp_mask, get_order(CLP_BLK_SIZE)); |
42 | } | 42 | } |
43 | 43 | ||
44 | static void clp_free_block(void *ptr) | 44 | static void clp_free_block(void *ptr) |
@@ -70,7 +70,7 @@ static int clp_query_pci_fngrp(struct zpci_dev *zdev, u8 pfgid) | |||
70 | struct clp_req_rsp_query_pci_grp *rrb; | 70 | struct clp_req_rsp_query_pci_grp *rrb; |
71 | int rc; | 71 | int rc; |
72 | 72 | ||
73 | rrb = clp_alloc_block(); | 73 | rrb = clp_alloc_block(GFP_KERNEL); |
74 | if (!rrb) | 74 | if (!rrb) |
75 | return -ENOMEM; | 75 | return -ENOMEM; |
76 | 76 | ||
@@ -113,7 +113,7 @@ static int clp_query_pci_fn(struct zpci_dev *zdev, u32 fh) | |||
113 | struct clp_req_rsp_query_pci *rrb; | 113 | struct clp_req_rsp_query_pci *rrb; |
114 | int rc; | 114 | int rc; |
115 | 115 | ||
116 | rrb = clp_alloc_block(); | 116 | rrb = clp_alloc_block(GFP_KERNEL); |
117 | if (!rrb) | 117 | if (!rrb) |
118 | return -ENOMEM; | 118 | return -ENOMEM; |
119 | 119 | ||
@@ -179,9 +179,9 @@ error: | |||
179 | static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command) | 179 | static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command) |
180 | { | 180 | { |
181 | struct clp_req_rsp_set_pci *rrb; | 181 | struct clp_req_rsp_set_pci *rrb; |
182 | int rc, retries = 1000; | 182 | int rc, retries = 100; |
183 | 183 | ||
184 | rrb = clp_alloc_block(); | 184 | rrb = clp_alloc_block(GFP_KERNEL); |
185 | if (!rrb) | 185 | if (!rrb) |
186 | return -ENOMEM; | 186 | return -ENOMEM; |
187 | 187 | ||
@@ -199,7 +199,7 @@ static int clp_set_pci_fn(u32 *fh, u8 nr_dma_as, u8 command) | |||
199 | retries--; | 199 | retries--; |
200 | if (retries < 0) | 200 | if (retries < 0) |
201 | break; | 201 | break; |
202 | msleep(1); | 202 | msleep(20); |
203 | } | 203 | } |
204 | } while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY); | 204 | } while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY); |
205 | 205 | ||
@@ -245,49 +245,12 @@ int clp_disable_fh(struct zpci_dev *zdev) | |||
245 | return rc; | 245 | return rc; |
246 | } | 246 | } |
247 | 247 | ||
248 | static void clp_check_pcifn_entry(struct clp_fh_list_entry *entry) | 248 | static int clp_list_pci(struct clp_req_rsp_list_pci *rrb, |
249 | void (*cb)(struct clp_fh_list_entry *entry)) | ||
249 | { | 250 | { |
250 | int present, rc; | ||
251 | |||
252 | if (!entry->vendor_id) | ||
253 | return; | ||
254 | |||
255 | /* TODO: be a little bit more scalable */ | ||
256 | present = zpci_fid_present(entry->fid); | ||
257 | |||
258 | if (present) | ||
259 | pr_debug("%s: device %x already present\n", __func__, entry->fid); | ||
260 | |||
261 | /* skip already used functions */ | ||
262 | if (present && entry->config_state) | ||
263 | return; | ||
264 | |||
265 | /* aev 306: function moved to stand-by state */ | ||
266 | if (present && !entry->config_state) { | ||
267 | /* | ||
268 | * The handle is already disabled, that means no iota/irq freeing via | ||
269 | * the firmware interfaces anymore. Need to free resources manually | ||
270 | * (DMA memory, debug, sysfs)... | ||
271 | */ | ||
272 | zpci_stop_device(get_zdev_by_fid(entry->fid)); | ||
273 | return; | ||
274 | } | ||
275 | |||
276 | rc = clp_add_pci_device(entry->fid, entry->fh, entry->config_state); | ||
277 | if (rc) | ||
278 | pr_err("Failed to add fid: 0x%x\n", entry->fid); | ||
279 | } | ||
280 | |||
281 | int clp_find_pci_devices(void) | ||
282 | { | ||
283 | struct clp_req_rsp_list_pci *rrb; | ||
284 | u64 resume_token = 0; | 251 | u64 resume_token = 0; |
285 | int entries, i, rc; | 252 | int entries, i, rc; |
286 | 253 | ||
287 | rrb = clp_alloc_block(); | ||
288 | if (!rrb) | ||
289 | return -ENOMEM; | ||
290 | |||
291 | do { | 254 | do { |
292 | memset(rrb, 0, sizeof(*rrb)); | 255 | memset(rrb, 0, sizeof(*rrb)); |
293 | rrb->request.hdr.len = sizeof(rrb->request); | 256 | rrb->request.hdr.len = sizeof(rrb->request); |
@@ -316,12 +279,101 @@ int clp_find_pci_devices(void) | |||
316 | resume_token = rrb->response.resume_token; | 279 | resume_token = rrb->response.resume_token; |
317 | 280 | ||
318 | for (i = 0; i < entries; i++) | 281 | for (i = 0; i < entries; i++) |
319 | clp_check_pcifn_entry(&rrb->response.fh_list[i]); | 282 | cb(&rrb->response.fh_list[i]); |
320 | } while (resume_token); | 283 | } while (resume_token); |
321 | 284 | ||
322 | pr_debug("Maximum number of supported PCI functions: %u\n", | 285 | pr_debug("Maximum number of supported PCI functions: %u\n", |
323 | rrb->response.max_fn); | 286 | rrb->response.max_fn); |
324 | out: | 287 | out: |
288 | return rc; | ||
289 | } | ||
290 | |||
291 | static void __clp_add(struct clp_fh_list_entry *entry) | ||
292 | { | ||
293 | if (!entry->vendor_id) | ||
294 | return; | ||
295 | |||
296 | clp_add_pci_device(entry->fid, entry->fh, entry->config_state); | ||
297 | } | ||
298 | |||
299 | static void __clp_rescan(struct clp_fh_list_entry *entry) | ||
300 | { | ||
301 | struct zpci_dev *zdev; | ||
302 | |||
303 | if (!entry->vendor_id) | ||
304 | return; | ||
305 | |||
306 | zdev = get_zdev_by_fid(entry->fid); | ||
307 | if (!zdev) { | ||
308 | clp_add_pci_device(entry->fid, entry->fh, entry->config_state); | ||
309 | return; | ||
310 | } | ||
311 | |||
312 | if (!entry->config_state) { | ||
313 | /* | ||
314 | * The handle is already disabled, that means no iota/irq freeing via | ||
315 | * the firmware interfaces anymore. Need to free resources manually | ||
316 | * (DMA memory, debug, sysfs)... | ||
317 | */ | ||
318 | zpci_stop_device(zdev); | ||
319 | } | ||
320 | } | ||
321 | |||
322 | static void __clp_update(struct clp_fh_list_entry *entry) | ||
323 | { | ||
324 | struct zpci_dev *zdev; | ||
325 | |||
326 | if (!entry->vendor_id) | ||
327 | return; | ||
328 | |||
329 | zdev = get_zdev_by_fid(entry->fid); | ||
330 | if (!zdev) | ||
331 | return; | ||
332 | |||
333 | zdev->fh = entry->fh; | ||
334 | } | ||
335 | |||
336 | int clp_scan_pci_devices(void) | ||
337 | { | ||
338 | struct clp_req_rsp_list_pci *rrb; | ||
339 | int rc; | ||
340 | |||
341 | rrb = clp_alloc_block(GFP_KERNEL); | ||
342 | if (!rrb) | ||
343 | return -ENOMEM; | ||
344 | |||
345 | rc = clp_list_pci(rrb, __clp_add); | ||
346 | |||
347 | clp_free_block(rrb); | ||
348 | return rc; | ||
349 | } | ||
350 | |||
351 | int clp_rescan_pci_devices(void) | ||
352 | { | ||
353 | struct clp_req_rsp_list_pci *rrb; | ||
354 | int rc; | ||
355 | |||
356 | rrb = clp_alloc_block(GFP_KERNEL); | ||
357 | if (!rrb) | ||
358 | return -ENOMEM; | ||
359 | |||
360 | rc = clp_list_pci(rrb, __clp_rescan); | ||
361 | |||
362 | clp_free_block(rrb); | ||
363 | return rc; | ||
364 | } | ||
365 | |||
366 | int clp_rescan_pci_devices_simple(void) | ||
367 | { | ||
368 | struct clp_req_rsp_list_pci *rrb; | ||
369 | int rc; | ||
370 | |||
371 | rrb = clp_alloc_block(GFP_NOWAIT); | ||
372 | if (!rrb) | ||
373 | return -ENOMEM; | ||
374 | |||
375 | rc = clp_list_pci(rrb, __clp_update); | ||
376 | |||
325 | clp_free_block(rrb); | 377 | clp_free_block(rrb); |
326 | return rc; | 378 | return rc; |
327 | } | 379 | } |
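
The pci_clp.c rework factors the CLP list-PCI loop into clp_list_pci(), which walks every function-handle entry once and hands it to a caller-supplied callback: __clp_add for the initial scan, __clp_rescan for availability events, and __clp_update for the lightweight handle refresh done with GFP_NOWAIT. A rough user-space sketch of the same dispatch pattern, with invented entry and list types:

    #include <stdio.h>

    struct fh_entry { unsigned int fid, fh, config_state, vendor_id; };

    /* Walk a firmware-provided list once, invoking cb on each entry. */
    static void list_entries(const struct fh_entry *list, int n,
                             void (*cb)(const struct fh_entry *))
    {
            for (int i = 0; i < n; i++)
                    cb(&list[i]);
    }

    static void add_entry(const struct fh_entry *e)
    {
            if (!e->vendor_id)      /* skip empty slots, as __clp_add does */
                    return;
            printf("add fid 0x%x fh 0x%x\n", e->fid, e->fh);
    }

    int main(void)
    {
            struct fh_entry list[] = { { 0x1, 0x10, 1, 0x1014 }, { 0, 0, 0, 0 } };

            /* Same shape as clp_list_pci(rrb, __clp_add). */
            list_entries(list, 2, add_entry);
            return 0;
    }
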
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c index a2343c1f6e04..7e5573acb063 100644 --- a/arch/s390/pci/pci_dma.c +++ b/arch/s390/pci/pci_dma.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/export.h> | 10 | #include <linux/export.h> |
11 | #include <linux/iommu-helper.h> | 11 | #include <linux/iommu-helper.h> |
12 | #include <linux/dma-mapping.h> | 12 | #include <linux/dma-mapping.h> |
13 | #include <linux/vmalloc.h> | ||
13 | #include <linux/pci.h> | 14 | #include <linux/pci.h> |
14 | #include <asm/pci_dma.h> | 15 | #include <asm/pci_dma.h> |
15 | 16 | ||
@@ -170,8 +171,8 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa, | |||
170 | */ | 171 | */ |
171 | goto no_refresh; | 172 | goto no_refresh; |
172 | 173 | ||
173 | rc = s390pci_refresh_trans((u64) zdev->fh << 32, start_dma_addr, | 174 | rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr, |
174 | nr_pages * PAGE_SIZE); | 175 | nr_pages * PAGE_SIZE); |
175 | 176 | ||
176 | no_refresh: | 177 | no_refresh: |
177 | spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags); | 178 | spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags); |
@@ -407,7 +408,6 @@ static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg, | |||
407 | 408 | ||
408 | int zpci_dma_init_device(struct zpci_dev *zdev) | 409 | int zpci_dma_init_device(struct zpci_dev *zdev) |
409 | { | 410 | { |
410 | unsigned int bitmap_order; | ||
411 | int rc; | 411 | int rc; |
412 | 412 | ||
413 | spin_lock_init(&zdev->iommu_bitmap_lock); | 413 | spin_lock_init(&zdev->iommu_bitmap_lock); |
@@ -421,12 +421,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev) | |||
421 | 421 | ||
422 | zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET; | 422 | zdev->iommu_size = (unsigned long) high_memory - PAGE_OFFSET; |
423 | zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT; | 423 | zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT; |
424 | bitmap_order = get_order(zdev->iommu_pages / 8); | 424 | zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8); |
425 | pr_info("iommu_size: 0x%lx iommu_pages: 0x%lx bitmap_order: %i\n", | ||
426 | zdev->iommu_size, zdev->iommu_pages, bitmap_order); | ||
427 | |||
428 | zdev->iommu_bitmap = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO, | ||
429 | bitmap_order); | ||
430 | if (!zdev->iommu_bitmap) { | 425 | if (!zdev->iommu_bitmap) { |
431 | rc = -ENOMEM; | 426 | rc = -ENOMEM; |
432 | goto out_reg; | 427 | goto out_reg; |
@@ -451,8 +446,7 @@ void zpci_dma_exit_device(struct zpci_dev *zdev) | |||
451 | { | 446 | { |
452 | zpci_unregister_ioat(zdev, 0); | 447 | zpci_unregister_ioat(zdev, 0); |
453 | dma_cleanup_tables(zdev); | 448 | dma_cleanup_tables(zdev); |
454 | free_pages((unsigned long) zdev->iommu_bitmap, | 449 | vfree(zdev->iommu_bitmap); |
455 | get_order(zdev->iommu_pages / 8)); | ||
456 | zdev->iommu_bitmap = NULL; | 450 | zdev->iommu_bitmap = NULL; |
457 | zdev->next_bit = 0; | 451 | zdev->next_bit = 0; |
458 | } | 452 | } |
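
Switching the IOMMU bitmap to vzalloc() matters because the bitmap needs one bit per IOMMU page and grows with installed memory; __get_free_pages() must then find a physically contiguous block of order get_order(size), while vzalloc() only needs virtually contiguous pages. A small worked sizing example (the 4 GiB aperture, 4 KiB page size and 64-bit long are assumptions for illustration):

    #include <assert.h>

    int main(void)
    {
            unsigned long iommu_size  = 4UL << 30;          /* 4 GiB aperture     */
            unsigned long iommu_pages = iommu_size >> 12;   /* 1,048,576 pages    */
            unsigned long bitmap_len  = iommu_pages / 8;    /* one bit per page   */

            /* 128 KiB: an order-5 physically contiguous allocation if taken from
             * __get_free_pages(), but just 32 scattered pages behind a contiguous
             * mapping when taken from vzalloc(). */
            assert(bitmap_len == 128 * 1024);
            return 0;
    }
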
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c index ec62e3a0dc09..0aecaf954845 100644 --- a/arch/s390/pci/pci_event.c +++ b/arch/s390/pci/pci_event.c | |||
@@ -69,7 +69,7 @@ static void zpci_event_log_avail(struct zpci_ccdf_avail *ccdf) | |||
69 | clp_add_pci_device(ccdf->fid, ccdf->fh, 0); | 69 | clp_add_pci_device(ccdf->fid, ccdf->fh, 0); |
70 | break; | 70 | break; |
71 | case 0x0306: | 71 | case 0x0306: |
72 | clp_find_pci_devices(); | 72 | clp_rescan_pci_devices(); |
73 | break; | 73 | break; |
74 | default: | 74 | default: |
75 | break; | 75 | break; |
diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c index 22eeb9d7ffeb..85267c058af8 100644 --- a/arch/s390/pci/pci_insn.c +++ b/arch/s390/pci/pci_insn.c | |||
@@ -27,7 +27,7 @@ static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status) | |||
27 | return cc; | 27 | return cc; |
28 | } | 28 | } |
29 | 29 | ||
30 | int s390pci_mod_fc(u64 req, struct zpci_fib *fib) | 30 | int zpci_mod_fc(u64 req, struct zpci_fib *fib) |
31 | { | 31 | { |
32 | u8 cc, status; | 32 | u8 cc, status; |
33 | 33 | ||
@@ -61,7 +61,7 @@ static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status) | |||
61 | return cc; | 61 | return cc; |
62 | } | 62 | } |
63 | 63 | ||
64 | int s390pci_refresh_trans(u64 fn, u64 addr, u64 range) | 64 | int zpci_refresh_trans(u64 fn, u64 addr, u64 range) |
65 | { | 65 | { |
66 | u8 cc, status; | 66 | u8 cc, status; |
67 | 67 | ||
@@ -78,7 +78,7 @@ int s390pci_refresh_trans(u64 fn, u64 addr, u64 range) | |||
78 | } | 78 | } |
79 | 79 | ||
80 | /* Set Interruption Controls */ | 80 | /* Set Interruption Controls */ |
81 | void set_irq_ctrl(u16 ctl, char *unused, u8 isc) | 81 | void zpci_set_irq_ctrl(u16 ctl, char *unused, u8 isc) |
82 | { | 82 | { |
83 | asm volatile ( | 83 | asm volatile ( |
84 | " .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n" | 84 | " .insn rsy,0xeb00000000d1,%[ctl],%[isc],%[u]\n" |
@@ -109,7 +109,7 @@ static inline int __pcilg(u64 *data, u64 req, u64 offset, u8 *status) | |||
109 | return cc; | 109 | return cc; |
110 | } | 110 | } |
111 | 111 | ||
112 | int s390pci_load(u64 *data, u64 req, u64 offset) | 112 | int zpci_load(u64 *data, u64 req, u64 offset) |
113 | { | 113 | { |
114 | u8 status; | 114 | u8 status; |
115 | int cc; | 115 | int cc; |
@@ -125,7 +125,7 @@ int s390pci_load(u64 *data, u64 req, u64 offset) | |||
125 | __func__, cc, status, req, offset); | 125 | __func__, cc, status, req, offset); |
126 | return (cc > 0) ? -EIO : cc; | 126 | return (cc > 0) ? -EIO : cc; |
127 | } | 127 | } |
128 | EXPORT_SYMBOL_GPL(s390pci_load); | 128 | EXPORT_SYMBOL_GPL(zpci_load); |
129 | 129 | ||
130 | /* PCI Store */ | 130 | /* PCI Store */ |
131 | static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status) | 131 | static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status) |
@@ -147,7 +147,7 @@ static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status) | |||
147 | return cc; | 147 | return cc; |
148 | } | 148 | } |
149 | 149 | ||
150 | int s390pci_store(u64 data, u64 req, u64 offset) | 150 | int zpci_store(u64 data, u64 req, u64 offset) |
151 | { | 151 | { |
152 | u8 status; | 152 | u8 status; |
153 | int cc; | 153 | int cc; |
@@ -163,7 +163,7 @@ int s390pci_store(u64 data, u64 req, u64 offset) | |||
163 | __func__, cc, status, req, offset); | 163 | __func__, cc, status, req, offset); |
164 | return (cc > 0) ? -EIO : cc; | 164 | return (cc > 0) ? -EIO : cc; |
165 | } | 165 | } |
166 | EXPORT_SYMBOL_GPL(s390pci_store); | 166 | EXPORT_SYMBOL_GPL(zpci_store); |
167 | 167 | ||
168 | /* PCI Store Block */ | 168 | /* PCI Store Block */ |
169 | static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status) | 169 | static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status) |
@@ -183,7 +183,7 @@ static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status) | |||
183 | return cc; | 183 | return cc; |
184 | } | 184 | } |
185 | 185 | ||
186 | int s390pci_store_block(const u64 *data, u64 req, u64 offset) | 186 | int zpci_store_block(const u64 *data, u64 req, u64 offset) |
187 | { | 187 | { |
188 | u8 status; | 188 | u8 status; |
189 | int cc; | 189 | int cc; |
@@ -199,4 +199,4 @@ int s390pci_store_block(const u64 *data, u64 req, u64 offset) | |||
199 | __func__, cc, status, req, offset); | 199 | __func__, cc, status, req, offset); |
200 | return (cc > 0) ? -EIO : cc; | 200 | return (cc > 0) ? -EIO : cc; |
201 | } | 201 | } |
202 | EXPORT_SYMBOL_GPL(s390pci_store_block); | 202 | EXPORT_SYMBOL_GPL(zpci_store_block); |
diff --git a/arch/s390/pci/pci_msi.c b/arch/s390/pci/pci_msi.c deleted file mode 100644 index b097aed05a9b..000000000000 --- a/arch/s390/pci/pci_msi.c +++ /dev/null | |||
@@ -1,142 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright IBM Corp. 2012 | ||
3 | * | ||
4 | * Author(s): | ||
5 | * Jan Glauber <jang@linux.vnet.ibm.com> | ||
6 | */ | ||
7 | |||
8 | #define COMPONENT "zPCI" | ||
9 | #define pr_fmt(fmt) COMPONENT ": " fmt | ||
10 | |||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/err.h> | ||
13 | #include <linux/rculist.h> | ||
14 | #include <linux/hash.h> | ||
15 | #include <linux/pci.h> | ||
16 | #include <linux/msi.h> | ||
17 | #include <asm/hw_irq.h> | ||
18 | |||
19 | /* mapping of irq numbers to msi_desc */ | ||
20 | static struct hlist_head *msi_hash; | ||
21 | static const unsigned int msi_hash_bits = 8; | ||
22 | #define MSI_HASH_BUCKETS (1U << msi_hash_bits) | ||
23 | #define msi_hashfn(nr) hash_long(nr, msi_hash_bits) | ||
24 | |||
25 | static DEFINE_SPINLOCK(msi_map_lock); | ||
26 | |||
27 | struct msi_desc *__irq_get_msi_desc(unsigned int irq) | ||
28 | { | ||
29 | struct msi_map *map; | ||
30 | |||
31 | hlist_for_each_entry_rcu(map, | ||
32 | &msi_hash[msi_hashfn(irq)], msi_chain) | ||
33 | if (map->irq == irq) | ||
34 | return map->msi; | ||
35 | return NULL; | ||
36 | } | ||
37 | |||
38 | int zpci_msi_set_mask_bits(struct msi_desc *msi, u32 mask, u32 flag) | ||
39 | { | ||
40 | if (msi->msi_attrib.is_msix) { | ||
41 | int offset = msi->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + | ||
42 | PCI_MSIX_ENTRY_VECTOR_CTRL; | ||
43 | msi->masked = readl(msi->mask_base + offset); | ||
44 | writel(flag, msi->mask_base + offset); | ||
45 | } else { | ||
46 | if (msi->msi_attrib.maskbit) { | ||
47 | int pos; | ||
48 | u32 mask_bits; | ||
49 | |||
50 | pos = (long) msi->mask_base; | ||
51 | pci_read_config_dword(msi->dev, pos, &mask_bits); | ||
52 | mask_bits &= ~(mask); | ||
53 | mask_bits |= flag & mask; | ||
54 | pci_write_config_dword(msi->dev, pos, mask_bits); | ||
55 | } else { | ||
56 | return 0; | ||
57 | } | ||
58 | } | ||
59 | |||
60 | msi->msi_attrib.maskbit = !!flag; | ||
61 | return 1; | ||
62 | } | ||
63 | |||
64 | int zpci_setup_msi_irq(struct zpci_dev *zdev, struct msi_desc *msi, | ||
65 | unsigned int nr, int offset) | ||
66 | { | ||
67 | struct msi_map *map; | ||
68 | struct msi_msg msg; | ||
69 | int rc; | ||
70 | |||
71 | map = kmalloc(sizeof(*map), GFP_KERNEL); | ||
72 | if (map == NULL) | ||
73 | return -ENOMEM; | ||
74 | |||
75 | map->irq = nr; | ||
76 | map->msi = msi; | ||
77 | zdev->msi_map[nr & ZPCI_MSI_MASK] = map; | ||
78 | INIT_HLIST_NODE(&map->msi_chain); | ||
79 | |||
80 | pr_debug("%s hashing irq: %u to bucket nr: %llu\n", | ||
81 | __func__, nr, msi_hashfn(nr)); | ||
82 | hlist_add_head_rcu(&map->msi_chain, &msi_hash[msi_hashfn(nr)]); | ||
83 | |||
84 | spin_lock(&msi_map_lock); | ||
85 | rc = irq_set_msi_desc(nr, msi); | ||
86 | if (rc) { | ||
87 | spin_unlock(&msi_map_lock); | ||
88 | hlist_del_rcu(&map->msi_chain); | ||
89 | kfree(map); | ||
90 | zdev->msi_map[nr & ZPCI_MSI_MASK] = NULL; | ||
91 | return rc; | ||
92 | } | ||
93 | spin_unlock(&msi_map_lock); | ||
94 | |||
95 | msg.data = nr - offset; | ||
96 | msg.address_lo = zdev->msi_addr & 0xffffffff; | ||
97 | msg.address_hi = zdev->msi_addr >> 32; | ||
98 | write_msi_msg(nr, &msg); | ||
99 | return 0; | ||
100 | } | ||
101 | |||
102 | void zpci_teardown_msi_irq(struct zpci_dev *zdev, struct msi_desc *msi) | ||
103 | { | ||
104 | int irq = msi->irq & ZPCI_MSI_MASK; | ||
105 | struct msi_map *map; | ||
106 | |||
107 | msi->msg.address_lo = 0; | ||
108 | msi->msg.address_hi = 0; | ||
109 | msi->msg.data = 0; | ||
110 | msi->irq = 0; | ||
111 | zpci_msi_set_mask_bits(msi, 1, 1); | ||
112 | |||
113 | spin_lock(&msi_map_lock); | ||
114 | map = zdev->msi_map[irq]; | ||
115 | hlist_del_rcu(&map->msi_chain); | ||
116 | kfree(map); | ||
117 | zdev->msi_map[irq] = NULL; | ||
118 | spin_unlock(&msi_map_lock); | ||
119 | } | ||
120 | |||
121 | /* | ||
122 | * The msi hash table has 256 entries which is good for 4..20 | ||
123 | * devices (a typical device allocates 10 + CPUs MSI's). Maybe make | ||
124 | * the hash table size adjustable later. | ||
125 | */ | ||
126 | int __init zpci_msihash_init(void) | ||
127 | { | ||
128 | unsigned int i; | ||
129 | |||
130 | msi_hash = kmalloc(MSI_HASH_BUCKETS * sizeof(*msi_hash), GFP_KERNEL); | ||
131 | if (!msi_hash) | ||
132 | return -ENOMEM; | ||
133 | |||
134 | for (i = 0; i < MSI_HASH_BUCKETS; i++) | ||
135 | INIT_HLIST_HEAD(&msi_hash[i]); | ||
136 | return 0; | ||
137 | } | ||
138 | |||
139 | void __init zpci_msihash_exit(void) | ||
140 | { | ||
141 | kfree(msi_hash); | ||
142 | } | ||
diff --git a/arch/s390/pci/pci_sysfs.c b/arch/s390/pci/pci_sysfs.c index e99a2557f186..cf8a12ff733b 100644 --- a/arch/s390/pci/pci_sysfs.c +++ b/arch/s390/pci/pci_sysfs.c | |||
@@ -48,11 +48,38 @@ static ssize_t show_pfgid(struct device *dev, struct device_attribute *attr, | |||
48 | } | 48 | } |
49 | static DEVICE_ATTR(pfgid, S_IRUGO, show_pfgid, NULL); | 49 | static DEVICE_ATTR(pfgid, S_IRUGO, show_pfgid, NULL); |
50 | 50 | ||
51 | static void recover_callback(struct device *dev) | ||
52 | { | ||
53 | struct pci_dev *pdev = to_pci_dev(dev); | ||
54 | struct zpci_dev *zdev = get_zdev(pdev); | ||
55 | int ret; | ||
56 | |||
57 | pci_stop_and_remove_bus_device(pdev); | ||
58 | ret = zpci_disable_device(zdev); | ||
59 | if (ret) | ||
60 | return; | ||
61 | |||
62 | ret = zpci_enable_device(zdev); | ||
63 | if (ret) | ||
64 | return; | ||
65 | |||
66 | pci_rescan_bus(zdev->bus); | ||
67 | } | ||
68 | |||
69 | static ssize_t store_recover(struct device *dev, struct device_attribute *attr, | ||
70 | const char *buf, size_t count) | ||
71 | { | ||
72 | int rc = device_schedule_callback(dev, recover_callback); | ||
73 | return rc ? rc : count; | ||
74 | } | ||
75 | static DEVICE_ATTR(recover, S_IWUSR, NULL, store_recover); | ||
76 | |||
51 | static struct device_attribute *zpci_dev_attrs[] = { | 77 | static struct device_attribute *zpci_dev_attrs[] = { |
52 | &dev_attr_function_id, | 78 | &dev_attr_function_id, |
53 | &dev_attr_function_handle, | 79 | &dev_attr_function_handle, |
54 | &dev_attr_pchid, | 80 | &dev_attr_pchid, |
55 | &dev_attr_pfgid, | 81 | &dev_attr_pfgid, |
82 | &dev_attr_recover, | ||
56 | NULL, | 83 | NULL, |
57 | }; | 84 | }; |
58 | 85 | ||
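
The new "recover" attribute lets user space bounce a zPCI function: the store handler defers the actual work to recover_callback() via device_schedule_callback(), since tearing the PCI device down from inside its own sysfs store would deadlock on the attribute's active reference. A hedged usage sketch; the sysfs path below is a guess based on the attribute name, not taken from the patch:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* Hypothetical path; the PCI address depends on the system. */
            const char *attr = "/sys/bus/pci/devices/0000:00:00.0/recover";
            int fd = open(attr, O_WRONLY);

            if (fd < 0) {
                    perror(attr);
                    return 1;
            }
            /* Any successful write schedules recover_callback(). */
            if (write(fd, "1", 1) != 1) {
                    perror("write");
                    close(fd);
                    return 1;
            }
            close(fd);
            return 0;
    }
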
diff --git a/arch/score/Kconfig b/arch/score/Kconfig index c8def8bc9020..5fc237581caf 100644 --- a/arch/score/Kconfig +++ b/arch/score/Kconfig | |||
@@ -87,6 +87,8 @@ config STACKTRACE_SUPPORT | |||
87 | 87 | ||
88 | source "init/Kconfig" | 88 | source "init/Kconfig" |
89 | 89 | ||
90 | source "kernel/Kconfig.freezer" | ||
91 | |||
90 | config MMU | 92 | config MMU |
91 | def_bool y | 93 | def_bool y |
92 | 94 | ||
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 1020dd85431a..1018ed3a3ca5 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig | |||
@@ -643,9 +643,9 @@ config KEXEC | |||
643 | 643 | ||
644 | It is an ongoing process to be certain the hardware in a machine | 644 | It is an ongoing process to be certain the hardware in a machine |
645 | is properly shutdown, so do not be surprised if this code does not | 645 | is properly shutdown, so do not be surprised if this code does not |
646 | initially work for you. It may help to enable device hotplugging | 646 | initially work for you. As of this writing the exact hardware |
647 | support. As of this writing the exact hardware interface is | 647 | interface is strongly in flux, so no good recommendation can be |
648 | strongly in flux, so no good recommendation can be made. | 648 | made. |
649 | 649 | ||
650 | config CRASH_DUMP | 650 | config CRASH_DUMP |
651 | bool "kernel crash dumps (EXPERIMENTAL)" | 651 | bool "kernel crash dumps (EXPERIMENTAL)" |
diff --git a/arch/sh/drivers/pci/pci.c b/arch/sh/drivers/pci/pci.c index 102f5d58b037..60ed3e1c4b75 100644 --- a/arch/sh/drivers/pci/pci.c +++ b/arch/sh/drivers/pci/pci.c | |||
@@ -69,7 +69,6 @@ static void pcibios_scanbus(struct pci_channel *hose) | |||
69 | 69 | ||
70 | pci_bus_size_bridges(bus); | 70 | pci_bus_size_bridges(bus); |
71 | pci_bus_assign_resources(bus); | 71 | pci_bus_assign_resources(bus); |
72 | pci_enable_bridges(bus); | ||
73 | } else { | 72 | } else { |
74 | pci_free_resource_list(&resources); | 73 | pci_free_resource_list(&resources); |
75 | } | 74 | } |
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h index e61d43d9f689..362192ed12fe 100644 --- a/arch/sh/include/asm/tlb.h +++ b/arch/sh/include/asm/tlb.h | |||
@@ -36,10 +36,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb) | |||
36 | } | 36 | } |
37 | 37 | ||
38 | static inline void | 38 | static inline void |
39 | tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush) | 39 | tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) |
40 | { | 40 | { |
41 | tlb->mm = mm; | 41 | tlb->mm = mm; |
42 | tlb->fullmm = full_mm_flush; | 42 | tlb->start = start; |
43 | tlb->end = end; | ||
44 | tlb->fullmm = !(start | (end+1)); | ||
43 | 45 | ||
44 | init_tlb_gather(tlb); | 46 | init_tlb_gather(tlb); |
45 | } | 47 | } |
diff --git a/arch/sh/kernel/cpu/shmobile/cpuidle.c b/arch/sh/kernel/cpu/shmobile/cpuidle.c index d30622592116..e3abfd4277e2 100644 --- a/arch/sh/kernel/cpu/shmobile/cpuidle.c +++ b/arch/sh/kernel/cpu/shmobile/cpuidle.c | |||
@@ -91,13 +91,11 @@ static struct cpuidle_driver cpuidle_driver = { | |||
91 | 91 | ||
92 | int __init sh_mobile_setup_cpuidle(void) | 92 | int __init sh_mobile_setup_cpuidle(void) |
93 | { | 93 | { |
94 | int ret; | ||
95 | |||
96 | if (sh_mobile_sleep_supported & SUSP_SH_SF) | 94 | if (sh_mobile_sleep_supported & SUSP_SH_SF) |
97 | cpuidle_driver.states[1].disabled = false; | 95 | cpuidle_driver.states[1].disabled = false; |
98 | 96 | ||
99 | if (sh_mobile_sleep_supported & SUSP_SH_STANDBY) | 97 | if (sh_mobile_sleep_supported & SUSP_SH_STANDBY) |
100 | cpuidle_driver.states[2].disabled = false; | 98 | cpuidle_driver.states[2].disabled = false; |
101 | 99 | ||
102 | return cpuidle_register(&cpuidle_driver); | 100 | return cpuidle_register(&cpuidle_driver, NULL); |
103 | } | 101 | } |
diff --git a/arch/tile/include/asm/topology.h b/arch/tile/include/asm/topology.h index d5e86c9f74fd..d15c0d8d550f 100644 --- a/arch/tile/include/asm/topology.h +++ b/arch/tile/include/asm/topology.h | |||
@@ -89,9 +89,6 @@ static inline const struct cpumask *cpumask_of_node(int node) | |||
89 | #define topology_core_id(cpu) (cpu) | 89 | #define topology_core_id(cpu) (cpu) |
90 | #define topology_core_cpumask(cpu) ((void)(cpu), cpu_online_mask) | 90 | #define topology_core_cpumask(cpu) ((void)(cpu), cpu_online_mask) |
91 | #define topology_thread_cpumask(cpu) cpumask_of(cpu) | 91 | #define topology_thread_cpumask(cpu) cpumask_of(cpu) |
92 | |||
93 | /* indicates that pointers to the topology struct cpumask maps are valid */ | ||
94 | #define arch_provides_topology_pointers yes | ||
95 | #endif | 92 | #endif |
96 | 93 | ||
97 | #endif /* _ASM_TILE_TOPOLOGY_H */ | 94 | #endif /* _ASM_TILE_TOPOLOGY_H */ |
diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c index 11425633b2d7..6640e7bbeaa2 100644 --- a/arch/tile/kernel/pci_gx.c +++ b/arch/tile/kernel/pci_gx.c | |||
@@ -508,13 +508,8 @@ static void fixup_read_and_payload_sizes(struct pci_controller *controller) | |||
508 | rc_dev_cap.word); | 508 | rc_dev_cap.word); |
509 | 509 | ||
510 | /* Configure PCI Express MPS setting. */ | 510 | /* Configure PCI Express MPS setting. */ |
511 | list_for_each_entry(child, &root_bus->children, node) { | 511 | list_for_each_entry(child, &root_bus->children, node) |
512 | struct pci_dev *self = child->self; | 512 | pcie_bus_configure_settings(child); |
513 | if (!self) | ||
514 | continue; | ||
515 | |||
516 | pcie_bus_configure_settings(child, self->pcie_mpss); | ||
517 | } | ||
518 | 513 | ||
519 | /* | 514 | /* |
520 | * Set the mac_config register in trio based on the MPS/MRS of the link. | 515 | * Set the mac_config register in trio based on the MPS/MRS of the link. |
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h index 4febacd1a8a1..29b0301c18aa 100644 --- a/arch/um/include/asm/tlb.h +++ b/arch/um/include/asm/tlb.h | |||
@@ -45,10 +45,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb) | |||
45 | } | 45 | } |
46 | 46 | ||
47 | static inline void | 47 | static inline void |
48 | tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush) | 48 | tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) |
49 | { | 49 | { |
50 | tlb->mm = mm; | 50 | tlb->mm = mm; |
51 | tlb->fullmm = full_mm_flush; | 51 | tlb->start = start; |
52 | tlb->end = end; | ||
53 | tlb->fullmm = !(start | (end+1)); | ||
52 | 54 | ||
53 | init_tlb_gather(tlb); | 55 | init_tlb_gather(tlb); |
54 | } | 56 | } |
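
Both the sh and um tlb_gather_mmu() variants above now take the start/end of the range being torn down instead of a full_mm_flush flag, and derive fullmm from the range itself: a full-mm teardown is requested as start = 0, end = -1, so !(start | (end + 1)) is 1 only in that case. A tiny self-contained check of that expression:

    #include <assert.h>

    static int is_fullmm(unsigned long start, unsigned long end)
    {
            return !(start | (end + 1));    /* 1 only for start == 0, end == ~0UL */
    }

    int main(void)
    {
            assert(is_fullmm(0, ~0UL));             /* whole address space */
            assert(!is_fullmm(0, 0x7fff));          /* partial range       */
            assert(!is_fullmm(0x1000, ~0UL));       /* partial range       */
            return 0;
    }
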
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index b32ebf92b0ce..f16fc34e6608 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -16,6 +16,7 @@ config X86_64 | |||
16 | def_bool y | 16 | def_bool y |
17 | depends on 64BIT | 17 | depends on 64BIT |
18 | select X86_DEV_DMA_OPS | 18 | select X86_DEV_DMA_OPS |
19 | select ARCH_USE_CMPXCHG_LOCKREF | ||
19 | 20 | ||
20 | ### Arch settings | 21 | ### Arch settings |
21 | config X86 | 22 | config X86 |
@@ -81,7 +82,6 @@ config X86 | |||
81 | select HAVE_USER_RETURN_NOTIFIER | 82 | select HAVE_USER_RETURN_NOTIFIER |
82 | select ARCH_BINFMT_ELF_RANDOMIZE_PIE | 83 | select ARCH_BINFMT_ELF_RANDOMIZE_PIE |
83 | select HAVE_ARCH_JUMP_LABEL | 84 | select HAVE_ARCH_JUMP_LABEL |
84 | select HAVE_TEXT_POKE_SMP | ||
85 | select HAVE_GENERIC_HARDIRQS | 85 | select HAVE_GENERIC_HARDIRQS |
86 | select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE | 86 | select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE |
87 | select SPARSE_IRQ | 87 | select SPARSE_IRQ |
@@ -1627,9 +1627,9 @@ config KEXEC | |||
1627 | 1627 | ||
1628 | It is an ongoing process to be certain the hardware in a machine | 1628 | It is an ongoing process to be certain the hardware in a machine |
1629 | is properly shutdown, so do not be surprised if this code does not | 1629 | is properly shutdown, so do not be surprised if this code does not |
1630 | initially work for you. It may help to enable device hotplugging | 1630 | initially work for you. As of this writing the exact hardware |
1631 | support. As of this writing the exact hardware interface is | 1631 | interface is strongly in flux, so no good recommendation can be |
1632 | strongly in flux, so no good recommendation can be made. | 1632 | made. |
1633 | 1633 | ||
1634 | config CRASH_DUMP | 1634 | config CRASH_DUMP |
1635 | bool "kernel crash dumps" | 1635 | bool "kernel crash dumps" |
@@ -2332,10 +2332,6 @@ config HAVE_ATOMIC_IOMAP | |||
2332 | def_bool y | 2332 | def_bool y |
2333 | depends on X86_32 | 2333 | depends on X86_32 |
2334 | 2334 | ||
2335 | config HAVE_TEXT_POKE_SMP | ||
2336 | bool | ||
2337 | select STOP_MACHINE if SMP | ||
2338 | |||
2339 | config X86_DEV_DMA_OPS | 2335 | config X86_DEV_DMA_OPS |
2340 | bool | 2336 | bool |
2341 | depends on X86_64 || STA2X11 | 2337 | depends on X86_64 || STA2X11 |
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index d606463aa6d6..b7388a425f09 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c | |||
@@ -225,7 +225,7 @@ static void low_free(unsigned long size, unsigned long addr) | |||
225 | unsigned long nr_pages; | 225 | unsigned long nr_pages; |
226 | 226 | ||
227 | nr_pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE; | 227 | nr_pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE; |
228 | efi_call_phys2(sys_table->boottime->free_pages, addr, size); | 228 | efi_call_phys2(sys_table->boottime->free_pages, addr, nr_pages); |
229 | } | 229 | } |
230 | 230 | ||
231 | static void find_bits(unsigned long mask, u8 *pos, u8 *size) | 231 | static void find_bits(unsigned long mask, u8 *pos, u8 *size) |
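
The one-line eboot.c change is a units fix: the EFI free_pages boot service takes a page count, not a byte count, so the call now passes the nr_pages value computed two lines earlier rather than size. A minimal illustration of the conversion, assuming the usual 4 KiB EFI page size:

    #include <assert.h>

    #define EFI_PAGE_SIZE 4096UL
    #define round_up(x, y) (((x) + (y) - 1) & ~((y) - 1))   /* y must be a power of 2 */

    int main(void)
    {
            unsigned long size = 5000;      /* bytes requested */
            unsigned long nr_pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;

            assert(nr_pages == 2);          /* free_pages() wants 2, not 5000 */
            return 0;
    }
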
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S index 474dc1b59f72..4299eb05023c 100644 --- a/arch/x86/ia32/ia32entry.S +++ b/arch/x86/ia32/ia32entry.S | |||
@@ -452,7 +452,7 @@ ia32_badsys: | |||
452 | 452 | ||
453 | CFI_ENDPROC | 453 | CFI_ENDPROC |
454 | 454 | ||
455 | .macro PTREGSCALL label, func, arg | 455 | .macro PTREGSCALL label, func |
456 | ALIGN | 456 | ALIGN |
457 | GLOBAL(\label) | 457 | GLOBAL(\label) |
458 | leaq \func(%rip),%rax | 458 | leaq \func(%rip),%rax |
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index 58ed6d96a6ac..0a3f9c9f98d5 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h | |||
@@ -5,6 +5,7 @@ | |||
5 | #include <linux/stddef.h> | 5 | #include <linux/stddef.h> |
6 | #include <linux/stringify.h> | 6 | #include <linux/stringify.h> |
7 | #include <asm/asm.h> | 7 | #include <asm/asm.h> |
8 | #include <asm/ptrace.h> | ||
8 | 9 | ||
9 | /* | 10 | /* |
10 | * Alternative inline assembly for SMP. | 11 | * Alternative inline assembly for SMP. |
@@ -220,20 +221,11 @@ extern void *text_poke_early(void *addr, const void *opcode, size_t len); | |||
220 | * no thread can be preempted in the instructions being modified (no iret to an | 221 | * no thread can be preempted in the instructions being modified (no iret to an |
221 | * invalid instruction possible) or if the instructions are changed from a | 222 | * invalid instruction possible) or if the instructions are changed from a |
222 | * consistent state to another consistent state atomically. | 223 | * consistent state to another consistent state atomically. |
223 | * More care must be taken when modifying code in the SMP case because of | ||
224 | * Intel's errata. text_poke_smp() takes care that errata, but still | ||
225 | * doesn't support NMI/MCE handler code modifying. | ||
226 | * On the local CPU you need to be protected against NMI or MCE handlers seeing an | 224 | * On the local CPU you need to be protected against NMI or MCE handlers seeing an |
227 | * inconsistent instruction while you patch. | 225 | * inconsistent instruction while you patch. |
228 | */ | 226 | */ |
229 | struct text_poke_param { | ||
230 | void *addr; | ||
231 | const void *opcode; | ||
232 | size_t len; | ||
233 | }; | ||
234 | |||
235 | extern void *text_poke(void *addr, const void *opcode, size_t len); | 227 | extern void *text_poke(void *addr, const void *opcode, size_t len); |
236 | extern void *text_poke_smp(void *addr, const void *opcode, size_t len); | 228 | extern int poke_int3_handler(struct pt_regs *regs); |
237 | extern void text_poke_smp_batch(struct text_poke_param *params, int n); | 229 | extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler); |
238 | 230 | ||
239 | #endif /* _ASM_X86_ALTERNATIVE_H */ | 231 | #endif /* _ASM_X86_ALTERNATIVE_H */ |
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index f8119b582c3c..1d2091a226bc 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h | |||
@@ -715,4 +715,6 @@ static inline void exiting_ack_irq(void) | |||
715 | ack_APIC_irq(); | 715 | ack_APIC_irq(); |
716 | } | 716 | } |
717 | 717 | ||
718 | extern void ioapic_zap_locks(void); | ||
719 | |||
718 | #endif /* _ASM_X86_APIC_H */ | 720 | #endif /* _ASM_X86_APIC_H */ |
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h index 1c2d247f65ce..4582e8e1cd1a 100644 --- a/arch/x86/include/asm/asm.h +++ b/arch/x86/include/asm/asm.h | |||
@@ -3,21 +3,25 @@ | |||
3 | 3 | ||
4 | #ifdef __ASSEMBLY__ | 4 | #ifdef __ASSEMBLY__ |
5 | # define __ASM_FORM(x) x | 5 | # define __ASM_FORM(x) x |
6 | # define __ASM_FORM_RAW(x) x | ||
6 | # define __ASM_FORM_COMMA(x) x, | 7 | # define __ASM_FORM_COMMA(x) x, |
7 | #else | 8 | #else |
8 | # define __ASM_FORM(x) " " #x " " | 9 | # define __ASM_FORM(x) " " #x " " |
10 | # define __ASM_FORM_RAW(x) #x | ||
9 | # define __ASM_FORM_COMMA(x) " " #x "," | 11 | # define __ASM_FORM_COMMA(x) " " #x "," |
10 | #endif | 12 | #endif |
11 | 13 | ||
12 | #ifdef CONFIG_X86_32 | 14 | #ifdef CONFIG_X86_32 |
13 | # define __ASM_SEL(a,b) __ASM_FORM(a) | 15 | # define __ASM_SEL(a,b) __ASM_FORM(a) |
16 | # define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(a) | ||
14 | #else | 17 | #else |
15 | # define __ASM_SEL(a,b) __ASM_FORM(b) | 18 | # define __ASM_SEL(a,b) __ASM_FORM(b) |
19 | # define __ASM_SEL_RAW(a,b) __ASM_FORM_RAW(b) | ||
16 | #endif | 20 | #endif |
17 | 21 | ||
18 | #define __ASM_SIZE(inst, ...) __ASM_SEL(inst##l##__VA_ARGS__, \ | 22 | #define __ASM_SIZE(inst, ...) __ASM_SEL(inst##l##__VA_ARGS__, \ |
19 | inst##q##__VA_ARGS__) | 23 | inst##q##__VA_ARGS__) |
20 | #define __ASM_REG(reg) __ASM_SEL(e##reg, r##reg) | 24 | #define __ASM_REG(reg) __ASM_SEL_RAW(e##reg, r##reg) |
21 | 25 | ||
22 | #define _ASM_PTR __ASM_SEL(.long, .quad) | 26 | #define _ASM_PTR __ASM_SEL(.long, .quad) |
23 | #define _ASM_ALIGN __ASM_SEL(.balign 4, .balign 8) | 27 | #define _ASM_ALIGN __ASM_SEL(.balign 4, .balign 8) |
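
The new __ASM_FORM_RAW/__ASM_SEL_RAW selectors stringify their argument without the surrounding spaces that __ASM_FORM adds, and __ASM_REG() now uses them; the padded form is fine when pasted into an asm template, but it breaks uses that need the bare register name as a string (plausibly what the newly selected cmpxchg-lockref code relies on; that motivation is an inference, not stated in the patch). A tiny stand-alone demonstration of the difference:

    #include <stdio.h>

    #define FORM(x)     " " #x " "     /* " rsp " - fine inside an asm template   */
    #define FORM_RAW(x) #x             /* "rsp"   - usable as a bare name string  */

    int main(void)
    {
            printf("[%s] vs [%s]\n", FORM(rsp), FORM_RAW(rsp));
            return 0;
    }
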
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index 6dfd0195bb55..41639ce8fd63 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h | |||
@@ -15,6 +15,14 @@ | |||
15 | #include <linux/compiler.h> | 15 | #include <linux/compiler.h> |
16 | #include <asm/alternative.h> | 16 | #include <asm/alternative.h> |
17 | 17 | ||
18 | #if BITS_PER_LONG == 32 | ||
19 | # define _BITOPS_LONG_SHIFT 5 | ||
20 | #elif BITS_PER_LONG == 64 | ||
21 | # define _BITOPS_LONG_SHIFT 6 | ||
22 | #else | ||
23 | # error "Unexpected BITS_PER_LONG" | ||
24 | #endif | ||
25 | |||
18 | #define BIT_64(n) (U64_C(1) << (n)) | 26 | #define BIT_64(n) (U64_C(1) << (n)) |
19 | 27 | ||
20 | /* | 28 | /* |
@@ -59,7 +67,7 @@ | |||
59 | * restricted to acting on a single-word quantity. | 67 | * restricted to acting on a single-word quantity. |
60 | */ | 68 | */ |
61 | static __always_inline void | 69 | static __always_inline void |
62 | set_bit(unsigned int nr, volatile unsigned long *addr) | 70 | set_bit(long nr, volatile unsigned long *addr) |
63 | { | 71 | { |
64 | if (IS_IMMEDIATE(nr)) { | 72 | if (IS_IMMEDIATE(nr)) { |
65 | asm volatile(LOCK_PREFIX "orb %1,%0" | 73 | asm volatile(LOCK_PREFIX "orb %1,%0" |
@@ -81,7 +89,7 @@ set_bit(unsigned int nr, volatile unsigned long *addr) | |||
81 | * If it's called on the same region of memory simultaneously, the effect | 89 | * If it's called on the same region of memory simultaneously, the effect |
82 | * may be that only one operation succeeds. | 90 | * may be that only one operation succeeds. |
83 | */ | 91 | */ |
84 | static inline void __set_bit(int nr, volatile unsigned long *addr) | 92 | static inline void __set_bit(long nr, volatile unsigned long *addr) |
85 | { | 93 | { |
86 | asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory"); | 94 | asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory"); |
87 | } | 95 | } |
@@ -97,7 +105,7 @@ static inline void __set_bit(int nr, volatile unsigned long *addr) | |||
97 | * in order to ensure changes are visible on other processors. | 105 | * in order to ensure changes are visible on other processors. |
98 | */ | 106 | */ |
99 | static __always_inline void | 107 | static __always_inline void |
100 | clear_bit(int nr, volatile unsigned long *addr) | 108 | clear_bit(long nr, volatile unsigned long *addr) |
101 | { | 109 | { |
102 | if (IS_IMMEDIATE(nr)) { | 110 | if (IS_IMMEDIATE(nr)) { |
103 | asm volatile(LOCK_PREFIX "andb %1,%0" | 111 | asm volatile(LOCK_PREFIX "andb %1,%0" |
@@ -118,13 +126,13 @@ clear_bit(int nr, volatile unsigned long *addr) | |||
118 | * clear_bit() is atomic and implies release semantics before the memory | 126 | * clear_bit() is atomic and implies release semantics before the memory |
119 | * operation. It can be used for an unlock. | 127 | * operation. It can be used for an unlock. |
120 | */ | 128 | */ |
121 | static inline void clear_bit_unlock(unsigned nr, volatile unsigned long *addr) | 129 | static inline void clear_bit_unlock(long nr, volatile unsigned long *addr) |
122 | { | 130 | { |
123 | barrier(); | 131 | barrier(); |
124 | clear_bit(nr, addr); | 132 | clear_bit(nr, addr); |
125 | } | 133 | } |
126 | 134 | ||
127 | static inline void __clear_bit(int nr, volatile unsigned long *addr) | 135 | static inline void __clear_bit(long nr, volatile unsigned long *addr) |
128 | { | 136 | { |
129 | asm volatile("btr %1,%0" : ADDR : "Ir" (nr)); | 137 | asm volatile("btr %1,%0" : ADDR : "Ir" (nr)); |
130 | } | 138 | } |
@@ -141,7 +149,7 @@ static inline void __clear_bit(int nr, volatile unsigned long *addr) | |||
141 | * No memory barrier is required here, because x86 cannot reorder stores past | 149 | * No memory barrier is required here, because x86 cannot reorder stores past |
142 | * older loads. Same principle as spin_unlock. | 150 | * older loads. Same principle as spin_unlock. |
143 | */ | 151 | */ |
144 | static inline void __clear_bit_unlock(unsigned nr, volatile unsigned long *addr) | 152 | static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr) |
145 | { | 153 | { |
146 | barrier(); | 154 | barrier(); |
147 | __clear_bit(nr, addr); | 155 | __clear_bit(nr, addr); |
@@ -159,7 +167,7 @@ static inline void __clear_bit_unlock(unsigned nr, volatile unsigned long *addr) | |||
159 | * If it's called on the same region of memory simultaneously, the effect | 167 | * If it's called on the same region of memory simultaneously, the effect |
160 | * may be that only one operation succeeds. | 168 | * may be that only one operation succeeds. |
161 | */ | 169 | */ |
162 | static inline void __change_bit(int nr, volatile unsigned long *addr) | 170 | static inline void __change_bit(long nr, volatile unsigned long *addr) |
163 | { | 171 | { |
164 | asm volatile("btc %1,%0" : ADDR : "Ir" (nr)); | 172 | asm volatile("btc %1,%0" : ADDR : "Ir" (nr)); |
165 | } | 173 | } |
@@ -173,7 +181,7 @@ static inline void __change_bit(int nr, volatile unsigned long *addr) | |||
173 | * Note that @nr may be almost arbitrarily large; this function is not | 181 | * Note that @nr may be almost arbitrarily large; this function is not |
174 | * restricted to acting on a single-word quantity. | 182 | * restricted to acting on a single-word quantity. |
175 | */ | 183 | */ |
176 | static inline void change_bit(int nr, volatile unsigned long *addr) | 184 | static inline void change_bit(long nr, volatile unsigned long *addr) |
177 | { | 185 | { |
178 | if (IS_IMMEDIATE(nr)) { | 186 | if (IS_IMMEDIATE(nr)) { |
179 | asm volatile(LOCK_PREFIX "xorb %1,%0" | 187 | asm volatile(LOCK_PREFIX "xorb %1,%0" |
@@ -194,7 +202,7 @@ static inline void change_bit(int nr, volatile unsigned long *addr) | |||
194 | * This operation is atomic and cannot be reordered. | 202 | * This operation is atomic and cannot be reordered. |
195 | * It also implies a memory barrier. | 203 | * It also implies a memory barrier. |
196 | */ | 204 | */ |
197 | static inline int test_and_set_bit(int nr, volatile unsigned long *addr) | 205 | static inline int test_and_set_bit(long nr, volatile unsigned long *addr) |
198 | { | 206 | { |
199 | int oldbit; | 207 | int oldbit; |
200 | 208 | ||
@@ -212,7 +220,7 @@ static inline int test_and_set_bit(int nr, volatile unsigned long *addr) | |||
212 | * This is the same as test_and_set_bit on x86. | 220 | * This is the same as test_and_set_bit on x86. |
213 | */ | 221 | */ |
214 | static __always_inline int | 222 | static __always_inline int |
215 | test_and_set_bit_lock(int nr, volatile unsigned long *addr) | 223 | test_and_set_bit_lock(long nr, volatile unsigned long *addr) |
216 | { | 224 | { |
217 | return test_and_set_bit(nr, addr); | 225 | return test_and_set_bit(nr, addr); |
218 | } | 226 | } |
@@ -226,7 +234,7 @@ test_and_set_bit_lock(int nr, volatile unsigned long *addr) | |||
226 | * If two examples of this operation race, one can appear to succeed | 234 | * If two examples of this operation race, one can appear to succeed |
227 | * but actually fail. You must protect multiple accesses with a lock. | 235 | * but actually fail. You must protect multiple accesses with a lock. |
228 | */ | 236 | */ |
229 | static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) | 237 | static inline int __test_and_set_bit(long nr, volatile unsigned long *addr) |
230 | { | 238 | { |
231 | int oldbit; | 239 | int oldbit; |
232 | 240 | ||
@@ -245,7 +253,7 @@ static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) | |||
245 | * This operation is atomic and cannot be reordered. | 253 | * This operation is atomic and cannot be reordered. |
246 | * It also implies a memory barrier. | 254 | * It also implies a memory barrier. |
247 | */ | 255 | */ |
248 | static inline int test_and_clear_bit(int nr, volatile unsigned long *addr) | 256 | static inline int test_and_clear_bit(long nr, volatile unsigned long *addr) |
249 | { | 257 | { |
250 | int oldbit; | 258 | int oldbit; |
251 | 259 | ||
@@ -272,7 +280,7 @@ static inline int test_and_clear_bit(int nr, volatile unsigned long *addr) | |||
272 | * accessed from a hypervisor on the same CPU if running in a VM: don't change | 280 | * accessed from a hypervisor on the same CPU if running in a VM: don't change |
273 | * this without also updating arch/x86/kernel/kvm.c | 281 | * this without also updating arch/x86/kernel/kvm.c |
274 | */ | 282 | */ |
275 | static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) | 283 | static inline int __test_and_clear_bit(long nr, volatile unsigned long *addr) |
276 | { | 284 | { |
277 | int oldbit; | 285 | int oldbit; |
278 | 286 | ||
@@ -284,7 +292,7 @@ static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) | |||
284 | } | 292 | } |
285 | 293 | ||
286 | /* WARNING: non atomic and it can be reordered! */ | 294 | /* WARNING: non atomic and it can be reordered! */ |
287 | static inline int __test_and_change_bit(int nr, volatile unsigned long *addr) | 295 | static inline int __test_and_change_bit(long nr, volatile unsigned long *addr) |
288 | { | 296 | { |
289 | int oldbit; | 297 | int oldbit; |
290 | 298 | ||
@@ -304,7 +312,7 @@ static inline int __test_and_change_bit(int nr, volatile unsigned long *addr) | |||
304 | * This operation is atomic and cannot be reordered. | 312 | * This operation is atomic and cannot be reordered. |
305 | * It also implies a memory barrier. | 313 | * It also implies a memory barrier. |
306 | */ | 314 | */ |
307 | static inline int test_and_change_bit(int nr, volatile unsigned long *addr) | 315 | static inline int test_and_change_bit(long nr, volatile unsigned long *addr) |
308 | { | 316 | { |
309 | int oldbit; | 317 | int oldbit; |
310 | 318 | ||
@@ -315,13 +323,13 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr) | |||
315 | return oldbit; | 323 | return oldbit; |
316 | } | 324 | } |
317 | 325 | ||
318 | static __always_inline int constant_test_bit(unsigned int nr, const volatile unsigned long *addr) | 326 | static __always_inline int constant_test_bit(long nr, const volatile unsigned long *addr) |
319 | { | 327 | { |
320 | return ((1UL << (nr % BITS_PER_LONG)) & | 328 | return ((1UL << (nr & (BITS_PER_LONG-1))) & |
321 | (addr[nr / BITS_PER_LONG])) != 0; | 329 | (addr[nr >> _BITOPS_LONG_SHIFT])) != 0; |
322 | } | 330 | } |
323 | 331 | ||
324 | static inline int variable_test_bit(int nr, volatile const unsigned long *addr) | 332 | static inline int variable_test_bit(long nr, volatile const unsigned long *addr) |
325 | { | 333 | { |
326 | int oldbit; | 334 | int oldbit; |
327 | 335 | ||
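
The bitops.h changes widen nr from int to long and index the bitmap with nr >> _BITOPS_LONG_SHIFT, so bit numbers at or beyond 2^31 (possible with very large bitmaps on 64-bit) no longer truncate or go negative. The word/bit arithmetic used by constant_test_bit(), restated as plain portable C:

    #include <assert.h>
    #include <limits.h>

    #define BITS_PER_LONG ((long)(CHAR_BIT * sizeof(long)))

    /* Same arithmetic as constant_test_bit(): nr >> _BITOPS_LONG_SHIFT picks the
     * word (equivalent to nr / BITS_PER_LONG), nr & (BITS_PER_LONG - 1) picks the
     * bit inside it. */
    static int test_bit(long nr, const unsigned long *addr)
    {
            return (addr[nr / BITS_PER_LONG] >> (nr & (BITS_PER_LONG - 1))) & 1;
    }

    int main(void)
    {
            unsigned long map[2] = { 0, 1UL << 3 };

            assert(test_bit(BITS_PER_LONG + 3, map));   /* word 1, bit 3 */
            assert(!test_bit(5, map));
            return 0;
    }
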
diff --git a/arch/x86/include/asm/bootparam_utils.h b/arch/x86/include/asm/bootparam_utils.h index 653668d140f9..4a8cb8d7cbd5 100644 --- a/arch/x86/include/asm/bootparam_utils.h +++ b/arch/x86/include/asm/bootparam_utils.h | |||
@@ -35,9 +35,9 @@ static void sanitize_boot_params(struct boot_params *boot_params) | |||
35 | */ | 35 | */ |
36 | if (boot_params->sentinel) { | 36 | if (boot_params->sentinel) { |
37 | /* fields in boot_params are left uninitialized, clear them */ | 37 | /* fields in boot_params are left uninitialized, clear them */ |
38 | memset(&boot_params->olpc_ofw_header, 0, | 38 | memset(&boot_params->ext_ramdisk_image, 0, |
39 | (char *)&boot_params->efi_info - | 39 | (char *)&boot_params->efi_info - |
40 | (char *)&boot_params->olpc_ofw_header); | 40 | (char *)&boot_params->ext_ramdisk_image); |
41 | memset(&boot_params->kbd_status, 0, | 41 | memset(&boot_params->kbd_status, 0, |
42 | (char *)&boot_params->hdr - | 42 | (char *)&boot_params->hdr - |
43 | (char *)&boot_params->kbd_status); | 43 | (char *)&boot_params->kbd_status); |
diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h index 50e5c58ced23..4c019179a57d 100644 --- a/arch/x86/include/asm/microcode_amd.h +++ b/arch/x86/include/asm/microcode_amd.h | |||
@@ -59,7 +59,7 @@ static inline u16 find_equiv_id(struct equiv_cpu_entry *equiv_cpu_table, | |||
59 | 59 | ||
60 | extern int __apply_microcode_amd(struct microcode_amd *mc_amd); | 60 | extern int __apply_microcode_amd(struct microcode_amd *mc_amd); |
61 | extern int apply_microcode_amd(int cpu); | 61 | extern int apply_microcode_amd(int cpu); |
62 | extern enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size); | 62 | extern enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size); |
63 | 63 | ||
64 | #ifdef CONFIG_MICROCODE_AMD_EARLY | 64 | #ifdef CONFIG_MICROCODE_AMD_EARLY |
65 | #ifdef CONFIG_X86_32 | 65 | #ifdef CONFIG_X86_32 |
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index cdbf36776106..be12c534fd59 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h | |||
@@ -45,22 +45,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
45 | /* Re-load page tables */ | 45 | /* Re-load page tables */ |
46 | load_cr3(next->pgd); | 46 | load_cr3(next->pgd); |
47 | 47 | ||
48 | /* stop flush ipis for the previous mm */ | 48 | /* Stop flush ipis for the previous mm */ |
49 | cpumask_clear_cpu(cpu, mm_cpumask(prev)); | 49 | cpumask_clear_cpu(cpu, mm_cpumask(prev)); |
50 | 50 | ||
51 | /* | 51 | /* Load the LDT, if the LDT is different: */ |
52 | * load the LDT, if the LDT is different: | ||
53 | */ | ||
54 | if (unlikely(prev->context.ldt != next->context.ldt)) | 52 | if (unlikely(prev->context.ldt != next->context.ldt)) |
55 | load_LDT_nolock(&next->context); | 53 | load_LDT_nolock(&next->context); |
56 | } | 54 | } |
57 | #ifdef CONFIG_SMP | 55 | #ifdef CONFIG_SMP |
58 | else { | 56 | else { |
59 | this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK); | 57 | this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK); |
60 | BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next); | 58 | BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next); |
61 | 59 | ||
62 | if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next))) { | 60 | if (!cpumask_test_cpu(cpu, mm_cpumask(next))) { |
63 | /* We were in lazy tlb mode and leave_mm disabled | 61 | /* |
62 | * On established mms, the mm_cpumask is only changed | ||
63 | * from irq context, from ptep_clear_flush() while in | ||
64 | * lazy tlb mode, and here. Irqs are blocked during | ||
65 | * schedule, protecting us from simultaneous changes. | ||
66 | */ | ||
67 | cpumask_set_cpu(cpu, mm_cpumask(next)); | ||
68 | /* | ||
69 | * We were in lazy tlb mode and leave_mm disabled | ||
64 | * tlb flush IPI delivery. We must reload CR3 | 70 | * tlb flush IPI delivery. We must reload CR3 |
65 | * to make sure to use no freed page tables. | 71 | * to make sure to use no freed page tables. |
66 | */ | 72 | */ |
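
The switch_mm() hunk replaces an unconditional cpumask_test_and_set_cpu() with a plain test followed by a set: in the common lazy-TLB case the bit is already set, so the read-only test avoids a locked read-modify-write bouncing the mm_cpumask cache line, and the new comment records why the unlocked pair is safe (irqs are off across schedule). A generic user-space sketch of that test-then-set optimisation using C11 atomics (not the kernel's cpumask API):

    #include <assert.h>
    #include <stdatomic.h>

    /* Skip the atomic RMW when the bit is usually already set;
     * a racy double-set is harmless here. */
    static void mark_cpu(atomic_ulong *mask, unsigned int cpu)
    {
            unsigned long bit = 1UL << cpu;

            if (!(atomic_load_explicit(mask, memory_order_relaxed) & bit))
                    atomic_fetch_or(mask, bit);     /* locked op only on the rare path */
    }

    int main(void)
    {
            atomic_ulong mask = 0;

            mark_cpu(&mask, 3);     /* rare path: sets the bit          */
            mark_cpu(&mask, 3);     /* common path: plain load only     */
            assert(atomic_load(&mask) == (1UL << 3));
            return 0;
    }
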
diff --git a/arch/x86/include/asm/mutex_64.h b/arch/x86/include/asm/mutex_64.h index 2c543fff241b..e7e6751648ed 100644 --- a/arch/x86/include/asm/mutex_64.h +++ b/arch/x86/include/asm/mutex_64.h | |||
@@ -16,6 +16,20 @@ | |||
16 | * | 16 | * |
17 | * Atomically decrements @v and calls <fail_fn> if the result is negative. | 17 | * Atomically decrements @v and calls <fail_fn> if the result is negative. |
18 | */ | 18 | */ |
19 | #ifdef CC_HAVE_ASM_GOTO | ||
20 | static inline void __mutex_fastpath_lock(atomic_t *v, | ||
21 | void (*fail_fn)(atomic_t *)) | ||
22 | { | ||
23 | asm volatile goto(LOCK_PREFIX " decl %0\n" | ||
24 | " jns %l[exit]\n" | ||
25 | : : "m" (v->counter) | ||
26 | : "memory", "cc" | ||
27 | : exit); | ||
28 | fail_fn(v); | ||
29 | exit: | ||
30 | return; | ||
31 | } | ||
32 | #else | ||
19 | #define __mutex_fastpath_lock(v, fail_fn) \ | 33 | #define __mutex_fastpath_lock(v, fail_fn) \ |
20 | do { \ | 34 | do { \ |
21 | unsigned long dummy; \ | 35 | unsigned long dummy; \ |
@@ -32,6 +46,7 @@ do { \ | |||
32 | : "rax", "rsi", "rdx", "rcx", \ | 46 | : "rax", "rsi", "rdx", "rcx", \ |
33 | "r8", "r9", "r10", "r11", "memory"); \ | 47 | "r8", "r9", "r10", "r11", "memory"); \ |
34 | } while (0) | 48 | } while (0) |
49 | #endif | ||
35 | 50 | ||
36 | /** | 51 | /** |
37 | * __mutex_fastpath_lock_retval - try to take the lock by moving the count | 52 | * __mutex_fastpath_lock_retval - try to take the lock by moving the count |
@@ -56,6 +71,20 @@ static inline int __mutex_fastpath_lock_retval(atomic_t *count) | |||
56 | * | 71 | * |
57 | * Atomically increments @v and calls <fail_fn> if the result is nonpositive. | 72 | * Atomically increments @v and calls <fail_fn> if the result is nonpositive. |
58 | */ | 73 | */ |
74 | #ifdef CC_HAVE_ASM_GOTO | ||
75 | static inline void __mutex_fastpath_unlock(atomic_t *v, | ||
76 | void (*fail_fn)(atomic_t *)) | ||
77 | { | ||
78 | asm volatile goto(LOCK_PREFIX " incl %0\n" | ||
79 | " jg %l[exit]\n" | ||
80 | : : "m" (v->counter) | ||
81 | : "memory", "cc" | ||
82 | : exit); | ||
83 | fail_fn(v); | ||
84 | exit: | ||
85 | return; | ||
86 | } | ||
87 | #else | ||
59 | #define __mutex_fastpath_unlock(v, fail_fn) \ | 88 | #define __mutex_fastpath_unlock(v, fail_fn) \ |
60 | do { \ | 89 | do { \ |
61 | unsigned long dummy; \ | 90 | unsigned long dummy; \ |
@@ -72,6 +101,7 @@ do { \ | |||
72 | : "rax", "rsi", "rdx", "rcx", \ | 101 | : "rax", "rsi", "rdx", "rcx", \ |
73 | "r8", "r9", "r10", "r11", "memory"); \ | 102 | "r8", "r9", "r10", "r11", "memory"); \ |
74 | } while (0) | 103 | } while (0) |
104 | #endif | ||
75 | 105 | ||
76 | #define __mutex_slowpath_needs_to_unlock() 1 | 106 | #define __mutex_slowpath_needs_to_unlock() 1 |
77 | 107 | ||
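
When the compiler provides asm goto (CC_HAVE_ASM_GOTO), the mutex fastpaths above branch straight from the locked decrement/increment to a C label, so the uncontended path stays one locked instruction plus a conditional jump and only the failure path calls fail_fn(). The same construct in a self-contained GCC example; x86-64 is assumed and lock_slowpath() is a made-up stand-in for the mutex slow path:

    #include <stdio.h>

    static void lock_slowpath(int *v)
    {
            printf("contended, count now %d\n", *v);
    }

    static void fastpath_lock(int *v)
    {
            asm goto("lock decl %0\n\t"
                     "jns %l[done]"         /* still >= 0: we own the lock */
                     : : "m" (*v)
                     : "memory", "cc"
                     : done);
            lock_slowpath(v);               /* went negative: slow path */
    done:
            return;
    }

    int main(void)
    {
            int count = 1;

            fastpath_lock(&count);          /* uncontended: jumps to done       */
            fastpath_lock(&count);          /* contended: calls lock_slowpath() */
            return 0;
    }
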
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h index f2b489cf1602..3bf2dd0cf61f 100644 --- a/arch/x86/include/asm/pgtable-2level.h +++ b/arch/x86/include/asm/pgtable-2level.h | |||
@@ -55,9 +55,53 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp) | |||
55 | #define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp) | 55 | #define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp) |
56 | #endif | 56 | #endif |
57 | 57 | ||
58 | #ifdef CONFIG_MEM_SOFT_DIRTY | ||
59 | |||
60 | /* | ||
61 | * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE, _PAGE_BIT_SOFT_DIRTY and | ||
62 | * _PAGE_BIT_PROTNONE are taken, split up the 28 bits of offset | ||
63 | * into this range. | ||
64 | */ | ||
65 | #define PTE_FILE_MAX_BITS 28 | ||
66 | #define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1) | ||
67 | #define PTE_FILE_SHIFT2 (_PAGE_BIT_FILE + 1) | ||
68 | #define PTE_FILE_SHIFT3 (_PAGE_BIT_PROTNONE + 1) | ||
69 | #define PTE_FILE_SHIFT4 (_PAGE_BIT_SOFT_DIRTY + 1) | ||
70 | #define PTE_FILE_BITS1 (PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1) | ||
71 | #define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1) | ||
72 | #define PTE_FILE_BITS3 (PTE_FILE_SHIFT4 - PTE_FILE_SHIFT3 - 1) | ||
73 | |||
74 | #define pte_to_pgoff(pte) \ | ||
75 | ((((pte).pte_low >> (PTE_FILE_SHIFT1)) \ | ||
76 | & ((1U << PTE_FILE_BITS1) - 1))) \ | ||
77 | + ((((pte).pte_low >> (PTE_FILE_SHIFT2)) \ | ||
78 | & ((1U << PTE_FILE_BITS2) - 1)) \ | ||
79 | << (PTE_FILE_BITS1)) \ | ||
80 | + ((((pte).pte_low >> (PTE_FILE_SHIFT3)) \ | ||
81 | & ((1U << PTE_FILE_BITS3) - 1)) \ | ||
82 | << (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \ | ||
83 | + ((((pte).pte_low >> (PTE_FILE_SHIFT4))) \ | ||
84 | << (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3)) | ||
85 | |||
86 | #define pgoff_to_pte(off) \ | ||
87 | ((pte_t) { .pte_low = \ | ||
88 | ((((off)) & ((1U << PTE_FILE_BITS1) - 1)) << PTE_FILE_SHIFT1) \ | ||
89 | + ((((off) >> PTE_FILE_BITS1) \ | ||
90 | & ((1U << PTE_FILE_BITS2) - 1)) \ | ||
91 | << PTE_FILE_SHIFT2) \ | ||
92 | + ((((off) >> (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \ | ||
93 | & ((1U << PTE_FILE_BITS3) - 1)) \ | ||
94 | << PTE_FILE_SHIFT3) \ | ||
95 | + ((((off) >> \ | ||
96 | (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3))) \ | ||
97 | << PTE_FILE_SHIFT4) \ | ||
98 | + _PAGE_FILE }) | ||
99 | |||
100 | #else /* CONFIG_MEM_SOFT_DIRTY */ | ||
101 | |||
58 | /* | 102 | /* |
59 | * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken, | 103 | * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken, |
60 | * split up the 29 bits of offset into this range: | 104 | * split up the 29 bits of offset into this range. |
61 | */ | 105 | */ |
62 | #define PTE_FILE_MAX_BITS 29 | 106 | #define PTE_FILE_MAX_BITS 29 |
63 | #define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1) | 107 | #define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1) |
@@ -88,6 +132,8 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp) | |||
88 | << PTE_FILE_SHIFT3) \ | 132 | << PTE_FILE_SHIFT3) \ |
89 | + _PAGE_FILE }) | 133 | + _PAGE_FILE }) |
90 | 134 | ||
135 | #endif /* CONFIG_MEM_SOFT_DIRTY */ | ||
136 | |||
91 | /* Encode and de-code a swap entry */ | 137 | /* Encode and de-code a swap entry */ |
92 | #if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE | 138 | #if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE |
93 | #define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1) | 139 | #define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1) |
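
With CONFIG_MEM_SOFT_DIRTY the 2-level code must also keep _PAGE_BIT_SOFT_DIRTY out of the file offset, so PTE_FILE_MAX_BITS drops from 29 to 28 and the offset is split across four fields instead of three; pte_to_pgoff() and pgoff_to_pte() simply scatter and gather those fields around the reserved bits. A generic round-trip illustration of packing a value around reserved bit positions (the field widths here are made up and much smaller than the real x86 ones):

    #include <assert.h>

    /* Pack an 11-bit offset around two reserved bits (bit 0 and bit 4),
     * roughly what pgoff_to_pte()/pte_to_pgoff() do with more fields. */
    static unsigned int pack(unsigned int off)
    {
            return ((off & 0x7) << 1) | (((off >> 3) & 0xff) << 5);
    }

    static unsigned int unpack(unsigned int pte)
    {
            return ((pte >> 1) & 0x7) | (((pte >> 5) & 0xff) << 3);
    }

    int main(void)
    {
            for (unsigned int off = 0; off < (1u << 11); off++)
                    assert(unpack(pack(off)) == off);   /* lossless round trip */
            return 0;
    }
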
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index 4cc9f2b7cdc3..81bb91b49a88 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h | |||
@@ -179,6 +179,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp) | |||
179 | /* | 179 | /* |
180 | * Bits 0, 6 and 7 are taken in the low part of the pte, | 180 | * Bits 0, 6 and 7 are taken in the low part of the pte, |
181 | * put the 32 bits of offset into the high part. | 181 | * put the 32 bits of offset into the high part. |
182 | * | ||
183 | * For soft-dirty tracking 11 bit is taken from | ||
184 | * the low part of pte as well. | ||
182 | */ | 185 | */ |
183 | #define pte_to_pgoff(pte) ((pte).pte_high) | 186 | #define pte_to_pgoff(pte) ((pte).pte_high) |
184 | #define pgoff_to_pte(off) \ | 187 | #define pgoff_to_pte(off) \ |
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 4e4765908af5..8d16befdec88 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h | |||
@@ -315,6 +315,36 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd) | |||
315 | return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY); | 315 | return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY); |
316 | } | 316 | } |
317 | 317 | ||
318 | static inline pte_t pte_swp_mksoft_dirty(pte_t pte) | ||
319 | { | ||
320 | return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY); | ||
321 | } | ||
322 | |||
323 | static inline int pte_swp_soft_dirty(pte_t pte) | ||
324 | { | ||
325 | return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY; | ||
326 | } | ||
327 | |||
328 | static inline pte_t pte_swp_clear_soft_dirty(pte_t pte) | ||
329 | { | ||
330 | return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY); | ||
331 | } | ||
332 | |||
333 | static inline pte_t pte_file_clear_soft_dirty(pte_t pte) | ||
334 | { | ||
335 | return pte_clear_flags(pte, _PAGE_SOFT_DIRTY); | ||
336 | } | ||
337 | |||
338 | static inline pte_t pte_file_mksoft_dirty(pte_t pte) | ||
339 | { | ||
340 | return pte_set_flags(pte, _PAGE_SOFT_DIRTY); | ||
341 | } | ||
342 | |||
343 | static inline int pte_file_soft_dirty(pte_t pte) | ||
344 | { | ||
345 | return pte_flags(pte) & _PAGE_SOFT_DIRTY; | ||
346 | } | ||
347 | |||
318 | /* | 348 | /* |
319 | * Mask out unsupported bits in a present pgprot. Non-present pgprots | 349 | * Mask out unsupported bits in a present pgprot. Non-present pgprots |
320 | * can use those bits for other purposes, so leave them be. | 350 | * can use those bits for other purposes, so leave them be. |
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index c98ac63aae48..f4843e031131 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h | |||
@@ -61,12 +61,27 @@ | |||
61 | * they do not conflict with each other. | 61 | * they do not conflict with each other. |
62 | */ | 62 | */ |
63 | 63 | ||
64 | #define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_HIDDEN | ||
65 | |||
64 | #ifdef CONFIG_MEM_SOFT_DIRTY | 66 | #ifdef CONFIG_MEM_SOFT_DIRTY |
65 | #define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN) | 67 | #define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY) |
66 | #else | 68 | #else |
67 | #define _PAGE_SOFT_DIRTY (_AT(pteval_t, 0)) | 69 | #define _PAGE_SOFT_DIRTY (_AT(pteval_t, 0)) |
68 | #endif | 70 | #endif |
69 | 71 | ||
72 | /* | ||
73 | * Tracking the soft dirty bit when a page goes to swap is tricky. | ||
74 | * We need a bit which can be stored in the pte _and_ not conflict | ||
75 | * with the swap entry format. On x86, bits 6 and 7 are *not* involved | ||
76 | * in swap entry computation, but bit 6 is used for nonlinear | ||
77 | * file mapping, so we borrow bit 7 for soft dirty tracking. | ||
78 | */ | ||
79 | #ifdef CONFIG_MEM_SOFT_DIRTY | ||
80 | #define _PAGE_SWP_SOFT_DIRTY _PAGE_PSE | ||
81 | #else | ||
82 | #define _PAGE_SWP_SOFT_DIRTY (_AT(pteval_t, 0)) | ||
83 | #endif | ||
84 | |||
70 | #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) | 85 | #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) |
71 | #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX) | 86 | #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX) |
72 | #else | 87 | #else |
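What the _PAGE_SWP_SOFT_DIRTY borrowing buys is that a swapped-out pte can keep carrying the soft-dirty state, via the pte_swp_*() helpers added in pgtable.h above. The following toy model treats the swap pte as a plain integer and assumes bit 7 is free of the swap-entry encoding; it is illustrative only, not the kernel's pte type.

    /* Illustrative model of carrying soft-dirty through a swap pte via bit 7. */
    #include <assert.h>
    #include <stdint.h>

    #define SWP_SOFT_DIRTY  (1ULL << 7)     /* assumed: _PAGE_PSE, unused by swap entries */

    static uint64_t swp_mksoft_dirty(uint64_t swp_pte)      { return swp_pte | SWP_SOFT_DIRTY; }
    static int      swp_soft_dirty(uint64_t swp_pte)        { return !!(swp_pte & SWP_SOFT_DIRTY); }
    static uint64_t swp_clear_soft_dirty(uint64_t swp_pte)  { return swp_pte & ~SWP_SOFT_DIRTY; }

    int main(void)
    {
            uint64_t swp_pte = 0x1234500ULL;        /* pretend swap entry, bits 6/7 clear */

            swp_pte = swp_mksoft_dirty(swp_pte);    /* page was soft-dirty when swapped out */
            assert(swp_soft_dirty(swp_pte));
            assert(!swp_soft_dirty(swp_clear_soft_dirty(swp_pte)));
            return 0;
    }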
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 573c1ad4994e..4c2d31d941ea 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h | |||
@@ -942,35 +942,6 @@ extern int set_tsc_mode(unsigned int val); | |||
942 | 942 | ||
943 | extern u16 amd_get_nb_id(int cpu); | 943 | extern u16 amd_get_nb_id(int cpu); |
944 | 944 | ||
945 | struct aperfmperf { | ||
946 | u64 aperf, mperf; | ||
947 | }; | ||
948 | |||
949 | static inline void get_aperfmperf(struct aperfmperf *am) | ||
950 | { | ||
951 | WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_APERFMPERF)); | ||
952 | |||
953 | rdmsrl(MSR_IA32_APERF, am->aperf); | ||
954 | rdmsrl(MSR_IA32_MPERF, am->mperf); | ||
955 | } | ||
956 | |||
957 | #define APERFMPERF_SHIFT 10 | ||
958 | |||
959 | static inline | ||
960 | unsigned long calc_aperfmperf_ratio(struct aperfmperf *old, | ||
961 | struct aperfmperf *new) | ||
962 | { | ||
963 | u64 aperf = new->aperf - old->aperf; | ||
964 | u64 mperf = new->mperf - old->mperf; | ||
965 | unsigned long ratio = aperf; | ||
966 | |||
967 | mperf >>= APERFMPERF_SHIFT; | ||
968 | if (mperf) | ||
969 | ratio = div64_u64(aperf, mperf); | ||
970 | |||
971 | return ratio; | ||
972 | } | ||
973 | |||
974 | extern unsigned long arch_align_stack(unsigned long sp); | 945 | extern unsigned long arch_align_stack(unsigned long sp); |
975 | extern void free_init_pages(char *what, unsigned long begin, unsigned long end); | 946 | extern void free_init_pages(char *what, unsigned long begin, unsigned long end); |
976 | 947 | ||
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index 33692eaabab5..e0e668422c75 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h | |||
@@ -34,6 +34,11 @@ | |||
34 | # define UNLOCK_LOCK_PREFIX | 34 | # define UNLOCK_LOCK_PREFIX |
35 | #endif | 35 | #endif |
36 | 36 | ||
37 | static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock) | ||
38 | { | ||
39 | return lock.tickets.head == lock.tickets.tail; | ||
40 | } | ||
41 | |||
37 | /* | 42 | /* |
38 | * Ticket locks are conceptually two parts, one indicating the current head of | 43 | * Ticket locks are conceptually two parts, one indicating the current head of |
39 | * the queue, and the other indicating the current tail. The lock is acquired | 44 | * the queue, and the other indicating the current tail. The lock is acquired |
@@ -233,8 +238,4 @@ static inline void arch_write_unlock(arch_rwlock_t *rw) | |||
233 | #define arch_read_relax(lock) cpu_relax() | 238 | #define arch_read_relax(lock) cpu_relax() |
234 | #define arch_write_relax(lock) cpu_relax() | 239 | #define arch_write_relax(lock) cpu_relax() |
235 | 240 | ||
236 | /* The {read|write|spin}_lock() on x86 are full memory barriers. */ | ||
237 | static inline void smp_mb__after_lock(void) { } | ||
238 | #define ARCH_HAS_SMP_MB_AFTER_LOCK | ||
239 | |||
240 | #endif /* _ASM_X86_SPINLOCK_H */ | 241 | #endif /* _ASM_X86_SPINLOCK_H */ |
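The new arch_spin_value_unlocked() simply checks whether the two halves of the ticket described in the comment agree. Here is a stand-alone model of that check; the head/tail layout is illustrative, not the kernel's arch_spinlock_t.

    /* Stand-alone model of a ticket lock's "unlocked" test (illustrative only). */
    #include <assert.h>
    #include <stdint.h>

    struct ticket_lock {
            uint16_t head;  /* ticket currently being served */
            uint16_t tail;  /* next ticket to hand out */
    };

    static int ticket_value_unlocked(struct ticket_lock lock)
    {
            /* the lock is free exactly when no tickets are outstanding */
            return lock.head == lock.tail;
    }

    int main(void)
    {
            struct ticket_lock lock = { .head = 3, .tail = 3 };

            assert(ticket_value_unlocked(lock));    /* head == tail: free */
            lock.tail++;                            /* someone took a ticket */
            assert(!ticket_value_unlocked(lock));   /* head != tail: held */
            return 0;
    }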
diff --git a/arch/x86/include/asm/sync_bitops.h b/arch/x86/include/asm/sync_bitops.h index 9d09b4073b60..05af3b31d522 100644 --- a/arch/x86/include/asm/sync_bitops.h +++ b/arch/x86/include/asm/sync_bitops.h | |||
@@ -26,9 +26,9 @@ | |||
26 | * Note that @nr may be almost arbitrarily large; this function is not | 26 | * Note that @nr may be almost arbitrarily large; this function is not |
27 | * restricted to acting on a single-word quantity. | 27 | * restricted to acting on a single-word quantity. |
28 | */ | 28 | */ |
29 | static inline void sync_set_bit(int nr, volatile unsigned long *addr) | 29 | static inline void sync_set_bit(long nr, volatile unsigned long *addr) |
30 | { | 30 | { |
31 | asm volatile("lock; btsl %1,%0" | 31 | asm volatile("lock; bts %1,%0" |
32 | : "+m" (ADDR) | 32 | : "+m" (ADDR) |
33 | : "Ir" (nr) | 33 | : "Ir" (nr) |
34 | : "memory"); | 34 | : "memory"); |
@@ -44,9 +44,9 @@ static inline void sync_set_bit(int nr, volatile unsigned long *addr) | |||
44 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() | 44 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() |
45 | * in order to ensure changes are visible on other processors. | 45 | * in order to ensure changes are visible on other processors. |
46 | */ | 46 | */ |
47 | static inline void sync_clear_bit(int nr, volatile unsigned long *addr) | 47 | static inline void sync_clear_bit(long nr, volatile unsigned long *addr) |
48 | { | 48 | { |
49 | asm volatile("lock; btrl %1,%0" | 49 | asm volatile("lock; btr %1,%0" |
50 | : "+m" (ADDR) | 50 | : "+m" (ADDR) |
51 | : "Ir" (nr) | 51 | : "Ir" (nr) |
52 | : "memory"); | 52 | : "memory"); |
@@ -61,9 +61,9 @@ static inline void sync_clear_bit(int nr, volatile unsigned long *addr) | |||
61 | * Note that @nr may be almost arbitrarily large; this function is not | 61 | * Note that @nr may be almost arbitrarily large; this function is not |
62 | * restricted to acting on a single-word quantity. | 62 | * restricted to acting on a single-word quantity. |
63 | */ | 63 | */ |
64 | static inline void sync_change_bit(int nr, volatile unsigned long *addr) | 64 | static inline void sync_change_bit(long nr, volatile unsigned long *addr) |
65 | { | 65 | { |
66 | asm volatile("lock; btcl %1,%0" | 66 | asm volatile("lock; btc %1,%0" |
67 | : "+m" (ADDR) | 67 | : "+m" (ADDR) |
68 | : "Ir" (nr) | 68 | : "Ir" (nr) |
69 | : "memory"); | 69 | : "memory"); |
@@ -77,11 +77,11 @@ static inline void sync_change_bit(int nr, volatile unsigned long *addr) | |||
77 | * This operation is atomic and cannot be reordered. | 77 | * This operation is atomic and cannot be reordered. |
78 | * It also implies a memory barrier. | 78 | * It also implies a memory barrier. |
79 | */ | 79 | */ |
80 | static inline int sync_test_and_set_bit(int nr, volatile unsigned long *addr) | 80 | static inline int sync_test_and_set_bit(long nr, volatile unsigned long *addr) |
81 | { | 81 | { |
82 | int oldbit; | 82 | int oldbit; |
83 | 83 | ||
84 | asm volatile("lock; btsl %2,%1\n\tsbbl %0,%0" | 84 | asm volatile("lock; bts %2,%1\n\tsbbl %0,%0" |
85 | : "=r" (oldbit), "+m" (ADDR) | 85 | : "=r" (oldbit), "+m" (ADDR) |
86 | : "Ir" (nr) : "memory"); | 86 | : "Ir" (nr) : "memory"); |
87 | return oldbit; | 87 | return oldbit; |
@@ -95,11 +95,11 @@ static inline int sync_test_and_set_bit(int nr, volatile unsigned long *addr) | |||
95 | * This operation is atomic and cannot be reordered. | 95 | * This operation is atomic and cannot be reordered. |
96 | * It also implies a memory barrier. | 96 | * It also implies a memory barrier. |
97 | */ | 97 | */ |
98 | static inline int sync_test_and_clear_bit(int nr, volatile unsigned long *addr) | 98 | static inline int sync_test_and_clear_bit(long nr, volatile unsigned long *addr) |
99 | { | 99 | { |
100 | int oldbit; | 100 | int oldbit; |
101 | 101 | ||
102 | asm volatile("lock; btrl %2,%1\n\tsbbl %0,%0" | 102 | asm volatile("lock; btr %2,%1\n\tsbbl %0,%0" |
103 | : "=r" (oldbit), "+m" (ADDR) | 103 | : "=r" (oldbit), "+m" (ADDR) |
104 | : "Ir" (nr) : "memory"); | 104 | : "Ir" (nr) : "memory"); |
105 | return oldbit; | 105 | return oldbit; |
@@ -113,11 +113,11 @@ static inline int sync_test_and_clear_bit(int nr, volatile unsigned long *addr) | |||
113 | * This operation is atomic and cannot be reordered. | 113 | * This operation is atomic and cannot be reordered. |
114 | * It also implies a memory barrier. | 114 | * It also implies a memory barrier. |
115 | */ | 115 | */ |
116 | static inline int sync_test_and_change_bit(int nr, volatile unsigned long *addr) | 116 | static inline int sync_test_and_change_bit(long nr, volatile unsigned long *addr) |
117 | { | 117 | { |
118 | int oldbit; | 118 | int oldbit; |
119 | 119 | ||
120 | asm volatile("lock; btcl %2,%1\n\tsbbl %0,%0" | 120 | asm volatile("lock; btc %2,%1\n\tsbbl %0,%0" |
121 | : "=r" (oldbit), "+m" (ADDR) | 121 | : "=r" (oldbit), "+m" (ADDR) |
122 | : "Ir" (nr) : "memory"); | 122 | : "Ir" (nr) : "memory"); |
123 | return oldbit; | 123 | return oldbit; |
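Dropping the "l" suffix and widening @nr to long go together: with a register operand, the bit-test instructions take their operand size from that register, so a 64-bit bit index is neither truncated nor rejected by the assembler. A user-space sketch of the same pattern, assuming x86-64 and GCC-style inline asm:

    /* User-space sketch of the "lock; bts" pattern with a long bit index (x86-64, GCC asm). */
    #include <assert.h>
    #include <string.h>

    static void atomic_set_bit(long nr, volatile unsigned long *addr)
    {
            /*
             * No "l" suffix: the operand size follows the register holding nr,
             * so a 64-bit index is not silently truncated to 32 bits.
             */
            asm volatile("lock; bts %1,%0"
                         : "+m" (*addr)
                         : "Ir" (nr)
                         : "memory");
    }

    int main(void)
    {
            unsigned long bitmap[8];
            long nr = 200;                  /* bts indexes past the first word */

            memset(bitmap, 0, sizeof(bitmap));
            atomic_set_bit(nr, bitmap);
            assert(bitmap[nr / 64] & (1UL << (nr % 64)));
            return 0;
    }

As in the kernel version, the memory constraint only names the first word while the instruction may touch a later one, which is why the "memory" clobber is kept.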
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h index 095b21507b6a..d35f24e231cd 100644 --- a/arch/x86/include/asm/topology.h +++ b/arch/x86/include/asm/topology.h | |||
@@ -124,9 +124,6 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu); | |||
124 | #define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id) | 124 | #define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id) |
125 | #define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu)) | 125 | #define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu)) |
126 | #define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu)) | 126 | #define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu)) |
127 | |||
128 | /* indicates that pointers to the topology cpumask_t maps are valid */ | ||
129 | #define arch_provides_topology_pointers yes | ||
130 | #endif | 127 | #endif |
131 | 128 | ||
132 | static inline void arch_fix_phys_package_id(int num, u32 slot) | 129 | static inline void arch_fix_phys_package_id(int num, u32 slot) |
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h index c91e8b9d588b..235be70d5bb4 100644 --- a/arch/x86/include/asm/tsc.h +++ b/arch/x86/include/asm/tsc.h | |||
@@ -49,6 +49,7 @@ extern void tsc_init(void); | |||
49 | extern void mark_tsc_unstable(char *reason); | 49 | extern void mark_tsc_unstable(char *reason); |
50 | extern int unsynchronized_tsc(void); | 50 | extern int unsynchronized_tsc(void); |
51 | extern int check_tsc_unstable(void); | 51 | extern int check_tsc_unstable(void); |
52 | extern int check_tsc_disabled(void); | ||
52 | extern unsigned long native_calibrate_tsc(void); | 53 | extern unsigned long native_calibrate_tsc(void); |
53 | 54 | ||
54 | extern int tsc_clocksource_reliable; | 55 | extern int tsc_clocksource_reliable; |
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 5ee26875baea..5838fa911aa0 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h | |||
@@ -153,16 +153,19 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL)) | |||
153 | * Careful: we have to cast the result to the type of the pointer | 153 | * Careful: we have to cast the result to the type of the pointer |
154 | * for sign reasons. | 154 | * for sign reasons. |
155 | * | 155 | * |
156 | * The use of %edx as the register specifier is a bit of a | 156 | * The use of _ASM_DX as the register specifier is a bit of a |
157 | * simplification, as gcc only cares about it as the starting point | 157 | * simplification, as gcc only cares about it as the starting point |
158 | * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits | 158 | * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits |
159 | * (%ecx being the next register in gcc's x86 register sequence), and | 159 | * (%ecx being the next register in gcc's x86 register sequence), and |
160 | * %rdx on 64 bits. | 160 | * %rdx on 64 bits. |
161 | * | ||
162 | * Clang/LLVM cares about the size of the register, but still wants | ||
163 | * the base register for something that ends up being a pair. | ||
161 | */ | 164 | */ |
162 | #define get_user(x, ptr) \ | 165 | #define get_user(x, ptr) \ |
163 | ({ \ | 166 | ({ \ |
164 | int __ret_gu; \ | 167 | int __ret_gu; \ |
165 | register __inttype(*(ptr)) __val_gu asm("%edx"); \ | 168 | register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \ |
166 | __chk_user_ptr(ptr); \ | 169 | __chk_user_ptr(ptr); \ |
167 | might_fault(); \ | 170 | might_fault(); \ |
168 | asm volatile("call __get_user_%P3" \ | 171 | asm volatile("call __get_user_%P3" \ |
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 2627a81253ee..81aa73b8ecf5 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c | |||
@@ -199,7 +199,7 @@ static void acpi_register_lapic(int id, u8 enabled) | |||
199 | { | 199 | { |
200 | unsigned int ver = 0; | 200 | unsigned int ver = 0; |
201 | 201 | ||
202 | if (id >= (MAX_LOCAL_APIC-1)) { | 202 | if (id >= MAX_LOCAL_APIC) { |
203 | printk(KERN_INFO PREFIX "skipped apicid that is too big\n"); | 203 | printk(KERN_INFO PREFIX "skipped apicid that is too big\n"); |
204 | return; | 204 | return; |
205 | } | 205 | } |
@@ -1120,6 +1120,7 @@ int mp_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity) | |||
1120 | int ioapic; | 1120 | int ioapic; |
1121 | int ioapic_pin; | 1121 | int ioapic_pin; |
1122 | struct io_apic_irq_attr irq_attr; | 1122 | struct io_apic_irq_attr irq_attr; |
1123 | int ret; | ||
1123 | 1124 | ||
1124 | if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC) | 1125 | if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC) |
1125 | return gsi; | 1126 | return gsi; |
@@ -1149,7 +1150,9 @@ int mp_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity) | |||
1149 | set_io_apic_irq_attr(&irq_attr, ioapic, ioapic_pin, | 1150 | set_io_apic_irq_attr(&irq_attr, ioapic, ioapic_pin, |
1150 | trigger == ACPI_EDGE_SENSITIVE ? 0 : 1, | 1151 | trigger == ACPI_EDGE_SENSITIVE ? 0 : 1, |
1151 | polarity == ACPI_ACTIVE_HIGH ? 0 : 1); | 1152 | polarity == ACPI_ACTIVE_HIGH ? 0 : 1); |
1152 | io_apic_set_pci_routing(dev, gsi_to_irq(gsi), &irq_attr); | 1153 | ret = io_apic_set_pci_routing(dev, gsi_to_irq(gsi), &irq_attr); |
1154 | if (ret < 0) | ||
1155 | gsi = INT_MIN; | ||
1153 | 1156 | ||
1154 | return gsi; | 1157 | return gsi; |
1155 | } | 1158 | } |
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index c15cf9a25e27..15e8563e5c24 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/memory.h> | 11 | #include <linux/memory.h> |
12 | #include <linux/stop_machine.h> | 12 | #include <linux/stop_machine.h> |
13 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
14 | #include <linux/kdebug.h> | ||
14 | #include <asm/alternative.h> | 15 | #include <asm/alternative.h> |
15 | #include <asm/sections.h> | 16 | #include <asm/sections.h> |
16 | #include <asm/pgtable.h> | 17 | #include <asm/pgtable.h> |
@@ -596,97 +597,93 @@ void *__kprobes text_poke(void *addr, const void *opcode, size_t len) | |||
596 | return addr; | 597 | return addr; |
597 | } | 598 | } |
598 | 599 | ||
599 | /* | 600 | static void do_sync_core(void *info) |
600 | * Cross-modifying kernel text with stop_machine(). | 601 | { |
601 | * This code originally comes from immediate value. | 602 | sync_core(); |
602 | */ | 603 | } |
603 | static atomic_t stop_machine_first; | ||
604 | static int wrote_text; | ||
605 | 604 | ||
606 | struct text_poke_params { | 605 | static bool bp_patching_in_progress; |
607 | struct text_poke_param *params; | 606 | static void *bp_int3_handler, *bp_int3_addr; |
608 | int nparams; | ||
609 | }; | ||
610 | 607 | ||
611 | static int __kprobes stop_machine_text_poke(void *data) | 608 | int poke_int3_handler(struct pt_regs *regs) |
612 | { | 609 | { |
613 | struct text_poke_params *tpp = data; | 610 | /* bp_patching_in_progress */ |
614 | struct text_poke_param *p; | 611 | smp_rmb(); |
615 | int i; | ||
616 | 612 | ||
617 | if (atomic_xchg(&stop_machine_first, 0)) { | 613 | if (likely(!bp_patching_in_progress)) |
618 | for (i = 0; i < tpp->nparams; i++) { | 614 | return 0; |
619 | p = &tpp->params[i]; | ||
620 | text_poke(p->addr, p->opcode, p->len); | ||
621 | } | ||
622 | smp_wmb(); /* Make sure other cpus see that this has run */ | ||
623 | wrote_text = 1; | ||
624 | } else { | ||
625 | while (!wrote_text) | ||
626 | cpu_relax(); | ||
627 | smp_mb(); /* Load wrote_text before following execution */ | ||
628 | } | ||
629 | 615 | ||
630 | for (i = 0; i < tpp->nparams; i++) { | 616 | if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr) |
631 | p = &tpp->params[i]; | 617 | return 0; |
632 | flush_icache_range((unsigned long)p->addr, | 618 | |
633 | (unsigned long)p->addr + p->len); | 619 | /* set up the specified breakpoint handler */ |
634 | } | 620 | regs->ip = (unsigned long) bp_int3_handler; |
635 | /* | 621 | |
636 | * Intel Archiecture Software Developer's Manual section 7.1.3 specifies | 622 | return 1; |
637 | * that a core serializing instruction such as "cpuid" should be | ||
638 | * executed on _each_ core before the new instruction is made visible. | ||
639 | */ | ||
640 | sync_core(); | ||
641 | return 0; | ||
642 | } | ||
643 | 623 | ||
644 | /** | ||
645 | * text_poke_smp - Update instructions on a live kernel on SMP | ||
646 | * @addr: address to modify | ||
647 | * @opcode: source of the copy | ||
648 | * @len: length to copy | ||
649 | * | ||
650 | * Modify multi-byte instruction by using stop_machine() on SMP. This allows | ||
651 | * user to poke/set multi-byte text on SMP. Only non-NMI/MCE code modifying | ||
652 | * should be allowed, since stop_machine() does _not_ protect code against | ||
653 | * NMI and MCE. | ||
654 | * | ||
655 | * Note: Must be called under get_online_cpus() and text_mutex. | ||
656 | */ | ||
657 | void *__kprobes text_poke_smp(void *addr, const void *opcode, size_t len) | ||
658 | { | ||
659 | struct text_poke_params tpp; | ||
660 | struct text_poke_param p; | ||
661 | |||
662 | p.addr = addr; | ||
663 | p.opcode = opcode; | ||
664 | p.len = len; | ||
665 | tpp.params = &p; | ||
666 | tpp.nparams = 1; | ||
667 | atomic_set(&stop_machine_first, 1); | ||
668 | wrote_text = 0; | ||
669 | /* Use __stop_machine() because the caller already got online_cpus. */ | ||
670 | __stop_machine(stop_machine_text_poke, (void *)&tpp, cpu_online_mask); | ||
671 | return addr; | ||
672 | } | 624 | } |
673 | 625 | ||
674 | /** | 626 | /** |
675 | * text_poke_smp_batch - Update instructions on a live kernel on SMP | 627 | * text_poke_bp() -- update instructions on live kernel on SMP |
676 | * @params: an array of text_poke parameters | 628 | * @addr: address to patch |
677 | * @n: the number of elements in params. | 629 | * @opcode: opcode of new instruction |
630 | * @len: length to copy | ||
631 | * @handler: address to jump to when the temporary breakpoint is hit | ||
678 | * | 632 | * |
679 | * Modify multi-byte instruction by using stop_machine() on SMP. Since the | 633 | * Modify a multi-byte instruction by using an int3 breakpoint on SMP. |
680 | * stop_machine() is heavy task, it is better to aggregate text_poke requests | 634 | * We completely avoid stop_machine() here, and achieve the |
681 | * and do it once if possible. | 635 | * synchronization using int3 breakpoint. |
682 | * | 636 | * |
683 | * Note: Must be called under get_online_cpus() and text_mutex. | 637 | * The way it is done: |
638 | * - add an int3 trap to the address that will be patched | ||
639 | * - sync cores | ||
640 | * - update all but the first byte of the patched range | ||
641 | * - sync cores | ||
642 | * - replace the first byte (int3) with the first byte of | ||
643 | * the replacement opcode | ||
644 | * - sync cores | ||
645 | * | ||
646 | * Note: must be called under text_mutex. | ||
684 | */ | 647 | */ |
685 | void __kprobes text_poke_smp_batch(struct text_poke_param *params, int n) | 648 | void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler) |
686 | { | 649 | { |
687 | struct text_poke_params tpp = {.params = params, .nparams = n}; | 650 | unsigned char int3 = 0xcc; |
651 | |||
652 | bp_int3_handler = handler; | ||
653 | bp_int3_addr = (u8 *)addr + sizeof(int3); | ||
654 | bp_patching_in_progress = true; | ||
655 | /* | ||
656 | * Corresponding read barrier in int3 notifier for | ||
657 | * making sure the in_progress flags is correctly ordered wrt. | ||
658 | * patching | ||
659 | */ | ||
660 | smp_wmb(); | ||
661 | |||
662 | text_poke(addr, &int3, sizeof(int3)); | ||
688 | 663 | ||
689 | atomic_set(&stop_machine_first, 1); | 664 | on_each_cpu(do_sync_core, NULL, 1); |
690 | wrote_text = 0; | 665 | |
691 | __stop_machine(stop_machine_text_poke, (void *)&tpp, cpu_online_mask); | 666 | if (len - sizeof(int3) > 0) { |
667 | /* patch all but the first byte */ | ||
668 | text_poke((char *)addr + sizeof(int3), | ||
669 | (const char *) opcode + sizeof(int3), | ||
670 | len - sizeof(int3)); | ||
671 | /* | ||
672 | * According to Intel, this core syncing is very likely | ||
673 | * not necessary and we'd be safe even without it. But | ||
674 | * better safe than sorry (plus there's not only Intel). | ||
675 | */ | ||
676 | on_each_cpu(do_sync_core, NULL, 1); | ||
677 | } | ||
678 | |||
679 | /* patch the first byte */ | ||
680 | text_poke(addr, opcode, sizeof(int3)); | ||
681 | |||
682 | on_each_cpu(do_sync_core, NULL, 1); | ||
683 | |||
684 | bp_patching_in_progress = false; | ||
685 | smp_wmb(); | ||
686 | |||
687 | return addr; | ||
692 | } | 688 | } |
689 | |||
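A hedged sketch of how a caller might use the new int3-based path: the prototype and the text_mutex requirement come from the code above, while the wrapper itself, its name, and the header locations are assumptions for illustration. The caller hands text_poke_bp() the new instruction bytes plus the address at which a CPU that hits the temporary int3 should resume.

    /* Hypothetical caller of text_poke_bp(); illustrative only. */
    #include <linux/types.h>
    #include <linux/memory.h>       /* text_mutex */
    #include <linux/mutex.h>
    #include <asm/alternative.h>    /* assumed home of the text_poke_bp() prototype */

    #define PATCH_LEN 5             /* e.g. a 5-byte relative jmp */

    static void example_patch_site(void *site, const u8 new_insn[PATCH_LEN],
                                   void *resume_addr)
    {
            /*
             * resume_addr is where a CPU that traps on the temporary int3
             * continues executing; for a jump patch this is simply the
             * jump target (or the instruction after the site for a NOP).
             */
            mutex_lock(&text_mutex);
            text_poke_bp(site, new_insn, PATCH_LEN, resume_addr);
            mutex_unlock(&text_mutex);
    }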
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 9ed796ccc32c..e63a5bd2a78f 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c | |||
@@ -1534,6 +1534,11 @@ void intel_ir_io_apic_print_entries(unsigned int apic, | |||
1534 | } | 1534 | } |
1535 | } | 1535 | } |
1536 | 1536 | ||
1537 | void ioapic_zap_locks(void) | ||
1538 | { | ||
1539 | raw_spin_lock_init(&ioapic_lock); | ||
1540 | } | ||
1541 | |||
1537 | __apicdebuginit(void) print_IO_APIC(int ioapic_idx) | 1542 | __apicdebuginit(void) print_IO_APIC(int ioapic_idx) |
1538 | { | 1543 | { |
1539 | union IO_APIC_reg_00 reg_00; | 1544 | union IO_APIC_reg_00 reg_00; |
@@ -3375,12 +3380,15 @@ int io_apic_setup_irq_pin_once(unsigned int irq, int node, | |||
3375 | { | 3380 | { |
3376 | unsigned int ioapic_idx = attr->ioapic, pin = attr->ioapic_pin; | 3381 | unsigned int ioapic_idx = attr->ioapic, pin = attr->ioapic_pin; |
3377 | int ret; | 3382 | int ret; |
3383 | struct IO_APIC_route_entry orig_entry; | ||
3378 | 3384 | ||
3379 | /* Avoid redundant programming */ | 3385 | /* Avoid redundant programming */ |
3380 | if (test_bit(pin, ioapics[ioapic_idx].pin_programmed)) { | 3386 | if (test_bit(pin, ioapics[ioapic_idx].pin_programmed)) { |
3381 | pr_debug("Pin %d-%d already programmed\n", | 3387 | pr_debug("Pin %d-%d already programmed\n", mpc_ioapic_id(ioapic_idx), pin); |
3382 | mpc_ioapic_id(ioapic_idx), pin); | 3388 | orig_entry = ioapic_read_entry(attr->ioapic, pin); |
3383 | return 0; | 3389 | if (attr->trigger == orig_entry.trigger && attr->polarity == orig_entry.polarity) |
3390 | return 0; | ||
3391 | return -EBUSY; | ||
3384 | } | 3392 | } |
3385 | ret = io_apic_setup_irq_pin(irq, node, attr); | 3393 | ret = io_apic_setup_irq_pin(irq, node, attr); |
3386 | if (!ret) | 3394 | if (!ret) |
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 466e3d15de12..903a264af981 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
@@ -512,7 +512,7 @@ static void early_init_amd(struct cpuinfo_x86 *c) | |||
512 | 512 | ||
513 | static const int amd_erratum_383[]; | 513 | static const int amd_erratum_383[]; |
514 | static const int amd_erratum_400[]; | 514 | static const int amd_erratum_400[]; |
515 | static bool cpu_has_amd_erratum(const int *erratum); | 515 | static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum); |
516 | 516 | ||
517 | static void init_amd(struct cpuinfo_x86 *c) | 517 | static void init_amd(struct cpuinfo_x86 *c) |
518 | { | 518 | { |
@@ -729,11 +729,11 @@ static void init_amd(struct cpuinfo_x86 *c) | |||
729 | value &= ~(1ULL << 24); | 729 | value &= ~(1ULL << 24); |
730 | wrmsrl_safe(MSR_AMD64_BU_CFG2, value); | 730 | wrmsrl_safe(MSR_AMD64_BU_CFG2, value); |
731 | 731 | ||
732 | if (cpu_has_amd_erratum(amd_erratum_383)) | 732 | if (cpu_has_amd_erratum(c, amd_erratum_383)) |
733 | set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH); | 733 | set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH); |
734 | } | 734 | } |
735 | 735 | ||
736 | if (cpu_has_amd_erratum(amd_erratum_400)) | 736 | if (cpu_has_amd_erratum(c, amd_erratum_400)) |
737 | set_cpu_bug(c, X86_BUG_AMD_APIC_C1E); | 737 | set_cpu_bug(c, X86_BUG_AMD_APIC_C1E); |
738 | 738 | ||
739 | rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy); | 739 | rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy); |
@@ -878,23 +878,13 @@ static const int amd_erratum_400[] = | |||
878 | static const int amd_erratum_383[] = | 878 | static const int amd_erratum_383[] = |
879 | AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); | 879 | AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf)); |
880 | 880 | ||
881 | static bool cpu_has_amd_erratum(const int *erratum) | 881 | |
882 | static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) | ||
882 | { | 883 | { |
883 | struct cpuinfo_x86 *cpu = __this_cpu_ptr(&cpu_info); | ||
884 | int osvw_id = *erratum++; | 884 | int osvw_id = *erratum++; |
885 | u32 range; | 885 | u32 range; |
886 | u32 ms; | 886 | u32 ms; |
887 | 887 | ||
888 | /* | ||
889 | * If called early enough that current_cpu_data hasn't been initialized | ||
890 | * yet, fall back to boot_cpu_data. | ||
891 | */ | ||
892 | if (cpu->x86 == 0) | ||
893 | cpu = &boot_cpu_data; | ||
894 | |||
895 | if (cpu->x86_vendor != X86_VENDOR_AMD) | ||
896 | return false; | ||
897 | |||
898 | if (osvw_id >= 0 && osvw_id < 65536 && | 888 | if (osvw_id >= 0 && osvw_id < 65536 && |
899 | cpu_has(cpu, X86_FEATURE_OSVW)) { | 889 | cpu_has(cpu, X86_FEATURE_OSVW)) { |
900 | u64 osvw_len; | 890 | u64 osvw_len; |
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index a7c7305030cc..8355c84b9729 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c | |||
@@ -1884,6 +1884,7 @@ static struct pmu pmu = { | |||
1884 | void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now) | 1884 | void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now) |
1885 | { | 1885 | { |
1886 | userpg->cap_usr_time = 0; | 1886 | userpg->cap_usr_time = 0; |
1887 | userpg->cap_usr_time_zero = 0; | ||
1887 | userpg->cap_usr_rdpmc = x86_pmu.attr_rdpmc; | 1888 | userpg->cap_usr_rdpmc = x86_pmu.attr_rdpmc; |
1888 | userpg->pmc_width = x86_pmu.cntval_bits; | 1889 | userpg->pmc_width = x86_pmu.cntval_bits; |
1889 | 1890 | ||
@@ -1897,6 +1898,11 @@ void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now) | |||
1897 | userpg->time_mult = this_cpu_read(cyc2ns); | 1898 | userpg->time_mult = this_cpu_read(cyc2ns); |
1898 | userpg->time_shift = CYC2NS_SCALE_FACTOR; | 1899 | userpg->time_shift = CYC2NS_SCALE_FACTOR; |
1899 | userpg->time_offset = this_cpu_read(cyc2ns_offset) - now; | 1900 | userpg->time_offset = this_cpu_read(cyc2ns_offset) - now; |
1901 | |||
1902 | if (sched_clock_stable && !check_tsc_disabled()) { | ||
1903 | userpg->cap_usr_time_zero = 1; | ||
1904 | userpg->time_zero = this_cpu_read(cyc2ns_offset); | ||
1905 | } | ||
1900 | } | 1906 | } |
1901 | 1907 | ||
1902 | /* | 1908 | /* |
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index 97e557bc4c91..cc16faae0538 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h | |||
@@ -641,6 +641,8 @@ extern struct event_constraint intel_core2_pebs_event_constraints[]; | |||
641 | 641 | ||
642 | extern struct event_constraint intel_atom_pebs_event_constraints[]; | 642 | extern struct event_constraint intel_atom_pebs_event_constraints[]; |
643 | 643 | ||
644 | extern struct event_constraint intel_slm_pebs_event_constraints[]; | ||
645 | |||
644 | extern struct event_constraint intel_nehalem_pebs_event_constraints[]; | 646 | extern struct event_constraint intel_nehalem_pebs_event_constraints[]; |
645 | 647 | ||
646 | extern struct event_constraint intel_westmere_pebs_event_constraints[]; | 648 | extern struct event_constraint intel_westmere_pebs_event_constraints[]; |
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index 4cbe03287b08..beeb7cc07044 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c | |||
@@ -347,8 +347,7 @@ static struct amd_nb *amd_alloc_nb(int cpu) | |||
347 | struct amd_nb *nb; | 347 | struct amd_nb *nb; |
348 | int i; | 348 | int i; |
349 | 349 | ||
350 | nb = kmalloc_node(sizeof(struct amd_nb), GFP_KERNEL | __GFP_ZERO, | 350 | nb = kzalloc_node(sizeof(struct amd_nb), GFP_KERNEL, cpu_to_node(cpu)); |
351 | cpu_to_node(cpu)); | ||
352 | if (!nb) | 351 | if (!nb) |
353 | return NULL; | 352 | return NULL; |
354 | 353 | ||
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index fbc9210b45bc..0abf6742a8b0 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
@@ -81,7 +81,8 @@ static struct event_constraint intel_nehalem_event_constraints[] __read_mostly = | |||
81 | 81 | ||
82 | static struct extra_reg intel_nehalem_extra_regs[] __read_mostly = | 82 | static struct extra_reg intel_nehalem_extra_regs[] __read_mostly = |
83 | { | 83 | { |
84 | INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0), | 84 | /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ |
85 | INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0), | ||
85 | INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b), | 86 | INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b), |
86 | EVENT_EXTRA_END | 87 | EVENT_EXTRA_END |
87 | }; | 88 | }; |
@@ -143,8 +144,9 @@ static struct event_constraint intel_ivb_event_constraints[] __read_mostly = | |||
143 | 144 | ||
144 | static struct extra_reg intel_westmere_extra_regs[] __read_mostly = | 145 | static struct extra_reg intel_westmere_extra_regs[] __read_mostly = |
145 | { | 146 | { |
146 | INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0), | 147 | /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ |
147 | INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1), | 148 | INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0xffff, RSP_0), |
149 | INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0xffff, RSP_1), | ||
148 | INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b), | 150 | INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x100b), |
149 | EVENT_EXTRA_END | 151 | EVENT_EXTRA_END |
150 | }; | 152 | }; |
@@ -162,16 +164,27 @@ static struct event_constraint intel_gen_event_constraints[] __read_mostly = | |||
162 | EVENT_CONSTRAINT_END | 164 | EVENT_CONSTRAINT_END |
163 | }; | 165 | }; |
164 | 166 | ||
167 | static struct event_constraint intel_slm_event_constraints[] __read_mostly = | ||
168 | { | ||
169 | FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */ | ||
170 | FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */ | ||
171 | FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF */ | ||
172 | FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */ | ||
173 | EVENT_CONSTRAINT_END | ||
174 | }; | ||
175 | |||
165 | static struct extra_reg intel_snb_extra_regs[] __read_mostly = { | 176 | static struct extra_reg intel_snb_extra_regs[] __read_mostly = { |
166 | INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0), | 177 | /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ |
167 | INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1), | 178 | INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0), |
179 | INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1), | ||
168 | INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), | 180 | INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), |
169 | EVENT_EXTRA_END | 181 | EVENT_EXTRA_END |
170 | }; | 182 | }; |
171 | 183 | ||
172 | static struct extra_reg intel_snbep_extra_regs[] __read_mostly = { | 184 | static struct extra_reg intel_snbep_extra_regs[] __read_mostly = { |
173 | INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0), | 185 | /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ |
174 | INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1), | 186 | INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0), |
187 | INTEL_UEVENT_EXTRA_REG(0x01bb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1), | ||
175 | INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), | 188 | INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd), |
176 | EVENT_EXTRA_END | 189 | EVENT_EXTRA_END |
177 | }; | 190 | }; |
@@ -882,6 +895,140 @@ static __initconst const u64 atom_hw_cache_event_ids | |||
882 | }, | 895 | }, |
883 | }; | 896 | }; |
884 | 897 | ||
898 | static struct extra_reg intel_slm_extra_regs[] __read_mostly = | ||
899 | { | ||
900 | /* must define OFFCORE_RSP_X first, see intel_fixup_er() */ | ||
901 | INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x768005ffff, RSP_0), | ||
902 | INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x768005ffff, RSP_1), | ||
903 | EVENT_EXTRA_END | ||
904 | }; | ||
905 | |||
906 | #define SLM_DMND_READ SNB_DMND_DATA_RD | ||
907 | #define SLM_DMND_WRITE SNB_DMND_RFO | ||
908 | #define SLM_DMND_PREFETCH (SNB_PF_DATA_RD|SNB_PF_RFO) | ||
909 | |||
910 | #define SLM_SNP_ANY (SNB_SNP_NONE|SNB_SNP_MISS|SNB_NO_FWD|SNB_HITM) | ||
911 | #define SLM_LLC_ACCESS SNB_RESP_ANY | ||
912 | #define SLM_LLC_MISS (SLM_SNP_ANY|SNB_NON_DRAM) | ||
913 | |||
914 | static __initconst const u64 slm_hw_cache_extra_regs | ||
915 | [PERF_COUNT_HW_CACHE_MAX] | ||
916 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
917 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | ||
918 | { | ||
919 | [ C(LL ) ] = { | ||
920 | [ C(OP_READ) ] = { | ||
921 | [ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS, | ||
922 | [ C(RESULT_MISS) ] = SLM_DMND_READ|SLM_LLC_MISS, | ||
923 | }, | ||
924 | [ C(OP_WRITE) ] = { | ||
925 | [ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS, | ||
926 | [ C(RESULT_MISS) ] = SLM_DMND_WRITE|SLM_LLC_MISS, | ||
927 | }, | ||
928 | [ C(OP_PREFETCH) ] = { | ||
929 | [ C(RESULT_ACCESS) ] = SLM_DMND_PREFETCH|SLM_LLC_ACCESS, | ||
930 | [ C(RESULT_MISS) ] = SLM_DMND_PREFETCH|SLM_LLC_MISS, | ||
931 | }, | ||
932 | }, | ||
933 | }; | ||
934 | |||
935 | static __initconst const u64 slm_hw_cache_event_ids | ||
936 | [PERF_COUNT_HW_CACHE_MAX] | ||
937 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
938 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = | ||
939 | { | ||
940 | [ C(L1D) ] = { | ||
941 | [ C(OP_READ) ] = { | ||
942 | [ C(RESULT_ACCESS) ] = 0, | ||
943 | [ C(RESULT_MISS) ] = 0x0104, /* LD_DCU_MISS */ | ||
944 | }, | ||
945 | [ C(OP_WRITE) ] = { | ||
946 | [ C(RESULT_ACCESS) ] = 0, | ||
947 | [ C(RESULT_MISS) ] = 0, | ||
948 | }, | ||
949 | [ C(OP_PREFETCH) ] = { | ||
950 | [ C(RESULT_ACCESS) ] = 0, | ||
951 | [ C(RESULT_MISS) ] = 0, | ||
952 | }, | ||
953 | }, | ||
954 | [ C(L1I ) ] = { | ||
955 | [ C(OP_READ) ] = { | ||
956 | [ C(RESULT_ACCESS) ] = 0x0380, /* ICACHE.ACCESSES */ | ||
957 | [ C(RESULT_MISS) ] = 0x0280, /* ICACHE.MISSES */ | ||
958 | }, | ||
959 | [ C(OP_WRITE) ] = { | ||
960 | [ C(RESULT_ACCESS) ] = -1, | ||
961 | [ C(RESULT_MISS) ] = -1, | ||
962 | }, | ||
963 | [ C(OP_PREFETCH) ] = { | ||
964 | [ C(RESULT_ACCESS) ] = 0, | ||
965 | [ C(RESULT_MISS) ] = 0, | ||
966 | }, | ||
967 | }, | ||
968 | [ C(LL ) ] = { | ||
969 | [ C(OP_READ) ] = { | ||
970 | /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */ | ||
971 | [ C(RESULT_ACCESS) ] = 0x01b7, | ||
972 | /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */ | ||
973 | [ C(RESULT_MISS) ] = 0x01b7, | ||
974 | }, | ||
975 | [ C(OP_WRITE) ] = { | ||
976 | /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */ | ||
977 | [ C(RESULT_ACCESS) ] = 0x01b7, | ||
978 | /* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */ | ||
979 | [ C(RESULT_MISS) ] = 0x01b7, | ||
980 | }, | ||
981 | [ C(OP_PREFETCH) ] = { | ||
982 | /* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */ | ||
983 | [ C(RESULT_ACCESS) ] = 0x01b7, | ||
984 | /* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */ | ||
985 | [ C(RESULT_MISS) ] = 0x01b7, | ||
986 | }, | ||
987 | }, | ||
988 | [ C(DTLB) ] = { | ||
989 | [ C(OP_READ) ] = { | ||
990 | [ C(RESULT_ACCESS) ] = 0, | ||
991 | [ C(RESULT_MISS) ] = 0x0804, /* LD_DTLB_MISS */ | ||
992 | }, | ||
993 | [ C(OP_WRITE) ] = { | ||
994 | [ C(RESULT_ACCESS) ] = 0, | ||
995 | [ C(RESULT_MISS) ] = 0, | ||
996 | }, | ||
997 | [ C(OP_PREFETCH) ] = { | ||
998 | [ C(RESULT_ACCESS) ] = 0, | ||
999 | [ C(RESULT_MISS) ] = 0, | ||
1000 | }, | ||
1001 | }, | ||
1002 | [ C(ITLB) ] = { | ||
1003 | [ C(OP_READ) ] = { | ||
1004 | [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */ | ||
1005 | [ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */ | ||
1006 | }, | ||
1007 | [ C(OP_WRITE) ] = { | ||
1008 | [ C(RESULT_ACCESS) ] = -1, | ||
1009 | [ C(RESULT_MISS) ] = -1, | ||
1010 | }, | ||
1011 | [ C(OP_PREFETCH) ] = { | ||
1012 | [ C(RESULT_ACCESS) ] = -1, | ||
1013 | [ C(RESULT_MISS) ] = -1, | ||
1014 | }, | ||
1015 | }, | ||
1016 | [ C(BPU ) ] = { | ||
1017 | [ C(OP_READ) ] = { | ||
1018 | [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */ | ||
1019 | [ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */ | ||
1020 | }, | ||
1021 | [ C(OP_WRITE) ] = { | ||
1022 | [ C(RESULT_ACCESS) ] = -1, | ||
1023 | [ C(RESULT_MISS) ] = -1, | ||
1024 | }, | ||
1025 | [ C(OP_PREFETCH) ] = { | ||
1026 | [ C(RESULT_ACCESS) ] = -1, | ||
1027 | [ C(RESULT_MISS) ] = -1, | ||
1028 | }, | ||
1029 | }, | ||
1030 | }; | ||
1031 | |||
885 | static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event) | 1032 | static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event) |
886 | { | 1033 | { |
887 | /* user explicitly requested branch sampling */ | 1034 | /* user explicitly requested branch sampling */ |
@@ -1301,11 +1448,11 @@ static void intel_fixup_er(struct perf_event *event, int idx) | |||
1301 | 1448 | ||
1302 | if (idx == EXTRA_REG_RSP_0) { | 1449 | if (idx == EXTRA_REG_RSP_0) { |
1303 | event->hw.config &= ~INTEL_ARCH_EVENT_MASK; | 1450 | event->hw.config &= ~INTEL_ARCH_EVENT_MASK; |
1304 | event->hw.config |= 0x01b7; | 1451 | event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_0].event; |
1305 | event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0; | 1452 | event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0; |
1306 | } else if (idx == EXTRA_REG_RSP_1) { | 1453 | } else if (idx == EXTRA_REG_RSP_1) { |
1307 | event->hw.config &= ~INTEL_ARCH_EVENT_MASK; | 1454 | event->hw.config &= ~INTEL_ARCH_EVENT_MASK; |
1308 | event->hw.config |= 0x01bb; | 1455 | event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_1].event; |
1309 | event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1; | 1456 | event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1; |
1310 | } | 1457 | } |
1311 | } | 1458 | } |
@@ -2176,6 +2323,21 @@ __init int intel_pmu_init(void) | |||
2176 | pr_cont("Atom events, "); | 2323 | pr_cont("Atom events, "); |
2177 | break; | 2324 | break; |
2178 | 2325 | ||
2326 | case 55: /* Atom 22nm "Silvermont" */ | ||
2327 | memcpy(hw_cache_event_ids, slm_hw_cache_event_ids, | ||
2328 | sizeof(hw_cache_event_ids)); | ||
2329 | memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs, | ||
2330 | sizeof(hw_cache_extra_regs)); | ||
2331 | |||
2332 | intel_pmu_lbr_init_atom(); | ||
2333 | |||
2334 | x86_pmu.event_constraints = intel_slm_event_constraints; | ||
2335 | x86_pmu.pebs_constraints = intel_slm_pebs_event_constraints; | ||
2336 | x86_pmu.extra_regs = intel_slm_extra_regs; | ||
2337 | x86_pmu.er_flags |= ERF_HAS_RSP_1; | ||
2338 | pr_cont("Silvermont events, "); | ||
2339 | break; | ||
2340 | |||
2179 | case 37: /* 32 nm nehalem, "Clarkdale" */ | 2341 | case 37: /* 32 nm nehalem, "Clarkdale" */ |
2180 | case 44: /* 32 nm nehalem, "Gulftown" */ | 2342 | case 44: /* 32 nm nehalem, "Gulftown" */ |
2181 | case 47: /* 32 nm Xeon E7 */ | 2343 | case 47: /* 32 nm Xeon E7 */ |
@@ -2270,6 +2432,7 @@ __init int intel_pmu_init(void) | |||
2270 | case 70: | 2432 | case 70: |
2271 | case 71: | 2433 | case 71: |
2272 | case 63: | 2434 | case 63: |
2435 | case 69: | ||
2273 | x86_pmu.late_ack = true; | 2436 | x86_pmu.late_ack = true; |
2274 | memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids)); | 2437 | memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids)); |
2275 | memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); | 2438 | memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); |
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index 3065c57a63c1..63438aad177f 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c | |||
@@ -224,7 +224,7 @@ static int alloc_pebs_buffer(int cpu) | |||
224 | if (!x86_pmu.pebs) | 224 | if (!x86_pmu.pebs) |
225 | return 0; | 225 | return 0; |
226 | 226 | ||
227 | buffer = kmalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node); | 227 | buffer = kzalloc_node(PEBS_BUFFER_SIZE, GFP_KERNEL, node); |
228 | if (unlikely(!buffer)) | 228 | if (unlikely(!buffer)) |
229 | return -ENOMEM; | 229 | return -ENOMEM; |
230 | 230 | ||
@@ -262,7 +262,7 @@ static int alloc_bts_buffer(int cpu) | |||
262 | if (!x86_pmu.bts) | 262 | if (!x86_pmu.bts) |
263 | return 0; | 263 | return 0; |
264 | 264 | ||
265 | buffer = kmalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_ZERO, node); | 265 | buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL, node); |
266 | if (unlikely(!buffer)) | 266 | if (unlikely(!buffer)) |
267 | return -ENOMEM; | 267 | return -ENOMEM; |
268 | 268 | ||
@@ -295,7 +295,7 @@ static int alloc_ds_buffer(int cpu) | |||
295 | int node = cpu_to_node(cpu); | 295 | int node = cpu_to_node(cpu); |
296 | struct debug_store *ds; | 296 | struct debug_store *ds; |
297 | 297 | ||
298 | ds = kmalloc_node(sizeof(*ds), GFP_KERNEL | __GFP_ZERO, node); | 298 | ds = kzalloc_node(sizeof(*ds), GFP_KERNEL, node); |
299 | if (unlikely(!ds)) | 299 | if (unlikely(!ds)) |
300 | return -ENOMEM; | 300 | return -ENOMEM; |
301 | 301 | ||
@@ -517,6 +517,32 @@ struct event_constraint intel_atom_pebs_event_constraints[] = { | |||
517 | EVENT_CONSTRAINT_END | 517 | EVENT_CONSTRAINT_END |
518 | }; | 518 | }; |
519 | 519 | ||
520 | struct event_constraint intel_slm_pebs_event_constraints[] = { | ||
521 | INTEL_UEVENT_CONSTRAINT(0x0103, 0x1), /* REHABQ.LD_BLOCK_ST_FORWARD_PS */ | ||
522 | INTEL_UEVENT_CONSTRAINT(0x0803, 0x1), /* REHABQ.LD_SPLITS_PS */ | ||
523 | INTEL_UEVENT_CONSTRAINT(0x0204, 0x1), /* MEM_UOPS_RETIRED.L2_HIT_LOADS_PS */ | ||
524 | INTEL_UEVENT_CONSTRAINT(0x0404, 0x1), /* MEM_UOPS_RETIRED.L2_MISS_LOADS_PS */ | ||
525 | INTEL_UEVENT_CONSTRAINT(0x0804, 0x1), /* MEM_UOPS_RETIRED.DTLB_MISS_LOADS_PS */ | ||
526 | INTEL_UEVENT_CONSTRAINT(0x2004, 0x1), /* MEM_UOPS_RETIRED.HITM_PS */ | ||
527 | INTEL_UEVENT_CONSTRAINT(0x00c0, 0x1), /* INST_RETIRED.ANY_PS */ | ||
528 | INTEL_UEVENT_CONSTRAINT(0x00c4, 0x1), /* BR_INST_RETIRED.ALL_BRANCHES_PS */ | ||
529 | INTEL_UEVENT_CONSTRAINT(0x7ec4, 0x1), /* BR_INST_RETIRED.JCC_PS */ | ||
530 | INTEL_UEVENT_CONSTRAINT(0xbfc4, 0x1), /* BR_INST_RETIRED.FAR_BRANCH_PS */ | ||
531 | INTEL_UEVENT_CONSTRAINT(0xebc4, 0x1), /* BR_INST_RETIRED.NON_RETURN_IND_PS */ | ||
532 | INTEL_UEVENT_CONSTRAINT(0xf7c4, 0x1), /* BR_INST_RETIRED.RETURN_PS */ | ||
533 | INTEL_UEVENT_CONSTRAINT(0xf9c4, 0x1), /* BR_INST_RETIRED.CALL_PS */ | ||
534 | INTEL_UEVENT_CONSTRAINT(0xfbc4, 0x1), /* BR_INST_RETIRED.IND_CALL_PS */ | ||
535 | INTEL_UEVENT_CONSTRAINT(0xfdc4, 0x1), /* BR_INST_RETIRED.REL_CALL_PS */ | ||
536 | INTEL_UEVENT_CONSTRAINT(0xfec4, 0x1), /* BR_INST_RETIRED.TAKEN_JCC_PS */ | ||
537 | INTEL_UEVENT_CONSTRAINT(0x00c5, 0x1), /* BR_INST_MISP_RETIRED.ALL_BRANCHES_PS */ | ||
538 | INTEL_UEVENT_CONSTRAINT(0x7ec5, 0x1), /* BR_INST_MISP_RETIRED.JCC_PS */ | ||
539 | INTEL_UEVENT_CONSTRAINT(0xebc5, 0x1), /* BR_INST_MISP_RETIRED.NON_RETURN_IND_PS */ | ||
540 | INTEL_UEVENT_CONSTRAINT(0xf7c5, 0x1), /* BR_INST_MISP_RETIRED.RETURN_PS */ | ||
541 | INTEL_UEVENT_CONSTRAINT(0xfbc5, 0x1), /* BR_INST_MISP_RETIRED.IND_CALL_PS */ | ||
542 | INTEL_UEVENT_CONSTRAINT(0xfec5, 0x1), /* BR_INST_MISP_RETIRED.TAKEN_JCC_PS */ | ||
543 | EVENT_CONSTRAINT_END | ||
544 | }; | ||
545 | |||
520 | struct event_constraint intel_nehalem_pebs_event_constraints[] = { | 546 | struct event_constraint intel_nehalem_pebs_event_constraints[] = { |
521 | INTEL_PLD_CONSTRAINT(0x100b, 0xf), /* MEM_INST_RETIRED.* */ | 547 | INTEL_PLD_CONSTRAINT(0x100b, 0xf), /* MEM_INST_RETIRED.* */ |
522 | INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */ | 548 | INTEL_EVENT_CONSTRAINT(0x0f, 0xf), /* MEM_UNCORE_RETIRED.* */ |
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c index cad791dbde95..fd8011ed4dcd 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c | |||
@@ -6,6 +6,8 @@ static struct intel_uncore_type **pci_uncores = empty_uncore; | |||
6 | /* pci bus to socket mapping */ | 6 | /* pci bus to socket mapping */ |
7 | static int pcibus_to_physid[256] = { [0 ... 255] = -1, }; | 7 | static int pcibus_to_physid[256] = { [0 ... 255] = -1, }; |
8 | 8 | ||
9 | static struct pci_dev *extra_pci_dev[UNCORE_SOCKET_MAX][UNCORE_EXTRA_PCI_DEV_MAX]; | ||
10 | |||
9 | static DEFINE_RAW_SPINLOCK(uncore_box_lock); | 11 | static DEFINE_RAW_SPINLOCK(uncore_box_lock); |
10 | 12 | ||
11 | /* mask of cpus that collect uncore events */ | 13 | /* mask of cpus that collect uncore events */ |
@@ -45,6 +47,24 @@ DEFINE_UNCORE_FORMAT_ATTR(filter_band0, filter_band0, "config1:0-7"); | |||
45 | DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15"); | 47 | DEFINE_UNCORE_FORMAT_ATTR(filter_band1, filter_band1, "config1:8-15"); |
46 | DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23"); | 48 | DEFINE_UNCORE_FORMAT_ATTR(filter_band2, filter_band2, "config1:16-23"); |
47 | DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31"); | 49 | DEFINE_UNCORE_FORMAT_ATTR(filter_band3, filter_band3, "config1:24-31"); |
50 | DEFINE_UNCORE_FORMAT_ATTR(match_rds, match_rds, "config1:48-51"); | ||
51 | DEFINE_UNCORE_FORMAT_ATTR(match_rnid30, match_rnid30, "config1:32-35"); | ||
52 | DEFINE_UNCORE_FORMAT_ATTR(match_rnid4, match_rnid4, "config1:31"); | ||
53 | DEFINE_UNCORE_FORMAT_ATTR(match_dnid, match_dnid, "config1:13-17"); | ||
54 | DEFINE_UNCORE_FORMAT_ATTR(match_mc, match_mc, "config1:9-12"); | ||
55 | DEFINE_UNCORE_FORMAT_ATTR(match_opc, match_opc, "config1:5-8"); | ||
56 | DEFINE_UNCORE_FORMAT_ATTR(match_vnw, match_vnw, "config1:3-4"); | ||
57 | DEFINE_UNCORE_FORMAT_ATTR(match0, match0, "config1:0-31"); | ||
58 | DEFINE_UNCORE_FORMAT_ATTR(match1, match1, "config1:32-63"); | ||
59 | DEFINE_UNCORE_FORMAT_ATTR(mask_rds, mask_rds, "config2:48-51"); | ||
60 | DEFINE_UNCORE_FORMAT_ATTR(mask_rnid30, mask_rnid30, "config2:32-35"); | ||
61 | DEFINE_UNCORE_FORMAT_ATTR(mask_rnid4, mask_rnid4, "config2:31"); | ||
62 | DEFINE_UNCORE_FORMAT_ATTR(mask_dnid, mask_dnid, "config2:13-17"); | ||
63 | DEFINE_UNCORE_FORMAT_ATTR(mask_mc, mask_mc, "config2:9-12"); | ||
64 | DEFINE_UNCORE_FORMAT_ATTR(mask_opc, mask_opc, "config2:5-8"); | ||
65 | DEFINE_UNCORE_FORMAT_ATTR(mask_vnw, mask_vnw, "config2:3-4"); | ||
66 | DEFINE_UNCORE_FORMAT_ATTR(mask0, mask0, "config2:0-31"); | ||
67 | DEFINE_UNCORE_FORMAT_ATTR(mask1, mask1, "config2:32-63"); | ||
48 | 68 | ||
49 | static u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event) | 69 | static u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event) |
50 | { | 70 | { |
@@ -281,7 +301,7 @@ static struct attribute *snbep_uncore_cbox_formats_attr[] = { | |||
281 | }; | 301 | }; |
282 | 302 | ||
283 | static struct attribute *snbep_uncore_pcu_formats_attr[] = { | 303 | static struct attribute *snbep_uncore_pcu_formats_attr[] = { |
284 | &format_attr_event.attr, | 304 | &format_attr_event_ext.attr, |
285 | &format_attr_occ_sel.attr, | 305 | &format_attr_occ_sel.attr, |
286 | &format_attr_edge.attr, | 306 | &format_attr_edge.attr, |
287 | &format_attr_inv.attr, | 307 | &format_attr_inv.attr, |
@@ -301,6 +321,24 @@ static struct attribute *snbep_uncore_qpi_formats_attr[] = { | |||
301 | &format_attr_edge.attr, | 321 | &format_attr_edge.attr, |
302 | &format_attr_inv.attr, | 322 | &format_attr_inv.attr, |
303 | &format_attr_thresh8.attr, | 323 | &format_attr_thresh8.attr, |
324 | &format_attr_match_rds.attr, | ||
325 | &format_attr_match_rnid30.attr, | ||
326 | &format_attr_match_rnid4.attr, | ||
327 | &format_attr_match_dnid.attr, | ||
328 | &format_attr_match_mc.attr, | ||
329 | &format_attr_match_opc.attr, | ||
330 | &format_attr_match_vnw.attr, | ||
331 | &format_attr_match0.attr, | ||
332 | &format_attr_match1.attr, | ||
333 | &format_attr_mask_rds.attr, | ||
334 | &format_attr_mask_rnid30.attr, | ||
335 | &format_attr_mask_rnid4.attr, | ||
336 | &format_attr_mask_dnid.attr, | ||
337 | &format_attr_mask_mc.attr, | ||
338 | &format_attr_mask_opc.attr, | ||
339 | &format_attr_mask_vnw.attr, | ||
340 | &format_attr_mask0.attr, | ||
341 | &format_attr_mask1.attr, | ||
304 | NULL, | 342 | NULL, |
305 | }; | 343 | }; |
306 | 344 | ||
@@ -314,8 +352,8 @@ static struct uncore_event_desc snbep_uncore_imc_events[] = { | |||
314 | static struct uncore_event_desc snbep_uncore_qpi_events[] = { | 352 | static struct uncore_event_desc snbep_uncore_qpi_events[] = { |
315 | INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"), | 353 | INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"), |
316 | INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"), | 354 | INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"), |
317 | INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x02,umask=0x08"), | 355 | INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x102,umask=0x08"), |
318 | INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x03,umask=0x04"), | 356 | INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x103,umask=0x04"), |
319 | { /* end: all zeroes */ }, | 357 | { /* end: all zeroes */ }, |
320 | }; | 358 | }; |
321 | 359 | ||
@@ -356,13 +394,16 @@ static struct intel_uncore_ops snbep_uncore_msr_ops = { | |||
356 | SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), | 394 | SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), |
357 | }; | 395 | }; |
358 | 396 | ||
397 | #define SNBEP_UNCORE_PCI_OPS_COMMON_INIT() \ | ||
398 | .init_box = snbep_uncore_pci_init_box, \ | ||
399 | .disable_box = snbep_uncore_pci_disable_box, \ | ||
400 | .enable_box = snbep_uncore_pci_enable_box, \ | ||
401 | .disable_event = snbep_uncore_pci_disable_event, \ | ||
402 | .read_counter = snbep_uncore_pci_read_counter | ||
403 | |||
359 | static struct intel_uncore_ops snbep_uncore_pci_ops = { | 404 | static struct intel_uncore_ops snbep_uncore_pci_ops = { |
360 | .init_box = snbep_uncore_pci_init_box, | 405 | SNBEP_UNCORE_PCI_OPS_COMMON_INIT(), |
362 | .disable_box = snbep_uncore_pci_disable_box, | 406 | .enable_event = snbep_uncore_pci_enable_event, |
362 | .enable_box = snbep_uncore_pci_enable_box, | ||
363 | .disable_event = snbep_uncore_pci_disable_event, | ||
364 | .enable_event = snbep_uncore_pci_enable_event, | ||
365 | .read_counter = snbep_uncore_pci_read_counter, | ||
366 | }; | 407 | }; |
367 | 408 | ||
368 | static struct event_constraint snbep_uncore_cbox_constraints[] = { | 409 | static struct event_constraint snbep_uncore_cbox_constraints[] = { |
@@ -726,6 +767,61 @@ static struct intel_uncore_type *snbep_msr_uncores[] = { | |||
726 | NULL, | 767 | NULL, |
727 | }; | 768 | }; |
728 | 769 | ||
770 | enum { | ||
771 | SNBEP_PCI_QPI_PORT0_FILTER, | ||
772 | SNBEP_PCI_QPI_PORT1_FILTER, | ||
773 | }; | ||
774 | |||
775 | static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event) | ||
776 | { | ||
777 | struct hw_perf_event *hwc = &event->hw; | ||
778 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
779 | struct hw_perf_event_extra *reg2 = &hwc->branch_reg; | ||
780 | |||
781 | if ((hwc->config & SNBEP_PMON_CTL_EV_SEL_MASK) == 0x38) { | ||
782 | reg1->idx = 0; | ||
783 | reg1->reg = SNBEP_Q_Py_PCI_PMON_PKT_MATCH0; | ||
784 | reg1->config = event->attr.config1; | ||
785 | reg2->reg = SNBEP_Q_Py_PCI_PMON_PKT_MASK0; | ||
786 | reg2->config = event->attr.config2; | ||
787 | } | ||
788 | return 0; | ||
789 | } | ||
790 | |||
791 | static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event) | ||
792 | { | ||
793 | struct pci_dev *pdev = box->pci_dev; | ||
794 | struct hw_perf_event *hwc = &event->hw; | ||
795 | struct hw_perf_event_extra *reg1 = &hwc->extra_reg; | ||
796 | struct hw_perf_event_extra *reg2 = &hwc->branch_reg; | ||
797 | |||
798 | if (reg1->idx != EXTRA_REG_NONE) { | ||
799 | int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER; | ||
800 | struct pci_dev *filter_pdev = extra_pci_dev[box->phys_id][idx]; | ||
801 | WARN_ON_ONCE(!filter_pdev); | ||
802 | if (filter_pdev) { | ||
803 | pci_write_config_dword(filter_pdev, reg1->reg, | ||
804 | (u32)reg1->config); | ||
805 | pci_write_config_dword(filter_pdev, reg1->reg + 4, | ||
806 | (u32)(reg1->config >> 32)); | ||
807 | pci_write_config_dword(filter_pdev, reg2->reg, | ||
808 | (u32)reg2->config); | ||
809 | pci_write_config_dword(filter_pdev, reg2->reg + 4, | ||
810 | (u32)(reg2->config >> 32)); | ||
811 | } | ||
812 | } | ||
813 | |||
814 | pci_write_config_dword(pdev, hwc->config_base, hwc->config | SNBEP_PMON_CTL_EN); | ||
815 | } | ||
816 | |||
817 | static struct intel_uncore_ops snbep_uncore_qpi_ops = { | ||
818 | SNBEP_UNCORE_PCI_OPS_COMMON_INIT(), | ||
819 | .enable_event = snbep_qpi_enable_event, | ||
820 | .hw_config = snbep_qpi_hw_config, | ||
821 | .get_constraint = uncore_get_constraint, | ||
822 | .put_constraint = uncore_put_constraint, | ||
823 | }; | ||
824 | |||
729 | #define SNBEP_UNCORE_PCI_COMMON_INIT() \ | 825 | #define SNBEP_UNCORE_PCI_COMMON_INIT() \ |
730 | .perf_ctr = SNBEP_PCI_PMON_CTR0, \ | 826 | .perf_ctr = SNBEP_PCI_PMON_CTR0, \ |
731 | .event_ctl = SNBEP_PCI_PMON_CTL0, \ | 827 | .event_ctl = SNBEP_PCI_PMON_CTL0, \ |
@@ -755,17 +851,18 @@ static struct intel_uncore_type snbep_uncore_imc = { | |||
755 | }; | 851 | }; |
756 | 852 | ||
757 | static struct intel_uncore_type snbep_uncore_qpi = { | 853 | static struct intel_uncore_type snbep_uncore_qpi = { |
758 | .name = "qpi", | 854 | .name = "qpi", |
759 | .num_counters = 4, | 855 | .num_counters = 4, |
760 | .num_boxes = 2, | 856 | .num_boxes = 2, |
761 | .perf_ctr_bits = 48, | 857 | .perf_ctr_bits = 48, |
762 | .perf_ctr = SNBEP_PCI_PMON_CTR0, | 858 | .perf_ctr = SNBEP_PCI_PMON_CTR0, |
763 | .event_ctl = SNBEP_PCI_PMON_CTL0, | 859 | .event_ctl = SNBEP_PCI_PMON_CTL0, |
764 | .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK, | 860 | .event_mask = SNBEP_QPI_PCI_PMON_RAW_EVENT_MASK, |
765 | .box_ctl = SNBEP_PCI_PMON_BOX_CTL, | 861 | .box_ctl = SNBEP_PCI_PMON_BOX_CTL, |
766 | .ops = &snbep_uncore_pci_ops, | 862 | .num_shared_regs = 1, |
767 | .event_descs = snbep_uncore_qpi_events, | 863 | .ops = &snbep_uncore_qpi_ops, |
768 | .format_group = &snbep_uncore_qpi_format_group, | 864 | .event_descs = snbep_uncore_qpi_events, |
865 | .format_group = &snbep_uncore_qpi_format_group, | ||
769 | }; | 866 | }; |
770 | 867 | ||
771 | 868 | ||
@@ -807,43 +904,53 @@ static struct intel_uncore_type *snbep_pci_uncores[] = { | |||
807 | static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = { | 904 | static DEFINE_PCI_DEVICE_TABLE(snbep_uncore_pci_ids) = { |
808 | { /* Home Agent */ | 905 | { /* Home Agent */ |
809 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA), | 906 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_HA), |
810 | .driver_data = SNBEP_PCI_UNCORE_HA, | 907 | .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_HA, 0), |
811 | }, | 908 | }, |
812 | { /* MC Channel 0 */ | 909 | { /* MC Channel 0 */ |
813 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0), | 910 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC0), |
814 | .driver_data = SNBEP_PCI_UNCORE_IMC, | 911 | .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 0), |
815 | }, | 912 | }, |
816 | { /* MC Channel 1 */ | 913 | { /* MC Channel 1 */ |
817 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1), | 914 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC1), |
818 | .driver_data = SNBEP_PCI_UNCORE_IMC, | 915 | .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 1), |
819 | }, | 916 | }, |
820 | { /* MC Channel 2 */ | 917 | { /* MC Channel 2 */ |
821 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2), | 918 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC2), |
822 | .driver_data = SNBEP_PCI_UNCORE_IMC, | 919 | .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 2), |
823 | }, | 920 | }, |
824 | { /* MC Channel 3 */ | 921 | { /* MC Channel 3 */ |
825 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3), | 922 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_IMC3), |
826 | .driver_data = SNBEP_PCI_UNCORE_IMC, | 923 | .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_IMC, 3), |
827 | }, | 924 | }, |
828 | { /* QPI Port 0 */ | 925 | { /* QPI Port 0 */ |
829 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0), | 926 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI0), |
830 | .driver_data = SNBEP_PCI_UNCORE_QPI, | 927 | .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 0), |
831 | }, | 928 | }, |
832 | { /* QPI Port 1 */ | 929 | { /* QPI Port 1 */ |
833 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1), | 930 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_QPI1), |
834 | .driver_data = SNBEP_PCI_UNCORE_QPI, | 931 | .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_QPI, 1), |
835 | }, | 932 | }, |
836 | { /* R2PCIe */ | 933 | { /* R2PCIe */ |
837 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE), | 934 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R2PCIE), |
838 | .driver_data = SNBEP_PCI_UNCORE_R2PCIE, | 935 | .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R2PCIE, 0), |
839 | }, | 936 | }, |
840 | { /* R3QPI Link 0 */ | 937 | { /* R3QPI Link 0 */ |
841 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0), | 938 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI0), |
842 | .driver_data = SNBEP_PCI_UNCORE_R3QPI, | 939 | .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 0), |
843 | }, | 940 | }, |
844 | { /* R3QPI Link 1 */ | 941 | { /* R3QPI Link 1 */ |
845 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1), | 942 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_UNC_R3QPI1), |
846 | .driver_data = SNBEP_PCI_UNCORE_R3QPI, | 943 | .driver_data = UNCORE_PCI_DEV_DATA(SNBEP_PCI_UNCORE_R3QPI, 1), |
944 | }, | ||
945 | { /* QPI Port 0 filter */ | ||
946 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c86), | ||
947 | .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, | ||
948 | SNBEP_PCI_QPI_PORT0_FILTER), | ||
949 | }, | ||
950 | { /* QPI Port 0 filter */ | ||
951 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3c96), | ||
952 | .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, | ||
953 | SNBEP_PCI_QPI_PORT1_FILTER), | ||
847 | }, | 954 | }, |
848 | { /* end: all zeroes */ } | 955 | { /* end: all zeroes */ } |
849 | }; | 956 | }; |
@@ -1256,71 +1363,71 @@ static struct intel_uncore_type *ivt_pci_uncores[] = { | |||
1256 | static DEFINE_PCI_DEVICE_TABLE(ivt_uncore_pci_ids) = { | 1363 | static DEFINE_PCI_DEVICE_TABLE(ivt_uncore_pci_ids) = { |
1257 | { /* Home Agent 0 */ | 1364 | { /* Home Agent 0 */ |
1258 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30), | 1365 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe30), |
1259 | .driver_data = IVT_PCI_UNCORE_HA, | 1366 | .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 0), |
1260 | }, | 1367 | }, |
1261 | { /* Home Agent 1 */ | 1368 | { /* Home Agent 1 */ |
1262 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38), | 1369 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe38), |
1263 | .driver_data = IVT_PCI_UNCORE_HA, | 1370 | .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_HA, 1), |
1264 | }, | 1371 | }, |
1265 | { /* MC0 Channel 0 */ | 1372 | { /* MC0 Channel 0 */ |
1266 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4), | 1373 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb4), |
1267 | .driver_data = IVT_PCI_UNCORE_IMC, | 1374 | .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 0), |
1268 | }, | 1375 | }, |
1269 | { /* MC0 Channel 1 */ | 1376 | { /* MC0 Channel 1 */ |
1270 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5), | 1377 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb5), |
1271 | .driver_data = IVT_PCI_UNCORE_IMC, | 1378 | .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 1), |
1272 | }, | 1379 | }, |
1273 | { /* MC0 Channel 3 */ | 1380 | { /* MC0 Channel 3 */ |
1274 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0), | 1381 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb0), |
1275 | .driver_data = IVT_PCI_UNCORE_IMC, | 1382 | .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 2), |
1276 | }, | 1383 | }, |
1277 | { /* MC0 Channel 4 */ | 1384 | { /* MC0 Channel 4 */ |
1278 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1), | 1385 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xeb1), |
1279 | .driver_data = IVT_PCI_UNCORE_IMC, | 1386 | .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 3), |
1280 | }, | 1387 | }, |
1281 | { /* MC1 Channel 0 */ | 1388 | { /* MC1 Channel 0 */ |
1282 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4), | 1389 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef4), |
1283 | .driver_data = IVT_PCI_UNCORE_IMC, | 1390 | .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 4), |
1284 | }, | 1391 | }, |
1285 | { /* MC1 Channel 1 */ | 1392 | { /* MC1 Channel 1 */ |
1286 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5), | 1393 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef5), |
1287 | .driver_data = IVT_PCI_UNCORE_IMC, | 1394 | .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 5), |
1288 | }, | 1395 | }, |
1289 | { /* MC1 Channel 3 */ | 1396 | { /* MC1 Channel 3 */ |
1290 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0), | 1397 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef0), |
1291 | .driver_data = IVT_PCI_UNCORE_IMC, | 1398 | .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 6), |
1292 | }, | 1399 | }, |
1293 | { /* MC1 Channel 4 */ | 1400 | { /* MC1 Channel 4 */ |
1294 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1), | 1401 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xef1), |
1295 | .driver_data = IVT_PCI_UNCORE_IMC, | 1402 | .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_IMC, 7), |
1296 | }, | 1403 | }, |
1297 | { /* QPI0 Port 0 */ | 1404 | { /* QPI0 Port 0 */ |
1298 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32), | 1405 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe32), |
1299 | .driver_data = IVT_PCI_UNCORE_QPI, | 1406 | .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 0), |
1300 | }, | 1407 | }, |
1301 | { /* QPI0 Port 1 */ | 1408 | { /* QPI0 Port 1 */ |
1302 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33), | 1409 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe33), |
1303 | .driver_data = IVT_PCI_UNCORE_QPI, | 1410 | .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 1), |
1304 | }, | 1411 | }, |
1305 | { /* QPI1 Port 2 */ | 1412 | { /* QPI1 Port 2 */ |
1306 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a), | 1413 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3a), |
1307 | .driver_data = IVT_PCI_UNCORE_QPI, | 1414 | .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_QPI, 2), |
1308 | }, | 1415 | }, |
1309 | { /* R2PCIe */ | 1416 | { /* R2PCIe */ |
1310 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34), | 1417 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe34), |
1311 | .driver_data = IVT_PCI_UNCORE_R2PCIE, | 1418 | .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R2PCIE, 0), |
1312 | }, | 1419 | }, |
1313 | { /* R3QPI0 Link 0 */ | 1420 | { /* R3QPI0 Link 0 */ |
1314 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36), | 1421 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe36), |
1315 | .driver_data = IVT_PCI_UNCORE_R3QPI, | 1422 | .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 0), |
1316 | }, | 1423 | }, |
1317 | { /* R3QPI0 Link 1 */ | 1424 | { /* R3QPI0 Link 1 */ |
1318 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37), | 1425 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe37), |
1319 | .driver_data = IVT_PCI_UNCORE_R3QPI, | 1426 | .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 1), |
1320 | }, | 1427 | }, |
1321 | { /* R3QPI1 Link 2 */ | 1428 | { /* R3QPI1 Link 2 */ |
1322 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e), | 1429 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0xe3e), |
1323 | .driver_data = IVT_PCI_UNCORE_R3QPI, | 1430 | .driver_data = UNCORE_PCI_DEV_DATA(IVT_PCI_UNCORE_R3QPI, 2), |
1324 | }, | 1431 | }, |
1325 | { /* end: all zeroes */ } | 1432 | { /* end: all zeroes */ } |
1326 | }; | 1433 | }; |
@@ -2606,7 +2713,7 @@ struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, int cp | |||
2606 | 2713 | ||
2607 | size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg); | 2714 | size = sizeof(*box) + type->num_shared_regs * sizeof(struct intel_uncore_extra_reg); |
2608 | 2715 | ||
2609 | box = kmalloc_node(size, GFP_KERNEL | __GFP_ZERO, cpu_to_node(cpu)); | 2716 | box = kzalloc_node(size, GFP_KERNEL, cpu_to_node(cpu)); |
2610 | if (!box) | 2717 | if (!box) |
2611 | return NULL; | 2718 | return NULL; |
2612 | 2719 | ||
@@ -3167,16 +3274,24 @@ static bool pcidrv_registered; | |||
3167 | /* | 3274 | /* |
3168 | * add a pci uncore device | 3275 | * add a pci uncore device |
3169 | */ | 3276 | */ |
3170 | static int uncore_pci_add(struct intel_uncore_type *type, struct pci_dev *pdev) | 3277 | static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) |
3171 | { | 3278 | { |
3172 | struct intel_uncore_pmu *pmu; | 3279 | struct intel_uncore_pmu *pmu; |
3173 | struct intel_uncore_box *box; | 3280 | struct intel_uncore_box *box; |
3174 | int i, phys_id; | 3281 | struct intel_uncore_type *type; |
3282 | int phys_id; | ||
3175 | 3283 | ||
3176 | phys_id = pcibus_to_physid[pdev->bus->number]; | 3284 | phys_id = pcibus_to_physid[pdev->bus->number]; |
3177 | if (phys_id < 0) | 3285 | if (phys_id < 0) |
3178 | return -ENODEV; | 3286 | return -ENODEV; |
3179 | 3287 | ||
3288 | if (UNCORE_PCI_DEV_TYPE(id->driver_data) == UNCORE_EXTRA_PCI_DEV) { | ||
3289 | extra_pci_dev[phys_id][UNCORE_PCI_DEV_IDX(id->driver_data)] = pdev; | ||
3290 | pci_set_drvdata(pdev, NULL); | ||
3291 | return 0; | ||
3292 | } | ||
3293 | |||
3294 | type = pci_uncores[UNCORE_PCI_DEV_TYPE(id->driver_data)]; | ||
3180 | box = uncore_alloc_box(type, 0); | 3295 | box = uncore_alloc_box(type, 0); |
3181 | if (!box) | 3296 | if (!box) |
3182 | return -ENOMEM; | 3297 | return -ENOMEM; |
@@ -3185,21 +3300,11 @@ static int uncore_pci_add(struct intel_uncore_type *type, struct pci_dev *pdev) | |||
3185 | * for performance monitoring unit with multiple boxes, | 3300 | * for performance monitoring unit with multiple boxes, |
3186 | * each box has a different function id. | 3301 | * each box has a different function id. |
3187 | */ | 3302 | */ |
3188 | for (i = 0; i < type->num_boxes; i++) { | 3303 | pmu = &type->pmus[UNCORE_PCI_DEV_IDX(id->driver_data)]; |
3189 | pmu = &type->pmus[i]; | 3304 | if (pmu->func_id < 0) |
3190 | if (pmu->func_id == pdev->devfn) | 3305 | pmu->func_id = pdev->devfn; |
3191 | break; | 3306 | else |
3192 | if (pmu->func_id < 0) { | 3307 | WARN_ON_ONCE(pmu->func_id != pdev->devfn); |
3193 | pmu->func_id = pdev->devfn; | ||
3194 | break; | ||
3195 | } | ||
3196 | pmu = NULL; | ||
3197 | } | ||
3198 | |||
3199 | if (!pmu) { | ||
3200 | kfree(box); | ||
3201 | return -EINVAL; | ||
3202 | } | ||
3203 | 3308 | ||
3204 | box->phys_id = phys_id; | 3309 | box->phys_id = phys_id; |
3205 | box->pci_dev = pdev; | 3310 | box->pci_dev = pdev; |
@@ -3217,9 +3322,22 @@ static int uncore_pci_add(struct intel_uncore_type *type, struct pci_dev *pdev) | |||
3217 | static void uncore_pci_remove(struct pci_dev *pdev) | 3322 | static void uncore_pci_remove(struct pci_dev *pdev) |
3218 | { | 3323 | { |
3219 | struct intel_uncore_box *box = pci_get_drvdata(pdev); | 3324 | struct intel_uncore_box *box = pci_get_drvdata(pdev); |
3220 | struct intel_uncore_pmu *pmu = box->pmu; | 3325 | struct intel_uncore_pmu *pmu; |
3221 | int cpu, phys_id = pcibus_to_physid[pdev->bus->number]; | 3326 | int i, cpu, phys_id = pcibus_to_physid[pdev->bus->number]; |
3222 | 3327 | ||
3328 | box = pci_get_drvdata(pdev); | ||
3329 | if (!box) { | ||
3330 | for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) { | ||
3331 | if (extra_pci_dev[phys_id][i] == pdev) { | ||
3332 | extra_pci_dev[phys_id][i] = NULL; | ||
3333 | break; | ||
3334 | } | ||
3335 | } | ||
3336 | WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX); | ||
3337 | return; | ||
3338 | } | ||
3339 | |||
3340 | pmu = box->pmu; | ||
3223 | if (WARN_ON_ONCE(phys_id != box->phys_id)) | 3341 | if (WARN_ON_ONCE(phys_id != box->phys_id)) |
3224 | return; | 3342 | return; |
3225 | 3343 | ||
@@ -3240,12 +3358,6 @@ static void uncore_pci_remove(struct pci_dev *pdev) | |||
3240 | kfree(box); | 3358 | kfree(box); |
3241 | } | 3359 | } |
3242 | 3360 | ||
3243 | static int uncore_pci_probe(struct pci_dev *pdev, | ||
3244 | const struct pci_device_id *id) | ||
3245 | { | ||
3246 | return uncore_pci_add(pci_uncores[id->driver_data], pdev); | ||
3247 | } | ||
3248 | |||
3249 | static int __init uncore_pci_init(void) | 3361 | static int __init uncore_pci_init(void) |
3250 | { | 3362 | { |
3251 | int ret; | 3363 | int ret; |
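
The snbep_qpi_enable_event() change above programs the companion QPI filter device by splitting each 64-bit config1/config2 value into two 32-bit PCI config writes at reg and reg + 4. A minimal userspace sketch of that split follows; the register offset and match value are invented for illustration and are not taken from the hardware documentation.

    #include <stdio.h>

    /* Stand-in for pci_write_config_dword(); just prints what would be written. */
    static void write_cfg_dword(unsigned reg, unsigned val)
    {
    	printf("write dword 0x%08x to config offset 0x%x\n", val, reg);
    }

    int main(void)
    {
    	unsigned long long match = 0x0000001200000034ULL;	/* assumed config1 value */
    	unsigned reg = 0x228;					/* assumed PKT_MATCH0 offset */

    	write_cfg_dword(reg, (unsigned)match);			/* low 32 bits */
    	write_cfg_dword(reg + 4, (unsigned)(match >> 32));	/* high 32 bits */
    	return 0;
    }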
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h index 47b3d00c9d89..a80ab71a883d 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h | |||
@@ -12,6 +12,15 @@ | |||
12 | #define UNCORE_PMC_IDX_FIXED UNCORE_PMC_IDX_MAX_GENERIC | 12 | #define UNCORE_PMC_IDX_FIXED UNCORE_PMC_IDX_MAX_GENERIC |
13 | #define UNCORE_PMC_IDX_MAX (UNCORE_PMC_IDX_FIXED + 1) | 13 | #define UNCORE_PMC_IDX_MAX (UNCORE_PMC_IDX_FIXED + 1) |
14 | 14 | ||
15 | #define UNCORE_PCI_DEV_DATA(type, idx) ((type << 8) | idx) | ||
16 | #define UNCORE_PCI_DEV_TYPE(data) ((data >> 8) & 0xff) | ||
17 | #define UNCORE_PCI_DEV_IDX(data) (data & 0xff) | ||
18 | #define UNCORE_EXTRA_PCI_DEV 0xff | ||
19 | #define UNCORE_EXTRA_PCI_DEV_MAX 2 | ||
20 | |||
21 | /* support up to 8 sockets */ | ||
22 | #define UNCORE_SOCKET_MAX 8 | ||
23 | |||
15 | #define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff) | 24 | #define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff) |
16 | 25 | ||
17 | /* SNB event control */ | 26 | /* SNB event control */ |
@@ -108,6 +117,7 @@ | |||
108 | (SNBEP_PMON_CTL_EV_SEL_MASK | \ | 117 | (SNBEP_PMON_CTL_EV_SEL_MASK | \ |
109 | SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \ | 118 | SNBEP_PCU_MSR_PMON_CTL_OCC_SEL_MASK | \ |
110 | SNBEP_PMON_CTL_EDGE_DET | \ | 119 | SNBEP_PMON_CTL_EDGE_DET | \ |
120 | SNBEP_PMON_CTL_EV_SEL_EXT | \ | ||
111 | SNBEP_PMON_CTL_INVERT | \ | 121 | SNBEP_PMON_CTL_INVERT | \ |
112 | SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \ | 122 | SNBEP_PCU_MSR_PMON_CTL_TRESH_MASK | \ |
113 | SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \ | 123 | SNBEP_PCU_MSR_PMON_CTL_OCC_INVERT | \ |
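
The UNCORE_PCI_DEV_DATA/_TYPE/_IDX macros added above pack the uncore type and box index into the low 16 bits of driver_data, which is what the reworked uncore_pci_probe() decodes. A small standalone sketch of the round trip, using the macros verbatim with arbitrary type/index values:

    #include <stdio.h>

    #define UNCORE_PCI_DEV_DATA(type, idx)	((type << 8) | idx)
    #define UNCORE_PCI_DEV_TYPE(data)	((data >> 8) & 0xff)
    #define UNCORE_PCI_DEV_IDX(data)	(data & 0xff)

    int main(void)
    {
    	unsigned long data = UNCORE_PCI_DEV_DATA(2, 3);	/* arbitrary type and box index */

    	printf("type=%lu idx=%lu\n",
    	       UNCORE_PCI_DEV_TYPE(data), UNCORE_PCI_DEV_IDX(data));	/* type=2 idx=3 */
    	return 0;
    }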
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index 74467feb4dc5..e0e0841eef45 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c | |||
@@ -128,7 +128,9 @@ void native_machine_crash_shutdown(struct pt_regs *regs) | |||
128 | cpu_emergency_svm_disable(); | 128 | cpu_emergency_svm_disable(); |
129 | 129 | ||
130 | lapic_shutdown(); | 130 | lapic_shutdown(); |
131 | #if defined(CONFIG_X86_IO_APIC) | 131 | #ifdef CONFIG_X86_IO_APIC |
132 | /* Prevent crash_kexec() from deadlocking on ioapic_lock. */ | ||
133 | ioapic_zap_locks(); | ||
132 | disable_IO_APIC(); | 134 | disable_IO_APIC(); |
133 | #endif | 135 | #endif |
134 | #ifdef CONFIG_HPET_TIMER | 136 | #ifdef CONFIG_HPET_TIMER |
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index 94ab6b90dd3f..63bdb29b2549 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c | |||
@@ -196,15 +196,23 @@ static void __init ati_bugs_contd(int num, int slot, int func) | |||
196 | static void __init intel_remapping_check(int num, int slot, int func) | 196 | static void __init intel_remapping_check(int num, int slot, int func) |
197 | { | 197 | { |
198 | u8 revision; | 198 | u8 revision; |
199 | u16 device; | ||
199 | 200 | ||
201 | device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID); | ||
200 | revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID); | 202 | revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID); |
201 | 203 | ||
202 | /* | 204 | /* |
203 | * Revision 0x13 of this chipset supports irq remapping | 205 | * Revision 0x13 of all triggering devices in this quirk has a |
204 | * but has an erratum that breaks its behavior, flag it as such | 206 | * problem draining interrupts when irq remapping is enabled, |
207 | * and should be flagged as broken. Additionally, revisions 0x12 | ||
208 | * and 0x22 of device id 0x3405 have this problem. | ||
205 | */ | 209 | */ |
206 | if (revision == 0x13) | 210 | if (revision == 0x13) |
207 | set_irq_remapping_broken(); | 211 | set_irq_remapping_broken(); |
212 | else if ((device == 0x3405) && | ||
213 | ((revision == 0x12) || | ||
214 | (revision == 0x22))) | ||
215 | set_irq_remapping_broken(); | ||
208 | 216 | ||
209 | } | 217 | } |
210 | 218 | ||
@@ -239,6 +247,8 @@ static struct chipset early_qrk[] __initdata = { | |||
239 | PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd }, | 247 | PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd }, |
240 | { PCI_VENDOR_ID_INTEL, 0x3403, PCI_CLASS_BRIDGE_HOST, | 248 | { PCI_VENDOR_ID_INTEL, 0x3403, PCI_CLASS_BRIDGE_HOST, |
241 | PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check }, | 249 | PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check }, |
250 | { PCI_VENDOR_ID_INTEL, 0x3405, PCI_CLASS_BRIDGE_HOST, | ||
251 | PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check }, | ||
242 | { PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST, | 252 | { PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST, |
243 | PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check }, | 253 | PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check }, |
244 | {} | 254 | {} |
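
The quirk above now also matches host-bridge device 0x3405 and treats its revisions 0x12 and 0x22, in addition to revision 0x13 of any matched device, as having broken interrupt remapping. A standalone sketch of that decision logic, assuming the same device/revision encoding shown in the hunk:

    #include <stdbool.h>
    #include <stdio.h>

    static bool irq_remapping_broken(unsigned short device, unsigned char revision)
    {
    	if (revision == 0x13)		/* any device matched by the quirk table */
    		return true;
    	if (device == 0x3405 && (revision == 0x12 || revision == 0x22))
    		return true;
    	return false;
    }

    int main(void)
    {
    	printf("%d %d\n", irq_remapping_broken(0x3405, 0x22),
    	       irq_remapping_broken(0x3406, 0x10));	/* prints "1 0" */
    	return 0;
    }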
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c index 202d24f0f7e7..5d576ab34403 100644 --- a/arch/x86/kernel/i387.c +++ b/arch/x86/kernel/i387.c | |||
@@ -116,7 +116,7 @@ static void mxcsr_feature_mask_init(void) | |||
116 | 116 | ||
117 | if (cpu_has_fxsr) { | 117 | if (cpu_has_fxsr) { |
118 | memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct)); | 118 | memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct)); |
119 | asm volatile("fxsave %0" : : "m" (fx_scratch)); | 119 | asm volatile("fxsave %0" : "+m" (fx_scratch)); |
120 | mask = fx_scratch.mxcsr_mask; | 120 | mask = fx_scratch.mxcsr_mask; |
121 | if (mask == 0) | 121 | if (mask == 0) |
122 | mask = 0x0000ffbf; | 122 | mask = 0x0000ffbf; |
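
The i387.c hunk above changes the fxsave asm constraint from an input-only "m" to a read/write "+m": the instruction stores into fx_scratch, and an input-only constraint lets the compiler assume the buffer is never modified, so the later read of mxcsr_mask may be reordered or dropped. A hedged userspace illustration of the "+m" form (only meaningful on x86; the byte offsets follow the documented FXSAVE image layout):

    #include <stdio.h>
    #include <string.h>

    struct fxsave_area {
    	unsigned char data[512];
    } __attribute__((aligned(16)));		/* fxsave requires 16-byte alignment */

    int main(void)
    {
    	struct fxsave_area fx;

    	memset(&fx, 0, sizeof(fx));
    #if defined(__x86_64__) || defined(__i386__)
    	/* "+m" tells the compiler the asm both reads and writes fx. */
    	asm volatile("fxsave %0" : "+m" (fx));
    #endif
    	/* mxcsr_mask lives at byte offset 28 of the FXSAVE image. */
    	printf("mxcsr_mask = %02x %02x %02x %02x\n",
    	       fx.data[28], fx.data[29], fx.data[30], fx.data[31]);
    	return 0;
    }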
diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c index 2889b3d43882..460f5d9ceebb 100644 --- a/arch/x86/kernel/jump_label.c +++ b/arch/x86/kernel/jump_label.c | |||
@@ -37,7 +37,19 @@ static void __jump_label_transform(struct jump_entry *entry, | |||
37 | } else | 37 | } else |
38 | memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE); | 38 | memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE); |
39 | 39 | ||
40 | (*poker)((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE); | 40 | /* |
41 | * Make text_poke_bp() a default fallback poker. | ||
42 | * | ||
43 | * At the time the change is being done, just ignore whether we | ||
44 | * are doing a nop -> jump or a jump -> nop transition, and | ||
45 | * always assume that nop is the 'currently valid' instruction. | ||
46 | * | ||
47 | */ | ||
48 | if (poker) | ||
49 | (*poker)((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE); | ||
50 | else | ||
51 | text_poke_bp((void *)entry->code, &code, JUMP_LABEL_NOP_SIZE, | ||
52 | (void *)entry->code + JUMP_LABEL_NOP_SIZE); | ||
41 | } | 53 | } |
42 | 54 | ||
43 | void arch_jump_label_transform(struct jump_entry *entry, | 55 | void arch_jump_label_transform(struct jump_entry *entry, |
@@ -45,7 +57,7 @@ void arch_jump_label_transform(struct jump_entry *entry, | |||
45 | { | 57 | { |
46 | get_online_cpus(); | 58 | get_online_cpus(); |
47 | mutex_lock(&text_mutex); | 59 | mutex_lock(&text_mutex); |
48 | __jump_label_transform(entry, type, text_poke_smp); | 60 | __jump_label_transform(entry, type, NULL); |
49 | mutex_unlock(&text_mutex); | 61 | mutex_unlock(&text_mutex); |
50 | put_online_cpus(); | 62 | put_online_cpus(); |
51 | } | 63 | } |
diff --git a/arch/x86/kernel/kprobes/common.h b/arch/x86/kernel/kprobes/common.h index 2e9d4b5af036..c6ee63f927ab 100644 --- a/arch/x86/kernel/kprobes/common.h +++ b/arch/x86/kernel/kprobes/common.h | |||
@@ -82,14 +82,9 @@ extern void synthesize_reljump(void *from, void *to); | |||
82 | extern void synthesize_relcall(void *from, void *to); | 82 | extern void synthesize_relcall(void *from, void *to); |
83 | 83 | ||
84 | #ifdef CONFIG_OPTPROBES | 84 | #ifdef CONFIG_OPTPROBES |
85 | extern int arch_init_optprobes(void); | ||
86 | extern int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter); | 85 | extern int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter); |
87 | extern unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr); | 86 | extern unsigned long __recover_optprobed_insn(kprobe_opcode_t *buf, unsigned long addr); |
88 | #else /* !CONFIG_OPTPROBES */ | 87 | #else /* !CONFIG_OPTPROBES */ |
89 | static inline int arch_init_optprobes(void) | ||
90 | { | ||
91 | return 0; | ||
92 | } | ||
93 | static inline int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter) | 88 | static inline int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter) |
94 | { | 89 | { |
95 | return 0; | 90 | return 0; |
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 048852d06447..79a3f9682871 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c | |||
@@ -1068,7 +1068,7 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) | |||
1068 | 1068 | ||
1069 | int __init arch_init_kprobes(void) | 1069 | int __init arch_init_kprobes(void) |
1070 | { | 1070 | { |
1071 | return arch_init_optprobes(); | 1071 | return 0; |
1072 | } | 1072 | } |
1073 | 1073 | ||
1074 | int __kprobes arch_trampoline_kprobe(struct kprobe *p) | 1074 | int __kprobes arch_trampoline_kprobe(struct kprobe *p) |
diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index 2c1ac2893365..898160b42e43 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c | |||
@@ -368,31 +368,6 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op) | |||
368 | return 0; | 368 | return 0; |
369 | } | 369 | } |
370 | 370 | ||
371 | #define MAX_OPTIMIZE_PROBES 256 | ||
372 | static struct text_poke_param *jump_poke_params; | ||
373 | static struct jump_poke_buffer { | ||
374 | u8 buf[RELATIVEJUMP_SIZE]; | ||
375 | } *jump_poke_bufs; | ||
376 | |||
377 | static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm, | ||
378 | u8 *insn_buf, | ||
379 | struct optimized_kprobe *op) | ||
380 | { | ||
381 | s32 rel = (s32)((long)op->optinsn.insn - | ||
382 | ((long)op->kp.addr + RELATIVEJUMP_SIZE)); | ||
383 | |||
384 | /* Backup instructions which will be replaced by jump address */ | ||
385 | memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE, | ||
386 | RELATIVE_ADDR_SIZE); | ||
387 | |||
388 | insn_buf[0] = RELATIVEJUMP_OPCODE; | ||
389 | *(s32 *)(&insn_buf[1]) = rel; | ||
390 | |||
391 | tprm->addr = op->kp.addr; | ||
392 | tprm->opcode = insn_buf; | ||
393 | tprm->len = RELATIVEJUMP_SIZE; | ||
394 | } | ||
395 | |||
396 | /* | 371 | /* |
397 | * Replace breakpoints (int3) with relative jumps. | 372 | * Replace breakpoints (int3) with relative jumps. |
398 | * Caller must call with locking kprobe_mutex and text_mutex. | 373 | * Caller must call with locking kprobe_mutex and text_mutex. |
@@ -400,37 +375,38 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm, | |||
400 | void __kprobes arch_optimize_kprobes(struct list_head *oplist) | 375 | void __kprobes arch_optimize_kprobes(struct list_head *oplist) |
401 | { | 376 | { |
402 | struct optimized_kprobe *op, *tmp; | 377 | struct optimized_kprobe *op, *tmp; |
403 | int c = 0; | 378 | u8 insn_buf[RELATIVEJUMP_SIZE]; |
404 | 379 | ||
405 | list_for_each_entry_safe(op, tmp, oplist, list) { | 380 | list_for_each_entry_safe(op, tmp, oplist, list) { |
381 | s32 rel = (s32)((long)op->optinsn.insn - | ||
382 | ((long)op->kp.addr + RELATIVEJUMP_SIZE)); | ||
383 | |||
406 | WARN_ON(kprobe_disabled(&op->kp)); | 384 | WARN_ON(kprobe_disabled(&op->kp)); |
407 | /* Setup param */ | 385 | |
408 | setup_optimize_kprobe(&jump_poke_params[c], | 386 | /* Backup instructions which will be replaced by jump address */ |
409 | jump_poke_bufs[c].buf, op); | 387 | memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE, |
388 | RELATIVE_ADDR_SIZE); | ||
389 | |||
390 | insn_buf[0] = RELATIVEJUMP_OPCODE; | ||
391 | *(s32 *)(&insn_buf[1]) = rel; | ||
392 | |||
393 | text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE, | ||
394 | op->optinsn.insn); | ||
395 | |||
410 | list_del_init(&op->list); | 396 | list_del_init(&op->list); |
411 | if (++c >= MAX_OPTIMIZE_PROBES) | ||
412 | break; | ||
413 | } | 397 | } |
414 | |||
415 | /* | ||
416 | * text_poke_smp doesn't support NMI/MCE code modifying. | ||
417 | * However, since kprobes itself also doesn't support NMI/MCE | ||
418 | * code probing, it's not a problem. | ||
419 | */ | ||
420 | text_poke_smp_batch(jump_poke_params, c); | ||
421 | } | 398 | } |
422 | 399 | ||
423 | static void __kprobes setup_unoptimize_kprobe(struct text_poke_param *tprm, | 400 | /* Replace a relative jump with a breakpoint (int3). */ |
424 | u8 *insn_buf, | 401 | void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op) |
425 | struct optimized_kprobe *op) | ||
426 | { | 402 | { |
403 | u8 insn_buf[RELATIVEJUMP_SIZE]; | ||
404 | |||
427 | /* Set int3 to first byte for kprobes */ | 405 | /* Set int3 to first byte for kprobes */ |
428 | insn_buf[0] = BREAKPOINT_INSTRUCTION; | 406 | insn_buf[0] = BREAKPOINT_INSTRUCTION; |
429 | memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE); | 407 | memcpy(insn_buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE); |
430 | 408 | text_poke_bp(op->kp.addr, insn_buf, RELATIVEJUMP_SIZE, | |
431 | tprm->addr = op->kp.addr; | 409 | op->optinsn.insn); |
432 | tprm->opcode = insn_buf; | ||
433 | tprm->len = RELATIVEJUMP_SIZE; | ||
434 | } | 410 | } |
435 | 411 | ||
436 | /* | 412 | /* |
@@ -441,34 +417,11 @@ extern void arch_unoptimize_kprobes(struct list_head *oplist, | |||
441 | struct list_head *done_list) | 417 | struct list_head *done_list) |
442 | { | 418 | { |
443 | struct optimized_kprobe *op, *tmp; | 419 | struct optimized_kprobe *op, *tmp; |
444 | int c = 0; | ||
445 | 420 | ||
446 | list_for_each_entry_safe(op, tmp, oplist, list) { | 421 | list_for_each_entry_safe(op, tmp, oplist, list) { |
447 | /* Setup param */ | 422 | arch_unoptimize_kprobe(op); |
448 | setup_unoptimize_kprobe(&jump_poke_params[c], | ||
449 | jump_poke_bufs[c].buf, op); | ||
450 | list_move(&op->list, done_list); | 423 | list_move(&op->list, done_list); |
451 | if (++c >= MAX_OPTIMIZE_PROBES) | ||
452 | break; | ||
453 | } | 424 | } |
454 | |||
455 | /* | ||
456 | * text_poke_smp doesn't support NMI/MCE code modifying. | ||
457 | * However, since kprobes itself also doesn't support NMI/MCE | ||
458 | * code probing, it's not a problem. | ||
459 | */ | ||
460 | text_poke_smp_batch(jump_poke_params, c); | ||
461 | } | ||
462 | |||
463 | /* Replace a relative jump with a breakpoint (int3). */ | ||
464 | void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op) | ||
465 | { | ||
466 | u8 buf[RELATIVEJUMP_SIZE]; | ||
467 | |||
468 | /* Set int3 to first byte for kprobes */ | ||
469 | buf[0] = BREAKPOINT_INSTRUCTION; | ||
470 | memcpy(buf + 1, op->optinsn.copied_insn, RELATIVE_ADDR_SIZE); | ||
471 | text_poke_smp(op->kp.addr, buf, RELATIVEJUMP_SIZE); | ||
472 | } | 425 | } |
473 | 426 | ||
474 | int __kprobes | 427 | int __kprobes |
@@ -488,22 +441,3 @@ setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter) | |||
488 | } | 441 | } |
489 | return 0; | 442 | return 0; |
490 | } | 443 | } |
491 | |||
492 | int __kprobes arch_init_optprobes(void) | ||
493 | { | ||
494 | /* Allocate code buffer and parameter array */ | ||
495 | jump_poke_bufs = kmalloc(sizeof(struct jump_poke_buffer) * | ||
496 | MAX_OPTIMIZE_PROBES, GFP_KERNEL); | ||
497 | if (!jump_poke_bufs) | ||
498 | return -ENOMEM; | ||
499 | |||
500 | jump_poke_params = kmalloc(sizeof(struct text_poke_param) * | ||
501 | MAX_OPTIMIZE_PROBES, GFP_KERNEL); | ||
502 | if (!jump_poke_params) { | ||
503 | kfree(jump_poke_bufs); | ||
504 | jump_poke_bufs = NULL; | ||
505 | return -ENOMEM; | ||
506 | } | ||
507 | |||
508 | return 0; | ||
509 | } | ||
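
With the batching helpers removed above, each optimized kprobe is now patched individually through text_poke_bp(), using the same rel32 displacement that setup_optimize_kprobe() used to compute. A tiny sketch of that displacement calculation; the addresses are made up for illustration.

    #include <stdint.h>
    #include <stdio.h>

    #define RELATIVEJUMP_SIZE 5	/* 0xe9 opcode + 32-bit displacement */

    int main(void)
    {
    	uint64_t kp_addr = 0xffffffff81000100ULL;	/* probed instruction (made-up address) */
    	uint64_t insn    = 0xffffffffa0002000ULL;	/* out-of-line buffer (made-up address) */

    	/* The displacement is relative to the end of the 5-byte jump. */
    	int32_t rel = (int32_t)(int64_t)(insn - (kp_addr + RELATIVEJUMP_SIZE));

    	printf("jmp rel32 = 0x%08x\n", (unsigned)rel);	/* prints "jmp rel32 = 0x1f001efb" */
    	return 0;
    }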
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c index 47ebb1dbfbcb..7123b5df479d 100644 --- a/arch/x86/kernel/microcode_amd.c +++ b/arch/x86/kernel/microcode_amd.c | |||
@@ -145,10 +145,9 @@ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig) | |||
145 | return 0; | 145 | return 0; |
146 | } | 146 | } |
147 | 147 | ||
148 | static unsigned int verify_patch_size(int cpu, u32 patch_size, | 148 | static unsigned int verify_patch_size(u8 family, u32 patch_size, |
149 | unsigned int size) | 149 | unsigned int size) |
150 | { | 150 | { |
151 | struct cpuinfo_x86 *c = &cpu_data(cpu); | ||
152 | u32 max_size; | 151 | u32 max_size; |
153 | 152 | ||
154 | #define F1XH_MPB_MAX_SIZE 2048 | 153 | #define F1XH_MPB_MAX_SIZE 2048 |
@@ -156,7 +155,7 @@ static unsigned int verify_patch_size(int cpu, u32 patch_size, | |||
156 | #define F15H_MPB_MAX_SIZE 4096 | 155 | #define F15H_MPB_MAX_SIZE 4096 |
157 | #define F16H_MPB_MAX_SIZE 3458 | 156 | #define F16H_MPB_MAX_SIZE 3458 |
158 | 157 | ||
159 | switch (c->x86) { | 158 | switch (family) { |
160 | case 0x14: | 159 | case 0x14: |
161 | max_size = F14H_MPB_MAX_SIZE; | 160 | max_size = F14H_MPB_MAX_SIZE; |
162 | break; | 161 | break; |
@@ -220,12 +219,13 @@ int apply_microcode_amd(int cpu) | |||
220 | return 0; | 219 | return 0; |
221 | } | 220 | } |
222 | 221 | ||
223 | if (__apply_microcode_amd(mc_amd)) | 222 | if (__apply_microcode_amd(mc_amd)) { |
224 | pr_err("CPU%d: update failed for patch_level=0x%08x\n", | 223 | pr_err("CPU%d: update failed for patch_level=0x%08x\n", |
225 | cpu, mc_amd->hdr.patch_id); | 224 | cpu, mc_amd->hdr.patch_id); |
226 | else | 225 | return -1; |
227 | pr_info("CPU%d: new patch_level=0x%08x\n", cpu, | 226 | } |
228 | mc_amd->hdr.patch_id); | 227 | pr_info("CPU%d: new patch_level=0x%08x\n", cpu, |
228 | mc_amd->hdr.patch_id); | ||
229 | 229 | ||
230 | uci->cpu_sig.rev = mc_amd->hdr.patch_id; | 230 | uci->cpu_sig.rev = mc_amd->hdr.patch_id; |
231 | c->microcode = mc_amd->hdr.patch_id; | 231 | c->microcode = mc_amd->hdr.patch_id; |
@@ -276,9 +276,8 @@ static void cleanup(void) | |||
276 | * driver cannot continue functioning normally. In such cases, we tear | 276 | * driver cannot continue functioning normally. In such cases, we tear |
277 | * down everything we've used up so far and exit. | 277 | * down everything we've used up so far and exit. |
278 | */ | 278 | */ |
279 | static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover) | 279 | static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover) |
280 | { | 280 | { |
281 | struct cpuinfo_x86 *c = &cpu_data(cpu); | ||
282 | struct microcode_header_amd *mc_hdr; | 281 | struct microcode_header_amd *mc_hdr; |
283 | struct ucode_patch *patch; | 282 | struct ucode_patch *patch; |
284 | unsigned int patch_size, crnt_size, ret; | 283 | unsigned int patch_size, crnt_size, ret; |
@@ -298,7 +297,7 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover) | |||
298 | 297 | ||
299 | /* check if patch is for the current family */ | 298 | /* check if patch is for the current family */ |
300 | proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff); | 299 | proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff); |
301 | if (proc_fam != c->x86) | 300 | if (proc_fam != family) |
302 | return crnt_size; | 301 | return crnt_size; |
303 | 302 | ||
304 | if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) { | 303 | if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) { |
@@ -307,7 +306,7 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover) | |||
307 | return crnt_size; | 306 | return crnt_size; |
308 | } | 307 | } |
309 | 308 | ||
310 | ret = verify_patch_size(cpu, patch_size, leftover); | 309 | ret = verify_patch_size(family, patch_size, leftover); |
311 | if (!ret) { | 310 | if (!ret) { |
312 | pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id); | 311 | pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id); |
313 | return crnt_size; | 312 | return crnt_size; |
@@ -338,7 +337,8 @@ static int verify_and_add_patch(unsigned int cpu, u8 *fw, unsigned int leftover) | |||
338 | return crnt_size; | 337 | return crnt_size; |
339 | } | 338 | } |
340 | 339 | ||
341 | static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t size) | 340 | static enum ucode_state __load_microcode_amd(u8 family, const u8 *data, |
341 | size_t size) | ||
342 | { | 342 | { |
343 | enum ucode_state ret = UCODE_ERROR; | 343 | enum ucode_state ret = UCODE_ERROR; |
344 | unsigned int leftover; | 344 | unsigned int leftover; |
@@ -361,7 +361,7 @@ static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t siz | |||
361 | } | 361 | } |
362 | 362 | ||
363 | while (leftover) { | 363 | while (leftover) { |
364 | crnt_size = verify_and_add_patch(cpu, fw, leftover); | 364 | crnt_size = verify_and_add_patch(family, fw, leftover); |
365 | if (crnt_size < 0) | 365 | if (crnt_size < 0) |
366 | return ret; | 366 | return ret; |
367 | 367 | ||
@@ -372,22 +372,22 @@ static enum ucode_state __load_microcode_amd(int cpu, const u8 *data, size_t siz | |||
372 | return UCODE_OK; | 372 | return UCODE_OK; |
373 | } | 373 | } |
374 | 374 | ||
375 | enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size) | 375 | enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size) |
376 | { | 376 | { |
377 | enum ucode_state ret; | 377 | enum ucode_state ret; |
378 | 378 | ||
379 | /* free old equiv table */ | 379 | /* free old equiv table */ |
380 | free_equiv_cpu_table(); | 380 | free_equiv_cpu_table(); |
381 | 381 | ||
382 | ret = __load_microcode_amd(cpu, data, size); | 382 | ret = __load_microcode_amd(family, data, size); |
383 | 383 | ||
384 | if (ret != UCODE_OK) | 384 | if (ret != UCODE_OK) |
385 | cleanup(); | 385 | cleanup(); |
386 | 386 | ||
387 | #if defined(CONFIG_MICROCODE_AMD_EARLY) && defined(CONFIG_X86_32) | 387 | #if defined(CONFIG_MICROCODE_AMD_EARLY) && defined(CONFIG_X86_32) |
388 | /* save BSP's matching patch for early load */ | 388 | /* save BSP's matching patch for early load */ |
389 | if (cpu_data(cpu).cpu_index == boot_cpu_data.cpu_index) { | 389 | if (cpu_data(smp_processor_id()).cpu_index == boot_cpu_data.cpu_index) { |
390 | struct ucode_patch *p = find_patch(cpu); | 390 | struct ucode_patch *p = find_patch(smp_processor_id()); |
391 | if (p) { | 391 | if (p) { |
392 | memset(amd_bsp_mpb, 0, MPB_MAX_SIZE); | 392 | memset(amd_bsp_mpb, 0, MPB_MAX_SIZE); |
393 | memcpy(amd_bsp_mpb, p->data, min_t(u32, ksize(p->data), | 393 | memcpy(amd_bsp_mpb, p->data, min_t(u32, ksize(p->data), |
@@ -440,7 +440,7 @@ static enum ucode_state request_microcode_amd(int cpu, struct device *device, | |||
440 | goto fw_release; | 440 | goto fw_release; |
441 | } | 441 | } |
442 | 442 | ||
443 | ret = load_microcode_amd(cpu, fw->data, fw->size); | 443 | ret = load_microcode_amd(c->x86, fw->data, fw->size); |
444 | 444 | ||
445 | fw_release: | 445 | fw_release: |
446 | release_firmware(fw); | 446 | release_firmware(fw); |
diff --git a/arch/x86/kernel/microcode_amd_early.c b/arch/x86/kernel/microcode_amd_early.c index 1d14ffee5749..6073104ccaa3 100644 --- a/arch/x86/kernel/microcode_amd_early.c +++ b/arch/x86/kernel/microcode_amd_early.c | |||
@@ -238,25 +238,17 @@ static void __init collect_cpu_sig_on_bsp(void *arg) | |||
238 | uci->cpu_sig.sig = cpuid_eax(0x00000001); | 238 | uci->cpu_sig.sig = cpuid_eax(0x00000001); |
239 | } | 239 | } |
240 | #else | 240 | #else |
241 | static void collect_cpu_info_amd_early(struct cpuinfo_x86 *c, | 241 | void load_ucode_amd_ap(void) |
242 | struct ucode_cpu_info *uci) | ||
243 | { | 242 | { |
243 | unsigned int cpu = smp_processor_id(); | ||
244 | struct ucode_cpu_info *uci = ucode_cpu_info + cpu; | ||
244 | u32 rev, eax; | 245 | u32 rev, eax; |
245 | 246 | ||
246 | rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax); | 247 | rdmsr(MSR_AMD64_PATCH_LEVEL, rev, eax); |
247 | eax = cpuid_eax(0x00000001); | 248 | eax = cpuid_eax(0x00000001); |
248 | 249 | ||
249 | uci->cpu_sig.sig = eax; | ||
250 | uci->cpu_sig.rev = rev; | 250 | uci->cpu_sig.rev = rev; |
251 | c->microcode = rev; | 251 | uci->cpu_sig.sig = eax; |
252 | c->x86 = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); | ||
253 | } | ||
254 | |||
255 | void load_ucode_amd_ap(void) | ||
256 | { | ||
257 | unsigned int cpu = smp_processor_id(); | ||
258 | |||
259 | collect_cpu_info_amd_early(&cpu_data(cpu), ucode_cpu_info + cpu); | ||
260 | 252 | ||
261 | if (cpu && !ucode_loaded) { | 253 | if (cpu && !ucode_loaded) { |
262 | void *ucode; | 254 | void *ucode; |
@@ -265,8 +257,10 @@ void load_ucode_amd_ap(void) | |||
265 | return; | 257 | return; |
266 | 258 | ||
267 | ucode = (void *)(initrd_start + ucode_offset); | 259 | ucode = (void *)(initrd_start + ucode_offset); |
268 | if (load_microcode_amd(0, ucode, ucode_size) != UCODE_OK) | 260 | eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); |
261 | if (load_microcode_amd(eax, ucode, ucode_size) != UCODE_OK) | ||
269 | return; | 262 | return; |
263 | |||
270 | ucode_loaded = true; | 264 | ucode_loaded = true; |
271 | } | 265 | } |
272 | 266 | ||
@@ -278,6 +272,8 @@ int __init save_microcode_in_initrd_amd(void) | |||
278 | { | 272 | { |
279 | enum ucode_state ret; | 273 | enum ucode_state ret; |
280 | void *ucode; | 274 | void *ucode; |
275 | u32 eax; | ||
276 | |||
281 | #ifdef CONFIG_X86_32 | 277 | #ifdef CONFIG_X86_32 |
282 | unsigned int bsp = boot_cpu_data.cpu_index; | 278 | unsigned int bsp = boot_cpu_data.cpu_index; |
283 | struct ucode_cpu_info *uci = ucode_cpu_info + bsp; | 279 | struct ucode_cpu_info *uci = ucode_cpu_info + bsp; |
@@ -293,7 +289,10 @@ int __init save_microcode_in_initrd_amd(void) | |||
293 | return 0; | 289 | return 0; |
294 | 290 | ||
295 | ucode = (void *)(initrd_start + ucode_offset); | 291 | ucode = (void *)(initrd_start + ucode_offset); |
296 | ret = load_microcode_amd(0, ucode, ucode_size); | 292 | eax = cpuid_eax(0x00000001); |
293 | eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); | ||
294 | |||
295 | ret = load_microcode_amd(eax, ucode, ucode_size); | ||
297 | if (ret != UCODE_OK) | 296 | if (ret != UCODE_OK) |
298 | return -EINVAL; | 297 | return -EINVAL; |
299 | 298 | ||
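
Both microcode files now derive the CPU family directly from the CPUID leaf 1 signature rather than from cpu_data(): base family in bits 11:8 plus extended family in bits 27:20, matching the eax computation in the hunks above. A short worked example; the signature value is just a sample.

    #include <stdio.h>

    static unsigned int x86_family(unsigned int sig)
    {
    	return ((sig >> 8) & 0xf) + ((sig >> 20) & 0xff);
    }

    int main(void)
    {
    	/* 0x00600f12: base family 0xf, extended family 0x6 -> family 0x15. */
    	printf("family 0x%x\n", x86_family(0x00600f12));	/* prints "family 0x15" */
    	return 0;
    }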
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c index dbded5aedb81..30277e27431a 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c | |||
@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin, | |||
101 | *begin = new_begin; | 101 | *begin = new_begin; |
102 | } | 102 | } |
103 | } else { | 103 | } else { |
104 | *begin = TASK_UNMAPPED_BASE; | 104 | *begin = current->mm->mmap_legacy_base; |
105 | *end = TASK_SIZE; | 105 | *end = TASK_SIZE; |
106 | } | 106 | } |
107 | } | 107 | } |
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c index addf7b58f4e8..91a4496db434 100644 --- a/arch/x86/kernel/tboot.c +++ b/arch/x86/kernel/tboot.c | |||
@@ -301,6 +301,15 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control) | |||
301 | return 0; | 301 | return 0; |
302 | } | 302 | } |
303 | 303 | ||
304 | static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b) | ||
305 | { | ||
306 | if (!tboot_enabled()) | ||
307 | return 0; | ||
308 | |||
309 | pr_warning("tboot is not able to suspend on platforms with reduced hardware sleep (ACPIv5)"); | ||
310 | return -ENODEV; | ||
311 | } | ||
312 | |||
304 | static atomic_t ap_wfs_count; | 313 | static atomic_t ap_wfs_count; |
305 | 314 | ||
306 | static int tboot_wait_for_aps(int num_aps) | 315 | static int tboot_wait_for_aps(int num_aps) |
@@ -422,6 +431,7 @@ static __init int tboot_late_init(void) | |||
422 | #endif | 431 | #endif |
423 | 432 | ||
424 | acpi_os_set_prepare_sleep(&tboot_sleep); | 433 | acpi_os_set_prepare_sleep(&tboot_sleep); |
434 | acpi_os_set_prepare_extended_sleep(&tboot_extended_sleep); | ||
425 | return 0; | 435 | return 0; |
426 | } | 436 | } |
427 | 437 | ||
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 1b23a1c92746..8c8093b146ca 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c | |||
@@ -58,6 +58,7 @@ | |||
58 | #include <asm/mce.h> | 58 | #include <asm/mce.h> |
59 | #include <asm/fixmap.h> | 59 | #include <asm/fixmap.h> |
60 | #include <asm/mach_traps.h> | 60 | #include <asm/mach_traps.h> |
61 | #include <asm/alternative.h> | ||
61 | 62 | ||
62 | #ifdef CONFIG_X86_64 | 63 | #ifdef CONFIG_X86_64 |
63 | #include <asm/x86_init.h> | 64 | #include <asm/x86_init.h> |
@@ -327,6 +328,9 @@ dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_co | |||
327 | ftrace_int3_handler(regs)) | 328 | ftrace_int3_handler(regs)) |
328 | return; | 329 | return; |
329 | #endif | 330 | #endif |
331 | if (poke_int3_handler(regs)) | ||
332 | return; | ||
333 | |||
330 | prev_state = exception_enter(); | 334 | prev_state = exception_enter(); |
331 | #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP | 335 | #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP |
332 | if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, | 336 | if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, |
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 6ff49247edf8..930e5d48f560 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c | |||
@@ -89,6 +89,12 @@ int check_tsc_unstable(void) | |||
89 | } | 89 | } |
90 | EXPORT_SYMBOL_GPL(check_tsc_unstable); | 90 | EXPORT_SYMBOL_GPL(check_tsc_unstable); |
91 | 91 | ||
92 | int check_tsc_disabled(void) | ||
93 | { | ||
94 | return tsc_disabled; | ||
95 | } | ||
96 | EXPORT_SYMBOL_GPL(check_tsc_disabled); | ||
97 | |||
92 | #ifdef CONFIG_X86_TSC | 98 | #ifdef CONFIG_X86_TSC |
93 | int __init notsc_setup(char *str) | 99 | int __init notsc_setup(char *str) |
94 | { | 100 | { |
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt index 5d7e51f3fd28..533a85e3a07e 100644 --- a/arch/x86/lib/x86-opcode-map.txt +++ b/arch/x86/lib/x86-opcode-map.txt | |||
@@ -1,10 +1,8 @@ | |||
1 | # x86 Opcode Maps | 1 | # x86 Opcode Maps |
2 | # | 2 | # |
3 | # This is (mostly) based on following documentations. | 3 | # This is (mostly) based on following documentations. |
4 | # - Intel(R) 64 and IA-32 Architectures Software Developer's Manual Vol.2 | 4 | # - Intel(R) 64 and IA-32 Architectures Software Developer's Manual Vol.2C |
5 | # (#325383-040US, October 2011) | 5 | # (#326018-047US, June 2013) |
6 | # - Intel(R) Advanced Vector Extensions Programming Reference | ||
7 | # (#319433-011,JUNE 2011). | ||
8 | # | 6 | # |
9 | #<Opcode maps> | 7 | #<Opcode maps> |
10 | # Table: table-name | 8 | # Table: table-name |
@@ -29,6 +27,7 @@ | |||
29 | # - (F3): the last prefix is 0xF3 | 27 | # - (F3): the last prefix is 0xF3 |
30 | # - (F2): the last prefix is 0xF2 | 28 | # - (F2): the last prefix is 0xF2 |
31 | # - (!F3) : the last prefix is not 0xF3 (including non-last prefix case) | 29 | # - (!F3) : the last prefix is not 0xF3 (including non-last prefix case) |
30 | # - (66&F2): Both 0x66 and 0xF2 prefixes are specified. | ||
32 | 31 | ||
33 | Table: one byte opcode | 32 | Table: one byte opcode |
34 | Referrer: | 33 | Referrer: |
@@ -246,8 +245,8 @@ c2: RETN Iw (f64) | |||
246 | c3: RETN | 245 | c3: RETN |
247 | c4: LES Gz,Mp (i64) | VEX+2byte (Prefix) | 246 | c4: LES Gz,Mp (i64) | VEX+2byte (Prefix) |
248 | c5: LDS Gz,Mp (i64) | VEX+1byte (Prefix) | 247 | c5: LDS Gz,Mp (i64) | VEX+1byte (Prefix) |
249 | c6: Grp11 Eb,Ib (1A) | 248 | c6: Grp11A Eb,Ib (1A) |
250 | c7: Grp11 Ev,Iz (1A) | 249 | c7: Grp11B Ev,Iz (1A) |
251 | c8: ENTER Iw,Ib | 250 | c8: ENTER Iw,Ib |
252 | c9: LEAVE (d64) | 251 | c9: LEAVE (d64) |
253 | ca: RETF Iw | 252 | ca: RETF Iw |
@@ -293,8 +292,8 @@ ef: OUT DX,eAX | |||
293 | # 0xf0 - 0xff | 292 | # 0xf0 - 0xff |
294 | f0: LOCK (Prefix) | 293 | f0: LOCK (Prefix) |
295 | f1: | 294 | f1: |
296 | f2: REPNE (Prefix) | 295 | f2: REPNE (Prefix) | XACQUIRE (Prefix) |
297 | f3: REP/REPE (Prefix) | 296 | f3: REP/REPE (Prefix) | XRELEASE (Prefix) |
298 | f4: HLT | 297 | f4: HLT |
299 | f5: CMC | 298 | f5: CMC |
300 | f6: Grp3_1 Eb (1A) | 299 | f6: Grp3_1 Eb (1A) |
@@ -326,7 +325,8 @@ AVXcode: 1 | |||
326 | 0a: | 325 | 0a: |
327 | 0b: UD2 (1B) | 326 | 0b: UD2 (1B) |
328 | 0c: | 327 | 0c: |
329 | 0d: NOP Ev | GrpP | 328 | # AMD's prefetch group. Intel supports prefetchw(/1) only. |
329 | 0d: GrpP | ||
330 | 0e: FEMMS | 330 | 0e: FEMMS |
331 | # 3DNow! uses the last imm byte as opcode extension. | 331 | # 3DNow! uses the last imm byte as opcode extension. |
332 | 0f: 3DNow! Pq,Qq,Ib | 332 | 0f: 3DNow! Pq,Qq,Ib |
@@ -729,12 +729,12 @@ dc: VAESENC Vdq,Hdq,Wdq (66),(v1) | |||
729 | dd: VAESENCLAST Vdq,Hdq,Wdq (66),(v1) | 729 | dd: VAESENCLAST Vdq,Hdq,Wdq (66),(v1) |
730 | de: VAESDEC Vdq,Hdq,Wdq (66),(v1) | 730 | de: VAESDEC Vdq,Hdq,Wdq (66),(v1) |
731 | df: VAESDECLAST Vdq,Hdq,Wdq (66),(v1) | 731 | df: VAESDECLAST Vdq,Hdq,Wdq (66),(v1) |
732 | f0: MOVBE Gy,My | MOVBE Gw,Mw (66) | CRC32 Gd,Eb (F2) | 732 | f0: MOVBE Gy,My | MOVBE Gw,Mw (66) | CRC32 Gd,Eb (F2) | CRC32 Gd,Eb (66&F2) |
733 | f1: MOVBE My,Gy | MOVBE Mw,Gw (66) | CRC32 Gd,Ey (F2) | 733 | f1: MOVBE My,Gy | MOVBE Mw,Gw (66) | CRC32 Gd,Ey (F2) | CRC32 Gd,Ew (66&F2) |
734 | f2: ANDN Gy,By,Ey (v) | 734 | f2: ANDN Gy,By,Ey (v) |
735 | f3: Grp17 (1A) | 735 | f3: Grp17 (1A) |
736 | f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v) | 736 | f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v) |
737 | f6: MULX By,Gy,rDX,Ey (F2),(v) | 737 | f6: ADCX Gy,Ey (66) | ADOX Gy,Ey (F3) | MULX By,Gy,rDX,Ey (F2),(v) |
738 | f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v) | 738 | f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v) |
739 | EndTable | 739 | EndTable |
740 | 740 | ||
@@ -861,8 +861,8 @@ EndTable | |||
861 | 861 | ||
862 | GrpTable: Grp7 | 862 | GrpTable: Grp7 |
863 | 0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B) | 863 | 0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B) |
864 | 1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001) | 864 | 1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B) |
865 | 2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | 865 | 2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B) |
866 | 3: LIDT Ms | 866 | 3: LIDT Ms |
867 | 4: SMSW Mw/Rv | 867 | 4: SMSW Mw/Rv |
868 | 5: | 868 | 5: |
@@ -880,15 +880,21 @@ EndTable | |||
880 | GrpTable: Grp9 | 880 | GrpTable: Grp9 |
881 | 1: CMPXCHG8B/16B Mq/Mdq | 881 | 1: CMPXCHG8B/16B Mq/Mdq |
882 | 6: VMPTRLD Mq | VMCLEAR Mq (66) | VMXON Mq (F3) | RDRAND Rv (11B) | 882 | 6: VMPTRLD Mq | VMCLEAR Mq (66) | VMXON Mq (F3) | RDRAND Rv (11B) |
883 | 7: VMPTRST Mq | VMPTRST Mq (F3) | 883 | 7: VMPTRST Mq | VMPTRST Mq (F3) | RDSEED Rv (11B) |
884 | EndTable | 884 | EndTable |
885 | 885 | ||
886 | GrpTable: Grp10 | 886 | GrpTable: Grp10 |
887 | EndTable | 887 | EndTable |
888 | 888 | ||
889 | GrpTable: Grp11 | 889 | # Grp11A and Grp11B are expressed as Grp11 in Intel SDM |
890 | # Note: the operands are given by group opcode | 890 | GrpTable: Grp11A |
891 | 0: MOV | 891 | 0: MOV Eb,Ib |
892 | 7: XABORT Ib (000),(11B) | ||
893 | EndTable | ||
894 | |||
895 | GrpTable: Grp11B | ||
896 | 0: MOV Eb,Iz | ||
897 | 7: XBEGIN Jz (000),(11B) | ||
892 | EndTable | 898 | EndTable |
893 | 899 | ||
894 | GrpTable: Grp12 | 900 | GrpTable: Grp12 |
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 2ec29ac78ae6..04664cdb7fda 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c | |||
@@ -78,8 +78,8 @@ __ref void *alloc_low_pages(unsigned int num) | |||
78 | return __va(pfn << PAGE_SHIFT); | 78 | return __va(pfn << PAGE_SHIFT); |
79 | } | 79 | } |
80 | 80 | ||
81 | /* need 4 4k for initial PMD_SIZE, 4k for 0-ISA_END_ADDRESS */ | 81 | /* need 3 4k for initial PMD_SIZE, 3 4k for 0-ISA_END_ADDRESS */ |
82 | #define INIT_PGT_BUF_SIZE (5 * PAGE_SIZE) | 82 | #define INIT_PGT_BUF_SIZE (6 * PAGE_SIZE) |
83 | RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE); | 83 | RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE); |
84 | void __init early_alloc_pgt_buf(void) | 84 | void __init early_alloc_pgt_buf(void) |
85 | { | 85 | { |
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index 62c29a5bfe26..25e7e1372bb2 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c | |||
@@ -112,11 +112,13 @@ static unsigned long mmap_legacy_base(void) | |||
112 | */ | 112 | */ |
113 | void arch_pick_mmap_layout(struct mm_struct *mm) | 113 | void arch_pick_mmap_layout(struct mm_struct *mm) |
114 | { | 114 | { |
115 | mm->mmap_legacy_base = mmap_legacy_base(); | ||
116 | mm->mmap_base = mmap_base(); | ||
117 | |||
115 | if (mmap_is_legacy()) { | 118 | if (mmap_is_legacy()) { |
116 | mm->mmap_base = mmap_legacy_base(); | 119 | mm->mmap_base = mm->mmap_legacy_base; |
117 | mm->get_unmapped_area = arch_get_unmapped_area; | 120 | mm->get_unmapped_area = arch_get_unmapped_area; |
118 | } else { | 121 | } else { |
119 | mm->mmap_base = mmap_base(); | ||
120 | mm->get_unmapped_area = arch_get_unmapped_area_topdown; | 122 | mm->get_unmapped_area = arch_get_unmapped_area_topdown; |
121 | } | 123 | } |
122 | } | 124 | } |
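
After the mmap.c change above, both bases are computed unconditionally and cached in the mm, so the bottom-up fallback in sys_x86_64.c (see the find_start_end() hunk earlier) can start at mm->mmap_legacy_base even when the default layout is top-down. A toy model of the selection, with invented base addresses:

    #include <stdbool.h>
    #include <stdio.h>

    struct mm {
    	unsigned long long mmap_legacy_base;	/* bottom-up search start */
    	unsigned long long mmap_base;		/* active base for the chosen layout */
    	bool topdown;
    };

    static void pick_layout(struct mm *mm, bool legacy)
    {
    	mm->mmap_legacy_base = 0x2aaaaab000ULL;		/* assumed legacy base */
    	mm->mmap_base        = 0x7ffff7ff0000ULL;	/* assumed top-down base */
    	mm->topdown          = !legacy;
    	if (legacy)
    		mm->mmap_base = mm->mmap_legacy_base;
    }

    int main(void)
    {
    	struct mm mm;

    	pick_layout(&mm, false);
    	printf("topdown=%d base=%#llx legacy_base=%#llx\n",
    	       mm.topdown, mm.mmap_base, mm.mmap_legacy_base);
    	return 0;
    }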
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c index cdd0da9dd530..266ca912f62e 100644 --- a/arch/x86/mm/srat.c +++ b/arch/x86/mm/srat.c | |||
@@ -146,6 +146,7 @@ int __init | |||
146 | acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) | 146 | acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) |
147 | { | 147 | { |
148 | u64 start, end; | 148 | u64 start, end; |
149 | u32 hotpluggable; | ||
149 | int node, pxm; | 150 | int node, pxm; |
150 | 151 | ||
151 | if (srat_disabled()) | 152 | if (srat_disabled()) |
@@ -154,7 +155,8 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) | |||
154 | goto out_err_bad_srat; | 155 | goto out_err_bad_srat; |
155 | if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0) | 156 | if ((ma->flags & ACPI_SRAT_MEM_ENABLED) == 0) |
156 | goto out_err; | 157 | goto out_err; |
157 | if ((ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE) && !save_add_info()) | 158 | hotpluggable = ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE; |
159 | if (hotpluggable && !save_add_info()) | ||
158 | goto out_err; | 160 | goto out_err; |
159 | 161 | ||
160 | start = ma->base_address; | 162 | start = ma->base_address; |
@@ -174,9 +176,10 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma) | |||
174 | 176 | ||
175 | node_set(node, numa_nodes_parsed); | 177 | node_set(node, numa_nodes_parsed); |
176 | 178 | ||
177 | printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n", | 179 | pr_info("SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]%s\n", |
178 | node, pxm, | 180 | node, pxm, |
179 | (unsigned long long) start, (unsigned long long) end - 1); | 181 | (unsigned long long) start, (unsigned long long) end - 1, |
182 | hotpluggable ? " hotplug" : ""); | ||
180 | 183 | ||
181 | return 0; | 184 | return 0; |
182 | out_err_bad_srat: | 185 | out_err_bad_srat: |
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index d641897a1f4e..b30e937689d6 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c | |||
@@ -568,13 +568,8 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) | |||
568 | */ | 568 | */ |
569 | if (bus) { | 569 | if (bus) { |
570 | struct pci_bus *child; | 570 | struct pci_bus *child; |
571 | list_for_each_entry(child, &bus->children, node) { | 571 | list_for_each_entry(child, &bus->children, node) |
572 | struct pci_dev *self = child->self; | 572 | pcie_bus_configure_settings(child); |
573 | if (!self) | ||
574 | continue; | ||
575 | |||
576 | pcie_bus_configure_settings(child, self->pcie_mpss); | ||
577 | } | ||
578 | } | 573 | } |
579 | 574 | ||
580 | if (bus && node != -1) { | 575 | if (bus && node != -1) { |
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c index 94919e307f8e..db6b1ab43255 100644 --- a/arch/x86/pci/i386.c +++ b/arch/x86/pci/i386.c | |||
@@ -210,6 +210,8 @@ static void pcibios_allocate_bridge_resources(struct pci_dev *dev) | |||
210 | r = &dev->resource[idx]; | 210 | r = &dev->resource[idx]; |
211 | if (!r->flags) | 211 | if (!r->flags) |
212 | continue; | 212 | continue; |
213 | if (r->parent) /* Already allocated */ | ||
214 | continue; | ||
213 | if (!r->start || pci_claim_resource(dev, idx) < 0) { | 215 | if (!r->start || pci_claim_resource(dev, idx) < 0) { |
214 | /* | 216 | /* |
215 | * Something is wrong with the region. | 217 | * Something is wrong with the region. |
@@ -318,6 +320,8 @@ static void pcibios_allocate_dev_rom_resource(struct pci_dev *dev) | |||
318 | r = &dev->resource[PCI_ROM_RESOURCE]; | 320 | r = &dev->resource[PCI_ROM_RESOURCE]; |
319 | if (!r->flags || !r->start) | 321 | if (!r->flags || !r->start) |
320 | return; | 322 | return; |
323 | if (r->parent) /* Already allocated */ | ||
324 | return; | ||
321 | 325 | ||
322 | if (pci_claim_resource(dev, PCI_ROM_RESOURCE) < 0) { | 326 | if (pci_claim_resource(dev, PCI_ROM_RESOURCE) < 0) { |
323 | r->end -= r->start; | 327 | r->end -= r->start; |
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c index 082e88129712..5596c7bdd327 100644 --- a/arch/x86/pci/mmconfig-shared.c +++ b/arch/x86/pci/mmconfig-shared.c | |||
@@ -700,7 +700,7 @@ int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end, | |||
700 | if (!(pci_probe & PCI_PROBE_MMCONF) || pci_mmcfg_arch_init_failed) | 700 | if (!(pci_probe & PCI_PROBE_MMCONF) || pci_mmcfg_arch_init_failed) |
701 | return -ENODEV; | 701 | return -ENODEV; |
702 | 702 | ||
703 | if (start > end) | 703 | if (start > end || !addr) |
704 | return -EINVAL; | 704 | return -EINVAL; |
705 | 705 | ||
706 | mutex_lock(&pci_mmcfg_lock); | 706 | mutex_lock(&pci_mmcfg_lock); |
@@ -716,11 +716,6 @@ int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end, | |||
716 | return -EEXIST; | 716 | return -EEXIST; |
717 | } | 717 | } |
718 | 718 | ||
719 | if (!addr) { | ||
720 | mutex_unlock(&pci_mmcfg_lock); | ||
721 | return -EINVAL; | ||
722 | } | ||
723 | |||
724 | rc = -EBUSY; | 719 | rc = -EBUSY; |
725 | cfg = pci_mmconfig_alloc(seg, start, end, addr); | 720 | cfg = pci_mmconfig_alloc(seg, start, end, addr); |
726 | if (cfg == NULL) { | 721 | if (cfg == NULL) { |
diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c index 6eb18c42a28a..903fded50786 100644 --- a/arch/x86/pci/mrst.c +++ b/arch/x86/pci/mrst.c | |||
@@ -23,11 +23,11 @@ | |||
23 | #include <linux/ioport.h> | 23 | #include <linux/ioport.h> |
24 | #include <linux/init.h> | 24 | #include <linux/init.h> |
25 | #include <linux/dmi.h> | 25 | #include <linux/dmi.h> |
26 | #include <linux/acpi.h> | ||
27 | #include <linux/io.h> | ||
28 | #include <linux/smp.h> | ||
26 | 29 | ||
27 | #include <asm/acpi.h> | ||
28 | #include <asm/segment.h> | 30 | #include <asm/segment.h> |
29 | #include <asm/io.h> | ||
30 | #include <asm/smp.h> | ||
31 | #include <asm/pci_x86.h> | 31 | #include <asm/pci_x86.h> |
32 | #include <asm/hw_irq.h> | 32 | #include <asm/hw_irq.h> |
33 | #include <asm/io_apic.h> | 33 | #include <asm/io_apic.h> |
@@ -43,7 +43,7 @@ | |||
43 | #define PCI_FIXED_BAR_4_SIZE 0x14 | 43 | #define PCI_FIXED_BAR_4_SIZE 0x14 |
44 | #define PCI_FIXED_BAR_5_SIZE 0x1c | 44 | #define PCI_FIXED_BAR_5_SIZE 0x1c |
45 | 45 | ||
46 | static int pci_soc_mode = 0; | 46 | static int pci_soc_mode; |
47 | 47 | ||
48 | /** | 48 | /** |
49 | * fixed_bar_cap - return the offset of the fixed BAR cap if found | 49 | * fixed_bar_cap - return the offset of the fixed BAR cap if found |
@@ -141,7 +141,8 @@ static int pci_device_update_fixed(struct pci_bus *bus, unsigned int devfn, | |||
141 | */ | 141 | */ |
142 | static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg) | 142 | static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg) |
143 | { | 143 | { |
144 | /* This is a workaround for A0 LNC bug where PCI status register does | 144 | /* |
145 | * This is a workaround for A0 LNC bug where PCI status register does | ||
145 | * not have new CAP bit set. can not be written by SW either. | 146 | * not have new CAP bit set. can not be written by SW either. |
146 | * | 147 | * |
147 | * PCI header type in real LNC indicates a single function device, this | 148 | * PCI header type in real LNC indicates a single function device, this |
@@ -154,7 +155,7 @@ static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg) | |||
154 | || devfn == PCI_DEVFN(0, 0) | 155 | || devfn == PCI_DEVFN(0, 0) |
155 | || devfn == PCI_DEVFN(3, 0))) | 156 | || devfn == PCI_DEVFN(3, 0))) |
156 | return 1; | 157 | return 1; |
157 | return 0; /* langwell on others */ | 158 | return 0; /* Langwell on others */ |
158 | } | 159 | } |
159 | 160 | ||
160 | static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, | 161 | static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, |
@@ -172,7 +173,8 @@ static int pci_write(struct pci_bus *bus, unsigned int devfn, int where, | |||
172 | { | 173 | { |
173 | int offset; | 174 | int offset; |
174 | 175 | ||
175 | /* On MRST, there is no PCI ROM BAR, this will cause a subsequent read | 176 | /* |
177 | * On MRST, there is no PCI ROM BAR, this will cause a subsequent read | ||
176 | * to ROM BAR return 0 then being ignored. | 178 | * to ROM BAR return 0 then being ignored. |
177 | */ | 179 | */ |
178 | if (where == PCI_ROM_ADDRESS) | 180 | if (where == PCI_ROM_ADDRESS) |
@@ -210,7 +212,8 @@ static int mrst_pci_irq_enable(struct pci_dev *dev) | |||
210 | 212 | ||
211 | pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); | 213 | pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); |
212 | 214 | ||
213 | /* MRST only have IOAPIC, the PCI irq lines are 1:1 mapped to | 215 | /* |
216 | * MRST only have IOAPIC, the PCI irq lines are 1:1 mapped to | ||
214 | * IOAPIC RTE entries, so we just enable RTE for the device. | 217 | * IOAPIC RTE entries, so we just enable RTE for the device. |
215 | */ | 218 | */ |
216 | irq_attr.ioapic = mp_find_ioapic(dev->irq); | 219 | irq_attr.ioapic = mp_find_ioapic(dev->irq); |
@@ -235,7 +238,7 @@ struct pci_ops pci_mrst_ops = { | |||
235 | */ | 238 | */ |
236 | int __init pci_mrst_init(void) | 239 | int __init pci_mrst_init(void) |
237 | { | 240 | { |
238 | printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n"); | 241 | pr_info("Intel MID platform detected, using MID PCI ops\n"); |
239 | pci_mmcfg_late_init(); | 242 | pci_mmcfg_late_init(); |
240 | pcibios_enable_irq = mrst_pci_irq_enable; | 243 | pcibios_enable_irq = mrst_pci_irq_enable; |
241 | pci_root_ops = pci_mrst_ops; | 244 | pci_root_ops = pci_mrst_ops; |
@@ -244,17 +247,21 @@ int __init pci_mrst_init(void) | |||
244 | return 1; | 247 | return 1; |
245 | } | 248 | } |
246 | 249 | ||
247 | /* Langwell devices are not true pci devices, they are not subject to 10 ms | 250 | /* |
248 | * d3 to d0 delay required by pci spec. | 251 | * Langwell devices are not true PCI devices; they are not subject to 10 ms |
252 | * d3 to d0 delay required by PCI spec. | ||
249 | */ | 253 | */ |
250 | static void pci_d3delay_fixup(struct pci_dev *dev) | 254 | static void pci_d3delay_fixup(struct pci_dev *dev) |
251 | { | 255 | { |
252 | /* PCI fixups are effectively decided compile time. If we have a dual | 256 | /* |
253 | SoC/non-SoC kernel we don't want to mangle d3 on non SoC devices */ | 257 | * PCI fixups are effectively decided compile time. If we have a dual |
254 | if (!pci_soc_mode) | 258 | * SoC/non-SoC kernel we don't want to mangle d3 on non-SoC devices. |
255 | return; | 259 | */ |
256 | /* true pci devices in lincroft should allow type 1 access, the rest | 260 | if (!pci_soc_mode) |
257 | * are langwell fake pci devices. | 261 | return; |
262 | /* | ||
263 | * True PCI devices in Lincroft should allow type 1 access, the rest | ||
264 | * are Langwell fake PCI devices. | ||
258 | */ | 265 | */ |
259 | if (type1_access_ok(dev->bus->number, dev->devfn, PCI_DEVICE_ID)) | 266 | if (type1_access_ok(dev->bus->number, dev->devfn, PCI_DEVICE_ID)) |
260 | return; | 267 | return; |
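
Beyond the include and comment-style cleanups, the interesting logic in mrst.c is pci_d3delay_fixup(): compiled in unconditionally but gated at runtime, so a dual SoC/non-SoC kernel only touches Langwell's fake PCI devices. A sketch of that gating follows; the final assignment is an assumed continuation, since the hunk ends at the early returns:

    static int pci_soc_mode;            /* set to 1 during SoC platform detection */

    /* trivial stand-in for the real config-space probe */
    static int type1_access_ok(unsigned bus, unsigned devfn, int reg)
    {
        (void)reg;
        return bus == 0 && devfn == 0;
    }

    static void d3delay_fixup(unsigned bus, unsigned devfn, unsigned *d3_delay)
    {
        if (!pci_soc_mode)
            return;                     /* non-SoC kernel: leave the PCI-spec delay */
        if (type1_access_ok(bus, devfn, 0))
            return;                     /* true Lincroft PCI device: keep the delay */
        *d3_delay = 0;                  /* Langwell fake PCI device: skip the 10 ms
                                         * d3->d0 delay (assumed continuation) */
    }
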
diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk index e6773dc8ac41..093a892026f9 100644 --- a/arch/x86/tools/gen-insn-attr-x86.awk +++ b/arch/x86/tools/gen-insn-attr-x86.awk | |||
@@ -68,7 +68,7 @@ BEGIN { | |||
68 | 68 | ||
69 | lprefix1_expr = "\\((66|!F3)\\)" | 69 | lprefix1_expr = "\\((66|!F3)\\)" |
70 | lprefix2_expr = "\\(F3\\)" | 70 | lprefix2_expr = "\\(F3\\)" |
71 | lprefix3_expr = "\\((F2|!F3)\\)" | 71 | lprefix3_expr = "\\((F2|!F3|66\\&F2)\\)" |
72 | lprefix_expr = "\\((66|F2|F3)\\)" | 72 | lprefix_expr = "\\((66|F2|F3)\\)" |
73 | max_lprefix = 4 | 73 | max_lprefix = 4 |
74 | 74 | ||
@@ -83,6 +83,8 @@ BEGIN { | |||
83 | prefix_num["Operand-Size"] = "INAT_PFX_OPNDSZ" | 83 | prefix_num["Operand-Size"] = "INAT_PFX_OPNDSZ" |
84 | prefix_num["REPNE"] = "INAT_PFX_REPNE" | 84 | prefix_num["REPNE"] = "INAT_PFX_REPNE" |
85 | prefix_num["REP/REPE"] = "INAT_PFX_REPE" | 85 | prefix_num["REP/REPE"] = "INAT_PFX_REPE" |
86 | prefix_num["XACQUIRE"] = "INAT_PFX_REPNE" | ||
87 | prefix_num["XRELEASE"] = "INAT_PFX_REPE" | ||
86 | prefix_num["LOCK"] = "INAT_PFX_LOCK" | 88 | prefix_num["LOCK"] = "INAT_PFX_LOCK" |
87 | prefix_num["SEG=CS"] = "INAT_PFX_CS" | 89 | prefix_num["SEG=CS"] = "INAT_PFX_CS" |
88 | prefix_num["SEG=DS"] = "INAT_PFX_DS" | 90 | prefix_num["SEG=DS"] = "INAT_PFX_DS" |
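
The awk table maps XACQUIRE to INAT_PFX_REPNE and XRELEASE to INAT_PFX_REPE because the TSX hint prefixes reuse the legacy 0xF2/0xF3 prefix bytes. A tiny sketch of that byte-level aliasing (the enum names are illustrative, not the kernel's INAT constants):

    enum pfx_attr { PFX_NONE, PFX_REPNE, PFX_REPE };

    static enum pfx_attr prefix_attr(unsigned char byte)
    {
        switch (byte) {
        case 0xF2: return PFX_REPNE;    /* also XACQUIRE when used as a TSX hint */
        case 0xF3: return PFX_REPE;     /* also XRELEASE / plain REP             */
        default:   return PFX_NONE;
        }
    }
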
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index 056d11faef21..8f3eea6b80c5 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c | |||
@@ -313,6 +313,17 @@ static void xen_align_and_add_e820_region(u64 start, u64 size, int type) | |||
313 | e820_add_region(start, end - start, type); | 313 | e820_add_region(start, end - start, type); |
314 | } | 314 | } |
315 | 315 | ||
316 | void xen_ignore_unusable(struct e820entry *list, size_t map_size) | ||
317 | { | ||
318 | struct e820entry *entry; | ||
319 | unsigned int i; | ||
320 | |||
321 | for (i = 0, entry = list; i < map_size; i++, entry++) { | ||
322 | if (entry->type == E820_UNUSABLE) | ||
323 | entry->type = E820_RAM; | ||
324 | } | ||
325 | } | ||
326 | |||
316 | /** | 327 | /** |
317 | * machine_specific_memory_setup - Hook for machine specific memory setup. | 328 | * machine_specific_memory_setup - Hook for machine specific memory setup. |
318 | **/ | 329 | **/ |
@@ -353,6 +364,17 @@ char * __init xen_memory_setup(void) | |||
353 | } | 364 | } |
354 | BUG_ON(rc); | 365 | BUG_ON(rc); |
355 | 366 | ||
367 | /* | ||
368 | * Xen won't allow a 1:1 mapping to be created to UNUSABLE | ||
369 | * regions, so if we're using the machine memory map leave the | ||
370 | * region as RAM as it is in the pseudo-physical map. | ||
371 | * | ||
372 | * UNUSABLE regions in domUs are not handled and will need | ||
373 | * a patch in the future. | ||
374 | */ | ||
375 | if (xen_initial_domain()) | ||
376 | xen_ignore_unusable(map, memmap.nr_entries); | ||
377 | |||
356 | /* Make sure the Xen-supplied memory map is well-ordered. */ | 378 | /* Make sure the Xen-supplied memory map is well-ordered. */ |
357 | sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries); | 379 | sanitize_e820_map(map, memmap.nr_entries, &memmap.nr_entries); |
358 | 380 | ||
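
xen_ignore_unusable() simply retypes E820_UNUSABLE entries as RAM before the map is sanitized, and xen_memory_setup() only applies it for the initial domain. A user-space sketch of the same walk, with the e820 structure reduced to the fields the loop touches and assumed type values:

    #define E820_RAM       1
    #define E820_UNUSABLE  5    /* assumed values, for illustration only */

    struct e820entry_s { unsigned long long addr, size; unsigned type; };

    static void ignore_unusable(struct e820entry_s *map, unsigned nr)
    {
        for (unsigned i = 0; i < nr; i++)
            if (map[i].type == E820_UNUSABLE)
                map[i].type = E820_RAM;   /* keep it RAM, as in the pseudo-physical map */
    }
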
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index ca92754eb846..b81c88e51daa 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c | |||
@@ -694,8 +694,15 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus) | |||
694 | static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle) | 694 | static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle) |
695 | { | 695 | { |
696 | int rc; | 696 | int rc; |
697 | rc = native_cpu_up(cpu, tidle); | 697 | /* |
698 | WARN_ON (xen_smp_intr_init(cpu)); | 698 | * xen_smp_intr_init() needs to run before native_cpu_up() |
699 | * so that IPI vectors are set up on the booting CPU before | ||
700 | * it is marked online in native_cpu_up(). | ||
701 | */ | ||
702 | rc = xen_smp_intr_init(cpu); | ||
703 | WARN_ON(rc); | ||
704 | if (!rc) | ||
705 | rc = native_cpu_up(cpu, tidle); | ||
699 | return rc; | 706 | return rc; |
700 | } | 707 | } |
701 | 708 | ||
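
The xen_hvm_cpu_up() reorder makes the IPI setup a precondition of the bring-up: xen_smp_intr_init() runs first, and native_cpu_up() is only attempted if it succeeded, so a CPU can never be marked online without its vectors. A sketch of that ordering, with stub stand-ins for both calls:

    static int xen_smp_intr_init_stub(unsigned int cpu)
    {
        (void)cpu;
        return 0;                        /* pretend the per-CPU IRQs were bound */
    }

    static int native_cpu_up_stub(unsigned int cpu, void *tidle)
    {
        (void)cpu; (void)tidle;
        return 0;                        /* pretend the CPU came online */
    }

    static int hvm_cpu_up(unsigned int cpu, void *tidle)
    {
        int rc = xen_smp_intr_init_stub(cpu);    /* must precede the bring-up */
        if (rc)
            return rc;                           /* don't online a half-set-up CPU */
        return native_cpu_up_stub(cpu, tidle);
    }
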