author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2013-08-18 23:33:01 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2013-08-18 23:33:01 -0400
commit     bd479f293370d863953aba59130bcc7ae867dd10 (patch)
tree       b6987c9c622d1f98b680d6fce1447972d717761c
parent     68c91d377c9bd14cbe35c647ed3b847f7862c958 (diff)
parent     b36f4be3de1b123d8601de062e7dbfc904f305fb (diff)
Merge 3.11-rc6 into usb-next
We want these USB fixes in this branch as well.
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
367 files changed, 3426 insertions, 1917 deletions
diff --git a/Documentation/DocBook/media_api.tmpl b/Documentation/DocBook/media_api.tmpl
index 6a8b7158697f..9c92bb879b6d 100644
--- a/Documentation/DocBook/media_api.tmpl
+++ b/Documentation/DocBook/media_api.tmpl
@@ -1,6 +1,6 @@
 <?xml version="1.0"?>
-<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
-    "http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" [
+<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
+    "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd" [
 <!ENTITY % media-entities SYSTEM "./media-entities.tmpl"> %media-entities;
 <!ENTITY media-indices SYSTEM "./media-indices.tmpl">
 
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt b/Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt
index a1ee681942cc..6113f9275f42 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-mv64xxx.txt
@@ -4,7 +4,7 @@
 Required properties :
 
 - reg : Offset and length of the register set for the device
-- compatible : Should be "marvell,mv64xxx-i2c"
+- compatible : Should be "marvell,mv64xxx-i2c" or "allwinner,sun4i-i2c"
 - interrupts : The interrupt number
 
 Optional properties :
diff --git a/Documentation/devicetree/bindings/regulator/palmas-pmic.txt b/Documentation/devicetree/bindings/regulator/palmas-pmic.txt
index d5a308629c57..30b0581bb1ce 100644
--- a/Documentation/devicetree/bindings/regulator/palmas-pmic.txt
+++ b/Documentation/devicetree/bindings/regulator/palmas-pmic.txt
@@ -31,9 +31,8 @@ Optional nodes:
 Optional sub-node properties:
 ti,warm-reset - maintain voltage during warm reset(boolean)
 ti,roof-floor - control voltage selection by pin(boolean)
-ti,sleep-mode - mode to adopt in pmic sleep 0 - off, 1 - auto,
+ti,mode-sleep - mode to adopt in pmic sleep 0 - off, 1 - auto,
 2 - eco, 3 - forced pwm
-ti,tstep - slope control 0 - Jump, 1 10mV/us, 2 5mV/us, 3 2.5mV/us
 ti,smps-range - OTP has the wrong range set for the hardware so override
 0 - low range, 1 - high range.
 
@@ -59,7 +58,6 @@ pmic {
         ti,warm-reset;
         ti,roof-floor;
         ti,mode-sleep = <0>;
-        ti,tstep = <0>;
         ti,smps-range = <1>;
     };
 
diff --git a/MAINTAINERS b/MAINTAINERS
index 3153ccabc38f..94da71142b7e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -965,6 +965,12 @@ M: Lennert Buytenhek <kernel@wantstofly.org>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Maintained
 
+ARM/TEXAS INSTRUMENT KEYSTONE ARCHITECTURE
+M: Santosh Shilimkar <santosh.shilimkar@ti.com>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: arch/arm/mach-keystone/
+
 ARM/LOGICPD PXA270 MACHINE SUPPORT
 M: Lennert Buytenhek <kernel@wantstofly.org>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1259,7 +1265,6 @@ F: drivers/rtc/rtc-coh901331.c
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git
 
 ARM/Ux500 ARM ARCHITECTURE
-M: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
 M: Linus Walleij <linus.walleij@linaro.org>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Maintained
@@ -5576,9 +5581,9 @@ S: Maintained
 F: drivers/media/tuners/mxl5007t.*
 
 MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
-M: Andrew Gallatin <gallatin@myri.com>
+M: Hyong-Youb Kim <hykim@myri.com>
 L: netdev@vger.kernel.org
-W: http://www.myri.com/scs/download-Myri10GE.html
+W: https://www.myricom.com/support/downloads/myri10ge.html
 S: Supported
 F: drivers/net/ethernet/myricom/myri10ge/
 
@@ -7361,7 +7366,6 @@ F: drivers/net/ethernet/sfc/
 
 SGI GRU DRIVER
 M: Dimitri Sivanich <sivanich@sgi.com>
-M: Robin Holt <holt@sgi.com>
 S: Maintained
 F: drivers/misc/sgi-gru/
 
@@ -7381,7 +7385,8 @@ S: Maintained for 2.6.
 F: Documentation/sgi-visws.txt
 
 SGI XP/XPC/XPNET DRIVER
-M: Robin Holt <holt@sgi.com>
+M: Cliff Whickman <cpw@sgi.com>
+M: Robin Holt <robinmholt@gmail.com>
 S: Maintained
 F: drivers/misc/sgi-xp/
 
@@ -8664,6 +8669,11 @@ T: git git://git.alsa-project.org/alsa-kernel.git
 S: Maintained
 F: sound/usb/midi.*
 
+USB NETWORKING DRIVERS
+L: linux-usb@vger.kernel.org
+S: Odd Fixes
+F: drivers/net/usb/
+
 USB OHCI DRIVER
 M: Alan Stern <stern@rowland.harvard.edu>
 L: linux-usb@vger.kernel.org
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 11
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
 NAME = Linux for Workgroups
 
 # *DOCUMENTATION*
diff --git a/arch/Kconfig b/arch/Kconfig
index 8d2ae24b9f4a..1feb169274fe 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -407,6 +407,12 @@ config CLONE_BACKWARDS2
     help
       Architecture has the first two arguments of clone(2) swapped.
 
+config CLONE_BACKWARDS3
+    bool
+    help
+      Architecture has tls passed as the 3rd argument of clone(2),
+      not the 5th one.
+
 config ODD_RT_SIGACTION
     bool
     help
diff --git a/arch/arm/boot/dts/msm8960-cdp.dts b/arch/arm/boot/dts/msm8960-cdp.dts
index db2060c46540..9c1167b0459b 100644
--- a/arch/arm/boot/dts/msm8960-cdp.dts
+++ b/arch/arm/boot/dts/msm8960-cdp.dts
@@ -26,7 +26,7 @@
         cpu-offset = <0x80000>;
     };
 
-    msmgpio: gpio@fd510000 {
+    msmgpio: gpio@800000 {
         compatible = "qcom,msm-gpio";
         gpio-controller;
         #gpio-cells = <2>;
@@ -34,7 +34,7 @@
         interrupts = <0 32 0x4>;
         interrupt-controller;
         #interrupt-cells = <2>;
-        reg = <0xfd510000 0x4000>;
+        reg = <0x800000 0x4000>;
     };
 
     serial@16440000 {
diff --git a/arch/arm/boot/dts/omap5-uevm.dts b/arch/arm/boot/dts/omap5-uevm.dts
index 08b72678abff..65d7b601651c 100644
--- a/arch/arm/boot/dts/omap5-uevm.dts
+++ b/arch/arm/boot/dts/omap5-uevm.dts
@@ -235,7 +235,7 @@
 };
 
 &mmc1 {
-    vmmc-supply = <&vmmcsd_fixed>;
+    vmmc-supply = <&ldo9_reg>;
     bus-width = <4>;
 };
 
@@ -282,6 +282,7 @@
 
     regulators {
         smps123_reg: smps123 {
+            /* VDD_OPP_MPU */
             regulator-name = "smps123";
             regulator-min-microvolt = < 600000>;
             regulator-max-microvolt = <1500000>;
@@ -290,6 +291,7 @@
         };
 
         smps45_reg: smps45 {
+            /* VDD_OPP_MM */
             regulator-name = "smps45";
             regulator-min-microvolt = < 600000>;
             regulator-max-microvolt = <1310000>;
@@ -298,6 +300,7 @@
         };
 
         smps6_reg: smps6 {
+            /* VDD_DDR3 - over VDD_SMPS6 */
             regulator-name = "smps6";
             regulator-min-microvolt = <1200000>;
             regulator-max-microvolt = <1200000>;
@@ -306,6 +309,7 @@
         };
 
         smps7_reg: smps7 {
+            /* VDDS_1v8_OMAP over VDDS_1v8_MAIN */
             regulator-name = "smps7";
             regulator-min-microvolt = <1800000>;
             regulator-max-microvolt = <1800000>;
@@ -314,6 +318,7 @@
         };
 
         smps8_reg: smps8 {
+            /* VDD_OPP_CORE */
             regulator-name = "smps8";
             regulator-min-microvolt = < 600000>;
             regulator-max-microvolt = <1310000>;
@@ -322,15 +327,15 @@
         };
 
         smps9_reg: smps9 {
+            /* VDDA_2v1_AUD over VDD_2v1 */
             regulator-name = "smps9";
             regulator-min-microvolt = <2100000>;
             regulator-max-microvolt = <2100000>;
-            regulator-always-on;
-            regulator-boot-on;
             ti,smps-range = <0x80>;
         };
 
         smps10_reg: smps10 {
+            /* VBUS_5V_OTG */
             regulator-name = "smps10";
             regulator-min-microvolt = <5000000>;
             regulator-max-microvolt = <5000000>;
@@ -339,38 +344,40 @@
         };
 
         ldo1_reg: ldo1 {
+            /* VDDAPHY_CAM: vdda_csiport */
             regulator-name = "ldo1";
-            regulator-min-microvolt = <2800000>;
-            regulator-max-microvolt = <2800000>;
-            regulator-always-on;
-            regulator-boot-on;
+            regulator-min-microvolt = <1500000>;
+            regulator-max-microvolt = <1800000>;
         };
 
         ldo2_reg: ldo2 {
+            /* VCC_2V8_DISP: Does not go anywhere */
             regulator-name = "ldo2";
-            regulator-min-microvolt = <2900000>;
-            regulator-max-microvolt = <2900000>;
-            regulator-always-on;
-            regulator-boot-on;
+            regulator-min-microvolt = <2800000>;
+            regulator-max-microvolt = <2800000>;
+            /* Unused */
+            status = "disabled";
         };
 
         ldo3_reg: ldo3 {
+            /* VDDAPHY_MDM: vdda_lli */
             regulator-name = "ldo3";
-            regulator-min-microvolt = <3000000>;
-            regulator-max-microvolt = <3000000>;
-            regulator-always-on;
+            regulator-min-microvolt = <1500000>;
+            regulator-max-microvolt = <1500000>;
             regulator-boot-on;
+            /* Only if Modem is used */
+            status = "disabled";
         };
 
         ldo4_reg: ldo4 {
+            /* VDDAPHY_DISP: vdda_dsiport/hdmi */
             regulator-name = "ldo4";
-            regulator-min-microvolt = <2200000>;
-            regulator-max-microvolt = <2200000>;
-            regulator-always-on;
-            regulator-boot-on;
+            regulator-min-microvolt = <1500000>;
+            regulator-max-microvolt = <1800000>;
         };
 
         ldo5_reg: ldo5 {
+            /* VDDA_1V8_PHY: usb/sata/hdmi.. */
             regulator-name = "ldo5";
             regulator-min-microvolt = <1800000>;
             regulator-max-microvolt = <1800000>;
@@ -379,38 +386,43 @@
         };
 
         ldo6_reg: ldo6 {
+            /* VDDS_1V2_WKUP: hsic/ldo_emu_wkup */
             regulator-name = "ldo6";
-            regulator-min-microvolt = <1500000>;
-            regulator-max-microvolt = <1500000>;
+            regulator-min-microvolt = <1200000>;
+            regulator-max-microvolt = <1200000>;
             regulator-always-on;
             regulator-boot-on;
         };
 
         ldo7_reg: ldo7 {
+            /* VDD_VPP: vpp1 */
             regulator-name = "ldo7";
-            regulator-min-microvolt = <1500000>;
-            regulator-max-microvolt = <1500000>;
-            regulator-always-on;
-            regulator-boot-on;
+            regulator-min-microvolt = <2000000>;
+            regulator-max-microvolt = <2000000>;
+            /* Only for efuse reprograming! */
+            status = "disabled";
         };
 
         ldo8_reg: ldo8 {
+            /* VDD_3v0: Does not go anywhere */
             regulator-name = "ldo8";
-            regulator-min-microvolt = <1500000>;
-            regulator-max-microvolt = <1500000>;
-            regulator-always-on;
+            regulator-min-microvolt = <3000000>;
+            regulator-max-microvolt = <3000000>;
             regulator-boot-on;
+            /* Unused */
+            status = "disabled";
         };
 
         ldo9_reg: ldo9 {
+            /* VCC_DV_SDIO: vdds_sdcard */
             regulator-name = "ldo9";
             regulator-min-microvolt = <1800000>;
-            regulator-max-microvolt = <3300000>;
-            regulator-always-on;
+            regulator-max-microvolt = <3000000>;
             regulator-boot-on;
         };
 
         ldoln_reg: ldoln {
+            /* VDDA_1v8_REF: vdds_osc/mm_l4per.. */
             regulator-name = "ldoln";
             regulator-min-microvolt = <1800000>;
             regulator-max-microvolt = <1800000>;
@@ -419,12 +431,20 @@
         };
 
         ldousb_reg: ldousb {
+            /* VDDA_3V_USB: VDDA_USBHS33 */
             regulator-name = "ldousb";
             regulator-min-microvolt = <3250000>;
             regulator-max-microvolt = <3250000>;
             regulator-always-on;
             regulator-boot-on;
         };
+
+        regen3_reg: regen3 {
+            /* REGEN3 controls LDO9 supply to card */
+            regulator-name = "regen3";
+            regulator-always-on;
+            regulator-boot-on;
+        };
     };
 };
 };
diff --git a/arch/arm/boot/dts/stih41x.dtsi b/arch/arm/boot/dts/stih41x.dtsi
index 7321403cab8a..f5b9898d9c6e 100644
--- a/arch/arm/boot/dts/stih41x.dtsi
+++ b/arch/arm/boot/dts/stih41x.dtsi
@@ -6,10 +6,12 @@
         #address-cells = <1>;
         #size-cells = <0>;
         cpu@0 {
+            device_type = "cpu";
             compatible = "arm,cortex-a9";
             reg = <0>;
         };
         cpu@1 {
+            device_type = "cpu";
             compatible = "arm,cortex-a9";
             reg = <1>;
         };
diff --git a/arch/arm/boot/dts/tegra20-colibri-512.dtsi b/arch/arm/boot/dts/tegra20-colibri-512.dtsi
index 2fcb3f2ca160..5592be6f2f7a 100644
--- a/arch/arm/boot/dts/tegra20-colibri-512.dtsi
+++ b/arch/arm/boot/dts/tegra20-colibri-512.dtsi
@@ -457,6 +457,7 @@
     };
 
     usb-phy@c5004000 {
+        status = "okay";
         nvidia,phy-reset-gpio = <&gpio TEGRA_GPIO(V, 1)
             GPIO_ACTIVE_LOW>;
     };
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index 6462a721ebd4..a252c0bfacf5 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -88,4 +88,7 @@ static inline u32 mpidr_hash_size(void)
 {
     return 1 << mpidr_hash.bits;
 }
+
+extern int platform_can_cpu_hotplug(void);
+
 #endif
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index f8b8965666e9..b07c09e5a0ac 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -107,7 +107,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
         "   subs    %1, %0, %0, ror #16\n"
         "   addeq   %0, %0, %4\n"
         "   strexeq %2, %0, [%3]"
-        : "=&r" (slock), "=&r" (contended), "=r" (res)
+        : "=&r" (slock), "=&r" (contended), "=&r" (res)
         : "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
         : "cc");
     } while (res);
@@ -168,17 +168,20 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 
 static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-    unsigned long tmp;
+    unsigned long contended, res;
 
-    __asm__ __volatile__(
-"   ldrex   %0, [%1]\n"
-"   teq %0, #0\n"
-"   strexeq %0, %2, [%1]"
-    : "=&r" (tmp)
-    : "r" (&rw->lock), "r" (0x80000000)
-    : "cc");
+    do {
+        __asm__ __volatile__(
+        "   ldrex   %0, [%2]\n"
+        "   mov %1, #0\n"
+        "   teq %0, #0\n"
+        "   strexeq %1, %3, [%2]"
+        : "=&r" (contended), "=&r" (res)
+        : "r" (&rw->lock), "r" (0x80000000)
+        : "cc");
+    } while (res);
 
-    if (tmp == 0) {
+    if (!contended) {
         smp_mb();
         return 1;
     } else {
@@ -254,18 +257,26 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 
 static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
-    unsigned long tmp, tmp2 = 1;
+    unsigned long contended, res;
 
-    __asm__ __volatile__(
-"   ldrex   %0, [%2]\n"
-"   adds    %0, %0, #1\n"
-"   strexpl %1, %0, [%2]\n"
-    : "=&r" (tmp), "+r" (tmp2)
-    : "r" (&rw->lock)
-    : "cc");
+    do {
+        __asm__ __volatile__(
+        "   ldrex   %0, [%2]\n"
+        "   mov %1, #0\n"
+        "   adds    %0, %0, #1\n"
+        "   strexpl %1, %0, [%2]"
+        : "=&r" (contended), "=&r" (res)
+        : "r" (&rw->lock)
+        : "cc");
+    } while (res);
 
-    smp_mb();
-    return tmp2 == 0;
+    /* If the lock is negative, then it is already held for write. */
+    if (contended < 0x80000000) {
+        smp_mb();
+        return 1;
+    } else {
+        return 0;
+    }
 }
 
 /* read_can_lock - would read_trylock() succeed? */
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 46e7cfb3e721..0baf7f0d9394 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -43,6 +43,7 @@ struct mmu_gather {
     struct mm_struct *mm;
     unsigned int fullmm;
     struct vm_area_struct *vma;
+    unsigned long start, end;
     unsigned long range_start;
     unsigned long range_end;
     unsigned int nr;
@@ -107,10 +108,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 }
 
 static inline void
-tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm)
+tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end)
 {
     tlb->mm = mm;
-    tlb->fullmm = fullmm;
+    tlb->fullmm = !(start | (end+1));
+    tlb->start = start;
+    tlb->end = end;
     tlb->vma = NULL;
     tlb->max = ARRAY_SIZE(tlb->local);
     tlb->pages = tlb->local;
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index d40d0ef389db..9cbe70c8b0ef 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -357,7 +357,8 @@ ENDPROC(__pabt_svc)
     .endm
 
     .macro kuser_cmpxchg_check
-#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
+    !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
 #ifndef CONFIG_MMU
 #warning "NPTL on non MMU needs fixing"
 #else
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index 25442f451148..fc7920288a3d 100644
--- a/arch/arm/kernel/fiq.c
+++ b/arch/arm/kernel/fiq.c
@@ -84,17 +84,13 @@ int show_fiq_list(struct seq_file *p, int prec)
 
 void set_fiq_handler(void *start, unsigned int length)
 {
-#if defined(CONFIG_CPU_USE_DOMAINS)
-    void *base = (void *)0xffff0000;
-#else
     void *base = vectors_page;
-#endif
     unsigned offset = FIQ_OFFSET;
 
     memcpy(base + offset, start, length);
+    if (!cache_is_vipt_nonaliasing())
+        flush_icache_range(base + offset, offset + length);
     flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
-    if (!vectors_high())
-        flush_icache_range(offset, offset + length);
 }
 
 int claim_fiq(struct fiq_handler *f)
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 4fb074c446bf..d7c82df69243 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -15,6 +15,7 @@
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 #include <asm/mach-types.h>
+#include <asm/smp_plat.h>
 #include <asm/system_misc.h>
 
 extern const unsigned char relocate_new_kernel[];
@@ -39,6 +40,14 @@ int machine_kexec_prepare(struct kimage *image)
     int i, err;
 
     /*
+     * Validate that if the current HW supports SMP, then the SW supports
+     * and implements CPU hotplug for the current HW. If not, we won't be
+     * able to kexec reliably, so fail the prepare operation.
+     */
+    if (num_possible_cpus() > 1 && !platform_can_cpu_hotplug())
+        return -EINVAL;
+
+    /*
      * No segment at default ATAGs address. try to locate
      * a dtb using magic.
      */
@@ -134,10 +143,13 @@ void machine_kexec(struct kimage *image)
     unsigned long reboot_code_buffer_phys;
     void *reboot_code_buffer;
 
-    if (num_online_cpus() > 1) {
-        pr_err("kexec: error: multiple CPUs still online\n");
-        return;
-    }
+    /*
+     * This can only happen if machine_shutdown() failed to disable some
+     * CPU, and that can only happen if the checks in
+     * machine_kexec_prepare() were not correct. If this fails, we can't
+     * reliably kexec anyway, so BUG_ON is appropriate.
+     */
+    BUG_ON(num_online_cpus() > 1);
 
     page_list = image->head & PAGE_MASK;
 
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index d9f5cd4e533f..e186ee1e63f6 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -53,7 +53,12 @@ armpmu_map_cache_event(const unsigned (*cache_map)
 static int
 armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
 {
-    int mapping = (*event_map)[config];
+    int mapping;
+
+    if (config >= PERF_COUNT_HW_MAX)
+        return -EINVAL;
+
+    mapping = (*event_map)[config];
     return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
 }
 
@@ -253,6 +258,9 @@ validate_event(struct pmu_hw_events *hw_events,
     struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
     struct pmu *leader_pmu = event->group_leader->pmu;
 
+    if (is_software_event(event))
+        return 1;
+
     if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
         return 1;
 
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 536c85fe72a8..94f6b05f9e24 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -462,7 +462,7 @@ int in_gate_area_no_mm(unsigned long addr)
 {
     return in_gate_area(NULL, addr);
 }
-#define is_gate_vma(vma)    ((vma) = &gate_vma)
+#define is_gate_vma(vma)    ((vma) == &gate_vma)
 #else
 #define is_gate_vma(vma)    0
 #endif
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index c2b4f8f0be9a..2dc19349eb19 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -145,6 +145,16 @@ int boot_secondary(unsigned int cpu, struct task_struct *idle)
     return -ENOSYS;
 }
 
+int platform_can_cpu_hotplug(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+    if (smp_ops.cpu_kill)
+        return 1;
+#endif
+
+    return 0;
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 static void percpu_timer_stop(void);
 
diff --git a/arch/arm/mach-msm/Kconfig b/arch/arm/mach-msm/Kconfig
index 614e41e7881b..905efc8cac79 100644
--- a/arch/arm/mach-msm/Kconfig
+++ b/arch/arm/mach-msm/Kconfig
@@ -121,8 +121,7 @@ config MSM_SMD
     bool
 
 config MSM_GPIOMUX
-    depends on !(ARCH_MSM8X60 || ARCH_MSM8960)
-    bool "MSM V1 TLMM GPIOMUX architecture"
+    bool
     help
       Support for MSM V1 TLMM GPIOMUX architecture.
 
diff --git a/arch/arm/mach-msm/gpiomux-v1.c b/arch/arm/mach-msm/gpiomux-v1.c
deleted file mode 100644
index 27de2abd7144..000000000000
--- a/arch/arm/mach-msm/gpiomux-v1.c
+++ /dev/null
@@ -1,33 +0,0 @@
-/* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-#include <linux/kernel.h>
-#include "gpiomux.h"
-#include "proc_comm.h"
-
-void __msm_gpiomux_write(unsigned gpio, gpiomux_config_t val)
-{
-    unsigned tlmm_config = (val & ~GPIOMUX_CTL_MASK) |
-                ((gpio & 0x3ff) << 4);
-    unsigned tlmm_disable = 0;
-    int rc;
-
-    rc = msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX,
-               &tlmm_config, &tlmm_disable);
-    if (rc)
-        pr_err("%s: unexpected proc_comm failure %d: %08x %08x\n",
-            __func__, rc, tlmm_config, tlmm_disable);
-}
diff --git a/arch/arm/mach-msm/gpiomux.h b/arch/arm/mach-msm/gpiomux.h
index 8e82f41a8923..4410d7766f93 100644
--- a/arch/arm/mach-msm/gpiomux.h
+++ b/arch/arm/mach-msm/gpiomux.h
@@ -73,16 +73,6 @@ extern struct msm_gpiomux_config msm_gpiomux_configs[GPIOMUX_NGPIOS];
 int msm_gpiomux_write(unsigned gpio,
               gpiomux_config_t active,
               gpiomux_config_t suspended);
-
-/* Architecture-internal function for use by the framework only.
- * This function can assume the following:
- * - the gpio value has passed a bounds-check
- * - the gpiomux spinlock has been obtained
- *
- * This function is not for public consumption. External users
- * should use msm_gpiomux_write.
- */
-void __msm_gpiomux_write(unsigned gpio, gpiomux_config_t val);
 #else
 static inline int msm_gpiomux_write(unsigned gpio,
                     gpiomux_config_t active,
diff --git a/arch/arm/mach-omap2/dss-common.c b/arch/arm/mach-omap2/dss-common.c
index 393aeefaebb0..043e5705f2a6 100644
--- a/arch/arm/mach-omap2/dss-common.c
+++ b/arch/arm/mach-omap2/dss-common.c
@@ -42,7 +42,7 @@
 
 /* Using generic display panel */
 static struct tfp410_platform_data omap4_dvi_panel = {
-    .i2c_bus_num = 3,
+    .i2c_bus_num = 2,
     .power_down_gpio = PANDA_DVI_TFP410_POWER_DOWN_GPIO,
 };
 
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index 5cc92874be7e..f99f68e1e85b 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -129,6 +129,7 @@ static int omap_device_build_from_dt(struct platform_device *pdev)
     struct device_node *node = pdev->dev.of_node;
     const char *oh_name;
     int oh_cnt, i, ret = 0;
+    bool device_active = false;
 
     oh_cnt = of_property_count_strings(node, "ti,hwmods");
     if (oh_cnt <= 0) {
@@ -152,6 +153,8 @@ static int omap_device_build_from_dt(struct platform_device *pdev)
             goto odbfd_exit1;
         }
         hwmods[i] = oh;
+        if (oh->flags & HWMOD_INIT_NO_IDLE)
+            device_active = true;
     }
 
     od = omap_device_alloc(pdev, hwmods, oh_cnt);
@@ -172,6 +175,11 @@ static int omap_device_build_from_dt(struct platform_device *pdev)
 
     pdev->dev.pm_domain = &omap_device_pm_domain;
 
+    if (device_active) {
+        omap_device_enable(pdev);
+        pm_runtime_set_active(&pdev->dev);
+    }
+
 odbfd_exit1:
     kfree(hwmods);
 odbfd_exit:
@@ -842,6 +850,7 @@ static int __init omap_device_late_idle(struct device *dev, void *data)
 {
     struct platform_device *pdev = to_platform_device(dev);
     struct omap_device *od = to_omap_device(pdev);
+    int i;
 
     if (!od)
         return 0;
@@ -850,6 +859,15 @@ static int __init omap_device_late_idle(struct device *dev, void *data)
      * If omap_device state is enabled, but has no driver bound,
      * idle it.
      */
+
+    /*
+     * Some devices (like memory controllers) are always kept
+     * enabled, and should not be idled even with no drivers.
+     */
+    for (i = 0; i < od->hwmods_cnt; i++)
+        if (od->hwmods[i]->flags & HWMOD_INIT_NO_IDLE)
+            return 0;
+
     if (od->_driver_status != BUS_NOTIFY_BOUND_DRIVER) {
         if (od->_state == OMAP_DEVICE_STATE_ENABLED) {
             dev_warn(dev, "%s: enabled but no driver. Idling\n",
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 7341eff63f56..7f4db12b1459 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -2386,7 +2386,7 @@ static void __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data)
 
         np = of_dev_hwmod_lookup(of_find_node_by_name(NULL, "ocp"), oh);
         if (np)
-            va_start = of_iomap(np, 0);
+            va_start = of_iomap(np, oh->mpu_rt_idx);
     } else {
         va_start = ioremap(mem->pa_start, mem->pa_end - mem->pa_start);
     }
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h
index aab33fd814c0..e1482a9b3bc2 100644
--- a/arch/arm/mach-omap2/omap_hwmod.h
+++ b/arch/arm/mach-omap2/omap_hwmod.h
@@ -95,6 +95,54 @@ extern struct omap_hwmod_sysc_fields omap_hwmod_sysc_type3;
 #define MODULEMODE_HWCTRL 1
 #define MODULEMODE_SWCTRL 2
 
+#define DEBUG_OMAP2UART1_FLAGS 0
+#define DEBUG_OMAP2UART2_FLAGS 0
+#define DEBUG_OMAP2UART3_FLAGS 0
+#define DEBUG_OMAP3UART3_FLAGS 0
+#define DEBUG_OMAP3UART4_FLAGS 0
+#define DEBUG_OMAP4UART3_FLAGS 0
+#define DEBUG_OMAP4UART4_FLAGS 0
+#define DEBUG_TI81XXUART1_FLAGS 0
+#define DEBUG_TI81XXUART2_FLAGS 0
+#define DEBUG_TI81XXUART3_FLAGS 0
+#define DEBUG_AM33XXUART1_FLAGS 0
+
+#define DEBUG_OMAPUART_FLAGS (HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET)
+
+#if defined(CONFIG_DEBUG_OMAP2UART1)
+#undef DEBUG_OMAP2UART1_FLAGS
+#define DEBUG_OMAP2UART1_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP2UART2)
+#undef DEBUG_OMAP2UART2_FLAGS
+#define DEBUG_OMAP2UART2_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP2UART3)
+#undef DEBUG_OMAP2UART3_FLAGS
+#define DEBUG_OMAP2UART3_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP3UART3)
+#undef DEBUG_OMAP3UART3_FLAGS
+#define DEBUG_OMAP3UART3_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP3UART4)
+#undef DEBUG_OMAP3UART4_FLAGS
+#define DEBUG_OMAP3UART4_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP4UART3)
+#undef DEBUG_OMAP4UART3_FLAGS
+#define DEBUG_OMAP4UART3_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_OMAP4UART4)
+#undef DEBUG_OMAP4UART4_FLAGS
+#define DEBUG_OMAP4UART4_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_TI81XXUART1)
+#undef DEBUG_TI81XXUART1_FLAGS
+#define DEBUG_TI81XXUART1_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_TI81XXUART2)
+#undef DEBUG_TI81XXUART2_FLAGS
+#define DEBUG_TI81XXUART2_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_TI81XXUART3)
+#undef DEBUG_TI81XXUART3_FLAGS
+#define DEBUG_TI81XXUART3_FLAGS DEBUG_OMAPUART_FLAGS
+#elif defined(CONFIG_DEBUG_AM33XXUART1)
+#undef DEBUG_AM33XXUART1_FLAGS
+#define DEBUG_AM33XXUART1_FLAGS DEBUG_OMAPUART_FLAGS
+#endif
 
 /**
  * struct omap_hwmod_mux_info - hwmod specific mux configuration
@@ -568,6 +616,7 @@ struct omap_hwmod_link {
  * @voltdm: pointer to voltage domain (filled in at runtime)
  * @dev_attr: arbitrary device attributes that can be passed to the driver
  * @_sysc_cache: internal-use hwmod flags
+ * @mpu_rt_idx: index of device address space for register target (for DT boot)
  * @_mpu_rt_va: cached register target start address (internal use)
  * @_mpu_port: cached MPU register target slave (internal use)
  * @opt_clks_cnt: number of @opt_clks
@@ -617,6 +666,7 @@ struct omap_hwmod {
     struct list_head node;
     struct omap_hwmod_ocp_if *_mpu_port;
     u16 flags;
+    u8 mpu_rt_idx;
     u8 response_lat;
     u8 rst_lines_cnt;
     u8 opt_clks_cnt;
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
index d05fc7b54567..56cebb05509e 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
@@ -512,7 +512,7 @@ struct omap_hwmod omap2xxx_uart1_hwmod = {
     .mpu_irqs = omap2_uart1_mpu_irqs,
     .sdma_reqs = omap2_uart1_sdma_reqs,
     .main_clk = "uart1_fck",
-    .flags = HWMOD_SWSUP_SIDLE_ACT,
+    .flags = DEBUG_OMAP2UART1_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
     .prcm = {
         .omap2 = {
             .module_offs = CORE_MOD,
@@ -532,7 +532,7 @@ struct omap_hwmod omap2xxx_uart2_hwmod = {
     .mpu_irqs = omap2_uart2_mpu_irqs,
     .sdma_reqs = omap2_uart2_sdma_reqs,
     .main_clk = "uart2_fck",
-    .flags = HWMOD_SWSUP_SIDLE_ACT,
+    .flags = DEBUG_OMAP2UART2_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
     .prcm = {
         .omap2 = {
             .module_offs = CORE_MOD,
@@ -552,7 +552,7 @@ struct omap_hwmod omap2xxx_uart3_hwmod = {
     .mpu_irqs = omap2_uart3_mpu_irqs,
     .sdma_reqs = omap2_uart3_sdma_reqs,
     .main_clk = "uart3_fck",
-    .flags = HWMOD_SWSUP_SIDLE_ACT,
+    .flags = DEBUG_OMAP2UART3_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
     .prcm = {
         .omap2 = {
             .module_offs = CORE_MOD,
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
index 28bbd56346a9..eb2f3b93b51c 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
@@ -562,6 +562,7 @@ static struct omap_hwmod am33xx_cpgmac0_hwmod = {
     .clkdm_name = "cpsw_125mhz_clkdm",
     .flags = (HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY),
     .main_clk = "cpsw_125mhz_gclk",
+    .mpu_rt_idx = 1,
     .prcm = {
         .omap4 = {
             .clkctrl_offs = AM33XX_CM_PER_CPGMAC0_CLKCTRL_OFFSET,
@@ -1512,7 +1513,7 @@ static struct omap_hwmod am33xx_uart1_hwmod = {
     .name = "uart1",
     .class = &uart_class,
     .clkdm_name = "l4_wkup_clkdm",
-    .flags = HWMOD_SWSUP_SIDLE_ACT,
+    .flags = DEBUG_AM33XXUART1_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
     .main_clk = "dpll_per_m2_div4_wkupdm_ck",
     .prcm = {
         .omap4 = {
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index f7a3df2fb579..0c3a427da544 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -490,7 +490,7 @@ static struct omap_hwmod omap3xxx_uart1_hwmod = {
     .mpu_irqs = omap2_uart1_mpu_irqs,
     .sdma_reqs = omap2_uart1_sdma_reqs,
     .main_clk = "uart1_fck",
-    .flags = HWMOD_SWSUP_SIDLE_ACT,
+    .flags = DEBUG_TI81XXUART1_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
     .prcm = {
         .omap2 = {
             .module_offs = CORE_MOD,
@@ -509,7 +509,7 @@ static struct omap_hwmod omap3xxx_uart2_hwmod = {
     .mpu_irqs = omap2_uart2_mpu_irqs,
     .sdma_reqs = omap2_uart2_sdma_reqs,
     .main_clk = "uart2_fck",
-    .flags = HWMOD_SWSUP_SIDLE_ACT,
+    .flags = DEBUG_TI81XXUART2_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
     .prcm = {
         .omap2 = {
             .module_offs = CORE_MOD,
@@ -528,7 +528,8 @@ static struct omap_hwmod omap3xxx_uart3_hwmod = {
     .mpu_irqs = omap2_uart3_mpu_irqs,
     .sdma_reqs = omap2_uart3_sdma_reqs,
     .main_clk = "uart3_fck",
-    .flags = HWMOD_SWSUP_SIDLE_ACT,
+    .flags = DEBUG_OMAP3UART3_FLAGS | DEBUG_TI81XXUART3_FLAGS |
+            HWMOD_SWSUP_SIDLE_ACT,
     .prcm = {
         .omap2 = {
             .module_offs = OMAP3430_PER_MOD,
@@ -558,7 +559,7 @@ static struct omap_hwmod omap36xx_uart4_hwmod = {
     .mpu_irqs = uart4_mpu_irqs,
     .sdma_reqs = uart4_sdma_reqs,
     .main_clk = "uart4_fck",
-    .flags = HWMOD_SWSUP_SIDLE_ACT,
+    .flags = DEBUG_OMAP3UART4_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
     .prcm = {
         .omap2 = {
             .module_offs = OMAP3430_PER_MOD,
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index d04b5e60fdbe..9c3b504477d7 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -2858,8 +2858,7 @@ static struct omap_hwmod omap44xx_uart3_hwmod = {
     .name = "uart3",
     .class = &omap44xx_uart_hwmod_class,
     .clkdm_name = "l4_per_clkdm",
-    .flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET |
-            HWMOD_SWSUP_SIDLE_ACT,
+    .flags = DEBUG_OMAP4UART3_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
     .main_clk = "func_48m_fclk",
     .prcm = {
         .omap4 = {
@@ -2875,7 +2874,7 @@ static struct omap_hwmod omap44xx_uart4_hwmod = {
     .name = "uart4",
     .class = &omap44xx_uart_hwmod_class,
     .clkdm_name = "l4_per_clkdm",
-    .flags = HWMOD_SWSUP_SIDLE_ACT,
+    .flags = DEBUG_OMAP4UART4_FLAGS | HWMOD_SWSUP_SIDLE_ACT,
     .main_clk = "func_48m_fclk",
     .prcm = {
         .omap4 = {
diff --git a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
index f37ae96b70a1..3c70f5c1860f 100644
--- a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
@@ -1375,7 +1375,7 @@ static struct omap_hwmod omap54xx_uart3_hwmod = {
     .name = "uart3",
     .class = &omap54xx_uart_hwmod_class,
     .clkdm_name = "l4per_clkdm",
-    .flags = HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET,
+    .flags = DEBUG_OMAP4UART3_FLAGS,
     .main_clk = "func_48m_fclk",
     .prcm = {
         .omap4 = {
@@ -1391,6 +1391,7 @@ static struct omap_hwmod omap54xx_uart4_hwmod = {
     .name = "uart4",
     .class = &omap54xx_uart_hwmod_class,
     .clkdm_name = "l4per_clkdm",
+    .flags = DEBUG_OMAP4UART4_FLAGS,
     .main_clk = "func_48m_fclk",
     .prcm = {
         .omap4 = {
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index 3a674de6cb63..a388f8c1bcb3 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -208,17 +208,6 @@ static int __init omap_serial_early_init(void)
                 pr_info("%s used as console in debug mode: uart%d clocks will not be gated",
                     uart_name, uart->num);
             }
-
-            /*
-             * omap-uart can be used for earlyprintk logs
-             * So if omap-uart is used as console then prevent
-             * uart reset and idle to get logs from omap-uart
-             * until uart console driver is available to take
-             * care for console messages.
-             * Idling or resetting omap-uart while printing logs
-             * early boot logs can stall the boot-up.
-             */
-            oh->flags |= HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET;
         }
     } while (1);
 
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index e115f6742107..c5be60d85e4b 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -1162,9 +1162,6 @@ static void __init eva_init(void)
     gpio_request_one(61, GPIOF_OUT_INIT_HIGH, NULL); /* LCDDON */
     gpio_request_one(202, GPIOF_OUT_INIT_LOW, NULL); /* LCD0_LED_CONT */
 
-    /* Touchscreen */
-    gpio_request_one(166, GPIOF_OUT_INIT_HIGH, NULL); /* TP_RST_B */
-
     /* GETHER */
     gpio_request_one(18, GPIOF_OUT_INIT_HIGH, NULL); /* PHY_RST */
 
diff --git a/arch/arm/mach-shmobile/board-bockw.c b/arch/arm/mach-shmobile/board-bockw.c
index d5554646916c..3354a85c90f7 100644
--- a/arch/arm/mach-shmobile/board-bockw.c
+++ b/arch/arm/mach-shmobile/board-bockw.c
@@ -167,7 +167,13 @@ static const struct pinctrl_map bockw_pinctrl_map[] = {
                   "usb1", "usb1"),
     /* SDHI0 */
     PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778",
-                  "sdhi0", "sdhi0"),
+                  "sdhi0_data4", "sdhi0"),
+    PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778",
+                  "sdhi0_ctrl", "sdhi0"),
+    PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778",
+                  "sdhi0_cd", "sdhi0"),
+    PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.0", "pfc-r8a7778",
+                  "sdhi0_wp", "sdhi0"),
 };
 
 #define FPGA 0x18200000
diff --git a/arch/arm/mach-shmobile/board-lager.c b/arch/arm/mach-shmobile/board-lager.c
index d73e21d3ea8a..8d6bd5c5efb9 100644
--- a/arch/arm/mach-shmobile/board-lager.c
+++ b/arch/arm/mach-shmobile/board-lager.c
@@ -59,7 +59,7 @@ static __initdata struct gpio_led_platform_data lager_leds_pdata = {
 #define GPIO_KEY(c, g, d, ...) \
     { .code = c, .gpio = g, .desc = d, .active_low = 1 }
 
-static __initdata struct gpio_keys_button gpio_buttons[] = {
+static struct gpio_keys_button gpio_buttons[] = {
     GPIO_KEY(KEY_4, RCAR_GP_PIN(1, 28), "SW2-pin4"),
     GPIO_KEY(KEY_3, RCAR_GP_PIN(1, 26), "SW2-pin3"),
     GPIO_KEY(KEY_2, RCAR_GP_PIN(1, 24), "SW2-pin2"),
diff --git a/arch/arm/mach-sti/headsmp.S b/arch/arm/mach-sti/headsmp.S
index 78ebc7559f53..4c09bae86edf 100644
--- a/arch/arm/mach-sti/headsmp.S
+++ b/arch/arm/mach-sti/headsmp.S
@@ -16,8 +16,6 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 
-    __INIT
-
 /*
  * ST specific entry point for secondary CPUs. This provides
  * a "holding pen" into which all secondary cores are held until we're
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h index 46b3beb4b773..717031a762c2 100644 --- a/arch/arm64/include/asm/tlb.h +++ b/arch/arm64/include/asm/tlb.h | |||
@@ -35,6 +35,7 @@ struct mmu_gather { | |||
35 | struct mm_struct *mm; | 35 | struct mm_struct *mm; |
36 | unsigned int fullmm; | 36 | unsigned int fullmm; |
37 | struct vm_area_struct *vma; | 37 | struct vm_area_struct *vma; |
38 | unsigned long start, end; | ||
38 | unsigned long range_start; | 39 | unsigned long range_start; |
39 | unsigned long range_end; | 40 | unsigned long range_end; |
40 | unsigned int nr; | 41 | unsigned int nr; |
@@ -97,10 +98,12 @@ static inline void tlb_flush_mmu(struct mmu_gather *tlb) | |||
97 | } | 98 | } |
98 | 99 | ||
99 | static inline void | 100 | static inline void |
100 | tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int fullmm) | 101 | tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) |
101 | { | 102 | { |
102 | tlb->mm = mm; | 103 | tlb->mm = mm; |
103 | tlb->fullmm = fullmm; | 104 | tlb->fullmm = !(start | (end+1)); |
105 | tlb->start = start; | ||
106 | tlb->end = end; | ||
104 | tlb->vma = NULL; | 107 | tlb->vma = NULL; |
105 | tlb->max = ARRAY_SIZE(tlb->local); | 108 | tlb->max = ARRAY_SIZE(tlb->local); |
106 | tlb->pages = tlb->local; | 109 | tlb->pages = tlb->local; |
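Several architectures in this merge (arm64 here, ia64, s390, sh and um below) convert tlb_gather_mmu() from a full_mm_flush flag to a (start, end) range and then recover the full-flush case from the range itself. A stand-alone sketch of that detection, assuming the convention that a full teardown is requested as start == 0, end == ~0UL:

#include <stdio.h>
#include <stdbool.h>

/* A full-mm teardown is encoded as the range [0, ~0UL]; any real sub-range
 * leaves at least one bit set in start | (end + 1). */
static bool is_full_mm_flush(unsigned long start, unsigned long end)
{
        return !(start | (end + 1));
}

int main(void)
{
        printf("%d\n", is_full_mm_flush(0, ~0UL));              /* 1: full flush */
        printf("%d\n", is_full_mm_flush(0x1000, 0x2000));       /* 0: ranged flush */
        return 0;
}

With that encoding no extra flag has to be carried; the range doubles as the full-flush marker.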
diff --git a/arch/avr32/boards/atngw100/mrmt.c b/arch/avr32/boards/atngw100/mrmt.c index f91431963452..7de083d19b7e 100644 --- a/arch/avr32/boards/atngw100/mrmt.c +++ b/arch/avr32/boards/atngw100/mrmt.c | |||
@@ -150,7 +150,6 @@ static struct ac97c_platform_data __initdata ac97c0_data = { | |||
150 | static struct platform_device rmt_ts_device = { | 150 | static struct platform_device rmt_ts_device = { |
151 | .name = "ucb1400_ts", | 151 | .name = "ucb1400_ts", |
152 | .id = -1, | 152 | .id = -1, |
153 | } | ||
154 | }; | 153 | }; |
155 | #endif | 154 | #endif |
156 | 155 | ||
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig index 33a97929d055..77d442ab28c8 100644 --- a/arch/hexagon/Kconfig +++ b/arch/hexagon/Kconfig | |||
@@ -158,6 +158,7 @@ source "kernel/Kconfig.hz" | |||
158 | endmenu | 158 | endmenu |
159 | 159 | ||
160 | source "init/Kconfig" | 160 | source "init/Kconfig" |
161 | source "kernel/Kconfig.freezer" | ||
161 | source "drivers/Kconfig" | 162 | source "drivers/Kconfig" |
162 | source "fs/Kconfig" | 163 | source "fs/Kconfig" |
163 | 164 | ||
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h index ef3a9de01954..bc5efc7c3f3f 100644 --- a/arch/ia64/include/asm/tlb.h +++ b/arch/ia64/include/asm/tlb.h | |||
@@ -22,7 +22,7 @@ | |||
22 | * unmapping a portion of the virtual address space, these hooks are called according to | 22 | * unmapping a portion of the virtual address space, these hooks are called according to |
23 | * the following template: | 23 | * the following template: |
24 | * | 24 | * |
25 | * tlb <- tlb_gather_mmu(mm, full_mm_flush); // start unmap for address space MM | 25 | * tlb <- tlb_gather_mmu(mm, start, end); // start unmap for address space MM |
26 | * { | 26 | * { |
27 | * for each vma that needs a shootdown do { | 27 | * for each vma that needs a shootdown do { |
28 | * tlb_start_vma(tlb, vma); | 28 | * tlb_start_vma(tlb, vma); |
@@ -58,6 +58,7 @@ struct mmu_gather { | |||
58 | unsigned int max; | 58 | unsigned int max; |
59 | unsigned char fullmm; /* non-zero means full mm flush */ | 59 | unsigned char fullmm; /* non-zero means full mm flush */ |
60 | unsigned char need_flush; /* really unmapped some PTEs? */ | 60 | unsigned char need_flush; /* really unmapped some PTEs? */ |
61 | unsigned long start, end; | ||
61 | unsigned long start_addr; | 62 | unsigned long start_addr; |
62 | unsigned long end_addr; | 63 | unsigned long end_addr; |
63 | struct page **pages; | 64 | struct page **pages; |
@@ -155,13 +156,15 @@ static inline void __tlb_alloc_page(struct mmu_gather *tlb) | |||
155 | 156 | ||
156 | 157 | ||
157 | static inline void | 158 | static inline void |
158 | tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush) | 159 | tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) |
159 | { | 160 | { |
160 | tlb->mm = mm; | 161 | tlb->mm = mm; |
161 | tlb->max = ARRAY_SIZE(tlb->local); | 162 | tlb->max = ARRAY_SIZE(tlb->local); |
162 | tlb->pages = tlb->local; | 163 | tlb->pages = tlb->local; |
163 | tlb->nr = 0; | 164 | tlb->nr = 0; |
164 | tlb->fullmm = full_mm_flush; | 165 | tlb->fullmm = !(start | (end+1)); |
166 | tlb->start = start; | ||
167 | tlb->end = end; | ||
165 | tlb->start_addr = ~0UL; | 168 | tlb->start_addr = ~0UL; |
166 | } | 169 | } |
167 | 170 | ||
diff --git a/arch/m68k/emu/natfeat.c b/arch/m68k/emu/natfeat.c index 2291a7d69d49..fa277aecfb78 100644 --- a/arch/m68k/emu/natfeat.c +++ b/arch/m68k/emu/natfeat.c | |||
@@ -18,9 +18,11 @@ | |||
18 | #include <asm/machdep.h> | 18 | #include <asm/machdep.h> |
19 | #include <asm/natfeat.h> | 19 | #include <asm/natfeat.h> |
20 | 20 | ||
21 | extern long nf_get_id2(const char *feature_name); | ||
22 | |||
21 | asm("\n" | 23 | asm("\n" |
22 | " .global nf_get_id,nf_call\n" | 24 | " .global nf_get_id2,nf_call\n" |
23 | "nf_get_id:\n" | 25 | "nf_get_id2:\n" |
24 | " .short 0x7300\n" | 26 | " .short 0x7300\n" |
25 | " rts\n" | 27 | " rts\n" |
26 | "nf_call:\n" | 28 | "nf_call:\n" |
@@ -29,12 +31,25 @@ asm("\n" | |||
29 | "1: moveq.l #0,%d0\n" | 31 | "1: moveq.l #0,%d0\n" |
30 | " rts\n" | 32 | " rts\n" |
31 | " .section __ex_table,\"a\"\n" | 33 | " .section __ex_table,\"a\"\n" |
32 | " .long nf_get_id,1b\n" | 34 | " .long nf_get_id2,1b\n" |
33 | " .long nf_call,1b\n" | 35 | " .long nf_call,1b\n" |
34 | " .previous"); | 36 | " .previous"); |
35 | EXPORT_SYMBOL_GPL(nf_get_id); | ||
36 | EXPORT_SYMBOL_GPL(nf_call); | 37 | EXPORT_SYMBOL_GPL(nf_call); |
37 | 38 | ||
39 | long nf_get_id(const char *feature_name) | ||
40 | { | ||
41 | /* feature_name may be in vmalloc()ed memory, so make a copy */ | ||
42 | char name_copy[32]; | ||
43 | size_t n; | ||
44 | |||
45 | n = strlcpy(name_copy, feature_name, sizeof(name_copy)); | ||
46 | if (n >= sizeof(name_copy)) | ||
47 | return 0; | ||
48 | |||
49 | return nf_get_id2(name_copy); | ||
50 | } | ||
51 | EXPORT_SYMBOL_GPL(nf_get_id); | ||
52 | |||
38 | void nfprint(const char *fmt, ...) | 53 | void nfprint(const char *fmt, ...) |
39 | { | 54 | { |
40 | static char buf[256]; | 55 | static char buf[256]; |
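The natfeat change keeps the raw trap as nf_get_id2() and adds a C wrapper that copies the name into a fixed stack buffer first, because the caller's string may live in vmalloc()ed memory the host emulator cannot follow. A minimal user-space illustration of the same bounded-copy-then-call pattern; backend() is a made-up stand-in for the trap, and snprintf stands in for the kernel's strlcpy():

#include <stdio.h>
#include <string.h>

static long backend(const char *name)           /* stand-in for the nf_get_id2() trap */
{
        return (long)strlen(name);
}

static long get_id(const char *feature_name)
{
        char name_copy[32];

        /* bounded copy; reject names that do not fit, as the patch does */
        if (snprintf(name_copy, sizeof(name_copy), "%s", feature_name)
            >= (int)sizeof(name_copy))
                return 0;

        return backend(name_copy);
}

int main(void)
{
        printf("%ld\n", get_id("NF_STDERR"));
        return 0;
}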
diff --git a/arch/m68k/include/asm/div64.h b/arch/m68k/include/asm/div64.h index 444ea8a09e9f..ef881cfbbca9 100644 --- a/arch/m68k/include/asm/div64.h +++ b/arch/m68k/include/asm/div64.h | |||
@@ -15,16 +15,17 @@ | |||
15 | unsigned long long n64; \ | 15 | unsigned long long n64; \ |
16 | } __n; \ | 16 | } __n; \ |
17 | unsigned long __rem, __upper; \ | 17 | unsigned long __rem, __upper; \ |
18 | unsigned long __base = (base); \ | ||
18 | \ | 19 | \ |
19 | __n.n64 = (n); \ | 20 | __n.n64 = (n); \ |
20 | if ((__upper = __n.n32[0])) { \ | 21 | if ((__upper = __n.n32[0])) { \ |
21 | asm ("divul.l %2,%1:%0" \ | 22 | asm ("divul.l %2,%1:%0" \ |
22 | : "=d" (__n.n32[0]), "=d" (__upper) \ | 23 | : "=d" (__n.n32[0]), "=d" (__upper) \ |
23 | : "d" (base), "0" (__n.n32[0])); \ | 24 | : "d" (__base), "0" (__n.n32[0])); \ |
24 | } \ | 25 | } \ |
25 | asm ("divu.l %2,%1:%0" \ | 26 | asm ("divu.l %2,%1:%0" \ |
26 | : "=d" (__n.n32[1]), "=d" (__rem) \ | 27 | : "=d" (__n.n32[1]), "=d" (__rem) \ |
27 | : "d" (base), "1" (__upper), "0" (__n.n32[1])); \ | 28 | : "d" (__base), "1" (__upper), "0" (__n.n32[1])); \ |
28 | (n) = __n.n64; \ | 29 | (n) = __n.n64; \ |
29 | __rem; \ | 30 | __rem; \ |
30 | }) | 31 | }) |
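The div64 fix is macro hygiene: base is latched into a local __base so an argument with side effects is evaluated exactly once and both asm statements see the same value. A portable sketch of the idiom (GCC statement expression, plain C division instead of the m68k asm), with do_div_sketch() as a made-up name:

#include <stdio.h>

/* Evaluate 'base' exactly once, divide n in place, yield the remainder
 * (same contract as the kernel's do_div()).  Uses a GCC statement expression. */
#define do_div_sketch(n, base) ({                               \
        unsigned long __base = (base);                          \
        unsigned long __rem = (unsigned long)((n) % __base);    \
        (n) /= __base;                                          \
        __rem;                                                  \
})

int main(void)
{
        unsigned long long n = 1000;
        int evals = 0;
        unsigned long rem = do_div_sketch(n, (evals++, 7));     /* side-effecting argument */

        printf("n=%llu rem=%lu base evaluated %d time(s)\n", n, rem, evals);
        return 0;
}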
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index d22a4ecffff4..4fab52294d98 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig | |||
@@ -28,7 +28,7 @@ config MICROBLAZE | |||
28 | select GENERIC_CLOCKEVENTS | 28 | select GENERIC_CLOCKEVENTS |
29 | select GENERIC_IDLE_POLL_SETUP | 29 | select GENERIC_IDLE_POLL_SETUP |
30 | select MODULES_USE_ELF_RELA | 30 | select MODULES_USE_ELF_RELA |
31 | select CLONE_BACKWARDS | 31 | select CLONE_BACKWARDS3 |
32 | 32 | ||
33 | config SWAP | 33 | config SWAP |
34 | def_bool n | 34 | def_bool n |
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h index 1dc086087a72..fa44f3ec5302 100644 --- a/arch/mips/include/asm/cpu-features.h +++ b/arch/mips/include/asm/cpu-features.h | |||
@@ -17,6 +17,8 @@ | |||
17 | #define current_cpu_type() current_cpu_data.cputype | 17 | #define current_cpu_type() current_cpu_data.cputype |
18 | #endif | 18 | #endif |
19 | 19 | ||
20 | #define boot_cpu_type() cpu_data[0].cputype | ||
21 | |||
20 | /* | 22 | /* |
21 | * SMP assumption: Options of CPU 0 are a superset of all processors. | 23 | * SMP assumption: Options of CPU 0 are a superset of all processors. |
22 | * This is true for all known MIPS systems. | 24 | * This is true for all known MIPS systems. |
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index 159abc8842d2..126da74d4c55 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c | |||
@@ -66,6 +66,8 @@ static void __init bmips_smp_setup(void) | |||
66 | int i, cpu = 1, boot_cpu = 0; | 66 | int i, cpu = 1, boot_cpu = 0; |
67 | 67 | ||
68 | #if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380) | 68 | #if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380) |
69 | int cpu_hw_intr; | ||
70 | |||
69 | /* arbitration priority */ | 71 | /* arbitration priority */ |
70 | clear_c0_brcm_cmt_ctrl(0x30); | 72 | clear_c0_brcm_cmt_ctrl(0x30); |
71 | 73 | ||
@@ -80,8 +82,12 @@ static void __init bmips_smp_setup(void) | |||
80 | * MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output | 82 | * MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output |
81 | * MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output | 83 | * MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output |
82 | */ | 84 | */ |
83 | change_c0_brcm_cmt_intr(0xf8018000, | 85 | if (boot_cpu == 0) |
84 | (0x02 << 27) | (0x03 << 15)); | 86 | cpu_hw_intr = 0x02; |
87 | else | ||
88 | cpu_hw_intr = 0x1d; | ||
89 | |||
90 | change_c0_brcm_cmt_intr(0xf8018000, (cpu_hw_intr << 27) | (0x03 << 15)); | ||
85 | 91 | ||
86 | /* single core, 2 threads (2 pipelines) */ | 92 | /* single core, 2 threads (2 pipelines) */ |
87 | max_cpus = 2; | 93 | max_cpus = 2; |
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c index e4b1140cdae0..3a2b6e9f25cf 100644 --- a/arch/mips/oprofile/op_model_mipsxx.c +++ b/arch/mips/oprofile/op_model_mipsxx.c | |||
@@ -166,7 +166,7 @@ static void mipsxx_reg_setup(struct op_counter_config *ctr) | |||
166 | reg.control[i] |= M_PERFCTL_USER; | 166 | reg.control[i] |= M_PERFCTL_USER; |
167 | if (ctr[i].exl) | 167 | if (ctr[i].exl) |
168 | reg.control[i] |= M_PERFCTL_EXL; | 168 | reg.control[i] |= M_PERFCTL_EXL; |
169 | if (current_cpu_type() == CPU_XLR) | 169 | if (boot_cpu_type() == CPU_XLR) |
170 | reg.control[i] |= M_PERFCTL_COUNT_ALL_THREADS; | 170 | reg.control[i] |= M_PERFCTL_COUNT_ALL_THREADS; |
171 | reg.counter[i] = 0x80000000 - ctr[i].count; | 171 | reg.counter[i] = 0x80000000 - ctr[i].count; |
172 | } | 172 | } |
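The oprofile change uses the new boot_cpu_type() helper, which always reports CPU 0's type, instead of current_cpu_type(), which depends on whichever CPU happens to run the setup code. A toy sketch of the difference, assuming a cpu_data[] array shaped like the MIPS one:

#include <stdio.h>

struct cpuinfo { int cputype; };

static struct cpuinfo cpu_data[2] = { { .cputype = 100 }, { .cputype = 200 } };

static int this_cpu(void) { return 1; }         /* pretend setup runs on CPU 1 */

#define current_cpu_type()      (cpu_data[this_cpu()].cputype)
#define boot_cpu_type()         (cpu_data[0].cputype)

int main(void)
{
        printf("current=%d boot=%d\n", current_cpu_type(), boot_cpu_type());
        return 0;
}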
diff --git a/arch/mips/pnx833x/common/platform.c b/arch/mips/pnx833x/common/platform.c index d22dc0d6f289..2b7e837dc2e2 100644 --- a/arch/mips/pnx833x/common/platform.c +++ b/arch/mips/pnx833x/common/platform.c | |||
@@ -206,11 +206,13 @@ static struct resource pnx833x_ethernet_resources[] = { | |||
206 | .end = PNX8335_IP3902_PORTS_END, | 206 | .end = PNX8335_IP3902_PORTS_END, |
207 | .flags = IORESOURCE_MEM, | 207 | .flags = IORESOURCE_MEM, |
208 | }, | 208 | }, |
209 | #ifdef CONFIG_SOC_PNX8335 | ||
209 | [1] = { | 210 | [1] = { |
210 | .start = PNX8335_PIC_ETHERNET_INT, | 211 | .start = PNX8335_PIC_ETHERNET_INT, |
211 | .end = PNX8335_PIC_ETHERNET_INT, | 212 | .end = PNX8335_PIC_ETHERNET_INT, |
212 | .flags = IORESOURCE_IRQ, | 213 | .flags = IORESOURCE_IRQ, |
213 | }, | 214 | }, |
215 | #endif | ||
214 | }; | 216 | }; |
215 | 217 | ||
216 | static struct platform_device pnx833x_ethernet_device = { | 218 | static struct platform_device pnx833x_ethernet_device = { |
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig index 99dbab1c59ac..d60bf98fa5cf 100644 --- a/arch/openrisc/Kconfig +++ b/arch/openrisc/Kconfig | |||
@@ -55,6 +55,7 @@ config GENERIC_CSUM | |||
55 | 55 | ||
56 | source "init/Kconfig" | 56 | source "init/Kconfig" |
57 | 57 | ||
58 | source "kernel/Kconfig.freezer" | ||
58 | 59 | ||
59 | menu "Processor type and features" | 60 | menu "Processor type and features" |
60 | 61 | ||
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 3bf72cd2c8fc..dbd9d3c991e8 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig | |||
@@ -566,7 +566,7 @@ config SCHED_SMT | |||
566 | config PPC_DENORMALISATION | 566 | config PPC_DENORMALISATION |
567 | bool "PowerPC denormalisation exception handling" | 567 | bool "PowerPC denormalisation exception handling" |
568 | depends on PPC_BOOK3S_64 | 568 | depends on PPC_BOOK3S_64 |
569 | default "n" | 569 | default "y" if PPC_POWERNV |
570 | ---help--- | 570 | ---help--- |
571 | Add support for handling denormalisation of single precision | 571 | Add support for handling denormalisation of single precision |
572 | values. Useful for bare metal only. If unsure say Y here. | 572 | values. Useful for bare metal only. If unsure say Y here. |
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 47a35b08b963..e378cccfca55 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h | |||
@@ -247,6 +247,10 @@ struct thread_struct { | |||
247 | unsigned long tm_orig_msr; /* Thread's MSR on ctx switch */ | 247 | unsigned long tm_orig_msr; /* Thread's MSR on ctx switch */ |
248 | struct pt_regs ckpt_regs; /* Checkpointed registers */ | 248 | struct pt_regs ckpt_regs; /* Checkpointed registers */ |
249 | 249 | ||
250 | unsigned long tm_tar; | ||
251 | unsigned long tm_ppr; | ||
252 | unsigned long tm_dscr; | ||
253 | |||
250 | /* | 254 | /* |
251 | * Transactional FP and VSX 0-31 register set. | 255 | * Transactional FP and VSX 0-31 register set. |
252 | * NOTE: the sense of these is the opposite of the integer ckpt_regs! | 256 | * NOTE: the sense of these is the opposite of the integer ckpt_regs! |
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index a6840e4e24f7..99222e27f173 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h | |||
@@ -254,19 +254,28 @@ | |||
254 | #define SPRN_HRMOR 0x139 /* Real mode offset register */ | 254 | #define SPRN_HRMOR 0x139 /* Real mode offset register */ |
255 | #define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */ | 255 | #define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */ |
256 | #define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */ | 256 | #define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */ |
257 | /* HFSCR and FSCR bit numbers are the same */ | ||
258 | #define FSCR_TAR_LG 8 /* Enable Target Address Register */ | ||
259 | #define FSCR_EBB_LG 7 /* Enable Event Based Branching */ | ||
260 | #define FSCR_TM_LG 5 /* Enable Transactional Memory */ | ||
261 | #define FSCR_PM_LG 4 /* Enable prob/priv access to PMU SPRs */ | ||
262 | #define FSCR_BHRB_LG 3 /* Enable Branch History Rolling Buffer*/ | ||
263 | #define FSCR_DSCR_LG 2 /* Enable Data Stream Control Register */ | ||
264 | #define FSCR_VECVSX_LG 1 /* Enable VMX/VSX */ | ||
265 | #define FSCR_FP_LG 0 /* Enable Floating Point */ | ||
257 | #define SPRN_FSCR 0x099 /* Facility Status & Control Register */ | 266 | #define SPRN_FSCR 0x099 /* Facility Status & Control Register */ |
258 | #define FSCR_TAR (1 << (63-55)) /* Enable Target Address Register */ | 267 | #define FSCR_TAR __MASK(FSCR_TAR_LG) |
259 | #define FSCR_EBB (1 << (63-56)) /* Enable Event Based Branching */ | 268 | #define FSCR_EBB __MASK(FSCR_EBB_LG) |
260 | #define FSCR_DSCR (1 << (63-61)) /* Enable Data Stream Control Register */ | 269 | #define FSCR_DSCR __MASK(FSCR_DSCR_LG) |
261 | #define SPRN_HFSCR 0xbe /* HV=1 Facility Status & Control Register */ | 270 | #define SPRN_HFSCR 0xbe /* HV=1 Facility Status & Control Register */ |
262 | #define HFSCR_TAR (1 << (63-55)) /* Enable Target Address Register */ | 271 | #define HFSCR_TAR __MASK(FSCR_TAR_LG) |
263 | #define HFSCR_EBB (1 << (63-56)) /* Enable Event Based Branching */ | 272 | #define HFSCR_EBB __MASK(FSCR_EBB_LG) |
264 | #define HFSCR_TM (1 << (63-58)) /* Enable Transactional Memory */ | 273 | #define HFSCR_TM __MASK(FSCR_TM_LG) |
265 | #define HFSCR_PM (1 << (63-60)) /* Enable prob/priv access to PMU SPRs */ | 274 | #define HFSCR_PM __MASK(FSCR_PM_LG) |
266 | #define HFSCR_BHRB (1 << (63-59)) /* Enable Branch History Rolling Buffer*/ | 275 | #define HFSCR_BHRB __MASK(FSCR_BHRB_LG) |
267 | #define HFSCR_DSCR (1 << (63-61)) /* Enable Data Stream Control Register */ | 276 | #define HFSCR_DSCR __MASK(FSCR_DSCR_LG) |
268 | #define HFSCR_VECVSX (1 << (63-62)) /* Enable VMX/VSX */ | 277 | #define HFSCR_VECVSX __MASK(FSCR_VECVSX_LG) |
269 | #define HFSCR_FP (1 << (63-63)) /* Enable Floating Point */ | 278 | #define HFSCR_FP __MASK(FSCR_FP_LG) |
270 | #define SPRN_TAR 0x32f /* Target Address Register */ | 279 | #define SPRN_TAR 0x32f /* Target Address Register */ |
271 | #define SPRN_LPCR 0x13E /* LPAR Control Register */ | 280 | #define SPRN_LPCR 0x13E /* LPAR Control Register */ |
272 | #define LPCR_VPM0 (1ul << (63-0)) | 281 | #define LPCR_VPM0 (1ul << (63-0)) |
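The reg.h rework replaces hand-written (1 << (63-n)) constants with shared *_LG bit numbers plus __MASK(), so FSCR and HFSCR use one set of definitions and traps.c can index a string table by bit number. A quick stand-alone check that the two spellings agree, assuming __MASK(X) expands to 1UL << (X) as in the kernel header:

#include <assert.h>
#include <stdio.h>

#define __MASK(X)       (1UL << (X))            /* assumed, as in asm/reg.h */
#define FSCR_TAR_LG     8
#define FSCR_DSCR_LG    2

int main(void)
{
        /* old IBM-style spelling vs. new bit-number spelling */
        assert((1UL << (63 - 55)) == __MASK(FSCR_TAR_LG));
        assert((1UL << (63 - 61)) == __MASK(FSCR_DSCR_LG));
        printf("FSCR_TAR=%#lx FSCR_DSCR=%#lx\n",
               __MASK(FSCR_TAR_LG), __MASK(FSCR_DSCR_LG));
        return 0;
}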
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h index 49a13e0ef234..294c2cedcf7a 100644 --- a/arch/powerpc/include/asm/switch_to.h +++ b/arch/powerpc/include/asm/switch_to.h | |||
@@ -15,6 +15,15 @@ extern struct task_struct *__switch_to(struct task_struct *, | |||
15 | struct thread_struct; | 15 | struct thread_struct; |
16 | extern struct task_struct *_switch(struct thread_struct *prev, | 16 | extern struct task_struct *_switch(struct thread_struct *prev, |
17 | struct thread_struct *next); | 17 | struct thread_struct *next); |
18 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
19 | static inline void save_tar(struct thread_struct *prev) | ||
20 | { | ||
21 | if (cpu_has_feature(CPU_FTR_ARCH_207S)) | ||
22 | prev->tar = mfspr(SPRN_TAR); | ||
23 | } | ||
24 | #else | ||
25 | static inline void save_tar(struct thread_struct *prev) {} | ||
26 | #endif | ||
18 | 27 | ||
19 | extern void giveup_fpu(struct task_struct *); | 28 | extern void giveup_fpu(struct task_struct *); |
20 | extern void load_up_fpu(void); | 29 | extern void load_up_fpu(void); |
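save_tar() follows the usual pattern of a real inline under the relevant config/CPU feature and an empty stub otherwise, so __switch_to() needs no #ifdef at the call site. A generic sketch of that pattern; HAVE_WIDGET, widget_save() and the placeholder value are invented for illustration:

#include <stdio.h>

#define HAVE_WIDGET 1                           /* set to 0 to get the no-op stub */

struct state { unsigned long tar; };

#if HAVE_WIDGET
static inline void widget_save(struct state *s)
{
        s->tar = 0xdeadbeefUL;                  /* pretend to read the SPR */
}
#else
static inline void widget_save(struct state *s) { }
#endif

int main(void)
{
        struct state s = { 0 };

        widget_save(&s);                        /* call site is ifdef-free either way */
        printf("tar=%#lx\n", s.tar);
        return 0;
}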
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index c7e8afc2ead0..8207459efe56 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c | |||
@@ -138,6 +138,9 @@ int main(void) | |||
138 | DEFINE(THREAD_TM_TFHAR, offsetof(struct thread_struct, tm_tfhar)); | 138 | DEFINE(THREAD_TM_TFHAR, offsetof(struct thread_struct, tm_tfhar)); |
139 | DEFINE(THREAD_TM_TEXASR, offsetof(struct thread_struct, tm_texasr)); | 139 | DEFINE(THREAD_TM_TEXASR, offsetof(struct thread_struct, tm_texasr)); |
140 | DEFINE(THREAD_TM_TFIAR, offsetof(struct thread_struct, tm_tfiar)); | 140 | DEFINE(THREAD_TM_TFIAR, offsetof(struct thread_struct, tm_tfiar)); |
141 | DEFINE(THREAD_TM_TAR, offsetof(struct thread_struct, tm_tar)); | ||
142 | DEFINE(THREAD_TM_PPR, offsetof(struct thread_struct, tm_ppr)); | ||
143 | DEFINE(THREAD_TM_DSCR, offsetof(struct thread_struct, tm_dscr)); | ||
141 | DEFINE(PT_CKPT_REGS, offsetof(struct thread_struct, ckpt_regs)); | 144 | DEFINE(PT_CKPT_REGS, offsetof(struct thread_struct, ckpt_regs)); |
142 | DEFINE(THREAD_TRANSACT_VR0, offsetof(struct thread_struct, | 145 | DEFINE(THREAD_TRANSACT_VR0, offsetof(struct thread_struct, |
143 | transact_vr[0])); | 146 | transact_vr[0])); |
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index ea9414c8088d..55593ee2d5aa 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c | |||
@@ -1061,7 +1061,7 @@ static const struct file_operations proc_eeh_operations = { | |||
1061 | 1061 | ||
1062 | static int __init eeh_init_proc(void) | 1062 | static int __init eeh_init_proc(void) |
1063 | { | 1063 | { |
1064 | if (machine_is(pseries)) | 1064 | if (machine_is(pseries) || machine_is(powernv)) |
1065 | proc_create("powerpc/eeh", 0, NULL, &proc_eeh_operations); | 1065 | proc_create("powerpc/eeh", 0, NULL, &proc_eeh_operations); |
1066 | return 0; | 1066 | return 0; |
1067 | } | 1067 | } |
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index ab15b8d057ad..2bd0b885b0fe 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S | |||
@@ -449,15 +449,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_DSCR) | |||
449 | 449 | ||
450 | #ifdef CONFIG_PPC_BOOK3S_64 | 450 | #ifdef CONFIG_PPC_BOOK3S_64 |
451 | BEGIN_FTR_SECTION | 451 | BEGIN_FTR_SECTION |
452 | /* | ||
453 | * Back up the TAR across context switches. Note that the TAR is not | ||
454 | * available for use in the kernel. (To provide this, the TAR should | ||
455 | * be backed up/restored on exception entry/exit instead, and be in | ||
456 | * pt_regs. FIXME, this should be in pt_regs anyway (for debug).) | ||
457 | */ | ||
458 | mfspr r0,SPRN_TAR | ||
459 | std r0,THREAD_TAR(r3) | ||
460 | |||
461 | /* Event based branch registers */ | 452 | /* Event based branch registers */ |
462 | mfspr r0, SPRN_BESCR | 453 | mfspr r0, SPRN_BESCR |
463 | std r0, THREAD_BESCR(r3) | 454 | std r0, THREAD_BESCR(r3) |
@@ -584,9 +575,34 @@ BEGIN_FTR_SECTION | |||
584 | ld r7,DSCR_DEFAULT@toc(2) | 575 | ld r7,DSCR_DEFAULT@toc(2) |
585 | ld r0,THREAD_DSCR(r4) | 576 | ld r0,THREAD_DSCR(r4) |
586 | cmpwi r6,0 | 577 | cmpwi r6,0 |
578 | li r8, FSCR_DSCR | ||
587 | bne 1f | 579 | bne 1f |
588 | ld r0,0(r7) | 580 | ld r0,0(r7) |
589 | 1: cmpd r0,r25 | 581 | b 3f |
582 | 1: | ||
583 | BEGIN_FTR_SECTION_NESTED(70) | ||
584 | mfspr r6, SPRN_FSCR | ||
585 | or r6, r6, r8 | ||
586 | mtspr SPRN_FSCR, r6 | ||
587 | BEGIN_FTR_SECTION_NESTED(69) | ||
588 | mfspr r6, SPRN_HFSCR | ||
589 | or r6, r6, r8 | ||
590 | mtspr SPRN_HFSCR, r6 | ||
591 | END_FTR_SECTION_NESTED(CPU_FTR_HVMODE, CPU_FTR_HVMODE, 69) | ||
592 | b 4f | ||
593 | END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70) | ||
594 | 3: | ||
595 | BEGIN_FTR_SECTION_NESTED(70) | ||
596 | mfspr r6, SPRN_FSCR | ||
597 | andc r6, r6, r8 | ||
598 | mtspr SPRN_FSCR, r6 | ||
599 | BEGIN_FTR_SECTION_NESTED(69) | ||
600 | mfspr r6, SPRN_HFSCR | ||
601 | andc r6, r6, r8 | ||
602 | mtspr SPRN_HFSCR, r6 | ||
603 | END_FTR_SECTION_NESTED(CPU_FTR_HVMODE, CPU_FTR_HVMODE, 69) | ||
604 | END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70) | ||
605 | 4: cmpd r0,r25 | ||
590 | beq 2f | 606 | beq 2f |
591 | mtspr SPRN_DSCR,r0 | 607 | mtspr SPRN_DSCR,r0 |
592 | 2: | 608 | 2: |
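The entry_64.S hunk turns the DSCR facility bit in the FSCR (and, under CPU_FTR_HVMODE, the HFSCR) on or off at context switch, depending on whether the incoming thread owns its DSCR. Rendered as C under the assumption that the r6 test corresponds to the thread's dscr_inherit flag used elsewhere in this series:

#include <stdio.h>
#include <stdint.h>

#define FSCR_DSCR       (1UL << 2)              /* FSCR_DSCR_LG == 2 */

static uint64_t fscr;                           /* models SPRN_FSCR */

static void switch_in_dscr(int dscr_inherit)
{
        if (dscr_inherit)
                fscr |= FSCR_DSCR;              /* user space may touch SPRN_DSCR directly */
        else
                fscr &= ~FSCR_DSCR;             /* accesses trap to facility_unavailable */
}

int main(void)
{
        switch_in_dscr(1);
        printf("fscr=%#llx\n", (unsigned long long)fscr);
        switch_in_dscr(0);
        printf("fscr=%#llx\n", (unsigned long long)fscr);
        return 0;
}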
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 4e00d223b2e3..902ca3c6b4b6 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S | |||
@@ -848,7 +848,7 @@ hv_facility_unavailable_relon_trampoline: | |||
848 | . = 0x4f80 | 848 | . = 0x4f80 |
849 | SET_SCRATCH0(r13) | 849 | SET_SCRATCH0(r13) |
850 | EXCEPTION_PROLOG_0(PACA_EXGEN) | 850 | EXCEPTION_PROLOG_0(PACA_EXGEN) |
851 | b facility_unavailable_relon_hv | 851 | b hv_facility_unavailable_relon_hv |
852 | 852 | ||
853 | STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint) | 853 | STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint) |
854 | #ifdef CONFIG_PPC_DENORMALISATION | 854 | #ifdef CONFIG_PPC_DENORMALISATION |
@@ -1175,6 +1175,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX) | |||
1175 | b .ret_from_except | 1175 | b .ret_from_except |
1176 | 1176 | ||
1177 | STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception) | 1177 | STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception) |
1178 | STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, .facility_unavailable_exception) | ||
1178 | 1179 | ||
1179 | .align 7 | 1180 | .align 7 |
1180 | .globl __end_handlers | 1181 | .globl __end_handlers |
@@ -1188,7 +1189,7 @@ __end_handlers: | |||
1188 | STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable) | 1189 | STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable) |
1189 | STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable) | 1190 | STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable) |
1190 | STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable) | 1191 | STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable) |
1191 | STD_RELON_EXCEPTION_HV_OOL(0xf80, facility_unavailable) | 1192 | STD_RELON_EXCEPTION_HV_OOL(0xf80, hv_facility_unavailable) |
1192 | 1193 | ||
1193 | #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) | 1194 | #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) |
1194 | /* | 1195 | /* |
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index c517dbe705fd..8083be20fe5e 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
@@ -600,6 +600,16 @@ struct task_struct *__switch_to(struct task_struct *prev, | |||
600 | struct ppc64_tlb_batch *batch; | 600 | struct ppc64_tlb_batch *batch; |
601 | #endif | 601 | #endif |
602 | 602 | ||
603 | /* Back up the TAR across context switches. | ||
604 | * Note that the TAR is not available for use in the kernel. (To | ||
605 | * provide this, the TAR should be backed up/restored on exception | ||
606 | * entry/exit instead, and be in pt_regs. FIXME, this should be in | ||
607 | * pt_regs anyway (for debug).) | ||
608 | * Save the TAR here before we do treclaim/trecheckpoint as these | ||
609 | * will change the TAR. | ||
610 | */ | ||
611 | save_tar(&prev->thread); | ||
612 | |||
603 | __switch_to_tm(prev); | 613 | __switch_to_tm(prev); |
604 | 614 | ||
605 | #ifdef CONFIG_SMP | 615 | #ifdef CONFIG_SMP |
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S index 51be8fb24803..0554d1f6d70d 100644 --- a/arch/powerpc/kernel/tm.S +++ b/arch/powerpc/kernel/tm.S | |||
@@ -233,6 +233,16 @@ dont_backup_fp: | |||
233 | std r5, _CCR(r7) | 233 | std r5, _CCR(r7) |
234 | std r6, _XER(r7) | 234 | std r6, _XER(r7) |
235 | 235 | ||
236 | |||
237 | /* ******************** TAR, PPR, DSCR ********** */ | ||
238 | mfspr r3, SPRN_TAR | ||
239 | mfspr r4, SPRN_PPR | ||
240 | mfspr r5, SPRN_DSCR | ||
241 | |||
242 | std r3, THREAD_TM_TAR(r12) | ||
243 | std r4, THREAD_TM_PPR(r12) | ||
244 | std r5, THREAD_TM_DSCR(r12) | ||
245 | |||
236 | /* MSR and flags: We don't change CRs, and we don't need to alter | 246 | /* MSR and flags: We don't change CRs, and we don't need to alter |
237 | * MSR. | 247 | * MSR. |
238 | */ | 248 | */ |
@@ -347,6 +357,16 @@ dont_restore_fp: | |||
347 | mtmsr r6 /* FP/Vec off again! */ | 357 | mtmsr r6 /* FP/Vec off again! */ |
348 | 358 | ||
349 | restore_gprs: | 359 | restore_gprs: |
360 | |||
361 | /* ******************** TAR, PPR, DSCR ********** */ | ||
362 | ld r4, THREAD_TM_TAR(r3) | ||
363 | ld r5, THREAD_TM_PPR(r3) | ||
364 | ld r6, THREAD_TM_DSCR(r3) | ||
365 | |||
366 | mtspr SPRN_TAR, r4 | ||
367 | mtspr SPRN_PPR, r5 | ||
368 | mtspr SPRN_DSCR, r6 | ||
369 | |||
350 | /* ******************** CR,LR,CCR,MSR ********** */ | 370 | /* ******************** CR,LR,CCR,MSR ********** */ |
351 | ld r3, _CTR(r7) | 371 | ld r3, _CTR(r7) |
352 | ld r4, _LINK(r7) | 372 | ld r4, _LINK(r7) |
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index bf33c22e38a4..e435bc089ea3 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c | |||
@@ -44,9 +44,7 @@ | |||
44 | #include <asm/machdep.h> | 44 | #include <asm/machdep.h> |
45 | #include <asm/rtas.h> | 45 | #include <asm/rtas.h> |
46 | #include <asm/pmc.h> | 46 | #include <asm/pmc.h> |
47 | #ifdef CONFIG_PPC32 | ||
48 | #include <asm/reg.h> | 47 | #include <asm/reg.h> |
49 | #endif | ||
50 | #ifdef CONFIG_PMAC_BACKLIGHT | 48 | #ifdef CONFIG_PMAC_BACKLIGHT |
51 | #include <asm/backlight.h> | 49 | #include <asm/backlight.h> |
52 | #endif | 50 | #endif |
@@ -1296,43 +1294,54 @@ void vsx_unavailable_exception(struct pt_regs *regs) | |||
1296 | die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT); | 1294 | die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT); |
1297 | } | 1295 | } |
1298 | 1296 | ||
1297 | #ifdef CONFIG_PPC64 | ||
1299 | void facility_unavailable_exception(struct pt_regs *regs) | 1298 | void facility_unavailable_exception(struct pt_regs *regs) |
1300 | { | 1299 | { |
1301 | static char *facility_strings[] = { | 1300 | static char *facility_strings[] = { |
1302 | "FPU", | 1301 | [FSCR_FP_LG] = "FPU", |
1303 | "VMX/VSX", | 1302 | [FSCR_VECVSX_LG] = "VMX/VSX", |
1304 | "DSCR", | 1303 | [FSCR_DSCR_LG] = "DSCR", |
1305 | "PMU SPRs", | 1304 | [FSCR_PM_LG] = "PMU SPRs", |
1306 | "BHRB", | 1305 | [FSCR_BHRB_LG] = "BHRB", |
1307 | "TM", | 1306 | [FSCR_TM_LG] = "TM", |
1308 | "AT", | 1307 | [FSCR_EBB_LG] = "EBB", |
1309 | "EBB", | 1308 | [FSCR_TAR_LG] = "TAR", |
1310 | "TAR", | ||
1311 | }; | 1309 | }; |
1312 | char *facility, *prefix; | 1310 | char *facility = "unknown"; |
1313 | u64 value; | 1311 | u64 value; |
1312 | u8 status; | ||
1313 | bool hv; | ||
1314 | 1314 | ||
1315 | if (regs->trap == 0xf60) { | 1315 | hv = (regs->trap == 0xf80); |
1316 | value = mfspr(SPRN_FSCR); | 1316 | if (hv) |
1317 | prefix = ""; | ||
1318 | } else { | ||
1319 | value = mfspr(SPRN_HFSCR); | 1317 | value = mfspr(SPRN_HFSCR); |
1320 | prefix = "Hypervisor "; | 1318 | else |
1319 | value = mfspr(SPRN_FSCR); | ||
1320 | |||
1321 | status = value >> 56; | ||
1322 | if (status == FSCR_DSCR_LG) { | ||
1323 | /* User is accessing the DSCR. Set the inherit bit and allow | ||
1324 | * the user to set it directly in future by setting via the | ||
1325 | * H/FSCR DSCR bit. | ||
1326 | */ | ||
1327 | current->thread.dscr_inherit = 1; | ||
1328 | if (hv) | ||
1329 | mtspr(SPRN_HFSCR, value | HFSCR_DSCR); | ||
1330 | else | ||
1331 | mtspr(SPRN_FSCR, value | FSCR_DSCR); | ||
1332 | return; | ||
1321 | } | 1333 | } |
1322 | 1334 | ||
1323 | value = value >> 56; | 1335 | if ((status < ARRAY_SIZE(facility_strings)) && |
1336 | facility_strings[status]) | ||
1337 | facility = facility_strings[status]; | ||
1324 | 1338 | ||
1325 | /* We restore the interrupt state now */ | 1339 | /* We restore the interrupt state now */ |
1326 | if (!arch_irq_disabled_regs(regs)) | 1340 | if (!arch_irq_disabled_regs(regs)) |
1327 | local_irq_enable(); | 1341 | local_irq_enable(); |
1328 | 1342 | ||
1329 | if (value < ARRAY_SIZE(facility_strings)) | ||
1330 | facility = facility_strings[value]; | ||
1331 | else | ||
1332 | facility = "unknown"; | ||
1333 | |||
1334 | pr_err("%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n", | 1343 | pr_err("%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n", |
1335 | prefix, facility, regs->nip, regs->msr); | 1344 | hv ? "Hypervisor " : "", facility, regs->nip, regs->msr); |
1336 | 1345 | ||
1337 | if (user_mode(regs)) { | 1346 | if (user_mode(regs)) { |
1338 | _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); | 1347 | _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); |
@@ -1341,6 +1350,7 @@ void facility_unavailable_exception(struct pt_regs *regs) | |||
1341 | 1350 | ||
1342 | die("Unexpected facility unavailable exception", regs, SIGABRT); | 1351 | die("Unexpected facility unavailable exception", regs, SIGABRT); |
1343 | } | 1352 | } |
1353 | #endif | ||
1344 | 1354 | ||
1345 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | 1355 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
1346 | 1356 | ||
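facility_unavailable_exception() now pulls the 8-bit facility status out of bits 63:56 of the (H)FSCR and uses it to index a sparsely populated, designated-initializer string table, falling back to "unknown". A small sketch of that lookup, reusing the *_LG numbers from reg.h:

#include <stdio.h>
#include <stdint.h>

#define FSCR_FP_LG      0
#define FSCR_DSCR_LG    2
#define FSCR_TAR_LG     8

static const char *facility_strings[] = {
        [FSCR_FP_LG]   = "FPU",
        [FSCR_DSCR_LG] = "DSCR",
        [FSCR_TAR_LG]  = "TAR",
};

static const char *facility_name(uint64_t fscr)
{
        uint8_t status = fscr >> 56;            /* facility status lives in bits 63:56 */

        if (status < sizeof(facility_strings) / sizeof(facility_strings[0]) &&
            facility_strings[status])
                return facility_strings[status];
        return "unknown";
}

int main(void)
{
        printf("%s\n", facility_name((uint64_t)FSCR_TAR_LG << 56)); /* TAR */
        printf("%s\n", facility_name((uint64_t)5 << 56));           /* unknown: gap in table */
        return 0;
}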
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 2efa9dde741a..7629cd3eb91a 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c | |||
@@ -1809,7 +1809,7 @@ static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) | |||
1809 | rma_size <<= PAGE_SHIFT; | 1809 | rma_size <<= PAGE_SHIFT; |
1810 | rmls = lpcr_rmls(rma_size); | 1810 | rmls = lpcr_rmls(rma_size); |
1811 | err = -EINVAL; | 1811 | err = -EINVAL; |
1812 | if (rmls < 0) { | 1812 | if ((long)rmls < 0) { |
1813 | pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size); | 1813 | pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size); |
1814 | goto out_srcu; | 1814 | goto out_srcu; |
1815 | } | 1815 | } |
@@ -1874,7 +1874,7 @@ int kvmppc_core_init_vm(struct kvm *kvm) | |||
1874 | /* Allocate the guest's logical partition ID */ | 1874 | /* Allocate the guest's logical partition ID */ |
1875 | 1875 | ||
1876 | lpid = kvmppc_alloc_lpid(); | 1876 | lpid = kvmppc_alloc_lpid(); |
1877 | if (lpid < 0) | 1877 | if ((long)lpid < 0) |
1878 | return -ENOMEM; | 1878 | return -ENOMEM; |
1879 | kvm->arch.lpid = lpid; | 1879 | kvm->arch.lpid = lpid; |
1880 | 1880 | ||
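Both KVM fixes here address the same pitfall: rmls and lpid are unsigned, so a plain x < 0 test can never fire and a -1 error return slipped through; casting to long first restores the check. A compact demonstration, with alloc_id() as a made-up allocator that fails by returning (unsigned long)-1:

#include <stdio.h>

static unsigned long alloc_id(void)
{
        return (unsigned long)-1;               /* failure, encoded in an unsigned type */
}

int main(void)
{
        unsigned long id = alloc_id();

        if (id < 0)                             /* always false: id is unsigned */
                printf("never reached\n");

        if ((long)id < 0)                       /* the fixed form catches the error */
                printf("allocation failed\n");

        return 0;
}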
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index 19498a567a81..c6e13d9a9e15 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c | |||
@@ -1047,11 +1047,12 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) | |||
1047 | if (err) | 1047 | if (err) |
1048 | goto free_shadow_vcpu; | 1048 | goto free_shadow_vcpu; |
1049 | 1049 | ||
1050 | err = -ENOMEM; | ||
1050 | p = __get_free_page(GFP_KERNEL|__GFP_ZERO); | 1051 | p = __get_free_page(GFP_KERNEL|__GFP_ZERO); |
1051 | /* the real shared page fills the last 4k of our page */ | ||
1052 | vcpu->arch.shared = (void*)(p + PAGE_SIZE - 4096); | ||
1053 | if (!p) | 1052 | if (!p) |
1054 | goto uninit_vcpu; | 1053 | goto uninit_vcpu; |
1054 | /* the real shared page fills the last 4k of our page */ | ||
1055 | vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096); | ||
1055 | 1056 | ||
1056 | #ifdef CONFIG_PPC_BOOK3S_64 | 1057 | #ifdef CONFIG_PPC_BOOK3S_64 |
1057 | /* default to book3s_64 (970fx) */ | 1058 | /* default to book3s_64 (970fx) */ |
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c index 9f8671a44551..6a5f2b1f32ca 100644 --- a/arch/powerpc/platforms/pseries/nvram.c +++ b/arch/powerpc/platforms/pseries/nvram.c | |||
@@ -569,35 +569,6 @@ error: | |||
569 | return ret; | 569 | return ret; |
570 | } | 570 | } |
571 | 571 | ||
572 | static int unzip_oops(char *oops_buf, char *big_buf) | ||
573 | { | ||
574 | struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf; | ||
575 | u64 timestamp = oops_hdr->timestamp; | ||
576 | char *big_oops_data = NULL; | ||
577 | char *oops_data_buf = NULL; | ||
578 | size_t big_oops_data_sz; | ||
579 | int unzipped_len; | ||
580 | |||
581 | big_oops_data = big_buf + sizeof(struct oops_log_info); | ||
582 | big_oops_data_sz = big_oops_buf_sz - sizeof(struct oops_log_info); | ||
583 | oops_data_buf = oops_buf + sizeof(struct oops_log_info); | ||
584 | |||
585 | unzipped_len = nvram_decompress(oops_data_buf, big_oops_data, | ||
586 | oops_hdr->report_length, | ||
587 | big_oops_data_sz); | ||
588 | |||
589 | if (unzipped_len < 0) { | ||
590 | pr_err("nvram: decompression failed; returned %d\n", | ||
591 | unzipped_len); | ||
592 | return -1; | ||
593 | } | ||
594 | oops_hdr = (struct oops_log_info *)big_buf; | ||
595 | oops_hdr->version = OOPS_HDR_VERSION; | ||
596 | oops_hdr->report_length = (u16) unzipped_len; | ||
597 | oops_hdr->timestamp = timestamp; | ||
598 | return 0; | ||
599 | } | ||
600 | |||
601 | static int nvram_pstore_open(struct pstore_info *psi) | 572 | static int nvram_pstore_open(struct pstore_info *psi) |
602 | { | 573 | { |
603 | /* Reset the iterator to start reading partitions again */ | 574 | /* Reset the iterator to start reading partitions again */ |
@@ -685,10 +656,9 @@ static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type, | |||
685 | unsigned int err_type, id_no, size = 0; | 656 | unsigned int err_type, id_no, size = 0; |
686 | struct nvram_os_partition *part = NULL; | 657 | struct nvram_os_partition *part = NULL; |
687 | char *buff = NULL, *big_buff = NULL; | 658 | char *buff = NULL, *big_buff = NULL; |
688 | int rc, sig = 0; | 659 | int sig = 0; |
689 | loff_t p; | 660 | loff_t p; |
690 | 661 | ||
691 | read_partition: | ||
692 | read_type++; | 662 | read_type++; |
693 | 663 | ||
694 | switch (nvram_type_ids[read_type]) { | 664 | switch (nvram_type_ids[read_type]) { |
@@ -749,30 +719,46 @@ read_partition: | |||
749 | *id = id_no; | 719 | *id = id_no; |
750 | 720 | ||
751 | if (nvram_type_ids[read_type] == PSTORE_TYPE_DMESG) { | 721 | if (nvram_type_ids[read_type] == PSTORE_TYPE_DMESG) { |
722 | int length, unzipped_len; | ||
723 | size_t hdr_size; | ||
724 | |||
752 | oops_hdr = (struct oops_log_info *)buff; | 725 | oops_hdr = (struct oops_log_info *)buff; |
753 | *buf = buff + sizeof(*oops_hdr); | 726 | if (oops_hdr->version < OOPS_HDR_VERSION) { |
727 | /* Old format oops header had 2-byte record size */ | ||
728 | hdr_size = sizeof(u16); | ||
729 | length = oops_hdr->version; | ||
730 | time->tv_sec = 0; | ||
731 | time->tv_nsec = 0; | ||
732 | } else { | ||
733 | hdr_size = sizeof(*oops_hdr); | ||
734 | length = oops_hdr->report_length; | ||
735 | time->tv_sec = oops_hdr->timestamp; | ||
736 | time->tv_nsec = 0; | ||
737 | } | ||
738 | *buf = kmalloc(length, GFP_KERNEL); | ||
739 | if (*buf == NULL) | ||
740 | return -ENOMEM; | ||
741 | memcpy(*buf, buff + hdr_size, length); | ||
742 | kfree(buff); | ||
754 | 743 | ||
755 | if (err_type == ERR_TYPE_KERNEL_PANIC_GZ) { | 744 | if (err_type == ERR_TYPE_KERNEL_PANIC_GZ) { |
756 | big_buff = kmalloc(big_oops_buf_sz, GFP_KERNEL); | 745 | big_buff = kmalloc(big_oops_buf_sz, GFP_KERNEL); |
757 | if (!big_buff) | 746 | if (!big_buff) |
758 | return -ENOMEM; | 747 | return -ENOMEM; |
759 | 748 | ||
760 | rc = unzip_oops(buff, big_buff); | 749 | unzipped_len = nvram_decompress(*buf, big_buff, |
750 | length, big_oops_buf_sz); | ||
761 | 751 | ||
762 | if (rc != 0) { | 752 | if (unzipped_len < 0) { |
763 | kfree(buff); | 753 | pr_err("nvram: decompression failed, returned " |
754 | "rc %d\n", unzipped_len); | ||
764 | kfree(big_buff); | 755 | kfree(big_buff); |
765 | goto read_partition; | 756 | } else { |
757 | *buf = big_buff; | ||
758 | length = unzipped_len; | ||
766 | } | 759 | } |
767 | |||
768 | oops_hdr = (struct oops_log_info *)big_buff; | ||
769 | *buf = big_buff + sizeof(*oops_hdr); | ||
770 | kfree(buff); | ||
771 | } | 760 | } |
772 | 761 | return length; | |
773 | time->tv_sec = oops_hdr->timestamp; | ||
774 | time->tv_nsec = 0; | ||
775 | return oops_hdr->report_length; | ||
776 | } | 762 | } |
777 | 763 | ||
778 | *buf = buff; | 764 | *buf = buff; |
@@ -816,6 +802,7 @@ static int nvram_pstore_init(void) | |||
816 | static void __init nvram_init_oops_partition(int rtas_partition_exists) | 802 | static void __init nvram_init_oops_partition(int rtas_partition_exists) |
817 | { | 803 | { |
818 | int rc; | 804 | int rc; |
805 | size_t size; | ||
819 | 806 | ||
820 | rc = pseries_nvram_init_os_partition(&oops_log_partition); | 807 | rc = pseries_nvram_init_os_partition(&oops_log_partition); |
821 | if (rc != 0) { | 808 | if (rc != 0) { |
@@ -844,8 +831,9 @@ static void __init nvram_init_oops_partition(int rtas_partition_exists) | |||
844 | big_oops_buf_sz = (oops_data_sz * 100) / 45; | 831 | big_oops_buf_sz = (oops_data_sz * 100) / 45; |
845 | big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL); | 832 | big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL); |
846 | if (big_oops_buf) { | 833 | if (big_oops_buf) { |
847 | stream.workspace = kmalloc(zlib_deflate_workspacesize( | 834 | size = max(zlib_deflate_workspacesize(WINDOW_BITS, MEM_LEVEL), |
848 | WINDOW_BITS, MEM_LEVEL), GFP_KERNEL); | 835 | zlib_inflate_workspacesize()); |
836 | stream.workspace = kmalloc(size, GFP_KERNEL); | ||
849 | if (!stream.workspace) { | 837 | if (!stream.workspace) { |
850 | pr_err("nvram: No memory for compression workspace; " | 838 | pr_err("nvram: No memory for compression workspace; " |
851 | "skipping compression of %s partition data\n", | 839 | "skipping compression of %s partition data\n", |
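The pstore read path now tells old oops records (a bare 16-bit length in front of the data) apart from new ones (a full oops_log_info header) by looking at the version field, and copies only the payload into a freshly allocated buffer. A simplified user-space sketch of that header probe; the struct layout and OOPS_HDR_VERSION value are assumptions, and malloc() stands in for kmalloc():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#define OOPS_HDR_VERSION 5                      /* assumed value, illustration only */

struct oops_log_info {                          /* assumed layout */
        uint16_t version;
        uint16_t report_length;
        uint64_t timestamp;
};

/* Return a newly allocated copy of the payload; *length gets its size. */
static char *read_oops(const char *buff, int *length)
{
        struct oops_log_info hdr;
        size_t hdr_size;

        memcpy(&hdr, buff, sizeof(hdr));        /* avoid alignment assumptions */
        if (hdr.version < OOPS_HDR_VERSION) {
                hdr_size = sizeof(uint16_t);    /* old format: bare 2-byte record size */
                *length = hdr.version;          /* ...which aliases the length field */
        } else {
                hdr_size = sizeof(hdr);
                *length = hdr.report_length;
        }

        char *buf = malloc(*length);
        if (buf)
                memcpy(buf, buff + hdr_size, *length);
        return buf;
}

int main(void)
{
        char record[64] = { 0 };
        struct oops_log_info hdr = { OOPS_HDR_VERSION, 5, 1234 };
        int len;

        memcpy(record, &hdr, sizeof(hdr));
        memcpy(record + sizeof(hdr), "hello", 5);

        char *payload = read_oops(record, &len);
        printf("%.*s (%d bytes)\n", len, payload, len);
        free(payload);
        return 0;
}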
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 22f75b504f7f..8a4cae78f03c 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -118,6 +118,7 @@ config S390 | |||
118 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST | 118 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST |
119 | select HAVE_KERNEL_BZIP2 | 119 | select HAVE_KERNEL_BZIP2 |
120 | select HAVE_KERNEL_GZIP | 120 | select HAVE_KERNEL_GZIP |
121 | select HAVE_KERNEL_LZ4 | ||
121 | select HAVE_KERNEL_LZMA | 122 | select HAVE_KERNEL_LZMA |
122 | select HAVE_KERNEL_LZO | 123 | select HAVE_KERNEL_LZO |
123 | select HAVE_KERNEL_XZ | 124 | select HAVE_KERNEL_XZ |
@@ -227,11 +228,12 @@ config MARCH_Z196 | |||
227 | not work on older machines. | 228 | not work on older machines. |
228 | 229 | ||
229 | config MARCH_ZEC12 | 230 | config MARCH_ZEC12 |
230 | bool "IBM zEC12" | 231 | bool "IBM zBC12 and zEC12" |
231 | select HAVE_MARCH_ZEC12_FEATURES if 64BIT | 232 | select HAVE_MARCH_ZEC12_FEATURES if 64BIT |
232 | help | 233 | help |
233 | Select this to enable optimizations for IBM zEC12 (2827 series). The | 234 | Select this to enable optimizations for IBM zBC12 and zEC12 (2828 and |
234 | kernel will be slightly faster but will not work on older machines. | 235 | 2827 series). The kernel will be slightly faster but will not work on |
236 | older machines. | ||
235 | 237 | ||
236 | endchoice | 238 | endchoice |
237 | 239 | ||
@@ -709,6 +711,7 @@ config S390_GUEST | |||
709 | def_bool y | 711 | def_bool y |
710 | prompt "s390 support for virtio devices" | 712 | prompt "s390 support for virtio devices" |
711 | depends on 64BIT | 713 | depends on 64BIT |
714 | select TTY | ||
712 | select VIRTUALIZATION | 715 | select VIRTUALIZATION |
713 | select VIRTIO | 716 | select VIRTIO |
714 | select VIRTIO_CONSOLE | 717 | select VIRTIO_CONSOLE |
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile index 3ad8f61c9985..866ecbe670e4 100644 --- a/arch/s390/boot/compressed/Makefile +++ b/arch/s390/boot/compressed/Makefile | |||
@@ -6,9 +6,9 @@ | |||
6 | 6 | ||
7 | BITS := $(if $(CONFIG_64BIT),64,31) | 7 | BITS := $(if $(CONFIG_64BIT),64,31) |
8 | 8 | ||
9 | targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 \ | 9 | targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 |
10 | vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo misc.o piggy.o \ | 10 | targets += vmlinux.bin.xz vmlinux.bin.lzma vmlinux.bin.lzo vmlinux.bin.lz4 |
11 | sizes.h head$(BITS).o | 11 | targets += misc.o piggy.o sizes.h head$(BITS).o |
12 | 12 | ||
13 | KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 | 13 | KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 |
14 | KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING | 14 | KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING |
@@ -48,6 +48,7 @@ vmlinux.bin.all-y := $(obj)/vmlinux.bin | |||
48 | 48 | ||
49 | suffix-$(CONFIG_KERNEL_GZIP) := gz | 49 | suffix-$(CONFIG_KERNEL_GZIP) := gz |
50 | suffix-$(CONFIG_KERNEL_BZIP2) := bz2 | 50 | suffix-$(CONFIG_KERNEL_BZIP2) := bz2 |
51 | suffix-$(CONFIG_KERNEL_LZ4) := lz4 | ||
51 | suffix-$(CONFIG_KERNEL_LZMA) := lzma | 52 | suffix-$(CONFIG_KERNEL_LZMA) := lzma |
52 | suffix-$(CONFIG_KERNEL_LZO) := lzo | 53 | suffix-$(CONFIG_KERNEL_LZO) := lzo |
53 | suffix-$(CONFIG_KERNEL_XZ) := xz | 54 | suffix-$(CONFIG_KERNEL_XZ) := xz |
@@ -56,6 +57,8 @@ $(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y) | |||
56 | $(call if_changed,gzip) | 57 | $(call if_changed,gzip) |
57 | $(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y) | 58 | $(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y) |
58 | $(call if_changed,bzip2) | 59 | $(call if_changed,bzip2) |
60 | $(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y) | ||
61 | $(call if_changed,lz4) | ||
59 | $(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) | 62 | $(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) |
60 | $(call if_changed,lzma) | 63 | $(call if_changed,lzma) |
61 | $(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) | 64 | $(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) |
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c index c4c6a1cf221b..57cbaff1f397 100644 --- a/arch/s390/boot/compressed/misc.c +++ b/arch/s390/boot/compressed/misc.c | |||
@@ -47,6 +47,10 @@ static unsigned long free_mem_end_ptr; | |||
47 | #include "../../../../lib/decompress_bunzip2.c" | 47 | #include "../../../../lib/decompress_bunzip2.c" |
48 | #endif | 48 | #endif |
49 | 49 | ||
50 | #ifdef CONFIG_KERNEL_LZ4 | ||
51 | #include "../../../../lib/decompress_unlz4.c" | ||
52 | #endif | ||
53 | |||
50 | #ifdef CONFIG_KERNEL_LZMA | 54 | #ifdef CONFIG_KERNEL_LZMA |
51 | #include "../../../../lib/decompress_unlzma.c" | 55 | #include "../../../../lib/decompress_unlzma.c" |
52 | #endif | 56 | #endif |
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h index 4d8604e311f3..7d4676758733 100644 --- a/arch/s390/include/asm/bitops.h +++ b/arch/s390/include/asm/bitops.h | |||
@@ -693,7 +693,7 @@ static inline int find_next_bit_left(const unsigned long *addr, | |||
693 | size -= offset; | 693 | size -= offset; |
694 | p = addr + offset / BITS_PER_LONG; | 694 | p = addr + offset / BITS_PER_LONG; |
695 | if (bit) { | 695 | if (bit) { |
696 | set = __flo_word(0, *p & (~0UL << bit)); | 696 | set = __flo_word(0, *p & (~0UL >> bit)); |
697 | if (set >= size) | 697 | if (set >= size) |
698 | return size + offset; | 698 | return size + offset; |
699 | if (set < BITS_PER_LONG) | 699 | if (set < BITS_PER_LONG) |
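The bitops fix flips the mask in find_next_bit_left() from ~0UL << bit to ~0UL >> bit: when scanning from the most-significant end, the 'bit' positions already visited must be cleared from the left, not the right. Printing both masks makes the difference obvious:

#include <stdio.h>

int main(void)
{
        unsigned int bit = 8;

        printf("~0UL << %u = %016lx   (old: keeps the bits already scanned)\n",
               bit, ~0UL << bit);
        printf("~0UL >> %u = %016lx   (fix: clears them from the left)\n",
               bit, ~0UL >> bit);
        return 0;
}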
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h index b75d7d686684..6d6d92b4ea11 100644 --- a/arch/s390/include/asm/tlb.h +++ b/arch/s390/include/asm/tlb.h | |||
@@ -32,6 +32,7 @@ struct mmu_gather { | |||
32 | struct mm_struct *mm; | 32 | struct mm_struct *mm; |
33 | struct mmu_table_batch *batch; | 33 | struct mmu_table_batch *batch; |
34 | unsigned int fullmm; | 34 | unsigned int fullmm; |
35 | unsigned long start, end; | ||
35 | }; | 36 | }; |
36 | 37 | ||
37 | struct mmu_table_batch { | 38 | struct mmu_table_batch { |
@@ -48,10 +49,13 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table); | |||
48 | 49 | ||
49 | static inline void tlb_gather_mmu(struct mmu_gather *tlb, | 50 | static inline void tlb_gather_mmu(struct mmu_gather *tlb, |
50 | struct mm_struct *mm, | 51 | struct mm_struct *mm, |
51 | unsigned int full_mm_flush) | 52 | unsigned long start, |
53 | unsigned long end) | ||
52 | { | 54 | { |
53 | tlb->mm = mm; | 55 | tlb->mm = mm; |
54 | tlb->fullmm = full_mm_flush; | 56 | tlb->start = start; |
57 | tlb->end = end; | ||
58 | tlb->fullmm = !(start | (end+1)); | ||
55 | tlb->batch = NULL; | 59 | tlb->batch = NULL; |
56 | if (tlb->fullmm) | 60 | if (tlb->fullmm) |
57 | __tlb_flush_mm(mm); | 61 | __tlb_flush_mm(mm); |
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c index a6fc037671b1..500aa1029bcb 100644 --- a/arch/s390/kernel/perf_event.c +++ b/arch/s390/kernel/perf_event.c | |||
@@ -52,12 +52,13 @@ static struct kvm_s390_sie_block *sie_block(struct pt_regs *regs) | |||
52 | 52 | ||
53 | static bool is_in_guest(struct pt_regs *regs) | 53 | static bool is_in_guest(struct pt_regs *regs) |
54 | { | 54 | { |
55 | unsigned long ip = instruction_pointer(regs); | ||
56 | |||
57 | if (user_mode(regs)) | 55 | if (user_mode(regs)) |
58 | return false; | 56 | return false; |
59 | 57 | #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) | |
60 | return ip == (unsigned long) &sie_exit; | 58 | return instruction_pointer(regs) == (unsigned long) &sie_exit; |
59 | #else | ||
60 | return false; | ||
61 | #endif | ||
61 | } | 62 | } |
62 | 63 | ||
63 | static unsigned long guest_is_user_mode(struct pt_regs *regs) | 64 | static unsigned long guest_is_user_mode(struct pt_regs *regs) |
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index 497451ec5e26..aeed8a61fa0d 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c | |||
@@ -994,6 +994,7 @@ static void __init setup_hwcaps(void) | |||
994 | strcpy(elf_platform, "z196"); | 994 | strcpy(elf_platform, "z196"); |
995 | break; | 995 | break; |
996 | case 0x2827: | 996 | case 0x2827: |
997 | case 0x2828: | ||
997 | strcpy(elf_platform, "zEC12"); | 998 | strcpy(elf_platform, "zEC12"); |
998 | break; | 999 | break; |
999 | } | 1000 | } |
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index ba694d2ba51e..34c1c9a90be2 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c | |||
@@ -702,14 +702,25 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) | |||
702 | return rc; | 702 | return rc; |
703 | 703 | ||
704 | vcpu->arch.sie_block->icptcode = 0; | 704 | vcpu->arch.sie_block->icptcode = 0; |
705 | preempt_disable(); | ||
706 | kvm_guest_enter(); | ||
707 | preempt_enable(); | ||
708 | VCPU_EVENT(vcpu, 6, "entering sie flags %x", | 705 | VCPU_EVENT(vcpu, 6, "entering sie flags %x", |
709 | atomic_read(&vcpu->arch.sie_block->cpuflags)); | 706 | atomic_read(&vcpu->arch.sie_block->cpuflags)); |
710 | trace_kvm_s390_sie_enter(vcpu, | 707 | trace_kvm_s390_sie_enter(vcpu, |
711 | atomic_read(&vcpu->arch.sie_block->cpuflags)); | 708 | atomic_read(&vcpu->arch.sie_block->cpuflags)); |
709 | |||
710 | /* | ||
711 | * As PF_VCPU will be used in the fault handler, there must be no | ||
712 | * uaccess between guest_enter and guest_exit. | ||
713 | */ | ||
714 | preempt_disable(); | ||
715 | kvm_guest_enter(); | ||
716 | preempt_enable(); | ||
712 | rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs); | 717 | rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs); |
718 | kvm_guest_exit(); | ||
719 | |||
720 | VCPU_EVENT(vcpu, 6, "exit sie icptcode %d", | ||
721 | vcpu->arch.sie_block->icptcode); | ||
722 | trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode); | ||
723 | |||
713 | if (rc > 0) | 724 | if (rc > 0) |
714 | rc = 0; | 725 | rc = 0; |
715 | if (rc < 0) { | 726 | if (rc < 0) { |
@@ -721,10 +732,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) | |||
721 | rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); | 732 | rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); |
722 | } | 733 | } |
723 | } | 734 | } |
724 | VCPU_EVENT(vcpu, 6, "exit sie icptcode %d", | ||
725 | vcpu->arch.sie_block->icptcode); | ||
726 | trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode); | ||
727 | kvm_guest_exit(); | ||
728 | 735 | ||
729 | memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16); | 736 | memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16); |
730 | return rc; | 737 | return rc; |
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c index 0da3e6eb6be6..4cdc54e63ebc 100644 --- a/arch/s390/kvm/priv.c +++ b/arch/s390/kvm/priv.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/errno.h> | 16 | #include <linux/errno.h> |
17 | #include <linux/compat.h> | 17 | #include <linux/compat.h> |
18 | #include <asm/asm-offsets.h> | 18 | #include <asm/asm-offsets.h> |
19 | #include <asm/facility.h> | ||
19 | #include <asm/current.h> | 20 | #include <asm/current.h> |
20 | #include <asm/debug.h> | 21 | #include <asm/debug.h> |
21 | #include <asm/ebcdic.h> | 22 | #include <asm/ebcdic.h> |
@@ -532,8 +533,7 @@ static int handle_pfmf(struct kvm_vcpu *vcpu) | |||
532 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 533 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
533 | 534 | ||
534 | /* Only provide non-quiescing support if the host supports it */ | 535 | /* Only provide non-quiescing support if the host supports it */ |
535 | if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && | 536 | if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && !test_facility(14)) |
536 | S390_lowcore.stfl_fac_list & 0x00020000) | ||
537 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); | 537 | return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); |
538 | 538 | ||
539 | /* No support for conditional-SSKE */ | 539 | /* No support for conditional-SSKE */ |
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index ce36ea80e4f9..ad446b0c55b6 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c | |||
@@ -69,6 +69,7 @@ static void __init setup_zero_pages(void) | |||
69 | order = 2; | 69 | order = 2; |
70 | break; | 70 | break; |
71 | case 0x2827: /* zEC12 */ | 71 | case 0x2827: /* zEC12 */ |
72 | case 0x2828: /* zEC12 */ | ||
72 | default: | 73 | default: |
73 | order = 5; | 74 | order = 5; |
74 | break; | 75 | break; |
diff --git a/arch/s390/oprofile/init.c b/arch/s390/oprofile/init.c index ffeb17ce7f31..930783d2c99b 100644 --- a/arch/s390/oprofile/init.c +++ b/arch/s390/oprofile/init.c | |||
@@ -440,7 +440,7 @@ static int oprofile_hwsampler_init(struct oprofile_operations *ops) | |||
440 | switch (id.machine) { | 440 | switch (id.machine) { |
441 | case 0x2097: case 0x2098: ops->cpu_type = "s390/z10"; break; | 441 | case 0x2097: case 0x2098: ops->cpu_type = "s390/z10"; break; |
442 | case 0x2817: case 0x2818: ops->cpu_type = "s390/z196"; break; | 442 | case 0x2817: case 0x2818: ops->cpu_type = "s390/z196"; break; |
443 | case 0x2827: ops->cpu_type = "s390/zEC12"; break; | 443 | case 0x2827: case 0x2828: ops->cpu_type = "s390/zEC12"; break; |
444 | default: return -ENODEV; | 444 | default: return -ENODEV; |
445 | } | 445 | } |
446 | } | 446 | } |
diff --git a/arch/score/Kconfig b/arch/score/Kconfig index c8def8bc9020..5fc237581caf 100644 --- a/arch/score/Kconfig +++ b/arch/score/Kconfig | |||
@@ -87,6 +87,8 @@ config STACKTRACE_SUPPORT | |||
87 | 87 | ||
88 | source "init/Kconfig" | 88 | source "init/Kconfig" |
89 | 89 | ||
90 | source "kernel/Kconfig.freezer" | ||
91 | |||
90 | config MMU | 92 | config MMU |
91 | def_bool y | 93 | def_bool y |
92 | 94 | ||
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h index e61d43d9f689..362192ed12fe 100644 --- a/arch/sh/include/asm/tlb.h +++ b/arch/sh/include/asm/tlb.h | |||
@@ -36,10 +36,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb) | |||
36 | } | 36 | } |
37 | 37 | ||
38 | static inline void | 38 | static inline void |
39 | tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush) | 39 | tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) |
40 | { | 40 | { |
41 | tlb->mm = mm; | 41 | tlb->mm = mm; |
42 | tlb->fullmm = full_mm_flush; | 42 | tlb->start = start; |
43 | tlb->end = end; | ||
44 | tlb->fullmm = !(start | (end+1)); | ||
43 | 45 | ||
44 | init_tlb_gather(tlb); | 46 | init_tlb_gather(tlb); |
45 | } | 47 | } |
diff --git a/arch/um/include/asm/tlb.h b/arch/um/include/asm/tlb.h index 4febacd1a8a1..29b0301c18aa 100644 --- a/arch/um/include/asm/tlb.h +++ b/arch/um/include/asm/tlb.h | |||
@@ -45,10 +45,12 @@ static inline void init_tlb_gather(struct mmu_gather *tlb) | |||
45 | } | 45 | } |
46 | 46 | ||
47 | static inline void | 47 | static inline void |
48 | tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned int full_mm_flush) | 48 | tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) |
49 | { | 49 | { |
50 | tlb->mm = mm; | 50 | tlb->mm = mm; |
51 | tlb->fullmm = full_mm_flush; | 51 | tlb->start = start; |
52 | tlb->end = end; | ||
53 | tlb->fullmm = !(start | (end+1)); | ||
52 | 54 | ||
53 | init_tlb_gather(tlb); | 55 | init_tlb_gather(tlb); |
54 | } | 56 | } |
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index d606463aa6d6..b7388a425f09 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c | |||
@@ -225,7 +225,7 @@ static void low_free(unsigned long size, unsigned long addr) | |||
225 | unsigned long nr_pages; | 225 | unsigned long nr_pages; |
226 | 226 | ||
227 | nr_pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE; | 227 | nr_pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE; |
228 | efi_call_phys2(sys_table->boottime->free_pages, addr, size); | 228 | efi_call_phys2(sys_table->boottime->free_pages, addr, nr_pages); |
229 | } | 229 | } |
230 | 230 | ||
231 | static void find_bits(unsigned long mask, u8 *pos, u8 *size) | 231 | static void find_bits(unsigned long mask, u8 *pos, u8 *size) |
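The eboot fix passes the page count to free_pages() rather than the byte size that happened to be in scope. A tiny sketch of the rounding, assuming EFI_PAGE_SIZE is 4096 and using a generic round_up() rather than the kernel macro:

#include <stdio.h>

#define EFI_PAGE_SIZE   4096UL                                  /* assumed */
#define round_up(x, y)  ((((x) + (y) - 1) / (y)) * (y))         /* generic, not the kernel macro */

int main(void)
{
        unsigned long size = 10000;
        unsigned long nr_pages = round_up(size, EFI_PAGE_SIZE) / EFI_PAGE_SIZE;

        printf("size=%lu bytes -> nr_pages=%lu\n", size, nr_pages);     /* 10000 -> 3 */
        return 0;
}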
diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h index f2b489cf1602..3bf2dd0cf61f 100644 --- a/arch/x86/include/asm/pgtable-2level.h +++ b/arch/x86/include/asm/pgtable-2level.h | |||
@@ -55,9 +55,53 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp) | |||
55 | #define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp) | 55 | #define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp) |
56 | #endif | 56 | #endif |
57 | 57 | ||
58 | #ifdef CONFIG_MEM_SOFT_DIRTY | ||
59 | |||
60 | /* | ||
61 | * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE, _PAGE_BIT_SOFT_DIRTY and | ||
62 | * _PAGE_BIT_PROTNONE are taken, split up the 28 bits of offset | ||
63 | * into this range. | ||
64 | */ | ||
65 | #define PTE_FILE_MAX_BITS 28 | ||
66 | #define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1) | ||
67 | #define PTE_FILE_SHIFT2 (_PAGE_BIT_FILE + 1) | ||
68 | #define PTE_FILE_SHIFT3 (_PAGE_BIT_PROTNONE + 1) | ||
69 | #define PTE_FILE_SHIFT4 (_PAGE_BIT_SOFT_DIRTY + 1) | ||
70 | #define PTE_FILE_BITS1 (PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1) | ||
71 | #define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1) | ||
72 | #define PTE_FILE_BITS3 (PTE_FILE_SHIFT4 - PTE_FILE_SHIFT3 - 1) | ||
73 | |||
74 | #define pte_to_pgoff(pte) \ | ||
75 | ((((pte).pte_low >> (PTE_FILE_SHIFT1)) \ | ||
76 | & ((1U << PTE_FILE_BITS1) - 1))) \ | ||
77 | + ((((pte).pte_low >> (PTE_FILE_SHIFT2)) \ | ||
78 | & ((1U << PTE_FILE_BITS2) - 1)) \ | ||
79 | << (PTE_FILE_BITS1)) \ | ||
80 | + ((((pte).pte_low >> (PTE_FILE_SHIFT3)) \ | ||
81 | & ((1U << PTE_FILE_BITS3) - 1)) \ | ||
82 | << (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \ | ||
83 | + ((((pte).pte_low >> (PTE_FILE_SHIFT4))) \ | ||
84 | << (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3)) | ||
85 | |||
86 | #define pgoff_to_pte(off) \ | ||
87 | ((pte_t) { .pte_low = \ | ||
88 | ((((off)) & ((1U << PTE_FILE_BITS1) - 1)) << PTE_FILE_SHIFT1) \ | ||
89 | + ((((off) >> PTE_FILE_BITS1) \ | ||
90 | & ((1U << PTE_FILE_BITS2) - 1)) \ | ||
91 | << PTE_FILE_SHIFT2) \ | ||
92 | + ((((off) >> (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \ | ||
93 | & ((1U << PTE_FILE_BITS3) - 1)) \ | ||
94 | << PTE_FILE_SHIFT3) \ | ||
95 | + ((((off) >> \ | ||
96 | (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3))) \ | ||
97 | << PTE_FILE_SHIFT4) \ | ||
98 | + _PAGE_FILE }) | ||
99 | |||
100 | #else /* CONFIG_MEM_SOFT_DIRTY */ | ||
101 | |||
58 | /* | 102 | /* |
59 | * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken, | 103 | * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken, |
60 | * split up the 29 bits of offset into this range: | 104 | * split up the 29 bits of offset into this range. |
61 | */ | 105 | */ |
62 | #define PTE_FILE_MAX_BITS 29 | 106 | #define PTE_FILE_MAX_BITS 29 |
63 | #define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1) | 107 | #define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1) |
@@ -88,6 +132,8 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp) | |||
88 | << PTE_FILE_SHIFT3) \ | 132 | << PTE_FILE_SHIFT3) \ |
89 | + _PAGE_FILE }) | 133 | + _PAGE_FILE }) |
90 | 134 | ||
135 | #endif /* CONFIG_MEM_SOFT_DIRTY */ | ||
136 | |||
91 | /* Encode and de-code a swap entry */ | 137 | /* Encode and de-code a swap entry */ |
92 | #if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE | 138 | #if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE |
93 | #define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1) | 139 | #define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1) |
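Editor's note: with CONFIG_MEM_SOFT_DIRTY enabled, the 2-level code above has to thread a 28-bit file offset around four reserved PTE bits instead of three. A user-space round-trip sketch of the same split/merge, using assumed bit positions (present = 0, file = 6, protnone = 8, soft-dirty = 11) and leaving out the _PAGE_FILE flag that the real macros OR in:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SHIFT1 1    /* just above the present bit    */
#define SHIFT2 7    /* just above the file bit       */
#define SHIFT3 9    /* just above the protnone bit   */
#define SHIFT4 12   /* just above the soft-dirty bit */
#define BITS1 (SHIFT2 - SHIFT1 - 1)   /* 5 bits */
#define BITS2 (SHIFT3 - SHIFT2 - 1)   /* 1 bit  */
#define BITS3 (SHIFT4 - SHIFT3 - 1)   /* 2 bits */

static uint32_t pgoff_to_pte(uint32_t off)
{
        return ((off & ((1U << BITS1) - 1)) << SHIFT1)
             + (((off >> BITS1) & ((1U << BITS2) - 1)) << SHIFT2)
             + (((off >> (BITS1 + BITS2)) & ((1U << BITS3) - 1)) << SHIFT3)
             + ((off >> (BITS1 + BITS2 + BITS3)) << SHIFT4);
}

static uint32_t pte_to_pgoff(uint32_t pte)
{
        return ((pte >> SHIFT1) & ((1U << BITS1) - 1))
             + (((pte >> SHIFT2) & ((1U << BITS2) - 1)) << BITS1)
             + (((pte >> SHIFT3) & ((1U << BITS3) - 1)) << (BITS1 + BITS2))
             + ((pte >> SHIFT4) << (BITS1 + BITS2 + BITS3));
}

int main(void)
{
        uint32_t off;

        for (off = 0; off < (1U << 20); off += 12345)
                assert(pte_to_pgoff(pgoff_to_pte(off)) == off);
        assert(pte_to_pgoff(pgoff_to_pte((1U << 28) - 1)) == (1U << 28) - 1);
        printf("28-bit offsets survive the split/merge round trip\n");
        return 0;
}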
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index 4cc9f2b7cdc3..81bb91b49a88 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h | |||
@@ -179,6 +179,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *pmdp) | |||
179 | /* | 179 | /* |
180 | * Bits 0, 6 and 7 are taken in the low part of the pte, | 180 | * Bits 0, 6 and 7 are taken in the low part of the pte, |
181 | * put the 32 bits of offset into the high part. | 181 | * put the 32 bits of offset into the high part. |
182 | * | ||
183 | * For soft-dirty tracking 11 bit is taken from | ||
184 | * the low part of pte as well. | ||
182 | */ | 185 | */ |
183 | #define pte_to_pgoff(pte) ((pte).pte_high) | 186 | #define pte_to_pgoff(pte) ((pte).pte_high) |
184 | #define pgoff_to_pte(off) \ | 187 | #define pgoff_to_pte(off) \ |
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 7dc305a46058..1c00631164c2 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h | |||
@@ -314,6 +314,36 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd) | |||
314 | return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY); | 314 | return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY); |
315 | } | 315 | } |
316 | 316 | ||
317 | static inline pte_t pte_swp_mksoft_dirty(pte_t pte) | ||
318 | { | ||
319 | return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY); | ||
320 | } | ||
321 | |||
322 | static inline int pte_swp_soft_dirty(pte_t pte) | ||
323 | { | ||
324 | return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY; | ||
325 | } | ||
326 | |||
327 | static inline pte_t pte_swp_clear_soft_dirty(pte_t pte) | ||
328 | { | ||
329 | return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY); | ||
330 | } | ||
331 | |||
332 | static inline pte_t pte_file_clear_soft_dirty(pte_t pte) | ||
333 | { | ||
334 | return pte_clear_flags(pte, _PAGE_SOFT_DIRTY); | ||
335 | } | ||
336 | |||
337 | static inline pte_t pte_file_mksoft_dirty(pte_t pte) | ||
338 | { | ||
339 | return pte_set_flags(pte, _PAGE_SOFT_DIRTY); | ||
340 | } | ||
341 | |||
342 | static inline int pte_file_soft_dirty(pte_t pte) | ||
343 | { | ||
344 | return pte_flags(pte) & _PAGE_SOFT_DIRTY; | ||
345 | } | ||
346 | |||
317 | /* | 347 | /* |
318 | * Mask out unsupported bits in a present pgprot. Non-present pgprots | 348 | * Mask out unsupported bits in a present pgprot. Non-present pgprots |
319 | * can use those bits for other purposes, so leave them be. | 349 | * can use those bits for other purposes, so leave them be. |
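Editor's note: the new pte_swp_*/pte_file_* soft-dirty accessors above are thin wrappers around pte_set_flags()/pte_clear_flags()/pte_flags(). A trivial user-space sketch of that set/clear/test pattern on a plain 64-bit word; bit 7 is chosen to match the swap soft-dirty comment in the pgtable_types.h hunk further down, but the pte_t here is a simplified stand-in:

#include <assert.h>
#include <stdint.h>

#define SWP_SOFT_DIRTY (1ULL << 7)   /* bit borrowed for swap soft-dirty */

typedef struct { uint64_t val; } pte_t;

static pte_t pte_set_flags(pte_t pte, uint64_t set)   { pte.val |= set;  return pte; }
static pte_t pte_clear_flags(pte_t pte, uint64_t clr) { pte.val &= ~clr; return pte; }
static int   pte_flag(pte_t pte, uint64_t flag)       { return !!(pte.val & flag); }

int main(void)
{
        pte_t pte = { 0 };

        pte = pte_set_flags(pte, SWP_SOFT_DIRTY);     /* ..._mksoft_dirty()    */
        assert(pte_flag(pte, SWP_SOFT_DIRTY));        /* ..._soft_dirty()      */
        pte = pte_clear_flags(pte, SWP_SOFT_DIRTY);   /* ..._clear_soft_dirty() */
        assert(!pte_flag(pte, SWP_SOFT_DIRTY));
        return 0;
}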
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index c98ac63aae48..f4843e031131 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h | |||
@@ -61,12 +61,27 @@ | |||
61 | * they do not conflict with each other. | 61 | * they do not conflict with each other. |
62 | */ | 62 | */ |
63 | 63 | ||
64 | #define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_HIDDEN | ||
65 | |||
64 | #ifdef CONFIG_MEM_SOFT_DIRTY | 66 | #ifdef CONFIG_MEM_SOFT_DIRTY |
65 | #define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN) | 67 | #define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY) |
66 | #else | 68 | #else |
67 | #define _PAGE_SOFT_DIRTY (_AT(pteval_t, 0)) | 69 | #define _PAGE_SOFT_DIRTY (_AT(pteval_t, 0)) |
68 | #endif | 70 | #endif |
69 | 71 | ||
72 | /* | ||
73 | * Tracking soft dirty bit when a page goes to a swap is tricky. | ||
74 | * We need a bit which can be stored in pte _and_ not conflict | ||
75 | * with swap entry format. On x86 bits 6 and 7 are *not* involved | ||
76 | * into swap entry computation, but bit 6 is used for nonlinear | ||
77 | * file mapping, so we borrow bit 7 for soft dirty tracking. | ||
78 | */ | ||
79 | #ifdef CONFIG_MEM_SOFT_DIRTY | ||
80 | #define _PAGE_SWP_SOFT_DIRTY _PAGE_PSE | ||
81 | #else | ||
82 | #define _PAGE_SWP_SOFT_DIRTY (_AT(pteval_t, 0)) | ||
83 | #endif | ||
84 | |||
70 | #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) | 85 | #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) |
71 | #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX) | 86 | #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX) |
72 | #else | 87 | #else |
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index 33692eaabab5..e3ddd7db723f 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h | |||
@@ -233,8 +233,4 @@ static inline void arch_write_unlock(arch_rwlock_t *rw) | |||
233 | #define arch_read_relax(lock) cpu_relax() | 233 | #define arch_read_relax(lock) cpu_relax() |
234 | #define arch_write_relax(lock) cpu_relax() | 234 | #define arch_write_relax(lock) cpu_relax() |
235 | 235 | ||
236 | /* The {read|write|spin}_lock() on x86 are full memory barriers. */ | ||
237 | static inline void smp_mb__after_lock(void) { } | ||
238 | #define ARCH_HAS_SMP_MB_AFTER_LOCK | ||
239 | |||
240 | #endif /* _ASM_X86_SPINLOCK_H */ | 236 | #endif /* _ASM_X86_SPINLOCK_H */ |
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index fbc9210b45bc..a45d8d4ace10 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
@@ -2270,6 +2270,7 @@ __init int intel_pmu_init(void) | |||
2270 | case 70: | 2270 | case 70: |
2271 | case 71: | 2271 | case 71: |
2272 | case 63: | 2272 | case 63: |
2273 | case 69: | ||
2273 | x86_pmu.late_ack = true; | 2274 | x86_pmu.late_ack = true; |
2274 | memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids)); | 2275 | memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids)); |
2275 | memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); | 2276 | memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); |
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c index cad791dbde95..1fb6c72717bd 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c | |||
@@ -314,8 +314,8 @@ static struct uncore_event_desc snbep_uncore_imc_events[] = { | |||
314 | static struct uncore_event_desc snbep_uncore_qpi_events[] = { | 314 | static struct uncore_event_desc snbep_uncore_qpi_events[] = { |
315 | INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"), | 315 | INTEL_UNCORE_EVENT_DESC(clockticks, "event=0x14"), |
316 | INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"), | 316 | INTEL_UNCORE_EVENT_DESC(txl_flits_active, "event=0x00,umask=0x06"), |
317 | INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x02,umask=0x08"), | 317 | INTEL_UNCORE_EVENT_DESC(drs_data, "event=0x102,umask=0x08"), |
318 | INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x03,umask=0x04"), | 318 | INTEL_UNCORE_EVENT_DESC(ncb_data, "event=0x103,umask=0x04"), |
319 | { /* end: all zeroes */ }, | 319 | { /* end: all zeroes */ }, |
320 | }; | 320 | }; |
321 | 321 | ||
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index 94ab6b90dd3f..63bdb29b2549 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c | |||
@@ -196,15 +196,23 @@ static void __init ati_bugs_contd(int num, int slot, int func) | |||
196 | static void __init intel_remapping_check(int num, int slot, int func) | 196 | static void __init intel_remapping_check(int num, int slot, int func) |
197 | { | 197 | { |
198 | u8 revision; | 198 | u8 revision; |
199 | u16 device; | ||
199 | 200 | ||
201 | device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID); | ||
200 | revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID); | 202 | revision = read_pci_config_byte(num, slot, func, PCI_REVISION_ID); |
201 | 203 | ||
202 | /* | 204 | /* |
203 | * Revision 0x13 of this chipset supports irq remapping | 205 | * Revision 13 of all triggering devices id in this quirk have |
204 | * but has an erratum that breaks its behavior, flag it as such | 206 | * a problem draining interrupts when irq remapping is enabled, |
207 | * and should be flagged as broken. Additionally revisions 0x12 | ||
208 | * and 0x22 of device id 0x3405 has this problem. | ||
205 | */ | 209 | */ |
206 | if (revision == 0x13) | 210 | if (revision == 0x13) |
207 | set_irq_remapping_broken(); | 211 | set_irq_remapping_broken(); |
212 | else if ((device == 0x3405) && | ||
213 | ((revision == 0x12) || | ||
214 | (revision == 0x22))) | ||
215 | set_irq_remapping_broken(); | ||
208 | 216 | ||
209 | } | 217 | } |
210 | 218 | ||
@@ -239,6 +247,8 @@ static struct chipset early_qrk[] __initdata = { | |||
239 | PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd }, | 247 | PCI_CLASS_SERIAL_SMBUS, PCI_ANY_ID, 0, ati_bugs_contd }, |
240 | { PCI_VENDOR_ID_INTEL, 0x3403, PCI_CLASS_BRIDGE_HOST, | 248 | { PCI_VENDOR_ID_INTEL, 0x3403, PCI_CLASS_BRIDGE_HOST, |
241 | PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check }, | 249 | PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check }, |
250 | { PCI_VENDOR_ID_INTEL, 0x3405, PCI_CLASS_BRIDGE_HOST, | ||
251 | PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check }, | ||
242 | { PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST, | 252 | { PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST, |
243 | PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check }, | 253 | PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check }, |
244 | {} | 254 | {} |
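Editor's note: the early-quirks change extends intel_remapping_check() so that device 0x3405 at revisions 0x12 and 0x22 is also flagged. A small sketch of the same predicate in plain C, with hard-coded sample values standing in for the PCI config-space reads:

#include <stdio.h>

/* Mirrors the condition in the hunk above: revision 0x13 is always flagged,
 * and device 0x3405 is additionally flagged at revisions 0x12 and 0x22.
 */
static int irq_remapping_broken(unsigned short device, unsigned char revision)
{
        if (revision == 0x13)
                return 1;
        if (device == 0x3405 && (revision == 0x12 || revision == 0x22))
                return 1;
        return 0;
}

int main(void)
{
        printf("0x3403 rev 0x13 -> %d\n", irq_remapping_broken(0x3403, 0x13)); /* 1 */
        printf("0x3405 rev 0x22 -> %d\n", irq_remapping_broken(0x3405, 0x22)); /* 1 */
        printf("0x3406 rev 0x12 -> %d\n", irq_remapping_broken(0x3406, 0x12)); /* 0 */
        return 0;
}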
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c index 202d24f0f7e7..5d576ab34403 100644 --- a/arch/x86/kernel/i387.c +++ b/arch/x86/kernel/i387.c | |||
@@ -116,7 +116,7 @@ static void mxcsr_feature_mask_init(void) | |||
116 | 116 | ||
117 | if (cpu_has_fxsr) { | 117 | if (cpu_has_fxsr) { |
118 | memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct)); | 118 | memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct)); |
119 | asm volatile("fxsave %0" : : "m" (fx_scratch)); | 119 | asm volatile("fxsave %0" : "+m" (fx_scratch)); |
120 | mask = fx_scratch.mxcsr_mask; | 120 | mask = fx_scratch.mxcsr_mask; |
121 | if (mask == 0) | 121 | if (mask == 0) |
122 | mask = 0x0000ffbf; | 122 | mask = 0x0000ffbf; |
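Editor's note: the i387.c one-liner turns the fxsave operand from a plain input ("m") into a read-write operand ("+m"), so the compiler treats fx_scratch as written by the instruction and will not, for example, discard the preceding memset as a dead store. A hedged stand-alone example of the same constraint on a simple increment (GCC/Clang extended asm, x86 only; the function name is mine):

#include <stdio.h>

/* "+m" declares the operand as both read and written in memory, which keeps
 * earlier stores to it alive and forces later reads to see the new value.
 */
static void mem_inc(unsigned int *v)
{
        asm volatile("incl %0" : "+m" (*v));
}

int main(void)
{
        unsigned int x = 41;

        mem_inc(&x);
        printf("x = %u\n", x);   /* prints 42 */
        return 0;
}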
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c index 47ebb1dbfbcb..7a0adb7ee433 100644 --- a/arch/x86/kernel/microcode_amd.c +++ b/arch/x86/kernel/microcode_amd.c | |||
@@ -220,12 +220,13 @@ int apply_microcode_amd(int cpu) | |||
220 | return 0; | 220 | return 0; |
221 | } | 221 | } |
222 | 222 | ||
223 | if (__apply_microcode_amd(mc_amd)) | 223 | if (__apply_microcode_amd(mc_amd)) { |
224 | pr_err("CPU%d: update failed for patch_level=0x%08x\n", | 224 | pr_err("CPU%d: update failed for patch_level=0x%08x\n", |
225 | cpu, mc_amd->hdr.patch_id); | 225 | cpu, mc_amd->hdr.patch_id); |
226 | else | 226 | return -1; |
227 | pr_info("CPU%d: new patch_level=0x%08x\n", cpu, | 227 | } |
228 | mc_amd->hdr.patch_id); | 228 | pr_info("CPU%d: new patch_level=0x%08x\n", cpu, |
229 | mc_amd->hdr.patch_id); | ||
229 | 230 | ||
230 | uci->cpu_sig.rev = mc_amd->hdr.patch_id; | 231 | uci->cpu_sig.rev = mc_amd->hdr.patch_id; |
231 | c->microcode = mc_amd->hdr.patch_id; | 232 | c->microcode = mc_amd->hdr.patch_id; |
diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c index dbded5aedb81..48f8375e4c6b 100644 --- a/arch/x86/kernel/sys_x86_64.c +++ b/arch/x86/kernel/sys_x86_64.c | |||
@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin, | |||
101 | *begin = new_begin; | 101 | *begin = new_begin; |
102 | } | 102 | } |
103 | } else { | 103 | } else { |
104 | *begin = TASK_UNMAPPED_BASE; | 104 | *begin = mmap_legacy_base(); |
105 | *end = TASK_SIZE; | 105 | *end = TASK_SIZE; |
106 | } | 106 | } |
107 | } | 107 | } |
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index 62c29a5bfe26..f63778cb2363 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c | |||
@@ -98,7 +98,7 @@ static unsigned long mmap_base(void) | |||
98 | * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64 | 98 | * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64 |
99 | * does, but not when emulating X86_32 | 99 | * does, but not when emulating X86_32 |
100 | */ | 100 | */ |
101 | static unsigned long mmap_legacy_base(void) | 101 | unsigned long mmap_legacy_base(void) |
102 | { | 102 | { |
103 | if (mmap_is_ia32()) | 103 | if (mmap_is_ia32()) |
104 | return TASK_UNMAPPED_BASE; | 104 | return TASK_UNMAPPED_BASE; |
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c index fd6c51cc3acb..5a74a9c1e42c 100644 --- a/drivers/acpi/acpi_processor.c +++ b/drivers/acpi/acpi_processor.c | |||
@@ -451,7 +451,6 @@ static void acpi_processor_remove(struct acpi_device *device) | |||
451 | /* Clean up. */ | 451 | /* Clean up. */ |
452 | per_cpu(processor_device_array, pr->id) = NULL; | 452 | per_cpu(processor_device_array, pr->id) = NULL; |
453 | per_cpu(processors, pr->id) = NULL; | 453 | per_cpu(processors, pr->id) = NULL; |
454 | try_offline_node(cpu_to_node(pr->id)); | ||
455 | 454 | ||
456 | /* Remove the CPU. */ | 455 | /* Remove the CPU. */ |
457 | get_online_cpus(); | 456 | get_online_cpus(); |
@@ -459,6 +458,8 @@ static void acpi_processor_remove(struct acpi_device *device) | |||
459 | acpi_unmap_lsapic(pr->id); | 458 | acpi_unmap_lsapic(pr->id); |
460 | put_online_cpus(); | 459 | put_online_cpus(); |
461 | 460 | ||
461 | try_offline_node(cpu_to_node(pr->id)); | ||
462 | |||
462 | out: | 463 | out: |
463 | free_cpumask_var(pr->throttling.shared_cpu_map); | 464 | free_cpumask_var(pr->throttling.shared_cpu_map); |
464 | kfree(pr); | 465 | kfree(pr); |
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index f68095756fb7..408f6b2a5fa8 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c | |||
@@ -31,6 +31,7 @@ static LIST_HEAD(bus_type_list); | |||
31 | static DECLARE_RWSEM(bus_type_sem); | 31 | static DECLARE_RWSEM(bus_type_sem); |
32 | 32 | ||
33 | #define PHYSICAL_NODE_STRING "physical_node" | 33 | #define PHYSICAL_NODE_STRING "physical_node" |
34 | #define PHYSICAL_NODE_NAME_SIZE (sizeof(PHYSICAL_NODE_STRING) + 10) | ||
34 | 35 | ||
35 | int register_acpi_bus_type(struct acpi_bus_type *type) | 36 | int register_acpi_bus_type(struct acpi_bus_type *type) |
36 | { | 37 | { |
@@ -78,41 +79,108 @@ static struct acpi_bus_type *acpi_get_bus_type(struct device *dev) | |||
78 | return ret; | 79 | return ret; |
79 | } | 80 | } |
80 | 81 | ||
81 | static acpi_status do_acpi_find_child(acpi_handle handle, u32 lvl_not_used, | 82 | static acpi_status acpi_dev_present(acpi_handle handle, u32 lvl_not_used, |
82 | void *addr_p, void **ret_p) | 83 | void *not_used, void **ret_p) |
83 | { | 84 | { |
84 | unsigned long long addr, sta; | 85 | struct acpi_device *adev = NULL; |
85 | acpi_status status; | ||
86 | 86 | ||
87 | status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr); | 87 | acpi_bus_get_device(handle, &adev); |
88 | if (ACPI_SUCCESS(status) && addr == *((u64 *)addr_p)) { | 88 | if (adev) { |
89 | *ret_p = handle; | 89 | *ret_p = handle; |
90 | status = acpi_bus_get_status_handle(handle, &sta); | 90 | return AE_CTRL_TERMINATE; |
91 | if (ACPI_SUCCESS(status) && (sta & ACPI_STA_DEVICE_ENABLED)) | ||
92 | return AE_CTRL_TERMINATE; | ||
93 | } | 91 | } |
94 | return AE_OK; | 92 | return AE_OK; |
95 | } | 93 | } |
96 | 94 | ||
97 | acpi_handle acpi_get_child(acpi_handle parent, u64 address) | 95 | static bool acpi_extra_checks_passed(acpi_handle handle, bool is_bridge) |
98 | { | 96 | { |
99 | void *ret = NULL; | 97 | unsigned long long sta; |
98 | acpi_status status; | ||
99 | |||
100 | status = acpi_bus_get_status_handle(handle, &sta); | ||
101 | if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_ENABLED)) | ||
102 | return false; | ||
103 | |||
104 | if (is_bridge) { | ||
105 | void *test = NULL; | ||
106 | |||
107 | /* Check if this object has at least one child device. */ | ||
108 | acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, | ||
109 | acpi_dev_present, NULL, NULL, &test); | ||
110 | return !!test; | ||
111 | } | ||
112 | return true; | ||
113 | } | ||
114 | |||
115 | struct find_child_context { | ||
116 | u64 addr; | ||
117 | bool is_bridge; | ||
118 | acpi_handle ret; | ||
119 | bool ret_checked; | ||
120 | }; | ||
121 | |||
122 | static acpi_status do_find_child(acpi_handle handle, u32 lvl_not_used, | ||
123 | void *data, void **not_used) | ||
124 | { | ||
125 | struct find_child_context *context = data; | ||
126 | unsigned long long addr; | ||
127 | acpi_status status; | ||
100 | 128 | ||
101 | if (!parent) | 129 | status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &addr); |
102 | return NULL; | 130 | if (ACPI_FAILURE(status) || addr != context->addr) |
131 | return AE_OK; | ||
103 | 132 | ||
104 | acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, NULL, | 133 | if (!context->ret) { |
105 | do_acpi_find_child, &address, &ret); | 134 | /* This is the first matching object. Save its handle. */ |
106 | return (acpi_handle)ret; | 135 | context->ret = handle; |
136 | return AE_OK; | ||
137 | } | ||
138 | /* | ||
139 | * There is more than one matching object with the same _ADR value. | ||
140 | * That really is unexpected, so we are kind of beyond the scope of the | ||
141 | * spec here. We have to choose which one to return, though. | ||
142 | * | ||
143 | * First, check if the previously found object is good enough and return | ||
144 | * its handle if so. Second, check the same for the object that we've | ||
145 | * just found. | ||
146 | */ | ||
147 | if (!context->ret_checked) { | ||
148 | if (acpi_extra_checks_passed(context->ret, context->is_bridge)) | ||
149 | return AE_CTRL_TERMINATE; | ||
150 | else | ||
151 | context->ret_checked = true; | ||
152 | } | ||
153 | if (acpi_extra_checks_passed(handle, context->is_bridge)) { | ||
154 | context->ret = handle; | ||
155 | return AE_CTRL_TERMINATE; | ||
156 | } | ||
157 | return AE_OK; | ||
107 | } | 158 | } |
108 | EXPORT_SYMBOL(acpi_get_child); | 159 | |
160 | acpi_handle acpi_find_child(acpi_handle parent, u64 addr, bool is_bridge) | ||
161 | { | ||
162 | if (parent) { | ||
163 | struct find_child_context context = { | ||
164 | .addr = addr, | ||
165 | .is_bridge = is_bridge, | ||
166 | }; | ||
167 | |||
168 | acpi_walk_namespace(ACPI_TYPE_DEVICE, parent, 1, do_find_child, | ||
169 | NULL, &context, NULL); | ||
170 | return context.ret; | ||
171 | } | ||
172 | return NULL; | ||
173 | } | ||
174 | EXPORT_SYMBOL_GPL(acpi_find_child); | ||
109 | 175 | ||
110 | int acpi_bind_one(struct device *dev, acpi_handle handle) | 176 | int acpi_bind_one(struct device *dev, acpi_handle handle) |
111 | { | 177 | { |
112 | struct acpi_device *acpi_dev; | 178 | struct acpi_device *acpi_dev; |
113 | acpi_status status; | 179 | acpi_status status; |
114 | struct acpi_device_physical_node *physical_node, *pn; | 180 | struct acpi_device_physical_node *physical_node, *pn; |
115 | char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2]; | 181 | char physical_node_name[PHYSICAL_NODE_NAME_SIZE]; |
182 | struct list_head *physnode_list; | ||
183 | unsigned int node_id; | ||
116 | int retval = -EINVAL; | 184 | int retval = -EINVAL; |
117 | 185 | ||
118 | if (ACPI_HANDLE(dev)) { | 186 | if (ACPI_HANDLE(dev)) { |
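Editor's note: the rewritten lookup above walks the namespace with a small context struct so it can remember the first _ADR match but upgrade to a later one that passes the extra status/child checks. A user-space sketch of that "remember first, prefer first good" selection over a plain array, with acpi_walk_namespace(), the handles, and acpi_extra_checks_passed() replaced by a loop, integers, and a flag:

#include <stdio.h>

struct candidate {
        unsigned long long addr;
        int good;               /* stands in for acpi_extra_checks_passed() */
};

/* Return the index to use: the first good match if any, otherwise the first
 * match at all, otherwise -1. Mirrors the flow of do_find_child().
 */
static int find_child(const struct candidate *c, int n, unsigned long long addr)
{
        int ret = -1, ret_checked = 0, i;

        for (i = 0; i < n; i++) {
                if (c[i].addr != addr)
                        continue;
                if (ret < 0) {          /* first match: save it */
                        ret = i;
                        continue;
                }
                if (!ret_checked) {     /* duplicate _ADR: is the saved one good? */
                        if (c[ret].good)
                                return ret;
                        ret_checked = 1;
                }
                if (c[i].good)          /* otherwise take the first good duplicate */
                        return i;
        }
        return ret;
}

int main(void)
{
        struct candidate devs[] = {
                { 0x1, 0 }, { 0x2, 0 }, { 0x2, 1 }, { 0x3, 1 },
        };

        printf("addr 0x2 -> index %d\n", find_child(devs, 4, 0x2)); /* 2: the good duplicate */
        printf("addr 0x1 -> index %d\n", find_child(devs, 4, 0x1)); /* 0: only match */
        return 0;
}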
@@ -139,25 +207,27 @@ int acpi_bind_one(struct device *dev, acpi_handle handle) | |||
139 | 207 | ||
140 | mutex_lock(&acpi_dev->physical_node_lock); | 208 | mutex_lock(&acpi_dev->physical_node_lock); |
141 | 209 | ||
142 | /* Sanity check. */ | 210 | /* |
143 | list_for_each_entry(pn, &acpi_dev->physical_node_list, node) | 211 | * Keep the list sorted by node_id so that the IDs of removed nodes can |
212 | * be recycled easily. | ||
213 | */ | ||
214 | physnode_list = &acpi_dev->physical_node_list; | ||
215 | node_id = 0; | ||
216 | list_for_each_entry(pn, &acpi_dev->physical_node_list, node) { | ||
217 | /* Sanity check. */ | ||
144 | if (pn->dev == dev) { | 218 | if (pn->dev == dev) { |
145 | dev_warn(dev, "Already associated with ACPI node\n"); | 219 | dev_warn(dev, "Already associated with ACPI node\n"); |
146 | goto err_free; | 220 | goto err_free; |
147 | } | 221 | } |
148 | 222 | if (pn->node_id == node_id) { | |
149 | /* allocate physical node id according to physical_node_id_bitmap */ | 223 | physnode_list = &pn->node; |
150 | physical_node->node_id = | 224 | node_id++; |
151 | find_first_zero_bit(acpi_dev->physical_node_id_bitmap, | 225 | } |
152 | ACPI_MAX_PHYSICAL_NODE); | ||
153 | if (physical_node->node_id >= ACPI_MAX_PHYSICAL_NODE) { | ||
154 | retval = -ENOSPC; | ||
155 | goto err_free; | ||
156 | } | 226 | } |
157 | 227 | ||
158 | set_bit(physical_node->node_id, acpi_dev->physical_node_id_bitmap); | 228 | physical_node->node_id = node_id; |
159 | physical_node->dev = dev; | 229 | physical_node->dev = dev; |
160 | list_add_tail(&physical_node->node, &acpi_dev->physical_node_list); | 230 | list_add(&physical_node->node, physnode_list); |
161 | acpi_dev->physical_node_count++; | 231 | acpi_dev->physical_node_count++; |
162 | 232 | ||
163 | mutex_unlock(&acpi_dev->physical_node_lock); | 233 | mutex_unlock(&acpi_dev->physical_node_lock); |
@@ -208,7 +278,7 @@ int acpi_unbind_one(struct device *dev) | |||
208 | 278 | ||
209 | mutex_lock(&acpi_dev->physical_node_lock); | 279 | mutex_lock(&acpi_dev->physical_node_lock); |
210 | list_for_each_safe(node, next, &acpi_dev->physical_node_list) { | 280 | list_for_each_safe(node, next, &acpi_dev->physical_node_list) { |
211 | char physical_node_name[sizeof(PHYSICAL_NODE_STRING) + 2]; | 281 | char physical_node_name[PHYSICAL_NODE_NAME_SIZE]; |
212 | 282 | ||
213 | entry = list_entry(node, struct acpi_device_physical_node, | 283 | entry = list_entry(node, struct acpi_device_physical_node, |
214 | node); | 284 | node); |
@@ -216,7 +286,6 @@ int acpi_unbind_one(struct device *dev) | |||
216 | continue; | 286 | continue; |
217 | 287 | ||
218 | list_del(node); | 288 | list_del(node); |
219 | clear_bit(entry->node_id, acpi_dev->physical_node_id_bitmap); | ||
220 | 289 | ||
221 | acpi_dev->physical_node_count--; | 290 | acpi_dev->physical_node_count--; |
222 | 291 | ||
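Editor's note: acpi_bind_one() drops the fixed-size physical_node_id_bitmap and instead keeps the physical-node list sorted by node_id, handing out the first gap as the new id so removed ids are recycled. The idea in isolation, over a user-space singly linked list (the struct and list implementation are mine, not the driver's, and the sanity check against double binding is omitted):

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct pnode {
        unsigned int node_id;
        struct pnode *next;
};

/* Insert a new node with the smallest unused id, keeping the list sorted. */
static struct pnode *add_node(struct pnode **head)
{
        struct pnode **pos = head, *pn;
        unsigned int node_id = 0;

        while (*pos && (*pos)->node_id == node_id) {
                node_id++;
                pos = &(*pos)->next;
        }
        pn = malloc(sizeof(*pn));
        pn->node_id = node_id;
        pn->next = *pos;
        *pos = pn;
        return pn;
}

int main(void)
{
        struct pnode *list = NULL, *a, *b, *c;

        a = add_node(&list);            /* id 0 */
        b = add_node(&list);            /* id 1 */
        c = add_node(&list);            /* id 2 */
        assert(a->node_id == 0 && b->node_id == 1 && c->node_id == 2);

        /* Drop id 1; the next insertion reuses it. */
        list->next = c;
        free(b);
        assert(add_node(&list)->node_id == 1);
        printf("freed node ids are recycled in order\n");
        return 0;
}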
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c index aa1227a7e3f2..04a13784dd20 100644 --- a/drivers/acpi/proc.c +++ b/drivers/acpi/proc.c | |||
@@ -311,6 +311,8 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset) | |||
311 | dev->pnp.bus_id, | 311 | dev->pnp.bus_id, |
312 | (u32) dev->wakeup.sleep_state); | 312 | (u32) dev->wakeup.sleep_state); |
313 | 313 | ||
314 | mutex_lock(&dev->physical_node_lock); | ||
315 | |||
314 | if (!dev->physical_node_count) { | 316 | if (!dev->physical_node_count) { |
315 | seq_printf(seq, "%c%-8s\n", | 317 | seq_printf(seq, "%c%-8s\n", |
316 | dev->wakeup.flags.run_wake ? '*' : ' ', | 318 | dev->wakeup.flags.run_wake ? '*' : ' ', |
@@ -338,6 +340,8 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset) | |||
338 | put_device(ldev); | 340 | put_device(ldev); |
339 | } | 341 | } |
340 | } | 342 | } |
343 | |||
344 | mutex_unlock(&dev->physical_node_lock); | ||
341 | } | 345 | } |
342 | mutex_unlock(&acpi_device_lock); | 346 | mutex_unlock(&acpi_device_lock); |
343 | return 0; | 347 | return 0; |
@@ -347,12 +351,16 @@ static void physical_device_enable_wakeup(struct acpi_device *adev) | |||
347 | { | 351 | { |
348 | struct acpi_device_physical_node *entry; | 352 | struct acpi_device_physical_node *entry; |
349 | 353 | ||
354 | mutex_lock(&adev->physical_node_lock); | ||
355 | |||
350 | list_for_each_entry(entry, | 356 | list_for_each_entry(entry, |
351 | &adev->physical_node_list, node) | 357 | &adev->physical_node_list, node) |
352 | if (entry->dev && device_can_wakeup(entry->dev)) { | 358 | if (entry->dev && device_can_wakeup(entry->dev)) { |
353 | bool enable = !device_may_wakeup(entry->dev); | 359 | bool enable = !device_may_wakeup(entry->dev); |
354 | device_set_wakeup_enable(entry->dev, enable); | 360 | device_set_wakeup_enable(entry->dev, enable); |
355 | } | 361 | } |
362 | |||
363 | mutex_unlock(&adev->physical_node_lock); | ||
356 | } | 364 | } |
357 | 365 | ||
358 | static ssize_t | 366 | static ssize_t |
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index 0ec434d2586d..e1284b8dc6ee 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c | |||
@@ -689,7 +689,7 @@ static int acpi_video_bqc_quirk(struct acpi_video_device *device, | |||
689 | * Some systems always report current brightness level as maximum | 689 | * Some systems always report current brightness level as maximum |
690 | * through _BQC, we need to test another value for them. | 690 | * through _BQC, we need to test another value for them. |
691 | */ | 691 | */ |
692 | test_level = current_level == max_level ? br->levels[2] : max_level; | 692 | test_level = current_level == max_level ? br->levels[3] : max_level; |
693 | 693 | ||
694 | result = acpi_video_device_lcd_set_level(device, test_level); | 694 | result = acpi_video_device_lcd_set_level(device, test_level); |
695 | if (result) | 695 | if (result) |
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c index 4ec7c04b3f82..26386f0b89a8 100644 --- a/drivers/ata/pata_imx.c +++ b/drivers/ata/pata_imx.c | |||
@@ -237,6 +237,7 @@ static const struct of_device_id imx_pata_dt_ids[] = { | |||
237 | /* sentinel */ | 237 | /* sentinel */ |
238 | } | 238 | } |
239 | }; | 239 | }; |
240 | MODULE_DEVICE_TABLE(of, imx_pata_dt_ids); | ||
240 | 241 | ||
241 | static struct platform_driver pata_imx_driver = { | 242 | static struct platform_driver pata_imx_driver = { |
242 | .probe = pata_imx_probe, | 243 | .probe = pata_imx_probe, |
diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c index e69102696533..3455f833e473 100644 --- a/drivers/base/regmap/regcache.c +++ b/drivers/base/regmap/regcache.c | |||
@@ -719,7 +719,8 @@ static int regcache_sync_block_raw(struct regmap *map, void *block, | |||
719 | } | 719 | } |
720 | } | 720 | } |
721 | 721 | ||
722 | return regcache_sync_block_raw_flush(map, &data, base, regtmp); | 722 | return regcache_sync_block_raw_flush(map, &data, base, regtmp + |
723 | map->reg_stride); | ||
723 | } | 724 | } |
724 | 725 | ||
725 | int regcache_sync_block(struct regmap *map, void *block, | 726 | int regcache_sync_block(struct regmap *map, void *block, |
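Editor's note: the regcache fix passes regtmp + map->reg_stride because the flush helper treats its last argument as an exclusive end, so passing regtmp itself would leave the final register of the block unsynced. A toy illustration of the exclusive-end convention (the flush function and register layout are invented here):

#include <stdio.h>

/* Write out registers in [base, end): 'end' itself is NOT flushed. */
static void flush_range(const unsigned int *vals, unsigned int base,
                        unsigned int end, unsigned int stride)
{
        unsigned int reg;

        for (reg = base; reg < end; reg += stride)
                printf("sync reg 0x%02x = 0x%02x\n", reg, vals[reg]);
}

int main(void)
{
        unsigned int vals[16] = { [0x04] = 0xaa, [0x05] = 0xbb, [0x06] = 0xcc };
        unsigned int stride = 1, last = 0x06;

        /* Passing 'last' would skip register 0x06; pass last + stride instead. */
        flush_range(vals, 0x04, last + stride, stride);
        return 0;
}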
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index 99cb944a002d..4d45dba7fb8f 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c | |||
@@ -906,16 +906,10 @@ bio_pageinc(struct bio *bio) | |||
906 | int i; | 906 | int i; |
907 | 907 | ||
908 | bio_for_each_segment(bv, bio, i) { | 908 | bio_for_each_segment(bv, bio, i) { |
909 | page = bv->bv_page; | ||
910 | /* Non-zero page count for non-head members of | 909 | /* Non-zero page count for non-head members of |
911 | * compound pages is no longer allowed by the kernel, | 910 | * compound pages is no longer allowed by the kernel. |
912 | * but this has never been seen here. | ||
913 | */ | 911 | */ |
914 | if (unlikely(PageCompound(page))) | 912 | page = compound_trans_head(bv->bv_page); |
915 | if (compound_trans_head(page) != page) { | ||
916 | pr_crit("page tail used for block I/O\n"); | ||
917 | BUG(); | ||
918 | } | ||
919 | atomic_inc(&page->_count); | 913 | atomic_inc(&page->_count); |
920 | } | 914 | } |
921 | } | 915 | } |
@@ -924,10 +918,13 @@ static void | |||
924 | bio_pagedec(struct bio *bio) | 918 | bio_pagedec(struct bio *bio) |
925 | { | 919 | { |
926 | struct bio_vec *bv; | 920 | struct bio_vec *bv; |
921 | struct page *page; | ||
927 | int i; | 922 | int i; |
928 | 923 | ||
929 | bio_for_each_segment(bv, bio, i) | 924 | bio_for_each_segment(bv, bio, i) { |
930 | atomic_dec(&bv->bv_page->_count); | 925 | page = compound_trans_head(bv->bv_page); |
926 | atomic_dec(&page->_count); | ||
927 | } | ||
931 | } | 928 | } |
932 | 929 | ||
933 | static void | 930 | static void |
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 1b456fe9b87a..fc45567ad3ac 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c | |||
@@ -272,9 +272,12 @@ static struct port *find_port_by_devt_in_portdev(struct ports_device *portdev, | |||
272 | unsigned long flags; | 272 | unsigned long flags; |
273 | 273 | ||
274 | spin_lock_irqsave(&portdev->ports_lock, flags); | 274 | spin_lock_irqsave(&portdev->ports_lock, flags); |
275 | list_for_each_entry(port, &portdev->ports, list) | 275 | list_for_each_entry(port, &portdev->ports, list) { |
276 | if (port->cdev->dev == dev) | 276 | if (port->cdev->dev == dev) { |
277 | kref_get(&port->kref); | ||
277 | goto out; | 278 | goto out; |
279 | } | ||
280 | } | ||
278 | port = NULL; | 281 | port = NULL; |
279 | out: | 282 | out: |
280 | spin_unlock_irqrestore(&portdev->ports_lock, flags); | 283 | spin_unlock_irqrestore(&portdev->ports_lock, flags); |
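Editor's note: find_port_by_devt_in_portdev() now takes its reference inside the same critical section that finds the port, closing the window in which the port could be freed between lookup and kref_get(). A compact pthread-based sketch of the "look up and pin under the list lock" rule; the types and the plain integer refcount are simplified stand-ins for struct kref and the ports_lock spinlock:

#include <pthread.h>
#include <stdio.h>

struct port {
        int devt;
        int refcount;            /* stands in for struct kref */
        struct port *next;
};

static pthread_mutex_t ports_lock = PTHREAD_MUTEX_INITIALIZER;
static struct port *ports;

/* Take the reference while still holding the lock that protects the list,
 * so the port cannot disappear between being found and being pinned.
 */
static struct port *find_port_by_devt(int devt)
{
        struct port *port;

        pthread_mutex_lock(&ports_lock);
        for (port = ports; port; port = port->next) {
                if (port->devt == devt) {
                        port->refcount++;        /* kref_get() equivalent */
                        break;
                }
        }
        pthread_mutex_unlock(&ports_lock);
        return port;
}

int main(void)
{
        struct port p = { .devt = 5, .refcount = 1, .next = NULL };
        struct port *found;

        ports = &p;
        found = find_port_by_devt(5);
        printf("found=%d refcount=%d\n", found != NULL, p.refcount);
        return 0;
}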
@@ -746,6 +749,10 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf, | |||
746 | 749 | ||
747 | port = filp->private_data; | 750 | port = filp->private_data; |
748 | 751 | ||
752 | /* Port is hot-unplugged. */ | ||
753 | if (!port->guest_connected) | ||
754 | return -ENODEV; | ||
755 | |||
749 | if (!port_has_data(port)) { | 756 | if (!port_has_data(port)) { |
750 | /* | 757 | /* |
751 | * If nothing's connected on the host just return 0 in | 758 | * If nothing's connected on the host just return 0 in |
@@ -762,7 +769,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf, | |||
762 | if (ret < 0) | 769 | if (ret < 0) |
763 | return ret; | 770 | return ret; |
764 | } | 771 | } |
765 | /* Port got hot-unplugged. */ | 772 | /* Port got hot-unplugged while we were waiting above. */ |
766 | if (!port->guest_connected) | 773 | if (!port->guest_connected) |
767 | return -ENODEV; | 774 | return -ENODEV; |
768 | /* | 775 | /* |
@@ -932,13 +939,25 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe, | |||
932 | if (is_rproc_serial(port->out_vq->vdev)) | 939 | if (is_rproc_serial(port->out_vq->vdev)) |
933 | return -EINVAL; | 940 | return -EINVAL; |
934 | 941 | ||
942 | /* | ||
943 | * pipe->nrbufs == 0 means there are no data to transfer, | ||
944 | * so this returns just 0 for no data. | ||
945 | */ | ||
946 | pipe_lock(pipe); | ||
947 | if (!pipe->nrbufs) { | ||
948 | ret = 0; | ||
949 | goto error_out; | ||
950 | } | ||
951 | |||
935 | ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK); | 952 | ret = wait_port_writable(port, filp->f_flags & O_NONBLOCK); |
936 | if (ret < 0) | 953 | if (ret < 0) |
937 | return ret; | 954 | goto error_out; |
938 | 955 | ||
939 | buf = alloc_buf(port->out_vq, 0, pipe->nrbufs); | 956 | buf = alloc_buf(port->out_vq, 0, pipe->nrbufs); |
940 | if (!buf) | 957 | if (!buf) { |
941 | return -ENOMEM; | 958 | ret = -ENOMEM; |
959 | goto error_out; | ||
960 | } | ||
942 | 961 | ||
943 | sgl.n = 0; | 962 | sgl.n = 0; |
944 | sgl.len = 0; | 963 | sgl.len = 0; |
@@ -946,12 +965,17 @@ static ssize_t port_fops_splice_write(struct pipe_inode_info *pipe, | |||
946 | sgl.sg = buf->sg; | 965 | sgl.sg = buf->sg; |
947 | sg_init_table(sgl.sg, sgl.size); | 966 | sg_init_table(sgl.sg, sgl.size); |
948 | ret = __splice_from_pipe(pipe, &sd, pipe_to_sg); | 967 | ret = __splice_from_pipe(pipe, &sd, pipe_to_sg); |
968 | pipe_unlock(pipe); | ||
949 | if (likely(ret > 0)) | 969 | if (likely(ret > 0)) |
950 | ret = __send_to_port(port, buf->sg, sgl.n, sgl.len, buf, true); | 970 | ret = __send_to_port(port, buf->sg, sgl.n, sgl.len, buf, true); |
951 | 971 | ||
952 | if (unlikely(ret <= 0)) | 972 | if (unlikely(ret <= 0)) |
953 | free_buf(buf, true); | 973 | free_buf(buf, true); |
954 | return ret; | 974 | return ret; |
975 | |||
976 | error_out: | ||
977 | pipe_unlock(pipe); | ||
978 | return ret; | ||
955 | } | 979 | } |
956 | 980 | ||
957 | static unsigned int port_fops_poll(struct file *filp, poll_table *wait) | 981 | static unsigned int port_fops_poll(struct file *filp, poll_table *wait) |
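Editor's note: port_fops_splice_write() now takes pipe_lock() before inspecting pipe->nrbufs and funnels every early failure through an error_out label that drops the lock. The "single unlock point via goto" shape in isolation, using a user-space mutex in place of pipe_lock() and an invented do_work() step:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int do_work(int nrbufs)           /* invented placeholder step */
{
        return nrbufs > 1 ? nrbufs : -EINVAL;
}

static int splice_like(int nrbufs)
{
        int ret;

        pthread_mutex_lock(&lock);
        if (!nrbufs) {                   /* nothing to transfer */
                ret = 0;
                goto error_out;
        }
        ret = do_work(nrbufs);
        if (ret < 0)
                goto error_out;
        pthread_mutex_unlock(&lock);     /* success path unlocks here */
        return ret;

error_out:
        pthread_mutex_unlock(&lock);     /* every early exit drops the lock */
        return ret;
}

int main(void)
{
        printf("%d %d %d\n", splice_like(0), splice_like(1), splice_like(4));
        return 0;
}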
@@ -1019,14 +1043,14 @@ static int port_fops_open(struct inode *inode, struct file *filp) | |||
1019 | struct port *port; | 1043 | struct port *port; |
1020 | int ret; | 1044 | int ret; |
1021 | 1045 | ||
1046 | /* We get the port with a kref here */ | ||
1022 | port = find_port_by_devt(cdev->dev); | 1047 | port = find_port_by_devt(cdev->dev); |
1048 | if (!port) { | ||
1049 | /* Port was unplugged before we could proceed */ | ||
1050 | return -ENXIO; | ||
1051 | } | ||
1023 | filp->private_data = port; | 1052 | filp->private_data = port; |
1024 | 1053 | ||
1025 | /* Prevent against a port getting hot-unplugged at the same time */ | ||
1026 | spin_lock_irq(&port->portdev->ports_lock); | ||
1027 | kref_get(&port->kref); | ||
1028 | spin_unlock_irq(&port->portdev->ports_lock); | ||
1029 | |||
1030 | /* | 1054 | /* |
1031 | * Don't allow opening of console port devices -- that's done | 1055 | * Don't allow opening of console port devices -- that's done |
1032 | * via /dev/hvc | 1056 | * via /dev/hvc |
@@ -1498,14 +1522,6 @@ static void remove_port(struct kref *kref) | |||
1498 | 1522 | ||
1499 | port = container_of(kref, struct port, kref); | 1523 | port = container_of(kref, struct port, kref); |
1500 | 1524 | ||
1501 | sysfs_remove_group(&port->dev->kobj, &port_attribute_group); | ||
1502 | device_destroy(pdrvdata.class, port->dev->devt); | ||
1503 | cdev_del(port->cdev); | ||
1504 | |||
1505 | kfree(port->name); | ||
1506 | |||
1507 | debugfs_remove(port->debugfs_file); | ||
1508 | |||
1509 | kfree(port); | 1525 | kfree(port); |
1510 | } | 1526 | } |
1511 | 1527 | ||
@@ -1539,12 +1555,14 @@ static void unplug_port(struct port *port) | |||
1539 | spin_unlock_irq(&port->portdev->ports_lock); | 1555 | spin_unlock_irq(&port->portdev->ports_lock); |
1540 | 1556 | ||
1541 | if (port->guest_connected) { | 1557 | if (port->guest_connected) { |
1558 | /* Let the app know the port is going down. */ | ||
1559 | send_sigio_to_port(port); | ||
1560 | |||
1561 | /* Do this after sigio is actually sent */ | ||
1542 | port->guest_connected = false; | 1562 | port->guest_connected = false; |
1543 | port->host_connected = false; | 1563 | port->host_connected = false; |
1544 | wake_up_interruptible(&port->waitqueue); | ||
1545 | 1564 | ||
1546 | /* Let the app know the port is going down. */ | 1565 | wake_up_interruptible(&port->waitqueue); |
1547 | send_sigio_to_port(port); | ||
1548 | } | 1566 | } |
1549 | 1567 | ||
1550 | if (is_console_port(port)) { | 1568 | if (is_console_port(port)) { |
@@ -1563,6 +1581,14 @@ static void unplug_port(struct port *port) | |||
1563 | */ | 1581 | */ |
1564 | port->portdev = NULL; | 1582 | port->portdev = NULL; |
1565 | 1583 | ||
1584 | sysfs_remove_group(&port->dev->kobj, &port_attribute_group); | ||
1585 | device_destroy(pdrvdata.class, port->dev->devt); | ||
1586 | cdev_del(port->cdev); | ||
1587 | |||
1588 | kfree(port->name); | ||
1589 | |||
1590 | debugfs_remove(port->debugfs_file); | ||
1591 | |||
1566 | /* | 1592 | /* |
1567 | * Locks around here are not necessary - a port can't be | 1593 | * Locks around here are not necessary - a port can't be |
1568 | * opened after we removed the port struct from ports_list | 1594 | * opened after we removed the port struct from ports_list |
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c index 1bdb882c845b..4e5739773c33 100644 --- a/drivers/clk/samsung/clk-exynos4.c +++ b/drivers/clk/samsung/clk-exynos4.c | |||
@@ -581,11 +581,15 @@ struct samsung_div_clock exynos4x12_div_clks[] __initdata = { | |||
581 | DIV(none, "div_spi1_isp", "mout_spi1_isp", E4X12_DIV_ISP, 16, 4), | 581 | DIV(none, "div_spi1_isp", "mout_spi1_isp", E4X12_DIV_ISP, 16, 4), |
582 | DIV(none, "div_spi1_isp_pre", "div_spi1_isp", E4X12_DIV_ISP, 20, 8), | 582 | DIV(none, "div_spi1_isp_pre", "div_spi1_isp", E4X12_DIV_ISP, 20, 8), |
583 | DIV(none, "div_uart_isp", "mout_uart_isp", E4X12_DIV_ISP, 28, 4), | 583 | DIV(none, "div_uart_isp", "mout_uart_isp", E4X12_DIV_ISP, 28, 4), |
584 | DIV(div_isp0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3), | 584 | DIV_F(div_isp0, "div_isp0", "aclk200", E4X12_DIV_ISP0, 0, 3, |
585 | DIV(div_isp1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3), | 585 | CLK_GET_RATE_NOCACHE, 0), |
586 | DIV_F(div_isp1, "div_isp1", "aclk200", E4X12_DIV_ISP0, 4, 3, | ||
587 | CLK_GET_RATE_NOCACHE, 0), | ||
586 | DIV(none, "div_mpwm", "div_isp1", E4X12_DIV_ISP1, 0, 3), | 588 | DIV(none, "div_mpwm", "div_isp1", E4X12_DIV_ISP1, 0, 3), |
587 | DIV(div_mcuisp0, "div_mcuisp0", "aclk400_mcuisp", E4X12_DIV_ISP1, 4, 3), | 589 | DIV_F(div_mcuisp0, "div_mcuisp0", "aclk400_mcuisp", E4X12_DIV_ISP1, |
588 | DIV(div_mcuisp1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1, 8, 3), | 590 | 4, 3, CLK_GET_RATE_NOCACHE, 0), |
591 | DIV_F(div_mcuisp1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1, | ||
592 | 8, 3, CLK_GET_RATE_NOCACHE, 0), | ||
589 | DIV(sclk_fimg2d, "sclk_fimg2d", "mout_g2d", DIV_DMC1, 0, 4), | 593 | DIV(sclk_fimg2d, "sclk_fimg2d", "mout_g2d", DIV_DMC1, 0, 4), |
590 | }; | 594 | }; |
591 | 595 | ||
@@ -863,57 +867,57 @@ struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = { | |||
863 | GATE_DA(i2s0, "samsung-i2s.0", "i2s0", "aclk100", | 867 | GATE_DA(i2s0, "samsung-i2s.0", "i2s0", "aclk100", |
864 | E4X12_GATE_IP_MAUDIO, 3, 0, 0, "iis"), | 868 | E4X12_GATE_IP_MAUDIO, 3, 0, 0, "iis"), |
865 | GATE(fimc_isp, "isp", "aclk200", E4X12_GATE_ISP0, 0, | 869 | GATE(fimc_isp, "isp", "aclk200", E4X12_GATE_ISP0, 0, |
866 | CLK_IGNORE_UNUSED, 0), | 870 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
867 | GATE(fimc_drc, "drc", "aclk200", E4X12_GATE_ISP0, 1, | 871 | GATE(fimc_drc, "drc", "aclk200", E4X12_GATE_ISP0, 1, |
868 | CLK_IGNORE_UNUSED, 0), | 872 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
869 | GATE(fimc_fd, "fd", "aclk200", E4X12_GATE_ISP0, 2, | 873 | GATE(fimc_fd, "fd", "aclk200", E4X12_GATE_ISP0, 2, |
870 | CLK_IGNORE_UNUSED, 0), | 874 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
871 | GATE(fimc_lite0, "lite0", "aclk200", E4X12_GATE_ISP0, 3, | 875 | GATE(fimc_lite0, "lite0", "aclk200", E4X12_GATE_ISP0, 3, |
872 | CLK_IGNORE_UNUSED, 0), | 876 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
873 | GATE(fimc_lite1, "lite1", "aclk200", E4X12_GATE_ISP0, 4, | 877 | GATE(fimc_lite1, "lite1", "aclk200", E4X12_GATE_ISP0, 4, |
874 | CLK_IGNORE_UNUSED, 0), | 878 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
875 | GATE(mcuisp, "mcuisp", "aclk200", E4X12_GATE_ISP0, 5, | 879 | GATE(mcuisp, "mcuisp", "aclk200", E4X12_GATE_ISP0, 5, |
876 | CLK_IGNORE_UNUSED, 0), | 880 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
877 | GATE(gicisp, "gicisp", "aclk200", E4X12_GATE_ISP0, 7, | 881 | GATE(gicisp, "gicisp", "aclk200", E4X12_GATE_ISP0, 7, |
878 | CLK_IGNORE_UNUSED, 0), | 882 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
879 | GATE(smmu_isp, "smmu_isp", "aclk200", E4X12_GATE_ISP0, 8, | 883 | GATE(smmu_isp, "smmu_isp", "aclk200", E4X12_GATE_ISP0, 8, |
880 | CLK_IGNORE_UNUSED, 0), | 884 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
881 | GATE(smmu_drc, "smmu_drc", "aclk200", E4X12_GATE_ISP0, 9, | 885 | GATE(smmu_drc, "smmu_drc", "aclk200", E4X12_GATE_ISP0, 9, |
882 | CLK_IGNORE_UNUSED, 0), | 886 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
883 | GATE(smmu_fd, "smmu_fd", "aclk200", E4X12_GATE_ISP0, 10, | 887 | GATE(smmu_fd, "smmu_fd", "aclk200", E4X12_GATE_ISP0, 10, |
884 | CLK_IGNORE_UNUSED, 0), | 888 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
885 | GATE(smmu_lite0, "smmu_lite0", "aclk200", E4X12_GATE_ISP0, 11, | 889 | GATE(smmu_lite0, "smmu_lite0", "aclk200", E4X12_GATE_ISP0, 11, |
886 | CLK_IGNORE_UNUSED, 0), | 890 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
887 | GATE(smmu_lite1, "smmu_lite1", "aclk200", E4X12_GATE_ISP0, 12, | 891 | GATE(smmu_lite1, "smmu_lite1", "aclk200", E4X12_GATE_ISP0, 12, |
888 | CLK_IGNORE_UNUSED, 0), | 892 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
889 | GATE(ppmuispmx, "ppmuispmx", "aclk200", E4X12_GATE_ISP0, 20, | 893 | GATE(ppmuispmx, "ppmuispmx", "aclk200", E4X12_GATE_ISP0, 20, |
890 | CLK_IGNORE_UNUSED, 0), | 894 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
891 | GATE(ppmuispx, "ppmuispx", "aclk200", E4X12_GATE_ISP0, 21, | 895 | GATE(ppmuispx, "ppmuispx", "aclk200", E4X12_GATE_ISP0, 21, |
892 | CLK_IGNORE_UNUSED, 0), | 896 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
893 | GATE(mcuctl_isp, "mcuctl_isp", "aclk200", E4X12_GATE_ISP0, 23, | 897 | GATE(mcuctl_isp, "mcuctl_isp", "aclk200", E4X12_GATE_ISP0, 23, |
894 | CLK_IGNORE_UNUSED, 0), | 898 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
895 | GATE(mpwm_isp, "mpwm_isp", "aclk200", E4X12_GATE_ISP0, 24, | 899 | GATE(mpwm_isp, "mpwm_isp", "aclk200", E4X12_GATE_ISP0, 24, |
896 | CLK_IGNORE_UNUSED, 0), | 900 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
897 | GATE(i2c0_isp, "i2c0_isp", "aclk200", E4X12_GATE_ISP0, 25, | 901 | GATE(i2c0_isp, "i2c0_isp", "aclk200", E4X12_GATE_ISP0, 25, |
898 | CLK_IGNORE_UNUSED, 0), | 902 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
899 | GATE(i2c1_isp, "i2c1_isp", "aclk200", E4X12_GATE_ISP0, 26, | 903 | GATE(i2c1_isp, "i2c1_isp", "aclk200", E4X12_GATE_ISP0, 26, |
900 | CLK_IGNORE_UNUSED, 0), | 904 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
901 | GATE(mtcadc_isp, "mtcadc_isp", "aclk200", E4X12_GATE_ISP0, 27, | 905 | GATE(mtcadc_isp, "mtcadc_isp", "aclk200", E4X12_GATE_ISP0, 27, |
902 | CLK_IGNORE_UNUSED, 0), | 906 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
903 | GATE(pwm_isp, "pwm_isp", "aclk200", E4X12_GATE_ISP0, 28, | 907 | GATE(pwm_isp, "pwm_isp", "aclk200", E4X12_GATE_ISP0, 28, |
904 | CLK_IGNORE_UNUSED, 0), | 908 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
905 | GATE(wdt_isp, "wdt_isp", "aclk200", E4X12_GATE_ISP0, 30, | 909 | GATE(wdt_isp, "wdt_isp", "aclk200", E4X12_GATE_ISP0, 30, |
906 | CLK_IGNORE_UNUSED, 0), | 910 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
907 | GATE(uart_isp, "uart_isp", "aclk200", E4X12_GATE_ISP0, 31, | 911 | GATE(uart_isp, "uart_isp", "aclk200", E4X12_GATE_ISP0, 31, |
908 | CLK_IGNORE_UNUSED, 0), | 912 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
909 | GATE(asyncaxim, "asyncaxim", "aclk200", E4X12_GATE_ISP1, 0, | 913 | GATE(asyncaxim, "asyncaxim", "aclk200", E4X12_GATE_ISP1, 0, |
910 | CLK_IGNORE_UNUSED, 0), | 914 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
911 | GATE(smmu_ispcx, "smmu_ispcx", "aclk200", E4X12_GATE_ISP1, 4, | 915 | GATE(smmu_ispcx, "smmu_ispcx", "aclk200", E4X12_GATE_ISP1, 4, |
912 | CLK_IGNORE_UNUSED, 0), | 916 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
913 | GATE(spi0_isp, "spi0_isp", "aclk200", E4X12_GATE_ISP1, 12, | 917 | GATE(spi0_isp, "spi0_isp", "aclk200", E4X12_GATE_ISP1, 12, |
914 | CLK_IGNORE_UNUSED, 0), | 918 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
915 | GATE(spi1_isp, "spi1_isp", "aclk200", E4X12_GATE_ISP1, 13, | 919 | GATE(spi1_isp, "spi1_isp", "aclk200", E4X12_GATE_ISP1, 13, |
916 | CLK_IGNORE_UNUSED, 0), | 920 | CLK_IGNORE_UNUSED | CLK_GET_RATE_NOCACHE, 0), |
917 | GATE(g2d, "g2d", "aclk200", GATE_IP_DMC, 23, 0, 0), | 921 | GATE(g2d, "g2d", "aclk200", GATE_IP_DMC, 23, 0, 0), |
918 | }; | 922 | }; |
919 | 923 | ||
diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c index 5c205b60a82a..089d3e30e221 100644 --- a/drivers/clk/zynq/clkc.c +++ b/drivers/clk/zynq/clkc.c | |||
@@ -71,6 +71,7 @@ static DEFINE_SPINLOCK(armpll_lock); | |||
71 | static DEFINE_SPINLOCK(ddrpll_lock); | 71 | static DEFINE_SPINLOCK(ddrpll_lock); |
72 | static DEFINE_SPINLOCK(iopll_lock); | 72 | static DEFINE_SPINLOCK(iopll_lock); |
73 | static DEFINE_SPINLOCK(armclk_lock); | 73 | static DEFINE_SPINLOCK(armclk_lock); |
74 | static DEFINE_SPINLOCK(swdtclk_lock); | ||
74 | static DEFINE_SPINLOCK(ddrclk_lock); | 75 | static DEFINE_SPINLOCK(ddrclk_lock); |
75 | static DEFINE_SPINLOCK(dciclk_lock); | 76 | static DEFINE_SPINLOCK(dciclk_lock); |
76 | static DEFINE_SPINLOCK(gem0clk_lock); | 77 | static DEFINE_SPINLOCK(gem0clk_lock); |
@@ -293,7 +294,7 @@ static void __init zynq_clk_setup(struct device_node *np) | |||
293 | } | 294 | } |
294 | clks[swdt] = clk_register_mux(NULL, clk_output_name[swdt], | 295 | clks[swdt] = clk_register_mux(NULL, clk_output_name[swdt], |
295 | swdt_ext_clk_mux_parents, 2, CLK_SET_RATE_PARENT, | 296 | swdt_ext_clk_mux_parents, 2, CLK_SET_RATE_PARENT, |
296 | SLCR_SWDT_CLK_SEL, 0, 1, 0, &gem0clk_lock); | 297 | SLCR_SWDT_CLK_SEL, 0, 1, 0, &swdtclk_lock); |
297 | 298 | ||
298 | /* DDR clocks */ | 299 | /* DDR clocks */ |
299 | clk = clk_register_divider(NULL, "ddr2x_div", "ddrpll", 0, | 300 | clk = clk_register_divider(NULL, "ddr2x_div", "ddrpll", 0, |
@@ -364,8 +365,9 @@ static void __init zynq_clk_setup(struct device_node *np) | |||
364 | CLK_SET_RATE_PARENT, SLCR_GEM0_CLK_CTRL, 20, 6, | 365 | CLK_SET_RATE_PARENT, SLCR_GEM0_CLK_CTRL, 20, 6, |
365 | CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, | 366 | CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, |
366 | &gem0clk_lock); | 367 | &gem0clk_lock); |
367 | clk = clk_register_mux(NULL, "gem0_emio_mux", gem0_mux_parents, 2, 0, | 368 | clk = clk_register_mux(NULL, "gem0_emio_mux", gem0_mux_parents, 2, |
368 | SLCR_GEM0_CLK_CTRL, 6, 1, 0, &gem0clk_lock); | 369 | CLK_SET_RATE_PARENT, SLCR_GEM0_CLK_CTRL, 6, 1, 0, |
370 | &gem0clk_lock); | ||
369 | clks[gem0] = clk_register_gate(NULL, clk_output_name[gem0], | 371 | clks[gem0] = clk_register_gate(NULL, clk_output_name[gem0], |
370 | "gem0_emio_mux", CLK_SET_RATE_PARENT, | 372 | "gem0_emio_mux", CLK_SET_RATE_PARENT, |
371 | SLCR_GEM0_CLK_CTRL, 0, 0, &gem0clk_lock); | 373 | SLCR_GEM0_CLK_CTRL, 0, 0, &gem0clk_lock); |
@@ -386,8 +388,9 @@ static void __init zynq_clk_setup(struct device_node *np) | |||
386 | CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 20, 6, | 388 | CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 20, 6, |
387 | CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, | 389 | CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO, |
388 | &gem1clk_lock); | 390 | &gem1clk_lock); |
389 | clk = clk_register_mux(NULL, "gem1_emio_mux", gem1_mux_parents, 2, 0, | 391 | clk = clk_register_mux(NULL, "gem1_emio_mux", gem1_mux_parents, 2, |
390 | SLCR_GEM1_CLK_CTRL, 6, 1, 0, &gem1clk_lock); | 392 | CLK_SET_RATE_PARENT, SLCR_GEM1_CLK_CTRL, 6, 1, 0, |
393 | &gem1clk_lock); | ||
391 | clks[gem1] = clk_register_gate(NULL, clk_output_name[gem1], | 394 | clks[gem1] = clk_register_gate(NULL, clk_output_name[gem1], |
392 | "gem1_emio_mux", CLK_SET_RATE_PARENT, | 395 | "gem1_emio_mux", CLK_SET_RATE_PARENT, |
393 | SLCR_GEM1_CLK_CTRL, 0, 0, &gem1clk_lock); | 396 | SLCR_GEM1_CLK_CTRL, 0, 0, &gem1clk_lock); |
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c index 0ceb2eff5a7e..f97cb3d8c5a2 100644 --- a/drivers/cpufreq/cpufreq_conservative.c +++ b/drivers/cpufreq/cpufreq_conservative.c | |||
@@ -221,8 +221,8 @@ static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf, | |||
221 | return count; | 221 | return count; |
222 | } | 222 | } |
223 | 223 | ||
224 | static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf, | 224 | static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data, |
225 | size_t count) | 225 | const char *buf, size_t count) |
226 | { | 226 | { |
227 | struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; | 227 | struct cs_dbs_tuners *cs_tuners = dbs_data->tuners; |
228 | unsigned int input, j; | 228 | unsigned int input, j; |
@@ -235,10 +235,10 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf, | |||
235 | if (input > 1) | 235 | if (input > 1) |
236 | input = 1; | 236 | input = 1; |
237 | 237 | ||
238 | if (input == cs_tuners->ignore_nice) /* nothing to do */ | 238 | if (input == cs_tuners->ignore_nice_load) /* nothing to do */ |
239 | return count; | 239 | return count; |
240 | 240 | ||
241 | cs_tuners->ignore_nice = input; | 241 | cs_tuners->ignore_nice_load = input; |
242 | 242 | ||
243 | /* we need to re-evaluate prev_cpu_idle */ | 243 | /* we need to re-evaluate prev_cpu_idle */ |
244 | for_each_online_cpu(j) { | 244 | for_each_online_cpu(j) { |
@@ -246,7 +246,7 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf, | |||
246 | dbs_info = &per_cpu(cs_cpu_dbs_info, j); | 246 | dbs_info = &per_cpu(cs_cpu_dbs_info, j); |
247 | dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j, | 247 | dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j, |
248 | &dbs_info->cdbs.prev_cpu_wall, 0); | 248 | &dbs_info->cdbs.prev_cpu_wall, 0); |
249 | if (cs_tuners->ignore_nice) | 249 | if (cs_tuners->ignore_nice_load) |
250 | dbs_info->cdbs.prev_cpu_nice = | 250 | dbs_info->cdbs.prev_cpu_nice = |
251 | kcpustat_cpu(j).cpustat[CPUTIME_NICE]; | 251 | kcpustat_cpu(j).cpustat[CPUTIME_NICE]; |
252 | } | 252 | } |
@@ -279,7 +279,7 @@ show_store_one(cs, sampling_rate); | |||
279 | show_store_one(cs, sampling_down_factor); | 279 | show_store_one(cs, sampling_down_factor); |
280 | show_store_one(cs, up_threshold); | 280 | show_store_one(cs, up_threshold); |
281 | show_store_one(cs, down_threshold); | 281 | show_store_one(cs, down_threshold); |
282 | show_store_one(cs, ignore_nice); | 282 | show_store_one(cs, ignore_nice_load); |
283 | show_store_one(cs, freq_step); | 283 | show_store_one(cs, freq_step); |
284 | declare_show_sampling_rate_min(cs); | 284 | declare_show_sampling_rate_min(cs); |
285 | 285 | ||
@@ -287,7 +287,7 @@ gov_sys_pol_attr_rw(sampling_rate); | |||
287 | gov_sys_pol_attr_rw(sampling_down_factor); | 287 | gov_sys_pol_attr_rw(sampling_down_factor); |
288 | gov_sys_pol_attr_rw(up_threshold); | 288 | gov_sys_pol_attr_rw(up_threshold); |
289 | gov_sys_pol_attr_rw(down_threshold); | 289 | gov_sys_pol_attr_rw(down_threshold); |
290 | gov_sys_pol_attr_rw(ignore_nice); | 290 | gov_sys_pol_attr_rw(ignore_nice_load); |
291 | gov_sys_pol_attr_rw(freq_step); | 291 | gov_sys_pol_attr_rw(freq_step); |
292 | gov_sys_pol_attr_ro(sampling_rate_min); | 292 | gov_sys_pol_attr_ro(sampling_rate_min); |
293 | 293 | ||
@@ -297,7 +297,7 @@ static struct attribute *dbs_attributes_gov_sys[] = { | |||
297 | &sampling_down_factor_gov_sys.attr, | 297 | &sampling_down_factor_gov_sys.attr, |
298 | &up_threshold_gov_sys.attr, | 298 | &up_threshold_gov_sys.attr, |
299 | &down_threshold_gov_sys.attr, | 299 | &down_threshold_gov_sys.attr, |
300 | &ignore_nice_gov_sys.attr, | 300 | &ignore_nice_load_gov_sys.attr, |
301 | &freq_step_gov_sys.attr, | 301 | &freq_step_gov_sys.attr, |
302 | NULL | 302 | NULL |
303 | }; | 303 | }; |
@@ -313,7 +313,7 @@ static struct attribute *dbs_attributes_gov_pol[] = { | |||
313 | &sampling_down_factor_gov_pol.attr, | 313 | &sampling_down_factor_gov_pol.attr, |
314 | &up_threshold_gov_pol.attr, | 314 | &up_threshold_gov_pol.attr, |
315 | &down_threshold_gov_pol.attr, | 315 | &down_threshold_gov_pol.attr, |
316 | &ignore_nice_gov_pol.attr, | 316 | &ignore_nice_load_gov_pol.attr, |
317 | &freq_step_gov_pol.attr, | 317 | &freq_step_gov_pol.attr, |
318 | NULL | 318 | NULL |
319 | }; | 319 | }; |
@@ -338,7 +338,7 @@ static int cs_init(struct dbs_data *dbs_data) | |||
338 | tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD; | 338 | tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD; |
339 | tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD; | 339 | tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD; |
340 | tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR; | 340 | tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR; |
341 | tuners->ignore_nice = 0; | 341 | tuners->ignore_nice_load = 0; |
342 | tuners->freq_step = DEF_FREQUENCY_STEP; | 342 | tuners->freq_step = DEF_FREQUENCY_STEP; |
343 | 343 | ||
344 | dbs_data->tuners = tuners; | 344 | dbs_data->tuners = tuners; |
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c index 7b839a8db2a7..e59afaa9da23 100644 --- a/drivers/cpufreq/cpufreq_governor.c +++ b/drivers/cpufreq/cpufreq_governor.c | |||
@@ -47,9 +47,9 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu) | |||
47 | unsigned int j; | 47 | unsigned int j; |
48 | 48 | ||
49 | if (dbs_data->cdata->governor == GOV_ONDEMAND) | 49 | if (dbs_data->cdata->governor == GOV_ONDEMAND) |
50 | ignore_nice = od_tuners->ignore_nice; | 50 | ignore_nice = od_tuners->ignore_nice_load; |
51 | else | 51 | else |
52 | ignore_nice = cs_tuners->ignore_nice; | 52 | ignore_nice = cs_tuners->ignore_nice_load; |
53 | 53 | ||
54 | policy = cdbs->cur_policy; | 54 | policy = cdbs->cur_policy; |
55 | 55 | ||
@@ -298,12 +298,12 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy, | |||
298 | cs_tuners = dbs_data->tuners; | 298 | cs_tuners = dbs_data->tuners; |
299 | cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu); | 299 | cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu); |
300 | sampling_rate = cs_tuners->sampling_rate; | 300 | sampling_rate = cs_tuners->sampling_rate; |
301 | ignore_nice = cs_tuners->ignore_nice; | 301 | ignore_nice = cs_tuners->ignore_nice_load; |
302 | } else { | 302 | } else { |
303 | od_tuners = dbs_data->tuners; | 303 | od_tuners = dbs_data->tuners; |
304 | od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu); | 304 | od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu); |
305 | sampling_rate = od_tuners->sampling_rate; | 305 | sampling_rate = od_tuners->sampling_rate; |
306 | ignore_nice = od_tuners->ignore_nice; | 306 | ignore_nice = od_tuners->ignore_nice_load; |
307 | od_ops = dbs_data->cdata->gov_ops; | 307 | od_ops = dbs_data->cdata->gov_ops; |
308 | io_busy = od_tuners->io_is_busy; | 308 | io_busy = od_tuners->io_is_busy; |
309 | } | 309 | } |
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h index 6663ec3b3056..d5f12b4b11b8 100644 --- a/drivers/cpufreq/cpufreq_governor.h +++ b/drivers/cpufreq/cpufreq_governor.h | |||
@@ -165,7 +165,7 @@ struct cs_cpu_dbs_info_s { | |||
165 | 165 | ||
166 | /* Per policy Governers sysfs tunables */ | 166 | /* Per policy Governers sysfs tunables */ |
167 | struct od_dbs_tuners { | 167 | struct od_dbs_tuners { |
168 | unsigned int ignore_nice; | 168 | unsigned int ignore_nice_load; |
169 | unsigned int sampling_rate; | 169 | unsigned int sampling_rate; |
170 | unsigned int sampling_down_factor; | 170 | unsigned int sampling_down_factor; |
171 | unsigned int up_threshold; | 171 | unsigned int up_threshold; |
@@ -175,7 +175,7 @@ struct od_dbs_tuners { | |||
175 | }; | 175 | }; |
176 | 176 | ||
177 | struct cs_dbs_tuners { | 177 | struct cs_dbs_tuners { |
178 | unsigned int ignore_nice; | 178 | unsigned int ignore_nice_load; |
179 | unsigned int sampling_rate; | 179 | unsigned int sampling_rate; |
180 | unsigned int sampling_down_factor; | 180 | unsigned int sampling_down_factor; |
181 | unsigned int up_threshold; | 181 | unsigned int up_threshold; |
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 93eb5cbcc1f6..c087347d6688 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c | |||
@@ -403,8 +403,8 @@ static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data, | |||
403 | return count; | 403 | return count; |
404 | } | 404 | } |
405 | 405 | ||
406 | static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf, | 406 | static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data, |
407 | size_t count) | 407 | const char *buf, size_t count) |
408 | { | 408 | { |
409 | struct od_dbs_tuners *od_tuners = dbs_data->tuners; | 409 | struct od_dbs_tuners *od_tuners = dbs_data->tuners; |
410 | unsigned int input; | 410 | unsigned int input; |
@@ -419,10 +419,10 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf, | |||
419 | if (input > 1) | 419 | if (input > 1) |
420 | input = 1; | 420 | input = 1; |
421 | 421 | ||
422 | if (input == od_tuners->ignore_nice) { /* nothing to do */ | 422 | if (input == od_tuners->ignore_nice_load) { /* nothing to do */ |
423 | return count; | 423 | return count; |
424 | } | 424 | } |
425 | od_tuners->ignore_nice = input; | 425 | od_tuners->ignore_nice_load = input; |
426 | 426 | ||
427 | /* we need to re-evaluate prev_cpu_idle */ | 427 | /* we need to re-evaluate prev_cpu_idle */ |
428 | for_each_online_cpu(j) { | 428 | for_each_online_cpu(j) { |
@@ -430,7 +430,7 @@ static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf, | |||
430 | dbs_info = &per_cpu(od_cpu_dbs_info, j); | 430 | dbs_info = &per_cpu(od_cpu_dbs_info, j); |
431 | dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j, | 431 | dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j, |
432 | &dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy); | 432 | &dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy); |
433 | if (od_tuners->ignore_nice) | 433 | if (od_tuners->ignore_nice_load) |
434 | dbs_info->cdbs.prev_cpu_nice = | 434 | dbs_info->cdbs.prev_cpu_nice = |
435 | kcpustat_cpu(j).cpustat[CPUTIME_NICE]; | 435 | kcpustat_cpu(j).cpustat[CPUTIME_NICE]; |
436 | 436 | ||
@@ -461,7 +461,7 @@ show_store_one(od, sampling_rate); | |||
461 | show_store_one(od, io_is_busy); | 461 | show_store_one(od, io_is_busy); |
462 | show_store_one(od, up_threshold); | 462 | show_store_one(od, up_threshold); |
463 | show_store_one(od, sampling_down_factor); | 463 | show_store_one(od, sampling_down_factor); |
464 | show_store_one(od, ignore_nice); | 464 | show_store_one(od, ignore_nice_load); |
465 | show_store_one(od, powersave_bias); | 465 | show_store_one(od, powersave_bias); |
466 | declare_show_sampling_rate_min(od); | 466 | declare_show_sampling_rate_min(od); |
467 | 467 | ||
@@ -469,7 +469,7 @@ gov_sys_pol_attr_rw(sampling_rate); | |||
469 | gov_sys_pol_attr_rw(io_is_busy); | 469 | gov_sys_pol_attr_rw(io_is_busy); |
470 | gov_sys_pol_attr_rw(up_threshold); | 470 | gov_sys_pol_attr_rw(up_threshold); |
471 | gov_sys_pol_attr_rw(sampling_down_factor); | 471 | gov_sys_pol_attr_rw(sampling_down_factor); |
472 | gov_sys_pol_attr_rw(ignore_nice); | 472 | gov_sys_pol_attr_rw(ignore_nice_load); |
473 | gov_sys_pol_attr_rw(powersave_bias); | 473 | gov_sys_pol_attr_rw(powersave_bias); |
474 | gov_sys_pol_attr_ro(sampling_rate_min); | 474 | gov_sys_pol_attr_ro(sampling_rate_min); |
475 | 475 | ||
@@ -478,7 +478,7 @@ static struct attribute *dbs_attributes_gov_sys[] = { | |||
478 | &sampling_rate_gov_sys.attr, | 478 | &sampling_rate_gov_sys.attr, |
479 | &up_threshold_gov_sys.attr, | 479 | &up_threshold_gov_sys.attr, |
480 | &sampling_down_factor_gov_sys.attr, | 480 | &sampling_down_factor_gov_sys.attr, |
481 | &ignore_nice_gov_sys.attr, | 481 | &ignore_nice_load_gov_sys.attr, |
482 | &powersave_bias_gov_sys.attr, | 482 | &powersave_bias_gov_sys.attr, |
483 | &io_is_busy_gov_sys.attr, | 483 | &io_is_busy_gov_sys.attr, |
484 | NULL | 484 | NULL |
@@ -494,7 +494,7 @@ static struct attribute *dbs_attributes_gov_pol[] = { | |||
494 | &sampling_rate_gov_pol.attr, | 494 | &sampling_rate_gov_pol.attr, |
495 | &up_threshold_gov_pol.attr, | 495 | &up_threshold_gov_pol.attr, |
496 | &sampling_down_factor_gov_pol.attr, | 496 | &sampling_down_factor_gov_pol.attr, |
497 | &ignore_nice_gov_pol.attr, | 497 | &ignore_nice_load_gov_pol.attr, |
498 | &powersave_bias_gov_pol.attr, | 498 | &powersave_bias_gov_pol.attr, |
499 | &io_is_busy_gov_pol.attr, | 499 | &io_is_busy_gov_pol.attr, |
500 | NULL | 500 | NULL |
@@ -544,7 +544,7 @@ static int od_init(struct dbs_data *dbs_data) | |||
544 | } | 544 | } |
545 | 545 | ||
546 | tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR; | 546 | tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR; |
547 | tuners->ignore_nice = 0; | 547 | tuners->ignore_nice_load = 0; |
548 | tuners->powersave_bias = default_powersave_bias; | 548 | tuners->powersave_bias = default_powersave_bias; |
549 | tuners->io_is_busy = should_io_be_busy(); | 549 | tuners->io_is_busy = should_io_be_busy(); |
550 | 550 | ||
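Both governors expose the renamed tunable with the same semantics: when ignore_nice_load is set, CPU time spent in niced tasks is counted as idle, so niced background work no longer keeps the frequency raised. A minimal sketch of that accounting, using only the helpers already visible in these hunks (get_cpu_idle_time() and kcpustat_cpu()); the wrapper itself is illustrative and not part of the patch.

/* Illustrative only: fold nice time into idle time the way the
 * dbs governors do when ignore_nice_load is set. */
static u64 dbs_effective_idle(unsigned int cpu, u64 *wall,
			      unsigned int ignore_nice_load, int io_busy)
{
	u64 idle = get_cpu_idle_time(cpu, wall, io_busy);

	if (ignore_nice_load)
		idle += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
	return idle;
}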
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c index bb838b985077..9536852c504a 100644 --- a/drivers/cpufreq/loongson2_cpufreq.c +++ b/drivers/cpufreq/loongson2_cpufreq.c | |||
@@ -118,11 +118,6 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
118 | clk_put(cpuclk); | 118 | clk_put(cpuclk); |
119 | return -EINVAL; | 119 | return -EINVAL; |
120 | } | 120 | } |
121 | ret = clk_set_rate(cpuclk, rate); | ||
122 | if (ret) { | ||
123 | clk_put(cpuclk); | ||
124 | return ret; | ||
125 | } | ||
126 | 121 | ||
127 | /* clock table init */ | 122 | /* clock table init */ |
128 | for (i = 2; | 123 | for (i = 2; |
@@ -130,6 +125,12 @@ static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
130 | i++) | 125 | i++) |
131 | loongson2_clockmod_table[i].frequency = (rate * i) / 8; | 126 | loongson2_clockmod_table[i].frequency = (rate * i) / 8; |
132 | 127 | ||
128 | ret = clk_set_rate(cpuclk, rate); | ||
129 | if (ret) { | ||
130 | clk_put(cpuclk); | ||
131 | return ret; | ||
132 | } | ||
133 | |||
133 | policy->cur = loongson2_cpufreq_get(policy->cpu); | 134 | policy->cur = loongson2_cpufreq_get(policy->cpu); |
134 | 135 | ||
135 | cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0], | 136 | cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0], |
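The loongson2 hunk only reorders existing code: clk_set_rate() now runs after loongson2_clockmod_table[] has been filled in. The likely motivation (a guess, not stated in this diff) is that the CPU clock's set_rate path consults that frequency table, so programming the clock against an empty table could fail or pick a wrong divider. Condensed, with the unchanged loop body elided:

	/* 1) populate loongson2_clockmod_table[] from the base rate (loop unchanged) */
	/* 2) only then program the clock, bailing out on failure */
	ret = clk_set_rate(cpuclk, rate);
	if (ret) {
		clk_put(cpuclk);
		return ret;
	}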
diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c index b67f45f5c271..5039fbc88254 100644 --- a/drivers/dma/sh/shdma.c +++ b/drivers/dma/sh/shdma.c | |||
@@ -400,8 +400,8 @@ static size_t sh_dmae_get_partial(struct shdma_chan *schan, | |||
400 | shdma_chan); | 400 | shdma_chan); |
401 | struct sh_dmae_desc *sh_desc = container_of(sdesc, | 401 | struct sh_dmae_desc *sh_desc = container_of(sdesc, |
402 | struct sh_dmae_desc, shdma_desc); | 402 | struct sh_dmae_desc, shdma_desc); |
403 | return (sh_desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) << | 403 | return sh_desc->hw.tcr - |
404 | sh_chan->xmit_shift; | 404 | (sh_dmae_readl(sh_chan, TCR) << sh_chan->xmit_shift); |
405 | } | 405 | } |
406 | 406 | ||
407 | /* Called from error IRQ or NMI */ | 407 | /* Called from error IRQ or NMI */ |
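Worked numbers make the corrected expression clearer. Assuming hw.tcr holds the transfer size in bytes while the TCR register counts pending (1 << xmit_shift)-byte units, which is what the new expression implies, only the register value needs shifting; the old code shifted the difference and could report more than the whole transfer. The values below are illustrative, not taken from the driver.

static inline void shdma_partial_example(void)
{
	/* xmit_shift = 2, so TCR counts 4-byte units;
	 * hw.tcr = 64 bytes total; TCR register reads 8 units. */
	unsigned int hw_tcr = 64, tcr_reg = 8, xmit_shift = 2;
	unsigned int before = (hw_tcr - tcr_reg) << xmit_shift;	/* 224: exceeds the 64-byte total */
	unsigned int after  = hw_tcr - (tcr_reg << xmit_shift);	/*  32: stays within the transfer */

	(void)before;
	(void)after;
}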
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c index 98d670825a1a..6e8887fe6c1b 100644 --- a/drivers/gpu/drm/ast/ast_ttm.c +++ b/drivers/gpu/drm/ast/ast_ttm.c | |||
@@ -323,6 +323,7 @@ int ast_bo_create(struct drm_device *dev, int size, int align, | |||
323 | 323 | ||
324 | astbo->gem.driver_private = NULL; | 324 | astbo->gem.driver_private = NULL; |
325 | astbo->bo.bdev = &ast->ttm.bdev; | 325 | astbo->bo.bdev = &ast->ttm.bdev; |
326 | astbo->bo.bdev->dev_mapping = dev->dev_mapping; | ||
326 | 327 | ||
327 | ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); | 328 | ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); |
328 | 329 | ||
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c index 0047012045c2..69fd8f1ac8df 100644 --- a/drivers/gpu/drm/cirrus/cirrus_ttm.c +++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c | |||
@@ -328,6 +328,7 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align, | |||
328 | 328 | ||
329 | cirrusbo->gem.driver_private = NULL; | 329 | cirrusbo->gem.driver_private = NULL; |
330 | cirrusbo->bo.bdev = &cirrus->ttm.bdev; | 330 | cirrusbo->bo.bdev = &cirrus->ttm.bdev; |
331 | cirrusbo->bo.bdev->dev_mapping = dev->dev_mapping; | ||
331 | 332 | ||
332 | cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); | 333 | cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); |
333 | 334 | ||
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 8bcce7866d36..f92da0a32f0d 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c | |||
@@ -708,7 +708,10 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, | |||
708 | /* Subtract time delta from raw timestamp to get final | 708 | /* Subtract time delta from raw timestamp to get final |
709 | * vblank_time timestamp for end of vblank. | 709 | * vblank_time timestamp for end of vblank. |
710 | */ | 710 | */ |
711 | etime = ktime_sub_ns(etime, delta_ns); | 711 | if (delta_ns < 0) |
712 | etime = ktime_add_ns(etime, -delta_ns); | ||
713 | else | ||
714 | etime = ktime_sub_ns(etime, delta_ns); | ||
712 | *vblank_time = ktime_to_timeval(etime); | 715 | *vblank_time = ktime_to_timeval(etime); |
713 | 716 | ||
714 | DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n", | 717 | DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n", |
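delta_ns is a signed s64, but ktime_sub_ns() takes an unsigned nanosecond count, so passing a negative delta would wrap to a huge value instead of nudging the timestamp the other way. The hunk handles that inline; the same idea as a standalone helper (illustrative only, not a kernel API):

#include <linux/ktime.h>

/* Illustrative helper: subtract a signed nanosecond delta without
 * feeding a negative value to ktime_sub_ns(). */
static inline ktime_t ktime_sub_signed_ns(ktime_t t, s64 delta_ns)
{
	if (delta_ns < 0)
		return ktime_add_ns(t, -delta_ns);
	return ktime_sub_ns(t, delta_ns);
}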
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index f2326fc60ac9..6f514297c483 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -1856,10 +1856,16 @@ | |||
1856 | #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) | 1856 | #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2) |
1857 | 1857 | ||
1858 | #define PORT_HOTPLUG_STAT (dev_priv->info->display_mmio_offset + 0x61114) | 1858 | #define PORT_HOTPLUG_STAT (dev_priv->info->display_mmio_offset + 0x61114) |
1859 | /* HDMI/DP bits are gen4+ */ | 1859 | /* |
1860 | #define PORTB_HOTPLUG_LIVE_STATUS (1 << 29) | 1860 | * HDMI/DP bits are gen4+ |
1861 | * | ||
1862 | * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused. | ||
1863 | * Please check the detailed lore in the commit message for experimental | ||
1864 | * evidence. | ||
1865 | */ | ||
1866 | #define PORTD_HOTPLUG_LIVE_STATUS (1 << 29) | ||
1861 | #define PORTC_HOTPLUG_LIVE_STATUS (1 << 28) | 1867 | #define PORTC_HOTPLUG_LIVE_STATUS (1 << 28) |
1862 | #define PORTD_HOTPLUG_LIVE_STATUS (1 << 27) | 1868 | #define PORTB_HOTPLUG_LIVE_STATUS (1 << 27) |
1863 | #define PORTD_HOTPLUG_INT_STATUS (3 << 21) | 1869 | #define PORTD_HOTPLUG_INT_STATUS (3 << 21) |
1864 | #define PORTC_HOTPLUG_INT_STATUS (3 << 19) | 1870 | #define PORTC_HOTPLUG_INT_STATUS (3 << 19) |
1865 | #define PORTB_HOTPLUG_INT_STATUS (3 << 17) | 1871 | #define PORTB_HOTPLUG_INT_STATUS (3 << 17) |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 5fb305840db8..e38b45786653 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -8269,9 +8269,11 @@ check_crtc_state(struct drm_device *dev) | |||
8269 | 8269 | ||
8270 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, | 8270 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, |
8271 | base.head) { | 8271 | base.head) { |
8272 | enum pipe pipe; | ||
8272 | if (encoder->base.crtc != &crtc->base) | 8273 | if (encoder->base.crtc != &crtc->base) |
8273 | continue; | 8274 | continue; |
8274 | if (encoder->get_config) | 8275 | if (encoder->get_config && |
8276 | encoder->get_hw_state(encoder, &pipe)) | ||
8275 | encoder->get_config(encoder, &pipe_config); | 8277 | encoder->get_config(encoder, &pipe_config); |
8276 | } | 8278 | } |
8277 | 8279 | ||
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 67e2c1f1c9a8..5950888ae1d0 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
@@ -497,8 +497,11 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level, u32 max) | |||
497 | goto out; | 497 | goto out; |
498 | } | 498 | } |
499 | 499 | ||
500 | /* scale to hardware */ | 500 | /* scale to hardware, but be careful to not overflow */ |
501 | level = level * freq / max; | 501 | if (freq < max) |
502 | level = level * freq / max; | ||
503 | else | ||
504 | level = freq / max * level; | ||
502 | 505 | ||
503 | dev_priv->backlight.level = level; | 506 | dev_priv->backlight.level = level; |
504 | if (dev_priv->backlight.device) | 507 | if (dev_priv->backlight.device) |
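level, freq and max are 32-bit values here, so level * freq can overflow a u32 when the PWM frequency is large; the branch above trades some precision for safety once freq >= max. A 64-bit variant would avoid both the overflow and the precision loss; this is a sketch using the kernel's div_u64(), not what the patch itself does.

#include <linux/math64.h>

/* Illustrative alternative: do the scaling in 64 bits so the
 * intermediate product cannot overflow a u32. */
static inline u32 scale_backlight_level(u32 level, u32 freq, u32 max)
{
	return div_u64((u64)level * freq, max);
}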
@@ -515,6 +518,17 @@ void intel_panel_disable_backlight(struct drm_device *dev) | |||
515 | struct drm_i915_private *dev_priv = dev->dev_private; | 518 | struct drm_i915_private *dev_priv = dev->dev_private; |
516 | unsigned long flags; | 519 | unsigned long flags; |
517 | 520 | ||
521 | /* | ||
522 | * Do not disable backlight on the vgaswitcheroo path. When switching | ||
523 | * away from i915, the other client may depend on i915 to handle the | ||
524 | * backlight. This will leave the backlight on unnecessarily when | ||
525 | * another client is not activated. | ||
526 | */ | ||
527 | if (dev->switch_power_state == DRM_SWITCH_POWER_CHANGING) { | ||
528 | DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n"); | ||
529 | return; | ||
530 | } | ||
531 | |||
518 | spin_lock_irqsave(&dev_priv->backlight.lock, flags); | 532 | spin_lock_irqsave(&dev_priv->backlight.lock, flags); |
519 | 533 | ||
520 | dev_priv->backlight.enabled = false; | 534 | dev_priv->backlight.enabled = false; |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index f895d1508df8..b0e4a0bd1313 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
@@ -5063,8 +5063,26 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable) | |||
5063 | } | 5063 | } |
5064 | } else { | 5064 | } else { |
5065 | if (enable_requested) { | 5065 | if (enable_requested) { |
5066 | unsigned long irqflags; | ||
5067 | enum pipe p; | ||
5068 | |||
5066 | I915_WRITE(HSW_PWR_WELL_DRIVER, 0); | 5069 | I915_WRITE(HSW_PWR_WELL_DRIVER, 0); |
5070 | POSTING_READ(HSW_PWR_WELL_DRIVER); | ||
5067 | DRM_DEBUG_KMS("Requesting to disable the power well\n"); | 5071 | DRM_DEBUG_KMS("Requesting to disable the power well\n"); |
5072 | |||
5073 | /* | ||
5074 | * After this, the registers on the pipes that are part | ||
5075 | * of the power well will become zero, so we have to | ||
5076 | * adjust our counters according to that. | ||
5077 | * | ||
5078 | * FIXME: Should we do this in general in | ||
5079 | * drm_vblank_post_modeset? | ||
5080 | */ | ||
5081 | spin_lock_irqsave(&dev->vbl_lock, irqflags); | ||
5082 | for_each_pipe(p) | ||
5083 | if (p != PIPE_A) | ||
5084 | dev->last_vblank[p] = 0; | ||
5085 | spin_unlock_irqrestore(&dev->vbl_lock, irqflags); | ||
5068 | } | 5086 | } |
5069 | } | 5087 | } |
5070 | } | 5088 | } |
diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c index 13878d5de063..d70e4a92773b 100644 --- a/drivers/gpu/drm/mgag200/mgag200_ttm.c +++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c | |||
@@ -323,6 +323,7 @@ int mgag200_bo_create(struct drm_device *dev, int size, int align, | |||
323 | 323 | ||
324 | mgabo->gem.driver_private = NULL; | 324 | mgabo->gem.driver_private = NULL; |
325 | mgabo->bo.bdev = &mdev->ttm.bdev; | 325 | mgabo->bo.bdev = &mdev->ttm.bdev; |
326 | mgabo->bo.bdev->dev_mapping = dev->dev_mapping; | ||
326 | 327 | ||
327 | mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); | 328 | mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); |
328 | 329 | ||
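ast, cirrus and mgag200 all gain the same one-liner: after pointing the BO at the driver's TTM device, bdev->dev_mapping is set to the DRM device's address space, presumably so TTM can find and tear down userspace mappings when the BO moves or is destroyed. Condensed, with bo/priv standing in for the per-driver names used in the hunks above:

	bo->gem.driver_private = NULL;
	bo->bo.bdev = &priv->ttm.bdev;
	bo->bo.bdev->dev_mapping = dev->dev_mapping;	/* the line added in all three drivers */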
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c index 0bfd55e08820..9953e1fbc46d 100644 --- a/drivers/gpu/drm/radeon/btc_dpm.c +++ b/drivers/gpu/drm/radeon/btc_dpm.c | |||
@@ -2548,9 +2548,6 @@ int btc_dpm_init(struct radeon_device *rdev) | |||
2548 | { | 2548 | { |
2549 | struct rv7xx_power_info *pi; | 2549 | struct rv7xx_power_info *pi; |
2550 | struct evergreen_power_info *eg_pi; | 2550 | struct evergreen_power_info *eg_pi; |
2551 | int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); | ||
2552 | u16 data_offset, size; | ||
2553 | u8 frev, crev; | ||
2554 | struct atom_clock_dividers dividers; | 2551 | struct atom_clock_dividers dividers; |
2555 | int ret; | 2552 | int ret; |
2556 | 2553 | ||
@@ -2633,16 +2630,7 @@ int btc_dpm_init(struct radeon_device *rdev) | |||
2633 | eg_pi->vddci_control = | 2630 | eg_pi->vddci_control = |
2634 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0); | 2631 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0); |
2635 | 2632 | ||
2636 | if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, | 2633 | rv770_get_engine_memory_ss(rdev); |
2637 | &frev, &crev, &data_offset)) { | ||
2638 | pi->sclk_ss = true; | ||
2639 | pi->mclk_ss = true; | ||
2640 | pi->dynamic_ss = true; | ||
2641 | } else { | ||
2642 | pi->sclk_ss = false; | ||
2643 | pi->mclk_ss = false; | ||
2644 | pi->dynamic_ss = true; | ||
2645 | } | ||
2646 | 2634 | ||
2647 | pi->asi = RV770_ASI_DFLT; | 2635 | pi->asi = RV770_ASI_DFLT; |
2648 | pi->pasi = CYPRESS_HASI_DFLT; | 2636 | pi->pasi = CYPRESS_HASI_DFLT; |
@@ -2659,8 +2647,7 @@ int btc_dpm_init(struct radeon_device *rdev) | |||
2659 | 2647 | ||
2660 | pi->dynamic_pcie_gen2 = true; | 2648 | pi->dynamic_pcie_gen2 = true; |
2661 | 2649 | ||
2662 | if (pi->gfx_clock_gating && | 2650 | if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE) |
2663 | (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)) | ||
2664 | pi->thermal_protection = true; | 2651 | pi->thermal_protection = true; |
2665 | else | 2652 | else |
2666 | pi->thermal_protection = false; | 2653 | pi->thermal_protection = false; |
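btc, cypress and ni dpm_init all replace the same open-coded ASIC_InternalSS_Info lookup with a call to rv770_get_engine_memory_ss(). Based purely on the block being removed, the helper presumably does something along these lines; this is a reconstruction, not the real rv770 implementation.

/* Sketch reconstructed from the removed open-coded block; the actual
 * rv770_get_engine_memory_ss() may differ in detail. */
static void rv770_get_engine_memory_ss_sketch(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
	u16 data_offset, size;
	u8 frev, crev;

	pi->sclk_ss = pi->mclk_ss = false;
	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
				   &frev, &crev, &data_offset)) {
		pi->sclk_ss = true;
		pi->mclk_ss = true;
	}
	pi->dynamic_ss = true;	/* set either way in the removed code */
}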
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 6dacec4e2090..8928bd109c16 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c | |||
@@ -2587,9 +2587,11 @@ u32 cik_compute_ring_get_rptr(struct radeon_device *rdev, | |||
2587 | if (rdev->wb.enabled) { | 2587 | if (rdev->wb.enabled) { |
2588 | rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]); | 2588 | rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]); |
2589 | } else { | 2589 | } else { |
2590 | mutex_lock(&rdev->srbm_mutex); | ||
2590 | cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0); | 2591 | cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0); |
2591 | rptr = RREG32(CP_HQD_PQ_RPTR); | 2592 | rptr = RREG32(CP_HQD_PQ_RPTR); |
2592 | cik_srbm_select(rdev, 0, 0, 0, 0); | 2593 | cik_srbm_select(rdev, 0, 0, 0, 0); |
2594 | mutex_unlock(&rdev->srbm_mutex); | ||
2593 | } | 2595 | } |
2594 | rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift; | 2596 | rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift; |
2595 | 2597 | ||
@@ -2604,9 +2606,11 @@ u32 cik_compute_ring_get_wptr(struct radeon_device *rdev, | |||
2604 | if (rdev->wb.enabled) { | 2606 | if (rdev->wb.enabled) { |
2605 | wptr = le32_to_cpu(rdev->wb.wb[ring->wptr_offs/4]); | 2607 | wptr = le32_to_cpu(rdev->wb.wb[ring->wptr_offs/4]); |
2606 | } else { | 2608 | } else { |
2609 | mutex_lock(&rdev->srbm_mutex); | ||
2607 | cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0); | 2610 | cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0); |
2608 | wptr = RREG32(CP_HQD_PQ_WPTR); | 2611 | wptr = RREG32(CP_HQD_PQ_WPTR); |
2609 | cik_srbm_select(rdev, 0, 0, 0, 0); | 2612 | cik_srbm_select(rdev, 0, 0, 0, 0); |
2613 | mutex_unlock(&rdev->srbm_mutex); | ||
2610 | } | 2614 | } |
2611 | wptr = (wptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift; | 2615 | wptr = (wptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift; |
2612 | 2616 | ||
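Every access to the banked SRBM registers in cik.c now follows the same shape: take the new rdev->srbm_mutex, select the me/pipe/queue (or VMID) instance, touch the per-instance registers, restore instance 0, and drop the mutex. Condensed from the hunks in this file:

	mutex_lock(&rdev->srbm_mutex);
	cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0);
	rptr = RREG32(CP_HQD_PQ_RPTR);		/* banked per-queue register */
	cik_srbm_select(rdev, 0, 0, 0, 0);	/* always restore instance 0 */
	mutex_unlock(&rdev->srbm_mutex);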
@@ -2897,6 +2901,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) | |||
2897 | WREG32(CP_CPF_DEBUG, tmp); | 2901 | WREG32(CP_CPF_DEBUG, tmp); |
2898 | 2902 | ||
2899 | /* init the pipes */ | 2903 | /* init the pipes */ |
2904 | mutex_lock(&rdev->srbm_mutex); | ||
2900 | for (i = 0; i < (rdev->mec.num_pipe * rdev->mec.num_mec); i++) { | 2905 | for (i = 0; i < (rdev->mec.num_pipe * rdev->mec.num_mec); i++) { |
2901 | int me = (i < 4) ? 1 : 2; | 2906 | int me = (i < 4) ? 1 : 2; |
2902 | int pipe = (i < 4) ? i : (i - 4); | 2907 | int pipe = (i < 4) ? i : (i - 4); |
@@ -2919,6 +2924,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) | |||
2919 | WREG32(CP_HPD_EOP_CONTROL, tmp); | 2924 | WREG32(CP_HPD_EOP_CONTROL, tmp); |
2920 | } | 2925 | } |
2921 | cik_srbm_select(rdev, 0, 0, 0, 0); | 2926 | cik_srbm_select(rdev, 0, 0, 0, 0); |
2927 | mutex_unlock(&rdev->srbm_mutex); | ||
2922 | 2928 | ||
2923 | /* init the queues. Just two for now. */ | 2929 | /* init the queues. Just two for now. */ |
2924 | for (i = 0; i < 2; i++) { | 2930 | for (i = 0; i < 2; i++) { |
@@ -2972,6 +2978,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) | |||
2972 | mqd->static_thread_mgmt23[0] = 0xffffffff; | 2978 | mqd->static_thread_mgmt23[0] = 0xffffffff; |
2973 | mqd->static_thread_mgmt23[1] = 0xffffffff; | 2979 | mqd->static_thread_mgmt23[1] = 0xffffffff; |
2974 | 2980 | ||
2981 | mutex_lock(&rdev->srbm_mutex); | ||
2975 | cik_srbm_select(rdev, rdev->ring[idx].me, | 2982 | cik_srbm_select(rdev, rdev->ring[idx].me, |
2976 | rdev->ring[idx].pipe, | 2983 | rdev->ring[idx].pipe, |
2977 | rdev->ring[idx].queue, 0); | 2984 | rdev->ring[idx].queue, 0); |
@@ -3099,6 +3106,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) | |||
3099 | WREG32(CP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active); | 3106 | WREG32(CP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active); |
3100 | 3107 | ||
3101 | cik_srbm_select(rdev, 0, 0, 0, 0); | 3108 | cik_srbm_select(rdev, 0, 0, 0, 0); |
3109 | mutex_unlock(&rdev->srbm_mutex); | ||
3102 | 3110 | ||
3103 | radeon_bo_kunmap(rdev->ring[idx].mqd_obj); | 3111 | radeon_bo_kunmap(rdev->ring[idx].mqd_obj); |
3104 | radeon_bo_unreserve(rdev->ring[idx].mqd_obj); | 3112 | radeon_bo_unreserve(rdev->ring[idx].mqd_obj); |
@@ -4320,6 +4328,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev) | |||
4320 | 4328 | ||
4321 | /* XXX SH_MEM regs */ | 4329 | /* XXX SH_MEM regs */ |
4322 | /* where to put LDS, scratch, GPUVM in FSA64 space */ | 4330 | /* where to put LDS, scratch, GPUVM in FSA64 space */ |
4331 | mutex_lock(&rdev->srbm_mutex); | ||
4323 | for (i = 0; i < 16; i++) { | 4332 | for (i = 0; i < 16; i++) { |
4324 | cik_srbm_select(rdev, 0, 0, 0, i); | 4333 | cik_srbm_select(rdev, 0, 0, 0, i); |
4325 | /* CP and shaders */ | 4334 | /* CP and shaders */ |
@@ -4335,6 +4344,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev) | |||
4335 | /* XXX SDMA RLC - todo */ | 4344 | /* XXX SDMA RLC - todo */ |
4336 | } | 4345 | } |
4337 | cik_srbm_select(rdev, 0, 0, 0, 0); | 4346 | cik_srbm_select(rdev, 0, 0, 0, 0); |
4347 | mutex_unlock(&rdev->srbm_mutex); | ||
4338 | 4348 | ||
4339 | cik_pcie_gart_tlb_flush(rdev); | 4349 | cik_pcie_gart_tlb_flush(rdev); |
4340 | DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", | 4350 | DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", |
@@ -5954,6 +5964,8 @@ static int cik_startup(struct radeon_device *rdev) | |||
5954 | struct radeon_ring *ring; | 5964 | struct radeon_ring *ring; |
5955 | int r; | 5965 | int r; |
5956 | 5966 | ||
5967 | cik_mc_program(rdev); | ||
5968 | |||
5957 | if (rdev->flags & RADEON_IS_IGP) { | 5969 | if (rdev->flags & RADEON_IS_IGP) { |
5958 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || | 5970 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || |
5959 | !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) { | 5971 | !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) { |
@@ -5985,7 +5997,6 @@ static int cik_startup(struct radeon_device *rdev) | |||
5985 | if (r) | 5997 | if (r) |
5986 | return r; | 5998 | return r; |
5987 | 5999 | ||
5988 | cik_mc_program(rdev); | ||
5989 | r = cik_pcie_gart_enable(rdev); | 6000 | r = cik_pcie_gart_enable(rdev); |
5990 | if (r) | 6001 | if (r) |
5991 | return r; | 6002 | return r; |
@@ -6194,7 +6205,7 @@ int cik_suspend(struct radeon_device *rdev) | |||
6194 | radeon_vm_manager_fini(rdev); | 6205 | radeon_vm_manager_fini(rdev); |
6195 | cik_cp_enable(rdev, false); | 6206 | cik_cp_enable(rdev, false); |
6196 | cik_sdma_enable(rdev, false); | 6207 | cik_sdma_enable(rdev, false); |
6197 | r600_uvd_rbc_stop(rdev); | 6208 | r600_uvd_stop(rdev); |
6198 | radeon_uvd_suspend(rdev); | 6209 | radeon_uvd_suspend(rdev); |
6199 | cik_irq_suspend(rdev); | 6210 | cik_irq_suspend(rdev); |
6200 | radeon_wb_disable(rdev); | 6211 | radeon_wb_disable(rdev); |
@@ -6358,6 +6369,7 @@ void cik_fini(struct radeon_device *rdev) | |||
6358 | radeon_vm_manager_fini(rdev); | 6369 | radeon_vm_manager_fini(rdev); |
6359 | radeon_ib_pool_fini(rdev); | 6370 | radeon_ib_pool_fini(rdev); |
6360 | radeon_irq_kms_fini(rdev); | 6371 | radeon_irq_kms_fini(rdev); |
6372 | r600_uvd_stop(rdev); | ||
6361 | radeon_uvd_fini(rdev); | 6373 | radeon_uvd_fini(rdev); |
6362 | cik_pcie_gart_fini(rdev); | 6374 | cik_pcie_gart_fini(rdev); |
6363 | r600_vram_scratch_fini(rdev); | 6375 | r600_vram_scratch_fini(rdev); |
@@ -6978,7 +6990,7 @@ int cik_uvd_resume(struct radeon_device *rdev) | |||
6978 | 6990 | ||
6979 | /* programm the VCPU memory controller bits 0-27 */ | 6991 | /* programm the VCPU memory controller bits 0-27 */ |
6980 | addr = rdev->uvd.gpu_addr >> 3; | 6992 | addr = rdev->uvd.gpu_addr >> 3; |
6981 | size = RADEON_GPU_PAGE_ALIGN(rdev->uvd.fw_size + 4) >> 3; | 6993 | size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3; |
6982 | WREG32(UVD_VCPU_CACHE_OFFSET0, addr); | 6994 | WREG32(UVD_VCPU_CACHE_OFFSET0, addr); |
6983 | WREG32(UVD_VCPU_CACHE_SIZE0, size); | 6995 | WREG32(UVD_VCPU_CACHE_SIZE0, size); |
6984 | 6996 | ||
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c index 9bcdd174780f..7e5d0b570a30 100644 --- a/drivers/gpu/drm/radeon/cypress_dpm.c +++ b/drivers/gpu/drm/radeon/cypress_dpm.c | |||
@@ -2038,9 +2038,6 @@ int cypress_dpm_init(struct radeon_device *rdev) | |||
2038 | { | 2038 | { |
2039 | struct rv7xx_power_info *pi; | 2039 | struct rv7xx_power_info *pi; |
2040 | struct evergreen_power_info *eg_pi; | 2040 | struct evergreen_power_info *eg_pi; |
2041 | int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); | ||
2042 | uint16_t data_offset, size; | ||
2043 | uint8_t frev, crev; | ||
2044 | struct atom_clock_dividers dividers; | 2041 | struct atom_clock_dividers dividers; |
2045 | int ret; | 2042 | int ret; |
2046 | 2043 | ||
@@ -2092,16 +2089,7 @@ int cypress_dpm_init(struct radeon_device *rdev) | |||
2092 | eg_pi->vddci_control = | 2089 | eg_pi->vddci_control = |
2093 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0); | 2090 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0); |
2094 | 2091 | ||
2095 | if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, | 2092 | rv770_get_engine_memory_ss(rdev); |
2096 | &frev, &crev, &data_offset)) { | ||
2097 | pi->sclk_ss = true; | ||
2098 | pi->mclk_ss = true; | ||
2099 | pi->dynamic_ss = true; | ||
2100 | } else { | ||
2101 | pi->sclk_ss = false; | ||
2102 | pi->mclk_ss = false; | ||
2103 | pi->dynamic_ss = true; | ||
2104 | } | ||
2105 | 2093 | ||
2106 | pi->asi = RV770_ASI_DFLT; | 2094 | pi->asi = RV770_ASI_DFLT; |
2107 | pi->pasi = CYPRESS_HASI_DFLT; | 2095 | pi->pasi = CYPRESS_HASI_DFLT; |
@@ -2122,8 +2110,7 @@ int cypress_dpm_init(struct radeon_device *rdev) | |||
2122 | 2110 | ||
2123 | pi->dynamic_pcie_gen2 = true; | 2111 | pi->dynamic_pcie_gen2 = true; |
2124 | 2112 | ||
2125 | if (pi->gfx_clock_gating && | 2113 | if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE) |
2126 | (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)) | ||
2127 | pi->thermal_protection = true; | 2114 | pi->thermal_protection = true; |
2128 | else | 2115 | else |
2129 | pi->thermal_protection = false; | 2116 | pi->thermal_protection = false; |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 038dcac7670c..d5b49e33315e 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -5106,6 +5106,8 @@ static int evergreen_startup(struct radeon_device *rdev) | |||
5106 | /* enable aspm */ | 5106 | /* enable aspm */ |
5107 | evergreen_program_aspm(rdev); | 5107 | evergreen_program_aspm(rdev); |
5108 | 5108 | ||
5109 | evergreen_mc_program(rdev); | ||
5110 | |||
5109 | if (ASIC_IS_DCE5(rdev)) { | 5111 | if (ASIC_IS_DCE5(rdev)) { |
5110 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) { | 5112 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) { |
5111 | r = ni_init_microcode(rdev); | 5113 | r = ni_init_microcode(rdev); |
@@ -5133,7 +5135,6 @@ static int evergreen_startup(struct radeon_device *rdev) | |||
5133 | if (r) | 5135 | if (r) |
5134 | return r; | 5136 | return r; |
5135 | 5137 | ||
5136 | evergreen_mc_program(rdev); | ||
5137 | if (rdev->flags & RADEON_IS_AGP) { | 5138 | if (rdev->flags & RADEON_IS_AGP) { |
5138 | evergreen_agp_enable(rdev); | 5139 | evergreen_agp_enable(rdev); |
5139 | } else { | 5140 | } else { |
@@ -5291,10 +5292,10 @@ int evergreen_resume(struct radeon_device *rdev) | |||
5291 | int evergreen_suspend(struct radeon_device *rdev) | 5292 | int evergreen_suspend(struct radeon_device *rdev) |
5292 | { | 5293 | { |
5293 | r600_audio_fini(rdev); | 5294 | r600_audio_fini(rdev); |
5295 | r600_uvd_stop(rdev); | ||
5294 | radeon_uvd_suspend(rdev); | 5296 | radeon_uvd_suspend(rdev); |
5295 | r700_cp_stop(rdev); | 5297 | r700_cp_stop(rdev); |
5296 | r600_dma_stop(rdev); | 5298 | r600_dma_stop(rdev); |
5297 | r600_uvd_rbc_stop(rdev); | ||
5298 | evergreen_irq_suspend(rdev); | 5299 | evergreen_irq_suspend(rdev); |
5299 | radeon_wb_disable(rdev); | 5300 | radeon_wb_disable(rdev); |
5300 | evergreen_pcie_gart_disable(rdev); | 5301 | evergreen_pcie_gart_disable(rdev); |
@@ -5429,6 +5430,7 @@ void evergreen_fini(struct radeon_device *rdev) | |||
5429 | radeon_ib_pool_fini(rdev); | 5430 | radeon_ib_pool_fini(rdev); |
5430 | radeon_irq_kms_fini(rdev); | 5431 | radeon_irq_kms_fini(rdev); |
5431 | evergreen_pcie_gart_fini(rdev); | 5432 | evergreen_pcie_gart_fini(rdev); |
5433 | r600_uvd_stop(rdev); | ||
5432 | radeon_uvd_fini(rdev); | 5434 | radeon_uvd_fini(rdev); |
5433 | r600_vram_scratch_fini(rdev); | 5435 | r600_vram_scratch_fini(rdev); |
5434 | radeon_gem_fini(rdev); | 5436 | radeon_gem_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c index bb9ea3641312..b0e280058b9b 100644 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c | |||
@@ -148,18 +148,40 @@ static void evergreen_audio_set_dto(struct drm_encoder *encoder, u32 clock) | |||
148 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 148 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
149 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); | 149 | struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); |
150 | u32 base_rate = 24000; | 150 | u32 base_rate = 24000; |
151 | u32 max_ratio = clock / base_rate; | ||
152 | u32 dto_phase; | ||
153 | u32 dto_modulo = clock; | ||
154 | u32 wallclock_ratio; | ||
155 | u32 dto_cntl; | ||
151 | 156 | ||
152 | if (!dig || !dig->afmt) | 157 | if (!dig || !dig->afmt) |
153 | return; | 158 | return; |
154 | 159 | ||
160 | if (max_ratio >= 8) { | ||
161 | dto_phase = 192 * 1000; | ||
162 | wallclock_ratio = 3; | ||
163 | } else if (max_ratio >= 4) { | ||
164 | dto_phase = 96 * 1000; | ||
165 | wallclock_ratio = 2; | ||
166 | } else if (max_ratio >= 2) { | ||
167 | dto_phase = 48 * 1000; | ||
168 | wallclock_ratio = 1; | ||
169 | } else { | ||
170 | dto_phase = 24 * 1000; | ||
171 | wallclock_ratio = 0; | ||
172 | } | ||
173 | dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK; | ||
174 | dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio); | ||
175 | WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl); | ||
176 | |||
155 | /* XXX two dtos; generally use dto0 for hdmi */ | 177 | /* XXX two dtos; generally use dto0 for hdmi */ |
156 | /* Express [24MHz / target pixel clock] as an exact rational | 178 | /* Express [24MHz / target pixel clock] as an exact rational |
157 | * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE | 179 | * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE |
158 | * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator | 180 | * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator |
159 | */ | 181 | */ |
160 | WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id)); | 182 | WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id)); |
161 | WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100); | 183 | WREG32(DCCG_AUDIO_DTO0_PHASE, dto_phase); |
162 | WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100); | 184 | WREG32(DCCG_AUDIO_DTO0_MODULE, dto_modulo); |
163 | } | 185 | } |
164 | 186 | ||
165 | 187 | ||
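Instead of always programming a 24000/clock phase/module pair, the DTO phase and wallclock ratio now scale with how far the pixel clock exceeds the 24 MHz base rate (clock appears to be in kHz, given the 24000 base). The selection logic from the hunk, isolated for clarity; the helper name and the worked numbers are illustrative.

static void dto_params(u32 clock /* kHz, assumed */, u32 *dto_phase, u32 *wallclock_ratio)
{
	u32 max_ratio = clock / 24000;		/* base_rate = 24000 kHz */

	if (max_ratio >= 8) {
		*dto_phase = 192 * 1000;
		*wallclock_ratio = 3;
	} else if (max_ratio >= 4) {
		*dto_phase = 96 * 1000;
		*wallclock_ratio = 2;
	} else if (max_ratio >= 2) {
		*dto_phase = 48 * 1000;
		*wallclock_ratio = 1;
	} else {
		*dto_phase = 24 * 1000;
		*wallclock_ratio = 0;
	}
	/* e.g. a 148500 kHz (1080p) pixel clock: max_ratio = 6, so
	 * wallclock_ratio = 2 and dto_phase = 96000, with dto_modulo
	 * left at the pixel clock; the phase scales by the same power
	 * of two as the wall clock divider, keeping the ~24 MHz audio
	 * reference consistent as the pixel clock grows. */
}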
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index a7baf67aef6c..0d582ac1dc31 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h | |||
@@ -497,6 +497,9 @@ | |||
497 | #define DCCG_AUDIO_DTO0_MODULE 0x05b4 | 497 | #define DCCG_AUDIO_DTO0_MODULE 0x05b4 |
498 | #define DCCG_AUDIO_DTO0_LOAD 0x05b8 | 498 | #define DCCG_AUDIO_DTO0_LOAD 0x05b8 |
499 | #define DCCG_AUDIO_DTO0_CNTL 0x05bc | 499 | #define DCCG_AUDIO_DTO0_CNTL 0x05bc |
500 | # define DCCG_AUDIO_DTO_WALLCLOCK_RATIO(x) (((x) & 7) << 0) | ||
501 | # define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK 7 | ||
502 | # define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_SHIFT 0 | ||
500 | 503 | ||
501 | #define DCCG_AUDIO_DTO1_PHASE 0x05c0 | 504 | #define DCCG_AUDIO_DTO1_PHASE 0x05c0 |
502 | #define DCCG_AUDIO_DTO1_MODULE 0x05c4 | 505 | #define DCCG_AUDIO_DTO1_MODULE 0x05c4 |
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 56bd4f3be4fe..ccb4f8b54852 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c | |||
@@ -794,9 +794,13 @@ int ni_init_microcode(struct radeon_device *rdev) | |||
794 | if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN)) { | 794 | if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN)) { |
795 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); | 795 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); |
796 | err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); | 796 | err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); |
797 | if (err) | 797 | if (err) { |
798 | goto out; | 798 | printk(KERN_ERR |
799 | if (rdev->smc_fw->size != smc_req_size) { | 799 | "smc: error loading firmware \"%s\"\n", |
800 | fw_name); | ||
801 | release_firmware(rdev->smc_fw); | ||
802 | rdev->smc_fw = NULL; | ||
803 | } else if (rdev->smc_fw->size != smc_req_size) { | ||
800 | printk(KERN_ERR | 804 | printk(KERN_ERR |
801 | "ni_mc: Bogus length %zu in firmware \"%s\"\n", | 805 | "ni_mc: Bogus length %zu in firmware \"%s\"\n", |
802 | rdev->mc_fw->size, fw_name); | 806 | rdev->mc_fw->size, fw_name); |
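SMC firmware failures stop being fatal in both ni.c and r600.c: rather than jumping to the error path, the driver logs the problem, releases and clears rdev->smc_fw, and continues; DPM is then skipped when smc_fw is NULL (see the radeon_pm.c hunk further down). The resulting shape, condensed from the hunk above; how err itself is handled afterwards is outside the lines shown here.

	err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
	if (err) {
		/* log it, drop the reference, and keep going without SMC */
		printk(KERN_ERR "smc: error loading firmware \"%s\"\n", fw_name);
		release_firmware(rdev->smc_fw);
		rdev->smc_fw = NULL;
	} else if (rdev->smc_fw->size != smc_req_size) {
		/* existing length check, unchanged */
	}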
@@ -2079,6 +2083,8 @@ static int cayman_startup(struct radeon_device *rdev) | |||
2079 | /* enable aspm */ | 2083 | /* enable aspm */ |
2080 | evergreen_program_aspm(rdev); | 2084 | evergreen_program_aspm(rdev); |
2081 | 2085 | ||
2086 | evergreen_mc_program(rdev); | ||
2087 | |||
2082 | if (rdev->flags & RADEON_IS_IGP) { | 2088 | if (rdev->flags & RADEON_IS_IGP) { |
2083 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | 2089 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { |
2084 | r = ni_init_microcode(rdev); | 2090 | r = ni_init_microcode(rdev); |
@@ -2107,7 +2113,6 @@ static int cayman_startup(struct radeon_device *rdev) | |||
2107 | if (r) | 2113 | if (r) |
2108 | return r; | 2114 | return r; |
2109 | 2115 | ||
2110 | evergreen_mc_program(rdev); | ||
2111 | r = cayman_pcie_gart_enable(rdev); | 2116 | r = cayman_pcie_gart_enable(rdev); |
2112 | if (r) | 2117 | if (r) |
2113 | return r; | 2118 | return r; |
@@ -2286,7 +2291,7 @@ int cayman_suspend(struct radeon_device *rdev) | |||
2286 | radeon_vm_manager_fini(rdev); | 2291 | radeon_vm_manager_fini(rdev); |
2287 | cayman_cp_enable(rdev, false); | 2292 | cayman_cp_enable(rdev, false); |
2288 | cayman_dma_stop(rdev); | 2293 | cayman_dma_stop(rdev); |
2289 | r600_uvd_rbc_stop(rdev); | 2294 | r600_uvd_stop(rdev); |
2290 | radeon_uvd_suspend(rdev); | 2295 | radeon_uvd_suspend(rdev); |
2291 | evergreen_irq_suspend(rdev); | 2296 | evergreen_irq_suspend(rdev); |
2292 | radeon_wb_disable(rdev); | 2297 | radeon_wb_disable(rdev); |
@@ -2418,6 +2423,7 @@ void cayman_fini(struct radeon_device *rdev) | |||
2418 | radeon_vm_manager_fini(rdev); | 2423 | radeon_vm_manager_fini(rdev); |
2419 | radeon_ib_pool_fini(rdev); | 2424 | radeon_ib_pool_fini(rdev); |
2420 | radeon_irq_kms_fini(rdev); | 2425 | radeon_irq_kms_fini(rdev); |
2426 | r600_uvd_stop(rdev); | ||
2421 | radeon_uvd_fini(rdev); | 2427 | radeon_uvd_fini(rdev); |
2422 | cayman_pcie_gart_fini(rdev); | 2428 | cayman_pcie_gart_fini(rdev); |
2423 | r600_vram_scratch_fini(rdev); | 2429 | r600_vram_scratch_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c index 4f9b9bc20daa..f0f5f748938a 100644 --- a/drivers/gpu/drm/radeon/ni_dpm.c +++ b/drivers/gpu/drm/radeon/ni_dpm.c | |||
@@ -4067,9 +4067,6 @@ int ni_dpm_init(struct radeon_device *rdev) | |||
4067 | struct rv7xx_power_info *pi; | 4067 | struct rv7xx_power_info *pi; |
4068 | struct evergreen_power_info *eg_pi; | 4068 | struct evergreen_power_info *eg_pi; |
4069 | struct ni_power_info *ni_pi; | 4069 | struct ni_power_info *ni_pi; |
4070 | int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); | ||
4071 | u16 data_offset, size; | ||
4072 | u8 frev, crev; | ||
4073 | struct atom_clock_dividers dividers; | 4070 | struct atom_clock_dividers dividers; |
4074 | int ret; | 4071 | int ret; |
4075 | 4072 | ||
@@ -4162,16 +4159,7 @@ int ni_dpm_init(struct radeon_device *rdev) | |||
4162 | eg_pi->vddci_control = | 4159 | eg_pi->vddci_control = |
4163 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0); | 4160 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0); |
4164 | 4161 | ||
4165 | if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, | 4162 | rv770_get_engine_memory_ss(rdev); |
4166 | &frev, &crev, &data_offset)) { | ||
4167 | pi->sclk_ss = true; | ||
4168 | pi->mclk_ss = true; | ||
4169 | pi->dynamic_ss = true; | ||
4170 | } else { | ||
4171 | pi->sclk_ss = false; | ||
4172 | pi->mclk_ss = false; | ||
4173 | pi->dynamic_ss = true; | ||
4174 | } | ||
4175 | 4163 | ||
4176 | pi->asi = RV770_ASI_DFLT; | 4164 | pi->asi = RV770_ASI_DFLT; |
4177 | pi->pasi = CYPRESS_HASI_DFLT; | 4165 | pi->pasi = CYPRESS_HASI_DFLT; |
@@ -4188,8 +4176,7 @@ int ni_dpm_init(struct radeon_device *rdev) | |||
4188 | 4176 | ||
4189 | pi->dynamic_pcie_gen2 = true; | 4177 | pi->dynamic_pcie_gen2 = true; |
4190 | 4178 | ||
4191 | if (pi->gfx_clock_gating && | 4179 | if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE) |
4192 | (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)) | ||
4193 | pi->thermal_protection = true; | 4180 | pi->thermal_protection = true; |
4194 | else | 4181 | else |
4195 | pi->thermal_protection = false; | 4182 | pi->thermal_protection = false; |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 10f712e37003..e66e72077350 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -2299,9 +2299,13 @@ int r600_init_microcode(struct radeon_device *rdev) | |||
2299 | if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) { | 2299 | if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) { |
2300 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name); | 2300 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name); |
2301 | err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); | 2301 | err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); |
2302 | if (err) | 2302 | if (err) { |
2303 | goto out; | 2303 | printk(KERN_ERR |
2304 | if (rdev->smc_fw->size != smc_req_size) { | 2304 | "smc: error loading firmware \"%s\"\n", |
2305 | fw_name); | ||
2306 | release_firmware(rdev->smc_fw); | ||
2307 | rdev->smc_fw = NULL; | ||
2308 | } else if (rdev->smc_fw->size != smc_req_size) { | ||
2305 | printk(KERN_ERR | 2309 | printk(KERN_ERR |
2306 | "smc: Bogus length %zu in firmware \"%s\"\n", | 2310 | "smc: Bogus length %zu in firmware \"%s\"\n", |
2307 | rdev->smc_fw->size, fw_name); | 2311 | rdev->smc_fw->size, fw_name); |
@@ -2697,12 +2701,29 @@ int r600_uvd_rbc_start(struct radeon_device *rdev) | |||
2697 | return 0; | 2701 | return 0; |
2698 | } | 2702 | } |
2699 | 2703 | ||
2700 | void r600_uvd_rbc_stop(struct radeon_device *rdev) | 2704 | void r600_uvd_stop(struct radeon_device *rdev) |
2701 | { | 2705 | { |
2702 | struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; | 2706 | struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; |
2703 | 2707 | ||
2704 | /* force RBC into idle state */ | 2708 | /* force RBC into idle state */ |
2705 | WREG32(UVD_RBC_RB_CNTL, 0x11010101); | 2709 | WREG32(UVD_RBC_RB_CNTL, 0x11010101); |
2710 | |||
2711 | /* Stall UMC and register bus before resetting VCPU */ | ||
2712 | WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); | ||
2713 | WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3)); | ||
2714 | mdelay(1); | ||
2715 | |||
2716 | /* put VCPU into reset */ | ||
2717 | WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET); | ||
2718 | mdelay(5); | ||
2719 | |||
2720 | /* disable VCPU clock */ | ||
2721 | WREG32(UVD_VCPU_CNTL, 0x0); | ||
2722 | |||
2723 | /* Unstall UMC and register bus */ | ||
2724 | WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8)); | ||
2725 | WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3)); | ||
2726 | |||
2706 | ring->ready = false; | 2727 | ring->ready = false; |
2707 | } | 2728 | } |
2708 | 2729 | ||
@@ -2722,6 +2743,11 @@ int r600_uvd_init(struct radeon_device *rdev) | |||
2722 | /* disable interupt */ | 2743 | /* disable interupt */ |
2723 | WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1)); | 2744 | WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1)); |
2724 | 2745 | ||
2746 | /* Stall UMC and register bus before resetting VCPU */ | ||
2747 | WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); | ||
2748 | WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3)); | ||
2749 | mdelay(1); | ||
2750 | |||
2725 | /* put LMI, VCPU, RBC etc... into reset */ | 2751 | /* put LMI, VCPU, RBC etc... into reset */ |
2726 | WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET | | 2752 | WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET | |
2727 | LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET | | 2753 | LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET | |
@@ -2751,10 +2777,6 @@ int r600_uvd_init(struct radeon_device *rdev) | |||
2751 | WREG32(UVD_MPC_SET_ALU, 0); | 2777 | WREG32(UVD_MPC_SET_ALU, 0); |
2752 | WREG32(UVD_MPC_SET_MUX, 0x88); | 2778 | WREG32(UVD_MPC_SET_MUX, 0x88); |
2753 | 2779 | ||
2754 | /* Stall UMC */ | ||
2755 | WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); | ||
2756 | WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3)); | ||
2757 | |||
2758 | /* take all subblocks out of reset, except VCPU */ | 2780 | /* take all subblocks out of reset, except VCPU */ |
2759 | WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET); | 2781 | WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET); |
2760 | mdelay(5); | 2782 | mdelay(5); |
@@ -3312,6 +3334,8 @@ static int r600_startup(struct radeon_device *rdev) | |||
3312 | /* enable pcie gen2 link */ | 3334 | /* enable pcie gen2 link */ |
3313 | r600_pcie_gen2_enable(rdev); | 3335 | r600_pcie_gen2_enable(rdev); |
3314 | 3336 | ||
3337 | r600_mc_program(rdev); | ||
3338 | |||
3315 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | 3339 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { |
3316 | r = r600_init_microcode(rdev); | 3340 | r = r600_init_microcode(rdev); |
3317 | if (r) { | 3341 | if (r) { |
@@ -3324,7 +3348,6 @@ static int r600_startup(struct radeon_device *rdev) | |||
3324 | if (r) | 3348 | if (r) |
3325 | return r; | 3349 | return r; |
3326 | 3350 | ||
3327 | r600_mc_program(rdev); | ||
3328 | if (rdev->flags & RADEON_IS_AGP) { | 3351 | if (rdev->flags & RADEON_IS_AGP) { |
3329 | r600_agp_enable(rdev); | 3352 | r600_agp_enable(rdev); |
3330 | } else { | 3353 | } else { |
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c index f48240bb8c56..f264df5470f7 100644 --- a/drivers/gpu/drm/radeon/r600_hdmi.c +++ b/drivers/gpu/drm/radeon/r600_hdmi.c | |||
@@ -226,10 +226,29 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock) | |||
226 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); | 226 | struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); |
227 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; | 227 | struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; |
228 | u32 base_rate = 24000; | 228 | u32 base_rate = 24000; |
229 | u32 max_ratio = clock / base_rate; | ||
230 | u32 dto_phase; | ||
231 | u32 dto_modulo = clock; | ||
232 | u32 wallclock_ratio; | ||
233 | u32 dto_cntl; | ||
229 | 234 | ||
230 | if (!dig || !dig->afmt) | 235 | if (!dig || !dig->afmt) |
231 | return; | 236 | return; |
232 | 237 | ||
238 | if (max_ratio >= 8) { | ||
239 | dto_phase = 192 * 1000; | ||
240 | wallclock_ratio = 3; | ||
241 | } else if (max_ratio >= 4) { | ||
242 | dto_phase = 96 * 1000; | ||
243 | wallclock_ratio = 2; | ||
244 | } else if (max_ratio >= 2) { | ||
245 | dto_phase = 48 * 1000; | ||
246 | wallclock_ratio = 1; | ||
247 | } else { | ||
248 | dto_phase = 24 * 1000; | ||
249 | wallclock_ratio = 0; | ||
250 | } | ||
251 | |||
233 | /* there are two DTOs selected by DCCG_AUDIO_DTO_SELECT. | 252 | /* there are two DTOs selected by DCCG_AUDIO_DTO_SELECT. |
234 | * doesn't matter which one you use. Just use the first one. | 253 | * doesn't matter which one you use. Just use the first one. |
235 | */ | 254 | */ |
@@ -242,9 +261,21 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock) | |||
242 | /* according to the reg specs, this should DCE3.2 only, but in | 261 | /* according to the reg specs, this should DCE3.2 only, but in |
243 | * practice it seems to cover DCE3.0 as well. | 262 | * practice it seems to cover DCE3.0 as well. |
244 | */ | 263 | */ |
245 | WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100); | 264 | if (dig->dig_encoder == 0) { |
246 | WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100); | 265 | dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK; |
247 | WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */ | 266 | dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio); |
267 | WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl); | ||
268 | WREG32(DCCG_AUDIO_DTO0_PHASE, dto_phase); | ||
269 | WREG32(DCCG_AUDIO_DTO0_MODULE, dto_modulo); | ||
270 | WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */ | ||
271 | } else { | ||
272 | dto_cntl = RREG32(DCCG_AUDIO_DTO1_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK; | ||
273 | dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio); | ||
274 | WREG32(DCCG_AUDIO_DTO1_CNTL, dto_cntl); | ||
275 | WREG32(DCCG_AUDIO_DTO1_PHASE, dto_phase); | ||
276 | WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo); | ||
277 | WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */ | ||
278 | } | ||
248 | } else { | 279 | } else { |
249 | /* according to the reg specs, this should be DCE2.0 and DCE3.0 */ | 280 | /* according to the reg specs, this should be DCE2.0 and DCE3.0 */ |
250 | WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) | | 281 | WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) | |
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h index 8e3fe815edab..7c780839a7f4 100644 --- a/drivers/gpu/drm/radeon/r600d.h +++ b/drivers/gpu/drm/radeon/r600d.h | |||
@@ -933,6 +933,9 @@ | |||
933 | #define DCCG_AUDIO_DTO0_LOAD 0x051c | 933 | #define DCCG_AUDIO_DTO0_LOAD 0x051c |
934 | # define DTO_LOAD (1 << 31) | 934 | # define DTO_LOAD (1 << 31) |
935 | #define DCCG_AUDIO_DTO0_CNTL 0x0520 | 935 | #define DCCG_AUDIO_DTO0_CNTL 0x0520 |
936 | # define DCCG_AUDIO_DTO_WALLCLOCK_RATIO(x) (((x) & 7) << 0) | ||
937 | # define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK 7 | ||
938 | # define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_SHIFT 0 | ||
936 | 939 | ||
937 | #define DCCG_AUDIO_DTO1_PHASE 0x0524 | 940 | #define DCCG_AUDIO_DTO1_PHASE 0x0524 |
938 | #define DCCG_AUDIO_DTO1_MODULE 0x0528 | 941 | #define DCCG_AUDIO_DTO1_MODULE 0x0528 |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 2f08219c39b6..274b8e1b889f 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -1468,7 +1468,6 @@ struct radeon_uvd { | |||
1468 | void *cpu_addr; | 1468 | void *cpu_addr; |
1469 | uint64_t gpu_addr; | 1469 | uint64_t gpu_addr; |
1470 | void *saved_bo; | 1470 | void *saved_bo; |
1471 | unsigned fw_size; | ||
1472 | atomic_t handles[RADEON_MAX_UVD_HANDLES]; | 1471 | atomic_t handles[RADEON_MAX_UVD_HANDLES]; |
1473 | struct drm_file *filp[RADEON_MAX_UVD_HANDLES]; | 1472 | struct drm_file *filp[RADEON_MAX_UVD_HANDLES]; |
1474 | struct delayed_work idle_work; | 1473 | struct delayed_work idle_work; |
@@ -2066,6 +2065,7 @@ struct radeon_device { | |||
2066 | const struct firmware *mec_fw; /* CIK MEC firmware */ | 2065 | const struct firmware *mec_fw; /* CIK MEC firmware */ |
2067 | const struct firmware *sdma_fw; /* CIK SDMA firmware */ | 2066 | const struct firmware *sdma_fw; /* CIK SDMA firmware */ |
2068 | const struct firmware *smc_fw; /* SMC firmware */ | 2067 | const struct firmware *smc_fw; /* SMC firmware */ |
2068 | const struct firmware *uvd_fw; /* UVD firmware */ | ||
2069 | struct r600_blit r600_blit; | 2069 | struct r600_blit r600_blit; |
2070 | struct r600_vram_scratch vram_scratch; | 2070 | struct r600_vram_scratch vram_scratch; |
2071 | int msi_enabled; /* msi enabled */ | 2071 | int msi_enabled; /* msi enabled */ |
@@ -2095,6 +2095,8 @@ struct radeon_device { | |||
2095 | /* ACPI interface */ | 2095 | /* ACPI interface */ |
2096 | struct radeon_atif atif; | 2096 | struct radeon_atif atif; |
2097 | struct radeon_atcs atcs; | 2097 | struct radeon_atcs atcs; |
2098 | /* srbm instance registers */ | ||
2099 | struct mutex srbm_mutex; | ||
2098 | }; | 2100 | }; |
2099 | 2101 | ||
2100 | int radeon_device_init(struct radeon_device *rdev, | 2102 | int radeon_device_init(struct radeon_device *rdev, |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 902479fa737f..3d61d5aac18f 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
@@ -441,7 +441,7 @@ void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rde | |||
441 | /* uvd */ | 441 | /* uvd */ |
442 | int r600_uvd_init(struct radeon_device *rdev); | 442 | int r600_uvd_init(struct radeon_device *rdev); |
443 | int r600_uvd_rbc_start(struct radeon_device *rdev); | 443 | int r600_uvd_rbc_start(struct radeon_device *rdev); |
444 | void r600_uvd_rbc_stop(struct radeon_device *rdev); | 444 | void r600_uvd_stop(struct radeon_device *rdev); |
445 | int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); | 445 | int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring); |
446 | void r600_uvd_fence_emit(struct radeon_device *rdev, | 446 | void r600_uvd_fence_emit(struct radeon_device *rdev, |
447 | struct radeon_fence *fence); | 447 | struct radeon_fence *fence); |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 82335e38ec4f..63398ae1dbf5 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
@@ -1163,6 +1163,7 @@ int radeon_device_init(struct radeon_device *rdev, | |||
1163 | mutex_init(&rdev->gem.mutex); | 1163 | mutex_init(&rdev->gem.mutex); |
1164 | mutex_init(&rdev->pm.mutex); | 1164 | mutex_init(&rdev->pm.mutex); |
1165 | mutex_init(&rdev->gpu_clock_mutex); | 1165 | mutex_init(&rdev->gpu_clock_mutex); |
1166 | mutex_init(&rdev->srbm_mutex); | ||
1166 | init_rwsem(&rdev->pm.mclk_lock); | 1167 | init_rwsem(&rdev->pm.mclk_lock); |
1167 | init_rwsem(&rdev->exclusive_lock); | 1168 | init_rwsem(&rdev->exclusive_lock); |
1168 | init_waitqueue_head(&rdev->irq.vblank_queue); | 1169 | init_waitqueue_head(&rdev->irq.vblank_queue); |
@@ -1519,6 +1520,7 @@ int radeon_gpu_reset(struct radeon_device *rdev) | |||
1519 | radeon_save_bios_scratch_regs(rdev); | 1520 | radeon_save_bios_scratch_regs(rdev); |
1520 | /* block TTM */ | 1521 | /* block TTM */ |
1521 | resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); | 1522 | resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev); |
1523 | radeon_pm_suspend(rdev); | ||
1522 | radeon_suspend(rdev); | 1524 | radeon_suspend(rdev); |
1523 | 1525 | ||
1524 | for (i = 0; i < RADEON_NUM_RINGS; ++i) { | 1526 | for (i = 0; i < RADEON_NUM_RINGS; ++i) { |
@@ -1564,6 +1566,7 @@ retry: | |||
1564 | } | 1566 | } |
1565 | } | 1567 | } |
1566 | 1568 | ||
1569 | radeon_pm_resume(rdev); | ||
1567 | drm_helper_resume_force_mode(rdev->ddev); | 1570 | drm_helper_resume_force_mode(rdev->ddev); |
1568 | 1571 | ||
1569 | ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); | 1572 | ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched); |
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index 7ddb0efe2408..ddb8f8e04eb5 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c | |||
@@ -782,7 +782,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring) | |||
782 | 782 | ||
783 | } else { | 783 | } else { |
784 | /* put fence directly behind firmware */ | 784 | /* put fence directly behind firmware */ |
785 | index = ALIGN(rdev->uvd.fw_size, 8); | 785 | index = ALIGN(rdev->uvd_fw->size, 8); |
786 | rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index; | 786 | rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index; |
787 | rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index; | 787 | rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index; |
788 | } | 788 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index 6a51d943ccf4..b990b1a2bd50 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c | |||
@@ -207,7 +207,6 @@ void radeon_gart_table_vram_free(struct radeon_device *rdev) | |||
207 | if (rdev->gart.robj == NULL) { | 207 | if (rdev->gart.robj == NULL) { |
208 | return; | 208 | return; |
209 | } | 209 | } |
210 | radeon_gart_table_vram_unpin(rdev); | ||
211 | radeon_bo_unref(&rdev->gart.robj); | 210 | radeon_bo_unref(&rdev->gart.robj); |
212 | } | 211 | } |
213 | 212 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index f374c467aaca..c557850cd345 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c | |||
@@ -1176,7 +1176,14 @@ int radeon_pm_init(struct radeon_device *rdev) | |||
1176 | case CHIP_VERDE: | 1176 | case CHIP_VERDE: |
1177 | case CHIP_OLAND: | 1177 | case CHIP_OLAND: |
1178 | case CHIP_HAINAN: | 1178 | case CHIP_HAINAN: |
1179 | if (radeon_dpm == 1) | 1179 | /* DPM requires the RLC, RV770+ dGPU requires SMC */ |
1180 | if (!rdev->rlc_fw) | ||
1181 | rdev->pm.pm_method = PM_METHOD_PROFILE; | ||
1182 | else if ((rdev->family >= CHIP_RV770) && | ||
1183 | (!(rdev->flags & RADEON_IS_IGP)) && | ||
1184 | (!rdev->smc_fw)) | ||
1185 | rdev->pm.pm_method = PM_METHOD_PROFILE; | ||
1186 | else if (radeon_dpm == 1) | ||
1180 | rdev->pm.pm_method = PM_METHOD_DPM; | 1187 | rdev->pm.pm_method = PM_METHOD_DPM; |
1181 | else | 1188 | else |
1182 | rdev->pm.pm_method = PM_METHOD_PROFILE; | 1189 | rdev->pm.pm_method = PM_METHOD_PROFILE; |
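
The radeon_pm.c hunk above makes DPM conditional on the firmware it depends on: without the RLC microcode, or without the SMC microcode on RV770-and-newer discrete parts, the driver quietly falls back to profile-based power management instead of failing later (compare the si.c hunk further down, which stops treating a failed SMC load as fatal). A minimal stand-alone sketch of that decision chain; pick_pm_method() and struct dev_caps are illustrative stand-ins, not the real radeon_device layout:

    #include <stdbool.h>
    #include <stdio.h>

    enum pm_method { PM_METHOD_PROFILE, PM_METHOD_DPM };

    struct dev_caps {
        bool rlc_fw_loaded;   /* RLC microcode present?            */
        bool smc_fw_loaded;   /* SMC microcode present?            */
        bool is_rv770_plus;   /* family >= CHIP_RV770              */
        bool is_igp;          /* integrated part, no SMC required  */
        int  radeon_dpm;      /* module parameter: 1 requests DPM  */
    };

    static enum pm_method pick_pm_method(const struct dev_caps *c)
    {
        if (!c->rlc_fw_loaded)
            return PM_METHOD_PROFILE;              /* DPM needs the RLC      */
        if (c->is_rv770_plus && !c->is_igp && !c->smc_fw_loaded)
            return PM_METHOD_PROFILE;              /* dGPU DPM needs the SMC */
        return c->radeon_dpm == 1 ? PM_METHOD_DPM : PM_METHOD_PROFILE;
    }

    int main(void)
    {
        struct dev_caps c = { true, false, true, false, 1 };
        /* prints "profile": an RV770+ dGPU without SMC firmware */
        printf("%s\n", pick_pm_method(&c) == PM_METHOD_DPM ? "dpm" : "profile");
        return 0;
    }
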
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c index 414fd145d20e..f1c15754e73c 100644 --- a/drivers/gpu/drm/radeon/radeon_uvd.c +++ b/drivers/gpu/drm/radeon/radeon_uvd.c | |||
@@ -56,7 +56,6 @@ static void radeon_uvd_idle_work_handler(struct work_struct *work); | |||
56 | 56 | ||
57 | int radeon_uvd_init(struct radeon_device *rdev) | 57 | int radeon_uvd_init(struct radeon_device *rdev) |
58 | { | 58 | { |
59 | const struct firmware *fw; | ||
60 | unsigned long bo_size; | 59 | unsigned long bo_size; |
61 | const char *fw_name; | 60 | const char *fw_name; |
62 | int i, r; | 61 | int i, r; |
@@ -105,14 +104,14 @@ int radeon_uvd_init(struct radeon_device *rdev) | |||
105 | return -EINVAL; | 104 | return -EINVAL; |
106 | } | 105 | } |
107 | 106 | ||
108 | r = request_firmware(&fw, fw_name, rdev->dev); | 107 | r = request_firmware(&rdev->uvd_fw, fw_name, rdev->dev); |
109 | if (r) { | 108 | if (r) { |
110 | dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n", | 109 | dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n", |
111 | fw_name); | 110 | fw_name); |
112 | return r; | 111 | return r; |
113 | } | 112 | } |
114 | 113 | ||
115 | bo_size = RADEON_GPU_PAGE_ALIGN(fw->size + 8) + | 114 | bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) + |
116 | RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE; | 115 | RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE; |
117 | r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true, | 116 | r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true, |
118 | RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo); | 117 | RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo); |
@@ -145,12 +144,6 @@ int radeon_uvd_init(struct radeon_device *rdev) | |||
145 | 144 | ||
146 | radeon_bo_unreserve(rdev->uvd.vcpu_bo); | 145 | radeon_bo_unreserve(rdev->uvd.vcpu_bo); |
147 | 146 | ||
148 | rdev->uvd.fw_size = fw->size; | ||
149 | memset(rdev->uvd.cpu_addr, 0, bo_size); | ||
150 | memcpy(rdev->uvd.cpu_addr, fw->data, fw->size); | ||
151 | |||
152 | release_firmware(fw); | ||
153 | |||
154 | for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { | 147 | for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { |
155 | atomic_set(&rdev->uvd.handles[i], 0); | 148 | atomic_set(&rdev->uvd.handles[i], 0); |
156 | rdev->uvd.filp[i] = NULL; | 149 | rdev->uvd.filp[i] = NULL; |
@@ -174,33 +167,60 @@ void radeon_uvd_fini(struct radeon_device *rdev) | |||
174 | } | 167 | } |
175 | 168 | ||
176 | radeon_bo_unref(&rdev->uvd.vcpu_bo); | 169 | radeon_bo_unref(&rdev->uvd.vcpu_bo); |
170 | |||
171 | release_firmware(rdev->uvd_fw); | ||
177 | } | 172 | } |
178 | 173 | ||
179 | int radeon_uvd_suspend(struct radeon_device *rdev) | 174 | int radeon_uvd_suspend(struct radeon_device *rdev) |
180 | { | 175 | { |
181 | unsigned size; | 176 | unsigned size; |
177 | void *ptr; | ||
178 | int i; | ||
182 | 179 | ||
183 | if (rdev->uvd.vcpu_bo == NULL) | 180 | if (rdev->uvd.vcpu_bo == NULL) |
184 | return 0; | 181 | return 0; |
185 | 182 | ||
183 | for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) | ||
184 | if (atomic_read(&rdev->uvd.handles[i])) | ||
185 | break; | ||
186 | |||
187 | if (i == RADEON_MAX_UVD_HANDLES) | ||
188 | return 0; | ||
189 | |||
186 | size = radeon_bo_size(rdev->uvd.vcpu_bo); | 190 | size = radeon_bo_size(rdev->uvd.vcpu_bo); |
191 | size -= rdev->uvd_fw->size; | ||
192 | |||
193 | ptr = rdev->uvd.cpu_addr; | ||
194 | ptr += rdev->uvd_fw->size; | ||
195 | |||
187 | rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL); | 196 | rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL); |
188 | memcpy(rdev->uvd.saved_bo, rdev->uvd.cpu_addr, size); | 197 | memcpy(rdev->uvd.saved_bo, ptr, size); |
189 | 198 | ||
190 | return 0; | 199 | return 0; |
191 | } | 200 | } |
192 | 201 | ||
193 | int radeon_uvd_resume(struct radeon_device *rdev) | 202 | int radeon_uvd_resume(struct radeon_device *rdev) |
194 | { | 203 | { |
204 | unsigned size; | ||
205 | void *ptr; | ||
206 | |||
195 | if (rdev->uvd.vcpu_bo == NULL) | 207 | if (rdev->uvd.vcpu_bo == NULL) |
196 | return -EINVAL; | 208 | return -EINVAL; |
197 | 209 | ||
210 | memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size); | ||
211 | |||
212 | size = radeon_bo_size(rdev->uvd.vcpu_bo); | ||
213 | size -= rdev->uvd_fw->size; | ||
214 | |||
215 | ptr = rdev->uvd.cpu_addr; | ||
216 | ptr += rdev->uvd_fw->size; | ||
217 | |||
198 | if (rdev->uvd.saved_bo != NULL) { | 218 | if (rdev->uvd.saved_bo != NULL) { |
199 | unsigned size = radeon_bo_size(rdev->uvd.vcpu_bo); | 219 | memcpy(ptr, rdev->uvd.saved_bo, size); |
200 | memcpy(rdev->uvd.cpu_addr, rdev->uvd.saved_bo, size); | ||
201 | kfree(rdev->uvd.saved_bo); | 220 | kfree(rdev->uvd.saved_bo); |
202 | rdev->uvd.saved_bo = NULL; | 221 | rdev->uvd.saved_bo = NULL; |
203 | } | 222 | } else |
223 | memset(ptr, 0, size); | ||
204 | 224 | ||
205 | return 0; | 225 | return 0; |
206 | } | 226 | } |
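
The suspend/resume rework above relies on the firmware image staying at the start of the UVD VCPU buffer and on rdev->uvd_fw being kept for the whole device lifetime: suspend saves only the session state that lives behind the firmware, and only when a handle is active, while resume always re-copies the firmware blob and then either restores the saved state or zeroes it. A plain user-space sketch of that layout, with bo, fw_data and fw_size standing in for the radeon BO mapping and the request_firmware() blob:

    #include <stdlib.h>
    #include <string.h>

    struct uvd_state {
        unsigned char *bo;            /* cpu mapping of the vcpu buffer */
        size_t bo_size;               /* radeon_bo_size()               */
        const unsigned char *fw_data; /* firmware blob, kept resident   */
        size_t fw_size;
        unsigned char *saved;         /* NULL when nothing was saved    */
    };

    static void uvd_suspend(struct uvd_state *s, int have_active_handles)
    {
        size_t size = s->bo_size - s->fw_size;

        if (!have_active_handles)
            return;                                 /* nothing worth saving */
        s->saved = malloc(size);
        if (s->saved)
            memcpy(s->saved, s->bo + s->fw_size, size);
    }

    static void uvd_resume(struct uvd_state *s)
    {
        size_t size = s->bo_size - s->fw_size;

        memcpy(s->bo, s->fw_data, s->fw_size);      /* firmware always rewritten */
        if (s->saved) {
            memcpy(s->bo + s->fw_size, s->saved, size);
            free(s->saved);
            s->saved = NULL;
        } else {
            memset(s->bo + s->fw_size, 0, size);    /* start from a clean state */
        }
    }

    int main(void)
    {
        unsigned char bo[64] = { 0 }, fw[16] = { 0 };
        struct uvd_state s = { bo, sizeof(bo), fw, sizeof(fw), NULL };

        uvd_suspend(&s, 1);
        uvd_resume(&s);
        return 0;
    }
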
@@ -215,8 +235,8 @@ void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp) | |||
215 | { | 235 | { |
216 | int i, r; | 236 | int i, r; |
217 | for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { | 237 | for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) { |
218 | if (rdev->uvd.filp[i] == filp) { | 238 | uint32_t handle = atomic_read(&rdev->uvd.handles[i]); |
219 | uint32_t handle = atomic_read(&rdev->uvd.handles[i]); | 239 | if (handle != 0 && rdev->uvd.filp[i] == filp) { |
220 | struct radeon_fence *fence; | 240 | struct radeon_fence *fence; |
221 | 241 | ||
222 | r = radeon_uvd_get_destroy_msg(rdev, | 242 | r = radeon_uvd_get_destroy_msg(rdev, |
@@ -337,8 +357,10 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, | |||
337 | } | 357 | } |
338 | 358 | ||
339 | r = radeon_bo_kmap(bo, &ptr); | 359 | r = radeon_bo_kmap(bo, &ptr); |
340 | if (r) | 360 | if (r) { |
361 | DRM_ERROR("Failed mapping the UVD message (%d)!\n", r); | ||
341 | return r; | 362 | return r; |
363 | } | ||
342 | 364 | ||
343 | msg = ptr + offset; | 365 | msg = ptr + offset; |
344 | 366 | ||
@@ -364,8 +386,14 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, | |||
364 | radeon_bo_kunmap(bo); | 386 | radeon_bo_kunmap(bo); |
365 | return 0; | 387 | return 0; |
366 | } else { | 388 | } else { |
367 | /* it's a create msg, no special handling needed */ | ||
368 | radeon_bo_kunmap(bo); | 389 | radeon_bo_kunmap(bo); |
390 | |||
391 | if (msg_type != 0) { | ||
392 | DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type); | ||
393 | return -EINVAL; | ||
394 | } | ||
395 | |||
396 | /* it's a create msg, no special handling needed */ | ||
369 | } | 397 | } |
370 | 398 | ||
371 | /* create or decode, validate the handle */ | 399 | /* create or decode, validate the handle */ |
@@ -388,7 +416,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo, | |||
388 | 416 | ||
389 | static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, | 417 | static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, |
390 | int data0, int data1, | 418 | int data0, int data1, |
391 | unsigned buf_sizes[]) | 419 | unsigned buf_sizes[], bool *has_msg_cmd) |
392 | { | 420 | { |
393 | struct radeon_cs_chunk *relocs_chunk; | 421 | struct radeon_cs_chunk *relocs_chunk; |
394 | struct radeon_cs_reloc *reloc; | 422 | struct radeon_cs_reloc *reloc; |
@@ -417,7 +445,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, | |||
417 | 445 | ||
418 | if (cmd < 0x4) { | 446 | if (cmd < 0x4) { |
419 | if ((end - start) < buf_sizes[cmd]) { | 447 | if ((end - start) < buf_sizes[cmd]) { |
420 | DRM_ERROR("buffer to small (%d / %d)!\n", | 448 | DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd, |
421 | (unsigned)(end - start), buf_sizes[cmd]); | 449 | (unsigned)(end - start), buf_sizes[cmd]); |
422 | return -EINVAL; | 450 | return -EINVAL; |
423 | } | 451 | } |
@@ -442,9 +470,17 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, | |||
442 | } | 470 | } |
443 | 471 | ||
444 | if (cmd == 0) { | 472 | if (cmd == 0) { |
473 | if (*has_msg_cmd) { | ||
474 | DRM_ERROR("More than one message in a UVD-IB!\n"); | ||
475 | return -EINVAL; | ||
476 | } | ||
477 | *has_msg_cmd = true; | ||
445 | r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes); | 478 | r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes); |
446 | if (r) | 479 | if (r) |
447 | return r; | 480 | return r; |
481 | } else if (!*has_msg_cmd) { | ||
482 | DRM_ERROR("Message needed before other commands are send!\n"); | ||
483 | return -EINVAL; | ||
448 | } | 484 | } |
449 | 485 | ||
450 | return 0; | 486 | return 0; |
@@ -453,7 +489,8 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p, | |||
453 | static int radeon_uvd_cs_reg(struct radeon_cs_parser *p, | 489 | static int radeon_uvd_cs_reg(struct radeon_cs_parser *p, |
454 | struct radeon_cs_packet *pkt, | 490 | struct radeon_cs_packet *pkt, |
455 | int *data0, int *data1, | 491 | int *data0, int *data1, |
456 | unsigned buf_sizes[]) | 492 | unsigned buf_sizes[], |
493 | bool *has_msg_cmd) | ||
457 | { | 494 | { |
458 | int i, r; | 495 | int i, r; |
459 | 496 | ||
@@ -467,7 +504,8 @@ static int radeon_uvd_cs_reg(struct radeon_cs_parser *p, | |||
467 | *data1 = p->idx; | 504 | *data1 = p->idx; |
468 | break; | 505 | break; |
469 | case UVD_GPCOM_VCPU_CMD: | 506 | case UVD_GPCOM_VCPU_CMD: |
470 | r = radeon_uvd_cs_reloc(p, *data0, *data1, buf_sizes); | 507 | r = radeon_uvd_cs_reloc(p, *data0, *data1, |
508 | buf_sizes, has_msg_cmd); | ||
471 | if (r) | 509 | if (r) |
472 | return r; | 510 | return r; |
473 | break; | 511 | break; |
@@ -488,6 +526,9 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p) | |||
488 | struct radeon_cs_packet pkt; | 526 | struct radeon_cs_packet pkt; |
489 | int r, data0 = 0, data1 = 0; | 527 | int r, data0 = 0, data1 = 0; |
490 | 528 | ||
529 | /* does the IB have a msg command */ | ||
530 | bool has_msg_cmd = false; | ||
531 | |||
491 | /* minimum buffer sizes */ | 532 | /* minimum buffer sizes */ |
492 | unsigned buf_sizes[] = { | 533 | unsigned buf_sizes[] = { |
493 | [0x00000000] = 2048, | 534 | [0x00000000] = 2048, |
@@ -514,8 +555,8 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p) | |||
514 | return r; | 555 | return r; |
515 | switch (pkt.type) { | 556 | switch (pkt.type) { |
516 | case RADEON_PACKET_TYPE0: | 557 | case RADEON_PACKET_TYPE0: |
517 | r = radeon_uvd_cs_reg(p, &pkt, &data0, | 558 | r = radeon_uvd_cs_reg(p, &pkt, &data0, &data1, |
518 | &data1, buf_sizes); | 559 | buf_sizes, &has_msg_cmd); |
519 | if (r) | 560 | if (r) |
520 | return r; | 561 | return r; |
521 | break; | 562 | break; |
@@ -527,6 +568,12 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p) | |||
527 | return -EINVAL; | 568 | return -EINVAL; |
528 | } | 569 | } |
529 | } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); | 570 | } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw); |
571 | |||
572 | if (!has_msg_cmd) { | ||
573 | DRM_ERROR("UVD-IBs need a msg command!\n"); | ||
574 | return -EINVAL; | ||
575 | } | ||
576 | |||
530 | return 0; | 577 | return 0; |
531 | } | 578 | } |
532 | 579 | ||
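
Taken together, the parser hunks above enforce a simple protocol on UVD indirect buffers: exactly one message command (cmd == 0), it must precede every other command, unknown message types are rejected, and an IB with no message at all fails with -EINVAL. A compact stand-alone version of just that ordering check (check_uvd_ib() is an illustration, not the driver's entry point):

    #include <stdbool.h>
    #include <stdio.h>

    static int check_uvd_ib(const int *cmds, int n)
    {
        bool has_msg_cmd = false;

        for (int i = 0; i < n; i++) {
            if (cmds[i] == 0) {
                if (has_msg_cmd)
                    return -1;          /* more than one message in the IB */
                has_msg_cmd = true;
            } else if (!has_msg_cmd) {
                return -1;              /* data command before the message */
            }
        }
        return has_msg_cmd ? 0 : -1;    /* a message is mandatory          */
    }

    int main(void)
    {
        int good[] = { 0, 1, 2 }, bad[] = { 1, 0 };
        printf("%d %d\n", check_uvd_ib(good, 3), check_uvd_ib(bad, 2)); /* 0 -1 */
        return 0;
    }
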
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c index 363018c60412..bdd888b4db2b 100644 --- a/drivers/gpu/drm/radeon/rv6xx_dpm.c +++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c | |||
@@ -1944,9 +1944,7 @@ static int rv6xx_parse_power_table(struct radeon_device *rdev) | |||
1944 | 1944 | ||
1945 | int rv6xx_dpm_init(struct radeon_device *rdev) | 1945 | int rv6xx_dpm_init(struct radeon_device *rdev) |
1946 | { | 1946 | { |
1947 | int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); | 1947 | struct radeon_atom_ss ss; |
1948 | uint16_t data_offset, size; | ||
1949 | uint8_t frev, crev; | ||
1950 | struct atom_clock_dividers dividers; | 1948 | struct atom_clock_dividers dividers; |
1951 | struct rv6xx_power_info *pi; | 1949 | struct rv6xx_power_info *pi; |
1952 | int ret; | 1950 | int ret; |
@@ -1989,16 +1987,18 @@ int rv6xx_dpm_init(struct radeon_device *rdev) | |||
1989 | 1987 | ||
1990 | pi->gfx_clock_gating = true; | 1988 | pi->gfx_clock_gating = true; |
1991 | 1989 | ||
1992 | if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, | 1990 | pi->sclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, |
1993 | &frev, &crev, &data_offset)) { | 1991 | ASIC_INTERNAL_ENGINE_SS, 0); |
1994 | pi->sclk_ss = true; | 1992 | pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, |
1995 | pi->mclk_ss = true; | 1993 | ASIC_INTERNAL_MEMORY_SS, 0); |
1994 | |||
1995 | /* Disable sclk ss, causes hangs on a lot of systems */ | ||
1996 | pi->sclk_ss = false; | ||
1997 | |||
1998 | if (pi->sclk_ss || pi->mclk_ss) | ||
1996 | pi->dynamic_ss = true; | 1999 | pi->dynamic_ss = true; |
1997 | } else { | 2000 | else |
1998 | pi->sclk_ss = false; | ||
1999 | pi->mclk_ss = false; | ||
2000 | pi->dynamic_ss = false; | 2001 | pi->dynamic_ss = false; |
2001 | } | ||
2002 | 2002 | ||
2003 | pi->dynamic_pcie_gen2 = true; | 2003 | pi->dynamic_pcie_gen2 = true; |
2004 | 2004 | ||
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 30ea14e8854c..bcc68ec204ad 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
@@ -813,7 +813,7 @@ int rv770_uvd_resume(struct radeon_device *rdev) | |||
813 | 813 | ||
814 | /* programm the VCPU memory controller bits 0-27 */ | 814 | /* programm the VCPU memory controller bits 0-27 */ |
815 | addr = rdev->uvd.gpu_addr >> 3; | 815 | addr = rdev->uvd.gpu_addr >> 3; |
816 | size = RADEON_GPU_PAGE_ALIGN(rdev->uvd.fw_size + 4) >> 3; | 816 | size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3; |
817 | WREG32(UVD_VCPU_CACHE_OFFSET0, addr); | 817 | WREG32(UVD_VCPU_CACHE_OFFSET0, addr); |
818 | WREG32(UVD_VCPU_CACHE_SIZE0, size); | 818 | WREG32(UVD_VCPU_CACHE_SIZE0, size); |
819 | 819 | ||
@@ -1829,6 +1829,8 @@ static int rv770_startup(struct radeon_device *rdev) | |||
1829 | /* enable pcie gen2 link */ | 1829 | /* enable pcie gen2 link */ |
1830 | rv770_pcie_gen2_enable(rdev); | 1830 | rv770_pcie_gen2_enable(rdev); |
1831 | 1831 | ||
1832 | rv770_mc_program(rdev); | ||
1833 | |||
1832 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { | 1834 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { |
1833 | r = r600_init_microcode(rdev); | 1835 | r = r600_init_microcode(rdev); |
1834 | if (r) { | 1836 | if (r) { |
@@ -1841,7 +1843,6 @@ static int rv770_startup(struct radeon_device *rdev) | |||
1841 | if (r) | 1843 | if (r) |
1842 | return r; | 1844 | return r; |
1843 | 1845 | ||
1844 | rv770_mc_program(rdev); | ||
1845 | if (rdev->flags & RADEON_IS_AGP) { | 1846 | if (rdev->flags & RADEON_IS_AGP) { |
1846 | rv770_agp_enable(rdev); | 1847 | rv770_agp_enable(rdev); |
1847 | } else { | 1848 | } else { |
@@ -1983,6 +1984,7 @@ int rv770_resume(struct radeon_device *rdev) | |||
1983 | int rv770_suspend(struct radeon_device *rdev) | 1984 | int rv770_suspend(struct radeon_device *rdev) |
1984 | { | 1985 | { |
1985 | r600_audio_fini(rdev); | 1986 | r600_audio_fini(rdev); |
1987 | r600_uvd_stop(rdev); | ||
1986 | radeon_uvd_suspend(rdev); | 1988 | radeon_uvd_suspend(rdev); |
1987 | r700_cp_stop(rdev); | 1989 | r700_cp_stop(rdev); |
1988 | r600_dma_stop(rdev); | 1990 | r600_dma_stop(rdev); |
@@ -2098,6 +2100,7 @@ void rv770_fini(struct radeon_device *rdev) | |||
2098 | radeon_ib_pool_fini(rdev); | 2100 | radeon_ib_pool_fini(rdev); |
2099 | radeon_irq_kms_fini(rdev); | 2101 | radeon_irq_kms_fini(rdev); |
2100 | rv770_pcie_gart_fini(rdev); | 2102 | rv770_pcie_gart_fini(rdev); |
2103 | r600_uvd_stop(rdev); | ||
2101 | radeon_uvd_fini(rdev); | 2104 | radeon_uvd_fini(rdev); |
2102 | r600_vram_scratch_fini(rdev); | 2105 | r600_vram_scratch_fini(rdev); |
2103 | radeon_gem_fini(rdev); | 2106 | radeon_gem_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c index 2d347925f77d..094c67a29d0d 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.c +++ b/drivers/gpu/drm/radeon/rv770_dpm.c | |||
@@ -2319,12 +2319,25 @@ int rv7xx_parse_power_table(struct radeon_device *rdev) | |||
2319 | return 0; | 2319 | return 0; |
2320 | } | 2320 | } |
2321 | 2321 | ||
2322 | void rv770_get_engine_memory_ss(struct radeon_device *rdev) | ||
2323 | { | ||
2324 | struct rv7xx_power_info *pi = rv770_get_pi(rdev); | ||
2325 | struct radeon_atom_ss ss; | ||
2326 | |||
2327 | pi->sclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, | ||
2328 | ASIC_INTERNAL_ENGINE_SS, 0); | ||
2329 | pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, | ||
2330 | ASIC_INTERNAL_MEMORY_SS, 0); | ||
2331 | |||
2332 | if (pi->sclk_ss || pi->mclk_ss) | ||
2333 | pi->dynamic_ss = true; | ||
2334 | else | ||
2335 | pi->dynamic_ss = false; | ||
2336 | } | ||
2337 | |||
2322 | int rv770_dpm_init(struct radeon_device *rdev) | 2338 | int rv770_dpm_init(struct radeon_device *rdev) |
2323 | { | 2339 | { |
2324 | struct rv7xx_power_info *pi; | 2340 | struct rv7xx_power_info *pi; |
2325 | int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); | ||
2326 | uint16_t data_offset, size; | ||
2327 | uint8_t frev, crev; | ||
2328 | struct atom_clock_dividers dividers; | 2341 | struct atom_clock_dividers dividers; |
2329 | int ret; | 2342 | int ret; |
2330 | 2343 | ||
@@ -2369,16 +2382,7 @@ int rv770_dpm_init(struct radeon_device *rdev) | |||
2369 | pi->mvdd_control = | 2382 | pi->mvdd_control = |
2370 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, 0); | 2383 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, 0); |
2371 | 2384 | ||
2372 | if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, | 2385 | rv770_get_engine_memory_ss(rdev); |
2373 | &frev, &crev, &data_offset)) { | ||
2374 | pi->sclk_ss = true; | ||
2375 | pi->mclk_ss = true; | ||
2376 | pi->dynamic_ss = true; | ||
2377 | } else { | ||
2378 | pi->sclk_ss = false; | ||
2379 | pi->mclk_ss = false; | ||
2380 | pi->dynamic_ss = false; | ||
2381 | } | ||
2382 | 2386 | ||
2383 | pi->asi = RV770_ASI_DFLT; | 2387 | pi->asi = RV770_ASI_DFLT; |
2384 | pi->pasi = RV770_HASI_DFLT; | 2388 | pi->pasi = RV770_HASI_DFLT; |
@@ -2393,8 +2397,7 @@ int rv770_dpm_init(struct radeon_device *rdev) | |||
2393 | 2397 | ||
2394 | pi->dynamic_pcie_gen2 = true; | 2398 | pi->dynamic_pcie_gen2 = true; |
2395 | 2399 | ||
2396 | if (pi->gfx_clock_gating && | 2400 | if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE) |
2397 | (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)) | ||
2398 | pi->thermal_protection = true; | 2401 | pi->thermal_protection = true; |
2399 | else | 2402 | else |
2400 | pi->thermal_protection = false; | 2403 | pi->thermal_protection = false; |
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.h b/drivers/gpu/drm/radeon/rv770_dpm.h index 96b1b2a62a8a..9244effc6b59 100644 --- a/drivers/gpu/drm/radeon/rv770_dpm.h +++ b/drivers/gpu/drm/radeon/rv770_dpm.h | |||
@@ -275,6 +275,7 @@ void rv770_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev, | |||
275 | void rv770_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev, | 275 | void rv770_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev, |
276 | struct radeon_ps *new_ps, | 276 | struct radeon_ps *new_ps, |
277 | struct radeon_ps *old_ps); | 277 | struct radeon_ps *old_ps); |
278 | void rv770_get_engine_memory_ss(struct radeon_device *rdev); | ||
278 | 279 | ||
279 | /* smc */ | 280 | /* smc */ |
280 | int rv770_read_smc_soft_register(struct radeon_device *rdev, | 281 | int rv770_read_smc_soft_register(struct radeon_device *rdev, |
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 6ca904673a4f..daa8d2df8ec5 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c | |||
@@ -1663,9 +1663,13 @@ static int si_init_microcode(struct radeon_device *rdev) | |||
1663 | 1663 | ||
1664 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); | 1664 | snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); |
1665 | err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); | 1665 | err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); |
1666 | if (err) | 1666 | if (err) { |
1667 | goto out; | 1667 | printk(KERN_ERR |
1668 | if (rdev->smc_fw->size != smc_req_size) { | 1668 | "smc: error loading firmware \"%s\"\n", |
1669 | fw_name); | ||
1670 | release_firmware(rdev->smc_fw); | ||
1671 | rdev->smc_fw = NULL; | ||
1672 | } else if (rdev->smc_fw->size != smc_req_size) { | ||
1669 | printk(KERN_ERR | 1673 | printk(KERN_ERR |
1670 | "si_smc: Bogus length %zu in firmware \"%s\"\n", | 1674 | "si_smc: Bogus length %zu in firmware \"%s\"\n", |
1671 | rdev->smc_fw->size, fw_name); | 1675 | rdev->smc_fw->size, fw_name); |
@@ -6418,6 +6422,8 @@ static int si_startup(struct radeon_device *rdev) | |||
6418 | /* enable aspm */ | 6422 | /* enable aspm */ |
6419 | si_program_aspm(rdev); | 6423 | si_program_aspm(rdev); |
6420 | 6424 | ||
6425 | si_mc_program(rdev); | ||
6426 | |||
6421 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || | 6427 | if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || |
6422 | !rdev->rlc_fw || !rdev->mc_fw) { | 6428 | !rdev->rlc_fw || !rdev->mc_fw) { |
6423 | r = si_init_microcode(rdev); | 6429 | r = si_init_microcode(rdev); |
@@ -6437,7 +6443,6 @@ static int si_startup(struct radeon_device *rdev) | |||
6437 | if (r) | 6443 | if (r) |
6438 | return r; | 6444 | return r; |
6439 | 6445 | ||
6440 | si_mc_program(rdev); | ||
6441 | r = si_pcie_gart_enable(rdev); | 6446 | r = si_pcie_gart_enable(rdev); |
6442 | if (r) | 6447 | if (r) |
6443 | return r; | 6448 | return r; |
@@ -6621,7 +6626,7 @@ int si_suspend(struct radeon_device *rdev) | |||
6621 | si_cp_enable(rdev, false); | 6626 | si_cp_enable(rdev, false); |
6622 | cayman_dma_stop(rdev); | 6627 | cayman_dma_stop(rdev); |
6623 | if (rdev->has_uvd) { | 6628 | if (rdev->has_uvd) { |
6624 | r600_uvd_rbc_stop(rdev); | 6629 | r600_uvd_stop(rdev); |
6625 | radeon_uvd_suspend(rdev); | 6630 | radeon_uvd_suspend(rdev); |
6626 | } | 6631 | } |
6627 | si_irq_suspend(rdev); | 6632 | si_irq_suspend(rdev); |
@@ -6763,8 +6768,10 @@ void si_fini(struct radeon_device *rdev) | |||
6763 | radeon_vm_manager_fini(rdev); | 6768 | radeon_vm_manager_fini(rdev); |
6764 | radeon_ib_pool_fini(rdev); | 6769 | radeon_ib_pool_fini(rdev); |
6765 | radeon_irq_kms_fini(rdev); | 6770 | radeon_irq_kms_fini(rdev); |
6766 | if (rdev->has_uvd) | 6771 | if (rdev->has_uvd) { |
6772 | r600_uvd_stop(rdev); | ||
6767 | radeon_uvd_fini(rdev); | 6773 | radeon_uvd_fini(rdev); |
6774 | } | ||
6768 | si_pcie_gart_fini(rdev); | 6775 | si_pcie_gart_fini(rdev); |
6769 | r600_vram_scratch_fini(rdev); | 6776 | r600_vram_scratch_fini(rdev); |
6770 | radeon_gem_fini(rdev); | 6777 | radeon_gem_fini(rdev); |
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 41825575b403..88699e3cd868 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c | |||
@@ -2903,7 +2903,8 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, | |||
2903 | { | 2903 | { |
2904 | struct ni_ps *ps = ni_get_ps(rps); | 2904 | struct ni_ps *ps = ni_get_ps(rps); |
2905 | struct radeon_clock_and_voltage_limits *max_limits; | 2905 | struct radeon_clock_and_voltage_limits *max_limits; |
2906 | bool disable_mclk_switching; | 2906 | bool disable_mclk_switching = false; |
2907 | bool disable_sclk_switching = false; | ||
2907 | u32 mclk, sclk; | 2908 | u32 mclk, sclk; |
2908 | u16 vddc, vddci; | 2909 | u16 vddc, vddci; |
2909 | int i; | 2910 | int i; |
@@ -2911,8 +2912,11 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, | |||
2911 | if ((rdev->pm.dpm.new_active_crtc_count > 1) || | 2912 | if ((rdev->pm.dpm.new_active_crtc_count > 1) || |
2912 | ni_dpm_vblank_too_short(rdev)) | 2913 | ni_dpm_vblank_too_short(rdev)) |
2913 | disable_mclk_switching = true; | 2914 | disable_mclk_switching = true; |
2914 | else | 2915 | |
2915 | disable_mclk_switching = false; | 2916 | if (rps->vclk || rps->dclk) { |
2917 | disable_mclk_switching = true; | ||
2918 | disable_sclk_switching = true; | ||
2919 | } | ||
2916 | 2920 | ||
2917 | if (rdev->pm.dpm.ac_power) | 2921 | if (rdev->pm.dpm.ac_power) |
2918 | max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; | 2922 | max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac; |
@@ -2940,27 +2944,43 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, | |||
2940 | 2944 | ||
2941 | if (disable_mclk_switching) { | 2945 | if (disable_mclk_switching) { |
2942 | mclk = ps->performance_levels[ps->performance_level_count - 1].mclk; | 2946 | mclk = ps->performance_levels[ps->performance_level_count - 1].mclk; |
2943 | sclk = ps->performance_levels[0].sclk; | ||
2944 | vddc = ps->performance_levels[0].vddc; | ||
2945 | vddci = ps->performance_levels[ps->performance_level_count - 1].vddci; | 2947 | vddci = ps->performance_levels[ps->performance_level_count - 1].vddci; |
2946 | } else { | 2948 | } else { |
2947 | sclk = ps->performance_levels[0].sclk; | ||
2948 | mclk = ps->performance_levels[0].mclk; | 2949 | mclk = ps->performance_levels[0].mclk; |
2949 | vddc = ps->performance_levels[0].vddc; | ||
2950 | vddci = ps->performance_levels[0].vddci; | 2950 | vddci = ps->performance_levels[0].vddci; |
2951 | } | 2951 | } |
2952 | 2952 | ||
2953 | if (disable_sclk_switching) { | ||
2954 | sclk = ps->performance_levels[ps->performance_level_count - 1].sclk; | ||
2955 | vddc = ps->performance_levels[ps->performance_level_count - 1].vddc; | ||
2956 | } else { | ||
2957 | sclk = ps->performance_levels[0].sclk; | ||
2958 | vddc = ps->performance_levels[0].vddc; | ||
2959 | } | ||
2960 | |||
2953 | /* adjusted low state */ | 2961 | /* adjusted low state */ |
2954 | ps->performance_levels[0].sclk = sclk; | 2962 | ps->performance_levels[0].sclk = sclk; |
2955 | ps->performance_levels[0].mclk = mclk; | 2963 | ps->performance_levels[0].mclk = mclk; |
2956 | ps->performance_levels[0].vddc = vddc; | 2964 | ps->performance_levels[0].vddc = vddc; |
2957 | ps->performance_levels[0].vddci = vddci; | 2965 | ps->performance_levels[0].vddci = vddci; |
2958 | 2966 | ||
2959 | for (i = 1; i < ps->performance_level_count; i++) { | 2967 | if (disable_sclk_switching) { |
2960 | if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk) | 2968 | sclk = ps->performance_levels[0].sclk; |
2961 | ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk; | 2969 | for (i = 1; i < ps->performance_level_count; i++) { |
2962 | if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc) | 2970 | if (sclk < ps->performance_levels[i].sclk) |
2963 | ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc; | 2971 | sclk = ps->performance_levels[i].sclk; |
2972 | } | ||
2973 | for (i = 0; i < ps->performance_level_count; i++) { | ||
2974 | ps->performance_levels[i].sclk = sclk; | ||
2975 | ps->performance_levels[i].vddc = vddc; | ||
2976 | } | ||
2977 | } else { | ||
2978 | for (i = 1; i < ps->performance_level_count; i++) { | ||
2979 | if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk) | ||
2980 | ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk; | ||
2981 | if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc) | ||
2982 | ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc; | ||
2983 | } | ||
2964 | } | 2984 | } |
2965 | 2985 | ||
2966 | if (disable_mclk_switching) { | 2986 | if (disable_mclk_switching) { |
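
With the change above, a power state that carries UVD clocks (non-zero vclk or dclk) disables engine-clock switching as well as memory-clock switching, which in practice means pinning every level's sclk to the highest engine clock in the state and its vddc to the top level's voltage. A reduced sketch of that flattening step (struct perf_level and flatten_sclk() are stand-ins for the ni_ps handling in the driver):

    #include <stdio.h>

    struct perf_level { unsigned sclk; unsigned short vddc; };

    static void flatten_sclk(struct perf_level *lvl, int count, unsigned short vddc_high)
    {
        unsigned sclk = lvl[0].sclk;

        for (int i = 1; i < count; i++)
            if (lvl[i].sclk > sclk)
                sclk = lvl[i].sclk;         /* highest engine clock in the state */
        for (int i = 0; i < count; i++) {
            lvl[i].sclk = sclk;             /* no level may drop below it        */
            lvl[i].vddc = vddc_high;
        }
    }

    int main(void)
    {
        struct perf_level lvls[2] = { { 300, 900 }, { 800, 1100 } };
        flatten_sclk(lvls, 2, 1100);
        printf("%u %u\n", lvls[0].sclk, lvls[1].sclk);  /* 800 800 */
        return 0;
    }
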
@@ -6253,9 +6273,6 @@ int si_dpm_init(struct radeon_device *rdev) | |||
6253 | struct evergreen_power_info *eg_pi; | 6273 | struct evergreen_power_info *eg_pi; |
6254 | struct ni_power_info *ni_pi; | 6274 | struct ni_power_info *ni_pi; |
6255 | struct si_power_info *si_pi; | 6275 | struct si_power_info *si_pi; |
6256 | int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); | ||
6257 | u16 data_offset, size; | ||
6258 | u8 frev, crev; | ||
6259 | struct atom_clock_dividers dividers; | 6276 | struct atom_clock_dividers dividers; |
6260 | int ret; | 6277 | int ret; |
6261 | u32 mask; | 6278 | u32 mask; |
@@ -6346,16 +6363,7 @@ int si_dpm_init(struct radeon_device *rdev) | |||
6346 | si_pi->vddc_phase_shed_control = | 6363 | si_pi->vddc_phase_shed_control = |
6347 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, VOLTAGE_OBJ_PHASE_LUT); | 6364 | radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, VOLTAGE_OBJ_PHASE_LUT); |
6348 | 6365 | ||
6349 | if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, | 6366 | rv770_get_engine_memory_ss(rdev); |
6350 | &frev, &crev, &data_offset)) { | ||
6351 | pi->sclk_ss = true; | ||
6352 | pi->mclk_ss = true; | ||
6353 | pi->dynamic_ss = true; | ||
6354 | } else { | ||
6355 | pi->sclk_ss = false; | ||
6356 | pi->mclk_ss = false; | ||
6357 | pi->dynamic_ss = true; | ||
6358 | } | ||
6359 | 6367 | ||
6360 | pi->asi = RV770_ASI_DFLT; | 6368 | pi->asi = RV770_ASI_DFLT; |
6361 | pi->pasi = CYPRESS_HASI_DFLT; | 6369 | pi->pasi = CYPRESS_HASI_DFLT; |
@@ -6366,8 +6374,7 @@ int si_dpm_init(struct radeon_device *rdev) | |||
6366 | eg_pi->sclk_deep_sleep = true; | 6374 | eg_pi->sclk_deep_sleep = true; |
6367 | si_pi->sclk_deep_sleep_above_low = false; | 6375 | si_pi->sclk_deep_sleep_above_low = false; |
6368 | 6376 | ||
6369 | if (pi->gfx_clock_gating && | 6377 | if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE) |
6370 | (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)) | ||
6371 | pi->thermal_protection = true; | 6378 | pi->thermal_protection = true; |
6372 | else | 6379 | else |
6373 | pi->thermal_protection = false; | 6380 | pi->thermal_protection = false; |
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c index 7a5764843bfb..cd33084c7860 100644 --- a/drivers/hid/hid-logitech-dj.c +++ b/drivers/hid/hid-logitech-dj.c | |||
@@ -488,8 +488,6 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev) | |||
488 | if (djrcv_dev->querying_devices) | 488 | if (djrcv_dev->querying_devices) |
489 | return 0; | 489 | return 0; |
490 | 490 | ||
491 | djrcv_dev->querying_devices = true; | ||
492 | |||
493 | dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL); | 491 | dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL); |
494 | if (!dj_report) | 492 | if (!dj_report) |
495 | return -ENOMEM; | 493 | return -ENOMEM; |
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c index 0f34bca9f5e5..6099f50b28aa 100644 --- a/drivers/hwmon/adt7470.c +++ b/drivers/hwmon/adt7470.c | |||
@@ -215,7 +215,7 @@ static inline int adt7470_write_word_data(struct i2c_client *client, u8 reg, | |||
215 | u16 value) | 215 | u16 value) |
216 | { | 216 | { |
217 | return i2c_smbus_write_byte_data(client, reg, value & 0xFF) | 217 | return i2c_smbus_write_byte_data(client, reg, value & 0xFF) |
218 | && i2c_smbus_write_byte_data(client, reg + 1, value >> 8); | 218 | || i2c_smbus_write_byte_data(client, reg + 1, value >> 8); |
219 | } | 219 | } |
220 | 220 | ||
221 | static void adt7470_init_client(struct i2c_client *client) | 221 | static void adt7470_init_client(struct i2c_client *client) |
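
The adt7470 one-liner is an operator fix with real behavioural impact: i2c_smbus_write_byte_data() returns 0 on success and a negative errno on failure, so with && a successful first write (0) short-circuited the expression and the high byte was never written, while a failed first write could still be reported as success if the second call happened to succeed. With || the second write runs only after the first succeeds and any failure propagates. A toy demonstration of the short-circuit difference, with write_byte() standing in for the SMBus call:

    #include <stdio.h>

    static int writes_done;

    static int write_byte(int fail)     /* 0 = success, -1 = error */
    {
        writes_done++;
        return fail ? -1 : 0;
    }

    int main(void)
    {
        writes_done = 0;
        int with_and = write_byte(0) && write_byte(0);  /* second call skipped */
        printf("&&: result=%d, writes=%d\n", with_and, writes_done);

        writes_done = 0;
        int with_or = write_byte(0) || write_byte(0);   /* both bytes go out   */
        printf("||: result=%d, writes=%d\n", with_or, writes_done);
        return 0;
    }
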
diff --git a/drivers/i2c/busses/i2c-kempld.c b/drivers/i2c/busses/i2c-kempld.c index ccec916bc3eb..af8f65fb1c05 100644 --- a/drivers/i2c/busses/i2c-kempld.c +++ b/drivers/i2c/busses/i2c-kempld.c | |||
@@ -246,9 +246,9 @@ static void kempld_i2c_device_init(struct kempld_i2c_data *i2c) | |||
246 | bus_frequency = KEMPLD_I2C_FREQ_MAX; | 246 | bus_frequency = KEMPLD_I2C_FREQ_MAX; |
247 | 247 | ||
248 | if (pld->info.spec_major == 1) | 248 | if (pld->info.spec_major == 1) |
249 | prescale = pld->pld_clock / bus_frequency * 5 - 1000; | 249 | prescale = pld->pld_clock / (bus_frequency * 5) - 1000; |
250 | else | 250 | else |
251 | prescale = pld->pld_clock / bus_frequency * 4 - 3000; | 251 | prescale = pld->pld_clock / (bus_frequency * 4) - 3000; |
252 | 252 | ||
253 | if (prescale < 0) | 253 | if (prescale < 0) |
254 | prescale = 0; | 254 | prescale = 0; |
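
The kempld change is pure operator precedence: / and * bind equally and associate left to right, so pld_clock / bus_frequency * 5 divides first and then multiplies, giving a result 25 times larger (before the constant is subtracted) than the intended pld_clock / (bus_frequency * 5). A short illustration with made-up clock values, both assumed to be in the same unit:

    #include <stdio.h>

    int main(void)
    {
        int pld_clock = 33000, bus_frequency = 100;

        int wrong = pld_clock / bus_frequency * 5 - 1000;   /* (33000/100)*5 - 1000 = 650  */
        int right = pld_clock / (bus_frequency * 5) - 1000; /* 33000/500 - 1000     = -934 */

        printf("%d %d\n", wrong, right);    /* negative results are later clamped to 0 */
        return 0;
    }
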
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c index df8ff5aea5b5..e2e9a0dade96 100644 --- a/drivers/i2c/busses/i2c-mxs.c +++ b/drivers/i2c/busses/i2c-mxs.c | |||
@@ -493,7 +493,7 @@ static int mxs_i2c_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, | |||
493 | * based on this empirical measurement and a lot of previous frobbing. | 493 | * based on this empirical measurement and a lot of previous frobbing. |
494 | */ | 494 | */ |
495 | i2c->cmd_err = 0; | 495 | i2c->cmd_err = 0; |
496 | if (msg->len < 8) { | 496 | if (0) { /* disable PIO mode until a proper fix is made */ |
497 | ret = mxs_i2c_pio_setup_xfer(adap, msg, flags); | 497 | ret = mxs_i2c_pio_setup_xfer(adap, msg, flags); |
498 | if (ret) | 498 | if (ret) |
499 | mxs_i2c_reset(i2c); | 499 | mxs_i2c_reset(i2c); |
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c index 0ad208a69c29..3ceac3e91dde 100644 --- a/drivers/iio/adc/ti_am335x_adc.c +++ b/drivers/iio/adc/ti_am335x_adc.c | |||
@@ -60,7 +60,6 @@ static void tiadc_step_config(struct tiadc_device *adc_dev) | |||
60 | { | 60 | { |
61 | unsigned int stepconfig; | 61 | unsigned int stepconfig; |
62 | int i, steps; | 62 | int i, steps; |
63 | u32 step_en; | ||
64 | 63 | ||
65 | /* | 64 | /* |
66 | * There are 16 configurable steps and 8 analog input | 65 | * There are 16 configurable steps and 8 analog input |
@@ -86,8 +85,7 @@ static void tiadc_step_config(struct tiadc_device *adc_dev) | |||
86 | adc_dev->channel_step[i] = steps; | 85 | adc_dev->channel_step[i] = steps; |
87 | steps++; | 86 | steps++; |
88 | } | 87 | } |
89 | step_en = get_adc_step_mask(adc_dev); | 88 | |
90 | am335x_tsc_se_set(adc_dev->mfd_tscadc, step_en); | ||
91 | } | 89 | } |
92 | 90 | ||
93 | static const char * const chan_name_ain[] = { | 91 | static const char * const chan_name_ain[] = { |
@@ -142,10 +140,22 @@ static int tiadc_read_raw(struct iio_dev *indio_dev, | |||
142 | int *val, int *val2, long mask) | 140 | int *val, int *val2, long mask) |
143 | { | 141 | { |
144 | struct tiadc_device *adc_dev = iio_priv(indio_dev); | 142 | struct tiadc_device *adc_dev = iio_priv(indio_dev); |
145 | int i; | 143 | int i, map_val; |
146 | unsigned int fifo1count, read; | 144 | unsigned int fifo1count, read, stepid; |
147 | u32 step = UINT_MAX; | 145 | u32 step = UINT_MAX; |
148 | bool found = false; | 146 | bool found = false; |
147 | u32 step_en; | ||
148 | unsigned long timeout = jiffies + usecs_to_jiffies | ||
149 | (IDLE_TIMEOUT * adc_dev->channels); | ||
150 | step_en = get_adc_step_mask(adc_dev); | ||
151 | am335x_tsc_se_set(adc_dev->mfd_tscadc, step_en); | ||
152 | |||
153 | /* Wait for ADC sequencer to complete sampling */ | ||
154 | while (tiadc_readl(adc_dev, REG_ADCFSM) & SEQ_STATUS) { | ||
155 | if (time_after(jiffies, timeout)) | ||
156 | return -EAGAIN; | ||
157 | } | ||
158 | map_val = chan->channel + TOTAL_CHANNELS; | ||
149 | 159 | ||
150 | /* | 160 | /* |
151 | * When the sub-system is first enabled, | 161 | * When the sub-system is first enabled, |
@@ -170,12 +180,16 @@ static int tiadc_read_raw(struct iio_dev *indio_dev, | |||
170 | fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT); | 180 | fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT); |
171 | for (i = 0; i < fifo1count; i++) { | 181 | for (i = 0; i < fifo1count; i++) { |
172 | read = tiadc_readl(adc_dev, REG_FIFO1); | 182 | read = tiadc_readl(adc_dev, REG_FIFO1); |
173 | if (read >> 16 == step) { | 183 | stepid = read & FIFOREAD_CHNLID_MASK; |
174 | *val = read & 0xfff; | 184 | stepid = stepid >> 0x10; |
185 | |||
186 | if (stepid == map_val) { | ||
187 | read = read & FIFOREAD_DATA_MASK; | ||
175 | found = true; | 188 | found = true; |
189 | *val = read; | ||
176 | } | 190 | } |
177 | } | 191 | } |
178 | am335x_tsc_se_update(adc_dev->mfd_tscadc); | 192 | |
179 | if (found == false) | 193 | if (found == false) |
180 | return -EBUSY; | 194 | return -EBUSY; |
181 | return IIO_VAL_INT; | 195 | return IIO_VAL_INT; |
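
After this change the one-shot read path does its own sequencing: it arms the configured steps, busy-waits for the ADC sequencer to go idle with a deadline derived from IDLE_TIMEOUT and the channel count (returning -EAGAIN on timeout), and then drains FIFO1, matching entries by step ID (channel + TOTAL_CHANNELS) rather than by the per-channel step index used before. A stand-alone sketch of the bounded wait, with sequencer_busy() simulating the REG_ADCFSM/SEQ_STATUS poll:

    #include <stdio.h>

    static int busy_reads_left = 5;     /* pretend the hardware finishes after 5 polls */

    static int sequencer_busy(void)     /* stands in for tiadc_readl(REG_ADCFSM) & SEQ_STATUS */
    {
        return busy_reads_left-- > 0;
    }

    static int wait_for_sequencer(int max_polls)
    {
        int polls = 0;

        while (sequencer_busy())
            if (++polls > max_polls)
                return -1;              /* the driver returns -EAGAIN here */
        return 0;                       /* safe to drain the FIFO now      */
    }

    int main(void)
    {
        printf("%d\n", wait_for_sequencer(1000));   /* prints 0 */
        return 0;
    }
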
diff --git a/drivers/iio/industrialio-trigger.c b/drivers/iio/industrialio-trigger.c index ea8a4146620d..0dd9bb873130 100644 --- a/drivers/iio/industrialio-trigger.c +++ b/drivers/iio/industrialio-trigger.c | |||
@@ -127,12 +127,17 @@ static struct iio_trigger *iio_trigger_find_by_name(const char *name, | |||
127 | void iio_trigger_poll(struct iio_trigger *trig, s64 time) | 127 | void iio_trigger_poll(struct iio_trigger *trig, s64 time) |
128 | { | 128 | { |
129 | int i; | 129 | int i; |
130 | if (!trig->use_count) | 130 | |
131 | for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) | 131 | if (!atomic_read(&trig->use_count)) { |
132 | if (trig->subirqs[i].enabled) { | 132 | atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER); |
133 | trig->use_count++; | 133 | |
134 | for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) { | ||
135 | if (trig->subirqs[i].enabled) | ||
134 | generic_handle_irq(trig->subirq_base + i); | 136 | generic_handle_irq(trig->subirq_base + i); |
135 | } | 137 | else |
138 | iio_trigger_notify_done(trig); | ||
139 | } | ||
140 | } | ||
136 | } | 141 | } |
137 | EXPORT_SYMBOL(iio_trigger_poll); | 142 | EXPORT_SYMBOL(iio_trigger_poll); |
138 | 143 | ||
@@ -146,19 +151,24 @@ EXPORT_SYMBOL(iio_trigger_generic_data_rdy_poll); | |||
146 | void iio_trigger_poll_chained(struct iio_trigger *trig, s64 time) | 151 | void iio_trigger_poll_chained(struct iio_trigger *trig, s64 time) |
147 | { | 152 | { |
148 | int i; | 153 | int i; |
149 | if (!trig->use_count) | 154 | |
150 | for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) | 155 | if (!atomic_read(&trig->use_count)) { |
151 | if (trig->subirqs[i].enabled) { | 156 | atomic_set(&trig->use_count, CONFIG_IIO_CONSUMERS_PER_TRIGGER); |
152 | trig->use_count++; | 157 | |
158 | for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) { | ||
159 | if (trig->subirqs[i].enabled) | ||
153 | handle_nested_irq(trig->subirq_base + i); | 160 | handle_nested_irq(trig->subirq_base + i); |
154 | } | 161 | else |
162 | iio_trigger_notify_done(trig); | ||
163 | } | ||
164 | } | ||
155 | } | 165 | } |
156 | EXPORT_SYMBOL(iio_trigger_poll_chained); | 166 | EXPORT_SYMBOL(iio_trigger_poll_chained); |
157 | 167 | ||
158 | void iio_trigger_notify_done(struct iio_trigger *trig) | 168 | void iio_trigger_notify_done(struct iio_trigger *trig) |
159 | { | 169 | { |
160 | trig->use_count--; | 170 | if (atomic_dec_and_test(&trig->use_count) && trig->ops && |
161 | if (trig->use_count == 0 && trig->ops && trig->ops->try_reenable) | 171 | trig->ops->try_reenable) |
162 | if (trig->ops->try_reenable(trig)) | 172 | if (trig->ops->try_reenable(trig)) |
163 | /* Missed an interrupt so launch new poll now */ | 173 | /* Missed an interrupt so launch new poll now */ |
164 | iio_trigger_poll(trig, 0); | 174 | iio_trigger_poll(trig, 0); |
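
The trigger rework turns use_count into an atomic_t that is preloaded with the number of consumer slots: every slot, whether its sub-irq is enabled or not, ends up in iio_trigger_notify_done(), and only the decrement that takes the counter to zero attempts to re-enable the trigger. A small C11-atomics sketch of that countdown, with printf standing in for the consumer handler and the re-enable hook:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define CONSUMERS_PER_TRIGGER 2     /* CONFIG_IIO_CONSUMERS_PER_TRIGGER */

    struct trigger {
        atomic_int use_count;
        bool enabled[CONSUMERS_PER_TRIGGER];
    };

    static void notify_done(struct trigger *t)
    {
        /* analogue of atomic_dec_and_test(): only the last consumer acts */
        if (atomic_fetch_sub(&t->use_count, 1) == 1)
            printf("last consumer done, trigger may be re-enabled\n");
    }

    static void trigger_poll(struct trigger *t)
    {
        if (atomic_load(&t->use_count))
            return;                     /* a previous poll is still in flight */
        atomic_store(&t->use_count, CONSUMERS_PER_TRIGGER);

        for (int i = 0; i < CONSUMERS_PER_TRIGGER; i++) {
            if (t->enabled[i])
                printf("slot %d: consumer handler runs\n", i);
            /* enabled or not, every slot is accounted for exactly once */
            notify_done(t);
        }
    }

    int main(void)
    {
        struct trigger t = { 0, { true, false } };
        trigger_poll(&t);
        return 0;
    }
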
diff --git a/drivers/media/i2c/ml86v7667.c b/drivers/media/i2c/ml86v7667.c index efdc873e58d1..a9857022f71d 100644 --- a/drivers/media/i2c/ml86v7667.c +++ b/drivers/media/i2c/ml86v7667.c | |||
@@ -117,7 +117,7 @@ static int ml86v7667_s_ctrl(struct v4l2_ctrl *ctrl) | |||
117 | { | 117 | { |
118 | struct v4l2_subdev *sd = to_sd(ctrl); | 118 | struct v4l2_subdev *sd = to_sd(ctrl); |
119 | struct i2c_client *client = v4l2_get_subdevdata(sd); | 119 | struct i2c_client *client = v4l2_get_subdevdata(sd); |
120 | int ret; | 120 | int ret = -EINVAL; |
121 | 121 | ||
122 | switch (ctrl->id) { | 122 | switch (ctrl->id) { |
123 | case V4L2_CID_BRIGHTNESS: | 123 | case V4L2_CID_BRIGHTNESS: |
@@ -157,7 +157,7 @@ static int ml86v7667_s_ctrl(struct v4l2_ctrl *ctrl) | |||
157 | break; | 157 | break; |
158 | } | 158 | } |
159 | 159 | ||
160 | return 0; | 160 | return ret; |
161 | } | 161 | } |
162 | 162 | ||
163 | static int ml86v7667_querystd(struct v4l2_subdev *sd, v4l2_std_id *std) | 163 | static int ml86v7667_querystd(struct v4l2_subdev *sd, v4l2_std_id *std) |
diff --git a/drivers/media/platform/coda.c b/drivers/media/platform/coda.c index df4ada880e42..bd9405df1bd6 100644 --- a/drivers/media/platform/coda.c +++ b/drivers/media/platform/coda.c | |||
@@ -1987,7 +1987,7 @@ MODULE_DEVICE_TABLE(platform, coda_platform_ids); | |||
1987 | 1987 | ||
1988 | #ifdef CONFIG_OF | 1988 | #ifdef CONFIG_OF |
1989 | static const struct of_device_id coda_dt_ids[] = { | 1989 | static const struct of_device_id coda_dt_ids[] = { |
1990 | { .compatible = "fsl,imx27-vpu", .data = &coda_platform_ids[CODA_IMX27] }, | 1990 | { .compatible = "fsl,imx27-vpu", .data = &coda_devdata[CODA_IMX27] }, |
1991 | { .compatible = "fsl,imx53-vpu", .data = &coda_devdata[CODA_IMX53] }, | 1991 | { .compatible = "fsl,imx53-vpu", .data = &coda_devdata[CODA_IMX53] }, |
1992 | { /* sentinel */ } | 1992 | { /* sentinel */ } |
1993 | }; | 1993 | }; |
diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c index 553d87e5ceab..fd6289d60cde 100644 --- a/drivers/media/platform/s5p-g2d/g2d.c +++ b/drivers/media/platform/s5p-g2d/g2d.c | |||
@@ -784,6 +784,7 @@ static int g2d_probe(struct platform_device *pdev) | |||
784 | } | 784 | } |
785 | *vfd = g2d_videodev; | 785 | *vfd = g2d_videodev; |
786 | vfd->lock = &dev->mutex; | 786 | vfd->lock = &dev->mutex; |
787 | vfd->v4l2_dev = &dev->v4l2_dev; | ||
787 | ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0); | 788 | ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0); |
788 | if (ret) { | 789 | if (ret) { |
789 | v4l2_err(&dev->v4l2_dev, "Failed to register video device\n"); | 790 | v4l2_err(&dev->v4l2_dev, "Failed to register video device\n"); |
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c index 5296385153d5..4f6dd42c9adb 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c | |||
@@ -344,7 +344,7 @@ static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f) | |||
344 | pix_mp->num_planes = 2; | 344 | pix_mp->num_planes = 2; |
345 | /* Set pixelformat to the format in which MFC | 345 | /* Set pixelformat to the format in which MFC |
346 | outputs the decoded frame */ | 346 | outputs the decoded frame */ |
347 | pix_mp->pixelformat = V4L2_PIX_FMT_NV12MT; | 347 | pix_mp->pixelformat = ctx->dst_fmt->fourcc; |
348 | pix_mp->plane_fmt[0].bytesperline = ctx->buf_width; | 348 | pix_mp->plane_fmt[0].bytesperline = ctx->buf_width; |
349 | pix_mp->plane_fmt[0].sizeimage = ctx->luma_size; | 349 | pix_mp->plane_fmt[0].sizeimage = ctx->luma_size; |
350 | pix_mp->plane_fmt[1].bytesperline = ctx->buf_width; | 350 | pix_mp->plane_fmt[1].bytesperline = ctx->buf_width; |
@@ -382,10 +382,16 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f) | |||
382 | mfc_err("Unsupported format for source.\n"); | 382 | mfc_err("Unsupported format for source.\n"); |
383 | return -EINVAL; | 383 | return -EINVAL; |
384 | } | 384 | } |
385 | if (!IS_MFCV6(dev) && (fmt->fourcc == V4L2_PIX_FMT_VP8)) { | 385 | if (fmt->codec_mode == S5P_FIMV_CODEC_NONE) { |
386 | mfc_err("Not supported format.\n"); | 386 | mfc_err("Unknown codec\n"); |
387 | return -EINVAL; | 387 | return -EINVAL; |
388 | } | 388 | } |
389 | if (!IS_MFCV6(dev)) { | ||
390 | if (fmt->fourcc == V4L2_PIX_FMT_VP8) { | ||
391 | mfc_err("Not supported format.\n"); | ||
392 | return -EINVAL; | ||
393 | } | ||
394 | } | ||
389 | } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { | 395 | } else if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { |
390 | fmt = find_format(f, MFC_FMT_RAW); | 396 | fmt = find_format(f, MFC_FMT_RAW); |
391 | if (!fmt) { | 397 | if (!fmt) { |
@@ -411,7 +417,6 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f) | |||
411 | struct s5p_mfc_dev *dev = video_drvdata(file); | 417 | struct s5p_mfc_dev *dev = video_drvdata(file); |
412 | struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); | 418 | struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); |
413 | int ret = 0; | 419 | int ret = 0; |
414 | struct s5p_mfc_fmt *fmt; | ||
415 | struct v4l2_pix_format_mplane *pix_mp; | 420 | struct v4l2_pix_format_mplane *pix_mp; |
416 | 421 | ||
417 | mfc_debug_enter(); | 422 | mfc_debug_enter(); |
@@ -425,54 +430,32 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f) | |||
425 | goto out; | 430 | goto out; |
426 | } | 431 | } |
427 | if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { | 432 | if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { |
428 | fmt = find_format(f, MFC_FMT_RAW); | 433 | /* dst_fmt is validated by call to vidioc_try_fmt */ |
429 | if (!fmt) { | 434 | ctx->dst_fmt = find_format(f, MFC_FMT_RAW); |
430 | mfc_err("Unsupported format for source.\n"); | 435 | ret = 0; |
431 | return -EINVAL; | ||
432 | } | ||
433 | if (!IS_MFCV6(dev) && (fmt->fourcc != V4L2_PIX_FMT_NV12MT)) { | ||
434 | mfc_err("Not supported format.\n"); | ||
435 | return -EINVAL; | ||
436 | } else if (IS_MFCV6(dev) && | ||
437 | (fmt->fourcc == V4L2_PIX_FMT_NV12MT)) { | ||
438 | mfc_err("Not supported format.\n"); | ||
439 | return -EINVAL; | ||
440 | } | ||
441 | ctx->dst_fmt = fmt; | ||
442 | mfc_debug_leave(); | ||
443 | return ret; | ||
444 | } else if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { | ||
445 | mfc_err("Wrong type error for S_FMT : %d", f->type); | ||
446 | return -EINVAL; | ||
447 | } | ||
448 | fmt = find_format(f, MFC_FMT_DEC); | ||
449 | if (!fmt || fmt->codec_mode == S5P_MFC_CODEC_NONE) { | ||
450 | mfc_err("Unknown codec\n"); | ||
451 | ret = -EINVAL; | ||
452 | goto out; | 436 | goto out; |
453 | } | 437 | } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { |
454 | if (fmt->type != MFC_FMT_DEC) { | 438 | /* src_fmt is validated by call to vidioc_try_fmt */ |
455 | mfc_err("Wrong format selected, you should choose " | 439 | ctx->src_fmt = find_format(f, MFC_FMT_DEC); |
456 | "format for decoding\n"); | 440 | ctx->codec_mode = ctx->src_fmt->codec_mode; |
441 | mfc_debug(2, "The codec number is: %d\n", ctx->codec_mode); | ||
442 | pix_mp->height = 0; | ||
443 | pix_mp->width = 0; | ||
444 | if (pix_mp->plane_fmt[0].sizeimage) | ||
445 | ctx->dec_src_buf_size = pix_mp->plane_fmt[0].sizeimage; | ||
446 | else | ||
447 | pix_mp->plane_fmt[0].sizeimage = ctx->dec_src_buf_size = | ||
448 | DEF_CPB_SIZE; | ||
449 | pix_mp->plane_fmt[0].bytesperline = 0; | ||
450 | ctx->state = MFCINST_INIT; | ||
451 | ret = 0; | ||
452 | goto out; | ||
453 | } else { | ||
454 | mfc_err("Wrong type error for S_FMT : %d", f->type); | ||
457 | ret = -EINVAL; | 455 | ret = -EINVAL; |
458 | goto out; | 456 | goto out; |
459 | } | 457 | } |
460 | if (!IS_MFCV6(dev) && (fmt->fourcc == V4L2_PIX_FMT_VP8)) { | 458 | |
461 | mfc_err("Not supported format.\n"); | ||
462 | return -EINVAL; | ||
463 | } | ||
464 | ctx->src_fmt = fmt; | ||
465 | ctx->codec_mode = fmt->codec_mode; | ||
466 | mfc_debug(2, "The codec number is: %d\n", ctx->codec_mode); | ||
467 | pix_mp->height = 0; | ||
468 | pix_mp->width = 0; | ||
469 | if (pix_mp->plane_fmt[0].sizeimage) | ||
470 | ctx->dec_src_buf_size = pix_mp->plane_fmt[0].sizeimage; | ||
471 | else | ||
472 | pix_mp->plane_fmt[0].sizeimage = ctx->dec_src_buf_size = | ||
473 | DEF_CPB_SIZE; | ||
474 | pix_mp->plane_fmt[0].bytesperline = 0; | ||
475 | ctx->state = MFCINST_INIT; | ||
476 | out: | 459 | out: |
477 | mfc_debug_leave(); | 460 | mfc_debug_leave(); |
478 | return ret; | 461 | return ret; |
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c index 2549967b2f85..59e56f4c8ce3 100644 --- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c +++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c | |||
@@ -906,6 +906,7 @@ static int vidioc_g_fmt(struct file *file, void *priv, struct v4l2_format *f) | |||
906 | 906 | ||
907 | static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f) | 907 | static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f) |
908 | { | 908 | { |
909 | struct s5p_mfc_dev *dev = video_drvdata(file); | ||
909 | struct s5p_mfc_fmt *fmt; | 910 | struct s5p_mfc_fmt *fmt; |
910 | struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp; | 911 | struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp; |
911 | 912 | ||
@@ -930,6 +931,18 @@ static int vidioc_try_fmt(struct file *file, void *priv, struct v4l2_format *f) | |||
930 | return -EINVAL; | 931 | return -EINVAL; |
931 | } | 932 | } |
932 | 933 | ||
934 | if (!IS_MFCV6(dev)) { | ||
935 | if (fmt->fourcc == V4L2_PIX_FMT_NV12MT_16X16) { | ||
936 | mfc_err("Not supported format.\n"); | ||
937 | return -EINVAL; | ||
938 | } | ||
939 | } else if (IS_MFCV6(dev)) { | ||
940 | if (fmt->fourcc == V4L2_PIX_FMT_NV12MT) { | ||
941 | mfc_err("Not supported format.\n"); | ||
942 | return -EINVAL; | ||
943 | } | ||
944 | } | ||
945 | |||
933 | if (fmt->num_planes != pix_fmt_mp->num_planes) { | 946 | if (fmt->num_planes != pix_fmt_mp->num_planes) { |
934 | mfc_err("failed to try output format\n"); | 947 | mfc_err("failed to try output format\n"); |
935 | return -EINVAL; | 948 | return -EINVAL; |
@@ -947,7 +960,6 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f) | |||
947 | { | 960 | { |
948 | struct s5p_mfc_dev *dev = video_drvdata(file); | 961 | struct s5p_mfc_dev *dev = video_drvdata(file); |
949 | struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); | 962 | struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); |
950 | struct s5p_mfc_fmt *fmt; | ||
951 | struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp; | 963 | struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp; |
952 | int ret = 0; | 964 | int ret = 0; |
953 | 965 | ||
@@ -960,13 +972,9 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f) | |||
960 | goto out; | 972 | goto out; |
961 | } | 973 | } |
962 | if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { | 974 | if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { |
963 | fmt = find_format(f, MFC_FMT_ENC); | 975 | /* dst_fmt is validated by call to vidioc_try_fmt */ |
964 | if (!fmt) { | 976 | ctx->dst_fmt = find_format(f, MFC_FMT_ENC); |
965 | mfc_err("failed to set capture format\n"); | ||
966 | return -EINVAL; | ||
967 | } | ||
968 | ctx->state = MFCINST_INIT; | 977 | ctx->state = MFCINST_INIT; |
969 | ctx->dst_fmt = fmt; | ||
970 | ctx->codec_mode = ctx->dst_fmt->codec_mode; | 978 | ctx->codec_mode = ctx->dst_fmt->codec_mode; |
971 | ctx->enc_dst_buf_size = pix_fmt_mp->plane_fmt[0].sizeimage; | 979 | ctx->enc_dst_buf_size = pix_fmt_mp->plane_fmt[0].sizeimage; |
972 | pix_fmt_mp->plane_fmt[0].bytesperline = 0; | 980 | pix_fmt_mp->plane_fmt[0].bytesperline = 0; |
@@ -987,28 +995,8 @@ static int vidioc_s_fmt(struct file *file, void *priv, struct v4l2_format *f) | |||
987 | } | 995 | } |
988 | mfc_debug(2, "Got instance number: %d\n", ctx->inst_no); | 996 | mfc_debug(2, "Got instance number: %d\n", ctx->inst_no); |
989 | } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { | 997 | } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { |
990 | fmt = find_format(f, MFC_FMT_RAW); | 998 | /* src_fmt is validated by call to vidioc_try_fmt */ |
991 | if (!fmt) { | 999 | ctx->src_fmt = find_format(f, MFC_FMT_RAW); |
992 | mfc_err("failed to set output format\n"); | ||
993 | return -EINVAL; | ||
994 | } | ||
995 | |||
996 | if (!IS_MFCV6(dev) && | ||
997 | (fmt->fourcc == V4L2_PIX_FMT_NV12MT_16X16)) { | ||
998 | mfc_err("Not supported format.\n"); | ||
999 | return -EINVAL; | ||
1000 | } else if (IS_MFCV6(dev) && | ||
1001 | (fmt->fourcc == V4L2_PIX_FMT_NV12MT)) { | ||
1002 | mfc_err("Not supported format.\n"); | ||
1003 | return -EINVAL; | ||
1004 | } | ||
1005 | |||
1006 | if (fmt->num_planes != pix_fmt_mp->num_planes) { | ||
1007 | mfc_err("failed to set output format\n"); | ||
1008 | ret = -EINVAL; | ||
1009 | goto out; | ||
1010 | } | ||
1011 | ctx->src_fmt = fmt; | ||
1012 | ctx->img_width = pix_fmt_mp->width; | 1000 | ctx->img_width = pix_fmt_mp->width; |
1013 | ctx->img_height = pix_fmt_mp->height; | 1001 | ctx->img_height = pix_fmt_mp->height; |
1014 | mfc_debug(2, "codec number: %d\n", ctx->src_fmt->codec_mode); | 1002 | mfc_debug(2, "codec number: %d\n", ctx->src_fmt->codec_mode); |
diff --git a/drivers/media/usb/em28xx/em28xx-i2c.c b/drivers/media/usb/em28xx/em28xx-i2c.c index 4851cc2e4a4d..c4ff9739a7ae 100644 --- a/drivers/media/usb/em28xx/em28xx-i2c.c +++ b/drivers/media/usb/em28xx/em28xx-i2c.c | |||
@@ -726,7 +726,7 @@ static int em28xx_i2c_eeprom(struct em28xx *dev, unsigned bus, | |||
726 | 726 | ||
727 | *eedata = data; | 727 | *eedata = data; |
728 | *eedata_len = len; | 728 | *eedata_len = len; |
729 | dev_config = (void *)eedata; | 729 | dev_config = (void *)*eedata; |
730 | 730 | ||
731 | switch (le16_to_cpu(dev_config->chip_conf) >> 4 & 0x3) { | 731 | switch (le16_to_cpu(dev_config->chip_conf) >> 4 & 0x3) { |
732 | case 0: | 732 | case 0: |
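
The em28xx one-character fix is a classic output-parameter slip: eedata is a pointer to the caller's buffer pointer, so casting it without dereferencing made dev_config alias the pointer variable itself rather than the EEPROM contents. A toy reproduction (struct cfg and get_eeprom() are illustrative, not the driver's types):

    #include <stdio.h>
    #include <stdlib.h>

    struct cfg { unsigned short chip_conf; };

    static void get_eeprom(unsigned char **eedata)
    {
        unsigned char *data = malloc(2);
        data[0] = 0x34;
        data[1] = 0x12;
        *eedata = data;

        struct cfg *wrong = (void *)eedata;   /* aliases the pointer variable */
        struct cfg *right = (void *)*eedata;  /* aliases the EEPROM bytes     */
        printf("buffer at %p, wrong cast points at %p\n", (void *)right, (void *)wrong);
        printf("chip_conf = %#x\n", right->chip_conf);   /* 0x1234 on little-endian */
    }

    int main(void)
    {
        unsigned char *buf;
        get_eeprom(&buf);
        free(buf);
        return 0;
    }
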
diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c index cb694055ba7d..6e5070774dc2 100644 --- a/drivers/media/usb/hdpvr/hdpvr-core.c +++ b/drivers/media/usb/hdpvr/hdpvr-core.c | |||
@@ -303,6 +303,11 @@ static int hdpvr_probe(struct usb_interface *interface, | |||
303 | 303 | ||
304 | dev->workqueue = 0; | 304 | dev->workqueue = 0; |
305 | 305 | ||
306 | /* init video transfer queues first of all */ | ||
307 | /* to prevent oops in hdpvr_delete() on error paths */ | ||
308 | INIT_LIST_HEAD(&dev->free_buff_list); | ||
309 | INIT_LIST_HEAD(&dev->rec_buff_list); | ||
310 | |||
306 | /* register v4l2_device early so it can be used for printks */ | 311 | /* register v4l2_device early so it can be used for printks */ |
307 | if (v4l2_device_register(&interface->dev, &dev->v4l2_dev)) { | 312 | if (v4l2_device_register(&interface->dev, &dev->v4l2_dev)) { |
308 | dev_err(&interface->dev, "v4l2_device_register failed\n"); | 313 | dev_err(&interface->dev, "v4l2_device_register failed\n"); |
@@ -325,10 +330,6 @@ static int hdpvr_probe(struct usb_interface *interface, | |||
325 | if (!dev->workqueue) | 330 | if (!dev->workqueue) |
326 | goto error; | 331 | goto error; |
327 | 332 | ||
328 | /* init video transfer queues */ | ||
329 | INIT_LIST_HEAD(&dev->free_buff_list); | ||
330 | INIT_LIST_HEAD(&dev->rec_buff_list); | ||
331 | |||
332 | dev->options = hdpvr_default_options; | 333 | dev->options = hdpvr_default_options; |
333 | 334 | ||
334 | if (default_video_input < HDPVR_VIDEO_INPUTS) | 335 | if (default_video_input < HDPVR_VIDEO_INPUTS) |
@@ -405,7 +406,7 @@ static int hdpvr_probe(struct usb_interface *interface, | |||
405 | video_nr[atomic_inc_return(&dev_nr)]); | 406 | video_nr[atomic_inc_return(&dev_nr)]); |
406 | if (retval < 0) { | 407 | if (retval < 0) { |
407 | v4l2_err(&dev->v4l2_dev, "registering videodev failed\n"); | 408 | v4l2_err(&dev->v4l2_dev, "registering videodev failed\n"); |
408 | goto error; | 409 | goto reg_fail; |
409 | } | 410 | } |
410 | 411 | ||
411 | /* let the user know what node this device is now attached to */ | 412 | /* let the user know what node this device is now attached to */ |
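
The hdpvr ordering change follows a common probe() rule: anything the shared error path will touch, here the two buffer lists that hdpvr_delete() walks, must be initialized before the first failure can jump to that path, and the late video_register_device() failure gets its own reg_fail label, presumably so the unwind matches how far initialization actually got. A minimal sketch of the pattern (struct node is a stand-in for the kernel's list_head):

    #include <stddef.h>

    struct node { struct node *next; };
    struct dev  { struct node free_list; struct node rec_list; };

    static void release_all(struct node *head)
    {
        /* only safe if head->next was initialized, even for an empty list */
        for (struct node *n = head->next; n; n = n->next)
            ;   /* a real driver would free n here */
    }

    static int probe(struct dev *d, int fail_early)
    {
        d->free_list.next = NULL;   /* init first, before any error exit */
        d->rec_list.next = NULL;

        if (fail_early)
            goto error;             /* the error path may now walk the lists */
        return 0;
    error:
        release_all(&d->free_list);
        release_all(&d->rec_list);
        return -1;
    }

    int main(void)
    {
        struct dev d;
        probe(&d, 1);   /* takes the error path; the list walk is safe */
        return 0;
    }
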
diff --git a/drivers/media/usb/usbtv/Kconfig b/drivers/media/usb/usbtv/Kconfig index 8864436464bf..7c5b86006ee6 100644 --- a/drivers/media/usb/usbtv/Kconfig +++ b/drivers/media/usb/usbtv/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config VIDEO_USBTV | 1 | config VIDEO_USBTV |
2 | tristate "USBTV007 video capture support" | 2 | tristate "USBTV007 video capture support" |
3 | depends on VIDEO_DEV | 3 | depends on VIDEO_V4L2 |
4 | select VIDEOBUF2_VMALLOC | 4 | select VIDEOBUF2_VMALLOC |
5 | 5 | ||
6 | ---help--- | 6 | ---help--- |
diff --git a/drivers/media/usb/usbtv/usbtv.c b/drivers/media/usb/usbtv/usbtv.c index bf43f874685e..91650173941a 100644 --- a/drivers/media/usb/usbtv/usbtv.c +++ b/drivers/media/usb/usbtv/usbtv.c | |||
@@ -57,7 +57,7 @@ | |||
57 | #define USBTV_CHUNK_SIZE 256 | 57 | #define USBTV_CHUNK_SIZE 256 |
58 | #define USBTV_CHUNK 240 | 58 | #define USBTV_CHUNK 240 |
59 | #define USBTV_CHUNKS (USBTV_WIDTH * USBTV_HEIGHT \ | 59 | #define USBTV_CHUNKS (USBTV_WIDTH * USBTV_HEIGHT \ |
60 | / 2 / USBTV_CHUNK) | 60 | / 4 / USBTV_CHUNK) |
61 | 61 | ||
62 | /* Chunk header. */ | 62 | /* Chunk header. */ |
63 | #define USBTV_MAGIC_OK(chunk) ((be32_to_cpu(chunk[0]) & 0xff000000) \ | 63 | #define USBTV_MAGIC_OK(chunk) ((be32_to_cpu(chunk[0]) & 0xff000000) \ |
@@ -89,6 +89,7 @@ struct usbtv { | |||
89 | /* Number of the currently processed frame, useful to find | 89 | /* Number of the currently processed frame, useful to find |
90 | * out when a new one begins. */ | 90 | * out when a new one begins. */ |
91 | u32 frame_id; | 91 | u32 frame_id; |
92 | int chunks_done; | ||
92 | 93 | ||
93 | int iso_size; | 94 | int iso_size; |
94 | unsigned int sequence; | 95 | unsigned int sequence; |
@@ -202,6 +203,26 @@ static int usbtv_setup_capture(struct usbtv *usbtv) | |||
202 | return 0; | 203 | return 0; |
203 | } | 204 | } |
204 | 205 | ||
206 | /* Copy data from chunk into a frame buffer, deinterlacing the data | ||
207 | * into every second line. Unfortunately, they don't align nicely into | ||
208 | * 720 pixel lines, as the chunk is 240 words long, which is 480 pixels. | ||
209 | * Therefore, we break down the chunk into two halves before copying, | ||
210 | * so that we can interleave a line if needed. */ | ||
211 | static void usbtv_chunk_to_vbuf(u32 *frame, u32 *src, int chunk_no, int odd) | ||
212 | { | ||
213 | int half; | ||
214 | |||
215 | for (half = 0; half < 2; half++) { | ||
216 | int part_no = chunk_no * 2 + half; | ||
217 | int line = part_no / 3; | ||
218 | int part_index = (line * 2 + !odd) * 3 + (part_no % 3); | ||
219 | |||
220 | u32 *dst = &frame[part_index * USBTV_CHUNK/2]; | ||
221 | memcpy(dst, src, USBTV_CHUNK/2 * sizeof(*src)); | ||
222 | src += USBTV_CHUNK/2; | ||
223 | } | ||
224 | } | ||
225 | |||
205 | /* Called for each 256-byte image chunk. | 226 | /* Called for each 256-byte image chunk. |
206 | * First word identifies the chunk, followed by 240 words of image | 227 | * First word identifies the chunk, followed by 240 words of image |
207 | * data and padding. */ | 228 | * data and padding. */ |
@@ -218,17 +239,17 @@ static void usbtv_image_chunk(struct usbtv *usbtv, u32 *chunk) | |||
218 | frame_id = USBTV_FRAME_ID(chunk); | 239 | frame_id = USBTV_FRAME_ID(chunk); |
219 | odd = USBTV_ODD(chunk); | 240 | odd = USBTV_ODD(chunk); |
220 | chunk_no = USBTV_CHUNK_NO(chunk); | 241 | chunk_no = USBTV_CHUNK_NO(chunk); |
221 | |||
222 | /* Deinterlace. TODO: Use interlaced frame format. */ | ||
223 | chunk_no = (chunk_no - chunk_no % 3) * 2 + chunk_no % 3; | ||
224 | chunk_no += !odd * 3; | ||
225 | |||
226 | if (chunk_no >= USBTV_CHUNKS) | 242 | if (chunk_no >= USBTV_CHUNKS) |
227 | return; | 243 | return; |
228 | 244 | ||
229 | /* Beginning of a frame. */ | 245 | /* Beginning of a frame. */ |
230 | if (chunk_no == 0) | 246 | if (chunk_no == 0) { |
231 | usbtv->frame_id = frame_id; | 247 | usbtv->frame_id = frame_id; |
248 | usbtv->chunks_done = 0; | ||
249 | } | ||
250 | |||
251 | if (usbtv->frame_id != frame_id) | ||
252 | return; | ||
232 | 253 | ||
233 | spin_lock_irqsave(&usbtv->buflock, flags); | 254 | spin_lock_irqsave(&usbtv->buflock, flags); |
234 | if (list_empty(&usbtv->bufs)) { | 255 | if (list_empty(&usbtv->bufs)) { |
@@ -241,19 +262,23 @@ static void usbtv_image_chunk(struct usbtv *usbtv, u32 *chunk) | |||
241 | buf = list_first_entry(&usbtv->bufs, struct usbtv_buf, list); | 262 | buf = list_first_entry(&usbtv->bufs, struct usbtv_buf, list); |
242 | frame = vb2_plane_vaddr(&buf->vb, 0); | 263 | frame = vb2_plane_vaddr(&buf->vb, 0); |
243 | 264 | ||
244 | /* Copy the chunk. */ | 265 | /* Copy the chunk data. */ |
245 | memcpy(&frame[chunk_no * USBTV_CHUNK], &chunk[1], | 266 | usbtv_chunk_to_vbuf(frame, &chunk[1], chunk_no, odd); |
246 | USBTV_CHUNK * sizeof(chunk[1])); | 267 | usbtv->chunks_done++; |
247 | 268 | ||
248 | /* Last chunk in a frame, signalling an end */ | 269 | /* Last chunk in a frame, signalling an end */ |
249 | if (usbtv->frame_id && chunk_no == USBTV_CHUNKS-1) { | 270 | if (odd && chunk_no == USBTV_CHUNKS-1) { |
250 | int size = vb2_plane_size(&buf->vb, 0); | 271 | int size = vb2_plane_size(&buf->vb, 0); |
272 | enum vb2_buffer_state state = usbtv->chunks_done == | ||
273 | USBTV_CHUNKS ? | ||
274 | VB2_BUF_STATE_DONE : | ||
275 | VB2_BUF_STATE_ERROR; | ||
251 | 276 | ||
252 | buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED; | 277 | buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED; |
253 | buf->vb.v4l2_buf.sequence = usbtv->sequence++; | 278 | buf->vb.v4l2_buf.sequence = usbtv->sequence++; |
254 | v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp); | 279 | v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp); |
255 | vb2_set_plane_payload(&buf->vb, 0, size); | 280 | vb2_set_plane_payload(&buf->vb, 0, size); |
256 | vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE); | 281 | vb2_buffer_done(&buf->vb, state); |
257 | list_del(&buf->list); | 282 | list_del(&buf->list); |
258 | } | 283 | } |
259 | 284 | ||
@@ -518,7 +543,7 @@ static int usbtv_queue_setup(struct vb2_queue *vq, | |||
518 | if (*nbuffers < 2) | 543 | if (*nbuffers < 2) |
519 | *nbuffers = 2; | 544 | *nbuffers = 2; |
520 | *nplanes = 1; | 545 | *nplanes = 1; |
521 | sizes[0] = USBTV_CHUNK * USBTV_CHUNKS * sizeof(u32); | 546 | sizes[0] = USBTV_WIDTH * USBTV_HEIGHT / 2 * sizeof(u32); |
522 | 547 | ||
523 | return 0; | 548 | return 0; |
524 | } | 549 | } |
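The usbtv changes replace the old in-place chunk index remapping with usbtv_chunk_to_vbuf(), which splits each 240-word chunk into two 120-word halves and interleaves the two fields line by line, and they only complete a buffer as DONE once every chunk of the frame has arrived. A small user-space sketch of the same index arithmetic, assuming the driver's 720x480 UYVY geometry (USBTV_WIDTH and USBTV_HEIGHT are not shown in this hunk, so those values are an assumption):

#include <stdio.h>

#define USBTV_WIDTH   720		/* assumed frame geometry */
#define USBTV_HEIGHT  480
#define USBTV_CHUNK   240		/* 32-bit words of pixel data per chunk */
#define USBTV_CHUNKS  (USBTV_WIDTH * USBTV_HEIGHT / 4 / USBTV_CHUNK)

/* Offset (in 32-bit words) where each half-chunk starts in the frame. */
static int part_offset(int chunk_no, int half, int odd)
{
	int part_no    = chunk_no * 2 + half;	/* 120-word parts, 3 per line */
	int line       = part_no / 3;		/* line within this field */
	int part_index = (line * 2 + !odd) * 3 + (part_no % 3);

	return part_index * USBTV_CHUNK / 2;
}

int main(void)
{
	int chunk, half;

	printf("chunks per field: %d\n", USBTV_CHUNKS);		/* 360 */
	for (chunk = 0; chunk < 3; chunk++)
		for (half = 0; half < 2; half++)
			printf("chunk %d.%d -> odd field word %5d, even field word %5d\n",
			       chunk, half,
			       part_offset(chunk, half, 1),
			       part_offset(chunk, half, 0));
	return 0;
}

Three 120-word parts make one 720-pixel UYVY line (360 words), and the (line * 2 + !odd) term is what sends odd-field chunks to lines 0, 2, 4, ... and even-field chunks to lines 1, 3, 5, ..., which matches the new USBTV_CHUNKS value of 360 chunks per field and the USBTV_WIDTH * USBTV_HEIGHT / 2 * sizeof(u32) buffer size now used in usbtv_queue_setup().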
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 07f257d44a1e..e48cb339c0c6 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -3714,11 +3714,17 @@ static int bond_neigh_init(struct neighbour *n) | |||
3714 | * The bonding ndo_neigh_setup is called at init time before any | 3714 | * The bonding ndo_neigh_setup is called at init time before any |
3715 | * slave exists. So we must declare proxy setup function which will | 3715 | * slave exists. So we must declare proxy setup function which will |
3716 | * be used at run time to resolve the actual slave neigh param setup. | 3716 | * be used at run time to resolve the actual slave neigh param setup. |
3717 | * | ||
3718 | * It's also called by master devices (such as vlans) to set up their | ||
3719 | * underlying devices. In that case - do nothing, we're already set up from | ||
3720 | * our init. | ||
3717 | */ | 3721 | */ |
3718 | static int bond_neigh_setup(struct net_device *dev, | 3722 | static int bond_neigh_setup(struct net_device *dev, |
3719 | struct neigh_parms *parms) | 3723 | struct neigh_parms *parms) |
3720 | { | 3724 | { |
3721 | parms->neigh_setup = bond_neigh_init; | 3725 | /* modify only our neigh_parms */ |
3726 | if (parms->dev == dev) | ||
3727 | parms->neigh_setup = bond_neigh_init; | ||
3722 | 3728 | ||
3723 | return 0; | 3729 | return 0; |
3724 | } | 3730 | } |
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c index 25723d8ee201..925ab8ec9329 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c | |||
@@ -649,7 +649,7 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len) | |||
649 | if ((mc->ptr + rec_len) > mc->end) | 649 | if ((mc->ptr + rec_len) > mc->end) |
650 | goto decode_failed; | 650 | goto decode_failed; |
651 | 651 | ||
652 | memcpy(cf->data, mc->ptr, rec_len); | 652 | memcpy(cf->data, mc->ptr, cf->can_dlc); |
653 | mc->ptr += rec_len; | 653 | mc->ptr += rec_len; |
654 | } | 654 | } |
655 | 655 | ||
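The pcan_usb change bounds the copy by the frame's own payload length, cf->can_dlc (at most 8 bytes, assuming the DLC has already been clamped by the CAN core helpers as is usual), instead of the record length parsed from the USB message, so an oversized record can no longer overrun the fixed 8-byte data field. A hedged sketch of the bounded-copy pattern with made-up names:

#include <stdio.h>
#include <string.h>

#define CAN_MAX_DLC 8

struct fake_can_frame {
	unsigned char can_dlc;
	unsigned char data[CAN_MAX_DLC];
};

int main(void)
{
	unsigned char record[16] = { 0xde, 0xad, 0xbe, 0xef };
	size_t rec_len = sizeof(record);	/* device-supplied record length */
	struct fake_can_frame cf = { .can_dlc = 4 };

	/* Copy only what the frame declares, never the raw record length. */
	memcpy(cf.data, record, cf.can_dlc);

	printf("copied %u of %zu record bytes\n", (unsigned)cf.can_dlc, rec_len);
	return 0;
}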
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index f1b121ee5525..55d79cb53a79 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c | |||
@@ -199,7 +199,7 @@ static int arc_emac_rx(struct net_device *ndev, int budget) | |||
199 | struct arc_emac_priv *priv = netdev_priv(ndev); | 199 | struct arc_emac_priv *priv = netdev_priv(ndev); |
200 | unsigned int work_done; | 200 | unsigned int work_done; |
201 | 201 | ||
202 | for (work_done = 0; work_done <= budget; work_done++) { | 202 | for (work_done = 0; work_done < budget; work_done++) { |
203 | unsigned int *last_rx_bd = &priv->last_rx_bd; | 203 | unsigned int *last_rx_bd = &priv->last_rx_bd; |
204 | struct net_device_stats *stats = &priv->stats; | 204 | struct net_device_stats *stats = &priv->stats; |
205 | struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd]; | 205 | struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd]; |
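The arc_emac_rx() fix is a plain off-by-one: `work_done <= budget` lets the poll loop handle budget + 1 packets, while the NAPI contract requires a poll function to process at most the budget it was given. A trivial stand-alone illustration:

#include <stdio.h>

int main(void)
{
	int budget = 4, old_count = 0, new_count = 0, i;

	for (i = 0; i <= budget; i++)	/* the old test: one packet too many */
		old_count++;
	for (i = 0; i < budget; i++)	/* the fixed test */
		new_count++;

	printf("budget=%d: old loop handles %d, fixed loop handles %d\n",
	       budget, old_count, new_count);	/* 5 vs 4 */
	return 0;
}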
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h index d80e34b8285f..ce9b387b5a19 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | |||
@@ -1502,6 +1502,7 @@ struct bnx2x { | |||
1502 | #define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21) | 1502 | #define BC_SUPPORTS_DCBX_MSG_NON_PMF (1 << 21) |
1503 | #define IS_VF_FLAG (1 << 22) | 1503 | #define IS_VF_FLAG (1 << 22) |
1504 | #define INTERRUPTS_ENABLED_FLAG (1 << 23) | 1504 | #define INTERRUPTS_ENABLED_FLAG (1 << 23) |
1505 | #define BC_SUPPORTS_RMMOD_CMD (1 << 24) | ||
1505 | 1506 | ||
1506 | #define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG) | 1507 | #define BP_NOMCP(bp) ((bp)->flags & NO_MCP_FLAG) |
1507 | 1508 | ||
@@ -1830,6 +1831,8 @@ struct bnx2x { | |||
1830 | 1831 | ||
1831 | int fp_array_size; | 1832 | int fp_array_size; |
1832 | u32 dump_preset_idx; | 1833 | u32 dump_preset_idx; |
1834 | bool stats_started; | ||
1835 | struct semaphore stats_sema; | ||
1833 | }; | 1836 | }; |
1834 | 1837 | ||
1835 | /* Tx queues may be less or equal to Rx queues */ | 1838 | /* Tx queues may be less or equal to Rx queues */ |
@@ -2451,4 +2454,6 @@ enum bnx2x_pci_bus_speed { | |||
2451 | BNX2X_PCI_LINK_SPEED_5000 = 5000, | 2454 | BNX2X_PCI_LINK_SPEED_5000 = 5000, |
2452 | BNX2X_PCI_LINK_SPEED_8000 = 8000 | 2455 | BNX2X_PCI_LINK_SPEED_8000 = 8000 |
2453 | }; | 2456 | }; |
2457 | |||
2458 | void bnx2x_set_local_cmng(struct bnx2x *bp); | ||
2454 | #endif /* bnx2x.h */ | 2459 | #endif /* bnx2x.h */ |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c index 0c94df47e0e8..f9122f2d6b65 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | |||
@@ -753,6 +753,10 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state) | |||
753 | bnx2x_pfc_set_pfc(bp); | 753 | bnx2x_pfc_set_pfc(bp); |
754 | 754 | ||
755 | bnx2x_dcbx_update_ets_params(bp); | 755 | bnx2x_dcbx_update_ets_params(bp); |
756 | |||
757 | /* ets may affect cmng configuration: reinit it in hw */ | ||
758 | bnx2x_set_local_cmng(bp); | ||
759 | |||
756 | bnx2x_dcbx_resume_hw_tx(bp); | 760 | bnx2x_dcbx_resume_hw_tx(bp); |
757 | 761 | ||
758 | return; | 762 | return; |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index 5018e52ae2ad..32767f6aa33f 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h | |||
@@ -1300,6 +1300,9 @@ struct drv_func_mb { | |||
1300 | 1300 | ||
1301 | #define DRV_MSG_CODE_EEE_RESULTS_ACK 0xda000000 | 1301 | #define DRV_MSG_CODE_EEE_RESULTS_ACK 0xda000000 |
1302 | 1302 | ||
1303 | #define DRV_MSG_CODE_RMMOD 0xdb000000 | ||
1304 | #define REQ_BC_VER_4_RMMOD_CMD 0x0007080f | ||
1305 | |||
1303 | #define DRV_MSG_CODE_SET_MF_BW 0xe0000000 | 1306 | #define DRV_MSG_CODE_SET_MF_BW 0xe0000000 |
1304 | #define REQ_BC_VER_4_SET_MF_BW 0x00060202 | 1307 | #define REQ_BC_VER_4_SET_MF_BW 0x00060202 |
1305 | #define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000 | 1308 | #define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000 |
@@ -1372,6 +1375,8 @@ struct drv_func_mb { | |||
1372 | 1375 | ||
1373 | #define FW_MSG_CODE_EEE_RESULS_ACK 0xda100000 | 1376 | #define FW_MSG_CODE_EEE_RESULS_ACK 0xda100000 |
1374 | 1377 | ||
1378 | #define FW_MSG_CODE_RMMOD_ACK 0xdb100000 | ||
1379 | |||
1375 | #define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000 | 1380 | #define FW_MSG_CODE_SET_MF_BW_SENT 0xe0000000 |
1376 | #define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000 | 1381 | #define FW_MSG_CODE_SET_MF_BW_DONE 0xe1000000 |
1377 | 1382 | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index e06186c305d8..955d6cfd9cb7 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | |||
@@ -2476,7 +2476,7 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type) | |||
2476 | 2476 | ||
2477 | input.port_rate = bp->link_vars.line_speed; | 2477 | input.port_rate = bp->link_vars.line_speed; |
2478 | 2478 | ||
2479 | if (cmng_type == CMNG_FNS_MINMAX) { | 2479 | if (cmng_type == CMNG_FNS_MINMAX && input.port_rate) { |
2480 | int vn; | 2480 | int vn; |
2481 | 2481 | ||
2482 | /* read mf conf from shmem */ | 2482 | /* read mf conf from shmem */ |
@@ -2533,6 +2533,21 @@ static void storm_memset_cmng(struct bnx2x *bp, | |||
2533 | } | 2533 | } |
2534 | } | 2534 | } |
2535 | 2535 | ||
2536 | /* init cmng mode in HW according to local configuration */ | ||
2537 | void bnx2x_set_local_cmng(struct bnx2x *bp) | ||
2538 | { | ||
2539 | int cmng_fns = bnx2x_get_cmng_fns_mode(bp); | ||
2540 | |||
2541 | if (cmng_fns != CMNG_FNS_NONE) { | ||
2542 | bnx2x_cmng_fns_init(bp, false, cmng_fns); | ||
2543 | storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); | ||
2544 | } else { | ||
2545 | /* rate shaping and fairness are disabled */ | ||
2546 | DP(NETIF_MSG_IFUP, | ||
2547 | "single function mode without fairness\n"); | ||
2548 | } | ||
2549 | } | ||
2550 | |||
2536 | /* This function is called upon link interrupt */ | 2551 | /* This function is called upon link interrupt */ |
2537 | static void bnx2x_link_attn(struct bnx2x *bp) | 2552 | static void bnx2x_link_attn(struct bnx2x *bp) |
2538 | { | 2553 | { |
@@ -2568,17 +2583,8 @@ static void bnx2x_link_attn(struct bnx2x *bp) | |||
2568 | bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); | 2583 | bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); |
2569 | } | 2584 | } |
2570 | 2585 | ||
2571 | if (bp->link_vars.link_up && bp->link_vars.line_speed) { | 2586 | if (bp->link_vars.link_up && bp->link_vars.line_speed) |
2572 | int cmng_fns = bnx2x_get_cmng_fns_mode(bp); | 2587 | bnx2x_set_local_cmng(bp); |
2573 | |||
2574 | if (cmng_fns != CMNG_FNS_NONE) { | ||
2575 | bnx2x_cmng_fns_init(bp, false, cmng_fns); | ||
2576 | storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp)); | ||
2577 | } else | ||
2578 | /* rate shaping and fairness are disabled */ | ||
2579 | DP(NETIF_MSG_IFUP, | ||
2580 | "single function mode without fairness\n"); | ||
2581 | } | ||
2582 | 2588 | ||
2583 | __bnx2x_link_report(bp); | 2589 | __bnx2x_link_report(bp); |
2584 | 2590 | ||
@@ -10362,6 +10368,10 @@ static void bnx2x_get_common_hwinfo(struct bnx2x *bp) | |||
10362 | 10368 | ||
10363 | bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ? | 10369 | bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ? |
10364 | BC_SUPPORTS_DCBX_MSG_NON_PMF : 0; | 10370 | BC_SUPPORTS_DCBX_MSG_NON_PMF : 0; |
10371 | |||
10372 | bp->flags |= (val >= REQ_BC_VER_4_RMMOD_CMD) ? | ||
10373 | BC_SUPPORTS_RMMOD_CMD : 0; | ||
10374 | |||
10365 | boot_mode = SHMEM_RD(bp, | 10375 | boot_mode = SHMEM_RD(bp, |
10366 | dev_info.port_feature_config[BP_PORT(bp)].mba_config) & | 10376 | dev_info.port_feature_config[BP_PORT(bp)].mba_config) & |
10367 | PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK; | 10377 | PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK; |
@@ -11524,6 +11534,7 @@ static int bnx2x_init_bp(struct bnx2x *bp) | |||
11524 | mutex_init(&bp->port.phy_mutex); | 11534 | mutex_init(&bp->port.phy_mutex); |
11525 | mutex_init(&bp->fw_mb_mutex); | 11535 | mutex_init(&bp->fw_mb_mutex); |
11526 | spin_lock_init(&bp->stats_lock); | 11536 | spin_lock_init(&bp->stats_lock); |
11537 | sema_init(&bp->stats_sema, 1); | ||
11527 | 11538 | ||
11528 | INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); | 11539 | INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); |
11529 | INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); | 11540 | INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task); |
@@ -12817,13 +12828,17 @@ static void __bnx2x_remove(struct pci_dev *pdev, | |||
12817 | bnx2x_dcbnl_update_applist(bp, true); | 12828 | bnx2x_dcbnl_update_applist(bp, true); |
12818 | #endif | 12829 | #endif |
12819 | 12830 | ||
12831 | if (IS_PF(bp) && | ||
12832 | !BP_NOMCP(bp) && | ||
12833 | (bp->flags & BC_SUPPORTS_RMMOD_CMD)) | ||
12834 | bnx2x_fw_command(bp, DRV_MSG_CODE_RMMOD, 0); | ||
12835 | |||
12820 | /* Close the interface - either directly or implicitly */ | 12836 | /* Close the interface - either directly or implicitly */ |
12821 | if (remove_netdev) { | 12837 | if (remove_netdev) { |
12822 | unregister_netdev(dev); | 12838 | unregister_netdev(dev); |
12823 | } else { | 12839 | } else { |
12824 | rtnl_lock(); | 12840 | rtnl_lock(); |
12825 | if (netif_running(dev)) | 12841 | dev_close(dev); |
12826 | bnx2x_close(dev); | ||
12827 | rtnl_unlock(); | 12842 | rtnl_unlock(); |
12828 | } | 12843 | } |
12829 | 12844 | ||
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 95861efb5051..44104fb27947 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | |||
@@ -3463,7 +3463,7 @@ int bnx2x_vf_pci_alloc(struct bnx2x *bp) | |||
3463 | alloc_mem_err: | 3463 | alloc_mem_err: |
3464 | BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, | 3464 | BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, |
3465 | sizeof(struct bnx2x_vf_mbx_msg)); | 3465 | sizeof(struct bnx2x_vf_mbx_msg)); |
3466 | BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->vf2pf_mbox_mapping, | 3466 | BNX2X_PCI_FREE(bp->vf2pf_mbox, bp->pf2vf_bulletin_mapping, |
3467 | sizeof(union pf_vf_bulletin)); | 3467 | sizeof(union pf_vf_bulletin)); |
3468 | return -ENOMEM; | 3468 | return -ENOMEM; |
3469 | } | 3469 | } |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c index 98366abd02bd..d63d1327b051 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | |||
@@ -221,7 +221,8 @@ static int bnx2x_stats_comp(struct bnx2x *bp) | |||
221 | * Statistics service functions | 221 | * Statistics service functions |
222 | */ | 222 | */ |
223 | 223 | ||
224 | static void bnx2x_stats_pmf_update(struct bnx2x *bp) | 224 | /* should be called under stats_sema */ |
225 | static void __bnx2x_stats_pmf_update(struct bnx2x *bp) | ||
225 | { | 226 | { |
226 | struct dmae_command *dmae; | 227 | struct dmae_command *dmae; |
227 | u32 opcode; | 228 | u32 opcode; |
@@ -518,7 +519,8 @@ static void bnx2x_func_stats_init(struct bnx2x *bp) | |||
518 | *stats_comp = 0; | 519 | *stats_comp = 0; |
519 | } | 520 | } |
520 | 521 | ||
521 | static void bnx2x_stats_start(struct bnx2x *bp) | 522 | /* should be called under stats_sema */ |
523 | static void __bnx2x_stats_start(struct bnx2x *bp) | ||
522 | { | 524 | { |
523 | /* vfs travel through here as part of the statistics FSM, but no action | 525 | /* vfs travel through here as part of the statistics FSM, but no action |
524 | * is required | 526 | * is required |
@@ -534,13 +536,34 @@ static void bnx2x_stats_start(struct bnx2x *bp) | |||
534 | 536 | ||
535 | bnx2x_hw_stats_post(bp); | 537 | bnx2x_hw_stats_post(bp); |
536 | bnx2x_storm_stats_post(bp); | 538 | bnx2x_storm_stats_post(bp); |
539 | |||
540 | bp->stats_started = true; | ||
541 | } | ||
542 | |||
543 | static void bnx2x_stats_start(struct bnx2x *bp) | ||
544 | { | ||
545 | if (down_timeout(&bp->stats_sema, HZ/10)) | ||
546 | BNX2X_ERR("Unable to acquire stats lock\n"); | ||
547 | __bnx2x_stats_start(bp); | ||
548 | up(&bp->stats_sema); | ||
537 | } | 549 | } |
538 | 550 | ||
539 | static void bnx2x_stats_pmf_start(struct bnx2x *bp) | 551 | static void bnx2x_stats_pmf_start(struct bnx2x *bp) |
540 | { | 552 | { |
553 | if (down_timeout(&bp->stats_sema, HZ/10)) | ||
554 | BNX2X_ERR("Unable to acquire stats lock\n"); | ||
541 | bnx2x_stats_comp(bp); | 555 | bnx2x_stats_comp(bp); |
542 | bnx2x_stats_pmf_update(bp); | 556 | __bnx2x_stats_pmf_update(bp); |
543 | bnx2x_stats_start(bp); | 557 | __bnx2x_stats_start(bp); |
558 | up(&bp->stats_sema); | ||
559 | } | ||
560 | |||
561 | static void bnx2x_stats_pmf_update(struct bnx2x *bp) | ||
562 | { | ||
563 | if (down_timeout(&bp->stats_sema, HZ/10)) | ||
564 | BNX2X_ERR("Unable to acquire stats lock\n"); | ||
565 | __bnx2x_stats_pmf_update(bp); | ||
566 | up(&bp->stats_sema); | ||
544 | } | 567 | } |
545 | 568 | ||
546 | static void bnx2x_stats_restart(struct bnx2x *bp) | 569 | static void bnx2x_stats_restart(struct bnx2x *bp) |
@@ -550,8 +573,11 @@ static void bnx2x_stats_restart(struct bnx2x *bp) | |||
550 | */ | 573 | */ |
551 | if (IS_VF(bp)) | 574 | if (IS_VF(bp)) |
552 | return; | 575 | return; |
576 | if (down_timeout(&bp->stats_sema, HZ/10)) | ||
577 | BNX2X_ERR("Unable to acquire stats lock\n"); | ||
553 | bnx2x_stats_comp(bp); | 578 | bnx2x_stats_comp(bp); |
554 | bnx2x_stats_start(bp); | 579 | __bnx2x_stats_start(bp); |
580 | up(&bp->stats_sema); | ||
555 | } | 581 | } |
556 | 582 | ||
557 | static void bnx2x_bmac_stats_update(struct bnx2x *bp) | 583 | static void bnx2x_bmac_stats_update(struct bnx2x *bp) |
@@ -888,9 +914,7 @@ static int bnx2x_storm_stats_validate_counters(struct bnx2x *bp) | |||
888 | /* Make sure we use the value of the counter | 914 | /* Make sure we use the value of the counter |
889 | * used for sending the last stats ramrod. | 915 | * used for sending the last stats ramrod. |
890 | */ | 916 | */ |
891 | spin_lock_bh(&bp->stats_lock); | ||
892 | cur_stats_counter = bp->stats_counter - 1; | 917 | cur_stats_counter = bp->stats_counter - 1; |
893 | spin_unlock_bh(&bp->stats_lock); | ||
894 | 918 | ||
895 | /* are storm stats valid? */ | 919 | /* are storm stats valid? */ |
896 | if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) { | 920 | if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) { |
@@ -1227,12 +1251,18 @@ static void bnx2x_stats_update(struct bnx2x *bp) | |||
1227 | { | 1251 | { |
1228 | u32 *stats_comp = bnx2x_sp(bp, stats_comp); | 1252 | u32 *stats_comp = bnx2x_sp(bp, stats_comp); |
1229 | 1253 | ||
1230 | if (bnx2x_edebug_stats_stopped(bp)) | 1254 | /* we run update from timer context, so give up |
1255 | * if somebody is in the middle of a transition | ||
1256 | */ | ||
1257 | if (down_trylock(&bp->stats_sema)) | ||
1231 | return; | 1258 | return; |
1232 | 1259 | ||
1260 | if (bnx2x_edebug_stats_stopped(bp) || !bp->stats_started) | ||
1261 | goto out; | ||
1262 | |||
1233 | if (IS_PF(bp)) { | 1263 | if (IS_PF(bp)) { |
1234 | if (*stats_comp != DMAE_COMP_VAL) | 1264 | if (*stats_comp != DMAE_COMP_VAL) |
1235 | return; | 1265 | goto out; |
1236 | 1266 | ||
1237 | if (bp->port.pmf) | 1267 | if (bp->port.pmf) |
1238 | bnx2x_hw_stats_update(bp); | 1268 | bnx2x_hw_stats_update(bp); |
@@ -1242,7 +1272,7 @@ static void bnx2x_stats_update(struct bnx2x *bp) | |||
1242 | BNX2X_ERR("storm stats were not updated for 3 times\n"); | 1272 | BNX2X_ERR("storm stats were not updated for 3 times\n"); |
1243 | bnx2x_panic(); | 1273 | bnx2x_panic(); |
1244 | } | 1274 | } |
1245 | return; | 1275 | goto out; |
1246 | } | 1276 | } |
1247 | } else { | 1277 | } else { |
1248 | /* vf doesn't collect HW statistics, and doesn't get completions | 1278 | /* vf doesn't collect HW statistics, and doesn't get completions |
@@ -1256,7 +1286,7 @@ static void bnx2x_stats_update(struct bnx2x *bp) | |||
1256 | 1286 | ||
1257 | /* vf is done */ | 1287 | /* vf is done */ |
1258 | if (IS_VF(bp)) | 1288 | if (IS_VF(bp)) |
1259 | return; | 1289 | goto out; |
1260 | 1290 | ||
1261 | if (netif_msg_timer(bp)) { | 1291 | if (netif_msg_timer(bp)) { |
1262 | struct bnx2x_eth_stats *estats = &bp->eth_stats; | 1292 | struct bnx2x_eth_stats *estats = &bp->eth_stats; |
@@ -1267,6 +1297,9 @@ static void bnx2x_stats_update(struct bnx2x *bp) | |||
1267 | 1297 | ||
1268 | bnx2x_hw_stats_post(bp); | 1298 | bnx2x_hw_stats_post(bp); |
1269 | bnx2x_storm_stats_post(bp); | 1299 | bnx2x_storm_stats_post(bp); |
1300 | |||
1301 | out: | ||
1302 | up(&bp->stats_sema); | ||
1270 | } | 1303 | } |
1271 | 1304 | ||
1272 | static void bnx2x_port_stats_stop(struct bnx2x *bp) | 1305 | static void bnx2x_port_stats_stop(struct bnx2x *bp) |
@@ -1332,6 +1365,11 @@ static void bnx2x_stats_stop(struct bnx2x *bp) | |||
1332 | { | 1365 | { |
1333 | int update = 0; | 1366 | int update = 0; |
1334 | 1367 | ||
1368 | if (down_timeout(&bp->stats_sema, HZ/10)) | ||
1369 | BNX2X_ERR("Unable to acquire stats lock\n"); | ||
1370 | |||
1371 | bp->stats_started = false; | ||
1372 | |||
1335 | bnx2x_stats_comp(bp); | 1373 | bnx2x_stats_comp(bp); |
1336 | 1374 | ||
1337 | if (bp->port.pmf) | 1375 | if (bp->port.pmf) |
@@ -1348,6 +1386,8 @@ static void bnx2x_stats_stop(struct bnx2x *bp) | |||
1348 | bnx2x_hw_stats_post(bp); | 1386 | bnx2x_hw_stats_post(bp); |
1349 | bnx2x_stats_comp(bp); | 1387 | bnx2x_stats_comp(bp); |
1350 | } | 1388 | } |
1389 | |||
1390 | up(&bp->stats_sema); | ||
1351 | } | 1391 | } |
1352 | 1392 | ||
1353 | static void bnx2x_stats_do_nothing(struct bnx2x *bp) | 1393 | static void bnx2x_stats_do_nothing(struct bnx2x *bp) |
@@ -1376,15 +1416,17 @@ static const struct { | |||
1376 | void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) | 1416 | void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) |
1377 | { | 1417 | { |
1378 | enum bnx2x_stats_state state; | 1418 | enum bnx2x_stats_state state; |
1419 | void (*action)(struct bnx2x *bp); | ||
1379 | if (unlikely(bp->panic)) | 1420 | if (unlikely(bp->panic)) |
1380 | return; | 1421 | return; |
1381 | 1422 | ||
1382 | spin_lock_bh(&bp->stats_lock); | 1423 | spin_lock_bh(&bp->stats_lock); |
1383 | state = bp->stats_state; | 1424 | state = bp->stats_state; |
1384 | bp->stats_state = bnx2x_stats_stm[state][event].next_state; | 1425 | bp->stats_state = bnx2x_stats_stm[state][event].next_state; |
1426 | action = bnx2x_stats_stm[state][event].action; | ||
1385 | spin_unlock_bh(&bp->stats_lock); | 1427 | spin_unlock_bh(&bp->stats_lock); |
1386 | 1428 | ||
1387 | bnx2x_stats_stm[state][event].action(bp); | 1429 | action(bp); |
1388 | 1430 | ||
1389 | if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) | 1431 | if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) |
1390 | DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", | 1432 | DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", |
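The bulk of the bnx2x_stats.c change serialises statistics state transitions with stats_sema: the blocking entry points wait for the semaphore with a 100 ms timeout, while bnx2x_stats_update(), which runs from timer context, only try-locks and skips the update if a transition is in flight; bnx2x_stats_handle() additionally snapshots the action pointer under stats_lock so the action it invokes always matches the state it read. A rough user-space sketch of the locking pattern using POSIX semaphores in place of the kernel semaphore API (and giving up on timeout rather than proceeding, to keep the sketch self-consistent):

#include <semaphore.h>
#include <stdio.h>
#include <time.h>

static sem_t stats_sema;

/* Blocking caller (state transition): wait up to ~100 ms, then give up. */
static void stats_transition(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_nsec += 100 * 1000 * 1000;
	if (ts.tv_nsec >= 1000000000L) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000L;
	}
	if (sem_timedwait(&stats_sema, &ts)) {
		fprintf(stderr, "unable to acquire stats lock\n");
		return;			/* give up rather than run unlocked */
	}
	/* ... perform the state transition ... */
	sem_post(&stats_sema);
}

/* Timer context: never block, just skip this update round. */
static void stats_update(void)
{
	if (sem_trywait(&stats_sema))
		return;			/* somebody is mid-transition */
	/* ... collect statistics ... */
	sem_post(&stats_sema);
}

int main(void)
{
	sem_init(&stats_sema, 0, 1);
	stats_transition();
	stats_update();
	sem_destroy(&stats_sema);
	return 0;
}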
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index ddebc7a5dda0..0da2214ef1b9 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c | |||
@@ -17796,8 +17796,10 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev, | |||
17796 | 17796 | ||
17797 | done: | 17797 | done: |
17798 | if (state == pci_channel_io_perm_failure) { | 17798 | if (state == pci_channel_io_perm_failure) { |
17799 | tg3_napi_enable(tp); | 17799 | if (netdev) { |
17800 | dev_close(netdev); | 17800 | tg3_napi_enable(tp); |
17801 | dev_close(netdev); | ||
17802 | } | ||
17801 | err = PCI_ERS_RESULT_DISCONNECT; | 17803 | err = PCI_ERS_RESULT_DISCONNECT; |
17802 | } else { | 17804 | } else { |
17803 | pci_disable_device(pdev); | 17805 | pci_disable_device(pdev); |
@@ -17827,7 +17829,8 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev) | |||
17827 | rtnl_lock(); | 17829 | rtnl_lock(); |
17828 | 17830 | ||
17829 | if (pci_enable_device(pdev)) { | 17831 | if (pci_enable_device(pdev)) { |
17830 | netdev_err(netdev, "Cannot re-enable PCI device after reset.\n"); | 17832 | dev_err(&pdev->dev, |
17833 | "Cannot re-enable PCI device after reset.\n"); | ||
17831 | goto done; | 17834 | goto done; |
17832 | } | 17835 | } |
17833 | 17836 | ||
@@ -17835,7 +17838,7 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev) | |||
17835 | pci_restore_state(pdev); | 17838 | pci_restore_state(pdev); |
17836 | pci_save_state(pdev); | 17839 | pci_save_state(pdev); |
17837 | 17840 | ||
17838 | if (!netif_running(netdev)) { | 17841 | if (!netdev || !netif_running(netdev)) { |
17839 | rc = PCI_ERS_RESULT_RECOVERED; | 17842 | rc = PCI_ERS_RESULT_RECOVERED; |
17840 | goto done; | 17843 | goto done; |
17841 | } | 17844 | } |
@@ -17847,7 +17850,7 @@ static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev) | |||
17847 | rc = PCI_ERS_RESULT_RECOVERED; | 17850 | rc = PCI_ERS_RESULT_RECOVERED; |
17848 | 17851 | ||
17849 | done: | 17852 | done: |
17850 | if (rc != PCI_ERS_RESULT_RECOVERED && netif_running(netdev)) { | 17853 | if (rc != PCI_ERS_RESULT_RECOVERED && netdev && netif_running(netdev)) { |
17851 | tg3_napi_enable(tp); | 17854 | tg3_napi_enable(tp); |
17852 | dev_close(netdev); | 17855 | dev_close(netdev); |
17853 | } | 17856 | } |
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index 687ec4a8bb48..9c89dc8fe105 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c | |||
@@ -455,11 +455,6 @@ static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q, | |||
455 | q->pg_chunk.offset = 0; | 455 | q->pg_chunk.offset = 0; |
456 | mapping = pci_map_page(adapter->pdev, q->pg_chunk.page, | 456 | mapping = pci_map_page(adapter->pdev, q->pg_chunk.page, |
457 | 0, q->alloc_size, PCI_DMA_FROMDEVICE); | 457 | 0, q->alloc_size, PCI_DMA_FROMDEVICE); |
458 | if (unlikely(pci_dma_mapping_error(adapter->pdev, mapping))) { | ||
459 | __free_pages(q->pg_chunk.page, order); | ||
460 | q->pg_chunk.page = NULL; | ||
461 | return -EIO; | ||
462 | } | ||
463 | q->pg_chunk.mapping = mapping; | 458 | q->pg_chunk.mapping = mapping; |
464 | } | 459 | } |
465 | sd->pg_chunk = q->pg_chunk; | 460 | sd->pg_chunk = q->pg_chunk; |
@@ -954,75 +949,40 @@ static inline unsigned int calc_tx_descs(const struct sk_buff *skb) | |||
954 | return flits_to_desc(flits); | 949 | return flits_to_desc(flits); |
955 | } | 950 | } |
956 | 951 | ||
957 | |||
958 | /* map_skb - map a packet main body and its page fragments | ||
959 | * @pdev: the PCI device | ||
960 | * @skb: the packet | ||
961 | * @addr: placeholder to save the mapped addresses | ||
962 | * | ||
963 | * map the main body of an sk_buff and its page fragments, if any. | ||
964 | */ | ||
965 | static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb, | ||
966 | dma_addr_t *addr) | ||
967 | { | ||
968 | const skb_frag_t *fp, *end; | ||
969 | const struct skb_shared_info *si; | ||
970 | |||
971 | *addr = pci_map_single(pdev, skb->data, skb_headlen(skb), | ||
972 | PCI_DMA_TODEVICE); | ||
973 | if (pci_dma_mapping_error(pdev, *addr)) | ||
974 | goto out_err; | ||
975 | |||
976 | si = skb_shinfo(skb); | ||
977 | end = &si->frags[si->nr_frags]; | ||
978 | |||
979 | for (fp = si->frags; fp < end; fp++) { | ||
980 | *++addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp), | ||
981 | DMA_TO_DEVICE); | ||
982 | if (pci_dma_mapping_error(pdev, *addr)) | ||
983 | goto unwind; | ||
984 | } | ||
985 | return 0; | ||
986 | |||
987 | unwind: | ||
988 | while (fp-- > si->frags) | ||
989 | dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp), | ||
990 | DMA_TO_DEVICE); | ||
991 | |||
992 | pci_unmap_single(pdev, addr[-1], skb_headlen(skb), PCI_DMA_TODEVICE); | ||
993 | out_err: | ||
994 | return -ENOMEM; | ||
995 | } | ||
996 | |||
997 | /** | 952 | /** |
998 | * write_sgl - populate a scatter/gather list for a packet | 953 | * make_sgl - populate a scatter/gather list for a packet |
999 | * @skb: the packet | 954 | * @skb: the packet |
1000 | * @sgp: the SGL to populate | 955 | * @sgp: the SGL to populate |
1001 | * @start: start address of skb main body data to include in the SGL | 956 | * @start: start address of skb main body data to include in the SGL |
1002 | * @len: length of skb main body data to include in the SGL | 957 | * @len: length of skb main body data to include in the SGL |
1003 | * @addr: the list of the mapped addresses | 958 | * @pdev: the PCI device |
1004 | * | 959 | * |
1005 | * Copies the scatter/gather list for the buffers that make up a packet | 960 | * Generates a scatter/gather list for the buffers that make up a packet |
1006 | * and returns the SGL size in 8-byte words. The caller must size the SGL | 961 | * and returns the SGL size in 8-byte words. The caller must size the SGL |
1007 | * appropriately. | 962 | * appropriately. |
1008 | */ | 963 | */ |
1009 | static inline unsigned int write_sgl(const struct sk_buff *skb, | 964 | static inline unsigned int make_sgl(const struct sk_buff *skb, |
1010 | struct sg_ent *sgp, unsigned char *start, | 965 | struct sg_ent *sgp, unsigned char *start, |
1011 | unsigned int len, const dma_addr_t *addr) | 966 | unsigned int len, struct pci_dev *pdev) |
1012 | { | 967 | { |
1013 | unsigned int i, j = 0, k = 0, nfrags; | 968 | dma_addr_t mapping; |
969 | unsigned int i, j = 0, nfrags; | ||
1014 | 970 | ||
1015 | if (len) { | 971 | if (len) { |
972 | mapping = pci_map_single(pdev, start, len, PCI_DMA_TODEVICE); | ||
1016 | sgp->len[0] = cpu_to_be32(len); | 973 | sgp->len[0] = cpu_to_be32(len); |
1017 | sgp->addr[j++] = cpu_to_be64(addr[k++]); | 974 | sgp->addr[0] = cpu_to_be64(mapping); |
975 | j = 1; | ||
1018 | } | 976 | } |
1019 | 977 | ||
1020 | nfrags = skb_shinfo(skb)->nr_frags; | 978 | nfrags = skb_shinfo(skb)->nr_frags; |
1021 | for (i = 0; i < nfrags; i++) { | 979 | for (i = 0; i < nfrags; i++) { |
1022 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 980 | const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
1023 | 981 | ||
982 | mapping = skb_frag_dma_map(&pdev->dev, frag, 0, skb_frag_size(frag), | ||
983 | DMA_TO_DEVICE); | ||
1024 | sgp->len[j] = cpu_to_be32(skb_frag_size(frag)); | 984 | sgp->len[j] = cpu_to_be32(skb_frag_size(frag)); |
1025 | sgp->addr[j] = cpu_to_be64(addr[k++]); | 985 | sgp->addr[j] = cpu_to_be64(mapping); |
1026 | j ^= 1; | 986 | j ^= 1; |
1027 | if (j == 0) | 987 | if (j == 0) |
1028 | ++sgp; | 988 | ++sgp; |
@@ -1178,7 +1138,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb, | |||
1178 | const struct port_info *pi, | 1138 | const struct port_info *pi, |
1179 | unsigned int pidx, unsigned int gen, | 1139 | unsigned int pidx, unsigned int gen, |
1180 | struct sge_txq *q, unsigned int ndesc, | 1140 | struct sge_txq *q, unsigned int ndesc, |
1181 | unsigned int compl, const dma_addr_t *addr) | 1141 | unsigned int compl) |
1182 | { | 1142 | { |
1183 | unsigned int flits, sgl_flits, cntrl, tso_info; | 1143 | unsigned int flits, sgl_flits, cntrl, tso_info; |
1184 | struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1]; | 1144 | struct sg_ent *sgp, sgl[MAX_SKB_FRAGS / 2 + 1]; |
@@ -1236,7 +1196,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb, | |||
1236 | } | 1196 | } |
1237 | 1197 | ||
1238 | sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; | 1198 | sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; |
1239 | sgl_flits = write_sgl(skb, sgp, skb->data, skb_headlen(skb), addr); | 1199 | sgl_flits = make_sgl(skb, sgp, skb->data, skb_headlen(skb), adap->pdev); |
1240 | 1200 | ||
1241 | write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen, | 1201 | write_wr_hdr_sgl(ndesc, skb, d, pidx, q, sgl, flits, sgl_flits, gen, |
1242 | htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl), | 1202 | htonl(V_WR_OP(FW_WROPCODE_TUNNEL_TX_PKT) | compl), |
@@ -1267,7 +1227,6 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1267 | struct netdev_queue *txq; | 1227 | struct netdev_queue *txq; |
1268 | struct sge_qset *qs; | 1228 | struct sge_qset *qs; |
1269 | struct sge_txq *q; | 1229 | struct sge_txq *q; |
1270 | dma_addr_t addr[MAX_SKB_FRAGS + 1]; | ||
1271 | 1230 | ||
1272 | /* | 1231 | /* |
1273 | * The chip min packet length is 9 octets but play safe and reject | 1232 | * The chip min packet length is 9 octets but play safe and reject |
@@ -1296,11 +1255,6 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1296 | return NETDEV_TX_BUSY; | 1255 | return NETDEV_TX_BUSY; |
1297 | } | 1256 | } |
1298 | 1257 | ||
1299 | if (unlikely(map_skb(adap->pdev, skb, addr) < 0)) { | ||
1300 | dev_kfree_skb(skb); | ||
1301 | return NETDEV_TX_OK; | ||
1302 | } | ||
1303 | |||
1304 | q->in_use += ndesc; | 1258 | q->in_use += ndesc; |
1305 | if (unlikely(credits - ndesc < q->stop_thres)) { | 1259 | if (unlikely(credits - ndesc < q->stop_thres)) { |
1306 | t3_stop_tx_queue(txq, qs, q); | 1260 | t3_stop_tx_queue(txq, qs, q); |
@@ -1358,7 +1312,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev) | |||
1358 | if (likely(!skb_shared(skb))) | 1312 | if (likely(!skb_shared(skb))) |
1359 | skb_orphan(skb); | 1313 | skb_orphan(skb); |
1360 | 1314 | ||
1361 | write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl, addr); | 1315 | write_tx_pkt_wr(adap, skb, pi, pidx, gen, q, ndesc, compl); |
1362 | check_ring_tx_db(adap, q); | 1316 | check_ring_tx_db(adap, q); |
1363 | return NETDEV_TX_OK; | 1317 | return NETDEV_TX_OK; |
1364 | } | 1318 | } |
@@ -1623,8 +1577,7 @@ static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev, | |||
1623 | */ | 1577 | */ |
1624 | static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, | 1578 | static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, |
1625 | struct sge_txq *q, unsigned int pidx, | 1579 | struct sge_txq *q, unsigned int pidx, |
1626 | unsigned int gen, unsigned int ndesc, | 1580 | unsigned int gen, unsigned int ndesc) |
1627 | const dma_addr_t *addr) | ||
1628 | { | 1581 | { |
1629 | unsigned int sgl_flits, flits; | 1582 | unsigned int sgl_flits, flits; |
1630 | struct work_request_hdr *from; | 1583 | struct work_request_hdr *from; |
@@ -1645,9 +1598,9 @@ static void write_ofld_wr(struct adapter *adap, struct sk_buff *skb, | |||
1645 | 1598 | ||
1646 | flits = skb_transport_offset(skb) / 8; | 1599 | flits = skb_transport_offset(skb) / 8; |
1647 | sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; | 1600 | sgp = ndesc == 1 ? (struct sg_ent *)&d->flit[flits] : sgl; |
1648 | sgl_flits = write_sgl(skb, sgp, skb_transport_header(skb), | 1601 | sgl_flits = make_sgl(skb, sgp, skb_transport_header(skb), |
1649 | skb_tail_pointer(skb) - | 1602 | skb->tail - skb->transport_header, |
1650 | skb_transport_header(skb), addr); | 1603 | adap->pdev); |
1651 | if (need_skb_unmap()) { | 1604 | if (need_skb_unmap()) { |
1652 | setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits); | 1605 | setup_deferred_unmapping(skb, adap->pdev, sgp, sgl_flits); |
1653 | skb->destructor = deferred_unmap_destructor; | 1606 | skb->destructor = deferred_unmap_destructor; |
@@ -1705,11 +1658,6 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); | |||
1705 | goto again; | 1658 | goto again; |
1706 | } | 1659 | } |
1707 | 1660 | ||
1708 | if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) { | ||
1709 | spin_unlock(&q->lock); | ||
1710 | return NET_XMIT_SUCCESS; | ||
1711 | } | ||
1712 | |||
1713 | gen = q->gen; | 1661 | gen = q->gen; |
1714 | q->in_use += ndesc; | 1662 | q->in_use += ndesc; |
1715 | pidx = q->pidx; | 1663 | pidx = q->pidx; |
@@ -1720,7 +1668,7 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); | |||
1720 | } | 1668 | } |
1721 | spin_unlock(&q->lock); | 1669 | spin_unlock(&q->lock); |
1722 | 1670 | ||
1723 | write_ofld_wr(adap, skb, q, pidx, gen, ndesc, (dma_addr_t *)skb->head); | 1671 | write_ofld_wr(adap, skb, q, pidx, gen, ndesc); |
1724 | check_ring_tx_db(adap, q); | 1672 | check_ring_tx_db(adap, q); |
1725 | return NET_XMIT_SUCCESS; | 1673 | return NET_XMIT_SUCCESS; |
1726 | } | 1674 | } |
@@ -1738,7 +1686,6 @@ static void restart_offloadq(unsigned long data) | |||
1738 | struct sge_txq *q = &qs->txq[TXQ_OFLD]; | 1686 | struct sge_txq *q = &qs->txq[TXQ_OFLD]; |
1739 | const struct port_info *pi = netdev_priv(qs->netdev); | 1687 | const struct port_info *pi = netdev_priv(qs->netdev); |
1740 | struct adapter *adap = pi->adapter; | 1688 | struct adapter *adap = pi->adapter; |
1741 | unsigned int written = 0; | ||
1742 | 1689 | ||
1743 | spin_lock(&q->lock); | 1690 | spin_lock(&q->lock); |
1744 | again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); | 1691 | again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); |
@@ -1758,14 +1705,10 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); | |||
1758 | break; | 1705 | break; |
1759 | } | 1706 | } |
1760 | 1707 | ||
1761 | if (map_skb(adap->pdev, skb, (dma_addr_t *)skb->head)) | ||
1762 | break; | ||
1763 | |||
1764 | gen = q->gen; | 1708 | gen = q->gen; |
1765 | q->in_use += ndesc; | 1709 | q->in_use += ndesc; |
1766 | pidx = q->pidx; | 1710 | pidx = q->pidx; |
1767 | q->pidx += ndesc; | 1711 | q->pidx += ndesc; |
1768 | written += ndesc; | ||
1769 | if (q->pidx >= q->size) { | 1712 | if (q->pidx >= q->size) { |
1770 | q->pidx -= q->size; | 1713 | q->pidx -= q->size; |
1771 | q->gen ^= 1; | 1714 | q->gen ^= 1; |
@@ -1773,8 +1716,7 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); | |||
1773 | __skb_unlink(skb, &q->sendq); | 1716 | __skb_unlink(skb, &q->sendq); |
1774 | spin_unlock(&q->lock); | 1717 | spin_unlock(&q->lock); |
1775 | 1718 | ||
1776 | write_ofld_wr(adap, skb, q, pidx, gen, ndesc, | 1719 | write_ofld_wr(adap, skb, q, pidx, gen, ndesc); |
1777 | (dma_addr_t *)skb->head); | ||
1778 | spin_lock(&q->lock); | 1720 | spin_lock(&q->lock); |
1779 | } | 1721 | } |
1780 | spin_unlock(&q->lock); | 1722 | spin_unlock(&q->lock); |
@@ -1784,9 +1726,8 @@ again: reclaim_completed_tx(adap, q, TX_RECLAIM_CHUNK); | |||
1784 | set_bit(TXQ_LAST_PKT_DB, &q->flags); | 1726 | set_bit(TXQ_LAST_PKT_DB, &q->flags); |
1785 | #endif | 1727 | #endif |
1786 | wmb(); | 1728 | wmb(); |
1787 | if (likely(written)) | 1729 | t3_write_reg(adap, A_SG_KDOORBELL, |
1788 | t3_write_reg(adap, A_SG_KDOORBELL, | 1730 | F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); |
1789 | F_SELEGRCNTX | V_EGRCNTX(q->cntxt_id)); | ||
1790 | } | 1731 | } |
1791 | 1732 | ||
1792 | /** | 1733 | /** |
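The cxgb3 hunks fold DMA mapping back into SGL construction: make_sgl() replaces the write_sgl()/map_skb() pair. Structurally, each sg_ent carries two length/address pairs, which is why j toggles between 0 and 1 and sgp only advances on every second fragment. A stand-alone sketch of that packing with dummy values in place of real DMA mappings (the struct layout here is simplified; the real one uses big-endian fields):

#include <stdio.h>
#include <stdint.h>

struct sg_ent {			/* two length/address pairs per entry */
	uint32_t len[2];
	uint64_t addr[2];
};

int main(void)
{
	struct sg_ent sgl[4] = { 0 }, *sgp = sgl;
	uint32_t frag_len[5] = { 1500, 800, 64, 4096, 12 };
	unsigned int i, j = 0, nfrags = 5;

	for (i = 0; i < nfrags; i++) {
		sgp->len[j]  = frag_len[i];
		sgp->addr[j] = 0x1000 * (i + 1);	/* stand-in for a DMA mapping */
		j ^= 1;
		if (j == 0)
			++sgp;				/* entry full, move to the next */
	}

	printf("%u fragments packed into %ld sg_ent entries\n",
	       nfrags, (long)((sgp - sgl) + (j != 0)));	/* 5 -> 3 */
	return 0;
}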
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 6e6e0a117ee2..8ec5d74ad44d 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c | |||
@@ -3048,6 +3048,9 @@ int be_cmd_get_func_config(struct be_adapter *adapter) | |||
3048 | 3048 | ||
3049 | adapter->max_event_queues = le16_to_cpu(desc->eq_count); | 3049 | adapter->max_event_queues = le16_to_cpu(desc->eq_count); |
3050 | adapter->if_cap_flags = le32_to_cpu(desc->cap_flags); | 3050 | adapter->if_cap_flags = le32_to_cpu(desc->cap_flags); |
3051 | |||
3052 | /* Clear flags that the driver is not interested in */ | ||
3053 | adapter->if_cap_flags &= BE_IF_CAP_FLAGS_WANT; | ||
3051 | } | 3054 | } |
3052 | err: | 3055 | err: |
3053 | mutex_unlock(&adapter->mbox_lock); | 3056 | mutex_unlock(&adapter->mbox_lock); |
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index 5228d88c5a02..1b3b9e886412 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h | |||
@@ -563,6 +563,12 @@ enum be_if_flags { | |||
563 | BE_IF_FLAGS_MULTICAST = 0x1000 | 563 | BE_IF_FLAGS_MULTICAST = 0x1000 |
564 | }; | 564 | }; |
565 | 565 | ||
566 | #define BE_IF_CAP_FLAGS_WANT (BE_IF_FLAGS_RSS | BE_IF_FLAGS_PROMISCUOUS |\ | ||
567 | BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_VLAN_PROMISCUOUS |\ | ||
568 | BE_IF_FLAGS_VLAN | BE_IF_FLAGS_MCAST_PROMISCUOUS |\ | ||
569 | BE_IF_FLAGS_PASS_L3L4_ERRORS | BE_IF_FLAGS_MULTICAST |\ | ||
570 | BE_IF_FLAGS_UNTAGGED) | ||
571 | |||
566 | /* An RX interface is an object with one or more MAC addresses and | 572 | /* An RX interface is an object with one or more MAC addresses and |
567 | * filtering capabilities. */ | 573 | * filtering capabilities. */ |
568 | struct be_cmd_req_if_create { | 574 | struct be_cmd_req_if_create { |
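BE_IF_CAP_FLAGS_WANT lets be_cmd_get_func_config() keep only the interface capability bits the driver actually handles. Masking a reported capability word down to a whitelist is as simple as the sketch below; the flag names and values here are illustrative, not the driver's full set:

#include <stdio.h>
#include <stdint.h>

#define IF_FLAGS_RSS		0x4u
#define IF_FLAGS_PROMISC	0x8u
#define IF_FLAGS_MULTICAST	0x1000u
/* Illustrative subset of the whitelist; the real mask has more bits. */
#define IF_CAP_FLAGS_WANT	(IF_FLAGS_RSS | IF_FLAGS_PROMISC | IF_FLAGS_MULTICAST)

int main(void)
{
	uint32_t reported = 0xdead100cu;	/* firmware-reported cap_flags */
	uint32_t kept     = reported & IF_CAP_FLAGS_WANT;

	printf("reported 0x%08x -> kept 0x%08x\n", reported, kept);
	return 0;
}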
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index c896079728e1..ef94a591f9e5 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c | |||
@@ -931,17 +931,20 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base) | |||
931 | } | 931 | } |
932 | 932 | ||
933 | /* Allocate and setup a new buffer for receiving */ | 933 | /* Allocate and setup a new buffer for receiving */ |
934 | static void skge_rx_setup(struct skge_port *skge, struct skge_element *e, | 934 | static int skge_rx_setup(struct skge_port *skge, struct skge_element *e, |
935 | struct sk_buff *skb, unsigned int bufsize) | 935 | struct sk_buff *skb, unsigned int bufsize) |
936 | { | 936 | { |
937 | struct skge_rx_desc *rd = e->desc; | 937 | struct skge_rx_desc *rd = e->desc; |
938 | u64 map; | 938 | dma_addr_t map; |
939 | 939 | ||
940 | map = pci_map_single(skge->hw->pdev, skb->data, bufsize, | 940 | map = pci_map_single(skge->hw->pdev, skb->data, bufsize, |
941 | PCI_DMA_FROMDEVICE); | 941 | PCI_DMA_FROMDEVICE); |
942 | 942 | ||
943 | rd->dma_lo = map; | 943 | if (pci_dma_mapping_error(skge->hw->pdev, map)) |
944 | rd->dma_hi = map >> 32; | 944 | return -1; |
945 | |||
946 | rd->dma_lo = lower_32_bits(map); | ||
947 | rd->dma_hi = upper_32_bits(map); | ||
945 | e->skb = skb; | 948 | e->skb = skb; |
946 | rd->csum1_start = ETH_HLEN; | 949 | rd->csum1_start = ETH_HLEN; |
947 | rd->csum2_start = ETH_HLEN; | 950 | rd->csum2_start = ETH_HLEN; |
@@ -953,6 +956,7 @@ static void skge_rx_setup(struct skge_port *skge, struct skge_element *e, | |||
953 | rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize; | 956 | rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize; |
954 | dma_unmap_addr_set(e, mapaddr, map); | 957 | dma_unmap_addr_set(e, mapaddr, map); |
955 | dma_unmap_len_set(e, maplen, bufsize); | 958 | dma_unmap_len_set(e, maplen, bufsize); |
959 | return 0; | ||
956 | } | 960 | } |
957 | 961 | ||
958 | /* Resume receiving using existing skb, | 962 | /* Resume receiving using existing skb, |
@@ -1014,7 +1018,10 @@ static int skge_rx_fill(struct net_device *dev) | |||
1014 | return -ENOMEM; | 1018 | return -ENOMEM; |
1015 | 1019 | ||
1016 | skb_reserve(skb, NET_IP_ALIGN); | 1020 | skb_reserve(skb, NET_IP_ALIGN); |
1017 | skge_rx_setup(skge, e, skb, skge->rx_buf_size); | 1021 | if (skge_rx_setup(skge, e, skb, skge->rx_buf_size) < 0) { |
1022 | dev_kfree_skb(skb); | ||
1023 | return -EIO; | ||
1024 | } | ||
1018 | } while ((e = e->next) != ring->start); | 1025 | } while ((e = e->next) != ring->start); |
1019 | 1026 | ||
1020 | ring->to_clean = ring->start; | 1027 | ring->to_clean = ring->start; |
@@ -2544,7 +2551,7 @@ static int skge_up(struct net_device *dev) | |||
2544 | 2551 | ||
2545 | BUG_ON(skge->dma & 7); | 2552 | BUG_ON(skge->dma & 7); |
2546 | 2553 | ||
2547 | if ((u64)skge->dma >> 32 != ((u64) skge->dma + skge->mem_size) >> 32) { | 2554 | if (upper_32_bits(skge->dma) != upper_32_bits(skge->dma + skge->mem_size)) { |
2548 | dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n"); | 2555 | dev_err(&hw->pdev->dev, "pci_alloc_consistent region crosses 4G boundary\n"); |
2549 | err = -EINVAL; | 2556 | err = -EINVAL; |
2550 | goto free_pci_mem; | 2557 | goto free_pci_mem; |
@@ -2729,7 +2736,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, | |||
2729 | struct skge_tx_desc *td; | 2736 | struct skge_tx_desc *td; |
2730 | int i; | 2737 | int i; |
2731 | u32 control, len; | 2738 | u32 control, len; |
2732 | u64 map; | 2739 | dma_addr_t map; |
2733 | 2740 | ||
2734 | if (skb_padto(skb, ETH_ZLEN)) | 2741 | if (skb_padto(skb, ETH_ZLEN)) |
2735 | return NETDEV_TX_OK; | 2742 | return NETDEV_TX_OK; |
@@ -2743,11 +2750,14 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, | |||
2743 | e->skb = skb; | 2750 | e->skb = skb; |
2744 | len = skb_headlen(skb); | 2751 | len = skb_headlen(skb); |
2745 | map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE); | 2752 | map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE); |
2753 | if (pci_dma_mapping_error(hw->pdev, map)) | ||
2754 | goto mapping_error; | ||
2755 | |||
2746 | dma_unmap_addr_set(e, mapaddr, map); | 2756 | dma_unmap_addr_set(e, mapaddr, map); |
2747 | dma_unmap_len_set(e, maplen, len); | 2757 | dma_unmap_len_set(e, maplen, len); |
2748 | 2758 | ||
2749 | td->dma_lo = map; | 2759 | td->dma_lo = lower_32_bits(map); |
2750 | td->dma_hi = map >> 32; | 2760 | td->dma_hi = upper_32_bits(map); |
2751 | 2761 | ||
2752 | if (skb->ip_summed == CHECKSUM_PARTIAL) { | 2762 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
2753 | const int offset = skb_checksum_start_offset(skb); | 2763 | const int offset = skb_checksum_start_offset(skb); |
@@ -2778,14 +2788,16 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, | |||
2778 | 2788 | ||
2779 | map = skb_frag_dma_map(&hw->pdev->dev, frag, 0, | 2789 | map = skb_frag_dma_map(&hw->pdev->dev, frag, 0, |
2780 | skb_frag_size(frag), DMA_TO_DEVICE); | 2790 | skb_frag_size(frag), DMA_TO_DEVICE); |
2791 | if (dma_mapping_error(&hw->pdev->dev, map)) | ||
2792 | goto mapping_unwind; | ||
2781 | 2793 | ||
2782 | e = e->next; | 2794 | e = e->next; |
2783 | e->skb = skb; | 2795 | e->skb = skb; |
2784 | tf = e->desc; | 2796 | tf = e->desc; |
2785 | BUG_ON(tf->control & BMU_OWN); | 2797 | BUG_ON(tf->control & BMU_OWN); |
2786 | 2798 | ||
2787 | tf->dma_lo = map; | 2799 | tf->dma_lo = lower_32_bits(map); |
2788 | tf->dma_hi = (u64) map >> 32; | 2800 | tf->dma_hi = upper_32_bits(map); |
2789 | dma_unmap_addr_set(e, mapaddr, map); | 2801 | dma_unmap_addr_set(e, mapaddr, map); |
2790 | dma_unmap_len_set(e, maplen, skb_frag_size(frag)); | 2802 | dma_unmap_len_set(e, maplen, skb_frag_size(frag)); |
2791 | 2803 | ||
@@ -2815,6 +2827,26 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb, | |||
2815 | } | 2827 | } |
2816 | 2828 | ||
2817 | return NETDEV_TX_OK; | 2829 | return NETDEV_TX_OK; |
2830 | |||
2831 | mapping_unwind: | ||
2832 | e = skge->tx_ring.to_use; | ||
2833 | pci_unmap_single(hw->pdev, | ||
2834 | dma_unmap_addr(e, mapaddr), | ||
2835 | dma_unmap_len(e, maplen), | ||
2836 | PCI_DMA_TODEVICE); | ||
2837 | while (i-- > 0) { | ||
2838 | e = e->next; | ||
2839 | pci_unmap_page(hw->pdev, | ||
2840 | dma_unmap_addr(e, mapaddr), | ||
2841 | dma_unmap_len(e, maplen), | ||
2842 | PCI_DMA_TODEVICE); | ||
2843 | } | ||
2844 | |||
2845 | mapping_error: | ||
2846 | if (net_ratelimit()) | ||
2847 | dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name); | ||
2848 | dev_kfree_skb(skb); | ||
2849 | return NETDEV_TX_OK; | ||
2818 | } | 2850 | } |
2819 | 2851 | ||
2820 | 2852 | ||
@@ -3045,11 +3077,13 @@ static struct sk_buff *skge_rx_get(struct net_device *dev, | |||
3045 | 3077 | ||
3046 | pci_dma_sync_single_for_cpu(skge->hw->pdev, | 3078 | pci_dma_sync_single_for_cpu(skge->hw->pdev, |
3047 | dma_unmap_addr(e, mapaddr), | 3079 | dma_unmap_addr(e, mapaddr), |
3048 | len, PCI_DMA_FROMDEVICE); | 3080 | dma_unmap_len(e, maplen), |
3081 | PCI_DMA_FROMDEVICE); | ||
3049 | skb_copy_from_linear_data(e->skb, skb->data, len); | 3082 | skb_copy_from_linear_data(e->skb, skb->data, len); |
3050 | pci_dma_sync_single_for_device(skge->hw->pdev, | 3083 | pci_dma_sync_single_for_device(skge->hw->pdev, |
3051 | dma_unmap_addr(e, mapaddr), | 3084 | dma_unmap_addr(e, mapaddr), |
3052 | len, PCI_DMA_FROMDEVICE); | 3085 | dma_unmap_len(e, maplen), |
3086 | PCI_DMA_FROMDEVICE); | ||
3053 | skge_rx_reuse(e, skge->rx_buf_size); | 3087 | skge_rx_reuse(e, skge->rx_buf_size); |
3054 | } else { | 3088 | } else { |
3055 | struct sk_buff *nskb; | 3089 | struct sk_buff *nskb; |
@@ -3058,13 +3092,17 @@ static struct sk_buff *skge_rx_get(struct net_device *dev, | |||
3058 | if (!nskb) | 3092 | if (!nskb) |
3059 | goto resubmit; | 3093 | goto resubmit; |
3060 | 3094 | ||
3095 | if (skge_rx_setup(skge, e, nskb, skge->rx_buf_size) < 0) { | ||
3096 | dev_kfree_skb(nskb); | ||
3097 | goto resubmit; | ||
3098 | } | ||
3099 | |||
3061 | pci_unmap_single(skge->hw->pdev, | 3100 | pci_unmap_single(skge->hw->pdev, |
3062 | dma_unmap_addr(e, mapaddr), | 3101 | dma_unmap_addr(e, mapaddr), |
3063 | dma_unmap_len(e, maplen), | 3102 | dma_unmap_len(e, maplen), |
3064 | PCI_DMA_FROMDEVICE); | 3103 | PCI_DMA_FROMDEVICE); |
3065 | skb = e->skb; | 3104 | skb = e->skb; |
3066 | prefetch(skb->data); | 3105 | prefetch(skb->data); |
3067 | skge_rx_setup(skge, e, nskb, skge->rx_buf_size); | ||
3068 | } | 3106 | } |
3069 | 3107 | ||
3070 | skb_put(skb, len); | 3108 | skb_put(skb, len); |
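The skge changes switch the descriptor setup to dma_addr_t with lower_32_bits()/upper_32_bits() and add mapping-error handling on both the RX and TX paths. On a 64-bit address the two helpers behave like this sketch (the kernel versions are written slightly differently so the shift is also well defined when dma_addr_t is only 32 bits wide):

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

static uint32_t lower_32(uint64_t v) { return (uint32_t)v; }
static uint32_t upper_32(uint64_t v) { return (uint32_t)(v >> 32); }

int main(void)
{
	uint64_t map = 0x0000000123abc000ULL;		/* example bus address */
	uint64_t region = 0x00000000fffe0000ULL;	/* allocation near a 4GB edge */
	uint64_t region_size = 0x100000;

	printf("dma_lo = 0x%08" PRIx32 ", dma_hi = 0x%08" PRIx32 "\n",
	       lower_32(map), upper_32(map));

	/* The 4 GB boundary check from skge_up(), in the same terms. */
	if (upper_32(region) != upper_32(region + region_size))
		printf("region crosses a 4GB boundary\n");
	return 0;
}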
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index c571de85d0f9..5472cbd34028 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c | |||
@@ -46,7 +46,7 @@ | |||
46 | #include "mlx5_core.h" | 46 | #include "mlx5_core.h" |
47 | 47 | ||
48 | enum { | 48 | enum { |
49 | CMD_IF_REV = 4, | 49 | CMD_IF_REV = 5, |
50 | }; | 50 | }; |
51 | 51 | ||
52 | enum { | 52 | enum { |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index c02cbcfd0fb8..443cc4d7b024 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c | |||
@@ -268,7 +268,7 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq) | |||
268 | case MLX5_EVENT_TYPE_PAGE_REQUEST: | 268 | case MLX5_EVENT_TYPE_PAGE_REQUEST: |
269 | { | 269 | { |
270 | u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id); | 270 | u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id); |
271 | s16 npages = be16_to_cpu(eqe->data.req_pages.num_pages); | 271 | s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages); |
272 | 272 | ||
273 | mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n", func_id, npages); | 273 | mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n", func_id, npages); |
274 | mlx5_core_req_pages_handler(dev, func_id, npages); | 274 | mlx5_core_req_pages_handler(dev, func_id, npages); |
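The mlx5 event-queue fix reflects a firmware interface change: the requested page count is now a 32-bit field, so reading it with be16_to_cpu() into an s16 would mangle any request above 32767 pages. A quick demonstration of the truncation, assuming the count sits at the start of the same 32-bit slot:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>		/* ntohs/ntohl stand in for be16/be32_to_cpu */

int main(void)
{
	/* 73728 pages (0x00012000) as it appears on the wire, big-endian. */
	unsigned char eqe_data[4] = { 0x00, 0x01, 0x20, 0x00 };
	uint16_t be16;
	uint32_t be32;

	memcpy(&be16, eqe_data, sizeof(be16));	/* old: first 16 bits only */
	memcpy(&be32, eqe_data, sizeof(be32));	/* new: the full field */

	printf("as s16: %d pages, as s32: %d pages\n",
	       (int16_t)ntohs(be16), (int32_t)ntohl(be32));	/* 1 vs 73728 */
	return 0;
}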
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c index 72a5222447f5..f012658b6a92 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c | |||
@@ -113,7 +113,7 @@ int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, | |||
113 | caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f; | 113 | caps->log_max_srq = out->hca_cap.log_max_srqs & 0x1f; |
114 | caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f; | 114 | caps->local_ca_ack_delay = out->hca_cap.local_ca_ack_delay & 0x1f; |
115 | caps->log_max_mcg = out->hca_cap.log_max_mcg; | 115 | caps->log_max_mcg = out->hca_cap.log_max_mcg; |
116 | caps->max_qp_mcg = be16_to_cpu(out->hca_cap.max_qp_mcg); | 116 | caps->max_qp_mcg = be32_to_cpu(out->hca_cap.max_qp_mcg) & 0xffffff; |
117 | caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f); | 117 | caps->max_ra_res_qp = 1 << (out->hca_cap.log_max_ra_res_qp & 0x3f); |
118 | caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f); | 118 | caps->max_ra_req_qp = 1 << (out->hca_cap.log_max_ra_req_qp & 0x3f); |
119 | caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz; | 119 | caps->max_srq_wqes = 1 << out->hca_cap.log_max_srq_sz; |
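Similarly, max_qp_mcg now arrives inside a 32-bit word of which only the low 24 bits are the value, hence the `& 0xffffff` after be32_to_cpu(). Extracting such a field looks like the sketch below; what the top byte of that word holds is an assumption here, not something this hunk shows:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

int main(void)
{
	/* Top byte assumed reserved; the low 24 bits carry max_qp_mcg. */
	unsigned char word[4] = { 0x7f, 0x00, 0x01, 0x00 };
	uint32_t be32;

	memcpy(&be32, word, sizeof(be32));
	printf("max_qp_mcg = %u\n", ntohl(be32) & 0xffffff);	/* 256 */
	return 0;
}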
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 748f10a155c4..3e6670c4a7cd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c | |||
@@ -55,33 +55,9 @@ enum { | |||
55 | }; | 55 | }; |
56 | 56 | ||
57 | static DEFINE_SPINLOCK(health_lock); | 57 | static DEFINE_SPINLOCK(health_lock); |
58 | |||
59 | static LIST_HEAD(health_list); | 58 | static LIST_HEAD(health_list); |
60 | static struct work_struct health_work; | 59 | static struct work_struct health_work; |
61 | 60 | ||
62 | static health_handler_t reg_handler; | ||
63 | int mlx5_register_health_report_handler(health_handler_t handler) | ||
64 | { | ||
65 | spin_lock_irq(&health_lock); | ||
66 | if (reg_handler) { | ||
67 | spin_unlock_irq(&health_lock); | ||
68 | return -EEXIST; | ||
69 | } | ||
70 | reg_handler = handler; | ||
71 | spin_unlock_irq(&health_lock); | ||
72 | |||
73 | return 0; | ||
74 | } | ||
75 | EXPORT_SYMBOL(mlx5_register_health_report_handler); | ||
76 | |||
77 | void mlx5_unregister_health_report_handler(void) | ||
78 | { | ||
79 | spin_lock_irq(&health_lock); | ||
80 | reg_handler = NULL; | ||
81 | spin_unlock_irq(&health_lock); | ||
82 | } | ||
83 | EXPORT_SYMBOL(mlx5_unregister_health_report_handler); | ||
84 | |||
85 | static void health_care(struct work_struct *work) | 61 | static void health_care(struct work_struct *work) |
86 | { | 62 | { |
87 | struct mlx5_core_health *health, *n; | 63 | struct mlx5_core_health *health, *n; |
@@ -98,11 +74,8 @@ static void health_care(struct work_struct *work) | |||
98 | priv = container_of(health, struct mlx5_priv, health); | 74 | priv = container_of(health, struct mlx5_priv, health); |
99 | dev = container_of(priv, struct mlx5_core_dev, priv); | 75 | dev = container_of(priv, struct mlx5_core_dev, priv); |
100 | mlx5_core_warn(dev, "handling bad device here\n"); | 76 | mlx5_core_warn(dev, "handling bad device here\n"); |
77 | /* nothing yet */ | ||
101 | spin_lock_irq(&health_lock); | 78 | spin_lock_irq(&health_lock); |
102 | if (reg_handler) | ||
103 | reg_handler(dev->pdev, health->health, | ||
104 | sizeof(health->health)); | ||
105 | |||
106 | list_del_init(&health->list); | 79 | list_del_init(&health->list); |
107 | spin_unlock_irq(&health_lock); | 80 | spin_unlock_irq(&health_lock); |
108 | } | 81 | } |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index 4a3e137931a3..3a2408d44820 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | |||
@@ -43,10 +43,16 @@ enum { | |||
43 | MLX5_PAGES_TAKE = 2 | 43 | MLX5_PAGES_TAKE = 2 |
44 | }; | 44 | }; |
45 | 45 | ||
46 | enum { | ||
47 | MLX5_BOOT_PAGES = 1, | ||
48 | MLX5_INIT_PAGES = 2, | ||
49 | MLX5_POST_INIT_PAGES = 3 | ||
50 | }; | ||
51 | |||
46 | struct mlx5_pages_req { | 52 | struct mlx5_pages_req { |
47 | struct mlx5_core_dev *dev; | 53 | struct mlx5_core_dev *dev; |
48 | u32 func_id; | 54 | u32 func_id; |
49 | s16 npages; | 55 | s32 npages; |
50 | struct work_struct work; | 56 | struct work_struct work; |
51 | }; | 57 | }; |
52 | 58 | ||
@@ -64,27 +70,23 @@ struct mlx5_query_pages_inbox { | |||
64 | 70 | ||
65 | struct mlx5_query_pages_outbox { | 71 | struct mlx5_query_pages_outbox { |
66 | struct mlx5_outbox_hdr hdr; | 72 | struct mlx5_outbox_hdr hdr; |
67 | __be16 num_boot_pages; | 73 | __be16 rsvd; |
68 | __be16 func_id; | 74 | __be16 func_id; |
69 | __be16 init_pages; | 75 | __be32 num_pages; |
70 | __be16 num_pages; | ||
71 | }; | 76 | }; |
72 | 77 | ||
73 | struct mlx5_manage_pages_inbox { | 78 | struct mlx5_manage_pages_inbox { |
74 | struct mlx5_inbox_hdr hdr; | 79 | struct mlx5_inbox_hdr hdr; |
75 | __be16 rsvd0; | 80 | __be16 rsvd; |
76 | __be16 func_id; | 81 | __be16 func_id; |
77 | __be16 rsvd1; | 82 | __be32 num_entries; |
78 | __be16 num_entries; | ||
79 | u8 rsvd2[16]; | ||
80 | __be64 pas[0]; | 83 | __be64 pas[0]; |
81 | }; | 84 | }; |
82 | 85 | ||
83 | struct mlx5_manage_pages_outbox { | 86 | struct mlx5_manage_pages_outbox { |
84 | struct mlx5_outbox_hdr hdr; | 87 | struct mlx5_outbox_hdr hdr; |
85 | u8 rsvd0[2]; | 88 | __be32 num_entries; |
86 | __be16 num_entries; | 89 | u8 rsvd[4]; |
87 | u8 rsvd1[20]; | ||
88 | __be64 pas[0]; | 90 | __be64 pas[0]; |
89 | }; | 91 | }; |
90 | 92 | ||
@@ -146,7 +148,7 @@ static struct page *remove_page(struct mlx5_core_dev *dev, u64 addr) | |||
146 | } | 148 | } |
147 | 149 | ||
148 | static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, | 150 | static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, |
149 | s16 *pages, s16 *init_pages, u16 *boot_pages) | 151 | s32 *npages, int boot) |
150 | { | 152 | { |
151 | struct mlx5_query_pages_inbox in; | 153 | struct mlx5_query_pages_inbox in; |
152 | struct mlx5_query_pages_outbox out; | 154 | struct mlx5_query_pages_outbox out; |
@@ -155,6 +157,8 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, | |||
155 | memset(&in, 0, sizeof(in)); | 157 | memset(&in, 0, sizeof(in)); |
156 | memset(&out, 0, sizeof(out)); | 158 | memset(&out, 0, sizeof(out)); |
157 | in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES); | 159 | in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_PAGES); |
160 | in.hdr.opmod = boot ? cpu_to_be16(MLX5_BOOT_PAGES) : cpu_to_be16(MLX5_INIT_PAGES); | ||
161 | |||
158 | err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); | 162 | err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); |
159 | if (err) | 163 | if (err) |
160 | return err; | 164 | return err; |
@@ -162,15 +166,7 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, | |||
162 | if (out.hdr.status) | 166 | if (out.hdr.status) |
163 | return mlx5_cmd_status_to_err(&out.hdr); | 167 | return mlx5_cmd_status_to_err(&out.hdr); |
164 | 168 | ||
165 | if (pages) | 169 | *npages = be32_to_cpu(out.num_pages); |
166 | *pages = be16_to_cpu(out.num_pages); | ||
167 | |||
168 | if (init_pages) | ||
169 | *init_pages = be16_to_cpu(out.init_pages); | ||
170 | |||
171 | if (boot_pages) | ||
172 | *boot_pages = be16_to_cpu(out.num_boot_pages); | ||
173 | |||
174 | *func_id = be16_to_cpu(out.func_id); | 170 | *func_id = be16_to_cpu(out.func_id); |
175 | 171 | ||
176 | return err; | 172 | return err; |
@@ -224,7 +220,7 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, | |||
224 | in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); | 220 | in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); |
225 | in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE); | 221 | in->hdr.opmod = cpu_to_be16(MLX5_PAGES_GIVE); |
226 | in->func_id = cpu_to_be16(func_id); | 222 | in->func_id = cpu_to_be16(func_id); |
227 | in->num_entries = cpu_to_be16(npages); | 223 | in->num_entries = cpu_to_be32(npages); |
228 | err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); | 224 | err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out)); |
229 | mlx5_core_dbg(dev, "err %d\n", err); | 225 | mlx5_core_dbg(dev, "err %d\n", err); |
230 | if (err) { | 226 | if (err) { |
@@ -292,7 +288,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, | |||
292 | in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); | 288 | in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_MANAGE_PAGES); |
293 | in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE); | 289 | in.hdr.opmod = cpu_to_be16(MLX5_PAGES_TAKE); |
294 | in.func_id = cpu_to_be16(func_id); | 290 | in.func_id = cpu_to_be16(func_id); |
295 | in.num_entries = cpu_to_be16(npages); | 291 | in.num_entries = cpu_to_be32(npages); |
296 | mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); | 292 | mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen); |
297 | err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); | 293 | err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen); |
298 | if (err) { | 294 | if (err) { |
@@ -306,7 +302,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, | |||
306 | goto out_free; | 302 | goto out_free; |
307 | } | 303 | } |
308 | 304 | ||
309 | num_claimed = be16_to_cpu(out->num_entries); | 305 | num_claimed = be32_to_cpu(out->num_entries); |
310 | if (nclaimed) | 306 | if (nclaimed) |
311 | *nclaimed = num_claimed; | 307 | *nclaimed = num_claimed; |
312 | 308 | ||
@@ -345,7 +341,7 @@ static void pages_work_handler(struct work_struct *work) | |||
345 | } | 341 | } |
346 | 342 | ||
347 | void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, | 343 | void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, |
348 | s16 npages) | 344 | s32 npages) |
349 | { | 345 | { |
350 | struct mlx5_pages_req *req; | 346 | struct mlx5_pages_req *req; |
351 | 347 | ||
@@ -364,20 +360,18 @@ void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, | |||
364 | 360 | ||
365 | int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot) | 361 | int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot) |
366 | { | 362 | { |
367 | u16 uninitialized_var(boot_pages); | ||
368 | s16 uninitialized_var(init_pages); | ||
369 | u16 uninitialized_var(func_id); | 363 | u16 uninitialized_var(func_id); |
364 | s32 uninitialized_var(npages); | ||
370 | int err; | 365 | int err; |
371 | 366 | ||
372 | err = mlx5_cmd_query_pages(dev, &func_id, NULL, &init_pages, | 367 | err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot); |
373 | &boot_pages); | ||
374 | if (err) | 368 | if (err) |
375 | return err; | 369 | return err; |
376 | 370 | ||
371 | mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n", | ||
372 | npages, boot ? "boot" : "init", func_id); | ||
377 | 373 | ||
378 | mlx5_core_dbg(dev, "requested %d init pages and %d boot pages for func_id 0x%x\n", | 374 | return give_pages(dev, func_id, npages, 0); |
379 | init_pages, boot_pages, func_id); | ||
380 | return give_pages(dev, func_id, boot ? boot_pages : init_pages, 0); | ||
381 | } | 375 | } |
382 | 376 | ||
383 | static int optimal_reclaimed_pages(void) | 377 | static int optimal_reclaimed_pages(void) |
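The pagealloc.c rework above widens every page count from 16 to 32 bits: the command layouts move from __be16 num_pages/num_entries to __be32, and the internal bookkeeping moves from s16 to s32. The sketch below is an illustrative userspace program, not driver code, showing why a signed 16-bit count breaks once the firmware asks for more pages than fit in 15 bits; in the mlx5 scheme a negative count means pages are to be reclaimed rather than given.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* A request for 40000 pages (about 156 MiB of 4 KiB pages). */
        int32_t requested = 40000;

        /* Squeezing the count into a signed 16-bit type wraps it negative
         * on the usual two's-complement targets. */
        int16_t as_s16 = (int16_t)requested;
        int32_t as_s32 = requested;

        printf("requested=%d  s16 view=%d  s32 view=%d\n",
               requested, as_s16, as_s32);

        /* A wrapped (negative) count would be misread as "reclaim pages"
         * instead of "give pages". */
        printf("s16 path: %s\n", as_s16 < 0 ? "reclaim" : "give");
        printf("s32 path: %s\n", as_s32 < 0 ? "reclaim" : "give");
        return 0;
    }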
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c index 92da9980a0a0..9d4bb7f83904 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | |||
@@ -3266,6 +3266,11 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev) | |||
3266 | u8 val; | 3266 | u8 val; |
3267 | int ret, max_sds_rings = adapter->max_sds_rings; | 3267 | int ret, max_sds_rings = adapter->max_sds_rings; |
3268 | 3268 | ||
3269 | if (test_bit(__QLCNIC_RESETTING, &adapter->state)) { | ||
3270 | netdev_info(netdev, "Device is resetting\n"); | ||
3271 | return -EBUSY; | ||
3272 | } | ||
3273 | |||
3269 | if (qlcnic_get_diag_lock(adapter)) { | 3274 | if (qlcnic_get_diag_lock(adapter)) { |
3270 | netdev_info(netdev, "Device in diagnostics mode\n"); | 3275 | netdev_info(netdev, "Device in diagnostics mode\n"); |
3271 | return -EBUSY; | 3276 | return -EBUSY; |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index 9f4b8d5f0865..345d987aede4 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | |||
@@ -629,7 +629,8 @@ int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter) | |||
629 | return -EIO; | 629 | return -EIO; |
630 | } | 630 | } |
631 | 631 | ||
632 | qlcnic_set_drv_version(adapter); | 632 | if (adapter->portnum == 0) |
633 | qlcnic_set_drv_version(adapter); | ||
633 | qlcnic_83xx_idc_attach_driver(adapter); | 634 | qlcnic_83xx_idc_attach_driver(adapter); |
634 | 635 | ||
635 | return 0; | 636 | return 0; |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index ee013fcc3322..bc05d016c859 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | |||
@@ -2165,7 +2165,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2165 | if (err) | 2165 | if (err) |
2166 | goto err_out_disable_mbx_intr; | 2166 | goto err_out_disable_mbx_intr; |
2167 | 2167 | ||
2168 | qlcnic_set_drv_version(adapter); | 2168 | if (adapter->portnum == 0) |
2169 | qlcnic_set_drv_version(adapter); | ||
2169 | 2170 | ||
2170 | pci_set_drvdata(pdev, adapter); | 2171 | pci_set_drvdata(pdev, adapter); |
2171 | 2172 | ||
@@ -3085,7 +3086,8 @@ done: | |||
3085 | adapter->fw_fail_cnt = 0; | 3086 | adapter->fw_fail_cnt = 0; |
3086 | adapter->flags &= ~QLCNIC_FW_HANG; | 3087 | adapter->flags &= ~QLCNIC_FW_HANG; |
3087 | clear_bit(__QLCNIC_RESETTING, &adapter->state); | 3088 | clear_bit(__QLCNIC_RESETTING, &adapter->state); |
3088 | qlcnic_set_drv_version(adapter); | 3089 | if (adapter->portnum == 0) |
3090 | qlcnic_set_drv_version(adapter); | ||
3089 | 3091 | ||
3090 | if (!qlcnic_clr_drv_state(adapter)) | 3092 | if (!qlcnic_clr_drv_state(adapter)) |
3091 | qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, | 3093 | qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c index 10ed82b3baca..660c3f5b2237 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | |||
@@ -170,9 +170,9 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter, | |||
170 | 170 | ||
171 | if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) { | 171 | if (ahw->extra_capability[0] & QLCNIC_FW_CAPABILITY_2_BEACON) { |
172 | err = qlcnic_get_beacon_state(adapter, &h_beacon_state); | 172 | err = qlcnic_get_beacon_state(adapter, &h_beacon_state); |
173 | if (!err) { | 173 | if (err) { |
174 | dev_info(&adapter->pdev->dev, | 174 | netdev_err(adapter->netdev, |
175 | "Failed to get current beacon state\n"); | 175 | "Failed to get current beacon state\n"); |
176 | } else { | 176 | } else { |
177 | if (h_beacon_state == QLCNIC_BEACON_DISABLE) | 177 | if (h_beacon_state == QLCNIC_BEACON_DISABLE) |
178 | ahw->beacon_state = 0; | 178 | ahw->beacon_state = 0; |
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index 6f35f8404d68..d2e591955bdd 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c | |||
@@ -524,6 +524,7 @@ rx_status_loop: | |||
524 | PCI_DMA_FROMDEVICE); | 524 | PCI_DMA_FROMDEVICE); |
525 | if (dma_mapping_error(&cp->pdev->dev, new_mapping)) { | 525 | if (dma_mapping_error(&cp->pdev->dev, new_mapping)) { |
526 | dev->stats.rx_dropped++; | 526 | dev->stats.rx_dropped++; |
527 | kfree_skb(new_skb); | ||
527 | goto rx_next; | 528 | goto rx_next; |
528 | } | 529 | } |
529 | 530 | ||
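The 8139cp hunk above closes a leak on the receive refill path: when DMA mapping of the replacement buffer fails, the freshly allocated skb now gets freed before the frame is dropped. A generic userspace sketch of the pattern follows; the alloc/map helpers are stand-ins, with the map step hardwired to fail so the cleanup branch runs.

    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in for dma_map_single(); pretend the mapping can fail. */
    static int map_buffer(void *buf)
    {
        (void)buf;
        return -1;    /* simulate a mapping failure */
    }

    static int refill_rx_slot(void)
    {
        void *new_buf = malloc(2048);    /* replacement RX buffer */
        if (!new_buf)
            return -1;

        if (map_buffer(new_buf) < 0) {
            /* The fix: release the just-allocated buffer before bailing
             * out, otherwise it leaks on every mapping failure. */
            free(new_buf);
            return -1;
        }

        /* ... install new_buf into the RX ring here ... */
        free(new_buf);
        return 0;
    }

    int main(void)
    {
        printf("refill_rx_slot() = %d\n", refill_rx_slot());
        return 0;
    }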
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c index c9d942a5c335..1ef9d8a555aa 100644 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c | |||
@@ -33,10 +33,15 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) | |||
33 | struct stmmac_priv *priv = (struct stmmac_priv *)p; | 33 | struct stmmac_priv *priv = (struct stmmac_priv *)p; |
34 | unsigned int txsize = priv->dma_tx_size; | 34 | unsigned int txsize = priv->dma_tx_size; |
35 | unsigned int entry = priv->cur_tx % txsize; | 35 | unsigned int entry = priv->cur_tx % txsize; |
36 | struct dma_desc *desc = priv->dma_tx + entry; | 36 | struct dma_desc *desc; |
37 | unsigned int nopaged_len = skb_headlen(skb); | 37 | unsigned int nopaged_len = skb_headlen(skb); |
38 | unsigned int bmax, len; | 38 | unsigned int bmax, len; |
39 | 39 | ||
40 | if (priv->extend_desc) | ||
41 | desc = (struct dma_desc *)(priv->dma_etx + entry); | ||
42 | else | ||
43 | desc = priv->dma_tx + entry; | ||
44 | |||
40 | if (priv->plat->enh_desc) | 45 | if (priv->plat->enh_desc) |
41 | bmax = BUF_SIZE_8KiB; | 46 | bmax = BUF_SIZE_8KiB; |
42 | else | 47 | else |
@@ -54,7 +59,11 @@ static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) | |||
54 | STMMAC_RING_MODE); | 59 | STMMAC_RING_MODE); |
55 | wmb(); | 60 | wmb(); |
56 | entry = (++priv->cur_tx) % txsize; | 61 | entry = (++priv->cur_tx) % txsize; |
57 | desc = priv->dma_tx + entry; | 62 | |
63 | if (priv->extend_desc) | ||
64 | desc = (struct dma_desc *)(priv->dma_etx + entry); | ||
65 | else | ||
66 | desc = priv->dma_tx + entry; | ||
58 | 67 | ||
59 | desc->des2 = dma_map_single(priv->device, skb->data + bmax, | 68 | desc->des2 = dma_map_single(priv->device, skb->data + bmax, |
60 | len, DMA_TO_DEVICE); | 69 | len, DMA_TO_DEVICE); |
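The ring_mode.c change above picks the transmit descriptor from the ring that is actually in use: with extended descriptors enabled the ring lives in priv->dma_etx, whose elements are larger than a plain dma_desc, so indexing priv->dma_tx would land on the wrong slot. The sketch below uses simplified stand-in structs (not the real stmmac layouts) to show the per-ring selection and the stride difference.

    #include <stdio.h>
    #include <stddef.h>

    /* Simplified stand-ins: an extended descriptor embeds the basic one
     * and adds extra words, so its array stride is larger. */
    struct desc  { unsigned int des[4]; };
    struct edesc { struct desc basic; unsigned int ext[4]; };

    int main(void)
    {
        struct desc  tx[8];
        struct edesc etx[8];
        unsigned int entry = 3;
        int extend_desc = 1;

        /* Select from the ring that is in use; for the extended ring,
         * use the embedded basic view of the element. */
        struct desc *d = extend_desc ? &etx[entry].basic : &tx[entry];

        printf("stride: basic=%zu extended=%zu\n",
               sizeof(struct desc), sizeof(struct edesc));
        printf("entry %u: selected=%p basic-ring slot=%p\n",
               entry, (void *)d, (void *)&tx[entry]);
        return 0;
    }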
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index f2ccb36e8685..0a9bb9d30c3f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -939,15 +939,20 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, | |||
939 | 939 | ||
940 | skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN, | 940 | skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN, |
941 | GFP_KERNEL); | 941 | GFP_KERNEL); |
942 | if (unlikely(skb == NULL)) { | 942 | if (!skb) { |
943 | pr_err("%s: Rx init fails; skb is NULL\n", __func__); | 943 | pr_err("%s: Rx init fails; skb is NULL\n", __func__); |
944 | return 1; | 944 | return -ENOMEM; |
945 | } | 945 | } |
946 | skb_reserve(skb, NET_IP_ALIGN); | 946 | skb_reserve(skb, NET_IP_ALIGN); |
947 | priv->rx_skbuff[i] = skb; | 947 | priv->rx_skbuff[i] = skb; |
948 | priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data, | 948 | priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data, |
949 | priv->dma_buf_sz, | 949 | priv->dma_buf_sz, |
950 | DMA_FROM_DEVICE); | 950 | DMA_FROM_DEVICE); |
951 | if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) { | ||
952 | pr_err("%s: DMA mapping error\n", __func__); | ||
953 | dev_kfree_skb_any(skb); | ||
954 | return -EINVAL; | ||
955 | } | ||
951 | 956 | ||
952 | p->des2 = priv->rx_skbuff_dma[i]; | 957 | p->des2 = priv->rx_skbuff_dma[i]; |
953 | 958 | ||
@@ -958,6 +963,16 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, | |||
958 | return 0; | 963 | return 0; |
959 | } | 964 | } |
960 | 965 | ||
966 | static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i) | ||
967 | { | ||
968 | if (priv->rx_skbuff[i]) { | ||
969 | dma_unmap_single(priv->device, priv->rx_skbuff_dma[i], | ||
970 | priv->dma_buf_sz, DMA_FROM_DEVICE); | ||
971 | dev_kfree_skb_any(priv->rx_skbuff[i]); | ||
972 | } | ||
973 | priv->rx_skbuff[i] = NULL; | ||
974 | } | ||
975 | |||
961 | /** | 976 | /** |
962 | * init_dma_desc_rings - init the RX/TX descriptor rings | 977 | * init_dma_desc_rings - init the RX/TX descriptor rings |
963 | * @dev: net device structure | 978 | * @dev: net device structure |
@@ -965,13 +980,14 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, | |||
965 | * and allocates the socket buffers. It suppors the chained and ring | 980 | * and allocates the socket buffers. It suppors the chained and ring |
966 | * modes. | 981 | * modes. |
967 | */ | 982 | */ |
968 | static void init_dma_desc_rings(struct net_device *dev) | 983 | static int init_dma_desc_rings(struct net_device *dev) |
969 | { | 984 | { |
970 | int i; | 985 | int i; |
971 | struct stmmac_priv *priv = netdev_priv(dev); | 986 | struct stmmac_priv *priv = netdev_priv(dev); |
972 | unsigned int txsize = priv->dma_tx_size; | 987 | unsigned int txsize = priv->dma_tx_size; |
973 | unsigned int rxsize = priv->dma_rx_size; | 988 | unsigned int rxsize = priv->dma_rx_size; |
974 | unsigned int bfsize = 0; | 989 | unsigned int bfsize = 0; |
990 | int ret = -ENOMEM; | ||
975 | 991 | ||
976 | /* Set the max buffer size according to the DESC mode | 992 | /* Set the max buffer size according to the DESC mode |
977 | * and the MTU. Note that RING mode allows 16KiB bsize. | 993 | * and the MTU. Note that RING mode allows 16KiB bsize. |
@@ -992,34 +1008,60 @@ static void init_dma_desc_rings(struct net_device *dev) | |||
992 | dma_extended_desc), | 1008 | dma_extended_desc), |
993 | &priv->dma_rx_phy, | 1009 | &priv->dma_rx_phy, |
994 | GFP_KERNEL); | 1010 | GFP_KERNEL); |
1011 | if (!priv->dma_erx) | ||
1012 | goto err_dma; | ||
1013 | |||
995 | priv->dma_etx = dma_alloc_coherent(priv->device, txsize * | 1014 | priv->dma_etx = dma_alloc_coherent(priv->device, txsize * |
996 | sizeof(struct | 1015 | sizeof(struct |
997 | dma_extended_desc), | 1016 | dma_extended_desc), |
998 | &priv->dma_tx_phy, | 1017 | &priv->dma_tx_phy, |
999 | GFP_KERNEL); | 1018 | GFP_KERNEL); |
1000 | if ((!priv->dma_erx) || (!priv->dma_etx)) | 1019 | if (!priv->dma_etx) { |
1001 | return; | 1020 | dma_free_coherent(priv->device, priv->dma_rx_size * |
1021 | sizeof(struct dma_extended_desc), | ||
1022 | priv->dma_erx, priv->dma_rx_phy); | ||
1023 | goto err_dma; | ||
1024 | } | ||
1002 | } else { | 1025 | } else { |
1003 | priv->dma_rx = dma_alloc_coherent(priv->device, rxsize * | 1026 | priv->dma_rx = dma_alloc_coherent(priv->device, rxsize * |
1004 | sizeof(struct dma_desc), | 1027 | sizeof(struct dma_desc), |
1005 | &priv->dma_rx_phy, | 1028 | &priv->dma_rx_phy, |
1006 | GFP_KERNEL); | 1029 | GFP_KERNEL); |
1030 | if (!priv->dma_rx) | ||
1031 | goto err_dma; | ||
1032 | |||
1007 | priv->dma_tx = dma_alloc_coherent(priv->device, txsize * | 1033 | priv->dma_tx = dma_alloc_coherent(priv->device, txsize * |
1008 | sizeof(struct dma_desc), | 1034 | sizeof(struct dma_desc), |
1009 | &priv->dma_tx_phy, | 1035 | &priv->dma_tx_phy, |
1010 | GFP_KERNEL); | 1036 | GFP_KERNEL); |
1011 | if ((!priv->dma_rx) || (!priv->dma_tx)) | 1037 | if (!priv->dma_tx) { |
1012 | return; | 1038 | dma_free_coherent(priv->device, priv->dma_rx_size * |
1039 | sizeof(struct dma_desc), | ||
1040 | priv->dma_rx, priv->dma_rx_phy); | ||
1041 | goto err_dma; | ||
1042 | } | ||
1013 | } | 1043 | } |
1014 | 1044 | ||
1015 | priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t), | 1045 | priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t), |
1016 | GFP_KERNEL); | 1046 | GFP_KERNEL); |
1047 | if (!priv->rx_skbuff_dma) | ||
1048 | goto err_rx_skbuff_dma; | ||
1049 | |||
1017 | priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *), | 1050 | priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *), |
1018 | GFP_KERNEL); | 1051 | GFP_KERNEL); |
1052 | if (!priv->rx_skbuff) | ||
1053 | goto err_rx_skbuff; | ||
1054 | |||
1019 | priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t), | 1055 | priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t), |
1020 | GFP_KERNEL); | 1056 | GFP_KERNEL); |
1057 | if (!priv->tx_skbuff_dma) | ||
1058 | goto err_tx_skbuff_dma; | ||
1059 | |||
1021 | priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *), | 1060 | priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *), |
1022 | GFP_KERNEL); | 1061 | GFP_KERNEL); |
1062 | if (!priv->tx_skbuff) | ||
1063 | goto err_tx_skbuff; | ||
1064 | |||
1023 | if (netif_msg_probe(priv)) { | 1065 | if (netif_msg_probe(priv)) { |
1024 | pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__, | 1066 | pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__, |
1025 | (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy); | 1067 | (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy); |
@@ -1034,8 +1076,9 @@ static void init_dma_desc_rings(struct net_device *dev) | |||
1034 | else | 1076 | else |
1035 | p = priv->dma_rx + i; | 1077 | p = priv->dma_rx + i; |
1036 | 1078 | ||
1037 | if (stmmac_init_rx_buffers(priv, p, i)) | 1079 | ret = stmmac_init_rx_buffers(priv, p, i); |
1038 | break; | 1080 | if (ret) |
1081 | goto err_init_rx_buffers; | ||
1039 | 1082 | ||
1040 | if (netif_msg_probe(priv)) | 1083 | if (netif_msg_probe(priv)) |
1041 | pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i], | 1084 | pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i], |
@@ -1081,20 +1124,44 @@ static void init_dma_desc_rings(struct net_device *dev) | |||
1081 | 1124 | ||
1082 | if (netif_msg_hw(priv)) | 1125 | if (netif_msg_hw(priv)) |
1083 | stmmac_display_rings(priv); | 1126 | stmmac_display_rings(priv); |
1127 | |||
1128 | return 0; | ||
1129 | err_init_rx_buffers: | ||
1130 | while (--i >= 0) | ||
1131 | stmmac_free_rx_buffers(priv, i); | ||
1132 | kfree(priv->tx_skbuff); | ||
1133 | err_tx_skbuff: | ||
1134 | kfree(priv->tx_skbuff_dma); | ||
1135 | err_tx_skbuff_dma: | ||
1136 | kfree(priv->rx_skbuff); | ||
1137 | err_rx_skbuff: | ||
1138 | kfree(priv->rx_skbuff_dma); | ||
1139 | err_rx_skbuff_dma: | ||
1140 | if (priv->extend_desc) { | ||
1141 | dma_free_coherent(priv->device, priv->dma_tx_size * | ||
1142 | sizeof(struct dma_extended_desc), | ||
1143 | priv->dma_etx, priv->dma_tx_phy); | ||
1144 | dma_free_coherent(priv->device, priv->dma_rx_size * | ||
1145 | sizeof(struct dma_extended_desc), | ||
1146 | priv->dma_erx, priv->dma_rx_phy); | ||
1147 | } else { | ||
1148 | dma_free_coherent(priv->device, | ||
1149 | priv->dma_tx_size * sizeof(struct dma_desc), | ||
1150 | priv->dma_tx, priv->dma_tx_phy); | ||
1151 | dma_free_coherent(priv->device, | ||
1152 | priv->dma_rx_size * sizeof(struct dma_desc), | ||
1153 | priv->dma_rx, priv->dma_rx_phy); | ||
1154 | } | ||
1155 | err_dma: | ||
1156 | return ret; | ||
1084 | } | 1157 | } |
1085 | 1158 | ||
1086 | static void dma_free_rx_skbufs(struct stmmac_priv *priv) | 1159 | static void dma_free_rx_skbufs(struct stmmac_priv *priv) |
1087 | { | 1160 | { |
1088 | int i; | 1161 | int i; |
1089 | 1162 | ||
1090 | for (i = 0; i < priv->dma_rx_size; i++) { | 1163 | for (i = 0; i < priv->dma_rx_size; i++) |
1091 | if (priv->rx_skbuff[i]) { | 1164 | stmmac_free_rx_buffers(priv, i); |
1092 | dma_unmap_single(priv->device, priv->rx_skbuff_dma[i], | ||
1093 | priv->dma_buf_sz, DMA_FROM_DEVICE); | ||
1094 | dev_kfree_skb_any(priv->rx_skbuff[i]); | ||
1095 | } | ||
1096 | priv->rx_skbuff[i] = NULL; | ||
1097 | } | ||
1098 | } | 1165 | } |
1099 | 1166 | ||
1100 | static void dma_free_tx_skbufs(struct stmmac_priv *priv) | 1167 | static void dma_free_tx_skbufs(struct stmmac_priv *priv) |
@@ -1560,12 +1627,17 @@ static int stmmac_open(struct net_device *dev) | |||
1560 | priv->dma_tx_size = STMMAC_ALIGN(dma_txsize); | 1627 | priv->dma_tx_size = STMMAC_ALIGN(dma_txsize); |
1561 | priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize); | 1628 | priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize); |
1562 | priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); | 1629 | priv->dma_buf_sz = STMMAC_ALIGN(buf_sz); |
1563 | init_dma_desc_rings(dev); | 1630 | |
1631 | ret = init_dma_desc_rings(dev); | ||
1632 | if (ret < 0) { | ||
1633 | pr_err("%s: DMA descriptors initialization failed\n", __func__); | ||
1634 | goto dma_desc_error; | ||
1635 | } | ||
1564 | 1636 | ||
1565 | /* DMA initialization and SW reset */ | 1637 | /* DMA initialization and SW reset */ |
1566 | ret = stmmac_init_dma_engine(priv); | 1638 | ret = stmmac_init_dma_engine(priv); |
1567 | if (ret < 0) { | 1639 | if (ret < 0) { |
1568 | pr_err("%s: DMA initialization failed\n", __func__); | 1640 | pr_err("%s: DMA engine initialization failed\n", __func__); |
1569 | goto init_error; | 1641 | goto init_error; |
1570 | } | 1642 | } |
1571 | 1643 | ||
@@ -1672,6 +1744,7 @@ wolirq_error: | |||
1672 | 1744 | ||
1673 | init_error: | 1745 | init_error: |
1674 | free_dma_desc_resources(priv); | 1746 | free_dma_desc_resources(priv); |
1747 | dma_desc_error: | ||
1675 | if (priv->phydev) | 1748 | if (priv->phydev) |
1676 | phy_disconnect(priv->phydev); | 1749 | phy_disconnect(priv->phydev); |
1677 | phy_error: | 1750 | phy_error: |
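The stmmac_main.c changes above make init_dma_desc_rings() report allocation failures and unwind everything set up so far through a chain of error labels, and stmmac_open() now bails out to a new dma_desc_error label when that happens. A compact userspace sketch of the goto-unwind idiom follows, with three made-up allocations standing in for the descriptor rings and buffer arrays; each label releases exactly what was already valid when the failure hit.

    #include <stdio.h>
    #include <stdlib.h>

    static int setup_rings(void)
    {
        void *rx_ring, *tx_ring, *bufs;

        rx_ring = malloc(256);
        if (!rx_ring)
            goto err_rx;

        tx_ring = malloc(256);
        if (!tx_ring)
            goto err_tx;

        bufs = calloc(64, 32);
        if (!bufs)
            goto err_bufs;

        /* Everything allocated; a real driver would keep these around.
         * They are released here only to keep the demo leak-free. */
        free(bufs);
        free(tx_ring);
        free(rx_ring);
        return 0;

        /* Unwind in reverse order of setup. */
    err_bufs:
        free(tx_ring);
    err_tx:
        free(rx_ring);
    err_rx:
        return -1;
    }

    int main(void)
    {
        printf("setup_rings() = %d\n", setup_rings());
        return 0;
    }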
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index 1d6dc41f755d..d01cacf8a7c2 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c | |||
@@ -2100,7 +2100,7 @@ static int velocity_receive_frame(struct velocity_info *vptr, int idx) | |||
2100 | 2100 | ||
2101 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); | 2101 | __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); |
2102 | } | 2102 | } |
2103 | netif_rx(skb); | 2103 | netif_receive_skb(skb); |
2104 | 2104 | ||
2105 | stats->rx_bytes += pkt_len; | 2105 | stats->rx_bytes += pkt_len; |
2106 | stats->rx_packets++; | 2106 | stats->rx_packets++; |
@@ -2884,6 +2884,7 @@ out: | |||
2884 | return ret; | 2884 | return ret; |
2885 | 2885 | ||
2886 | err_iounmap: | 2886 | err_iounmap: |
2887 | netif_napi_del(&vptr->napi); | ||
2887 | iounmap(regs); | 2888 | iounmap(regs); |
2888 | err_free_dev: | 2889 | err_free_dev: |
2889 | free_netdev(netdev); | 2890 | free_netdev(netdev); |
@@ -2904,6 +2905,7 @@ static int velocity_remove(struct device *dev) | |||
2904 | struct velocity_info *vptr = netdev_priv(netdev); | 2905 | struct velocity_info *vptr = netdev_priv(netdev); |
2905 | 2906 | ||
2906 | unregister_netdev(netdev); | 2907 | unregister_netdev(netdev); |
2908 | netif_napi_del(&vptr->napi); | ||
2907 | iounmap(vptr->mac_regs); | 2909 | iounmap(vptr->mac_regs); |
2908 | free_netdev(netdev); | 2910 | free_netdev(netdev); |
2909 | velocity_nics--; | 2911 | velocity_nics--; |
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index d0f9c2fd1d4f..16b43bf544b7 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
@@ -739,6 +739,10 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[]) | |||
739 | return -EADDRNOTAVAIL; | 739 | return -EADDRNOTAVAIL; |
740 | } | 740 | } |
741 | 741 | ||
742 | if (data && data[IFLA_MACVLAN_FLAGS] && | ||
743 | nla_get_u16(data[IFLA_MACVLAN_FLAGS]) & ~MACVLAN_FLAG_NOPROMISC) | ||
744 | return -EINVAL; | ||
745 | |||
742 | if (data && data[IFLA_MACVLAN_MODE]) { | 746 | if (data && data[IFLA_MACVLAN_MODE]) { |
743 | switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) { | 747 | switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) { |
744 | case MACVLAN_MODE_PRIVATE: | 748 | case MACVLAN_MODE_PRIVATE: |
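The macvlan hunk above rejects netlink requests carrying flag bits the driver does not understand (anything outside MACVLAN_FLAG_NOPROMISC), so future flags cannot be silently accepted and ignored by older kernels. A tiny sketch of that mask test with a made-up flag set:

    #include <stdint.h>
    #include <stdio.h>

    #define FLAG_NOPROMISC  0x1                 /* only flag understood here */
    #define KNOWN_FLAGS     (FLAG_NOPROMISC)

    static int validate_flags(uint16_t flags)
    {
        /* Any bit outside the known set is an error. */
        if (flags & ~KNOWN_FLAGS)
            return -1;
        return 0;
    }

    int main(void)
    {
        printf("flags=0x1 -> %d\n", validate_flags(0x1));   /* accepted */
        printf("flags=0x2 -> %d\n", validate_flags(0x2));   /* rejected */
        return 0;
    }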
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index a98fb0ed6aef..b51db2abfe44 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c | |||
@@ -818,10 +818,13 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, | |||
818 | skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; | 818 | skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; |
819 | skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; | 819 | skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; |
820 | } | 820 | } |
821 | if (vlan) | 821 | if (vlan) { |
822 | local_bh_disable(); | ||
822 | macvlan_start_xmit(skb, vlan->dev); | 823 | macvlan_start_xmit(skb, vlan->dev); |
823 | else | 824 | local_bh_enable(); |
825 | } else { | ||
824 | kfree_skb(skb); | 826 | kfree_skb(skb); |
827 | } | ||
825 | rcu_read_unlock(); | 828 | rcu_read_unlock(); |
826 | 829 | ||
827 | return total_len; | 830 | return total_len; |
@@ -912,8 +915,11 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q, | |||
912 | done: | 915 | done: |
913 | rcu_read_lock(); | 916 | rcu_read_lock(); |
914 | vlan = rcu_dereference(q->vlan); | 917 | vlan = rcu_dereference(q->vlan); |
915 | if (vlan) | 918 | if (vlan) { |
919 | preempt_disable(); | ||
916 | macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0); | 920 | macvlan_count_rx(vlan, copied - vnet_hdr_len, ret == 0, 0); |
921 | preempt_enable(); | ||
922 | } | ||
917 | rcu_read_unlock(); | 923 | rcu_read_unlock(); |
918 | 924 | ||
919 | return ret ? ret : copied; | 925 | return ret ? ret : copied; |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index db690a372260..71af122edf2d 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -1074,8 +1074,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, | |||
1074 | u32 rxhash; | 1074 | u32 rxhash; |
1075 | 1075 | ||
1076 | if (!(tun->flags & TUN_NO_PI)) { | 1076 | if (!(tun->flags & TUN_NO_PI)) { |
1077 | if ((len -= sizeof(pi)) > total_len) | 1077 | if (len < sizeof(pi)) |
1078 | return -EINVAL; | 1078 | return -EINVAL; |
1079 | len -= sizeof(pi); | ||
1079 | 1080 | ||
1080 | if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi))) | 1081 | if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi))) |
1081 | return -EFAULT; | 1082 | return -EFAULT; |
@@ -1083,8 +1084,9 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, | |||
1083 | } | 1084 | } |
1084 | 1085 | ||
1085 | if (tun->flags & TUN_VNET_HDR) { | 1086 | if (tun->flags & TUN_VNET_HDR) { |
1086 | if ((len -= tun->vnet_hdr_sz) > total_len) | 1087 | if (len < tun->vnet_hdr_sz) |
1087 | return -EINVAL; | 1088 | return -EINVAL; |
1089 | len -= tun->vnet_hdr_sz; | ||
1088 | 1090 | ||
1089 | if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso))) | 1091 | if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso))) |
1090 | return -EFAULT; | 1092 | return -EFAULT; |
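The tun.c hunks above validate the header length up front (len < sizeof(pi), then len < tun->vnet_hdr_sz) and subtract only afterwards, instead of subtracting first and comparing the wrapped result against total_len. The userspace sketch below shows the two shapes on an unsigned size; the pre-check states the bound directly and does not lean on wraparound behaviour, which is what made the original form fragile.

    #include <stdio.h>
    #include <stddef.h>

    #define HDR_LEN ((size_t)8)

    /* Subtract-first: detection of a short buffer relies on the unsigned
     * subtraction wrapping to a huge value. */
    static int parse_subtract_first(size_t len, size_t total_len)
    {
        if ((len -= HDR_LEN) > total_len)
            return -1;
        return (int)len;
    }

    /* Check-first: the bound is explicit, no wraparound involved. */
    static int parse_check_first(size_t len)
    {
        if (len < HDR_LEN)
            return -1;
        len -= HDR_LEN;
        return (int)len;
    }

    int main(void)
    {
        size_t short_len = 4;

        printf("4 - 8 as size_t = %zu (wrapped)\n", short_len - HDR_LEN);
        printf("subtract-first: %d\n", parse_subtract_first(short_len, short_len));
        printf("check-first:    %d\n", parse_check_first(short_len));
        return 0;
    }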
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index f4c6db419ddb..767f7af3bd40 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
@@ -1386,7 +1386,7 @@ static int vxlan_open(struct net_device *dev) | |||
1386 | return -ENOTCONN; | 1386 | return -ENOTCONN; |
1387 | 1387 | ||
1388 | if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)) && | 1388 | if (IN_MULTICAST(ntohl(vxlan->default_dst.remote_ip)) && |
1389 | ! vxlan_group_used(vn, vxlan->default_dst.remote_ip)) { | 1389 | vxlan_group_used(vn, vxlan->default_dst.remote_ip)) { |
1390 | vxlan_sock_hold(vs); | 1390 | vxlan_sock_hold(vs); |
1391 | dev_hold(dev); | 1391 | dev_hold(dev); |
1392 | queue_work(vxlan_wq, &vxlan->igmp_join); | 1392 | queue_work(vxlan_wq, &vxlan->igmp_join); |
@@ -1793,8 +1793,6 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head) | |||
1793 | struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); | 1793 | struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id); |
1794 | struct vxlan_dev *vxlan = netdev_priv(dev); | 1794 | struct vxlan_dev *vxlan = netdev_priv(dev); |
1795 | 1795 | ||
1796 | flush_workqueue(vxlan_wq); | ||
1797 | |||
1798 | spin_lock(&vn->sock_lock); | 1796 | spin_lock(&vn->sock_lock); |
1799 | hlist_del_rcu(&vxlan->hlist); | 1797 | hlist_del_rcu(&vxlan->hlist); |
1800 | spin_unlock(&vn->sock_lock); | 1798 | spin_unlock(&vn->sock_lock); |
diff --git a/drivers/net/wireless/cw1200/sta.c b/drivers/net/wireless/cw1200/sta.c index 7365674366f4..010b252be584 100644 --- a/drivers/net/wireless/cw1200/sta.c +++ b/drivers/net/wireless/cw1200/sta.c | |||
@@ -1406,11 +1406,8 @@ static void cw1200_do_unjoin(struct cw1200_common *priv) | |||
1406 | if (!priv->join_status) | 1406 | if (!priv->join_status) |
1407 | goto done; | 1407 | goto done; |
1408 | 1408 | ||
1409 | if (priv->join_status > CW1200_JOIN_STATUS_IBSS) { | 1409 | if (priv->join_status == CW1200_JOIN_STATUS_AP) |
1410 | wiphy_err(priv->hw->wiphy, "Unexpected: join status: %d\n", | 1410 | goto done; |
1411 | priv->join_status); | ||
1412 | BUG_ON(1); | ||
1413 | } | ||
1414 | 1411 | ||
1415 | cancel_work_sync(&priv->update_filtering_work); | 1412 | cancel_work_sync(&priv->update_filtering_work); |
1416 | cancel_work_sync(&priv->set_beacon_wakeup_period_work); | 1413 | cancel_work_sync(&priv->set_beacon_wakeup_period_work); |
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c index b9b2bb51e605..f2ed62e37340 100644 --- a/drivers/net/wireless/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/iwlegacy/4965-mac.c | |||
@@ -4460,12 +4460,12 @@ il4965_irq_tasklet(struct il_priv *il) | |||
4460 | * is killed. Hence update the killswitch state here. The | 4460 | * is killed. Hence update the killswitch state here. The |
4461 | * rfkill handler will care about restarting if needed. | 4461 | * rfkill handler will care about restarting if needed. |
4462 | */ | 4462 | */ |
4463 | if (!test_bit(S_ALIVE, &il->status)) { | 4463 | if (hw_rf_kill) { |
4464 | if (hw_rf_kill) | 4464 | set_bit(S_RFKILL, &il->status); |
4465 | set_bit(S_RFKILL, &il->status); | 4465 | } else { |
4466 | else | 4466 | clear_bit(S_RFKILL, &il->status); |
4467 | clear_bit(S_RFKILL, &il->status); | ||
4468 | wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill); | 4467 | wiphy_rfkill_set_hw_state(il->hw->wiphy, hw_rf_kill); |
4468 | il_force_reset(il, true); | ||
4469 | } | 4469 | } |
4470 | 4470 | ||
4471 | handled |= CSR_INT_BIT_RF_KILL; | 4471 | handled |= CSR_INT_BIT_RF_KILL; |
@@ -5334,6 +5334,9 @@ il4965_alive_start(struct il_priv *il) | |||
5334 | 5334 | ||
5335 | il->active_rate = RATES_MASK; | 5335 | il->active_rate = RATES_MASK; |
5336 | 5336 | ||
5337 | il_power_update_mode(il, true); | ||
5338 | D_INFO("Updated power mode\n"); | ||
5339 | |||
5337 | if (il_is_associated(il)) { | 5340 | if (il_is_associated(il)) { |
5338 | struct il_rxon_cmd *active_rxon = | 5341 | struct il_rxon_cmd *active_rxon = |
5339 | (struct il_rxon_cmd *)&il->active; | 5342 | (struct il_rxon_cmd *)&il->active; |
@@ -5364,9 +5367,6 @@ il4965_alive_start(struct il_priv *il) | |||
5364 | D_INFO("ALIVE processing complete.\n"); | 5367 | D_INFO("ALIVE processing complete.\n"); |
5365 | wake_up(&il->wait_command_queue); | 5368 | wake_up(&il->wait_command_queue); |
5366 | 5369 | ||
5367 | il_power_update_mode(il, true); | ||
5368 | D_INFO("Updated power mode\n"); | ||
5369 | |||
5370 | return; | 5370 | return; |
5371 | 5371 | ||
5372 | restart: | 5372 | restart: |
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c index 3195aad440dd..b03e22ef5462 100644 --- a/drivers/net/wireless/iwlegacy/common.c +++ b/drivers/net/wireless/iwlegacy/common.c | |||
@@ -4660,6 +4660,7 @@ il_force_reset(struct il_priv *il, bool external) | |||
4660 | 4660 | ||
4661 | return 0; | 4661 | return 0; |
4662 | } | 4662 | } |
4663 | EXPORT_SYMBOL(il_force_reset); | ||
4663 | 4664 | ||
4664 | int | 4665 | int |
4665 | il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif, | 4666 | il_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif, |
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index dbdc5f7e2b29..01e264fb50e0 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c | |||
@@ -317,13 +317,20 @@ void acpi_pci_remove_bus(struct pci_bus *bus) | |||
317 | /* ACPI bus type */ | 317 | /* ACPI bus type */ |
318 | static int acpi_pci_find_device(struct device *dev, acpi_handle *handle) | 318 | static int acpi_pci_find_device(struct device *dev, acpi_handle *handle) |
319 | { | 319 | { |
320 | struct pci_dev * pci_dev; | 320 | struct pci_dev *pci_dev = to_pci_dev(dev); |
321 | u64 addr; | 321 | bool is_bridge; |
322 | u64 addr; | ||
322 | 323 | ||
323 | pci_dev = to_pci_dev(dev); | 324 | /* |
325 | * pci_is_bridge() is not suitable here, because pci_dev->subordinate | ||
326 | * is set only after acpi_pci_find_device() has been called for the | ||
327 | * given device. | ||
328 | */ | ||
329 | is_bridge = pci_dev->hdr_type == PCI_HEADER_TYPE_BRIDGE | ||
330 | || pci_dev->hdr_type == PCI_HEADER_TYPE_CARDBUS; | ||
324 | /* Please ref to ACPI spec for the syntax of _ADR */ | 331 | /* Please ref to ACPI spec for the syntax of _ADR */ |
325 | addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn); | 332 | addr = (PCI_SLOT(pci_dev->devfn) << 16) | PCI_FUNC(pci_dev->devfn); |
326 | *handle = acpi_get_child(DEVICE_ACPI_HANDLE(dev->parent), addr); | 333 | *handle = acpi_find_child(ACPI_HANDLE(dev->parent), addr, is_bridge); |
327 | if (!*handle) | 334 | if (!*handle) |
328 | return -ENODEV; | 335 | return -ENODEV; |
329 | return 0; | 336 | return 0; |
diff --git a/drivers/rtc/rtc-stmp3xxx.c b/drivers/rtc/rtc-stmp3xxx.c index 767fee2ab340..26019531db15 100644 --- a/drivers/rtc/rtc-stmp3xxx.c +++ b/drivers/rtc/rtc-stmp3xxx.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/init.h> | 23 | #include <linux/init.h> |
24 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
25 | #include <linux/interrupt.h> | 25 | #include <linux/interrupt.h> |
26 | #include <linux/delay.h> | ||
26 | #include <linux/rtc.h> | 27 | #include <linux/rtc.h> |
27 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
28 | #include <linux/of_device.h> | 29 | #include <linux/of_device.h> |
@@ -119,24 +120,39 @@ static void stmp3xxx_wdt_register(struct platform_device *rtc_pdev) | |||
119 | } | 120 | } |
120 | #endif /* CONFIG_STMP3XXX_RTC_WATCHDOG */ | 121 | #endif /* CONFIG_STMP3XXX_RTC_WATCHDOG */ |
121 | 122 | ||
122 | static void stmp3xxx_wait_time(struct stmp3xxx_rtc_data *rtc_data) | 123 | static int stmp3xxx_wait_time(struct stmp3xxx_rtc_data *rtc_data) |
123 | { | 124 | { |
125 | int timeout = 5000; /* 3ms according to i.MX28 Ref Manual */ | ||
124 | /* | 126 | /* |
125 | * The datasheet doesn't say which way round the | 127 | * The i.MX28 Applications Processor Reference Manual, Rev. 1, 2010 |
126 | * NEW_REGS/STALE_REGS bitfields go. In fact it's 0x1=P0, | 128 | * states: |
127 | * 0x2=P1, .., 0x20=P5, 0x40=ALARM, 0x80=SECONDS | 129 | * | The order in which registers are updated is |
130 | * | Persistent 0, 1, 2, 3, 4, 5, Alarm, Seconds. | ||
131 | * | (This list is in bitfield order, from LSB to MSB, as they would | ||
132 | * | appear in the STALE_REGS and NEW_REGS bitfields of the HW_RTC_STAT | ||
133 | * | register. For example, the Seconds register corresponds to | ||
134 | * | STALE_REGS or NEW_REGS containing 0x80.) | ||
128 | */ | 135 | */ |
129 | while (readl(rtc_data->io + STMP3XXX_RTC_STAT) & | 136 | do { |
130 | (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT)) | 137 | if (!(readl(rtc_data->io + STMP3XXX_RTC_STAT) & |
131 | cpu_relax(); | 138 | (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT))) |
139 | return 0; | ||
140 | udelay(1); | ||
141 | } while (--timeout > 0); | ||
142 | return (readl(rtc_data->io + STMP3XXX_RTC_STAT) & | ||
143 | (0x80 << STMP3XXX_RTC_STAT_STALE_SHIFT)) ? -ETIME : 0; | ||
132 | } | 144 | } |
133 | 145 | ||
134 | /* Time read/write */ | 146 | /* Time read/write */ |
135 | static int stmp3xxx_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) | 147 | static int stmp3xxx_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) |
136 | { | 148 | { |
149 | int ret; | ||
137 | struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev); | 150 | struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev); |
138 | 151 | ||
139 | stmp3xxx_wait_time(rtc_data); | 152 | ret = stmp3xxx_wait_time(rtc_data); |
153 | if (ret) | ||
154 | return ret; | ||
155 | |||
140 | rtc_time_to_tm(readl(rtc_data->io + STMP3XXX_RTC_SECONDS), rtc_tm); | 156 | rtc_time_to_tm(readl(rtc_data->io + STMP3XXX_RTC_SECONDS), rtc_tm); |
141 | return 0; | 157 | return 0; |
142 | } | 158 | } |
@@ -146,8 +162,7 @@ static int stmp3xxx_rtc_set_mmss(struct device *dev, unsigned long t) | |||
146 | struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev); | 162 | struct stmp3xxx_rtc_data *rtc_data = dev_get_drvdata(dev); |
147 | 163 | ||
148 | writel(t, rtc_data->io + STMP3XXX_RTC_SECONDS); | 164 | writel(t, rtc_data->io + STMP3XXX_RTC_SECONDS); |
149 | stmp3xxx_wait_time(rtc_data); | 165 | return stmp3xxx_wait_time(rtc_data); |
150 | return 0; | ||
151 | } | 166 | } |
152 | 167 | ||
153 | /* interrupt(s) handler */ | 168 | /* interrupt(s) handler */ |
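The rtc-stmp3xxx change above turns an unbounded busy-wait on the STALE_REGS Seconds bit into a bounded poll (5000 iterations of udelay(1), sized against the 3 ms figure quoted from the i.MX28 manual) that returns -ETIME if the bit never clears, and the callers now propagate that error. A userspace sketch of the same poll-with-deadline shape, with a fake status read standing in for the hardware register:

    #include <stdio.h>
    #include <unistd.h>

    #define STALE_BIT 0x80

    /* Stand-in for the hardware status register read; the stale bit
     * "clears" after a few polls. */
    static unsigned int read_status(void)
    {
        static int countdown = 3;
        return countdown-- > 0 ? STALE_BIT : 0;
    }

    /* Poll until the stale bit clears or the time budget is spent. */
    static int wait_time_settled(void)
    {
        int timeout = 5000;             /* ~5 ms at 1 us per iteration */

        do {
            if (!(read_status() & STALE_BIT))
                return 0;
            usleep(1);
        } while (--timeout > 0);

        return -1;                      /* the driver returns -ETIME here */
    }

    int main(void)
    {
        printf("wait_time_settled() = %d\n", wait_time_settled());
        return 0;
    }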
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 17150a778984..451bf99582ff 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
@@ -2392,6 +2392,12 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) | |||
2392 | rc = cqr->intrc; | 2392 | rc = cqr->intrc; |
2393 | else | 2393 | else |
2394 | rc = -EIO; | 2394 | rc = -EIO; |
2395 | |||
2396 | /* kick tasklets */ | ||
2397 | dasd_schedule_device_bh(device); | ||
2398 | if (device->block) | ||
2399 | dasd_schedule_block_bh(device->block); | ||
2400 | |||
2395 | return rc; | 2401 | return rc; |
2396 | } | 2402 | } |
2397 | 2403 | ||
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index b6d1f92ed33c..c18c68150e9f 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h | |||
@@ -38,7 +38,7 @@ | |||
38 | 38 | ||
39 | #define DRV_NAME "fnic" | 39 | #define DRV_NAME "fnic" |
40 | #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" | 40 | #define DRV_DESCRIPTION "Cisco FCoE HBA Driver" |
41 | #define DRV_VERSION "1.5.0.22" | 41 | #define DRV_VERSION "1.5.0.23" |
42 | #define PFX DRV_NAME ": " | 42 | #define PFX DRV_NAME ": " |
43 | #define DFX DRV_NAME "%d: " | 43 | #define DFX DRV_NAME "%d: " |
44 | 44 | ||
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index 5f09d1814d26..42e15ee6e1bb 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c | |||
@@ -642,19 +642,6 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
642 | INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame); | 642 | INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame); |
643 | INIT_WORK(&fnic->event_work, fnic_handle_event); | 643 | INIT_WORK(&fnic->event_work, fnic_handle_event); |
644 | skb_queue_head_init(&fnic->fip_frame_queue); | 644 | skb_queue_head_init(&fnic->fip_frame_queue); |
645 | spin_lock_irqsave(&fnic_list_lock, flags); | ||
646 | if (!fnic_fip_queue) { | ||
647 | fnic_fip_queue = | ||
648 | create_singlethread_workqueue("fnic_fip_q"); | ||
649 | if (!fnic_fip_queue) { | ||
650 | spin_unlock_irqrestore(&fnic_list_lock, flags); | ||
651 | printk(KERN_ERR PFX "fnic FIP work queue " | ||
652 | "create failed\n"); | ||
653 | err = -ENOMEM; | ||
654 | goto err_out_free_max_pool; | ||
655 | } | ||
656 | } | ||
657 | spin_unlock_irqrestore(&fnic_list_lock, flags); | ||
658 | INIT_LIST_HEAD(&fnic->evlist); | 645 | INIT_LIST_HEAD(&fnic->evlist); |
659 | INIT_LIST_HEAD(&fnic->vlans); | 646 | INIT_LIST_HEAD(&fnic->vlans); |
660 | } else { | 647 | } else { |
@@ -960,6 +947,13 @@ static int __init fnic_init_module(void) | |||
960 | spin_lock_init(&fnic_list_lock); | 947 | spin_lock_init(&fnic_list_lock); |
961 | INIT_LIST_HEAD(&fnic_list); | 948 | INIT_LIST_HEAD(&fnic_list); |
962 | 949 | ||
950 | fnic_fip_queue = create_singlethread_workqueue("fnic_fip_q"); | ||
951 | if (!fnic_fip_queue) { | ||
952 | printk(KERN_ERR PFX "fnic FIP work queue create failed\n"); | ||
953 | err = -ENOMEM; | ||
954 | goto err_create_fip_workq; | ||
955 | } | ||
956 | |||
963 | fnic_fc_transport = fc_attach_transport(&fnic_fc_functions); | 957 | fnic_fc_transport = fc_attach_transport(&fnic_fc_functions); |
964 | if (!fnic_fc_transport) { | 958 | if (!fnic_fc_transport) { |
965 | printk(KERN_ERR PFX "fc_attach_transport error\n"); | 959 | printk(KERN_ERR PFX "fc_attach_transport error\n"); |
@@ -978,6 +972,8 @@ static int __init fnic_init_module(void) | |||
978 | err_pci_register: | 972 | err_pci_register: |
979 | fc_release_transport(fnic_fc_transport); | 973 | fc_release_transport(fnic_fc_transport); |
980 | err_fc_transport: | 974 | err_fc_transport: |
975 | destroy_workqueue(fnic_fip_queue); | ||
976 | err_create_fip_workq: | ||
981 | destroy_workqueue(fnic_event_queue); | 977 | destroy_workqueue(fnic_event_queue); |
982 | err_create_fnic_workq: | 978 | err_create_fnic_workq: |
983 | kmem_cache_destroy(fnic_io_req_cache); | 979 | kmem_cache_destroy(fnic_io_req_cache); |
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 0177295599e0..1f0ca68409d4 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c | |||
@@ -3547,11 +3547,21 @@ static int megasas_init_fw(struct megasas_instance *instance) | |||
3547 | break; | 3547 | break; |
3548 | } | 3548 | } |
3549 | 3549 | ||
3550 | /* | 3550 | if (megasas_transition_to_ready(instance, 0)) { |
3551 | * We expect the FW state to be READY | 3551 | atomic_set(&instance->fw_reset_no_pci_access, 1); |
3552 | */ | 3552 | instance->instancet->adp_reset |
3553 | if (megasas_transition_to_ready(instance, 0)) | 3553 | (instance, instance->reg_set); |
3554 | goto fail_ready_state; | 3554 | atomic_set(&instance->fw_reset_no_pci_access, 0); |
3555 | dev_info(&instance->pdev->dev, | ||
3556 | "megasas: FW restarted successfully from %s!\n", | ||
3557 | __func__); | ||
3558 | |||
3559 | /*waitting for about 30 second before retry*/ | ||
3560 | ssleep(30); | ||
3561 | |||
3562 | if (megasas_transition_to_ready(instance, 0)) | ||
3563 | goto fail_ready_state; | ||
3564 | } | ||
3555 | 3565 | ||
3556 | /* | 3566 | /* |
3557 | * MSI-X host index 0 is common for all adapter. | 3567 | * MSI-X host index 0 is common for all adapter. |
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 3b1ea34e1f5a..eaa808e6ba91 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c | |||
@@ -1031,6 +1031,9 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf, | |||
1031 | { | 1031 | { |
1032 | int i, result; | 1032 | int i, result; |
1033 | 1033 | ||
1034 | if (sdev->skip_vpd_pages) | ||
1035 | goto fail; | ||
1036 | |||
1034 | /* Ask for all the pages supported by this device */ | 1037 | /* Ask for all the pages supported by this device */ |
1035 | result = scsi_vpd_inquiry(sdev, buf, 0, buf_len); | 1038 | result = scsi_vpd_inquiry(sdev, buf, 0, buf_len); |
1036 | if (result) | 1039 | if (result) |
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 2168258fb2c3..74b88efde6ad 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c | |||
@@ -751,7 +751,7 @@ static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity) | |||
751 | 751 | ||
752 | vscsi->affinity_hint_set = true; | 752 | vscsi->affinity_hint_set = true; |
753 | } else { | 753 | } else { |
754 | for (i = 0; i < vscsi->num_queues - VIRTIO_SCSI_VQ_BASE; i++) | 754 | for (i = 0; i < vscsi->num_queues; i++) |
755 | virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1); | 755 | virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1); |
756 | 756 | ||
757 | vscsi->affinity_hint_set = false; | 757 | vscsi->affinity_hint_set = false; |
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c index 222d3e37fc28..707966bd5610 100644 --- a/drivers/spi/spi-davinci.c +++ b/drivers/spi/spi-davinci.c | |||
@@ -609,7 +609,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t) | |||
609 | else | 609 | else |
610 | buf = (void *)t->tx_buf; | 610 | buf = (void *)t->tx_buf; |
611 | t->tx_dma = dma_map_single(&spi->dev, buf, | 611 | t->tx_dma = dma_map_single(&spi->dev, buf, |
612 | t->len, DMA_FROM_DEVICE); | 612 | t->len, DMA_TO_DEVICE); |
613 | if (!t->tx_dma) { | 613 | if (!t->tx_dma) { |
614 | ret = -EFAULT; | 614 | ret = -EFAULT; |
615 | goto err_tx_map; | 615 | goto err_tx_map; |
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c index dcceed29d31a..81972fa47beb 100644 --- a/drivers/staging/zcache/zcache-main.c +++ b/drivers/staging/zcache/zcache-main.c | |||
@@ -1811,10 +1811,12 @@ static int zcache_comp_init(void) | |||
1811 | #else | 1811 | #else |
1812 | if (*zcache_comp_name != '\0') { | 1812 | if (*zcache_comp_name != '\0') { |
1813 | ret = crypto_has_comp(zcache_comp_name, 0, 0); | 1813 | ret = crypto_has_comp(zcache_comp_name, 0, 0); |
1814 | if (!ret) | 1814 | if (!ret) { |
1815 | pr_info("zcache: %s not supported\n", | 1815 | pr_info("zcache: %s not supported\n", |
1816 | zcache_comp_name); | 1816 | zcache_comp_name); |
1817 | goto out; | 1817 | ret = 1; |
1818 | goto out; | ||
1819 | } | ||
1818 | } | 1820 | } |
1819 | if (!ret) | 1821 | if (!ret) |
1820 | strcpy(zcache_comp_name, "lzo"); | 1822 | strcpy(zcache_comp_name, "lzo"); |
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c index 60dd8918aeb9..66c4001306f0 100644 --- a/drivers/usb/class/usbtmc.c +++ b/drivers/usb/class/usbtmc.c | |||
@@ -1117,11 +1117,11 @@ static int usbtmc_probe(struct usb_interface *intf, | |||
1117 | /* Determine if it is a Rigol or not */ | 1117 | /* Determine if it is a Rigol or not */ |
1118 | data->rigol_quirk = 0; | 1118 | data->rigol_quirk = 0; |
1119 | dev_dbg(&intf->dev, "Trying to find if device Vendor 0x%04X Product 0x%04X has the RIGOL quirk\n", | 1119 | dev_dbg(&intf->dev, "Trying to find if device Vendor 0x%04X Product 0x%04X has the RIGOL quirk\n", |
1120 | data->usb_dev->descriptor.idVendor, | 1120 | le16_to_cpu(data->usb_dev->descriptor.idVendor), |
1121 | data->usb_dev->descriptor.idProduct); | 1121 | le16_to_cpu(data->usb_dev->descriptor.idProduct)); |
1122 | for(n = 0; usbtmc_id_quirk[n].idVendor > 0; n++) { | 1122 | for(n = 0; usbtmc_id_quirk[n].idVendor > 0; n++) { |
1123 | if ((usbtmc_id_quirk[n].idVendor == data->usb_dev->descriptor.idVendor) && | 1123 | if ((usbtmc_id_quirk[n].idVendor == le16_to_cpu(data->usb_dev->descriptor.idVendor)) && |
1124 | (usbtmc_id_quirk[n].idProduct == data->usb_dev->descriptor.idProduct)) { | 1124 | (usbtmc_id_quirk[n].idProduct == le16_to_cpu(data->usb_dev->descriptor.idProduct))) { |
1125 | dev_dbg(&intf->dev, "Setting this device as having the RIGOL quirk\n"); | 1125 | dev_dbg(&intf->dev, "Setting this device as having the RIGOL quirk\n"); |
1126 | data->rigol_quirk = 1; | 1126 | data->rigol_quirk = 1; |
1127 | break; | 1127 | break; |
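The usbtmc hunk above (like the adutux one further down) wraps descriptor fields in le16_to_cpu() before comparing or printing them: idVendor and idProduct are stored little-endian in the USB descriptor, so the raw values come out byte-swapped on big-endian hosts. A userspace sketch of the decode, using explicit byte shifts instead of the kernel helper; the example bytes spell Rigol's 0x1ab1 vendor ID.

    #include <stdint.h>
    #include <stdio.h>

    /* Decode a 16-bit little-endian field from raw descriptor bytes,
     * which is what le16_to_cpu() yields regardless of host order. */
    static uint16_t le16_decode(const uint8_t *p)
    {
        return (uint16_t)(p[0] | (p[1] << 8));
    }

    int main(void)
    {
        /* idVendor 0x1ab1 as it appears on the wire: low byte first. */
        uint8_t raw[2] = { 0xb1, 0x1a };

        uint16_t id      = le16_decode(raw);
        uint16_t swapped = (uint16_t)((raw[0] << 8) | raw[1]);  /* big-endian misread */

        printf("decoded idVendor: 0x%04x\n", (unsigned)id);       /* 0x1ab1 */
        printf("byte-swapped:     0x%04x\n", (unsigned)swapped);  /* 0xb11a */
        return 0;
    }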
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index a63598895077..5b44cd47da5b 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c | |||
@@ -78,6 +78,12 @@ static const struct usb_device_id usb_quirk_list[] = { | |||
78 | { USB_DEVICE(0x04d8, 0x000c), .driver_info = | 78 | { USB_DEVICE(0x04d8, 0x000c), .driver_info = |
79 | USB_QUIRK_CONFIG_INTF_STRINGS }, | 79 | USB_QUIRK_CONFIG_INTF_STRINGS }, |
80 | 80 | ||
81 | /* CarrolTouch 4000U */ | ||
82 | { USB_DEVICE(0x04e7, 0x0009), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
83 | |||
84 | /* CarrolTouch 4500U */ | ||
85 | { USB_DEVICE(0x04e7, 0x0030), .driver_info = USB_QUIRK_RESET_RESUME }, | ||
86 | |||
81 | /* Samsung Android phone modem - ID conflict with SPH-I500 */ | 87 | /* Samsung Android phone modem - ID conflict with SPH-I500 */ |
82 | { USB_DEVICE(0x04e8, 0x6601), .driver_info = | 88 | { USB_DEVICE(0x04e8, 0x6601), .driver_info = |
83 | USB_QUIRK_CONFIG_INTF_STRINGS }, | 89 | USB_QUIRK_CONFIG_INTF_STRINGS }, |
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index 94388738a6f7..66310894ad97 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c | |||
@@ -1434,21 +1434,20 @@ iso_stream_schedule ( | |||
1434 | 1434 | ||
1435 | /* Behind the scheduling threshold? */ | 1435 | /* Behind the scheduling threshold? */ |
1436 | if (unlikely(start < next)) { | 1436 | if (unlikely(start < next)) { |
1437 | unsigned now2 = (now - base) & (mod - 1); | ||
1437 | 1438 | ||
1438 | /* USB_ISO_ASAP: Round up to the first available slot */ | 1439 | /* USB_ISO_ASAP: Round up to the first available slot */ |
1439 | if (urb->transfer_flags & URB_ISO_ASAP) | 1440 | if (urb->transfer_flags & URB_ISO_ASAP) |
1440 | start += (next - start + period - 1) & -period; | 1441 | start += (next - start + period - 1) & -period; |
1441 | 1442 | ||
1442 | /* | 1443 | /* |
1443 | * Not ASAP: Use the next slot in the stream. If | 1444 | * Not ASAP: Use the next slot in the stream, |
1444 | * the entire URB falls before the threshold, fail. | 1445 | * no matter what. |
1445 | */ | 1446 | */ |
1446 | else if (start + span - period < next) { | 1447 | else if (start + span - period < now2) { |
1447 | ehci_dbg(ehci, "iso urb late %p (%u+%u < %u)\n", | 1448 | ehci_dbg(ehci, "iso underrun %p (%u+%u < %u)\n", |
1448 | urb, start + base, | 1449 | urb, start + base, |
1449 | span - period, next + base); | 1450 | span - period, now2 + base); |
1450 | status = -EXDEV; | ||
1451 | goto fail; | ||
1452 | } | 1451 | } |
1453 | } | 1452 | } |
1454 | 1453 | ||
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index b150360d1e78..53b972c2a09f 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/pci.h> | 24 | #include <linux/pci.h> |
25 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
26 | #include <linux/dmapool.h> | 26 | #include <linux/dmapool.h> |
27 | #include <linux/dma-mapping.h> | ||
27 | 28 | ||
28 | #include "xhci.h" | 29 | #include "xhci.h" |
29 | #include "xhci-trace.h" | 30 | #include "xhci-trace.h" |
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 7299b591a341..bf11af9a4699 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/moduleparam.h> | 27 | #include <linux/moduleparam.h> |
28 | #include <linux/slab.h> | 28 | #include <linux/slab.h> |
29 | #include <linux/dmi.h> | 29 | #include <linux/dmi.h> |
30 | #include <linux/dma-mapping.h> | ||
30 | 31 | ||
31 | #include "xhci.h" | 32 | #include "xhci.h" |
32 | #include "xhci-trace.h" | 33 | #include "xhci-trace.h" |
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c index 7078e9bf0fc0..3eaa83f05086 100644 --- a/drivers/usb/misc/adutux.c +++ b/drivers/usb/misc/adutux.c | |||
@@ -783,7 +783,7 @@ static int adu_probe(struct usb_interface *interface, | |||
783 | 783 | ||
784 | /* let the user know what node this device is now attached to */ | 784 | /* let the user know what node this device is now attached to */ |
785 | dev_info(&interface->dev, "ADU%d %s now attached to /dev/usb/adutux%d\n", | 785 | dev_info(&interface->dev, "ADU%d %s now attached to /dev/usb/adutux%d\n", |
786 | udev->descriptor.idProduct, dev->serial_number, | 786 | le16_to_cpu(udev->descriptor.idProduct), dev->serial_number, |
787 | (dev->minor - ADU_MINOR_BASE)); | 787 | (dev->minor - ADU_MINOR_BASE)); |
788 | exit: | 788 | exit: |
789 | return retval; | 789 | return retval; |
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c index e731bbc166a0..d6960aebe246 100644 --- a/drivers/usb/serial/keyspan.c +++ b/drivers/usb/serial/keyspan.c | |||
@@ -2307,7 +2307,7 @@ static int keyspan_startup(struct usb_serial *serial) | |||
2307 | if (d_details == NULL) { | 2307 | if (d_details == NULL) { |
2308 | dev_err(&serial->dev->dev, "%s - unknown product id %x\n", | 2308 | dev_err(&serial->dev->dev, "%s - unknown product id %x\n", |
2309 | __func__, le16_to_cpu(serial->dev->descriptor.idProduct)); | 2309 | __func__, le16_to_cpu(serial->dev->descriptor.idProduct)); |
2310 | return 1; | 2310 | return -ENODEV; |
2311 | } | 2311 | } |
2312 | 2312 | ||
2313 | /* Setup private data for serial driver */ | 2313 | /* Setup private data for serial driver */ |
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c index 51da424327b0..b01300164fc0 100644 --- a/drivers/usb/serial/mos7720.c +++ b/drivers/usb/serial/mos7720.c | |||
@@ -90,6 +90,7 @@ struct urbtracker { | |||
90 | struct list_head urblist_entry; | 90 | struct list_head urblist_entry; |
91 | struct kref ref_count; | 91 | struct kref ref_count; |
92 | struct urb *urb; | 92 | struct urb *urb; |
93 | struct usb_ctrlrequest *setup; | ||
93 | }; | 94 | }; |
94 | 95 | ||
95 | enum mos7715_pp_modes { | 96 | enum mos7715_pp_modes { |
@@ -271,6 +272,7 @@ static void destroy_urbtracker(struct kref *kref) | |||
271 | struct mos7715_parport *mos_parport = urbtrack->mos_parport; | 272 | struct mos7715_parport *mos_parport = urbtrack->mos_parport; |
272 | 273 | ||
273 | usb_free_urb(urbtrack->urb); | 274 | usb_free_urb(urbtrack->urb); |
275 | kfree(urbtrack->setup); | ||
274 | kfree(urbtrack); | 276 | kfree(urbtrack); |
275 | kref_put(&mos_parport->ref_count, destroy_mos_parport); | 277 | kref_put(&mos_parport->ref_count, destroy_mos_parport); |
276 | } | 278 | } |
@@ -355,7 +357,6 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport, | |||
355 | struct urbtracker *urbtrack; | 357 | struct urbtracker *urbtrack; |
356 | int ret_val; | 358 | int ret_val; |
357 | unsigned long flags; | 359 | unsigned long flags; |
358 | struct usb_ctrlrequest setup; | ||
359 | struct usb_serial *serial = mos_parport->serial; | 360 | struct usb_serial *serial = mos_parport->serial; |
360 | struct usb_device *usbdev = serial->dev; | 361 | struct usb_device *usbdev = serial->dev; |
361 | 362 | ||
@@ -373,14 +374,20 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport, | |||
373 | kfree(urbtrack); | 374 | kfree(urbtrack); |
374 | return -ENOMEM; | 375 | return -ENOMEM; |
375 | } | 376 | } |
376 | setup.bRequestType = (__u8)0x40; | 377 | urbtrack->setup = kmalloc(sizeof(*urbtrack->setup), GFP_KERNEL); |
377 | setup.bRequest = (__u8)0x0e; | 378 | if (!urbtrack->setup) { |
378 | setup.wValue = get_reg_value(reg, dummy); | 379 | usb_free_urb(urbtrack->urb); |
379 | setup.wIndex = get_reg_index(reg); | 380 | kfree(urbtrack); |
380 | setup.wLength = 0; | 381 | return -ENOMEM; |
382 | } | ||
383 | urbtrack->setup->bRequestType = (__u8)0x40; | ||
384 | urbtrack->setup->bRequest = (__u8)0x0e; | ||
385 | urbtrack->setup->wValue = get_reg_value(reg, dummy); | ||
386 | urbtrack->setup->wIndex = get_reg_index(reg); | ||
387 | urbtrack->setup->wLength = 0; | ||
381 | usb_fill_control_urb(urbtrack->urb, usbdev, | 388 | usb_fill_control_urb(urbtrack->urb, usbdev, |
382 | usb_sndctrlpipe(usbdev, 0), | 389 | usb_sndctrlpipe(usbdev, 0), |
383 | (unsigned char *)&setup, | 390 | (unsigned char *)urbtrack->setup, |
384 | NULL, 0, async_complete, urbtrack); | 391 | NULL, 0, async_complete, urbtrack); |
385 | kref_init(&urbtrack->ref_count); | 392 | kref_init(&urbtrack->ref_count); |
386 | INIT_LIST_HEAD(&urbtrack->urblist_entry); | 393 | INIT_LIST_HEAD(&urbtrack->urblist_entry); |
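The mos7720 hunk above fixes a lifetime bug: usb_fill_control_urb() only records a pointer to the eight-byte setup packet, it does not copy it, so a setup packet on the caller's stack may be gone by the time the asynchronous URB is actually transmitted. The sketch below shows the same pattern in isolation; the names my_track, my_submit_ctrl and my_complete are made up for illustration and are not part of the driver.

    #include <linux/slab.h>
    #include <linux/usb.h>

    struct my_track {
        struct urb *urb;
        struct usb_ctrlrequest *setup;  /* must stay valid until completion */
    };

    static void my_complete(struct urb *urb)
    {
        struct my_track *t = urb->context;

        /* only now is it safe to release the setup packet */
        kfree(t->setup);
        usb_free_urb(t->urb);
        kfree(t);
    }

    static int my_submit_ctrl(struct usb_device *udev)
    {
        struct my_track *t = kzalloc(sizeof(*t), GFP_KERNEL);
        int ret;

        if (!t)
            return -ENOMEM;
        t->urb = usb_alloc_urb(0, GFP_KERNEL);
        t->setup = kmalloc(sizeof(*t->setup), GFP_KERNEL);
        if (!t->urb || !t->setup) {
            kfree(t->setup);
            usb_free_urb(t->urb);
            kfree(t);
            return -ENOMEM;
        }
        t->setup->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR; /* 0x40 */
        t->setup->bRequest = 0x0e;
        t->setup->wValue = cpu_to_le16(0);
        t->setup->wIndex = cpu_to_le16(0);
        t->setup->wLength = cpu_to_le16(0);
        usb_fill_control_urb(t->urb, udev, usb_sndctrlpipe(udev, 0),
                             (unsigned char *)t->setup, NULL, 0,
                             my_complete, t);
        ret = usb_submit_urb(t->urb, GFP_KERNEL);
        if (ret) {
            kfree(t->setup);
            usb_free_urb(t->urb);
            kfree(t);
        }
        return ret;
    }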
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index dfa678906632..fdf953539c62 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c | |||
@@ -2145,7 +2145,7 @@ static int mos7810_check(struct usb_serial *serial) | |||
2145 | static int mos7840_probe(struct usb_serial *serial, | 2145 | static int mos7840_probe(struct usb_serial *serial, |
2146 | const struct usb_device_id *id) | 2146 | const struct usb_device_id *id) |
2147 | { | 2147 | { |
2148 | u16 product = serial->dev->descriptor.idProduct; | 2148 | u16 product = le16_to_cpu(serial->dev->descriptor.idProduct); |
2149 | u8 *buf; | 2149 | u8 *buf; |
2150 | int device_type; | 2150 | int device_type; |
2151 | 2151 | ||
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index 5c07d55ece7a..760b78560f67 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c | |||
@@ -1463,14 +1463,15 @@ static int ti_download_firmware(struct ti_device *tdev) | |||
1463 | char buf[32]; | 1463 | char buf[32]; |
1464 | 1464 | ||
1465 | /* try ID specific firmware first, then try generic firmware */ | 1465 | /* try ID specific firmware first, then try generic firmware */ |
1466 | sprintf(buf, "ti_usb-v%04x-p%04x.fw", dev->descriptor.idVendor, | 1466 | sprintf(buf, "ti_usb-v%04x-p%04x.fw", |
1467 | dev->descriptor.idProduct); | 1467 | le16_to_cpu(dev->descriptor.idVendor), |
1468 | le16_to_cpu(dev->descriptor.idProduct)); | ||
1468 | status = request_firmware(&fw_p, buf, &dev->dev); | 1469 | status = request_firmware(&fw_p, buf, &dev->dev); |
1469 | 1470 | ||
1470 | if (status != 0) { | 1471 | if (status != 0) { |
1471 | buf[0] = '\0'; | 1472 | buf[0] = '\0'; |
1472 | if (dev->descriptor.idVendor == MTS_VENDOR_ID) { | 1473 | if (le16_to_cpu(dev->descriptor.idVendor) == MTS_VENDOR_ID) { |
1473 | switch (dev->descriptor.idProduct) { | 1474 | switch (le16_to_cpu(dev->descriptor.idProduct)) { |
1474 | case MTS_CDMA_PRODUCT_ID: | 1475 | case MTS_CDMA_PRODUCT_ID: |
1475 | strcpy(buf, "mts_cdma.fw"); | 1476 | strcpy(buf, "mts_cdma.fw"); |
1476 | break; | 1477 | break; |
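The adutux, keyspan, mos7840 and ti_usb_3410_5052 hunks above all apply the same fix: multi-byte fields of struct usb_device_descriptor such as idVendor and idProduct are stored little-endian as they arrive off the wire, so they must go through le16_to_cpu() before being printed or compared against host-order constants, otherwise the code silently misbehaves on big-endian machines. A minimal sketch of the pattern; MY_VENDOR_ID and my_is_supported are made-up names, not taken from these drivers.

    #include <linux/usb.h>

    #define MY_VENDOR_ID 0x1234  /* hypothetical vendor ID */

    static bool my_is_supported(struct usb_device *udev)
    {
        /* convert from wire (little-endian) to host byte order first */
        u16 vid = le16_to_cpu(udev->descriptor.idVendor);
        u16 pid = le16_to_cpu(udev->descriptor.idProduct);

        dev_info(&udev->dev, "found device %04x:%04x\n", vid, pid);
        return vid == MY_VENDOR_ID;
    }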
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c index 8257d30c4072..85365784040b 100644 --- a/drivers/usb/serial/usb_wwan.c +++ b/drivers/usb/serial/usb_wwan.c | |||
@@ -291,18 +291,18 @@ static void usb_wwan_indat_callback(struct urb *urb) | |||
291 | tty_flip_buffer_push(&port->port); | 291 | tty_flip_buffer_push(&port->port); |
292 | } else | 292 | } else |
293 | dev_dbg(dev, "%s: empty read urb received\n", __func__); | 293 | dev_dbg(dev, "%s: empty read urb received\n", __func__); |
294 | 294 | } | |
295 | /* Resubmit urb so we continue receiving */ | 295 | /* Resubmit urb so we continue receiving */ |
296 | err = usb_submit_urb(urb, GFP_ATOMIC); | 296 | err = usb_submit_urb(urb, GFP_ATOMIC); |
297 | if (err) { | 297 | if (err) { |
298 | if (err != -EPERM) { | 298 | if (err != -EPERM) { |
299 | dev_err(dev, "%s: resubmit read urb failed. (%d)\n", __func__, err); | 299 | dev_err(dev, "%s: resubmit read urb failed. (%d)\n", |
300 | /* busy also in error unless we are killed */ | 300 | __func__, err); |
301 | usb_mark_last_busy(port->serial->dev); | 301 | /* busy also in error unless we are killed */ |
302 | } | ||
303 | } else { | ||
304 | usb_mark_last_busy(port->serial->dev); | 302 | usb_mark_last_busy(port->serial->dev); |
305 | } | 303 | } |
304 | } else { | ||
305 | usb_mark_last_busy(port->serial->dev); | ||
306 | } | 306 | } |
307 | } | 307 | } |
308 | 308 | ||
diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c index 86dd3b65692f..6ad02f57c366 100644 --- a/drivers/usb/wusbcore/wa-xfer.c +++ b/drivers/usb/wusbcore/wa-xfer.c | |||
@@ -1279,6 +1279,12 @@ int wa_urb_dequeue(struct wahc *wa, struct urb *urb) | |||
1279 | } | 1279 | } |
1280 | spin_lock_irqsave(&xfer->lock, flags); | 1280 | spin_lock_irqsave(&xfer->lock, flags); |
1281 | rpipe = xfer->ep->hcpriv; | 1281 | rpipe = xfer->ep->hcpriv; |
1282 | if (rpipe == NULL) { | ||
1283 | pr_debug("%s: xfer id 0x%08X has no RPIPE. %s", | ||
1284 | __func__, wa_xfer_id(xfer), | ||
1285 | "Probably already aborted.\n" ); | ||
1286 | goto out_unlock; | ||
1287 | } | ||
1282 | /* Check the delayed list -> if there, release and complete */ | 1288 | /* Check the delayed list -> if there, release and complete */ |
1283 | spin_lock_irqsave(&wa->xfer_list_lock, flags2); | 1289 | spin_lock_irqsave(&wa->xfer_list_lock, flags2); |
1284 | if (!list_empty(&xfer->list_node) && xfer->seg == NULL) | 1290 | if (!list_empty(&xfer->list_node) && xfer->seg == NULL) |
@@ -1717,8 +1723,7 @@ static void wa_xfer_result_cb(struct urb *urb) | |||
1717 | break; | 1723 | break; |
1718 | } | 1724 | } |
1719 | usb_status = xfer_result->bTransferStatus & 0x3f; | 1725 | usb_status = xfer_result->bTransferStatus & 0x3f; |
1720 | if (usb_status == WA_XFER_STATUS_ABORTED | 1726 | if (usb_status == WA_XFER_STATUS_NOT_FOUND) |
1721 | || usb_status == WA_XFER_STATUS_NOT_FOUND) | ||
1722 | /* taken care of already */ | 1727 | /* taken care of already */ |
1723 | break; | 1728 | break; |
1724 | xfer_id = xfer_result->dwTransferID; | 1729 | xfer_id = xfer_result->dwTransferID; |
diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c index 3ba37713b1f9..dc09ebe4aba5 100644 --- a/drivers/video/mxsfb.c +++ b/drivers/video/mxsfb.c | |||
@@ -239,24 +239,6 @@ static const struct fb_bitfield def_rgb565[] = { | |||
239 | } | 239 | } |
240 | }; | 240 | }; |
241 | 241 | ||
242 | static const struct fb_bitfield def_rgb666[] = { | ||
243 | [RED] = { | ||
244 | .offset = 16, | ||
245 | .length = 6, | ||
246 | }, | ||
247 | [GREEN] = { | ||
248 | .offset = 8, | ||
249 | .length = 6, | ||
250 | }, | ||
251 | [BLUE] = { | ||
252 | .offset = 0, | ||
253 | .length = 6, | ||
254 | }, | ||
255 | [TRANSP] = { /* no support for transparency */ | ||
256 | .length = 0, | ||
257 | } | ||
258 | }; | ||
259 | |||
260 | static const struct fb_bitfield def_rgb888[] = { | 242 | static const struct fb_bitfield def_rgb888[] = { |
261 | [RED] = { | 243 | [RED] = { |
262 | .offset = 16, | 244 | .offset = 16, |
@@ -309,9 +291,6 @@ static int mxsfb_check_var(struct fb_var_screeninfo *var, | |||
309 | break; | 291 | break; |
310 | case STMLCDIF_16BIT: | 292 | case STMLCDIF_16BIT: |
311 | case STMLCDIF_18BIT: | 293 | case STMLCDIF_18BIT: |
312 | /* 24 bit to 18 bit mapping */ | ||
313 | rgb = def_rgb666; | ||
314 | break; | ||
315 | case STMLCDIF_24BIT: | 294 | case STMLCDIF_24BIT: |
316 | /* real 24 bit */ | 295 | /* real 24 bit */ |
317 | rgb = def_rgb888; | 296 | rgb = def_rgb888; |
@@ -453,11 +432,6 @@ static int mxsfb_set_par(struct fb_info *fb_info) | |||
453 | return -EINVAL; | 432 | return -EINVAL; |
454 | case STMLCDIF_16BIT: | 433 | case STMLCDIF_16BIT: |
455 | case STMLCDIF_18BIT: | 434 | case STMLCDIF_18BIT: |
456 | /* 24 bit to 18 bit mapping */ | ||
457 | ctrl |= CTRL_DF24; /* ignore the upper 2 bits in | ||
458 | * each colour component | ||
459 | */ | ||
460 | break; | ||
461 | case STMLCDIF_24BIT: | 435 | case STMLCDIF_24BIT: |
462 | /* real 24 bit */ | 436 | /* real 24 bit */ |
463 | break; | 437 | break; |
diff --git a/drivers/video/omap2/displays-new/connector-analog-tv.c b/drivers/video/omap2/displays-new/connector-analog-tv.c index 5338f362293b..1b60698f141e 100644 --- a/drivers/video/omap2/displays-new/connector-analog-tv.c +++ b/drivers/video/omap2/displays-new/connector-analog-tv.c | |||
@@ -28,6 +28,20 @@ struct panel_drv_data { | |||
28 | bool invert_polarity; | 28 | bool invert_polarity; |
29 | }; | 29 | }; |
30 | 30 | ||
31 | static const struct omap_video_timings tvc_pal_timings = { | ||
32 | .x_res = 720, | ||
33 | .y_res = 574, | ||
34 | .pixel_clock = 13500, | ||
35 | .hsw = 64, | ||
36 | .hfp = 12, | ||
37 | .hbp = 68, | ||
38 | .vsw = 5, | ||
39 | .vfp = 5, | ||
40 | .vbp = 41, | ||
41 | |||
42 | .interlace = true, | ||
43 | }; | ||
44 | |||
31 | #define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) | 45 | #define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev) |
32 | 46 | ||
33 | static int tvc_connect(struct omap_dss_device *dssdev) | 47 | static int tvc_connect(struct omap_dss_device *dssdev) |
@@ -212,14 +226,14 @@ static int tvc_probe(struct platform_device *pdev) | |||
212 | return -ENODEV; | 226 | return -ENODEV; |
213 | } | 227 | } |
214 | 228 | ||
215 | ddata->timings = omap_dss_pal_timings; | 229 | ddata->timings = tvc_pal_timings; |
216 | 230 | ||
217 | dssdev = &ddata->dssdev; | 231 | dssdev = &ddata->dssdev; |
218 | dssdev->driver = &tvc_driver; | 232 | dssdev->driver = &tvc_driver; |
219 | dssdev->dev = &pdev->dev; | 233 | dssdev->dev = &pdev->dev; |
220 | dssdev->type = OMAP_DISPLAY_TYPE_VENC; | 234 | dssdev->type = OMAP_DISPLAY_TYPE_VENC; |
221 | dssdev->owner = THIS_MODULE; | 235 | dssdev->owner = THIS_MODULE; |
222 | dssdev->panel.timings = omap_dss_pal_timings; | 236 | dssdev->panel.timings = tvc_pal_timings; |
223 | 237 | ||
224 | r = omapdss_register_display(dssdev); | 238 | r = omapdss_register_display(dssdev); |
225 | if (r) { | 239 | if (r) { |
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index eaf133384a8f..8bc5e8ccb091 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c | |||
@@ -36,16 +36,23 @@ static int check_extent_in_eb(struct btrfs_key *key, struct extent_buffer *eb, | |||
36 | u64 extent_item_pos, | 36 | u64 extent_item_pos, |
37 | struct extent_inode_elem **eie) | 37 | struct extent_inode_elem **eie) |
38 | { | 38 | { |
39 | u64 data_offset; | 39 | u64 offset = 0; |
40 | u64 data_len; | ||
41 | struct extent_inode_elem *e; | 40 | struct extent_inode_elem *e; |
42 | 41 | ||
43 | data_offset = btrfs_file_extent_offset(eb, fi); | 42 | if (!btrfs_file_extent_compression(eb, fi) && |
44 | data_len = btrfs_file_extent_num_bytes(eb, fi); | 43 | !btrfs_file_extent_encryption(eb, fi) && |
44 | !btrfs_file_extent_other_encoding(eb, fi)) { | ||
45 | u64 data_offset; | ||
46 | u64 data_len; | ||
45 | 47 | ||
46 | if (extent_item_pos < data_offset || | 48 | data_offset = btrfs_file_extent_offset(eb, fi); |
47 | extent_item_pos >= data_offset + data_len) | 49 | data_len = btrfs_file_extent_num_bytes(eb, fi); |
48 | return 1; | 50 | |
51 | if (extent_item_pos < data_offset || | ||
52 | extent_item_pos >= data_offset + data_len) | ||
53 | return 1; | ||
54 | offset = extent_item_pos - data_offset; | ||
55 | } | ||
49 | 56 | ||
50 | e = kmalloc(sizeof(*e), GFP_NOFS); | 57 | e = kmalloc(sizeof(*e), GFP_NOFS); |
51 | if (!e) | 58 | if (!e) |
@@ -53,7 +60,7 @@ static int check_extent_in_eb(struct btrfs_key *key, struct extent_buffer *eb, | |||
53 | 60 | ||
54 | e->next = *eie; | 61 | e->next = *eie; |
55 | e->inum = key->objectid; | 62 | e->inum = key->objectid; |
56 | e->offset = key->offset + (extent_item_pos - data_offset); | 63 | e->offset = key->offset + offset; |
57 | *eie = e; | 64 | *eie = e; |
58 | 65 | ||
59 | return 0; | 66 | return 0; |
@@ -189,7 +196,7 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path, | |||
189 | struct extent_buffer *eb; | 196 | struct extent_buffer *eb; |
190 | struct btrfs_key key; | 197 | struct btrfs_key key; |
191 | struct btrfs_file_extent_item *fi; | 198 | struct btrfs_file_extent_item *fi; |
192 | struct extent_inode_elem *eie = NULL; | 199 | struct extent_inode_elem *eie = NULL, *old = NULL; |
193 | u64 disk_byte; | 200 | u64 disk_byte; |
194 | 201 | ||
195 | if (level != 0) { | 202 | if (level != 0) { |
@@ -223,6 +230,7 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path, | |||
223 | 230 | ||
224 | if (disk_byte == wanted_disk_byte) { | 231 | if (disk_byte == wanted_disk_byte) { |
225 | eie = NULL; | 232 | eie = NULL; |
233 | old = NULL; | ||
226 | if (extent_item_pos) { | 234 | if (extent_item_pos) { |
227 | ret = check_extent_in_eb(&key, eb, fi, | 235 | ret = check_extent_in_eb(&key, eb, fi, |
228 | *extent_item_pos, | 236 | *extent_item_pos, |
@@ -230,18 +238,20 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path, | |||
230 | if (ret < 0) | 238 | if (ret < 0) |
231 | break; | 239 | break; |
232 | } | 240 | } |
233 | if (!ret) { | 241 | if (ret > 0) |
234 | ret = ulist_add(parents, eb->start, | 242 | goto next; |
235 | (uintptr_t)eie, GFP_NOFS); | 243 | ret = ulist_add_merge(parents, eb->start, |
236 | if (ret < 0) | 244 | (uintptr_t)eie, |
237 | break; | 245 | (u64 *)&old, GFP_NOFS); |
238 | if (!extent_item_pos) { | 246 | if (ret < 0) |
239 | ret = btrfs_next_old_leaf(root, path, | 247 | break; |
240 | time_seq); | 248 | if (!ret && extent_item_pos) { |
241 | continue; | 249 | while (old->next) |
242 | } | 250 | old = old->next; |
251 | old->next = eie; | ||
243 | } | 252 | } |
244 | } | 253 | } |
254 | next: | ||
245 | ret = btrfs_next_old_item(root, path, time_seq); | 255 | ret = btrfs_next_old_item(root, path, time_seq); |
246 | } | 256 | } |
247 | 257 | ||
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 5bf4c39e2ad6..ed504607d8ec 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c | |||
@@ -1271,7 +1271,6 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb, | |||
1271 | BUG_ON(!eb_rewin); | 1271 | BUG_ON(!eb_rewin); |
1272 | } | 1272 | } |
1273 | 1273 | ||
1274 | extent_buffer_get(eb_rewin); | ||
1275 | btrfs_tree_read_unlock(eb); | 1274 | btrfs_tree_read_unlock(eb); |
1276 | free_extent_buffer(eb); | 1275 | free_extent_buffer(eb); |
1277 | 1276 | ||
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 583d98bd065e..fe443fece851 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c | |||
@@ -4048,7 +4048,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |||
4048 | } | 4048 | } |
4049 | 4049 | ||
4050 | while (!end) { | 4050 | while (!end) { |
4051 | u64 offset_in_extent; | 4051 | u64 offset_in_extent = 0; |
4052 | 4052 | ||
4053 | /* break if the extent we found is outside the range */ | 4053 | /* break if the extent we found is outside the range */ |
4054 | if (em->start >= max || extent_map_end(em) < off) | 4054 | if (em->start >= max || extent_map_end(em) < off) |
@@ -4064,9 +4064,12 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |||
4064 | 4064 | ||
4065 | /* | 4065 | /* |
4066 | * record the offset from the start of the extent | 4066 | * record the offset from the start of the extent |
4067 | * for adjusting the disk offset below | 4067 | * for adjusting the disk offset below. Only do this if the |
4068 | * extent isn't compressed since our in ram offset may be past | ||
4069 | * what we have actually allocated on disk. | ||
4068 | */ | 4070 | */ |
4069 | offset_in_extent = em_start - em->start; | 4071 | if (!test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) |
4072 | offset_in_extent = em_start - em->start; | ||
4070 | em_end = extent_map_end(em); | 4073 | em_end = extent_map_end(em); |
4071 | em_len = em_end - em_start; | 4074 | em_len = em_end - em_start; |
4072 | emflags = em->flags; | 4075 | emflags = em->flags; |
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index a005fe2c072a..8e686a427ce2 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c | |||
@@ -596,20 +596,29 @@ void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, | |||
596 | if (no_splits) | 596 | if (no_splits) |
597 | goto next; | 597 | goto next; |
598 | 598 | ||
599 | if (em->block_start < EXTENT_MAP_LAST_BYTE && | 599 | if (em->start < start) { |
600 | em->start < start) { | ||
601 | split->start = em->start; | 600 | split->start = em->start; |
602 | split->len = start - em->start; | 601 | split->len = start - em->start; |
603 | split->orig_start = em->orig_start; | ||
604 | split->block_start = em->block_start; | ||
605 | 602 | ||
606 | if (compressed) | 603 | if (em->block_start < EXTENT_MAP_LAST_BYTE) { |
607 | split->block_len = em->block_len; | 604 | split->orig_start = em->orig_start; |
608 | else | 605 | split->block_start = em->block_start; |
609 | split->block_len = split->len; | 606 | |
610 | split->ram_bytes = em->ram_bytes; | 607 | if (compressed) |
611 | split->orig_block_len = max(split->block_len, | 608 | split->block_len = em->block_len; |
612 | em->orig_block_len); | 609 | else |
610 | split->block_len = split->len; | ||
611 | split->orig_block_len = max(split->block_len, | ||
612 | em->orig_block_len); | ||
613 | split->ram_bytes = em->ram_bytes; | ||
614 | } else { | ||
615 | split->orig_start = split->start; | ||
616 | split->block_len = 0; | ||
617 | split->block_start = em->block_start; | ||
618 | split->orig_block_len = 0; | ||
619 | split->ram_bytes = split->len; | ||
620 | } | ||
621 | |||
613 | split->generation = gen; | 622 | split->generation = gen; |
614 | split->bdev = em->bdev; | 623 | split->bdev = em->bdev; |
615 | split->flags = flags; | 624 | split->flags = flags; |
@@ -620,8 +629,7 @@ void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, | |||
620 | split = split2; | 629 | split = split2; |
621 | split2 = NULL; | 630 | split2 = NULL; |
622 | } | 631 | } |
623 | if (em->block_start < EXTENT_MAP_LAST_BYTE && | 632 | if (testend && em->start + em->len > start + len) { |
624 | testend && em->start + em->len > start + len) { | ||
625 | u64 diff = start + len - em->start; | 633 | u64 diff = start + len - em->start; |
626 | 634 | ||
627 | split->start = start + len; | 635 | split->start = start + len; |
@@ -630,18 +638,28 @@ void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, | |||
630 | split->flags = flags; | 638 | split->flags = flags; |
631 | split->compress_type = em->compress_type; | 639 | split->compress_type = em->compress_type; |
632 | split->generation = gen; | 640 | split->generation = gen; |
633 | split->orig_block_len = max(em->block_len, | 641 | |
642 | if (em->block_start < EXTENT_MAP_LAST_BYTE) { | ||
643 | split->orig_block_len = max(em->block_len, | ||
634 | em->orig_block_len); | 644 | em->orig_block_len); |
635 | split->ram_bytes = em->ram_bytes; | ||
636 | 645 | ||
637 | if (compressed) { | 646 | split->ram_bytes = em->ram_bytes; |
638 | split->block_len = em->block_len; | 647 | if (compressed) { |
639 | split->block_start = em->block_start; | 648 | split->block_len = em->block_len; |
640 | split->orig_start = em->orig_start; | 649 | split->block_start = em->block_start; |
650 | split->orig_start = em->orig_start; | ||
651 | } else { | ||
652 | split->block_len = split->len; | ||
653 | split->block_start = em->block_start | ||
654 | + diff; | ||
655 | split->orig_start = em->orig_start; | ||
656 | } | ||
641 | } else { | 657 | } else { |
642 | split->block_len = split->len; | 658 | split->ram_bytes = split->len; |
643 | split->block_start = em->block_start + diff; | 659 | split->orig_start = split->start; |
644 | split->orig_start = em->orig_start; | 660 | split->block_len = 0; |
661 | split->block_start = em->block_start; | ||
662 | split->orig_block_len = 0; | ||
645 | } | 663 | } |
646 | 664 | ||
647 | ret = add_extent_mapping(em_tree, split, modified); | 665 | ret = add_extent_mapping(em_tree, split, modified); |
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 6d1b93c8aafb..021694c08181 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -2166,16 +2166,23 @@ static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id, | |||
2166 | if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr) | 2166 | if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr) |
2167 | continue; | 2167 | continue; |
2168 | 2168 | ||
2169 | extent_offset = btrfs_file_extent_offset(leaf, extent); | 2169 | /* |
2170 | if (key.offset - extent_offset != offset) | 2170 | * 'offset' refers to the exact key.offset, |
2171 | * NOT the 'offset' field in btrfs_extent_data_ref, ie. | ||
2172 | * (key.offset - extent_offset). | ||
2173 | */ | ||
2174 | if (key.offset != offset) | ||
2171 | continue; | 2175 | continue; |
2172 | 2176 | ||
2177 | extent_offset = btrfs_file_extent_offset(leaf, extent); | ||
2173 | num_bytes = btrfs_file_extent_num_bytes(leaf, extent); | 2178 | num_bytes = btrfs_file_extent_num_bytes(leaf, extent); |
2179 | |||
2174 | if (extent_offset >= old->extent_offset + old->offset + | 2180 | if (extent_offset >= old->extent_offset + old->offset + |
2175 | old->len || extent_offset + num_bytes <= | 2181 | old->len || extent_offset + num_bytes <= |
2176 | old->extent_offset + old->offset) | 2182 | old->extent_offset + old->offset) |
2177 | continue; | 2183 | continue; |
2178 | 2184 | ||
2185 | ret = 0; | ||
2179 | break; | 2186 | break; |
2180 | } | 2187 | } |
2181 | 2188 | ||
@@ -2187,7 +2194,7 @@ static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id, | |||
2187 | 2194 | ||
2188 | backref->root_id = root_id; | 2195 | backref->root_id = root_id; |
2189 | backref->inum = inum; | 2196 | backref->inum = inum; |
2190 | backref->file_pos = offset + extent_offset; | 2197 | backref->file_pos = offset; |
2191 | backref->num_bytes = num_bytes; | 2198 | backref->num_bytes = num_bytes; |
2192 | backref->extent_offset = extent_offset; | 2199 | backref->extent_offset = extent_offset; |
2193 | backref->generation = btrfs_file_extent_generation(leaf, extent); | 2200 | backref->generation = btrfs_file_extent_generation(leaf, extent); |
@@ -2210,7 +2217,8 @@ static noinline bool record_extent_backrefs(struct btrfs_path *path, | |||
2210 | new->path = path; | 2217 | new->path = path; |
2211 | 2218 | ||
2212 | list_for_each_entry_safe(old, tmp, &new->head, list) { | 2219 | list_for_each_entry_safe(old, tmp, &new->head, list) { |
2213 | ret = iterate_inodes_from_logical(old->bytenr, fs_info, | 2220 | ret = iterate_inodes_from_logical(old->bytenr + |
2221 | old->extent_offset, fs_info, | ||
2214 | path, record_one_backref, | 2222 | path, record_one_backref, |
2215 | old); | 2223 | old); |
2216 | BUG_ON(ret < 0 && ret != -ENOENT); | 2224 | BUG_ON(ret < 0 && ret != -ENOENT); |
@@ -4391,9 +4399,6 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr) | |||
4391 | int mask = attr->ia_valid; | 4399 | int mask = attr->ia_valid; |
4392 | int ret; | 4400 | int ret; |
4393 | 4401 | ||
4394 | if (newsize == oldsize) | ||
4395 | return 0; | ||
4396 | |||
4397 | /* | 4402 | /* |
4398 | * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a | 4403 | * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a |
4399 | * special case where we need to update the times despite not having | 4404 | * special case where we need to update the times despite not having |
@@ -5165,14 +5170,31 @@ next: | |||
5165 | } | 5170 | } |
5166 | 5171 | ||
5167 | /* Reached end of directory/root. Bump pos past the last item. */ | 5172 | /* Reached end of directory/root. Bump pos past the last item. */ |
5168 | if (key_type == BTRFS_DIR_INDEX_KEY) | 5173 | ctx->pos++; |
5169 | /* | 5174 | |
5170 | * 32-bit glibc will use getdents64, but then strtol - | 5175 | /* |
5171 | * so the last number we can serve is this. | 5176 | * Stop new entries from being returned after we return the last |
5172 | */ | 5177 | * entry. |
5173 | ctx->pos = 0x7fffffff; | 5178 | * |
5174 | else | 5179 | * New directory entries are assigned a strictly increasing |
5175 | ctx->pos++; | 5180 | * offset. This means that new entries created during readdir |
5181 | * are *guaranteed* to be seen in the future by that readdir. | ||
5182 | * This has broken buggy programs which operate on names as | ||
5183 | * they're returned by readdir. Until we re-use freed offsets | ||
5184 | * we have this hack to stop new entries from being returned | ||
5185 | * under the assumption that they'll never reach this huge | ||
5186 | * offset. | ||
5187 | * | ||
5188 | * This is being careful not to overflow 32bit loff_t unless the | ||
5189 | * last entry requires it because doing so has broken 32bit apps | ||
5190 | * in the past. | ||
5191 | */ | ||
5192 | if (key_type == BTRFS_DIR_INDEX_KEY) { | ||
5193 | if (ctx->pos >= INT_MAX) | ||
5194 | ctx->pos = LLONG_MAX; | ||
5195 | else | ||
5196 | ctx->pos = INT_MAX; | ||
5197 | } | ||
5176 | nopos: | 5198 | nopos: |
5177 | ret = 0; | 5199 | ret = 0; |
5178 | err: | 5200 | err: |
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index d58cce77fc6c..af1931a5960d 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c | |||
@@ -983,12 +983,12 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans, | |||
983 | * a dirty root struct and adds it into the list of dead roots that need to | 983 | * a dirty root struct and adds it into the list of dead roots that need to |
984 | * be deleted | 984 | * be deleted |
985 | */ | 985 | */ |
986 | int btrfs_add_dead_root(struct btrfs_root *root) | 986 | void btrfs_add_dead_root(struct btrfs_root *root) |
987 | { | 987 | { |
988 | spin_lock(&root->fs_info->trans_lock); | 988 | spin_lock(&root->fs_info->trans_lock); |
989 | list_add_tail(&root->root_list, &root->fs_info->dead_roots); | 989 | if (list_empty(&root->root_list)) |
990 | list_add_tail(&root->root_list, &root->fs_info->dead_roots); | ||
990 | spin_unlock(&root->fs_info->trans_lock); | 991 | spin_unlock(&root->fs_info->trans_lock); |
991 | return 0; | ||
992 | } | 992 | } |
993 | 993 | ||
994 | /* | 994 | /* |
@@ -1925,7 +1925,7 @@ int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root) | |||
1925 | } | 1925 | } |
1926 | root = list_first_entry(&fs_info->dead_roots, | 1926 | root = list_first_entry(&fs_info->dead_roots, |
1927 | struct btrfs_root, root_list); | 1927 | struct btrfs_root, root_list); |
1928 | list_del(&root->root_list); | 1928 | list_del_init(&root->root_list); |
1929 | spin_unlock(&fs_info->trans_lock); | 1929 | spin_unlock(&fs_info->trans_lock); |
1930 | 1930 | ||
1931 | pr_debug("btrfs: cleaner removing %llu\n", | 1931 | pr_debug("btrfs: cleaner removing %llu\n", |
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 005b0375d18c..defbc4269897 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h | |||
@@ -143,7 +143,7 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid); | |||
143 | int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans, | 143 | int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans, |
144 | struct btrfs_root *root); | 144 | struct btrfs_root *root); |
145 | 145 | ||
146 | int btrfs_add_dead_root(struct btrfs_root *root); | 146 | void btrfs_add_dead_root(struct btrfs_root *root); |
147 | int btrfs_defrag_root(struct btrfs_root *root); | 147 | int btrfs_defrag_root(struct btrfs_root *root); |
148 | int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root); | 148 | int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root); |
149 | int btrfs_commit_transaction(struct btrfs_trans_handle *trans, | 149 | int btrfs_commit_transaction(struct btrfs_trans_handle *trans, |
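The transaction.c/transaction.h hunks above make btrfs_add_dead_root() void and, more importantly, pair a list_empty() guard on insertion with list_del_init() on removal, so a root queued twice is only linked into dead_roots once and an entry already taken off the list stays recognisable as unqueued. This is the usual list_head idiom for objects that may or may not currently be on a list; a stripped-down sketch with hypothetical names (my_item, pending_list, pending_lock):

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct my_item {
        struct list_head node;  /* set up with INIT_LIST_HEAD() at creation */
    };

    static LIST_HEAD(pending_list);
    static DEFINE_SPINLOCK(pending_lock);

    static void my_queue(struct my_item *item)
    {
        spin_lock(&pending_lock);
        if (list_empty(&item->node))  /* not already queued */
            list_add_tail(&item->node, &pending_list);
        spin_unlock(&pending_lock);
    }

    static struct my_item *my_dequeue(void)
    {
        struct my_item *item = NULL;

        spin_lock(&pending_lock);
        if (!list_empty(&pending_list)) {
            item = list_first_entry(&pending_list, struct my_item, node);
            /* _init keeps list_empty(&item->node) true afterwards */
            list_del_init(&item->node);
        }
        spin_unlock(&pending_lock);
        return item;
    }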
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 2c6791493637..ff60d8978ae2 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c | |||
@@ -3746,8 +3746,9 @@ next_slot: | |||
3746 | } | 3746 | } |
3747 | 3747 | ||
3748 | log_extents: | 3748 | log_extents: |
3749 | btrfs_release_path(path); | ||
3750 | btrfs_release_path(dst_path); | ||
3749 | if (fast_search) { | 3751 | if (fast_search) { |
3750 | btrfs_release_path(dst_path); | ||
3751 | ret = btrfs_log_changed_extents(trans, root, inode, dst_path); | 3752 | ret = btrfs_log_changed_extents(trans, root, inode, dst_path); |
3752 | if (ret) { | 3753 | if (ret) { |
3753 | err = ret; | 3754 | err = ret; |
@@ -3764,8 +3765,6 @@ log_extents: | |||
3764 | } | 3765 | } |
3765 | 3766 | ||
3766 | if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) { | 3767 | if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) { |
3767 | btrfs_release_path(path); | ||
3768 | btrfs_release_path(dst_path); | ||
3769 | ret = log_directory_changes(trans, root, inode, path, dst_path); | 3768 | ret = log_directory_changes(trans, root, inode, path, dst_path); |
3770 | if (ret) { | 3769 | if (ret) { |
3771 | err = ret; | 3770 | err = ret; |
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index 45e57cc38200..fc6f4f3a1a9d 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c | |||
@@ -43,17 +43,18 @@ cifs_crypto_shash_md5_allocate(struct TCP_Server_Info *server) | |||
43 | server->secmech.md5 = crypto_alloc_shash("md5", 0, 0); | 43 | server->secmech.md5 = crypto_alloc_shash("md5", 0, 0); |
44 | if (IS_ERR(server->secmech.md5)) { | 44 | if (IS_ERR(server->secmech.md5)) { |
45 | cifs_dbg(VFS, "could not allocate crypto md5\n"); | 45 | cifs_dbg(VFS, "could not allocate crypto md5\n"); |
46 | return PTR_ERR(server->secmech.md5); | 46 | rc = PTR_ERR(server->secmech.md5); |
47 | server->secmech.md5 = NULL; | ||
48 | return rc; | ||
47 | } | 49 | } |
48 | 50 | ||
49 | size = sizeof(struct shash_desc) + | 51 | size = sizeof(struct shash_desc) + |
50 | crypto_shash_descsize(server->secmech.md5); | 52 | crypto_shash_descsize(server->secmech.md5); |
51 | server->secmech.sdescmd5 = kmalloc(size, GFP_KERNEL); | 53 | server->secmech.sdescmd5 = kmalloc(size, GFP_KERNEL); |
52 | if (!server->secmech.sdescmd5) { | 54 | if (!server->secmech.sdescmd5) { |
53 | rc = -ENOMEM; | ||
54 | crypto_free_shash(server->secmech.md5); | 55 | crypto_free_shash(server->secmech.md5); |
55 | server->secmech.md5 = NULL; | 56 | server->secmech.md5 = NULL; |
56 | return rc; | 57 | return -ENOMEM; |
57 | } | 58 | } |
58 | server->secmech.sdescmd5->shash.tfm = server->secmech.md5; | 59 | server->secmech.sdescmd5->shash.tfm = server->secmech.md5; |
59 | server->secmech.sdescmd5->shash.flags = 0x0; | 60 | server->secmech.sdescmd5->shash.flags = 0x0; |
@@ -421,7 +422,7 @@ find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp) | |||
421 | if (blobptr + attrsize > blobend) | 422 | if (blobptr + attrsize > blobend) |
422 | break; | 423 | break; |
423 | if (type == NTLMSSP_AV_NB_DOMAIN_NAME) { | 424 | if (type == NTLMSSP_AV_NB_DOMAIN_NAME) { |
424 | if (!attrsize) | 425 | if (!attrsize || attrsize >= CIFS_MAX_DOMAINNAME_LEN) |
425 | break; | 426 | break; |
426 | if (!ses->domainName) { | 427 | if (!ses->domainName) { |
427 | ses->domainName = | 428 | ses->domainName = |
@@ -591,6 +592,7 @@ CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash) | |||
591 | 592 | ||
592 | static int crypto_hmacmd5_alloc(struct TCP_Server_Info *server) | 593 | static int crypto_hmacmd5_alloc(struct TCP_Server_Info *server) |
593 | { | 594 | { |
595 | int rc; | ||
594 | unsigned int size; | 596 | unsigned int size; |
595 | 597 | ||
596 | /* check if already allocated */ | 598 | /* check if already allocated */ |
@@ -600,7 +602,9 @@ static int crypto_hmacmd5_alloc(struct TCP_Server_Info *server) | |||
600 | server->secmech.hmacmd5 = crypto_alloc_shash("hmac(md5)", 0, 0); | 602 | server->secmech.hmacmd5 = crypto_alloc_shash("hmac(md5)", 0, 0); |
601 | if (IS_ERR(server->secmech.hmacmd5)) { | 603 | if (IS_ERR(server->secmech.hmacmd5)) { |
602 | cifs_dbg(VFS, "could not allocate crypto hmacmd5\n"); | 604 | cifs_dbg(VFS, "could not allocate crypto hmacmd5\n"); |
603 | return PTR_ERR(server->secmech.hmacmd5); | 605 | rc = PTR_ERR(server->secmech.hmacmd5); |
606 | server->secmech.hmacmd5 = NULL; | ||
607 | return rc; | ||
604 | } | 608 | } |
605 | 609 | ||
606 | size = sizeof(struct shash_desc) + | 610 | size = sizeof(struct shash_desc) + |
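The cifsencrypt.c hunks above (and the smb2transport.c hunk later in this merge) fix the same error path: crypto_alloc_shash() reports failure with an ERR_PTR, and if that value is left in the secmech field the teardown code will later hand a bogus pointer to crypto_free_shash(). The fix saves PTR_ERR(), resets the field to NULL, and only then returns. A hedged sketch of the pattern, with my_ctx standing in for the real structure:

    #include <linux/err.h>
    #include <crypto/hash.h>

    struct my_ctx {
        struct crypto_shash *md5;  /* NULL when not allocated */
    };

    static int my_alloc_md5(struct my_ctx *ctx)
    {
        int rc;

        ctx->md5 = crypto_alloc_shash("md5", 0, 0);
        if (IS_ERR(ctx->md5)) {
            rc = PTR_ERR(ctx->md5);
            ctx->md5 = NULL;  /* never leave an ERR_PTR behind */
            return rc;
        }
        return 0;
    }

    static void my_free_md5(struct my_ctx *ctx)
    {
        /* safe to call even after a failed allocation */
        if (ctx->md5)
            crypto_free_shash(ctx->md5);
        ctx->md5 = NULL;
    }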
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 4bdd547dbf6f..85ea98d139fc 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c | |||
@@ -147,18 +147,17 @@ cifs_read_super(struct super_block *sb) | |||
147 | goto out_no_root; | 147 | goto out_no_root; |
148 | } | 148 | } |
149 | 149 | ||
150 | if (cifs_sb_master_tcon(cifs_sb)->nocase) | ||
151 | sb->s_d_op = &cifs_ci_dentry_ops; | ||
152 | else | ||
153 | sb->s_d_op = &cifs_dentry_ops; | ||
154 | |||
150 | sb->s_root = d_make_root(inode); | 155 | sb->s_root = d_make_root(inode); |
151 | if (!sb->s_root) { | 156 | if (!sb->s_root) { |
152 | rc = -ENOMEM; | 157 | rc = -ENOMEM; |
153 | goto out_no_root; | 158 | goto out_no_root; |
154 | } | 159 | } |
155 | 160 | ||
156 | /* do that *after* d_make_root() - we want NULL ->d_op for root here */ | ||
157 | if (cifs_sb_master_tcon(cifs_sb)->nocase) | ||
158 | sb->s_d_op = &cifs_ci_dentry_ops; | ||
159 | else | ||
160 | sb->s_d_op = &cifs_dentry_ops; | ||
161 | |||
162 | #ifdef CONFIG_CIFS_NFSD_EXPORT | 161 | #ifdef CONFIG_CIFS_NFSD_EXPORT |
163 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) { | 162 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) { |
164 | cifs_dbg(FYI, "export ops supported\n"); | 163 | cifs_dbg(FYI, "export ops supported\n"); |
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 1fdc37041057..52ca861ed35e 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h | |||
@@ -44,6 +44,7 @@ | |||
44 | #define MAX_TREE_SIZE (2 + MAX_SERVER_SIZE + 1 + MAX_SHARE_SIZE + 1) | 44 | #define MAX_TREE_SIZE (2 + MAX_SERVER_SIZE + 1 + MAX_SHARE_SIZE + 1) |
45 | #define MAX_SERVER_SIZE 15 | 45 | #define MAX_SERVER_SIZE 15 |
46 | #define MAX_SHARE_SIZE 80 | 46 | #define MAX_SHARE_SIZE 80 |
47 | #define CIFS_MAX_DOMAINNAME_LEN 256 /* max domain name length */ | ||
47 | #define MAX_USERNAME_SIZE 256 /* reasonable maximum for current servers */ | 48 | #define MAX_USERNAME_SIZE 256 /* reasonable maximum for current servers */ |
48 | #define MAX_PASSWORD_SIZE 512 /* max for windows seems to be 256 wide chars */ | 49 | #define MAX_PASSWORD_SIZE 512 /* max for windows seems to be 256 wide chars */ |
49 | 50 | ||
@@ -369,6 +370,9 @@ struct smb_version_operations { | |||
369 | void (*generate_signingkey)(struct TCP_Server_Info *server); | 370 | void (*generate_signingkey)(struct TCP_Server_Info *server); |
370 | int (*calc_signature)(struct smb_rqst *rqst, | 371 | int (*calc_signature)(struct smb_rqst *rqst, |
371 | struct TCP_Server_Info *server); | 372 | struct TCP_Server_Info *server); |
373 | int (*query_mf_symlink)(const unsigned char *path, char *pbuf, | ||
374 | unsigned int *pbytes_read, struct cifs_sb_info *cifs_sb, | ||
375 | unsigned int xid); | ||
372 | }; | 376 | }; |
373 | 377 | ||
374 | struct smb_version_values { | 378 | struct smb_version_values { |
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index f7e584d047e2..b29a012bed33 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h | |||
@@ -497,5 +497,7 @@ void cifs_writev_complete(struct work_struct *work); | |||
497 | struct cifs_writedata *cifs_writedata_alloc(unsigned int nr_pages, | 497 | struct cifs_writedata *cifs_writedata_alloc(unsigned int nr_pages, |
498 | work_func_t complete); | 498 | work_func_t complete); |
499 | void cifs_writedata_release(struct kref *refcount); | 499 | void cifs_writedata_release(struct kref *refcount); |
500 | 500 | int open_query_close_cifs_symlink(const unsigned char *path, char *pbuf, | |
501 | unsigned int *pbytes_read, struct cifs_sb_info *cifs_sb, | ||
502 | unsigned int xid); | ||
501 | #endif /* _CIFSPROTO_H */ | 503 | #endif /* _CIFSPROTO_H */ |
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index fa68813396b5..d67c550c4980 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
@@ -1675,7 +1675,8 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, | |||
1675 | if (string == NULL) | 1675 | if (string == NULL) |
1676 | goto out_nomem; | 1676 | goto out_nomem; |
1677 | 1677 | ||
1678 | if (strnlen(string, 256) == 256) { | 1678 | if (strnlen(string, CIFS_MAX_DOMAINNAME_LEN) |
1679 | == CIFS_MAX_DOMAINNAME_LEN) { | ||
1679 | printk(KERN_WARNING "CIFS: domain name too" | 1680 | printk(KERN_WARNING "CIFS: domain name too" |
1680 | " long\n"); | 1681 | " long\n"); |
1681 | goto cifs_parse_mount_err; | 1682 | goto cifs_parse_mount_err; |
@@ -2276,8 +2277,8 @@ cifs_put_smb_ses(struct cifs_ses *ses) | |||
2276 | 2277 | ||
2277 | #ifdef CONFIG_KEYS | 2278 | #ifdef CONFIG_KEYS |
2278 | 2279 | ||
2279 | /* strlen("cifs:a:") + INET6_ADDRSTRLEN + 1 */ | 2280 | /* strlen("cifs:a:") + CIFS_MAX_DOMAINNAME_LEN + 1 */ |
2280 | #define CIFSCREDS_DESC_SIZE (7 + INET6_ADDRSTRLEN + 1) | 2281 | #define CIFSCREDS_DESC_SIZE (7 + CIFS_MAX_DOMAINNAME_LEN + 1) |
2281 | 2282 | ||
2282 | /* Populate username and pw fields from keyring if possible */ | 2283 | /* Populate username and pw fields from keyring if possible */ |
2283 | static int | 2284 | static int |
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 1e57f36ea1b2..7e36ae34e947 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
@@ -647,6 +647,7 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush) | |||
647 | oflags, &oplock, &cfile->fid.netfid, xid); | 647 | oflags, &oplock, &cfile->fid.netfid, xid); |
648 | if (rc == 0) { | 648 | if (rc == 0) { |
649 | cifs_dbg(FYI, "posix reopen succeeded\n"); | 649 | cifs_dbg(FYI, "posix reopen succeeded\n"); |
650 | oparms.reconnect = true; | ||
650 | goto reopen_success; | 651 | goto reopen_success; |
651 | } | 652 | } |
652 | /* | 653 | /* |
diff --git a/fs/cifs/link.c b/fs/cifs/link.c index b83c3f5646bd..562044f700e5 100644 --- a/fs/cifs/link.c +++ b/fs/cifs/link.c | |||
@@ -305,67 +305,89 @@ CIFSCouldBeMFSymlink(const struct cifs_fattr *fattr) | |||
305 | } | 305 | } |
306 | 306 | ||
307 | int | 307 | int |
308 | CIFSCheckMFSymlink(struct cifs_fattr *fattr, | 308 | open_query_close_cifs_symlink(const unsigned char *path, char *pbuf, |
309 | const unsigned char *path, | 309 | unsigned int *pbytes_read, struct cifs_sb_info *cifs_sb, |
310 | struct cifs_sb_info *cifs_sb, unsigned int xid) | 310 | unsigned int xid) |
311 | { | 311 | { |
312 | int rc; | 312 | int rc; |
313 | int oplock = 0; | 313 | int oplock = 0; |
314 | __u16 netfid = 0; | 314 | __u16 netfid = 0; |
315 | struct tcon_link *tlink; | 315 | struct tcon_link *tlink; |
316 | struct cifs_tcon *pTcon; | 316 | struct cifs_tcon *ptcon; |
317 | struct cifs_io_parms io_parms; | 317 | struct cifs_io_parms io_parms; |
318 | u8 *buf; | ||
319 | char *pbuf; | ||
320 | unsigned int bytes_read = 0; | ||
321 | int buf_type = CIFS_NO_BUFFER; | 318 | int buf_type = CIFS_NO_BUFFER; |
322 | unsigned int link_len = 0; | ||
323 | FILE_ALL_INFO file_info; | 319 | FILE_ALL_INFO file_info; |
324 | 320 | ||
325 | if (!CIFSCouldBeMFSymlink(fattr)) | ||
326 | /* it's not a symlink */ | ||
327 | return 0; | ||
328 | |||
329 | tlink = cifs_sb_tlink(cifs_sb); | 321 | tlink = cifs_sb_tlink(cifs_sb); |
330 | if (IS_ERR(tlink)) | 322 | if (IS_ERR(tlink)) |
331 | return PTR_ERR(tlink); | 323 | return PTR_ERR(tlink); |
332 | pTcon = tlink_tcon(tlink); | 324 | ptcon = tlink_tcon(tlink); |
333 | 325 | ||
334 | rc = CIFSSMBOpen(xid, pTcon, path, FILE_OPEN, GENERIC_READ, | 326 | rc = CIFSSMBOpen(xid, ptcon, path, FILE_OPEN, GENERIC_READ, |
335 | CREATE_NOT_DIR, &netfid, &oplock, &file_info, | 327 | CREATE_NOT_DIR, &netfid, &oplock, &file_info, |
336 | cifs_sb->local_nls, | 328 | cifs_sb->local_nls, |
337 | cifs_sb->mnt_cifs_flags & | 329 | cifs_sb->mnt_cifs_flags & |
338 | CIFS_MOUNT_MAP_SPECIAL_CHR); | 330 | CIFS_MOUNT_MAP_SPECIAL_CHR); |
339 | if (rc != 0) | 331 | if (rc != 0) { |
340 | goto out; | 332 | cifs_put_tlink(tlink); |
333 | return rc; | ||
334 | } | ||
341 | 335 | ||
342 | if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) { | 336 | if (file_info.EndOfFile != cpu_to_le64(CIFS_MF_SYMLINK_FILE_SIZE)) { |
343 | CIFSSMBClose(xid, pTcon, netfid); | 337 | CIFSSMBClose(xid, ptcon, netfid); |
338 | cifs_put_tlink(tlink); | ||
344 | /* it's not a symlink */ | 339 | /* it's not a symlink */ |
345 | goto out; | 340 | return rc; |
346 | } | 341 | } |
347 | 342 | ||
348 | buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL); | ||
349 | if (!buf) { | ||
350 | rc = -ENOMEM; | ||
351 | goto out; | ||
352 | } | ||
353 | pbuf = buf; | ||
354 | io_parms.netfid = netfid; | 343 | io_parms.netfid = netfid; |
355 | io_parms.pid = current->tgid; | 344 | io_parms.pid = current->tgid; |
356 | io_parms.tcon = pTcon; | 345 | io_parms.tcon = ptcon; |
357 | io_parms.offset = 0; | 346 | io_parms.offset = 0; |
358 | io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE; | 347 | io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE; |
359 | 348 | ||
360 | rc = CIFSSMBRead(xid, &io_parms, &bytes_read, &pbuf, &buf_type); | 349 | rc = CIFSSMBRead(xid, &io_parms, pbytes_read, &pbuf, &buf_type); |
361 | CIFSSMBClose(xid, pTcon, netfid); | 350 | CIFSSMBClose(xid, ptcon, netfid); |
362 | if (rc != 0) { | 351 | cifs_put_tlink(tlink); |
363 | kfree(buf); | 352 | return rc; |
353 | } | ||
354 | |||
355 | |||
356 | int | ||
357 | CIFSCheckMFSymlink(struct cifs_fattr *fattr, | ||
358 | const unsigned char *path, | ||
359 | struct cifs_sb_info *cifs_sb, unsigned int xid) | ||
360 | { | ||
361 | int rc = 0; | ||
362 | u8 *buf = NULL; | ||
363 | unsigned int link_len = 0; | ||
364 | unsigned int bytes_read = 0; | ||
365 | struct cifs_tcon *ptcon; | ||
366 | |||
367 | if (!CIFSCouldBeMFSymlink(fattr)) | ||
368 | /* it's not a symlink */ | ||
369 | return 0; | ||
370 | |||
371 | buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL); | ||
372 | if (!buf) { | ||
373 | rc = -ENOMEM; | ||
364 | goto out; | 374 | goto out; |
365 | } | 375 | } |
366 | 376 | ||
377 | ptcon = tlink_tcon(cifs_sb_tlink(cifs_sb)); | ||
378 | if ((ptcon->ses) && (ptcon->ses->server->ops->query_mf_symlink)) | ||
379 | rc = ptcon->ses->server->ops->query_mf_symlink(path, buf, | ||
380 | &bytes_read, cifs_sb, xid); | ||
381 | else | ||
382 | goto out; | ||
383 | |||
384 | if (rc != 0) | ||
385 | goto out; | ||
386 | |||
387 | if (bytes_read == 0) /* not a symlink */ | ||
388 | goto out; | ||
389 | |||
367 | rc = CIFSParseMFSymlink(buf, bytes_read, &link_len, NULL); | 390 | rc = CIFSParseMFSymlink(buf, bytes_read, &link_len, NULL); |
368 | kfree(buf); | ||
369 | if (rc == -EINVAL) { | 391 | if (rc == -EINVAL) { |
370 | /* it's not a symlink */ | 392 | /* it's not a symlink */ |
371 | rc = 0; | 393 | rc = 0; |
@@ -381,7 +403,7 @@ CIFSCheckMFSymlink(struct cifs_fattr *fattr, | |||
381 | fattr->cf_mode |= S_IFLNK | S_IRWXU | S_IRWXG | S_IRWXO; | 403 | fattr->cf_mode |= S_IFLNK | S_IRWXU | S_IRWXG | S_IRWXO; |
382 | fattr->cf_dtype = DT_LNK; | 404 | fattr->cf_dtype = DT_LNK; |
383 | out: | 405 | out: |
384 | cifs_put_tlink(tlink); | 406 | kfree(buf); |
385 | return rc; | 407 | return rc; |
386 | } | 408 | } |
387 | 409 | ||
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index ab8778469394..69d2c826a23b 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c | |||
@@ -111,6 +111,14 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name, | |||
111 | return; | 111 | return; |
112 | } | 112 | } |
113 | 113 | ||
114 | /* | ||
115 | * If we know that the inode will need to be revalidated immediately, | ||
116 | * then don't create a new dentry for it. We'll end up doing an on | ||
117 | * the wire call either way and this spares us an invalidation. | ||
118 | */ | ||
119 | if (fattr->cf_flags & CIFS_FATTR_NEED_REVAL) | ||
120 | return; | ||
121 | |||
114 | dentry = d_alloc(parent, name); | 122 | dentry = d_alloc(parent, name); |
115 | if (!dentry) | 123 | if (!dentry) |
116 | return; | 124 | return; |
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 79358e341fd2..08dd37bb23aa 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c | |||
@@ -197,7 +197,7 @@ static void unicode_domain_string(char **pbcc_area, struct cifs_ses *ses, | |||
197 | bytes_ret = 0; | 197 | bytes_ret = 0; |
198 | } else | 198 | } else |
199 | bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, ses->domainName, | 199 | bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, ses->domainName, |
200 | 256, nls_cp); | 200 | CIFS_MAX_DOMAINNAME_LEN, nls_cp); |
201 | bcc_ptr += 2 * bytes_ret; | 201 | bcc_ptr += 2 * bytes_ret; |
202 | bcc_ptr += 2; /* account for null terminator */ | 202 | bcc_ptr += 2; /* account for null terminator */ |
203 | 203 | ||
@@ -255,8 +255,8 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses, | |||
255 | 255 | ||
256 | /* copy domain */ | 256 | /* copy domain */ |
257 | if (ses->domainName != NULL) { | 257 | if (ses->domainName != NULL) { |
258 | strncpy(bcc_ptr, ses->domainName, 256); | 258 | strncpy(bcc_ptr, ses->domainName, CIFS_MAX_DOMAINNAME_LEN); |
259 | bcc_ptr += strnlen(ses->domainName, 256); | 259 | bcc_ptr += strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN); |
260 | } /* else we will send a null domain name | 260 | } /* else we will send a null domain name |
261 | so the server will default to its own domain */ | 261 | so the server will default to its own domain */ |
262 | *bcc_ptr = 0; | 262 | *bcc_ptr = 0; |
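The cifsglob.h, connect.c and sess.c hunks above replace the magic number 256 with a named CIFS_MAX_DOMAINNAME_LEN and apply it consistently: reject the mount option when strnlen() hits the limit, size the keyring description from it, and bound the strncpy()/strnlen() pair when the domain is copied into the session setup buffer. A small sketch of that bounding pattern; MY_MAX_NAME and the function names are illustrative only.

    #include <linux/errno.h>
    #include <linux/string.h>

    #define MY_MAX_NAME 256  /* illustrative limit */

    /* Returns 0 if 'name' fits, -EINVAL if it is MY_MAX_NAME bytes or longer. */
    static int my_check_name(const char *name)
    {
        /* strnlen() never reads past MY_MAX_NAME bytes */
        if (strnlen(name, MY_MAX_NAME) == MY_MAX_NAME)
            return -EINVAL;
        return 0;
    }

    /* Copy at most MY_MAX_NAME bytes and return a pointer past what was written. */
    static char *my_copy_name(char *dst, const char *name)
    {
        strncpy(dst, name, MY_MAX_NAME);
        return dst + strnlen(name, MY_MAX_NAME);
    }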
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c index 6457690731a2..60943978aec3 100644 --- a/fs/cifs/smb1ops.c +++ b/fs/cifs/smb1ops.c | |||
@@ -944,6 +944,7 @@ struct smb_version_operations smb1_operations = { | |||
944 | .mand_lock = cifs_mand_lock, | 944 | .mand_lock = cifs_mand_lock, |
945 | .mand_unlock_range = cifs_unlock_range, | 945 | .mand_unlock_range = cifs_unlock_range, |
946 | .push_mand_locks = cifs_push_mandatory_locks, | 946 | .push_mand_locks = cifs_push_mandatory_locks, |
947 | .query_mf_symlink = open_query_close_cifs_symlink, | ||
947 | }; | 948 | }; |
948 | 949 | ||
949 | struct smb_version_values smb1_values = { | 950 | struct smb_version_values smb1_values = { |
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c index 301b191270b9..4f2300d020c7 100644 --- a/fs/cifs/smb2transport.c +++ b/fs/cifs/smb2transport.c | |||
@@ -42,6 +42,7 @@ | |||
42 | static int | 42 | static int |
43 | smb2_crypto_shash_allocate(struct TCP_Server_Info *server) | 43 | smb2_crypto_shash_allocate(struct TCP_Server_Info *server) |
44 | { | 44 | { |
45 | int rc; | ||
45 | unsigned int size; | 46 | unsigned int size; |
46 | 47 | ||
47 | if (server->secmech.sdeschmacsha256 != NULL) | 48 | if (server->secmech.sdeschmacsha256 != NULL) |
@@ -50,7 +51,9 @@ smb2_crypto_shash_allocate(struct TCP_Server_Info *server) | |||
50 | server->secmech.hmacsha256 = crypto_alloc_shash("hmac(sha256)", 0, 0); | 51 | server->secmech.hmacsha256 = crypto_alloc_shash("hmac(sha256)", 0, 0); |
51 | if (IS_ERR(server->secmech.hmacsha256)) { | 52 | if (IS_ERR(server->secmech.hmacsha256)) { |
52 | cifs_dbg(VFS, "could not allocate crypto hmacsha256\n"); | 53 | cifs_dbg(VFS, "could not allocate crypto hmacsha256\n"); |
53 | return PTR_ERR(server->secmech.hmacsha256); | 54 | rc = PTR_ERR(server->secmech.hmacsha256); |
55 | server->secmech.hmacsha256 = NULL; | ||
56 | return rc; | ||
54 | } | 57 | } |
55 | 58 | ||
56 | size = sizeof(struct shash_desc) + | 59 | size = sizeof(struct shash_desc) + |
@@ -87,7 +90,9 @@ smb3_crypto_shash_allocate(struct TCP_Server_Info *server) | |||
87 | server->secmech.sdeschmacsha256 = NULL; | 90 | server->secmech.sdeschmacsha256 = NULL; |
88 | crypto_free_shash(server->secmech.hmacsha256); | 91 | crypto_free_shash(server->secmech.hmacsha256); |
89 | server->secmech.hmacsha256 = NULL; | 92 | server->secmech.hmacsha256 = NULL; |
90 | return PTR_ERR(server->secmech.cmacaes); | 93 | rc = PTR_ERR(server->secmech.cmacaes); |
94 | server->secmech.cmacaes = NULL; | ||
95 | return rc; | ||
91 | } | 96 | } |
92 | 97 | ||
93 | size = sizeof(struct shash_desc) + | 98 | size = sizeof(struct shash_desc) + |
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index 4888cb3fdef7..c7c83ff0f752 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c | |||
@@ -533,8 +533,7 @@ EXPORT_SYMBOL_GPL(debugfs_remove); | |||
533 | */ | 533 | */ |
534 | void debugfs_remove_recursive(struct dentry *dentry) | 534 | void debugfs_remove_recursive(struct dentry *dentry) |
535 | { | 535 | { |
536 | struct dentry *child; | 536 | struct dentry *child, *next, *parent; |
537 | struct dentry *parent; | ||
538 | 537 | ||
539 | if (IS_ERR_OR_NULL(dentry)) | 538 | if (IS_ERR_OR_NULL(dentry)) |
540 | return; | 539 | return; |
@@ -544,61 +543,37 @@ void debugfs_remove_recursive(struct dentry *dentry) | |||
544 | return; | 543 | return; |
545 | 544 | ||
546 | parent = dentry; | 545 | parent = dentry; |
546 | down: | ||
547 | mutex_lock(&parent->d_inode->i_mutex); | 547 | mutex_lock(&parent->d_inode->i_mutex); |
548 | list_for_each_entry_safe(child, next, &parent->d_subdirs, d_u.d_child) { | ||
549 | if (!debugfs_positive(child)) | ||
550 | continue; | ||
548 | 551 | ||
549 | while (1) { | 552 | /* perhaps simple_empty(child) makes more sense */ |
550 | /* | ||
551 | * When all dentries under "parent" has been removed, | ||
552 | * walk up the tree until we reach our starting point. | ||
553 | */ | ||
554 | if (list_empty(&parent->d_subdirs)) { | ||
555 | mutex_unlock(&parent->d_inode->i_mutex); | ||
556 | if (parent == dentry) | ||
557 | break; | ||
558 | parent = parent->d_parent; | ||
559 | mutex_lock(&parent->d_inode->i_mutex); | ||
560 | } | ||
561 | child = list_entry(parent->d_subdirs.next, struct dentry, | ||
562 | d_u.d_child); | ||
563 | next_sibling: | ||
564 | |||
565 | /* | ||
566 | * If "child" isn't empty, walk down the tree and | ||
567 | * remove all its descendants first. | ||
568 | */ | ||
569 | if (!list_empty(&child->d_subdirs)) { | 553 | if (!list_empty(&child->d_subdirs)) { |
570 | mutex_unlock(&parent->d_inode->i_mutex); | 554 | mutex_unlock(&parent->d_inode->i_mutex); |
571 | parent = child; | 555 | parent = child; |
572 | mutex_lock(&parent->d_inode->i_mutex); | 556 | goto down; |
573 | continue; | ||
574 | } | 557 | } |
575 | __debugfs_remove(child, parent); | 558 | up: |
576 | if (parent->d_subdirs.next == &child->d_u.d_child) { | 559 | if (!__debugfs_remove(child, parent)) |
577 | /* | 560 | simple_release_fs(&debugfs_mount, &debugfs_mount_count); |
578 | * Try the next sibling. | ||
579 | */ | ||
580 | if (child->d_u.d_child.next != &parent->d_subdirs) { | ||
581 | child = list_entry(child->d_u.d_child.next, | ||
582 | struct dentry, | ||
583 | d_u.d_child); | ||
584 | goto next_sibling; | ||
585 | } | ||
586 | |||
587 | /* | ||
588 | * Avoid infinite loop if we fail to remove | ||
589 | * one dentry. | ||
590 | */ | ||
591 | mutex_unlock(&parent->d_inode->i_mutex); | ||
592 | break; | ||
593 | } | ||
594 | simple_release_fs(&debugfs_mount, &debugfs_mount_count); | ||
595 | } | 561 | } |
596 | 562 | ||
597 | parent = dentry->d_parent; | 563 | mutex_unlock(&parent->d_inode->i_mutex); |
564 | child = parent; | ||
565 | parent = parent->d_parent; | ||
598 | mutex_lock(&parent->d_inode->i_mutex); | 566 | mutex_lock(&parent->d_inode->i_mutex); |
599 | __debugfs_remove(dentry, parent); | 567 | |
568 | if (child != dentry) { | ||
569 | next = list_entry(child->d_u.d_child.next, struct dentry, | ||
570 | d_u.d_child); | ||
571 | goto up; | ||
572 | } | ||
573 | |||
574 | if (!__debugfs_remove(child, parent)) | ||
575 | simple_release_fs(&debugfs_mount, &debugfs_mount_count); | ||
600 | mutex_unlock(&parent->d_inode->i_mutex); | 576 | mutex_unlock(&parent->d_inode->i_mutex); |
601 | simple_release_fs(&debugfs_mount, &debugfs_mount_count); | ||
602 | } | 577 | } |
603 | EXPORT_SYMBOL_GPL(debugfs_remove_recursive); | 578 | EXPORT_SYMBOL_GPL(debugfs_remove_recursive); |
604 | 579 | ||
diff --git a/fs/dlm/user.c b/fs/dlm/user.c index 911649a47dd5..812149119fa3 100644 --- a/fs/dlm/user.c +++ b/fs/dlm/user.c | |||
@@ -686,7 +686,6 @@ static int device_close(struct inode *inode, struct file *file) | |||
686 | device_remove_lockspace() */ | 686 | device_remove_lockspace() */ |
687 | 687 | ||
688 | sigprocmask(SIG_SETMASK, &tmpsig, NULL); | 688 | sigprocmask(SIG_SETMASK, &tmpsig, NULL); |
689 | recalc_sigpending(); | ||
690 | 689 | ||
691 | return 0; | 690 | return 0; |
692 | } | 691 | } |
diff --git a/fs/exec.c b/fs/exec.c --- a/fs/exec.c +++ b/fs/exec.c | |||
@@ -608,7 +608,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) | |||
608 | return -ENOMEM; | 608 | return -ENOMEM; |
609 | 609 | ||
610 | lru_add_drain(); | 610 | lru_add_drain(); |
611 | tlb_gather_mmu(&tlb, mm, 0); | 611 | tlb_gather_mmu(&tlb, mm, old_start, old_end); |
612 | if (new_end > old_start) { | 612 | if (new_end > old_start) { |
613 | /* | 613 | /* |
614 | * when the old and new regions overlap clear from new_end. | 614 | * when the old and new regions overlap clear from new_end. |
@@ -625,7 +625,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift) | |||
625 | free_pgd_range(&tlb, old_start, old_end, new_end, | 625 | free_pgd_range(&tlb, old_start, old_end, new_end, |
626 | vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING); | 626 | vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING); |
627 | } | 627 | } |
628 | tlb_finish_mmu(&tlb, new_end, old_end); | 628 | tlb_finish_mmu(&tlb, old_start, old_end); |
629 | 629 | ||
630 | /* | 630 | /* |
631 | * Shrink the vma to just the new range. Always succeeds. | 631 | * Shrink the vma to just the new range. Always succeeds. |
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index b577e45425b0..0ab26fbf3380 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h | |||
@@ -2086,6 +2086,7 @@ extern int ext4_sync_inode(handle_t *, struct inode *); | |||
2086 | extern void ext4_dirty_inode(struct inode *, int); | 2086 | extern void ext4_dirty_inode(struct inode *, int); |
2087 | extern int ext4_change_inode_journal_flag(struct inode *, int); | 2087 | extern int ext4_change_inode_journal_flag(struct inode *, int); |
2088 | extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *); | 2088 | extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *); |
2089 | extern int ext4_inode_attach_jinode(struct inode *inode); | ||
2089 | extern int ext4_can_truncate(struct inode *inode); | 2090 | extern int ext4_can_truncate(struct inode *inode); |
2090 | extern void ext4_truncate(struct inode *); | 2091 | extern void ext4_truncate(struct inode *); |
2091 | extern int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length); | 2092 | extern int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length); |
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c index 72a3600aedbd..17ac112ab101 100644 --- a/fs/ext4/ext4_jbd2.c +++ b/fs/ext4/ext4_jbd2.c | |||
@@ -255,10 +255,10 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line, | |||
255 | set_buffer_prio(bh); | 255 | set_buffer_prio(bh); |
256 | if (ext4_handle_valid(handle)) { | 256 | if (ext4_handle_valid(handle)) { |
257 | err = jbd2_journal_dirty_metadata(handle, bh); | 257 | err = jbd2_journal_dirty_metadata(handle, bh); |
258 | if (err) { | 258 | /* Errors can only happen if there is a bug */ |
259 | /* Errors can only happen if there is a bug */ | 259 | if (WARN_ON_ONCE(err)) { |
260 | handle->h_err = err; | 260 | ext4_journal_abort_handle(where, line, __func__, bh, |
261 | __ext4_journal_stop(where, line, handle); | 261 | handle, err); |
262 | } | 262 | } |
263 | } else { | 263 | } else { |
264 | if (inode) | 264 | if (inode) |
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index a61873808f76..72ba4705d4fa 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c | |||
@@ -4412,7 +4412,7 @@ void ext4_ext_truncate(handle_t *handle, struct inode *inode) | |||
4412 | retry: | 4412 | retry: |
4413 | err = ext4_es_remove_extent(inode, last_block, | 4413 | err = ext4_es_remove_extent(inode, last_block, |
4414 | EXT_MAX_BLOCKS - last_block); | 4414 | EXT_MAX_BLOCKS - last_block); |
4415 | if (err == ENOMEM) { | 4415 | if (err == -ENOMEM) { |
4416 | cond_resched(); | 4416 | cond_resched(); |
4417 | congestion_wait(BLK_RW_ASYNC, HZ/50); | 4417 | congestion_wait(BLK_RW_ASYNC, HZ/50); |
4418 | goto retry; | 4418 | goto retry; |
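The one-character fix above matters because in-kernel functions report failure as a negative errno, so comparing against the positive ENOMEM constant never matched and the retry path was effectively dead. A tiny sketch of the convention (function names invented):

    #include <linux/errno.h>
    #include <linux/slab.h>

    /* Convention: return 0 on success or a negative errno on failure. */
    static int try_alloc(void **out)
    {
        *out = kmalloc(64, GFP_NOFS);
        if (!*out)
            return -ENOMEM;        /* negative, not ENOMEM */
        return 0;
    }

    static void caller(void)
    {
        void *buf;
        int err = try_alloc(&buf);

        if (err == -ENOMEM)        /* err == ENOMEM would never be true */
            return;                /* back off and retry, as the hunk above does */
        kfree(buf);
    }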
diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 6f4cc567c382..319c9d26279a 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c | |||
@@ -219,7 +219,6 @@ static int ext4_file_open(struct inode * inode, struct file * filp) | |||
219 | { | 219 | { |
220 | struct super_block *sb = inode->i_sb; | 220 | struct super_block *sb = inode->i_sb; |
221 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); | 221 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
222 | struct ext4_inode_info *ei = EXT4_I(inode); | ||
223 | struct vfsmount *mnt = filp->f_path.mnt; | 222 | struct vfsmount *mnt = filp->f_path.mnt; |
224 | struct path path; | 223 | struct path path; |
225 | char buf[64], *cp; | 224 | char buf[64], *cp; |
@@ -259,22 +258,10 @@ static int ext4_file_open(struct inode * inode, struct file * filp) | |||
259 | * Set up the jbd2_inode if we are opening the inode for | 258 | * Set up the jbd2_inode if we are opening the inode for |
260 | * writing and the journal is present | 259 | * writing and the journal is present |
261 | */ | 260 | */ |
262 | if (sbi->s_journal && !ei->jinode && (filp->f_mode & FMODE_WRITE)) { | 261 | if (filp->f_mode & FMODE_WRITE) { |
263 | struct jbd2_inode *jinode = jbd2_alloc_inode(GFP_KERNEL); | 262 | int ret = ext4_inode_attach_jinode(inode); |
264 | 263 | if (ret < 0) | |
265 | spin_lock(&inode->i_lock); | 264 | return ret; |
266 | if (!ei->jinode) { | ||
267 | if (!jinode) { | ||
268 | spin_unlock(&inode->i_lock); | ||
269 | return -ENOMEM; | ||
270 | } | ||
271 | ei->jinode = jinode; | ||
272 | jbd2_journal_init_jbd_inode(ei->jinode, inode); | ||
273 | jinode = NULL; | ||
274 | } | ||
275 | spin_unlock(&inode->i_lock); | ||
276 | if (unlikely(jinode != NULL)) | ||
277 | jbd2_free_inode(jinode); | ||
278 | } | 265 | } |
279 | return dquot_file_open(inode, filp); | 266 | return dquot_file_open(inode, filp); |
280 | } | 267 | } |
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index f03598c6ffd3..8bf5999875ee 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c | |||
@@ -734,11 +734,8 @@ repeat_in_this_group: | |||
734 | ino = ext4_find_next_zero_bit((unsigned long *) | 734 | ino = ext4_find_next_zero_bit((unsigned long *) |
735 | inode_bitmap_bh->b_data, | 735 | inode_bitmap_bh->b_data, |
736 | EXT4_INODES_PER_GROUP(sb), ino); | 736 | EXT4_INODES_PER_GROUP(sb), ino); |
737 | if (ino >= EXT4_INODES_PER_GROUP(sb)) { | 737 | if (ino >= EXT4_INODES_PER_GROUP(sb)) |
738 | if (++group == ngroups) | 738 | goto next_group; |
739 | group = 0; | ||
740 | continue; | ||
741 | } | ||
742 | if (group == 0 && (ino+1) < EXT4_FIRST_INO(sb)) { | 739 | if (group == 0 && (ino+1) < EXT4_FIRST_INO(sb)) { |
743 | ext4_error(sb, "reserved inode found cleared - " | 740 | ext4_error(sb, "reserved inode found cleared - " |
744 | "inode=%lu", ino + 1); | 741 | "inode=%lu", ino + 1); |
@@ -769,6 +766,9 @@ repeat_in_this_group: | |||
769 | goto got; /* we grabbed the inode! */ | 766 | goto got; /* we grabbed the inode! */ |
770 | if (ino < EXT4_INODES_PER_GROUP(sb)) | 767 | if (ino < EXT4_INODES_PER_GROUP(sb)) |
771 | goto repeat_in_this_group; | 768 | goto repeat_in_this_group; |
769 | next_group: | ||
770 | if (++group == ngroups) | ||
771 | group = 0; | ||
772 | } | 772 | } |
773 | err = -ENOSPC; | 773 | err = -ENOSPC; |
774 | goto out; | 774 | goto out; |
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index ba33c67d6e48..c2ca04e67a4f 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
@@ -555,14 +555,13 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode, | |||
555 | int ret; | 555 | int ret; |
556 | unsigned long long status; | 556 | unsigned long long status; |
557 | 557 | ||
558 | #ifdef ES_AGGRESSIVE_TEST | 558 | if (unlikely(retval != map->m_len)) { |
559 | if (retval != map->m_len) { | 559 | ext4_warning(inode->i_sb, |
560 | printk("ES len assertion failed for inode: %lu " | 560 | "ES len assertion failed for inode " |
561 | "retval %d != map->m_len %d " | 561 | "%lu: retval %d != map->m_len %d", |
562 | "in %s (lookup)\n", inode->i_ino, retval, | 562 | inode->i_ino, retval, map->m_len); |
563 | map->m_len, __func__); | 563 | WARN_ON(1); |
564 | } | 564 | } |
565 | #endif | ||
566 | 565 | ||
567 | status = map->m_flags & EXT4_MAP_UNWRITTEN ? | 566 | status = map->m_flags & EXT4_MAP_UNWRITTEN ? |
568 | EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; | 567 | EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; |
@@ -656,14 +655,13 @@ found: | |||
656 | int ret; | 655 | int ret; |
657 | unsigned long long status; | 656 | unsigned long long status; |
658 | 657 | ||
659 | #ifdef ES_AGGRESSIVE_TEST | 658 | if (unlikely(retval != map->m_len)) { |
660 | if (retval != map->m_len) { | 659 | ext4_warning(inode->i_sb, |
661 | printk("ES len assertion failed for inode: %lu " | 660 | "ES len assertion failed for inode " |
662 | "retval %d != map->m_len %d " | 661 | "%lu: retval %d != map->m_len %d", |
663 | "in %s (allocation)\n", inode->i_ino, retval, | 662 | inode->i_ino, retval, map->m_len); |
664 | map->m_len, __func__); | 663 | WARN_ON(1); |
665 | } | 664 | } |
666 | #endif | ||
667 | 665 | ||
668 | /* | 666 | /* |
669 | * If the extent has been zeroed out, we don't need to update | 667 | * If the extent has been zeroed out, we don't need to update |
@@ -1637,14 +1635,13 @@ add_delayed: | |||
1637 | int ret; | 1635 | int ret; |
1638 | unsigned long long status; | 1636 | unsigned long long status; |
1639 | 1637 | ||
1640 | #ifdef ES_AGGRESSIVE_TEST | 1638 | if (unlikely(retval != map->m_len)) { |
1641 | if (retval != map->m_len) { | 1639 | ext4_warning(inode->i_sb, |
1642 | printk("ES len assertion failed for inode: %lu " | 1640 | "ES len assertion failed for inode " |
1643 | "retval %d != map->m_len %d " | 1641 | "%lu: retval %d != map->m_len %d", |
1644 | "in %s (lookup)\n", inode->i_ino, retval, | 1642 | inode->i_ino, retval, map->m_len); |
1645 | map->m_len, __func__); | 1643 | WARN_ON(1); |
1646 | } | 1644 | } |
1647 | #endif | ||
1648 | 1645 | ||
1649 | status = map->m_flags & EXT4_MAP_UNWRITTEN ? | 1646 | status = map->m_flags & EXT4_MAP_UNWRITTEN ? |
1650 | EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; | 1647 | EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN; |
@@ -3536,6 +3533,18 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length) | |||
3536 | offset; | 3533 | offset; |
3537 | } | 3534 | } |
3538 | 3535 | ||
3536 | if (offset & (sb->s_blocksize - 1) || | ||
3537 | (offset + length) & (sb->s_blocksize - 1)) { | ||
3538 | /* | ||
3539 | * Attach jinode to inode for jbd2 if we do any zeroing of | ||
3540 | * partial block | ||
3541 | */ | ||
3542 | ret = ext4_inode_attach_jinode(inode); | ||
3543 | if (ret < 0) | ||
3544 | goto out_mutex; | ||
3545 | |||
3546 | } | ||
3547 | |||
3539 | first_block_offset = round_up(offset, sb->s_blocksize); | 3548 | first_block_offset = round_up(offset, sb->s_blocksize); |
3540 | last_block_offset = round_down((offset + length), sb->s_blocksize) - 1; | 3549 | last_block_offset = round_down((offset + length), sb->s_blocksize) - 1; |
3541 | 3550 | ||
@@ -3604,6 +3613,31 @@ out_mutex: | |||
3604 | return ret; | 3613 | return ret; |
3605 | } | 3614 | } |
3606 | 3615 | ||
3616 | int ext4_inode_attach_jinode(struct inode *inode) | ||
3617 | { | ||
3618 | struct ext4_inode_info *ei = EXT4_I(inode); | ||
3619 | struct jbd2_inode *jinode; | ||
3620 | |||
3621 | if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal) | ||
3622 | return 0; | ||
3623 | |||
3624 | jinode = jbd2_alloc_inode(GFP_KERNEL); | ||
3625 | spin_lock(&inode->i_lock); | ||
3626 | if (!ei->jinode) { | ||
3627 | if (!jinode) { | ||
3628 | spin_unlock(&inode->i_lock); | ||
3629 | return -ENOMEM; | ||
3630 | } | ||
3631 | ei->jinode = jinode; | ||
3632 | jbd2_journal_init_jbd_inode(ei->jinode, inode); | ||
3633 | jinode = NULL; | ||
3634 | } | ||
3635 | spin_unlock(&inode->i_lock); | ||
3636 | if (unlikely(jinode != NULL)) | ||
3637 | jbd2_free_inode(jinode); | ||
3638 | return 0; | ||
3639 | } | ||
3640 | |||
3607 | /* | 3641 | /* |
3608 | * ext4_truncate() | 3642 | * ext4_truncate() |
3609 | * | 3643 | * |
@@ -3664,6 +3698,12 @@ void ext4_truncate(struct inode *inode) | |||
3664 | return; | 3698 | return; |
3665 | } | 3699 | } |
3666 | 3700 | ||
3701 | /* If we zero-out tail of the page, we have to create jinode for jbd2 */ | ||
3702 | if (inode->i_size & (inode->i_sb->s_blocksize - 1)) { | ||
3703 | if (ext4_inode_attach_jinode(inode) < 0) | ||
3704 | return; | ||
3705 | } | ||
3706 | |||
3667 | if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) | 3707 | if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) |
3668 | credits = ext4_writepage_trans_blocks(inode); | 3708 | credits = ext4_writepage_trans_blocks(inode); |
3669 | else | 3709 | else |
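The new ext4_inode_attach_jinode() helper, added in the hunks above and now shared by ext4_file_open(), ext4_punch_hole() and ext4_truncate(), is a textbook instance of a common idiom: allocate while no spinlock is held, re-check the field under the lock, and free the allocation if another task attached one first. A rough userspace rendering of the same pattern, with pthreads standing in for i_lock and invented names:

    #include <pthread.h>
    #include <stdlib.h>

    struct cache {
        pthread_mutex_t lock;
        void *lazy;                    /* attached at most once */
    };

    static struct cache the_cache = { .lock = PTHREAD_MUTEX_INITIALIZER };

    static int attach_lazy(struct cache *c)
    {
        void *fresh;

        if (c->lazy)                   /* fast path: already attached */
            return 0;

        fresh = malloc(128);           /* may block or fail; done unlocked */

        pthread_mutex_lock(&c->lock);
        if (!c->lazy) {
            if (!fresh) {
                pthread_mutex_unlock(&c->lock);
                return -1;             /* -ENOMEM in the kernel helper */
            }
            c->lazy = fresh;
            fresh = NULL;
        }
        pthread_mutex_unlock(&c->lock);

        free(fresh);                   /* lost the race: drop our copy */
        return 0;
    }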
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c index 9491ac0590f7..c0427e2f6648 100644 --- a/fs/ext4/ioctl.c +++ b/fs/ext4/ioctl.c | |||
@@ -77,8 +77,10 @@ static void swap_inode_data(struct inode *inode1, struct inode *inode2) | |||
77 | memswap(ei1->i_data, ei2->i_data, sizeof(ei1->i_data)); | 77 | memswap(ei1->i_data, ei2->i_data, sizeof(ei1->i_data)); |
78 | memswap(&ei1->i_flags, &ei2->i_flags, sizeof(ei1->i_flags)); | 78 | memswap(&ei1->i_flags, &ei2->i_flags, sizeof(ei1->i_flags)); |
79 | memswap(&ei1->i_disksize, &ei2->i_disksize, sizeof(ei1->i_disksize)); | 79 | memswap(&ei1->i_disksize, &ei2->i_disksize, sizeof(ei1->i_disksize)); |
80 | memswap(&ei1->i_es_tree, &ei2->i_es_tree, sizeof(ei1->i_es_tree)); | 80 | ext4_es_remove_extent(inode1, 0, EXT_MAX_BLOCKS); |
81 | memswap(&ei1->i_es_lru_nr, &ei2->i_es_lru_nr, sizeof(ei1->i_es_lru_nr)); | 81 | ext4_es_remove_extent(inode2, 0, EXT_MAX_BLOCKS); |
82 | ext4_es_lru_del(inode1); | ||
83 | ext4_es_lru_del(inode2); | ||
82 | 84 | ||
83 | isize = i_size_read(inode1); | 85 | isize = i_size_read(inode1); |
84 | i_size_write(inode1, i_size_read(inode2)); | 86 | i_size_write(inode1, i_size_read(inode2)); |
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index bca26f34edf4..b59373b625e9 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c | |||
@@ -1359,7 +1359,7 @@ static const struct mount_opts { | |||
1359 | {Opt_delalloc, EXT4_MOUNT_DELALLOC, | 1359 | {Opt_delalloc, EXT4_MOUNT_DELALLOC, |
1360 | MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT}, | 1360 | MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT}, |
1361 | {Opt_nodelalloc, EXT4_MOUNT_DELALLOC, | 1361 | {Opt_nodelalloc, EXT4_MOUNT_DELALLOC, |
1362 | MOPT_EXT4_ONLY | MOPT_CLEAR | MOPT_EXPLICIT}, | 1362 | MOPT_EXT4_ONLY | MOPT_CLEAR}, |
1363 | {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM, | 1363 | {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM, |
1364 | MOPT_EXT4_ONLY | MOPT_SET}, | 1364 | MOPT_EXT4_ONLY | MOPT_SET}, |
1365 | {Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT | | 1365 | {Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT | |
@@ -3483,7 +3483,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
3483 | } | 3483 | } |
3484 | if (test_opt(sb, DIOREAD_NOLOCK)) { | 3484 | if (test_opt(sb, DIOREAD_NOLOCK)) { |
3485 | ext4_msg(sb, KERN_ERR, "can't mount with " | 3485 | ext4_msg(sb, KERN_ERR, "can't mount with " |
3486 | "both data=journal and delalloc"); | 3486 | "both data=journal and dioread_nolock"); |
3487 | goto failed_mount; | 3487 | goto failed_mount; |
3488 | } | 3488 | } |
3489 | if (test_opt(sb, DELALLOC)) | 3489 | if (test_opt(sb, DELALLOC)) |
@@ -4727,6 +4727,21 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) | |||
4727 | goto restore_opts; | 4727 | goto restore_opts; |
4728 | } | 4728 | } |
4729 | 4729 | ||
4730 | if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) { | ||
4731 | if (test_opt2(sb, EXPLICIT_DELALLOC)) { | ||
4732 | ext4_msg(sb, KERN_ERR, "can't mount with " | ||
4733 | "both data=journal and delalloc"); | ||
4734 | err = -EINVAL; | ||
4735 | goto restore_opts; | ||
4736 | } | ||
4737 | if (test_opt(sb, DIOREAD_NOLOCK)) { | ||
4738 | ext4_msg(sb, KERN_ERR, "can't mount with " | ||
4739 | "both data=journal and dioread_nolock"); | ||
4740 | err = -EINVAL; | ||
4741 | goto restore_opts; | ||
4742 | } | ||
4743 | } | ||
4744 | |||
4730 | if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) | 4745 | if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) |
4731 | ext4_abort(sb, "Abort forced by user"); | 4746 | ext4_abort(sb, "Abort forced by user"); |
4732 | 4747 | ||
@@ -5481,6 +5496,7 @@ static void __exit ext4_exit_fs(void) | |||
5481 | kset_unregister(ext4_kset); | 5496 | kset_unregister(ext4_kset); |
5482 | ext4_exit_system_zone(); | 5497 | ext4_exit_system_zone(); |
5483 | ext4_exit_pageio(); | 5498 | ext4_exit_pageio(); |
5499 | ext4_exit_es(); | ||
5484 | } | 5500 | } |
5485 | 5501 | ||
5486 | MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others"); | 5502 | MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others"); |
diff --git a/fs/fcntl.c b/fs/fcntl.c index 6599222536eb..65343c3741ff 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c | |||
@@ -730,14 +730,14 @@ static int __init fcntl_init(void) | |||
730 | * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY | 730 | * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY |
731 | * is defined as O_NONBLOCK on some platforms and not on others. | 731 | * is defined as O_NONBLOCK on some platforms and not on others. |
732 | */ | 732 | */ |
733 | BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32( | 733 | BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32( |
734 | O_RDONLY | O_WRONLY | O_RDWR | | 734 | O_RDONLY | O_WRONLY | O_RDWR | |
735 | O_CREAT | O_EXCL | O_NOCTTY | | 735 | O_CREAT | O_EXCL | O_NOCTTY | |
736 | O_TRUNC | O_APPEND | /* O_NONBLOCK | */ | 736 | O_TRUNC | O_APPEND | /* O_NONBLOCK | */ |
737 | __O_SYNC | O_DSYNC | FASYNC | | 737 | __O_SYNC | O_DSYNC | FASYNC | |
738 | O_DIRECT | O_LARGEFILE | O_DIRECTORY | | 738 | O_DIRECT | O_LARGEFILE | O_DIRECTORY | |
739 | O_NOFOLLOW | O_NOATIME | O_CLOEXEC | | 739 | O_NOFOLLOW | O_NOATIME | O_CLOEXEC | |
740 | __FMODE_EXEC | O_PATH | 740 | __FMODE_EXEC | O_PATH | __O_TMPFILE |
741 | )); | 741 | )); |
742 | 742 | ||
743 | fasync_cache = kmem_cache_create("fasync_cache", | 743 | fasync_cache = kmem_cache_create("fasync_cache", |
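The updated BUILD_BUG_ON above accounts for __O_TMPFILE, the flag bit behind the new O_TMPFILE open flag. A hedged userspace sketch of what the flag is for (assumes a 3.11+ kernel, a libc that defines O_TMPFILE, and that /tmp sits on a filesystem supporting it; paths are invented):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        char path[64];
        /* Unnamed file: visible to nobody until it is linked in. */
        int fd = open("/tmp", O_TMPFILE | O_RDWR, 0600);

        if (fd < 0) {
            perror("open(O_TMPFILE)");
            return 1;
        }
        if (write(fd, "data\n", 5) != 5)
            perror("write");

        /* Publish it under a real name only once it is complete. */
        snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
        if (linkat(AT_FDCWD, path, AT_FDCWD, "/tmp/published",
                   AT_SYMLINK_FOLLOW))
            perror("linkat");

        close(fd);
        return 0;
    }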
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index a3f868ae3fd4..34423978b170 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c | |||
@@ -463,6 +463,14 @@ static struct inode *hugetlbfs_get_root(struct super_block *sb, | |||
463 | return inode; | 463 | return inode; |
464 | } | 464 | } |
465 | 465 | ||
466 | /* | ||
467 | * Hugetlbfs is not reclaimable; therefore its i_mmap_mutex will never | ||
468 | * be taken from reclaim -- unlike regular filesystems. This needs an | ||
469 | * annotation because huge_pmd_share() does an allocation under | ||
470 | * i_mmap_mutex. | ||
471 | */ | ||
472 | struct lock_class_key hugetlbfs_i_mmap_mutex_key; | ||
473 | |||
466 | static struct inode *hugetlbfs_get_inode(struct super_block *sb, | 474 | static struct inode *hugetlbfs_get_inode(struct super_block *sb, |
467 | struct inode *dir, | 475 | struct inode *dir, |
468 | umode_t mode, dev_t dev) | 476 | umode_t mode, dev_t dev) |
@@ -474,6 +482,8 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb, | |||
474 | struct hugetlbfs_inode_info *info; | 482 | struct hugetlbfs_inode_info *info; |
475 | inode->i_ino = get_next_ino(); | 483 | inode->i_ino = get_next_ino(); |
476 | inode_init_owner(inode, dir, mode); | 484 | inode_init_owner(inode, dir, mode); |
485 | lockdep_set_class(&inode->i_mapping->i_mmap_mutex, | ||
486 | &hugetlbfs_i_mmap_mutex_key); | ||
477 | inode->i_mapping->a_ops = &hugetlbfs_aops; | 487 | inode->i_mapping->a_ops = &hugetlbfs_aops; |
478 | inode->i_mapping->backing_dev_info =&hugetlbfs_backing_dev_info; | 488 | inode->i_mapping->backing_dev_info =&hugetlbfs_backing_dev_info; |
479 | inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; | 489 | inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; |
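The comment added in the hunk explains the why; the mechanism is lockdep_set_class(), which moves one lock instance into its own class keyed by a static struct lock_class_key. A generic sketch of the pattern (the widget structure and key name are invented):

    #include <linux/lockdep.h>
    #include <linux/mutex.h>

    /* One static key per distinct locking rule. */
    static struct lock_class_key special_widget_lock_key;

    struct widget {
        struct mutex lock;
    };

    static void widget_init_special(struct widget *w)
    {
        mutex_init(&w->lock);
        /*
         * Track this instance separately from ordinary widgets so a
         * dependency that is legal only for it does not trigger a
         * false-positive lockdep report against the common class.
         */
        lockdep_set_class(&w->lock, &special_widget_lock_key);
    }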
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c index 01bfe7662751..41e491b8e5d7 100644 --- a/fs/lockd/clntlock.c +++ b/fs/lockd/clntlock.c | |||
@@ -64,12 +64,17 @@ struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init) | |||
64 | nlm_init->protocol, nlm_version, | 64 | nlm_init->protocol, nlm_version, |
65 | nlm_init->hostname, nlm_init->noresvport, | 65 | nlm_init->hostname, nlm_init->noresvport, |
66 | nlm_init->net); | 66 | nlm_init->net); |
67 | if (host == NULL) { | 67 | if (host == NULL) |
68 | lockd_down(nlm_init->net); | 68 | goto out_nohost; |
69 | return ERR_PTR(-ENOLCK); | 69 | if (host->h_rpcclnt == NULL && nlm_bind_host(host) == NULL) |
70 | } | 70 | goto out_nobind; |
71 | 71 | ||
72 | return host; | 72 | return host; |
73 | out_nobind: | ||
74 | nlmclnt_release_host(host); | ||
75 | out_nohost: | ||
76 | lockd_down(nlm_init->net); | ||
77 | return ERR_PTR(-ENOLCK); | ||
73 | } | 78 | } |
74 | EXPORT_SYMBOL_GPL(nlmclnt_init); | 79 | EXPORT_SYMBOL_GPL(nlmclnt_init); |
75 | 80 | ||
diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c index 9760ecb9b60f..acd394716349 100644 --- a/fs/lockd/clntproc.c +++ b/fs/lockd/clntproc.c | |||
@@ -125,14 +125,15 @@ static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl) | |||
125 | { | 125 | { |
126 | struct nlm_args *argp = &req->a_args; | 126 | struct nlm_args *argp = &req->a_args; |
127 | struct nlm_lock *lock = &argp->lock; | 127 | struct nlm_lock *lock = &argp->lock; |
128 | char *nodename = req->a_host->h_rpcclnt->cl_nodename; | ||
128 | 129 | ||
129 | nlmclnt_next_cookie(&argp->cookie); | 130 | nlmclnt_next_cookie(&argp->cookie); |
130 | memcpy(&lock->fh, NFS_FH(file_inode(fl->fl_file)), sizeof(struct nfs_fh)); | 131 | memcpy(&lock->fh, NFS_FH(file_inode(fl->fl_file)), sizeof(struct nfs_fh)); |
131 | lock->caller = utsname()->nodename; | 132 | lock->caller = nodename; |
132 | lock->oh.data = req->a_owner; | 133 | lock->oh.data = req->a_owner; |
133 | lock->oh.len = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s", | 134 | lock->oh.len = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s", |
134 | (unsigned int)fl->fl_u.nfs_fl.owner->pid, | 135 | (unsigned int)fl->fl_u.nfs_fl.owner->pid, |
135 | utsname()->nodename); | 136 | nodename); |
136 | lock->svid = fl->fl_u.nfs_fl.owner->pid; | 137 | lock->svid = fl->fl_u.nfs_fl.owner->pid; |
137 | lock->fl.fl_start = fl->fl_start; | 138 | lock->fl.fl_start = fl->fl_start; |
138 | lock->fl.fl_end = fl->fl_end; | 139 | lock->fl.fl_end = fl->fl_end; |
diff --git a/fs/namei.c b/fs/namei.c index 8b61d103a8a7..89a612e392eb 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -3671,15 +3671,11 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname, | |||
3671 | if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0) | 3671 | if ((flags & ~(AT_SYMLINK_FOLLOW | AT_EMPTY_PATH)) != 0) |
3672 | return -EINVAL; | 3672 | return -EINVAL; |
3673 | /* | 3673 | /* |
3674 | * To use null names we require CAP_DAC_READ_SEARCH | 3674 | * Using empty names is equivalent to using AT_SYMLINK_FOLLOW |
3675 | * This ensures that not everyone will be able to create | 3675 | * on /proc/self/fd/<fd>. |
3676 | * handlink using the passed filedescriptor. | ||
3677 | */ | 3676 | */ |
3678 | if (flags & AT_EMPTY_PATH) { | 3677 | if (flags & AT_EMPTY_PATH) |
3679 | if (!capable(CAP_DAC_READ_SEARCH)) | ||
3680 | return -ENOENT; | ||
3681 | how = LOOKUP_EMPTY; | 3678 | how = LOOKUP_EMPTY; |
3682 | } | ||
3683 | 3679 | ||
3684 | if (flags & AT_SYMLINK_FOLLOW) | 3680 | if (flags & AT_SYMLINK_FOLLOW) |
3685 | how |= LOOKUP_FOLLOW; | 3681 | how |= LOOKUP_FOLLOW; |
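The replaced comment spells out the rationale: with AT_EMPTY_PATH the source is the file behind olddfd itself, equivalent to following /proc/self/fd/<fd>, so the hunk drops the CAP_DAC_READ_SEARCH gate. A userspace sketch of the call this enables (paths invented; assumes a kernel with this change applied):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/tmp/original", O_RDONLY);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        /* Link the already-open file under a second name, no source path. */
        if (linkat(fd, "", AT_FDCWD, "/tmp/alias", AT_EMPTY_PATH))
            perror("linkat(AT_EMPTY_PATH)");
        close(fd);
        return 0;
    }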
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index af6e806044d7..941246f2b43d 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c | |||
@@ -463,7 +463,6 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st | |||
463 | unlock_new_inode(inode); | 463 | unlock_new_inode(inode); |
464 | } else | 464 | } else |
465 | nfs_refresh_inode(inode, fattr); | 465 | nfs_refresh_inode(inode, fattr); |
466 | nfs_setsecurity(inode, fattr, label); | ||
467 | dprintk("NFS: nfs_fhget(%s/%Ld fh_crc=0x%08x ct=%d)\n", | 466 | dprintk("NFS: nfs_fhget(%s/%Ld fh_crc=0x%08x ct=%d)\n", |
468 | inode->i_sb->s_id, | 467 | inode->i_sb->s_id, |
469 | (long long)NFS_FILEID(inode), | 468 | (long long)NFS_FILEID(inode), |
@@ -963,9 +962,15 @@ EXPORT_SYMBOL_GPL(nfs_revalidate_inode); | |||
963 | static int nfs_invalidate_mapping(struct inode *inode, struct address_space *mapping) | 962 | static int nfs_invalidate_mapping(struct inode *inode, struct address_space *mapping) |
964 | { | 963 | { |
965 | struct nfs_inode *nfsi = NFS_I(inode); | 964 | struct nfs_inode *nfsi = NFS_I(inode); |
966 | 965 | int ret; | |
966 | |||
967 | if (mapping->nrpages != 0) { | 967 | if (mapping->nrpages != 0) { |
968 | int ret = invalidate_inode_pages2(mapping); | 968 | if (S_ISREG(inode->i_mode)) { |
969 | ret = nfs_sync_mapping(mapping); | ||
970 | if (ret < 0) | ||
971 | return ret; | ||
972 | } | ||
973 | ret = invalidate_inode_pages2(mapping); | ||
969 | if (ret < 0) | 974 | if (ret < 0) |
970 | return ret; | 975 | return ret; |
971 | } | 976 | } |
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index cf11799297c4..108a774095f7 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
@@ -3071,15 +3071,13 @@ struct rpc_clnt * | |||
3071 | nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name, | 3071 | nfs4_proc_lookup_mountpoint(struct inode *dir, struct qstr *name, |
3072 | struct nfs_fh *fhandle, struct nfs_fattr *fattr) | 3072 | struct nfs_fh *fhandle, struct nfs_fattr *fattr) |
3073 | { | 3073 | { |
3074 | struct rpc_clnt *client = NFS_CLIENT(dir); | ||
3074 | int status; | 3075 | int status; |
3075 | struct rpc_clnt *client = rpc_clone_client(NFS_CLIENT(dir)); | ||
3076 | 3076 | ||
3077 | status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, NULL); | 3077 | status = nfs4_proc_lookup_common(&client, dir, name, fhandle, fattr, NULL); |
3078 | if (status < 0) { | 3078 | if (status < 0) |
3079 | rpc_shutdown_client(client); | ||
3080 | return ERR_PTR(status); | 3079 | return ERR_PTR(status); |
3081 | } | 3080 | return (client == NFS_CLIENT(dir)) ? rpc_clone_client(client) : client; |
3082 | return client; | ||
3083 | } | 3081 | } |
3084 | 3082 | ||
3085 | static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) | 3083 | static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) |
diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 71fdc0dfa0d2..f6db66d8f647 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c | |||
@@ -2478,6 +2478,10 @@ struct dentry *nfs_fs_mount_common(struct nfs_server *server, | |||
2478 | if (server->flags & NFS_MOUNT_NOAC) | 2478 | if (server->flags & NFS_MOUNT_NOAC) |
2479 | sb_mntdata.mntflags |= MS_SYNCHRONOUS; | 2479 | sb_mntdata.mntflags |= MS_SYNCHRONOUS; |
2480 | 2480 | ||
2481 | if (mount_info->cloned != NULL && mount_info->cloned->sb != NULL) | ||
2482 | if (mount_info->cloned->sb->s_flags & MS_SYNCHRONOUS) | ||
2483 | sb_mntdata.mntflags |= MS_SYNCHRONOUS; | ||
2484 | |||
2481 | /* Get a superblock - note that we may end up sharing one that already exists */ | 2485 | /* Get a superblock - note that we may end up sharing one that already exists */ |
2482 | s = sget(nfs_mod->nfs_fs, compare_super, nfs_set_super, flags, &sb_mntdata); | 2486 | s = sget(nfs_mod->nfs_fs, compare_super, nfs_set_super, flags, &sb_mntdata); |
2483 | if (IS_ERR(s)) { | 2487 | if (IS_ERR(s)) { |
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index 0d4c410e4589..419572f33b72 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c | |||
@@ -1524,7 +1524,7 @@ static inline u32 nfsd4_write_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op) | |||
1524 | static inline u32 nfsd4_exchange_id_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op) | 1524 | static inline u32 nfsd4_exchange_id_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op) |
1525 | { | 1525 | { |
1526 | return (op_encode_hdr_size + 2 + 1 + /* eir_clientid, eir_sequenceid */\ | 1526 | return (op_encode_hdr_size + 2 + 1 + /* eir_clientid, eir_sequenceid */\ |
1527 | 1 + 1 + 0 + /* eir_flags, spr_how, SP4_NONE (for now) */\ | 1527 | 1 + 1 + 2 + /* eir_flags, spr_how, spo_must_enforce & _allow */\ |
1528 | 2 + /*eir_server_owner.so_minor_id */\ | 1528 | 2 + /*eir_server_owner.so_minor_id */\ |
1529 | /* eir_server_owner.so_major_id<> */\ | 1529 | /* eir_server_owner.so_major_id<> */\ |
1530 | XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + 1 +\ | 1530 | XDR_QUADLEN(NFS4_OPAQUE_LIMIT) + 1 +\ |
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 280acef6f0dc..43f42290e5df 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c | |||
@@ -1264,6 +1264,8 @@ static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp) | |||
1264 | struct svc_cred *cr = &rqstp->rq_cred; | 1264 | struct svc_cred *cr = &rqstp->rq_cred; |
1265 | u32 service; | 1265 | u32 service; |
1266 | 1266 | ||
1267 | if (!cr->cr_gss_mech) | ||
1268 | return false; | ||
1267 | service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor); | 1269 | service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor); |
1268 | return service == RPC_GSS_SVC_INTEGRITY || | 1270 | return service == RPC_GSS_SVC_INTEGRITY || |
1269 | service == RPC_GSS_SVC_PRIVACY; | 1271 | service == RPC_GSS_SVC_PRIVACY; |
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 0c0f3ea90de5..c2a4701d7286 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c | |||
@@ -3360,7 +3360,8 @@ nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr, | |||
3360 | 8 /* eir_clientid */ + | 3360 | 8 /* eir_clientid */ + |
3361 | 4 /* eir_sequenceid */ + | 3361 | 4 /* eir_sequenceid */ + |
3362 | 4 /* eir_flags */ + | 3362 | 4 /* eir_flags */ + |
3363 | 4 /* spr_how (SP4_NONE) */ + | 3363 | 4 /* spr_how */ + |
3364 | 8 /* spo_must_enforce, spo_must_allow */ + | ||
3364 | 8 /* so_minor_id */ + | 3365 | 8 /* so_minor_id */ + |
3365 | 4 /* so_major_id.len */ + | 3366 | 4 /* so_major_id.len */ + |
3366 | (XDR_QUADLEN(major_id_sz) * 4) + | 3367 | (XDR_QUADLEN(major_id_sz) * 4) + |
@@ -3372,8 +3373,6 @@ nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr, | |||
3372 | WRITE32(exid->seqid); | 3373 | WRITE32(exid->seqid); |
3373 | WRITE32(exid->flags); | 3374 | WRITE32(exid->flags); |
3374 | 3375 | ||
3375 | /* state_protect4_r. Currently only support SP4_NONE */ | ||
3376 | BUG_ON(exid->spa_how != SP4_NONE); | ||
3377 | WRITE32(exid->spa_how); | 3376 | WRITE32(exid->spa_how); |
3378 | switch (exid->spa_how) { | 3377 | switch (exid->spa_how) { |
3379 | case SP4_NONE: | 3378 | case SP4_NONE: |
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 79736a28d84f..2abf97b2a592 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c | |||
@@ -1757,7 +1757,7 @@ try_again: | |||
1757 | goto out; | 1757 | goto out; |
1758 | } else if (ret == 1) { | 1758 | } else if (ret == 1) { |
1759 | clusters_need = wc->w_clen; | 1759 | clusters_need = wc->w_clen; |
1760 | ret = ocfs2_refcount_cow(inode, filp, di_bh, | 1760 | ret = ocfs2_refcount_cow(inode, di_bh, |
1761 | wc->w_cpos, wc->w_clen, UINT_MAX); | 1761 | wc->w_cpos, wc->w_clen, UINT_MAX); |
1762 | if (ret) { | 1762 | if (ret) { |
1763 | mlog_errno(ret); | 1763 | mlog_errno(ret); |
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c index eb760d8acd50..30544ce8e9f7 100644 --- a/fs/ocfs2/dir.c +++ b/fs/ocfs2/dir.c | |||
@@ -2153,11 +2153,9 @@ int ocfs2_empty_dir(struct inode *inode) | |||
2153 | { | 2153 | { |
2154 | int ret; | 2154 | int ret; |
2155 | struct ocfs2_empty_dir_priv priv = { | 2155 | struct ocfs2_empty_dir_priv priv = { |
2156 | .ctx.actor = ocfs2_empty_dir_filldir | 2156 | .ctx.actor = ocfs2_empty_dir_filldir, |
2157 | }; | 2157 | }; |
2158 | 2158 | ||
2159 | memset(&priv, 0, sizeof(priv)); | ||
2160 | |||
2161 | if (ocfs2_dir_indexed(inode)) { | 2159 | if (ocfs2_dir_indexed(inode)) { |
2162 | ret = ocfs2_empty_dir_dx(inode, &priv); | 2160 | ret = ocfs2_empty_dir_dx(inode, &priv); |
2163 | if (ret) | 2161 | if (ret) |
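The two deleted lines above are the interesting part: the designated initializer already zeroes every member it does not name, so the memset() that followed it was redundant at best, and it also wiped the .ctx.actor callback that had just been set. A small standalone illustration (struct and names invented):

    #include <stdio.h>
    #include <string.h>

    struct ops {
        void (*actor)(void);
        int flags;
    };

    static void hello(void) { puts("hello"); }

    int main(void)
    {
        struct ops a = { .actor = hello };   /* .flags is already 0 */

        struct ops b = { .actor = hello };
        memset(&b, 0, sizeof(b));            /* wipes .actor again */

        a.actor();                           /* prints "hello" */
        if (!b.actor)
            puts("b.actor was cleared");     /* the problem removed above */
        return 0;
    }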
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 41000f223ca4..3261d71319ee 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c | |||
@@ -370,7 +370,7 @@ static int ocfs2_cow_file_pos(struct inode *inode, | |||
370 | if (!(ext_flags & OCFS2_EXT_REFCOUNTED)) | 370 | if (!(ext_flags & OCFS2_EXT_REFCOUNTED)) |
371 | goto out; | 371 | goto out; |
372 | 372 | ||
373 | return ocfs2_refcount_cow(inode, NULL, fe_bh, cpos, 1, cpos+1); | 373 | return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos+1); |
374 | 374 | ||
375 | out: | 375 | out: |
376 | return status; | 376 | return status; |
@@ -899,7 +899,7 @@ static int ocfs2_zero_extend_get_range(struct inode *inode, | |||
899 | zero_clusters = last_cpos - zero_cpos; | 899 | zero_clusters = last_cpos - zero_cpos; |
900 | 900 | ||
901 | if (needs_cow) { | 901 | if (needs_cow) { |
902 | rc = ocfs2_refcount_cow(inode, NULL, di_bh, zero_cpos, | 902 | rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos, |
903 | zero_clusters, UINT_MAX); | 903 | zero_clusters, UINT_MAX); |
904 | if (rc) { | 904 | if (rc) { |
905 | mlog_errno(rc); | 905 | mlog_errno(rc); |
@@ -2078,7 +2078,7 @@ static int ocfs2_prepare_inode_for_refcount(struct inode *inode, | |||
2078 | 2078 | ||
2079 | *meta_level = 1; | 2079 | *meta_level = 1; |
2080 | 2080 | ||
2081 | ret = ocfs2_refcount_cow(inode, file, di_bh, cpos, clusters, UINT_MAX); | 2081 | ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX); |
2082 | if (ret) | 2082 | if (ret) |
2083 | mlog_errno(ret); | 2083 | mlog_errno(ret); |
2084 | out: | 2084 | out: |
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h index 96f9ac237e86..0a992737dcaf 100644 --- a/fs/ocfs2/journal.h +++ b/fs/ocfs2/journal.h | |||
@@ -537,7 +537,7 @@ static inline int ocfs2_calc_extend_credits(struct super_block *sb, | |||
537 | extent_blocks = 1 + 1 + le16_to_cpu(root_el->l_tree_depth); | 537 | extent_blocks = 1 + 1 + le16_to_cpu(root_el->l_tree_depth); |
538 | 538 | ||
539 | return bitmap_blocks + sysfile_bitmap_blocks + extent_blocks + | 539 | return bitmap_blocks + sysfile_bitmap_blocks + extent_blocks + |
540 | ocfs2_quota_trans_credits(sb) + bits_wanted; | 540 | ocfs2_quota_trans_credits(sb); |
541 | } | 541 | } |
542 | 542 | ||
543 | static inline int ocfs2_calc_symlink_credits(struct super_block *sb) | 543 | static inline int ocfs2_calc_symlink_credits(struct super_block *sb) |
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c index f1fc172175b6..452068b45749 100644 --- a/fs/ocfs2/move_extents.c +++ b/fs/ocfs2/move_extents.c | |||
@@ -69,7 +69,7 @@ static int __ocfs2_move_extent(handle_t *handle, | |||
69 | u64 ino = ocfs2_metadata_cache_owner(context->et.et_ci); | 69 | u64 ino = ocfs2_metadata_cache_owner(context->et.et_ci); |
70 | u64 old_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cpos); | 70 | u64 old_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cpos); |
71 | 71 | ||
72 | ret = ocfs2_duplicate_clusters_by_page(handle, context->file, cpos, | 72 | ret = ocfs2_duplicate_clusters_by_page(handle, inode, cpos, |
73 | p_cpos, new_p_cpos, len); | 73 | p_cpos, new_p_cpos, len); |
74 | if (ret) { | 74 | if (ret) { |
75 | mlog_errno(ret); | 75 | mlog_errno(ret); |
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index 9f6b96a09615..a70d604593b6 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c | |||
@@ -49,7 +49,6 @@ | |||
49 | 49 | ||
50 | struct ocfs2_cow_context { | 50 | struct ocfs2_cow_context { |
51 | struct inode *inode; | 51 | struct inode *inode; |
52 | struct file *file; | ||
53 | u32 cow_start; | 52 | u32 cow_start; |
54 | u32 cow_len; | 53 | u32 cow_len; |
55 | struct ocfs2_extent_tree data_et; | 54 | struct ocfs2_extent_tree data_et; |
@@ -66,7 +65,7 @@ struct ocfs2_cow_context { | |||
66 | u32 *num_clusters, | 65 | u32 *num_clusters, |
67 | unsigned int *extent_flags); | 66 | unsigned int *extent_flags); |
68 | int (*cow_duplicate_clusters)(handle_t *handle, | 67 | int (*cow_duplicate_clusters)(handle_t *handle, |
69 | struct file *file, | 68 | struct inode *inode, |
70 | u32 cpos, u32 old_cluster, | 69 | u32 cpos, u32 old_cluster, |
71 | u32 new_cluster, u32 new_len); | 70 | u32 new_cluster, u32 new_len); |
72 | }; | 71 | }; |
@@ -2922,14 +2921,12 @@ static int ocfs2_clear_cow_buffer(handle_t *handle, struct buffer_head *bh) | |||
2922 | } | 2921 | } |
2923 | 2922 | ||
2924 | int ocfs2_duplicate_clusters_by_page(handle_t *handle, | 2923 | int ocfs2_duplicate_clusters_by_page(handle_t *handle, |
2925 | struct file *file, | 2924 | struct inode *inode, |
2926 | u32 cpos, u32 old_cluster, | 2925 | u32 cpos, u32 old_cluster, |
2927 | u32 new_cluster, u32 new_len) | 2926 | u32 new_cluster, u32 new_len) |
2928 | { | 2927 | { |
2929 | int ret = 0, partial; | 2928 | int ret = 0, partial; |
2930 | struct inode *inode = file_inode(file); | 2929 | struct super_block *sb = inode->i_sb; |
2931 | struct ocfs2_caching_info *ci = INODE_CACHE(inode); | ||
2932 | struct super_block *sb = ocfs2_metadata_cache_get_super(ci); | ||
2933 | u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster); | 2930 | u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster); |
2934 | struct page *page; | 2931 | struct page *page; |
2935 | pgoff_t page_index; | 2932 | pgoff_t page_index; |
@@ -2978,13 +2975,6 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle, | |||
2978 | if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize) | 2975 | if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize) |
2979 | BUG_ON(PageDirty(page)); | 2976 | BUG_ON(PageDirty(page)); |
2980 | 2977 | ||
2981 | if (PageReadahead(page)) { | ||
2982 | page_cache_async_readahead(mapping, | ||
2983 | &file->f_ra, file, | ||
2984 | page, page_index, | ||
2985 | readahead_pages); | ||
2986 | } | ||
2987 | |||
2988 | if (!PageUptodate(page)) { | 2978 | if (!PageUptodate(page)) { |
2989 | ret = block_read_full_page(page, ocfs2_get_block); | 2979 | ret = block_read_full_page(page, ocfs2_get_block); |
2990 | if (ret) { | 2980 | if (ret) { |
@@ -3004,7 +2994,8 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle, | |||
3004 | } | 2994 | } |
3005 | } | 2995 | } |
3006 | 2996 | ||
3007 | ocfs2_map_and_dirty_page(inode, handle, from, to, | 2997 | ocfs2_map_and_dirty_page(inode, |
2998 | handle, from, to, | ||
3008 | page, 0, &new_block); | 2999 | page, 0, &new_block); |
3009 | mark_page_accessed(page); | 3000 | mark_page_accessed(page); |
3010 | unlock: | 3001 | unlock: |
@@ -3020,12 +3011,11 @@ unlock: | |||
3020 | } | 3011 | } |
3021 | 3012 | ||
3022 | int ocfs2_duplicate_clusters_by_jbd(handle_t *handle, | 3013 | int ocfs2_duplicate_clusters_by_jbd(handle_t *handle, |
3023 | struct file *file, | 3014 | struct inode *inode, |
3024 | u32 cpos, u32 old_cluster, | 3015 | u32 cpos, u32 old_cluster, |
3025 | u32 new_cluster, u32 new_len) | 3016 | u32 new_cluster, u32 new_len) |
3026 | { | 3017 | { |
3027 | int ret = 0; | 3018 | int ret = 0; |
3028 | struct inode *inode = file_inode(file); | ||
3029 | struct super_block *sb = inode->i_sb; | 3019 | struct super_block *sb = inode->i_sb; |
3030 | struct ocfs2_caching_info *ci = INODE_CACHE(inode); | 3020 | struct ocfs2_caching_info *ci = INODE_CACHE(inode); |
3031 | int i, blocks = ocfs2_clusters_to_blocks(sb, new_len); | 3021 | int i, blocks = ocfs2_clusters_to_blocks(sb, new_len); |
@@ -3150,7 +3140,7 @@ static int ocfs2_replace_clusters(handle_t *handle, | |||
3150 | 3140 | ||
3151 | /*If the old clusters is unwritten, no need to duplicate. */ | 3141 | /*If the old clusters is unwritten, no need to duplicate. */ |
3152 | if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) { | 3142 | if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) { |
3153 | ret = context->cow_duplicate_clusters(handle, context->file, | 3143 | ret = context->cow_duplicate_clusters(handle, context->inode, |
3154 | cpos, old, new, len); | 3144 | cpos, old, new, len); |
3155 | if (ret) { | 3145 | if (ret) { |
3156 | mlog_errno(ret); | 3146 | mlog_errno(ret); |
@@ -3428,35 +3418,12 @@ static int ocfs2_replace_cow(struct ocfs2_cow_context *context) | |||
3428 | return ret; | 3418 | return ret; |
3429 | } | 3419 | } |
3430 | 3420 | ||
3431 | static void ocfs2_readahead_for_cow(struct inode *inode, | ||
3432 | struct file *file, | ||
3433 | u32 start, u32 len) | ||
3434 | { | ||
3435 | struct address_space *mapping; | ||
3436 | pgoff_t index; | ||
3437 | unsigned long num_pages; | ||
3438 | int cs_bits = OCFS2_SB(inode->i_sb)->s_clustersize_bits; | ||
3439 | |||
3440 | if (!file) | ||
3441 | return; | ||
3442 | |||
3443 | mapping = file->f_mapping; | ||
3444 | num_pages = (len << cs_bits) >> PAGE_CACHE_SHIFT; | ||
3445 | if (!num_pages) | ||
3446 | num_pages = 1; | ||
3447 | |||
3448 | index = ((loff_t)start << cs_bits) >> PAGE_CACHE_SHIFT; | ||
3449 | page_cache_sync_readahead(mapping, &file->f_ra, file, | ||
3450 | index, num_pages); | ||
3451 | } | ||
3452 | |||
3453 | /* | 3421 | /* |
3454 | * Starting at cpos, try to CoW write_len clusters. Don't CoW | 3422 | * Starting at cpos, try to CoW write_len clusters. Don't CoW |
3455 | * past max_cpos. This will stop when it runs into a hole or an | 3423 | * past max_cpos. This will stop when it runs into a hole or an |
3456 | * unrefcounted extent. | 3424 | * unrefcounted extent. |
3457 | */ | 3425 | */ |
3458 | static int ocfs2_refcount_cow_hunk(struct inode *inode, | 3426 | static int ocfs2_refcount_cow_hunk(struct inode *inode, |
3459 | struct file *file, | ||
3460 | struct buffer_head *di_bh, | 3427 | struct buffer_head *di_bh, |
3461 | u32 cpos, u32 write_len, u32 max_cpos) | 3428 | u32 cpos, u32 write_len, u32 max_cpos) |
3462 | { | 3429 | { |
@@ -3485,8 +3452,6 @@ static int ocfs2_refcount_cow_hunk(struct inode *inode, | |||
3485 | 3452 | ||
3486 | BUG_ON(cow_len == 0); | 3453 | BUG_ON(cow_len == 0); |
3487 | 3454 | ||
3488 | ocfs2_readahead_for_cow(inode, file, cow_start, cow_len); | ||
3489 | |||
3490 | context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS); | 3455 | context = kzalloc(sizeof(struct ocfs2_cow_context), GFP_NOFS); |
3491 | if (!context) { | 3456 | if (!context) { |
3492 | ret = -ENOMEM; | 3457 | ret = -ENOMEM; |
@@ -3508,7 +3473,6 @@ static int ocfs2_refcount_cow_hunk(struct inode *inode, | |||
3508 | context->ref_root_bh = ref_root_bh; | 3473 | context->ref_root_bh = ref_root_bh; |
3509 | context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page; | 3474 | context->cow_duplicate_clusters = ocfs2_duplicate_clusters_by_page; |
3510 | context->get_clusters = ocfs2_di_get_clusters; | 3475 | context->get_clusters = ocfs2_di_get_clusters; |
3511 | context->file = file; | ||
3512 | 3476 | ||
3513 | ocfs2_init_dinode_extent_tree(&context->data_et, | 3477 | ocfs2_init_dinode_extent_tree(&context->data_et, |
3514 | INODE_CACHE(inode), di_bh); | 3478 | INODE_CACHE(inode), di_bh); |
@@ -3537,7 +3501,6 @@ out: | |||
3537 | * clusters between cpos and cpos+write_len are safe to modify. | 3501 | * clusters between cpos and cpos+write_len are safe to modify. |
3538 | */ | 3502 | */ |
3539 | int ocfs2_refcount_cow(struct inode *inode, | 3503 | int ocfs2_refcount_cow(struct inode *inode, |
3540 | struct file *file, | ||
3541 | struct buffer_head *di_bh, | 3504 | struct buffer_head *di_bh, |
3542 | u32 cpos, u32 write_len, u32 max_cpos) | 3505 | u32 cpos, u32 write_len, u32 max_cpos) |
3543 | { | 3506 | { |
@@ -3557,7 +3520,7 @@ int ocfs2_refcount_cow(struct inode *inode, | |||
3557 | num_clusters = write_len; | 3520 | num_clusters = write_len; |
3558 | 3521 | ||
3559 | if (ext_flags & OCFS2_EXT_REFCOUNTED) { | 3522 | if (ext_flags & OCFS2_EXT_REFCOUNTED) { |
3560 | ret = ocfs2_refcount_cow_hunk(inode, file, di_bh, cpos, | 3523 | ret = ocfs2_refcount_cow_hunk(inode, di_bh, cpos, |
3561 | num_clusters, max_cpos); | 3524 | num_clusters, max_cpos); |
3562 | if (ret) { | 3525 | if (ret) { |
3563 | mlog_errno(ret); | 3526 | mlog_errno(ret); |
diff --git a/fs/ocfs2/refcounttree.h b/fs/ocfs2/refcounttree.h index 7754608c83a4..6422bbcdb525 100644 --- a/fs/ocfs2/refcounttree.h +++ b/fs/ocfs2/refcounttree.h | |||
@@ -53,7 +53,7 @@ int ocfs2_prepare_refcount_change_for_del(struct inode *inode, | |||
53 | int *credits, | 53 | int *credits, |
54 | int *ref_blocks); | 54 | int *ref_blocks); |
55 | int ocfs2_refcount_cow(struct inode *inode, | 55 | int ocfs2_refcount_cow(struct inode *inode, |
56 | struct file *filep, struct buffer_head *di_bh, | 56 | struct buffer_head *di_bh, |
57 | u32 cpos, u32 write_len, u32 max_cpos); | 57 | u32 cpos, u32 write_len, u32 max_cpos); |
58 | 58 | ||
59 | typedef int (ocfs2_post_refcount_func)(struct inode *inode, | 59 | typedef int (ocfs2_post_refcount_func)(struct inode *inode, |
@@ -85,11 +85,11 @@ int ocfs2_refcount_cow_xattr(struct inode *inode, | |||
85 | u32 cpos, u32 write_len, | 85 | u32 cpos, u32 write_len, |
86 | struct ocfs2_post_refcount *post); | 86 | struct ocfs2_post_refcount *post); |
87 | int ocfs2_duplicate_clusters_by_page(handle_t *handle, | 87 | int ocfs2_duplicate_clusters_by_page(handle_t *handle, |
88 | struct file *file, | 88 | struct inode *inode, |
89 | u32 cpos, u32 old_cluster, | 89 | u32 cpos, u32 old_cluster, |
90 | u32 new_cluster, u32 new_len); | 90 | u32 new_cluster, u32 new_len); |
91 | int ocfs2_duplicate_clusters_by_jbd(handle_t *handle, | 91 | int ocfs2_duplicate_clusters_by_jbd(handle_t *handle, |
92 | struct file *file, | 92 | struct inode *inode, |
93 | u32 cpos, u32 old_cluster, | 93 | u32 cpos, u32 old_cluster, |
94 | u32 new_cluster, u32 new_len); | 94 | u32 new_cluster, u32 new_len); |
95 | int ocfs2_cow_sync_writeback(struct super_block *sb, | 95 | int ocfs2_cow_sync_writeback(struct super_block *sb, |
diff --git a/fs/open.c b/fs/open.c --- a/fs/open.c +++ b/fs/open.c | |||
@@ -823,7 +823,7 @@ static inline int build_open_flags(int flags, umode_t mode, struct open_flags *o | |||
823 | int lookup_flags = 0; | 823 | int lookup_flags = 0; |
824 | int acc_mode; | 824 | int acc_mode; |
825 | 825 | ||
826 | if (flags & O_CREAT) | 826 | if (flags & (O_CREAT | __O_TMPFILE)) |
827 | op->mode = (mode & S_IALLUGO) | S_IFREG; | 827 | op->mode = (mode & S_IALLUGO) | S_IFREG; |
828 | else | 828 | else |
829 | op->mode = 0; | 829 | op->mode = 0; |
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index dbf61f6174f0..107d026f5d6e 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c | |||
@@ -730,8 +730,16 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma, | |||
730 | * of how soft-dirty works. | 730 | * of how soft-dirty works. |
731 | */ | 731 | */ |
732 | pte_t ptent = *pte; | 732 | pte_t ptent = *pte; |
733 | ptent = pte_wrprotect(ptent); | 733 | |
734 | ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY); | 734 | if (pte_present(ptent)) { |
735 | ptent = pte_wrprotect(ptent); | ||
736 | ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY); | ||
737 | } else if (is_swap_pte(ptent)) { | ||
738 | ptent = pte_swp_clear_soft_dirty(ptent); | ||
739 | } else if (pte_file(ptent)) { | ||
740 | ptent = pte_file_clear_soft_dirty(ptent); | ||
741 | } | ||
742 | |||
735 | set_pte_at(vma->vm_mm, addr, pte, ptent); | 743 | set_pte_at(vma->vm_mm, addr, pte, ptent); |
736 | #endif | 744 | #endif |
737 | } | 745 | } |
@@ -752,14 +760,15 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr, | |||
752 | pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); | 760 | pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); |
753 | for (; addr != end; pte++, addr += PAGE_SIZE) { | 761 | for (; addr != end; pte++, addr += PAGE_SIZE) { |
754 | ptent = *pte; | 762 | ptent = *pte; |
755 | if (!pte_present(ptent)) | ||
756 | continue; | ||
757 | 763 | ||
758 | if (cp->type == CLEAR_REFS_SOFT_DIRTY) { | 764 | if (cp->type == CLEAR_REFS_SOFT_DIRTY) { |
759 | clear_soft_dirty(vma, addr, pte); | 765 | clear_soft_dirty(vma, addr, pte); |
760 | continue; | 766 | continue; |
761 | } | 767 | } |
762 | 768 | ||
769 | if (!pte_present(ptent)) | ||
770 | continue; | ||
771 | |||
763 | page = vm_normal_page(vma, addr, ptent); | 772 | page = vm_normal_page(vma, addr, ptent); |
764 | if (!page) | 773 | if (!page) |
765 | continue; | 774 | continue; |
@@ -859,7 +868,7 @@ typedef struct { | |||
859 | } pagemap_entry_t; | 868 | } pagemap_entry_t; |
860 | 869 | ||
861 | struct pagemapread { | 870 | struct pagemapread { |
862 | int pos, len; | 871 | int pos, len; /* units: PM_ENTRY_BYTES, not bytes */ |
863 | pagemap_entry_t *buffer; | 872 | pagemap_entry_t *buffer; |
864 | bool v2; | 873 | bool v2; |
865 | }; | 874 | }; |
@@ -867,7 +876,7 @@ struct pagemapread { | |||
867 | #define PAGEMAP_WALK_SIZE (PMD_SIZE) | 876 | #define PAGEMAP_WALK_SIZE (PMD_SIZE) |
868 | #define PAGEMAP_WALK_MASK (PMD_MASK) | 877 | #define PAGEMAP_WALK_MASK (PMD_MASK) |
869 | 878 | ||
870 | #define PM_ENTRY_BYTES sizeof(u64) | 879 | #define PM_ENTRY_BYTES sizeof(pagemap_entry_t) |
871 | #define PM_STATUS_BITS 3 | 880 | #define PM_STATUS_BITS 3 |
872 | #define PM_STATUS_OFFSET (64 - PM_STATUS_BITS) | 881 | #define PM_STATUS_OFFSET (64 - PM_STATUS_BITS) |
873 | #define PM_STATUS_MASK (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET) | 882 | #define PM_STATUS_MASK (((1LL << PM_STATUS_BITS) - 1) << PM_STATUS_OFFSET) |
@@ -930,8 +939,10 @@ static void pte_to_pagemap_entry(pagemap_entry_t *pme, struct pagemapread *pm, | |||
930 | flags = PM_PRESENT; | 939 | flags = PM_PRESENT; |
931 | page = vm_normal_page(vma, addr, pte); | 940 | page = vm_normal_page(vma, addr, pte); |
932 | } else if (is_swap_pte(pte)) { | 941 | } else if (is_swap_pte(pte)) { |
933 | swp_entry_t entry = pte_to_swp_entry(pte); | 942 | swp_entry_t entry; |
934 | 943 | if (pte_swp_soft_dirty(pte)) | |
944 | flags2 |= __PM_SOFT_DIRTY; | ||
945 | entry = pte_to_swp_entry(pte); | ||
935 | frame = swp_type(entry) | | 946 | frame = swp_type(entry) | |
936 | (swp_offset(entry) << MAX_SWAPFILES_SHIFT); | 947 | (swp_offset(entry) << MAX_SWAPFILES_SHIFT); |
937 | flags = PM_SWAP; | 948 | flags = PM_SWAP; |
@@ -1116,8 +1127,8 @@ static ssize_t pagemap_read(struct file *file, char __user *buf, | |||
1116 | goto out_task; | 1127 | goto out_task; |
1117 | 1128 | ||
1118 | pm.v2 = soft_dirty_cleared; | 1129 | pm.v2 = soft_dirty_cleared; |
1119 | pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT); | 1130 | pm.len = (PAGEMAP_WALK_SIZE >> PAGE_SHIFT); |
1120 | pm.buffer = kmalloc(pm.len, GFP_TEMPORARY); | 1131 | pm.buffer = kmalloc(pm.len * PM_ENTRY_BYTES, GFP_TEMPORARY); |
1121 | ret = -ENOMEM; | 1132 | ret = -ENOMEM; |
1122 | if (!pm.buffer) | 1133 | if (!pm.buffer) |
1123 | goto out_task; | 1134 | goto out_task; |
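PM_ENTRY_BYTES and pm.len matter to userspace because /proc/pid/pagemap is read as a flat array of 8-byte entries, one per virtual page. A sketch of a reader that looks up a single address (bit meanings per Documentation/vm/pagemap.txt: bit 63 = present, bit 62 = swapped):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        long page = sysconf(_SC_PAGESIZE);
        uintptr_t addr = (uintptr_t)&page;        /* any mapped address */
        uint64_t entry;
        int fd = open("/proc/self/pagemap", O_RDONLY);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        /* One 8-byte entry per page: offset is (addr / pagesize) * 8. */
        if (pread(fd, &entry, sizeof(entry),
                  (addr / page) * sizeof(entry)) != sizeof(entry)) {
            perror("pread");
            close(fd);
            return 1;
        }
        printf("present=%d swapped=%d\n",
               (int)((entry >> 63) & 1), (int)((entry >> 62) & 1));
        close(fd);
        return 0;
    }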
diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c index 33532f79b4f7..a958444a75fc 100644 --- a/fs/reiserfs/procfs.c +++ b/fs/reiserfs/procfs.c | |||
@@ -19,12 +19,13 @@ | |||
19 | /* | 19 | /* |
20 | * LOCKING: | 20 | * LOCKING: |
21 | * | 21 | * |
22 | * We rely on new Alexander Viro's super-block locking. | 22 | * These guys are evicted from procfs as the very first step in ->kill_sb(). |
23 | * | 23 | * |
24 | */ | 24 | */ |
25 | 25 | ||
26 | static int show_version(struct seq_file *m, struct super_block *sb) | 26 | static int show_version(struct seq_file *m, void *unused) |
27 | { | 27 | { |
28 | struct super_block *sb = m->private; | ||
28 | char *format; | 29 | char *format; |
29 | 30 | ||
30 | if (REISERFS_SB(sb)->s_properties & (1 << REISERFS_3_6)) { | 31 | if (REISERFS_SB(sb)->s_properties & (1 << REISERFS_3_6)) { |
@@ -66,8 +67,9 @@ static int show_version(struct seq_file *m, struct super_block *sb) | |||
66 | #define DJP( x ) le32_to_cpu( jp -> x ) | 67 | #define DJP( x ) le32_to_cpu( jp -> x ) |
67 | #define JF( x ) ( r -> s_journal -> x ) | 68 | #define JF( x ) ( r -> s_journal -> x ) |
68 | 69 | ||
69 | static int show_super(struct seq_file *m, struct super_block *sb) | 70 | static int show_super(struct seq_file *m, void *unused) |
70 | { | 71 | { |
72 | struct super_block *sb = m->private; | ||
71 | struct reiserfs_sb_info *r = REISERFS_SB(sb); | 73 | struct reiserfs_sb_info *r = REISERFS_SB(sb); |
72 | 74 | ||
73 | seq_printf(m, "state: \t%s\n" | 75 | seq_printf(m, "state: \t%s\n" |
@@ -128,8 +130,9 @@ static int show_super(struct seq_file *m, struct super_block *sb) | |||
128 | return 0; | 130 | return 0; |
129 | } | 131 | } |
130 | 132 | ||
131 | static int show_per_level(struct seq_file *m, struct super_block *sb) | 133 | static int show_per_level(struct seq_file *m, void *unused) |
132 | { | 134 | { |
135 | struct super_block *sb = m->private; | ||
133 | struct reiserfs_sb_info *r = REISERFS_SB(sb); | 136 | struct reiserfs_sb_info *r = REISERFS_SB(sb); |
134 | int level; | 137 | int level; |
135 | 138 | ||
@@ -186,8 +189,9 @@ static int show_per_level(struct seq_file *m, struct super_block *sb) | |||
186 | return 0; | 189 | return 0; |
187 | } | 190 | } |
188 | 191 | ||
189 | static int show_bitmap(struct seq_file *m, struct super_block *sb) | 192 | static int show_bitmap(struct seq_file *m, void *unused) |
190 | { | 193 | { |
194 | struct super_block *sb = m->private; | ||
191 | struct reiserfs_sb_info *r = REISERFS_SB(sb); | 195 | struct reiserfs_sb_info *r = REISERFS_SB(sb); |
192 | 196 | ||
193 | seq_printf(m, "free_block: %lu\n" | 197 | seq_printf(m, "free_block: %lu\n" |
@@ -218,8 +222,9 @@ static int show_bitmap(struct seq_file *m, struct super_block *sb) | |||
218 | return 0; | 222 | return 0; |
219 | } | 223 | } |
220 | 224 | ||
221 | static int show_on_disk_super(struct seq_file *m, struct super_block *sb) | 225 | static int show_on_disk_super(struct seq_file *m, void *unused) |
222 | { | 226 | { |
227 | struct super_block *sb = m->private; | ||
223 | struct reiserfs_sb_info *sb_info = REISERFS_SB(sb); | 228 | struct reiserfs_sb_info *sb_info = REISERFS_SB(sb); |
224 | struct reiserfs_super_block *rs = sb_info->s_rs; | 229 | struct reiserfs_super_block *rs = sb_info->s_rs; |
225 | int hash_code = DFL(s_hash_function_code); | 230 | int hash_code = DFL(s_hash_function_code); |
@@ -261,8 +266,9 @@ static int show_on_disk_super(struct seq_file *m, struct super_block *sb) | |||
261 | return 0; | 266 | return 0; |
262 | } | 267 | } |
263 | 268 | ||
264 | static int show_oidmap(struct seq_file *m, struct super_block *sb) | 269 | static int show_oidmap(struct seq_file *m, void *unused) |
265 | { | 270 | { |
271 | struct super_block *sb = m->private; | ||
266 | struct reiserfs_sb_info *sb_info = REISERFS_SB(sb); | 272 | struct reiserfs_sb_info *sb_info = REISERFS_SB(sb); |
267 | struct reiserfs_super_block *rs = sb_info->s_rs; | 273 | struct reiserfs_super_block *rs = sb_info->s_rs; |
268 | unsigned int mapsize = le16_to_cpu(rs->s_v1.s_oid_cursize); | 274 | unsigned int mapsize = le16_to_cpu(rs->s_v1.s_oid_cursize); |
@@ -291,8 +297,9 @@ static int show_oidmap(struct seq_file *m, struct super_block *sb) | |||
291 | return 0; | 297 | return 0; |
292 | } | 298 | } |
293 | 299 | ||
294 | static int show_journal(struct seq_file *m, struct super_block *sb) | 300 | static int show_journal(struct seq_file *m, void *unused) |
295 | { | 301 | { |
302 | struct super_block *sb = m->private; | ||
296 | struct reiserfs_sb_info *r = REISERFS_SB(sb); | 303 | struct reiserfs_sb_info *r = REISERFS_SB(sb); |
297 | struct reiserfs_super_block *rs = r->s_rs; | 304 | struct reiserfs_super_block *rs = r->s_rs; |
298 | struct journal_params *jp = &rs->s_v1.s_journal; | 305 | struct journal_params *jp = &rs->s_v1.s_journal; |
@@ -383,92 +390,24 @@ static int show_journal(struct seq_file *m, struct super_block *sb) | |||
383 | return 0; | 390 | return 0; |
384 | } | 391 | } |
385 | 392 | ||
386 | /* iterator */ | ||
387 | static int test_sb(struct super_block *sb, void *data) | ||
388 | { | ||
389 | return data == sb; | ||
390 | } | ||
391 | |||
392 | static int set_sb(struct super_block *sb, void *data) | ||
393 | { | ||
394 | return -ENOENT; | ||
395 | } | ||
396 | |||
397 | struct reiserfs_seq_private { | ||
398 | struct super_block *sb; | ||
399 | int (*show) (struct seq_file *, struct super_block *); | ||
400 | }; | ||
401 | |||
402 | static void *r_start(struct seq_file *m, loff_t * pos) | ||
403 | { | ||
404 | struct reiserfs_seq_private *priv = m->private; | ||
405 | loff_t l = *pos; | ||
406 | |||
407 | if (l) | ||
408 | return NULL; | ||
409 | |||
410 | if (IS_ERR(sget(&reiserfs_fs_type, test_sb, set_sb, 0, priv->sb))) | ||
411 | return NULL; | ||
412 | |||
413 | up_write(&priv->sb->s_umount); | ||
414 | return priv->sb; | ||
415 | } | ||
416 | |||
417 | static void *r_next(struct seq_file *m, void *v, loff_t * pos) | ||
418 | { | ||
419 | ++*pos; | ||
420 | if (v) | ||
421 | deactivate_super(v); | ||
422 | return NULL; | ||
423 | } | ||
424 | |||
425 | static void r_stop(struct seq_file *m, void *v) | ||
426 | { | ||
427 | if (v) | ||
428 | deactivate_super(v); | ||
429 | } | ||
430 | |||
431 | static int r_show(struct seq_file *m, void *v) | ||
432 | { | ||
433 | struct reiserfs_seq_private *priv = m->private; | ||
434 | return priv->show(m, v); | ||
435 | } | ||
436 | |||
437 | static const struct seq_operations r_ops = { | ||
438 | .start = r_start, | ||
439 | .next = r_next, | ||
440 | .stop = r_stop, | ||
441 | .show = r_show, | ||
442 | }; | ||
443 | |||
444 | static int r_open(struct inode *inode, struct file *file) | 393 | static int r_open(struct inode *inode, struct file *file) |
445 | { | 394 | { |
446 | struct reiserfs_seq_private *priv; | 395 | return single_open(file, PDE_DATA(inode), |
447 | int ret = seq_open_private(file, &r_ops, | 396 | proc_get_parent_data(inode)); |
448 | sizeof(struct reiserfs_seq_private)); | ||
449 | |||
450 | if (!ret) { | ||
451 | struct seq_file *m = file->private_data; | ||
452 | priv = m->private; | ||
453 | priv->sb = proc_get_parent_data(inode); | ||
454 | priv->show = PDE_DATA(inode); | ||
455 | } | ||
456 | return ret; | ||
457 | } | 397 | } |
458 | 398 | ||
459 | static const struct file_operations r_file_operations = { | 399 | static const struct file_operations r_file_operations = { |
460 | .open = r_open, | 400 | .open = r_open, |
461 | .read = seq_read, | 401 | .read = seq_read, |
462 | .llseek = seq_lseek, | 402 | .llseek = seq_lseek, |
463 | .release = seq_release_private, | 403 | .release = single_release, |
464 | .owner = THIS_MODULE, | ||
465 | }; | 404 | }; |
466 | 405 | ||
467 | static struct proc_dir_entry *proc_info_root = NULL; | 406 | static struct proc_dir_entry *proc_info_root = NULL; |
468 | static const char proc_info_root_name[] = "fs/reiserfs"; | 407 | static const char proc_info_root_name[] = "fs/reiserfs"; |
469 | 408 | ||
470 | static void add_file(struct super_block *sb, char *name, | 409 | static void add_file(struct super_block *sb, char *name, |
471 | int (*func) (struct seq_file *, struct super_block *)) | 410 | int (*func) (struct seq_file *, void *)) |
472 | { | 411 | { |
473 | proc_create_data(name, 0, REISERFS_SB(sb)->procdir, | 412 | proc_create_data(name, 0, REISERFS_SB(sb)->procdir, |
474 | &r_file_operations, func); | 413 | &r_file_operations, func); |
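The reiserfs /proc conversion above drops the hand-rolled seq_file iterator (test_sb/set_sb, r_start/r_next/r_stop and the private sb/show pair) in favour of single_open(): the show callback is stored as the proc entry's data and the super_block as the parent directory's data. A minimal sketch of that wiring, using illustrative demo_* names that are not part of the patch and assuming the per-filesystem directory was created with proc_mkdir_data() so proc_get_parent_data() can hand back the super_block:

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_show(struct seq_file *m, void *unused)
{
	struct super_block *sb = m->private;	/* third argument of single_open() */

	seq_printf(m, "blocksize: %lu\n", sb->s_blocksize);
	return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
	/* PDE_DATA() returns the show callback registered for this entry,
	 * proc_get_parent_data() returns the super_block stored on the dir */
	return single_open(file, PDE_DATA(inode), proc_get_parent_data(inode));
}

static const struct file_operations demo_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static void demo_register(struct super_block *sb)
{
	struct proc_dir_entry *dir;

	/* the super_block rides along as the directory's private data */
	dir = proc_mkdir_data("demo", 0, NULL, sb);
	if (dir)
		proc_create_data("version", 0, dir, &demo_fops, demo_show);
}

With this shape one open routine and one file_operations table serve every per-superblock file, which is why the patch can also switch the release method from seq_release_private to single_release.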
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index f8a23c3078f8..e2e202a07b31 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c | |||
@@ -499,6 +499,7 @@ int remove_save_link(struct inode *inode, int truncate) | |||
499 | static void reiserfs_kill_sb(struct super_block *s) | 499 | static void reiserfs_kill_sb(struct super_block *s) |
500 | { | 500 | { |
501 | if (REISERFS_SB(s)) { | 501 | if (REISERFS_SB(s)) { |
502 | reiserfs_proc_info_done(s); | ||
502 | /* | 503 | /* |
503 | * Force any pending inode evictions to occur now. Any | 504 | * Force any pending inode evictions to occur now. Any |
504 | * inodes to be removed that have extended attributes | 505 | * inodes to be removed that have extended attributes |
@@ -554,8 +555,6 @@ static void reiserfs_put_super(struct super_block *s) | |||
554 | REISERFS_SB(s)->reserved_blocks); | 555 | REISERFS_SB(s)->reserved_blocks); |
555 | } | 556 | } |
556 | 557 | ||
557 | reiserfs_proc_info_done(s); | ||
558 | |||
559 | reiserfs_write_unlock(s); | 558 | reiserfs_write_unlock(s); |
560 | mutex_destroy(&REISERFS_SB(s)->lock); | 559 | mutex_destroy(&REISERFS_SB(s)->lock); |
561 | kfree(s->s_fs_info); | 560 | kfree(s->s_fs_info); |
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 56e6b68c8d2f..94383a70c1a3 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h | |||
@@ -274,15 +274,12 @@ struct acpi_device_wakeup { | |||
274 | }; | 274 | }; |
275 | 275 | ||
276 | struct acpi_device_physical_node { | 276 | struct acpi_device_physical_node { |
277 | u8 node_id; | 277 | unsigned int node_id; |
278 | struct list_head node; | 278 | struct list_head node; |
279 | struct device *dev; | 279 | struct device *dev; |
280 | bool put_online:1; | 280 | bool put_online:1; |
281 | }; | 281 | }; |
282 | 282 | ||
283 | /* set maximum of physical nodes to 32 for expansibility */ | ||
284 | #define ACPI_MAX_PHYSICAL_NODE 32 | ||
285 | |||
286 | /* Device */ | 283 | /* Device */ |
287 | struct acpi_device { | 284 | struct acpi_device { |
288 | int device_type; | 285 | int device_type; |
@@ -302,10 +299,9 @@ struct acpi_device { | |||
302 | struct acpi_driver *driver; | 299 | struct acpi_driver *driver; |
303 | void *driver_data; | 300 | void *driver_data; |
304 | struct device dev; | 301 | struct device dev; |
305 | u8 physical_node_count; | 302 | unsigned int physical_node_count; |
306 | struct list_head physical_node_list; | 303 | struct list_head physical_node_list; |
307 | struct mutex physical_node_lock; | 304 | struct mutex physical_node_lock; |
308 | DECLARE_BITMAP(physical_node_id_bitmap, ACPI_MAX_PHYSICAL_NODE); | ||
309 | struct list_head power_dependent; | 305 | struct list_head power_dependent; |
310 | void (*remove)(struct acpi_device *); | 306 | void (*remove)(struct acpi_device *); |
311 | }; | 307 | }; |
@@ -445,7 +441,11 @@ struct acpi_pci_root { | |||
445 | }; | 441 | }; |
446 | 442 | ||
447 | /* helper */ | 443 | /* helper */ |
448 | acpi_handle acpi_get_child(acpi_handle, u64); | 444 | acpi_handle acpi_find_child(acpi_handle, u64, bool); |
445 | static inline acpi_handle acpi_get_child(acpi_handle handle, u64 addr) | ||
446 | { | ||
447 | return acpi_find_child(handle, addr, false); | ||
448 | } | ||
449 | int acpi_is_root_bridge(acpi_handle); | 449 | int acpi_is_root_bridge(acpi_handle); |
450 | struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle); | 450 | struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle); |
451 | #define DEVICE_ACPI_HANDLE(dev) ((acpi_handle)ACPI_HANDLE(dev)) | 451 | #define DEVICE_ACPI_HANDLE(dev) ((acpi_handle)ACPI_HANDLE(dev)) |
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 2f47ade1b567..0807ddf97b05 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h | |||
@@ -417,6 +417,36 @@ static inline pmd_t pmd_mksoft_dirty(pmd_t pmd) | |||
417 | { | 417 | { |
418 | return pmd; | 418 | return pmd; |
419 | } | 419 | } |
420 | |||
421 | static inline pte_t pte_swp_mksoft_dirty(pte_t pte) | ||
422 | { | ||
423 | return pte; | ||
424 | } | ||
425 | |||
426 | static inline int pte_swp_soft_dirty(pte_t pte) | ||
427 | { | ||
428 | return 0; | ||
429 | } | ||
430 | |||
431 | static inline pte_t pte_swp_clear_soft_dirty(pte_t pte) | ||
432 | { | ||
433 | return pte; | ||
434 | } | ||
435 | |||
436 | static inline pte_t pte_file_clear_soft_dirty(pte_t pte) | ||
437 | { | ||
438 | return pte; | ||
439 | } | ||
440 | |||
441 | static inline pte_t pte_file_mksoft_dirty(pte_t pte) | ||
442 | { | ||
443 | return pte; | ||
444 | } | ||
445 | |||
446 | static inline int pte_file_soft_dirty(pte_t pte) | ||
447 | { | ||
448 | return 0; | ||
449 | } | ||
420 | #endif | 450 | #endif |
421 | 451 | ||
422 | #ifndef __HAVE_PFNMAP_TRACKING | 452 | #ifndef __HAVE_PFNMAP_TRACKING |
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 13821c339a41..5672d7ea1fa0 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h | |||
@@ -112,7 +112,7 @@ struct mmu_gather { | |||
112 | 112 | ||
113 | #define HAVE_GENERIC_MMU_GATHER | 113 | #define HAVE_GENERIC_MMU_GATHER |
114 | 114 | ||
115 | void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm); | 115 | void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end); |
116 | void tlb_flush_mmu(struct mmu_gather *tlb); | 116 | void tlb_flush_mmu(struct mmu_gather *tlb); |
117 | void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, | 117 | void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, |
118 | unsigned long end); | 118 | unsigned long end); |
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 4372658c73ae..120d57a1c3a5 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h | |||
@@ -78,6 +78,11 @@ struct trace_iterator { | |||
78 | /* trace_seq for __print_flags() and __print_symbolic() etc. */ | 78 | /* trace_seq for __print_flags() and __print_symbolic() etc. */ |
79 | struct trace_seq tmp_seq; | 79 | struct trace_seq tmp_seq; |
80 | 80 | ||
81 | cpumask_var_t started; | ||
82 | |||
84 | /* true when the currently open file is a snapshot */ | ||
84 | bool snapshot; | ||
85 | |||
81 | /* The below is zeroed out in pipe_read */ | 86 | /* The below is zeroed out in pipe_read */ |
82 | struct trace_seq seq; | 87 | struct trace_seq seq; |
83 | struct trace_entry *ent; | 88 | struct trace_entry *ent; |
@@ -90,10 +95,7 @@ struct trace_iterator { | |||
90 | loff_t pos; | 95 | loff_t pos; |
91 | long idx; | 96 | long idx; |
92 | 97 | ||
93 | cpumask_var_t started; | 98 | /* All new field here will be zeroed out in pipe_read */ |
94 | |||
95 | /* it's true when current open file is snapshot */ | ||
96 | bool snapshot; | ||
97 | }; | 99 | }; |
98 | 100 | ||
99 | enum trace_iter_flags { | 101 | enum trace_iter_flags { |
@@ -332,7 +334,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type, | |||
332 | const char *name, int offset, int size, | 334 | const char *name, int offset, int size, |
333 | int is_signed, int filter_type); | 335 | int is_signed, int filter_type); |
334 | extern int trace_add_event_call(struct ftrace_event_call *call); | 336 | extern int trace_add_event_call(struct ftrace_event_call *call); |
335 | extern void trace_remove_event_call(struct ftrace_event_call *call); | 337 | extern int trace_remove_event_call(struct ftrace_event_call *call); |
336 | 338 | ||
337 | #define is_signed_type(type) (((type)(-1)) < (type)1) | 339 | #define is_signed_type(type) (((type)(-1)) < (type)1) |
338 | 340 | ||
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h index 3869c525b052..369cf2cd5144 100644 --- a/include/linux/iio/trigger.h +++ b/include/linux/iio/trigger.h | |||
@@ -8,6 +8,7 @@ | |||
8 | */ | 8 | */ |
9 | #include <linux/irq.h> | 9 | #include <linux/irq.h> |
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/atomic.h> | ||
11 | 12 | ||
12 | #ifndef _IIO_TRIGGER_H_ | 13 | #ifndef _IIO_TRIGGER_H_ |
13 | #define _IIO_TRIGGER_H_ | 14 | #define _IIO_TRIGGER_H_ |
@@ -61,7 +62,7 @@ struct iio_trigger { | |||
61 | 62 | ||
62 | struct list_head list; | 63 | struct list_head list; |
63 | struct list_head alloc_list; | 64 | struct list_head alloc_list; |
64 | int use_count; | 65 | atomic_t use_count; |
65 | 66 | ||
66 | struct irq_chip subirq_chip; | 67 | struct irq_chip subirq_chip; |
67 | int subirq_base; | 68 | int subirq_base; |
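Switching use_count from int to atomic_t lets the trigger reference be taken and dropped from concurrent contexts without a lock around the counter. A userspace sketch of the same idea with C11 atomics (the names and the in-use check are illustrative, not the IIO core's code):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int use_count;		/* counterpart of the atomic_t field */

static void trigger_get(void)
{
	atomic_fetch_add(&use_count, 1);	/* lock-free read-modify-write */
}

static void trigger_put(void)
{
	atomic_fetch_sub(&use_count, 1);
}

static int trigger_in_use(void)
{
	return atomic_load(&use_count) > 0;
}

int main(void)
{
	trigger_get();
	trigger_get();
	trigger_put();
	printf("in use: %d (count %d)\n", trigger_in_use(),
	       atomic_load(&use_count));	/* prints: in use: 1 (count 1) */
	return 0;
}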
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 3bef14c6586b..482ad2d84a32 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
@@ -629,7 +629,7 @@ extern void ftrace_dump(enum ftrace_dump_mode oops_dump_mode); | |||
629 | static inline void tracing_start(void) { } | 629 | static inline void tracing_start(void) { } |
630 | static inline void tracing_stop(void) { } | 630 | static inline void tracing_stop(void) { } |
631 | static inline void ftrace_off_permanent(void) { } | 631 | static inline void ftrace_off_permanent(void) { } |
632 | static inline void trace_dump_stack(void) { } | 632 | static inline void trace_dump_stack(int skip) { } |
633 | 633 | ||
634 | static inline void tracing_on(void) { } | 634 | static inline void tracing_on(void) { } |
635 | static inline void tracing_off(void) { } | 635 | static inline void tracing_off(void) { } |
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h index 8d73fe29796a..db1791bb997a 100644 --- a/include/linux/mfd/ti_am335x_tscadc.h +++ b/include/linux/mfd/ti_am335x_tscadc.h | |||
@@ -113,11 +113,27 @@ | |||
113 | #define CNTRLREG_8WIRE CNTRLREG_AFE_CTRL(3) | 113 | #define CNTRLREG_8WIRE CNTRLREG_AFE_CTRL(3) |
114 | #define CNTRLREG_TSCENB BIT(7) | 114 | #define CNTRLREG_TSCENB BIT(7) |
115 | 115 | ||
116 | /* FIFO READ Register */ | ||
117 | #define FIFOREAD_DATA_MASK (0xfff << 0) | ||
118 | #define FIFOREAD_CHNLID_MASK (0xf << 16) | ||
119 | |||
120 | /* Sequencer Status */ | ||
121 | #define SEQ_STATUS BIT(5) | ||
122 | |||
116 | #define ADC_CLK 3000000 | 123 | #define ADC_CLK 3000000 |
117 | #define MAX_CLK_DIV 7 | 124 | #define MAX_CLK_DIV 7 |
118 | #define TOTAL_STEPS 16 | 125 | #define TOTAL_STEPS 16 |
119 | #define TOTAL_CHANNELS 8 | 126 | #define TOTAL_CHANNELS 8 |
120 | 127 | ||
128 | /* | ||
129 | * ADC runs at 3MHz, and it takes | ||
130 | * 15 cycles to latch one data output. | ||
131 | * Hence the idle time for ADC to | ||
132 | * process one sample would be | ||
133 | * around 5 microseconds. | ||
134 | */ | ||
135 | #define IDLE_TIMEOUT 5 /* microsec */ | ||
136 | |||
121 | #define TSCADC_CELLS 2 | 137 | #define TSCADC_CELLS 2 |
122 | 138 | ||
123 | struct ti_tscadc_dev { | 139 | struct ti_tscadc_dev { |
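The 5 us in the new IDLE_TIMEOUT comment follows from the figures quoted there: 15 ADC clock cycles per converted sample at the 3 MHz ADC_CLK. A quick standalone check:

#include <stdio.h>

int main(void)
{
	const double adc_clk_hz = 3000000.0;	/* ADC_CLK above */
	const double cycles_per_sample = 15.0;	/* "15 cycles to latch one data output" */

	printf("per-sample latch time: %.1f us\n",
	       cycles_per_sample / adc_clk_hz * 1e6);	/* prints 5.0 us */
	return 0;
}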
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 737685e9e852..68029b30c3dc 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h | |||
@@ -309,21 +309,20 @@ struct mlx5_hca_cap { | |||
309 | __be16 max_desc_sz_rq; | 309 | __be16 max_desc_sz_rq; |
310 | u8 rsvd21[2]; | 310 | u8 rsvd21[2]; |
311 | __be16 max_desc_sz_sq_dc; | 311 | __be16 max_desc_sz_sq_dc; |
312 | u8 rsvd22[4]; | 312 | __be32 max_qp_mcg; |
313 | __be16 max_qp_mcg; | 313 | u8 rsvd22[3]; |
314 | u8 rsvd23; | ||
315 | u8 log_max_mcg; | 314 | u8 log_max_mcg; |
316 | u8 rsvd24; | 315 | u8 rsvd23; |
317 | u8 log_max_pd; | 316 | u8 log_max_pd; |
318 | u8 rsvd25; | 317 | u8 rsvd24; |
319 | u8 log_max_xrcd; | 318 | u8 log_max_xrcd; |
320 | u8 rsvd26[42]; | 319 | u8 rsvd25[42]; |
321 | __be16 log_uar_page_sz; | 320 | __be16 log_uar_page_sz; |
322 | u8 rsvd27[28]; | 321 | u8 rsvd26[28]; |
323 | u8 log_msx_atomic_size_qp; | 322 | u8 log_msx_atomic_size_qp; |
324 | u8 rsvd28[2]; | 323 | u8 rsvd27[2]; |
325 | u8 log_msx_atomic_size_dc; | 324 | u8 log_msx_atomic_size_dc; |
326 | u8 rsvd29[76]; | 325 | u8 rsvd28[76]; |
327 | }; | 326 | }; |
328 | 327 | ||
329 | 328 | ||
@@ -472,9 +471,8 @@ struct mlx5_eqe_cmd { | |||
472 | struct mlx5_eqe_page_req { | 471 | struct mlx5_eqe_page_req { |
473 | u8 rsvd0[2]; | 472 | u8 rsvd0[2]; |
474 | __be16 func_id; | 473 | __be16 func_id; |
475 | u8 rsvd1[2]; | 474 | __be32 num_pages; |
476 | __be16 num_pages; | 475 | __be32 rsvd1[5]; |
477 | __be32 rsvd2[5]; | ||
478 | }; | 476 | }; |
479 | 477 | ||
480 | union ev_data { | 478 | union ev_data { |
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 2aa258b0ced1..8888381fc150 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h | |||
@@ -358,7 +358,7 @@ struct mlx5_caps { | |||
358 | u32 reserved_lkey; | 358 | u32 reserved_lkey; |
359 | u8 local_ca_ack_delay; | 359 | u8 local_ca_ack_delay; |
360 | u8 log_max_mcg; | 360 | u8 log_max_mcg; |
361 | u16 max_qp_mcg; | 361 | u32 max_qp_mcg; |
362 | int min_page_sz; | 362 | int min_page_sz; |
363 | }; | 363 | }; |
364 | 364 | ||
@@ -691,7 +691,7 @@ void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev); | |||
691 | int mlx5_pagealloc_start(struct mlx5_core_dev *dev); | 691 | int mlx5_pagealloc_start(struct mlx5_core_dev *dev); |
692 | void mlx5_pagealloc_stop(struct mlx5_core_dev *dev); | 692 | void mlx5_pagealloc_stop(struct mlx5_core_dev *dev); |
693 | void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, | 693 | void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, |
694 | s16 npages); | 694 | s32 npages); |
695 | int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot); | 695 | int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot); |
696 | int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev); | 696 | int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev); |
697 | void mlx5_register_debugfs(void); | 697 | void mlx5_register_debugfs(void); |
@@ -731,9 +731,6 @@ void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev); | |||
731 | int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db); | 731 | int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db); |
732 | void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db); | 732 | void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db); |
733 | 733 | ||
734 | typedef void (*health_handler_t)(struct pci_dev *pdev, struct health_buffer __iomem *buf, int size); | ||
735 | int mlx5_register_health_report_handler(health_handler_t handler); | ||
736 | void mlx5_unregister_health_report_handler(void); | ||
737 | const char *mlx5_command_str(int command); | 734 | const char *mlx5_command_str(int command); |
738 | int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev); | 735 | int mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev); |
739 | void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev); | 736 | void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev); |
diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 75981d0b57dc..580a5320cc96 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h | |||
@@ -15,6 +15,7 @@ | |||
15 | 15 | ||
16 | #include <linux/list.h> | 16 | #include <linux/list.h> |
17 | #include <linux/rbtree.h> | 17 | #include <linux/rbtree.h> |
18 | #include <linux/err.h> | ||
18 | 19 | ||
19 | struct module; | 20 | struct module; |
20 | struct device; | 21 | struct device; |
diff --git a/include/linux/sched.h b/include/linux/sched.h index d722490da030..e9995eb5985c 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -314,6 +314,7 @@ struct nsproxy; | |||
314 | struct user_namespace; | 314 | struct user_namespace; |
315 | 315 | ||
316 | #ifdef CONFIG_MMU | 316 | #ifdef CONFIG_MMU |
317 | extern unsigned long mmap_legacy_base(void); | ||
317 | extern void arch_pick_mmap_layout(struct mm_struct *mm); | 318 | extern void arch_pick_mmap_layout(struct mm_struct *mm); |
318 | extern unsigned long | 319 | extern unsigned long |
319 | arch_get_unmapped_area(struct file *, unsigned long, unsigned long, | 320 | arch_get_unmapped_area(struct file *, unsigned long, unsigned long, |
@@ -1532,6 +1533,8 @@ static inline pid_t task_pgrp_nr(struct task_struct *tsk) | |||
1532 | * Test if a process is not yet dead (at most zombie state) | 1533 | * Test if a process is not yet dead (at most zombie state) |
1533 | * If pid_alive fails, then pointers within the task structure | 1534 | * If pid_alive fails, then pointers within the task structure |
1534 | * can be stale and must not be dereferenced. | 1535 | * can be stale and must not be dereferenced. |
1536 | * | ||
1537 | * Return: 1 if the process is alive. 0 otherwise. | ||
1535 | */ | 1538 | */ |
1536 | static inline int pid_alive(struct task_struct *p) | 1539 | static inline int pid_alive(struct task_struct *p) |
1537 | { | 1540 | { |
@@ -1543,6 +1546,8 @@ static inline int pid_alive(struct task_struct *p) | |||
1543 | * @tsk: Task structure to be checked. | 1546 | * @tsk: Task structure to be checked. |
1544 | * | 1547 | * |
1545 | * Check if a task structure is the first user space task the kernel created. | 1548 | * Check if a task structure is the first user space task the kernel created. |
1549 | * | ||
1550 | * Return: 1 if the task structure is init. 0 otherwise. | ||
1546 | */ | 1551 | */ |
1547 | static inline int is_global_init(struct task_struct *tsk) | 1552 | static inline int is_global_init(struct task_struct *tsk) |
1548 | { | 1553 | { |
@@ -1894,6 +1899,8 @@ extern struct task_struct *idle_task(int cpu); | |||
1894 | /** | 1899 | /** |
1895 | * is_idle_task - is the specified task an idle task? | 1900 | * is_idle_task - is the specified task an idle task? |
1896 | * @p: the task in question. | 1901 | * @p: the task in question. |
1902 | * | ||
1903 | * Return: 1 if @p is an idle task. 0 otherwise. | ||
1897 | */ | 1904 | */ |
1898 | static inline bool is_idle_task(const struct task_struct *p) | 1905 | static inline bool is_idle_task(const struct task_struct *p) |
1899 | { | 1906 | { |
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 7d537ced949a..75f34949d9ab 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h | |||
@@ -117,9 +117,17 @@ do { \ | |||
117 | #endif /*arch_spin_is_contended*/ | 117 | #endif /*arch_spin_is_contended*/ |
118 | #endif | 118 | #endif |
119 | 119 | ||
120 | /* The lock does not imply full memory barrier. */ | 120 | /* |
121 | #ifndef ARCH_HAS_SMP_MB_AFTER_LOCK | 121 | * Despite its name, it doesn't necessarily have to be a full barrier. |
122 | static inline void smp_mb__after_lock(void) { smp_mb(); } | 122 | * It should only guarantee that a STORE before the critical section |
123 | * cannot be reordered with a LOAD inside this section. | ||
124 | * spin_lock() is a one-way barrier: this LOAD cannot escape out | ||
125 | * of the region. So the default implementation simply ensures that | ||
126 | * a STORE cannot move into the critical section; smp_wmb() should | ||
127 | * serialize it with another STORE done by spin_lock(). | ||
128 | */ | ||
129 | #ifndef smp_mb__before_spinlock | ||
130 | #define smp_mb__before_spinlock() smp_wmb() | ||
123 | #endif | 131 | #endif |
124 | 132 | ||
125 | /** | 133 | /** |
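The new smp_mb__before_spinlock() only has to prevent a STORE issued before taking the lock from being reordered with a LOAD performed inside the critical section; spin_lock() itself is an acquire (one-way) barrier, so that LOAD cannot leak out of the section. A userspace analogy in C11 atomics, deliberately using a conservative full fence rather than modelling smp_wmb() exactly (the names and the spin loop are illustrative only):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool condition;
static atomic_flag lock = ATOMIC_FLAG_INIT;

static void waker(void)
{
	/* STORE that must be visible before loads done under the lock */
	atomic_store_explicit(&condition, true, memory_order_relaxed);

	/* counterpart of smp_mb__before_spinlock() */
	atomic_thread_fence(memory_order_seq_cst);

	/* acquire-only lock, like spin_lock(): a one-way barrier */
	while (atomic_flag_test_and_set_explicit(&lock, memory_order_acquire))
		;

	/* ... inspect the sleeper's state and decide whether to wake it ... */

	atomic_flag_clear_explicit(&lock, memory_order_release);
}

int main(void)
{
	waker();
	return 0;
}

The scheduler hunks further down use exactly this pairing in try_to_wake_up() and __schedule().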
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 6d870353674a..1821445708d6 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h | |||
@@ -121,6 +121,7 @@ struct rpc_task_setup { | |||
121 | #define RPC_TASK_SOFTCONN 0x0400 /* Fail if can't connect */ | 121 | #define RPC_TASK_SOFTCONN 0x0400 /* Fail if can't connect */ |
122 | #define RPC_TASK_SENT 0x0800 /* message was sent */ | 122 | #define RPC_TASK_SENT 0x0800 /* message was sent */ |
123 | #define RPC_TASK_TIMEOUT 0x1000 /* fail with ETIMEDOUT on timeout */ | 123 | #define RPC_TASK_TIMEOUT 0x1000 /* fail with ETIMEDOUT on timeout */ |
124 | #define RPC_TASK_NOCONNECT 0x2000 /* return ENOTCONN if not connected */ | ||
124 | 125 | ||
125 | #define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC) | 126 | #define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC) |
126 | #define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER) | 127 | #define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER) |
diff --git a/include/linux/swapops.h b/include/linux/swapops.h index c5fd30d2a415..8d4fa82bfb91 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h | |||
@@ -67,6 +67,8 @@ static inline swp_entry_t pte_to_swp_entry(pte_t pte) | |||
67 | swp_entry_t arch_entry; | 67 | swp_entry_t arch_entry; |
68 | 68 | ||
69 | BUG_ON(pte_file(pte)); | 69 | BUG_ON(pte_file(pte)); |
70 | if (pte_swp_soft_dirty(pte)) | ||
71 | pte = pte_swp_clear_soft_dirty(pte); | ||
70 | arch_entry = __pte_to_swp_entry(pte); | 72 | arch_entry = __pte_to_swp_entry(pte); |
71 | return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry)); | 73 | return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry)); |
72 | } | 74 | } |
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 4147d700a293..84662ecc7b51 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h | |||
@@ -802,9 +802,14 @@ asmlinkage long sys_vfork(void); | |||
802 | asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, int, | 802 | asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, int, |
803 | int __user *); | 803 | int __user *); |
804 | #else | 804 | #else |
805 | #ifdef CONFIG_CLONE_BACKWARDS3 | ||
806 | asmlinkage long sys_clone(unsigned long, unsigned long, int, int __user *, | ||
807 | int __user *, int); | ||
808 | #else | ||
805 | asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, | 809 | asmlinkage long sys_clone(unsigned long, unsigned long, int __user *, |
806 | int __user *, int); | 810 | int __user *, int); |
807 | #endif | 811 | #endif |
812 | #endif | ||
808 | 813 | ||
809 | asmlinkage long sys_execve(const char __user *filename, | 814 | asmlinkage long sys_execve(const char __user *filename, |
810 | const char __user *const __user *argv, | 815 | const char __user *const __user *argv, |
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h index b6b215f13b45..14105c26a836 100644 --- a/include/linux/user_namespace.h +++ b/include/linux/user_namespace.h | |||
@@ -23,6 +23,7 @@ struct user_namespace { | |||
23 | struct uid_gid_map projid_map; | 23 | struct uid_gid_map projid_map; |
24 | atomic_t count; | 24 | atomic_t count; |
25 | struct user_namespace *parent; | 25 | struct user_namespace *parent; |
26 | int level; | ||
26 | kuid_t owner; | 27 | kuid_t owner; |
27 | kgid_t group; | 28 | kgid_t group; |
28 | unsigned int proc_inum; | 29 | unsigned int proc_inum; |
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h index 7343a27fe819..47ada23345a1 100644 --- a/include/media/v4l2-ctrls.h +++ b/include/media/v4l2-ctrls.h | |||
@@ -22,6 +22,7 @@ | |||
22 | #define _V4L2_CTRLS_H | 22 | #define _V4L2_CTRLS_H |
23 | 23 | ||
24 | #include <linux/list.h> | 24 | #include <linux/list.h> |
25 | #include <linux/mutex.h> | ||
25 | #include <linux/videodev2.h> | 26 | #include <linux/videodev2.h> |
26 | 27 | ||
27 | /* forward references */ | 28 | /* forward references */ |
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h index f18b91966d3d..8a358a2c97e6 100644 --- a/include/net/busy_poll.h +++ b/include/net/busy_poll.h | |||
@@ -122,7 +122,7 @@ static inline bool sk_busy_loop(struct sock *sk, int nonblock) | |||
122 | if (rc > 0) | 122 | if (rc > 0) |
123 | /* local bh are disabled so it is ok to use _BH */ | 123 | /* local bh are disabled so it is ok to use _BH */ |
124 | NET_ADD_STATS_BH(sock_net(sk), | 124 | NET_ADD_STATS_BH(sock_net(sk), |
125 | LINUX_MIB_LOWLATENCYRXPACKETS, rc); | 125 | LINUX_MIB_BUSYPOLLRXPACKETS, rc); |
126 | 126 | ||
127 | } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) && | 127 | } while (!nonblock && skb_queue_empty(&sk->sk_receive_queue) && |
128 | !need_resched() && !busy_loop_timeout(end_time)); | 128 | !need_resched() && !busy_loop_timeout(end_time)); |
@@ -162,11 +162,6 @@ static inline bool sk_can_busy_loop(struct sock *sk) | |||
162 | return false; | 162 | return false; |
163 | } | 163 | } |
164 | 164 | ||
165 | static inline bool sk_busy_poll(struct sock *sk, int nonblock) | ||
166 | { | ||
167 | return false; | ||
168 | } | ||
169 | |||
170 | static inline void skb_mark_napi_id(struct sk_buff *skb, | 165 | static inline void skb_mark_napi_id(struct sk_buff *skb, |
171 | struct napi_struct *napi) | 166 | struct napi_struct *napi) |
172 | { | 167 | { |
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h index 781b3cf86a2f..a354db5b7662 100644 --- a/include/net/ip_tunnels.h +++ b/include/net/ip_tunnels.h | |||
@@ -145,20 +145,6 @@ static inline u8 ip_tunnel_ecn_encap(u8 tos, const struct iphdr *iph, | |||
145 | return INET_ECN_encapsulate(tos, inner); | 145 | return INET_ECN_encapsulate(tos, inner); |
146 | } | 146 | } |
147 | 147 | ||
148 | static inline void tunnel_ip_select_ident(struct sk_buff *skb, | ||
149 | const struct iphdr *old_iph, | ||
150 | struct dst_entry *dst) | ||
151 | { | ||
152 | struct iphdr *iph = ip_hdr(skb); | ||
153 | |||
154 | /* Use inner packet iph-id if possible. */ | ||
155 | if (skb->protocol == htons(ETH_P_IP) && old_iph->id) | ||
156 | iph->id = old_iph->id; | ||
157 | else | ||
158 | __ip_select_ident(iph, dst, | ||
159 | (skb_shinfo(skb)->gso_segs ?: 1) - 1); | ||
160 | } | ||
161 | |||
162 | int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto); | 148 | int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto); |
163 | int iptunnel_xmit(struct net *net, struct rtable *rt, | 149 | int iptunnel_xmit(struct net *net, struct rtable *rt, |
164 | struct sk_buff *skb, | 150 | struct sk_buff *skb, |
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 6eab63363e59..e5ae0c50fa9c 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h | |||
@@ -683,13 +683,19 @@ struct psched_ratecfg { | |||
683 | u64 rate_bytes_ps; /* bytes per second */ | 683 | u64 rate_bytes_ps; /* bytes per second */ |
684 | u32 mult; | 684 | u32 mult; |
685 | u16 overhead; | 685 | u16 overhead; |
686 | u8 linklayer; | ||
686 | u8 shift; | 687 | u8 shift; |
687 | }; | 688 | }; |
688 | 689 | ||
689 | static inline u64 psched_l2t_ns(const struct psched_ratecfg *r, | 690 | static inline u64 psched_l2t_ns(const struct psched_ratecfg *r, |
690 | unsigned int len) | 691 | unsigned int len) |
691 | { | 692 | { |
692 | return ((u64)(len + r->overhead) * r->mult) >> r->shift; | 693 | len += r->overhead; |
694 | |||
695 | if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) | ||
696 | return ((u64)(DIV_ROUND_UP(len,48)*53) * r->mult) >> r->shift; | ||
697 | |||
698 | return ((u64)len * r->mult) >> r->shift; | ||
693 | } | 699 | } |
694 | 700 | ||
695 | extern void psched_ratecfg_precompute(struct psched_ratecfg *r, const struct tc_ratespec *conf); | 701 | extern void psched_ratecfg_precompute(struct psched_ratecfg *r, const struct tc_ratespec *conf); |
@@ -700,6 +706,7 @@ static inline void psched_ratecfg_getrate(struct tc_ratespec *res, | |||
700 | memset(res, 0, sizeof(*res)); | 706 | memset(res, 0, sizeof(*res)); |
701 | res->rate = r->rate_bytes_ps; | 707 | res->rate = r->rate_bytes_ps; |
702 | res->overhead = r->overhead; | 708 | res->overhead = r->overhead; |
709 | res->linklayer = (r->linklayer & TC_LINKLAYER_MASK); | ||
703 | } | 710 | } |
704 | 711 | ||
705 | #endif | 712 | #endif |
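The TC_LINKLAYER_ATM branch added to psched_l2t_ns() accounts for ATM cell framing: every 48 bytes of payload occupy a full 53-byte cell on the wire, so the length is rounded up to whole cells before the rate multiply. A standalone check of just that length adjustment:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned int atm_wire_len(unsigned int len)
{
	/* 48 payload bytes per 53-byte ATM cell, as in psched_l2t_ns() */
	return DIV_ROUND_UP(len, 48) * 53;
}

int main(void)
{
	/* a 1500-byte frame needs 32 cells -> 1696 bytes of transmission time */
	printf("1500 -> %u\n", atm_wire_len(1500));
	/* a 40-byte packet still costs one full cell -> 53 */
	printf("  40 -> %u\n", atm_wire_len(40));
	return 0;
}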
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index dbd71b0c7d8c..09d62b9228ff 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h | |||
@@ -73,9 +73,17 @@ struct tc_estimator { | |||
73 | #define TC_H_ROOT (0xFFFFFFFFU) | 73 | #define TC_H_ROOT (0xFFFFFFFFU) |
74 | #define TC_H_INGRESS (0xFFFFFFF1U) | 74 | #define TC_H_INGRESS (0xFFFFFFF1U) |
75 | 75 | ||
76 | /* Needs to correspond to iproute2 tc/tc_core.h "enum link_layer" */ | ||
77 | enum tc_link_layer { | ||
78 | TC_LINKLAYER_UNAWARE, /* Indicate unaware old iproute2 util */ | ||
79 | TC_LINKLAYER_ETHERNET, | ||
80 | TC_LINKLAYER_ATM, | ||
81 | }; | ||
82 | #define TC_LINKLAYER_MASK 0x0F /* limit use to lower 4 bits */ | ||
83 | |||
76 | struct tc_ratespec { | 84 | struct tc_ratespec { |
77 | unsigned char cell_log; | 85 | unsigned char cell_log; |
78 | unsigned char __reserved; | 86 | __u8 linklayer; /* lower 4 bits */ |
79 | unsigned short overhead; | 87 | unsigned short overhead; |
80 | short cell_align; | 88 | short cell_align; |
81 | unsigned short mpu; | 89 | unsigned short mpu; |
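Reusing the old __reserved byte of struct tc_ratespec for linklayer keeps the UAPI layout unchanged: same size, same offsets for the members that follow, and old iproute2 binaries that left the byte zeroed are now reported as TC_LINKLAYER_UNAWARE. A sketch that checks the layout point for the leading fields shown above (the real struct has further members after mpu that are omitted here, and the reserved field is renamed to stay valid userspace C):

#include <stdio.h>
#include <stddef.h>

struct ratespec_old {			/* before the change */
	unsigned char	cell_log;
	unsigned char	reserved_byte;	/* "__reserved" in the kernel header */
	unsigned short	overhead;
	short		cell_align;
	unsigned short	mpu;
};

struct ratespec_new {			/* after the change */
	unsigned char	cell_log;
	unsigned char	linklayer;	/* lower 4 bits, TC_LINKLAYER_MASK */
	unsigned short	overhead;
	short		cell_align;
	unsigned short	mpu;
};

int main(void)
{
	printf("sizes: %zu vs %zu\n",
	       sizeof(struct ratespec_old), sizeof(struct ratespec_new));
	printf("offset of overhead: %zu vs %zu\n",
	       offsetof(struct ratespec_old, overhead),
	       offsetof(struct ratespec_new, overhead));
	return 0;
}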
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h index af0a674cc677..a1356d3b54df 100644 --- a/include/uapi/linux/snmp.h +++ b/include/uapi/linux/snmp.h | |||
@@ -253,7 +253,7 @@ enum | |||
253 | LINUX_MIB_TCPFASTOPENLISTENOVERFLOW, /* TCPFastOpenListenOverflow */ | 253 | LINUX_MIB_TCPFASTOPENLISTENOVERFLOW, /* TCPFastOpenListenOverflow */ |
254 | LINUX_MIB_TCPFASTOPENCOOKIEREQD, /* TCPFastOpenCookieReqd */ | 254 | LINUX_MIB_TCPFASTOPENCOOKIEREQD, /* TCPFastOpenCookieReqd */ |
255 | LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES, /* TCPSpuriousRtxHostQueues */ | 255 | LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES, /* TCPSpuriousRtxHostQueues */ |
256 | LINUX_MIB_LOWLATENCYRXPACKETS, /* LowLatencyRxPackets */ | 256 | LINUX_MIB_BUSYPOLLRXPACKETS, /* BusyPollRxPackets */ |
257 | __LINUX_MIB_MAX | 257 | __LINUX_MIB_MAX |
258 | }; | 258 | }; |
259 | 259 | ||
diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 789ec4683db3..781845a013ab 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c | |||
@@ -4335,8 +4335,10 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry, | |||
4335 | } | 4335 | } |
4336 | 4336 | ||
4337 | err = percpu_ref_init(&css->refcnt, css_release); | 4337 | err = percpu_ref_init(&css->refcnt, css_release); |
4338 | if (err) | 4338 | if (err) { |
4339 | ss->css_free(cgrp); | ||
4339 | goto err_free_all; | 4340 | goto err_free_all; |
4341 | } | ||
4340 | 4342 | ||
4341 | init_cgroup_css(css, ss, cgrp); | 4343 | init_cgroup_css(css, ss, cgrp); |
4342 | 4344 | ||
diff --git a/kernel/cpuset.c b/kernel/cpuset.c index e5657788fedd..010a0083c0ae 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c | |||
@@ -1608,11 +1608,13 @@ static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val) | |||
1608 | { | 1608 | { |
1609 | struct cpuset *cs = cgroup_cs(cgrp); | 1609 | struct cpuset *cs = cgroup_cs(cgrp); |
1610 | cpuset_filetype_t type = cft->private; | 1610 | cpuset_filetype_t type = cft->private; |
1611 | int retval = -ENODEV; | 1611 | int retval = 0; |
1612 | 1612 | ||
1613 | mutex_lock(&cpuset_mutex); | 1613 | mutex_lock(&cpuset_mutex); |
1614 | if (!is_cpuset_online(cs)) | 1614 | if (!is_cpuset_online(cs)) { |
1615 | retval = -ENODEV; | ||
1615 | goto out_unlock; | 1616 | goto out_unlock; |
1617 | } | ||
1616 | 1618 | ||
1617 | switch (type) { | 1619 | switch (type) { |
1618 | case FILE_CPU_EXCLUSIVE: | 1620 | case FILE_CPU_EXCLUSIVE: |
diff --git a/kernel/fork.c b/kernel/fork.c index 403d2bb8a968..e23bb19e2a3e 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -1679,6 +1679,12 @@ SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags, | |||
1679 | int __user *, parent_tidptr, | 1679 | int __user *, parent_tidptr, |
1680 | int __user *, child_tidptr, | 1680 | int __user *, child_tidptr, |
1681 | int, tls_val) | 1681 | int, tls_val) |
1682 | #elif defined(CONFIG_CLONE_BACKWARDS3) | ||
1683 | SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp, | ||
1684 | int, stack_size, | ||
1685 | int __user *, parent_tidptr, | ||
1686 | int __user *, child_tidptr, | ||
1687 | int, tls_val) | ||
1682 | #else | 1688 | #else |
1683 | SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, | 1689 | SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp, |
1684 | int __user *, parent_tidptr, | 1690 | int __user *, parent_tidptr, |
diff --git a/kernel/mutex.c b/kernel/mutex.c index ff05f4bd86eb..a52ee7bb830d 100644 --- a/kernel/mutex.c +++ b/kernel/mutex.c | |||
@@ -686,7 +686,7 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) | |||
686 | might_sleep(); | 686 | might_sleep(); |
687 | ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, | 687 | ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE, |
688 | 0, &ctx->dep_map, _RET_IP_, ctx); | 688 | 0, &ctx->dep_map, _RET_IP_, ctx); |
689 | if (!ret && ctx->acquired > 0) | 689 | if (!ret && ctx->acquired > 1) |
690 | return ww_mutex_deadlock_injection(lock, ctx); | 690 | return ww_mutex_deadlock_injection(lock, ctx); |
691 | 691 | ||
692 | return ret; | 692 | return ret; |
@@ -702,7 +702,7 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) | |||
702 | ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, | 702 | ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE, |
703 | 0, &ctx->dep_map, _RET_IP_, ctx); | 703 | 0, &ctx->dep_map, _RET_IP_, ctx); |
704 | 704 | ||
705 | if (!ret && ctx->acquired > 0) | 705 | if (!ret && ctx->acquired > 1) |
706 | return ww_mutex_deadlock_injection(lock, ctx); | 706 | return ww_mutex_deadlock_injection(lock, ctx); |
707 | 707 | ||
708 | return ret; | 708 | return ret; |
diff --git a/kernel/power/qos.c b/kernel/power/qos.c index 06fe28589e9c..a394297f8b2f 100644 --- a/kernel/power/qos.c +++ b/kernel/power/qos.c | |||
@@ -296,6 +296,17 @@ int pm_qos_request_active(struct pm_qos_request *req) | |||
296 | } | 296 | } |
297 | EXPORT_SYMBOL_GPL(pm_qos_request_active); | 297 | EXPORT_SYMBOL_GPL(pm_qos_request_active); |
298 | 298 | ||
299 | static void __pm_qos_update_request(struct pm_qos_request *req, | ||
300 | s32 new_value) | ||
301 | { | ||
302 | trace_pm_qos_update_request(req->pm_qos_class, new_value); | ||
303 | |||
304 | if (new_value != req->node.prio) | ||
305 | pm_qos_update_target( | ||
306 | pm_qos_array[req->pm_qos_class]->constraints, | ||
307 | &req->node, PM_QOS_UPDATE_REQ, new_value); | ||
308 | } | ||
309 | |||
299 | /** | 310 | /** |
300 | * pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout | 311 | * pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout |
301 | * @work: work struct for the delayed work (timeout) | 312 | * @work: work struct for the delayed work (timeout) |
@@ -308,7 +319,7 @@ static void pm_qos_work_fn(struct work_struct *work) | |||
308 | struct pm_qos_request, | 319 | struct pm_qos_request, |
309 | work); | 320 | work); |
310 | 321 | ||
311 | pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE); | 322 | __pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE); |
312 | } | 323 | } |
313 | 324 | ||
314 | /** | 325 | /** |
@@ -364,12 +375,7 @@ void pm_qos_update_request(struct pm_qos_request *req, | |||
364 | } | 375 | } |
365 | 376 | ||
366 | cancel_delayed_work_sync(&req->work); | 377 | cancel_delayed_work_sync(&req->work); |
367 | 378 | __pm_qos_update_request(req, new_value); | |
368 | trace_pm_qos_update_request(req->pm_qos_class, new_value); | ||
369 | if (new_value != req->node.prio) | ||
370 | pm_qos_update_target( | ||
371 | pm_qos_array[req->pm_qos_class]->constraints, | ||
372 | &req->node, PM_QOS_UPDATE_REQ, new_value); | ||
373 | } | 379 | } |
374 | EXPORT_SYMBOL_GPL(pm_qos_update_request); | 380 | EXPORT_SYMBOL_GPL(pm_qos_update_request); |
375 | 381 | ||
diff --git a/kernel/printk/braille.c b/kernel/printk/braille.c index b51087fb9ace..276762f3a460 100644 --- a/kernel/printk/braille.c +++ b/kernel/printk/braille.c | |||
@@ -19,7 +19,8 @@ char *_braille_console_setup(char **str, char **brl_options) | |||
19 | pr_err("need port name after brl=\n"); | 19 | pr_err("need port name after brl=\n"); |
20 | else | 20 | else |
21 | *((*str)++) = 0; | 21 | *((*str)++) = 0; |
22 | } | 22 | } else |
23 | return NULL; | ||
23 | 24 | ||
24 | return *str; | 25 | return *str; |
25 | } | 26 | } |
diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 4041f5747e73..a146ee327f6a 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c | |||
@@ -469,7 +469,6 @@ static int ptrace_detach(struct task_struct *child, unsigned int data) | |||
469 | /* Architecture-specific hardware disable .. */ | 469 | /* Architecture-specific hardware disable .. */ |
470 | ptrace_disable(child); | 470 | ptrace_disable(child); |
471 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | 471 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); |
472 | flush_ptrace_hw_breakpoint(child); | ||
473 | 472 | ||
474 | write_lock_irq(&tasklist_lock); | 473 | write_lock_irq(&tasklist_lock); |
475 | /* | 474 | /* |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index b7c32cb7bfeb..05c39f030314 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -933,6 +933,8 @@ static int effective_prio(struct task_struct *p) | |||
933 | /** | 933 | /** |
934 | * task_curr - is this task currently executing on a CPU? | 934 | * task_curr - is this task currently executing on a CPU? |
935 | * @p: the task in question. | 935 | * @p: the task in question. |
936 | * | ||
937 | * Return: 1 if the task is currently executing. 0 otherwise. | ||
936 | */ | 938 | */ |
937 | inline int task_curr(const struct task_struct *p) | 939 | inline int task_curr(const struct task_struct *p) |
938 | { | 940 | { |
@@ -1482,7 +1484,7 @@ static void ttwu_queue(struct task_struct *p, int cpu) | |||
1482 | * the simpler "current->state = TASK_RUNNING" to mark yourself | 1484 | * the simpler "current->state = TASK_RUNNING" to mark yourself |
1483 | * runnable without the overhead of this. | 1485 | * runnable without the overhead of this. |
1484 | * | 1486 | * |
1485 | * Returns %true if @p was woken up, %false if it was already running | 1487 | * Return: %true if @p was woken up, %false if it was already running, |
1486 | * or @state didn't match @p's state. | 1488 | * or @state didn't match @p's state. |
1487 | */ | 1489 | */ |
1488 | static int | 1490 | static int |
@@ -1491,7 +1493,13 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) | |||
1491 | unsigned long flags; | 1493 | unsigned long flags; |
1492 | int cpu, success = 0; | 1494 | int cpu, success = 0; |
1493 | 1495 | ||
1494 | smp_wmb(); | 1496 | /* |
1497 | * If we are going to wake up a thread waiting for CONDITION we | ||
1498 | * need to ensure that CONDITION=1 done by the caller cannot be | ||
1499 | * reordered with p->state check below. This pairs with mb() in | ||
1500 | * set_current_state() the waiting thread does. | ||
1501 | */ | ||
1502 | smp_mb__before_spinlock(); | ||
1495 | raw_spin_lock_irqsave(&p->pi_lock, flags); | 1503 | raw_spin_lock_irqsave(&p->pi_lock, flags); |
1496 | if (!(p->state & state)) | 1504 | if (!(p->state & state)) |
1497 | goto out; | 1505 | goto out; |
@@ -1577,8 +1585,9 @@ out: | |||
1577 | * @p: The process to be woken up. | 1585 | * @p: The process to be woken up. |
1578 | * | 1586 | * |
1579 | * Attempt to wake up the nominated process and move it to the set of runnable | 1587 | * Attempt to wake up the nominated process and move it to the set of runnable |
1580 | * processes. Returns 1 if the process was woken up, 0 if it was already | 1588 | * processes. |
1581 | * running. | 1589 | * |
1590 | * Return: 1 if the process was woken up, 0 if it was already running. | ||
1582 | * | 1591 | * |
1583 | * It may be assumed that this function implies a write memory barrier before | 1592 | * It may be assumed that this function implies a write memory barrier before |
1584 | * changing the task state if and only if any tasks are woken up. | 1593 | * changing the task state if and only if any tasks are woken up. |
@@ -2191,6 +2200,8 @@ void scheduler_tick(void) | |||
2191 | * This makes sure that uptime, CFS vruntime, load | 2200 | * This makes sure that uptime, CFS vruntime, load |
2192 | * balancing, etc... continue to move forward, even | 2201 | * balancing, etc... continue to move forward, even |
2193 | * with a very low granularity. | 2202 | * with a very low granularity. |
2203 | * | ||
2204 | * Return: Maximum deferment in nanoseconds. | ||
2194 | */ | 2205 | */ |
2195 | u64 scheduler_tick_max_deferment(void) | 2206 | u64 scheduler_tick_max_deferment(void) |
2196 | { | 2207 | { |
@@ -2394,6 +2405,12 @@ need_resched: | |||
2394 | if (sched_feat(HRTICK)) | 2405 | if (sched_feat(HRTICK)) |
2395 | hrtick_clear(rq); | 2406 | hrtick_clear(rq); |
2396 | 2407 | ||
2408 | /* | ||
2409 | * Make sure that signal_pending_state()->signal_pending() below | ||
2410 | * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE) | ||
2411 | * done by the caller to avoid the race with signal_wake_up(). | ||
2412 | */ | ||
2413 | smp_mb__before_spinlock(); | ||
2397 | raw_spin_lock_irq(&rq->lock); | 2414 | raw_spin_lock_irq(&rq->lock); |
2398 | 2415 | ||
2399 | switch_count = &prev->nivcsw; | 2416 | switch_count = &prev->nivcsw; |
@@ -2796,8 +2813,8 @@ EXPORT_SYMBOL(wait_for_completion); | |||
2796 | * specified timeout to expire. The timeout is in jiffies. It is not | 2813 | * specified timeout to expire. The timeout is in jiffies. It is not |
2797 | * interruptible. | 2814 | * interruptible. |
2798 | * | 2815 | * |
2799 | * The return value is 0 if timed out, and positive (at least 1, or number of | 2816 | * Return: 0 if timed out, and positive (at least 1, or number of jiffies left |
2800 | * jiffies left till timeout) if completed. | 2817 | * till timeout) if completed. |
2801 | */ | 2818 | */ |
2802 | unsigned long __sched | 2819 | unsigned long __sched |
2803 | wait_for_completion_timeout(struct completion *x, unsigned long timeout) | 2820 | wait_for_completion_timeout(struct completion *x, unsigned long timeout) |
@@ -2829,8 +2846,8 @@ EXPORT_SYMBOL(wait_for_completion_io); | |||
2829 | * specified timeout to expire. The timeout is in jiffies. It is not | 2846 | * specified timeout to expire. The timeout is in jiffies. It is not |
2830 | * interruptible. The caller is accounted as waiting for IO. | 2847 | * interruptible. The caller is accounted as waiting for IO. |
2831 | * | 2848 | * |
2832 | * The return value is 0 if timed out, and positive (at least 1, or number of | 2849 | * Return: 0 if timed out, and positive (at least 1, or number of jiffies left |
2833 | * jiffies left till timeout) if completed. | 2850 | * till timeout) if completed. |
2834 | */ | 2851 | */ |
2835 | unsigned long __sched | 2852 | unsigned long __sched |
2836 | wait_for_completion_io_timeout(struct completion *x, unsigned long timeout) | 2853 | wait_for_completion_io_timeout(struct completion *x, unsigned long timeout) |
@@ -2846,7 +2863,7 @@ EXPORT_SYMBOL(wait_for_completion_io_timeout); | |||
2846 | * This waits for completion of a specific task to be signaled. It is | 2863 | * This waits for completion of a specific task to be signaled. It is |
2847 | * interruptible. | 2864 | * interruptible. |
2848 | * | 2865 | * |
2849 | * The return value is -ERESTARTSYS if interrupted, 0 if completed. | 2866 | * Return: -ERESTARTSYS if interrupted, 0 if completed. |
2850 | */ | 2867 | */ |
2851 | int __sched wait_for_completion_interruptible(struct completion *x) | 2868 | int __sched wait_for_completion_interruptible(struct completion *x) |
2852 | { | 2869 | { |
@@ -2865,8 +2882,8 @@ EXPORT_SYMBOL(wait_for_completion_interruptible); | |||
2865 | * This waits for either a completion of a specific task to be signaled or for a | 2882 | * This waits for either a completion of a specific task to be signaled or for a |
2866 | * specified timeout to expire. It is interruptible. The timeout is in jiffies. | 2883 | * specified timeout to expire. It is interruptible. The timeout is in jiffies. |
2867 | * | 2884 | * |
2868 | * The return value is -ERESTARTSYS if interrupted, 0 if timed out, | 2885 | * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1, |
2869 | * positive (at least 1, or number of jiffies left till timeout) if completed. | 2886 | * or number of jiffies left till timeout) if completed. |
2870 | */ | 2887 | */ |
2871 | long __sched | 2888 | long __sched |
2872 | wait_for_completion_interruptible_timeout(struct completion *x, | 2889 | wait_for_completion_interruptible_timeout(struct completion *x, |
@@ -2883,7 +2900,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout); | |||
2883 | * This waits to be signaled for completion of a specific task. It can be | 2900 | * This waits to be signaled for completion of a specific task. It can be |
2884 | * interrupted by a kill signal. | 2901 | * interrupted by a kill signal. |
2885 | * | 2902 | * |
2886 | * The return value is -ERESTARTSYS if interrupted, 0 if completed. | 2903 | * Return: -ERESTARTSYS if interrupted, 0 if completed. |
2887 | */ | 2904 | */ |
2888 | int __sched wait_for_completion_killable(struct completion *x) | 2905 | int __sched wait_for_completion_killable(struct completion *x) |
2889 | { | 2906 | { |
@@ -2903,8 +2920,8 @@ EXPORT_SYMBOL(wait_for_completion_killable); | |||
2903 | * signaled or for a specified timeout to expire. It can be | 2920 | * signaled or for a specified timeout to expire. It can be |
2904 | * interrupted by a kill signal. The timeout is in jiffies. | 2921 | * interrupted by a kill signal. The timeout is in jiffies. |
2905 | * | 2922 | * |
2906 | * The return value is -ERESTARTSYS if interrupted, 0 if timed out, | 2923 | * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1, |
2907 | * positive (at least 1, or number of jiffies left till timeout) if completed. | 2924 | * or number of jiffies left till timeout) if completed. |
2908 | */ | 2925 | */ |
2909 | long __sched | 2926 | long __sched |
2910 | wait_for_completion_killable_timeout(struct completion *x, | 2927 | wait_for_completion_killable_timeout(struct completion *x, |
@@ -2918,7 +2935,7 @@ EXPORT_SYMBOL(wait_for_completion_killable_timeout); | |||
2918 | * try_wait_for_completion - try to decrement a completion without blocking | 2935 | * try_wait_for_completion - try to decrement a completion without blocking |
2919 | * @x: completion structure | 2936 | * @x: completion structure |
2920 | * | 2937 | * |
2921 | * Returns: 0 if a decrement cannot be done without blocking | 2938 | * Return: 0 if a decrement cannot be done without blocking |
2922 | * 1 if a decrement succeeded. | 2939 | * 1 if a decrement succeeded. |
2923 | * | 2940 | * |
2924 | * If a completion is being used as a counting completion, | 2941 | * If a completion is being used as a counting completion, |
@@ -2945,7 +2962,7 @@ EXPORT_SYMBOL(try_wait_for_completion); | |||
2945 | * completion_done - Test to see if a completion has any waiters | 2962 | * completion_done - Test to see if a completion has any waiters |
2946 | * @x: completion structure | 2963 | * @x: completion structure |
2947 | * | 2964 | * |
2948 | * Returns: 0 if there are waiters (wait_for_completion() in progress) | 2965 | * Return: 0 if there are waiters (wait_for_completion() in progress) |
2949 | * 1 if there are no waiters. | 2966 | * 1 if there are no waiters. |
2950 | * | 2967 | * |
2951 | */ | 2968 | */ |
@@ -3182,7 +3199,7 @@ SYSCALL_DEFINE1(nice, int, increment) | |||
3182 | * task_prio - return the priority value of a given task. | 3199 | * task_prio - return the priority value of a given task. |
3183 | * @p: the task in question. | 3200 | * @p: the task in question. |
3184 | * | 3201 | * |
3185 | * This is the priority value as seen by users in /proc. | 3202 | * Return: The priority value as seen by users in /proc. |
3186 | * RT tasks are offset by -200. Normal tasks are centered | 3203 | * RT tasks are offset by -200. Normal tasks are centered |
3187 | * around 0, value goes from -16 to +15. | 3204 | * around 0, value goes from -16 to +15. |
3188 | */ | 3205 | */ |
@@ -3194,6 +3211,8 @@ int task_prio(const struct task_struct *p) | |||
3194 | /** | 3211 | /** |
3195 | * task_nice - return the nice value of a given task. | 3212 | * task_nice - return the nice value of a given task. |
3196 | * @p: the task in question. | 3213 | * @p: the task in question. |
3214 | * | ||
3215 | * Return: The nice value [ -20 ... 0 ... 19 ]. | ||
3197 | */ | 3216 | */ |
3198 | int task_nice(const struct task_struct *p) | 3217 | int task_nice(const struct task_struct *p) |
3199 | { | 3218 | { |
@@ -3204,6 +3223,8 @@ EXPORT_SYMBOL(task_nice); | |||
3204 | /** | 3223 | /** |
3205 | * idle_cpu - is a given cpu idle currently? | 3224 | * idle_cpu - is a given cpu idle currently? |
3206 | * @cpu: the processor in question. | 3225 | * @cpu: the processor in question. |
3226 | * | ||
3227 | * Return: 1 if the CPU is currently idle. 0 otherwise. | ||
3207 | */ | 3228 | */ |
3208 | int idle_cpu(int cpu) | 3229 | int idle_cpu(int cpu) |
3209 | { | 3230 | { |
@@ -3226,6 +3247,8 @@ int idle_cpu(int cpu) | |||
3226 | /** | 3247 | /** |
3227 | * idle_task - return the idle task for a given cpu. | 3248 | * idle_task - return the idle task for a given cpu. |
3228 | * @cpu: the processor in question. | 3249 | * @cpu: the processor in question. |
3250 | * | ||
3251 | * Return: The idle task for the cpu @cpu. | ||
3229 | */ | 3252 | */ |
3230 | struct task_struct *idle_task(int cpu) | 3253 | struct task_struct *idle_task(int cpu) |
3231 | { | 3254 | { |
@@ -3235,6 +3258,8 @@ struct task_struct *idle_task(int cpu) | |||
3235 | /** | 3258 | /** |
3236 | * find_process_by_pid - find a process with a matching PID value. | 3259 | * find_process_by_pid - find a process with a matching PID value. |
3237 | * @pid: the pid in question. | 3260 | * @pid: the pid in question. |
3261 | * | ||
3262 | * Return: The task of @pid, if found. %NULL otherwise. | ||
3238 | */ | 3263 | */ |
3239 | static struct task_struct *find_process_by_pid(pid_t pid) | 3264 | static struct task_struct *find_process_by_pid(pid_t pid) |
3240 | { | 3265 | { |
@@ -3432,6 +3457,8 @@ recheck: | |||
3432 | * @policy: new policy. | 3457 | * @policy: new policy. |
3433 | * @param: structure containing the new RT priority. | 3458 | * @param: structure containing the new RT priority. |
3434 | * | 3459 | * |
3460 | * Return: 0 on success. An error code otherwise. | ||
3461 | * | ||
3435 | * NOTE that the task may be already dead. | 3462 | * NOTE that the task may be already dead. |
3436 | */ | 3463 | */ |
3437 | int sched_setscheduler(struct task_struct *p, int policy, | 3464 | int sched_setscheduler(struct task_struct *p, int policy, |
@@ -3451,6 +3478,8 @@ EXPORT_SYMBOL_GPL(sched_setscheduler); | |||
3451 | * current context has permission. For example, this is needed in | 3478 | * current context has permission. For example, this is needed in |
3452 | * stop_machine(): we create temporary high priority worker threads, | 3479 | * stop_machine(): we create temporary high priority worker threads, |
3453 | * but our caller might not have that capability. | 3480 | * but our caller might not have that capability. |
3481 | * | ||
3482 | * Return: 0 on success. An error code otherwise. | ||
3454 | */ | 3483 | */ |
3455 | int sched_setscheduler_nocheck(struct task_struct *p, int policy, | 3484 | int sched_setscheduler_nocheck(struct task_struct *p, int policy, |
3456 | const struct sched_param *param) | 3485 | const struct sched_param *param) |
@@ -3485,6 +3514,8 @@ do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) | |||
3485 | * @pid: the pid in question. | 3514 | * @pid: the pid in question. |
3486 | * @policy: new policy. | 3515 | * @policy: new policy. |
3487 | * @param: structure containing the new RT priority. | 3516 | * @param: structure containing the new RT priority. |
3517 | * | ||
3518 | * Return: 0 on success. An error code otherwise. | ||
3488 | */ | 3519 | */ |
3489 | SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, | 3520 | SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, |
3490 | struct sched_param __user *, param) | 3521 | struct sched_param __user *, param) |
@@ -3500,6 +3531,8 @@ SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, | |||
3500 | * sys_sched_setparam - set/change the RT priority of a thread | 3531 | * sys_sched_setparam - set/change the RT priority of a thread |
3501 | * @pid: the pid in question. | 3532 | * @pid: the pid in question. |
3502 | * @param: structure containing the new RT priority. | 3533 | * @param: structure containing the new RT priority. |
3534 | * | ||
3535 | * Return: 0 on success. An error code otherwise. | ||
3503 | */ | 3536 | */ |
3504 | SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) | 3537 | SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) |
3505 | { | 3538 | { |
@@ -3509,6 +3542,9 @@ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param) | |||
3509 | /** | 3542 | /** |
3510 | * sys_sched_getscheduler - get the policy (scheduling class) of a thread | 3543 | * sys_sched_getscheduler - get the policy (scheduling class) of a thread |
3511 | * @pid: the pid in question. | 3544 | * @pid: the pid in question. |
3545 | * | ||
3546 | * Return: On success, the policy of the thread. Otherwise, a negative error | ||
3547 | * code. | ||
3512 | */ | 3548 | */ |
3513 | SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) | 3549 | SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) |
3514 | { | 3550 | { |
@@ -3535,6 +3571,9 @@ SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid) | |||
3535 | * sys_sched_getparam - get the RT priority of a thread | 3571 | * sys_sched_getparam - get the RT priority of a thread |
3536 | * @pid: the pid in question. | 3572 | * @pid: the pid in question. |
3537 | * @param: structure containing the RT priority. | 3573 | * @param: structure containing the RT priority. |
3574 | * | ||
3575 | * Return: On success, 0 and the RT priority is in @param. Otherwise, an error | ||
3576 | * code. | ||
3538 | */ | 3577 | */ |
3539 | SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) | 3578 | SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param) |
3540 | { | 3579 | { |
@@ -3659,6 +3698,8 @@ static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, | |||
3659 | * @pid: pid of the process | 3698 | * @pid: pid of the process |
3660 | * @len: length in bytes of the bitmask pointed to by user_mask_ptr | 3699 | * @len: length in bytes of the bitmask pointed to by user_mask_ptr |
3661 | * @user_mask_ptr: user-space pointer to the new cpu mask | 3700 | * @user_mask_ptr: user-space pointer to the new cpu mask |
3701 | * | ||
3702 | * Return: 0 on success. An error code otherwise. | ||
3662 | */ | 3703 | */ |
3663 | SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, | 3704 | SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, |
3664 | unsigned long __user *, user_mask_ptr) | 3705 | unsigned long __user *, user_mask_ptr) |
@@ -3710,6 +3751,8 @@ out_unlock: | |||
3710 | * @pid: pid of the process | 3751 | * @pid: pid of the process |
3711 | * @len: length in bytes of the bitmask pointed to by user_mask_ptr | 3752 | * @len: length in bytes of the bitmask pointed to by user_mask_ptr |
3712 | * @user_mask_ptr: user-space pointer to hold the current cpu mask | 3753 | * @user_mask_ptr: user-space pointer to hold the current cpu mask |
3754 | * | ||
3755 | * Return: 0 on success. An error code otherwise. | ||
3713 | */ | 3756 | */ |
3714 | SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, | 3757 | SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, |
3715 | unsigned long __user *, user_mask_ptr) | 3758 | unsigned long __user *, user_mask_ptr) |
@@ -3744,6 +3787,8 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, | |||
3744 | * | 3787 | * |
3745 | * This function yields the current CPU to other tasks. If there are no | 3788 | * This function yields the current CPU to other tasks. If there are no |
3746 | * other threads running on this CPU then this function will return. | 3789 | * other threads running on this CPU then this function will return. |
3790 | * | ||
3791 | * Return: 0. | ||
3747 | */ | 3792 | */ |
3748 | SYSCALL_DEFINE0(sched_yield) | 3793 | SYSCALL_DEFINE0(sched_yield) |
3749 | { | 3794 | { |
@@ -3869,7 +3914,7 @@ EXPORT_SYMBOL(yield); | |||
3869 | * It's the caller's job to ensure that the target task struct | 3914 | * It's the caller's job to ensure that the target task struct |
3870 | * can't go away on us before we can do any checks. | 3915 | * can't go away on us before we can do any checks. |
3871 | * | 3916 | * |
3872 | * Returns: | 3917 | * Return: |
3873 | * true (>0) if we indeed boosted the target task. | 3918 | * true (>0) if we indeed boosted the target task. |
3874 | * false (0) if we failed to boost the target. | 3919 | * false (0) if we failed to boost the target. |
3875 | * -ESRCH if there's no task to yield to. | 3920 | * -ESRCH if there's no task to yield to. |
@@ -3972,8 +4017,9 @@ long __sched io_schedule_timeout(long timeout) | |||
3972 | * sys_sched_get_priority_max - return maximum RT priority. | 4017 | * sys_sched_get_priority_max - return maximum RT priority. |
3973 | * @policy: scheduling class. | 4018 | * @policy: scheduling class. |
3974 | * | 4019 | * |
3975 | * this syscall returns the maximum rt_priority that can be used | 4020 | * Return: On success, this syscall returns the maximum |
3976 | * by a given scheduling class. | 4021 | * rt_priority that can be used by a given scheduling class. |
4022 | * On failure, a negative error code is returned. | ||
3977 | */ | 4023 | */ |
3978 | SYSCALL_DEFINE1(sched_get_priority_max, int, policy) | 4024 | SYSCALL_DEFINE1(sched_get_priority_max, int, policy) |
3979 | { | 4025 | { |
@@ -3997,8 +4043,9 @@ SYSCALL_DEFINE1(sched_get_priority_max, int, policy) | |||
3997 | * sys_sched_get_priority_min - return minimum RT priority. | 4043 | * sys_sched_get_priority_min - return minimum RT priority. |
3998 | * @policy: scheduling class. | 4044 | * @policy: scheduling class. |
3999 | * | 4045 | * |
4000 | * this syscall returns the minimum rt_priority that can be used | 4046 | * Return: On success, this syscall returns the minimum |
4001 | * by a given scheduling class. | 4047 | * rt_priority that can be used by a given scheduling class. |
4048 | * On failure, a negative error code is returned. | ||
4002 | */ | 4049 | */ |
4003 | SYSCALL_DEFINE1(sched_get_priority_min, int, policy) | 4050 | SYSCALL_DEFINE1(sched_get_priority_min, int, policy) |
4004 | { | 4051 | { |
@@ -4024,6 +4071,9 @@ SYSCALL_DEFINE1(sched_get_priority_min, int, policy) | |||
4024 | * | 4071 | * |
4025 | * this syscall writes the default timeslice value of a given process | 4072 | * this syscall writes the default timeslice value of a given process |
4026 | * into the user-space timespec buffer. A value of '0' means infinity. | 4073 | * into the user-space timespec buffer. A value of '0' means infinity. |
4074 | * | ||
4075 | * Return: On success, 0 and the timeslice is in @interval. Otherwise, | ||
4076 | * an error code. | ||
4027 | */ | 4077 | */ |
4028 | SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, | 4078 | SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, |
4029 | struct timespec __user *, interval) | 4079 | struct timespec __user *, interval) |
@@ -6632,6 +6682,8 @@ void normalize_rt_tasks(void) | |||
6632 | * @cpu: the processor in question. | 6682 | * @cpu: the processor in question. |
6633 | * | 6683 | * |
6634 | * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! | 6684 | * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! |
6685 | * | ||
6686 | * Return: The current task for @cpu. | ||
6635 | */ | 6687 | */ |
6636 | struct task_struct *curr_task(int cpu) | 6688 | struct task_struct *curr_task(int cpu) |
6637 | { | 6689 | { |
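The kernel/sched/core.c hunks above are documentation-only: loose "returns ..." remarks on the sched_* syscalls and on curr_task() are turned into the kernel-doc "Return:" section, which is also why the existing "Returns:" spellings are renamed. A minimal sketch of the convention, using a hypothetical helper rather than anything from the patch:

/**
 * example_get_priority - look up the priority limit for a policy (hypothetical)
 * @policy: scheduling class.
 *
 * Return: the priority limit for @policy on success, or a negative error
 * code otherwise.
 */
static int example_get_priority(int policy)
{
        return policy >= 0 ? policy : -EINVAL;
}

kernel-doc treats "Return:" as a named section, so keeping the keyword spelled exactly that way is what lets the generated documentation pick it up.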
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c index 1095e878a46f..8b836b376d91 100644 --- a/kernel/sched/cpupri.c +++ b/kernel/sched/cpupri.c | |||
@@ -62,7 +62,7 @@ static int convert_prio(int prio) | |||
62 | * any discrepancies created by racing against the uncertainty of the current | 62 | * any discrepancies created by racing against the uncertainty of the current |
63 | * priority configuration. | 63 | * priority configuration. |
64 | * | 64 | * |
65 | * Returns: (int)bool - CPUs were found | 65 | * Return: (int)bool - CPUs were found |
66 | */ | 66 | */ |
67 | int cpupri_find(struct cpupri *cp, struct task_struct *p, | 67 | int cpupri_find(struct cpupri *cp, struct task_struct *p, |
68 | struct cpumask *lowest_mask) | 68 | struct cpumask *lowest_mask) |
@@ -203,7 +203,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri) | |||
203 | * cpupri_init - initialize the cpupri structure | 203 | * cpupri_init - initialize the cpupri structure |
204 | * @cp: The cpupri context | 204 | * @cp: The cpupri context |
205 | * | 205 | * |
206 | * Returns: -ENOMEM if memory fails. | 206 | * Return: -ENOMEM on memory allocation failure. |
207 | */ | 207 | */ |
208 | int cpupri_init(struct cpupri *cp) | 208 | int cpupri_init(struct cpupri *cp) |
209 | { | 209 | { |
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 9565645e3202..68f1609ca149 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
@@ -2032,6 +2032,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) | |||
2032 | */ | 2032 | */ |
2033 | update_entity_load_avg(curr, 1); | 2033 | update_entity_load_avg(curr, 1); |
2034 | update_cfs_rq_blocked_load(cfs_rq, 1); | 2034 | update_cfs_rq_blocked_load(cfs_rq, 1); |
2035 | update_cfs_shares(cfs_rq); | ||
2035 | 2036 | ||
2036 | #ifdef CONFIG_SCHED_HRTICK | 2037 | #ifdef CONFIG_SCHED_HRTICK |
2037 | /* | 2038 | /* |
@@ -4280,6 +4281,8 @@ struct sg_lb_stats { | |||
4280 | * get_sd_load_idx - Obtain the load index for a given sched domain. | 4281 | * get_sd_load_idx - Obtain the load index for a given sched domain. |
4281 | * @sd: The sched_domain whose load_idx is to be obtained. | 4282 | * @sd: The sched_domain whose load_idx is to be obtained. |
4282 | * @idle: The Idle status of the CPU for whose sd load_icx is obtained. | 4283 | * @idle: The Idle status of the CPU for whose sd load_icx is obtained. |
4284 | * | ||
4285 | * Return: The load index. | ||
4283 | */ | 4286 | */ |
4284 | static inline int get_sd_load_idx(struct sched_domain *sd, | 4287 | static inline int get_sd_load_idx(struct sched_domain *sd, |
4285 | enum cpu_idle_type idle) | 4288 | enum cpu_idle_type idle) |
@@ -4574,6 +4577,9 @@ static inline void update_sg_lb_stats(struct lb_env *env, | |||
4574 | * | 4577 | * |
4575 | * Determine if @sg is a busier group than the previously selected | 4578 | * Determine if @sg is a busier group than the previously selected |
4576 | * busiest group. | 4579 | * busiest group. |
4580 | * | ||
4581 | * Return: %true if @sg is a busier group than the previously selected | ||
4582 | * busiest group. %false otherwise. | ||
4577 | */ | 4583 | */ |
4578 | static bool update_sd_pick_busiest(struct lb_env *env, | 4584 | static bool update_sd_pick_busiest(struct lb_env *env, |
4579 | struct sd_lb_stats *sds, | 4585 | struct sd_lb_stats *sds, |
@@ -4691,7 +4697,7 @@ static inline void update_sd_lb_stats(struct lb_env *env, | |||
4691 | * assuming lower CPU number will be equivalent to lower a SMT thread | 4697 | * assuming lower CPU number will be equivalent to lower a SMT thread |
4692 | * number. | 4698 | * number. |
4693 | * | 4699 | * |
4694 | * Returns 1 when packing is required and a task should be moved to | 4700 | * Return: 1 when packing is required and a task should be moved to |
4695 | * this CPU. The amount of the imbalance is returned in *imbalance. | 4701 | * this CPU. The amount of the imbalance is returned in *imbalance. |
4696 | * | 4702 | * |
4697 | * @env: The load balancing environment. | 4703 | * @env: The load balancing environment. |
@@ -4869,7 +4875,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s | |||
4869 | * @balance: Pointer to a variable indicating if this_cpu | 4875 | * @balance: Pointer to a variable indicating if this_cpu |
4870 | * is the appropriate cpu to perform load balancing at this_level. | 4876 | * is the appropriate cpu to perform load balancing at this_level. |
4871 | * | 4877 | * |
4872 | * Returns: - the busiest group if imbalance exists. | 4878 | * Return: - The busiest group if imbalance exists. |
4873 | * - If no imbalance and user has opted for power-savings balance, | 4879 | * - If no imbalance and user has opted for power-savings balance, |
4874 | * return the least loaded group whose CPUs can be | 4880 | * return the least loaded group whose CPUs can be |
4875 | * put to idle by rebalancing its tasks onto our group. | 4881 | * put to idle by rebalancing its tasks onto our group. |
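Besides the same "Return:" kernel-doc additions, the kernel/sched/fair.c change with behavioural weight is the one-line addition of update_cfs_shares(cfs_rq) to entity_tick(): the group's share is recomputed right after the per-entity and blocked load averages are refreshed, so it tracks the averages tick by tick instead of waiting for the next enqueue or dequeue. A loose illustration of that ordering, with made-up types that are not the scheduler's:

struct group_stat {
        double load_avg;        /* raw statistic, decayed every tick */
        double share;           /* value derived from load_avg */
};

static void tick_update(struct group_stat *g, double sample)
{
        /* refresh the raw average first ... */
        g->load_avg = 0.9 * g->load_avg + 0.1 * sample;
        /* ... then recompute what is derived from it, in the same tick */
        g->share = g->load_avg / (g->load_avg + 1.0);
}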
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 8ce9eefc5bb4..a6d098c6df3f 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -2169,12 +2169,57 @@ static cycle_t ftrace_update_time; | |||
2169 | static unsigned long ftrace_update_cnt; | 2169 | static unsigned long ftrace_update_cnt; |
2170 | unsigned long ftrace_update_tot_cnt; | 2170 | unsigned long ftrace_update_tot_cnt; |
2171 | 2171 | ||
2172 | static int ops_traces_mod(struct ftrace_ops *ops) | 2172 | static inline int ops_traces_mod(struct ftrace_ops *ops) |
2173 | { | 2173 | { |
2174 | struct ftrace_hash *hash; | 2174 | /* |
2175 | * Filter_hash being empty will default to trace module. | ||
2176 | * But notrace hash requires a test of individual module functions. | ||
2177 | */ | ||
2178 | return ftrace_hash_empty(ops->filter_hash) && | ||
2179 | ftrace_hash_empty(ops->notrace_hash); | ||
2180 | } | ||
2181 | |||
2182 | /* | ||
2183 | * Check if the current ops references the record. | ||
2184 | * | ||
2185 | * If the ops traces all functions, then it was already accounted for. | ||
2186 | * If the ops does not trace the current record function, skip it. | ||
2187 | * If the ops ignores the function via notrace filter, skip it. | ||
2188 | */ | ||
2189 | static inline bool | ||
2190 | ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec) | ||
2191 | { | ||
2192 | /* If ops isn't enabled, ignore it */ | ||
2193 | if (!(ops->flags & FTRACE_OPS_FL_ENABLED)) | ||
2194 | return 0; | ||
2195 | |||
2196 | /* If ops traces all mods, we already accounted for it */ | ||
2197 | if (ops_traces_mod(ops)) | ||
2198 | return 0; | ||
2199 | |||
2200 | /* The function must be in the filter */ | ||
2201 | if (!ftrace_hash_empty(ops->filter_hash) && | ||
2202 | !ftrace_lookup_ip(ops->filter_hash, rec->ip)) | ||
2203 | return 0; | ||
2204 | |||
2205 | /* If in notrace hash, we ignore it too */ | ||
2206 | if (ftrace_lookup_ip(ops->notrace_hash, rec->ip)) | ||
2207 | return 0; | ||
2208 | |||
2209 | return 1; | ||
2210 | } | ||
2211 | |||
2212 | static int referenced_filters(struct dyn_ftrace *rec) | ||
2213 | { | ||
2214 | struct ftrace_ops *ops; | ||
2215 | int cnt = 0; | ||
2175 | 2216 | ||
2176 | hash = ops->filter_hash; | 2217 | for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) { |
2177 | return ftrace_hash_empty(hash); | 2218 | if (ops_references_rec(ops, rec)) |
2219 | cnt++; | ||
2220 | } | ||
2221 | |||
2222 | return cnt; | ||
2178 | } | 2223 | } |
2179 | 2224 | ||
2180 | static int ftrace_update_code(struct module *mod) | 2225 | static int ftrace_update_code(struct module *mod) |
@@ -2183,6 +2228,7 @@ static int ftrace_update_code(struct module *mod) | |||
2183 | struct dyn_ftrace *p; | 2228 | struct dyn_ftrace *p; |
2184 | cycle_t start, stop; | 2229 | cycle_t start, stop; |
2185 | unsigned long ref = 0; | 2230 | unsigned long ref = 0; |
2231 | bool test = false; | ||
2186 | int i; | 2232 | int i; |
2187 | 2233 | ||
2188 | /* | 2234 | /* |
@@ -2196,9 +2242,12 @@ static int ftrace_update_code(struct module *mod) | |||
2196 | 2242 | ||
2197 | for (ops = ftrace_ops_list; | 2243 | for (ops = ftrace_ops_list; |
2198 | ops != &ftrace_list_end; ops = ops->next) { | 2244 | ops != &ftrace_list_end; ops = ops->next) { |
2199 | if (ops->flags & FTRACE_OPS_FL_ENABLED && | 2245 | if (ops->flags & FTRACE_OPS_FL_ENABLED) { |
2200 | ops_traces_mod(ops)) | 2246 | if (ops_traces_mod(ops)) |
2201 | ref++; | 2247 | ref++; |
2248 | else | ||
2249 | test = true; | ||
2250 | } | ||
2202 | } | 2251 | } |
2203 | } | 2252 | } |
2204 | 2253 | ||
@@ -2208,12 +2257,16 @@ static int ftrace_update_code(struct module *mod) | |||
2208 | for (pg = ftrace_new_pgs; pg; pg = pg->next) { | 2257 | for (pg = ftrace_new_pgs; pg; pg = pg->next) { |
2209 | 2258 | ||
2210 | for (i = 0; i < pg->index; i++) { | 2259 | for (i = 0; i < pg->index; i++) { |
2260 | int cnt = ref; | ||
2261 | |||
2211 | /* If something went wrong, bail without enabling anything */ | 2262 | /* If something went wrong, bail without enabling anything */ |
2212 | if (unlikely(ftrace_disabled)) | 2263 | if (unlikely(ftrace_disabled)) |
2213 | return -1; | 2264 | return -1; |
2214 | 2265 | ||
2215 | p = &pg->records[i]; | 2266 | p = &pg->records[i]; |
2216 | p->flags = ref; | 2267 | if (test) |
2268 | cnt += referenced_filters(p); | ||
2269 | p->flags = cnt; | ||
2217 | 2270 | ||
2218 | /* | 2271 | /* |
2219 | * Do the initial record conversion from mcount jump | 2272 | * Do the initial record conversion from mcount jump |
@@ -2233,7 +2286,7 @@ static int ftrace_update_code(struct module *mod) | |||
2233 | * conversion puts the module to the correct state, thus | 2286 | * conversion puts the module to the correct state, thus |
2234 | * passing the ftrace_make_call check. | 2287 | * passing the ftrace_make_call check. |
2235 | */ | 2288 | */ |
2236 | if (ftrace_start_up && ref) { | 2289 | if (ftrace_start_up && cnt) { |
2237 | int failed = __ftrace_replace_code(p, 1); | 2290 | int failed = __ftrace_replace_code(p, 1); |
2238 | if (failed) | 2291 | if (failed) |
2239 | ftrace_bug(failed, p->ip); | 2292 | ftrace_bug(failed, p->ip); |
@@ -3384,6 +3437,12 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) | |||
3384 | return add_hash_entry(hash, ip); | 3437 | return add_hash_entry(hash, ip); |
3385 | } | 3438 | } |
3386 | 3439 | ||
3440 | static void ftrace_ops_update_code(struct ftrace_ops *ops) | ||
3441 | { | ||
3442 | if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled) | ||
3443 | ftrace_run_update_code(FTRACE_UPDATE_CALLS); | ||
3444 | } | ||
3445 | |||
3387 | static int | 3446 | static int |
3388 | ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, | 3447 | ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, |
3389 | unsigned long ip, int remove, int reset, int enable) | 3448 | unsigned long ip, int remove, int reset, int enable) |
@@ -3426,9 +3485,8 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, | |||
3426 | 3485 | ||
3427 | mutex_lock(&ftrace_lock); | 3486 | mutex_lock(&ftrace_lock); |
3428 | ret = ftrace_hash_move(ops, enable, orig_hash, hash); | 3487 | ret = ftrace_hash_move(ops, enable, orig_hash, hash); |
3429 | if (!ret && ops->flags & FTRACE_OPS_FL_ENABLED | 3488 | if (!ret) |
3430 | && ftrace_enabled) | 3489 | ftrace_ops_update_code(ops); |
3431 | ftrace_run_update_code(FTRACE_UPDATE_CALLS); | ||
3432 | 3490 | ||
3433 | mutex_unlock(&ftrace_lock); | 3491 | mutex_unlock(&ftrace_lock); |
3434 | 3492 | ||
@@ -3655,9 +3713,8 @@ int ftrace_regex_release(struct inode *inode, struct file *file) | |||
3655 | mutex_lock(&ftrace_lock); | 3713 | mutex_lock(&ftrace_lock); |
3656 | ret = ftrace_hash_move(iter->ops, filter_hash, | 3714 | ret = ftrace_hash_move(iter->ops, filter_hash, |
3657 | orig_hash, iter->hash); | 3715 | orig_hash, iter->hash); |
3658 | if (!ret && (iter->ops->flags & FTRACE_OPS_FL_ENABLED) | 3716 | if (!ret) |
3659 | && ftrace_enabled) | 3717 | ftrace_ops_update_code(iter->ops); |
3660 | ftrace_run_update_code(FTRACE_UPDATE_CALLS); | ||
3661 | 3718 | ||
3662 | mutex_unlock(&ftrace_lock); | 3719 | mutex_unlock(&ftrace_lock); |
3663 | } | 3720 | } |
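Two themes in the kernel/trace/ftrace.c diff. First, when a module is loaded, ftrace_update_code() no longer assumes every enabled ops traces either everything or nothing: ops_references_rec() checks each new record against an ops' filter_hash and notrace_hash, referenced_filters() counts how many enabled ops actually reference it, and the record's initial flags get that count, so filtered functions in modules are enabled correctly. Second, the repeated "enabled && ftrace_enabled, then run FTRACE_UPDATE_CALLS" pattern is folded into ftrace_ops_update_code(). A hedged sketch of the counting idea with hypothetical types and linear lookups (the real code uses ftrace_hash lookups and counts trace-everything ops once globally rather than per record):

#include <stdbool.h>
#include <stddef.h>

struct ops {
        bool enabled;
        const unsigned long *filter;    /* empty set means "trace everything" */
        size_t nfilter;
        const unsigned long *notrace;
        size_t nnotrace;
        struct ops *next;
};

static bool in_set(const unsigned long *set, size_t n, unsigned long ip)
{
        size_t i;

        for (i = 0; i < n; i++)
                if (set[i] == ip)
                        return true;
        return false;
}

static int referenced_count(const struct ops *list, unsigned long ip)
{
        const struct ops *o;
        int cnt = 0;

        for (o = list; o; o = o->next) {
                if (!o->enabled)
                        continue;
                if (o->nfilter && !in_set(o->filter, o->nfilter, ip))
                        continue;       /* not selected by the filter */
                if (in_set(o->notrace, o->nnotrace, ip))
                        continue;       /* explicitly excluded */
                cnt++;
        }
        return cnt;
}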
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 882ec1dd1515..496f94d57698 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -243,20 +243,25 @@ int filter_current_check_discard(struct ring_buffer *buffer, | |||
243 | } | 243 | } |
244 | EXPORT_SYMBOL_GPL(filter_current_check_discard); | 244 | EXPORT_SYMBOL_GPL(filter_current_check_discard); |
245 | 245 | ||
246 | cycle_t ftrace_now(int cpu) | 246 | cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu) |
247 | { | 247 | { |
248 | u64 ts; | 248 | u64 ts; |
249 | 249 | ||
250 | /* Early boot up does not have a buffer yet */ | 250 | /* Early boot up does not have a buffer yet */ |
251 | if (!global_trace.trace_buffer.buffer) | 251 | if (!buf->buffer) |
252 | return trace_clock_local(); | 252 | return trace_clock_local(); |
253 | 253 | ||
254 | ts = ring_buffer_time_stamp(global_trace.trace_buffer.buffer, cpu); | 254 | ts = ring_buffer_time_stamp(buf->buffer, cpu); |
255 | ring_buffer_normalize_time_stamp(global_trace.trace_buffer.buffer, cpu, &ts); | 255 | ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts); |
256 | 256 | ||
257 | return ts; | 257 | return ts; |
258 | } | 258 | } |
259 | 259 | ||
260 | cycle_t ftrace_now(int cpu) | ||
261 | { | ||
262 | return buffer_ftrace_now(&global_trace.trace_buffer, cpu); | ||
263 | } | ||
264 | |||
260 | /** | 265 | /** |
261 | * tracing_is_enabled - Show if global_trace has been disabled | 266 | * tracing_is_enabled - Show if global_trace has been disabled |
262 | * | 267 | * |
@@ -1211,7 +1216,7 @@ void tracing_reset_online_cpus(struct trace_buffer *buf) | |||
1211 | /* Make sure all commits have finished */ | 1216 | /* Make sure all commits have finished */ |
1212 | synchronize_sched(); | 1217 | synchronize_sched(); |
1213 | 1218 | ||
1214 | buf->time_start = ftrace_now(buf->cpu); | 1219 | buf->time_start = buffer_ftrace_now(buf, buf->cpu); |
1215 | 1220 | ||
1216 | for_each_online_cpu(cpu) | 1221 | for_each_online_cpu(cpu) |
1217 | ring_buffer_reset_cpu(buffer, cpu); | 1222 | ring_buffer_reset_cpu(buffer, cpu); |
@@ -1219,11 +1224,6 @@ void tracing_reset_online_cpus(struct trace_buffer *buf) | |||
1219 | ring_buffer_record_enable(buffer); | 1224 | ring_buffer_record_enable(buffer); |
1220 | } | 1225 | } |
1221 | 1226 | ||
1222 | void tracing_reset_current(int cpu) | ||
1223 | { | ||
1224 | tracing_reset(&global_trace.trace_buffer, cpu); | ||
1225 | } | ||
1226 | |||
1227 | /* Must have trace_types_lock held */ | 1227 | /* Must have trace_types_lock held */ |
1228 | void tracing_reset_all_online_cpus(void) | 1228 | void tracing_reset_all_online_cpus(void) |
1229 | { | 1229 | { |
@@ -4151,6 +4151,7 @@ waitagain: | |||
4151 | memset(&iter->seq, 0, | 4151 | memset(&iter->seq, 0, |
4152 | sizeof(struct trace_iterator) - | 4152 | sizeof(struct trace_iterator) - |
4153 | offsetof(struct trace_iterator, seq)); | 4153 | offsetof(struct trace_iterator, seq)); |
4154 | cpumask_clear(iter->started); | ||
4154 | iter->pos = -1; | 4155 | iter->pos = -1; |
4155 | 4156 | ||
4156 | trace_event_read_lock(); | 4157 | trace_event_read_lock(); |
@@ -4468,7 +4469,7 @@ tracing_free_buffer_release(struct inode *inode, struct file *filp) | |||
4468 | 4469 | ||
4469 | /* disable tracing ? */ | 4470 | /* disable tracing ? */ |
4470 | if (trace_flags & TRACE_ITER_STOP_ON_FREE) | 4471 | if (trace_flags & TRACE_ITER_STOP_ON_FREE) |
4471 | tracing_off(); | 4472 | tracer_tracing_off(tr); |
4472 | /* resize the ring buffer to 0 */ | 4473 | /* resize the ring buffer to 0 */ |
4473 | tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS); | 4474 | tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS); |
4474 | 4475 | ||
@@ -4633,12 +4634,12 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf, | |||
4633 | * New clock may not be consistent with the previous clock. | 4634 | * New clock may not be consistent with the previous clock. |
4634 | * Reset the buffer so that it doesn't have incomparable timestamps. | 4635 | * Reset the buffer so that it doesn't have incomparable timestamps. |
4635 | */ | 4636 | */ |
4636 | tracing_reset_online_cpus(&global_trace.trace_buffer); | 4637 | tracing_reset_online_cpus(&tr->trace_buffer); |
4637 | 4638 | ||
4638 | #ifdef CONFIG_TRACER_MAX_TRACE | 4639 | #ifdef CONFIG_TRACER_MAX_TRACE |
4639 | if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer) | 4640 | if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer) |
4640 | ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func); | 4641 | ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func); |
4641 | tracing_reset_online_cpus(&global_trace.max_buffer); | 4642 | tracing_reset_online_cpus(&tr->max_buffer); |
4642 | #endif | 4643 | #endif |
4643 | 4644 | ||
4644 | mutex_unlock(&trace_types_lock); | 4645 | mutex_unlock(&trace_types_lock); |
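The kernel/trace/trace.c hunks push explicit buffer pointers through paths that used to reach for global_trace: buffer_ftrace_now() takes the trace_buffer it should time-stamp and ftrace_now() survives as a thin wrapper around it, tracing_reset_online_cpus() stamps the buffer it is actually resetting, tracing_clock_write() resets tr's buffers rather than the global ones, and tracing_free_buffer_release() turns off only its own instance (the unused tracing_reset_current() goes away and the pipe reader now clears iter->started between reads). A tiny sketch of the wrapper pattern with invented names:

struct buf { unsigned long long last_ts; };

static struct buf global_buf;

static unsigned long long buf_now(struct buf *b)
{
        return b->last_ts;              /* works on whichever instance is passed in */
}

static unsigned long long now(void)
{
        return buf_now(&global_buf);    /* old entry point kept as a thin wrapper */
}

Existing callers of the old name keep compiling, while per-instance code can pass its own buffer.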
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 898f868833f2..29a7ebcfb426 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
@@ -409,33 +409,42 @@ static void put_system(struct ftrace_subsystem_dir *dir) | |||
409 | mutex_unlock(&event_mutex); | 409 | mutex_unlock(&event_mutex); |
410 | } | 410 | } |
411 | 411 | ||
412 | /* | 412 | static void remove_subsystem(struct ftrace_subsystem_dir *dir) |
413 | * Open and update trace_array ref count. | ||
414 | * Must have the current trace_array passed to it. | ||
415 | */ | ||
416 | static int tracing_open_generic_file(struct inode *inode, struct file *filp) | ||
417 | { | 413 | { |
418 | struct ftrace_event_file *file = inode->i_private; | 414 | if (!dir) |
419 | struct trace_array *tr = file->tr; | 415 | return; |
420 | int ret; | ||
421 | 416 | ||
422 | if (trace_array_get(tr) < 0) | 417 | if (!--dir->nr_events) { |
423 | return -ENODEV; | 418 | debugfs_remove_recursive(dir->entry); |
419 | list_del(&dir->list); | ||
420 | __put_system_dir(dir); | ||
421 | } | ||
422 | } | ||
424 | 423 | ||
425 | ret = tracing_open_generic(inode, filp); | 424 | static void *event_file_data(struct file *filp) |
426 | if (ret < 0) | 425 | { |
427 | trace_array_put(tr); | 426 | return ACCESS_ONCE(file_inode(filp)->i_private); |
428 | return ret; | ||
429 | } | 427 | } |
430 | 428 | ||
431 | static int tracing_release_generic_file(struct inode *inode, struct file *filp) | 429 | static void remove_event_file_dir(struct ftrace_event_file *file) |
432 | { | 430 | { |
433 | struct ftrace_event_file *file = inode->i_private; | 431 | struct dentry *dir = file->dir; |
434 | struct trace_array *tr = file->tr; | 432 | struct dentry *child; |
435 | 433 | ||
436 | trace_array_put(tr); | 434 | if (dir) { |
435 | spin_lock(&dir->d_lock); /* probably unneeded */ | ||
436 | list_for_each_entry(child, &dir->d_subdirs, d_u.d_child) { | ||
437 | if (child->d_inode) /* probably unneeded */ | ||
438 | child->d_inode->i_private = NULL; | ||
439 | } | ||
440 | spin_unlock(&dir->d_lock); | ||
437 | 441 | ||
438 | return 0; | 442 | debugfs_remove_recursive(dir); |
443 | } | ||
444 | |||
445 | list_del(&file->list); | ||
446 | remove_subsystem(file->system); | ||
447 | kmem_cache_free(file_cachep, file); | ||
439 | } | 448 | } |
440 | 449 | ||
441 | /* | 450 | /* |
@@ -679,15 +688,25 @@ static ssize_t | |||
679 | event_enable_read(struct file *filp, char __user *ubuf, size_t cnt, | 688 | event_enable_read(struct file *filp, char __user *ubuf, size_t cnt, |
680 | loff_t *ppos) | 689 | loff_t *ppos) |
681 | { | 690 | { |
682 | struct ftrace_event_file *file = filp->private_data; | 691 | struct ftrace_event_file *file; |
692 | unsigned long flags; | ||
683 | char buf[4] = "0"; | 693 | char buf[4] = "0"; |
684 | 694 | ||
685 | if (file->flags & FTRACE_EVENT_FL_ENABLED && | 695 | mutex_lock(&event_mutex); |
686 | !(file->flags & FTRACE_EVENT_FL_SOFT_DISABLED)) | 696 | file = event_file_data(filp); |
697 | if (likely(file)) | ||
698 | flags = file->flags; | ||
699 | mutex_unlock(&event_mutex); | ||
700 | |||
701 | if (!file) | ||
702 | return -ENODEV; | ||
703 | |||
704 | if (flags & FTRACE_EVENT_FL_ENABLED && | ||
705 | !(flags & FTRACE_EVENT_FL_SOFT_DISABLED)) | ||
687 | strcpy(buf, "1"); | 706 | strcpy(buf, "1"); |
688 | 707 | ||
689 | if (file->flags & FTRACE_EVENT_FL_SOFT_DISABLED || | 708 | if (flags & FTRACE_EVENT_FL_SOFT_DISABLED || |
690 | file->flags & FTRACE_EVENT_FL_SOFT_MODE) | 709 | flags & FTRACE_EVENT_FL_SOFT_MODE) |
691 | strcat(buf, "*"); | 710 | strcat(buf, "*"); |
692 | 711 | ||
693 | strcat(buf, "\n"); | 712 | strcat(buf, "\n"); |
@@ -699,13 +718,10 @@ static ssize_t | |||
699 | event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, | 718 | event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, |
700 | loff_t *ppos) | 719 | loff_t *ppos) |
701 | { | 720 | { |
702 | struct ftrace_event_file *file = filp->private_data; | 721 | struct ftrace_event_file *file; |
703 | unsigned long val; | 722 | unsigned long val; |
704 | int ret; | 723 | int ret; |
705 | 724 | ||
706 | if (!file) | ||
707 | return -EINVAL; | ||
708 | |||
709 | ret = kstrtoul_from_user(ubuf, cnt, 10, &val); | 725 | ret = kstrtoul_from_user(ubuf, cnt, 10, &val); |
710 | if (ret) | 726 | if (ret) |
711 | return ret; | 727 | return ret; |
@@ -717,8 +733,11 @@ event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, | |||
717 | switch (val) { | 733 | switch (val) { |
718 | case 0: | 734 | case 0: |
719 | case 1: | 735 | case 1: |
736 | ret = -ENODEV; | ||
720 | mutex_lock(&event_mutex); | 737 | mutex_lock(&event_mutex); |
721 | ret = ftrace_event_enable_disable(file, val); | 738 | file = event_file_data(filp); |
739 | if (likely(file)) | ||
740 | ret = ftrace_event_enable_disable(file, val); | ||
722 | mutex_unlock(&event_mutex); | 741 | mutex_unlock(&event_mutex); |
723 | break; | 742 | break; |
724 | 743 | ||
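The pattern introduced above, and continued through the rest of the kernel/trace/trace_events.c diff: the per-event debugfs files stop pinning the trace_array on open, and every read or write re-resolves the event through event_file_data(), i.e. ACCESS_ONCE(file_inode(filp)->i_private), while holding event_mutex; remove_event_file_dir() clears i_private on every child dentry under that same mutex before removing the directory, so a handler that finds NULL simply returns -ENODEV. A hedged userspace sketch of the lookup-under-lock idea, with hypothetical types standing in for the inode/event pair:

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t event_mutex = PTHREAD_MUTEX_INITIALIZER;

struct event { int enabled; };

struct handle { struct event *priv; };  /* plays the role of i_private */

static int handle_read_enabled(struct handle *h)
{
        int ret;

        pthread_mutex_lock(&event_mutex);
        if (!h->priv)                   /* the event was removed under us */
                ret = -ENODEV;
        else
                ret = h->priv->enabled;
        pthread_mutex_unlock(&event_mutex);
        return ret;
}

static void handle_remove(struct handle *h)
{
        pthread_mutex_lock(&event_mutex);
        h->priv = NULL;                 /* later operations observe -ENODEV */
        pthread_mutex_unlock(&event_mutex);
}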
@@ -825,7 +844,7 @@ enum { | |||
825 | 844 | ||
826 | static void *f_next(struct seq_file *m, void *v, loff_t *pos) | 845 | static void *f_next(struct seq_file *m, void *v, loff_t *pos) |
827 | { | 846 | { |
828 | struct ftrace_event_call *call = m->private; | 847 | struct ftrace_event_call *call = event_file_data(m->private); |
829 | struct list_head *common_head = &ftrace_common_fields; | 848 | struct list_head *common_head = &ftrace_common_fields; |
830 | struct list_head *head = trace_get_fields(call); | 849 | struct list_head *head = trace_get_fields(call); |
831 | struct list_head *node = v; | 850 | struct list_head *node = v; |
@@ -857,7 +876,7 @@ static void *f_next(struct seq_file *m, void *v, loff_t *pos) | |||
857 | 876 | ||
858 | static int f_show(struct seq_file *m, void *v) | 877 | static int f_show(struct seq_file *m, void *v) |
859 | { | 878 | { |
860 | struct ftrace_event_call *call = m->private; | 879 | struct ftrace_event_call *call = event_file_data(m->private); |
861 | struct ftrace_event_field *field; | 880 | struct ftrace_event_field *field; |
862 | const char *array_descriptor; | 881 | const char *array_descriptor; |
863 | 882 | ||
@@ -910,6 +929,11 @@ static void *f_start(struct seq_file *m, loff_t *pos) | |||
910 | void *p = (void *)FORMAT_HEADER; | 929 | void *p = (void *)FORMAT_HEADER; |
911 | loff_t l = 0; | 930 | loff_t l = 0; |
912 | 931 | ||
932 | /* ->stop() is called even if ->start() fails */ | ||
933 | mutex_lock(&event_mutex); | ||
934 | if (!event_file_data(m->private)) | ||
935 | return ERR_PTR(-ENODEV); | ||
936 | |||
913 | while (l < *pos && p) | 937 | while (l < *pos && p) |
914 | p = f_next(m, p, &l); | 938 | p = f_next(m, p, &l); |
915 | 939 | ||
@@ -918,6 +942,7 @@ static void *f_start(struct seq_file *m, loff_t *pos) | |||
918 | 942 | ||
919 | static void f_stop(struct seq_file *m, void *p) | 943 | static void f_stop(struct seq_file *m, void *p) |
920 | { | 944 | { |
945 | mutex_unlock(&event_mutex); | ||
921 | } | 946 | } |
922 | 947 | ||
923 | static const struct seq_operations trace_format_seq_ops = { | 948 | static const struct seq_operations trace_format_seq_ops = { |
@@ -929,7 +954,6 @@ static const struct seq_operations trace_format_seq_ops = { | |||
929 | 954 | ||
930 | static int trace_format_open(struct inode *inode, struct file *file) | 955 | static int trace_format_open(struct inode *inode, struct file *file) |
931 | { | 956 | { |
932 | struct ftrace_event_call *call = inode->i_private; | ||
933 | struct seq_file *m; | 957 | struct seq_file *m; |
934 | int ret; | 958 | int ret; |
935 | 959 | ||
@@ -938,7 +962,7 @@ static int trace_format_open(struct inode *inode, struct file *file) | |||
938 | return ret; | 962 | return ret; |
939 | 963 | ||
940 | m = file->private_data; | 964 | m = file->private_data; |
941 | m->private = call; | 965 | m->private = file; |
942 | 966 | ||
943 | return 0; | 967 | return 0; |
944 | } | 968 | } |
@@ -946,14 +970,18 @@ static int trace_format_open(struct inode *inode, struct file *file) | |||
946 | static ssize_t | 970 | static ssize_t |
947 | event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) | 971 | event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) |
948 | { | 972 | { |
949 | struct ftrace_event_call *call = filp->private_data; | 973 | int id = (long)event_file_data(filp); |
950 | char buf[32]; | 974 | char buf[32]; |
951 | int len; | 975 | int len; |
952 | 976 | ||
953 | if (*ppos) | 977 | if (*ppos) |
954 | return 0; | 978 | return 0; |
955 | 979 | ||
956 | len = sprintf(buf, "%d\n", call->event.type); | 980 | if (unlikely(!id)) |
981 | return -ENODEV; | ||
982 | |||
983 | len = sprintf(buf, "%d\n", id); | ||
984 | |||
957 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); | 985 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); |
958 | } | 986 | } |
959 | 987 | ||
@@ -961,21 +989,28 @@ static ssize_t | |||
961 | event_filter_read(struct file *filp, char __user *ubuf, size_t cnt, | 989 | event_filter_read(struct file *filp, char __user *ubuf, size_t cnt, |
962 | loff_t *ppos) | 990 | loff_t *ppos) |
963 | { | 991 | { |
964 | struct ftrace_event_call *call = filp->private_data; | 992 | struct ftrace_event_call *call; |
965 | struct trace_seq *s; | 993 | struct trace_seq *s; |
966 | int r; | 994 | int r = -ENODEV; |
967 | 995 | ||
968 | if (*ppos) | 996 | if (*ppos) |
969 | return 0; | 997 | return 0; |
970 | 998 | ||
971 | s = kmalloc(sizeof(*s), GFP_KERNEL); | 999 | s = kmalloc(sizeof(*s), GFP_KERNEL); |
1000 | |||
972 | if (!s) | 1001 | if (!s) |
973 | return -ENOMEM; | 1002 | return -ENOMEM; |
974 | 1003 | ||
975 | trace_seq_init(s); | 1004 | trace_seq_init(s); |
976 | 1005 | ||
977 | print_event_filter(call, s); | 1006 | mutex_lock(&event_mutex); |
978 | r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len); | 1007 | call = event_file_data(filp); |
1008 | if (call) | ||
1009 | print_event_filter(call, s); | ||
1010 | mutex_unlock(&event_mutex); | ||
1011 | |||
1012 | if (call) | ||
1013 | r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len); | ||
979 | 1014 | ||
980 | kfree(s); | 1015 | kfree(s); |
981 | 1016 | ||
@@ -986,9 +1021,9 @@ static ssize_t | |||
986 | event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, | 1021 | event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, |
987 | loff_t *ppos) | 1022 | loff_t *ppos) |
988 | { | 1023 | { |
989 | struct ftrace_event_call *call = filp->private_data; | 1024 | struct ftrace_event_call *call; |
990 | char *buf; | 1025 | char *buf; |
991 | int err; | 1026 | int err = -ENODEV; |
992 | 1027 | ||
993 | if (cnt >= PAGE_SIZE) | 1028 | if (cnt >= PAGE_SIZE) |
994 | return -EINVAL; | 1029 | return -EINVAL; |
@@ -1003,7 +1038,12 @@ event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt, | |||
1003 | } | 1038 | } |
1004 | buf[cnt] = '\0'; | 1039 | buf[cnt] = '\0'; |
1005 | 1040 | ||
1006 | err = apply_event_filter(call, buf); | 1041 | mutex_lock(&event_mutex); |
1042 | call = event_file_data(filp); | ||
1043 | if (call) | ||
1044 | err = apply_event_filter(call, buf); | ||
1045 | mutex_unlock(&event_mutex); | ||
1046 | |||
1007 | free_page((unsigned long) buf); | 1047 | free_page((unsigned long) buf); |
1008 | if (err < 0) | 1048 | if (err < 0) |
1009 | return err; | 1049 | return err; |
@@ -1225,10 +1265,9 @@ static const struct file_operations ftrace_set_event_fops = { | |||
1225 | }; | 1265 | }; |
1226 | 1266 | ||
1227 | static const struct file_operations ftrace_enable_fops = { | 1267 | static const struct file_operations ftrace_enable_fops = { |
1228 | .open = tracing_open_generic_file, | 1268 | .open = tracing_open_generic, |
1229 | .read = event_enable_read, | 1269 | .read = event_enable_read, |
1230 | .write = event_enable_write, | 1270 | .write = event_enable_write, |
1231 | .release = tracing_release_generic_file, | ||
1232 | .llseek = default_llseek, | 1271 | .llseek = default_llseek, |
1233 | }; | 1272 | }; |
1234 | 1273 | ||
@@ -1240,7 +1279,6 @@ static const struct file_operations ftrace_event_format_fops = { | |||
1240 | }; | 1279 | }; |
1241 | 1280 | ||
1242 | static const struct file_operations ftrace_event_id_fops = { | 1281 | static const struct file_operations ftrace_event_id_fops = { |
1243 | .open = tracing_open_generic, | ||
1244 | .read = event_id_read, | 1282 | .read = event_id_read, |
1245 | .llseek = default_llseek, | 1283 | .llseek = default_llseek, |
1246 | }; | 1284 | }; |
@@ -1488,8 +1526,8 @@ event_create_dir(struct dentry *parent, | |||
1488 | 1526 | ||
1489 | #ifdef CONFIG_PERF_EVENTS | 1527 | #ifdef CONFIG_PERF_EVENTS |
1490 | if (call->event.type && call->class->reg) | 1528 | if (call->event.type && call->class->reg) |
1491 | trace_create_file("id", 0444, file->dir, call, | 1529 | trace_create_file("id", 0444, file->dir, |
1492 | id); | 1530 | (void *)(long)call->event.type, id); |
1493 | #endif | 1531 | #endif |
1494 | 1532 | ||
1495 | /* | 1533 | /* |
@@ -1514,33 +1552,16 @@ event_create_dir(struct dentry *parent, | |||
1514 | return 0; | 1552 | return 0; |
1515 | } | 1553 | } |
1516 | 1554 | ||
1517 | static void remove_subsystem(struct ftrace_subsystem_dir *dir) | ||
1518 | { | ||
1519 | if (!dir) | ||
1520 | return; | ||
1521 | |||
1522 | if (!--dir->nr_events) { | ||
1523 | debugfs_remove_recursive(dir->entry); | ||
1524 | list_del(&dir->list); | ||
1525 | __put_system_dir(dir); | ||
1526 | } | ||
1527 | } | ||
1528 | |||
1529 | static void remove_event_from_tracers(struct ftrace_event_call *call) | 1555 | static void remove_event_from_tracers(struct ftrace_event_call *call) |
1530 | { | 1556 | { |
1531 | struct ftrace_event_file *file; | 1557 | struct ftrace_event_file *file; |
1532 | struct trace_array *tr; | 1558 | struct trace_array *tr; |
1533 | 1559 | ||
1534 | do_for_each_event_file_safe(tr, file) { | 1560 | do_for_each_event_file_safe(tr, file) { |
1535 | |||
1536 | if (file->event_call != call) | 1561 | if (file->event_call != call) |
1537 | continue; | 1562 | continue; |
1538 | 1563 | ||
1539 | list_del(&file->list); | 1564 | remove_event_file_dir(file); |
1540 | debugfs_remove_recursive(file->dir); | ||
1541 | remove_subsystem(file->system); | ||
1542 | kmem_cache_free(file_cachep, file); | ||
1543 | |||
1544 | /* | 1565 | /* |
1545 | * The do_for_each_event_file_safe() is | 1566 | * The do_for_each_event_file_safe() is |
1546 | * a double loop. After finding the call for this | 1567 | * a double loop. After finding the call for this |
@@ -1692,16 +1713,53 @@ static void __trace_remove_event_call(struct ftrace_event_call *call) | |||
1692 | destroy_preds(call); | 1713 | destroy_preds(call); |
1693 | } | 1714 | } |
1694 | 1715 | ||
1716 | static int probe_remove_event_call(struct ftrace_event_call *call) | ||
1717 | { | ||
1718 | struct trace_array *tr; | ||
1719 | struct ftrace_event_file *file; | ||
1720 | |||
1721 | #ifdef CONFIG_PERF_EVENTS | ||
1722 | if (call->perf_refcount) | ||
1723 | return -EBUSY; | ||
1724 | #endif | ||
1725 | do_for_each_event_file(tr, file) { | ||
1726 | if (file->event_call != call) | ||
1727 | continue; | ||
1728 | /* | ||
1729 | * We can't rely on ftrace_event_enable_disable(enable => 0) | ||
1730 | * we are going to do, FTRACE_EVENT_FL_SOFT_MODE can suppress | ||
1731 | * TRACE_REG_UNREGISTER. | ||
1732 | */ | ||
1733 | if (file->flags & FTRACE_EVENT_FL_ENABLED) | ||
1734 | return -EBUSY; | ||
1735 | /* | ||
1736 | * The do_for_each_event_file_safe() is | ||
1737 | * a double loop. After finding the call for this | ||
1738 | * trace_array, we use break to jump to the next | ||
1739 | * trace_array. | ||
1740 | */ | ||
1741 | break; | ||
1742 | } while_for_each_event_file(); | ||
1743 | |||
1744 | __trace_remove_event_call(call); | ||
1745 | |||
1746 | return 0; | ||
1747 | } | ||
1748 | |||
1695 | /* Remove an event_call */ | 1749 | /* Remove an event_call */ |
1696 | void trace_remove_event_call(struct ftrace_event_call *call) | 1750 | int trace_remove_event_call(struct ftrace_event_call *call) |
1697 | { | 1751 | { |
1752 | int ret; | ||
1753 | |||
1698 | mutex_lock(&trace_types_lock); | 1754 | mutex_lock(&trace_types_lock); |
1699 | mutex_lock(&event_mutex); | 1755 | mutex_lock(&event_mutex); |
1700 | down_write(&trace_event_sem); | 1756 | down_write(&trace_event_sem); |
1701 | __trace_remove_event_call(call); | 1757 | ret = probe_remove_event_call(call); |
1702 | up_write(&trace_event_sem); | 1758 | up_write(&trace_event_sem); |
1703 | mutex_unlock(&event_mutex); | 1759 | mutex_unlock(&event_mutex); |
1704 | mutex_unlock(&trace_types_lock); | 1760 | mutex_unlock(&trace_types_lock); |
1761 | |||
1762 | return ret; | ||
1705 | } | 1763 | } |
1706 | 1764 | ||
1707 | #define for_each_event(event, start, end) \ | 1765 | #define for_each_event(event, start, end) \ |
@@ -2270,12 +2328,8 @@ __trace_remove_event_dirs(struct trace_array *tr) | |||
2270 | { | 2328 | { |
2271 | struct ftrace_event_file *file, *next; | 2329 | struct ftrace_event_file *file, *next; |
2272 | 2330 | ||
2273 | list_for_each_entry_safe(file, next, &tr->events, list) { | 2331 | list_for_each_entry_safe(file, next, &tr->events, list) |
2274 | list_del(&file->list); | 2332 | remove_event_file_dir(file); |
2275 | debugfs_remove_recursive(file->dir); | ||
2276 | remove_subsystem(file->system); | ||
2277 | kmem_cache_free(file_cachep, file); | ||
2278 | } | ||
2279 | } | 2333 | } |
2280 | 2334 | ||
2281 | static void | 2335 | static void |
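The back half of the trace_events.c diff makes event removal able to fail: the "id" file stores the event type directly in i_private, the format and filter handlers do the same deferred lookup as above, and trace_remove_event_call() now returns the result of probe_remove_event_call(), which refuses to tear down an event that perf still references or that is still enabled in any trace instance. A hedged sketch of that busy check with invented types:

#include <errno.h>
#include <stdbool.h>

struct event_call {
        int perf_refcount;
        bool enabled_somewhere;
};

static int probe_remove_event(struct event_call *call)
{
        if (call->perf_refcount)
                return -EBUSY;          /* perf is still attached */
        if (call->enabled_somewhere)
                return -EBUSY;          /* still enabled in some instance */
        /* ... safe to remove the files and free the call here ... */
        return 0;
}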
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 0c7b75a8acc8..97daa8cf958d 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c | |||
@@ -637,17 +637,15 @@ static void append_filter_err(struct filter_parse_state *ps, | |||
637 | free_page((unsigned long) buf); | 637 | free_page((unsigned long) buf); |
638 | } | 638 | } |
639 | 639 | ||
640 | /* caller must hold event_mutex */ | ||
640 | void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s) | 641 | void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s) |
641 | { | 642 | { |
642 | struct event_filter *filter; | 643 | struct event_filter *filter = call->filter; |
643 | 644 | ||
644 | mutex_lock(&event_mutex); | ||
645 | filter = call->filter; | ||
646 | if (filter && filter->filter_string) | 645 | if (filter && filter->filter_string) |
647 | trace_seq_printf(s, "%s\n", filter->filter_string); | 646 | trace_seq_printf(s, "%s\n", filter->filter_string); |
648 | else | 647 | else |
649 | trace_seq_puts(s, "none\n"); | 648 | trace_seq_puts(s, "none\n"); |
650 | mutex_unlock(&event_mutex); | ||
651 | } | 649 | } |
652 | 650 | ||
653 | void print_subsystem_event_filter(struct event_subsystem *system, | 651 | void print_subsystem_event_filter(struct event_subsystem *system, |
@@ -1841,23 +1839,22 @@ static int create_system_filter(struct event_subsystem *system, | |||
1841 | return err; | 1839 | return err; |
1842 | } | 1840 | } |
1843 | 1841 | ||
1842 | /* caller must hold event_mutex */ | ||
1844 | int apply_event_filter(struct ftrace_event_call *call, char *filter_string) | 1843 | int apply_event_filter(struct ftrace_event_call *call, char *filter_string) |
1845 | { | 1844 | { |
1846 | struct event_filter *filter; | 1845 | struct event_filter *filter; |
1847 | int err = 0; | 1846 | int err; |
1848 | |||
1849 | mutex_lock(&event_mutex); | ||
1850 | 1847 | ||
1851 | if (!strcmp(strstrip(filter_string), "0")) { | 1848 | if (!strcmp(strstrip(filter_string), "0")) { |
1852 | filter_disable(call); | 1849 | filter_disable(call); |
1853 | filter = call->filter; | 1850 | filter = call->filter; |
1854 | if (!filter) | 1851 | if (!filter) |
1855 | goto out_unlock; | 1852 | return 0; |
1856 | RCU_INIT_POINTER(call->filter, NULL); | 1853 | RCU_INIT_POINTER(call->filter, NULL); |
1857 | /* Make sure the filter is not being used */ | 1854 | /* Make sure the filter is not being used */ |
1858 | synchronize_sched(); | 1855 | synchronize_sched(); |
1859 | __free_filter(filter); | 1856 | __free_filter(filter); |
1860 | goto out_unlock; | 1857 | return 0; |
1861 | } | 1858 | } |
1862 | 1859 | ||
1863 | err = create_filter(call, filter_string, true, &filter); | 1860 | err = create_filter(call, filter_string, true, &filter); |
@@ -1884,8 +1881,6 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string) | |||
1884 | __free_filter(tmp); | 1881 | __free_filter(tmp); |
1885 | } | 1882 | } |
1886 | } | 1883 | } |
1887 | out_unlock: | ||
1888 | mutex_unlock(&event_mutex); | ||
1889 | 1884 | ||
1890 | return err; | 1885 | return err; |
1891 | } | 1886 | } |
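With the filter read/write handlers now taking event_mutex themselves (see the trace_events.c hunks above), print_event_filter() and apply_event_filter() drop their own locking and instead carry a "caller must hold event_mutex" comment. Not part of this patch, but a common way to make such a contract checkable in kernel code is lockdep_assert_held(); a kernel-style sketch, body elided:

/* caller must hold event_mutex */
void print_event_filter(struct ftrace_event_call *call, struct trace_seq *s)
{
        lockdep_assert_held(&event_mutex);      /* fires if a caller forgets the lock */
        /* ... format call->filter into s, as in the code above ... */
}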
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 3811487e7a7a..243f6834d026 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
@@ -95,7 +95,7 @@ static __kprobes bool trace_probe_is_on_module(struct trace_probe *tp) | |||
95 | } | 95 | } |
96 | 96 | ||
97 | static int register_probe_event(struct trace_probe *tp); | 97 | static int register_probe_event(struct trace_probe *tp); |
98 | static void unregister_probe_event(struct trace_probe *tp); | 98 | static int unregister_probe_event(struct trace_probe *tp); |
99 | 99 | ||
100 | static DEFINE_MUTEX(probe_lock); | 100 | static DEFINE_MUTEX(probe_lock); |
101 | static LIST_HEAD(probe_list); | 101 | static LIST_HEAD(probe_list); |
@@ -351,9 +351,12 @@ static int unregister_trace_probe(struct trace_probe *tp) | |||
351 | if (trace_probe_is_enabled(tp)) | 351 | if (trace_probe_is_enabled(tp)) |
352 | return -EBUSY; | 352 | return -EBUSY; |
353 | 353 | ||
354 | /* Will fail if probe is being used by ftrace or perf */ | ||
355 | if (unregister_probe_event(tp)) | ||
356 | return -EBUSY; | ||
357 | |||
354 | __unregister_trace_probe(tp); | 358 | __unregister_trace_probe(tp); |
355 | list_del(&tp->list); | 359 | list_del(&tp->list); |
356 | unregister_probe_event(tp); | ||
357 | 360 | ||
358 | return 0; | 361 | return 0; |
359 | } | 362 | } |
@@ -632,7 +635,9 @@ static int release_all_trace_probes(void) | |||
632 | /* TODO: Use batch unregistration */ | 635 | /* TODO: Use batch unregistration */ |
633 | while (!list_empty(&probe_list)) { | 636 | while (!list_empty(&probe_list)) { |
634 | tp = list_entry(probe_list.next, struct trace_probe, list); | 637 | tp = list_entry(probe_list.next, struct trace_probe, list); |
635 | unregister_trace_probe(tp); | 638 | ret = unregister_trace_probe(tp); |
639 | if (ret) | ||
640 | goto end; | ||
636 | free_trace_probe(tp); | 641 | free_trace_probe(tp); |
637 | } | 642 | } |
638 | 643 | ||
@@ -1247,11 +1252,15 @@ static int register_probe_event(struct trace_probe *tp) | |||
1247 | return ret; | 1252 | return ret; |
1248 | } | 1253 | } |
1249 | 1254 | ||
1250 | static void unregister_probe_event(struct trace_probe *tp) | 1255 | static int unregister_probe_event(struct trace_probe *tp) |
1251 | { | 1256 | { |
1257 | int ret; | ||
1258 | |||
1252 | /* tp->event is unregistered in trace_remove_event_call() */ | 1259 | /* tp->event is unregistered in trace_remove_event_call() */ |
1253 | trace_remove_event_call(&tp->call); | 1260 | ret = trace_remove_event_call(&tp->call); |
1254 | kfree(tp->call.print_fmt); | 1261 | if (!ret) |
1262 | kfree(tp->call.print_fmt); | ||
1263 | return ret; | ||
1255 | } | 1264 | } |
1256 | 1265 | ||
1257 | /* Make a debugfs interface for controlling probe points */ | 1266 | /* Make a debugfs interface for controlling probe points */ |
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index a23d2d71188e..272261b5f94f 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c | |||
@@ -70,7 +70,7 @@ struct trace_uprobe { | |||
70 | (sizeof(struct probe_arg) * (n))) | 70 | (sizeof(struct probe_arg) * (n))) |
71 | 71 | ||
72 | static int register_uprobe_event(struct trace_uprobe *tu); | 72 | static int register_uprobe_event(struct trace_uprobe *tu); |
73 | static void unregister_uprobe_event(struct trace_uprobe *tu); | 73 | static int unregister_uprobe_event(struct trace_uprobe *tu); |
74 | 74 | ||
75 | static DEFINE_MUTEX(uprobe_lock); | 75 | static DEFINE_MUTEX(uprobe_lock); |
76 | static LIST_HEAD(uprobe_list); | 76 | static LIST_HEAD(uprobe_list); |
@@ -164,11 +164,17 @@ static struct trace_uprobe *find_probe_event(const char *event, const char *grou | |||
164 | } | 164 | } |
165 | 165 | ||
166 | /* Unregister a trace_uprobe and probe_event: call with locking uprobe_lock */ | 166 | /* Unregister a trace_uprobe and probe_event: call with locking uprobe_lock */ |
167 | static void unregister_trace_uprobe(struct trace_uprobe *tu) | 167 | static int unregister_trace_uprobe(struct trace_uprobe *tu) |
168 | { | 168 | { |
169 | int ret; | ||
170 | |||
171 | ret = unregister_uprobe_event(tu); | ||
172 | if (ret) | ||
173 | return ret; | ||
174 | |||
169 | list_del(&tu->list); | 175 | list_del(&tu->list); |
170 | unregister_uprobe_event(tu); | ||
171 | free_trace_uprobe(tu); | 176 | free_trace_uprobe(tu); |
177 | return 0; | ||
172 | } | 178 | } |
173 | 179 | ||
174 | /* Register a trace_uprobe and probe_event */ | 180 | /* Register a trace_uprobe and probe_event */ |
@@ -181,9 +187,12 @@ static int register_trace_uprobe(struct trace_uprobe *tu) | |||
181 | 187 | ||
182 | /* register as an event */ | 188 | /* register as an event */ |
183 | old_tp = find_probe_event(tu->call.name, tu->call.class->system); | 189 | old_tp = find_probe_event(tu->call.name, tu->call.class->system); |
184 | if (old_tp) | 190 | if (old_tp) { |
185 | /* delete old event */ | 191 | /* delete old event */ |
186 | unregister_trace_uprobe(old_tp); | 192 | ret = unregister_trace_uprobe(old_tp); |
193 | if (ret) | ||
194 | goto end; | ||
195 | } | ||
187 | 196 | ||
188 | ret = register_uprobe_event(tu); | 197 | ret = register_uprobe_event(tu); |
189 | if (ret) { | 198 | if (ret) { |
@@ -256,6 +265,8 @@ static int create_trace_uprobe(int argc, char **argv) | |||
256 | group = UPROBE_EVENT_SYSTEM; | 265 | group = UPROBE_EVENT_SYSTEM; |
257 | 266 | ||
258 | if (is_delete) { | 267 | if (is_delete) { |
268 | int ret; | ||
269 | |||
259 | if (!event) { | 270 | if (!event) { |
260 | pr_info("Delete command needs an event name.\n"); | 271 | pr_info("Delete command needs an event name.\n"); |
261 | return -EINVAL; | 272 | return -EINVAL; |
@@ -269,9 +280,9 @@ static int create_trace_uprobe(int argc, char **argv) | |||
269 | return -ENOENT; | 280 | return -ENOENT; |
270 | } | 281 | } |
271 | /* delete an event */ | 282 | /* delete an event */ |
272 | unregister_trace_uprobe(tu); | 283 | ret = unregister_trace_uprobe(tu); |
273 | mutex_unlock(&uprobe_lock); | 284 | mutex_unlock(&uprobe_lock); |
274 | return 0; | 285 | return ret; |
275 | } | 286 | } |
276 | 287 | ||
277 | if (argc < 2) { | 288 | if (argc < 2) { |
@@ -408,16 +419,20 @@ fail_address_parse: | |||
408 | return ret; | 419 | return ret; |
409 | } | 420 | } |
410 | 421 | ||
411 | static void cleanup_all_probes(void) | 422 | static int cleanup_all_probes(void) |
412 | { | 423 | { |
413 | struct trace_uprobe *tu; | 424 | struct trace_uprobe *tu; |
425 | int ret = 0; | ||
414 | 426 | ||
415 | mutex_lock(&uprobe_lock); | 427 | mutex_lock(&uprobe_lock); |
416 | while (!list_empty(&uprobe_list)) { | 428 | while (!list_empty(&uprobe_list)) { |
417 | tu = list_entry(uprobe_list.next, struct trace_uprobe, list); | 429 | tu = list_entry(uprobe_list.next, struct trace_uprobe, list); |
418 | unregister_trace_uprobe(tu); | 430 | ret = unregister_trace_uprobe(tu); |
431 | if (ret) | ||
432 | break; | ||
419 | } | 433 | } |
420 | mutex_unlock(&uprobe_lock); | 434 | mutex_unlock(&uprobe_lock); |
435 | return ret; | ||
421 | } | 436 | } |
422 | 437 | ||
423 | /* Probes listing interfaces */ | 438 | /* Probes listing interfaces */ |
@@ -462,8 +477,13 @@ static const struct seq_operations probes_seq_op = { | |||
462 | 477 | ||
463 | static int probes_open(struct inode *inode, struct file *file) | 478 | static int probes_open(struct inode *inode, struct file *file) |
464 | { | 479 | { |
465 | if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) | 480 | int ret; |
466 | cleanup_all_probes(); | 481 | |
482 | if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) { | ||
483 | ret = cleanup_all_probes(); | ||
484 | if (ret) | ||
485 | return ret; | ||
486 | } | ||
467 | 487 | ||
468 | return seq_open(file, &probes_seq_op); | 488 | return seq_open(file, &probes_seq_op); |
469 | } | 489 | } |
@@ -968,12 +988,17 @@ static int register_uprobe_event(struct trace_uprobe *tu) | |||
968 | return ret; | 988 | return ret; |
969 | } | 989 | } |
970 | 990 | ||
971 | static void unregister_uprobe_event(struct trace_uprobe *tu) | 991 | static int unregister_uprobe_event(struct trace_uprobe *tu) |
972 | { | 992 | { |
993 | int ret; | ||
994 | |||
973 | /* tu->event is unregistered in trace_remove_event_call() */ | 995 | /* tu->event is unregistered in trace_remove_event_call() */ |
974 | trace_remove_event_call(&tu->call); | 996 | ret = trace_remove_event_call(&tu->call); |
997 | if (ret) | ||
998 | return ret; | ||
975 | kfree(tu->call.print_fmt); | 999 | kfree(tu->call.print_fmt); |
976 | tu->call.print_fmt = NULL; | 1000 | tu->call.print_fmt = NULL; |
1001 | return 0; | ||
977 | } | 1002 | } |
978 | 1003 | ||
979 | /* Make a trace interface for controling probe points */ | 1004 | /* Make a trace interface for controling probe points */ |
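kernel/trace/trace_kprobe.c and trace_uprobe.c consume the new return value: unregister_probe_event() and unregister_uprobe_event() pass up trace_remove_event_call()'s result, unregister_trace_probe() and unregister_trace_uprobe() bail out with -EBUSY before touching the probe, and the bulk-cleanup paths (release_all_trace_probes(), cleanup_all_probes(), probes_open() with O_TRUNC) stop on the first failure instead of freeing a probe whose event files are still open. A hedged sketch of the "unregister first, free only on success" ordering with invented types:

#include <errno.h>
#include <stdlib.h>

struct probe { struct probe *next; int busy; };

static int unregister_event(struct probe *p)
{
        return p->busy ? -EBUSY : 0;    /* mirrors trace_remove_event_call() failing */
}

static int release_all(struct probe **list)
{
        while (*list) {
                struct probe *p = *list;
                int ret = unregister_event(p);

                if (ret)
                        return ret;     /* stop: this probe stays registered and allocated */
                *list = p->next;
                free(p);                /* freed only after a clean unregister */
        }
        return 0;
}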
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index d8c30db06c5b..9064b919a406 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c | |||
@@ -62,6 +62,9 @@ int create_user_ns(struct cred *new) | |||
62 | kgid_t group = new->egid; | 62 | kgid_t group = new->egid; |
63 | int ret; | 63 | int ret; |
64 | 64 | ||
65 | if (parent_ns->level > 32) | ||
66 | return -EUSERS; | ||
67 | |||
65 | /* | 68 | /* |
66 | * Verify that we can not violate the policy of which files | 69 | * Verify that we can not violate the policy of which files |
67 | * may be accessed that is specified by the root directory, | 70 | * may be accessed that is specified by the root directory, |
@@ -92,6 +95,7 @@ int create_user_ns(struct cred *new) | |||
92 | atomic_set(&ns->count, 1); | 95 | atomic_set(&ns->count, 1); |
93 | /* Leave the new->user_ns reference with the new user namespace. */ | 96 | /* Leave the new->user_ns reference with the new user namespace. */ |
94 | ns->parent = parent_ns; | 97 | ns->parent = parent_ns; |
98 | ns->level = parent_ns->level + 1; | ||
95 | ns->owner = owner; | 99 | ns->owner = owner; |
96 | ns->group = group; | 100 | ns->group = group; |
97 | 101 | ||
@@ -105,16 +109,21 @@ int create_user_ns(struct cred *new) | |||
105 | int unshare_userns(unsigned long unshare_flags, struct cred **new_cred) | 109 | int unshare_userns(unsigned long unshare_flags, struct cred **new_cred) |
106 | { | 110 | { |
107 | struct cred *cred; | 111 | struct cred *cred; |
112 | int err = -ENOMEM; | ||
108 | 113 | ||
109 | if (!(unshare_flags & CLONE_NEWUSER)) | 114 | if (!(unshare_flags & CLONE_NEWUSER)) |
110 | return 0; | 115 | return 0; |
111 | 116 | ||
112 | cred = prepare_creds(); | 117 | cred = prepare_creds(); |
113 | if (!cred) | 118 | if (cred) { |
114 | return -ENOMEM; | 119 | err = create_user_ns(cred); |
120 | if (err) | ||
121 | put_cred(cred); | ||
122 | else | ||
123 | *new_cred = cred; | ||
124 | } | ||
115 | 125 | ||
116 | *new_cred = cred; | 126 | return err; |
117 | return create_user_ns(cred); | ||
118 | } | 127 | } |
119 | 128 | ||
120 | void free_user_ns(struct user_namespace *ns) | 129 | void free_user_ns(struct user_namespace *ns) |
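Two separate fixes in kernel/user_namespace.c: create_user_ns() records a nesting ->level and caps user-namespace nesting (creation fails with -EUSERS once the parent's level exceeds 32), and unshare_userns() is restructured so that when create_user_ns() fails the prepared cred is dropped and *new_cred is never set, instead of handing a half-set-up cred back to the caller. A hedged sketch of the depth cap with invented types:

#include <errno.h>
#include <stdlib.h>

struct ns { struct ns *parent; int level; };

static int create_child_ns(struct ns *parent, struct ns **out)
{
        struct ns *child;

        if (parent->level > 32)         /* same cutoff as the patch */
                return -EUSERS;

        child = calloc(1, sizeof(*child));
        if (!child)
                return -ENOMEM;

        child->parent = parent;
        child->level = parent->level + 1;
        *out = child;
        return 0;
}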
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 0b72e816b8d0..7f5d4be22034 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -2817,6 +2817,19 @@ already_gone: | |||
2817 | return false; | 2817 | return false; |
2818 | } | 2818 | } |
2819 | 2819 | ||
2820 | static bool __flush_work(struct work_struct *work) | ||
2821 | { | ||
2822 | struct wq_barrier barr; | ||
2823 | |||
2824 | if (start_flush_work(work, &barr)) { | ||
2825 | wait_for_completion(&barr.done); | ||
2826 | destroy_work_on_stack(&barr.work); | ||
2827 | return true; | ||
2828 | } else { | ||
2829 | return false; | ||
2830 | } | ||
2831 | } | ||
2832 | |||
2820 | /** | 2833 | /** |
2821 | * flush_work - wait for a work to finish executing the last queueing instance | 2834 | * flush_work - wait for a work to finish executing the last queueing instance |
2822 | * @work: the work to flush | 2835 | * @work: the work to flush |
@@ -2830,18 +2843,10 @@ already_gone: | |||
2830 | */ | 2843 | */ |
2831 | bool flush_work(struct work_struct *work) | 2844 | bool flush_work(struct work_struct *work) |
2832 | { | 2845 | { |
2833 | struct wq_barrier barr; | ||
2834 | |||
2835 | lock_map_acquire(&work->lockdep_map); | 2846 | lock_map_acquire(&work->lockdep_map); |
2836 | lock_map_release(&work->lockdep_map); | 2847 | lock_map_release(&work->lockdep_map); |
2837 | 2848 | ||
2838 | if (start_flush_work(work, &barr)) { | 2849 | return __flush_work(work); |
2839 | wait_for_completion(&barr.done); | ||
2840 | destroy_work_on_stack(&barr.work); | ||
2841 | return true; | ||
2842 | } else { | ||
2843 | return false; | ||
2844 | } | ||
2845 | } | 2850 | } |
2846 | EXPORT_SYMBOL_GPL(flush_work); | 2851 | EXPORT_SYMBOL_GPL(flush_work); |
2847 | 2852 | ||
@@ -3411,6 +3416,12 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to, | |||
3411 | { | 3416 | { |
3412 | to->nice = from->nice; | 3417 | to->nice = from->nice; |
3413 | cpumask_copy(to->cpumask, from->cpumask); | 3418 | cpumask_copy(to->cpumask, from->cpumask); |
3419 | /* | ||
3420 | * Unlike hash and equality test, this function doesn't ignore | ||
3421 | * ->no_numa as it is used for both pool and wq attrs. Instead, | ||
3422 | * get_unbound_pool() explicitly clears ->no_numa after copying. | ||
3423 | */ | ||
3424 | to->no_numa = from->no_numa; | ||
3414 | } | 3425 | } |
3415 | 3426 | ||
3416 | /* hash value of the content of @attr */ | 3427 | /* hash value of the content of @attr */ |
@@ -3578,6 +3589,12 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs) | |||
3578 | lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */ | 3589 | lockdep_set_subclass(&pool->lock, 1); /* see put_pwq() */ |
3579 | copy_workqueue_attrs(pool->attrs, attrs); | 3590 | copy_workqueue_attrs(pool->attrs, attrs); |
3580 | 3591 | ||
3592 | /* | ||
3593 | * no_numa isn't a worker_pool attribute, always clear it. See | ||
3594 | * 'struct workqueue_attrs' comments for detail. | ||
3595 | */ | ||
3596 | pool->attrs->no_numa = false; | ||
3597 | |||
3581 | /* if cpumask is contained inside a NUMA node, we belong to that node */ | 3598 | /* if cpumask is contained inside a NUMA node, we belong to that node */ |
3582 | if (wq_numa_enabled) { | 3599 | if (wq_numa_enabled) { |
3583 | for_each_node(node) { | 3600 | for_each_node(node) { |
@@ -4756,7 +4773,14 @@ long work_on_cpu(int cpu, long (*fn)(void *), void *arg) | |||
4756 | 4773 | ||
4757 | INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn); | 4774 | INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn); |
4758 | schedule_work_on(cpu, &wfc.work); | 4775 | schedule_work_on(cpu, &wfc.work); |
4759 | flush_work(&wfc.work); | 4776 | |
4777 | /* | ||
4778 | * The work item is on-stack and can't lead to deadlock through | ||
4779 | * flushing. Use __flush_work() to avoid spurious lockdep warnings | ||
4780 | * when work_on_cpu()s are nested. | ||
4781 | */ | ||
4782 | __flush_work(&wfc.work); | ||
4783 | |||
4760 | return wfc.ret; | 4784 | return wfc.ret; |
4761 | } | 4785 | } |
4762 | EXPORT_SYMBOL_GPL(work_on_cpu); | 4786 | EXPORT_SYMBOL_GPL(work_on_cpu); |
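Three things happen in kernel/workqueue.c: the body of flush_work() moves into __flush_work(), which skips the lockdep_map acquire/release; work_on_cpu() flushes its on-stack work item through __flush_work(), so nested work_on_cpu() calls stop producing spurious lockdep reports; and copy_workqueue_attrs() now copies ->no_numa, with get_unbound_pool() explicitly clearing it because no_numa is a workqueue attribute, not a pool attribute. A hedged sketch of the wrapper/core split with made-up names:

static int do_flush_core(int id)
{
        /* ... wait for the item identified by id to finish ... */
        return 1;
}

static void annotate_for_checker(int id)
{
        (void)id;                       /* stands in for the lockdep acquire/release pair */
}

int do_flush(int id)
{
        annotate_for_checker(id);       /* normal callers keep the annotation */
        return do_flush_core(id);
}

Callers that know the annotation would misfire, like the on-stack work item in work_on_cpu(), go straight to the core helper.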
diff --git a/mm/fremap.c b/mm/fremap.c index 87da3590c61e..5bff08147768 100644 --- a/mm/fremap.c +++ b/mm/fremap.c | |||
@@ -57,17 +57,22 @@ static int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma, | |||
57 | unsigned long addr, unsigned long pgoff, pgprot_t prot) | 57 | unsigned long addr, unsigned long pgoff, pgprot_t prot) |
58 | { | 58 | { |
59 | int err = -ENOMEM; | 59 | int err = -ENOMEM; |
60 | pte_t *pte; | 60 | pte_t *pte, ptfile; |
61 | spinlock_t *ptl; | 61 | spinlock_t *ptl; |
62 | 62 | ||
63 | pte = get_locked_pte(mm, addr, &ptl); | 63 | pte = get_locked_pte(mm, addr, &ptl); |
64 | if (!pte) | 64 | if (!pte) |
65 | goto out; | 65 | goto out; |
66 | 66 | ||
67 | if (!pte_none(*pte)) | 67 | ptfile = pgoff_to_pte(pgoff); |
68 | |||
69 | if (!pte_none(*pte)) { | ||
70 | if (pte_present(*pte) && pte_soft_dirty(*pte)) | ||
71 | pte_file_mksoft_dirty(ptfile); | ||
68 | zap_pte(mm, vma, addr, pte); | 72 | zap_pte(mm, vma, addr, pte); |
73 | } | ||
69 | 74 | ||
70 | set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff)); | 75 | set_pte_at(mm, addr, pte, ptfile); |
71 | /* | 76 | /* |
72 | * We don't need to run update_mmu_cache() here because the "file pte" | 77 | * We don't need to run update_mmu_cache() here because the "file pte" |
73 | * being installed by install_file_pte() is not a real pte - it's a | 78 | * being installed by install_file_pte() is not a real pte - it's a |
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 83aff0a4d093..b60f33080a28 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
@@ -2490,7 +2490,7 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start, | |||
2490 | 2490 | ||
2491 | mm = vma->vm_mm; | 2491 | mm = vma->vm_mm; |
2492 | 2492 | ||
2493 | tlb_gather_mmu(&tlb, mm, 0); | 2493 | tlb_gather_mmu(&tlb, mm, start, end); |
2494 | __unmap_hugepage_range(&tlb, vma, start, end, ref_page); | 2494 | __unmap_hugepage_range(&tlb, vma, start, end, ref_page); |
2495 | tlb_finish_mmu(&tlb, start, end); | 2495 | tlb_finish_mmu(&tlb, start, end); |
2496 | } | 2496 | } |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index c290a1cf3862..c5792a5d87ce 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -3195,11 +3195,11 @@ int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s, | |||
3195 | if (!s->memcg_params) | 3195 | if (!s->memcg_params) |
3196 | return -ENOMEM; | 3196 | return -ENOMEM; |
3197 | 3197 | ||
3198 | INIT_WORK(&s->memcg_params->destroy, | ||
3199 | kmem_cache_destroy_work_func); | ||
3200 | if (memcg) { | 3198 | if (memcg) { |
3201 | s->memcg_params->memcg = memcg; | 3199 | s->memcg_params->memcg = memcg; |
3202 | s->memcg_params->root_cache = root_cache; | 3200 | s->memcg_params->root_cache = root_cache; |
3201 | INIT_WORK(&s->memcg_params->destroy, | ||
3202 | kmem_cache_destroy_work_func); | ||
3203 | } else | 3203 | } else |
3204 | s->memcg_params->is_root_cache = true; | 3204 | s->memcg_params->is_root_cache = true; |
3205 | 3205 | ||
diff --git a/mm/memory.c b/mm/memory.c index 1ce2e2a734fc..af84bc0ec17c 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -209,14 +209,15 @@ static int tlb_next_batch(struct mmu_gather *tlb) | |||
209 | * tear-down from @mm. The @fullmm argument is used when @mm is without | 209 | * tear-down from @mm. The @fullmm argument is used when @mm is without |
210 | * users and we're going to destroy the full address space (exit/execve). | 210 | * users and we're going to destroy the full address space (exit/execve). |
211 | */ | 211 | */ |
212 | void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm) | 212 | void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long start, unsigned long end) |
213 | { | 213 | { |
214 | tlb->mm = mm; | 214 | tlb->mm = mm; |
215 | 215 | ||
216 | tlb->fullmm = fullmm; | 216 | /* Is it from 0 to ~0? */ |
217 | tlb->fullmm = !(start | (end+1)); | ||
217 | tlb->need_flush_all = 0; | 218 | tlb->need_flush_all = 0; |
218 | tlb->start = -1UL; | 219 | tlb->start = start; |
219 | tlb->end = 0; | 220 | tlb->end = end; |
220 | tlb->need_flush = 0; | 221 | tlb->need_flush = 0; |
221 | tlb->local.next = NULL; | 222 | tlb->local.next = NULL; |
222 | tlb->local.nr = 0; | 223 | tlb->local.nr = 0; |
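The new full-mm test above relies on unsigned wrap-around: start | (end + 1) is zero only for the 0..~0UL range, so passing the real start/end lets tlb_gather_mmu() recover the old fullmm flag without an extra argument. A standalone sketch of just that expression (plain userspace C, nothing kernel-specific):

#include <stdio.h>

static int is_fullmm(unsigned long start, unsigned long end)
{
	/* true only when start == 0 and end == ~0UL (end + 1 wraps to 0) */
	return !(start | (end + 1));
}

int main(void)
{
	printf("%d\n", is_fullmm(0, ~0UL));          /* 1: exit_mmap-style call */
	printf("%d\n", is_fullmm(0x1000, 0x2000));   /* 0: partial unmap */
	return 0;
}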
@@ -256,8 +257,6 @@ void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long e | |||
256 | { | 257 | { |
257 | struct mmu_gather_batch *batch, *next; | 258 | struct mmu_gather_batch *batch, *next; |
258 | 259 | ||
259 | tlb->start = start; | ||
260 | tlb->end = end; | ||
261 | tlb_flush_mmu(tlb); | 260 | tlb_flush_mmu(tlb); |
262 | 261 | ||
263 | /* keep the page table cache within bounds */ | 262 | /* keep the page table cache within bounds */ |
@@ -1099,7 +1098,6 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, | |||
1099 | spinlock_t *ptl; | 1098 | spinlock_t *ptl; |
1100 | pte_t *start_pte; | 1099 | pte_t *start_pte; |
1101 | pte_t *pte; | 1100 | pte_t *pte; |
1102 | unsigned long range_start = addr; | ||
1103 | 1101 | ||
1104 | again: | 1102 | again: |
1105 | init_rss_vec(rss); | 1103 | init_rss_vec(rss); |
@@ -1141,9 +1139,12 @@ again: | |||
1141 | continue; | 1139 | continue; |
1142 | if (unlikely(details) && details->nonlinear_vma | 1140 | if (unlikely(details) && details->nonlinear_vma |
1143 | && linear_page_index(details->nonlinear_vma, | 1141 | && linear_page_index(details->nonlinear_vma, |
1144 | addr) != page->index) | 1142 | addr) != page->index) { |
1145 | set_pte_at(mm, addr, pte, | 1143 | pte_t ptfile = pgoff_to_pte(page->index); |
1146 | pgoff_to_pte(page->index)); | 1144 | if (pte_soft_dirty(ptent)) |
1145 | pte_file_mksoft_dirty(ptfile); | ||
1146 | set_pte_at(mm, addr, pte, ptfile); | ||
1147 | } | ||
1147 | if (PageAnon(page)) | 1148 | if (PageAnon(page)) |
1148 | rss[MM_ANONPAGES]--; | 1149 | rss[MM_ANONPAGES]--; |
1149 | else { | 1150 | else { |
@@ -1202,17 +1203,25 @@ again: | |||
1202 | * and page-free while holding it. | 1203 | * and page-free while holding it. |
1203 | */ | 1204 | */ |
1204 | if (force_flush) { | 1205 | if (force_flush) { |
1206 | unsigned long old_end; | ||
1207 | |||
1205 | force_flush = 0; | 1208 | force_flush = 0; |
1206 | 1209 | ||
1207 | #ifdef HAVE_GENERIC_MMU_GATHER | 1210 | /* |
1208 | tlb->start = range_start; | 1211 | * Flush the TLB just for the previous segment, |
1212 | * then update the range to be the remaining | ||
1213 | * TLB range. | ||
1214 | */ | ||
1215 | old_end = tlb->end; | ||
1209 | tlb->end = addr; | 1216 | tlb->end = addr; |
1210 | #endif | 1217 | |
1211 | tlb_flush_mmu(tlb); | 1218 | tlb_flush_mmu(tlb); |
1212 | if (addr != end) { | 1219 | |
1213 | range_start = addr; | 1220 | tlb->start = addr; |
1221 | tlb->end = old_end; | ||
1222 | |||
1223 | if (addr != end) | ||
1214 | goto again; | 1224 | goto again; |
1215 | } | ||
1216 | } | 1225 | } |
1217 | 1226 | ||
1218 | return addr; | 1227 | return addr; |
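The force_flush path above flushes only the part of the range processed so far, then narrows the tracked range to the remainder before looping again. A toy model of that bookkeeping, with a printf standing in for the real TLB flush:

#include <stdio.h>

struct gather { unsigned long start, end; };

static void flush(struct gather *g)
{
	printf("flush [%#lx, %#lx)\n", g->start, g->end);
}

int main(void)
{
	struct gather g = { .start = 0x1000, .end = 0x9000 };
	unsigned long addr = 0x4000;          /* progress made when pressure forced a flush */
	unsigned long old_end = g.end;

	g.end = addr;                         /* flush only the finished segment */
	flush(&g);

	g.start = addr;                       /* remaining range for the next pass */
	g.end = old_end;
	flush(&g);
	return 0;
}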
@@ -1397,7 +1406,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start, | |||
1397 | unsigned long end = start + size; | 1406 | unsigned long end = start + size; |
1398 | 1407 | ||
1399 | lru_add_drain(); | 1408 | lru_add_drain(); |
1400 | tlb_gather_mmu(&tlb, mm, 0); | 1409 | tlb_gather_mmu(&tlb, mm, start, end); |
1401 | update_hiwater_rss(mm); | 1410 | update_hiwater_rss(mm); |
1402 | mmu_notifier_invalidate_range_start(mm, start, end); | 1411 | mmu_notifier_invalidate_range_start(mm, start, end); |
1403 | for ( ; vma && vma->vm_start < end; vma = vma->vm_next) | 1412 | for ( ; vma && vma->vm_start < end; vma = vma->vm_next) |
@@ -1423,7 +1432,7 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr | |||
1423 | unsigned long end = address + size; | 1432 | unsigned long end = address + size; |
1424 | 1433 | ||
1425 | lru_add_drain(); | 1434 | lru_add_drain(); |
1426 | tlb_gather_mmu(&tlb, mm, 0); | 1435 | tlb_gather_mmu(&tlb, mm, address, end); |
1427 | update_hiwater_rss(mm); | 1436 | update_hiwater_rss(mm); |
1428 | mmu_notifier_invalidate_range_start(mm, address, end); | 1437 | mmu_notifier_invalidate_range_start(mm, address, end); |
1429 | unmap_single_vma(&tlb, vma, address, end, details); | 1438 | unmap_single_vma(&tlb, vma, address, end, details); |
@@ -3115,6 +3124,8 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
3115 | exclusive = 1; | 3124 | exclusive = 1; |
3116 | } | 3125 | } |
3117 | flush_icache_page(vma, page); | 3126 | flush_icache_page(vma, page); |
3127 | if (pte_swp_soft_dirty(orig_pte)) | ||
3128 | pte = pte_mksoft_dirty(pte); | ||
3118 | set_pte_at(mm, address, page_table, pte); | 3129 | set_pte_at(mm, address, page_table, pte); |
3119 | if (page == swapcache) | 3130 | if (page == swapcache) |
3120 | do_page_add_anon_rmap(page, vma, address, exclusive); | 3131 | do_page_add_anon_rmap(page, vma, address, exclusive); |
@@ -3408,6 +3419,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, | |||
3408 | entry = mk_pte(page, vma->vm_page_prot); | 3419 | entry = mk_pte(page, vma->vm_page_prot); |
3409 | if (flags & FAULT_FLAG_WRITE) | 3420 | if (flags & FAULT_FLAG_WRITE) |
3410 | entry = maybe_mkwrite(pte_mkdirty(entry), vma); | 3421 | entry = maybe_mkwrite(pte_mkdirty(entry), vma); |
3422 | else if (pte_file(orig_pte) && pte_file_soft_dirty(orig_pte)) | ||
3423 | pte_mksoft_dirty(entry); | ||
3411 | if (anon) { | 3424 | if (anon) { |
3412 | inc_mm_counter_fast(mm, MM_ANONPAGES); | 3425 | inc_mm_counter_fast(mm, MM_ANONPAGES); |
3413 | page_add_new_anon_rmap(page, vma, address); | 3426 | page_add_new_anon_rmap(page, vma, address); |
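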
diff --git a/mm/mmap.c b/mm/mmap.c --- a/mm/mmap.c +++ b/mm/mmap.c | |||
@@ -2336,7 +2336,7 @@ static void unmap_region(struct mm_struct *mm, | |||
2336 | struct mmu_gather tlb; | 2336 | struct mmu_gather tlb; |
2337 | 2337 | ||
2338 | lru_add_drain(); | 2338 | lru_add_drain(); |
2339 | tlb_gather_mmu(&tlb, mm, 0); | 2339 | tlb_gather_mmu(&tlb, mm, start, end); |
2340 | update_hiwater_rss(mm); | 2340 | update_hiwater_rss(mm); |
2341 | unmap_vmas(&tlb, vma, start, end); | 2341 | unmap_vmas(&tlb, vma, start, end); |
2342 | free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, | 2342 | free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, |
@@ -2709,7 +2709,7 @@ void exit_mmap(struct mm_struct *mm) | |||
2709 | 2709 | ||
2710 | lru_add_drain(); | 2710 | lru_add_drain(); |
2711 | flush_cache_mm(mm); | 2711 | flush_cache_mm(mm); |
2712 | tlb_gather_mmu(&tlb, mm, 1); | 2712 | tlb_gather_mmu(&tlb, mm, 0, -1); |
2713 | /* update_hiwater_rss(mm) here? but nobody should be looking */ | 2713 | /* update_hiwater_rss(mm) here? but nobody should be looking */ |
2714 | /* Use -1 here to ensure all VMAs in the mm are unmapped */ | 2714 | /* Use -1 here to ensure all VMAs in the mm are unmapped */ |
2715 | unmap_vmas(&tlb, vma, 0, -1); | 2715 | unmap_vmas(&tlb, vma, 0, -1); |
diff --git a/mm/rmap.c b/mm/rmap.c --- a/mm/rmap.c +++ b/mm/rmap.c | |||
@@ -1236,6 +1236,7 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, | |||
1236 | swp_entry_to_pte(make_hwpoison_entry(page))); | 1236 | swp_entry_to_pte(make_hwpoison_entry(page))); |
1237 | } else if (PageAnon(page)) { | 1237 | } else if (PageAnon(page)) { |
1238 | swp_entry_t entry = { .val = page_private(page) }; | 1238 | swp_entry_t entry = { .val = page_private(page) }; |
1239 | pte_t swp_pte; | ||
1239 | 1240 | ||
1240 | if (PageSwapCache(page)) { | 1241 | if (PageSwapCache(page)) { |
1241 | /* | 1242 | /* |
@@ -1264,7 +1265,10 @@ int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, | |||
1264 | BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION); | 1265 | BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION); |
1265 | entry = make_migration_entry(page, pte_write(pteval)); | 1266 | entry = make_migration_entry(page, pte_write(pteval)); |
1266 | } | 1267 | } |
1267 | set_pte_at(mm, address, pte, swp_entry_to_pte(entry)); | 1268 | swp_pte = swp_entry_to_pte(entry); |
1269 | if (pte_soft_dirty(pteval)) | ||
1270 | swp_pte = pte_swp_mksoft_dirty(swp_pte); | ||
1271 | set_pte_at(mm, address, pte, swp_pte); | ||
1268 | BUG_ON(pte_file(*pte)); | 1272 | BUG_ON(pte_file(*pte)); |
1269 | } else if (IS_ENABLED(CONFIG_MIGRATION) && | 1273 | } else if (IS_ENABLED(CONFIG_MIGRATION) && |
1270 | (TTU_ACTION(flags) == TTU_MIGRATION)) { | 1274 | (TTU_ACTION(flags) == TTU_MIGRATION)) { |
@@ -1401,8 +1405,12 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount, | |||
1401 | pteval = ptep_clear_flush(vma, address, pte); | 1405 | pteval = ptep_clear_flush(vma, address, pte); |
1402 | 1406 | ||
1403 | /* If nonlinear, store the file page offset in the pte. */ | 1407 | /* If nonlinear, store the file page offset in the pte. */ |
1404 | if (page->index != linear_page_index(vma, address)) | 1408 | if (page->index != linear_page_index(vma, address)) { |
1405 | set_pte_at(mm, address, pte, pgoff_to_pte(page->index)); | 1409 | pte_t ptfile = pgoff_to_pte(page->index); |
1410 | if (pte_soft_dirty(pteval)) | ||
1411 | pte_file_mksoft_dirty(ptfile); | ||
1412 | set_pte_at(mm, address, pte, ptfile); | ||
1413 | } | ||
1406 | 1414 | ||
1407 | /* Move the dirty bit to the physical page now the pte is gone. */ | 1415 | /* Move the dirty bit to the physical page now the pte is gone. */ |
1408 | if (pte_dirty(pteval)) | 1416 | if (pte_dirty(pteval)) |
diff --git a/mm/slub.c b/mm/slub.c --- a/mm/slub.c +++ b/mm/slub.c | |||
@@ -1968,9 +1968,6 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) | |||
1968 | int pages; | 1968 | int pages; |
1969 | int pobjects; | 1969 | int pobjects; |
1970 | 1970 | ||
1971 | if (!s->cpu_partial) | ||
1972 | return; | ||
1973 | |||
1974 | do { | 1971 | do { |
1975 | pages = 0; | 1972 | pages = 0; |
1976 | pobjects = 0; | 1973 | pobjects = 0; |
diff --git a/mm/swapfile.c b/mm/swapfile.c index 36af6eeaa67e..6cf2e60983b7 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c | |||
@@ -866,6 +866,21 @@ unsigned int count_swap_pages(int type, int free) | |||
866 | } | 866 | } |
867 | #endif /* CONFIG_HIBERNATION */ | 867 | #endif /* CONFIG_HIBERNATION */ |
868 | 868 | ||
869 | static inline int maybe_same_pte(pte_t pte, pte_t swp_pte) | ||
870 | { | ||
871 | #ifdef CONFIG_MEM_SOFT_DIRTY | ||
872 | /* | ||
873 | * When pte keeps soft dirty bit the pte generated | ||
874 | * from swap entry does not has it, still it's same | ||
875 | * pte from logical point of view. | ||
876 | */ | ||
877 | pte_t swp_pte_dirty = pte_swp_mksoft_dirty(swp_pte); | ||
878 | return pte_same(pte, swp_pte) || pte_same(pte, swp_pte_dirty); | ||
879 | #else | ||
880 | return pte_same(pte, swp_pte); | ||
881 | #endif | ||
882 | } | ||
883 | |||
869 | /* | 884 | /* |
870 | * No need to decide whether this PTE shares the swap entry with others, | 885 | * No need to decide whether this PTE shares the swap entry with others, |
871 | * just let do_wp_page work it out if a write is requested later - to | 886 | * just let do_wp_page work it out if a write is requested later - to |
@@ -892,7 +907,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, | |||
892 | } | 907 | } |
893 | 908 | ||
894 | pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); | 909 | pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); |
895 | if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) { | 910 | if (unlikely(!maybe_same_pte(*pte, swp_entry_to_pte(entry)))) { |
896 | mem_cgroup_cancel_charge_swapin(memcg); | 911 | mem_cgroup_cancel_charge_swapin(memcg); |
897 | ret = 0; | 912 | ret = 0; |
898 | goto out; | 913 | goto out; |
@@ -947,7 +962,7 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd, | |||
947 | * swapoff spends a _lot_ of time in this loop! | 962 | * swapoff spends a _lot_ of time in this loop! |
948 | * Test inline before going to call unuse_pte. | 963 | * Test inline before going to call unuse_pte. |
949 | */ | 964 | */ |
950 | if (unlikely(pte_same(*pte, swp_pte))) { | 965 | if (unlikely(maybe_same_pte(*pte, swp_pte))) { |
951 | pte_unmap(pte); | 966 | pte_unmap(pte); |
952 | ret = unuse_pte(vma, pmd, addr, entry, page); | 967 | ret = unuse_pte(vma, pmd, addr, entry, page); |
953 | if (ret) | 968 | if (ret) |
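maybe_same_pte() above exists because a soft-dirty pte and the pte regenerated from its swap entry differ only in that one bit, so a strict pte_same() check would wrongly reject the entry during swapoff. A toy bit-level model of the comparison; the bit position and sample values here are made up for the demo and are not the real pte layout:

#include <stdio.h>

#define SOFT_DIRTY (1UL << 7)   /* placeholder bit, the real position is arch-specific */

static int maybe_same(unsigned long pte, unsigned long swp_pte)
{
	return pte == swp_pte || pte == (swp_pte | SOFT_DIRTY);
}

int main(void)
{
	unsigned long generated = 0xab00UL;               /* "swp_entry_to_pte()" result */
	unsigned long in_memory = generated | SOFT_DIRTY; /* task wrote before swap-out */

	printf("strict=%d relaxed=%d\n",
	       in_memory == generated, maybe_same(in_memory, generated));
	return 0;
}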
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c index 4a78c4de9f20..6ee48aac776f 100644 --- a/net/8021q/vlan_core.c +++ b/net/8021q/vlan_core.c | |||
@@ -91,7 +91,12 @@ EXPORT_SYMBOL(__vlan_find_dev_deep); | |||
91 | 91 | ||
92 | struct net_device *vlan_dev_real_dev(const struct net_device *dev) | 92 | struct net_device *vlan_dev_real_dev(const struct net_device *dev) |
93 | { | 93 | { |
94 | return vlan_dev_priv(dev)->real_dev; | 94 | struct net_device *ret = vlan_dev_priv(dev)->real_dev; |
95 | |||
96 | while (is_vlan_dev(ret)) | ||
97 | ret = vlan_dev_priv(ret)->real_dev; | ||
98 | |||
99 | return ret; | ||
95 | } | 100 | } |
96 | EXPORT_SYMBOL(vlan_dev_real_dev); | 101 | EXPORT_SYMBOL(vlan_dev_real_dev); |
97 | 102 | ||
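The loop added to vlan_dev_real_dev() keeps descending until the device is no longer a VLAN, so stacked (Q-in-Q) VLANs resolve to the underlying physical device instead of the next VLAN down. A self-contained sketch with invented types:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct dev {
	const char *name;
	bool is_vlan;
	struct dev *lower;   /* the "real_dev" for a VLAN, NULL otherwise */
};

static struct dev *real_dev(struct dev *d)
{
	while (d->is_vlan)
		d = d->lower;
	return d;
}

int main(void)
{
	struct dev eth0  = { "eth0",         false, NULL   };
	struct dev vlan1 = { "eth0.100",     true,  &eth0  };
	struct dev vlan2 = { "eth0.100.200", true,  &vlan1 };

	printf("%s\n", real_dev(&vlan2)->name);   /* prints eth0 */
	return 0;
}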
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index e14531f1ce1c..264de88db320 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c | |||
@@ -1529,6 +1529,8 @@ out: | |||
1529 | * in these cases, the skb is further handled by this function and | 1529 | * in these cases, the skb is further handled by this function and |
1530 | * returns 1, otherwise it returns 0 and the caller shall further | 1530 | * returns 1, otherwise it returns 0 and the caller shall further |
1531 | * process the skb. | 1531 | * process the skb. |
1532 | * | ||
1533 | * This call might reallocate skb data. | ||
1532 | */ | 1534 | */ |
1533 | int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, | 1535 | int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, |
1534 | unsigned short vid) | 1536 | unsigned short vid) |
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c index f105219f4a4b..7614af31daff 100644 --- a/net/batman-adv/gateway_client.c +++ b/net/batman-adv/gateway_client.c | |||
@@ -508,6 +508,7 @@ out: | |||
508 | return 0; | 508 | return 0; |
509 | } | 509 | } |
510 | 510 | ||
511 | /* this call might reallocate skb data */ | ||
511 | static bool batadv_is_type_dhcprequest(struct sk_buff *skb, int header_len) | 512 | static bool batadv_is_type_dhcprequest(struct sk_buff *skb, int header_len) |
512 | { | 513 | { |
513 | int ret = false; | 514 | int ret = false; |
@@ -568,6 +569,7 @@ out: | |||
568 | return ret; | 569 | return ret; |
569 | } | 570 | } |
570 | 571 | ||
572 | /* this call might reallocate skb data */ | ||
571 | bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len) | 573 | bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len) |
572 | { | 574 | { |
573 | struct ethhdr *ethhdr; | 575 | struct ethhdr *ethhdr; |
@@ -619,6 +621,12 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len) | |||
619 | 621 | ||
620 | if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr))) | 622 | if (!pskb_may_pull(skb, *header_len + sizeof(*udphdr))) |
621 | return false; | 623 | return false; |
624 | |||
625 | /* skb->data might have been reallocated by pskb_may_pull() */ | ||
626 | ethhdr = (struct ethhdr *)skb->data; | ||
627 | if (ntohs(ethhdr->h_proto) == ETH_P_8021Q) | ||
628 | ethhdr = (struct ethhdr *)(skb->data + VLAN_HLEN); | ||
629 | |||
622 | udphdr = (struct udphdr *)(skb->data + *header_len); | 630 | udphdr = (struct udphdr *)(skb->data + *header_len); |
623 | *header_len += sizeof(*udphdr); | 631 | *header_len += sizeof(*udphdr); |
624 | 632 | ||
@@ -634,12 +642,14 @@ bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len) | |||
634 | return true; | 642 | return true; |
635 | } | 643 | } |
636 | 644 | ||
645 | /* this call might reallocate skb data */ | ||
637 | bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, | 646 | bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, |
638 | struct sk_buff *skb, struct ethhdr *ethhdr) | 647 | struct sk_buff *skb) |
639 | { | 648 | { |
640 | struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL; | 649 | struct batadv_neigh_node *neigh_curr = NULL, *neigh_old = NULL; |
641 | struct batadv_orig_node *orig_dst_node = NULL; | 650 | struct batadv_orig_node *orig_dst_node = NULL; |
642 | struct batadv_gw_node *curr_gw = NULL; | 651 | struct batadv_gw_node *curr_gw = NULL; |
652 | struct ethhdr *ethhdr; | ||
643 | bool ret, out_of_range = false; | 653 | bool ret, out_of_range = false; |
644 | unsigned int header_len = 0; | 654 | unsigned int header_len = 0; |
645 | uint8_t curr_tq_avg; | 655 | uint8_t curr_tq_avg; |
@@ -648,6 +658,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, | |||
648 | if (!ret) | 658 | if (!ret) |
649 | goto out; | 659 | goto out; |
650 | 660 | ||
661 | ethhdr = (struct ethhdr *)skb->data; | ||
651 | orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source, | 662 | orig_dst_node = batadv_transtable_search(bat_priv, ethhdr->h_source, |
652 | ethhdr->h_dest); | 663 | ethhdr->h_dest); |
653 | if (!orig_dst_node) | 664 | if (!orig_dst_node) |
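The recurring "might reallocate skb data" notes above are the kernel variant of a familiar C hazard: pointers derived from a buffer become invalid once the buffer may have been reallocated, so the header pointer has to be re-read from the (possibly new) base address. A plain userspace illustration, with malloc/realloc standing in for the skb helpers:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *buf = malloc(16);
	if (!buf)
		return 1;
	strcpy(buf, "hdr");
	char *hdr = buf;                  /* like ethhdr = (struct ethhdr *)skb->data */

	char *nbuf = realloc(buf, 1 << 20);  /* like pskb_may_pull() growing the data */
	if (!nbuf) {
		free(buf);
		return 1;
	}
	buf = nbuf;
	/* 'hdr' may now dangle; never dereference it again. Re-derive instead: */
	hdr = buf;
	printf("%s\n", hdr);

	free(buf);
	return 0;
}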
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h index 039902dca4a6..1037d75da51f 100644 --- a/net/batman-adv/gateway_client.h +++ b/net/batman-adv/gateway_client.h | |||
@@ -34,7 +34,6 @@ void batadv_gw_node_delete(struct batadv_priv *bat_priv, | |||
34 | void batadv_gw_node_purge(struct batadv_priv *bat_priv); | 34 | void batadv_gw_node_purge(struct batadv_priv *bat_priv); |
35 | int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset); | 35 | int batadv_gw_client_seq_print_text(struct seq_file *seq, void *offset); |
36 | bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len); | 36 | bool batadv_gw_is_dhcp_target(struct sk_buff *skb, unsigned int *header_len); |
37 | bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, | 37 | bool batadv_gw_out_of_range(struct batadv_priv *bat_priv, struct sk_buff *skb); |
38 | struct sk_buff *skb, struct ethhdr *ethhdr); | ||
39 | 38 | ||
40 | #endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */ | 39 | #endif /* _NET_BATMAN_ADV_GATEWAY_CLIENT_H_ */ |
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 700d0b49742d..0f04e1c302b4 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c | |||
@@ -180,6 +180,9 @@ static int batadv_interface_tx(struct sk_buff *skb, | |||
180 | if (batadv_bla_tx(bat_priv, skb, vid)) | 180 | if (batadv_bla_tx(bat_priv, skb, vid)) |
181 | goto dropped; | 181 | goto dropped; |
182 | 182 | ||
183 | /* skb->data might have been reallocated by batadv_bla_tx() */ | ||
184 | ethhdr = (struct ethhdr *)skb->data; | ||
185 | |||
183 | /* Register the client MAC in the transtable */ | 186 | /* Register the client MAC in the transtable */ |
184 | if (!is_multicast_ether_addr(ethhdr->h_source)) | 187 | if (!is_multicast_ether_addr(ethhdr->h_source)) |
185 | batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif); | 188 | batadv_tt_local_add(soft_iface, ethhdr->h_source, skb->skb_iif); |
@@ -220,6 +223,10 @@ static int batadv_interface_tx(struct sk_buff *skb, | |||
220 | default: | 223 | default: |
221 | break; | 224 | break; |
222 | } | 225 | } |
226 | |||
227 | /* reminder: ethhdr might have become unusable from here on | ||
228 | * (batadv_gw_is_dhcp_target() might have reallocated skb data) | ||
229 | */ | ||
223 | } | 230 | } |
224 | 231 | ||
225 | /* ethernet packet should be broadcasted */ | 232 | /* ethernet packet should be broadcasted */ |
@@ -266,7 +273,7 @@ static int batadv_interface_tx(struct sk_buff *skb, | |||
266 | /* unicast packet */ | 273 | /* unicast packet */ |
267 | } else { | 274 | } else { |
268 | if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_OFF) { | 275 | if (atomic_read(&bat_priv->gw_mode) != BATADV_GW_MODE_OFF) { |
269 | ret = batadv_gw_out_of_range(bat_priv, skb, ethhdr); | 276 | ret = batadv_gw_out_of_range(bat_priv, skb); |
270 | if (ret) | 277 | if (ret) |
271 | goto dropped; | 278 | goto dropped; |
272 | } | 279 | } |
diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c index dc8b5d4dd636..688a0419756b 100644 --- a/net/batman-adv/unicast.c +++ b/net/batman-adv/unicast.c | |||
@@ -326,7 +326,9 @@ static bool batadv_unicast_push_and_fill_skb(struct sk_buff *skb, int hdr_size, | |||
326 | * @skb: the skb containing the payload to encapsulate | 326 | * @skb: the skb containing the payload to encapsulate |
327 | * @orig_node: the destination node | 327 | * @orig_node: the destination node |
328 | * | 328 | * |
329 | * Returns false if the payload could not be encapsulated or true otherwise | 329 | * Returns false if the payload could not be encapsulated or true otherwise. |
330 | * | ||
331 | * This call might reallocate skb data. | ||
330 | */ | 332 | */ |
331 | static bool batadv_unicast_prepare_skb(struct sk_buff *skb, | 333 | static bool batadv_unicast_prepare_skb(struct sk_buff *skb, |
332 | struct batadv_orig_node *orig_node) | 334 | struct batadv_orig_node *orig_node) |
@@ -343,7 +345,9 @@ static bool batadv_unicast_prepare_skb(struct sk_buff *skb, | |||
343 | * @orig_node: the destination node | 345 | * @orig_node: the destination node |
344 | * @packet_subtype: the batman 4addr packet subtype to use | 346 | * @packet_subtype: the batman 4addr packet subtype to use |
345 | * | 347 | * |
346 | * Returns false if the payload could not be encapsulated or true otherwise | 348 | * Returns false if the payload could not be encapsulated or true otherwise. |
349 | * | ||
350 | * This call might reallocate skb data. | ||
347 | */ | 351 | */ |
348 | bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv, | 352 | bool batadv_unicast_4addr_prepare_skb(struct batadv_priv *bat_priv, |
349 | struct sk_buff *skb, | 353 | struct sk_buff *skb, |
@@ -401,7 +405,7 @@ int batadv_unicast_generic_send_skb(struct batadv_priv *bat_priv, | |||
401 | struct batadv_neigh_node *neigh_node; | 405 | struct batadv_neigh_node *neigh_node; |
402 | int data_len = skb->len; | 406 | int data_len = skb->len; |
403 | int ret = NET_RX_DROP; | 407 | int ret = NET_RX_DROP; |
404 | unsigned int dev_mtu; | 408 | unsigned int dev_mtu, header_len; |
405 | 409 | ||
406 | /* get routing information */ | 410 | /* get routing information */ |
407 | if (is_multicast_ether_addr(ethhdr->h_dest)) { | 411 | if (is_multicast_ether_addr(ethhdr->h_dest)) { |
@@ -429,10 +433,12 @@ find_router: | |||
429 | switch (packet_type) { | 433 | switch (packet_type) { |
430 | case BATADV_UNICAST: | 434 | case BATADV_UNICAST: |
431 | batadv_unicast_prepare_skb(skb, orig_node); | 435 | batadv_unicast_prepare_skb(skb, orig_node); |
436 | header_len = sizeof(struct batadv_unicast_packet); | ||
432 | break; | 437 | break; |
433 | case BATADV_UNICAST_4ADDR: | 438 | case BATADV_UNICAST_4ADDR: |
434 | batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node, | 439 | batadv_unicast_4addr_prepare_skb(bat_priv, skb, orig_node, |
435 | packet_subtype); | 440 | packet_subtype); |
441 | header_len = sizeof(struct batadv_unicast_4addr_packet); | ||
436 | break; | 442 | break; |
437 | default: | 443 | default: |
438 | /* this function supports UNICAST and UNICAST_4ADDR only. It | 444 | /* this function supports UNICAST and UNICAST_4ADDR only. It |
@@ -441,6 +447,7 @@ find_router: | |||
441 | goto out; | 447 | goto out; |
442 | } | 448 | } |
443 | 449 | ||
450 | ethhdr = (struct ethhdr *)(skb->data + header_len); | ||
444 | unicast_packet = (struct batadv_unicast_packet *)skb->data; | 451 | unicast_packet = (struct batadv_unicast_packet *)skb->data; |
445 | 452 | ||
446 | /* inform the destination node that we are still missing a correct route | 453 | /* inform the destination node that we are still missing a correct route |
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 61c5e819380e..08e576ada0b2 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c | |||
@@ -1195,7 +1195,7 @@ static int br_ip6_multicast_query(struct net_bridge *br, | |||
1195 | max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay)); | 1195 | max_delay = msecs_to_jiffies(ntohs(mld->mld_maxdelay)); |
1196 | if (max_delay) | 1196 | if (max_delay) |
1197 | group = &mld->mld_mca; | 1197 | group = &mld->mld_mca; |
1198 | } else if (skb->len >= sizeof(*mld2q)) { | 1198 | } else { |
1199 | if (!pskb_may_pull(skb, sizeof(*mld2q))) { | 1199 | if (!pskb_may_pull(skb, sizeof(*mld2q))) { |
1200 | err = -EINVAL; | 1200 | err = -EINVAL; |
1201 | goto out; | 1201 | goto out; |
diff --git a/net/bridge/br_sysfs_br.c b/net/bridge/br_sysfs_br.c index 394bb96b6087..3b9637fb7939 100644 --- a/net/bridge/br_sysfs_br.c +++ b/net/bridge/br_sysfs_br.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Sysfs attributes of bridge ports | 2 | * Sysfs attributes of bridge |
3 | * Linux ethernet bridge | 3 | * Linux ethernet bridge |
4 | * | 4 | * |
5 | * Authors: | 5 | * Authors: |
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 00ee068efc1c..b84a1b155bc1 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c | |||
@@ -65,6 +65,7 @@ ipv6: | |||
65 | nhoff += sizeof(struct ipv6hdr); | 65 | nhoff += sizeof(struct ipv6hdr); |
66 | break; | 66 | break; |
67 | } | 67 | } |
68 | case __constant_htons(ETH_P_8021AD): | ||
68 | case __constant_htons(ETH_P_8021Q): { | 69 | case __constant_htons(ETH_P_8021Q): { |
69 | const struct vlan_hdr *vlan; | 70 | const struct vlan_hdr *vlan; |
70 | struct vlan_hdr _vlan; | 71 | struct vlan_hdr _vlan; |
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 9232c68941ab..60533db8b72d 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c | |||
@@ -1441,16 +1441,18 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev, | |||
1441 | atomic_set(&p->refcnt, 1); | 1441 | atomic_set(&p->refcnt, 1); |
1442 | p->reachable_time = | 1442 | p->reachable_time = |
1443 | neigh_rand_reach_time(p->base_reachable_time); | 1443 | neigh_rand_reach_time(p->base_reachable_time); |
1444 | dev_hold(dev); | ||
1445 | p->dev = dev; | ||
1446 | write_pnet(&p->net, hold_net(net)); | ||
1447 | p->sysctl_table = NULL; | ||
1444 | 1448 | ||
1445 | if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) { | 1449 | if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) { |
1450 | release_net(net); | ||
1451 | dev_put(dev); | ||
1446 | kfree(p); | 1452 | kfree(p); |
1447 | return NULL; | 1453 | return NULL; |
1448 | } | 1454 | } |
1449 | 1455 | ||
1450 | dev_hold(dev); | ||
1451 | p->dev = dev; | ||
1452 | write_pnet(&p->net, hold_net(net)); | ||
1453 | p->sysctl_table = NULL; | ||
1454 | write_lock_bh(&tbl->lock); | 1456 | write_lock_bh(&tbl->lock); |
1455 | p->next = tbl->parms.next; | 1457 | p->next = tbl->parms.next; |
1456 | tbl->parms.next = p; | 1458 | tbl->parms.next = p; |
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 3de740834d1f..ca198c1d1d30 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -2156,7 +2156,7 @@ int ndo_dflt_fdb_del(struct ndmsg *ndm, | |||
2156 | /* If aging addresses are supported device will need to | 2156 | /* If aging addresses are supported device will need to |
2157 | * implement its own handler for this. | 2157 | * implement its own handler for this. |
2158 | */ | 2158 | */ |
2159 | if (ndm->ndm_state & NUD_PERMANENT) { | 2159 | if (!(ndm->ndm_state & NUD_PERMANENT)) { |
2160 | pr_info("%s: FDB only supports static addresses\n", dev->name); | 2160 | pr_info("%s: FDB only supports static addresses\n", dev->name); |
2161 | return -EINVAL; | 2161 | return -EINVAL; |
2162 | } | 2162 | } |
@@ -2384,7 +2384,7 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb) | |||
2384 | struct nlattr *extfilt; | 2384 | struct nlattr *extfilt; |
2385 | u32 filter_mask = 0; | 2385 | u32 filter_mask = 0; |
2386 | 2386 | ||
2387 | extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct rtgenmsg), | 2387 | extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg), |
2388 | IFLA_EXT_MASK); | 2388 | IFLA_EXT_MASK); |
2389 | if (extfilt) | 2389 | if (extfilt) |
2390 | filter_mask = nla_get_u32(extfilt); | 2390 | filter_mask = nla_get_u32(extfilt); |
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index ab3d814bc80a..109ee89f123e 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c | |||
@@ -477,7 +477,7 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu) | |||
477 | } | 477 | } |
478 | 478 | ||
479 | return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) - | 479 | return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) - |
480 | net_adj) & ~(align - 1)) + (net_adj - 2); | 480 | net_adj) & ~(align - 1)) + net_adj - 2; |
481 | } | 481 | } |
482 | 482 | ||
483 | static void esp4_err(struct sk_buff *skb, u32 info) | 483 | static void esp4_err(struct sk_buff *skb, u32 info) |
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 108a1e9c9eac..3df6d3edb2a1 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c | |||
@@ -71,7 +71,6 @@ | |||
71 | #include <linux/init.h> | 71 | #include <linux/init.h> |
72 | #include <linux/list.h> | 72 | #include <linux/list.h> |
73 | #include <linux/slab.h> | 73 | #include <linux/slab.h> |
74 | #include <linux/prefetch.h> | ||
75 | #include <linux/export.h> | 74 | #include <linux/export.h> |
76 | #include <net/net_namespace.h> | 75 | #include <net/net_namespace.h> |
77 | #include <net/ip.h> | 76 | #include <net/ip.h> |
@@ -1761,10 +1760,8 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c) | |||
1761 | if (!c) | 1760 | if (!c) |
1762 | continue; | 1761 | continue; |
1763 | 1762 | ||
1764 | if (IS_LEAF(c)) { | 1763 | if (IS_LEAF(c)) |
1765 | prefetch(rcu_dereference_rtnl(p->child[idx])); | ||
1766 | return (struct leaf *) c; | 1764 | return (struct leaf *) c; |
1767 | } | ||
1768 | 1765 | ||
1769 | /* Rescan start scanning in new node */ | 1766 | /* Rescan start scanning in new node */ |
1770 | p = (struct tnode *) c; | 1767 | p = (struct tnode *) c; |
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 1f6eab66f7ce..8d6939eeb492 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -383,7 +383,7 @@ static int ipgre_header(struct sk_buff *skb, struct net_device *dev, | |||
383 | if (daddr) | 383 | if (daddr) |
384 | memcpy(&iph->daddr, daddr, 4); | 384 | memcpy(&iph->daddr, daddr, 4); |
385 | if (iph->daddr) | 385 | if (iph->daddr) |
386 | return t->hlen; | 386 | return t->hlen + sizeof(*iph); |
387 | 387 | ||
388 | return -(t->hlen + sizeof(*iph)); | 388 | return -(t->hlen + sizeof(*iph)); |
389 | } | 389 | } |
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index 7167b08977df..850525b34899 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c | |||
@@ -76,9 +76,7 @@ int iptunnel_xmit(struct net *net, struct rtable *rt, | |||
76 | iph->daddr = dst; | 76 | iph->daddr = dst; |
77 | iph->saddr = src; | 77 | iph->saddr = src; |
78 | iph->ttl = ttl; | 78 | iph->ttl = ttl; |
79 | tunnel_ip_select_ident(skb, | 79 | __ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1); |
80 | (const struct iphdr *)skb_inner_network_header(skb), | ||
81 | &rt->dst); | ||
82 | 80 | ||
83 | err = ip_local_out(skb); | 81 | err = ip_local_out(skb); |
84 | if (unlikely(net_xmit_eval(err))) | 82 | if (unlikely(net_xmit_eval(err))) |
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 6577a1149a47..463bd1273346 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c | |||
@@ -273,7 +273,7 @@ static const struct snmp_mib snmp4_net_list[] = { | |||
273 | SNMP_MIB_ITEM("TCPFastOpenListenOverflow", LINUX_MIB_TCPFASTOPENLISTENOVERFLOW), | 273 | SNMP_MIB_ITEM("TCPFastOpenListenOverflow", LINUX_MIB_TCPFASTOPENLISTENOVERFLOW), |
274 | SNMP_MIB_ITEM("TCPFastOpenCookieReqd", LINUX_MIB_TCPFASTOPENCOOKIEREQD), | 274 | SNMP_MIB_ITEM("TCPFastOpenCookieReqd", LINUX_MIB_TCPFASTOPENCOOKIEREQD), |
275 | SNMP_MIB_ITEM("TCPSpuriousRtxHostQueues", LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES), | 275 | SNMP_MIB_ITEM("TCPSpuriousRtxHostQueues", LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES), |
276 | SNMP_MIB_ITEM("LowLatencyRxPackets", LINUX_MIB_LOWLATENCYRXPACKETS), | 276 | SNMP_MIB_ITEM("BusyPollRxPackets", LINUX_MIB_BUSYPOLLRXPACKETS), |
277 | SNMP_MIB_SENTINEL | 277 | SNMP_MIB_SENTINEL |
278 | }; | 278 | }; |
279 | 279 | ||
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index a9077f441cb2..b6ae92a51f58 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c | |||
@@ -206,8 +206,8 @@ static u32 cubic_root(u64 a) | |||
206 | */ | 206 | */ |
207 | static inline void bictcp_update(struct bictcp *ca, u32 cwnd) | 207 | static inline void bictcp_update(struct bictcp *ca, u32 cwnd) |
208 | { | 208 | { |
209 | u64 offs; | 209 | u32 delta, bic_target, max_cnt; |
210 | u32 delta, t, bic_target, max_cnt; | 210 | u64 offs, t; |
211 | 211 | ||
212 | ca->ack_cnt++; /* count the number of ACKs */ | 212 | ca->ack_cnt++; /* count the number of ACKs */ |
213 | 213 | ||
@@ -250,9 +250,11 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd) | |||
250 | * if the cwnd < 1 million packets !!! | 250 | * if the cwnd < 1 million packets !!! |
251 | */ | 251 | */ |
252 | 252 | ||
253 | t = (s32)(tcp_time_stamp - ca->epoch_start); | ||
254 | t += msecs_to_jiffies(ca->delay_min >> 3); | ||
253 | /* change the unit from HZ to bictcp_HZ */ | 255 | /* change the unit from HZ to bictcp_HZ */ |
254 | t = ((tcp_time_stamp + msecs_to_jiffies(ca->delay_min>>3) | 256 | t <<= BICTCP_HZ; |
255 | - ca->epoch_start) << BICTCP_HZ) / HZ; | 257 | do_div(t, HZ); |
256 | 258 | ||
257 | if (t < ca->bic_K) /* t - K */ | 259 | if (t < ca->bic_K) /* t - K */ |
258 | offs = ca->bic_K - t; | 260 | offs = ca->bic_K - t; |
@@ -414,7 +416,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us) | |||
414 | return; | 416 | return; |
415 | 417 | ||
416 | /* Discard delay samples right after fast recovery */ | 418 | /* Discard delay samples right after fast recovery */ |
417 | if ((s32)(tcp_time_stamp - ca->epoch_start) < HZ) | 419 | if (ca->epoch_start && (s32)(tcp_time_stamp - ca->epoch_start) < HZ) |
418 | return; | 420 | return; |
419 | 421 | ||
420 | delay = (rtt_us << 3) / USEC_PER_MSEC; | 422 | delay = (rtt_us << 3) / USEC_PER_MSEC; |
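The bictcp_update() change widens t to 64 bits before the << BICTCP_HZ shift because the 32-bit product overflows once the jiffies delta gets large (for example after a long-idle connection). A small standalone demo of the overflow; HZ and the sample delta are assumptions chosen for illustration:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define BICTCP_HZ 10
#define HZ        1000

int main(void)
{
	uint32_t t32 = 4200000;          /* ~70 minutes of jiffies at HZ=1000 */
	uint64_t t64 = t32;

	uint32_t bad  = (t32 << BICTCP_HZ) / HZ;   /* wraps: 4.2e6 * 1024 > 2^32 */
	uint64_t good = (t64 << BICTCP_HZ) / HZ;

	printf("32-bit: %" PRIu32 "  64-bit: %" PRIu64 "\n", bad, good);
	return 0;
}

The wrapped 32-bit value feeds the cubic curve a bogus elapsed time, which is why the fix also divides with do_div() on the 64-bit quantity.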
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 40ffd72243a4..aeac0dc3635d 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c | |||
@@ -425,7 +425,7 @@ static u32 esp6_get_mtu(struct xfrm_state *x, int mtu) | |||
425 | net_adj = 0; | 425 | net_adj = 0; |
426 | 426 | ||
427 | return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) - | 427 | return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) - |
428 | net_adj) & ~(align - 1)) + (net_adj - 2); | 428 | net_adj) & ~(align - 1)) + net_adj - 2; |
429 | } | 429 | } |
430 | 430 | ||
431 | static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | 431 | static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, |
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index bff3d821c7eb..c4ff5bbb45c4 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c | |||
@@ -993,14 +993,22 @@ static struct fib6_node * fib6_lookup_1(struct fib6_node *root, | |||
993 | 993 | ||
994 | if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) { | 994 | if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) { |
995 | #ifdef CONFIG_IPV6_SUBTREES | 995 | #ifdef CONFIG_IPV6_SUBTREES |
996 | if (fn->subtree) | 996 | if (fn->subtree) { |
997 | fn = fib6_lookup_1(fn->subtree, args + 1); | 997 | struct fib6_node *sfn; |
998 | sfn = fib6_lookup_1(fn->subtree, | ||
999 | args + 1); | ||
1000 | if (!sfn) | ||
1001 | goto backtrack; | ||
1002 | fn = sfn; | ||
1003 | } | ||
998 | #endif | 1004 | #endif |
999 | if (!fn || fn->fn_flags & RTN_RTINFO) | 1005 | if (fn->fn_flags & RTN_RTINFO) |
1000 | return fn; | 1006 | return fn; |
1001 | } | 1007 | } |
1002 | } | 1008 | } |
1003 | 1009 | #ifdef CONFIG_IPV6_SUBTREES | |
1010 | backtrack: | ||
1011 | #endif | ||
1004 | if (fn->fn_flags & RTN_ROOT) | 1012 | if (fn->fn_flags & RTN_ROOT) |
1005 | break; | 1013 | break; |
1006 | 1014 | ||
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index ae31968d42d3..cc9e02d79b55 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -31,10 +31,12 @@ | |||
31 | #include "led.h" | 31 | #include "led.h" |
32 | 32 | ||
33 | #define IEEE80211_AUTH_TIMEOUT (HZ / 5) | 33 | #define IEEE80211_AUTH_TIMEOUT (HZ / 5) |
34 | #define IEEE80211_AUTH_TIMEOUT_LONG (HZ / 2) | ||
34 | #define IEEE80211_AUTH_TIMEOUT_SHORT (HZ / 10) | 35 | #define IEEE80211_AUTH_TIMEOUT_SHORT (HZ / 10) |
35 | #define IEEE80211_AUTH_MAX_TRIES 3 | 36 | #define IEEE80211_AUTH_MAX_TRIES 3 |
36 | #define IEEE80211_AUTH_WAIT_ASSOC (HZ * 5) | 37 | #define IEEE80211_AUTH_WAIT_ASSOC (HZ * 5) |
37 | #define IEEE80211_ASSOC_TIMEOUT (HZ / 5) | 38 | #define IEEE80211_ASSOC_TIMEOUT (HZ / 5) |
39 | #define IEEE80211_ASSOC_TIMEOUT_LONG (HZ / 2) | ||
38 | #define IEEE80211_ASSOC_TIMEOUT_SHORT (HZ / 10) | 40 | #define IEEE80211_ASSOC_TIMEOUT_SHORT (HZ / 10) |
39 | #define IEEE80211_ASSOC_MAX_TRIES 3 | 41 | #define IEEE80211_ASSOC_MAX_TRIES 3 |
40 | 42 | ||
@@ -209,8 +211,9 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, | |||
209 | struct ieee80211_channel *channel, | 211 | struct ieee80211_channel *channel, |
210 | const struct ieee80211_ht_operation *ht_oper, | 212 | const struct ieee80211_ht_operation *ht_oper, |
211 | const struct ieee80211_vht_operation *vht_oper, | 213 | const struct ieee80211_vht_operation *vht_oper, |
212 | struct cfg80211_chan_def *chandef, bool verbose) | 214 | struct cfg80211_chan_def *chandef, bool tracking) |
213 | { | 215 | { |
216 | struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; | ||
214 | struct cfg80211_chan_def vht_chandef; | 217 | struct cfg80211_chan_def vht_chandef; |
215 | u32 ht_cfreq, ret; | 218 | u32 ht_cfreq, ret; |
216 | 219 | ||
@@ -229,7 +232,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, | |||
229 | ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan, | 232 | ht_cfreq = ieee80211_channel_to_frequency(ht_oper->primary_chan, |
230 | channel->band); | 233 | channel->band); |
231 | /* check that channel matches the right operating channel */ | 234 | /* check that channel matches the right operating channel */ |
232 | if (channel->center_freq != ht_cfreq) { | 235 | if (!tracking && channel->center_freq != ht_cfreq) { |
233 | /* | 236 | /* |
234 | * It's possible that some APs are confused here; | 237 | * It's possible that some APs are confused here; |
235 | * Netgear WNDR3700 sometimes reports 4 higher than | 238 | * Netgear WNDR3700 sometimes reports 4 higher than |
@@ -237,11 +240,10 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, | |||
237 | * since we look at probe response/beacon data here | 240 | * since we look at probe response/beacon data here |
238 | * it should be OK. | 241 | * it should be OK. |
239 | */ | 242 | */ |
240 | if (verbose) | 243 | sdata_info(sdata, |
241 | sdata_info(sdata, | 244 | "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n", |
242 | "Wrong control channel: center-freq: %d ht-cfreq: %d ht->primary_chan: %d band: %d - Disabling HT\n", | 245 | channel->center_freq, ht_cfreq, |
243 | channel->center_freq, ht_cfreq, | 246 | ht_oper->primary_chan, channel->band); |
244 | ht_oper->primary_chan, channel->band); | ||
245 | ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT; | 247 | ret = IEEE80211_STA_DISABLE_HT | IEEE80211_STA_DISABLE_VHT; |
246 | goto out; | 248 | goto out; |
247 | } | 249 | } |
@@ -295,7 +297,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, | |||
295 | channel->band); | 297 | channel->band); |
296 | break; | 298 | break; |
297 | default: | 299 | default: |
298 | if (verbose) | 300 | if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) |
299 | sdata_info(sdata, | 301 | sdata_info(sdata, |
300 | "AP VHT operation IE has invalid channel width (%d), disable VHT\n", | 302 | "AP VHT operation IE has invalid channel width (%d), disable VHT\n", |
301 | vht_oper->chan_width); | 303 | vht_oper->chan_width); |
@@ -304,7 +306,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, | |||
304 | } | 306 | } |
305 | 307 | ||
306 | if (!cfg80211_chandef_valid(&vht_chandef)) { | 308 | if (!cfg80211_chandef_valid(&vht_chandef)) { |
307 | if (verbose) | 309 | if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) |
308 | sdata_info(sdata, | 310 | sdata_info(sdata, |
309 | "AP VHT information is invalid, disable VHT\n"); | 311 | "AP VHT information is invalid, disable VHT\n"); |
310 | ret = IEEE80211_STA_DISABLE_VHT; | 312 | ret = IEEE80211_STA_DISABLE_VHT; |
@@ -317,7 +319,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata, | |||
317 | } | 319 | } |
318 | 320 | ||
319 | if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) { | 321 | if (!cfg80211_chandef_compatible(chandef, &vht_chandef)) { |
320 | if (verbose) | 322 | if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) |
321 | sdata_info(sdata, | 323 | sdata_info(sdata, |
322 | "AP VHT information doesn't match HT, disable VHT\n"); | 324 | "AP VHT information doesn't match HT, disable VHT\n"); |
323 | ret = IEEE80211_STA_DISABLE_VHT; | 325 | ret = IEEE80211_STA_DISABLE_VHT; |
@@ -333,18 +335,27 @@ out: | |||
333 | if (ret & IEEE80211_STA_DISABLE_VHT) | 335 | if (ret & IEEE80211_STA_DISABLE_VHT) |
334 | vht_chandef = *chandef; | 336 | vht_chandef = *chandef; |
335 | 337 | ||
338 | /* | ||
339 | * Ignore the DISABLED flag when we're already connected and only | ||
340 | * tracking the APs beacon for bandwidth changes - otherwise we | ||
341 | * might get disconnected here if we connect to an AP, update our | ||
342 | * regulatory information based on the AP's country IE and the | ||
343 | * information we have is wrong/outdated and disables the channel | ||
344 | * that we're actually using for the connection to the AP. | ||
345 | */ | ||
336 | while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef, | 346 | while (!cfg80211_chandef_usable(sdata->local->hw.wiphy, chandef, |
337 | IEEE80211_CHAN_DISABLED)) { | 347 | tracking ? 0 : |
348 | IEEE80211_CHAN_DISABLED)) { | ||
338 | if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) { | 349 | if (WARN_ON(chandef->width == NL80211_CHAN_WIDTH_20_NOHT)) { |
339 | ret = IEEE80211_STA_DISABLE_HT | | 350 | ret = IEEE80211_STA_DISABLE_HT | |
340 | IEEE80211_STA_DISABLE_VHT; | 351 | IEEE80211_STA_DISABLE_VHT; |
341 | goto out; | 352 | break; |
342 | } | 353 | } |
343 | 354 | ||
344 | ret |= chandef_downgrade(chandef); | 355 | ret |= chandef_downgrade(chandef); |
345 | } | 356 | } |
346 | 357 | ||
347 | if (chandef->width != vht_chandef.width && verbose) | 358 | if (chandef->width != vht_chandef.width && !tracking) |
348 | sdata_info(sdata, | 359 | sdata_info(sdata, |
349 | "capabilities/regulatory prevented using AP HT/VHT configuration, downgraded\n"); | 360 | "capabilities/regulatory prevented using AP HT/VHT configuration, downgraded\n"); |
350 | 361 | ||
@@ -384,7 +395,7 @@ static int ieee80211_config_bw(struct ieee80211_sub_if_data *sdata, | |||
384 | 395 | ||
385 | /* calculate new channel (type) based on HT/VHT operation IEs */ | 396 | /* calculate new channel (type) based on HT/VHT operation IEs */ |
386 | flags = ieee80211_determine_chantype(sdata, sband, chan, ht_oper, | 397 | flags = ieee80211_determine_chantype(sdata, sband, chan, ht_oper, |
387 | vht_oper, &chandef, false); | 398 | vht_oper, &chandef, true); |
388 | 399 | ||
389 | /* | 400 | /* |
390 | * Downgrade the new channel if we associated with restricted | 401 | * Downgrade the new channel if we associated with restricted |
@@ -3394,10 +3405,13 @@ static int ieee80211_probe_auth(struct ieee80211_sub_if_data *sdata) | |||
3394 | 3405 | ||
3395 | if (tx_flags == 0) { | 3406 | if (tx_flags == 0) { |
3396 | auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; | 3407 | auth_data->timeout = jiffies + IEEE80211_AUTH_TIMEOUT; |
3397 | ifmgd->auth_data->timeout_started = true; | 3408 | auth_data->timeout_started = true; |
3398 | run_again(sdata, auth_data->timeout); | 3409 | run_again(sdata, auth_data->timeout); |
3399 | } else { | 3410 | } else { |
3400 | auth_data->timeout_started = false; | 3411 | auth_data->timeout = |
3412 | round_jiffies_up(jiffies + IEEE80211_AUTH_TIMEOUT_LONG); | ||
3413 | auth_data->timeout_started = true; | ||
3414 | run_again(sdata, auth_data->timeout); | ||
3401 | } | 3415 | } |
3402 | 3416 | ||
3403 | return 0; | 3417 | return 0; |
@@ -3434,7 +3448,11 @@ static int ieee80211_do_assoc(struct ieee80211_sub_if_data *sdata) | |||
3434 | assoc_data->timeout_started = true; | 3448 | assoc_data->timeout_started = true; |
3435 | run_again(sdata, assoc_data->timeout); | 3449 | run_again(sdata, assoc_data->timeout); |
3436 | } else { | 3450 | } else { |
3437 | assoc_data->timeout_started = false; | 3451 | assoc_data->timeout = |
3452 | round_jiffies_up(jiffies + | ||
3453 | IEEE80211_ASSOC_TIMEOUT_LONG); | ||
3454 | assoc_data->timeout_started = true; | ||
3455 | run_again(sdata, assoc_data->timeout); | ||
3438 | } | 3456 | } |
3439 | 3457 | ||
3440 | return 0; | 3458 | return 0; |
@@ -3829,7 +3847,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, | |||
3829 | ifmgd->flags |= ieee80211_determine_chantype(sdata, sband, | 3847 | ifmgd->flags |= ieee80211_determine_chantype(sdata, sband, |
3830 | cbss->channel, | 3848 | cbss->channel, |
3831 | ht_oper, vht_oper, | 3849 | ht_oper, vht_oper, |
3832 | &chandef, true); | 3850 | &chandef, false); |
3833 | 3851 | ||
3834 | sdata->needed_rx_chains = min(ieee80211_ht_vht_rx_chains(sdata, cbss), | 3852 | sdata->needed_rx_chains = min(ieee80211_ht_vht_rx_chains(sdata, cbss), |
3835 | local->rx_chains); | 3853 | local->rx_chains); |
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index 7dcc376eea5f..2f8010707d01 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c | |||
@@ -526,7 +526,7 @@ static bool tcp_in_window(const struct nf_conn *ct, | |||
526 | const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple; | 526 | const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple; |
527 | __u32 seq, ack, sack, end, win, swin; | 527 | __u32 seq, ack, sack, end, win, swin; |
528 | s16 receiver_offset; | 528 | s16 receiver_offset; |
529 | bool res; | 529 | bool res, in_recv_win; |
530 | 530 | ||
531 | /* | 531 | /* |
532 | * Get the required data from the packet. | 532 | * Get the required data from the packet. |
@@ -649,14 +649,18 @@ static bool tcp_in_window(const struct nf_conn *ct, | |||
649 | receiver->td_end, receiver->td_maxend, receiver->td_maxwin, | 649 | receiver->td_end, receiver->td_maxend, receiver->td_maxwin, |
650 | receiver->td_scale); | 650 | receiver->td_scale); |
651 | 651 | ||
652 | /* Is the ending sequence in the receive window (if available)? */ | ||
653 | in_recv_win = !receiver->td_maxwin || | ||
654 | after(end, sender->td_end - receiver->td_maxwin - 1); | ||
655 | |||
652 | pr_debug("tcp_in_window: I=%i II=%i III=%i IV=%i\n", | 656 | pr_debug("tcp_in_window: I=%i II=%i III=%i IV=%i\n", |
653 | before(seq, sender->td_maxend + 1), | 657 | before(seq, sender->td_maxend + 1), |
654 | after(end, sender->td_end - receiver->td_maxwin - 1), | 658 | (in_recv_win ? 1 : 0), |
655 | before(sack, receiver->td_end + 1), | 659 | before(sack, receiver->td_end + 1), |
656 | after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)); | 660 | after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)); |
657 | 661 | ||
658 | if (before(seq, sender->td_maxend + 1) && | 662 | if (before(seq, sender->td_maxend + 1) && |
659 | after(end, sender->td_end - receiver->td_maxwin - 1) && | 663 | in_recv_win && |
660 | before(sack, receiver->td_end + 1) && | 664 | before(sack, receiver->td_end + 1) && |
661 | after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) { | 665 | after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1)) { |
662 | /* | 666 | /* |
@@ -725,7 +729,7 @@ static bool tcp_in_window(const struct nf_conn *ct, | |||
725 | nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, | 729 | nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, |
726 | "nf_ct_tcp: %s ", | 730 | "nf_ct_tcp: %s ", |
727 | before(seq, sender->td_maxend + 1) ? | 731 | before(seq, sender->td_maxend + 1) ? |
728 | after(end, sender->td_end - receiver->td_maxwin - 1) ? | 732 | in_recv_win ? |
729 | before(sack, receiver->td_end + 1) ? | 733 | before(sack, receiver->td_end + 1) ? |
730 | after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG" | 734 | after(sack, receiver->td_end - MAXACKWINDOW(sender) - 1) ? "BUG" |
731 | : "ACK is under the lower bound (possible overly delayed ACK)" | 735 | : "ACK is under the lower bound (possible overly delayed ACK)" |
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index 962e9792e317..d92cc317bf8b 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c | |||
@@ -419,6 +419,7 @@ __build_packet_message(struct nfnl_log_net *log, | |||
419 | nfmsg->version = NFNETLINK_V0; | 419 | nfmsg->version = NFNETLINK_V0; |
420 | nfmsg->res_id = htons(inst->group_num); | 420 | nfmsg->res_id = htons(inst->group_num); |
421 | 421 | ||
422 | memset(&pmsg, 0, sizeof(pmsg)); | ||
422 | pmsg.hw_protocol = skb->protocol; | 423 | pmsg.hw_protocol = skb->protocol; |
423 | pmsg.hook = hooknum; | 424 | pmsg.hook = hooknum; |
424 | 425 | ||
@@ -498,7 +499,10 @@ __build_packet_message(struct nfnl_log_net *log, | |||
498 | if (indev && skb->dev && | 499 | if (indev && skb->dev && |
499 | skb->mac_header != skb->network_header) { | 500 | skb->mac_header != skb->network_header) { |
500 | struct nfulnl_msg_packet_hw phw; | 501 | struct nfulnl_msg_packet_hw phw; |
501 | int len = dev_parse_header(skb, phw.hw_addr); | 502 | int len; |
503 | |||
504 | memset(&phw, 0, sizeof(phw)); | ||
505 | len = dev_parse_header(skb, phw.hw_addr); | ||
502 | if (len > 0) { | 506 | if (len > 0) { |
503 | phw.hw_addrlen = htons(len); | 507 | phw.hw_addrlen = htons(len); |
504 | if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw)) | 508 | if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw)) |
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c index 971ea145ab3e..8a703c3dd318 100644 --- a/net/netfilter/nfnetlink_queue_core.c +++ b/net/netfilter/nfnetlink_queue_core.c | |||
@@ -463,7 +463,10 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue, | |||
463 | if (indev && entskb->dev && | 463 | if (indev && entskb->dev && |
464 | entskb->mac_header != entskb->network_header) { | 464 | entskb->mac_header != entskb->network_header) { |
465 | struct nfqnl_msg_packet_hw phw; | 465 | struct nfqnl_msg_packet_hw phw; |
466 | int len = dev_parse_header(entskb, phw.hw_addr); | 466 | int len; |
467 | |||
468 | memset(&phw, 0, sizeof(phw)); | ||
469 | len = dev_parse_header(entskb, phw.hw_addr); | ||
467 | if (len) { | 470 | if (len) { |
468 | phw.hw_addrlen = htons(len); | 471 | phw.hw_addrlen = htons(len); |
469 | if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw)) | 472 | if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw)) |
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c index 7011c71646f0..6113cc7efffc 100644 --- a/net/netfilter/xt_TCPMSS.c +++ b/net/netfilter/xt_TCPMSS.c | |||
@@ -52,7 +52,8 @@ tcpmss_mangle_packet(struct sk_buff *skb, | |||
52 | { | 52 | { |
53 | const struct xt_tcpmss_info *info = par->targinfo; | 53 | const struct xt_tcpmss_info *info = par->targinfo; |
54 | struct tcphdr *tcph; | 54 | struct tcphdr *tcph; |
55 | unsigned int tcplen, i; | 55 | int len, tcp_hdrlen; |
56 | unsigned int i; | ||
56 | __be16 oldval; | 57 | __be16 oldval; |
57 | u16 newmss; | 58 | u16 newmss; |
58 | u8 *opt; | 59 | u8 *opt; |
@@ -64,11 +65,14 @@ tcpmss_mangle_packet(struct sk_buff *skb, | |||
64 | if (!skb_make_writable(skb, skb->len)) | 65 | if (!skb_make_writable(skb, skb->len)) |
65 | return -1; | 66 | return -1; |
66 | 67 | ||
67 | tcplen = skb->len - tcphoff; | 68 | len = skb->len - tcphoff; |
69 | if (len < (int)sizeof(struct tcphdr)) | ||
70 | return -1; | ||
71 | |||
68 | tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); | 72 | tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); |
73 | tcp_hdrlen = tcph->doff * 4; | ||
69 | 74 | ||
70 | /* Header cannot be larger than the packet */ | 75 | if (len < tcp_hdrlen) |
71 | if (tcplen < tcph->doff*4) | ||
72 | return -1; | 76 | return -1; |
73 | 77 | ||
74 | if (info->mss == XT_TCPMSS_CLAMP_PMTU) { | 78 | if (info->mss == XT_TCPMSS_CLAMP_PMTU) { |
@@ -87,9 +91,8 @@ tcpmss_mangle_packet(struct sk_buff *skb, | |||
87 | newmss = info->mss; | 91 | newmss = info->mss; |
88 | 92 | ||
89 | opt = (u_int8_t *)tcph; | 93 | opt = (u_int8_t *)tcph; |
90 | for (i = sizeof(struct tcphdr); i < tcph->doff*4; i += optlen(opt, i)) { | 94 | for (i = sizeof(struct tcphdr); i <= tcp_hdrlen - TCPOLEN_MSS; i += optlen(opt, i)) { |
91 | if (opt[i] == TCPOPT_MSS && tcph->doff*4 - i >= TCPOLEN_MSS && | 95 | if (opt[i] == TCPOPT_MSS && opt[i+1] == TCPOLEN_MSS) { |
92 | opt[i+1] == TCPOLEN_MSS) { | ||
93 | u_int16_t oldmss; | 96 | u_int16_t oldmss; |
94 | 97 | ||
95 | oldmss = (opt[i+2] << 8) | opt[i+3]; | 98 | oldmss = (opt[i+2] << 8) | opt[i+3]; |
@@ -112,9 +115,10 @@ tcpmss_mangle_packet(struct sk_buff *skb, | |||
112 | } | 115 | } |
113 | 116 | ||
114 | /* There is data after the header so the option can't be added | 117 | /* There is data after the header so the option can't be added |
115 | without moving it, and doing so may make the SYN packet | 118 | * without moving it, and doing so may make the SYN packet |
116 | itself too large. Accept the packet unmodified instead. */ | 119 | * itself too large. Accept the packet unmodified instead. |
117 | if (tcplen > tcph->doff*4) | 120 | */ |
121 | if (len > tcp_hdrlen) | ||
118 | return 0; | 122 | return 0; |
119 | 123 | ||
120 | /* | 124 | /* |
@@ -143,10 +147,10 @@ tcpmss_mangle_packet(struct sk_buff *skb, | |||
143 | newmss = min(newmss, (u16)1220); | 147 | newmss = min(newmss, (u16)1220); |
144 | 148 | ||
145 | opt = (u_int8_t *)tcph + sizeof(struct tcphdr); | 149 | opt = (u_int8_t *)tcph + sizeof(struct tcphdr); |
146 | memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr)); | 150 | memmove(opt + TCPOLEN_MSS, opt, len - sizeof(struct tcphdr)); |
147 | 151 | ||
148 | inet_proto_csum_replace2(&tcph->check, skb, | 152 | inet_proto_csum_replace2(&tcph->check, skb, |
149 | htons(tcplen), htons(tcplen + TCPOLEN_MSS), 1); | 153 | htons(len), htons(len + TCPOLEN_MSS), 1); |
150 | opt[0] = TCPOPT_MSS; | 154 | opt[0] = TCPOPT_MSS; |
151 | opt[1] = TCPOLEN_MSS; | 155 | opt[1] = TCPOLEN_MSS; |
152 | opt[2] = (newmss & 0xff00) >> 8; | 156 | opt[2] = (newmss & 0xff00) >> 8; |
diff --git a/net/netfilter/xt_TCPOPTSTRIP.c b/net/netfilter/xt_TCPOPTSTRIP.c index b68fa191710f..625fa1d636a0 100644 --- a/net/netfilter/xt_TCPOPTSTRIP.c +++ b/net/netfilter/xt_TCPOPTSTRIP.c | |||
@@ -38,7 +38,7 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb, | |||
38 | struct tcphdr *tcph; | 38 | struct tcphdr *tcph; |
39 | u_int16_t n, o; | 39 | u_int16_t n, o; |
40 | u_int8_t *opt; | 40 | u_int8_t *opt; |
41 | int len; | 41 | int len, tcp_hdrlen; |
42 | 42 | ||
43 | /* This is a fragment, no TCP header is available */ | 43 | /* This is a fragment, no TCP header is available */ |
44 | if (par->fragoff != 0) | 44 | if (par->fragoff != 0) |
@@ -52,7 +52,9 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb, | |||
52 | return NF_DROP; | 52 | return NF_DROP; |
53 | 53 | ||
54 | tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); | 54 | tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); |
55 | if (tcph->doff * 4 > len) | 55 | tcp_hdrlen = tcph->doff * 4; |
56 | |||
57 | if (len < tcp_hdrlen) | ||
56 | return NF_DROP; | 58 | return NF_DROP; |
57 | 59 | ||
58 | opt = (u_int8_t *)tcph; | 60 | opt = (u_int8_t *)tcph; |
@@ -61,10 +63,10 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb, | |||
61 | * Walk through all TCP options - if we find some option to remove, | 63 | * Walk through all TCP options - if we find some option to remove, |
62 | * set all octets to %TCPOPT_NOP and adjust checksum. | 64 | * set all octets to %TCPOPT_NOP and adjust checksum. |
63 | */ | 65 | */ |
64 | for (i = sizeof(struct tcphdr); i < tcp_hdrlen(skb); i += optl) { | 66 | for (i = sizeof(struct tcphdr); i < tcp_hdrlen - 1; i += optl) { |
65 | optl = optlen(opt, i); | 67 | optl = optlen(opt, i); |
66 | 68 | ||
67 | if (i + optl > tcp_hdrlen(skb)) | 69 | if (i + optl > tcp_hdrlen) |
68 | break; | 70 | break; |
69 | 71 | ||
70 | if (!tcpoptstrip_test_bit(info->strip_bmap, opt[i])) | 72 | if (!tcpoptstrip_test_bit(info->strip_bmap, opt[i])) |
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index 512718adb0d5..f85f8a2ad6cf 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c | |||
@@ -789,6 +789,10 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb) | |||
789 | struct net *net = sock_net(skb->sk); | 789 | struct net *net = sock_net(skb->sk); |
790 | int chains_to_skip = cb->args[0]; | 790 | int chains_to_skip = cb->args[0]; |
791 | int fams_to_skip = cb->args[1]; | 791 | int fams_to_skip = cb->args[1]; |
792 | bool need_locking = chains_to_skip || fams_to_skip; | ||
793 | |||
794 | if (need_locking) | ||
795 | genl_lock(); | ||
792 | 796 | ||
793 | for (i = chains_to_skip; i < GENL_FAM_TAB_SIZE; i++) { | 797 | for (i = chains_to_skip; i < GENL_FAM_TAB_SIZE; i++) { |
794 | n = 0; | 798 | n = 0; |
@@ -810,6 +814,9 @@ errout: | |||
810 | cb->args[0] = i; | 814 | cb->args[0] = i; |
811 | cb->args[1] = n; | 815 | cb->args[1] = n; |
812 | 816 | ||
817 | if (need_locking) | ||
818 | genl_unlock(); | ||
819 | |||
813 | return skb->len; | 820 | return skb->len; |
814 | } | 821 | } |
815 | 822 | ||
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 22c5f399f1cf..ab101f715447 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c | |||
@@ -535,6 +535,7 @@ int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb) | |||
535 | { | 535 | { |
536 | struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts); | 536 | struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts); |
537 | 537 | ||
538 | OVS_CB(skb)->tun_key = NULL; | ||
538 | return do_execute_actions(dp, skb, acts->actions, | 539 | return do_execute_actions(dp, skb, acts->actions, |
539 | acts->actions_len, false); | 540 | acts->actions_len, false); |
540 | } | 541 | } |
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index f7e3a0d84c40..f2ed7600084e 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c | |||
@@ -2076,9 +2076,6 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info) | |||
2076 | ovs_notify(reply, info, &ovs_dp_vport_multicast_group); | 2076 | ovs_notify(reply, info, &ovs_dp_vport_multicast_group); |
2077 | return 0; | 2077 | return 0; |
2078 | 2078 | ||
2079 | rtnl_unlock(); | ||
2080 | return 0; | ||
2081 | |||
2082 | exit_free: | 2079 | exit_free: |
2083 | kfree_skb(reply); | 2080 | kfree_skb(reply); |
2084 | exit_unlock: | 2081 | exit_unlock: |
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 5c519b121e1b..1aa84dc58777 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c | |||
@@ -240,7 +240,7 @@ static struct flex_array *alloc_buckets(unsigned int n_buckets) | |||
240 | struct flex_array *buckets; | 240 | struct flex_array *buckets; |
241 | int i, err; | 241 | int i, err; |
242 | 242 | ||
243 | buckets = flex_array_alloc(sizeof(struct hlist_head *), | 243 | buckets = flex_array_alloc(sizeof(struct hlist_head), |
244 | n_buckets, GFP_KERNEL); | 244 | n_buckets, GFP_KERNEL); |
245 | if (!buckets) | 245 | if (!buckets) |
246 | return NULL; | 246 | return NULL; |
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 281c1bded1f6..51b968d3febb 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
@@ -285,6 +285,45 @@ static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind) | |||
285 | return q; | 285 | return q; |
286 | } | 286 | } |
287 | 287 | ||
288 | /* The linklayer setting was not transferred from iproute2, in older | ||
289 | * versions, and the rate table lookup system has been dropped from | ||
290 | * the kernel. To stay backward compatible with older iproute2 tc | ||
291 | * utils, we detect the linklayer setting by checking whether the rate | ||
292 | * table was modified. | ||
293 | * | ||
294 | * For linklayer ATM table entries, the rate table will be aligned to | ||
295 | * 48 bytes, thus some table entries will contain the same value. The | ||
296 | * mpu (min packet unit) is also encoded into the old rate table, thus | ||
297 | * starting from the mpu, we find low and high table entries for | ||
298 | * mapping this cell. If these entries contain the same value, then | ||
299 | * the rate table has been modified for linklayer ATM. | ||
300 | * | ||
301 | * This is done by rounding the mpu up to the nearest 48-byte cell/entry, | ||
302 | * then rounding up to the next cell, calculating the table entry one | ||
303 | * below, and comparing the two. | ||
304 | */ | ||
305 | static __u8 __detect_linklayer(struct tc_ratespec *r, __u32 *rtab) | ||
306 | { | ||
307 | int low = roundup(r->mpu, 48); | ||
308 | int high = roundup(low+1, 48); | ||
309 | int cell_low = low >> r->cell_log; | ||
310 | int cell_high = (high >> r->cell_log) - 1; | ||
311 | |||
312 | /* rtab is too inaccurate at rates > 100Mbit/s */ | ||
313 | if ((r->rate > (100000000/8)) || (rtab[0] == 0)) { | ||
314 | pr_debug("TC linklayer: Giving up ATM detection\n"); | ||
315 | return TC_LINKLAYER_ETHERNET; | ||
316 | } | ||
317 | |||
318 | if ((cell_high > cell_low) && (cell_high < 256) | ||
319 | && (rtab[cell_low] == rtab[cell_high])) { | ||
320 | pr_debug("TC linklayer: Detected ATM, low(%d)=high(%d)=%u\n", | ||
321 | cell_low, cell_high, rtab[cell_high]); | ||
322 | return TC_LINKLAYER_ATM; | ||
323 | } | ||
324 | return TC_LINKLAYER_ETHERNET; | ||
325 | } | ||
326 | |||
288 | static struct qdisc_rate_table *qdisc_rtab_list; | 327 | static struct qdisc_rate_table *qdisc_rtab_list; |
289 | 328 | ||
290 | struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab) | 329 | struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *tab) |
@@ -308,6 +347,8 @@ struct qdisc_rate_table *qdisc_get_rtab(struct tc_ratespec *r, struct nlattr *ta | |||
308 | rtab->rate = *r; | 347 | rtab->rate = *r; |
309 | rtab->refcnt = 1; | 348 | rtab->refcnt = 1; |
310 | memcpy(rtab->data, nla_data(tab), 1024); | 349 | memcpy(rtab->data, nla_data(tab), 1024); |
350 | if (r->linklayer == TC_LINKLAYER_UNAWARE) | ||
351 | r->linklayer = __detect_linklayer(r, rtab->data); | ||
311 | rtab->next = qdisc_rtab_list; | 352 | rtab->next = qdisc_rtab_list; |
312 | qdisc_rtab_list = rtab; | 353 | qdisc_rtab_list = rtab; |
313 | } | 354 | } |
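In short, __detect_linklayer() rounds the mpu up to the next 48-byte ATM cell, compares the rate-table slot for that size with the slot just below the next cell boundary, and treats equal values as evidence that the table was built with ATM cell alignment. The standalone C fragment below mirrors that index arithmetic against a synthetic 256-entry table, just to make the heuristic concrete; the enum values and per-cell cost are illustrative, not the uapi constants:

#include <stdint.h>
#include <stdio.h>

enum { LINKLAYER_ETHERNET, LINKLAYER_ATM };     /* illustrative values */

static int roundup48(int x)
{
        return ((x + 47) / 48) * 48;
}

/* Mirror of the detection logic: rtab has 256 slots, cell_log and mpu
 * come from the rate spec, rate is in bytes per second. */
static int detect_linklayer(uint32_t rate, int cell_log, int mpu,
                            const uint32_t *rtab)
{
        int low = roundup48(mpu);
        int high = roundup48(low + 1);
        int cell_low = low >> cell_log;
        int cell_high = (high >> cell_log) - 1;

        if (rate > 100000000 / 8 || rtab[0] == 0)       /* too inaccurate */
                return LINKLAYER_ETHERNET;

        if (cell_high > cell_low && cell_high < 256 &&
            rtab[cell_low] == rtab[cell_high])
                return LINKLAYER_ATM;
        return LINKLAYER_ETHERNET;
}

int main(void)
{
        uint32_t rtab[256];
        int i, cell_log = 3;    /* each slot covers 8 bytes of packet size */

        /* Build a table the way an ATM-aware tc would: the cost only jumps
         * at 48-byte cell boundaries, so neighbouring slots repeat. */
        for (i = 0; i < 256; i++) {
                int bytes = (i + 1) << cell_log;
                int cells = (bytes + 47) / 48;
                rtab[i] = (uint32_t)cells * 1000;       /* arbitrary cell cost */
        }
        printf("detected: %s\n",
               detect_linklayer(1000000, cell_log, 0, rtab) == LINKLAYER_ATM
               ? "ATM" : "Ethernet");
        return 0;
}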
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 4626cef4b76e..48be3d5c0d92 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/rcupdate.h> | 25 | #include <linux/rcupdate.h> |
26 | #include <linux/list.h> | 26 | #include <linux/list.h> |
27 | #include <linux/slab.h> | 27 | #include <linux/slab.h> |
28 | #include <linux/if_vlan.h> | ||
28 | #include <net/sch_generic.h> | 29 | #include <net/sch_generic.h> |
29 | #include <net/pkt_sched.h> | 30 | #include <net/pkt_sched.h> |
30 | #include <net/dst.h> | 31 | #include <net/dst.h> |
@@ -207,15 +208,19 @@ void __qdisc_run(struct Qdisc *q) | |||
207 | 208 | ||
208 | unsigned long dev_trans_start(struct net_device *dev) | 209 | unsigned long dev_trans_start(struct net_device *dev) |
209 | { | 210 | { |
210 | unsigned long val, res = dev->trans_start; | 211 | unsigned long val, res; |
211 | unsigned int i; | 212 | unsigned int i; |
212 | 213 | ||
214 | if (is_vlan_dev(dev)) | ||
215 | dev = vlan_dev_real_dev(dev); | ||
216 | res = dev->trans_start; | ||
213 | for (i = 0; i < dev->num_tx_queues; i++) { | 217 | for (i = 0; i < dev->num_tx_queues; i++) { |
214 | val = netdev_get_tx_queue(dev, i)->trans_start; | 218 | val = netdev_get_tx_queue(dev, i)->trans_start; |
215 | if (val && time_after(val, res)) | 219 | if (val && time_after(val, res)) |
216 | res = val; | 220 | res = val; |
217 | } | 221 | } |
218 | dev->trans_start = res; | 222 | dev->trans_start = res; |
223 | |||
219 | return res; | 224 | return res; |
220 | } | 225 | } |
221 | EXPORT_SYMBOL(dev_trans_start); | 226 | EXPORT_SYMBOL(dev_trans_start); |
@@ -904,6 +909,7 @@ void psched_ratecfg_precompute(struct psched_ratecfg *r, | |||
904 | memset(r, 0, sizeof(*r)); | 909 | memset(r, 0, sizeof(*r)); |
905 | r->overhead = conf->overhead; | 910 | r->overhead = conf->overhead; |
906 | r->rate_bytes_ps = conf->rate; | 911 | r->rate_bytes_ps = conf->rate; |
912 | r->linklayer = (conf->linklayer & TC_LINKLAYER_MASK); | ||
907 | r->mult = 1; | 913 | r->mult = 1; |
908 | /* | 914 | /* |
909 | * The deal here is to replace a divide by a reciprocal one | 915 | * The deal here is to replace a divide by a reciprocal one |
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 45e751527dfc..c2178b15ca6e 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c | |||
@@ -1329,6 +1329,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, | |||
1329 | struct htb_sched *q = qdisc_priv(sch); | 1329 | struct htb_sched *q = qdisc_priv(sch); |
1330 | struct htb_class *cl = (struct htb_class *)*arg, *parent; | 1330 | struct htb_class *cl = (struct htb_class *)*arg, *parent; |
1331 | struct nlattr *opt = tca[TCA_OPTIONS]; | 1331 | struct nlattr *opt = tca[TCA_OPTIONS]; |
1332 | struct qdisc_rate_table *rtab = NULL, *ctab = NULL; | ||
1332 | struct nlattr *tb[TCA_HTB_MAX + 1]; | 1333 | struct nlattr *tb[TCA_HTB_MAX + 1]; |
1333 | struct tc_htb_opt *hopt; | 1334 | struct tc_htb_opt *hopt; |
1334 | 1335 | ||
@@ -1350,6 +1351,18 @@ static int htb_change_class(struct Qdisc *sch, u32 classid, | |||
1350 | if (!hopt->rate.rate || !hopt->ceil.rate) | 1351 | if (!hopt->rate.rate || !hopt->ceil.rate) |
1351 | goto failure; | 1352 | goto failure; |
1352 | 1353 | ||
1354 | /* Keep backward compatibility with rate_table based iproute2 tc */ | ||
1355 | if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE) { | ||
1356 | rtab = qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]); | ||
1357 | if (rtab) | ||
1358 | qdisc_put_rtab(rtab); | ||
1359 | } | ||
1360 | if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE) { | ||
1361 | ctab = qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]); | ||
1362 | if (ctab) | ||
1363 | qdisc_put_rtab(ctab); | ||
1364 | } | ||
1365 | |||
1353 | if (!cl) { /* new class */ | 1366 | if (!cl) { /* new class */ |
1354 | struct Qdisc *new_q; | 1367 | struct Qdisc *new_q; |
1355 | int prio; | 1368 | int prio; |
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index bce5b79662a6..ab67efc64b24 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c | |||
@@ -846,12 +846,12 @@ void sctp_assoc_control_transport(struct sctp_association *asoc, | |||
846 | else | 846 | else |
847 | spc_state = SCTP_ADDR_AVAILABLE; | 847 | spc_state = SCTP_ADDR_AVAILABLE; |
848 | /* Don't inform ULP about transition from PF to | 848 | /* Don't inform ULP about transition from PF to |
849 | * active state and set cwnd to 1, see SCTP | 849 | * active state and set cwnd to 1 MTU, see SCTP |
850 | * Quick failover draft section 5.1, point 5 | 850 | * Quick failover draft section 5.1, point 5 |
851 | */ | 851 | */ |
852 | if (transport->state == SCTP_PF) { | 852 | if (transport->state == SCTP_PF) { |
853 | ulp_notify = false; | 853 | ulp_notify = false; |
854 | transport->cwnd = 1; | 854 | transport->cwnd = asoc->pathmtu; |
855 | } | 855 | } |
856 | transport->state = SCTP_ACTIVE; | 856 | transport->state = SCTP_ACTIVE; |
857 | break; | 857 | break; |
diff --git a/net/sctp/transport.c b/net/sctp/transport.c index bdbbc3fd7c14..8fdd16046d66 100644 --- a/net/sctp/transport.c +++ b/net/sctp/transport.c | |||
@@ -181,12 +181,12 @@ static void sctp_transport_destroy(struct sctp_transport *transport) | |||
181 | return; | 181 | return; |
182 | } | 182 | } |
183 | 183 | ||
184 | call_rcu(&transport->rcu, sctp_transport_destroy_rcu); | ||
185 | |||
186 | sctp_packet_free(&transport->packet); | 184 | sctp_packet_free(&transport->packet); |
187 | 185 | ||
188 | if (transport->asoc) | 186 | if (transport->asoc) |
189 | sctp_association_put(transport->asoc); | 187 | sctp_association_put(transport->asoc); |
188 | |||
189 | call_rcu(&transport->rcu, sctp_transport_destroy_rcu); | ||
190 | } | 190 | } |
191 | 191 | ||
192 | /* Start T3_rtx timer if it is not already running and update the heartbeat | 192 | /* Start T3_rtx timer if it is not already running and update the heartbeat |
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 74f6a704e374..ecbc4e3d83ad 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c | |||
@@ -1660,6 +1660,10 @@ call_connect(struct rpc_task *task) | |||
1660 | task->tk_action = call_connect_status; | 1660 | task->tk_action = call_connect_status; |
1661 | if (task->tk_status < 0) | 1661 | if (task->tk_status < 0) |
1662 | return; | 1662 | return; |
1663 | if (task->tk_flags & RPC_TASK_NOCONNECT) { | ||
1664 | rpc_exit(task, -ENOTCONN); | ||
1665 | return; | ||
1666 | } | ||
1663 | xprt_connect(task); | 1667 | xprt_connect(task); |
1664 | } | 1668 | } |
1665 | } | 1669 | } |
diff --git a/net/sunrpc/netns.h b/net/sunrpc/netns.h index 74d948f5d5a1..779742cfc1ff 100644 --- a/net/sunrpc/netns.h +++ b/net/sunrpc/netns.h | |||
@@ -23,6 +23,7 @@ struct sunrpc_net { | |||
23 | struct rpc_clnt *rpcb_local_clnt4; | 23 | struct rpc_clnt *rpcb_local_clnt4; |
24 | spinlock_t rpcb_clnt_lock; | 24 | spinlock_t rpcb_clnt_lock; |
25 | unsigned int rpcb_users; | 25 | unsigned int rpcb_users; |
26 | unsigned int rpcb_is_af_local : 1; | ||
26 | 27 | ||
27 | struct mutex gssp_lock; | 28 | struct mutex gssp_lock; |
28 | wait_queue_head_t gssp_wq; | 29 | wait_queue_head_t gssp_wq; |
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c index 3df764dc330c..1891a1022c17 100644 --- a/net/sunrpc/rpcb_clnt.c +++ b/net/sunrpc/rpcb_clnt.c | |||
@@ -204,13 +204,15 @@ void rpcb_put_local(struct net *net) | |||
204 | } | 204 | } |
205 | 205 | ||
206 | static void rpcb_set_local(struct net *net, struct rpc_clnt *clnt, | 206 | static void rpcb_set_local(struct net *net, struct rpc_clnt *clnt, |
207 | struct rpc_clnt *clnt4) | 207 | struct rpc_clnt *clnt4, |
208 | bool is_af_local) | ||
208 | { | 209 | { |
209 | struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); | 210 | struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); |
210 | 211 | ||
211 | /* Protected by rpcb_create_local_mutex */ | 212 | /* Protected by rpcb_create_local_mutex */ |
212 | sn->rpcb_local_clnt = clnt; | 213 | sn->rpcb_local_clnt = clnt; |
213 | sn->rpcb_local_clnt4 = clnt4; | 214 | sn->rpcb_local_clnt4 = clnt4; |
215 | sn->rpcb_is_af_local = is_af_local ? 1 : 0; | ||
214 | smp_wmb(); | 216 | smp_wmb(); |
215 | sn->rpcb_users = 1; | 217 | sn->rpcb_users = 1; |
216 | dprintk("RPC: created new rpcb local clients (rpcb_local_clnt: " | 218 | dprintk("RPC: created new rpcb local clients (rpcb_local_clnt: " |
@@ -238,6 +240,14 @@ static int rpcb_create_local_unix(struct net *net) | |||
238 | .program = &rpcb_program, | 240 | .program = &rpcb_program, |
239 | .version = RPCBVERS_2, | 241 | .version = RPCBVERS_2, |
240 | .authflavor = RPC_AUTH_NULL, | 242 | .authflavor = RPC_AUTH_NULL, |
243 | /* | ||
244 | * We turn off the idle timeout to prevent the kernel | ||
245 | * from automatically disconnecting the socket. | ||
246 | * Otherwise, we'd have to cache the mount namespace | ||
247 | * of the caller and somehow pass that to the socket | ||
248 | * reconnect code. | ||
249 | */ | ||
250 | .flags = RPC_CLNT_CREATE_NO_IDLE_TIMEOUT, | ||
241 | }; | 251 | }; |
242 | struct rpc_clnt *clnt, *clnt4; | 252 | struct rpc_clnt *clnt, *clnt4; |
243 | int result = 0; | 253 | int result = 0; |
@@ -263,7 +273,7 @@ static int rpcb_create_local_unix(struct net *net) | |||
263 | clnt4 = NULL; | 273 | clnt4 = NULL; |
264 | } | 274 | } |
265 | 275 | ||
266 | rpcb_set_local(net, clnt, clnt4); | 276 | rpcb_set_local(net, clnt, clnt4, true); |
267 | 277 | ||
268 | out: | 278 | out: |
269 | return result; | 279 | return result; |
@@ -315,7 +325,7 @@ static int rpcb_create_local_net(struct net *net) | |||
315 | clnt4 = NULL; | 325 | clnt4 = NULL; |
316 | } | 326 | } |
317 | 327 | ||
318 | rpcb_set_local(net, clnt, clnt4); | 328 | rpcb_set_local(net, clnt, clnt4, false); |
319 | 329 | ||
320 | out: | 330 | out: |
321 | return result; | 331 | return result; |
@@ -376,13 +386,16 @@ static struct rpc_clnt *rpcb_create(struct net *net, const char *hostname, | |||
376 | return rpc_create(&args); | 386 | return rpc_create(&args); |
377 | } | 387 | } |
378 | 388 | ||
379 | static int rpcb_register_call(struct rpc_clnt *clnt, struct rpc_message *msg) | 389 | static int rpcb_register_call(struct sunrpc_net *sn, struct rpc_clnt *clnt, struct rpc_message *msg, bool is_set) |
380 | { | 390 | { |
381 | int result, error = 0; | 391 | int flags = RPC_TASK_NOCONNECT; |
392 | int error, result = 0; | ||
382 | 393 | ||
394 | if (is_set || !sn->rpcb_is_af_local) | ||
395 | flags = RPC_TASK_SOFTCONN; | ||
383 | msg->rpc_resp = &result; | 396 | msg->rpc_resp = &result; |
384 | 397 | ||
385 | error = rpc_call_sync(clnt, msg, RPC_TASK_SOFTCONN); | 398 | error = rpc_call_sync(clnt, msg, flags); |
386 | if (error < 0) { | 399 | if (error < 0) { |
387 | dprintk("RPC: failed to contact local rpcbind " | 400 | dprintk("RPC: failed to contact local rpcbind " |
388 | "server (errno %d).\n", -error); | 401 | "server (errno %d).\n", -error); |
@@ -439,16 +452,19 @@ int rpcb_register(struct net *net, u32 prog, u32 vers, int prot, unsigned short | |||
439 | .rpc_argp = &map, | 452 | .rpc_argp = &map, |
440 | }; | 453 | }; |
441 | struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); | 454 | struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); |
455 | bool is_set = false; | ||
442 | 456 | ||
443 | dprintk("RPC: %sregistering (%u, %u, %d, %u) with local " | 457 | dprintk("RPC: %sregistering (%u, %u, %d, %u) with local " |
444 | "rpcbind\n", (port ? "" : "un"), | 458 | "rpcbind\n", (port ? "" : "un"), |
445 | prog, vers, prot, port); | 459 | prog, vers, prot, port); |
446 | 460 | ||
447 | msg.rpc_proc = &rpcb_procedures2[RPCBPROC_UNSET]; | 461 | msg.rpc_proc = &rpcb_procedures2[RPCBPROC_UNSET]; |
448 | if (port) | 462 | if (port != 0) { |
449 | msg.rpc_proc = &rpcb_procedures2[RPCBPROC_SET]; | 463 | msg.rpc_proc = &rpcb_procedures2[RPCBPROC_SET]; |
464 | is_set = true; | ||
465 | } | ||
450 | 466 | ||
451 | return rpcb_register_call(sn->rpcb_local_clnt, &msg); | 467 | return rpcb_register_call(sn, sn->rpcb_local_clnt, &msg, is_set); |
452 | } | 468 | } |
453 | 469 | ||
454 | /* | 470 | /* |
@@ -461,6 +477,7 @@ static int rpcb_register_inet4(struct sunrpc_net *sn, | |||
461 | const struct sockaddr_in *sin = (const struct sockaddr_in *)sap; | 477 | const struct sockaddr_in *sin = (const struct sockaddr_in *)sap; |
462 | struct rpcbind_args *map = msg->rpc_argp; | 478 | struct rpcbind_args *map = msg->rpc_argp; |
463 | unsigned short port = ntohs(sin->sin_port); | 479 | unsigned short port = ntohs(sin->sin_port); |
480 | bool is_set = false; | ||
464 | int result; | 481 | int result; |
465 | 482 | ||
466 | map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL); | 483 | map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL); |
@@ -471,10 +488,12 @@ static int rpcb_register_inet4(struct sunrpc_net *sn, | |||
471 | map->r_addr, map->r_netid); | 488 | map->r_addr, map->r_netid); |
472 | 489 | ||
473 | msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET]; | 490 | msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET]; |
474 | if (port) | 491 | if (port != 0) { |
475 | msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET]; | 492 | msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET]; |
493 | is_set = true; | ||
494 | } | ||
476 | 495 | ||
477 | result = rpcb_register_call(sn->rpcb_local_clnt4, msg); | 496 | result = rpcb_register_call(sn, sn->rpcb_local_clnt4, msg, is_set); |
478 | kfree(map->r_addr); | 497 | kfree(map->r_addr); |
479 | return result; | 498 | return result; |
480 | } | 499 | } |
@@ -489,6 +508,7 @@ static int rpcb_register_inet6(struct sunrpc_net *sn, | |||
489 | const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)sap; | 508 | const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)sap; |
490 | struct rpcbind_args *map = msg->rpc_argp; | 509 | struct rpcbind_args *map = msg->rpc_argp; |
491 | unsigned short port = ntohs(sin6->sin6_port); | 510 | unsigned short port = ntohs(sin6->sin6_port); |
511 | bool is_set = false; | ||
492 | int result; | 512 | int result; |
493 | 513 | ||
494 | map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL); | 514 | map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL); |
@@ -499,10 +519,12 @@ static int rpcb_register_inet6(struct sunrpc_net *sn, | |||
499 | map->r_addr, map->r_netid); | 519 | map->r_addr, map->r_netid); |
500 | 520 | ||
501 | msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET]; | 521 | msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET]; |
502 | if (port) | 522 | if (port != 0) { |
503 | msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET]; | 523 | msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET]; |
524 | is_set = true; | ||
525 | } | ||
504 | 526 | ||
505 | result = rpcb_register_call(sn->rpcb_local_clnt4, msg); | 527 | result = rpcb_register_call(sn, sn->rpcb_local_clnt4, msg, is_set); |
506 | kfree(map->r_addr); | 528 | kfree(map->r_addr); |
507 | return result; | 529 | return result; |
508 | } | 530 | } |
@@ -519,7 +541,7 @@ static int rpcb_unregister_all_protofamilies(struct sunrpc_net *sn, | |||
519 | map->r_addr = ""; | 541 | map->r_addr = ""; |
520 | msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET]; | 542 | msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET]; |
521 | 543 | ||
522 | return rpcb_register_call(sn->rpcb_local_clnt4, msg); | 544 | return rpcb_register_call(sn, sn->rpcb_local_clnt4, msg, false); |
523 | } | 545 | } |
524 | 546 | ||
525 | /** | 547 | /** |
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c index cb29ef7ba2f0..609c30c80816 100644 --- a/net/tipc/bearer.c +++ b/net/tipc/bearer.c | |||
@@ -460,6 +460,7 @@ static void bearer_disable(struct tipc_bearer *b_ptr) | |||
460 | { | 460 | { |
461 | struct tipc_link *l_ptr; | 461 | struct tipc_link *l_ptr; |
462 | struct tipc_link *temp_l_ptr; | 462 | struct tipc_link *temp_l_ptr; |
463 | struct tipc_link_req *temp_req; | ||
463 | 464 | ||
464 | pr_info("Disabling bearer <%s>\n", b_ptr->name); | 465 | pr_info("Disabling bearer <%s>\n", b_ptr->name); |
465 | spin_lock_bh(&b_ptr->lock); | 466 | spin_lock_bh(&b_ptr->lock); |
@@ -468,9 +469,13 @@ static void bearer_disable(struct tipc_bearer *b_ptr) | |||
468 | list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) { | 469 | list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) { |
469 | tipc_link_delete(l_ptr); | 470 | tipc_link_delete(l_ptr); |
470 | } | 471 | } |
471 | if (b_ptr->link_req) | 472 | temp_req = b_ptr->link_req; |
472 | tipc_disc_delete(b_ptr->link_req); | 473 | b_ptr->link_req = NULL; |
473 | spin_unlock_bh(&b_ptr->lock); | 474 | spin_unlock_bh(&b_ptr->lock); |
475 | |||
476 | if (temp_req) | ||
477 | tipc_disc_delete(temp_req); | ||
478 | |||
474 | memset(b_ptr, 0, sizeof(struct tipc_bearer)); | 479 | memset(b_ptr, 0, sizeof(struct tipc_bearer)); |
475 | } | 480 | } |
476 | 481 | ||
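The bearer fix uses a common teardown shape: detach the pointer and clear the shared field while the lock is held, then run the actual destruction after dropping the lock, so the delete path can never sleep or re-enter with the spinlock taken. A minimal pthread rendering of the same idea, with invented names standing in for the TIPC structures:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct discovery { int dummy; };

struct bearer {
        pthread_mutex_t lock;
        struct discovery *link_req;     /* shared, protected by lock */
};

static void bearer_disable(struct bearer *b)
{
        struct discovery *req;

        pthread_mutex_lock(&b->lock);
        req = b->link_req;              /* detach under the lock ... */
        b->link_req = NULL;
        pthread_mutex_unlock(&b->lock);

        if (req)                        /* ... destroy outside of it */
                free(req);
}

int main(void)
{
        struct bearer b;

        pthread_mutex_init(&b.lock, NULL);
        b.link_req = malloc(sizeof(*b.link_req));
        bearer_disable(&b);
        printf("link_req is now %p\n", (void *)b.link_req);
        pthread_mutex_destroy(&b.lock);
        return 0;
}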
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index 593071dabd1c..4d9334683f84 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c | |||
@@ -347,7 +347,7 @@ void vsock_for_each_connected_socket(void (*fn)(struct sock *sk)) | |||
347 | for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) { | 347 | for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) { |
348 | struct vsock_sock *vsk; | 348 | struct vsock_sock *vsk; |
349 | list_for_each_entry(vsk, &vsock_connected_table[i], | 349 | list_for_each_entry(vsk, &vsock_connected_table[i], |
350 | connected_table); | 350 | connected_table) |
351 | fn(sk_vsock(vsk)); | 351 | fn(sk_vsock(vsk)); |
352 | } | 352 | } |
353 | 353 | ||
diff --git a/net/wireless/core.c b/net/wireless/core.c index 4f9f216665e9..a8c29fa4f1b3 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c | |||
@@ -765,6 +765,7 @@ void cfg80211_leave(struct cfg80211_registered_device *rdev, | |||
765 | cfg80211_leave_mesh(rdev, dev); | 765 | cfg80211_leave_mesh(rdev, dev); |
766 | break; | 766 | break; |
767 | case NL80211_IFTYPE_AP: | 767 | case NL80211_IFTYPE_AP: |
768 | case NL80211_IFTYPE_P2P_GO: | ||
768 | cfg80211_stop_ap(rdev, dev); | 769 | cfg80211_stop_ap(rdev, dev); |
769 | break; | 770 | break; |
770 | default: | 771 | default: |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 25d217d90807..3fcba69817e5 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -441,10 +441,12 @@ static int nl80211_prepare_wdev_dump(struct sk_buff *skb, | |||
441 | goto out_unlock; | 441 | goto out_unlock; |
442 | } | 442 | } |
443 | *rdev = wiphy_to_dev((*wdev)->wiphy); | 443 | *rdev = wiphy_to_dev((*wdev)->wiphy); |
444 | cb->args[0] = (*rdev)->wiphy_idx; | 444 | /* 0 is the first index - add 1 to parse only once */ |
445 | cb->args[0] = (*rdev)->wiphy_idx + 1; | ||
445 | cb->args[1] = (*wdev)->identifier; | 446 | cb->args[1] = (*wdev)->identifier; |
446 | } else { | 447 | } else { |
447 | struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0]); | 448 | /* subtract the 1 again here */ |
449 | struct wiphy *wiphy = wiphy_idx_to_wiphy(cb->args[0] - 1); | ||
448 | struct wireless_dev *tmp; | 450 | struct wireless_dev *tmp; |
449 | 451 | ||
450 | if (!wiphy) { | 452 | if (!wiphy) { |
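The reason for the +1/-1 dance in nl80211_prepare_wdev_dump(): cb->args[0] == 0 also means "dump not started", so a wiphy index of 0 stored as-is would be indistinguishable from the initial state. Offsetting by one on save and undoing it on restore keeps index 0 usable. A tiny sketch of that cursor encoding (function names are illustrative):

#include <stdio.h>

/* 0 means "no cursor saved"; a stored index i is kept as i + 1. */
static unsigned long save_cursor(unsigned long idx)
{
        return idx + 1;
}

static long restore_cursor(unsigned long arg)
{
        return arg ? (long)(arg - 1) : -1;      /* -1: start from scratch */
}

int main(void)
{
        unsigned long arg = 0;                  /* fresh dump */
        printf("restore(fresh)   = %ld\n", restore_cursor(arg));
        arg = save_cursor(0);                   /* dump resumed at index 0 */
        printf("restore(saved 0) = %ld\n", restore_cursor(arg));
        return 0;
}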
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index 3f7682a387b7..eefbd10e408f 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c | |||
@@ -1998,12 +1998,11 @@ static void smk_ipv6_port_label(struct socket *sock, struct sockaddr *address) | |||
1998 | * | 1998 | * |
1999 | * Create or update the port list entry | 1999 | * Create or update the port list entry |
2000 | */ | 2000 | */ |
2001 | static int smk_ipv6_port_check(struct sock *sk, struct sockaddr *address, | 2001 | static int smk_ipv6_port_check(struct sock *sk, struct sockaddr_in6 *address, |
2002 | int act) | 2002 | int act) |
2003 | { | 2003 | { |
2004 | __be16 *bep; | 2004 | __be16 *bep; |
2005 | __be32 *be32p; | 2005 | __be32 *be32p; |
2006 | struct sockaddr_in6 *addr6; | ||
2007 | struct smk_port_label *spp; | 2006 | struct smk_port_label *spp; |
2008 | struct socket_smack *ssp = sk->sk_security; | 2007 | struct socket_smack *ssp = sk->sk_security; |
2009 | struct smack_known *skp; | 2008 | struct smack_known *skp; |
@@ -2025,10 +2024,9 @@ static int smk_ipv6_port_check(struct sock *sk, struct sockaddr *address, | |||
2025 | /* | 2024 | /* |
2026 | * Get the IP address and port from the address. | 2025 | * Get the IP address and port from the address. |
2027 | */ | 2026 | */ |
2028 | addr6 = (struct sockaddr_in6 *)address; | 2027 | port = ntohs(address->sin6_port); |
2029 | port = ntohs(addr6->sin6_port); | 2028 | bep = (__be16 *)(&address->sin6_addr); |
2030 | bep = (__be16 *)(&addr6->sin6_addr); | 2029 | be32p = (__be32 *)(&address->sin6_addr); |
2031 | be32p = (__be32 *)(&addr6->sin6_addr); | ||
2032 | 2030 | ||
2033 | /* | 2031 | /* |
2034 | * It's remote, so port lookup does no good. | 2032 | * It's remote, so port lookup does no good. |
@@ -2060,9 +2058,9 @@ auditout: | |||
2060 | ad.a.u.net->family = sk->sk_family; | 2058 | ad.a.u.net->family = sk->sk_family; |
2061 | ad.a.u.net->dport = port; | 2059 | ad.a.u.net->dport = port; |
2062 | if (act == SMK_RECEIVING) | 2060 | if (act == SMK_RECEIVING) |
2063 | ad.a.u.net->v6info.saddr = addr6->sin6_addr; | 2061 | ad.a.u.net->v6info.saddr = address->sin6_addr; |
2064 | else | 2062 | else |
2065 | ad.a.u.net->v6info.daddr = addr6->sin6_addr; | 2063 | ad.a.u.net->v6info.daddr = address->sin6_addr; |
2066 | #endif | 2064 | #endif |
2067 | return smk_access(skp, object, MAY_WRITE, &ad); | 2065 | return smk_access(skp, object, MAY_WRITE, &ad); |
2068 | } | 2066 | } |
@@ -2201,7 +2199,8 @@ static int smack_socket_connect(struct socket *sock, struct sockaddr *sap, | |||
2201 | case PF_INET6: | 2199 | case PF_INET6: |
2202 | if (addrlen < sizeof(struct sockaddr_in6)) | 2200 | if (addrlen < sizeof(struct sockaddr_in6)) |
2203 | return -EINVAL; | 2201 | return -EINVAL; |
2204 | rc = smk_ipv6_port_check(sock->sk, sap, SMK_CONNECTING); | 2202 | rc = smk_ipv6_port_check(sock->sk, (struct sockaddr_in6 *)sap, |
2203 | SMK_CONNECTING); | ||
2205 | break; | 2204 | break; |
2206 | } | 2205 | } |
2207 | return rc; | 2206 | return rc; |
@@ -3034,7 +3033,7 @@ static int smack_socket_sendmsg(struct socket *sock, struct msghdr *msg, | |||
3034 | int size) | 3033 | int size) |
3035 | { | 3034 | { |
3036 | struct sockaddr_in *sip = (struct sockaddr_in *) msg->msg_name; | 3035 | struct sockaddr_in *sip = (struct sockaddr_in *) msg->msg_name; |
3037 | struct sockaddr *sap = (struct sockaddr *) msg->msg_name; | 3036 | struct sockaddr_in6 *sap = (struct sockaddr_in6 *) msg->msg_name; |
3038 | int rc = 0; | 3037 | int rc = 0; |
3039 | 3038 | ||
3040 | /* | 3039 | /* |
@@ -3121,9 +3120,8 @@ static struct smack_known *smack_from_secattr(struct netlbl_lsm_secattr *sap, | |||
3121 | return smack_net_ambient; | 3120 | return smack_net_ambient; |
3122 | } | 3121 | } |
3123 | 3122 | ||
3124 | static int smk_skb_to_addr_ipv6(struct sk_buff *skb, struct sockaddr *sap) | 3123 | static int smk_skb_to_addr_ipv6(struct sk_buff *skb, struct sockaddr_in6 *sip) |
3125 | { | 3124 | { |
3126 | struct sockaddr_in6 *sip = (struct sockaddr_in6 *)sap; | ||
3127 | u8 nexthdr; | 3125 | u8 nexthdr; |
3128 | int offset; | 3126 | int offset; |
3129 | int proto = -EINVAL; | 3127 | int proto = -EINVAL; |
@@ -3181,7 +3179,7 @@ static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb) | |||
3181 | struct netlbl_lsm_secattr secattr; | 3179 | struct netlbl_lsm_secattr secattr; |
3182 | struct socket_smack *ssp = sk->sk_security; | 3180 | struct socket_smack *ssp = sk->sk_security; |
3183 | struct smack_known *skp; | 3181 | struct smack_known *skp; |
3184 | struct sockaddr sadd; | 3182 | struct sockaddr_in6 sadd; |
3185 | int rc = 0; | 3183 | int rc = 0; |
3186 | struct smk_audit_info ad; | 3184 | struct smk_audit_info ad; |
3187 | #ifdef CONFIG_AUDIT | 3185 | #ifdef CONFIG_AUDIT |
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index 8e77cbbad871..e3c7ba8d7582 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c | |||
@@ -522,7 +522,7 @@ static bool same_amp_caps(struct hda_codec *codec, hda_nid_t nid1, | |||
522 | } | 522 | } |
523 | 523 | ||
524 | #define nid_has_mute(codec, nid, dir) \ | 524 | #define nid_has_mute(codec, nid, dir) \ |
525 | check_amp_caps(codec, nid, dir, AC_AMPCAP_MUTE) | 525 | check_amp_caps(codec, nid, dir, (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) |
526 | #define nid_has_volume(codec, nid, dir) \ | 526 | #define nid_has_volume(codec, nid, dir) \ |
527 | check_amp_caps(codec, nid, dir, AC_AMPCAP_NUM_STEPS) | 527 | check_amp_caps(codec, nid, dir, AC_AMPCAP_NUM_STEPS) |
528 | 528 | ||
@@ -624,7 +624,7 @@ static int get_amp_val_to_activate(struct hda_codec *codec, hda_nid_t nid, | |||
624 | if (enable) | 624 | if (enable) |
625 | val = (caps & AC_AMPCAP_OFFSET) >> AC_AMPCAP_OFFSET_SHIFT; | 625 | val = (caps & AC_AMPCAP_OFFSET) >> AC_AMPCAP_OFFSET_SHIFT; |
626 | } | 626 | } |
627 | if (caps & AC_AMPCAP_MUTE) { | 627 | if (caps & (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) { |
628 | if (!enable) | 628 | if (!enable) |
629 | val |= HDA_AMP_MUTE; | 629 | val |= HDA_AMP_MUTE; |
630 | } | 630 | } |
@@ -648,7 +648,7 @@ static unsigned int get_amp_mask_to_modify(struct hda_codec *codec, | |||
648 | { | 648 | { |
649 | unsigned int mask = 0xff; | 649 | unsigned int mask = 0xff; |
650 | 650 | ||
651 | if (caps & AC_AMPCAP_MUTE) { | 651 | if (caps & (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) { |
652 | if (is_ctl_associated(codec, nid, dir, idx, NID_PATH_MUTE_CTL)) | 652 | if (is_ctl_associated(codec, nid, dir, idx, NID_PATH_MUTE_CTL)) |
653 | mask &= ~0x80; | 653 | mask &= ~0x80; |
654 | } | 654 | } |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 8bd226149868..f303cd898515 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -1031,6 +1031,7 @@ enum { | |||
1031 | ALC880_FIXUP_GPIO2, | 1031 | ALC880_FIXUP_GPIO2, |
1032 | ALC880_FIXUP_MEDION_RIM, | 1032 | ALC880_FIXUP_MEDION_RIM, |
1033 | ALC880_FIXUP_LG, | 1033 | ALC880_FIXUP_LG, |
1034 | ALC880_FIXUP_LG_LW25, | ||
1034 | ALC880_FIXUP_W810, | 1035 | ALC880_FIXUP_W810, |
1035 | ALC880_FIXUP_EAPD_COEF, | 1036 | ALC880_FIXUP_EAPD_COEF, |
1036 | ALC880_FIXUP_TCL_S700, | 1037 | ALC880_FIXUP_TCL_S700, |
@@ -1089,6 +1090,14 @@ static const struct hda_fixup alc880_fixups[] = { | |||
1089 | { } | 1090 | { } |
1090 | } | 1091 | } |
1091 | }, | 1092 | }, |
1093 | [ALC880_FIXUP_LG_LW25] = { | ||
1094 | .type = HDA_FIXUP_PINS, | ||
1095 | .v.pins = (const struct hda_pintbl[]) { | ||
1096 | { 0x1a, 0x0181344f }, /* line-in */ | ||
1097 | { 0x1b, 0x0321403f }, /* headphone */ | ||
1098 | { } | ||
1099 | } | ||
1100 | }, | ||
1092 | [ALC880_FIXUP_W810] = { | 1101 | [ALC880_FIXUP_W810] = { |
1093 | .type = HDA_FIXUP_PINS, | 1102 | .type = HDA_FIXUP_PINS, |
1094 | .v.pins = (const struct hda_pintbl[]) { | 1103 | .v.pins = (const struct hda_pintbl[]) { |
@@ -1341,6 +1350,7 @@ static const struct snd_pci_quirk alc880_fixup_tbl[] = { | |||
1341 | SND_PCI_QUIRK(0x1854, 0x003b, "LG", ALC880_FIXUP_LG), | 1350 | SND_PCI_QUIRK(0x1854, 0x003b, "LG", ALC880_FIXUP_LG), |
1342 | SND_PCI_QUIRK(0x1854, 0x005f, "LG P1 Express", ALC880_FIXUP_LG), | 1351 | SND_PCI_QUIRK(0x1854, 0x005f, "LG P1 Express", ALC880_FIXUP_LG), |
1343 | SND_PCI_QUIRK(0x1854, 0x0068, "LG w1", ALC880_FIXUP_LG), | 1352 | SND_PCI_QUIRK(0x1854, 0x0068, "LG w1", ALC880_FIXUP_LG), |
1353 | SND_PCI_QUIRK(0x1854, 0x0077, "LG LW25", ALC880_FIXUP_LG_LW25), | ||
1344 | SND_PCI_QUIRK(0x19db, 0x4188, "TCL S700", ALC880_FIXUP_TCL_S700), | 1354 | SND_PCI_QUIRK(0x19db, 0x4188, "TCL S700", ALC880_FIXUP_TCL_S700), |
1345 | 1355 | ||
1346 | /* Below is the copied entries from alc880_quirks.c. | 1356 | /* Below is the copied entries from alc880_quirks.c. |
@@ -4329,6 +4339,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = { | |||
4329 | SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE), | 4339 | SND_PCI_QUIRK(0x1025, 0x0308, "Acer Aspire 8942G", ALC662_FIXUP_ASPIRE), |
4330 | SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE), | 4340 | SND_PCI_QUIRK(0x1025, 0x031c, "Gateway NV79", ALC662_FIXUP_SKU_IGNORE), |
4331 | SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC), | 4341 | SND_PCI_QUIRK(0x1025, 0x0349, "eMachines eM250", ALC662_FIXUP_INV_DMIC), |
4342 | SND_PCI_QUIRK(0x1025, 0x034a, "Gateway LT27", ALC662_FIXUP_INV_DMIC), | ||
4332 | SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE), | 4343 | SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE), |
4333 | SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), | 4344 | SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), |
4334 | SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), | 4345 | SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), |
diff --git a/sound/soc/codecs/cs42l52.c b/sound/soc/codecs/cs42l52.c index 987f728718c5..be2ba1b6fe4a 100644 --- a/sound/soc/codecs/cs42l52.c +++ b/sound/soc/codecs/cs42l52.c | |||
@@ -195,6 +195,8 @@ static DECLARE_TLV_DB_SCALE(pga_tlv, -600, 50, 0); | |||
195 | 195 | ||
196 | static DECLARE_TLV_DB_SCALE(mix_tlv, -50, 50, 0); | 196 | static DECLARE_TLV_DB_SCALE(mix_tlv, -50, 50, 0); |
197 | 197 | ||
198 | static DECLARE_TLV_DB_SCALE(beep_tlv, -56, 200, 0); | ||
199 | |||
198 | static const unsigned int limiter_tlv[] = { | 200 | static const unsigned int limiter_tlv[] = { |
199 | TLV_DB_RANGE_HEAD(2), | 201 | TLV_DB_RANGE_HEAD(2), |
200 | 0, 2, TLV_DB_SCALE_ITEM(-3000, 600, 0), | 202 | 0, 2, TLV_DB_SCALE_ITEM(-3000, 600, 0), |
@@ -451,7 +453,8 @@ static const struct snd_kcontrol_new cs42l52_snd_controls[] = { | |||
451 | SOC_ENUM("Beep Pitch", beep_pitch_enum), | 453 | SOC_ENUM("Beep Pitch", beep_pitch_enum), |
452 | SOC_ENUM("Beep on Time", beep_ontime_enum), | 454 | SOC_ENUM("Beep on Time", beep_ontime_enum), |
453 | SOC_ENUM("Beep off Time", beep_offtime_enum), | 455 | SOC_ENUM("Beep off Time", beep_offtime_enum), |
454 | SOC_SINGLE_TLV("Beep Volume", CS42L52_BEEP_VOL, 0, 0x1f, 0x07, hl_tlv), | 456 | SOC_SINGLE_SX_TLV("Beep Volume", CS42L52_BEEP_VOL, |
457 | 0, 0x07, 0x1f, beep_tlv), | ||
455 | SOC_SINGLE("Beep Mixer Switch", CS42L52_BEEP_TONE_CTL, 5, 1, 1), | 458 | SOC_SINGLE("Beep Mixer Switch", CS42L52_BEEP_TONE_CTL, 5, 1, 1), |
456 | SOC_ENUM("Beep Treble Corner Freq", beep_treble_enum), | 459 | SOC_ENUM("Beep Treble Corner Freq", beep_treble_enum), |
457 | SOC_ENUM("Beep Bass Corner Freq", beep_bass_enum), | 460 | SOC_ENUM("Beep Bass Corner Freq", beep_bass_enum), |
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c index 6c8a9e7bee25..760e8bfeacaa 100644 --- a/sound/soc/codecs/sgtl5000.c +++ b/sound/soc/codecs/sgtl5000.c | |||
@@ -153,6 +153,8 @@ static int mic_bias_event(struct snd_soc_dapm_widget *w, | |||
153 | static int power_vag_event(struct snd_soc_dapm_widget *w, | 153 | static int power_vag_event(struct snd_soc_dapm_widget *w, |
154 | struct snd_kcontrol *kcontrol, int event) | 154 | struct snd_kcontrol *kcontrol, int event) |
155 | { | 155 | { |
156 | const u32 mask = SGTL5000_DAC_POWERUP | SGTL5000_ADC_POWERUP; | ||
157 | |||
156 | switch (event) { | 158 | switch (event) { |
157 | case SND_SOC_DAPM_POST_PMU: | 159 | case SND_SOC_DAPM_POST_PMU: |
158 | snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER, | 160 | snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER, |
@@ -160,9 +162,17 @@ static int power_vag_event(struct snd_soc_dapm_widget *w, | |||
160 | break; | 162 | break; |
161 | 163 | ||
162 | case SND_SOC_DAPM_PRE_PMD: | 164 | case SND_SOC_DAPM_PRE_PMD: |
163 | snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER, | 165 | /* |
164 | SGTL5000_VAG_POWERUP, 0); | 166 | * Don't clear VAG_POWERUP when both the DAC and ADC are |
165 | msleep(400); | 167 | * operational, to avoid inadvertently starving whichever |
168 | * of them is still running. |
169 | */ | ||
170 | if ((snd_soc_read(w->codec, SGTL5000_CHIP_ANA_POWER) & | ||
171 | mask) != mask) { | ||
172 | snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER, | ||
173 | SGTL5000_VAG_POWERUP, 0); | ||
174 | msleep(400); | ||
175 | } | ||
166 | break; | 176 | break; |
167 | default: | 177 | default: |
168 | break; | 178 | break; |
@@ -388,7 +398,7 @@ static const struct snd_kcontrol_new sgtl5000_snd_controls[] = { | |||
388 | SOC_DOUBLE("Capture Volume", SGTL5000_CHIP_ANA_ADC_CTRL, 0, 4, 0xf, 0), | 398 | SOC_DOUBLE("Capture Volume", SGTL5000_CHIP_ANA_ADC_CTRL, 0, 4, 0xf, 0), |
389 | SOC_SINGLE_TLV("Capture Attenuate Switch (-6dB)", | 399 | SOC_SINGLE_TLV("Capture Attenuate Switch (-6dB)", |
390 | SGTL5000_CHIP_ANA_ADC_CTRL, | 400 | SGTL5000_CHIP_ANA_ADC_CTRL, |
391 | 8, 2, 0, capture_6db_attenuate), | 401 | 8, 1, 0, capture_6db_attenuate), |
392 | SOC_SINGLE("Capture ZC Switch", SGTL5000_CHIP_ANA_CTRL, 1, 1, 0), | 402 | SOC_SINGLE("Capture ZC Switch", SGTL5000_CHIP_ANA_CTRL, 1, 1, 0), |
393 | 403 | ||
394 | SOC_DOUBLE_TLV("Headphone Playback Volume", | 404 | SOC_DOUBLE_TLV("Headphone Playback Volume", |
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index bd16010441cc..4375c9f2b791 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c | |||
@@ -679,13 +679,14 @@ static int dapm_new_mux(struct snd_soc_dapm_widget *w) | |||
679 | return -EINVAL; | 679 | return -EINVAL; |
680 | } | 680 | } |
681 | 681 | ||
682 | path = list_first_entry(&w->sources, struct snd_soc_dapm_path, | 682 | if (list_empty(&w->sources)) { |
683 | list_sink); | ||
684 | if (!path) { | ||
685 | dev_err(dapm->dev, "ASoC: mux %s has no paths\n", w->name); | 683 | dev_err(dapm->dev, "ASoC: mux %s has no paths\n", w->name); |
686 | return -EINVAL; | 684 | return -EINVAL; |
687 | } | 685 | } |
688 | 686 | ||
687 | path = list_first_entry(&w->sources, struct snd_soc_dapm_path, | ||
688 | list_sink); | ||
689 | |||
689 | ret = dapm_create_or_share_mixmux_kcontrol(w, 0, path); | 690 | ret = dapm_create_or_share_mixmux_kcontrol(w, 0, path); |
690 | if (ret < 0) | 691 | if (ret < 0) |
691 | return ret; | 692 | return ret; |
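The underlying gotcha in the soc-dapm fix is that list_first_entry() is pure pointer arithmetic: on an empty list it returns a bogus pointer derived from the list head itself, never NULL, so the old !path test could not fire and the emptiness check has to come first. A compact userspace illustration with a hand-rolled intrusive list (macros modeled loosely on the kernel's, simplified here):

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define LIST_HEAD_INIT(name)  { &(name), &(name) }
#define list_empty(head)      ((head)->next == (head))
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
#define first_entry(head, type, member) \
        container_of((head)->next, type, member)

struct path { int id; struct list_head list_sink; };

int main(void)
{
        struct list_head sources = LIST_HEAD_INIT(sources);

        /* Empty list: first_entry() would still "return" something -- a
         * pointer computed from &sources itself -- so test for emptiness
         * first instead of testing the result for NULL. */
        if (list_empty(&sources)) {
                printf("mux has no paths\n");
                return 1;
        }

        struct path *p = first_entry(&sources, struct path, list_sink);
        printf("first path id = %d\n", p->id);
        return 0;
}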
diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c index d04146cad61f..47565fd04505 100644 --- a/sound/soc/tegra/tegra30_i2s.c +++ b/sound/soc/tegra/tegra30_i2s.c | |||
@@ -228,7 +228,7 @@ static int tegra30_i2s_hw_params(struct snd_pcm_substream *substream, | |||
228 | reg = TEGRA30_I2S_CIF_RX_CTRL; | 228 | reg = TEGRA30_I2S_CIF_RX_CTRL; |
229 | } else { | 229 | } else { |
230 | val |= TEGRA30_AUDIOCIF_CTRL_DIRECTION_TX; | 230 | val |= TEGRA30_AUDIOCIF_CTRL_DIRECTION_TX; |
231 | reg = TEGRA30_I2S_CIF_RX_CTRL; | 231 | reg = TEGRA30_I2S_CIF_TX_CTRL; |
232 | } | 232 | } |
233 | 233 | ||
234 | regmap_write(i2s->regmap, reg, val); | 234 | regmap_write(i2s->regmap, reg, val); |
diff --git a/sound/usb/6fire/comm.c b/sound/usb/6fire/comm.c index 9e6e3ffd86bb..23452ee617e1 100644 --- a/sound/usb/6fire/comm.c +++ b/sound/usb/6fire/comm.c | |||
@@ -110,19 +110,37 @@ static int usb6fire_comm_send_buffer(u8 *buffer, struct usb_device *dev) | |||
110 | static int usb6fire_comm_write8(struct comm_runtime *rt, u8 request, | 110 | static int usb6fire_comm_write8(struct comm_runtime *rt, u8 request, |
111 | u8 reg, u8 value) | 111 | u8 reg, u8 value) |
112 | { | 112 | { |
113 | u8 buffer[13]; /* 13: maximum length of message */ | 113 | u8 *buffer; |
114 | int ret; | ||
115 | |||
116 | /* 13: maximum length of message */ | ||
117 | buffer = kmalloc(13, GFP_KERNEL); | ||
118 | if (!buffer) | ||
119 | return -ENOMEM; | ||
114 | 120 | ||
115 | usb6fire_comm_init_buffer(buffer, 0x00, request, reg, value, 0x00); | 121 | usb6fire_comm_init_buffer(buffer, 0x00, request, reg, value, 0x00); |
116 | return usb6fire_comm_send_buffer(buffer, rt->chip->dev); | 122 | ret = usb6fire_comm_send_buffer(buffer, rt->chip->dev); |
123 | |||
124 | kfree(buffer); | ||
125 | return ret; | ||
117 | } | 126 | } |
118 | 127 | ||
119 | static int usb6fire_comm_write16(struct comm_runtime *rt, u8 request, | 128 | static int usb6fire_comm_write16(struct comm_runtime *rt, u8 request, |
120 | u8 reg, u8 vl, u8 vh) | 129 | u8 reg, u8 vl, u8 vh) |
121 | { | 130 | { |
122 | u8 buffer[13]; /* 13: maximum length of message */ | 131 | u8 *buffer; |
132 | int ret; | ||
133 | |||
134 | /* 13: maximum length of message */ | ||
135 | buffer = kmalloc(13, GFP_KERNEL); | ||
136 | if (!buffer) | ||
137 | return -ENOMEM; | ||
123 | 138 | ||
124 | usb6fire_comm_init_buffer(buffer, 0x00, request, reg, vl, vh); | 139 | usb6fire_comm_init_buffer(buffer, 0x00, request, reg, vl, vh); |
125 | return usb6fire_comm_send_buffer(buffer, rt->chip->dev); | 140 | ret = usb6fire_comm_send_buffer(buffer, rt->chip->dev); |
141 | |||
142 | kfree(buffer); | ||
143 | return ret; | ||
126 | } | 144 | } |
127 | 145 | ||
128 | int usb6fire_comm_init(struct sfire_chip *chip) | 146 | int usb6fire_comm_init(struct sfire_chip *chip) |
@@ -135,6 +153,12 @@ int usb6fire_comm_init(struct sfire_chip *chip) | |||
135 | if (!rt) | 153 | if (!rt) |
136 | return -ENOMEM; | 154 | return -ENOMEM; |
137 | 155 | ||
156 | rt->receiver_buffer = kzalloc(COMM_RECEIVER_BUFSIZE, GFP_KERNEL); | ||
157 | if (!rt->receiver_buffer) { | ||
158 | kfree(rt); | ||
159 | return -ENOMEM; | ||
160 | } | ||
161 | |||
138 | urb = &rt->receiver; | 162 | urb = &rt->receiver; |
139 | rt->serial = 1; | 163 | rt->serial = 1; |
140 | rt->chip = chip; | 164 | rt->chip = chip; |
@@ -153,6 +177,7 @@ int usb6fire_comm_init(struct sfire_chip *chip) | |||
153 | urb->interval = 1; | 177 | urb->interval = 1; |
154 | ret = usb_submit_urb(urb, GFP_KERNEL); | 178 | ret = usb_submit_urb(urb, GFP_KERNEL); |
155 | if (ret < 0) { | 179 | if (ret < 0) { |
180 | kfree(rt->receiver_buffer); | ||
156 | kfree(rt); | 181 | kfree(rt); |
157 | snd_printk(KERN_ERR PREFIX "cannot create comm data receiver."); | 182 | snd_printk(KERN_ERR PREFIX "cannot create comm data receiver."); |
158 | return ret; | 183 | return ret; |
@@ -171,6 +196,9 @@ void usb6fire_comm_abort(struct sfire_chip *chip) | |||
171 | 196 | ||
172 | void usb6fire_comm_destroy(struct sfire_chip *chip) | 197 | void usb6fire_comm_destroy(struct sfire_chip *chip) |
173 | { | 198 | { |
174 | kfree(chip->comm); | 199 | struct comm_runtime *rt = chip->comm; |
200 | |||
201 | kfree(rt->receiver_buffer); | ||
202 | kfree(rt); | ||
175 | chip->comm = NULL; | 203 | chip->comm = NULL; |
176 | } | 204 | } |
diff --git a/sound/usb/6fire/comm.h b/sound/usb/6fire/comm.h index 6a0840b0dcff..780d5ed8e5d8 100644 --- a/sound/usb/6fire/comm.h +++ b/sound/usb/6fire/comm.h | |||
@@ -24,7 +24,7 @@ struct comm_runtime { | |||
24 | struct sfire_chip *chip; | 24 | struct sfire_chip *chip; |
25 | 25 | ||
26 | struct urb receiver; | 26 | struct urb receiver; |
27 | u8 receiver_buffer[COMM_RECEIVER_BUFSIZE]; | 27 | u8 *receiver_buffer; |
28 | 28 | ||
29 | u8 serial; /* urb serial */ | 29 | u8 serial; /* urb serial */ |
30 | 30 | ||
diff --git a/sound/usb/6fire/midi.c b/sound/usb/6fire/midi.c index 26722423330d..f3dd7266c391 100644 --- a/sound/usb/6fire/midi.c +++ b/sound/usb/6fire/midi.c | |||
@@ -19,6 +19,10 @@ | |||
19 | #include "chip.h" | 19 | #include "chip.h" |
20 | #include "comm.h" | 20 | #include "comm.h" |
21 | 21 | ||
22 | enum { | ||
23 | MIDI_BUFSIZE = 64 | ||
24 | }; | ||
25 | |||
22 | static void usb6fire_midi_out_handler(struct urb *urb) | 26 | static void usb6fire_midi_out_handler(struct urb *urb) |
23 | { | 27 | { |
24 | struct midi_runtime *rt = urb->context; | 28 | struct midi_runtime *rt = urb->context; |
@@ -156,6 +160,12 @@ int usb6fire_midi_init(struct sfire_chip *chip) | |||
156 | if (!rt) | 160 | if (!rt) |
157 | return -ENOMEM; | 161 | return -ENOMEM; |
158 | 162 | ||
163 | rt->out_buffer = kzalloc(MIDI_BUFSIZE, GFP_KERNEL); | ||
164 | if (!rt->out_buffer) { | ||
165 | kfree(rt); | ||
166 | return -ENOMEM; | ||
167 | } | ||
168 | |||
159 | rt->chip = chip; | 169 | rt->chip = chip; |
160 | rt->in_received = usb6fire_midi_in_received; | 170 | rt->in_received = usb6fire_midi_in_received; |
161 | rt->out_buffer[0] = 0x80; /* 'send midi' command */ | 171 | rt->out_buffer[0] = 0x80; /* 'send midi' command */ |
@@ -169,6 +179,7 @@ int usb6fire_midi_init(struct sfire_chip *chip) | |||
169 | 179 | ||
170 | ret = snd_rawmidi_new(chip->card, "6FireUSB", 0, 1, 1, &rt->instance); | 180 | ret = snd_rawmidi_new(chip->card, "6FireUSB", 0, 1, 1, &rt->instance); |
171 | if (ret < 0) { | 181 | if (ret < 0) { |
182 | kfree(rt->out_buffer); | ||
172 | kfree(rt); | 183 | kfree(rt); |
173 | snd_printk(KERN_ERR PREFIX "unable to create midi.\n"); | 184 | snd_printk(KERN_ERR PREFIX "unable to create midi.\n"); |
174 | return ret; | 185 | return ret; |
@@ -197,6 +208,9 @@ void usb6fire_midi_abort(struct sfire_chip *chip) | |||
197 | 208 | ||
198 | void usb6fire_midi_destroy(struct sfire_chip *chip) | 209 | void usb6fire_midi_destroy(struct sfire_chip *chip) |
199 | { | 210 | { |
200 | kfree(chip->midi); | 211 | struct midi_runtime *rt = chip->midi; |
212 | |||
213 | kfree(rt->out_buffer); | ||
214 | kfree(rt); | ||
201 | chip->midi = NULL; | 215 | chip->midi = NULL; |
202 | } | 216 | } |
diff --git a/sound/usb/6fire/midi.h b/sound/usb/6fire/midi.h index c321006e5430..84851b9f5559 100644 --- a/sound/usb/6fire/midi.h +++ b/sound/usb/6fire/midi.h | |||
@@ -16,10 +16,6 @@ | |||
16 | 16 | ||
17 | #include "common.h" | 17 | #include "common.h" |
18 | 18 | ||
19 | enum { | ||
20 | MIDI_BUFSIZE = 64 | ||
21 | }; | ||
22 | |||
23 | struct midi_runtime { | 19 | struct midi_runtime { |
24 | struct sfire_chip *chip; | 20 | struct sfire_chip *chip; |
25 | struct snd_rawmidi *instance; | 21 | struct snd_rawmidi *instance; |
@@ -32,7 +28,7 @@ struct midi_runtime { | |||
32 | struct snd_rawmidi_substream *out; | 28 | struct snd_rawmidi_substream *out; |
33 | struct urb out_urb; | 29 | struct urb out_urb; |
34 | u8 out_serial; /* serial number of out packet */ | 30 | u8 out_serial; /* serial number of out packet */ |
35 | u8 out_buffer[MIDI_BUFSIZE]; | 31 | u8 *out_buffer; |
36 | int buffer_offset; | 32 | int buffer_offset; |
37 | 33 | ||
38 | void (*in_received)(struct midi_runtime *rt, u8 *data, int length); | 34 | void (*in_received)(struct midi_runtime *rt, u8 *data, int length); |
diff --git a/sound/usb/6fire/pcm.c b/sound/usb/6fire/pcm.c index 3d2551cc10f2..b5eb97fdc842 100644 --- a/sound/usb/6fire/pcm.c +++ b/sound/usb/6fire/pcm.c | |||
@@ -582,6 +582,33 @@ static void usb6fire_pcm_init_urb(struct pcm_urb *urb, | |||
582 | urb->instance.number_of_packets = PCM_N_PACKETS_PER_URB; | 582 | urb->instance.number_of_packets = PCM_N_PACKETS_PER_URB; |
583 | } | 583 | } |
584 | 584 | ||
585 | static int usb6fire_pcm_buffers_init(struct pcm_runtime *rt) | ||
586 | { | ||
587 | int i; | ||
588 | |||
589 | for (i = 0; i < PCM_N_URBS; i++) { | ||
590 | rt->out_urbs[i].buffer = kzalloc(PCM_N_PACKETS_PER_URB | ||
591 | * PCM_MAX_PACKET_SIZE, GFP_KERNEL); | ||
592 | if (!rt->out_urbs[i].buffer) | ||
593 | return -ENOMEM; | ||
594 | rt->in_urbs[i].buffer = kzalloc(PCM_N_PACKETS_PER_URB | ||
595 | * PCM_MAX_PACKET_SIZE, GFP_KERNEL); | ||
596 | if (!rt->in_urbs[i].buffer) | ||
597 | return -ENOMEM; | ||
598 | } | ||
599 | return 0; | ||
600 | } | ||
601 | |||
602 | static void usb6fire_pcm_buffers_destroy(struct pcm_runtime *rt) | ||
603 | { | ||
604 | int i; | ||
605 | |||
606 | for (i = 0; i < PCM_N_URBS; i++) { | ||
607 | kfree(rt->out_urbs[i].buffer); | ||
608 | kfree(rt->in_urbs[i].buffer); | ||
609 | } | ||
610 | } | ||
611 | |||
585 | int usb6fire_pcm_init(struct sfire_chip *chip) | 612 | int usb6fire_pcm_init(struct sfire_chip *chip) |
586 | { | 613 | { |
587 | int i; | 614 | int i; |
@@ -593,6 +620,13 @@ int usb6fire_pcm_init(struct sfire_chip *chip) | |||
593 | if (!rt) | 620 | if (!rt) |
594 | return -ENOMEM; | 621 | return -ENOMEM; |
595 | 622 | ||
623 | ret = usb6fire_pcm_buffers_init(rt); | ||
624 | if (ret) { | ||
625 | usb6fire_pcm_buffers_destroy(rt); | ||
626 | kfree(rt); | ||
627 | return ret; | ||
628 | } | ||
629 | |||
596 | rt->chip = chip; | 630 | rt->chip = chip; |
597 | rt->stream_state = STREAM_DISABLED; | 631 | rt->stream_state = STREAM_DISABLED; |
598 | rt->rate = ARRAY_SIZE(rates); | 632 | rt->rate = ARRAY_SIZE(rates); |
@@ -614,6 +648,7 @@ int usb6fire_pcm_init(struct sfire_chip *chip) | |||
614 | 648 | ||
615 | ret = snd_pcm_new(chip->card, "DMX6FireUSB", 0, 1, 1, &pcm); | 649 | ret = snd_pcm_new(chip->card, "DMX6FireUSB", 0, 1, 1, &pcm); |
616 | if (ret < 0) { | 650 | if (ret < 0) { |
651 | usb6fire_pcm_buffers_destroy(rt); | ||
617 | kfree(rt); | 652 | kfree(rt); |
618 | snd_printk(KERN_ERR PREFIX "cannot create pcm instance.\n"); | 653 | snd_printk(KERN_ERR PREFIX "cannot create pcm instance.\n"); |
619 | return ret; | 654 | return ret; |
@@ -625,6 +660,7 @@ int usb6fire_pcm_init(struct sfire_chip *chip) | |||
625 | snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &pcm_ops); | 660 | snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &pcm_ops); |
626 | 661 | ||
627 | if (ret) { | 662 | if (ret) { |
663 | usb6fire_pcm_buffers_destroy(rt); | ||
628 | kfree(rt); | 664 | kfree(rt); |
629 | snd_printk(KERN_ERR PREFIX | 665 | snd_printk(KERN_ERR PREFIX |
630 | "error preallocating pcm buffers.\n"); | 666 | "error preallocating pcm buffers.\n"); |
@@ -669,6 +705,9 @@ void usb6fire_pcm_abort(struct sfire_chip *chip) | |||
669 | 705 | ||
670 | void usb6fire_pcm_destroy(struct sfire_chip *chip) | 706 | void usb6fire_pcm_destroy(struct sfire_chip *chip) |
671 | { | 707 | { |
672 | kfree(chip->pcm); | 708 | struct pcm_runtime *rt = chip->pcm; |
709 | |||
710 | usb6fire_pcm_buffers_destroy(rt); | ||
711 | kfree(rt); | ||
673 | chip->pcm = NULL; | 712 | chip->pcm = NULL; |
674 | } | 713 | } |
diff --git a/sound/usb/6fire/pcm.h b/sound/usb/6fire/pcm.h index 9b01133ee3fe..f5779d6182c6 100644 --- a/sound/usb/6fire/pcm.h +++ b/sound/usb/6fire/pcm.h | |||
@@ -32,7 +32,7 @@ struct pcm_urb { | |||
32 | struct urb instance; | 32 | struct urb instance; |
33 | struct usb_iso_packet_descriptor packets[PCM_N_PACKETS_PER_URB]; | 33 | struct usb_iso_packet_descriptor packets[PCM_N_PACKETS_PER_URB]; |
34 | /* END DO NOT SEPARATE */ | 34 | /* END DO NOT SEPARATE */ |
35 | u8 buffer[PCM_N_PACKETS_PER_URB * PCM_MAX_PACKET_SIZE]; | 35 | u8 *buffer; |
36 | 36 | ||
37 | struct pcm_urb *peer; | 37 | struct pcm_urb *peer; |
38 | }; | 38 | }; |
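All of the 6fire changes above follow one shape: the URB transfer buffers stop being fixed-size arrays on the stack or embedded in larger runtime structs and become separate heap allocations that are safe to hand to the USB core, at the price of freeing them on every error path and in the destroy routines. A generic userspace sketch of that init/destroy discipline (names and sizes invented):

#include <stdio.h>
#include <stdlib.h>

#define N_URBS   4
#define BUF_SIZE 1024

struct runtime {
        unsigned char *buffers[N_URBS]; /* was: unsigned char buffers[N_URBS][BUF_SIZE]; */
};

static void buffers_destroy(struct runtime *rt)
{
        for (int i = 0; i < N_URBS; i++)
                free(rt->buffers[i]);   /* free(NULL) is a no-op */
}

static struct runtime *runtime_create(void)
{
        struct runtime *rt = calloc(1, sizeof(*rt));

        if (!rt)
                return NULL;

        for (int i = 0; i < N_URBS; i++) {
                rt->buffers[i] = calloc(1, BUF_SIZE);
                if (!rt->buffers[i])
                        goto err;       /* partial allocation: unwind it all */
        }
        return rt;

err:
        buffers_destroy(rt);
        free(rt);
        return NULL;
}

int main(void)
{
        struct runtime *rt = runtime_create();

        if (!rt)
                return 1;
        printf("allocated %d buffers of %d bytes\n", N_URBS, BUF_SIZE);
        buffers_destroy(rt);
        free(rt);
        return 0;
}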
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c index 7a444b5501d9..659950e5b94f 100644 --- a/sound/usb/endpoint.c +++ b/sound/usb/endpoint.c | |||
@@ -591,17 +591,16 @@ static int data_ep_set_params(struct snd_usb_endpoint *ep, | |||
591 | ep->stride = frame_bits >> 3; | 591 | ep->stride = frame_bits >> 3; |
592 | ep->silence_value = pcm_format == SNDRV_PCM_FORMAT_U8 ? 0x80 : 0; | 592 | ep->silence_value = pcm_format == SNDRV_PCM_FORMAT_U8 ? 0x80 : 0; |
593 | 593 | ||
594 | /* calculate max. frequency */ | 594 | /* assume max. frequency is 25% higher than nominal */ |
595 | if (ep->maxpacksize) { | 595 | ep->freqmax = ep->freqn + (ep->freqn >> 2); |
596 | maxsize = ((ep->freqmax + 0xffff) * (frame_bits >> 3)) | ||
597 | >> (16 - ep->datainterval); | ||
598 | /* but wMaxPacketSize might reduce this */ | ||
599 | if (ep->maxpacksize && ep->maxpacksize < maxsize) { | ||
596 | /* whatever fits into a max. size packet */ | 600 | /* whatever fits into a max. size packet */ |
597 | maxsize = ep->maxpacksize; | 601 | maxsize = ep->maxpacksize; |
598 | ep->freqmax = (maxsize / (frame_bits >> 3)) | 602 | ep->freqmax = (maxsize / (frame_bits >> 3)) |
599 | << (16 - ep->datainterval); | 603 | << (16 - ep->datainterval); |
600 | } else { | ||
601 | /* no max. packet size: just take 25% higher than nominal */ | ||
602 | ep->freqmax = ep->freqn + (ep->freqn >> 2); | ||
603 | maxsize = ((ep->freqmax + 0xffff) * (frame_bits >> 3)) | ||
604 | >> (16 - ep->datainterval); | ||
605 | } | 604 | } |
606 | 605 | ||
607 | if (ep->fill_max) | 606 | if (ep->fill_max) |
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index d5438083fd6a..95558ef4a7a0 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c | |||
@@ -888,6 +888,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval, | |||
888 | case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */ | 888 | case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */ |
889 | case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */ | 889 | case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */ |
890 | case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */ | 890 | case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */ |
891 | case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */ | ||
891 | case USB_ID(0x046d, 0x0991): | 892 | case USB_ID(0x046d, 0x0991): |
892 | /* Most audio usb devices lie about volume resolution. | 893 | /* Most audio usb devices lie about volume resolution. |
893 | * Most Logitech webcams have res = 384. | 894 | * Most Logitech webcams have res = 384. |
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 1bc45e71f1fe..0df9ede99dfd 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c | |||
@@ -319,19 +319,19 @@ static int create_auto_midi_quirk(struct snd_usb_audio *chip, | |||
319 | if (altsd->bNumEndpoints < 1) | 319 | if (altsd->bNumEndpoints < 1) |
320 | return -ENODEV; | 320 | return -ENODEV; |
321 | epd = get_endpoint(alts, 0); | 321 | epd = get_endpoint(alts, 0); |
322 | if (!usb_endpoint_xfer_bulk(epd) || | 322 | if (!usb_endpoint_xfer_bulk(epd) && |
323 | !usb_endpoint_xfer_int(epd)) | 323 | !usb_endpoint_xfer_int(epd)) |
324 | return -ENODEV; | 324 | return -ENODEV; |
325 | 325 | ||
326 | switch (USB_ID_VENDOR(chip->usb_id)) { | 326 | switch (USB_ID_VENDOR(chip->usb_id)) { |
327 | case 0x0499: /* Yamaha */ | 327 | case 0x0499: /* Yamaha */ |
328 | err = create_yamaha_midi_quirk(chip, iface, driver, alts); | 328 | err = create_yamaha_midi_quirk(chip, iface, driver, alts); |
329 | if (err < 0 && err != -ENODEV) | 329 | if (err != -ENODEV) |
330 | return err; | 330 | return err; |
331 | break; | 331 | break; |
332 | case 0x0582: /* Roland */ | 332 | case 0x0582: /* Roland */ |
333 | err = create_roland_midi_quirk(chip, iface, driver, alts); | 333 | err = create_roland_midi_quirk(chip, iface, driver, alts); |
334 | if (err < 0 && err != -ENODEV) | 334 | if (err != -ENODEV) |
335 | return err; | 335 | return err; |
336 | break; | 336 | break; |
337 | } | 337 | } |