author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2016-11-28 02:34:10 -0500
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2016-11-28 02:34:10 -0500
commit     0edbf9e55295585bbe9df61b646ca5bf80a8e1eb (patch)
tree       d9b6869ff7a245343a9b030511d4bfd18b00f192
parent     eacae5d2b940b39e7234036bf62869aff5ffe055 (diff)
parent     e5517c2a5a49ed5e99047008629f1cd60246ea0e (diff)
Merge 4.9-rc7 into usb-next
We want the USB fixes in here as well.
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
545 files changed, 6307 insertions, 3054 deletions
diff --git a/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-bt-bmc.txt b/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-ibt-bmc.txt index fbbacd958240..6f28969af9dc 100644 --- a/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-bt-bmc.txt +++ b/Documentation/devicetree/bindings/ipmi/aspeed,ast2400-ibt-bmc.txt | |||
@@ -6,7 +6,7 @@ perform in-band IPMI communication with their host. | |||
6 | 6 | ||
7 | Required properties: | 7 | Required properties: |
8 | 8 | ||
9 | - compatible : should be "aspeed,ast2400-bt-bmc" | 9 | - compatible : should be "aspeed,ast2400-ibt-bmc" |
10 | - reg: physical address and size of the registers | 10 | - reg: physical address and size of the registers |
11 | 11 | ||
12 | Optional properties: | 12 | Optional properties: |
@@ -17,7 +17,7 @@ Optional properties: | |||
17 | Example: | 17 | Example: |
18 | 18 | ||
19 | ibt@1e789140 { | 19 | ibt@1e789140 { |
20 | compatible = "aspeed,ast2400-bt-bmc"; | 20 | compatible = "aspeed,ast2400-ibt-bmc"; |
21 | reg = <0x1e789140 0x18>; | 21 | reg = <0x1e789140 0x18>; |
22 | interrupts = <8>; | 22 | interrupts = <8>; |
23 | }; | 23 | }; |
diff --git a/Documentation/devicetree/bindings/sound/omap-abe-twl6040.txt b/Documentation/devicetree/bindings/sound/omap-abe-twl6040.txt index fd40c852d7c7..462b04e8209f 100644 --- a/Documentation/devicetree/bindings/sound/omap-abe-twl6040.txt +++ b/Documentation/devicetree/bindings/sound/omap-abe-twl6040.txt | |||
@@ -12,7 +12,7 @@ Required properties: | |||
12 | 12 | ||
13 | Optional properties: | 13 | Optional properties: |
14 | - ti,dmic: phandle for the OMAP dmic node if the machine have it connected | 14 | - ti,dmic: phandle for the OMAP dmic node if the machine have it connected |
15 | - ti,jack_detection: Need to be present if the board capable to detect jack | 15 | - ti,jack-detection: Need to be present if the board capable to detect jack |
16 | insertion, removal. | 16 | insertion, removal. |
17 | 17 | ||
18 | Available audio endpoints for the audio-routing table: | 18 | Available audio endpoints for the audio-routing table: |
diff --git a/Documentation/i2c/i2c-topology b/Documentation/i2c/i2c-topology index e0aefeece551..1a014fede0b7 100644 --- a/Documentation/i2c/i2c-topology +++ b/Documentation/i2c/i2c-topology | |||
@@ -326,7 +326,7 @@ Two parent-locked sibling muxes | |||
326 | 326 | ||
327 | This is a good topology. | 327 | This is a good topology. |
328 | 328 | ||
329 | .--------. | 329 | .--------. |
330 | .----------. .--| dev D1 | | 330 | .----------. .--| dev D1 | |
331 | | parent- |--' '--------' | 331 | | parent- |--' '--------' |
332 | .--| locked | .--------. | 332 | .--| locked | .--------. |
@@ -350,7 +350,7 @@ Mux-locked and parent-locked sibling muxes | |||
350 | 350 | ||
351 | This is a good topology. | 351 | This is a good topology. |
352 | 352 | ||
353 | .--------. | 353 | .--------. |
354 | .----------. .--| dev D1 | | 354 | .----------. .--| dev D1 | |
355 | | mux- |--' '--------' | 355 | | mux- |--' '--------' |
356 | .--| locked | .--------. | 356 | .--| locked | .--------. |
diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.txt index 6d6c07cf1a9a..63912ef34606 100644 --- a/Documentation/networking/dsa/dsa.txt +++ b/Documentation/networking/dsa/dsa.txt | |||
@@ -67,13 +67,14 @@ Note that DSA does not currently create network interfaces for the "cpu" and | |||
67 | Switch tagging protocols | 67 | Switch tagging protocols |
68 | ------------------------ | 68 | ------------------------ |
69 | 69 | ||
70 | DSA currently supports 4 different tagging protocols, and a tag-less mode as | 70 | DSA currently supports 5 different tagging protocols, and a tag-less mode as |
71 | well. The different protocols are implemented in: | 71 | well. The different protocols are implemented in: |
72 | 72 | ||
73 | net/dsa/tag_trailer.c: Marvell's 4 trailer tag mode (legacy) | 73 | net/dsa/tag_trailer.c: Marvell's 4 trailer tag mode (legacy) |
74 | net/dsa/tag_dsa.c: Marvell's original DSA tag | 74 | net/dsa/tag_dsa.c: Marvell's original DSA tag |
75 | net/dsa/tag_edsa.c: Marvell's enhanced DSA tag | 75 | net/dsa/tag_edsa.c: Marvell's enhanced DSA tag |
76 | net/dsa/tag_brcm.c: Broadcom's 4 bytes tag | 76 | net/dsa/tag_brcm.c: Broadcom's 4 bytes tag |
77 | net/dsa/tag_qca.c: Qualcomm's 2 bytes tag | ||
77 | 78 | ||
78 | The exact format of the tag protocol is vendor specific, but in general, they | 79 | The exact format of the tag protocol is vendor specific, but in general, they |
79 | all contain something which: | 80 | all contain something which: |
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt index 739db9ab16b2..6bbceb9a3a19 100644 --- a/Documentation/virtual/kvm/api.txt +++ b/Documentation/virtual/kvm/api.txt | |||
@@ -777,6 +777,17 @@ Gets the current timestamp of kvmclock as seen by the current guest. In | |||
777 | conjunction with KVM_SET_CLOCK, it is used to ensure monotonicity on scenarios | 777 | conjunction with KVM_SET_CLOCK, it is used to ensure monotonicity on scenarios |
778 | such as migration. | 778 | such as migration. |
779 | 779 | ||
780 | When KVM_CAP_ADJUST_CLOCK is passed to KVM_CHECK_EXTENSION, it returns the | ||
781 | set of bits that KVM can return in struct kvm_clock_data's flag member. | ||
782 | |||
783 | The only flag defined now is KVM_CLOCK_TSC_STABLE. If set, the returned | ||
784 | value is the exact kvmclock value seen by all VCPUs at the instant | ||
785 | when KVM_GET_CLOCK was called. If clear, the returned value is simply | ||
786 | CLOCK_MONOTONIC plus a constant offset; the offset can be modified | ||
787 | with KVM_SET_CLOCK. KVM will try to make all VCPUs follow this clock, | ||
788 | but the exact value read by each VCPU could differ, because the host | ||
789 | TSC is not stable. | ||
790 | |||
780 | struct kvm_clock_data { | 791 | struct kvm_clock_data { |
781 | __u64 clock; /* kvmclock current value */ | 792 | __u64 clock; /* kvmclock current value */ |
782 | __u32 flags; | 793 | __u32 flags; |
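As a quick illustration of the api.txt text above (not part of this merge), a minimal userspace sketch that probes KVM_CAP_ADJUST_CLOCK and reads kvmclock could look as follows; it is a hypothetical standalone example and assumes an x86 host with /dev/kvm plus uapi headers recent enough to define KVM_CLOCK_TSC_STABLE:

/*
 * Hypothetical example (not from the patch): query which kvm_clock_data
 * flags the host can report, then read kvmclock once.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	struct kvm_clock_data data;
	int kvm, vm, flags;

	kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
	if (kvm < 0)
		return 1;

	/* Supported flag bits; 0 on kernels without KVM_CLOCK_TSC_STABLE. */
	flags = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_ADJUST_CLOCK);
	printf("reportable kvm_clock_data flags: %#x\n", flags);

	vm = ioctl(kvm, KVM_CREATE_VM, 0);
	if (vm < 0)
		return 1;

	memset(&data, 0, sizeof(data));
	if (ioctl(vm, KVM_GET_CLOCK, &data) < 0)
		return 1;

	printf("kvmclock: %llu ns, TSC-stable: %s\n",
	       (unsigned long long)data.clock,
	       (data.flags & KVM_CLOCK_TSC_STABLE) ? "yes" : "no");
	return 0;
}

On kernels predating this change the capability check simply returns 0 and the flag is never set, so the same sketch still reads the CLOCK_MONOTONIC-based value described above.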
diff --git a/MAINTAINERS b/MAINTAINERS index 851b89b9edcb..8d4148406923 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -77,6 +77,7 @@ Descriptions of section entries: | |||
77 | Q: Patchwork web based patch tracking system site | 77 | Q: Patchwork web based patch tracking system site |
78 | T: SCM tree type and location. | 78 | T: SCM tree type and location. |
79 | Type is one of: git, hg, quilt, stgit, topgit | 79 | Type is one of: git, hg, quilt, stgit, topgit |
80 | B: Bug tracking system location. | ||
80 | S: Status, one of the following: | 81 | S: Status, one of the following: |
81 | Supported: Someone is actually paid to look after this. | 82 | Supported: Someone is actually paid to look after this. |
82 | Maintained: Someone actually looks after it. | 83 | Maintained: Someone actually looks after it. |
@@ -281,6 +282,7 @@ L: linux-acpi@vger.kernel.org | |||
281 | W: https://01.org/linux-acpi | 282 | W: https://01.org/linux-acpi |
282 | Q: https://patchwork.kernel.org/project/linux-acpi/list/ | 283 | Q: https://patchwork.kernel.org/project/linux-acpi/list/ |
283 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm | 284 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm |
285 | B: https://bugzilla.kernel.org | ||
284 | S: Supported | 286 | S: Supported |
285 | F: drivers/acpi/ | 287 | F: drivers/acpi/ |
286 | F: drivers/pnp/pnpacpi/ | 288 | F: drivers/pnp/pnpacpi/ |
@@ -304,6 +306,8 @@ W: https://acpica.org/ | |||
304 | W: https://github.com/acpica/acpica/ | 306 | W: https://github.com/acpica/acpica/ |
305 | Q: https://patchwork.kernel.org/project/linux-acpi/list/ | 307 | Q: https://patchwork.kernel.org/project/linux-acpi/list/ |
306 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm | 308 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm |
309 | B: https://bugzilla.kernel.org | ||
310 | B: https://bugs.acpica.org | ||
307 | S: Supported | 311 | S: Supported |
308 | F: drivers/acpi/acpica/ | 312 | F: drivers/acpi/acpica/ |
309 | F: include/acpi/ | 313 | F: include/acpi/ |
@@ -313,6 +317,7 @@ ACPI FAN DRIVER | |||
313 | M: Zhang Rui <rui.zhang@intel.com> | 317 | M: Zhang Rui <rui.zhang@intel.com> |
314 | L: linux-acpi@vger.kernel.org | 318 | L: linux-acpi@vger.kernel.org |
315 | W: https://01.org/linux-acpi | 319 | W: https://01.org/linux-acpi |
320 | B: https://bugzilla.kernel.org | ||
316 | S: Supported | 321 | S: Supported |
317 | F: drivers/acpi/fan.c | 322 | F: drivers/acpi/fan.c |
318 | 323 | ||
@@ -328,6 +333,7 @@ ACPI THERMAL DRIVER | |||
328 | M: Zhang Rui <rui.zhang@intel.com> | 333 | M: Zhang Rui <rui.zhang@intel.com> |
329 | L: linux-acpi@vger.kernel.org | 334 | L: linux-acpi@vger.kernel.org |
330 | W: https://01.org/linux-acpi | 335 | W: https://01.org/linux-acpi |
336 | B: https://bugzilla.kernel.org | ||
331 | S: Supported | 337 | S: Supported |
332 | F: drivers/acpi/*thermal* | 338 | F: drivers/acpi/*thermal* |
333 | 339 | ||
@@ -335,6 +341,7 @@ ACPI VIDEO DRIVER | |||
335 | M: Zhang Rui <rui.zhang@intel.com> | 341 | M: Zhang Rui <rui.zhang@intel.com> |
336 | L: linux-acpi@vger.kernel.org | 342 | L: linux-acpi@vger.kernel.org |
337 | W: https://01.org/linux-acpi | 343 | W: https://01.org/linux-acpi |
344 | B: https://bugzilla.kernel.org | ||
338 | S: Supported | 345 | S: Supported |
339 | F: drivers/acpi/acpi_video.c | 346 | F: drivers/acpi/acpi_video.c |
340 | 347 | ||
@@ -5663,6 +5670,7 @@ HIBERNATION (aka Software Suspend, aka swsusp) | |||
5663 | M: "Rafael J. Wysocki" <rjw@rjwysocki.net> | 5670 | M: "Rafael J. Wysocki" <rjw@rjwysocki.net> |
5664 | M: Pavel Machek <pavel@ucw.cz> | 5671 | M: Pavel Machek <pavel@ucw.cz> |
5665 | L: linux-pm@vger.kernel.org | 5672 | L: linux-pm@vger.kernel.org |
5673 | B: https://bugzilla.kernel.org | ||
5666 | S: Supported | 5674 | S: Supported |
5667 | F: arch/x86/power/ | 5675 | F: arch/x86/power/ |
5668 | F: drivers/base/power/ | 5676 | F: drivers/base/power/ |
@@ -7084,6 +7092,7 @@ F: drivers/scsi/53c700* | |||
7084 | LED SUBSYSTEM | 7092 | LED SUBSYSTEM |
7085 | M: Richard Purdie <rpurdie@rpsys.net> | 7093 | M: Richard Purdie <rpurdie@rpsys.net> |
7086 | M: Jacek Anaszewski <j.anaszewski@samsung.com> | 7094 | M: Jacek Anaszewski <j.anaszewski@samsung.com> |
7095 | M: Pavel Machek <pavel@ucw.cz> | ||
7087 | L: linux-leds@vger.kernel.org | 7096 | L: linux-leds@vger.kernel.org |
7088 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/j.anaszewski/linux-leds.git | 7097 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/j.anaszewski/linux-leds.git |
7089 | S: Maintained | 7098 | S: Maintained |
@@ -8057,6 +8066,7 @@ F: drivers/infiniband/hw/mlx4/ | |||
8057 | F: include/linux/mlx4/ | 8066 | F: include/linux/mlx4/ |
8058 | 8067 | ||
8059 | MELLANOX MLX5 core VPI driver | 8068 | MELLANOX MLX5 core VPI driver |
8069 | M: Saeed Mahameed <saeedm@mellanox.com> | ||
8060 | M: Matan Barak <matanb@mellanox.com> | 8070 | M: Matan Barak <matanb@mellanox.com> |
8061 | M: Leon Romanovsky <leonro@mellanox.com> | 8071 | M: Leon Romanovsky <leonro@mellanox.com> |
8062 | L: netdev@vger.kernel.org | 8072 | L: netdev@vger.kernel.org |
@@ -9622,6 +9632,7 @@ POWER MANAGEMENT CORE | |||
9622 | M: "Rafael J. Wysocki" <rjw@rjwysocki.net> | 9632 | M: "Rafael J. Wysocki" <rjw@rjwysocki.net> |
9623 | L: linux-pm@vger.kernel.org | 9633 | L: linux-pm@vger.kernel.org |
9624 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm | 9634 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm |
9635 | B: https://bugzilla.kernel.org | ||
9625 | S: Supported | 9636 | S: Supported |
9626 | F: drivers/base/power/ | 9637 | F: drivers/base/power/ |
9627 | F: include/linux/pm.h | 9638 | F: include/linux/pm.h |
@@ -11611,6 +11622,7 @@ M: "Rafael J. Wysocki" <rjw@rjwysocki.net> | |||
11611 | M: Len Brown <len.brown@intel.com> | 11622 | M: Len Brown <len.brown@intel.com> |
11612 | M: Pavel Machek <pavel@ucw.cz> | 11623 | M: Pavel Machek <pavel@ucw.cz> |
11613 | L: linux-pm@vger.kernel.org | 11624 | L: linux-pm@vger.kernel.org |
11625 | B: https://bugzilla.kernel.org | ||
11614 | S: Supported | 11626 | S: Supported |
11615 | F: Documentation/power/ | 11627 | F: Documentation/power/ |
11616 | F: arch/x86/kernel/acpi/ | 11628 | F: arch/x86/kernel/acpi/ |
diff --git a/Makefile b/Makefile --- a/Makefile +++ b/Makefile | |||
@@ -1,7 +1,7 @@ | |||
1 | VERSION = 4 | 1 | VERSION = 4 |
2 | PATCHLEVEL = 9 | 2 | PATCHLEVEL = 9 |
3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
4 | EXTRAVERSION = -rc5 | 4 | EXTRAVERSION = -rc7 |
5 | NAME = Psychotic Stoned Sheep | 5 | NAME = Psychotic Stoned Sheep |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
@@ -399,11 +399,12 @@ KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ | |||
399 | -fno-strict-aliasing -fno-common \ | 399 | -fno-strict-aliasing -fno-common \ |
400 | -Werror-implicit-function-declaration \ | 400 | -Werror-implicit-function-declaration \ |
401 | -Wno-format-security \ | 401 | -Wno-format-security \ |
402 | -std=gnu89 | 402 | -std=gnu89 $(call cc-option,-fno-PIE) |
403 | |||
403 | 404 | ||
404 | KBUILD_AFLAGS_KERNEL := | 405 | KBUILD_AFLAGS_KERNEL := |
405 | KBUILD_CFLAGS_KERNEL := | 406 | KBUILD_CFLAGS_KERNEL := |
406 | KBUILD_AFLAGS := -D__ASSEMBLY__ | 407 | KBUILD_AFLAGS := -D__ASSEMBLY__ $(call cc-option,-fno-PIE) |
407 | KBUILD_AFLAGS_MODULE := -DMODULE | 408 | KBUILD_AFLAGS_MODULE := -DMODULE |
408 | KBUILD_CFLAGS_MODULE := -DMODULE | 409 | KBUILD_CFLAGS_MODULE := -DMODULE |
409 | KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds | 410 | KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds |
diff --git a/arch/arm/boot/dts/imx53-qsb.dts b/arch/arm/boot/dts/imx53-qsb.dts index dec4b073ceb1..379939699164 100644 --- a/arch/arm/boot/dts/imx53-qsb.dts +++ b/arch/arm/boot/dts/imx53-qsb.dts | |||
@@ -64,8 +64,8 @@ | |||
64 | }; | 64 | }; |
65 | 65 | ||
66 | ldo3_reg: ldo3 { | 66 | ldo3_reg: ldo3 { |
67 | regulator-min-microvolt = <600000>; | 67 | regulator-min-microvolt = <1725000>; |
68 | regulator-max-microvolt = <1800000>; | 68 | regulator-max-microvolt = <3300000>; |
69 | regulator-always-on; | 69 | regulator-always-on; |
70 | }; | 70 | }; |
71 | 71 | ||
@@ -76,8 +76,8 @@ | |||
76 | }; | 76 | }; |
77 | 77 | ||
78 | ldo5_reg: ldo5 { | 78 | ldo5_reg: ldo5 { |
79 | regulator-min-microvolt = <1725000>; | 79 | regulator-min-microvolt = <1200000>; |
80 | regulator-max-microvolt = <3300000>; | 80 | regulator-max-microvolt = <3600000>; |
81 | regulator-always-on; | 81 | regulator-always-on; |
82 | }; | 82 | }; |
83 | 83 | ||
@@ -100,14 +100,14 @@ | |||
100 | }; | 100 | }; |
101 | 101 | ||
102 | ldo9_reg: ldo9 { | 102 | ldo9_reg: ldo9 { |
103 | regulator-min-microvolt = <1200000>; | 103 | regulator-min-microvolt = <1250000>; |
104 | regulator-max-microvolt = <3600000>; | 104 | regulator-max-microvolt = <3600000>; |
105 | regulator-always-on; | 105 | regulator-always-on; |
106 | }; | 106 | }; |
107 | 107 | ||
108 | ldo10_reg: ldo10 { | 108 | ldo10_reg: ldo10 { |
109 | regulator-min-microvolt = <1250000>; | 109 | regulator-min-microvolt = <1200000>; |
110 | regulator-max-microvolt = <3650000>; | 110 | regulator-max-microvolt = <3600000>; |
111 | regulator-always-on; | 111 | regulator-always-on; |
112 | }; | 112 | }; |
113 | }; | 113 | }; |
diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi index 0ff1c2de95bf..26cce4d18405 100644 --- a/arch/arm/boot/dts/logicpd-som-lv.dtsi +++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi | |||
@@ -13,6 +13,11 @@ | |||
13 | }; | 13 | }; |
14 | }; | 14 | }; |
15 | 15 | ||
16 | memory@80000000 { | ||
17 | device_type = "memory"; | ||
18 | reg = <0x80000000 0>; | ||
19 | }; | ||
20 | |||
16 | wl12xx_vmmc: wl12xx_vmmc { | 21 | wl12xx_vmmc: wl12xx_vmmc { |
17 | compatible = "regulator-fixed"; | 22 | compatible = "regulator-fixed"; |
18 | regulator-name = "vwl1271"; | 23 | regulator-name = "vwl1271"; |
diff --git a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi index 731ec37aed5b..8f9a69ca818c 100644 --- a/arch/arm/boot/dts/logicpd-torpedo-som.dtsi +++ b/arch/arm/boot/dts/logicpd-torpedo-som.dtsi | |||
@@ -13,9 +13,9 @@ | |||
13 | }; | 13 | }; |
14 | }; | 14 | }; |
15 | 15 | ||
16 | memory@0 { | 16 | memory@80000000 { |
17 | device_type = "memory"; | 17 | device_type = "memory"; |
18 | reg = <0 0>; | 18 | reg = <0x80000000 0>; |
19 | }; | 19 | }; |
20 | 20 | ||
21 | leds { | 21 | leds { |
diff --git a/arch/arm/boot/dts/omap5-board-common.dtsi b/arch/arm/boot/dts/omap5-board-common.dtsi index 6365635fea5c..4caadb253249 100644 --- a/arch/arm/boot/dts/omap5-board-common.dtsi +++ b/arch/arm/boot/dts/omap5-board-common.dtsi | |||
@@ -124,6 +124,7 @@ | |||
124 | compatible = "ti,abe-twl6040"; | 124 | compatible = "ti,abe-twl6040"; |
125 | ti,model = "omap5-uevm"; | 125 | ti,model = "omap5-uevm"; |
126 | 126 | ||
127 | ti,jack-detection; | ||
127 | ti,mclk-freq = <19200000>; | 128 | ti,mclk-freq = <19200000>; |
128 | 129 | ||
129 | ti,mcpdm = <&mcpdm>; | 130 | ti,mcpdm = <&mcpdm>; |
@@ -415,7 +416,7 @@ | |||
415 | ti,backup-battery-charge-high-current; | 416 | ti,backup-battery-charge-high-current; |
416 | }; | 417 | }; |
417 | 418 | ||
418 | gpadc { | 419 | gpadc: gpadc { |
419 | compatible = "ti,palmas-gpadc"; | 420 | compatible = "ti,palmas-gpadc"; |
420 | interrupts = <18 0 | 421 | interrupts = <18 0 |
421 | 16 0 | 422 | 16 0 |
@@ -475,8 +476,8 @@ | |||
475 | smps6_reg: smps6 { | 476 | smps6_reg: smps6 { |
476 | /* VDD_DDR3 - over VDD_SMPS6 */ | 477 | /* VDD_DDR3 - over VDD_SMPS6 */ |
477 | regulator-name = "smps6"; | 478 | regulator-name = "smps6"; |
478 | regulator-min-microvolt = <1200000>; | 479 | regulator-min-microvolt = <1350000>; |
479 | regulator-max-microvolt = <1200000>; | 480 | regulator-max-microvolt = <1350000>; |
480 | regulator-always-on; | 481 | regulator-always-on; |
481 | regulator-boot-on; | 482 | regulator-boot-on; |
482 | }; | 483 | }; |
diff --git a/arch/arm/boot/dts/stih410-b2260.dts b/arch/arm/boot/dts/stih410-b2260.dts index ef2ff2f518f6..7fb507fcba7e 100644 --- a/arch/arm/boot/dts/stih410-b2260.dts +++ b/arch/arm/boot/dts/stih410-b2260.dts | |||
@@ -74,7 +74,7 @@ | |||
74 | /* Low speed expansion connector */ | 74 | /* Low speed expansion connector */ |
75 | spi0: spi@9844000 { | 75 | spi0: spi@9844000 { |
76 | label = "LS-SPI0"; | 76 | label = "LS-SPI0"; |
77 | cs-gpio = <&pio30 3 0>; | 77 | cs-gpios = <&pio30 3 0>; |
78 | status = "okay"; | 78 | status = "okay"; |
79 | }; | 79 | }; |
80 | 80 | ||
diff --git a/arch/arm/boot/dts/sun8i-a23-a33.dtsi b/arch/arm/boot/dts/sun8i-a23-a33.dtsi index 48fc24f36fcb..300a1bd5a6ec 100644 --- a/arch/arm/boot/dts/sun8i-a23-a33.dtsi +++ b/arch/arm/boot/dts/sun8i-a23-a33.dtsi | |||
@@ -282,11 +282,15 @@ | |||
282 | uart1_pins_a: uart1@0 { | 282 | uart1_pins_a: uart1@0 { |
283 | allwinner,pins = "PG6", "PG7"; | 283 | allwinner,pins = "PG6", "PG7"; |
284 | allwinner,function = "uart1"; | 284 | allwinner,function = "uart1"; |
285 | allwinner,drive = <SUN4I_PINCTRL_10_MA>; | ||
286 | allwinner,pull = <SUN4I_PINCTRL_NO_PULL>; | ||
285 | }; | 287 | }; |
286 | 288 | ||
287 | uart1_pins_cts_rts_a: uart1-cts-rts@0 { | 289 | uart1_pins_cts_rts_a: uart1-cts-rts@0 { |
288 | allwinner,pins = "PG8", "PG9"; | 290 | allwinner,pins = "PG8", "PG9"; |
289 | allwinner,function = "uart1"; | 291 | allwinner,function = "uart1"; |
292 | allwinner,drive = <SUN4I_PINCTRL_10_MA>; | ||
293 | allwinner,pull = <SUN4I_PINCTRL_NO_PULL>; | ||
290 | }; | 294 | }; |
291 | 295 | ||
292 | mmc0_pins_a: mmc0@0 { | 296 | mmc0_pins_a: mmc0@0 { |
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild index 0745538b26d3..55e0e3ea9cb6 100644 --- a/arch/arm/include/asm/Kbuild +++ b/arch/arm/include/asm/Kbuild | |||
@@ -8,7 +8,6 @@ generic-y += early_ioremap.h | |||
8 | generic-y += emergency-restart.h | 8 | generic-y += emergency-restart.h |
9 | generic-y += errno.h | 9 | generic-y += errno.h |
10 | generic-y += exec.h | 10 | generic-y += exec.h |
11 | generic-y += export.h | ||
12 | generic-y += ioctl.h | 11 | generic-y += ioctl.h |
13 | generic-y += ipcbuf.h | 12 | generic-y += ipcbuf.h |
14 | generic-y += irq_regs.h | 13 | generic-y += irq_regs.h |
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index 68c2c097cffe..ad325a8c7e1e 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile | |||
@@ -33,7 +33,7 @@ endif | |||
33 | obj-$(CONFIG_CPU_IDLE) += cpuidle.o | 33 | obj-$(CONFIG_CPU_IDLE) += cpuidle.o |
34 | obj-$(CONFIG_ISA_DMA_API) += dma.o | 34 | obj-$(CONFIG_ISA_DMA_API) += dma.o |
35 | obj-$(CONFIG_FIQ) += fiq.o fiqasm.o | 35 | obj-$(CONFIG_FIQ) += fiq.o fiqasm.o |
36 | obj-$(CONFIG_MODULES) += module.o | 36 | obj-$(CONFIG_MODULES) += armksyms.o module.o |
37 | obj-$(CONFIG_ARM_MODULE_PLTS) += module-plts.o | 37 | obj-$(CONFIG_ARM_MODULE_PLTS) += module-plts.o |
38 | obj-$(CONFIG_ISA_DMA) += dma-isa.o | 38 | obj-$(CONFIG_ISA_DMA) += dma-isa.o |
39 | obj-$(CONFIG_PCI) += bios32.o isa.o | 39 | obj-$(CONFIG_PCI) += bios32.o isa.o |
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c new file mode 100644 index 000000000000..7e45f69a0ddc --- /dev/null +++ b/arch/arm/kernel/armksyms.c | |||
@@ -0,0 +1,183 @@ | |||
1 | /* | ||
2 | * linux/arch/arm/kernel/armksyms.c | ||
3 | * | ||
4 | * Copyright (C) 2000 Russell King | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | #include <linux/export.h> | ||
11 | #include <linux/sched.h> | ||
12 | #include <linux/string.h> | ||
13 | #include <linux/cryptohash.h> | ||
14 | #include <linux/delay.h> | ||
15 | #include <linux/in6.h> | ||
16 | #include <linux/syscalls.h> | ||
17 | #include <linux/uaccess.h> | ||
18 | #include <linux/io.h> | ||
19 | #include <linux/arm-smccc.h> | ||
20 | |||
21 | #include <asm/checksum.h> | ||
22 | #include <asm/ftrace.h> | ||
23 | |||
24 | /* | ||
25 | * libgcc functions - functions that are used internally by the | ||
26 | * compiler... (prototypes are not correct though, but that | ||
27 | * doesn't really matter since they're not versioned). | ||
28 | */ | ||
29 | extern void __ashldi3(void); | ||
30 | extern void __ashrdi3(void); | ||
31 | extern void __divsi3(void); | ||
32 | extern void __lshrdi3(void); | ||
33 | extern void __modsi3(void); | ||
34 | extern void __muldi3(void); | ||
35 | extern void __ucmpdi2(void); | ||
36 | extern void __udivsi3(void); | ||
37 | extern void __umodsi3(void); | ||
38 | extern void __do_div64(void); | ||
39 | extern void __bswapsi2(void); | ||
40 | extern void __bswapdi2(void); | ||
41 | |||
42 | extern void __aeabi_idiv(void); | ||
43 | extern void __aeabi_idivmod(void); | ||
44 | extern void __aeabi_lasr(void); | ||
45 | extern void __aeabi_llsl(void); | ||
46 | extern void __aeabi_llsr(void); | ||
47 | extern void __aeabi_lmul(void); | ||
48 | extern void __aeabi_uidiv(void); | ||
49 | extern void __aeabi_uidivmod(void); | ||
50 | extern void __aeabi_ulcmp(void); | ||
51 | |||
52 | extern void fpundefinstr(void); | ||
53 | |||
54 | void mmioset(void *, unsigned int, size_t); | ||
55 | void mmiocpy(void *, const void *, size_t); | ||
56 | |||
57 | /* platform dependent support */ | ||
58 | EXPORT_SYMBOL(arm_delay_ops); | ||
59 | |||
60 | /* networking */ | ||
61 | EXPORT_SYMBOL(csum_partial); | ||
62 | EXPORT_SYMBOL(csum_partial_copy_from_user); | ||
63 | EXPORT_SYMBOL(csum_partial_copy_nocheck); | ||
64 | EXPORT_SYMBOL(__csum_ipv6_magic); | ||
65 | |||
66 | /* io */ | ||
67 | #ifndef __raw_readsb | ||
68 | EXPORT_SYMBOL(__raw_readsb); | ||
69 | #endif | ||
70 | #ifndef __raw_readsw | ||
71 | EXPORT_SYMBOL(__raw_readsw); | ||
72 | #endif | ||
73 | #ifndef __raw_readsl | ||
74 | EXPORT_SYMBOL(__raw_readsl); | ||
75 | #endif | ||
76 | #ifndef __raw_writesb | ||
77 | EXPORT_SYMBOL(__raw_writesb); | ||
78 | #endif | ||
79 | #ifndef __raw_writesw | ||
80 | EXPORT_SYMBOL(__raw_writesw); | ||
81 | #endif | ||
82 | #ifndef __raw_writesl | ||
83 | EXPORT_SYMBOL(__raw_writesl); | ||
84 | #endif | ||
85 | |||
86 | /* string / mem functions */ | ||
87 | EXPORT_SYMBOL(strchr); | ||
88 | EXPORT_SYMBOL(strrchr); | ||
89 | EXPORT_SYMBOL(memset); | ||
90 | EXPORT_SYMBOL(memcpy); | ||
91 | EXPORT_SYMBOL(memmove); | ||
92 | EXPORT_SYMBOL(memchr); | ||
93 | EXPORT_SYMBOL(__memzero); | ||
94 | |||
95 | EXPORT_SYMBOL(mmioset); | ||
96 | EXPORT_SYMBOL(mmiocpy); | ||
97 | |||
98 | #ifdef CONFIG_MMU | ||
99 | EXPORT_SYMBOL(copy_page); | ||
100 | |||
101 | EXPORT_SYMBOL(arm_copy_from_user); | ||
102 | EXPORT_SYMBOL(arm_copy_to_user); | ||
103 | EXPORT_SYMBOL(arm_clear_user); | ||
104 | |||
105 | EXPORT_SYMBOL(__get_user_1); | ||
106 | EXPORT_SYMBOL(__get_user_2); | ||
107 | EXPORT_SYMBOL(__get_user_4); | ||
108 | EXPORT_SYMBOL(__get_user_8); | ||
109 | |||
110 | #ifdef __ARMEB__ | ||
111 | EXPORT_SYMBOL(__get_user_64t_1); | ||
112 | EXPORT_SYMBOL(__get_user_64t_2); | ||
113 | EXPORT_SYMBOL(__get_user_64t_4); | ||
114 | EXPORT_SYMBOL(__get_user_32t_8); | ||
115 | #endif | ||
116 | |||
117 | EXPORT_SYMBOL(__put_user_1); | ||
118 | EXPORT_SYMBOL(__put_user_2); | ||
119 | EXPORT_SYMBOL(__put_user_4); | ||
120 | EXPORT_SYMBOL(__put_user_8); | ||
121 | #endif | ||
122 | |||
123 | /* gcc lib functions */ | ||
124 | EXPORT_SYMBOL(__ashldi3); | ||
125 | EXPORT_SYMBOL(__ashrdi3); | ||
126 | EXPORT_SYMBOL(__divsi3); | ||
127 | EXPORT_SYMBOL(__lshrdi3); | ||
128 | EXPORT_SYMBOL(__modsi3); | ||
129 | EXPORT_SYMBOL(__muldi3); | ||
130 | EXPORT_SYMBOL(__ucmpdi2); | ||
131 | EXPORT_SYMBOL(__udivsi3); | ||
132 | EXPORT_SYMBOL(__umodsi3); | ||
133 | EXPORT_SYMBOL(__do_div64); | ||
134 | EXPORT_SYMBOL(__bswapsi2); | ||
135 | EXPORT_SYMBOL(__bswapdi2); | ||
136 | |||
137 | #ifdef CONFIG_AEABI | ||
138 | EXPORT_SYMBOL(__aeabi_idiv); | ||
139 | EXPORT_SYMBOL(__aeabi_idivmod); | ||
140 | EXPORT_SYMBOL(__aeabi_lasr); | ||
141 | EXPORT_SYMBOL(__aeabi_llsl); | ||
142 | EXPORT_SYMBOL(__aeabi_llsr); | ||
143 | EXPORT_SYMBOL(__aeabi_lmul); | ||
144 | EXPORT_SYMBOL(__aeabi_uidiv); | ||
145 | EXPORT_SYMBOL(__aeabi_uidivmod); | ||
146 | EXPORT_SYMBOL(__aeabi_ulcmp); | ||
147 | #endif | ||
148 | |||
149 | /* bitops */ | ||
150 | EXPORT_SYMBOL(_set_bit); | ||
151 | EXPORT_SYMBOL(_test_and_set_bit); | ||
152 | EXPORT_SYMBOL(_clear_bit); | ||
153 | EXPORT_SYMBOL(_test_and_clear_bit); | ||
154 | EXPORT_SYMBOL(_change_bit); | ||
155 | EXPORT_SYMBOL(_test_and_change_bit); | ||
156 | EXPORT_SYMBOL(_find_first_zero_bit_le); | ||
157 | EXPORT_SYMBOL(_find_next_zero_bit_le); | ||
158 | EXPORT_SYMBOL(_find_first_bit_le); | ||
159 | EXPORT_SYMBOL(_find_next_bit_le); | ||
160 | |||
161 | #ifdef __ARMEB__ | ||
162 | EXPORT_SYMBOL(_find_first_zero_bit_be); | ||
163 | EXPORT_SYMBOL(_find_next_zero_bit_be); | ||
164 | EXPORT_SYMBOL(_find_first_bit_be); | ||
165 | EXPORT_SYMBOL(_find_next_bit_be); | ||
166 | #endif | ||
167 | |||
168 | #ifdef CONFIG_FUNCTION_TRACER | ||
169 | #ifdef CONFIG_OLD_MCOUNT | ||
170 | EXPORT_SYMBOL(mcount); | ||
171 | #endif | ||
172 | EXPORT_SYMBOL(__gnu_mcount_nc); | ||
173 | #endif | ||
174 | |||
175 | #ifdef CONFIG_ARM_PATCH_PHYS_VIRT | ||
176 | EXPORT_SYMBOL(__pv_phys_pfn_offset); | ||
177 | EXPORT_SYMBOL(__pv_offset); | ||
178 | #endif | ||
179 | |||
180 | #ifdef CONFIG_HAVE_ARM_SMCCC | ||
181 | EXPORT_SYMBOL(arm_smccc_smc); | ||
182 | EXPORT_SYMBOL(arm_smccc_hvc); | ||
183 | #endif | ||
diff --git a/arch/arm/kernel/entry-ftrace.S b/arch/arm/kernel/entry-ftrace.S index b629d3f11c3d..c73c4030ca5d 100644 --- a/arch/arm/kernel/entry-ftrace.S +++ b/arch/arm/kernel/entry-ftrace.S | |||
@@ -7,7 +7,6 @@ | |||
7 | #include <asm/assembler.h> | 7 | #include <asm/assembler.h> |
8 | #include <asm/ftrace.h> | 8 | #include <asm/ftrace.h> |
9 | #include <asm/unwind.h> | 9 | #include <asm/unwind.h> |
10 | #include <asm/export.h> | ||
11 | 10 | ||
12 | #include "entry-header.S" | 11 | #include "entry-header.S" |
13 | 12 | ||
@@ -154,7 +153,6 @@ ENTRY(mcount) | |||
154 | __mcount _old | 153 | __mcount _old |
155 | #endif | 154 | #endif |
156 | ENDPROC(mcount) | 155 | ENDPROC(mcount) |
157 | EXPORT_SYMBOL(mcount) | ||
158 | 156 | ||
159 | #ifdef CONFIG_DYNAMIC_FTRACE | 157 | #ifdef CONFIG_DYNAMIC_FTRACE |
160 | ENTRY(ftrace_caller_old) | 158 | ENTRY(ftrace_caller_old) |
@@ -207,7 +205,6 @@ UNWIND(.fnstart) | |||
207 | #endif | 205 | #endif |
208 | UNWIND(.fnend) | 206 | UNWIND(.fnend) |
209 | ENDPROC(__gnu_mcount_nc) | 207 | ENDPROC(__gnu_mcount_nc) |
210 | EXPORT_SYMBOL(__gnu_mcount_nc) | ||
211 | 208 | ||
212 | #ifdef CONFIG_DYNAMIC_FTRACE | 209 | #ifdef CONFIG_DYNAMIC_FTRACE |
213 | ENTRY(ftrace_caller) | 210 | ENTRY(ftrace_caller) |
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index f41cee4c5746..04286fd9e09c 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <asm/memory.h> | 22 | #include <asm/memory.h> |
23 | #include <asm/thread_info.h> | 23 | #include <asm/thread_info.h> |
24 | #include <asm/pgtable.h> | 24 | #include <asm/pgtable.h> |
25 | #include <asm/export.h> | ||
26 | 25 | ||
27 | #if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING) | 26 | #if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_SEMIHOSTING) |
28 | #include CONFIG_DEBUG_LL_INCLUDE | 27 | #include CONFIG_DEBUG_LL_INCLUDE |
@@ -728,8 +727,6 @@ __pv_phys_pfn_offset: | |||
728 | __pv_offset: | 727 | __pv_offset: |
729 | .quad 0 | 728 | .quad 0 |
730 | .size __pv_offset, . -__pv_offset | 729 | .size __pv_offset, . -__pv_offset |
731 | EXPORT_SYMBOL(__pv_phys_pfn_offset) | ||
732 | EXPORT_SYMBOL(__pv_offset) | ||
733 | #endif | 730 | #endif |
734 | 731 | ||
735 | #include "head-common.S" | 732 | #include "head-common.S" |
diff --git a/arch/arm/kernel/smccc-call.S b/arch/arm/kernel/smccc-call.S index 37669e7e13af..2e48b674aab1 100644 --- a/arch/arm/kernel/smccc-call.S +++ b/arch/arm/kernel/smccc-call.S | |||
@@ -16,7 +16,6 @@ | |||
16 | #include <asm/opcodes-sec.h> | 16 | #include <asm/opcodes-sec.h> |
17 | #include <asm/opcodes-virt.h> | 17 | #include <asm/opcodes-virt.h> |
18 | #include <asm/unwind.h> | 18 | #include <asm/unwind.h> |
19 | #include <asm/export.h> | ||
20 | 19 | ||
21 | /* | 20 | /* |
22 | * Wrap c macros in asm macros to delay expansion until after the | 21 | * Wrap c macros in asm macros to delay expansion until after the |
@@ -52,7 +51,6 @@ UNWIND( .fnend) | |||
52 | ENTRY(arm_smccc_smc) | 51 | ENTRY(arm_smccc_smc) |
53 | SMCCC SMCCC_SMC | 52 | SMCCC SMCCC_SMC |
54 | ENDPROC(arm_smccc_smc) | 53 | ENDPROC(arm_smccc_smc) |
55 | EXPORT_SYMBOL(arm_smccc_smc) | ||
56 | 54 | ||
57 | /* | 55 | /* |
58 | * void smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2, | 56 | * void smccc_hvc(unsigned long a0, unsigned long a1, unsigned long a2, |
@@ -62,4 +60,3 @@ EXPORT_SYMBOL(arm_smccc_smc) | |||
62 | ENTRY(arm_smccc_hvc) | 60 | ENTRY(arm_smccc_hvc) |
63 | SMCCC SMCCC_HVC | 61 | SMCCC SMCCC_HVC |
64 | ENDPROC(arm_smccc_hvc) | 62 | ENDPROC(arm_smccc_hvc) |
65 | EXPORT_SYMBOL(arm_smccc_hvc) | ||
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index bc698383e822..9688ec0c6ef4 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c | |||
@@ -74,6 +74,26 @@ void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long | |||
74 | dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs)); | 74 | dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs)); |
75 | } | 75 | } |
76 | 76 | ||
77 | void dump_backtrace_stm(u32 *stack, u32 instruction) | ||
78 | { | ||
79 | char str[80], *p; | ||
80 | unsigned int x; | ||
81 | int reg; | ||
82 | |||
83 | for (reg = 10, x = 0, p = str; reg >= 0; reg--) { | ||
84 | if (instruction & BIT(reg)) { | ||
85 | p += sprintf(p, " r%d:%08x", reg, *stack--); | ||
86 | if (++x == 6) { | ||
87 | x = 0; | ||
88 | p = str; | ||
89 | printk("%s\n", str); | ||
90 | } | ||
91 | } | ||
92 | } | ||
93 | if (p != str) | ||
94 | printk("%s\n", str); | ||
95 | } | ||
96 | |||
77 | #ifndef CONFIG_ARM_UNWIND | 97 | #ifndef CONFIG_ARM_UNWIND |
78 | /* | 98 | /* |
79 | * Stack pointers should always be within the kernels view of | 99 | * Stack pointers should always be within the kernels view of |
diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S index 7fa487ef7e2f..37b2a11af345 100644 --- a/arch/arm/kernel/vmlinux-xip.lds.S +++ b/arch/arm/kernel/vmlinux-xip.lds.S | |||
@@ -3,6 +3,9 @@ | |||
3 | * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz> | 3 | * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz> |
4 | */ | 4 | */ |
5 | 5 | ||
6 | /* No __ro_after_init data in the .rodata section - which will always be ro */ | ||
7 | #define RO_AFTER_INIT_DATA | ||
8 | |||
6 | #include <asm-generic/vmlinux.lds.h> | 9 | #include <asm-generic/vmlinux.lds.h> |
7 | #include <asm/cache.h> | 10 | #include <asm/cache.h> |
8 | #include <asm/thread_info.h> | 11 | #include <asm/thread_info.h> |
@@ -223,6 +226,8 @@ SECTIONS | |||
223 | . = ALIGN(PAGE_SIZE); | 226 | . = ALIGN(PAGE_SIZE); |
224 | __init_end = .; | 227 | __init_end = .; |
225 | 228 | ||
229 | *(.data..ro_after_init) | ||
230 | |||
226 | NOSAVE_DATA | 231 | NOSAVE_DATA |
227 | CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES) | 232 | CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES) |
228 | READ_MOSTLY_DATA(L1_CACHE_BYTES) | 233 | READ_MOSTLY_DATA(L1_CACHE_BYTES) |
diff --git a/arch/arm/lib/ashldi3.S b/arch/arm/lib/ashldi3.S index a7e7de89bd75..b05e95840651 100644 --- a/arch/arm/lib/ashldi3.S +++ b/arch/arm/lib/ashldi3.S | |||
@@ -28,7 +28,6 @@ Boston, MA 02110-1301, USA. */ | |||
28 | 28 | ||
29 | #include <linux/linkage.h> | 29 | #include <linux/linkage.h> |
30 | #include <asm/assembler.h> | 30 | #include <asm/assembler.h> |
31 | #include <asm/export.h> | ||
32 | 31 | ||
33 | #ifdef __ARMEB__ | 32 | #ifdef __ARMEB__ |
34 | #define al r1 | 33 | #define al r1 |
@@ -53,5 +52,3 @@ ENTRY(__aeabi_llsl) | |||
53 | 52 | ||
54 | ENDPROC(__ashldi3) | 53 | ENDPROC(__ashldi3) |
55 | ENDPROC(__aeabi_llsl) | 54 | ENDPROC(__aeabi_llsl) |
56 | EXPORT_SYMBOL(__ashldi3) | ||
57 | EXPORT_SYMBOL(__aeabi_llsl) | ||
diff --git a/arch/arm/lib/ashrdi3.S b/arch/arm/lib/ashrdi3.S index 490336e42518..275d7d2341a4 100644 --- a/arch/arm/lib/ashrdi3.S +++ b/arch/arm/lib/ashrdi3.S | |||
@@ -28,7 +28,6 @@ Boston, MA 02110-1301, USA. */ | |||
28 | 28 | ||
29 | #include <linux/linkage.h> | 29 | #include <linux/linkage.h> |
30 | #include <asm/assembler.h> | 30 | #include <asm/assembler.h> |
31 | #include <asm/export.h> | ||
32 | 31 | ||
33 | #ifdef __ARMEB__ | 32 | #ifdef __ARMEB__ |
34 | #define al r1 | 33 | #define al r1 |
@@ -53,5 +52,3 @@ ENTRY(__aeabi_lasr) | |||
53 | 52 | ||
54 | ENDPROC(__ashrdi3) | 53 | ENDPROC(__ashrdi3) |
55 | ENDPROC(__aeabi_lasr) | 54 | ENDPROC(__aeabi_lasr) |
56 | EXPORT_SYMBOL(__ashrdi3) | ||
57 | EXPORT_SYMBOL(__aeabi_lasr) | ||
diff --git a/arch/arm/lib/backtrace.S b/arch/arm/lib/backtrace.S index fab5a50503ae..7d7952e5a3b1 100644 --- a/arch/arm/lib/backtrace.S +++ b/arch/arm/lib/backtrace.S | |||
@@ -10,6 +10,7 @@ | |||
10 | * 27/03/03 Ian Molton Clean up CONFIG_CPU | 10 | * 27/03/03 Ian Molton Clean up CONFIG_CPU |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | #include <linux/kern_levels.h> | ||
13 | #include <linux/linkage.h> | 14 | #include <linux/linkage.h> |
14 | #include <asm/assembler.h> | 15 | #include <asm/assembler.h> |
15 | .text | 16 | .text |
@@ -83,13 +84,13 @@ for_each_frame: tst frame, mask @ Check for address exceptions | |||
83 | teq r3, r1, lsr #11 | 84 | teq r3, r1, lsr #11 |
84 | ldreq r0, [frame, #-8] @ get sp | 85 | ldreq r0, [frame, #-8] @ get sp |
85 | subeq r0, r0, #4 @ point at the last arg | 86 | subeq r0, r0, #4 @ point at the last arg |
86 | bleq .Ldumpstm @ dump saved registers | 87 | bleq dump_backtrace_stm @ dump saved registers |
87 | 88 | ||
88 | 1004: ldr r1, [sv_pc, #0] @ if stmfd sp!, {..., fp, ip, lr, pc} | 89 | 1004: ldr r1, [sv_pc, #0] @ if stmfd sp!, {..., fp, ip, lr, pc} |
89 | ldr r3, .Ldsi @ instruction exists, | 90 | ldr r3, .Ldsi @ instruction exists, |
90 | teq r3, r1, lsr #11 | 91 | teq r3, r1, lsr #11 |
91 | subeq r0, frame, #16 | 92 | subeq r0, frame, #16 |
92 | bleq .Ldumpstm @ dump saved registers | 93 | bleq dump_backtrace_stm @ dump saved registers |
93 | 94 | ||
94 | teq sv_fp, #0 @ zero saved fp means | 95 | teq sv_fp, #0 @ zero saved fp means |
95 | beq no_frame @ no further frames | 96 | beq no_frame @ no further frames |
@@ -112,38 +113,6 @@ ENDPROC(c_backtrace) | |||
112 | .long 1004b, 1006b | 113 | .long 1004b, 1006b |
113 | .popsection | 114 | .popsection |
114 | 115 | ||
115 | #define instr r4 | ||
116 | #define reg r5 | ||
117 | #define stack r6 | ||
118 | |||
119 | .Ldumpstm: stmfd sp!, {instr, reg, stack, r7, lr} | ||
120 | mov stack, r0 | ||
121 | mov instr, r1 | ||
122 | mov reg, #10 | ||
123 | mov r7, #0 | ||
124 | 1: mov r3, #1 | ||
125 | ARM( tst instr, r3, lsl reg ) | ||
126 | THUMB( lsl r3, reg ) | ||
127 | THUMB( tst instr, r3 ) | ||
128 | beq 2f | ||
129 | add r7, r7, #1 | ||
130 | teq r7, #6 | ||
131 | moveq r7, #0 | ||
132 | adr r3, .Lcr | ||
133 | addne r3, r3, #1 @ skip newline | ||
134 | ldr r2, [stack], #-4 | ||
135 | mov r1, reg | ||
136 | adr r0, .Lfp | ||
137 | bl printk | ||
138 | 2: subs reg, reg, #1 | ||
139 | bpl 1b | ||
140 | teq r7, #0 | ||
141 | adrne r0, .Lcr | ||
142 | blne printk | ||
143 | ldmfd sp!, {instr, reg, stack, r7, pc} | ||
144 | |||
145 | .Lfp: .asciz " r%d:%08x%s" | ||
146 | .Lcr: .asciz "\n" | ||
147 | .Lbad: .asciz "Backtrace aborted due to bad frame pointer <%p>\n" | 116 | .Lbad: .asciz "Backtrace aborted due to bad frame pointer <%p>\n" |
148 | .align | 117 | .align |
149 | .Ldsi: .word 0xe92dd800 >> 11 @ stmfd sp!, {... fp, ip, lr, pc} | 118 | .Ldsi: .word 0xe92dd800 >> 11 @ stmfd sp!, {... fp, ip, lr, pc} |
diff --git a/arch/arm/lib/bitops.h b/arch/arm/lib/bitops.h index df06638b327c..7d807cfd8ef5 100644 --- a/arch/arm/lib/bitops.h +++ b/arch/arm/lib/bitops.h | |||
@@ -1,6 +1,5 @@ | |||
1 | #include <asm/assembler.h> | 1 | #include <asm/assembler.h> |
2 | #include <asm/unwind.h> | 2 | #include <asm/unwind.h> |
3 | #include <asm/export.h> | ||
4 | 3 | ||
5 | #if __LINUX_ARM_ARCH__ >= 6 | 4 | #if __LINUX_ARM_ARCH__ >= 6 |
6 | .macro bitop, name, instr | 5 | .macro bitop, name, instr |
@@ -26,7 +25,6 @@ UNWIND( .fnstart ) | |||
26 | bx lr | 25 | bx lr |
27 | UNWIND( .fnend ) | 26 | UNWIND( .fnend ) |
28 | ENDPROC(\name ) | 27 | ENDPROC(\name ) |
29 | EXPORT_SYMBOL(\name ) | ||
30 | .endm | 28 | .endm |
31 | 29 | ||
32 | .macro testop, name, instr, store | 30 | .macro testop, name, instr, store |
@@ -57,7 +55,6 @@ UNWIND( .fnstart ) | |||
57 | 2: bx lr | 55 | 2: bx lr |
58 | UNWIND( .fnend ) | 56 | UNWIND( .fnend ) |
59 | ENDPROC(\name ) | 57 | ENDPROC(\name ) |
60 | EXPORT_SYMBOL(\name ) | ||
61 | .endm | 58 | .endm |
62 | #else | 59 | #else |
63 | .macro bitop, name, instr | 60 | .macro bitop, name, instr |
@@ -77,7 +74,6 @@ UNWIND( .fnstart ) | |||
77 | ret lr | 74 | ret lr |
78 | UNWIND( .fnend ) | 75 | UNWIND( .fnend ) |
79 | ENDPROC(\name ) | 76 | ENDPROC(\name ) |
80 | EXPORT_SYMBOL(\name ) | ||
81 | .endm | 77 | .endm |
82 | 78 | ||
83 | /** | 79 | /** |
@@ -106,6 +102,5 @@ UNWIND( .fnstart ) | |||
106 | ret lr | 102 | ret lr |
107 | UNWIND( .fnend ) | 103 | UNWIND( .fnend ) |
108 | ENDPROC(\name ) | 104 | ENDPROC(\name ) |
109 | EXPORT_SYMBOL(\name ) | ||
110 | .endm | 105 | .endm |
111 | #endif | 106 | #endif |
diff --git a/arch/arm/lib/bswapsdi2.S b/arch/arm/lib/bswapsdi2.S index f05f78247304..07cda737bb11 100644 --- a/arch/arm/lib/bswapsdi2.S +++ b/arch/arm/lib/bswapsdi2.S | |||
@@ -1,6 +1,5 @@ | |||
1 | #include <linux/linkage.h> | 1 | #include <linux/linkage.h> |
2 | #include <asm/assembler.h> | 2 | #include <asm/assembler.h> |
3 | #include <asm/export.h> | ||
4 | 3 | ||
5 | #if __LINUX_ARM_ARCH__ >= 6 | 4 | #if __LINUX_ARM_ARCH__ >= 6 |
6 | ENTRY(__bswapsi2) | 5 | ENTRY(__bswapsi2) |
@@ -36,5 +35,3 @@ ENTRY(__bswapdi2) | |||
36 | ret lr | 35 | ret lr |
37 | ENDPROC(__bswapdi2) | 36 | ENDPROC(__bswapdi2) |
38 | #endif | 37 | #endif |
39 | EXPORT_SYMBOL(__bswapsi2) | ||
40 | EXPORT_SYMBOL(__bswapdi2) | ||
diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S index b566154f5cf4..e936352ccb00 100644 --- a/arch/arm/lib/clear_user.S +++ b/arch/arm/lib/clear_user.S | |||
@@ -10,7 +10,6 @@ | |||
10 | #include <linux/linkage.h> | 10 | #include <linux/linkage.h> |
11 | #include <asm/assembler.h> | 11 | #include <asm/assembler.h> |
12 | #include <asm/unwind.h> | 12 | #include <asm/unwind.h> |
13 | #include <asm/export.h> | ||
14 | 13 | ||
15 | .text | 14 | .text |
16 | 15 | ||
@@ -51,9 +50,6 @@ USER( strnebt r2, [r0]) | |||
51 | UNWIND(.fnend) | 50 | UNWIND(.fnend) |
52 | ENDPROC(arm_clear_user) | 51 | ENDPROC(arm_clear_user) |
53 | ENDPROC(__clear_user_std) | 52 | ENDPROC(__clear_user_std) |
54 | #ifndef CONFIG_UACCESS_WITH_MEMCPY | ||
55 | EXPORT_SYMBOL(arm_clear_user) | ||
56 | #endif | ||
57 | 53 | ||
58 | .pushsection .text.fixup,"ax" | 54 | .pushsection .text.fixup,"ax" |
59 | .align 0 | 55 | .align 0 |
diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S index 63e4c1ed0225..7a4b06049001 100644 --- a/arch/arm/lib/copy_from_user.S +++ b/arch/arm/lib/copy_from_user.S | |||
@@ -13,7 +13,6 @@ | |||
13 | #include <linux/linkage.h> | 13 | #include <linux/linkage.h> |
14 | #include <asm/assembler.h> | 14 | #include <asm/assembler.h> |
15 | #include <asm/unwind.h> | 15 | #include <asm/unwind.h> |
16 | #include <asm/export.h> | ||
17 | 16 | ||
18 | /* | 17 | /* |
19 | * Prototype: | 18 | * Prototype: |
@@ -95,7 +94,6 @@ ENTRY(arm_copy_from_user) | |||
95 | #include "copy_template.S" | 94 | #include "copy_template.S" |
96 | 95 | ||
97 | ENDPROC(arm_copy_from_user) | 96 | ENDPROC(arm_copy_from_user) |
98 | EXPORT_SYMBOL(arm_copy_from_user) | ||
99 | 97 | ||
100 | .pushsection .fixup,"ax" | 98 | .pushsection .fixup,"ax" |
101 | .align 0 | 99 | .align 0 |
diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S index d97851d4af7a..6ee2f6706f86 100644 --- a/arch/arm/lib/copy_page.S +++ b/arch/arm/lib/copy_page.S | |||
@@ -13,7 +13,6 @@ | |||
13 | #include <asm/assembler.h> | 13 | #include <asm/assembler.h> |
14 | #include <asm/asm-offsets.h> | 14 | #include <asm/asm-offsets.h> |
15 | #include <asm/cache.h> | 15 | #include <asm/cache.h> |
16 | #include <asm/export.h> | ||
17 | 16 | ||
18 | #define COPY_COUNT (PAGE_SZ / (2 * L1_CACHE_BYTES) PLD( -1 )) | 17 | #define COPY_COUNT (PAGE_SZ / (2 * L1_CACHE_BYTES) PLD( -1 )) |
19 | 18 | ||
@@ -46,4 +45,3 @@ ENTRY(copy_page) | |||
46 | PLD( beq 2b ) | 45 | PLD( beq 2b ) |
47 | ldmfd sp!, {r4, pc} @ 3 | 46 | ldmfd sp!, {r4, pc} @ 3 |
48 | ENDPROC(copy_page) | 47 | ENDPROC(copy_page) |
49 | EXPORT_SYMBOL(copy_page) | ||
diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S index 592c179112d1..caf5019d8161 100644 --- a/arch/arm/lib/copy_to_user.S +++ b/arch/arm/lib/copy_to_user.S | |||
@@ -13,7 +13,6 @@ | |||
13 | #include <linux/linkage.h> | 13 | #include <linux/linkage.h> |
14 | #include <asm/assembler.h> | 14 | #include <asm/assembler.h> |
15 | #include <asm/unwind.h> | 15 | #include <asm/unwind.h> |
16 | #include <asm/export.h> | ||
17 | 16 | ||
18 | /* | 17 | /* |
19 | * Prototype: | 18 | * Prototype: |
@@ -100,9 +99,6 @@ WEAK(arm_copy_to_user) | |||
100 | 99 | ||
101 | ENDPROC(arm_copy_to_user) | 100 | ENDPROC(arm_copy_to_user) |
102 | ENDPROC(__copy_to_user_std) | 101 | ENDPROC(__copy_to_user_std) |
103 | #ifndef CONFIG_UACCESS_WITH_MEMCPY | ||
104 | EXPORT_SYMBOL(arm_copy_to_user) | ||
105 | #endif | ||
106 | 102 | ||
107 | .pushsection .text.fixup,"ax" | 103 | .pushsection .text.fixup,"ax" |
108 | .align 0 | 104 | .align 0 |
diff --git a/arch/arm/lib/csumipv6.S b/arch/arm/lib/csumipv6.S index 68603b5ee537..3ac6ef01bc43 100644 --- a/arch/arm/lib/csumipv6.S +++ b/arch/arm/lib/csumipv6.S | |||
@@ -9,7 +9,6 @@ | |||
9 | */ | 9 | */ |
10 | #include <linux/linkage.h> | 10 | #include <linux/linkage.h> |
11 | #include <asm/assembler.h> | 11 | #include <asm/assembler.h> |
12 | #include <asm/export.h> | ||
13 | 12 | ||
14 | .text | 13 | .text |
15 | 14 | ||
@@ -31,4 +30,4 @@ ENTRY(__csum_ipv6_magic) | |||
31 | adcs r0, r0, #0 | 30 | adcs r0, r0, #0 |
32 | ldmfd sp!, {pc} | 31 | ldmfd sp!, {pc} |
33 | ENDPROC(__csum_ipv6_magic) | 32 | ENDPROC(__csum_ipv6_magic) |
34 | EXPORT_SYMBOL(__csum_ipv6_magic) | 33 | |
diff --git a/arch/arm/lib/csumpartial.S b/arch/arm/lib/csumpartial.S index 830b20e81c37..984e0f29d548 100644 --- a/arch/arm/lib/csumpartial.S +++ b/arch/arm/lib/csumpartial.S | |||
@@ -9,7 +9,6 @@ | |||
9 | */ | 9 | */ |
10 | #include <linux/linkage.h> | 10 | #include <linux/linkage.h> |
11 | #include <asm/assembler.h> | 11 | #include <asm/assembler.h> |
12 | #include <asm/export.h> | ||
13 | 12 | ||
14 | .text | 13 | .text |
15 | 14 | ||
@@ -141,4 +140,3 @@ ENTRY(csum_partial) | |||
141 | bne 4b | 140 | bne 4b |
142 | b .Lless4 | 141 | b .Lless4 |
143 | ENDPROC(csum_partial) | 142 | ENDPROC(csum_partial) |
144 | EXPORT_SYMBOL(csum_partial) | ||
diff --git a/arch/arm/lib/csumpartialcopy.S b/arch/arm/lib/csumpartialcopy.S index 9c3383fed129..d03fc71fc88c 100644 --- a/arch/arm/lib/csumpartialcopy.S +++ b/arch/arm/lib/csumpartialcopy.S | |||
@@ -49,6 +49,5 @@ | |||
49 | 49 | ||
50 | #define FN_ENTRY ENTRY(csum_partial_copy_nocheck) | 50 | #define FN_ENTRY ENTRY(csum_partial_copy_nocheck) |
51 | #define FN_EXIT ENDPROC(csum_partial_copy_nocheck) | 51 | #define FN_EXIT ENDPROC(csum_partial_copy_nocheck) |
52 | #define FN_EXPORT EXPORT_SYMBOL(csum_partial_copy_nocheck) | ||
53 | 52 | ||
54 | #include "csumpartialcopygeneric.S" | 53 | #include "csumpartialcopygeneric.S" |
diff --git a/arch/arm/lib/csumpartialcopygeneric.S b/arch/arm/lib/csumpartialcopygeneric.S index 8b94d20e51d1..10b45909610c 100644 --- a/arch/arm/lib/csumpartialcopygeneric.S +++ b/arch/arm/lib/csumpartialcopygeneric.S | |||
@@ -8,7 +8,6 @@ | |||
8 | * published by the Free Software Foundation. | 8 | * published by the Free Software Foundation. |
9 | */ | 9 | */ |
10 | #include <asm/assembler.h> | 10 | #include <asm/assembler.h> |
11 | #include <asm/export.h> | ||
12 | 11 | ||
13 | /* | 12 | /* |
14 | * unsigned int | 13 | * unsigned int |
@@ -332,4 +331,3 @@ FN_ENTRY | |||
332 | mov r5, r4, get_byte_1 | 331 | mov r5, r4, get_byte_1 |
333 | b .Lexit | 332 | b .Lexit |
334 | FN_EXIT | 333 | FN_EXIT |
335 | FN_EXPORT | ||
diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S index 5d495edf3d83..1712f132b80d 100644 --- a/arch/arm/lib/csumpartialcopyuser.S +++ b/arch/arm/lib/csumpartialcopyuser.S | |||
@@ -73,7 +73,6 @@ | |||
73 | 73 | ||
74 | #define FN_ENTRY ENTRY(csum_partial_copy_from_user) | 74 | #define FN_ENTRY ENTRY(csum_partial_copy_from_user) |
75 | #define FN_EXIT ENDPROC(csum_partial_copy_from_user) | 75 | #define FN_EXIT ENDPROC(csum_partial_copy_from_user) |
76 | #define FN_EXPORT EXPORT_SYMBOL(csum_partial_copy_from_user) | ||
77 | 76 | ||
78 | #include "csumpartialcopygeneric.S" | 77 | #include "csumpartialcopygeneric.S" |
79 | 78 | ||
diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c index 69aad80a3af4..2cef11884857 100644 --- a/arch/arm/lib/delay.c +++ b/arch/arm/lib/delay.c | |||
@@ -24,7 +24,6 @@ | |||
24 | #include <linux/init.h> | 24 | #include <linux/init.h> |
25 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> |
26 | #include <linux/module.h> | 26 | #include <linux/module.h> |
27 | #include <linux/export.h> | ||
28 | #include <linux/timex.h> | 27 | #include <linux/timex.h> |
29 | 28 | ||
30 | /* | 29 | /* |
@@ -35,7 +34,6 @@ struct arm_delay_ops arm_delay_ops __ro_after_init = { | |||
35 | .const_udelay = __loop_const_udelay, | 34 | .const_udelay = __loop_const_udelay, |
36 | .udelay = __loop_udelay, | 35 | .udelay = __loop_udelay, |
37 | }; | 36 | }; |
38 | EXPORT_SYMBOL(arm_delay_ops); | ||
39 | 37 | ||
40 | static const struct delay_timer *delay_timer; | 38 | static const struct delay_timer *delay_timer; |
41 | static bool delay_calibrated; | 39 | static bool delay_calibrated; |
diff --git a/arch/arm/lib/div64.S b/arch/arm/lib/div64.S index 0c9e1c18fc9e..a9eafe4981eb 100644 --- a/arch/arm/lib/div64.S +++ b/arch/arm/lib/div64.S | |||
@@ -15,7 +15,6 @@ | |||
15 | #include <linux/linkage.h> | 15 | #include <linux/linkage.h> |
16 | #include <asm/assembler.h> | 16 | #include <asm/assembler.h> |
17 | #include <asm/unwind.h> | 17 | #include <asm/unwind.h> |
18 | #include <asm/export.h> | ||
19 | 18 | ||
20 | #ifdef __ARMEB__ | 19 | #ifdef __ARMEB__ |
21 | #define xh r0 | 20 | #define xh r0 |
@@ -211,4 +210,3 @@ Ldiv0_64: | |||
211 | 210 | ||
212 | UNWIND(.fnend) | 211 | UNWIND(.fnend) |
213 | ENDPROC(__do_div64) | 212 | ENDPROC(__do_div64) |
214 | EXPORT_SYMBOL(__do_div64) | ||
diff --git a/arch/arm/lib/findbit.S b/arch/arm/lib/findbit.S index 26302b8cd38f..7848780e8834 100644 --- a/arch/arm/lib/findbit.S +++ b/arch/arm/lib/findbit.S | |||
@@ -15,7 +15,6 @@ | |||
15 | */ | 15 | */ |
16 | #include <linux/linkage.h> | 16 | #include <linux/linkage.h> |
17 | #include <asm/assembler.h> | 17 | #include <asm/assembler.h> |
18 | #include <asm/export.h> | ||
19 | .text | 18 | .text |
20 | 19 | ||
21 | /* | 20 | /* |
@@ -38,7 +37,6 @@ ENTRY(_find_first_zero_bit_le) | |||
38 | 3: mov r0, r1 @ no free bits | 37 | 3: mov r0, r1 @ no free bits |
39 | ret lr | 38 | ret lr |
40 | ENDPROC(_find_first_zero_bit_le) | 39 | ENDPROC(_find_first_zero_bit_le) |
41 | EXPORT_SYMBOL(_find_first_zero_bit_le) | ||
42 | 40 | ||
43 | /* | 41 | /* |
44 | * Purpose : Find next 'zero' bit | 42 | * Purpose : Find next 'zero' bit |
@@ -59,7 +57,6 @@ ENTRY(_find_next_zero_bit_le) | |||
59 | add r2, r2, #1 @ align bit pointer | 57 | add r2, r2, #1 @ align bit pointer |
60 | b 2b @ loop for next bit | 58 | b 2b @ loop for next bit |
61 | ENDPROC(_find_next_zero_bit_le) | 59 | ENDPROC(_find_next_zero_bit_le) |
62 | EXPORT_SYMBOL(_find_next_zero_bit_le) | ||
63 | 60 | ||
64 | /* | 61 | /* |
65 | * Purpose : Find a 'one' bit | 62 | * Purpose : Find a 'one' bit |
@@ -81,7 +78,6 @@ ENTRY(_find_first_bit_le) | |||
81 | 3: mov r0, r1 @ no free bits | 78 | 3: mov r0, r1 @ no free bits |
82 | ret lr | 79 | ret lr |
83 | ENDPROC(_find_first_bit_le) | 80 | ENDPROC(_find_first_bit_le) |
84 | EXPORT_SYMBOL(_find_first_bit_le) | ||
85 | 81 | ||
86 | /* | 82 | /* |
87 | * Purpose : Find next 'one' bit | 83 | * Purpose : Find next 'one' bit |
@@ -101,7 +97,6 @@ ENTRY(_find_next_bit_le) | |||
101 | add r2, r2, #1 @ align bit pointer | 97 | add r2, r2, #1 @ align bit pointer |
102 | b 2b @ loop for next bit | 98 | b 2b @ loop for next bit |
103 | ENDPROC(_find_next_bit_le) | 99 | ENDPROC(_find_next_bit_le) |
104 | EXPORT_SYMBOL(_find_next_bit_le) | ||
105 | 100 | ||
106 | #ifdef __ARMEB__ | 101 | #ifdef __ARMEB__ |
107 | 102 | ||
@@ -121,7 +116,6 @@ ENTRY(_find_first_zero_bit_be) | |||
121 | 3: mov r0, r1 @ no free bits | 116 | 3: mov r0, r1 @ no free bits |
122 | ret lr | 117 | ret lr |
123 | ENDPROC(_find_first_zero_bit_be) | 118 | ENDPROC(_find_first_zero_bit_be) |
124 | EXPORT_SYMBOL(_find_first_zero_bit_be) | ||
125 | 119 | ||
126 | ENTRY(_find_next_zero_bit_be) | 120 | ENTRY(_find_next_zero_bit_be) |
127 | teq r1, #0 | 121 | teq r1, #0 |
@@ -139,7 +133,6 @@ ENTRY(_find_next_zero_bit_be) | |||
139 | add r2, r2, #1 @ align bit pointer | 133 | add r2, r2, #1 @ align bit pointer |
140 | b 2b @ loop for next bit | 134 | b 2b @ loop for next bit |
141 | ENDPROC(_find_next_zero_bit_be) | 135 | ENDPROC(_find_next_zero_bit_be) |
142 | EXPORT_SYMBOL(_find_next_zero_bit_be) | ||
143 | 136 | ||
144 | ENTRY(_find_first_bit_be) | 137 | ENTRY(_find_first_bit_be) |
145 | teq r1, #0 | 138 | teq r1, #0 |
@@ -157,7 +150,6 @@ ENTRY(_find_first_bit_be) | |||
157 | 3: mov r0, r1 @ no free bits | 150 | 3: mov r0, r1 @ no free bits |
158 | ret lr | 151 | ret lr |
159 | ENDPROC(_find_first_bit_be) | 152 | ENDPROC(_find_first_bit_be) |
160 | EXPORT_SYMBOL(_find_first_bit_be) | ||
161 | 153 | ||
162 | ENTRY(_find_next_bit_be) | 154 | ENTRY(_find_next_bit_be) |
163 | teq r1, #0 | 155 | teq r1, #0 |
@@ -174,7 +166,6 @@ ENTRY(_find_next_bit_be) | |||
174 | add r2, r2, #1 @ align bit pointer | 166 | add r2, r2, #1 @ align bit pointer |
175 | b 2b @ loop for next bit | 167 | b 2b @ loop for next bit |
176 | ENDPROC(_find_next_bit_be) | 168 | ENDPROC(_find_next_bit_be) |
177 | EXPORT_SYMBOL(_find_next_bit_be) | ||
178 | 169 | ||
179 | #endif | 170 | #endif |
180 | 171 | ||
diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S index 9d09a38e73af..8ecfd15c3a02 100644 --- a/arch/arm/lib/getuser.S +++ b/arch/arm/lib/getuser.S | |||
@@ -31,7 +31,6 @@ | |||
31 | #include <asm/assembler.h> | 31 | #include <asm/assembler.h> |
32 | #include <asm/errno.h> | 32 | #include <asm/errno.h> |
33 | #include <asm/domain.h> | 33 | #include <asm/domain.h> |
34 | #include <asm/export.h> | ||
35 | 34 | ||
36 | ENTRY(__get_user_1) | 35 | ENTRY(__get_user_1) |
37 | check_uaccess r0, 1, r1, r2, __get_user_bad | 36 | check_uaccess r0, 1, r1, r2, __get_user_bad |
@@ -39,7 +38,6 @@ ENTRY(__get_user_1) | |||
39 | mov r0, #0 | 38 | mov r0, #0 |
40 | ret lr | 39 | ret lr |
41 | ENDPROC(__get_user_1) | 40 | ENDPROC(__get_user_1) |
42 | EXPORT_SYMBOL(__get_user_1) | ||
43 | 41 | ||
44 | ENTRY(__get_user_2) | 42 | ENTRY(__get_user_2) |
45 | check_uaccess r0, 2, r1, r2, __get_user_bad | 43 | check_uaccess r0, 2, r1, r2, __get_user_bad |
@@ -60,7 +58,6 @@ rb .req r0 | |||
60 | mov r0, #0 | 58 | mov r0, #0 |
61 | ret lr | 59 | ret lr |
62 | ENDPROC(__get_user_2) | 60 | ENDPROC(__get_user_2) |
63 | EXPORT_SYMBOL(__get_user_2) | ||
64 | 61 | ||
65 | ENTRY(__get_user_4) | 62 | ENTRY(__get_user_4) |
66 | check_uaccess r0, 4, r1, r2, __get_user_bad | 63 | check_uaccess r0, 4, r1, r2, __get_user_bad |
@@ -68,7 +65,6 @@ ENTRY(__get_user_4) | |||
68 | mov r0, #0 | 65 | mov r0, #0 |
69 | ret lr | 66 | ret lr |
70 | ENDPROC(__get_user_4) | 67 | ENDPROC(__get_user_4) |
71 | EXPORT_SYMBOL(__get_user_4) | ||
72 | 68 | ||
73 | ENTRY(__get_user_8) | 69 | ENTRY(__get_user_8) |
74 | check_uaccess r0, 8, r1, r2, __get_user_bad | 70 | check_uaccess r0, 8, r1, r2, __get_user_bad |
@@ -82,7 +78,6 @@ ENTRY(__get_user_8) | |||
82 | mov r0, #0 | 78 | mov r0, #0 |
83 | ret lr | 79 | ret lr |
84 | ENDPROC(__get_user_8) | 80 | ENDPROC(__get_user_8) |
85 | EXPORT_SYMBOL(__get_user_8) | ||
86 | 81 | ||
87 | #ifdef __ARMEB__ | 82 | #ifdef __ARMEB__ |
88 | ENTRY(__get_user_32t_8) | 83 | ENTRY(__get_user_32t_8) |
@@ -96,7 +91,6 @@ ENTRY(__get_user_32t_8) | |||
96 | mov r0, #0 | 91 | mov r0, #0 |
97 | ret lr | 92 | ret lr |
98 | ENDPROC(__get_user_32t_8) | 93 | ENDPROC(__get_user_32t_8) |
99 | EXPORT_SYMBOL(__get_user_32t_8) | ||
100 | 94 | ||
101 | ENTRY(__get_user_64t_1) | 95 | ENTRY(__get_user_64t_1) |
102 | check_uaccess r0, 1, r1, r2, __get_user_bad8 | 96 | check_uaccess r0, 1, r1, r2, __get_user_bad8 |
@@ -104,7 +98,6 @@ ENTRY(__get_user_64t_1) | |||
104 | mov r0, #0 | 98 | mov r0, #0 |
105 | ret lr | 99 | ret lr |
106 | ENDPROC(__get_user_64t_1) | 100 | ENDPROC(__get_user_64t_1) |
107 | EXPORT_SYMBOL(__get_user_64t_1) | ||
108 | 101 | ||
109 | ENTRY(__get_user_64t_2) | 102 | ENTRY(__get_user_64t_2) |
110 | check_uaccess r0, 2, r1, r2, __get_user_bad8 | 103 | check_uaccess r0, 2, r1, r2, __get_user_bad8 |
@@ -121,7 +114,6 @@ rb .req r0 | |||
121 | mov r0, #0 | 114 | mov r0, #0 |
122 | ret lr | 115 | ret lr |
123 | ENDPROC(__get_user_64t_2) | 116 | ENDPROC(__get_user_64t_2) |
124 | EXPORT_SYMBOL(__get_user_64t_2) | ||
125 | 117 | ||
126 | ENTRY(__get_user_64t_4) | 118 | ENTRY(__get_user_64t_4) |
127 | check_uaccess r0, 4, r1, r2, __get_user_bad8 | 119 | check_uaccess r0, 4, r1, r2, __get_user_bad8 |
@@ -129,7 +121,6 @@ ENTRY(__get_user_64t_4) | |||
129 | mov r0, #0 | 121 | mov r0, #0 |
130 | ret lr | 122 | ret lr |
131 | ENDPROC(__get_user_64t_4) | 123 | ENDPROC(__get_user_64t_4) |
132 | EXPORT_SYMBOL(__get_user_64t_4) | ||
133 | #endif | 124 | #endif |
134 | 125 | ||
135 | __get_user_bad8: | 126 | __get_user_bad8: |
diff --git a/arch/arm/lib/io-readsb.S b/arch/arm/lib/io-readsb.S index 3dff7a3a2aef..c31b2f3153f1 100644 --- a/arch/arm/lib/io-readsb.S +++ b/arch/arm/lib/io-readsb.S | |||
@@ -9,7 +9,6 @@ | |||
9 | */ | 9 | */ |
10 | #include <linux/linkage.h> | 10 | #include <linux/linkage.h> |
11 | #include <asm/assembler.h> | 11 | #include <asm/assembler.h> |
12 | #include <asm/export.h> | ||
13 | 12 | ||
14 | .Linsb_align: rsb ip, ip, #4 | 13 | .Linsb_align: rsb ip, ip, #4 |
15 | cmp ip, r2 | 14 | cmp ip, r2 |
@@ -122,4 +121,3 @@ ENTRY(__raw_readsb) | |||
122 | 121 | ||
123 | ldmfd sp!, {r4 - r6, pc} | 122 | ldmfd sp!, {r4 - r6, pc} |
124 | ENDPROC(__raw_readsb) | 123 | ENDPROC(__raw_readsb) |
125 | EXPORT_SYMBOL(__raw_readsb) | ||
diff --git a/arch/arm/lib/io-readsl.S b/arch/arm/lib/io-readsl.S index bfd39682325b..2ed86fa5465f 100644 --- a/arch/arm/lib/io-readsl.S +++ b/arch/arm/lib/io-readsl.S | |||
@@ -9,7 +9,6 @@ | |||
9 | */ | 9 | */ |
10 | #include <linux/linkage.h> | 10 | #include <linux/linkage.h> |
11 | #include <asm/assembler.h> | 11 | #include <asm/assembler.h> |
12 | #include <asm/export.h> | ||
13 | 12 | ||
14 | ENTRY(__raw_readsl) | 13 | ENTRY(__raw_readsl) |
15 | teq r2, #0 @ do we have to check for the zero len? | 14 | teq r2, #0 @ do we have to check for the zero len? |
@@ -78,4 +77,3 @@ ENTRY(__raw_readsl) | |||
78 | strb r3, [r1, #0] | 77 | strb r3, [r1, #0] |
79 | ret lr | 78 | ret lr |
80 | ENDPROC(__raw_readsl) | 79 | ENDPROC(__raw_readsl) |
81 | EXPORT_SYMBOL(__raw_readsl) | ||
diff --git a/arch/arm/lib/io-readsw-armv3.S b/arch/arm/lib/io-readsw-armv3.S index b3af3db6caac..413da9914529 100644 --- a/arch/arm/lib/io-readsw-armv3.S +++ b/arch/arm/lib/io-readsw-armv3.S | |||
@@ -9,7 +9,6 @@ | |||
9 | */ | 9 | */ |
10 | #include <linux/linkage.h> | 10 | #include <linux/linkage.h> |
11 | #include <asm/assembler.h> | 11 | #include <asm/assembler.h> |
12 | #include <asm/export.h> | ||
13 | 12 | ||
14 | .Linsw_bad_alignment: | 13 | .Linsw_bad_alignment: |
15 | adr r0, .Linsw_bad_align_msg | 14 | adr r0, .Linsw_bad_align_msg |
@@ -104,4 +103,4 @@ ENTRY(__raw_readsw) | |||
104 | 103 | ||
105 | ldmfd sp!, {r4, r5, r6, pc} | 104 | ldmfd sp!, {r4, r5, r6, pc} |
106 | 105 | ||
107 | EXPORT_SYMBOL(__raw_readsw) | 106 | |
diff --git a/arch/arm/lib/io-readsw-armv4.S b/arch/arm/lib/io-readsw-armv4.S index 3c7a7a40b33e..d9a45e9692ae 100644 --- a/arch/arm/lib/io-readsw-armv4.S +++ b/arch/arm/lib/io-readsw-armv4.S | |||
@@ -9,7 +9,6 @@ | |||
9 | */ | 9 | */ |
10 | #include <linux/linkage.h> | 10 | #include <linux/linkage.h> |
11 | #include <asm/assembler.h> | 11 | #include <asm/assembler.h> |
12 | #include <asm/export.h> | ||
13 | 12 | ||
14 | .macro pack, rd, hw1, hw2 | 13 | .macro pack, rd, hw1, hw2 |
15 | #ifndef __ARMEB__ | 14 | #ifndef __ARMEB__ |
@@ -130,4 +129,3 @@ ENTRY(__raw_readsw) | |||
130 | strneb ip, [r1] | 129 | strneb ip, [r1] |
131 | ldmfd sp!, {r4, pc} | 130 | ldmfd sp!, {r4, pc} |
132 | ENDPROC(__raw_readsw) | 131 | ENDPROC(__raw_readsw) |
133 | EXPORT_SYMBOL(__raw_readsw) | ||
diff --git a/arch/arm/lib/io-writesb.S b/arch/arm/lib/io-writesb.S index fa3633594415..a46bbc9b168b 100644 --- a/arch/arm/lib/io-writesb.S +++ b/arch/arm/lib/io-writesb.S | |||
@@ -9,7 +9,6 @@ | |||
9 | */ | 9 | */ |
10 | #include <linux/linkage.h> | 10 | #include <linux/linkage.h> |
11 | #include <asm/assembler.h> | 11 | #include <asm/assembler.h> |
12 | #include <asm/export.h> | ||
13 | 12 | ||
14 | .macro outword, rd | 13 | .macro outword, rd |
15 | #ifndef __ARMEB__ | 14 | #ifndef __ARMEB__ |
@@ -93,4 +92,3 @@ ENTRY(__raw_writesb) | |||
93 | 92 | ||
94 | ldmfd sp!, {r4, r5, pc} | 93 | ldmfd sp!, {r4, r5, pc} |
95 | ENDPROC(__raw_writesb) | 94 | ENDPROC(__raw_writesb) |
96 | EXPORT_SYMBOL(__raw_writesb) | ||
diff --git a/arch/arm/lib/io-writesl.S b/arch/arm/lib/io-writesl.S index 98ed6aec0b47..4ea2435988c1 100644 --- a/arch/arm/lib/io-writesl.S +++ b/arch/arm/lib/io-writesl.S | |||
@@ -9,7 +9,6 @@ | |||
9 | */ | 9 | */ |
10 | #include <linux/linkage.h> | 10 | #include <linux/linkage.h> |
11 | #include <asm/assembler.h> | 11 | #include <asm/assembler.h> |
12 | #include <asm/export.h> | ||
13 | 12 | ||
14 | ENTRY(__raw_writesl) | 13 | ENTRY(__raw_writesl) |
15 | teq r2, #0 @ do we have to check for the zero len? | 14 | teq r2, #0 @ do we have to check for the zero len? |
@@ -66,4 +65,3 @@ ENTRY(__raw_writesl) | |||
66 | bne 6b | 65 | bne 6b |
67 | ret lr | 66 | ret lr |
68 | ENDPROC(__raw_writesl) | 67 | ENDPROC(__raw_writesl) |
69 | EXPORT_SYMBOL(__raw_writesl) | ||
diff --git a/arch/arm/lib/io-writesw-armv3.S b/arch/arm/lib/io-writesw-armv3.S index 577184c082bb..121789eb6802 100644 --- a/arch/arm/lib/io-writesw-armv3.S +++ b/arch/arm/lib/io-writesw-armv3.S | |||
@@ -9,7 +9,6 @@ | |||
9 | */ | 9 | */ |
10 | #include <linux/linkage.h> | 10 | #include <linux/linkage.h> |
11 | #include <asm/assembler.h> | 11 | #include <asm/assembler.h> |
12 | #include <asm/export.h> | ||
13 | 12 | ||
14 | .Loutsw_bad_alignment: | 13 | .Loutsw_bad_alignment: |
15 | adr r0, .Loutsw_bad_align_msg | 14 | adr r0, .Loutsw_bad_align_msg |
@@ -125,4 +124,3 @@ ENTRY(__raw_writesw) | |||
125 | strne ip, [r0] | 124 | strne ip, [r0] |
126 | 125 | ||
127 | ldmfd sp!, {r4, r5, r6, pc} | 126 | ldmfd sp!, {r4, r5, r6, pc} |
128 | EXPORT_SYMBOL(__raw_writesw) | ||
diff --git a/arch/arm/lib/io-writesw-armv4.S b/arch/arm/lib/io-writesw-armv4.S index e335f489d1fc..269f90c51ad2 100644 --- a/arch/arm/lib/io-writesw-armv4.S +++ b/arch/arm/lib/io-writesw-armv4.S | |||
@@ -9,7 +9,6 @@ | |||
9 | */ | 9 | */ |
10 | #include <linux/linkage.h> | 10 | #include <linux/linkage.h> |
11 | #include <asm/assembler.h> | 11 | #include <asm/assembler.h> |
12 | #include <asm/export.h> | ||
13 | 12 | ||
14 | .macro outword, rd | 13 | .macro outword, rd |
15 | #ifndef __ARMEB__ | 14 | #ifndef __ARMEB__ |
@@ -99,4 +98,3 @@ ENTRY(__raw_writesw) | |||
99 | strneh ip, [r0] | 98 | strneh ip, [r0] |
100 | ret lr | 99 | ret lr |
101 | ENDPROC(__raw_writesw) | 100 | ENDPROC(__raw_writesw) |
102 | EXPORT_SYMBOL(__raw_writesw) | ||
diff --git a/arch/arm/lib/lib1funcs.S b/arch/arm/lib/lib1funcs.S index f541bc013bff..9397b2e532af 100644 --- a/arch/arm/lib/lib1funcs.S +++ b/arch/arm/lib/lib1funcs.S | |||
@@ -36,7 +36,6 @@ Boston, MA 02111-1307, USA. */ | |||
36 | #include <linux/linkage.h> | 36 | #include <linux/linkage.h> |
37 | #include <asm/assembler.h> | 37 | #include <asm/assembler.h> |
38 | #include <asm/unwind.h> | 38 | #include <asm/unwind.h> |
39 | #include <asm/export.h> | ||
40 | 39 | ||
41 | .macro ARM_DIV_BODY dividend, divisor, result, curbit | 40 | .macro ARM_DIV_BODY dividend, divisor, result, curbit |
42 | 41 | ||
@@ -239,8 +238,6 @@ UNWIND(.fnstart) | |||
239 | UNWIND(.fnend) | 238 | UNWIND(.fnend) |
240 | ENDPROC(__udivsi3) | 239 | ENDPROC(__udivsi3) |
241 | ENDPROC(__aeabi_uidiv) | 240 | ENDPROC(__aeabi_uidiv) |
242 | EXPORT_SYMBOL(__udivsi3) | ||
243 | EXPORT_SYMBOL(__aeabi_uidiv) | ||
244 | 241 | ||
245 | ENTRY(__umodsi3) | 242 | ENTRY(__umodsi3) |
246 | UNWIND(.fnstart) | 243 | UNWIND(.fnstart) |
@@ -259,7 +256,6 @@ UNWIND(.fnstart) | |||
259 | 256 | ||
260 | UNWIND(.fnend) | 257 | UNWIND(.fnend) |
261 | ENDPROC(__umodsi3) | 258 | ENDPROC(__umodsi3) |
262 | EXPORT_SYMBOL(__umodsi3) | ||
263 | 259 | ||
264 | #ifdef CONFIG_ARM_PATCH_IDIV | 260 | #ifdef CONFIG_ARM_PATCH_IDIV |
265 | .align 3 | 261 | .align 3 |
@@ -307,8 +303,6 @@ UNWIND(.fnstart) | |||
307 | UNWIND(.fnend) | 303 | UNWIND(.fnend) |
308 | ENDPROC(__divsi3) | 304 | ENDPROC(__divsi3) |
309 | ENDPROC(__aeabi_idiv) | 305 | ENDPROC(__aeabi_idiv) |
310 | EXPORT_SYMBOL(__divsi3) | ||
311 | EXPORT_SYMBOL(__aeabi_idiv) | ||
312 | 306 | ||
313 | ENTRY(__modsi3) | 307 | ENTRY(__modsi3) |
314 | UNWIND(.fnstart) | 308 | UNWIND(.fnstart) |
@@ -333,7 +327,6 @@ UNWIND(.fnstart) | |||
333 | 327 | ||
334 | UNWIND(.fnend) | 328 | UNWIND(.fnend) |
335 | ENDPROC(__modsi3) | 329 | ENDPROC(__modsi3) |
336 | EXPORT_SYMBOL(__modsi3) | ||
337 | 330 | ||
338 | #ifdef CONFIG_AEABI | 331 | #ifdef CONFIG_AEABI |
339 | 332 | ||
@@ -350,7 +343,6 @@ UNWIND(.save {r0, r1, ip, lr} ) | |||
350 | 343 | ||
351 | UNWIND(.fnend) | 344 | UNWIND(.fnend) |
352 | ENDPROC(__aeabi_uidivmod) | 345 | ENDPROC(__aeabi_uidivmod) |
353 | EXPORT_SYMBOL(__aeabi_uidivmod) | ||
354 | 346 | ||
355 | ENTRY(__aeabi_idivmod) | 347 | ENTRY(__aeabi_idivmod) |
356 | UNWIND(.fnstart) | 348 | UNWIND(.fnstart) |
@@ -364,7 +356,6 @@ UNWIND(.save {r0, r1, ip, lr} ) | |||
364 | 356 | ||
365 | UNWIND(.fnend) | 357 | UNWIND(.fnend) |
366 | ENDPROC(__aeabi_idivmod) | 358 | ENDPROC(__aeabi_idivmod) |
367 | EXPORT_SYMBOL(__aeabi_idivmod) | ||
368 | 359 | ||
369 | #endif | 360 | #endif |
370 | 361 | ||
diff --git a/arch/arm/lib/lshrdi3.S b/arch/arm/lib/lshrdi3.S index e40833981417..922dcd88b02b 100644 --- a/arch/arm/lib/lshrdi3.S +++ b/arch/arm/lib/lshrdi3.S | |||
@@ -28,7 +28,6 @@ Boston, MA 02110-1301, USA. */ | |||
28 | 28 | ||
29 | #include <linux/linkage.h> | 29 | #include <linux/linkage.h> |
30 | #include <asm/assembler.h> | 30 | #include <asm/assembler.h> |
31 | #include <asm/export.h> | ||
32 | 31 | ||
33 | #ifdef __ARMEB__ | 32 | #ifdef __ARMEB__ |
34 | #define al r1 | 33 | #define al r1 |
@@ -53,5 +52,3 @@ ENTRY(__aeabi_llsr) | |||
53 | 52 | ||
54 | ENDPROC(__lshrdi3) | 53 | ENDPROC(__lshrdi3) |
55 | ENDPROC(__aeabi_llsr) | 54 | ENDPROC(__aeabi_llsr) |
56 | EXPORT_SYMBOL(__lshrdi3) | ||
57 | EXPORT_SYMBOL(__aeabi_llsr) | ||
diff --git a/arch/arm/lib/memchr.S b/arch/arm/lib/memchr.S index 44182bf686a5..74a5bed6d999 100644 --- a/arch/arm/lib/memchr.S +++ b/arch/arm/lib/memchr.S | |||
@@ -11,7 +11,6 @@ | |||
11 | */ | 11 | */ |
12 | #include <linux/linkage.h> | 12 | #include <linux/linkage.h> |
13 | #include <asm/assembler.h> | 13 | #include <asm/assembler.h> |
14 | #include <asm/export.h> | ||
15 | 14 | ||
16 | .text | 15 | .text |
17 | .align 5 | 16 | .align 5 |
@@ -25,4 +24,3 @@ ENTRY(memchr) | |||
25 | 2: movne r0, #0 | 24 | 2: movne r0, #0 |
26 | ret lr | 25 | ret lr |
27 | ENDPROC(memchr) | 26 | ENDPROC(memchr) |
28 | EXPORT_SYMBOL(memchr) | ||
diff --git a/arch/arm/lib/memcpy.S b/arch/arm/lib/memcpy.S index 1be5b6ddf37c..64111bd4440b 100644 --- a/arch/arm/lib/memcpy.S +++ b/arch/arm/lib/memcpy.S | |||
@@ -13,7 +13,6 @@ | |||
13 | #include <linux/linkage.h> | 13 | #include <linux/linkage.h> |
14 | #include <asm/assembler.h> | 14 | #include <asm/assembler.h> |
15 | #include <asm/unwind.h> | 15 | #include <asm/unwind.h> |
16 | #include <asm/export.h> | ||
17 | 16 | ||
18 | #define LDR1W_SHIFT 0 | 17 | #define LDR1W_SHIFT 0 |
19 | #define STR1W_SHIFT 0 | 18 | #define STR1W_SHIFT 0 |
@@ -69,5 +68,3 @@ ENTRY(memcpy) | |||
69 | 68 | ||
70 | ENDPROC(memcpy) | 69 | ENDPROC(memcpy) |
71 | ENDPROC(mmiocpy) | 70 | ENDPROC(mmiocpy) |
72 | EXPORT_SYMBOL(memcpy) | ||
73 | EXPORT_SYMBOL(mmiocpy) | ||
diff --git a/arch/arm/lib/memmove.S b/arch/arm/lib/memmove.S index 71dcc5400d02..69a9d47fc5ab 100644 --- a/arch/arm/lib/memmove.S +++ b/arch/arm/lib/memmove.S | |||
@@ -13,7 +13,6 @@ | |||
13 | #include <linux/linkage.h> | 13 | #include <linux/linkage.h> |
14 | #include <asm/assembler.h> | 14 | #include <asm/assembler.h> |
15 | #include <asm/unwind.h> | 15 | #include <asm/unwind.h> |
16 | #include <asm/export.h> | ||
17 | 16 | ||
18 | .text | 17 | .text |
19 | 18 | ||
@@ -226,4 +225,3 @@ ENTRY(memmove) | |||
226 | 18: backward_copy_shift push=24 pull=8 | 225 | 18: backward_copy_shift push=24 pull=8 |
227 | 226 | ||
228 | ENDPROC(memmove) | 227 | ENDPROC(memmove) |
229 | EXPORT_SYMBOL(memmove) | ||
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S index 7b72044cba62..3c65e3bd790f 100644 --- a/arch/arm/lib/memset.S +++ b/arch/arm/lib/memset.S | |||
@@ -12,7 +12,6 @@ | |||
12 | #include <linux/linkage.h> | 12 | #include <linux/linkage.h> |
13 | #include <asm/assembler.h> | 13 | #include <asm/assembler.h> |
14 | #include <asm/unwind.h> | 14 | #include <asm/unwind.h> |
15 | #include <asm/export.h> | ||
16 | 15 | ||
17 | .text | 16 | .text |
18 | .align 5 | 17 | .align 5 |
@@ -136,5 +135,3 @@ UNWIND( .fnstart ) | |||
136 | UNWIND( .fnend ) | 135 | UNWIND( .fnend ) |
137 | ENDPROC(memset) | 136 | ENDPROC(memset) |
138 | ENDPROC(mmioset) | 137 | ENDPROC(mmioset) |
139 | EXPORT_SYMBOL(memset) | ||
140 | EXPORT_SYMBOL(mmioset) | ||
diff --git a/arch/arm/lib/memzero.S b/arch/arm/lib/memzero.S index 6dec26ed5bcc..0eded952e089 100644 --- a/arch/arm/lib/memzero.S +++ b/arch/arm/lib/memzero.S | |||
@@ -10,7 +10,6 @@ | |||
10 | #include <linux/linkage.h> | 10 | #include <linux/linkage.h> |
11 | #include <asm/assembler.h> | 11 | #include <asm/assembler.h> |
12 | #include <asm/unwind.h> | 12 | #include <asm/unwind.h> |
13 | #include <asm/export.h> | ||
14 | 13 | ||
15 | .text | 14 | .text |
16 | .align 5 | 15 | .align 5 |
@@ -136,4 +135,3 @@ UNWIND( .fnstart ) | |||
136 | ret lr @ 1 | 135 | ret lr @ 1 |
137 | UNWIND( .fnend ) | 136 | UNWIND( .fnend ) |
138 | ENDPROC(__memzero) | 137 | ENDPROC(__memzero) |
139 | EXPORT_SYMBOL(__memzero) | ||
diff --git a/arch/arm/lib/muldi3.S b/arch/arm/lib/muldi3.S index b8f12388ccac..204305956925 100644 --- a/arch/arm/lib/muldi3.S +++ b/arch/arm/lib/muldi3.S | |||
@@ -12,7 +12,6 @@ | |||
12 | 12 | ||
13 | #include <linux/linkage.h> | 13 | #include <linux/linkage.h> |
14 | #include <asm/assembler.h> | 14 | #include <asm/assembler.h> |
15 | #include <asm/export.h> | ||
16 | 15 | ||
17 | #ifdef __ARMEB__ | 16 | #ifdef __ARMEB__ |
18 | #define xh r0 | 17 | #define xh r0 |
@@ -47,5 +46,3 @@ ENTRY(__aeabi_lmul) | |||
47 | 46 | ||
48 | ENDPROC(__muldi3) | 47 | ENDPROC(__muldi3) |
49 | ENDPROC(__aeabi_lmul) | 48 | ENDPROC(__aeabi_lmul) |
50 | EXPORT_SYMBOL(__muldi3) | ||
51 | EXPORT_SYMBOL(__aeabi_lmul) | ||
diff --git a/arch/arm/lib/putuser.S b/arch/arm/lib/putuser.S index 11de126e2ed6..38d660d3705f 100644 --- a/arch/arm/lib/putuser.S +++ b/arch/arm/lib/putuser.S | |||
@@ -31,7 +31,6 @@ | |||
31 | #include <asm/assembler.h> | 31 | #include <asm/assembler.h> |
32 | #include <asm/errno.h> | 32 | #include <asm/errno.h> |
33 | #include <asm/domain.h> | 33 | #include <asm/domain.h> |
34 | #include <asm/export.h> | ||
35 | 34 | ||
36 | ENTRY(__put_user_1) | 35 | ENTRY(__put_user_1) |
37 | check_uaccess r0, 1, r1, ip, __put_user_bad | 36 | check_uaccess r0, 1, r1, ip, __put_user_bad |
@@ -39,7 +38,6 @@ ENTRY(__put_user_1) | |||
39 | mov r0, #0 | 38 | mov r0, #0 |
40 | ret lr | 39 | ret lr |
41 | ENDPROC(__put_user_1) | 40 | ENDPROC(__put_user_1) |
42 | EXPORT_SYMBOL(__put_user_1) | ||
43 | 41 | ||
44 | ENTRY(__put_user_2) | 42 | ENTRY(__put_user_2) |
45 | check_uaccess r0, 2, r1, ip, __put_user_bad | 43 | check_uaccess r0, 2, r1, ip, __put_user_bad |
@@ -64,7 +62,6 @@ ENTRY(__put_user_2) | |||
64 | mov r0, #0 | 62 | mov r0, #0 |
65 | ret lr | 63 | ret lr |
66 | ENDPROC(__put_user_2) | 64 | ENDPROC(__put_user_2) |
67 | EXPORT_SYMBOL(__put_user_2) | ||
68 | 65 | ||
69 | ENTRY(__put_user_4) | 66 | ENTRY(__put_user_4) |
70 | check_uaccess r0, 4, r1, ip, __put_user_bad | 67 | check_uaccess r0, 4, r1, ip, __put_user_bad |
@@ -72,7 +69,6 @@ ENTRY(__put_user_4) | |||
72 | mov r0, #0 | 69 | mov r0, #0 |
73 | ret lr | 70 | ret lr |
74 | ENDPROC(__put_user_4) | 71 | ENDPROC(__put_user_4) |
75 | EXPORT_SYMBOL(__put_user_4) | ||
76 | 72 | ||
77 | ENTRY(__put_user_8) | 73 | ENTRY(__put_user_8) |
78 | check_uaccess r0, 8, r1, ip, __put_user_bad | 74 | check_uaccess r0, 8, r1, ip, __put_user_bad |
@@ -86,7 +82,6 @@ ENTRY(__put_user_8) | |||
86 | mov r0, #0 | 82 | mov r0, #0 |
87 | ret lr | 83 | ret lr |
88 | ENDPROC(__put_user_8) | 84 | ENDPROC(__put_user_8) |
89 | EXPORT_SYMBOL(__put_user_8) | ||
90 | 85 | ||
91 | __put_user_bad: | 86 | __put_user_bad: |
92 | mov r0, #-EFAULT | 87 | mov r0, #-EFAULT |
diff --git a/arch/arm/lib/strchr.S b/arch/arm/lib/strchr.S index 7301f6e6046c..013d64c71e8d 100644 --- a/arch/arm/lib/strchr.S +++ b/arch/arm/lib/strchr.S | |||
@@ -11,7 +11,6 @@ | |||
11 | */ | 11 | */ |
12 | #include <linux/linkage.h> | 12 | #include <linux/linkage.h> |
13 | #include <asm/assembler.h> | 13 | #include <asm/assembler.h> |
14 | #include <asm/export.h> | ||
15 | 14 | ||
16 | .text | 15 | .text |
17 | .align 5 | 16 | .align 5 |
@@ -26,4 +25,3 @@ ENTRY(strchr) | |||
26 | subeq r0, r0, #1 | 25 | subeq r0, r0, #1 |
27 | ret lr | 26 | ret lr |
28 | ENDPROC(strchr) | 27 | ENDPROC(strchr) |
29 | EXPORT_SYMBOL(strchr) | ||
diff --git a/arch/arm/lib/strrchr.S b/arch/arm/lib/strrchr.S index aaf9fd98b754..3cec1c7482c4 100644 --- a/arch/arm/lib/strrchr.S +++ b/arch/arm/lib/strrchr.S | |||
@@ -11,7 +11,6 @@ | |||
11 | */ | 11 | */ |
12 | #include <linux/linkage.h> | 12 | #include <linux/linkage.h> |
13 | #include <asm/assembler.h> | 13 | #include <asm/assembler.h> |
14 | #include <asm/export.h> | ||
15 | 14 | ||
16 | .text | 15 | .text |
17 | .align 5 | 16 | .align 5 |
@@ -25,4 +24,3 @@ ENTRY(strrchr) | |||
25 | mov r0, r3 | 24 | mov r0, r3 |
26 | ret lr | 25 | ret lr |
27 | ENDPROC(strrchr) | 26 | ENDPROC(strrchr) |
28 | EXPORT_SYMBOL(strrchr) | ||
diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c index 1626e3a551a1..6bd1089b07e0 100644 --- a/arch/arm/lib/uaccess_with_memcpy.c +++ b/arch/arm/lib/uaccess_with_memcpy.c | |||
@@ -19,7 +19,6 @@ | |||
19 | #include <linux/gfp.h> | 19 | #include <linux/gfp.h> |
20 | #include <linux/highmem.h> | 20 | #include <linux/highmem.h> |
21 | #include <linux/hugetlb.h> | 21 | #include <linux/hugetlb.h> |
22 | #include <linux/export.h> | ||
23 | #include <asm/current.h> | 22 | #include <asm/current.h> |
24 | #include <asm/page.h> | 23 | #include <asm/page.h> |
25 | 24 | ||
@@ -157,7 +156,6 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n) | |||
157 | } | 156 | } |
158 | return n; | 157 | return n; |
159 | } | 158 | } |
160 | EXPORT_SYMBOL(arm_copy_to_user); | ||
161 | 159 | ||
162 | static unsigned long noinline | 160 | static unsigned long noinline |
163 | __clear_user_memset(void __user *addr, unsigned long n) | 161 | __clear_user_memset(void __user *addr, unsigned long n) |
@@ -215,7 +213,6 @@ unsigned long arm_clear_user(void __user *addr, unsigned long n) | |||
215 | } | 213 | } |
216 | return n; | 214 | return n; |
217 | } | 215 | } |
218 | EXPORT_SYMBOL(arm_clear_user); | ||
219 | 216 | ||
220 | #if 0 | 217 | #if 0 |
221 | 218 | ||
diff --git a/arch/arm/lib/ucmpdi2.S b/arch/arm/lib/ucmpdi2.S index 127a91af46f3..ad4a6309141a 100644 --- a/arch/arm/lib/ucmpdi2.S +++ b/arch/arm/lib/ucmpdi2.S | |||
@@ -12,7 +12,6 @@ | |||
12 | 12 | ||
13 | #include <linux/linkage.h> | 13 | #include <linux/linkage.h> |
14 | #include <asm/assembler.h> | 14 | #include <asm/assembler.h> |
15 | #include <asm/export.h> | ||
16 | 15 | ||
17 | #ifdef __ARMEB__ | 16 | #ifdef __ARMEB__ |
18 | #define xh r0 | 17 | #define xh r0 |
@@ -36,7 +35,6 @@ ENTRY(__ucmpdi2) | |||
36 | ret lr | 35 | ret lr |
37 | 36 | ||
38 | ENDPROC(__ucmpdi2) | 37 | ENDPROC(__ucmpdi2) |
39 | EXPORT_SYMBOL(__ucmpdi2) | ||
40 | 38 | ||
41 | #ifdef CONFIG_AEABI | 39 | #ifdef CONFIG_AEABI |
42 | 40 | ||
@@ -50,7 +48,6 @@ ENTRY(__aeabi_ulcmp) | |||
50 | ret lr | 48 | ret lr |
51 | 49 | ||
52 | ENDPROC(__aeabi_ulcmp) | 50 | ENDPROC(__aeabi_ulcmp) |
53 | EXPORT_SYMBOL(__aeabi_ulcmp) | ||
54 | 51 | ||
55 | #endif | 52 | #endif |
56 | 53 | ||
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile index 737450fe790c..cab128913e72 100644 --- a/arch/arm/mach-imx/Makefile +++ b/arch/arm/mach-imx/Makefile | |||
@@ -32,6 +32,7 @@ endif | |||
32 | 32 | ||
33 | ifdef CONFIG_SND_IMX_SOC | 33 | ifdef CONFIG_SND_IMX_SOC |
34 | obj-y += ssi-fiq.o | 34 | obj-y += ssi-fiq.o |
35 | obj-y += ssi-fiq-ksym.o | ||
35 | endif | 36 | endif |
36 | 37 | ||
37 | # i.MX21 based machines | 38 | # i.MX21 based machines |
diff --git a/arch/arm/mach-imx/ssi-fiq-ksym.c b/arch/arm/mach-imx/ssi-fiq-ksym.c new file mode 100644 index 000000000000..792090f9a032 --- /dev/null +++ b/arch/arm/mach-imx/ssi-fiq-ksym.c | |||
@@ -0,0 +1,20 @@ | |||
1 | /* | ||
2 | * Exported ksyms for the SSI FIQ handler | ||
3 | * | ||
4 | * Copyright (C) 2009, Sascha Hauer <s.hauer@pengutronix.de> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #include <linux/module.h> | ||
12 | |||
13 | #include <linux/platform_data/asoc-imx-ssi.h> | ||
14 | |||
15 | EXPORT_SYMBOL(imx_ssi_fiq_tx_buffer); | ||
16 | EXPORT_SYMBOL(imx_ssi_fiq_rx_buffer); | ||
17 | EXPORT_SYMBOL(imx_ssi_fiq_start); | ||
18 | EXPORT_SYMBOL(imx_ssi_fiq_end); | ||
19 | EXPORT_SYMBOL(imx_ssi_fiq_base); | ||
20 | |||
diff --git a/arch/arm/mach-imx/ssi-fiq.S b/arch/arm/mach-imx/ssi-fiq.S index fd7917f1c204..a8b93c5f29b5 100644 --- a/arch/arm/mach-imx/ssi-fiq.S +++ b/arch/arm/mach-imx/ssi-fiq.S | |||
@@ -8,7 +8,6 @@ | |||
8 | 8 | ||
9 | #include <linux/linkage.h> | 9 | #include <linux/linkage.h> |
10 | #include <asm/assembler.h> | 10 | #include <asm/assembler.h> |
11 | #include <asm/export.h> | ||
12 | 11 | ||
13 | /* | 12 | /* |
14 | * r8 = bit 0-15: tx offset, bit 16-31: tx buffer size | 13 | * r8 = bit 0-15: tx offset, bit 16-31: tx buffer size |
@@ -145,8 +144,4 @@ imx_ssi_fiq_tx_buffer: | |||
145 | .word 0x0 | 144 | .word 0x0 |
146 | .L_imx_ssi_fiq_end: | 145 | .L_imx_ssi_fiq_end: |
147 | imx_ssi_fiq_end: | 146 | imx_ssi_fiq_end: |
148 | EXPORT_SYMBOL(imx_ssi_fiq_tx_buffer) | 147 | |
149 | EXPORT_SYMBOL(imx_ssi_fiq_rx_buffer) | ||
150 | EXPORT_SYMBOL(imx_ssi_fiq_start) | ||
151 | EXPORT_SYMBOL(imx_ssi_fiq_end) | ||
152 | EXPORT_SYMBOL(imx_ssi_fiq_base) | ||
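For context, the symbols now exported from ssi-fiq-ksym.c are the handles the i.MX audio FIQ code installs and programs, roughly as below; this is an illustrative sketch rather than the exact driver source, and the call site in the sound driver is an assumption:

        #include <asm/fiq.h>
        #include <linux/platform_data/asoc-imx-ssi.h>

        /* copy the FIQ handler body that ssi-fiq.S lays out between
         * imx_ssi_fiq_start and imx_ssi_fiq_end into the FIQ vector */
        set_fiq_handler(&imx_ssi_fiq_start,
                        &imx_ssi_fiq_end - &imx_ssi_fiq_start);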
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index a9afeebd59f2..0465338183c7 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig | |||
@@ -71,6 +71,7 @@ config SOC_AM43XX | |||
71 | select HAVE_ARM_TWD | 71 | select HAVE_ARM_TWD |
72 | select ARM_ERRATA_754322 | 72 | select ARM_ERRATA_754322 |
73 | select ARM_ERRATA_775420 | 73 | select ARM_ERRATA_775420 |
74 | select OMAP_INTERCONNECT | ||
74 | 75 | ||
75 | config SOC_DRA7XX | 76 | config SOC_DRA7XX |
76 | bool "TI DRA7XX" | 77 | bool "TI DRA7XX" |
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c index 2abd53ae3e7a..cc6d9fa60924 100644 --- a/arch/arm/mach-omap2/id.c +++ b/arch/arm/mach-omap2/id.c | |||
@@ -205,11 +205,15 @@ void __init omap2xxx_check_revision(void) | |||
205 | 205 | ||
206 | #define OMAP3_SHOW_FEATURE(feat) \ | 206 | #define OMAP3_SHOW_FEATURE(feat) \ |
207 | if (omap3_has_ ##feat()) \ | 207 | if (omap3_has_ ##feat()) \ |
208 | printk(#feat" "); | 208 | n += scnprintf(buf + n, sizeof(buf) - n, #feat " "); |
209 | 209 | ||
210 | static void __init omap3_cpuinfo(void) | 210 | static void __init omap3_cpuinfo(void) |
211 | { | 211 | { |
212 | const char *cpu_name; | 212 | const char *cpu_name; |
213 | char buf[64]; | ||
214 | int n = 0; | ||
215 | |||
216 | memset(buf, 0, sizeof(buf)); | ||
213 | 217 | ||
214 | /* | 218 | /* |
215 | * OMAP3430 and OMAP3530 are assumed to be same. | 219 | * OMAP3430 and OMAP3530 are assumed to be same. |
@@ -241,10 +245,10 @@ static void __init omap3_cpuinfo(void) | |||
241 | cpu_name = "OMAP3503"; | 245 | cpu_name = "OMAP3503"; |
242 | } | 246 | } |
243 | 247 | ||
244 | sprintf(soc_name, "%s", cpu_name); | 248 | scnprintf(soc_name, sizeof(soc_name), "%s", cpu_name); |
245 | 249 | ||
246 | /* Print verbose information */ | 250 | /* Print verbose information */ |
247 | pr_info("%s %s (", soc_name, soc_rev); | 251 | n += scnprintf(buf, sizeof(buf) - n, "%s %s (", soc_name, soc_rev); |
248 | 252 | ||
249 | OMAP3_SHOW_FEATURE(l2cache); | 253 | OMAP3_SHOW_FEATURE(l2cache); |
250 | OMAP3_SHOW_FEATURE(iva); | 254 | OMAP3_SHOW_FEATURE(iva); |
@@ -252,8 +256,10 @@ static void __init omap3_cpuinfo(void) | |||
252 | OMAP3_SHOW_FEATURE(neon); | 256 | OMAP3_SHOW_FEATURE(neon); |
253 | OMAP3_SHOW_FEATURE(isp); | 257 | OMAP3_SHOW_FEATURE(isp); |
254 | OMAP3_SHOW_FEATURE(192mhz_clk); | 258 | OMAP3_SHOW_FEATURE(192mhz_clk); |
255 | 259 | if (*(buf + n - 1) == ' ') | |
256 | printk(")\n"); | 260 | n--; |
261 | n += scnprintf(buf + n, sizeof(buf) - n, ")\n"); | ||
262 | pr_info("%s", buf); | ||
257 | } | 263 | } |
258 | 264 | ||
259 | #define OMAP3_CHECK_FEATURE(status,feat) \ | 265 | #define OMAP3_CHECK_FEATURE(status,feat) \ |
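The change above replaces a series of bare printk()/pr_cont() fragments with one buffer that is filled by scnprintf() and printed in a single call, so the feature list can no longer be torn apart by interleaved log output. A standalone sketch of the pattern (the variable names here are illustrative only):

        char buf[64];
        int n = 0;

        n += scnprintf(buf + n, sizeof(buf) - n, "%s %s (", soc, rev);
        if (has_neon)
                n += scnprintf(buf + n, sizeof(buf) - n, "neon ");
        if (n && buf[n - 1] == ' ')
                n--;                            /* drop the trailing space */
        scnprintf(buf + n, sizeof(buf) - n, ")");
        pr_info("%s\n", buf);                   /* one atomic log line     */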
diff --git a/arch/arm/mach-omap2/prm3xxx.c b/arch/arm/mach-omap2/prm3xxx.c index 62680aad2126..718981bb80cd 100644 --- a/arch/arm/mach-omap2/prm3xxx.c +++ b/arch/arm/mach-omap2/prm3xxx.c | |||
@@ -319,6 +319,9 @@ void __init omap3_prm_init_pm(bool has_uart4, bool has_iva) | |||
319 | if (has_uart4) { | 319 | if (has_uart4) { |
320 | en_uart4_mask = OMAP3630_EN_UART4_MASK; | 320 | en_uart4_mask = OMAP3630_EN_UART4_MASK; |
321 | grpsel_uart4_mask = OMAP3630_GRPSEL_UART4_MASK; | 321 | grpsel_uart4_mask = OMAP3630_GRPSEL_UART4_MASK; |
322 | } else { | ||
323 | en_uart4_mask = 0; | ||
324 | grpsel_uart4_mask = 0; | ||
322 | } | 325 | } |
323 | 326 | ||
324 | /* Enable wakeups in PER */ | 327 | /* Enable wakeups in PER */ |
diff --git a/arch/arm/mach-omap2/voltage.c b/arch/arm/mach-omap2/voltage.c index cba8cada8c81..cd15dbd62671 100644 --- a/arch/arm/mach-omap2/voltage.c +++ b/arch/arm/mach-omap2/voltage.c | |||
@@ -87,6 +87,12 @@ int voltdm_scale(struct voltagedomain *voltdm, | |||
87 | return -ENODATA; | 87 | return -ENODATA; |
88 | } | 88 | } |
89 | 89 | ||
90 | if (!voltdm->volt_data) { | ||
91 | pr_err("%s: No voltage data defined for vdd_%s\n", | ||
92 | __func__, voltdm->name); | ||
93 | return -ENODATA; | ||
94 | } | ||
95 | |||
90 | /* Adjust voltage to the exact voltage from the OPP table */ | 96 | /* Adjust voltage to the exact voltage from the OPP table */ |
91 | for (i = 0; voltdm->volt_data[i].volt_nominal != 0; i++) { | 97 | for (i = 0; voltdm->volt_data[i].volt_nominal != 0; i++) { |
92 | if (voltdm->volt_data[i].volt_nominal >= target_volt) { | 98 | if (voltdm->volt_data[i].volt_nominal >= target_volt) { |
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index ab4f74536057..ab7710002ba6 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -1167,7 +1167,7 @@ static int __init dma_debug_do_init(void) | |||
1167 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); | 1167 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); |
1168 | return 0; | 1168 | return 0; |
1169 | } | 1169 | } |
1170 | fs_initcall(dma_debug_do_init); | 1170 | core_initcall(dma_debug_do_init); |
1171 | 1171 | ||
1172 | #ifdef CONFIG_ARM_DMA_USE_IOMMU | 1172 | #ifdef CONFIG_ARM_DMA_USE_IOMMU |
1173 | 1173 | ||
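The only effect of switching from fs_initcall() to core_initcall() is that dma_debug_do_init() now runs earlier in boot, before subsystem- and driver-level code can start issuing DMA mappings. For reference, the initcall levels are defined in include/linux/init.h (lower number runs first):

        #define core_initcall(fn)       __define_initcall(fn, 1)
        #define postcore_initcall(fn)   __define_initcall(fn, 2)
        #define arch_initcall(fn)       __define_initcall(fn, 3)
        #define subsys_initcall(fn)     __define_initcall(fn, 4)
        #define fs_initcall(fn)         __define_initcall(fn, 5)
        #define device_initcall(fn)     __define_initcall(fn, 6)
        #define late_initcall(fn)       __define_initcall(fn, 7)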
diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S index f6d333f09bfe..8dea61640cc1 100644 --- a/arch/arm/mm/proc-v7m.S +++ b/arch/arm/mm/proc-v7m.S | |||
@@ -96,7 +96,7 @@ ENTRY(cpu_cm7_proc_fin) | |||
96 | ret lr | 96 | ret lr |
97 | ENDPROC(cpu_cm7_proc_fin) | 97 | ENDPROC(cpu_cm7_proc_fin) |
98 | 98 | ||
99 | .section ".text.init", #alloc, #execinstr | 99 | .section ".init.text", #alloc, #execinstr |
100 | 100 | ||
101 | __v7m_cm7_setup: | 101 | __v7m_cm7_setup: |
102 | mov r8, #(V7M_SCB_CCR_DC | V7M_SCB_CCR_IC| V7M_SCB_CCR_BP) | 102 | mov r8, #(V7M_SCB_CCR_DC | V7M_SCB_CCR_IC| V7M_SCB_CCR_BP) |
diff --git a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi index c4762538ec01..e9bd58793464 100644 --- a/arch/arm64/boot/dts/marvell/armada-37xx.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-37xx.dtsi | |||
@@ -105,7 +105,7 @@ | |||
105 | status = "disabled"; | 105 | status = "disabled"; |
106 | }; | 106 | }; |
107 | 107 | ||
108 | nb_perih_clk: nb-periph-clk@13000{ | 108 | nb_periph_clk: nb-periph-clk@13000 { |
109 | compatible = "marvell,armada-3700-periph-clock-nb"; | 109 | compatible = "marvell,armada-3700-periph-clock-nb"; |
110 | reg = <0x13000 0x100>; | 110 | reg = <0x13000 0x100>; |
111 | clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>, | 111 | clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>, |
@@ -113,7 +113,7 @@ | |||
113 | #clock-cells = <1>; | 113 | #clock-cells = <1>; |
114 | }; | 114 | }; |
115 | 115 | ||
116 | sb_perih_clk: sb-periph-clk@18000{ | 116 | sb_periph_clk: sb-periph-clk@18000 { |
117 | compatible = "marvell,armada-3700-periph-clock-sb"; | 117 | compatible = "marvell,armada-3700-periph-clock-sb"; |
118 | reg = <0x18000 0x100>; | 118 | reg = <0x18000 0x100>; |
119 | clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>, | 119 | clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>, |
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi index 842fb333285c..6bf9e241179b 100644 --- a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi | |||
@@ -130,8 +130,8 @@ | |||
130 | reg = <0x700600 0x50>; | 130 | reg = <0x700600 0x50>; |
131 | #address-cells = <0x1>; | 131 | #address-cells = <0x1>; |
132 | #size-cells = <0x0>; | 132 | #size-cells = <0x0>; |
133 | cell-index = <1>; | 133 | cell-index = <3>; |
134 | clocks = <&cps_syscon0 0 3>; | 134 | clocks = <&cps_syscon0 1 21>; |
135 | status = "disabled"; | 135 | status = "disabled"; |
136 | }; | 136 | }; |
137 | 137 | ||
@@ -140,7 +140,7 @@ | |||
140 | reg = <0x700680 0x50>; | 140 | reg = <0x700680 0x50>; |
141 | #address-cells = <1>; | 141 | #address-cells = <1>; |
142 | #size-cells = <0>; | 142 | #size-cells = <0>; |
143 | cell-index = <2>; | 143 | cell-index = <4>; |
144 | clocks = <&cps_syscon0 1 21>; | 144 | clocks = <&cps_syscon0 1 21>; |
145 | status = "disabled"; | 145 | status = "disabled"; |
146 | }; | 146 | }; |
diff --git a/arch/arm64/include/asm/perf_event.h b/arch/arm64/include/asm/perf_event.h index 2065f46fa740..38b6a2b49d68 100644 --- a/arch/arm64/include/asm/perf_event.h +++ b/arch/arm64/include/asm/perf_event.h | |||
@@ -46,7 +46,15 @@ | |||
46 | #define ARMV8_PMU_EVTYPE_MASK 0xc800ffff /* Mask for writable bits */ | 46 | #define ARMV8_PMU_EVTYPE_MASK 0xc800ffff /* Mask for writable bits */ |
47 | #define ARMV8_PMU_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */ | 47 | #define ARMV8_PMU_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */ |
48 | 48 | ||
49 | #define ARMV8_PMU_EVTYPE_EVENT_SW_INCR 0 /* Software increment event */ | 49 | /* |
50 | * PMUv3 event types: required events | ||
51 | */ | ||
52 | #define ARMV8_PMUV3_PERFCTR_SW_INCR 0x00 | ||
53 | #define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL 0x03 | ||
54 | #define ARMV8_PMUV3_PERFCTR_L1D_CACHE 0x04 | ||
55 | #define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED 0x10 | ||
56 | #define ARMV8_PMUV3_PERFCTR_CPU_CYCLES 0x11 | ||
57 | #define ARMV8_PMUV3_PERFCTR_BR_PRED 0x12 | ||
50 | 58 | ||
51 | /* | 59 | /* |
52 | * Event filters for PMUv3 | 60 | * Event filters for PMUv3 |
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index a9310a69fffd..57ae9d9ed9bb 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c | |||
@@ -31,17 +31,9 @@ | |||
31 | 31 | ||
32 | /* | 32 | /* |
33 | * ARMv8 PMUv3 Performance Events handling code. | 33 | * ARMv8 PMUv3 Performance Events handling code. |
34 | * Common event types. | 34 | * Common event types (some are defined in asm/perf_event.h). |
35 | */ | 35 | */ |
36 | 36 | ||
37 | /* Required events. */ | ||
38 | #define ARMV8_PMUV3_PERFCTR_SW_INCR 0x00 | ||
39 | #define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL 0x03 | ||
40 | #define ARMV8_PMUV3_PERFCTR_L1D_CACHE 0x04 | ||
41 | #define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED 0x10 | ||
42 | #define ARMV8_PMUV3_PERFCTR_CPU_CYCLES 0x11 | ||
43 | #define ARMV8_PMUV3_PERFCTR_BR_PRED 0x12 | ||
44 | |||
45 | /* At least one of the following is required. */ | 37 | /* At least one of the following is required. */ |
46 | #define ARMV8_PMUV3_PERFCTR_INST_RETIRED 0x08 | 38 | #define ARMV8_PMUV3_PERFCTR_INST_RETIRED 0x08 |
47 | #define ARMV8_PMUV3_PERFCTR_INST_SPEC 0x1B | 39 | #define ARMV8_PMUV3_PERFCTR_INST_SPEC 0x1B |
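Hoisting the required-event encodings into asm/perf_event.h makes them usable outside the perf driver itself (for example by the KVM PMU emulation). A hypothetical consumer only needs the header; the helper name below is invented for illustration:

        #include <linux/types.h>
        #include <asm/perf_event.h>

        /* illustrative: select the architected cycle-counter event */
        static inline u16 cpu_cycles_event(void)
        {
                return ARMV8_PMUV3_PERFCTR_CPU_CYCLES;  /* 0x11 */
        }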
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index f302fdb3a030..87e7e6608cd8 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c | |||
@@ -597,8 +597,14 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu, | |||
597 | 597 | ||
598 | idx = ARMV8_PMU_CYCLE_IDX; | 598 | idx = ARMV8_PMU_CYCLE_IDX; |
599 | } else { | 599 | } else { |
600 | BUG(); | 600 | return false; |
601 | } | 601 | } |
602 | } else if (r->CRn == 0 && r->CRm == 9) { | ||
603 | /* PMCCNTR */ | ||
604 | if (pmu_access_event_counter_el0_disabled(vcpu)) | ||
605 | return false; | ||
606 | |||
607 | idx = ARMV8_PMU_CYCLE_IDX; | ||
602 | } else if (r->CRn == 14 && (r->CRm & 12) == 8) { | 608 | } else if (r->CRn == 14 && (r->CRm & 12) == 8) { |
603 | /* PMEVCNTRn_EL0 */ | 609 | /* PMEVCNTRn_EL0 */ |
604 | if (pmu_access_event_counter_el0_disabled(vcpu)) | 610 | if (pmu_access_event_counter_el0_disabled(vcpu)) |
@@ -606,7 +612,7 @@ static bool access_pmu_evcntr(struct kvm_vcpu *vcpu, | |||
606 | 612 | ||
607 | idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); | 613 | idx = ((r->CRm & 3) << 3) | (r->Op2 & 7); |
608 | } else { | 614 | } else { |
609 | BUG(); | 615 | return false; |
610 | } | 616 | } |
611 | 617 | ||
612 | if (!pmu_counter_idx_valid(vcpu, idx)) | 618 | if (!pmu_counter_idx_valid(vcpu, idx)) |
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index 7dd2dd47909a..df78b2ca70eb 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h | |||
@@ -215,6 +215,12 @@ | |||
215 | #endif | 215 | #endif |
216 | 216 | ||
217 | /* | 217 | /* |
218 | * Wired register bits | ||
219 | */ | ||
220 | #define MIPSR6_WIRED_LIMIT (_ULCAST_(0xffff) << 16) | ||
221 | #define MIPSR6_WIRED_WIRED (_ULCAST_(0xffff) << 0) | ||
222 | |||
223 | /* | ||
218 | * Values used for computation of new tlb entries | 224 | * Values used for computation of new tlb entries |
219 | */ | 225 | */ |
220 | #define PL_4K 12 | 226 | #define PL_4K 12 |
diff --git a/arch/mips/include/asm/tlb.h b/arch/mips/include/asm/tlb.h index 4a2349302b55..dd179fd8acda 100644 --- a/arch/mips/include/asm/tlb.h +++ b/arch/mips/include/asm/tlb.h | |||
@@ -1,6 +1,9 @@ | |||
1 | #ifndef __ASM_TLB_H | 1 | #ifndef __ASM_TLB_H |
2 | #define __ASM_TLB_H | 2 | #define __ASM_TLB_H |
3 | 3 | ||
4 | #include <asm/cpu-features.h> | ||
5 | #include <asm/mipsregs.h> | ||
6 | |||
4 | /* | 7 | /* |
5 | * MIPS doesn't need any special per-pte or per-vma handling, except | 8 | * MIPS doesn't need any special per-pte or per-vma handling, except |
6 | * we need to flush cache for area to be unmapped. | 9 | * we need to flush cache for area to be unmapped. |
@@ -22,6 +25,16 @@ | |||
22 | ((CKSEG0 + ((idx) << (PAGE_SHIFT + 1))) | \ | 25 | ((CKSEG0 + ((idx) << (PAGE_SHIFT + 1))) | \ |
23 | (cpu_has_tlbinv ? MIPS_ENTRYHI_EHINV : 0)) | 26 | (cpu_has_tlbinv ? MIPS_ENTRYHI_EHINV : 0)) |
24 | 27 | ||
28 | static inline unsigned int num_wired_entries(void) | ||
29 | { | ||
30 | unsigned int wired = read_c0_wired(); | ||
31 | |||
32 | if (cpu_has_mips_r6) | ||
33 | wired &= MIPSR6_WIRED_WIRED; | ||
34 | |||
35 | return wired; | ||
36 | } | ||
37 | |||
25 | #include <asm-generic/tlb.h> | 38 | #include <asm-generic/tlb.h> |
26 | 39 | ||
27 | #endif /* __ASM_TLB_H */ | 40 | #endif /* __ASM_TLB_H */ |
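num_wired_entries() is needed because, from MIPS release 6 on, the Wired register carries a Limit field in its upper half, so the raw register value no longer equals the number of wired TLB entries. A worked example with an invented register value:

        unsigned int wired = 0x00400003;  /* hypothetical r6 Wired value      */
        unsigned int count = wired & MIPSR6_WIRED_WIRED;          /* == 3     */
        unsigned int limit = (wired & MIPSR6_WIRED_LIMIT) >> 16;  /* == 64    */
        /* pre-r6 code that used the raw value would have treated 0x00400003
         * as the wired count; the helper above returns the expected 3.       */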
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index d56a855828c2..3bef306cdfdb 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c | |||
@@ -209,17 +209,18 @@ bad_area_nosemaphore: | |||
209 | if (show_unhandled_signals && | 209 | if (show_unhandled_signals && |
210 | unhandled_signal(tsk, SIGSEGV) && | 210 | unhandled_signal(tsk, SIGSEGV) && |
211 | __ratelimit(&ratelimit_state)) { | 211 | __ratelimit(&ratelimit_state)) { |
212 | pr_info("\ndo_page_fault(): sending SIGSEGV to %s for invalid %s %0*lx", | 212 | pr_info("do_page_fault(): sending SIGSEGV to %s for invalid %s %0*lx\n", |
213 | tsk->comm, | 213 | tsk->comm, |
214 | write ? "write access to" : "read access from", | 214 | write ? "write access to" : "read access from", |
215 | field, address); | 215 | field, address); |
216 | pr_info("epc = %0*lx in", field, | 216 | pr_info("epc = %0*lx in", field, |
217 | (unsigned long) regs->cp0_epc); | 217 | (unsigned long) regs->cp0_epc); |
218 | print_vma_addr(" ", regs->cp0_epc); | 218 | print_vma_addr(KERN_CONT " ", regs->cp0_epc); |
219 | pr_cont("\n"); | ||
219 | pr_info("ra = %0*lx in", field, | 220 | pr_info("ra = %0*lx in", field, |
220 | (unsigned long) regs->regs[31]); | 221 | (unsigned long) regs->regs[31]); |
221 | print_vma_addr(" ", regs->regs[31]); | 222 | print_vma_addr(KERN_CONT " ", regs->regs[31]); |
222 | pr_info("\n"); | 223 | pr_cont("\n"); |
223 | } | 224 | } |
224 | current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f; | 225 | current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f; |
225 | info.si_signo = SIGSEGV; | 226 | info.si_signo = SIGSEGV; |
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 3a6edecc3f38..e86ebcf5c071 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c | |||
@@ -118,7 +118,7 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot) | |||
118 | writex_c0_entrylo1(entrylo); | 118 | writex_c0_entrylo1(entrylo); |
119 | } | 119 | } |
120 | #endif | 120 | #endif |
121 | tlbidx = read_c0_wired(); | 121 | tlbidx = num_wired_entries(); |
122 | write_c0_wired(tlbidx + 1); | 122 | write_c0_wired(tlbidx + 1); |
123 | write_c0_index(tlbidx); | 123 | write_c0_index(tlbidx); |
124 | mtc0_tlbw_hazard(); | 124 | mtc0_tlbw_hazard(); |
@@ -147,7 +147,7 @@ void kunmap_coherent(void) | |||
147 | 147 | ||
148 | local_irq_save(flags); | 148 | local_irq_save(flags); |
149 | old_ctx = read_c0_entryhi(); | 149 | old_ctx = read_c0_entryhi(); |
150 | wired = read_c0_wired() - 1; | 150 | wired = num_wired_entries() - 1; |
151 | write_c0_wired(wired); | 151 | write_c0_wired(wired); |
152 | write_c0_index(wired); | 152 | write_c0_index(wired); |
153 | write_c0_entryhi(UNIQUE_ENTRYHI(wired)); | 153 | write_c0_entryhi(UNIQUE_ENTRYHI(wired)); |
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index bba9c1484b41..0596505770db 100644 --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c | |||
@@ -65,7 +65,7 @@ void local_flush_tlb_all(void) | |||
65 | write_c0_entrylo0(0); | 65 | write_c0_entrylo0(0); |
66 | write_c0_entrylo1(0); | 66 | write_c0_entrylo1(0); |
67 | 67 | ||
68 | entry = read_c0_wired(); | 68 | entry = num_wired_entries(); |
69 | 69 | ||
70 | /* | 70 | /* |
71 | * Blast 'em all away. | 71 | * Blast 'em all away. |
@@ -385,7 +385,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, | |||
385 | old_ctx = read_c0_entryhi(); | 385 | old_ctx = read_c0_entryhi(); |
386 | htw_stop(); | 386 | htw_stop(); |
387 | old_pagemask = read_c0_pagemask(); | 387 | old_pagemask = read_c0_pagemask(); |
388 | wired = read_c0_wired(); | 388 | wired = num_wired_entries(); |
389 | write_c0_wired(wired + 1); | 389 | write_c0_wired(wired + 1); |
390 | write_c0_index(wired); | 390 | write_c0_index(wired); |
391 | tlbw_use_hazard(); /* What is the hazard here? */ | 391 | tlbw_use_hazard(); /* What is the hazard here? */ |
@@ -449,7 +449,7 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1, | |||
449 | htw_stop(); | 449 | htw_stop(); |
450 | old_ctx = read_c0_entryhi(); | 450 | old_ctx = read_c0_entryhi(); |
451 | old_pagemask = read_c0_pagemask(); | 451 | old_pagemask = read_c0_pagemask(); |
452 | wired = read_c0_wired(); | 452 | wired = num_wired_entries(); |
453 | if (--temp_tlb_entry < wired) { | 453 | if (--temp_tlb_entry < wired) { |
454 | printk(KERN_WARNING | 454 | printk(KERN_WARNING |
455 | "No TLB space left for add_temporary_entry\n"); | 455 | "No TLB space left for add_temporary_entry\n"); |
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 71c4a3aa3752..a14b86587013 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig | |||
@@ -34,7 +34,9 @@ config PARISC | |||
34 | select HAVE_ARCH_HASH | 34 | select HAVE_ARCH_HASH |
35 | select HAVE_ARCH_SECCOMP_FILTER | 35 | select HAVE_ARCH_SECCOMP_FILTER |
36 | select HAVE_ARCH_TRACEHOOK | 36 | select HAVE_ARCH_TRACEHOOK |
37 | select HAVE_UNSTABLE_SCHED_CLOCK if (SMP || !64BIT) | 37 | select GENERIC_SCHED_CLOCK |
38 | select HAVE_UNSTABLE_SCHED_CLOCK if SMP | ||
39 | select GENERIC_CLOCKEVENTS | ||
38 | select ARCH_NO_COHERENT_DMA_MMAP | 40 | select ARCH_NO_COHERENT_DMA_MMAP |
39 | select CPU_NO_EFFICIENT_FFS | 41 | select CPU_NO_EFFICIENT_FFS |
40 | 42 | ||
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index 629eb464d5ba..c263301648f3 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c | |||
@@ -369,6 +369,7 @@ void __init parisc_setup_cache_timing(void) | |||
369 | { | 369 | { |
370 | unsigned long rangetime, alltime; | 370 | unsigned long rangetime, alltime; |
371 | unsigned long size, start; | 371 | unsigned long size, start; |
372 | unsigned long threshold; | ||
372 | 373 | ||
373 | alltime = mfctl(16); | 374 | alltime = mfctl(16); |
374 | flush_data_cache(); | 375 | flush_data_cache(); |
@@ -382,17 +383,12 @@ void __init parisc_setup_cache_timing(void) | |||
382 | printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n", | 383 | printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n", |
383 | alltime, size, rangetime); | 384 | alltime, size, rangetime); |
384 | 385 | ||
385 | /* Racy, but if we see an intermediate value, it's ok too... */ | 386 | threshold = L1_CACHE_ALIGN(size * alltime / rangetime); |
386 | parisc_cache_flush_threshold = size * alltime / rangetime; | 387 | if (threshold > cache_info.dc_size) |
387 | 388 | threshold = cache_info.dc_size; | |
388 | parisc_cache_flush_threshold = L1_CACHE_ALIGN(parisc_cache_flush_threshold); | 389 | if (threshold) |
389 | if (!parisc_cache_flush_threshold) | 390 | parisc_cache_flush_threshold = threshold; |
390 | parisc_cache_flush_threshold = FLUSH_THRESHOLD; | 391 | printk(KERN_INFO "Cache flush threshold set to %lu KiB\n", |
391 | |||
392 | if (parisc_cache_flush_threshold > cache_info.dc_size) | ||
393 | parisc_cache_flush_threshold = cache_info.dc_size; | ||
394 | |||
395 | printk(KERN_INFO "Setting cache flush threshold to %lu kB\n", | ||
396 | parisc_cache_flush_threshold/1024); | 392 | parisc_cache_flush_threshold/1024); |
397 | 393 | ||
398 | /* calculate TLB flush threshold */ | 394 | /* calculate TLB flush threshold */ |
@@ -401,7 +397,7 @@ void __init parisc_setup_cache_timing(void) | |||
401 | flush_tlb_all(); | 397 | flush_tlb_all(); |
402 | alltime = mfctl(16) - alltime; | 398 | alltime = mfctl(16) - alltime; |
403 | 399 | ||
404 | size = PAGE_SIZE; | 400 | size = 0; |
405 | start = (unsigned long) _text; | 401 | start = (unsigned long) _text; |
406 | rangetime = mfctl(16); | 402 | rangetime = mfctl(16); |
407 | while (start < (unsigned long) _end) { | 403 | while (start < (unsigned long) _end) { |
@@ -414,13 +410,10 @@ void __init parisc_setup_cache_timing(void) | |||
414 | printk(KERN_DEBUG "Whole TLB flush %lu cycles, flushing %lu bytes %lu cycles\n", | 410 | printk(KERN_DEBUG "Whole TLB flush %lu cycles, flushing %lu bytes %lu cycles\n", |
415 | alltime, size, rangetime); | 411 | alltime, size, rangetime); |
416 | 412 | ||
417 | parisc_tlb_flush_threshold = size * alltime / rangetime; | 413 | threshold = PAGE_ALIGN(num_online_cpus() * size * alltime / rangetime); |
418 | parisc_tlb_flush_threshold *= num_online_cpus(); | 414 | if (threshold) |
419 | parisc_tlb_flush_threshold = PAGE_ALIGN(parisc_tlb_flush_threshold); | 415 | parisc_tlb_flush_threshold = threshold; |
420 | if (!parisc_tlb_flush_threshold) | 416 | printk(KERN_INFO "TLB flush threshold set to %lu KiB\n", |
421 | parisc_tlb_flush_threshold = FLUSH_TLB_THRESHOLD; | ||
422 | |||
423 | printk(KERN_INFO "Setting TLB flush threshold to %lu kB\n", | ||
424 | parisc_tlb_flush_threshold/1024); | 417 | parisc_tlb_flush_threshold/1024); |
425 | } | 418 | } |
426 | 419 | ||
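The new computation is a plain break-even estimate: if a full cache flush costs alltime cycles and flushing size bytes by address range costs rangetime cycles, range flushing only pays off below roughly size * alltime / rangetime bytes. A worked example with invented numbers:

        /* invented timings, for illustration only:
         *   alltime   = 200000 cycles   (whole data cache flush)
         *   size      = 1 MiB flushed by range
         *   rangetime = 800000 cycles
         * threshold = L1_CACHE_ALIGN(1 MiB * 200000 / 800000) = 256 KiB
         * so ranges under ~256 KiB are flushed by address, larger ones fall
         * back to a full flush, and the result is still capped at
         * cache_info.dc_size as in the code above.                          */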
diff --git a/arch/parisc/kernel/inventory.c b/arch/parisc/kernel/inventory.c index 545f9d2fe711..c05d1876d27c 100644 --- a/arch/parisc/kernel/inventory.c +++ b/arch/parisc/kernel/inventory.c | |||
@@ -58,7 +58,7 @@ void __init setup_pdc(void) | |||
58 | status = pdc_system_map_find_mods(&module_result, &module_path, 0); | 58 | status = pdc_system_map_find_mods(&module_result, &module_path, 0); |
59 | if (status == PDC_OK) { | 59 | if (status == PDC_OK) { |
60 | pdc_type = PDC_TYPE_SYSTEM_MAP; | 60 | pdc_type = PDC_TYPE_SYSTEM_MAP; |
61 | printk("System Map.\n"); | 61 | pr_cont("System Map.\n"); |
62 | return; | 62 | return; |
63 | } | 63 | } |
64 | 64 | ||
@@ -77,7 +77,7 @@ void __init setup_pdc(void) | |||
77 | status = pdc_pat_cell_get_number(&cell_info); | 77 | status = pdc_pat_cell_get_number(&cell_info); |
78 | if (status == PDC_OK) { | 78 | if (status == PDC_OK) { |
79 | pdc_type = PDC_TYPE_PAT; | 79 | pdc_type = PDC_TYPE_PAT; |
80 | printk("64 bit PAT.\n"); | 80 | pr_cont("64 bit PAT.\n"); |
81 | return; | 81 | return; |
82 | } | 82 | } |
83 | #endif | 83 | #endif |
@@ -97,12 +97,12 @@ void __init setup_pdc(void) | |||
97 | case 0xC: /* 715/64, at least */ | 97 | case 0xC: /* 715/64, at least */ |
98 | 98 | ||
99 | pdc_type = PDC_TYPE_SNAKE; | 99 | pdc_type = PDC_TYPE_SNAKE; |
100 | printk("Snake.\n"); | 100 | pr_cont("Snake.\n"); |
101 | return; | 101 | return; |
102 | 102 | ||
103 | default: /* Everything else */ | 103 | default: /* Everything else */ |
104 | 104 | ||
105 | printk("Unsupported.\n"); | 105 | pr_cont("Unsupported.\n"); |
106 | panic("If this is a 64-bit machine, please try a 64-bit kernel.\n"); | 106 | panic("If this is a 64-bit machine, please try a 64-bit kernel.\n"); |
107 | } | 107 | } |
108 | } | 108 | } |
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S index 985e06da37f5..1b39a2acaadf 100644 --- a/arch/parisc/kernel/pacache.S +++ b/arch/parisc/kernel/pacache.S | |||
@@ -96,7 +96,7 @@ fitmanyloop: /* Loop if LOOP >= 2 */ | |||
96 | 96 | ||
97 | fitmanymiddle: /* Loop if LOOP >= 2 */ | 97 | fitmanymiddle: /* Loop if LOOP >= 2 */ |
98 | addib,COND(>) -1, %r31, fitmanymiddle /* Adjusted inner loop decr */ | 98 | addib,COND(>) -1, %r31, fitmanymiddle /* Adjusted inner loop decr */ |
99 | pitlbe 0(%sr1, %r28) | 99 | pitlbe %r0(%sr1, %r28) |
100 | pitlbe,m %arg1(%sr1, %r28) /* Last pitlbe and addr adjust */ | 100 | pitlbe,m %arg1(%sr1, %r28) /* Last pitlbe and addr adjust */ |
101 | addib,COND(>) -1, %r29, fitmanymiddle /* Middle loop decr */ | 101 | addib,COND(>) -1, %r29, fitmanymiddle /* Middle loop decr */ |
102 | copy %arg3, %r31 /* Re-init inner loop count */ | 102 | copy %arg3, %r31 /* Re-init inner loop count */ |
@@ -139,7 +139,7 @@ fdtmanyloop: /* Loop if LOOP >= 2 */ | |||
139 | 139 | ||
140 | fdtmanymiddle: /* Loop if LOOP >= 2 */ | 140 | fdtmanymiddle: /* Loop if LOOP >= 2 */ |
141 | addib,COND(>) -1, %r31, fdtmanymiddle /* Adjusted inner loop decr */ | 141 | addib,COND(>) -1, %r31, fdtmanymiddle /* Adjusted inner loop decr */ |
142 | pdtlbe 0(%sr1, %r28) | 142 | pdtlbe %r0(%sr1, %r28) |
143 | pdtlbe,m %arg1(%sr1, %r28) /* Last pdtlbe and addr adjust */ | 143 | pdtlbe,m %arg1(%sr1, %r28) /* Last pdtlbe and addr adjust */ |
144 | addib,COND(>) -1, %r29, fdtmanymiddle /* Middle loop decr */ | 144 | addib,COND(>) -1, %r29, fdtmanymiddle /* Middle loop decr */ |
145 | copy %arg3, %r31 /* Re-init inner loop count */ | 145 | copy %arg3, %r31 /* Re-init inner loop count */ |
@@ -626,12 +626,12 @@ ENTRY_CFI(copy_user_page_asm) | |||
626 | /* Purge any old translations */ | 626 | /* Purge any old translations */ |
627 | 627 | ||
628 | #ifdef CONFIG_PA20 | 628 | #ifdef CONFIG_PA20 |
629 | pdtlb,l 0(%r28) | 629 | pdtlb,l %r0(%r28) |
630 | pdtlb,l 0(%r29) | 630 | pdtlb,l %r0(%r29) |
631 | #else | 631 | #else |
632 | tlb_lock %r20,%r21,%r22 | 632 | tlb_lock %r20,%r21,%r22 |
633 | pdtlb 0(%r28) | 633 | pdtlb %r0(%r28) |
634 | pdtlb 0(%r29) | 634 | pdtlb %r0(%r29) |
635 | tlb_unlock %r20,%r21,%r22 | 635 | tlb_unlock %r20,%r21,%r22 |
636 | #endif | 636 | #endif |
637 | 637 | ||
@@ -774,10 +774,10 @@ ENTRY_CFI(clear_user_page_asm) | |||
774 | /* Purge any old translation */ | 774 | /* Purge any old translation */ |
775 | 775 | ||
776 | #ifdef CONFIG_PA20 | 776 | #ifdef CONFIG_PA20 |
777 | pdtlb,l 0(%r28) | 777 | pdtlb,l %r0(%r28) |
778 | #else | 778 | #else |
779 | tlb_lock %r20,%r21,%r22 | 779 | tlb_lock %r20,%r21,%r22 |
780 | pdtlb 0(%r28) | 780 | pdtlb %r0(%r28) |
781 | tlb_unlock %r20,%r21,%r22 | 781 | tlb_unlock %r20,%r21,%r22 |
782 | #endif | 782 | #endif |
783 | 783 | ||
@@ -858,10 +858,10 @@ ENTRY_CFI(flush_dcache_page_asm) | |||
858 | /* Purge any old translation */ | 858 | /* Purge any old translation */ |
859 | 859 | ||
860 | #ifdef CONFIG_PA20 | 860 | #ifdef CONFIG_PA20 |
861 | pdtlb,l 0(%r28) | 861 | pdtlb,l %r0(%r28) |
862 | #else | 862 | #else |
863 | tlb_lock %r20,%r21,%r22 | 863 | tlb_lock %r20,%r21,%r22 |
864 | pdtlb 0(%r28) | 864 | pdtlb %r0(%r28) |
865 | tlb_unlock %r20,%r21,%r22 | 865 | tlb_unlock %r20,%r21,%r22 |
866 | #endif | 866 | #endif |
867 | 867 | ||
@@ -898,10 +898,10 @@ ENTRY_CFI(flush_dcache_page_asm) | |||
898 | sync | 898 | sync |
899 | 899 | ||
900 | #ifdef CONFIG_PA20 | 900 | #ifdef CONFIG_PA20 |
901 | pdtlb,l 0(%r25) | 901 | pdtlb,l %r0(%r25) |
902 | #else | 902 | #else |
903 | tlb_lock %r20,%r21,%r22 | 903 | tlb_lock %r20,%r21,%r22 |
904 | pdtlb 0(%r25) | 904 | pdtlb %r0(%r25) |
905 | tlb_unlock %r20,%r21,%r22 | 905 | tlb_unlock %r20,%r21,%r22 |
906 | #endif | 906 | #endif |
907 | 907 | ||
@@ -931,13 +931,18 @@ ENTRY_CFI(flush_icache_page_asm) | |||
931 | depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */ | 931 | depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */ |
932 | #endif | 932 | #endif |
933 | 933 | ||
934 | /* Purge any old translation */ | 934 | /* Purge any old translation. Note that the FIC instruction |
935 | * may use either the instruction or data TLB. Given that we | ||
936 | * have a flat address space, it's not clear which TLB will be | ||
937 | * used. So, we purge both entries. */ | ||
935 | 938 | ||
936 | #ifdef CONFIG_PA20 | 939 | #ifdef CONFIG_PA20 |
940 | pdtlb,l %r0(%r28) | ||
937 | pitlb,l %r0(%sr4,%r28) | 941 | pitlb,l %r0(%sr4,%r28) |
938 | #else | 942 | #else |
939 | tlb_lock %r20,%r21,%r22 | 943 | tlb_lock %r20,%r21,%r22 |
940 | pitlb (%sr4,%r28) | 944 | pdtlb %r0(%r28) |
945 | pitlb %r0(%sr4,%r28) | ||
941 | tlb_unlock %r20,%r21,%r22 | 946 | tlb_unlock %r20,%r21,%r22 |
942 | #endif | 947 | #endif |
943 | 948 | ||
@@ -976,10 +981,12 @@ ENTRY_CFI(flush_icache_page_asm) | |||
976 | sync | 981 | sync |
977 | 982 | ||
978 | #ifdef CONFIG_PA20 | 983 | #ifdef CONFIG_PA20 |
984 | pdtlb,l %r0(%r28) | ||
979 | pitlb,l %r0(%sr4,%r25) | 985 | pitlb,l %r0(%sr4,%r25) |
980 | #else | 986 | #else |
981 | tlb_lock %r20,%r21,%r22 | 987 | tlb_lock %r20,%r21,%r22 |
982 | pitlb (%sr4,%r25) | 988 | pdtlb %r0(%r28) |
989 | pitlb %r0(%sr4,%r25) | ||
983 | tlb_unlock %r20,%r21,%r22 | 990 | tlb_unlock %r20,%r21,%r22 |
984 | #endif | 991 | #endif |
985 | 992 | ||
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c index 02d9ed0f3949..494ff6e8c88a 100644 --- a/arch/parisc/kernel/pci-dma.c +++ b/arch/parisc/kernel/pci-dma.c | |||
@@ -95,8 +95,8 @@ static inline int map_pte_uncached(pte_t * pte, | |||
95 | 95 | ||
96 | if (!pte_none(*pte)) | 96 | if (!pte_none(*pte)) |
97 | printk(KERN_ERR "map_pte_uncached: page already exists\n"); | 97 | printk(KERN_ERR "map_pte_uncached: page already exists\n"); |
98 | set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC)); | ||
99 | purge_tlb_start(flags); | 98 | purge_tlb_start(flags); |
99 | set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC)); | ||
100 | pdtlb_kernel(orig_vaddr); | 100 | pdtlb_kernel(orig_vaddr); |
101 | purge_tlb_end(flags); | 101 | purge_tlb_end(flags); |
102 | vaddr += PAGE_SIZE; | 102 | vaddr += PAGE_SIZE; |
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c index 81d6f6391944..2e66a887788e 100644 --- a/arch/parisc/kernel/setup.c +++ b/arch/parisc/kernel/setup.c | |||
@@ -334,6 +334,10 @@ static int __init parisc_init(void) | |||
334 | /* tell PDC we're Linux. Nevermind failure. */ | 334 | /* tell PDC we're Linux. Nevermind failure. */ |
335 | pdc_stable_write(0x40, &osid, sizeof(osid)); | 335 | pdc_stable_write(0x40, &osid, sizeof(osid)); |
336 | 336 | ||
337 | /* start with known state */ | ||
338 | flush_cache_all_local(); | ||
339 | flush_tlb_all_local(NULL); | ||
340 | |||
337 | processor_init(); | 341 | processor_init(); |
338 | #ifdef CONFIG_SMP | 342 | #ifdef CONFIG_SMP |
339 | pr_info("CPU(s): %d out of %d %s at %d.%06d MHz online\n", | 343 | pr_info("CPU(s): %d out of %d %s at %d.%06d MHz online\n", |
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c index 9b63b876a13a..325f30d82b64 100644 --- a/arch/parisc/kernel/time.c +++ b/arch/parisc/kernel/time.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/rtc.h> | 15 | #include <linux/rtc.h> |
16 | #include <linux/sched.h> | 16 | #include <linux/sched.h> |
17 | #include <linux/sched_clock.h> | ||
17 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
18 | #include <linux/param.h> | 19 | #include <linux/param.h> |
19 | #include <linux/string.h> | 20 | #include <linux/string.h> |
@@ -39,18 +40,6 @@ | |||
39 | 40 | ||
40 | static unsigned long clocktick __read_mostly; /* timer cycles per tick */ | 41 | static unsigned long clocktick __read_mostly; /* timer cycles per tick */ |
41 | 42 | ||
42 | #ifndef CONFIG_64BIT | ||
43 | /* | ||
44 | * The processor-internal cycle counter (Control Register 16) is used as time | ||
45 | * source for the sched_clock() function. This register is 64bit wide on a | ||
46 | * 64-bit kernel and 32bit on a 32-bit kernel. Since sched_clock() always | ||
47 | * requires a 64bit counter we emulate on the 32-bit kernel the higher 32bits | ||
48 | * with a per-cpu variable which we increase every time the counter | ||
49 | * wraps-around (which happens every ~4 secounds). | ||
50 | */ | ||
51 | static DEFINE_PER_CPU(unsigned long, cr16_high_32_bits); | ||
52 | #endif | ||
53 | |||
54 | /* | 43 | /* |
55 | * We keep time on PA-RISC Linux by using the Interval Timer which is | 44 | * We keep time on PA-RISC Linux by using the Interval Timer which is |
56 | * a pair of registers; one is read-only and one is write-only; both | 45 | * a pair of registers; one is read-only and one is write-only; both |
@@ -121,12 +110,6 @@ irqreturn_t __irq_entry timer_interrupt(int irq, void *dev_id) | |||
121 | */ | 110 | */ |
122 | mtctl(next_tick, 16); | 111 | mtctl(next_tick, 16); |
123 | 112 | ||
124 | #if !defined(CONFIG_64BIT) | ||
125 | /* check for overflow on a 32bit kernel (every ~4 seconds). */ | ||
126 | if (unlikely(next_tick < now)) | ||
127 | this_cpu_inc(cr16_high_32_bits); | ||
128 | #endif | ||
129 | |||
130 | /* Skip one clocktick on purpose if we missed next_tick. | 113 | /* Skip one clocktick on purpose if we missed next_tick. |
131 | * The new CR16 must be "later" than current CR16 otherwise | 114 | * The new CR16 must be "later" than current CR16 otherwise |
132 | * itimer would not fire until CR16 wrapped - e.g 4 seconds | 115 | * itimer would not fire until CR16 wrapped - e.g 4 seconds |
@@ -208,7 +191,7 @@ EXPORT_SYMBOL(profile_pc); | |||
208 | 191 | ||
209 | /* clock source code */ | 192 | /* clock source code */ |
210 | 193 | ||
211 | static cycle_t read_cr16(struct clocksource *cs) | 194 | static cycle_t notrace read_cr16(struct clocksource *cs) |
212 | { | 195 | { |
213 | return get_cycles(); | 196 | return get_cycles(); |
214 | } | 197 | } |
@@ -287,26 +270,9 @@ void read_persistent_clock(struct timespec *ts) | |||
287 | } | 270 | } |
288 | 271 | ||
289 | 272 | ||
290 | /* | 273 | static u64 notrace read_cr16_sched_clock(void) |
291 | * sched_clock() framework | ||
292 | */ | ||
293 | |||
294 | static u32 cyc2ns_mul __read_mostly; | ||
295 | static u32 cyc2ns_shift __read_mostly; | ||
296 | |||
297 | u64 sched_clock(void) | ||
298 | { | 274 | { |
299 | u64 now; | 275 | return get_cycles(); |
300 | |||
301 | /* Get current cycle counter (Control Register 16). */ | ||
302 | #ifdef CONFIG_64BIT | ||
303 | now = mfctl(16); | ||
304 | #else | ||
305 | now = mfctl(16) + (((u64) this_cpu_read(cr16_high_32_bits)) << 32); | ||
306 | #endif | ||
307 | |||
308 | /* return the value in ns (cycles_2_ns) */ | ||
309 | return mul_u64_u32_shr(now, cyc2ns_mul, cyc2ns_shift); | ||
310 | } | 276 | } |
311 | 277 | ||
312 | 278 | ||
@@ -316,17 +282,16 @@ u64 sched_clock(void) | |||
316 | 282 | ||
317 | void __init time_init(void) | 283 | void __init time_init(void) |
318 | { | 284 | { |
319 | unsigned long current_cr16_khz; | 285 | unsigned long cr16_hz; |
320 | 286 | ||
321 | current_cr16_khz = PAGE0->mem_10msec/10; /* kHz */ | ||
322 | clocktick = (100 * PAGE0->mem_10msec) / HZ; | 287 | clocktick = (100 * PAGE0->mem_10msec) / HZ; |
323 | |||
324 | /* calculate mult/shift values for cr16 */ | ||
325 | clocks_calc_mult_shift(&cyc2ns_mul, &cyc2ns_shift, current_cr16_khz, | ||
326 | NSEC_PER_MSEC, 0); | ||
327 | |||
328 | start_cpu_itimer(); /* get CPU 0 started */ | 288 | start_cpu_itimer(); /* get CPU 0 started */ |
329 | 289 | ||
290 | cr16_hz = 100 * PAGE0->mem_10msec; /* Hz */ | ||
291 | |||
330 | /* register at clocksource framework */ | 292 | /* register at clocksource framework */ |
331 | clocksource_register_khz(&clocksource_cr16, current_cr16_khz); | 293 | clocksource_register_hz(&clocksource_cr16, cr16_hz); |
294 | |||
295 | /* register as sched_clock source */ | ||
296 | sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_hz); | ||
332 | } | 297 | } |
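The parisc hunk above drops the hand-rolled cyc2ns mult/shift and the per-cpu upper-32-bit bookkeeping and instead hands the raw CR16 frequency and counter width to the generic sched_clock framework via sched_clock_register(), which then does the cycles-to-nanoseconds scaling and wrap handling itself. A stand-alone sketch of the arithmetic involved, using a purely hypothetical 1 GHz counter rate (not a PA-RISC constant):

    /*
     * Illustration only: how a fixed-frequency cycle counter such as CR16
     * maps to nanoseconds, and why a 32-bit counter wraps quickly.
     * The 1 GHz figure is a hypothetical example.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    int main(void)
    {
            uint64_t cr16_hz = 1000000000ULL;       /* hypothetical counter rate */
            uint64_t cycles  = 123456789ULL;        /* a sampled counter value */

            /* What sched_clock_register() arranges internally: cycles -> ns.
             * (The real framework uses a mult/shift pair instead of a divide.) */
            uint64_t ns = cycles * NSEC_PER_SEC / cr16_hz;

            /* Wrap period of a 32-bit counter at that rate. */
            double wrap_s = (double)UINT32_MAX / (double)cr16_hz;

            printf("%llu cycles = %llu ns, 32-bit wrap every %.2f s\n",
                   (unsigned long long)cycles, (unsigned long long)ns, wrap_s);
            return 0;
    }

At 1 GHz a 32-bit counter wraps roughly every 4.3 seconds, which is why the deleted code had to maintain cr16_high_32_bits by hand on 32-bit kernels.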
diff --git a/arch/powerpc/boot/main.c b/arch/powerpc/boot/main.c index 57d42d129033..78aaf4ffd7ab 100644 --- a/arch/powerpc/boot/main.c +++ b/arch/powerpc/boot/main.c | |||
@@ -232,8 +232,12 @@ void start(void) | |||
232 | console_ops.close(); | 232 | console_ops.close(); |
233 | 233 | ||
234 | kentry = (kernel_entry_t) vmlinux.addr; | 234 | kentry = (kernel_entry_t) vmlinux.addr; |
235 | if (ft_addr) | 235 | if (ft_addr) { |
236 | kentry(ft_addr, 0, NULL); | 236 | if(platform_ops.kentry) |
237 | platform_ops.kentry(ft_addr, vmlinux.addr); | ||
238 | else | ||
239 | kentry(ft_addr, 0, NULL); | ||
240 | } | ||
237 | else | 241 | else |
238 | kentry((unsigned long)initrd.addr, initrd.size, | 242 | kentry((unsigned long)initrd.addr, initrd.size, |
239 | loader_info.promptr); | 243 | loader_info.promptr); |
diff --git a/arch/powerpc/boot/opal-calls.S b/arch/powerpc/boot/opal-calls.S index ff2f1b97bc53..2a99fc9a3ccf 100644 --- a/arch/powerpc/boot/opal-calls.S +++ b/arch/powerpc/boot/opal-calls.S | |||
@@ -12,6 +12,19 @@ | |||
12 | 12 | ||
13 | .text | 13 | .text |
14 | 14 | ||
15 | .globl opal_kentry | ||
16 | opal_kentry: | ||
17 | /* r3 is the fdt ptr */ | ||
18 | mtctr r4 | ||
19 | li r4, 0 | ||
20 | li r5, 0 | ||
21 | li r6, 0 | ||
22 | li r7, 0 | ||
23 | ld r11,opal@got(r2) | ||
24 | ld r8,0(r11) | ||
25 | ld r9,8(r11) | ||
26 | bctr | ||
27 | |||
15 | #define OPAL_CALL(name, token) \ | 28 | #define OPAL_CALL(name, token) \ |
16 | .globl name; \ | 29 | .globl name; \ |
17 | name: \ | 30 | name: \ |
diff --git a/arch/powerpc/boot/opal.c b/arch/powerpc/boot/opal.c index 1f37e1c1d6d8..d7b4fd47eb44 100644 --- a/arch/powerpc/boot/opal.c +++ b/arch/powerpc/boot/opal.c | |||
@@ -23,14 +23,25 @@ struct opal { | |||
23 | 23 | ||
24 | static u32 opal_con_id; | 24 | static u32 opal_con_id; |
25 | 25 | ||
26 | /* see opal-wrappers.S */ | ||
26 | int64_t opal_console_write(int64_t term_number, u64 *length, const u8 *buffer); | 27 | int64_t opal_console_write(int64_t term_number, u64 *length, const u8 *buffer); |
27 | int64_t opal_console_read(int64_t term_number, uint64_t *length, u8 *buffer); | 28 | int64_t opal_console_read(int64_t term_number, uint64_t *length, u8 *buffer); |
28 | int64_t opal_console_write_buffer_space(uint64_t term_number, uint64_t *length); | 29 | int64_t opal_console_write_buffer_space(uint64_t term_number, uint64_t *length); |
29 | int64_t opal_console_flush(uint64_t term_number); | 30 | int64_t opal_console_flush(uint64_t term_number); |
30 | int64_t opal_poll_events(uint64_t *outstanding_event_mask); | 31 | int64_t opal_poll_events(uint64_t *outstanding_event_mask); |
31 | 32 | ||
33 | void opal_kentry(unsigned long fdt_addr, void *vmlinux_addr); | ||
34 | |||
32 | static int opal_con_open(void) | 35 | static int opal_con_open(void) |
33 | { | 36 | { |
37 | /* | ||
38 | * When OPAL loads the boot kernel it stashes the OPAL base and entry | ||
39 | * address in r8 and r9 so the kernel can use the OPAL console | ||
40 | * before unflattening the devicetree. While executing, the wrapper will | ||
41 | * probably trash r8 and r9, so this kentry hook restores them before | ||
42 | * entering the decompressed kernel. | ||
43 | */ | ||
44 | platform_ops.kentry = opal_kentry; | ||
34 | return 0; | 45 | return 0; |
35 | } | 46 | } |
36 | 47 | ||
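The opal.c hunk above registers an optional kentry hook so that, as the main.c hunk earlier shows, start() can let OPAL re-establish r8/r9 before branching into vmlinux and fall back to the plain kentry() call otherwise. A stand-alone sketch of that hook-with-fallback dispatch; demo_platform_ops, default_entry and opal_entry are made-up names for illustration, not kernel APIs:

    #include <stdio.h>

    struct demo_platform_ops {
            /* optional: set by platform code such as the OPAL console setup */
            void (*kentry)(unsigned long fdt_addr, void *vmlinux_addr);
    };

    static struct demo_platform_ops demo_ops;

    static void default_entry(unsigned long fdt_addr, void *vmlinux_addr)
    {
            printf("generic entry: fdt=%#lx vmlinux=%p\n", fdt_addr, vmlinux_addr);
    }

    static void opal_entry(unsigned long fdt_addr, void *vmlinux_addr)
    {
            /* the real opal_kentry also reloads r8/r9 from the opal struct */
            printf("OPAL entry: fdt=%#lx vmlinux=%p\n", fdt_addr, vmlinux_addr);
    }

    static void enter_kernel(unsigned long fdt_addr, void *vmlinux_addr)
    {
            if (demo_ops.kentry)
                    demo_ops.kentry(fdt_addr, vmlinux_addr);
            else
                    default_entry(fdt_addr, vmlinux_addr);
    }

    int main(void)
    {
            enter_kernel(0x1000, (void *)0x400000); /* no hook: generic path */
            demo_ops.kentry = opal_entry;           /* what opal_con_open() does */
            enter_kernel(0x1000, (void *)0x400000); /* hook registered: OPAL path */
            return 0;
    }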
diff --git a/arch/powerpc/boot/ops.h b/arch/powerpc/boot/ops.h index 309d1b127e96..fad1862f4b2d 100644 --- a/arch/powerpc/boot/ops.h +++ b/arch/powerpc/boot/ops.h | |||
@@ -30,6 +30,7 @@ struct platform_ops { | |||
30 | void * (*realloc)(void *ptr, unsigned long size); | 30 | void * (*realloc)(void *ptr, unsigned long size); |
31 | void (*exit)(void); | 31 | void (*exit)(void); |
32 | void * (*vmlinux_alloc)(unsigned long size); | 32 | void * (*vmlinux_alloc)(unsigned long size); |
33 | void (*kentry)(unsigned long fdt_addr, void *vmlinux_addr); | ||
33 | }; | 34 | }; |
34 | extern struct platform_ops platform_ops; | 35 | extern struct platform_ops platform_ops; |
35 | 36 | ||
diff --git a/arch/powerpc/include/asm/asm-prototypes.h b/arch/powerpc/include/asm/asm-prototypes.h index d1492736d852..e0baba1535e6 100644 --- a/arch/powerpc/include/asm/asm-prototypes.h +++ b/arch/powerpc/include/asm/asm-prototypes.h | |||
@@ -14,6 +14,10 @@ | |||
14 | 14 | ||
15 | #include <linux/threads.h> | 15 | #include <linux/threads.h> |
16 | #include <linux/kprobes.h> | 16 | #include <linux/kprobes.h> |
17 | #include <asm/cacheflush.h> | ||
18 | #include <asm/checksum.h> | ||
19 | #include <asm/uaccess.h> | ||
20 | #include <asm/epapr_hcalls.h> | ||
17 | 21 | ||
18 | #include <uapi/asm/ucontext.h> | 22 | #include <uapi/asm/ucontext.h> |
19 | 23 | ||
@@ -109,4 +113,12 @@ void early_setup_secondary(void); | |||
109 | /* time */ | 113 | /* time */ |
110 | void accumulate_stolen_time(void); | 114 | void accumulate_stolen_time(void); |
111 | 115 | ||
116 | /* misc runtime */ | ||
117 | extern u64 __bswapdi2(u64); | ||
118 | extern s64 __lshrdi3(s64, int); | ||
119 | extern s64 __ashldi3(s64, int); | ||
120 | extern s64 __ashrdi3(s64, int); | ||
121 | extern int __cmpdi2(s64, s64); | ||
122 | extern int __ucmpdi2(u64, u64); | ||
123 | |||
112 | #endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */ | 124 | #endif /* _ASM_POWERPC_ASM_PROTOTYPES_H */ |
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index 84d49b197c32..9a3eee661297 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h | |||
@@ -91,7 +91,7 @@ | |||
91 | */ | 91 | */ |
92 | #define LOAD_HANDLER(reg, label) \ | 92 | #define LOAD_HANDLER(reg, label) \ |
93 | ld reg,PACAKBASE(r13); /* get high part of &label */ \ | 93 | ld reg,PACAKBASE(r13); /* get high part of &label */ \ |
94 | ori reg,reg,(FIXED_SYMBOL_ABS_ADDR(label))@l; | 94 | ori reg,reg,FIXED_SYMBOL_ABS_ADDR(label); |
95 | 95 | ||
96 | #define __LOAD_HANDLER(reg, label) \ | 96 | #define __LOAD_HANDLER(reg, label) \ |
97 | ld reg,PACAKBASE(r13); \ | 97 | ld reg,PACAKBASE(r13); \ |
@@ -158,14 +158,17 @@ BEGIN_FTR_SECTION_NESTED(943) \ | |||
158 | std ra,offset(r13); \ | 158 | std ra,offset(r13); \ |
159 | END_FTR_SECTION_NESTED(ftr,ftr,943) | 159 | END_FTR_SECTION_NESTED(ftr,ftr,943) |
160 | 160 | ||
161 | #define EXCEPTION_PROLOG_0(area) \ | 161 | #define EXCEPTION_PROLOG_0_PACA(area) \ |
162 | GET_PACA(r13); \ | ||
163 | std r9,area+EX_R9(r13); /* save r9 */ \ | 162 | std r9,area+EX_R9(r13); /* save r9 */ \ |
164 | OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR); \ | 163 | OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR); \ |
165 | HMT_MEDIUM; \ | 164 | HMT_MEDIUM; \ |
166 | std r10,area+EX_R10(r13); /* save r10 - r12 */ \ | 165 | std r10,area+EX_R10(r13); /* save r10 - r12 */ \ |
167 | OPT_GET_SPR(r10, SPRN_CFAR, CPU_FTR_CFAR) | 166 | OPT_GET_SPR(r10, SPRN_CFAR, CPU_FTR_CFAR) |
168 | 167 | ||
168 | #define EXCEPTION_PROLOG_0(area) \ | ||
169 | GET_PACA(r13); \ | ||
170 | EXCEPTION_PROLOG_0_PACA(area) | ||
171 | |||
169 | #define __EXCEPTION_PROLOG_1(area, extra, vec) \ | 172 | #define __EXCEPTION_PROLOG_1(area, extra, vec) \ |
170 | OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR); \ | 173 | OPT_SAVE_REG_TO_PACA(area+EX_PPR, r9, CPU_FTR_HAS_PPR); \ |
171 | OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR); \ | 174 | OPT_SAVE_REG_TO_PACA(area+EX_CFAR, r10, CPU_FTR_CFAR); \ |
@@ -196,6 +199,12 @@ END_FTR_SECTION_NESTED(ftr,ftr,943) | |||
196 | EXCEPTION_PROLOG_1(area, extra, vec); \ | 199 | EXCEPTION_PROLOG_1(area, extra, vec); \ |
197 | EXCEPTION_PROLOG_PSERIES_1(label, h); | 200 | EXCEPTION_PROLOG_PSERIES_1(label, h); |
198 | 201 | ||
202 | /* Have the PACA in r13 already */ | ||
203 | #define EXCEPTION_PROLOG_PSERIES_PACA(area, label, h, extra, vec) \ | ||
204 | EXCEPTION_PROLOG_0_PACA(area); \ | ||
205 | EXCEPTION_PROLOG_1(area, extra, vec); \ | ||
206 | EXCEPTION_PROLOG_PSERIES_1(label, h); | ||
207 | |||
199 | #define __KVMTEST(h, n) \ | 208 | #define __KVMTEST(h, n) \ |
200 | lbz r10,HSTATE_IN_GUEST(r13); \ | 209 | lbz r10,HSTATE_IN_GUEST(r13); \ |
201 | cmpwi r10,0; \ | 210 | cmpwi r10,0; \ |
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index e88368354e49..e311c25751a4 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h | |||
@@ -29,6 +29,12 @@ | |||
29 | */ | 29 | */ |
30 | 30 | ||
31 | /* | 31 | /* |
32 | * Kernel read only support. | ||
33 | * We added the ppp value 0b110 in ISA 2.04. | ||
34 | */ | ||
35 | #define MMU_FTR_KERNEL_RO ASM_CONST(0x00004000) | ||
36 | |||
37 | /* | ||
32 | * We need to clear top 16bits of va (from the remaining 64 bits )in | 38 | * We need to clear top 16bits of va (from the remaining 64 bits )in |
33 | * tlbie* instructions | 39 | * tlbie* instructions |
34 | */ | 40 | */ |
@@ -103,10 +109,10 @@ | |||
103 | #define MMU_FTRS_POWER4 MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | 109 | #define MMU_FTRS_POWER4 MMU_FTRS_DEFAULT_HPTE_ARCH_V2 |
104 | #define MMU_FTRS_PPC970 MMU_FTRS_POWER4 | MMU_FTR_TLBIE_CROP_VA | 110 | #define MMU_FTRS_PPC970 MMU_FTRS_POWER4 | MMU_FTR_TLBIE_CROP_VA |
105 | #define MMU_FTRS_POWER5 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | 111 | #define MMU_FTRS_POWER5 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE |
106 | #define MMU_FTRS_POWER6 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | 112 | #define MMU_FTRS_POWER6 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO |
107 | #define MMU_FTRS_POWER7 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | 113 | #define MMU_FTRS_POWER7 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO |
108 | #define MMU_FTRS_POWER8 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | 114 | #define MMU_FTRS_POWER8 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO |
109 | #define MMU_FTRS_POWER9 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | 115 | #define MMU_FTRS_POWER9 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_KERNEL_RO |
110 | #define MMU_FTRS_CELL MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \ | 116 | #define MMU_FTRS_CELL MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \ |
111 | MMU_FTR_CI_LARGE_PAGE | 117 | MMU_FTR_CI_LARGE_PAGE |
112 | #define MMU_FTRS_PA6T MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \ | 118 | #define MMU_FTRS_PA6T MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \ |
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 0132831b3081..c56ea8c84abb 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h | |||
@@ -460,5 +460,6 @@ | |||
460 | 460 | ||
461 | #define PPC_SLBIA(IH) stringify_in_c(.long PPC_INST_SLBIA | \ | 461 | #define PPC_SLBIA(IH) stringify_in_c(.long PPC_INST_SLBIA | \ |
462 | ((IH & 0x7) << 21)) | 462 | ((IH & 0x7) << 21)) |
463 | #define PPC_INVALIDATE_ERAT PPC_SLBIA(7) | ||
463 | 464 | ||
464 | #endif /* _ASM_POWERPC_PPC_OPCODE_H */ | 465 | #endif /* _ASM_POWERPC_PPC_OPCODE_H */ |
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 9cd4e8cbc78c..9e1499f98def 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h | |||
@@ -355,6 +355,7 @@ | |||
355 | #define LPCR_PECE0 ASM_CONST(0x0000000000004000) /* ext. exceptions can cause exit */ | 355 | #define LPCR_PECE0 ASM_CONST(0x0000000000004000) /* ext. exceptions can cause exit */ |
356 | #define LPCR_PECE1 ASM_CONST(0x0000000000002000) /* decrementer can cause exit */ | 356 | #define LPCR_PECE1 ASM_CONST(0x0000000000002000) /* decrementer can cause exit */ |
357 | #define LPCR_PECE2 ASM_CONST(0x0000000000001000) /* machine check etc can cause exit */ | 357 | #define LPCR_PECE2 ASM_CONST(0x0000000000001000) /* machine check etc can cause exit */ |
358 | #define LPCR_PECE_HVEE ASM_CONST(0x0000400000000000) /* P9 Wakeup on HV interrupts */ | ||
358 | #define LPCR_MER ASM_CONST(0x0000000000000800) /* Mediated External Exception */ | 359 | #define LPCR_MER ASM_CONST(0x0000000000000800) /* Mediated External Exception */ |
359 | #define LPCR_MER_SH 11 | 360 | #define LPCR_MER_SH 11 |
360 | #define LPCR_TC ASM_CONST(0x0000000000000200) /* Translation control */ | 361 | #define LPCR_TC ASM_CONST(0x0000000000000200) /* Translation control */ |
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S index 52ff3f025437..37c027ca83b2 100644 --- a/arch/powerpc/kernel/cpu_setup_power.S +++ b/arch/powerpc/kernel/cpu_setup_power.S | |||
@@ -98,8 +98,8 @@ _GLOBAL(__setup_cpu_power9) | |||
98 | li r0,0 | 98 | li r0,0 |
99 | mtspr SPRN_LPID,r0 | 99 | mtspr SPRN_LPID,r0 |
100 | mfspr r3,SPRN_LPCR | 100 | mfspr r3,SPRN_LPCR |
101 | ori r3, r3, LPCR_PECEDH | 101 | LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE) |
102 | ori r3, r3, LPCR_HVICE | 102 | or r3, r3, r4 |
103 | bl __init_LPCR | 103 | bl __init_LPCR |
104 | bl __init_HFSCR | 104 | bl __init_HFSCR |
105 | bl __init_tlb_power9 | 105 | bl __init_tlb_power9 |
@@ -118,8 +118,8 @@ _GLOBAL(__restore_cpu_power9) | |||
118 | li r0,0 | 118 | li r0,0 |
119 | mtspr SPRN_LPID,r0 | 119 | mtspr SPRN_LPID,r0 |
120 | mfspr r3,SPRN_LPCR | 120 | mfspr r3,SPRN_LPCR |
121 | ori r3, r3, LPCR_PECEDH | 121 | LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE) |
122 | ori r3, r3, LPCR_HVICE | 122 | or r3, r3, r4 |
123 | bl __init_LPCR | 123 | bl __init_LPCR |
124 | bl __init_HFSCR | 124 | bl __init_HFSCR |
125 | bl __init_tlb_power9 | 125 | bl __init_tlb_power9 |
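The switch from two ori instructions to LOAD_REG_IMMEDIATE plus a register-to-register or is needed because ori only carries a 16-bit immediate, while the newly added LPCR_PECE_HVEE bit (0x0000400000000000 in the reg.h hunk above) sits at bit 46, far outside that range. A quick stand-alone check of that constraint; only the LPCR_PECE_HVEE value is taken from the patch, the program itself is illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define LPCR_PECE_HVEE 0x0000400000000000ULL    /* from the reg.h hunk */

    int main(void)
    {
            uint64_t bit = LPCR_PECE_HVEE;

            if (bit <= 0xffffULL)
                    printf("fits in a 16-bit ori immediate\n");
            else
                    printf("needs LOAD_REG_IMMEDIATE + or: bit index %d\n",
                           63 - __builtin_clzll(bit));
            return 0;
    }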
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 08ba447a4b3d..1ba82ea90230 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S | |||
@@ -116,7 +116,9 @@ EXC_VIRT_NONE(0x4000, 0x4100) | |||
116 | 116 | ||
117 | EXC_REAL_BEGIN(system_reset, 0x100, 0x200) | 117 | EXC_REAL_BEGIN(system_reset, 0x100, 0x200) |
118 | SET_SCRATCH0(r13) | 118 | SET_SCRATCH0(r13) |
119 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD, | 119 | GET_PACA(r13) |
120 | clrrdi r13,r13,1 /* Last bit of HSPRG0 is set if waking from winkle */ | ||
121 | EXCEPTION_PROLOG_PSERIES_PACA(PACA_EXGEN, system_reset_common, EXC_STD, | ||
120 | IDLETEST, 0x100) | 122 | IDLETEST, 0x100) |
121 | 123 | ||
122 | EXC_REAL_END(system_reset, 0x100, 0x200) | 124 | EXC_REAL_END(system_reset, 0x100, 0x200) |
@@ -124,6 +126,9 @@ EXC_VIRT_NONE(0x4100, 0x4200) | |||
124 | 126 | ||
125 | #ifdef CONFIG_PPC_P7_NAP | 127 | #ifdef CONFIG_PPC_P7_NAP |
126 | EXC_COMMON_BEGIN(system_reset_idle_common) | 128 | EXC_COMMON_BEGIN(system_reset_idle_common) |
129 | BEGIN_FTR_SECTION | ||
130 | GET_PACA(r13) /* Restore HSPRG0 to get the winkle bit in r13 */ | ||
131 | END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300) | ||
127 | bl pnv_restore_hyp_resource | 132 | bl pnv_restore_hyp_resource |
128 | 133 | ||
129 | li r0,PNV_THREAD_RUNNING | 134 | li r0,PNV_THREAD_RUNNING |
@@ -169,7 +174,7 @@ EXC_REAL_BEGIN(machine_check, 0x200, 0x300) | |||
169 | SET_SCRATCH0(r13) /* save r13 */ | 174 | SET_SCRATCH0(r13) /* save r13 */ |
170 | /* | 175 | /* |
171 | * Running native on arch 2.06 or later, we may wakeup from winkle | 176 | * Running native on arch 2.06 or later, we may wakeup from winkle |
172 | * inside machine check. If yes, then last bit of HSPGR0 would be set | 177 | * inside machine check. If yes, then last bit of HSPRG0 would be set |
173 | * to 1. Hence clear it unconditionally. | 178 | * to 1. Hence clear it unconditionally. |
174 | */ | 179 | */ |
175 | GET_PACA(r13) | 180 | GET_PACA(r13) |
@@ -388,7 +393,7 @@ EXC_COMMON_BEGIN(machine_check_handle_early) | |||
388 | /* | 393 | /* |
389 | * Go back to winkle. Please note that this thread was woken up in | 394 | * Go back to winkle. Please note that this thread was woken up in |
390 | * machine check from winkle and have not restored the per-subcore | 395 | * machine check from winkle and have not restored the per-subcore |
391 | * state. Hence before going back to winkle, set last bit of HSPGR0 | 396 | * state. Hence before going back to winkle, set last bit of HSPRG0 |
392 | * to 1. This will make sure that if this thread gets woken up | 397 | * to 1. This will make sure that if this thread gets woken up |
393 | * again at reset vector 0x100 then it will get chance to restore | 398 | * again at reset vector 0x100 then it will get chance to restore |
394 | * the subcore state. | 399 | * the subcore state. |
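Both the system-reset prologue above and the machine-check path keep a woke-from-winkle flag in the low bit of HSPRG0, so the value read back into r13 must have that bit stripped (the clrrdi r13,r13,1) before it can be used as the PACA pointer. A stand-alone C sketch of the same pointer-tagging trick; the structure and names are illustrative only:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    struct demo_paca {
            int cpu;
    } __attribute__((aligned(8)));          /* low bits are free for flags */

    int main(void)
    {
            static struct demo_paca paca = { .cpu = 3 };
            uintptr_t tagged = (uintptr_t)&paca | 1UL;      /* "woke from winkle" */

            int from_winkle = tagged & 1UL;                 /* test the flag */
            struct demo_paca *p = (struct demo_paca *)(tagged & ~1UL); /* clrrdi */

            assert(p == &paca);
            printf("cpu %d, from_winkle=%d\n", p->cpu, from_winkle);
            return 0;
    }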
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index ce6dc61b15b2..49a680d5ae37 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
@@ -1215,7 +1215,7 @@ static void show_instructions(struct pt_regs *regs) | |||
1215 | int instr; | 1215 | int instr; |
1216 | 1216 | ||
1217 | if (!(i % 8)) | 1217 | if (!(i % 8)) |
1218 | printk("\n"); | 1218 | pr_cont("\n"); |
1219 | 1219 | ||
1220 | #if !defined(CONFIG_BOOKE) | 1220 | #if !defined(CONFIG_BOOKE) |
1221 | /* If executing with the IMMU off, adjust pc rather | 1221 | /* If executing with the IMMU off, adjust pc rather |
@@ -1227,18 +1227,18 @@ static void show_instructions(struct pt_regs *regs) | |||
1227 | 1227 | ||
1228 | if (!__kernel_text_address(pc) || | 1228 | if (!__kernel_text_address(pc) || |
1229 | probe_kernel_address((unsigned int __user *)pc, instr)) { | 1229 | probe_kernel_address((unsigned int __user *)pc, instr)) { |
1230 | printk(KERN_CONT "XXXXXXXX "); | 1230 | pr_cont("XXXXXXXX "); |
1231 | } else { | 1231 | } else { |
1232 | if (regs->nip == pc) | 1232 | if (regs->nip == pc) |
1233 | printk(KERN_CONT "<%08x> ", instr); | 1233 | pr_cont("<%08x> ", instr); |
1234 | else | 1234 | else |
1235 | printk(KERN_CONT "%08x ", instr); | 1235 | pr_cont("%08x ", instr); |
1236 | } | 1236 | } |
1237 | 1237 | ||
1238 | pc += sizeof(int); | 1238 | pc += sizeof(int); |
1239 | } | 1239 | } |
1240 | 1240 | ||
1241 | printk("\n"); | 1241 | pr_cont("\n"); |
1242 | } | 1242 | } |
1243 | 1243 | ||
1244 | struct regbit { | 1244 | struct regbit { |
@@ -1282,7 +1282,7 @@ static void print_bits(unsigned long val, struct regbit *bits, const char *sep) | |||
1282 | 1282 | ||
1283 | for (; bits->bit; ++bits) | 1283 | for (; bits->bit; ++bits) |
1284 | if (val & bits->bit) { | 1284 | if (val & bits->bit) { |
1285 | printk("%s%s", s, bits->name); | 1285 | pr_cont("%s%s", s, bits->name); |
1286 | s = sep; | 1286 | s = sep; |
1287 | } | 1287 | } |
1288 | } | 1288 | } |
@@ -1305,9 +1305,9 @@ static void print_tm_bits(unsigned long val) | |||
1305 | * T: Transactional (bit 34) | 1305 | * T: Transactional (bit 34) |
1306 | */ | 1306 | */ |
1307 | if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) { | 1307 | if (val & (MSR_TM | MSR_TS_S | MSR_TS_T)) { |
1308 | printk(",TM["); | 1308 | pr_cont(",TM["); |
1309 | print_bits(val, msr_tm_bits, ""); | 1309 | print_bits(val, msr_tm_bits, ""); |
1310 | printk("]"); | 1310 | pr_cont("]"); |
1311 | } | 1311 | } |
1312 | } | 1312 | } |
1313 | #else | 1313 | #else |
@@ -1316,10 +1316,10 @@ static void print_tm_bits(unsigned long val) {} | |||
1316 | 1316 | ||
1317 | static void print_msr_bits(unsigned long val) | 1317 | static void print_msr_bits(unsigned long val) |
1318 | { | 1318 | { |
1319 | printk("<"); | 1319 | pr_cont("<"); |
1320 | print_bits(val, msr_bits, ","); | 1320 | print_bits(val, msr_bits, ","); |
1321 | print_tm_bits(val); | 1321 | print_tm_bits(val); |
1322 | printk(">"); | 1322 | pr_cont(">"); |
1323 | } | 1323 | } |
1324 | 1324 | ||
1325 | #ifdef CONFIG_PPC64 | 1325 | #ifdef CONFIG_PPC64 |
@@ -1347,29 +1347,29 @@ void show_regs(struct pt_regs * regs) | |||
1347 | printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer); | 1347 | printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer); |
1348 | trap = TRAP(regs); | 1348 | trap = TRAP(regs); |
1349 | if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR)) | 1349 | if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR)) |
1350 | printk("CFAR: "REG" ", regs->orig_gpr3); | 1350 | pr_cont("CFAR: "REG" ", regs->orig_gpr3); |
1351 | if (trap == 0x200 || trap == 0x300 || trap == 0x600) | 1351 | if (trap == 0x200 || trap == 0x300 || trap == 0x600) |
1352 | #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) | 1352 | #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) |
1353 | printk("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr); | 1353 | pr_cont("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr); |
1354 | #else | 1354 | #else |
1355 | printk("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr); | 1355 | pr_cont("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr); |
1356 | #endif | 1356 | #endif |
1357 | #ifdef CONFIG_PPC64 | 1357 | #ifdef CONFIG_PPC64 |
1358 | printk("SOFTE: %ld ", regs->softe); | 1358 | pr_cont("SOFTE: %ld ", regs->softe); |
1359 | #endif | 1359 | #endif |
1360 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | 1360 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
1361 | if (MSR_TM_ACTIVE(regs->msr)) | 1361 | if (MSR_TM_ACTIVE(regs->msr)) |
1362 | printk("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch); | 1362 | pr_cont("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch); |
1363 | #endif | 1363 | #endif |
1364 | 1364 | ||
1365 | for (i = 0; i < 32; i++) { | 1365 | for (i = 0; i < 32; i++) { |
1366 | if ((i % REGS_PER_LINE) == 0) | 1366 | if ((i % REGS_PER_LINE) == 0) |
1367 | printk("\nGPR%02d: ", i); | 1367 | pr_cont("\nGPR%02d: ", i); |
1368 | printk(REG " ", regs->gpr[i]); | 1368 | pr_cont(REG " ", regs->gpr[i]); |
1369 | if (i == LAST_VOLATILE && !FULL_REGS(regs)) | 1369 | if (i == LAST_VOLATILE && !FULL_REGS(regs)) |
1370 | break; | 1370 | break; |
1371 | } | 1371 | } |
1372 | printk("\n"); | 1372 | pr_cont("\n"); |
1373 | #ifdef CONFIG_KALLSYMS | 1373 | #ifdef CONFIG_KALLSYMS |
1374 | /* | 1374 | /* |
1375 | * Lookup NIP late so we have the best chance of getting the | 1375 | * Lookup NIP late so we have the best chance of getting the |
@@ -1900,14 +1900,14 @@ void show_stack(struct task_struct *tsk, unsigned long *stack) | |||
1900 | printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip); | 1900 | printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip); |
1901 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 1901 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
1902 | if ((ip == rth) && curr_frame >= 0) { | 1902 | if ((ip == rth) && curr_frame >= 0) { |
1903 | printk(" (%pS)", | 1903 | pr_cont(" (%pS)", |
1904 | (void *)current->ret_stack[curr_frame].ret); | 1904 | (void *)current->ret_stack[curr_frame].ret); |
1905 | curr_frame--; | 1905 | curr_frame--; |
1906 | } | 1906 | } |
1907 | #endif | 1907 | #endif |
1908 | if (firstframe) | 1908 | if (firstframe) |
1909 | printk(" (unreliable)"); | 1909 | pr_cont(" (unreliable)"); |
1910 | printk("\n"); | 1910 | pr_cont("\n"); |
1911 | } | 1911 | } |
1912 | firstframe = 0; | 1912 | firstframe = 0; |
1913 | 1913 | ||
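The process.c conversion replaces printk(KERN_CONT ...) and bare printk() continuations with pr_cont() so that the register and stack dumps still come out as single lines under the stricter printk continuation handling in 4.9. A hypothetical demo module (not part of the patch) showing the intended open-with-pr_info, extend-with-pr_cont pattern:

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/printk.h>

    static int __init cont_demo_init(void)
    {
            int i;

            /* Each pr_cont() continues the line opened by pr_info(),
             * mirroring the show_regs()/show_stack() conversion above. */
            pr_info("GPR00:");
            for (i = 0; i < 4; i++)
                    pr_cont(" %016lx", (unsigned long)i);
            pr_cont("\n");
            return 0;
    }

    static void __exit cont_demo_exit(void)
    {
    }

    module_init(cont_demo_init);
    module_exit(cont_demo_exit);
    MODULE_LICENSE("GPL");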
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 7ac8e6eaab5b..8d586cff8a41 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c | |||
@@ -226,17 +226,25 @@ static void __init configure_exceptions(void) | |||
226 | if (firmware_has_feature(FW_FEATURE_OPAL)) | 226 | if (firmware_has_feature(FW_FEATURE_OPAL)) |
227 | opal_configure_cores(); | 227 | opal_configure_cores(); |
228 | 228 | ||
229 | /* Enable AIL if supported, and we are in hypervisor mode */ | 229 | /* AIL on native is done in cpu_ready_for_interrupts() */ |
230 | if (early_cpu_has_feature(CPU_FTR_HVMODE) && | ||
231 | early_cpu_has_feature(CPU_FTR_ARCH_207S)) { | ||
232 | unsigned long lpcr = mfspr(SPRN_LPCR); | ||
233 | mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3); | ||
234 | } | ||
235 | } | 230 | } |
236 | } | 231 | } |
237 | 232 | ||
238 | static void cpu_ready_for_interrupts(void) | 233 | static void cpu_ready_for_interrupts(void) |
239 | { | 234 | { |
235 | /* | ||
236 | * Enable AIL if supported, and we are in hypervisor mode. This | ||
237 | * is called once for every processor. | ||
238 | * | ||
239 | * If we are not in hypervisor mode the job is done once for | ||
240 | * the whole partition in configure_exceptions(). | ||
241 | */ | ||
242 | if (early_cpu_has_feature(CPU_FTR_HVMODE) && | ||
243 | early_cpu_has_feature(CPU_FTR_ARCH_207S)) { | ||
244 | unsigned long lpcr = mfspr(SPRN_LPCR); | ||
245 | mtspr(SPRN_LPCR, lpcr | LPCR_AIL_3); | ||
246 | } | ||
247 | |||
240 | /* Set IR and DR in PACA MSR */ | 248 | /* Set IR and DR in PACA MSR */ |
241 | get_paca()->kernel_msr = MSR_KERNEL; | 249 | get_paca()->kernel_msr = MSR_KERNEL; |
242 | } | 250 | } |
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 44d3c3a38e3e..78dabf065ba9 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c | |||
@@ -193,8 +193,12 @@ unsigned long htab_convert_pte_flags(unsigned long pteflags) | |||
193 | /* | 193 | /* |
194 | * Kernel read only mapped with ppp bits 0b110 | 194 | * Kernel read only mapped with ppp bits 0b110 |
195 | */ | 195 | */ |
196 | if (!(pteflags & _PAGE_WRITE)) | 196 | if (!(pteflags & _PAGE_WRITE)) { |
197 | rflags |= (HPTE_R_PP0 | 0x2); | 197 | if (mmu_has_feature(MMU_FTR_KERNEL_RO)) |
198 | rflags |= (HPTE_R_PP0 | 0x2); | ||
199 | else | ||
200 | rflags |= 0x3; | ||
201 | } | ||
198 | } else { | 202 | } else { |
199 | if (pteflags & _PAGE_RWX) | 203 | if (pteflags & _PAGE_RWX) |
200 | rflags |= 0x2; | 204 | rflags |= 0x2; |
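With the MMU_FTR_KERNEL_RO bit introduced in the mmu.h hunk earlier, read-only kernel mappings use the ISA 2.04 ppp encoding 0b110 (HPTE_R_PP0 | 0x2) only when the MMU advertises it, and fall back to 0x3 otherwise. A simplified stand-alone sketch of that feature-gated selection; the constant value and the feature test are illustrative stand-ins, not the kernel's definitions:

    #include <stdbool.h>
    #include <stdio.h>

    #define DEMO_HPTE_R_PP0 0x8000000000000000ULL   /* stand-in for HPTE_R_PP0 */

    static unsigned long long ro_rflags(bool mmu_has_kernel_ro)
    {
            if (mmu_has_kernel_ro)
                    return DEMO_HPTE_R_PP0 | 0x2;   /* ppp = 0b110 */
            return 0x3;                             /* pre-ISA-2.04 fallback */
    }

    int main(void)
    {
            printf("with KERNEL_RO:    %#llx\n", ro_rflags(true));
            printf("without KERNEL_RO: %#llx\n", ro_rflags(false));
            return 0;
    }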
@@ -1029,6 +1033,10 @@ void hash__early_init_mmu_secondary(void) | |||
1029 | { | 1033 | { |
1030 | /* Initialize hash table for that CPU */ | 1034 | /* Initialize hash table for that CPU */ |
1031 | if (!firmware_has_feature(FW_FEATURE_LPAR)) { | 1035 | if (!firmware_has_feature(FW_FEATURE_LPAR)) { |
1036 | |||
1037 | if (cpu_has_feature(CPU_FTR_POWER9_DD1)) | ||
1038 | update_hid_for_hash(); | ||
1039 | |||
1032 | if (!cpu_has_feature(CPU_FTR_ARCH_300)) | 1040 | if (!cpu_has_feature(CPU_FTR_ARCH_300)) |
1033 | mtspr(SPRN_SDR1, _SDR1); | 1041 | mtspr(SPRN_SDR1, _SDR1); |
1034 | else | 1042 | else |
diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c index ed7bddc456b7..688b54517655 100644 --- a/arch/powerpc/mm/pgtable-radix.c +++ b/arch/powerpc/mm/pgtable-radix.c | |||
@@ -388,6 +388,10 @@ void radix__early_init_mmu_secondary(void) | |||
388 | * update partition table control register and UPRT | 388 | * update partition table control register and UPRT |
389 | */ | 389 | */ |
390 | if (!firmware_has_feature(FW_FEATURE_LPAR)) { | 390 | if (!firmware_has_feature(FW_FEATURE_LPAR)) { |
391 | |||
392 | if (cpu_has_feature(CPU_FTR_POWER9_DD1)) | ||
393 | update_hid_for_radix(); | ||
394 | |||
391 | lpcr = mfspr(SPRN_LPCR); | 395 | lpcr = mfspr(SPRN_LPCR); |
392 | mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR); | 396 | mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR); |
393 | 397 | ||
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c index bda8c43be78a..3493cf4e0452 100644 --- a/arch/powerpc/mm/tlb-radix.c +++ b/arch/powerpc/mm/tlb-radix.c | |||
@@ -50,6 +50,8 @@ static inline void _tlbiel_pid(unsigned long pid, unsigned long ric) | |||
50 | for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) { | 50 | for (set = 0; set < POWER9_TLB_SETS_RADIX ; set++) { |
51 | __tlbiel_pid(pid, set, ric); | 51 | __tlbiel_pid(pid, set, ric); |
52 | } | 52 | } |
53 | if (cpu_has_feature(CPU_FTR_POWER9_DD1)) | ||
54 | asm volatile(PPC_INVALIDATE_ERAT : : :"memory"); | ||
53 | return; | 55 | return; |
54 | } | 56 | } |
55 | 57 | ||
@@ -83,6 +85,8 @@ static inline void _tlbiel_va(unsigned long va, unsigned long pid, | |||
83 | asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1) | 85 | asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1) |
84 | : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); | 86 | : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); |
85 | asm volatile("ptesync": : :"memory"); | 87 | asm volatile("ptesync": : :"memory"); |
88 | if (cpu_has_feature(CPU_FTR_POWER9_DD1)) | ||
89 | asm volatile(PPC_INVALIDATE_ERAT : : :"memory"); | ||
86 | } | 90 | } |
87 | 91 | ||
88 | static inline void _tlbie_va(unsigned long va, unsigned long pid, | 92 | static inline void _tlbie_va(unsigned long va, unsigned long pid, |
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index b23c76b42d6e..165ecdd24d22 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig | |||
@@ -43,6 +43,7 @@ config SPARC | |||
43 | select ARCH_HAS_SG_CHAIN | 43 | select ARCH_HAS_SG_CHAIN |
44 | select CPU_NO_EFFICIENT_FFS | 44 | select CPU_NO_EFFICIENT_FFS |
45 | select HAVE_ARCH_HARDENED_USERCOPY | 45 | select HAVE_ARCH_HARDENED_USERCOPY |
46 | select PROVE_LOCKING_SMALL if PROVE_LOCKING | ||
46 | 47 | ||
47 | config SPARC32 | 48 | config SPARC32 |
48 | def_bool !64BIT | 49 | def_bool !64BIT |
@@ -89,6 +90,14 @@ config ARCH_DEFCONFIG | |||
89 | config ARCH_PROC_KCORE_TEXT | 90 | config ARCH_PROC_KCORE_TEXT |
90 | def_bool y | 91 | def_bool y |
91 | 92 | ||
93 | config ARCH_ATU | ||
94 | bool | ||
95 | default y if SPARC64 | ||
96 | |||
97 | config ARCH_DMA_ADDR_T_64BIT | ||
98 | bool | ||
99 | default y if ARCH_ATU | ||
100 | |||
92 | config IOMMU_HELPER | 101 | config IOMMU_HELPER |
93 | bool | 102 | bool |
94 | default y if SPARC64 | 103 | default y if SPARC64 |
@@ -304,6 +313,20 @@ config ARCH_SPARSEMEM_ENABLE | |||
304 | config ARCH_SPARSEMEM_DEFAULT | 313 | config ARCH_SPARSEMEM_DEFAULT |
305 | def_bool y if SPARC64 | 314 | def_bool y if SPARC64 |
306 | 315 | ||
316 | config FORCE_MAX_ZONEORDER | ||
317 | int "Maximum zone order" | ||
318 | default "13" | ||
319 | help | ||
320 | The kernel memory allocator divides physically contiguous memory | ||
321 | blocks into "zones", where each zone is a power of two number of | ||
322 | pages. This option selects the largest power of two that the kernel | ||
323 | keeps in the memory allocator. If you need to allocate very large | ||
324 | blocks of physically contiguous memory, then you may need to | ||
325 | increase this value. | ||
326 | |||
327 | This config option is actually maximum order plus one. For example, | ||
328 | a value of 13 means that the largest free memory block is 2^12 pages. | ||
329 | |||
307 | source "mm/Kconfig" | 330 | source "mm/Kconfig" |
308 | 331 | ||
309 | if SPARC64 | 332 | if SPARC64 |
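The FORCE_MAX_ZONEORDER help text above can be made concrete: with the default of 13 the largest buddy block is 2^12 pages, which on sparc64's 8 KB base pages (an assumption stated in the code below) comes to 32 MB. A quick check of the arithmetic:

    #include <stdio.h>

    int main(void)
    {
            unsigned long page_size = 8UL * 1024;           /* assumed 8 KB pages */
            unsigned long max_order = 13;                   /* Kconfig default */
            unsigned long pages = 1UL << (max_order - 1);   /* 2^12 pages */

            printf("largest block: %lu pages = %lu MB\n",
                   pages, pages * page_size >> 20);
            return 0;
    }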
diff --git a/arch/sparc/include/asm/hypervisor.h b/arch/sparc/include/asm/hypervisor.h index 666d5ba230d2..73cb8978df58 100644 --- a/arch/sparc/include/asm/hypervisor.h +++ b/arch/sparc/include/asm/hypervisor.h | |||
@@ -2335,6 +2335,348 @@ unsigned long sun4v_vintr_set_target(unsigned long dev_handle, | |||
2335 | */ | 2335 | */ |
2336 | #define HV_FAST_PCI_MSG_SETVALID 0xd3 | 2336 | #define HV_FAST_PCI_MSG_SETVALID 0xd3 |
2337 | 2337 | ||
2338 | /* PCI IOMMU v2 definitions and services | ||
2339 | * | ||
2340 | * While the PCI IO definitions above are still valid, IOMMU v2 adds new PCI IO | ||
2341 | * definitions and services. | ||
2342 | * | ||
2343 | * CTE Clump Table Entry. First level table entry in the ATU. | ||
2344 | * | ||
2345 | * pci_device_list | ||
2346 | * A 32-bit aligned list of pci_devices. | ||
2347 | * | ||
2348 | * pci_device_listp | ||
2349 | * real address of a pci_device_list. 32-bit aligned. | ||
2350 | * | ||
2351 | * iotte IOMMU translation table entry. | ||
2352 | * | ||
2353 | * iotte_attributes | ||
2354 | * IO Attributes for IOMMU v2 mappings. In addition to | ||
2355 | * read and write, IOMMU v2 supports relaxed ordering | ||
2356 | * | ||
2357 | * io_page_list A 64-bit aligned list of real addresses. Each real | ||
2358 | * address in an io_page_list must be properly aligned | ||
2359 | * to the pagesize of the given IOTSB. | ||
2360 | * | ||
2361 | * io_page_list_p Real address of an io_page_list, 64-bit aligned. | ||
2362 | * | ||
2363 | * IOTSB IO Translation Storage Buffer. An aligned table of | ||
2364 | * IOTTEs. Each IOTSB has a pagesize, table size, and | ||
2365 | * virtual address associated with it that must match | ||
2366 | * a pagesize and table size supported by the underlying | ||
2367 | * hardware implementation. The alignment requirements | ||
2368 | * for an IOTSB depend on the pagesize used for that IOTSB. | ||
2369 | * Each IOTTE in an IOTSB maps one pagesize-sized page. | ||
2370 | * The size of the IOTSB dictates how large of a virtual | ||
2371 | * address space the IOTSB is capable of mapping. | ||
2372 | * | ||
2373 | * iotsb_handle An opaque identifier for an IOTSB. A devhandle plus | ||
2374 | * iotsb_handle represents a binding of an IOTSB to a | ||
2375 | * PCI root complex. | ||
2376 | * | ||
2377 | * iotsb_index Zero-based IOTTE number within an IOTSB. | ||
2378 | */ | ||
2379 | |||
2380 | /* The index_count argument consists of two fields: | ||
2381 | * bits 63:48 #iottes and bits 47:0 iotsb_index | ||
2382 | */ | ||
2383 | #define HV_PCI_IOTSB_INDEX_COUNT(__iottes, __iotsb_index) \ | ||
2384 | (((u64)(__iottes) << 48UL) | ((u64)(__iotsb_index))) | ||
2385 | |||
2386 | /* pci_iotsb_conf() | ||
2387 | * TRAP: HV_FAST_TRAP | ||
2388 | * FUNCTION: HV_FAST_PCI_IOTSB_CONF | ||
2389 | * ARG0: devhandle | ||
2390 | * ARG1: r_addr | ||
2391 | * ARG2: size | ||
2392 | * ARG3: pagesize | ||
2393 | * ARG4: iova | ||
2394 | * RET0: status | ||
2395 | * RET1: iotsb_handle | ||
2396 | * ERRORS: EINVAL Invalid devhandle, size, iova, or pagesize | ||
2397 | * EBADALIGN r_addr is not properly aligned | ||
2398 | * ENORADDR r_addr is not a valid real address | ||
2399 | * ETOOMANY No further IOTSBs may be configured | ||
2400 | * EBUSY Duplicate devhandle, r_addr, iova combination | ||
2401 | * | ||
2402 | * Create an IOTSB suitable for the PCI root complex identified by devhandle, | ||
2403 | * for the DMA virtual address defined by the argument iova. | ||
2404 | * | ||
2405 | * r_addr is the properly aligned base address of the IOTSB and size is the | ||
2406 | * IOTSB (table) size in bytes. The IOTSB is required to be zeroed prior to | ||
2407 | * being configured. If it contains any values other than zeros then the | ||
2408 | * behavior is undefined. | ||
2409 | * | ||
2410 | * pagesize is the size of each page in the IOTSB. Note that the combination of | ||
2411 | * size (table size) and pagesize must be valid. | ||
2412 | * | ||
2413 | * iova is the DMA virtual address this IOTSB will map. | ||
2414 | * | ||
2415 | * If successful, the opaque 64-bit handle iotsb_handle is returned in ret1. | ||
2416 | * Once configured, privileged access to the IOTSB memory is prohibited and | ||
2417 | * creates undefined behavior. The only permitted access is indirect via these | ||
2418 | * services. | ||
2419 | */ | ||
2420 | #define HV_FAST_PCI_IOTSB_CONF 0x190 | ||
2421 | |||
2422 | /* pci_iotsb_info() | ||
2423 | * TRAP: HV_FAST_TRAP | ||
2424 | * FUNCTION: HV_FAST_PCI_IOTSB_INFO | ||
2425 | * ARG0: devhandle | ||
2426 | * ARG1: iotsb_handle | ||
2427 | * RET0: status | ||
2428 | * RET1: r_addr | ||
2429 | * RET2: size | ||
2430 | * RET3: pagesize | ||
2431 | * RET4: iova | ||
2432 | * RET5: #bound | ||
2433 | * ERRORS: EINVAL Invalid devhandle or iotsb_handle | ||
2434 | * | ||
2435 | * This service returns configuration information about an IOTSB previously | ||
2436 | * created with pci_iotsb_conf. | ||
2437 | * | ||
2438 | * iotsb_handle value 0 may be used with this service to inquire about the | ||
2439 | * legacy IOTSB that may or may not exist. If the service succeeds, the return | ||
2440 | * values describe the legacy IOTSB and I/O virtual addresses mapped by that | ||
2441 | * table. However, the table base address r_addr may contain the value -1 which | ||
2442 | * indicates a memory range that cannot be accessed or be reclaimed. | ||
2443 | * | ||
2444 | * The return value #bound contains the number of PCI devices that iotsb_handle | ||
2445 | * is currently bound to. | ||
2446 | */ | ||
2447 | #define HV_FAST_PCI_IOTSB_INFO 0x191 | ||
2448 | |||
2449 | /* pci_iotsb_unconf() | ||
2450 | * TRAP: HV_FAST_TRAP | ||
2451 | * FUNCTION: HV_FAST_PCI_IOTSB_UNCONF | ||
2452 | * ARG0: devhandle | ||
2453 | * ARG1: iotsb_handle | ||
2454 | * RET0: status | ||
2455 | * ERRORS: EINVAL Invalid devhandle or iotsb_handle | ||
2456 | * EBUSY The IOTSB is bound and may not be unconfigured | ||
2457 | * | ||
2458 | * This service unconfigures the IOTSB identified by the devhandle and | ||
2459 | * iotsb_handle arguments, previously created with pci_iotsb_conf. | ||
2460 | * The IOTSB must not be currently bound to any device or the service will fail | ||
2461 | * | ||
2462 | * If the call succeeds, iotsb_handle is no longer valid. | ||
2463 | */ | ||
2464 | #define HV_FAST_PCI_IOTSB_UNCONF 0x192 | ||
2465 | |||
2466 | /* pci_iotsb_bind() | ||
2467 | * TRAP: HV_FAST_TRAP | ||
2468 | * FUNCTION: HV_FAST_PCI_IOTSB_BIND | ||
2469 | * ARG0: devhandle | ||
2470 | * ARG1: iotsb_handle | ||
2471 | * ARG2: pci_device | ||
2472 | * RET0: status | ||
2473 | * ERRORS: EINVAL Invalid devhandle, iotsb_handle, or pci_device | ||
2474 | * EBUSY A PCI function is already bound to an IOTSB at the same | ||
2475 | * address range as specified by devhandle, iotsb_handle. | ||
2476 | * | ||
2477 | * This service binds the PCI function specified by the argument pci_device to | ||
2478 | * the IOTSB specified by the arguments devhandle and iotsb_handle. | ||
2479 | * | ||
2480 | * The PCI device function is bound to the specified IOTSB with the IOVA range | ||
2481 | * specified when the IOTSB was configured via pci_iotsb_conf. If the function | ||
2482 | * is already bound then it is unbound first. | ||
2483 | */ | ||
2484 | #define HV_FAST_PCI_IOTSB_BIND 0x193 | ||
2485 | |||
2486 | /* pci_iotsb_unbind() | ||
2487 | * TRAP: HV_FAST_TRAP | ||
2488 | * FUNCTION: HV_FAST_PCI_IOTSB_UNBIND | ||
2489 | * ARG0: devhandle | ||
2490 | * ARG1: iotsb_handle | ||
2491 | * ARG2: pci_device | ||
2492 | * RET0: status | ||
2493 | * ERRORS: EINVAL Invalid devhandle, iotsb_handle, or pci_device | ||
2494 | * ENOMAP The PCI function was not bound to the specified IOTSB | ||
2495 | * | ||
2496 | * This service unbinds the PCI device specified by the argument pci_device | ||
2497 | * from the IOTSB identified by the arguments devhandle and iotsb_handle. | ||
2498 | * | ||
2499 | * If the PCI device is not bound to the specified IOTSB then this service will | ||
2500 | * fail with status ENOMAP | ||
2501 | */ | ||
2502 | #define HV_FAST_PCI_IOTSB_UNBIND 0x194 | ||
2503 | |||
2504 | /* pci_iotsb_get_binding() | ||
2505 | * TRAP: HV_FAST_TRAP | ||
2506 | * FUNCTION: HV_FAST_PCI_IOTSB_GET_BINDING | ||
2507 | * ARG0: devhandle | ||
2508 | * ARG1: iotsb_handle | ||
2509 | * ARG2: iova | ||
2510 | * RET0: status | ||
2511 | * RET1: iotsb_handle | ||
2512 | * ERRORS: EINVAL Invalid devhandle, pci_device, or iova | ||
2513 | * ENOMAP The PCI function is not bound to an IOTSB at iova | ||
2514 | * | ||
2515 | * This service returns the IOTSB binding, iotsb_handle, for a given pci_device | ||
2516 | * and DMA virtual address, iova. | ||
2517 | * | ||
2518 | * iova must be the base address of a DMA virtual address range as defined by | ||
2519 | * the iommu-address-ranges property in the root complex device node defined | ||
2520 | * by the argument devhandle. | ||
2521 | */ | ||
2522 | #define HV_FAST_PCI_IOTSB_GET_BINDING 0x195 | ||
2523 | |||
2524 | /* pci_iotsb_map() | ||
2525 | * TRAP: HV_FAST_TRAP | ||
2526 | * FUNCTION: HV_FAST_PCI_IOTSB_MAP | ||
2527 | * ARG0: devhandle | ||
2528 | * ARG1: iotsb_handle | ||
2529 | * ARG2: index_count | ||
2530 | * ARG3: iotte_attributes | ||
2531 | * ARG4: io_page_list_p | ||
2532 | * RET0: status | ||
2533 | * RET1: #mapped | ||
2534 | * ERRORS: EINVAL Invalid devhandle, iotsb_handle, #iottes, | ||
2535 | * iotsb_index or iotte_attributes | ||
2536 | * EBADALIGN Improperly aligned io_page_list_p or I/O page | ||
2537 | * address in the I/O page list. | ||
2538 | * ENORADDR Invalid io_page_list_p or I/O page address in | ||
2539 | * the I/O page list. | ||
2540 | * | ||
2541 | * This service creates and flushes mappings in the IOTSB defined by the | ||
2542 | * arguments devhandle, iotsb. | ||
2543 | * | ||
2544 | * The index_count argument consists of two fields. Bits 63:48 contain #iottes | ||
2545 | * and bits 47:0 contain iotsb_index | ||
2546 | * | ||
2547 | * The first mapping is created in the IOTSB index specified by iotsb_index. | ||
2548 | * Subsequent mappings are created at iotsb_index+1 and so on. | ||
2549 | * | ||
2550 | * The attributes of each mapping are defined by the argument iotte_attributes. | ||
2551 | * | ||
2552 | * The io_page_list_p specifies the real address of the 64-bit-aligned list of | ||
2553 | * #iottes I/O page addresses. Each page address must be a properly aligned | ||
2554 | * real address of a page to be mapped in the IOTSB. The first entry in the I/O | ||
2555 | * page list contains the real address of the first page, the 2nd entry for the | ||
2556 | * 2nd page, and so on. | ||
2557 | * | ||
2558 | * #iottes must be greater than zero. | ||
2559 | * | ||
2560 | * The return value #mapped is the actual number of mappings created, which may | ||
2561 | * be less than or equal to the argument #iottes. If the function returns | ||
2562 | * successfully with a #mapped value less than the requested #iottes then the | ||
2563 | * caller should continue to invoke the service with updated iotsb_index, | ||
2564 | * #iottes, and io_page_list_p arguments until all pages are mapped. | ||
2565 | * | ||
2566 | * This service must not be used to demap a mapping. In other words, all | ||
2567 | * mappings must be valid and have one or both of the RW attribute bits set. | ||
2568 | * | ||
2569 | * Note: | ||
2570 | * It is implementation-defined whether I/O page real address validity checking | ||
2571 | * is done at time mappings are established or deferred until they are | ||
2572 | * accessed. | ||
2573 | */ | ||
2574 | #define HV_FAST_PCI_IOTSB_MAP 0x196 | ||
2575 | |||
2576 | /* pci_iotsb_map_one() | ||
2577 | * TRAP: HV_FAST_TRAP | ||
2578 | * FUNCTION: HV_FAST_PCI_IOTSB_MAP_ONE | ||
2579 | * ARG0: devhandle | ||
2580 | * ARG1: iotsb_handle | ||
2581 | * ARG2: iotsb_index | ||
2582 | * ARG3: iotte_attributes | ||
2583 | * ARG4: r_addr | ||
2584 | * RET0: status | ||
2585 | * ERRORS: EINVAL Invalid devhandle, iotsb_handle, iotsb_index | ||
2586 | * or iotte_attributes | ||
2587 | * EBADALIGN Improperly aligned r_addr | ||
2588 | * ENORADDR Invalid r_addr | ||
2589 | * | ||
2590 | * This service creates and flushes a single mapping in the IOTSB defined by the | ||
2591 | * arguments devhandle, iotsb. | ||
2592 | * | ||
2593 | * The mapping for the page at r_addr is created at the IOTSB index specified by | ||
2594 | * iotsb_index with the attributes iotte_attributes. | ||
2595 | * | ||
2596 | * This service must not be used to demap a mapping. In other words, the mapping | ||
2597 | * must be valid and have one or both of the RW attribute bits set. | ||
2598 | * | ||
2599 | * Note: | ||
2600 | * It is implementation-defined whether I/O page real address validity checking | ||
2601 | * is done at time mappings are established or deferred until they are | ||
2602 | * accessed. | ||
2603 | */ | ||
2604 | #define HV_FAST_PCI_IOTSB_MAP_ONE 0x197 | ||
2605 | |||
2606 | /* pci_iotsb_demap() | ||
2607 | * TRAP: HV_FAST_TRAP | ||
2608 | * FUNCTION: HV_FAST_PCI_IOTSB_DEMAP | ||
2609 | * ARG0: devhandle | ||
2610 | * ARG1: iotsb_handle | ||
2611 | * ARG2: iotsb_index | ||
2612 | * ARG3: #iottes | ||
2613 | * RET0: status | ||
2614 | * RET1: #unmapped | ||
2615 | * ERRORS: EINVAL Invalid devhandle, iotsb_handle, iotsb_index or #iottes | ||
2616 | * | ||
2617 | * This service unmaps and flushes up to #iottes mappings starting at index | ||
2618 | * iotsb_index from the IOTSB defined by the arguments devhandle, iotsb. | ||
2619 | * | ||
2620 | * #iottes must be greater than zero. | ||
2621 | * | ||
2622 | * The actual number of IOTTEs unmapped is returned in #unmapped and may be less | ||
2623 | * than or equal to the requested number of IOTTEs, #iottes. | ||
2624 | * | ||
2625 | * If #unmapped is less than #iottes, the caller should continue to invoke this | ||
2626 | * service with updated iotsb_index and #iottes arguments until all pages are | ||
2627 | * demapped. | ||
2628 | */ | ||
2629 | #define HV_FAST_PCI_IOTSB_DEMAP 0x198 | ||
2630 | |||
2631 | /* pci_iotsb_getmap() | ||
2632 | * TRAP: HV_FAST_TRAP | ||
2633 | * FUNCTION: HV_FAST_PCI_IOTSB_GETMAP | ||
2634 | * ARG0: devhandle | ||
2635 | * ARG1: iotsb_handle | ||
2636 | * ARG2: iotsb_index | ||
2637 | * RET0: status | ||
2638 | * RET1: r_addr | ||
2639 | * RET2: iotte_attributes | ||
2640 | * ERRORS: EINVAL Invalid devhandle, iotsb_handle, or iotsb_index | ||
2641 | * ENOMAP No mapping was found | ||
2642 | * | ||
2643 | * This service returns the mapping specified by index iotsb_index from the | ||
2644 | * IOTSB defined by the arguments devhandle, iotsb. | ||
2645 | * | ||
2646 | * Upon success, the real address of the mapping shall be returned in | ||
2647 | * r_addr and the IOTTE mapping attributes shall be returned in | ||
2648 | * iotte_attributes. | ||
2649 | * | ||
2650 | * The return value iotte_attributes may not include optional features used in | ||
2651 | * the call to create the mapping. | ||
2652 | */ | ||
2653 | #define HV_FAST_PCI_IOTSB_GETMAP 0x199 | ||
2654 | |||
2655 | /* pci_iotsb_sync_mappings() | ||
2656 | * TRAP: HV_FAST_TRAP | ||
2657 | * FUNCTION: HV_FAST_PCI_IOTSB_SYNC_MAPPINGS | ||
2658 | * ARG0: devhandle | ||
2659 | * ARG1: iotsb_handle | ||
2660 | * ARG2: iotsb_index | ||
2661 | * ARG3: #iottes | ||
2662 | * RET0: status | ||
2663 | * RET1: #synced | ||
2664 | * ERRORS: EINVAL Invalid devhandle, iotsb_handle, iotsb_index, or #iottes | ||
2665 | * | ||
2666 | * This service synchronizes #iottes mappings starting at index iotsb_index in | ||
2667 | * the IOTSB defined by the arguments devhandle, iotsb. | ||
2668 | * | ||
2669 | * #iottes must be greater than zero. | ||
2670 | * | ||
2671 | * The actual number of IOTTEs synchronized is returned in #synced, which may | ||
2672 | * be less than or equal to the requested number, #iottes. | ||
2673 | * | ||
2674 | * Upon a successful return, if #synced is less than #iottes, the caller should | ||
2675 | * continue to invoke this service with updated iotsb_index and #iottes | ||
2676 | * arguments until all pages are synchronized. | ||
2677 | */ | ||
2678 | #define HV_FAST_PCI_IOTSB_SYNC_MAPPINGS 0x19a | ||
2679 | |||
2338 | /* Logical Domain Channel services. */ | 2680 | /* Logical Domain Channel services. */ |
2339 | 2681 | ||
2340 | #define LDC_CHANNEL_DOWN 0 | 2682 | #define LDC_CHANNEL_DOWN 0 |
@@ -2993,6 +3335,7 @@ unsigned long sun4v_m7_set_perfreg(unsigned long reg_num, | |||
2993 | #define HV_GRP_SDIO 0x0108 | 3335 | #define HV_GRP_SDIO 0x0108 |
2994 | #define HV_GRP_SDIO_ERR 0x0109 | 3336 | #define HV_GRP_SDIO_ERR 0x0109 |
2995 | #define HV_GRP_REBOOT_DATA 0x0110 | 3337 | #define HV_GRP_REBOOT_DATA 0x0110 |
3338 | #define HV_GRP_ATU 0x0111 | ||
2996 | #define HV_GRP_M7_PERF 0x0114 | 3339 | #define HV_GRP_M7_PERF 0x0114 |
2997 | #define HV_GRP_NIAG_PERF 0x0200 | 3340 | #define HV_GRP_NIAG_PERF 0x0200 |
2998 | #define HV_GRP_FIRE_PERF 0x0201 | 3341 | #define HV_GRP_FIRE_PERF 0x0201 |
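The IOTSB services documented above are driven by index_count values packed with HV_PCI_IOTSB_INDEX_COUNT (#iottes in bits 63:48, iotsb_index in bits 47:0), and a typical lifetime runs conf, bind, map or map_one, demap, unbind, unconf. A stand-alone sketch of the index_count packing and unpacking, reusing the macro exactly as defined above; the surrounding program is illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t u64;

    #define HV_PCI_IOTSB_INDEX_COUNT(__iottes, __iotsb_index) \
            (((u64)(__iottes) << 48UL) | ((u64)(__iotsb_index)))

    int main(void)
    {
            u64 iottes = 512;                       /* number of entries to map */
            u64 index  = 0x12345;                   /* starting IOTSB index */
            u64 index_count = HV_PCI_IOTSB_INDEX_COUNT(iottes, index);

            printf("index_count = %#llx\n", (unsigned long long)index_count);
            printf("#iottes     = %llu\n", (unsigned long long)(index_count >> 48));
            printf("iotsb_index = %#llx\n",
                   (unsigned long long)(index_count & ((1ULL << 48) - 1)));
            return 0;
    }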
diff --git a/arch/sparc/include/asm/iommu_64.h b/arch/sparc/include/asm/iommu_64.h index cd0d69fa7592..f24f356f2503 100644 --- a/arch/sparc/include/asm/iommu_64.h +++ b/arch/sparc/include/asm/iommu_64.h | |||
@@ -24,8 +24,36 @@ struct iommu_arena { | |||
24 | unsigned int limit; | 24 | unsigned int limit; |
25 | }; | 25 | }; |
26 | 26 | ||
27 | #define ATU_64_SPACE_SIZE 0x800000000 /* 32G */ | ||
28 | |||
29 | /* Data structures for SPARC ATU architecture */ | ||
30 | struct atu_iotsb { | ||
31 | void *table; /* IOTSB table base virtual addr*/ | ||
32 | u64 ra; /* IOTSB table real addr */ | ||
33 | u64 dvma_size; /* ranges[3].size or OS selected 32G size */ | ||
34 | u64 dvma_base; /* ranges[3].base */ | ||
35 | u64 table_size; /* IOTSB table size */ | ||
36 | u64 page_size; /* IO PAGE size for IOTSB */ | ||
37 | u32 iotsb_num; /* tsbnum is same as iotsb_handle */ | ||
38 | }; | ||
39 | |||
40 | struct atu_ranges { | ||
41 | u64 base; | ||
42 | u64 size; | ||
43 | }; | ||
44 | |||
45 | struct atu { | ||
46 | struct atu_ranges *ranges; | ||
47 | struct atu_iotsb *iotsb; | ||
48 | struct iommu_map_table tbl; | ||
49 | u64 base; | ||
50 | u64 size; | ||
51 | u64 dma_addr_mask; | ||
52 | }; | ||
53 | |||
27 | struct iommu { | 54 | struct iommu { |
28 | struct iommu_map_table tbl; | 55 | struct iommu_map_table tbl; |
56 | struct atu *atu; | ||
29 | spinlock_t lock; | 57 | spinlock_t lock; |
30 | u32 dma_addr_mask; | 58 | u32 dma_addr_mask; |
31 | iopte_t *page_table; | 59 | iopte_t *page_table; |
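struct iommu now optionally carries an ATU next to the legacy map table, and, as the dma_supported() and dma_4v_alloc_coherent() hunks further down show, mappings for devices whose DMA mask exceeds 32 bits are steered to atu->tbl while everything else stays on the legacy table. A stand-alone sketch of that selection; DMA_BIT_MASK is reproduced from its usual definition and the structures are trimmed stand-ins:

    #include <stdint.h>
    #include <stdio.h>

    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

    struct demo_tbl { const char *name; };

    struct demo_atu   { struct demo_tbl tbl; };
    struct demo_iommu { struct demo_tbl tbl; struct demo_atu *atu; };

    static struct demo_tbl *pick_table(struct demo_iommu *iommu, uint64_t mask)
    {
            if (mask <= DMA_BIT_MASK(32) || !iommu->atu)
                    return &iommu->tbl;             /* legacy 32-bit IOMMU */
            return &iommu->atu->tbl;                /* ATU: > 32-bit DVMA space */
    }

    int main(void)
    {
            struct demo_atu atu = { .tbl = { "atu" } };
            struct demo_iommu iommu = { .tbl = { "legacy" }, .atu = &atu };

            printf("32-bit mask -> %s\n", pick_table(&iommu, DMA_BIT_MASK(32))->name);
            printf("64-bit mask -> %s\n", pick_table(&iommu, DMA_BIT_MASK(64))->name);
            return 0;
    }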
diff --git a/arch/sparc/kernel/hvapi.c b/arch/sparc/kernel/hvapi.c index 662500fa555f..267731234ce8 100644 --- a/arch/sparc/kernel/hvapi.c +++ b/arch/sparc/kernel/hvapi.c | |||
@@ -39,6 +39,7 @@ static struct api_info api_table[] = { | |||
39 | { .group = HV_GRP_SDIO, }, | 39 | { .group = HV_GRP_SDIO, }, |
40 | { .group = HV_GRP_SDIO_ERR, }, | 40 | { .group = HV_GRP_SDIO_ERR, }, |
41 | { .group = HV_GRP_REBOOT_DATA, }, | 41 | { .group = HV_GRP_REBOOT_DATA, }, |
42 | { .group = HV_GRP_ATU, .flags = FLAG_PRE_API }, | ||
42 | { .group = HV_GRP_NIAG_PERF, .flags = FLAG_PRE_API }, | 43 | { .group = HV_GRP_NIAG_PERF, .flags = FLAG_PRE_API }, |
43 | { .group = HV_GRP_FIRE_PERF, }, | 44 | { .group = HV_GRP_FIRE_PERF, }, |
44 | { .group = HV_GRP_N2_CPU, }, | 45 | { .group = HV_GRP_N2_CPU, }, |
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index 5c615abff030..852a3291db96 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c | |||
@@ -760,8 +760,12 @@ int dma_supported(struct device *dev, u64 device_mask) | |||
760 | struct iommu *iommu = dev->archdata.iommu; | 760 | struct iommu *iommu = dev->archdata.iommu; |
761 | u64 dma_addr_mask = iommu->dma_addr_mask; | 761 | u64 dma_addr_mask = iommu->dma_addr_mask; |
762 | 762 | ||
763 | if (device_mask >= (1UL << 32UL)) | 763 | if (device_mask > DMA_BIT_MASK(32)) { |
764 | return 0; | 764 | if (iommu->atu) |
765 | dma_addr_mask = iommu->atu->dma_addr_mask; | ||
766 | else | ||
767 | return 0; | ||
768 | } | ||
765 | 769 | ||
766 | if ((device_mask & dma_addr_mask) == dma_addr_mask) | 770 | if ((device_mask & dma_addr_mask) == dma_addr_mask) |
767 | return 1; | 771 | return 1; |
diff --git a/arch/sparc/kernel/iommu_common.h b/arch/sparc/kernel/iommu_common.h index b40cec252905..828493329f68 100644 --- a/arch/sparc/kernel/iommu_common.h +++ b/arch/sparc/kernel/iommu_common.h | |||
@@ -13,7 +13,6 @@ | |||
13 | #include <linux/scatterlist.h> | 13 | #include <linux/scatterlist.h> |
14 | #include <linux/device.h> | 14 | #include <linux/device.h> |
15 | #include <linux/iommu-helper.h> | 15 | #include <linux/iommu-helper.h> |
16 | #include <linux/scatterlist.h> | ||
17 | 16 | ||
18 | #include <asm/iommu.h> | 17 | #include <asm/iommu.h> |
19 | 18 | ||
diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index db57d8acdc01..06981cc716b6 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c | |||
@@ -44,6 +44,9 @@ static struct vpci_version vpci_versions[] = { | |||
44 | { .major = 1, .minor = 1 }, | 44 | { .major = 1, .minor = 1 }, |
45 | }; | 45 | }; |
46 | 46 | ||
47 | static unsigned long vatu_major = 1; | ||
48 | static unsigned long vatu_minor = 1; | ||
49 | |||
47 | #define PGLIST_NENTS (PAGE_SIZE / sizeof(u64)) | 50 | #define PGLIST_NENTS (PAGE_SIZE / sizeof(u64)) |
48 | 51 | ||
49 | struct iommu_batch { | 52 | struct iommu_batch { |
@@ -69,34 +72,57 @@ static inline void iommu_batch_start(struct device *dev, unsigned long prot, uns | |||
69 | } | 72 | } |
70 | 73 | ||
71 | /* Interrupts must be disabled. */ | 74 | /* Interrupts must be disabled. */ |
72 | static long iommu_batch_flush(struct iommu_batch *p) | 75 | static long iommu_batch_flush(struct iommu_batch *p, u64 mask) |
73 | { | 76 | { |
74 | struct pci_pbm_info *pbm = p->dev->archdata.host_controller; | 77 | struct pci_pbm_info *pbm = p->dev->archdata.host_controller; |
78 | u64 *pglist = p->pglist; | ||
79 | u64 index_count; | ||
75 | unsigned long devhandle = pbm->devhandle; | 80 | unsigned long devhandle = pbm->devhandle; |
76 | unsigned long prot = p->prot; | 81 | unsigned long prot = p->prot; |
77 | unsigned long entry = p->entry; | 82 | unsigned long entry = p->entry; |
78 | u64 *pglist = p->pglist; | ||
79 | unsigned long npages = p->npages; | 83 | unsigned long npages = p->npages; |
84 | unsigned long iotsb_num; | ||
85 | unsigned long ret; | ||
86 | long num; | ||
80 | 87 | ||
81 | /* VPCI maj=1, min=[0,1] only supports read and write */ | 88 | /* VPCI maj=1, min=[0,1] only supports read and write */ |
82 | if (vpci_major < 2) | 89 | if (vpci_major < 2) |
83 | prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE); | 90 | prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE); |
84 | 91 | ||
85 | while (npages != 0) { | 92 | while (npages != 0) { |
86 | long num; | 93 | if (mask <= DMA_BIT_MASK(32)) { |
87 | 94 | num = pci_sun4v_iommu_map(devhandle, | |
88 | num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry), | 95 | HV_PCI_TSBID(0, entry), |
89 | npages, prot, __pa(pglist)); | 96 | npages, |
90 | if (unlikely(num < 0)) { | 97 | prot, |
91 | if (printk_ratelimit()) | 98 | __pa(pglist)); |
92 | printk("iommu_batch_flush: IOMMU map of " | 99 | if (unlikely(num < 0)) { |
93 | "[%08lx:%08llx:%lx:%lx:%lx] failed with " | 100 | pr_err_ratelimited("%s: IOMMU map of [%08lx:%08llx:%lx:%lx:%lx] failed with status %ld\n", |
94 | "status %ld\n", | 101 | __func__, |
95 | devhandle, HV_PCI_TSBID(0, entry), | 102 | devhandle, |
96 | npages, prot, __pa(pglist), num); | 103 | HV_PCI_TSBID(0, entry), |
97 | return -1; | 104 | npages, prot, __pa(pglist), |
105 | num); | ||
106 | return -1; | ||
107 | } | ||
108 | } else { | ||
109 | index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry), | ||
110 | iotsb_num = pbm->iommu->atu->iotsb->iotsb_num; | ||
111 | ret = pci_sun4v_iotsb_map(devhandle, | ||
112 | iotsb_num, | ||
113 | index_count, | ||
114 | prot, | ||
115 | __pa(pglist), | ||
116 | &num); | ||
117 | if (unlikely(ret != HV_EOK)) { | ||
118 | pr_err_ratelimited("%s: ATU map of [%08lx:%lx:%llx:%lx:%lx] failed with status %ld\n", | ||
119 | __func__, | ||
120 | devhandle, iotsb_num, | ||
121 | index_count, prot, | ||
122 | __pa(pglist), ret); | ||
123 | return -1; | ||
124 | } | ||
98 | } | 125 | } |
99 | |||
100 | entry += num; | 126 | entry += num; |
101 | npages -= num; | 127 | npages -= num; |
102 | pglist += num; | 128 | pglist += num; |
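iommu_batch_flush() now takes the DMA mask of the mapping being built so a full page list can go either to the legacy pci_sun4v_iommu_map() call or, for masks above 32 bits, to the IOTSB map call; the add/new_entry/end helpers in the following hunks simply thread that mask through and flush whenever the per-cpu page list fills. A stand-alone sketch of the accumulate-then-flush pattern; the backend names are placeholders, not hypervisor calls:

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_PGLIST_NENTS 4

    struct demo_batch {
            uint64_t pglist[DEMO_PGLIST_NENTS];
            unsigned long npages;
    };

    static void flush(struct demo_batch *b, uint64_t mask)
    {
            const char *backend = (mask <= 0xffffffffULL) ? "iommu_map" : "iotsb_map";

            if (b->npages == 0)
                    return;
            printf("%s: flushing %lu pages\n", backend, b->npages);
            b->npages = 0;
    }

    static void add(struct demo_batch *b, uint64_t phys_page, uint64_t mask)
    {
            b->pglist[b->npages++] = phys_page;
            if (b->npages == DEMO_PGLIST_NENTS)     /* list full: flush early */
                    flush(b, mask);
    }

    int main(void)
    {
            struct demo_batch b = { .npages = 0 };
            uint64_t mask = ~0ULL;                  /* 64-bit capable device */

            for (int i = 0; i < 6; i++)
                    add(&b, 0x1000ULL * i, mask);
            flush(&b, mask);                        /* final partial flush */
            return 0;
    }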
@@ -108,19 +134,19 @@ static long iommu_batch_flush(struct iommu_batch *p) | |||
108 | return 0; | 134 | return 0; |
109 | } | 135 | } |
110 | 136 | ||
111 | static inline void iommu_batch_new_entry(unsigned long entry) | 137 | static inline void iommu_batch_new_entry(unsigned long entry, u64 mask) |
112 | { | 138 | { |
113 | struct iommu_batch *p = this_cpu_ptr(&iommu_batch); | 139 | struct iommu_batch *p = this_cpu_ptr(&iommu_batch); |
114 | 140 | ||
115 | if (p->entry + p->npages == entry) | 141 | if (p->entry + p->npages == entry) |
116 | return; | 142 | return; |
117 | if (p->entry != ~0UL) | 143 | if (p->entry != ~0UL) |
118 | iommu_batch_flush(p); | 144 | iommu_batch_flush(p, mask); |
119 | p->entry = entry; | 145 | p->entry = entry; |
120 | } | 146 | } |
121 | 147 | ||
122 | /* Interrupts must be disabled. */ | 148 | /* Interrupts must be disabled. */ |
123 | static inline long iommu_batch_add(u64 phys_page) | 149 | static inline long iommu_batch_add(u64 phys_page, u64 mask) |
124 | { | 150 | { |
125 | struct iommu_batch *p = this_cpu_ptr(&iommu_batch); | 151 | struct iommu_batch *p = this_cpu_ptr(&iommu_batch); |
126 | 152 | ||
@@ -128,28 +154,31 @@ static inline long iommu_batch_add(u64 phys_page) | |||
128 | 154 | ||
129 | p->pglist[p->npages++] = phys_page; | 155 | p->pglist[p->npages++] = phys_page; |
130 | if (p->npages == PGLIST_NENTS) | 156 | if (p->npages == PGLIST_NENTS) |
131 | return iommu_batch_flush(p); | 157 | return iommu_batch_flush(p, mask); |
132 | 158 | ||
133 | return 0; | 159 | return 0; |
134 | } | 160 | } |
135 | 161 | ||
136 | /* Interrupts must be disabled. */ | 162 | /* Interrupts must be disabled. */ |
137 | static inline long iommu_batch_end(void) | 163 | static inline long iommu_batch_end(u64 mask) |
138 | { | 164 | { |
139 | struct iommu_batch *p = this_cpu_ptr(&iommu_batch); | 165 | struct iommu_batch *p = this_cpu_ptr(&iommu_batch); |
140 | 166 | ||
141 | BUG_ON(p->npages >= PGLIST_NENTS); | 167 | BUG_ON(p->npages >= PGLIST_NENTS); |
142 | 168 | ||
143 | return iommu_batch_flush(p); | 169 | return iommu_batch_flush(p, mask); |
144 | } | 170 | } |
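Note how every batching helper above now carries the device's DMA mask down to the flush, which uses it to choose between the legacy pci_sun4v_iommu_map() call and the new IOTSB-based pci_sun4v_iotsb_map() call. A minimal self-contained sketch of that routing decision (DMA_BIT_MASK here is a local stand-in for the kernel macro; uses_atu() is an invented name):

    #include <stdbool.h>
    #include <stdint.h>

    /* local stand-in for the kernel's DMA_BIT_MASK() */
    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

    /* invented helper: true when a mask is wide enough to be routed to the ATU */
    static bool uses_atu(uint64_t dma_mask)
    {
            return dma_mask > DMA_BIT_MASK(32);
    }

The same comparison reappears in dma_4v_alloc_coherent(), dma_4v_map_page() and dma_4v_map_sg() below to pick &iommu->tbl or &atu->tbl.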
145 | 171 | ||
146 | static void *dma_4v_alloc_coherent(struct device *dev, size_t size, | 172 | static void *dma_4v_alloc_coherent(struct device *dev, size_t size, |
147 | dma_addr_t *dma_addrp, gfp_t gfp, | 173 | dma_addr_t *dma_addrp, gfp_t gfp, |
148 | unsigned long attrs) | 174 | unsigned long attrs) |
149 | { | 175 | { |
176 | u64 mask; | ||
150 | unsigned long flags, order, first_page, npages, n; | 177 | unsigned long flags, order, first_page, npages, n; |
151 | unsigned long prot = 0; | 178 | unsigned long prot = 0; |
152 | struct iommu *iommu; | 179 | struct iommu *iommu; |
180 | struct atu *atu; | ||
181 | struct iommu_map_table *tbl; | ||
153 | struct page *page; | 182 | struct page *page; |
154 | void *ret; | 183 | void *ret; |
155 | long entry; | 184 | long entry; |
@@ -174,14 +203,21 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size, | |||
174 | memset((char *)first_page, 0, PAGE_SIZE << order); | 203 | memset((char *)first_page, 0, PAGE_SIZE << order); |
175 | 204 | ||
176 | iommu = dev->archdata.iommu; | 205 | iommu = dev->archdata.iommu; |
206 | atu = iommu->atu; | ||
207 | |||
208 | mask = dev->coherent_dma_mask; | ||
209 | if (mask <= DMA_BIT_MASK(32)) | ||
210 | tbl = &iommu->tbl; | ||
211 | else | ||
212 | tbl = &atu->tbl; | ||
177 | 213 | ||
178 | entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, | 214 | entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL, |
179 | (unsigned long)(-1), 0); | 215 | (unsigned long)(-1), 0); |
180 | 216 | ||
181 | if (unlikely(entry == IOMMU_ERROR_CODE)) | 217 | if (unlikely(entry == IOMMU_ERROR_CODE)) |
182 | goto range_alloc_fail; | 218 | goto range_alloc_fail; |
183 | 219 | ||
184 | *dma_addrp = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT)); | 220 | *dma_addrp = (tbl->table_map_base + (entry << IO_PAGE_SHIFT)); |
185 | ret = (void *) first_page; | 221 | ret = (void *) first_page; |
186 | first_page = __pa(first_page); | 222 | first_page = __pa(first_page); |
187 | 223 | ||
@@ -193,12 +229,12 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size, | |||
193 | entry); | 229 | entry); |
194 | 230 | ||
195 | for (n = 0; n < npages; n++) { | 231 | for (n = 0; n < npages; n++) { |
196 | long err = iommu_batch_add(first_page + (n * PAGE_SIZE)); | 232 | long err = iommu_batch_add(first_page + (n * PAGE_SIZE), mask); |
197 | if (unlikely(err < 0L)) | 233 | if (unlikely(err < 0L)) |
198 | goto iommu_map_fail; | 234 | goto iommu_map_fail; |
199 | } | 235 | } |
200 | 236 | ||
201 | if (unlikely(iommu_batch_end() < 0L)) | 237 | if (unlikely(iommu_batch_end(mask) < 0L)) |
202 | goto iommu_map_fail; | 238 | goto iommu_map_fail; |
203 | 239 | ||
204 | local_irq_restore(flags); | 240 | local_irq_restore(flags); |
@@ -206,25 +242,71 @@ static void *dma_4v_alloc_coherent(struct device *dev, size_t size, | |||
206 | return ret; | 242 | return ret; |
207 | 243 | ||
208 | iommu_map_fail: | 244 | iommu_map_fail: |
209 | iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, IOMMU_ERROR_CODE); | 245 | iommu_tbl_range_free(tbl, *dma_addrp, npages, IOMMU_ERROR_CODE); |
210 | 246 | ||
211 | range_alloc_fail: | 247 | range_alloc_fail: |
212 | free_pages(first_page, order); | 248 | free_pages(first_page, order); |
213 | return NULL; | 249 | return NULL; |
214 | } | 250 | } |
215 | 251 | ||
216 | static void dma_4v_iommu_demap(void *demap_arg, unsigned long entry, | 252 | unsigned long dma_4v_iotsb_bind(unsigned long devhandle, |
217 | unsigned long npages) | 253 | unsigned long iotsb_num, |
254 | struct pci_bus *bus_dev) | ||
255 | { | ||
256 | struct pci_dev *pdev; | ||
257 | unsigned long err; | ||
258 | unsigned int bus; | ||
259 | unsigned int device; | ||
260 | unsigned int fun; | ||
261 | |||
262 | list_for_each_entry(pdev, &bus_dev->devices, bus_list) { | ||
263 | if (pdev->subordinate) { | ||
264 | /* No need to bind pci bridge */ | ||
265 | dma_4v_iotsb_bind(devhandle, iotsb_num, | ||
266 | pdev->subordinate); | ||
267 | } else { | ||
268 | bus = bus_dev->number; | ||
269 | device = PCI_SLOT(pdev->devfn); | ||
270 | fun = PCI_FUNC(pdev->devfn); | ||
271 | err = pci_sun4v_iotsb_bind(devhandle, iotsb_num, | ||
272 | HV_PCI_DEVICE_BUILD(bus, | ||
273 | device, | ||
274 | fun)); | ||
275 | |||
276 | /* If bind fails for one device it is going to fail | ||
277 | * for the rest of the devices as well, because they all | ||
278 | * share the IOTSB. So in case of failure simply return the | ||
279 | * error. | ||
280 | */ | ||
281 | if (err) | ||
282 | return err; | ||
283 | } | ||
284 | } | ||
285 | |||
286 | return 0; | ||
287 | } | ||
288 | |||
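dma_4v_iotsb_bind() above walks the whole bus hierarchy, descending through bridges via pdev->subordinate and binding each leaf function to the shared IOTSB, and it gives up on the first failure since every device shares that IOTSB. The traversal shape as a hedged sketch (walk_and_bind() and the callback are invented names; unlike the code above, this version also propagates errors from the recursive branch):

    #include <linux/pci.h>

    /* invented re-statement of the recursion: descend into bridges, bind leaves */
    static unsigned long walk_and_bind(struct pci_bus *bus,
                                       unsigned long (*bind)(struct pci_dev *))
    {
            struct pci_dev *pdev;
            unsigned long err;

            list_for_each_entry(pdev, &bus->devices, bus_list) {
                    if (pdev->subordinate)
                            err = walk_and_bind(pdev->subordinate, bind);
                    else
                            err = bind(pdev);       /* e.g. issue pci_sun4v_iotsb_bind() */
                    if (err)
                            return err;             /* shared IOTSB: first failure aborts */
            }
            return 0;
    }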
289 | static void dma_4v_iommu_demap(struct device *dev, unsigned long devhandle, | ||
290 | dma_addr_t dvma, unsigned long iotsb_num, | ||
291 | unsigned long entry, unsigned long npages) | ||
218 | { | 292 | { |
219 | u32 devhandle = *(u32 *)demap_arg; | ||
220 | unsigned long num, flags; | 293 | unsigned long num, flags; |
294 | unsigned long ret; | ||
221 | 295 | ||
222 | local_irq_save(flags); | 296 | local_irq_save(flags); |
223 | do { | 297 | do { |
224 | num = pci_sun4v_iommu_demap(devhandle, | 298 | if (dvma <= DMA_BIT_MASK(32)) { |
225 | HV_PCI_TSBID(0, entry), | 299 | num = pci_sun4v_iommu_demap(devhandle, |
226 | npages); | 300 | HV_PCI_TSBID(0, entry), |
227 | 301 | npages); | |
302 | } else { | ||
303 | ret = pci_sun4v_iotsb_demap(devhandle, iotsb_num, | ||
304 | entry, npages, &num); | ||
305 | if (unlikely(ret != HV_EOK)) { | ||
306 | pr_err_ratelimited("pci_iotsb_demap() failed with error: %ld\n", | ||
307 | ret); | ||
308 | } | ||
309 | } | ||
228 | entry += num; | 310 | entry += num; |
229 | npages -= num; | 311 | npages -= num; |
230 | } while (npages != 0); | 312 | } while (npages != 0); |
@@ -236,16 +318,28 @@ static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu, | |||
236 | { | 318 | { |
237 | struct pci_pbm_info *pbm; | 319 | struct pci_pbm_info *pbm; |
238 | struct iommu *iommu; | 320 | struct iommu *iommu; |
321 | struct atu *atu; | ||
322 | struct iommu_map_table *tbl; | ||
239 | unsigned long order, npages, entry; | 323 | unsigned long order, npages, entry; |
324 | unsigned long iotsb_num; | ||
240 | u32 devhandle; | 325 | u32 devhandle; |
241 | 326 | ||
242 | npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; | 327 | npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; |
243 | iommu = dev->archdata.iommu; | 328 | iommu = dev->archdata.iommu; |
244 | pbm = dev->archdata.host_controller; | 329 | pbm = dev->archdata.host_controller; |
330 | atu = iommu->atu; | ||
245 | devhandle = pbm->devhandle; | 331 | devhandle = pbm->devhandle; |
246 | entry = ((dvma - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT); | 332 | |
247 | dma_4v_iommu_demap(&devhandle, entry, npages); | 333 | if (dvma <= DMA_BIT_MASK(32)) { |
248 | iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE); | 334 | tbl = &iommu->tbl; |
335 | iotsb_num = 0; /* not used on the legacy iommu path */ | ||
336 | } else { | ||
337 | tbl = &atu->tbl; | ||
338 | iotsb_num = atu->iotsb->iotsb_num; | ||
339 | } | ||
340 | entry = ((dvma - tbl->table_map_base) >> IO_PAGE_SHIFT); | ||
341 | dma_4v_iommu_demap(dev, devhandle, dvma, iotsb_num, entry, npages); | ||
342 | iommu_tbl_range_free(tbl, dvma, npages, IOMMU_ERROR_CODE); | ||
249 | order = get_order(size); | 343 | order = get_order(size); |
250 | if (order < 10) | 344 | if (order < 10) |
251 | free_pages((unsigned long)cpu, order); | 345 | free_pages((unsigned long)cpu, order); |
@@ -257,13 +351,17 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page, | |||
257 | unsigned long attrs) | 351 | unsigned long attrs) |
258 | { | 352 | { |
259 | struct iommu *iommu; | 353 | struct iommu *iommu; |
354 | struct atu *atu; | ||
355 | struct iommu_map_table *tbl; | ||
356 | u64 mask; | ||
260 | unsigned long flags, npages, oaddr; | 357 | unsigned long flags, npages, oaddr; |
261 | unsigned long i, base_paddr; | 358 | unsigned long i, base_paddr; |
262 | u32 bus_addr, ret; | ||
263 | unsigned long prot; | 359 | unsigned long prot; |
360 | dma_addr_t bus_addr, ret; | ||
264 | long entry; | 361 | long entry; |
265 | 362 | ||
266 | iommu = dev->archdata.iommu; | 363 | iommu = dev->archdata.iommu; |
364 | atu = iommu->atu; | ||
267 | 365 | ||
268 | if (unlikely(direction == DMA_NONE)) | 366 | if (unlikely(direction == DMA_NONE)) |
269 | goto bad; | 367 | goto bad; |
@@ -272,13 +370,19 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page, | |||
272 | npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK); | 370 | npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK); |
273 | npages >>= IO_PAGE_SHIFT; | 371 | npages >>= IO_PAGE_SHIFT; |
274 | 372 | ||
275 | entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL, | 373 | mask = *dev->dma_mask; |
374 | if (mask <= DMA_BIT_MASK(32)) | ||
375 | tbl = &iommu->tbl; | ||
376 | else | ||
377 | tbl = &atu->tbl; | ||
378 | |||
379 | entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL, | ||
276 | (unsigned long)(-1), 0); | 380 | (unsigned long)(-1), 0); |
277 | 381 | ||
278 | if (unlikely(entry == IOMMU_ERROR_CODE)) | 382 | if (unlikely(entry == IOMMU_ERROR_CODE)) |
279 | goto bad; | 383 | goto bad; |
280 | 384 | ||
281 | bus_addr = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT)); | 385 | bus_addr = (tbl->table_map_base + (entry << IO_PAGE_SHIFT)); |
282 | ret = bus_addr | (oaddr & ~IO_PAGE_MASK); | 386 | ret = bus_addr | (oaddr & ~IO_PAGE_MASK); |
283 | base_paddr = __pa(oaddr & IO_PAGE_MASK); | 387 | base_paddr = __pa(oaddr & IO_PAGE_MASK); |
284 | prot = HV_PCI_MAP_ATTR_READ; | 388 | prot = HV_PCI_MAP_ATTR_READ; |
@@ -293,11 +397,11 @@ static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page, | |||
293 | iommu_batch_start(dev, prot, entry); | 397 | iommu_batch_start(dev, prot, entry); |
294 | 398 | ||
295 | for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) { | 399 | for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) { |
296 | long err = iommu_batch_add(base_paddr); | 400 | long err = iommu_batch_add(base_paddr, mask); |
297 | if (unlikely(err < 0L)) | 401 | if (unlikely(err < 0L)) |
298 | goto iommu_map_fail; | 402 | goto iommu_map_fail; |
299 | } | 403 | } |
300 | if (unlikely(iommu_batch_end() < 0L)) | 404 | if (unlikely(iommu_batch_end(mask) < 0L)) |
301 | goto iommu_map_fail; | 405 | goto iommu_map_fail; |
302 | 406 | ||
303 | local_irq_restore(flags); | 407 | local_irq_restore(flags); |
@@ -310,7 +414,7 @@ bad: | |||
310 | return DMA_ERROR_CODE; | 414 | return DMA_ERROR_CODE; |
311 | 415 | ||
312 | iommu_map_fail: | 416 | iommu_map_fail: |
313 | iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE); | 417 | iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE); |
314 | return DMA_ERROR_CODE; | 418 | return DMA_ERROR_CODE; |
315 | } | 419 | } |
316 | 420 | ||
@@ -320,7 +424,10 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr, | |||
320 | { | 424 | { |
321 | struct pci_pbm_info *pbm; | 425 | struct pci_pbm_info *pbm; |
322 | struct iommu *iommu; | 426 | struct iommu *iommu; |
427 | struct atu *atu; | ||
428 | struct iommu_map_table *tbl; | ||
323 | unsigned long npages; | 429 | unsigned long npages; |
430 | unsigned long iotsb_num; | ||
324 | long entry; | 431 | long entry; |
325 | u32 devhandle; | 432 | u32 devhandle; |
326 | 433 | ||
@@ -332,14 +439,23 @@ static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr, | |||
332 | 439 | ||
333 | iommu = dev->archdata.iommu; | 440 | iommu = dev->archdata.iommu; |
334 | pbm = dev->archdata.host_controller; | 441 | pbm = dev->archdata.host_controller; |
442 | atu = iommu->atu; | ||
335 | devhandle = pbm->devhandle; | 443 | devhandle = pbm->devhandle; |
336 | 444 | ||
337 | npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); | 445 | npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK); |
338 | npages >>= IO_PAGE_SHIFT; | 446 | npages >>= IO_PAGE_SHIFT; |
339 | bus_addr &= IO_PAGE_MASK; | 447 | bus_addr &= IO_PAGE_MASK; |
340 | entry = (bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT; | 448 | |
341 | dma_4v_iommu_demap(&devhandle, entry, npages); | 449 | if (bus_addr <= DMA_BIT_MASK(32)) { |
342 | iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE); | 450 | iotsb_num = 0; /* not used on the legacy iommu path */ |
451 | tbl = &iommu->tbl; | ||
452 | } else { | ||
453 | iotsb_num = atu->iotsb->iotsb_num; | ||
454 | tbl = &atu->tbl; | ||
455 | } | ||
456 | entry = (bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT; | ||
457 | dma_4v_iommu_demap(dev, devhandle, bus_addr, iotsb_num, entry, npages); | ||
458 | iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE); | ||
343 | } | 459 | } |
344 | 460 | ||
345 | static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, | 461 | static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, |
@@ -353,12 +469,17 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, | |||
353 | unsigned long seg_boundary_size; | 469 | unsigned long seg_boundary_size; |
354 | int outcount, incount, i; | 470 | int outcount, incount, i; |
355 | struct iommu *iommu; | 471 | struct iommu *iommu; |
472 | struct atu *atu; | ||
473 | struct iommu_map_table *tbl; | ||
474 | u64 mask; | ||
356 | unsigned long base_shift; | 475 | unsigned long base_shift; |
357 | long err; | 476 | long err; |
358 | 477 | ||
359 | BUG_ON(direction == DMA_NONE); | 478 | BUG_ON(direction == DMA_NONE); |
360 | 479 | ||
361 | iommu = dev->archdata.iommu; | 480 | iommu = dev->archdata.iommu; |
481 | atu = iommu->atu; | ||
482 | |||
362 | if (nelems == 0 || !iommu) | 483 | if (nelems == 0 || !iommu) |
363 | return 0; | 484 | return 0; |
364 | 485 | ||
@@ -384,7 +505,15 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, | |||
384 | max_seg_size = dma_get_max_seg_size(dev); | 505 | max_seg_size = dma_get_max_seg_size(dev); |
385 | seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, | 506 | seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1, |
386 | IO_PAGE_SIZE) >> IO_PAGE_SHIFT; | 507 | IO_PAGE_SIZE) >> IO_PAGE_SHIFT; |
387 | base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT; | 508 | |
509 | mask = *dev->dma_mask; | ||
510 | if (mask <= DMA_BIT_MASK(32)) | ||
511 | tbl = &iommu->tbl; | ||
512 | else | ||
513 | tbl = &atu->tbl; | ||
514 | |||
515 | base_shift = tbl->table_map_base >> IO_PAGE_SHIFT; | ||
516 | |||
388 | for_each_sg(sglist, s, nelems, i) { | 517 | for_each_sg(sglist, s, nelems, i) { |
389 | unsigned long paddr, npages, entry, out_entry = 0, slen; | 518 | unsigned long paddr, npages, entry, out_entry = 0, slen; |
390 | 519 | ||
@@ -397,27 +526,26 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, | |||
397 | /* Allocate iommu entries for that segment */ | 526 | /* Allocate iommu entries for that segment */ |
398 | paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s); | 527 | paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s); |
399 | npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE); | 528 | npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE); |
400 | entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, | 529 | entry = iommu_tbl_range_alloc(dev, tbl, npages, |
401 | &handle, (unsigned long)(-1), 0); | 530 | &handle, (unsigned long)(-1), 0); |
402 | 531 | ||
403 | /* Handle failure */ | 532 | /* Handle failure */ |
404 | if (unlikely(entry == IOMMU_ERROR_CODE)) { | 533 | if (unlikely(entry == IOMMU_ERROR_CODE)) { |
405 | if (printk_ratelimit()) | 534 | pr_err_ratelimited("iommu_alloc failed, iommu %p paddr %lx npages %lx\n", |
406 | printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx" | 535 | tbl, paddr, npages); |
407 | " npages %lx\n", iommu, paddr, npages); | ||
408 | goto iommu_map_failed; | 536 | goto iommu_map_failed; |
409 | } | 537 | } |
410 | 538 | ||
411 | iommu_batch_new_entry(entry); | 539 | iommu_batch_new_entry(entry, mask); |
412 | 540 | ||
413 | /* Convert entry to a dma_addr_t */ | 541 | /* Convert entry to a dma_addr_t */ |
414 | dma_addr = iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT); | 542 | dma_addr = tbl->table_map_base + (entry << IO_PAGE_SHIFT); |
415 | dma_addr |= (s->offset & ~IO_PAGE_MASK); | 543 | dma_addr |= (s->offset & ~IO_PAGE_MASK); |
416 | 544 | ||
417 | /* Insert into HW table */ | 545 | /* Insert into HW table */ |
418 | paddr &= IO_PAGE_MASK; | 546 | paddr &= IO_PAGE_MASK; |
419 | while (npages--) { | 547 | while (npages--) { |
420 | err = iommu_batch_add(paddr); | 548 | err = iommu_batch_add(paddr, mask); |
421 | if (unlikely(err < 0L)) | 549 | if (unlikely(err < 0L)) |
422 | goto iommu_map_failed; | 550 | goto iommu_map_failed; |
423 | paddr += IO_PAGE_SIZE; | 551 | paddr += IO_PAGE_SIZE; |
@@ -452,7 +580,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist, | |||
452 | dma_next = dma_addr + slen; | 580 | dma_next = dma_addr + slen; |
453 | } | 581 | } |
454 | 582 | ||
455 | err = iommu_batch_end(); | 583 | err = iommu_batch_end(mask); |
456 | 584 | ||
457 | if (unlikely(err < 0L)) | 585 | if (unlikely(err < 0L)) |
458 | goto iommu_map_failed; | 586 | goto iommu_map_failed; |
@@ -475,7 +603,7 @@ iommu_map_failed: | |||
475 | vaddr = s->dma_address & IO_PAGE_MASK; | 603 | vaddr = s->dma_address & IO_PAGE_MASK; |
476 | npages = iommu_num_pages(s->dma_address, s->dma_length, | 604 | npages = iommu_num_pages(s->dma_address, s->dma_length, |
477 | IO_PAGE_SIZE); | 605 | IO_PAGE_SIZE); |
478 | iommu_tbl_range_free(&iommu->tbl, vaddr, npages, | 606 | iommu_tbl_range_free(tbl, vaddr, npages, |
479 | IOMMU_ERROR_CODE); | 607 | IOMMU_ERROR_CODE); |
480 | /* XXX demap? XXX */ | 608 | /* XXX demap? XXX */ |
481 | s->dma_address = DMA_ERROR_CODE; | 609 | s->dma_address = DMA_ERROR_CODE; |
@@ -496,13 +624,16 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, | |||
496 | struct pci_pbm_info *pbm; | 624 | struct pci_pbm_info *pbm; |
497 | struct scatterlist *sg; | 625 | struct scatterlist *sg; |
498 | struct iommu *iommu; | 626 | struct iommu *iommu; |
627 | struct atu *atu; | ||
499 | unsigned long flags, entry; | 628 | unsigned long flags, entry; |
629 | unsigned long iotsb_num; | ||
500 | u32 devhandle; | 630 | u32 devhandle; |
501 | 631 | ||
502 | BUG_ON(direction == DMA_NONE); | 632 | BUG_ON(direction == DMA_NONE); |
503 | 633 | ||
504 | iommu = dev->archdata.iommu; | 634 | iommu = dev->archdata.iommu; |
505 | pbm = dev->archdata.host_controller; | 635 | pbm = dev->archdata.host_controller; |
636 | atu = iommu->atu; | ||
506 | devhandle = pbm->devhandle; | 637 | devhandle = pbm->devhandle; |
507 | 638 | ||
508 | local_irq_save(flags); | 639 | local_irq_save(flags); |
@@ -512,15 +643,24 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, | |||
512 | dma_addr_t dma_handle = sg->dma_address; | 643 | dma_addr_t dma_handle = sg->dma_address; |
513 | unsigned int len = sg->dma_length; | 644 | unsigned int len = sg->dma_length; |
514 | unsigned long npages; | 645 | unsigned long npages; |
515 | struct iommu_map_table *tbl = &iommu->tbl; | 646 | struct iommu_map_table *tbl; |
516 | unsigned long shift = IO_PAGE_SHIFT; | 647 | unsigned long shift = IO_PAGE_SHIFT; |
517 | 648 | ||
518 | if (!len) | 649 | if (!len) |
519 | break; | 650 | break; |
520 | npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE); | 651 | npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE); |
652 | |||
653 | if (dma_handle <= DMA_BIT_MASK(32)) { | ||
654 | iotsb_num = 0; /* not used on the legacy iommu path */ | ||
655 | tbl = &iommu->tbl; | ||
656 | } else { | ||
657 | iotsb_num = atu->iotsb->iotsb_num; | ||
658 | tbl = &atu->tbl; | ||
659 | } | ||
521 | entry = ((dma_handle - tbl->table_map_base) >> shift); | 660 | entry = ((dma_handle - tbl->table_map_base) >> shift); |
522 | dma_4v_iommu_demap(&devhandle, entry, npages); | 661 | dma_4v_iommu_demap(dev, devhandle, dma_handle, iotsb_num, |
523 | iommu_tbl_range_free(&iommu->tbl, dma_handle, npages, | 662 | entry, npages); |
663 | iommu_tbl_range_free(tbl, dma_handle, npages, | ||
524 | IOMMU_ERROR_CODE); | 664 | IOMMU_ERROR_CODE); |
525 | sg = sg_next(sg); | 665 | sg = sg_next(sg); |
526 | } | 666 | } |
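All of the map and unmap paths above size their work in IO pages via iommu_num_pages(paddr, len, IO_PAGE_SIZE), which counts the pages spanned by [paddr, paddr + len). A self-contained restatement of that arithmetic (the 8 KB page size in the example is an assumption, not taken from this patch):

    #include <stdint.h>
    #include <stdio.h>

    /* pages spanned by [addr, addr + len), len > 0 -- same rounding as iommu_num_pages() */
    static uint64_t io_pages_spanned(uint64_t addr, uint64_t len, uint64_t io_page_size)
    {
            uint64_t span = (addr & (io_page_size - 1)) + len;   /* offset in first page + length */

            return (span + io_page_size - 1) / io_page_size;     /* round up to whole pages */
    }

    int main(void)
    {
            /* a 2-byte buffer that straddles a page boundary still needs 2 IO pages */
            printf("%llu\n", (unsigned long long)io_pages_spanned(0x1fff, 2, 8192));
            return 0;
    }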
@@ -581,6 +721,132 @@ static unsigned long probe_existing_entries(struct pci_pbm_info *pbm, | |||
581 | return cnt; | 721 | return cnt; |
582 | } | 722 | } |
583 | 723 | ||
724 | static int pci_sun4v_atu_alloc_iotsb(struct pci_pbm_info *pbm) | ||
725 | { | ||
726 | struct atu *atu = pbm->iommu->atu; | ||
727 | struct atu_iotsb *iotsb; | ||
728 | void *table; | ||
729 | u64 table_size; | ||
730 | u64 iotsb_num; | ||
731 | unsigned long order; | ||
732 | unsigned long err; | ||
733 | |||
734 | iotsb = kzalloc(sizeof(*iotsb), GFP_KERNEL); | ||
735 | if (!iotsb) { | ||
736 | err = -ENOMEM; | ||
737 | goto out_err; | ||
738 | } | ||
739 | atu->iotsb = iotsb; | ||
740 | |||
741 | /* calculate size of IOTSB */ | ||
742 | table_size = (atu->size / IO_PAGE_SIZE) * 8; | ||
743 | order = get_order(table_size); | ||
744 | table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); | ||
745 | if (!table) { | ||
746 | err = -ENOMEM; | ||
747 | goto table_failed; | ||
748 | } | ||
749 | iotsb->table = table; | ||
750 | iotsb->ra = __pa(table); | ||
751 | iotsb->dvma_size = atu->size; | ||
752 | iotsb->dvma_base = atu->base; | ||
753 | iotsb->table_size = table_size; | ||
754 | iotsb->page_size = IO_PAGE_SIZE; | ||
755 | |||
756 | /* configure and register IOTSB with HV */ | ||
757 | err = pci_sun4v_iotsb_conf(pbm->devhandle, | ||
758 | iotsb->ra, | ||
759 | iotsb->table_size, | ||
760 | iotsb->page_size, | ||
761 | iotsb->dvma_base, | ||
762 | &iotsb_num); | ||
763 | if (err) { | ||
764 | pr_err(PFX "pci_iotsb_conf failed error: %ld\n", err); | ||
765 | goto iotsb_conf_failed; | ||
766 | } | ||
767 | iotsb->iotsb_num = iotsb_num; | ||
768 | |||
769 | err = dma_4v_iotsb_bind(pbm->devhandle, iotsb_num, pbm->pci_bus); | ||
770 | if (err) { | ||
771 | pr_err(PFX "pci_iotsb_bind failed error: %ld\n", err); | ||
772 | goto iotsb_conf_failed; | ||
773 | } | ||
774 | |||
775 | return 0; | ||
776 | |||
777 | iotsb_conf_failed: | ||
778 | free_pages((unsigned long)table, order); | ||
779 | table_failed: | ||
780 | kfree(iotsb); | ||
781 | out_err: | ||
782 | return err; | ||
783 | } | ||
784 | |||
785 | static int pci_sun4v_atu_init(struct pci_pbm_info *pbm) | ||
786 | { | ||
787 | struct atu *atu = pbm->iommu->atu; | ||
788 | unsigned long err; | ||
789 | const u64 *ranges; | ||
790 | u64 map_size, num_iotte; | ||
791 | u64 dma_mask; | ||
792 | const u32 *page_size; | ||
793 | int len; | ||
794 | |||
795 | ranges = of_get_property(pbm->op->dev.of_node, "iommu-address-ranges", | ||
796 | &len); | ||
797 | if (!ranges) { | ||
798 | pr_err(PFX "No iommu-address-ranges\n"); | ||
799 | return -EINVAL; | ||
800 | } | ||
801 | |||
802 | page_size = of_get_property(pbm->op->dev.of_node, "iommu-pagesizes", | ||
803 | NULL); | ||
804 | if (!page_size) { | ||
805 | pr_err(PFX "No iommu-pagesizes\n"); | ||
806 | return -EINVAL; | ||
807 | } | ||
808 | |||
809 | /* There are 4 iommu-address-ranges supported. Each range is a pair | ||
810 | * of {base, size}. ranges[0] and ranges[1] describe the 32bit address | ||
811 | * space, while ranges[2] and ranges[3] describe the 64bit space. We | ||
812 | * want to use the 64bit address ranges to support 64bit addressing. | ||
813 | * Because the 'size' of ranges[2] and ranges[3] is the same, either | ||
814 | * of them can be selected for mapping. However, since that 'size' is | ||
815 | * too large for the OS to allocate an IOTSB for, we use a fixed size | ||
816 | * of 32G (ATU_64_SPACE_SIZE), which is more than enough for all PCIe | ||
817 | * devices to share. | ||
818 | */ | ||
819 | atu->ranges = (struct atu_ranges *)ranges; | ||
820 | atu->base = atu->ranges[3].base; | ||
821 | atu->size = ATU_64_SPACE_SIZE; | ||
822 | |||
823 | /* Create IOTSB */ | ||
824 | err = pci_sun4v_atu_alloc_iotsb(pbm); | ||
825 | if (err) { | ||
826 | pr_err(PFX "Error creating ATU IOTSB\n"); | ||
827 | return err; | ||
828 | } | ||
829 | |||
830 | /* Create ATU iommu map. | ||
831 | * One bit represents one iotte in IOTSB table. | ||
832 | */ | ||
833 | dma_mask = (roundup_pow_of_two(atu->size) - 1UL); | ||
834 | num_iotte = atu->size / IO_PAGE_SIZE; | ||
835 | map_size = num_iotte / 8; | ||
836 | atu->tbl.table_map_base = atu->base; | ||
837 | atu->dma_addr_mask = dma_mask; | ||
838 | atu->tbl.map = kzalloc(map_size, GFP_KERNEL); | ||
839 | if (!atu->tbl.map) | ||
840 | return -ENOMEM; | ||
841 | |||
842 | iommu_tbl_pool_init(&atu->tbl, num_iotte, IO_PAGE_SHIFT, | ||
843 | NULL, false /* no large_pool */, | ||
844 | 0 /* default npools */, | ||
845 | false /* want span boundary checking */); | ||
846 | |||
847 | return 0; | ||
848 | } | ||
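pci_sun4v_atu_alloc_iotsb() and pci_sun4v_atu_init() size everything from the 32 GB ATU range: the IOTSB holds one 8-byte IOTTE per IO page, the allocator bitmap one bit per IOTTE, and dma_addr_mask is the range rounded up to a power of two minus one. Worked numbers, assuming an 8 KB IO page size (that value is an assumption made for the example):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const uint64_t atu_size     = 32ULL << 30;              /* ATU_64_SPACE_SIZE: 32 GB */
            const uint64_t io_page_size = 8192;                     /* assumed IO page size */
            uint64_t num_iotte  = atu_size / io_page_size;          /* one IOTTE per IO page */
            uint64_t iotsb_size = num_iotte * 8;                    /* 8 bytes per IOTTE */
            uint64_t map_size   = num_iotte / 8;                    /* allocator bitmap, 1 bit/IOTTE */
            uint64_t dma_mask   = atu_size - 1;                     /* 32 GB is already a power of two */

            /* prints: 4194304 IOTTEs, 32 MB IOTSB, 512 KB bitmap, mask 0x7ffffffff */
            printf("%llu IOTTEs, %llu MB IOTSB, %llu KB bitmap, mask 0x%llx\n",
                   (unsigned long long)num_iotte,
                   (unsigned long long)(iotsb_size >> 20),
                   (unsigned long long)(map_size >> 10),
                   (unsigned long long)dma_mask);
            return 0;
    }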
849 | |||
584 | static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm) | 850 | static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm) |
585 | { | 851 | { |
586 | static const u32 vdma_default[] = { 0x80000000, 0x80000000 }; | 852 | static const u32 vdma_default[] = { 0x80000000, 0x80000000 }; |
@@ -918,6 +1184,18 @@ static int pci_sun4v_pbm_init(struct pci_pbm_info *pbm, | |||
918 | 1184 | ||
919 | pci_sun4v_scan_bus(pbm, &op->dev); | 1185 | pci_sun4v_scan_bus(pbm, &op->dev); |
920 | 1186 | ||
1187 | /* If atu_init fails it is not a complete failure; | ||
1188 | * we can still continue using the legacy iommu. | ||
1189 | */ | ||
1190 | if (pbm->iommu->atu) { | ||
1191 | err = pci_sun4v_atu_init(pbm); | ||
1192 | if (err) { | ||
1193 | kfree(pbm->iommu->atu); | ||
1194 | pbm->iommu->atu = NULL; | ||
1195 | pr_err(PFX "ATU init failed, err=%d\n", err); | ||
1196 | } | ||
1197 | } | ||
1198 | |||
921 | pbm->next = pci_pbm_root; | 1199 | pbm->next = pci_pbm_root; |
922 | pci_pbm_root = pbm; | 1200 | pci_pbm_root = pbm; |
923 | 1201 | ||
@@ -931,8 +1209,10 @@ static int pci_sun4v_probe(struct platform_device *op) | |||
931 | struct pci_pbm_info *pbm; | 1209 | struct pci_pbm_info *pbm; |
932 | struct device_node *dp; | 1210 | struct device_node *dp; |
933 | struct iommu *iommu; | 1211 | struct iommu *iommu; |
1212 | struct atu *atu; | ||
934 | u32 devhandle; | 1213 | u32 devhandle; |
935 | int i, err = -ENODEV; | 1214 | int i, err = -ENODEV; |
1215 | static bool hv_atu = true; | ||
936 | 1216 | ||
937 | dp = op->dev.of_node; | 1217 | dp = op->dev.of_node; |
938 | 1218 | ||
@@ -954,6 +1234,19 @@ static int pci_sun4v_probe(struct platform_device *op) | |||
954 | pr_info(PFX "Registered hvapi major[%lu] minor[%lu]\n", | 1234 | pr_info(PFX "Registered hvapi major[%lu] minor[%lu]\n", |
955 | vpci_major, vpci_minor); | 1235 | vpci_major, vpci_minor); |
956 | 1236 | ||
1237 | err = sun4v_hvapi_register(HV_GRP_ATU, vatu_major, &vatu_minor); | ||
1238 | if (err) { | ||
1239 | /* don't return an error if we fail to register the | ||
1240 | * ATU group, but ATU hcalls won't be available. | ||
1241 | */ | ||
1242 | hv_atu = false; | ||
1243 | pr_err(PFX "Could not register hvapi ATU err=%d\n", | ||
1244 | err); | ||
1245 | } else { | ||
1246 | pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n", | ||
1247 | vatu_major, vatu_minor); | ||
1248 | } | ||
1249 | |||
957 | dma_ops = &sun4v_dma_ops; | 1250 | dma_ops = &sun4v_dma_ops; |
958 | } | 1251 | } |
959 | 1252 | ||
@@ -991,6 +1284,14 @@ static int pci_sun4v_probe(struct platform_device *op) | |||
991 | } | 1284 | } |
992 | 1285 | ||
993 | pbm->iommu = iommu; | 1286 | pbm->iommu = iommu; |
1287 | iommu->atu = NULL; | ||
1288 | if (hv_atu) { | ||
1289 | atu = kzalloc(sizeof(*atu), GFP_KERNEL); | ||
1290 | if (!atu) | ||
1291 | pr_err(PFX "Could not allocate atu\n"); | ||
1292 | else | ||
1293 | iommu->atu = atu; | ||
1294 | } | ||
994 | 1295 | ||
995 | err = pci_sun4v_pbm_init(pbm, op, devhandle); | 1296 | err = pci_sun4v_pbm_init(pbm, op, devhandle); |
996 | if (err) | 1297 | if (err) |
@@ -1001,6 +1302,7 @@ static int pci_sun4v_probe(struct platform_device *op) | |||
1001 | return 0; | 1302 | return 0; |
1002 | 1303 | ||
1003 | out_free_iommu: | 1304 | out_free_iommu: |
1305 | kfree(iommu->atu); | ||
1004 | kfree(pbm->iommu); | 1306 | kfree(pbm->iommu); |
1005 | 1307 | ||
1006 | out_free_controller: | 1308 | out_free_controller: |
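None of the above forces a device onto the ATU; the path taken is decided by the DMA mask the device driver sets, and iommu->atu is simply left NULL (with the legacy table used) whenever the ATU hcall group or atu_init is unavailable. A hedged usage sketch from a PCIe driver's side, using the generic DMA API rather than anything added by this patch:

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    /* illustrative only: a driver opting into 64-bit DMA, which on sun4v with a
     * working ATU routes its mappings through atu->tbl instead of iommu->tbl */
    static int example_set_dma_masks(struct pci_dev *pdev)
    {
            if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
                    return 0;                               /* 64-bit mask accepted */
            return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
    }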
diff --git a/arch/sparc/kernel/pci_sun4v.h b/arch/sparc/kernel/pci_sun4v.h index 5642212390b2..22603a4e48bf 100644 --- a/arch/sparc/kernel/pci_sun4v.h +++ b/arch/sparc/kernel/pci_sun4v.h | |||
@@ -89,4 +89,25 @@ unsigned long pci_sun4v_msg_setvalid(unsigned long devhandle, | |||
89 | unsigned long msinum, | 89 | unsigned long msinum, |
90 | unsigned long valid); | 90 | unsigned long valid); |
91 | 91 | ||
92 | /* Sun4v HV IOMMU v2 APIs */ | ||
93 | unsigned long pci_sun4v_iotsb_conf(unsigned long devhandle, | ||
94 | unsigned long ra, | ||
95 | unsigned long table_size, | ||
96 | unsigned long page_size, | ||
97 | unsigned long dvma_base, | ||
98 | u64 *iotsb_num); | ||
99 | unsigned long pci_sun4v_iotsb_bind(unsigned long devhandle, | ||
100 | unsigned long iotsb_num, | ||
101 | unsigned int pci_device); | ||
102 | unsigned long pci_sun4v_iotsb_map(unsigned long devhandle, | ||
103 | unsigned long iotsb_num, | ||
104 | unsigned long iotsb_index_iottes, | ||
105 | unsigned long io_attributes, | ||
106 | unsigned long io_page_list_pa, | ||
107 | long *mapped); | ||
108 | unsigned long pci_sun4v_iotsb_demap(unsigned long devhandle, | ||
109 | unsigned long iotsb_num, | ||
110 | unsigned long iotsb_index, | ||
111 | unsigned long iottes, | ||
112 | unsigned long *demapped); | ||
92 | #endif /* !(_PCI_SUN4V_H) */ | 113 | #endif /* !(_PCI_SUN4V_H) */ |
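The prototypes above pair one-to-one with the hypervisor fast traps added in pci_sun4v_asm.S below: %o0 carries the status back, and the extra output (iotsb_num, #mapped, #demapped) is stored through the final pointer argument. A hedged caller-side sketch for the map call; HV_EOK, HV_PCI_IOTSB_INDEX_COUNT and the map attributes come from the sun4v hypervisor headers, while the helper name is invented:

    #include "pci_sun4v.h"

    /* invented helper: map one run of IO pages into an IOTSB and report progress */
    static long iotsb_map_run(unsigned long devhandle, unsigned long iotsb_num,
                              unsigned long entry, unsigned long npages,
                              unsigned long prot, unsigned long pglist_pa)
    {
            long mapped;
            unsigned long st;

            st = pci_sun4v_iotsb_map(devhandle, iotsb_num,
                                     HV_PCI_IOTSB_INDEX_COUNT(npages, entry),
                                     prot, pglist_pa, &mapped);
            if (st != HV_EOK)
                    return -1;
            return mapped;          /* may be < npages; callers loop, as iommu_batch_flush() does */
    }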
diff --git a/arch/sparc/kernel/pci_sun4v_asm.S b/arch/sparc/kernel/pci_sun4v_asm.S index e606d46c6815..578f09657916 100644 --- a/arch/sparc/kernel/pci_sun4v_asm.S +++ b/arch/sparc/kernel/pci_sun4v_asm.S | |||
@@ -360,3 +360,71 @@ ENTRY(pci_sun4v_msg_setvalid) | |||
360 | mov %o0, %o0 | 360 | mov %o0, %o0 |
361 | ENDPROC(pci_sun4v_msg_setvalid) | 361 | ENDPROC(pci_sun4v_msg_setvalid) |
362 | 362 | ||
363 | /* | ||
364 | * %o0: devhandle | ||
365 | * %o1: r_addr | ||
366 | * %o2: size | ||
367 | * %o3: pagesize | ||
368 | * %o4: virt | ||
369 | * %o5: &iotsb_num/&iotsb_handle | ||
370 | * | ||
371 | * returns %o0: status | ||
372 | * %o1: iotsb_num/iotsb_handle | ||
373 | */ | ||
374 | ENTRY(pci_sun4v_iotsb_conf) | ||
375 | mov %o5, %g1 | ||
376 | mov HV_FAST_PCI_IOTSB_CONF, %o5 | ||
377 | ta HV_FAST_TRAP | ||
378 | retl | ||
379 | stx %o1, [%g1] | ||
380 | ENDPROC(pci_sun4v_iotsb_conf) | ||
381 | |||
382 | /* | ||
383 | * %o0: devhandle | ||
384 | * %o1: iotsb_num/iotsb_handle | ||
385 | * %o2: pci_device | ||
386 | * | ||
387 | * returns %o0: status | ||
388 | */ | ||
389 | ENTRY(pci_sun4v_iotsb_bind) | ||
390 | mov HV_FAST_PCI_IOTSB_BIND, %o5 | ||
391 | ta HV_FAST_TRAP | ||
392 | retl | ||
393 | nop | ||
394 | ENDPROC(pci_sun4v_iotsb_bind) | ||
395 | |||
396 | /* | ||
397 | * %o0: devhandle | ||
398 | * %o1: iotsb_num/iotsb_handle | ||
399 | * %o2: index_count | ||
400 | * %o3: iotte_attributes | ||
401 | * %o4: io_page_list_p | ||
402 | * %o5: &mapped | ||
403 | * | ||
404 | * returns %o0: status | ||
405 | * %o1: #mapped | ||
406 | */ | ||
407 | ENTRY(pci_sun4v_iotsb_map) | ||
408 | mov %o5, %g1 | ||
409 | mov HV_FAST_PCI_IOTSB_MAP, %o5 | ||
410 | ta HV_FAST_TRAP | ||
411 | retl | ||
412 | stx %o1, [%g1] | ||
413 | ENDPROC(pci_sun4v_iotsb_map) | ||
414 | |||
415 | /* | ||
416 | * %o0: devhandle | ||
417 | * %o1: iotsb_num/iotsb_handle | ||
418 | * %o2: iotsb_index | ||
419 | * %o3: #iottes | ||
420 | * %o4: &demapped | ||
421 | * | ||
422 | * returns %o0: status | ||
423 | * %o1: #demapped | ||
424 | */ | ||
425 | ENTRY(pci_sun4v_iotsb_demap) | ||
426 | mov HV_FAST_PCI_IOTSB_DEMAP, %o5 | ||
427 | ta HV_FAST_TRAP | ||
428 | retl | ||
429 | stx %o1, [%o4] | ||
430 | ENDPROC(pci_sun4v_iotsb_demap) | ||
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c index c3c12efe0bc0..9c0c8fd0b292 100644 --- a/arch/sparc/kernel/signal_32.c +++ b/arch/sparc/kernel/signal_32.c | |||
@@ -89,7 +89,7 @@ asmlinkage void do_sigreturn(struct pt_regs *regs) | |||
89 | sf = (struct signal_frame __user *) regs->u_regs[UREG_FP]; | 89 | sf = (struct signal_frame __user *) regs->u_regs[UREG_FP]; |
90 | 90 | ||
91 | /* 1. Make sure we are not getting garbage from the user */ | 91 | /* 1. Make sure we are not getting garbage from the user */ |
92 | if (!invalid_frame_pointer(sf, sizeof(*sf))) | 92 | if (invalid_frame_pointer(sf, sizeof(*sf))) |
93 | goto segv_and_exit; | 93 | goto segv_and_exit; |
94 | 94 | ||
95 | if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP])) | 95 | if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP])) |
@@ -150,7 +150,7 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs) | |||
150 | 150 | ||
151 | synchronize_user_stack(); | 151 | synchronize_user_stack(); |
152 | sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP]; | 152 | sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP]; |
153 | if (!invalid_frame_pointer(sf, sizeof(*sf))) | 153 | if (invalid_frame_pointer(sf, sizeof(*sf))) |
154 | goto segv; | 154 | goto segv; |
155 | 155 | ||
156 | if (get_user(ufp, &sf->regs.u_regs[UREG_FP])) | 156 | if (get_user(ufp, &sf->regs.u_regs[UREG_FP])) |
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 439784b7b7ac..37aa537b3ad8 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c | |||
@@ -802,8 +802,10 @@ struct mdesc_mblock { | |||
802 | }; | 802 | }; |
803 | static struct mdesc_mblock *mblocks; | 803 | static struct mdesc_mblock *mblocks; |
804 | static int num_mblocks; | 804 | static int num_mblocks; |
805 | static int find_numa_node_for_addr(unsigned long pa, | ||
806 | struct node_mem_mask *pnode_mask); | ||
805 | 807 | ||
806 | static unsigned long ra_to_pa(unsigned long addr) | 808 | static unsigned long __init ra_to_pa(unsigned long addr) |
807 | { | 809 | { |
808 | int i; | 810 | int i; |
809 | 811 | ||
@@ -819,8 +821,11 @@ static unsigned long ra_to_pa(unsigned long addr) | |||
819 | return addr; | 821 | return addr; |
820 | } | 822 | } |
821 | 823 | ||
822 | static int find_node(unsigned long addr) | 824 | static int __init find_node(unsigned long addr) |
823 | { | 825 | { |
826 | static bool search_mdesc = true; | ||
827 | static struct node_mem_mask last_mem_mask = { ~0UL, ~0UL }; | ||
828 | static int last_index; | ||
824 | int i; | 829 | int i; |
825 | 830 | ||
826 | addr = ra_to_pa(addr); | 831 | addr = ra_to_pa(addr); |
@@ -830,13 +835,30 @@ static int find_node(unsigned long addr) | |||
830 | if ((addr & p->mask) == p->val) | 835 | if ((addr & p->mask) == p->val) |
831 | return i; | 836 | return i; |
832 | } | 837 | } |
833 | /* The following condition has been observed on LDOM guests.*/ | 838 | /* The following condition has been observed on LDOM guests because |
834 | WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node" | 839 | * node_masks only contains the best latency mask and value. |
835 | " rule. Some physical memory will be owned by node 0."); | 840 | * LDOM guest's mdesc can contain a single latency group to |
836 | return 0; | 841 | * cover multiple address ranges. Print a warning message only if the |
842 | * address cannot be found in either node_masks or the mdesc. | ||
843 | */ | ||
844 | if ((search_mdesc) && | ||
845 | ((addr & last_mem_mask.mask) != last_mem_mask.val)) { | ||
846 | /* find the available node in the mdesc */ | ||
847 | last_index = find_numa_node_for_addr(addr, &last_mem_mask); | ||
848 | numadbg("find_node: latency group for address 0x%lx is %d\n", | ||
849 | addr, last_index); | ||
850 | if ((last_index < 0) || (last_index >= num_node_masks)) { | ||
851 | /* WARN_ONCE() and use default group 0 */ | ||
852 | WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node rule. Some physical memory will be owned by node 0."); | ||
853 | search_mdesc = false; | ||
854 | last_index = 0; | ||
855 | } | ||
856 | } | ||
857 | |||
858 | return last_index; | ||
837 | } | 859 | } |
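find_node() now memoizes the last mask/value pair handed back by find_numa_node_for_addr(), so repeated lookups that fall into the same latency group never touch the machine descriptor again, and it only warns (and stops consulting the mdesc) once. The caching pattern as a self-contained sketch; the lookup stub and all names here are invented for illustration:

    #include <stdbool.h>
    #include <stdint.h>

    /* stub standing in for the mdesc walk; pretends every 64 GB chunk is a group */
    static int mdesc_lookup(uint64_t pa, uint64_t *mask, uint64_t *val)
    {
            *mask = ~((64ULL << 30) - 1);
            *val  = pa & *mask;
            return (int)(pa >> 36);
    }

    static int node_for_addr(uint64_t pa)
    {
            static bool     cache_valid;
            static uint64_t cache_mask, cache_val;
            static int      cache_nid;

            if (cache_valid && (pa & cache_mask) == cache_val)
                    return cache_nid;                /* same latency group as last time */

            cache_nid = mdesc_lookup(pa, &cache_mask, &cache_val);
            if (cache_nid < 0)
                    return 0;                        /* fall back to node 0, as above */
            cache_valid = true;
            return cache_nid;
    }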
838 | 860 | ||
839 | static u64 memblock_nid_range(u64 start, u64 end, int *nid) | 861 | static u64 __init memblock_nid_range(u64 start, u64 end, int *nid) |
840 | { | 862 | { |
841 | *nid = find_node(start); | 863 | *nid = find_node(start); |
842 | start += PAGE_SIZE; | 864 | start += PAGE_SIZE; |
@@ -1160,6 +1182,41 @@ int __node_distance(int from, int to) | |||
1160 | return numa_latency[from][to]; | 1182 | return numa_latency[from][to]; |
1161 | } | 1183 | } |
1162 | 1184 | ||
1185 | static int find_numa_node_for_addr(unsigned long pa, | ||
1186 | struct node_mem_mask *pnode_mask) | ||
1187 | { | ||
1188 | struct mdesc_handle *md = mdesc_grab(); | ||
1189 | u64 node, arc; | ||
1190 | int i = 0; | ||
1191 | |||
1192 | node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups"); | ||
1193 | if (node == MDESC_NODE_NULL) | ||
1194 | goto out; | ||
1195 | |||
1196 | mdesc_for_each_node_by_name(md, node, "group") { | ||
1197 | mdesc_for_each_arc(arc, md, node, MDESC_ARC_TYPE_FWD) { | ||
1198 | u64 target = mdesc_arc_target(md, arc); | ||
1199 | struct mdesc_mlgroup *m = find_mlgroup(target); | ||
1200 | |||
1201 | if (!m) | ||
1202 | continue; | ||
1203 | if ((pa & m->mask) == m->match) { | ||
1204 | if (pnode_mask) { | ||
1205 | pnode_mask->mask = m->mask; | ||
1206 | pnode_mask->val = m->match; | ||
1207 | } | ||
1208 | mdesc_release(md); | ||
1209 | return i; | ||
1210 | } | ||
1211 | } | ||
1212 | i++; | ||
1213 | } | ||
1214 | |||
1215 | out: | ||
1216 | mdesc_release(md); | ||
1217 | return -1; | ||
1218 | } | ||
1219 | |||
1163 | static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp) | 1220 | static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp) |
1164 | { | 1221 | { |
1165 | int i; | 1222 | int i; |
diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h index 6160761d5f61..4810e48dbbbf 100644 --- a/arch/tile/include/asm/cache.h +++ b/arch/tile/include/asm/cache.h | |||
@@ -61,4 +61,7 @@ | |||
61 | */ | 61 | */ |
62 | #define __write_once __read_mostly | 62 | #define __write_once __read_mostly |
63 | 63 | ||
64 | /* __ro_after_init is the generic name for the tile arch __write_once. */ | ||
65 | #define __ro_after_init __read_mostly | ||
66 | |||
64 | #endif /* _ASM_TILE_CACHE_H */ | 67 | #endif /* _ASM_TILE_CACHE_H */ |
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c index 178989e6d3e3..ea960d660917 100644 --- a/arch/tile/kernel/time.c +++ b/arch/tile/kernel/time.c | |||
@@ -218,8 +218,8 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num) | |||
218 | */ | 218 | */ |
219 | unsigned long long sched_clock(void) | 219 | unsigned long long sched_clock(void) |
220 | { | 220 | { |
221 | return clocksource_cyc2ns(get_cycles(), | 221 | return mult_frac(get_cycles(), |
222 | sched_clock_mult, SCHED_CLOCK_SHIFT); | 222 | sched_clock_mult, 1ULL << SCHED_CLOCK_SHIFT); |
223 | } | 223 | } |
224 | 224 | ||
225 | int setup_profiling_timer(unsigned int multiplier) | 225 | int setup_profiling_timer(unsigned int multiplier) |
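The tile sched_clock() switch to mult_frac() sidesteps the 64-bit overflow of cycles * mult that the old cyc2ns-style computation hits once the cycle counter is large: mult_frac(x, n, d) evaluates (x / d) * n + ((x % d) * n) / d, which equals x * n / d without ever forming the full product. A self-contained check of that identity (the values are arbitrary):

    #include <stdint.h>
    #include <stdio.h>

    /* same quotient/remainder split the kernel's mult_frac() macro performs */
    static uint64_t mult_frac64(uint64_t x, uint64_t numer, uint64_t denom)
    {
            uint64_t quot = x / denom;
            uint64_t rem  = x % denom;

            return quot * numer + (rem * numer) / denom;
    }

    int main(void)
    {
            uint64_t cycles = 0x0123456789abcdefULL;    /* big enough that cycles * mult overflows */
            uint64_t mult = 10000000, shift = 20;

            printf("%llu\n", (unsigned long long)mult_frac64(cycles, mult, 1ULL << shift));
            return 0;
    }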
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 536ccfcc01c6..34d9e15857c3 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile | |||
@@ -40,8 +40,8 @@ GCOV_PROFILE := n | |||
40 | UBSAN_SANITIZE :=n | 40 | UBSAN_SANITIZE :=n |
41 | 41 | ||
42 | LDFLAGS := -m elf_$(UTS_MACHINE) | 42 | LDFLAGS := -m elf_$(UTS_MACHINE) |
43 | ifeq ($(CONFIG_RELOCATABLE),y) | 43 | # Compressed kernel should be built as PIE since it may be loaded at any |
44 | # If kernel is relocatable, build compressed kernel as PIE. | 44 | # address by the bootloader. |
45 | ifeq ($(CONFIG_X86_32),y) | 45 | ifeq ($(CONFIG_X86_32),y) |
46 | LDFLAGS += $(call ld-option, -pie) $(call ld-option, --no-dynamic-linker) | 46 | LDFLAGS += $(call ld-option, -pie) $(call ld-option, --no-dynamic-linker) |
47 | else | 47 | else |
@@ -51,7 +51,6 @@ else | |||
51 | LDFLAGS += $(shell $(LD) --help 2>&1 | grep -q "\-z noreloc-overflow" \ | 51 | LDFLAGS += $(shell $(LD) --help 2>&1 | grep -q "\-z noreloc-overflow" \ |
52 | && echo "-z noreloc-overflow -pie --no-dynamic-linker") | 52 | && echo "-z noreloc-overflow -pie --no-dynamic-linker") |
53 | endif | 53 | endif |
54 | endif | ||
55 | LDFLAGS_vmlinux := -T | 54 | LDFLAGS_vmlinux := -T |
56 | 55 | ||
57 | hostprogs-y := mkpiggy | 56 | hostprogs-y := mkpiggy |
diff --git a/arch/x86/boot/cpu.c b/arch/x86/boot/cpu.c index 26240dde081e..4224ede43b4e 100644 --- a/arch/x86/boot/cpu.c +++ b/arch/x86/boot/cpu.c | |||
@@ -87,6 +87,12 @@ int validate_cpu(void) | |||
87 | return -1; | 87 | return -1; |
88 | } | 88 | } |
89 | 89 | ||
90 | if (CONFIG_X86_MINIMUM_CPU_FAMILY <= 4 && !IS_ENABLED(CONFIG_M486) && | ||
91 | !has_eflag(X86_EFLAGS_ID)) { | ||
92 | printf("This kernel requires a CPU with the CPUID instruction. Build with CONFIG_M486=y to run on this CPU.\n"); | ||
93 | return -1; | ||
94 | } | ||
95 | |||
90 | if (err_flags) { | 96 | if (err_flags) { |
91 | puts("This kernel requires the following features " | 97 | puts("This kernel requires the following features " |
92 | "not present on the CPU:\n"); | 98 | "not present on the CPU:\n"); |
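The added check rejects CPUs without CPUID on kernels whose minimum CPU family still allows such parts (and that are not built with CONFIG_M486): has_eflag(X86_EFLAGS_ID) probes for the CPUID instruction by trying to toggle bit 21 of EFLAGS, which pre-CPUID parts cannot change. The textbook form of that probe, as a hedged sketch rather than the kernel's has_eflag() verbatim:

    /* nonzero when EFLAGS.ID (bit 21) can be toggled, i.e. CPUID is available */
    static int cpu_has_cpuid(void)
    {
            unsigned long f0, f1;

            asm volatile("pushf\n\t"            /* save the original flags */
                         "pushf\n\t"
                         "pop %0\n\t"           /* f0 = EFLAGS */
                         "mov %0, %1\n\t"
                         "xor %2, %1\n\t"       /* flip the ID bit */
                         "push %1\n\t"
                         "popf\n\t"
                         "pushf\n\t"
                         "pop %1\n\t"           /* f1 = EFLAGS after the round trip */
                         "popf"                 /* restore the original flags */
                         : "=&r" (f0), "=&r" (f1)
                         : "ri" (1UL << 21));

            return !!((f0 ^ f1) & (1UL << 21));
    }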
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c index f5f4b3fbbbc2..afb222b63cae 100644 --- a/arch/x86/events/amd/core.c +++ b/arch/x86/events/amd/core.c | |||
@@ -662,7 +662,13 @@ static int __init amd_core_pmu_init(void) | |||
662 | pr_cont("Fam15h "); | 662 | pr_cont("Fam15h "); |
663 | x86_pmu.get_event_constraints = amd_get_event_constraints_f15h; | 663 | x86_pmu.get_event_constraints = amd_get_event_constraints_f15h; |
664 | break; | 664 | break; |
665 | 665 | case 0x17: | |
666 | pr_cont("Fam17h "); | ||
667 | /* | ||
668 | * In family 17h, there are no event constraints in the PMC hardware. | ||
669 | * We fallback to using default amd_get_event_constraints. | ||
670 | */ | ||
671 | break; | ||
666 | default: | 672 | default: |
667 | pr_err("core perfctr but no constraints; unknown hardware!\n"); | 673 | pr_err("core perfctr but no constraints; unknown hardware!\n"); |
668 | return -ENODEV; | 674 | return -ENODEV; |
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index d31735f37ed7..9d4bf3ab049e 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c | |||
@@ -2352,7 +2352,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent | |||
2352 | frame.next_frame = 0; | 2352 | frame.next_frame = 0; |
2353 | frame.return_address = 0; | 2353 | frame.return_address = 0; |
2354 | 2354 | ||
2355 | if (!access_ok(VERIFY_READ, fp, 8)) | 2355 | if (!valid_user_frame(fp, sizeof(frame))) |
2356 | break; | 2356 | break; |
2357 | 2357 | ||
2358 | bytes = __copy_from_user_nmi(&frame.next_frame, fp, 4); | 2358 | bytes = __copy_from_user_nmi(&frame.next_frame, fp, 4); |
@@ -2362,9 +2362,6 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent | |||
2362 | if (bytes != 0) | 2362 | if (bytes != 0) |
2363 | break; | 2363 | break; |
2364 | 2364 | ||
2365 | if (!valid_user_frame(fp, sizeof(frame))) | ||
2366 | break; | ||
2367 | |||
2368 | perf_callchain_store(entry, cs_base + frame.return_address); | 2365 | perf_callchain_store(entry, cs_base + frame.return_address); |
2369 | fp = compat_ptr(ss_base + frame.next_frame); | 2366 | fp = compat_ptr(ss_base + frame.next_frame); |
2370 | } | 2367 | } |
@@ -2413,7 +2410,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs | |||
2413 | frame.next_frame = NULL; | 2410 | frame.next_frame = NULL; |
2414 | frame.return_address = 0; | 2411 | frame.return_address = 0; |
2415 | 2412 | ||
2416 | if (!access_ok(VERIFY_READ, fp, sizeof(*fp) * 2)) | 2413 | if (!valid_user_frame(fp, sizeof(frame))) |
2417 | break; | 2414 | break; |
2418 | 2415 | ||
2419 | bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp)); | 2416 | bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp)); |
@@ -2423,9 +2420,6 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs | |||
2423 | if (bytes != 0) | 2420 | if (bytes != 0) |
2424 | break; | 2421 | break; |
2425 | 2422 | ||
2426 | if (!valid_user_frame(fp, sizeof(frame))) | ||
2427 | break; | ||
2428 | |||
2429 | perf_callchain_store(entry, frame.return_address); | 2423 | perf_callchain_store(entry, frame.return_address); |
2430 | fp = (void __user *)frame.next_frame; | 2424 | fp = (void __user *)frame.next_frame; |
2431 | } | 2425 | } |
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 0319311dbdbb..be202390bbd3 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c | |||
@@ -1108,20 +1108,20 @@ static void setup_pebs_sample_data(struct perf_event *event, | |||
1108 | } | 1108 | } |
1109 | 1109 | ||
1110 | /* | 1110 | /* |
1111 | * We use the interrupt regs as a base because the PEBS record | 1111 | * We use the interrupt regs as a base because the PEBS record does not |
1112 | * does not contain a full regs set, specifically it seems to | 1112 | * contain a full regs set, specifically it seems to lack segment |
1113 | * lack segment descriptors, which get used by things like | 1113 | * descriptors, which get used by things like user_mode(). |
1114 | * user_mode(). | ||
1115 | * | 1114 | * |
1116 | * In the simple case fix up only the IP and BP,SP regs, for | 1115 | * In the simple case fix up only the IP for PERF_SAMPLE_IP. |
1117 | * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly. | 1116 | * |
1118 | * A possible PERF_SAMPLE_REGS will have to transfer all regs. | 1117 | * We must however always use BP,SP from iregs for the unwinder to stay |
1118 | * sane; the record BP,SP can point into thin air when the record is | ||
1119 | * from a previous PMI context or an (I)RET happend between the record | ||
1120 | * and PMI. | ||
1119 | */ | 1121 | */ |
1120 | *regs = *iregs; | 1122 | *regs = *iregs; |
1121 | regs->flags = pebs->flags; | 1123 | regs->flags = pebs->flags; |
1122 | set_linear_ip(regs, pebs->ip); | 1124 | set_linear_ip(regs, pebs->ip); |
1123 | regs->bp = pebs->bp; | ||
1124 | regs->sp = pebs->sp; | ||
1125 | 1125 | ||
1126 | if (sample_type & PERF_SAMPLE_REGS_INTR) { | 1126 | if (sample_type & PERF_SAMPLE_REGS_INTR) { |
1127 | regs->ax = pebs->ax; | 1127 | regs->ax = pebs->ax; |
@@ -1130,10 +1130,21 @@ static void setup_pebs_sample_data(struct perf_event *event, | |||
1130 | regs->dx = pebs->dx; | 1130 | regs->dx = pebs->dx; |
1131 | regs->si = pebs->si; | 1131 | regs->si = pebs->si; |
1132 | regs->di = pebs->di; | 1132 | regs->di = pebs->di; |
1133 | regs->bp = pebs->bp; | ||
1134 | regs->sp = pebs->sp; | ||
1135 | 1133 | ||
1136 | regs->flags = pebs->flags; | 1134 | /* |
1135 | * Per the above; only set BP,SP if we don't need callchains. | ||
1136 | * | ||
1137 | * XXX: does this make sense? | ||
1138 | */ | ||
1139 | if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) { | ||
1140 | regs->bp = pebs->bp; | ||
1141 | regs->sp = pebs->sp; | ||
1142 | } | ||
1143 | |||
1144 | /* | ||
1145 | * Preserve PERF_EFLAGS_VM from set_linear_ip(). | ||
1146 | */ | ||
1147 | regs->flags = pebs->flags | (regs->flags & PERF_EFLAGS_VM); | ||
1137 | #ifndef CONFIG_X86_32 | 1148 | #ifndef CONFIG_X86_32 |
1138 | regs->r8 = pebs->r8; | 1149 | regs->r8 = pebs->r8; |
1139 | regs->r9 = pebs->r9; | 1150 | regs->r9 = pebs->r9; |
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index efca2685d876..dbaaf7dc8373 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c | |||
@@ -319,9 +319,9 @@ static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type, | |||
319 | */ | 319 | */ |
320 | static int uncore_pmu_event_init(struct perf_event *event); | 320 | static int uncore_pmu_event_init(struct perf_event *event); |
321 | 321 | ||
322 | static bool is_uncore_event(struct perf_event *event) | 322 | static bool is_box_event(struct intel_uncore_box *box, struct perf_event *event) |
323 | { | 323 | { |
324 | return event->pmu->event_init == uncore_pmu_event_init; | 324 | return &box->pmu->pmu == event->pmu; |
325 | } | 325 | } |
326 | 326 | ||
327 | static int | 327 | static int |
@@ -340,7 +340,7 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, | |||
340 | 340 | ||
341 | n = box->n_events; | 341 | n = box->n_events; |
342 | 342 | ||
343 | if (is_uncore_event(leader)) { | 343 | if (is_box_event(box, leader)) { |
344 | box->event_list[n] = leader; | 344 | box->event_list[n] = leader; |
345 | n++; | 345 | n++; |
346 | } | 346 | } |
@@ -349,7 +349,7 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, | |||
349 | return n; | 349 | return n; |
350 | 350 | ||
351 | list_for_each_entry(event, &leader->sibling_list, group_entry) { | 351 | list_for_each_entry(event, &leader->sibling_list, group_entry) { |
352 | if (!is_uncore_event(event) || | 352 | if (!is_box_event(box, event) || |
353 | event->state <= PERF_EVENT_STATE_OFF) | 353 | event->state <= PERF_EVENT_STATE_OFF) |
354 | continue; | 354 | continue; |
355 | 355 | ||
diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c index 5f845eef9a4d..a3dcc12bef4a 100644 --- a/arch/x86/events/intel/uncore_snb.c +++ b/arch/x86/events/intel/uncore_snb.c | |||
@@ -8,8 +8,12 @@ | |||
8 | #define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00 | 8 | #define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00 |
9 | #define PCI_DEVICE_ID_INTEL_HSW_U_IMC 0x0a04 | 9 | #define PCI_DEVICE_ID_INTEL_HSW_U_IMC 0x0a04 |
10 | #define PCI_DEVICE_ID_INTEL_BDW_IMC 0x1604 | 10 | #define PCI_DEVICE_ID_INTEL_BDW_IMC 0x1604 |
11 | #define PCI_DEVICE_ID_INTEL_SKL_IMC 0x191f | 11 | #define PCI_DEVICE_ID_INTEL_SKL_U_IMC 0x1904 |
12 | #define PCI_DEVICE_ID_INTEL_SKL_U_IMC 0x190c | 12 | #define PCI_DEVICE_ID_INTEL_SKL_Y_IMC 0x190c |
13 | #define PCI_DEVICE_ID_INTEL_SKL_HD_IMC 0x1900 | ||
14 | #define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC 0x1910 | ||
15 | #define PCI_DEVICE_ID_INTEL_SKL_SD_IMC 0x190f | ||
16 | #define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC 0x191f | ||
13 | 17 | ||
14 | /* SNB event control */ | 18 | /* SNB event control */ |
15 | #define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff | 19 | #define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff |
@@ -486,24 +490,12 @@ static int snb_uncore_imc_event_add(struct perf_event *event, int flags) | |||
486 | 490 | ||
487 | snb_uncore_imc_event_start(event, 0); | 491 | snb_uncore_imc_event_start(event, 0); |
488 | 492 | ||
489 | box->n_events++; | ||
490 | |||
491 | return 0; | 493 | return 0; |
492 | } | 494 | } |
493 | 495 | ||
494 | static void snb_uncore_imc_event_del(struct perf_event *event, int flags) | 496 | static void snb_uncore_imc_event_del(struct perf_event *event, int flags) |
495 | { | 497 | { |
496 | struct intel_uncore_box *box = uncore_event_to_box(event); | ||
497 | int i; | ||
498 | |||
499 | snb_uncore_imc_event_stop(event, PERF_EF_UPDATE); | 498 | snb_uncore_imc_event_stop(event, PERF_EF_UPDATE); |
500 | |||
501 | for (i = 0; i < box->n_events; i++) { | ||
502 | if (event == box->event_list[i]) { | ||
503 | --box->n_events; | ||
504 | break; | ||
505 | } | ||
506 | } | ||
507 | } | 499 | } |
508 | 500 | ||
509 | int snb_pci2phy_map_init(int devid) | 501 | int snb_pci2phy_map_init(int devid) |
@@ -616,13 +608,29 @@ static const struct pci_device_id bdw_uncore_pci_ids[] = { | |||
616 | 608 | ||
617 | static const struct pci_device_id skl_uncore_pci_ids[] = { | 609 | static const struct pci_device_id skl_uncore_pci_ids[] = { |
618 | { /* IMC */ | 610 | { /* IMC */ |
619 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_IMC), | 611 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_Y_IMC), |
620 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | 612 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), |
621 | }, | 613 | }, |
622 | { /* IMC */ | 614 | { /* IMC */ |
623 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC), | 615 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC), |
624 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | 616 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), |
625 | }, | 617 | }, |
618 | { /* IMC */ | ||
619 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HD_IMC), | ||
620 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | ||
621 | }, | ||
622 | { /* IMC */ | ||
623 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HQ_IMC), | ||
624 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | ||
625 | }, | ||
626 | { /* IMC */ | ||
627 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SD_IMC), | ||
628 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | ||
629 | }, | ||
630 | { /* IMC */ | ||
631 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC), | ||
632 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | ||
633 | }, | ||
626 | 634 | ||
627 | { /* end: all zeroes */ }, | 635 | { /* end: all zeroes */ }, |
628 | }; | 636 | }; |
@@ -666,8 +674,12 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = { | |||
666 | IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core Processor */ | 674 | IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core Processor */ |
667 | IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core ULT Mobile Processor */ | 675 | IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core ULT Mobile Processor */ |
668 | IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver), /* 5th Gen Core U */ | 676 | IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver), /* 5th Gen Core U */ |
669 | IMC_DEV(SKL_IMC, &skl_uncore_pci_driver), /* 6th Gen Core */ | 677 | IMC_DEV(SKL_Y_IMC, &skl_uncore_pci_driver), /* 6th Gen Core Y */ |
670 | IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver), /* 6th Gen Core U */ | 678 | IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver), /* 6th Gen Core U */ |
679 | IMC_DEV(SKL_HD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Dual Core */ | ||
680 | IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Quad Core */ | ||
681 | IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Dual Core */ | ||
682 | IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Quad Core */ | ||
671 | { /* end marker */ } | 683 | { /* end marker */ } |
672 | }; | 684 | }; |
673 | 685 | ||
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 5874d8de1f8d..a77ee026643d 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h | |||
@@ -113,7 +113,7 @@ struct debug_store { | |||
113 | * Per register state. | 113 | * Per register state. |
114 | */ | 114 | */ |
115 | struct er_account { | 115 | struct er_account { |
116 | raw_spinlock_t lock; /* per-core: protect structure */ | 116 | raw_spinlock_t lock; /* per-core: protect structure */ |
117 | u64 config; /* extra MSR config */ | 117 | u64 config; /* extra MSR config */ |
118 | u64 reg; /* extra MSR number */ | 118 | u64 reg; /* extra MSR number */ |
119 | atomic_t ref; /* reference count */ | 119 | atomic_t ref; /* reference count */ |
diff --git a/arch/x86/include/asm/intel-mid.h b/arch/x86/include/asm/intel-mid.h index 5b6753d1f7f4..49da9f497b90 100644 --- a/arch/x86/include/asm/intel-mid.h +++ b/arch/x86/include/asm/intel-mid.h | |||
@@ -17,6 +17,7 @@ | |||
17 | 17 | ||
18 | extern int intel_mid_pci_init(void); | 18 | extern int intel_mid_pci_init(void); |
19 | extern int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state); | 19 | extern int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state); |
20 | extern pci_power_t intel_mid_pci_get_power_state(struct pci_dev *pdev); | ||
20 | 21 | ||
21 | extern void intel_mid_pwr_power_off(void); | 22 | extern void intel_mid_pwr_power_off(void); |
22 | 23 | ||
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index b81fe2d63e15..1e81a37c034e 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
@@ -347,7 +347,6 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c) | |||
347 | #ifdef CONFIG_SMP | 347 | #ifdef CONFIG_SMP |
348 | unsigned bits; | 348 | unsigned bits; |
349 | int cpu = smp_processor_id(); | 349 | int cpu = smp_processor_id(); |
350 | unsigned int socket_id, core_complex_id; | ||
351 | 350 | ||
352 | bits = c->x86_coreid_bits; | 351 | bits = c->x86_coreid_bits; |
353 | /* Low order bits define the core id (index of core in socket) */ | 352 | /* Low order bits define the core id (index of core in socket) */ |
@@ -365,10 +364,7 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c) | |||
365 | if (c->x86 != 0x17 || !cpuid_edx(0x80000006)) | 364 | if (c->x86 != 0x17 || !cpuid_edx(0x80000006)) |
366 | return; | 365 | return; |
367 | 366 | ||
368 | socket_id = (c->apicid >> bits) - 1; | 367 | per_cpu(cpu_llc_id, cpu) = c->apicid >> 3; |
369 | core_complex_id = (c->apicid & ((1 << bits) - 1)) >> 3; | ||
370 | |||
371 | per_cpu(cpu_llc_id, cpu) = (socket_id << 3) | core_complex_id; | ||
372 | #endif | 368 | #endif |
373 | } | 369 | } |
374 | 370 | ||
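
The amd_detect_cmp() change above collapses the socket/core-complex arithmetic into a single shift: on these parts every eight logical CPUs (one core complex) share an L3, so the LLC id is simply the APIC id with the low three bits dropped. A minimal stand-alone sketch of that derivation; the 3-bit thread field is taken from the patch itself, not queried from CPUID:

    #include <stdio.h>

    /* Assumption from the patch: 8 logical CPUs (2^3) share one L3, so the
     * low 3 APIC-id bits select a thread within the core complex and the
     * bits above them identify the complex (and thus the LLC). */
    #define CCX_SHIFT 3

    static unsigned int llc_id_from_apicid(unsigned int apicid)
    {
            return apicid >> CCX_SHIFT;
    }

    int main(void)
    {
            unsigned int apicid;

            for (apicid = 0; apicid < 16; apicid++)
                    printf("apicid %2u -> llc_id %u\n",
                           apicid, llc_id_from_apicid(apicid));
            return 0;
    }
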
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 9bd910a7dd0a..cc9e980c68ec 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -979,6 +979,35 @@ static void x86_init_cache_qos(struct cpuinfo_x86 *c) | |||
979 | } | 979 | } |
980 | 980 | ||
981 | /* | 981 | /* |
982 | * The physical to logical package id mapping is initialized from the | ||
983 | * acpi/mptables information. Make sure that CPUID actually agrees with | ||
984 | * that. | ||
985 | */ | ||
986 | static void sanitize_package_id(struct cpuinfo_x86 *c) | ||
987 | { | ||
988 | #ifdef CONFIG_SMP | ||
989 | unsigned int pkg, apicid, cpu = smp_processor_id(); | ||
990 | |||
991 | apicid = apic->cpu_present_to_apicid(cpu); | ||
992 | pkg = apicid >> boot_cpu_data.x86_coreid_bits; | ||
993 | |||
994 | if (apicid != c->initial_apicid) { | ||
995 | pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x CPUID: %x\n", | ||
996 | cpu, apicid, c->initial_apicid); | ||
997 | c->initial_apicid = apicid; | ||
998 | } | ||
999 | if (pkg != c->phys_proc_id) { | ||
1000 | pr_err(FW_BUG "CPU%u: Using firmware package id %u instead of %u\n", | ||
1001 | cpu, pkg, c->phys_proc_id); | ||
1002 | c->phys_proc_id = pkg; | ||
1003 | } | ||
1004 | c->logical_proc_id = topology_phys_to_logical_pkg(pkg); | ||
1005 | #else | ||
1006 | c->logical_proc_id = 0; | ||
1007 | #endif | ||
1008 | } | ||
1009 | |||
1010 | /* | ||
982 | * This does the hard work of actually picking apart the CPU stuff... | 1011 | * This does the hard work of actually picking apart the CPU stuff... |
983 | */ | 1012 | */ |
984 | static void identify_cpu(struct cpuinfo_x86 *c) | 1013 | static void identify_cpu(struct cpuinfo_x86 *c) |
@@ -1103,8 +1132,7 @@ static void identify_cpu(struct cpuinfo_x86 *c) | |||
1103 | #ifdef CONFIG_NUMA | 1132 | #ifdef CONFIG_NUMA |
1104 | numa_add_cpu(smp_processor_id()); | 1133 | numa_add_cpu(smp_processor_id()); |
1105 | #endif | 1134 | #endif |
1106 | /* The boot/hotplug time assigment got cleared, restore it */ | 1135 | sanitize_package_id(c); |
1107 | c->logical_proc_id = topology_phys_to_logical_pkg(c->phys_proc_id); | ||
1108 | } | 1136 | } |
1109 | 1137 | ||
1110 | /* | 1138 | /* |
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index 9b7cf5c28f5f..85f854b98a9d 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c | |||
@@ -112,7 +112,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, | |||
112 | for (; stack < stack_info.end; stack++) { | 112 | for (; stack < stack_info.end; stack++) { |
113 | unsigned long real_addr; | 113 | unsigned long real_addr; |
114 | int reliable = 0; | 114 | int reliable = 0; |
115 | unsigned long addr = *stack; | 115 | unsigned long addr = READ_ONCE_NOCHECK(*stack); |
116 | unsigned long *ret_addr_p = | 116 | unsigned long *ret_addr_p = |
117 | unwind_get_return_address_ptr(&state); | 117 | unwind_get_return_address_ptr(&state); |
118 | 118 | ||
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index 47004010ad5d..ebb4e95fbd74 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c | |||
@@ -521,14 +521,14 @@ void fpu__clear(struct fpu *fpu) | |||
521 | { | 521 | { |
522 | WARN_ON_FPU(fpu != ¤t->thread.fpu); /* Almost certainly an anomaly */ | 522 | WARN_ON_FPU(fpu != ¤t->thread.fpu); /* Almost certainly an anomaly */ |
523 | 523 | ||
524 | if (!use_eager_fpu() || !static_cpu_has(X86_FEATURE_FPU)) { | 524 | fpu__drop(fpu); |
525 | /* FPU state will be reallocated lazily at the first use. */ | 525 | |
526 | fpu__drop(fpu); | 526 | /* |
527 | } else { | 527 | * Make sure fpstate is cleared and initialized. |
528 | if (!fpu->fpstate_active) { | 528 | */ |
529 | fpu__activate_curr(fpu); | 529 | if (static_cpu_has(X86_FEATURE_FPU)) { |
530 | user_fpu_begin(); | 530 | fpu__activate_curr(fpu); |
531 | } | 531 | user_fpu_begin(); |
532 | copy_init_fpstate_to_fpregs(); | 532 | copy_init_fpstate_to_fpregs(); |
533 | } | 533 | } |
534 | } | 534 | } |
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index b6b2f0264af3..2dabea46f039 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S | |||
@@ -665,14 +665,17 @@ __PAGE_ALIGNED_BSS | |||
665 | initial_pg_pmd: | 665 | initial_pg_pmd: |
666 | .fill 1024*KPMDS,4,0 | 666 | .fill 1024*KPMDS,4,0 |
667 | #else | 667 | #else |
668 | ENTRY(initial_page_table) | 668 | .globl initial_page_table |
669 | initial_page_table: | ||
669 | .fill 1024,4,0 | 670 | .fill 1024,4,0 |
670 | #endif | 671 | #endif |
671 | initial_pg_fixmap: | 672 | initial_pg_fixmap: |
672 | .fill 1024,4,0 | 673 | .fill 1024,4,0 |
673 | ENTRY(empty_zero_page) | 674 | .globl empty_zero_page |
675 | empty_zero_page: | ||
674 | .fill 4096,1,0 | 676 | .fill 4096,1,0 |
675 | ENTRY(swapper_pg_dir) | 677 | .globl swapper_pg_dir |
678 | swapper_pg_dir: | ||
676 | .fill 1024,4,0 | 679 | .fill 1024,4,0 |
677 | EXPORT_SYMBOL(empty_zero_page) | 680 | EXPORT_SYMBOL(empty_zero_page) |
678 | 681 | ||
diff --git a/arch/x86/kernel/sysfb_simplefb.c b/arch/x86/kernel/sysfb_simplefb.c index 764a29f84de7..85195d447a92 100644 --- a/arch/x86/kernel/sysfb_simplefb.c +++ b/arch/x86/kernel/sysfb_simplefb.c | |||
@@ -66,13 +66,36 @@ __init int create_simplefb(const struct screen_info *si, | |||
66 | { | 66 | { |
67 | struct platform_device *pd; | 67 | struct platform_device *pd; |
68 | struct resource res; | 68 | struct resource res; |
69 | unsigned long len; | 69 | u64 base, size; |
70 | u32 length; | ||
70 | 71 | ||
71 | /* don't use lfb_size as it may contain the whole VMEM instead of only | 72 | /* |
72 | * the part that is occupied by the framebuffer */ | 73 | * If the 64BIT_BASE capability is set, ext_lfb_base will contain the |
73 | len = mode->height * mode->stride; | 74 | * upper half of the base address. Assemble the address, then make sure |
74 | len = PAGE_ALIGN(len); | 75 | * it is valid and we can actually access it. |
75 | if (len > (u64)si->lfb_size << 16) { | 76 | */ |
77 | base = si->lfb_base; | ||
78 | if (si->capabilities & VIDEO_CAPABILITY_64BIT_BASE) | ||
79 | base |= (u64)si->ext_lfb_base << 32; | ||
80 | if (!base || (u64)(resource_size_t)base != base) { | ||
81 | printk(KERN_DEBUG "sysfb: inaccessible VRAM base\n"); | ||
82 | return -EINVAL; | ||
83 | } | ||
84 | |||
85 | /* | ||
86 | * Don't use lfb_size as IORESOURCE size, since it may contain the | ||
87 | * entire VMEM, and thus require huge mappings. Use just the part we | ||
88 | * need, that is, the part where the framebuffer is located. But verify | ||
89 | * that it does not exceed the advertised VMEM. | ||
90 | * Note that in case of VBE, the lfb_size is shifted by 16 bits for | ||
91 | * historical reasons. | ||
92 | */ | ||
93 | size = si->lfb_size; | ||
94 | if (si->orig_video_isVGA == VIDEO_TYPE_VLFB) | ||
95 | size <<= 16; | ||
96 | length = mode->height * mode->stride; | ||
97 | length = PAGE_ALIGN(length); | ||
98 | if (length > size) { | ||
76 | printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n"); | 99 | printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n"); |
77 | return -EINVAL; | 100 | return -EINVAL; |
78 | } | 101 | } |
@@ -81,8 +104,8 @@ __init int create_simplefb(const struct screen_info *si, | |||
81 | memset(&res, 0, sizeof(res)); | 104 | memset(&res, 0, sizeof(res)); |
82 | res.flags = IORESOURCE_MEM | IORESOURCE_BUSY; | 105 | res.flags = IORESOURCE_MEM | IORESOURCE_BUSY; |
83 | res.name = simplefb_resname; | 106 | res.name = simplefb_resname; |
84 | res.start = si->lfb_base; | 107 | res.start = base; |
85 | res.end = si->lfb_base + len - 1; | 108 | res.end = res.start + length - 1; |
86 | if (res.end <= res.start) | 109 | if (res.end <= res.start) |
87 | return -EINVAL; | 110 | return -EINVAL; |
88 | 111 | ||
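
The create_simplefb() rework above assembles a 64-bit framebuffer base from lfb_base/ext_lfb_base and applies the historical VBE 64K size scaling before validating the mapping. A hedged user-space sketch of just that arithmetic; the struct below is a trimmed stand-in for struct screen_info, and the boolean flags replace the real capability/type constants:

    #include <stdint.h>
    #include <stdio.h>

    /* Trimmed stand-in for the handful of screen_info fields the hunk uses. */
    struct fb_info_raw {
            uint32_t lfb_base;      /* low 32 bits of the framebuffer address   */
            uint32_t ext_lfb_base;  /* high 32 bits, valid only with 64-bit cap */
            uint32_t lfb_size;      /* VBE reports this in 64 KiB units         */
            int has_64bit_base;     /* VIDEO_CAPABILITY_64BIT_BASE set?         */
            int is_vlfb;            /* orig_video_isVGA == VIDEO_TYPE_VLFB?     */
    };

    static uint64_t fb_phys_base(const struct fb_info_raw *si)
    {
            uint64_t base = si->lfb_base;

            if (si->has_64bit_base)
                    base |= (uint64_t)si->ext_lfb_base << 32;
            return base;
    }

    static uint64_t fb_vmem_size(const struct fb_info_raw *si)
    {
            uint64_t size = si->lfb_size;

            if (si->is_vlfb)        /* historical VBE quirk: units of 64 KiB */
                    size <<= 16;
            return size;
    }

    int main(void)
    {
            struct fb_info_raw si = {
                    .lfb_base = 0xe0000000, .ext_lfb_base = 0x1,
                    .lfb_size = 0x80, .has_64bit_base = 1, .is_vlfb = 1,
            };

            printf("base=%#llx size=%#llx\n",
                   (unsigned long long)fb_phys_base(&si),
                   (unsigned long long)fb_vmem_size(&si));
            return 0;
    }
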
diff --git a/arch/x86/kernel/unwind_guess.c b/arch/x86/kernel/unwind_guess.c index 2d721e533cf4..b80e8bf43cc6 100644 --- a/arch/x86/kernel/unwind_guess.c +++ b/arch/x86/kernel/unwind_guess.c | |||
@@ -7,11 +7,13 @@ | |||
7 | 7 | ||
8 | unsigned long unwind_get_return_address(struct unwind_state *state) | 8 | unsigned long unwind_get_return_address(struct unwind_state *state) |
9 | { | 9 | { |
10 | unsigned long addr = READ_ONCE_NOCHECK(*state->sp); | ||
11 | |||
10 | if (unwind_done(state)) | 12 | if (unwind_done(state)) |
11 | return 0; | 13 | return 0; |
12 | 14 | ||
13 | return ftrace_graph_ret_addr(state->task, &state->graph_idx, | 15 | return ftrace_graph_ret_addr(state->task, &state->graph_idx, |
14 | *state->sp, state->sp); | 16 | addr, state->sp); |
15 | } | 17 | } |
16 | EXPORT_SYMBOL_GPL(unwind_get_return_address); | 18 | EXPORT_SYMBOL_GPL(unwind_get_return_address); |
17 | 19 | ||
@@ -23,8 +25,10 @@ bool unwind_next_frame(struct unwind_state *state) | |||
23 | return false; | 25 | return false; |
24 | 26 | ||
25 | do { | 27 | do { |
28 | unsigned long addr = READ_ONCE_NOCHECK(*state->sp); | ||
29 | |||
26 | for (state->sp++; state->sp < info->end; state->sp++) | 30 | for (state->sp++; state->sp < info->end; state->sp++) |
27 | if (__kernel_text_address(*state->sp)) | 31 | if (__kernel_text_address(addr)) |
28 | return true; | 32 | return true; |
29 | 33 | ||
30 | state->sp = info->next_sp; | 34 | state->sp = info->next_sp; |
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index cbd7b92585bb..a3ce9d260d68 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c | |||
@@ -2105,16 +2105,10 @@ static int em_iret(struct x86_emulate_ctxt *ctxt) | |||
2105 | static int em_jmp_far(struct x86_emulate_ctxt *ctxt) | 2105 | static int em_jmp_far(struct x86_emulate_ctxt *ctxt) |
2106 | { | 2106 | { |
2107 | int rc; | 2107 | int rc; |
2108 | unsigned short sel, old_sel; | 2108 | unsigned short sel; |
2109 | struct desc_struct old_desc, new_desc; | 2109 | struct desc_struct new_desc; |
2110 | const struct x86_emulate_ops *ops = ctxt->ops; | ||
2111 | u8 cpl = ctxt->ops->cpl(ctxt); | 2110 | u8 cpl = ctxt->ops->cpl(ctxt); |
2112 | 2111 | ||
2113 | /* Assignment of RIP may only fail in 64-bit mode */ | ||
2114 | if (ctxt->mode == X86EMUL_MODE_PROT64) | ||
2115 | ops->get_segment(ctxt, &old_sel, &old_desc, NULL, | ||
2116 | VCPU_SREG_CS); | ||
2117 | |||
2118 | memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); | 2112 | memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); |
2119 | 2113 | ||
2120 | rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, | 2114 | rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, |
@@ -2124,12 +2118,10 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt) | |||
2124 | return rc; | 2118 | return rc; |
2125 | 2119 | ||
2126 | rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc); | 2120 | rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc); |
2127 | if (rc != X86EMUL_CONTINUE) { | 2121 | /* Error handling is not implemented. */ |
2128 | WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64); | 2122 | if (rc != X86EMUL_CONTINUE) |
2129 | /* assigning eip failed; restore the old cs */ | 2123 | return X86EMUL_UNHANDLEABLE; |
2130 | ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS); | 2124 | |
2131 | return rc; | ||
2132 | } | ||
2133 | return rc; | 2125 | return rc; |
2134 | } | 2126 | } |
2135 | 2127 | ||
@@ -2189,14 +2181,8 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt) | |||
2189 | { | 2181 | { |
2190 | int rc; | 2182 | int rc; |
2191 | unsigned long eip, cs; | 2183 | unsigned long eip, cs; |
2192 | u16 old_cs; | ||
2193 | int cpl = ctxt->ops->cpl(ctxt); | 2184 | int cpl = ctxt->ops->cpl(ctxt); |
2194 | struct desc_struct old_desc, new_desc; | 2185 | struct desc_struct new_desc; |
2195 | const struct x86_emulate_ops *ops = ctxt->ops; | ||
2196 | |||
2197 | if (ctxt->mode == X86EMUL_MODE_PROT64) | ||
2198 | ops->get_segment(ctxt, &old_cs, &old_desc, NULL, | ||
2199 | VCPU_SREG_CS); | ||
2200 | 2186 | ||
2201 | rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); | 2187 | rc = emulate_pop(ctxt, &eip, ctxt->op_bytes); |
2202 | if (rc != X86EMUL_CONTINUE) | 2188 | if (rc != X86EMUL_CONTINUE) |
@@ -2213,10 +2199,10 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt) | |||
2213 | if (rc != X86EMUL_CONTINUE) | 2199 | if (rc != X86EMUL_CONTINUE) |
2214 | return rc; | 2200 | return rc; |
2215 | rc = assign_eip_far(ctxt, eip, &new_desc); | 2201 | rc = assign_eip_far(ctxt, eip, &new_desc); |
2216 | if (rc != X86EMUL_CONTINUE) { | 2202 | /* Error handling is not implemented. */ |
2217 | WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64); | 2203 | if (rc != X86EMUL_CONTINUE) |
2218 | ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS); | 2204 | return X86EMUL_UNHANDLEABLE; |
2219 | } | 2205 | |
2220 | return rc; | 2206 | return rc; |
2221 | } | 2207 | } |
2222 | 2208 | ||
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c index 1a22de70f7f7..6e219e5c07d2 100644 --- a/arch/x86/kvm/ioapic.c +++ b/arch/x86/kvm/ioapic.c | |||
@@ -94,7 +94,7 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic, | |||
94 | static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic) | 94 | static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic) |
95 | { | 95 | { |
96 | ioapic->rtc_status.pending_eoi = 0; | 96 | ioapic->rtc_status.pending_eoi = 0; |
97 | bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPUS); | 97 | bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID); |
98 | } | 98 | } |
99 | 99 | ||
100 | static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic); | 100 | static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic); |
diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h index 7d2692a49657..1cc6e54436db 100644 --- a/arch/x86/kvm/ioapic.h +++ b/arch/x86/kvm/ioapic.h | |||
@@ -42,13 +42,13 @@ struct kvm_vcpu; | |||
42 | 42 | ||
43 | struct dest_map { | 43 | struct dest_map { |
44 | /* vcpu bitmap where IRQ has been sent */ | 44 | /* vcpu bitmap where IRQ has been sent */ |
45 | DECLARE_BITMAP(map, KVM_MAX_VCPUS); | 45 | DECLARE_BITMAP(map, KVM_MAX_VCPU_ID); |
46 | 46 | ||
47 | /* | 47 | /* |
48 | * Vector sent to a given vcpu, only valid when | 48 | * Vector sent to a given vcpu, only valid when |
49 | * the vcpu's bit in map is set | 49 | * the vcpu's bit in map is set |
50 | */ | 50 | */ |
51 | u8 vectors[KVM_MAX_VCPUS]; | 51 | u8 vectors[KVM_MAX_VCPU_ID]; |
52 | }; | 52 | }; |
53 | 53 | ||
54 | 54 | ||
diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c index 25810b144b58..6c0191615f23 100644 --- a/arch/x86/kvm/irq_comm.c +++ b/arch/x86/kvm/irq_comm.c | |||
@@ -41,6 +41,15 @@ static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e, | |||
41 | bool line_status) | 41 | bool line_status) |
42 | { | 42 | { |
43 | struct kvm_pic *pic = pic_irqchip(kvm); | 43 | struct kvm_pic *pic = pic_irqchip(kvm); |
44 | |||
45 | /* | ||
46 | * XXX: rejecting pic routes when pic isn't in use would be better, | ||
47 | * but the default routing table is installed while kvm->arch.vpic is | ||
48 | * NULL and KVM_CREATE_IRQCHIP can race with KVM_IRQ_LINE. | ||
49 | */ | ||
50 | if (!pic) | ||
51 | return -1; | ||
52 | |||
44 | return kvm_pic_set_irq(pic, e->irqchip.pin, irq_source_id, level); | 53 | return kvm_pic_set_irq(pic, e->irqchip.pin, irq_source_id, level); |
45 | } | 54 | } |
46 | 55 | ||
@@ -49,6 +58,10 @@ static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e, | |||
49 | bool line_status) | 58 | bool line_status) |
50 | { | 59 | { |
51 | struct kvm_ioapic *ioapic = kvm->arch.vioapic; | 60 | struct kvm_ioapic *ioapic = kvm->arch.vioapic; |
61 | |||
62 | if (!ioapic) | ||
63 | return -1; | ||
64 | |||
52 | return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level, | 65 | return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level, |
53 | line_status); | 66 | line_status); |
54 | } | 67 | } |
@@ -156,6 +169,16 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, | |||
156 | } | 169 | } |
157 | 170 | ||
158 | 171 | ||
172 | static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e, | ||
173 | struct kvm *kvm, int irq_source_id, int level, | ||
174 | bool line_status) | ||
175 | { | ||
176 | if (!level) | ||
177 | return -1; | ||
178 | |||
179 | return kvm_hv_synic_set_irq(kvm, e->hv_sint.vcpu, e->hv_sint.sint); | ||
180 | } | ||
181 | |||
159 | int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e, | 182 | int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e, |
160 | struct kvm *kvm, int irq_source_id, int level, | 183 | struct kvm *kvm, int irq_source_id, int level, |
161 | bool line_status) | 184 | bool line_status) |
@@ -163,18 +186,26 @@ int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e, | |||
163 | struct kvm_lapic_irq irq; | 186 | struct kvm_lapic_irq irq; |
164 | int r; | 187 | int r; |
165 | 188 | ||
166 | if (unlikely(e->type != KVM_IRQ_ROUTING_MSI)) | 189 | switch (e->type) { |
167 | return -EWOULDBLOCK; | 190 | case KVM_IRQ_ROUTING_HV_SINT: |
191 | return kvm_hv_set_sint(e, kvm, irq_source_id, level, | ||
192 | line_status); | ||
168 | 193 | ||
169 | if (kvm_msi_route_invalid(kvm, e)) | 194 | case KVM_IRQ_ROUTING_MSI: |
170 | return -EINVAL; | 195 | if (kvm_msi_route_invalid(kvm, e)) |
196 | return -EINVAL; | ||
171 | 197 | ||
172 | kvm_set_msi_irq(kvm, e, &irq); | 198 | kvm_set_msi_irq(kvm, e, &irq); |
173 | 199 | ||
174 | if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL)) | 200 | if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL)) |
175 | return r; | 201 | return r; |
176 | else | 202 | break; |
177 | return -EWOULDBLOCK; | 203 | |
204 | default: | ||
205 | break; | ||
206 | } | ||
207 | |||
208 | return -EWOULDBLOCK; | ||
178 | } | 209 | } |
179 | 210 | ||
180 | int kvm_request_irq_source_id(struct kvm *kvm) | 211 | int kvm_request_irq_source_id(struct kvm *kvm) |
@@ -254,16 +285,6 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin, | |||
254 | srcu_read_unlock(&kvm->irq_srcu, idx); | 285 | srcu_read_unlock(&kvm->irq_srcu, idx); |
255 | } | 286 | } |
256 | 287 | ||
257 | static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e, | ||
258 | struct kvm *kvm, int irq_source_id, int level, | ||
259 | bool line_status) | ||
260 | { | ||
261 | if (!level) | ||
262 | return -1; | ||
263 | |||
264 | return kvm_hv_synic_set_irq(kvm, e->hv_sint.vcpu, e->hv_sint.sint); | ||
265 | } | ||
266 | |||
267 | int kvm_set_routing_entry(struct kvm *kvm, | 288 | int kvm_set_routing_entry(struct kvm *kvm, |
268 | struct kvm_kernel_irq_routing_entry *e, | 289 | struct kvm_kernel_irq_routing_entry *e, |
269 | const struct kvm_irq_routing_entry *ue) | 290 | const struct kvm_irq_routing_entry *ue) |
@@ -423,18 +444,6 @@ void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu, | |||
423 | srcu_read_unlock(&kvm->irq_srcu, idx); | 444 | srcu_read_unlock(&kvm->irq_srcu, idx); |
424 | } | 445 | } |
425 | 446 | ||
426 | int kvm_arch_set_irq(struct kvm_kernel_irq_routing_entry *irq, struct kvm *kvm, | ||
427 | int irq_source_id, int level, bool line_status) | ||
428 | { | ||
429 | switch (irq->type) { | ||
430 | case KVM_IRQ_ROUTING_HV_SINT: | ||
431 | return kvm_hv_set_sint(irq, kvm, irq_source_id, level, | ||
432 | line_status); | ||
433 | default: | ||
434 | return -EWOULDBLOCK; | ||
435 | } | ||
436 | } | ||
437 | |||
438 | void kvm_arch_irq_routing_update(struct kvm *kvm) | 447 | void kvm_arch_irq_routing_update(struct kvm *kvm) |
439 | { | 448 | { |
440 | kvm_hv_irq_routing_update(kvm); | 449 | kvm_hv_irq_routing_update(kvm); |
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 23b99f305382..6f69340f9fa3 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c | |||
@@ -138,7 +138,7 @@ static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map, | |||
138 | *mask = dest_id & 0xff; | 138 | *mask = dest_id & 0xff; |
139 | return true; | 139 | return true; |
140 | case KVM_APIC_MODE_XAPIC_CLUSTER: | 140 | case KVM_APIC_MODE_XAPIC_CLUSTER: |
141 | *cluster = map->xapic_cluster_map[dest_id >> 4]; | 141 | *cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf]; |
142 | *mask = dest_id & 0xf; | 142 | *mask = dest_id & 0xf; |
143 | return true; | 143 | return true; |
144 | default: | 144 | default: |
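
The one-line lapic.c fix above bounds the cluster index: an xAPIC logical destination is 8 bits, the high nibble naming one of 16 clusters and the low nibble a member bitmask, so the index into the 16-entry cluster map must be masked to 4 bits or an oversized destination id walks off the array. A small sketch under that assumption:

    #include <stdint.h>
    #include <stdio.h>

    /* xAPIC cluster mode: dest[7:4] = cluster id, dest[3:0] = CPU mask. */
    static void decode_xapic_cluster(uint32_t dest_id,
                                     unsigned int *cluster, unsigned int *mask)
    {
            *cluster = (dest_id >> 4) & 0xf;  /* masking keeps the index < 16 */
            *mask    = dest_id & 0xf;
    }

    int main(void)
    {
            unsigned int cluster, mask;

            decode_xapic_cluster(0x1f3, &cluster, &mask); /* junk high bits */
            printf("cluster=%u mask=%#x\n", cluster, mask);
            return 0;
    }
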
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 3017de0431bd..04c5d96b1d67 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -210,7 +210,18 @@ static void kvm_on_user_return(struct user_return_notifier *urn) | |||
210 | struct kvm_shared_msrs *locals | 210 | struct kvm_shared_msrs *locals |
211 | = container_of(urn, struct kvm_shared_msrs, urn); | 211 | = container_of(urn, struct kvm_shared_msrs, urn); |
212 | struct kvm_shared_msr_values *values; | 212 | struct kvm_shared_msr_values *values; |
213 | unsigned long flags; | ||
213 | 214 | ||
215 | /* | ||
216 | * Disabling irqs at this point since the following code could be | ||
217 | * interrupted and executed through kvm_arch_hardware_disable() | ||
218 | */ | ||
219 | local_irq_save(flags); | ||
220 | if (locals->registered) { | ||
221 | locals->registered = false; | ||
222 | user_return_notifier_unregister(urn); | ||
223 | } | ||
224 | local_irq_restore(flags); | ||
214 | for (slot = 0; slot < shared_msrs_global.nr; ++slot) { | 225 | for (slot = 0; slot < shared_msrs_global.nr; ++slot) { |
215 | values = &locals->values[slot]; | 226 | values = &locals->values[slot]; |
216 | if (values->host != values->curr) { | 227 | if (values->host != values->curr) { |
@@ -218,8 +229,6 @@ static void kvm_on_user_return(struct user_return_notifier *urn) | |||
218 | values->curr = values->host; | 229 | values->curr = values->host; |
219 | } | 230 | } |
220 | } | 231 | } |
221 | locals->registered = false; | ||
222 | user_return_notifier_unregister(urn); | ||
223 | } | 232 | } |
224 | 233 | ||
225 | static void shared_msr_update(unsigned slot, u32 msr) | 234 | static void shared_msr_update(unsigned slot, u32 msr) |
@@ -1724,18 +1733,23 @@ static void kvm_gen_update_masterclock(struct kvm *kvm) | |||
1724 | 1733 | ||
1725 | static u64 __get_kvmclock_ns(struct kvm *kvm) | 1734 | static u64 __get_kvmclock_ns(struct kvm *kvm) |
1726 | { | 1735 | { |
1727 | struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, 0); | ||
1728 | struct kvm_arch *ka = &kvm->arch; | 1736 | struct kvm_arch *ka = &kvm->arch; |
1729 | s64 ns; | 1737 | struct pvclock_vcpu_time_info hv_clock; |
1730 | 1738 | ||
1731 | if (vcpu->arch.hv_clock.flags & PVCLOCK_TSC_STABLE_BIT) { | 1739 | spin_lock(&ka->pvclock_gtod_sync_lock); |
1732 | u64 tsc = kvm_read_l1_tsc(vcpu, rdtsc()); | 1740 | if (!ka->use_master_clock) { |
1733 | ns = __pvclock_read_cycles(&vcpu->arch.hv_clock, tsc); | 1741 | spin_unlock(&ka->pvclock_gtod_sync_lock); |
1734 | } else { | 1742 | return ktime_get_boot_ns() + ka->kvmclock_offset; |
1735 | ns = ktime_get_boot_ns() + ka->kvmclock_offset; | ||
1736 | } | 1743 | } |
1737 | 1744 | ||
1738 | return ns; | 1745 | hv_clock.tsc_timestamp = ka->master_cycle_now; |
1746 | hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset; | ||
1747 | spin_unlock(&ka->pvclock_gtod_sync_lock); | ||
1748 | |||
1749 | kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL, | ||
1750 | &hv_clock.tsc_shift, | ||
1751 | &hv_clock.tsc_to_system_mul); | ||
1752 | return __pvclock_read_cycles(&hv_clock, rdtsc()); | ||
1739 | } | 1753 | } |
1740 | 1754 | ||
1741 | u64 get_kvmclock_ns(struct kvm *kvm) | 1755 | u64 get_kvmclock_ns(struct kvm *kvm) |
@@ -2596,7 +2610,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) | |||
2596 | case KVM_CAP_PIT_STATE2: | 2610 | case KVM_CAP_PIT_STATE2: |
2597 | case KVM_CAP_SET_IDENTITY_MAP_ADDR: | 2611 | case KVM_CAP_SET_IDENTITY_MAP_ADDR: |
2598 | case KVM_CAP_XEN_HVM: | 2612 | case KVM_CAP_XEN_HVM: |
2599 | case KVM_CAP_ADJUST_CLOCK: | ||
2600 | case KVM_CAP_VCPU_EVENTS: | 2613 | case KVM_CAP_VCPU_EVENTS: |
2601 | case KVM_CAP_HYPERV: | 2614 | case KVM_CAP_HYPERV: |
2602 | case KVM_CAP_HYPERV_VAPIC: | 2615 | case KVM_CAP_HYPERV_VAPIC: |
@@ -2623,6 +2636,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) | |||
2623 | #endif | 2636 | #endif |
2624 | r = 1; | 2637 | r = 1; |
2625 | break; | 2638 | break; |
2639 | case KVM_CAP_ADJUST_CLOCK: | ||
2640 | r = KVM_CLOCK_TSC_STABLE; | ||
2641 | break; | ||
2626 | case KVM_CAP_X86_SMM: | 2642 | case KVM_CAP_X86_SMM: |
2627 | /* SMBASE is usually relocated above 1M on modern chipsets, | 2643 | /* SMBASE is usually relocated above 1M on modern chipsets, |
2628 | * and SMM handlers might indeed rely on 4G segment limits, | 2644 | * and SMM handlers might indeed rely on 4G segment limits, |
@@ -3415,6 +3431,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, | |||
3415 | }; | 3431 | }; |
3416 | case KVM_SET_VAPIC_ADDR: { | 3432 | case KVM_SET_VAPIC_ADDR: { |
3417 | struct kvm_vapic_addr va; | 3433 | struct kvm_vapic_addr va; |
3434 | int idx; | ||
3418 | 3435 | ||
3419 | r = -EINVAL; | 3436 | r = -EINVAL; |
3420 | if (!lapic_in_kernel(vcpu)) | 3437 | if (!lapic_in_kernel(vcpu)) |
@@ -3422,7 +3439,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp, | |||
3422 | r = -EFAULT; | 3439 | r = -EFAULT; |
3423 | if (copy_from_user(&va, argp, sizeof va)) | 3440 | if (copy_from_user(&va, argp, sizeof va)) |
3424 | goto out; | 3441 | goto out; |
3442 | idx = srcu_read_lock(&vcpu->kvm->srcu); | ||
3425 | r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); | 3443 | r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); |
3444 | srcu_read_unlock(&vcpu->kvm->srcu, idx); | ||
3426 | break; | 3445 | break; |
3427 | } | 3446 | } |
3428 | case KVM_X86_SETUP_MCE: { | 3447 | case KVM_X86_SETUP_MCE: { |
@@ -4103,9 +4122,11 @@ long kvm_arch_vm_ioctl(struct file *filp, | |||
4103 | struct kvm_clock_data user_ns; | 4122 | struct kvm_clock_data user_ns; |
4104 | u64 now_ns; | 4123 | u64 now_ns; |
4105 | 4124 | ||
4106 | now_ns = get_kvmclock_ns(kvm); | 4125 | local_irq_disable(); |
4126 | now_ns = __get_kvmclock_ns(kvm); | ||
4107 | user_ns.clock = now_ns; | 4127 | user_ns.clock = now_ns; |
4108 | user_ns.flags = 0; | 4128 | user_ns.flags = kvm->arch.use_master_clock ? KVM_CLOCK_TSC_STABLE : 0; |
4129 | local_irq_enable(); | ||
4109 | memset(&user_ns.pad, 0, sizeof(user_ns.pad)); | 4130 | memset(&user_ns.pad, 0, sizeof(user_ns.pad)); |
4110 | 4131 | ||
4111 | r = -EFAULT; | 4132 | r = -EFAULT; |
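
With the x86.c changes above, KVM_CHECK_EXTENSION(KVM_CAP_ADJUST_CLOCK) reports KVM_CLOCK_TSC_STABLE and KVM_GET_CLOCK sets that flag while the masterclock is active. A rough user-space probe, with error handling trimmed and assuming a linux/kvm.h new enough to define KVM_CLOCK_TSC_STABLE:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
            int kvm = open("/dev/kvm", O_RDWR);
            int vm  = ioctl(kvm, KVM_CREATE_VM, 0);
            struct kvm_clock_data clock = { 0 };

            /* The capability value now carries the flag bits the kernel supports. */
            int caps = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_ADJUST_CLOCK);
            printf("ADJUST_CLOCK caps: %#x\n", caps);

            if (ioctl(vm, KVM_GET_CLOCK, &clock) == 0)
                    printf("kvmclock=%llu ns, tsc stable: %s\n",
                           (unsigned long long)clock.clock,
                           (clock.flags & KVM_CLOCK_TSC_STABLE) ? "yes" : "no");
            return 0;
    }
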
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c index 79ae939970d3..fcd06f7526de 100644 --- a/arch/x86/mm/extable.c +++ b/arch/x86/mm/extable.c | |||
@@ -135,7 +135,12 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr) | |||
135 | if (early_recursion_flag > 2) | 135 | if (early_recursion_flag > 2) |
136 | goto halt_loop; | 136 | goto halt_loop; |
137 | 137 | ||
138 | if (regs->cs != __KERNEL_CS) | 138 | /* |
139 | * Old CPUs leave the high bits of CS on the stack | ||
140 | * undefined. I'm not sure which CPUs do this, but at least | ||
141 | * the 486 DX works this way. | ||
142 | */ | ||
143 | if ((regs->cs & 0xFFFF) != __KERNEL_CS) | ||
139 | goto fail; | 144 | goto fail; |
140 | 145 | ||
141 | /* | 146 | /* |
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index bf99aa7005eb..936a488d6cf6 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c | |||
@@ -861,7 +861,7 @@ static void __init __efi_enter_virtual_mode(void) | |||
861 | int count = 0, pg_shift = 0; | 861 | int count = 0, pg_shift = 0; |
862 | void *new_memmap = NULL; | 862 | void *new_memmap = NULL; |
863 | efi_status_t status; | 863 | efi_status_t status; |
864 | phys_addr_t pa; | 864 | unsigned long pa; |
865 | 865 | ||
866 | efi.systab = NULL; | 866 | efi.systab = NULL; |
867 | 867 | ||
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 58b0f801f66f..319148bd4b05 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/io.h> | 31 | #include <linux/io.h> |
32 | #include <linux/reboot.h> | 32 | #include <linux/reboot.h> |
33 | #include <linux/slab.h> | 33 | #include <linux/slab.h> |
34 | #include <linux/ucs2_string.h> | ||
34 | 35 | ||
35 | #include <asm/setup.h> | 36 | #include <asm/setup.h> |
36 | #include <asm/page.h> | 37 | #include <asm/page.h> |
@@ -211,6 +212,35 @@ void efi_sync_low_kernel_mappings(void) | |||
211 | memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries); | 212 | memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries); |
212 | } | 213 | } |
213 | 214 | ||
215 | /* | ||
216 | * Wrapper for slow_virt_to_phys() that handles NULL addresses. | ||
217 | */ | ||
218 | static inline phys_addr_t | ||
219 | virt_to_phys_or_null_size(void *va, unsigned long size) | ||
220 | { | ||
221 | bool bad_size; | ||
222 | |||
223 | if (!va) | ||
224 | return 0; | ||
225 | |||
226 | if (virt_addr_valid(va)) | ||
227 | return virt_to_phys(va); | ||
228 | |||
229 | /* | ||
230 | * A fully aligned variable on the stack is guaranteed not to | ||
231 | * cross a page boundary. Try to catch strings on the stack by | ||
232 | * checking that 'size' is a power of two. | ||
233 | */ | ||
234 | bad_size = size > PAGE_SIZE || !is_power_of_2(size); | ||
235 | |||
236 | WARN_ON(!IS_ALIGNED((unsigned long)va, size) || bad_size); | ||
237 | |||
238 | return slow_virt_to_phys(va); | ||
239 | } | ||
240 | |||
241 | #define virt_to_phys_or_null(addr) \ | ||
242 | virt_to_phys_or_null_size((addr), sizeof(*(addr))) | ||
243 | |||
214 | int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) | 244 | int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) |
215 | { | 245 | { |
216 | unsigned long pfn, text; | 246 | unsigned long pfn, text; |
@@ -494,8 +524,8 @@ static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc) | |||
494 | 524 | ||
495 | spin_lock(&rtc_lock); | 525 | spin_lock(&rtc_lock); |
496 | 526 | ||
497 | phys_tm = virt_to_phys(tm); | 527 | phys_tm = virt_to_phys_or_null(tm); |
498 | phys_tc = virt_to_phys(tc); | 528 | phys_tc = virt_to_phys_or_null(tc); |
499 | 529 | ||
500 | status = efi_thunk(get_time, phys_tm, phys_tc); | 530 | status = efi_thunk(get_time, phys_tm, phys_tc); |
501 | 531 | ||
@@ -511,7 +541,7 @@ static efi_status_t efi_thunk_set_time(efi_time_t *tm) | |||
511 | 541 | ||
512 | spin_lock(&rtc_lock); | 542 | spin_lock(&rtc_lock); |
513 | 543 | ||
514 | phys_tm = virt_to_phys(tm); | 544 | phys_tm = virt_to_phys_or_null(tm); |
515 | 545 | ||
516 | status = efi_thunk(set_time, phys_tm); | 546 | status = efi_thunk(set_time, phys_tm); |
517 | 547 | ||
@@ -529,9 +559,9 @@ efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending, | |||
529 | 559 | ||
530 | spin_lock(&rtc_lock); | 560 | spin_lock(&rtc_lock); |
531 | 561 | ||
532 | phys_enabled = virt_to_phys(enabled); | 562 | phys_enabled = virt_to_phys_or_null(enabled); |
533 | phys_pending = virt_to_phys(pending); | 563 | phys_pending = virt_to_phys_or_null(pending); |
534 | phys_tm = virt_to_phys(tm); | 564 | phys_tm = virt_to_phys_or_null(tm); |
535 | 565 | ||
536 | status = efi_thunk(get_wakeup_time, phys_enabled, | 566 | status = efi_thunk(get_wakeup_time, phys_enabled, |
537 | phys_pending, phys_tm); | 567 | phys_pending, phys_tm); |
@@ -549,7 +579,7 @@ efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm) | |||
549 | 579 | ||
550 | spin_lock(&rtc_lock); | 580 | spin_lock(&rtc_lock); |
551 | 581 | ||
552 | phys_tm = virt_to_phys(tm); | 582 | phys_tm = virt_to_phys_or_null(tm); |
553 | 583 | ||
554 | status = efi_thunk(set_wakeup_time, enabled, phys_tm); | 584 | status = efi_thunk(set_wakeup_time, enabled, phys_tm); |
555 | 585 | ||
@@ -558,6 +588,10 @@ efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm) | |||
558 | return status; | 588 | return status; |
559 | } | 589 | } |
560 | 590 | ||
591 | static unsigned long efi_name_size(efi_char16_t *name) | ||
592 | { | ||
593 | return ucs2_strsize(name, EFI_VAR_NAME_LEN) + 1; | ||
594 | } | ||
561 | 595 | ||
562 | static efi_status_t | 596 | static efi_status_t |
563 | efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor, | 597 | efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor, |
@@ -567,11 +601,11 @@ efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor, | |||
567 | u32 phys_name, phys_vendor, phys_attr; | 601 | u32 phys_name, phys_vendor, phys_attr; |
568 | u32 phys_data_size, phys_data; | 602 | u32 phys_data_size, phys_data; |
569 | 603 | ||
570 | phys_data_size = virt_to_phys(data_size); | 604 | phys_data_size = virt_to_phys_or_null(data_size); |
571 | phys_vendor = virt_to_phys(vendor); | 605 | phys_vendor = virt_to_phys_or_null(vendor); |
572 | phys_name = virt_to_phys(name); | 606 | phys_name = virt_to_phys_or_null_size(name, efi_name_size(name)); |
573 | phys_attr = virt_to_phys(attr); | 607 | phys_attr = virt_to_phys_or_null(attr); |
574 | phys_data = virt_to_phys(data); | 608 | phys_data = virt_to_phys_or_null_size(data, *data_size); |
575 | 609 | ||
576 | status = efi_thunk(get_variable, phys_name, phys_vendor, | 610 | status = efi_thunk(get_variable, phys_name, phys_vendor, |
577 | phys_attr, phys_data_size, phys_data); | 611 | phys_attr, phys_data_size, phys_data); |
@@ -586,9 +620,9 @@ efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor, | |||
586 | u32 phys_name, phys_vendor, phys_data; | 620 | u32 phys_name, phys_vendor, phys_data; |
587 | efi_status_t status; | 621 | efi_status_t status; |
588 | 622 | ||
589 | phys_name = virt_to_phys(name); | 623 | phys_name = virt_to_phys_or_null_size(name, efi_name_size(name)); |
590 | phys_vendor = virt_to_phys(vendor); | 624 | phys_vendor = virt_to_phys_or_null(vendor); |
591 | phys_data = virt_to_phys(data); | 625 | phys_data = virt_to_phys_or_null_size(data, data_size); |
592 | 626 | ||
593 | /* If data_size is > sizeof(u32) we've got problems */ | 627 | /* If data_size is > sizeof(u32) we've got problems */ |
594 | status = efi_thunk(set_variable, phys_name, phys_vendor, | 628 | status = efi_thunk(set_variable, phys_name, phys_vendor, |
@@ -605,9 +639,9 @@ efi_thunk_get_next_variable(unsigned long *name_size, | |||
605 | efi_status_t status; | 639 | efi_status_t status; |
606 | u32 phys_name_size, phys_name, phys_vendor; | 640 | u32 phys_name_size, phys_name, phys_vendor; |
607 | 641 | ||
608 | phys_name_size = virt_to_phys(name_size); | 642 | phys_name_size = virt_to_phys_or_null(name_size); |
609 | phys_vendor = virt_to_phys(vendor); | 643 | phys_vendor = virt_to_phys_or_null(vendor); |
610 | phys_name = virt_to_phys(name); | 644 | phys_name = virt_to_phys_or_null_size(name, *name_size); |
611 | 645 | ||
612 | status = efi_thunk(get_next_variable, phys_name_size, | 646 | status = efi_thunk(get_next_variable, phys_name_size, |
613 | phys_name, phys_vendor); | 647 | phys_name, phys_vendor); |
@@ -621,7 +655,7 @@ efi_thunk_get_next_high_mono_count(u32 *count) | |||
621 | efi_status_t status; | 655 | efi_status_t status; |
622 | u32 phys_count; | 656 | u32 phys_count; |
623 | 657 | ||
624 | phys_count = virt_to_phys(count); | 658 | phys_count = virt_to_phys_or_null(count); |
625 | status = efi_thunk(get_next_high_mono_count, phys_count); | 659 | status = efi_thunk(get_next_high_mono_count, phys_count); |
626 | 660 | ||
627 | return status; | 661 | return status; |
@@ -633,7 +667,7 @@ efi_thunk_reset_system(int reset_type, efi_status_t status, | |||
633 | { | 667 | { |
634 | u32 phys_data; | 668 | u32 phys_data; |
635 | 669 | ||
636 | phys_data = virt_to_phys(data); | 670 | phys_data = virt_to_phys_or_null_size(data, data_size); |
637 | 671 | ||
638 | efi_thunk(reset_system, reset_type, status, data_size, phys_data); | 672 | efi_thunk(reset_system, reset_type, status, data_size, phys_data); |
639 | } | 673 | } |
@@ -661,9 +695,9 @@ efi_thunk_query_variable_info(u32 attr, u64 *storage_space, | |||
661 | if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION) | 695 | if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION) |
662 | return EFI_UNSUPPORTED; | 696 | return EFI_UNSUPPORTED; |
663 | 697 | ||
664 | phys_storage = virt_to_phys(storage_space); | 698 | phys_storage = virt_to_phys_or_null(storage_space); |
665 | phys_remaining = virt_to_phys(remaining_space); | 699 | phys_remaining = virt_to_phys_or_null(remaining_space); |
666 | phys_max = virt_to_phys(max_variable_size); | 700 | phys_max = virt_to_phys_or_null(max_variable_size); |
667 | 701 | ||
668 | status = efi_thunk(query_variable_info, attr, phys_storage, | 702 | status = efi_thunk(query_variable_info, attr, phys_storage, |
669 | phys_remaining, phys_max); | 703 | phys_remaining, phys_max); |
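
virt_to_phys_or_null_size() above leans on a small invariant: an object whose size is a power of two no larger than a page, and whose address is aligned to that size, cannot straddle a page boundary, so one slow_virt_to_phys() lookup covers it. A stand-alone sketch of that check; the helper names here are mine, not the kernel's:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    static bool is_power_of_2(unsigned long n)
    {
            return n != 0 && (n & (n - 1)) == 0;
    }

    /* True if [va, va + size) is guaranteed to sit inside one page. */
    static bool fits_in_one_page(uintptr_t va, unsigned long size)
    {
            if (size > PAGE_SIZE || !is_power_of_2(size))
                    return false;
            return (va & (size - 1)) == 0;  /* size-aligned => cannot cross */
    }

    int main(void)
    {
            printf("%d\n", fits_in_one_page(0x1000, 8));  /* 1: aligned u64 */
            printf("%d\n", fits_in_one_page(0x1ffc, 8));  /* 0: misaligned  */
            printf("%d\n", fits_in_one_page(0x2000, 12)); /* 0: not a pow2  */
            return 0;
    }
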
diff --git a/arch/x86/platform/intel-mid/device_libs/Makefile b/arch/x86/platform/intel-mid/device_libs/Makefile index 429d08be7848..dd6cfa4ad3ac 100644 --- a/arch/x86/platform/intel-mid/device_libs/Makefile +++ b/arch/x86/platform/intel-mid/device_libs/Makefile | |||
@@ -28,4 +28,4 @@ obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_pcal9555a.o | |||
28 | obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o | 28 | obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o |
29 | # MISC Devices | 29 | # MISC Devices |
30 | obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o | 30 | obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o |
31 | obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_wdt.o | 31 | obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_mrfld_wdt.o |
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_wdt.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c index de734134bc8d..3f1f1c77d090 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_wdt.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * platform_wdt.c: Watchdog platform library file | 2 | * Intel Merrifield watchdog platform device library file |
3 | * | 3 | * |
4 | * (C) Copyright 2014 Intel Corporation | 4 | * (C) Copyright 2014 Intel Corporation |
5 | * Author: David Cohen <david.a.cohen@linux.intel.com> | 5 | * Author: David Cohen <david.a.cohen@linux.intel.com> |
@@ -14,7 +14,9 @@ | |||
14 | #include <linux/interrupt.h> | 14 | #include <linux/interrupt.h> |
15 | #include <linux/platform_device.h> | 15 | #include <linux/platform_device.h> |
16 | #include <linux/platform_data/intel-mid_wdt.h> | 16 | #include <linux/platform_data/intel-mid_wdt.h> |
17 | |||
17 | #include <asm/intel-mid.h> | 18 | #include <asm/intel-mid.h> |
19 | #include <asm/intel_scu_ipc.h> | ||
18 | #include <asm/io_apic.h> | 20 | #include <asm/io_apic.h> |
19 | 21 | ||
20 | #define TANGIER_EXT_TIMER0_MSI 15 | 22 | #define TANGIER_EXT_TIMER0_MSI 15 |
@@ -50,14 +52,34 @@ static struct intel_mid_wdt_pdata tangier_pdata = { | |||
50 | .probe = tangier_probe, | 52 | .probe = tangier_probe, |
51 | }; | 53 | }; |
52 | 54 | ||
53 | static int __init register_mid_wdt(void) | 55 | static int wdt_scu_status_change(struct notifier_block *nb, |
56 | unsigned long code, void *data) | ||
54 | { | 57 | { |
55 | if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) { | 58 | if (code == SCU_DOWN) { |
56 | wdt_dev.dev.platform_data = &tangier_pdata; | 59 | platform_device_unregister(&wdt_dev); |
57 | return platform_device_register(&wdt_dev); | 60 | return 0; |
58 | } | 61 | } |
59 | 62 | ||
60 | return -ENODEV; | 63 | return platform_device_register(&wdt_dev); |
61 | } | 64 | } |
62 | 65 | ||
66 | static struct notifier_block wdt_scu_notifier = { | ||
67 | .notifier_call = wdt_scu_status_change, | ||
68 | }; | ||
69 | |||
70 | static int __init register_mid_wdt(void) | ||
71 | { | ||
72 | if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER) | ||
73 | return -ENODEV; | ||
74 | |||
75 | wdt_dev.dev.platform_data = &tangier_pdata; | ||
76 | |||
77 | /* | ||
78 | * We need to be sure that the SCU IPC is ready before watchdog device | ||
79 | * can be registered: | ||
80 | */ | ||
81 | intel_scu_notifier_add(&wdt_scu_notifier); | ||
82 | |||
83 | return 0; | ||
84 | } | ||
63 | rootfs_initcall(register_mid_wdt); | 85 | rootfs_initcall(register_mid_wdt); |
diff --git a/arch/x86/platform/intel-mid/pwr.c b/arch/x86/platform/intel-mid/pwr.c index 5d3b45ad1c03..67375dda451c 100644 --- a/arch/x86/platform/intel-mid/pwr.c +++ b/arch/x86/platform/intel-mid/pwr.c | |||
@@ -272,6 +272,25 @@ int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state) | |||
272 | } | 272 | } |
273 | EXPORT_SYMBOL_GPL(intel_mid_pci_set_power_state); | 273 | EXPORT_SYMBOL_GPL(intel_mid_pci_set_power_state); |
274 | 274 | ||
275 | pci_power_t intel_mid_pci_get_power_state(struct pci_dev *pdev) | ||
276 | { | ||
277 | struct mid_pwr *pwr = midpwr; | ||
278 | int id, reg, bit; | ||
279 | u32 power; | ||
280 | |||
281 | if (!pwr || !pwr->available) | ||
282 | return PCI_UNKNOWN; | ||
283 | |||
284 | id = intel_mid_pwr_get_lss_id(pdev); | ||
285 | if (id < 0) | ||
286 | return PCI_UNKNOWN; | ||
287 | |||
288 | reg = (id * LSS_PWS_BITS) / 32; | ||
289 | bit = (id * LSS_PWS_BITS) % 32; | ||
290 | power = mid_pwr_get_state(pwr, reg); | ||
291 | return (__force pci_power_t)((power >> bit) & 3); | ||
292 | } | ||
293 | |||
275 | void intel_mid_pwr_power_off(void) | 294 | void intel_mid_pwr_power_off(void) |
276 | { | 295 | { |
277 | struct mid_pwr *pwr = midpwr; | 296 | struct mid_pwr *pwr = midpwr; |
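
The new intel_mid_pci_get_power_state() packs LSS_PWS_BITS bits of power state per logical subsystem into 32-bit registers and extracts the 2-bit D-state with a divide/modulo pair. A sketch of that indexing, assuming LSS_PWS_BITS is 2 as the final "& 3" suggests:

    #include <stdint.h>
    #include <stdio.h>

    #define LSS_PWS_BITS 2  /* assumption: 2 bits of power state per LSS */

    /* Return the 2-bit power state of logical subsystem 'id', given an
     * array of 32-bit PWS registers (16 subsystems per register). */
    static unsigned int lss_power_state(const uint32_t *pws, int id)
    {
            int reg = (id * LSS_PWS_BITS) / 32;  /* which register   */
            int bit = (id * LSS_PWS_BITS) % 32;  /* offset inside it */

            return (pws[reg] >> bit) & 3;
    }

    int main(void)
    {
            uint32_t pws[2] = { 0xc0000003, 0x00000002 };

            printf("LSS0=%u LSS15=%u LSS16=%u\n",
                   lss_power_state(pws, 0),
                   lss_power_state(pws, 15),
                   lss_power_state(pws, 16));
            return 0;
    }
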
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile index ac58c1616408..555b9fa0ad43 100644 --- a/arch/x86/purgatory/Makefile +++ b/arch/x86/purgatory/Makefile | |||
@@ -16,6 +16,7 @@ KCOV_INSTRUMENT := n | |||
16 | 16 | ||
17 | KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes -fno-zero-initialized-in-bss -fno-builtin -ffreestanding -c -MD -Os -mcmodel=large | 17 | KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes -fno-zero-initialized-in-bss -fno-builtin -ffreestanding -c -MD -Os -mcmodel=large |
18 | KBUILD_CFLAGS += -m$(BITS) | 18 | KBUILD_CFLAGS += -m$(BITS) |
19 | KBUILD_CFLAGS += $(call cc-option,-fno-PIE) | ||
19 | 20 | ||
20 | $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE | 21 | $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE |
21 | $(call if_changed,ld) | 22 | $(call if_changed,ld) |
diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h index de9b14b2d348..cd400af4a6b2 100644 --- a/arch/xtensa/include/uapi/asm/unistd.h +++ b/arch/xtensa/include/uapi/asm/unistd.h | |||
@@ -767,7 +767,14 @@ __SYSCALL(346, sys_preadv2, 6) | |||
767 | #define __NR_pwritev2 347 | 767 | #define __NR_pwritev2 347 |
768 | __SYSCALL(347, sys_pwritev2, 6) | 768 | __SYSCALL(347, sys_pwritev2, 6) |
769 | 769 | ||
770 | #define __NR_syscall_count 348 | 770 | #define __NR_pkey_mprotect 348 |
771 | __SYSCALL(348, sys_pkey_mprotect, 4) | ||
772 | #define __NR_pkey_alloc 349 | ||
773 | __SYSCALL(349, sys_pkey_alloc, 2) | ||
774 | #define __NR_pkey_free 350 | ||
775 | __SYSCALL(350, sys_pkey_free, 1) | ||
776 | |||
777 | #define __NR_syscall_count 351 | ||
771 | 778 | ||
772 | /* | 779 | /* |
773 | * sysxtensa syscall handler | 780 | * sysxtensa syscall handler |
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c index 9a5bcd0381a7..be81e69b25bc 100644 --- a/arch/xtensa/kernel/time.c +++ b/arch/xtensa/kernel/time.c | |||
@@ -172,10 +172,11 @@ void __init time_init(void) | |||
172 | { | 172 | { |
173 | of_clk_init(NULL); | 173 | of_clk_init(NULL); |
174 | #ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT | 174 | #ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT |
175 | printk("Calibrating CPU frequency "); | 175 | pr_info("Calibrating CPU frequency "); |
176 | calibrate_ccount(); | 176 | calibrate_ccount(); |
177 | printk("%d.%02d MHz\n", (int)ccount_freq/1000000, | 177 | pr_cont("%d.%02d MHz\n", |
178 | (int)(ccount_freq/10000)%100); | 178 | (int)ccount_freq / 1000000, |
179 | (int)(ccount_freq / 10000) % 100); | ||
179 | #else | 180 | #else |
180 | ccount_freq = CONFIG_XTENSA_CPU_CLOCK*1000000UL; | 181 | ccount_freq = CONFIG_XTENSA_CPU_CLOCK*1000000UL; |
181 | #endif | 182 | #endif |
@@ -210,9 +211,8 @@ irqreturn_t timer_interrupt(int irq, void *dev_id) | |||
210 | void calibrate_delay(void) | 211 | void calibrate_delay(void) |
211 | { | 212 | { |
212 | loops_per_jiffy = ccount_freq / HZ; | 213 | loops_per_jiffy = ccount_freq / HZ; |
213 | printk("Calibrating delay loop (skipped)... " | 214 | pr_info("Calibrating delay loop (skipped)... %lu.%02lu BogoMIPS preset\n", |
214 | "%lu.%02lu BogoMIPS preset\n", | 215 | loops_per_jiffy / (1000000 / HZ), |
215 | loops_per_jiffy/(1000000/HZ), | 216 | (loops_per_jiffy / (10000 / HZ)) % 100); |
216 | (loops_per_jiffy/(10000/HZ)) % 100); | ||
217 | } | 217 | } |
218 | #endif | 218 | #endif |
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c index d02fc304b31c..ce37d5b899fe 100644 --- a/arch/xtensa/kernel/traps.c +++ b/arch/xtensa/kernel/traps.c | |||
@@ -465,26 +465,25 @@ void show_regs(struct pt_regs * regs) | |||
465 | 465 | ||
466 | for (i = 0; i < 16; i++) { | 466 | for (i = 0; i < 16; i++) { |
467 | if ((i % 8) == 0) | 467 | if ((i % 8) == 0) |
468 | printk(KERN_INFO "a%02d:", i); | 468 | pr_info("a%02d:", i); |
469 | printk(KERN_CONT " %08lx", regs->areg[i]); | 469 | pr_cont(" %08lx", regs->areg[i]); |
470 | } | 470 | } |
471 | printk(KERN_CONT "\n"); | 471 | pr_cont("\n"); |
472 | 472 | pr_info("pc: %08lx, ps: %08lx, depc: %08lx, excvaddr: %08lx\n", | |
473 | printk("pc: %08lx, ps: %08lx, depc: %08lx, excvaddr: %08lx\n", | 473 | regs->pc, regs->ps, regs->depc, regs->excvaddr); |
474 | regs->pc, regs->ps, regs->depc, regs->excvaddr); | 474 | pr_info("lbeg: %08lx, lend: %08lx lcount: %08lx, sar: %08lx\n", |
475 | printk("lbeg: %08lx, lend: %08lx lcount: %08lx, sar: %08lx\n", | 475 | regs->lbeg, regs->lend, regs->lcount, regs->sar); |
476 | regs->lbeg, regs->lend, regs->lcount, regs->sar); | ||
477 | if (user_mode(regs)) | 476 | if (user_mode(regs)) |
478 | printk("wb: %08lx, ws: %08lx, wmask: %08lx, syscall: %ld\n", | 477 | pr_cont("wb: %08lx, ws: %08lx, wmask: %08lx, syscall: %ld\n", |
479 | regs->windowbase, regs->windowstart, regs->wmask, | 478 | regs->windowbase, regs->windowstart, regs->wmask, |
480 | regs->syscall); | 479 | regs->syscall); |
481 | } | 480 | } |
482 | 481 | ||
483 | static int show_trace_cb(struct stackframe *frame, void *data) | 482 | static int show_trace_cb(struct stackframe *frame, void *data) |
484 | { | 483 | { |
485 | if (kernel_text_address(frame->pc)) { | 484 | if (kernel_text_address(frame->pc)) { |
486 | printk(" [<%08lx>] ", frame->pc); | 485 | pr_cont(" [<%08lx>]", frame->pc); |
487 | print_symbol("%s\n", frame->pc); | 486 | print_symbol(" %s\n", frame->pc); |
488 | } | 487 | } |
489 | return 0; | 488 | return 0; |
490 | } | 489 | } |
@@ -494,19 +493,13 @@ void show_trace(struct task_struct *task, unsigned long *sp) | |||
494 | if (!sp) | 493 | if (!sp) |
495 | sp = stack_pointer(task); | 494 | sp = stack_pointer(task); |
496 | 495 | ||
497 | printk("Call Trace:"); | 496 | pr_info("Call Trace:\n"); |
498 | #ifdef CONFIG_KALLSYMS | ||
499 | printk("\n"); | ||
500 | #endif | ||
501 | walk_stackframe(sp, show_trace_cb, NULL); | 497 | walk_stackframe(sp, show_trace_cb, NULL); |
502 | printk("\n"); | 498 | #ifndef CONFIG_KALLSYMS |
499 | pr_cont("\n"); | ||
500 | #endif | ||
503 | } | 501 | } |
504 | 502 | ||
505 | /* | ||
506 | * This routine abuses get_user()/put_user() to reference pointers | ||
507 | * with at least a bit of error checking ... | ||
508 | */ | ||
509 | |||
510 | static int kstack_depth_to_print = 24; | 503 | static int kstack_depth_to_print = 24; |
511 | 504 | ||
512 | void show_stack(struct task_struct *task, unsigned long *sp) | 505 | void show_stack(struct task_struct *task, unsigned long *sp) |
@@ -518,52 +511,29 @@ void show_stack(struct task_struct *task, unsigned long *sp) | |||
518 | sp = stack_pointer(task); | 511 | sp = stack_pointer(task); |
519 | stack = sp; | 512 | stack = sp; |
520 | 513 | ||
521 | printk("\nStack: "); | 514 | pr_info("Stack:\n"); |
522 | 515 | ||
523 | for (i = 0; i < kstack_depth_to_print; i++) { | 516 | for (i = 0; i < kstack_depth_to_print; i++) { |
524 | if (kstack_end(sp)) | 517 | if (kstack_end(sp)) |
525 | break; | 518 | break; |
526 | if (i && ((i % 8) == 0)) | 519 | pr_cont(" %08lx", *sp++); |
527 | printk("\n "); | 520 | if (i % 8 == 7) |
528 | printk("%08lx ", *sp++); | 521 | pr_cont("\n"); |
529 | } | 522 | } |
530 | printk("\n"); | ||
531 | show_trace(task, stack); | 523 | show_trace(task, stack); |
532 | } | 524 | } |
533 | 525 | ||
534 | void show_code(unsigned int *pc) | ||
535 | { | ||
536 | long i; | ||
537 | |||
538 | printk("\nCode:"); | ||
539 | |||
540 | for(i = -3 ; i < 6 ; i++) { | ||
541 | unsigned long insn; | ||
542 | if (__get_user(insn, pc + i)) { | ||
543 | printk(" (Bad address in pc)\n"); | ||
544 | break; | ||
545 | } | ||
546 | printk("%c%08lx%c",(i?' ':'<'),insn,(i?' ':'>')); | ||
547 | } | ||
548 | } | ||
549 | |||
550 | DEFINE_SPINLOCK(die_lock); | 526 | DEFINE_SPINLOCK(die_lock); |
551 | 527 | ||
552 | void die(const char * str, struct pt_regs * regs, long err) | 528 | void die(const char * str, struct pt_regs * regs, long err) |
553 | { | 529 | { |
554 | static int die_counter; | 530 | static int die_counter; |
555 | int nl = 0; | ||
556 | 531 | ||
557 | console_verbose(); | 532 | console_verbose(); |
558 | spin_lock_irq(&die_lock); | 533 | spin_lock_irq(&die_lock); |
559 | 534 | ||
560 | printk("%s: sig: %ld [#%d]\n", str, err, ++die_counter); | 535 | pr_info("%s: sig: %ld [#%d]%s\n", str, err, ++die_counter, |
561 | #ifdef CONFIG_PREEMPT | 536 | IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : ""); |
562 | printk("PREEMPT "); | ||
563 | nl = 1; | ||
564 | #endif | ||
565 | if (nl) | ||
566 | printk("\n"); | ||
567 | show_regs(regs); | 537 | show_regs(regs); |
568 | if (!user_mode(regs)) | 538 | if (!user_mode(regs)) |
569 | show_stack(NULL, (unsigned long*)regs->areg[1]); | 539 | show_stack(NULL, (unsigned long*)regs->areg[1]); |
diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index 2d8466f9e49b..d19b09cdf284 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c | |||
@@ -214,23 +214,26 @@ static int hash_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, | |||
214 | 214 | ||
215 | ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0); | 215 | ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0); |
216 | 216 | ||
217 | if (ctx->more) { | 217 | if (!result && !ctx->more) { |
218 | err = af_alg_wait_for_completion( | ||
219 | crypto_ahash_init(&ctx->req), | ||
220 | &ctx->completion); | ||
221 | if (err) | ||
222 | goto unlock; | ||
223 | } | ||
224 | |||
225 | if (!result || ctx->more) { | ||
218 | ctx->more = 0; | 226 | ctx->more = 0; |
219 | err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req), | 227 | err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req), |
220 | &ctx->completion); | 228 | &ctx->completion); |
221 | if (err) | 229 | if (err) |
222 | goto unlock; | 230 | goto unlock; |
223 | } else if (!result) { | ||
224 | err = af_alg_wait_for_completion( | ||
225 | crypto_ahash_digest(&ctx->req), | ||
226 | &ctx->completion); | ||
227 | } | 231 | } |
228 | 232 | ||
229 | err = memcpy_to_msg(msg, ctx->result, len); | 233 | err = memcpy_to_msg(msg, ctx->result, len); |
230 | 234 | ||
231 | hash_free_result(sk, ctx); | ||
232 | |||
233 | unlock: | 235 | unlock: |
236 | hash_free_result(sk, ctx); | ||
234 | release_sock(sk); | 237 | release_sock(sk); |
235 | 238 | ||
236 | return err ?: len; | 239 | return err ?: len; |
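
The hash_recvmsg() rework above makes a recv() with no preceding data return the digest of the empty message (init plus final) rather than reusing stale state. A user-space AF_ALG sketch that exercises exactly that path; sha256 and its 32-byte digest are assumptions about what the running kernel provides:

    #include <stdio.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/if_alg.h>

    int main(void)
    {
            struct sockaddr_alg sa = {
                    .salg_family = AF_ALG,
                    .salg_type   = "hash",
                    .salg_name   = "sha256",
            };
            unsigned char digest[32];
            int tfm, op, i;

            tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
            bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
            op = accept(tfm, NULL, 0);

            /* Read the digest without ever sending data: the hash of "". */
            if (read(op, digest, sizeof(digest)) == (ssize_t)sizeof(digest)) {
                    for (i = 0; i < 32; i++)
                            printf("%02x", digest[i]);
                    putchar('\n');
            }

            close(op);
            close(tfm);
            return 0;
    }
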
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c index 865f46ea724f..c80765b211cf 100644 --- a/crypto/asymmetric_keys/x509_cert_parser.c +++ b/crypto/asymmetric_keys/x509_cert_parser.c | |||
@@ -133,7 +133,6 @@ struct x509_certificate *x509_cert_parse(const void *data, size_t datalen) | |||
133 | return cert; | 133 | return cert; |
134 | 134 | ||
135 | error_decode: | 135 | error_decode: |
136 | kfree(cert->pub->key); | ||
137 | kfree(ctx); | 136 | kfree(ctx); |
138 | error_no_ctx: | 137 | error_no_ctx: |
139 | x509_free_certificate(cert); | 138 | x509_free_certificate(cert); |
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c index 52ce17a3dd63..c16c94f88733 100644 --- a/crypto/scatterwalk.c +++ b/crypto/scatterwalk.c | |||
@@ -68,10 +68,6 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, | |||
68 | 68 | ||
69 | sg = scatterwalk_ffwd(tmp, sg, start); | 69 | sg = scatterwalk_ffwd(tmp, sg, start); |
70 | 70 | ||
71 | if (sg_page(sg) == virt_to_page(buf) && | ||
72 | sg->offset == offset_in_page(buf)) | ||
73 | return; | ||
74 | |||
75 | scatterwalk_start(&walk, sg); | 71 | scatterwalk_start(&walk, sg); |
76 | scatterwalk_copychunks(buf, &walk, nbytes, out); | 72 | scatterwalk_copychunks(buf, &walk, nbytes, out); |
77 | scatterwalk_done(&walk, out, 0); | 73 | scatterwalk_done(&walk, out, 0); |
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c index 046c4d0394ee..5fb838e592dc 100644 --- a/drivers/acpi/acpica/tbfadt.c +++ b/drivers/acpi/acpica/tbfadt.c | |||
@@ -480,19 +480,17 @@ static void acpi_tb_convert_fadt(void) | |||
480 | u32 i; | 480 | u32 i; |
481 | 481 | ||
482 | /* | 482 | /* |
483 | * For ACPI 1.0 FADTs (revision 1), ensure that reserved fields which | 483 | * For ACPI 1.0 FADTs (revision 1 or 2), ensure that reserved fields which |
484 | * should be zero are indeed zero. This will workaround BIOSs that | 484 | * should be zero are indeed zero. This will workaround BIOSs that |
485 | * inadvertently place values in these fields. | 485 | * inadvertently place values in these fields. |
486 | * | 486 | * |
487 | * The ACPI 1.0 reserved fields that will be zeroed are the bytes located | 487 | * The ACPI 1.0 reserved fields that will be zeroed are the bytes located |
488 | * at offset 45, 55, 95, and the word located at offset 109, 110. | 488 | * at offset 45, 55, 95, and the word located at offset 109, 110. |
489 | * | 489 | * |
490 | * Note: The FADT revision value is unreliable because of BIOS errors. | 490 | * Note: The FADT revision value is unreliable. Only the length can be |
491 | * The table length is instead used as the final word on the version. | 491 | * trusted. |
492 | * | ||
493 | * Note: FADT revision 3 is the ACPI 2.0 version of the FADT. | ||
494 | */ | 492 | */ |
495 | if (acpi_gbl_FADT.header.length <= ACPI_FADT_V3_SIZE) { | 493 | if (acpi_gbl_FADT.header.length <= ACPI_FADT_V2_SIZE) { |
496 | acpi_gbl_FADT.preferred_profile = 0; | 494 | acpi_gbl_FADT.preferred_profile = 0; |
497 | acpi_gbl_FADT.pstate_control = 0; | 495 | acpi_gbl_FADT.pstate_control = 0; |
498 | acpi_gbl_FADT.cst_control = 0; | 496 | acpi_gbl_FADT.cst_control = 0; |
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index deb0ff78eba8..54abb26b7366 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c | |||
@@ -47,32 +47,15 @@ static void acpi_sleep_tts_switch(u32 acpi_state) | |||
47 | } | 47 | } |
48 | } | 48 | } |
49 | 49 | ||
50 | static void acpi_sleep_pts_switch(u32 acpi_state) | 50 | static int tts_notify_reboot(struct notifier_block *this, |
51 | { | ||
52 | acpi_status status; | ||
53 | |||
54 | status = acpi_execute_simple_method(NULL, "\\_PTS", acpi_state); | ||
55 | if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { | ||
56 | /* | ||
57 | * OS can't evaluate the _PTS object correctly. Some warning | ||
58 | * message will be printed. But it won't break anything. | ||
59 | */ | ||
60 | printk(KERN_NOTICE "Failure in evaluating _PTS object\n"); | ||
61 | } | ||
62 | } | ||
63 | |||
64 | static int sleep_notify_reboot(struct notifier_block *this, | ||
65 | unsigned long code, void *x) | 51 | unsigned long code, void *x) |
66 | { | 52 | { |
67 | acpi_sleep_tts_switch(ACPI_STATE_S5); | 53 | acpi_sleep_tts_switch(ACPI_STATE_S5); |
68 | |||
69 | acpi_sleep_pts_switch(ACPI_STATE_S5); | ||
70 | |||
71 | return NOTIFY_DONE; | 54 | return NOTIFY_DONE; |
72 | } | 55 | } |
73 | 56 | ||
74 | static struct notifier_block sleep_notifier = { | 57 | static struct notifier_block tts_notifier = { |
75 | .notifier_call = sleep_notify_reboot, | 58 | .notifier_call = tts_notify_reboot, |
76 | .next = NULL, | 59 | .next = NULL, |
77 | .priority = 0, | 60 | .priority = 0, |
78 | }; | 61 | }; |
@@ -916,9 +899,9 @@ int __init acpi_sleep_init(void) | |||
916 | pr_info(PREFIX "(supports%s)\n", supported); | 899 | pr_info(PREFIX "(supports%s)\n", supported); |
917 | 900 | ||
918 | /* | 901 | /* |
919 | * Register the sleep_notifier to reboot notifier list so that the _TTS | 902 | * Register the tts_notifier to reboot notifier list so that the _TTS |
920 | * and _PTS object can also be evaluated when the system enters S5. | 903 | * object can also be evaluated when the system enters S5. |
921 | */ | 904 | */ |
922 | register_reboot_notifier(&sleep_notifier); | 905 | register_reboot_notifier(&tts_notifier); |
923 | return 0; | 906 | return 0; |
924 | } | 907 | } |
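Editor's note: with _PTS evaluation dropped, only _TTS is run from the reboot path. A minimal sketch of that notifier pattern, assuming the same ACPI helper the driver uses (the real code wraps the call in acpi_sleep_tts_switch()):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/acpi.h>

/* Evaluate \_TTS with the target sleep state when the system reboots. */
static int tts_notify_reboot(struct notifier_block *nb,
			     unsigned long code, void *unused)
{
	acpi_status status;

	status = acpi_execute_simple_method(NULL, "\\_TTS", ACPI_STATE_S5);
	if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
		pr_notice("Failure in evaluating _TTS object\n");

	return NOTIFY_DONE;
}

static struct notifier_block tts_notifier = {
	.notifier_call = tts_notify_reboot,
};

static int __init tts_reboot_hook_init(void)
{
	return register_reboot_notifier(&tts_notifier);
}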
diff --git a/drivers/char/ipmi/bt-bmc.c b/drivers/char/ipmi/bt-bmc.c index b49e61320952..fc9e8891eae3 100644 --- a/drivers/char/ipmi/bt-bmc.c +++ b/drivers/char/ipmi/bt-bmc.c | |||
@@ -484,7 +484,7 @@ static int bt_bmc_remove(struct platform_device *pdev) | |||
484 | } | 484 | } |
485 | 485 | ||
486 | static const struct of_device_id bt_bmc_match[] = { | 486 | static const struct of_device_id bt_bmc_match[] = { |
487 | { .compatible = "aspeed,ast2400-bt-bmc" }, | 487 | { .compatible = "aspeed,ast2400-ibt-bmc" }, |
488 | { }, | 488 | { }, |
489 | }; | 489 | }; |
490 | 490 | ||
@@ -502,4 +502,4 @@ module_platform_driver(bt_bmc_driver); | |||
502 | MODULE_DEVICE_TABLE(of, bt_bmc_match); | 502 | MODULE_DEVICE_TABLE(of, bt_bmc_match); |
503 | MODULE_LICENSE("GPL"); | 503 | MODULE_LICENSE("GPL"); |
504 | MODULE_AUTHOR("Alistair Popple <alistair@popple.id.au>"); | 504 | MODULE_AUTHOR("Alistair Popple <alistair@popple.id.au>"); |
505 | MODULE_DESCRIPTION("Linux device interface to the BT interface"); | 505 | MODULE_DESCRIPTION("Linux device interface to the IPMI BT interface"); |
diff --git a/drivers/clk/berlin/bg2.c b/drivers/clk/berlin/bg2.c index edf3b96b3b73..1d99292e2039 100644 --- a/drivers/clk/berlin/bg2.c +++ b/drivers/clk/berlin/bg2.c | |||
@@ -685,7 +685,7 @@ static void __init berlin2_clock_setup(struct device_node *np) | |||
685 | } | 685 | } |
686 | 686 | ||
687 | /* register clk-provider */ | 687 | /* register clk-provider */ |
688 | of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data); | 688 | of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data); |
689 | 689 | ||
690 | return; | 690 | return; |
691 | 691 | ||
diff --git a/drivers/clk/berlin/bg2q.c b/drivers/clk/berlin/bg2q.c index 0718e831475f..3b784b593afd 100644 --- a/drivers/clk/berlin/bg2q.c +++ b/drivers/clk/berlin/bg2q.c | |||
@@ -382,7 +382,7 @@ static void __init berlin2q_clock_setup(struct device_node *np) | |||
382 | } | 382 | } |
383 | 383 | ||
384 | /* register clk-provider */ | 384 | /* register clk-provider */ |
385 | of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data); | 385 | of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data); |
386 | 386 | ||
387 | return; | 387 | return; |
388 | 388 | ||
diff --git a/drivers/clk/clk-efm32gg.c b/drivers/clk/clk-efm32gg.c index 8802a2dd56ac..f674778fb3ac 100644 --- a/drivers/clk/clk-efm32gg.c +++ b/drivers/clk/clk-efm32gg.c | |||
@@ -82,6 +82,6 @@ static void __init efm32gg_cmu_init(struct device_node *np) | |||
82 | hws[clk_HFPERCLKDAC0] = clk_hw_register_gate(NULL, "HFPERCLK.DAC0", | 82 | hws[clk_HFPERCLKDAC0] = clk_hw_register_gate(NULL, "HFPERCLK.DAC0", |
83 | "HFXO", 0, base + CMU_HFPERCLKEN0, 17, 0, NULL); | 83 | "HFXO", 0, base + CMU_HFPERCLKEN0, 17, 0, NULL); |
84 | 84 | ||
85 | of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data); | 85 | of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data); |
86 | } | 86 | } |
87 | CLK_OF_DECLARE(efm32ggcmu, "efm32gg,cmu", efm32gg_cmu_init); | 87 | CLK_OF_DECLARE(efm32ggcmu, "efm32gg,cmu", efm32gg_cmu_init); |
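Editor's note: the three clk fixes above all correct the same call. of_clk_hw_onecell_get() expects the struct clk_hw_onecell_data pointer itself as its callback data, so passing &clk_data (the address of that pointer) handed it garbage. A sketch of the intended usage, with NR_CLKS purely illustrative:

#include <linux/clk-provider.h>
#include <linux/of.h>
#include <linux/slab.h>

#define NR_CLKS 8	/* illustrative clock count */

static void __init example_clk_setup(struct device_node *np)
{
	struct clk_hw_onecell_data *clk_data;

	clk_data = kzalloc(sizeof(*clk_data) + NR_CLKS * sizeof(struct clk_hw *),
			   GFP_KERNEL);
	if (!clk_data)
		return;
	clk_data->num = NR_CLKS;

	/* ... register the clk_data->hws[i] entries here ... */

	/* pass the onecell data pointer, not its address */
	of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
}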
diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c index 79596463e0d9..4a82a49cff5e 100644 --- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c +++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c | |||
@@ -191,6 +191,8 @@ static struct clk_div_table axi_div_table[] = { | |||
191 | static SUNXI_CCU_DIV_TABLE(axi_clk, "axi", "cpu", | 191 | static SUNXI_CCU_DIV_TABLE(axi_clk, "axi", "cpu", |
192 | 0x050, 0, 3, axi_div_table, 0); | 192 | 0x050, 0, 3, axi_div_table, 0); |
193 | 193 | ||
194 | #define SUN6I_A31_AHB1_REG 0x054 | ||
195 | |||
194 | static const char * const ahb1_parents[] = { "osc32k", "osc24M", | 196 | static const char * const ahb1_parents[] = { "osc32k", "osc24M", |
195 | "axi", "pll-periph" }; | 197 | "axi", "pll-periph" }; |
196 | 198 | ||
@@ -1230,6 +1232,16 @@ static void __init sun6i_a31_ccu_setup(struct device_node *node) | |||
1230 | val &= BIT(16); | 1232 | val &= BIT(16); |
1231 | writel(val, reg + SUN6I_A31_PLL_MIPI_REG); | 1233 | writel(val, reg + SUN6I_A31_PLL_MIPI_REG); |
1232 | 1234 | ||
1235 | /* Force AHB1 to PLL6 / 3 */ | ||
1236 | val = readl(reg + SUN6I_A31_AHB1_REG); | ||
1237 | /* set PLL6 pre-div = 3 */ | ||
1238 | val &= ~GENMASK(7, 6); | ||
1239 | val |= 0x2 << 6; | ||
1240 | /* select PLL6 / pre-div */ | ||
1241 | val &= ~GENMASK(13, 12); | ||
1242 | val |= 0x3 << 12; | ||
1243 | writel(val, reg + SUN6I_A31_AHB1_REG); | ||
1244 | |||
1233 | sunxi_ccu_probe(node, reg, &sun6i_a31_ccu_desc); | 1245 | sunxi_ccu_probe(node, reg, &sun6i_a31_ccu_desc); |
1234 | 1246 | ||
1235 | ccu_mux_notifier_register(pll_cpu_clk.common.hw.clk, | 1247 | ccu_mux_notifier_register(pll_cpu_clk.common.hw.clk, |
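Editor's note: the AHB1 reparenting above is a plain read-modify-write on two bit fields before the CCU is probed. A standalone sketch of that sequence; the mask names are illustrative, the offsets and values follow the hunk.

#include <linux/bitops.h>
#include <linux/io.h>

#define AHB1_REG		0x054
#define AHB1_PLL6_PRE_DIV_MASK	GENMASK(7, 6)	/* pre-divider select */
#define AHB1_CLK_SRC_MASK	GENMASK(13, 12)	/* parent mux */

static void force_ahb1_to_pll6_div3(void __iomem *reg)
{
	u32 val = readl(reg + AHB1_REG);

	val &= ~AHB1_PLL6_PRE_DIV_MASK;
	val |= 0x2 << 6;		/* PLL6 pre-divider = 3 */
	val &= ~AHB1_CLK_SRC_MASK;
	val |= 0x3 << 12;		/* source = PLL6 / pre-divider */
	writel(val, reg + AHB1_REG);
}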
diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c index 838b22aa8b67..f2c9274b8bd5 100644 --- a/drivers/clk/sunxi/clk-sunxi.c +++ b/drivers/clk/sunxi/clk-sunxi.c | |||
@@ -373,7 +373,7 @@ static void sun4i_get_apb1_factors(struct factors_request *req) | |||
373 | else | 373 | else |
374 | calcp = 3; | 374 | calcp = 3; |
375 | 375 | ||
376 | calcm = (req->parent_rate >> calcp) - 1; | 376 | calcm = (div >> calcp) - 1; |
377 | 377 | ||
378 | req->rate = (req->parent_rate >> calcp) / (calcm + 1); | 378 | req->rate = (req->parent_rate >> calcp) / (calcm + 1); |
379 | req->m = calcm; | 379 | req->m = calcm; |
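Editor's note: the APB1 fix derives the m divider from the requested total divider instead of the raw parent rate, which previously produced an absurdly large m. A sketch of the corrected arithmetic only; selection of the p shift is omitted and assumed to already keep (div >> p) within the m field.

static unsigned long apb1_rate(unsigned long parent_rate, unsigned int div,
			       unsigned int calcp)
{
	unsigned int calcm = (div >> calcp) - 1;	/* was parent_rate >> calcp */

	return (parent_rate >> calcp) / (calcm + 1);
}

/* e.g. parent 24 MHz, div = 12, p = 2: m = 2, rate = 6 MHz / 3 = 2 MHz */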
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index 156aad167cd6..954a64c7757b 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c | |||
@@ -137,7 +137,7 @@ static void dbg_dump_sg(const char *level, const char *prefix_str, | |||
137 | } | 137 | } |
138 | 138 | ||
139 | buf = it_page + it->offset; | 139 | buf = it_page + it->offset; |
140 | len = min(tlen, it->length); | 140 | len = min_t(size_t, tlen, it->length); |
141 | print_hex_dump(level, prefix_str, prefix_type, rowsize, | 141 | print_hex_dump(level, prefix_str, prefix_type, rowsize, |
142 | groupsize, buf, len, ascii); | 142 | groupsize, buf, len, ascii); |
143 | tlen -= len; | 143 | tlen -= len; |
@@ -4583,6 +4583,15 @@ static int __init caam_algapi_init(void) | |||
4583 | if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES)) | 4583 | if (!aes_inst && (alg_sel == OP_ALG_ALGSEL_AES)) |
4584 | continue; | 4584 | continue; |
4585 | 4585 | ||
4586 | /* | ||
4587 | * Check support for AES modes not available | ||
4588 | * on LP devices. | ||
4589 | */ | ||
4590 | if ((cha_vid & CHA_ID_LS_AES_MASK) == CHA_ID_LS_AES_LP) | ||
4591 | if ((alg->class1_alg_type & OP_ALG_AAI_MASK) == | ||
4592 | OP_ALG_AAI_XTS) | ||
4593 | continue; | ||
4594 | |||
4586 | t_alg = caam_alg_alloc(alg); | 4595 | t_alg = caam_alg_alloc(alg); |
4587 | if (IS_ERR(t_alg)) { | 4596 | if (IS_ERR(t_alg)) { |
4588 | err = PTR_ERR(t_alg); | 4597 | err = PTR_ERR(t_alg); |
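Editor's note: the min() to min_t() change above avoids a signed/unsigned type mismatch between the running length and the scatterlist entry length. A tiny sketch of the pattern, with the exact original types assumed rather than copied:

#include <linux/kernel.h>

/* min() refuses mixed types; min_t() casts both sides first. */
static size_t clamp_chunk(size_t tlen, unsigned int sg_len)
{
	return min_t(size_t, tlen, sg_len);
}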
diff --git a/drivers/dax/dax.c b/drivers/dax/dax.c index 0e499bfca41c..3d94ff20fdca 100644 --- a/drivers/dax/dax.c +++ b/drivers/dax/dax.c | |||
@@ -270,8 +270,8 @@ static int check_vma(struct dax_dev *dax_dev, struct vm_area_struct *vma, | |||
270 | if (!dax_dev->alive) | 270 | if (!dax_dev->alive) |
271 | return -ENXIO; | 271 | return -ENXIO; |
272 | 272 | ||
273 | /* prevent private / writable mappings from being established */ | 273 | /* prevent private mappings from being established */ |
274 | if ((vma->vm_flags & (VM_NORESERVE|VM_SHARED|VM_WRITE)) == VM_WRITE) { | 274 | if ((vma->vm_flags & VM_SHARED) != VM_SHARED) { |
275 | dev_info(dev, "%s: %s: fail, attempted private mapping\n", | 275 | dev_info(dev, "%s: %s: fail, attempted private mapping\n", |
276 | current->comm, func); | 276 | current->comm, func); |
277 | return -EINVAL; | 277 | return -EINVAL; |
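Editor's note: the dax check is relaxed from "shared and writable" to "shared", so read-only MAP_SHARED mappings are now allowed and only private mappings are rejected. A minimal sketch of the new condition:

#include <linux/mm.h>

static int dax_vma_ok(struct vm_area_struct *vma)
{
	/* any shared mapping is fine; private mappings are refused */
	if ((vma->vm_flags & VM_SHARED) != VM_SHARED)
		return -EINVAL;
	return 0;
}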
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c index 4a15fa5df98b..73c6ce93a0d9 100644 --- a/drivers/dax/pmem.c +++ b/drivers/dax/pmem.c | |||
@@ -78,7 +78,9 @@ static int dax_pmem_probe(struct device *dev) | |||
78 | nsio = to_nd_namespace_io(&ndns->dev); | 78 | nsio = to_nd_namespace_io(&ndns->dev); |
79 | 79 | ||
80 | /* parse the 'pfn' info block via ->rw_bytes */ | 80 | /* parse the 'pfn' info block via ->rw_bytes */ |
81 | devm_nsio_enable(dev, nsio); | 81 | rc = devm_nsio_enable(dev, nsio); |
82 | if (rc) | ||
83 | return rc; | ||
82 | altmap = nvdimm_setup_pfn(nd_pfn, &res, &__altmap); | 84 | altmap = nvdimm_setup_pfn(nd_pfn, &res, &__altmap); |
83 | if (IS_ERR(altmap)) | 85 | if (IS_ERR(altmap)) |
84 | return PTR_ERR(altmap); | 86 | return PTR_ERR(altmap); |
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index af63a6bcf564..141aefbe37ec 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig | |||
@@ -306,6 +306,7 @@ config MMP_TDMA | |||
306 | depends on ARCH_MMP || COMPILE_TEST | 306 | depends on ARCH_MMP || COMPILE_TEST |
307 | select DMA_ENGINE | 307 | select DMA_ENGINE |
308 | select MMP_SRAM if ARCH_MMP | 308 | select MMP_SRAM if ARCH_MMP |
309 | select GENERIC_ALLOCATOR | ||
309 | help | 310 | help |
310 | Support the MMP Two-Channel DMA engine. | 311 | Support the MMP Two-Channel DMA engine. |
311 | This engine used for MMP Audio DMA and pxa910 SQU. | 312 | This engine used for MMP Audio DMA and pxa910 SQU. |
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c index bac5f023013b..d5ba43a87a68 100644 --- a/drivers/dma/cppi41.c +++ b/drivers/dma/cppi41.c | |||
@@ -317,6 +317,12 @@ static irqreturn_t cppi41_irq(int irq, void *data) | |||
317 | 317 | ||
318 | while (val) { | 318 | while (val) { |
319 | u32 desc, len; | 319 | u32 desc, len; |
320 | int error; | ||
321 | |||
322 | error = pm_runtime_get(cdd->ddev.dev); | ||
323 | if (error < 0) | ||
324 | dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n", | ||
325 | __func__, error); | ||
320 | 326 | ||
321 | q_num = __fls(val); | 327 | q_num = __fls(val); |
322 | val &= ~(1 << q_num); | 328 | val &= ~(1 << q_num); |
@@ -338,7 +344,6 @@ static irqreturn_t cppi41_irq(int irq, void *data) | |||
338 | dma_cookie_complete(&c->txd); | 344 | dma_cookie_complete(&c->txd); |
339 | dmaengine_desc_get_callback_invoke(&c->txd, NULL); | 345 | dmaengine_desc_get_callback_invoke(&c->txd, NULL); |
340 | 346 | ||
341 | /* Paired with cppi41_dma_issue_pending */ | ||
342 | pm_runtime_mark_last_busy(cdd->ddev.dev); | 347 | pm_runtime_mark_last_busy(cdd->ddev.dev); |
343 | pm_runtime_put_autosuspend(cdd->ddev.dev); | 348 | pm_runtime_put_autosuspend(cdd->ddev.dev); |
344 | } | 349 | } |
@@ -362,8 +367,13 @@ static int cppi41_dma_alloc_chan_resources(struct dma_chan *chan) | |||
362 | int error; | 367 | int error; |
363 | 368 | ||
364 | error = pm_runtime_get_sync(cdd->ddev.dev); | 369 | error = pm_runtime_get_sync(cdd->ddev.dev); |
365 | if (error < 0) | 370 | if (error < 0) { |
371 | dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n", | ||
372 | __func__, error); | ||
373 | pm_runtime_put_noidle(cdd->ddev.dev); | ||
374 | |||
366 | return error; | 375 | return error; |
376 | } | ||
367 | 377 | ||
368 | dma_cookie_init(chan); | 378 | dma_cookie_init(chan); |
369 | dma_async_tx_descriptor_init(&c->txd, chan); | 379 | dma_async_tx_descriptor_init(&c->txd, chan); |
@@ -385,8 +395,11 @@ static void cppi41_dma_free_chan_resources(struct dma_chan *chan) | |||
385 | int error; | 395 | int error; |
386 | 396 | ||
387 | error = pm_runtime_get_sync(cdd->ddev.dev); | 397 | error = pm_runtime_get_sync(cdd->ddev.dev); |
388 | if (error < 0) | 398 | if (error < 0) { |
399 | pm_runtime_put_noidle(cdd->ddev.dev); | ||
400 | |||
389 | return; | 401 | return; |
402 | } | ||
390 | 403 | ||
391 | WARN_ON(!list_empty(&cdd->pending)); | 404 | WARN_ON(!list_empty(&cdd->pending)); |
392 | 405 | ||
@@ -460,9 +473,9 @@ static void cppi41_dma_issue_pending(struct dma_chan *chan) | |||
460 | struct cppi41_dd *cdd = c->cdd; | 473 | struct cppi41_dd *cdd = c->cdd; |
461 | int error; | 474 | int error; |
462 | 475 | ||
463 | /* PM runtime paired with dmaengine_desc_get_callback_invoke */ | ||
464 | error = pm_runtime_get(cdd->ddev.dev); | 476 | error = pm_runtime_get(cdd->ddev.dev); |
465 | if ((error != -EINPROGRESS) && error < 0) { | 477 | if ((error != -EINPROGRESS) && error < 0) { |
478 | pm_runtime_put_noidle(cdd->ddev.dev); | ||
466 | dev_err(cdd->ddev.dev, "Failed to pm_runtime_get: %i\n", | 479 | dev_err(cdd->ddev.dev, "Failed to pm_runtime_get: %i\n", |
467 | error); | 480 | error); |
468 | 481 | ||
@@ -473,6 +486,9 @@ static void cppi41_dma_issue_pending(struct dma_chan *chan) | |||
473 | push_desc_queue(c); | 486 | push_desc_queue(c); |
474 | else | 487 | else |
475 | pending_desc(c); | 488 | pending_desc(c); |
489 | |||
490 | pm_runtime_mark_last_busy(cdd->ddev.dev); | ||
491 | pm_runtime_put_autosuspend(cdd->ddev.dev); | ||
476 | } | 492 | } |
477 | 493 | ||
478 | static u32 get_host_pd0(u32 length) | 494 | static u32 get_host_pd0(u32 length) |
@@ -1059,8 +1075,8 @@ err_chans: | |||
1059 | deinit_cppi41(dev, cdd); | 1075 | deinit_cppi41(dev, cdd); |
1060 | err_init_cppi: | 1076 | err_init_cppi: |
1061 | pm_runtime_dont_use_autosuspend(dev); | 1077 | pm_runtime_dont_use_autosuspend(dev); |
1062 | pm_runtime_put_sync(dev); | ||
1063 | err_get_sync: | 1078 | err_get_sync: |
1079 | pm_runtime_put_sync(dev); | ||
1064 | pm_runtime_disable(dev); | 1080 | pm_runtime_disable(dev); |
1065 | iounmap(cdd->usbss_mem); | 1081 | iounmap(cdd->usbss_mem); |
1066 | iounmap(cdd->ctrl_mem); | 1082 | iounmap(cdd->ctrl_mem); |
@@ -1072,7 +1088,12 @@ err_get_sync: | |||
1072 | static int cppi41_dma_remove(struct platform_device *pdev) | 1088 | static int cppi41_dma_remove(struct platform_device *pdev) |
1073 | { | 1089 | { |
1074 | struct cppi41_dd *cdd = platform_get_drvdata(pdev); | 1090 | struct cppi41_dd *cdd = platform_get_drvdata(pdev); |
1091 | int error; | ||
1075 | 1092 | ||
1093 | error = pm_runtime_get_sync(&pdev->dev); | ||
1094 | if (error < 0) | ||
1095 | dev_err(&pdev->dev, "%s could not pm_runtime_get: %i\n", | ||
1096 | __func__, error); | ||
1076 | of_dma_controller_free(pdev->dev.of_node); | 1097 | of_dma_controller_free(pdev->dev.of_node); |
1077 | dma_async_device_unregister(&cdd->ddev); | 1098 | dma_async_device_unregister(&cdd->ddev); |
1078 | 1099 | ||
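Editor's note: the cppi41 hunks converge on one runtime-PM rule: every get is paired with a put, and a failed pm_runtime_get_sync() is still followed by pm_runtime_put_noidle() so the usage count cannot leak. A generic sketch of that pattern, not tied to this driver:

#include <linux/pm_runtime.h>

static int do_work(struct device *dev)
{
	int error;

	error = pm_runtime_get_sync(dev);
	if (error < 0) {
		pm_runtime_put_noidle(dev);	/* undo the failed get */
		return error;
	}

	/* ... touch the hardware here ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;
}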
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index e18a58068bca..77242b37ef87 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c | |||
@@ -1628,6 +1628,7 @@ static int edma_alloc_chan_resources(struct dma_chan *chan) | |||
1628 | if (echan->slot[0] < 0) { | 1628 | if (echan->slot[0] < 0) { |
1629 | dev_err(dev, "Entry slot allocation failed for channel %u\n", | 1629 | dev_err(dev, "Entry slot allocation failed for channel %u\n", |
1630 | EDMA_CHAN_SLOT(echan->ch_num)); | 1630 | EDMA_CHAN_SLOT(echan->ch_num)); |
1631 | ret = echan->slot[0]; | ||
1631 | goto err_slot; | 1632 | goto err_slot; |
1632 | } | 1633 | } |
1633 | 1634 | ||
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index 83461994e418..a2358780ab2c 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c | |||
@@ -578,7 +578,7 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy( | |||
578 | 578 | ||
579 | burst = convert_burst(8); | 579 | burst = convert_burst(8); |
580 | width = convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES); | 580 | width = convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES); |
581 | v_lli->cfg |= DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) | | 581 | v_lli->cfg = DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) | |
582 | DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) | | 582 | DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) | |
583 | DMA_CHAN_CFG_DST_LINEAR_MODE | | 583 | DMA_CHAN_CFG_DST_LINEAR_MODE | |
584 | DMA_CHAN_CFG_SRC_LINEAR_MODE | | 584 | DMA_CHAN_CFG_SRC_LINEAR_MODE | |
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index d011cb89d25e..ed37e5908b91 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig | |||
@@ -22,10 +22,6 @@ menuconfig GPIOLIB | |||
22 | 22 | ||
23 | if GPIOLIB | 23 | if GPIOLIB |
24 | 24 | ||
25 | config GPIO_DEVRES | ||
26 | def_bool y | ||
27 | depends on HAS_IOMEM | ||
28 | |||
29 | config OF_GPIO | 25 | config OF_GPIO |
30 | def_bool y | 26 | def_bool y |
31 | depends on OF | 27 | depends on OF |
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index ab28a2daeacc..d074c2299393 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile | |||
@@ -2,7 +2,7 @@ | |||
2 | 2 | ||
3 | ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG | 3 | ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG |
4 | 4 | ||
5 | obj-$(CONFIG_GPIO_DEVRES) += devres.o | 5 | obj-$(CONFIG_GPIOLIB) += devres.o |
6 | obj-$(CONFIG_GPIOLIB) += gpiolib.o | 6 | obj-$(CONFIG_GPIOLIB) += gpiolib.o |
7 | obj-$(CONFIG_GPIOLIB) += gpiolib-legacy.o | 7 | obj-$(CONFIG_GPIOLIB) += gpiolib-legacy.o |
8 | obj-$(CONFIG_OF_GPIO) += gpiolib-of.o | 8 | obj-$(CONFIG_OF_GPIO) += gpiolib-of.o |
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index e422568e14ad..fe731f094257 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c | |||
@@ -372,14 +372,15 @@ static void pca953x_gpio_set_multiple(struct gpio_chip *gc, | |||
372 | 372 | ||
373 | bank_shift = fls((chip->gpio_chip.ngpio - 1) / BANK_SZ); | 373 | bank_shift = fls((chip->gpio_chip.ngpio - 1) / BANK_SZ); |
374 | 374 | ||
375 | memcpy(reg_val, chip->reg_output, NBANK(chip)); | ||
376 | mutex_lock(&chip->i2c_lock); | 375 | mutex_lock(&chip->i2c_lock); |
376 | memcpy(reg_val, chip->reg_output, NBANK(chip)); | ||
377 | for (bank = 0; bank < NBANK(chip); bank++) { | 377 | for (bank = 0; bank < NBANK(chip); bank++) { |
378 | bank_mask = mask[bank / sizeof(*mask)] >> | 378 | bank_mask = mask[bank / sizeof(*mask)] >> |
379 | ((bank % sizeof(*mask)) * 8); | 379 | ((bank % sizeof(*mask)) * 8); |
380 | if (bank_mask) { | 380 | if (bank_mask) { |
381 | bank_val = bits[bank / sizeof(*bits)] >> | 381 | bank_val = bits[bank / sizeof(*bits)] >> |
382 | ((bank % sizeof(*bits)) * 8); | 382 | ((bank % sizeof(*bits)) * 8); |
383 | bank_val &= bank_mask; | ||
383 | reg_val[bank] = (reg_val[bank] & ~bank_mask) | bank_val; | 384 | reg_val[bank] = (reg_val[bank] & ~bank_mask) | bank_val; |
384 | } | 385 | } |
385 | } | 386 | } |
@@ -607,7 +608,6 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, | |||
607 | 608 | ||
608 | if (client->irq && irq_base != -1 | 609 | if (client->irq && irq_base != -1 |
609 | && (chip->driver_data & PCA_INT)) { | 610 | && (chip->driver_data & PCA_INT)) { |
610 | |||
611 | ret = pca953x_read_regs(chip, | 611 | ret = pca953x_read_regs(chip, |
612 | chip->regs->input, chip->irq_stat); | 612 | chip->regs->input, chip->irq_stat); |
613 | if (ret) | 613 | if (ret) |
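Editor's note: the pca953x fix does two things: the cached output state is copied under the same mutex that protects it, and the incoming bits are masked before being merged so untouched GPIO lines keep their value. A simplified sketch with byte-per-bank arrays; the real driver packs banks out of unsigned long words.

#include <linux/mutex.h>
#include <linux/string.h>

static void set_banks(struct mutex *lock, u8 *cached, u8 *reg_val,
		      const u8 *mask, const u8 *bits, int nbanks)
{
	int bank;

	mutex_lock(lock);
	memcpy(reg_val, cached, nbanks);	/* snapshot under the lock */
	for (bank = 0; bank < nbanks; bank++) {
		u8 bank_val = bits[bank] & mask[bank];	/* mask new bits */

		reg_val[bank] = (reg_val[bank] & ~mask[bank]) | bank_val;
	}
	/* ... write reg_val to the expander and update the cache ... */
	mutex_unlock(lock);
}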
diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c index 5a5a6cb00eea..d6e21f1a70a9 100644 --- a/drivers/gpio/gpio-tc3589x.c +++ b/drivers/gpio/gpio-tc3589x.c | |||
@@ -97,7 +97,7 @@ static int tc3589x_gpio_get_direction(struct gpio_chip *chip, | |||
97 | if (ret < 0) | 97 | if (ret < 0) |
98 | return ret; | 98 | return ret; |
99 | 99 | ||
100 | return !!(ret & BIT(pos)); | 100 | return !(ret & BIT(pos)); |
101 | } | 101 | } |
102 | 102 | ||
103 | static int tc3589x_gpio_set_single_ended(struct gpio_chip *chip, | 103 | static int tc3589x_gpio_set_single_ended(struct gpio_chip *chip, |
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 93ed0e00c578..868128a676ba 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c | |||
@@ -2737,8 +2737,11 @@ int gpiochip_lock_as_irq(struct gpio_chip *chip, unsigned int offset) | |||
2737 | if (IS_ERR(desc)) | 2737 | if (IS_ERR(desc)) |
2738 | return PTR_ERR(desc); | 2738 | return PTR_ERR(desc); |
2739 | 2739 | ||
2740 | /* Flush direction if something changed behind our back */ | 2740 | /* |
2741 | if (chip->get_direction) { | 2741 | * If it's fast: flush the direction setting if something changed |
2742 | * behind our back | ||
2743 | */ | ||
2744 | if (!chip->can_sleep && chip->get_direction) { | ||
2742 | int dir = chip->get_direction(chip, offset); | 2745 | int dir = chip->get_direction(chip, offset); |
2743 | 2746 | ||
2744 | if (dir) | 2747 | if (dir) |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 039b57e4644c..496f72b134eb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h | |||
@@ -459,6 +459,7 @@ struct amdgpu_bo { | |||
459 | u64 metadata_flags; | 459 | u64 metadata_flags; |
460 | void *metadata; | 460 | void *metadata; |
461 | u32 metadata_size; | 461 | u32 metadata_size; |
462 | unsigned prime_shared_count; | ||
462 | /* list of all virtual address to which this bo | 463 | /* list of all virtual address to which this bo |
463 | * is associated to | 464 | * is associated to |
464 | */ | 465 | */ |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index dae35a96a694..02ca5dd978f6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | |||
@@ -34,6 +34,7 @@ struct amdgpu_atpx { | |||
34 | 34 | ||
35 | static struct amdgpu_atpx_priv { | 35 | static struct amdgpu_atpx_priv { |
36 | bool atpx_detected; | 36 | bool atpx_detected; |
37 | bool bridge_pm_usable; | ||
37 | /* handle for device - and atpx */ | 38 | /* handle for device - and atpx */ |
38 | acpi_handle dhandle; | 39 | acpi_handle dhandle; |
39 | acpi_handle other_handle; | 40 | acpi_handle other_handle; |
@@ -205,7 +206,11 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx) | |||
205 | atpx->is_hybrid = false; | 206 | atpx->is_hybrid = false; |
206 | if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) { | 207 | if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) { |
207 | printk("ATPX Hybrid Graphics\n"); | 208 | printk("ATPX Hybrid Graphics\n"); |
208 | atpx->functions.power_cntl = false; | 209 | /* |
210 | * Disable legacy PM methods only when pcie port PM is usable, | ||
211 | * otherwise the device might fail to power off or power on. | ||
212 | */ | ||
213 | atpx->functions.power_cntl = !amdgpu_atpx_priv.bridge_pm_usable; | ||
209 | atpx->is_hybrid = true; | 214 | atpx->is_hybrid = true; |
210 | } | 215 | } |
211 | 216 | ||
@@ -480,6 +485,7 @@ static int amdgpu_atpx_power_state(enum vga_switcheroo_client_id id, | |||
480 | */ | 485 | */ |
481 | static bool amdgpu_atpx_pci_probe_handle(struct pci_dev *pdev) | 486 | static bool amdgpu_atpx_pci_probe_handle(struct pci_dev *pdev) |
482 | { | 487 | { |
488 | struct pci_dev *parent_pdev = pci_upstream_bridge(pdev); | ||
483 | acpi_handle dhandle, atpx_handle; | 489 | acpi_handle dhandle, atpx_handle; |
484 | acpi_status status; | 490 | acpi_status status; |
485 | 491 | ||
@@ -494,6 +500,7 @@ static bool amdgpu_atpx_pci_probe_handle(struct pci_dev *pdev) | |||
494 | } | 500 | } |
495 | amdgpu_atpx_priv.dhandle = dhandle; | 501 | amdgpu_atpx_priv.dhandle = dhandle; |
496 | amdgpu_atpx_priv.atpx.handle = atpx_handle; | 502 | amdgpu_atpx_priv.atpx.handle = atpx_handle; |
503 | amdgpu_atpx_priv.bridge_pm_usable = parent_pdev && parent_pdev->bridge_d3; | ||
497 | return true; | 504 | return true; |
498 | } | 505 | } |
499 | 506 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index 651115dcce12..c02db01f6583 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | |||
@@ -132,7 +132,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev, | |||
132 | entry->priority = min(info[i].bo_priority, | 132 | entry->priority = min(info[i].bo_priority, |
133 | AMDGPU_BO_LIST_MAX_PRIORITY); | 133 | AMDGPU_BO_LIST_MAX_PRIORITY); |
134 | entry->tv.bo = &entry->robj->tbo; | 134 | entry->tv.bo = &entry->robj->tbo; |
135 | entry->tv.shared = true; | 135 | entry->tv.shared = !entry->robj->prime_shared_count; |
136 | 136 | ||
137 | if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GDS) | 137 | if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GDS) |
138 | gds_obj = entry->robj; | 138 | gds_obj = entry->robj; |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 7ca07e7b25c1..3161d77bf299 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | |||
@@ -658,12 +658,10 @@ static bool amdgpu_vpost_needed(struct amdgpu_device *adev) | |||
658 | return false; | 658 | return false; |
659 | 659 | ||
660 | if (amdgpu_passthrough(adev)) { | 660 | if (amdgpu_passthrough(adev)) { |
661 | /* for FIJI: In whole GPU pass-through virtualization case | 661 | /* for FIJI: In whole GPU pass-through virtualization case, after VM reboot |
662 | * old smc fw won't clear some registers (e.g. MEM_SIZE, BIOS_SCRATCH) | 662 | * some old smc fw still need driver do vPost otherwise gpu hang, while |
663 | * so amdgpu_card_posted return false and driver will incorrectly skip vPost. | 663 | * those smc fw version above 22.15 doesn't have this flaw, so we force |
664 | * but if we force vPost do in pass-through case, the driver reload will hang. | 664 | * vpost executed for smc version below 22.15 |
665 | * whether doing vPost depends on amdgpu_card_posted if smc version is above | ||
666 | * 00160e00 for FIJI. | ||
667 | */ | 665 | */ |
668 | if (adev->asic_type == CHIP_FIJI) { | 666 | if (adev->asic_type == CHIP_FIJI) { |
669 | int err; | 667 | int err; |
@@ -674,22 +672,11 @@ static bool amdgpu_vpost_needed(struct amdgpu_device *adev) | |||
674 | return true; | 672 | return true; |
675 | 673 | ||
676 | fw_ver = *((uint32_t *)adev->pm.fw->data + 69); | 674 | fw_ver = *((uint32_t *)adev->pm.fw->data + 69); |
677 | if (fw_ver >= 0x00160e00) | 675 | if (fw_ver < 0x00160e00) |
678 | return !amdgpu_card_posted(adev); | 676 | return true; |
679 | } | 677 | } |
680 | } else { | ||
681 | /* in bare-metal case, amdgpu_card_posted return false | ||
682 | * after system reboot/boot, and return true if driver | ||
683 | * reloaded. | ||
684 | * we shouldn't do vPost after driver reload otherwise GPU | ||
685 | * could hang. | ||
686 | */ | ||
687 | if (amdgpu_card_posted(adev)) | ||
688 | return false; | ||
689 | } | 678 | } |
690 | 679 | return !amdgpu_card_posted(adev); | |
691 | /* we assume vPost is neede for all other cases */ | ||
692 | return true; | ||
693 | } | 680 | } |
694 | 681 | ||
695 | /** | 682 | /** |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c index 7700dc22f243..3826d5aea0a6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | |||
@@ -74,20 +74,36 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev, | |||
74 | if (ret) | 74 | if (ret) |
75 | return ERR_PTR(ret); | 75 | return ERR_PTR(ret); |
76 | 76 | ||
77 | bo->prime_shared_count = 1; | ||
77 | return &bo->gem_base; | 78 | return &bo->gem_base; |
78 | } | 79 | } |
79 | 80 | ||
80 | int amdgpu_gem_prime_pin(struct drm_gem_object *obj) | 81 | int amdgpu_gem_prime_pin(struct drm_gem_object *obj) |
81 | { | 82 | { |
82 | struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); | 83 | struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); |
83 | int ret = 0; | 84 | long ret = 0; |
84 | 85 | ||
85 | ret = amdgpu_bo_reserve(bo, false); | 86 | ret = amdgpu_bo_reserve(bo, false); |
86 | if (unlikely(ret != 0)) | 87 | if (unlikely(ret != 0)) |
87 | return ret; | 88 | return ret; |
88 | 89 | ||
90 | /* | ||
91 | * Wait for all shared fences to complete before we switch to future | ||
92 | * use of exclusive fence on this prime shared bo. | ||
93 | */ | ||
94 | ret = reservation_object_wait_timeout_rcu(bo->tbo.resv, true, false, | ||
95 | MAX_SCHEDULE_TIMEOUT); | ||
96 | if (unlikely(ret < 0)) { | ||
97 | DRM_DEBUG_PRIME("Fence wait failed: %li\n", ret); | ||
98 | amdgpu_bo_unreserve(bo); | ||
99 | return ret; | ||
100 | } | ||
101 | |||
89 | /* pin buffer into GTT */ | 102 | /* pin buffer into GTT */ |
90 | ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL); | 103 | ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL); |
104 | if (likely(ret == 0)) | ||
105 | bo->prime_shared_count++; | ||
106 | |||
91 | amdgpu_bo_unreserve(bo); | 107 | amdgpu_bo_unreserve(bo); |
92 | return ret; | 108 | return ret; |
93 | } | 109 | } |
@@ -102,6 +118,8 @@ void amdgpu_gem_prime_unpin(struct drm_gem_object *obj) | |||
102 | return; | 118 | return; |
103 | 119 | ||
104 | amdgpu_bo_unpin(bo); | 120 | amdgpu_bo_unpin(bo); |
121 | if (bo->prime_shared_count) | ||
122 | bo->prime_shared_count--; | ||
105 | amdgpu_bo_unreserve(bo); | 123 | amdgpu_bo_unreserve(bo); |
106 | } | 124 | } |
107 | 125 | ||
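Editor's note: the prime pin path now waits for all shared fences on the imported BO before pinning it for scanout, and tracks a shared count so the BO list keeps marking it as externally shared. A sketch of the wait-then-pin step only; the pin call is left as a comment since it is amdgpu-specific.

#include <linux/reservation.h>
#include <linux/sched.h>

static long wait_shared_then_pin(struct reservation_object *resv)
{
	long ret;

	/* wait for every shared fence, uninterruptibly, without a deadline */
	ret = reservation_object_wait_timeout_rcu(resv, true, false,
						  MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

	/* ... amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT, NULL) ... */
	return 0;
}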
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index b0c929dd8beb..08cd0bd3ebe5 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | |||
@@ -1469,8 +1469,6 @@ static int smu7_get_evv_voltages(struct pp_hwmgr *hwmgr) | |||
1469 | table_info->vddgfx_lookup_table, vv_id, &sclk)) { | 1469 | table_info->vddgfx_lookup_table, vv_id, &sclk)) { |
1470 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, | 1470 | if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, |
1471 | PHM_PlatformCaps_ClockStretcher)) { | 1471 | PHM_PlatformCaps_ClockStretcher)) { |
1472 | if (table_info == NULL) | ||
1473 | return -EINVAL; | ||
1474 | sclk_table = table_info->vdd_dep_on_sclk; | 1472 | sclk_table = table_info->vdd_dep_on_sclk; |
1475 | 1473 | ||
1476 | for (j = 1; j < sclk_table->count; j++) { | 1474 | for (j = 1; j < sclk_table->count; j++) { |
@@ -2986,19 +2984,19 @@ static int smu7_get_pp_table_entry_callback_func_v0(struct pp_hwmgr *hwmgr, | |||
2986 | if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk) | 2984 | if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk) |
2987 | data->highest_mclk = memory_clock; | 2985 | data->highest_mclk = memory_clock; |
2988 | 2986 | ||
2989 | performance_level = &(ps->performance_levels | ||
2990 | [ps->performance_level_count++]); | ||
2991 | |||
2992 | PP_ASSERT_WITH_CODE( | 2987 | PP_ASSERT_WITH_CODE( |
2993 | (ps->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)), | 2988 | (ps->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)), |
2994 | "Performance levels exceeds SMC limit!", | 2989 | "Performance levels exceeds SMC limit!", |
2995 | return -EINVAL); | 2990 | return -EINVAL); |
2996 | 2991 | ||
2997 | PP_ASSERT_WITH_CODE( | 2992 | PP_ASSERT_WITH_CODE( |
2998 | (ps->performance_level_count <= | 2993 | (ps->performance_level_count < |
2999 | hwmgr->platform_descriptor.hardwareActivityPerformanceLevels), | 2994 | hwmgr->platform_descriptor.hardwareActivityPerformanceLevels), |
3000 | "Performance levels exceeds Driver limit!", | 2995 | "Performance levels exceeds Driver limit, Skip!", |
3001 | return -EINVAL); | 2996 | return 0); |
2997 | |||
2998 | performance_level = &(ps->performance_levels | ||
2999 | [ps->performance_level_count++]); | ||
3002 | 3000 | ||
3003 | /* Performance levels are arranged from low to high. */ | 3001 | /* Performance levels are arranged from low to high. */ |
3004 | performance_level->memory_clock = memory_clock; | 3002 | performance_level->memory_clock = memory_clock; |
diff --git a/drivers/gpu/drm/arc/arcpgu_hdmi.c b/drivers/gpu/drm/arc/arcpgu_hdmi.c index b7a8b2ac4055..b69c66b4897e 100644 --- a/drivers/gpu/drm/arc/arcpgu_hdmi.c +++ b/drivers/gpu/drm/arc/arcpgu_hdmi.c | |||
@@ -14,170 +14,45 @@ | |||
14 | * | 14 | * |
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <drm/drm_crtc_helper.h> | 17 | #include <drm/drm_crtc.h> |
18 | #include <drm/drm_encoder_slave.h> | 18 | #include <drm/drm_encoder_slave.h> |
19 | #include <drm/drm_atomic_helper.h> | ||
20 | 19 | ||
21 | #include "arcpgu.h" | 20 | #include "arcpgu.h" |
22 | 21 | ||
23 | struct arcpgu_drm_connector { | ||
24 | struct drm_connector connector; | ||
25 | struct drm_encoder_slave *encoder_slave; | ||
26 | }; | ||
27 | |||
28 | static int arcpgu_drm_connector_get_modes(struct drm_connector *connector) | ||
29 | { | ||
30 | const struct drm_encoder_slave_funcs *sfuncs; | ||
31 | struct drm_encoder_slave *slave; | ||
32 | struct arcpgu_drm_connector *con = | ||
33 | container_of(connector, struct arcpgu_drm_connector, connector); | ||
34 | |||
35 | slave = con->encoder_slave; | ||
36 | if (slave == NULL) { | ||
37 | dev_err(connector->dev->dev, | ||
38 | "connector_get_modes: cannot find slave encoder for connector\n"); | ||
39 | return 0; | ||
40 | } | ||
41 | |||
42 | sfuncs = slave->slave_funcs; | ||
43 | if (sfuncs->get_modes == NULL) | ||
44 | return 0; | ||
45 | |||
46 | return sfuncs->get_modes(&slave->base, connector); | ||
47 | } | ||
48 | |||
49 | static enum drm_connector_status | ||
50 | arcpgu_drm_connector_detect(struct drm_connector *connector, bool force) | ||
51 | { | ||
52 | enum drm_connector_status status = connector_status_unknown; | ||
53 | const struct drm_encoder_slave_funcs *sfuncs; | ||
54 | struct drm_encoder_slave *slave; | ||
55 | |||
56 | struct arcpgu_drm_connector *con = | ||
57 | container_of(connector, struct arcpgu_drm_connector, connector); | ||
58 | |||
59 | slave = con->encoder_slave; | ||
60 | if (slave == NULL) { | ||
61 | dev_err(connector->dev->dev, | ||
62 | "connector_detect: cannot find slave encoder for connector\n"); | ||
63 | return status; | ||
64 | } | ||
65 | |||
66 | sfuncs = slave->slave_funcs; | ||
67 | if (sfuncs && sfuncs->detect) | ||
68 | return sfuncs->detect(&slave->base, connector); | ||
69 | |||
70 | dev_err(connector->dev->dev, "connector_detect: could not detect slave funcs\n"); | ||
71 | return status; | ||
72 | } | ||
73 | |||
74 | static void arcpgu_drm_connector_destroy(struct drm_connector *connector) | ||
75 | { | ||
76 | drm_connector_unregister(connector); | ||
77 | drm_connector_cleanup(connector); | ||
78 | } | ||
79 | |||
80 | static const struct drm_connector_helper_funcs | ||
81 | arcpgu_drm_connector_helper_funcs = { | ||
82 | .get_modes = arcpgu_drm_connector_get_modes, | ||
83 | }; | ||
84 | |||
85 | static const struct drm_connector_funcs arcpgu_drm_connector_funcs = { | ||
86 | .dpms = drm_helper_connector_dpms, | ||
87 | .reset = drm_atomic_helper_connector_reset, | ||
88 | .detect = arcpgu_drm_connector_detect, | ||
89 | .fill_modes = drm_helper_probe_single_connector_modes, | ||
90 | .destroy = arcpgu_drm_connector_destroy, | ||
91 | .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, | ||
92 | .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, | ||
93 | }; | ||
94 | |||
95 | static struct drm_encoder_helper_funcs arcpgu_drm_encoder_helper_funcs = { | ||
96 | .dpms = drm_i2c_encoder_dpms, | ||
97 | .mode_fixup = drm_i2c_encoder_mode_fixup, | ||
98 | .mode_set = drm_i2c_encoder_mode_set, | ||
99 | .prepare = drm_i2c_encoder_prepare, | ||
100 | .commit = drm_i2c_encoder_commit, | ||
101 | .detect = drm_i2c_encoder_detect, | ||
102 | }; | ||
103 | |||
104 | static struct drm_encoder_funcs arcpgu_drm_encoder_funcs = { | 22 | static struct drm_encoder_funcs arcpgu_drm_encoder_funcs = { |
105 | .destroy = drm_encoder_cleanup, | 23 | .destroy = drm_encoder_cleanup, |
106 | }; | 24 | }; |
107 | 25 | ||
108 | int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np) | 26 | int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np) |
109 | { | 27 | { |
110 | struct arcpgu_drm_connector *arcpgu_connector; | 28 | struct drm_encoder *encoder; |
111 | struct drm_i2c_encoder_driver *driver; | 29 | struct drm_bridge *bridge; |
112 | struct drm_encoder_slave *encoder; | 30 | |
113 | struct drm_connector *connector; | 31 | int ret = 0; |
114 | struct i2c_client *i2c_slave; | ||
115 | int ret; | ||
116 | 32 | ||
117 | encoder = devm_kzalloc(drm->dev, sizeof(*encoder), GFP_KERNEL); | 33 | encoder = devm_kzalloc(drm->dev, sizeof(*encoder), GFP_KERNEL); |
118 | if (encoder == NULL) | 34 | if (encoder == NULL) |
119 | return -ENOMEM; | 35 | return -ENOMEM; |
120 | 36 | ||
121 | i2c_slave = of_find_i2c_device_by_node(np); | 37 | /* Locate drm bridge from the hdmi encoder DT node */ |
122 | if (!i2c_slave || !i2c_get_clientdata(i2c_slave)) { | 38 | bridge = of_drm_find_bridge(np); |
123 | dev_err(drm->dev, "failed to find i2c slave encoder\n"); | 39 | if (!bridge) |
124 | return -EPROBE_DEFER; | ||
125 | } | ||
126 | |||
127 | if (i2c_slave->dev.driver == NULL) { | ||
128 | dev_err(drm->dev, "failed to find i2c slave driver\n"); | ||
129 | return -EPROBE_DEFER; | 40 | return -EPROBE_DEFER; |
130 | } | ||
131 | 41 | ||
132 | driver = | 42 | encoder->possible_crtcs = 1; |
133 | to_drm_i2c_encoder_driver(to_i2c_driver(i2c_slave->dev.driver)); | 43 | encoder->possible_clones = 0; |
134 | ret = driver->encoder_init(i2c_slave, drm, encoder); | 44 | ret = drm_encoder_init(drm, encoder, &arcpgu_drm_encoder_funcs, |
135 | if (ret) { | ||
136 | dev_err(drm->dev, "failed to initialize i2c encoder slave\n"); | ||
137 | return ret; | ||
138 | } | ||
139 | |||
140 | encoder->base.possible_crtcs = 1; | ||
141 | encoder->base.possible_clones = 0; | ||
142 | ret = drm_encoder_init(drm, &encoder->base, &arcpgu_drm_encoder_funcs, | ||
143 | DRM_MODE_ENCODER_TMDS, NULL); | 45 | DRM_MODE_ENCODER_TMDS, NULL); |
144 | if (ret) | 46 | if (ret) |
145 | return ret; | 47 | return ret; |
146 | 48 | ||
147 | drm_encoder_helper_add(&encoder->base, | 49 | /* Link drm_bridge to encoder */ |
148 | &arcpgu_drm_encoder_helper_funcs); | 50 | bridge->encoder = encoder; |
149 | 51 | encoder->bridge = bridge; | |
150 | arcpgu_connector = devm_kzalloc(drm->dev, sizeof(*arcpgu_connector), | ||
151 | GFP_KERNEL); | ||
152 | if (!arcpgu_connector) { | ||
153 | ret = -ENOMEM; | ||
154 | goto error_encoder_cleanup; | ||
155 | } | ||
156 | |||
157 | connector = &arcpgu_connector->connector; | ||
158 | drm_connector_helper_add(connector, &arcpgu_drm_connector_helper_funcs); | ||
159 | ret = drm_connector_init(drm, connector, &arcpgu_drm_connector_funcs, | ||
160 | DRM_MODE_CONNECTOR_HDMIA); | ||
161 | if (ret < 0) { | ||
162 | dev_err(drm->dev, "failed to initialize drm connector\n"); | ||
163 | goto error_encoder_cleanup; | ||
164 | } | ||
165 | 52 | ||
166 | ret = drm_mode_connector_attach_encoder(connector, &encoder->base); | 53 | ret = drm_bridge_attach(drm, bridge); |
167 | if (ret < 0) { | 54 | if (ret) |
168 | dev_err(drm->dev, "could not attach connector to encoder\n"); | 55 | drm_encoder_cleanup(encoder); |
169 | drm_connector_unregister(connector); | ||
170 | goto error_connector_cleanup; | ||
171 | } | ||
172 | |||
173 | arcpgu_connector->encoder_slave = encoder; | ||
174 | |||
175 | return 0; | ||
176 | |||
177 | error_connector_cleanup: | ||
178 | drm_connector_cleanup(connector); | ||
179 | 56 | ||
180 | error_encoder_cleanup: | ||
181 | drm_encoder_cleanup(&encoder->base); | ||
182 | return ret; | 57 | return ret; |
183 | } | 58 | } |
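Editor's note: the arcpgu rewrite drops the i2c slave encoder plus hand-rolled connector in favour of a drm_bridge found via DT. A condensed sketch of that flow, using the 4.9-era signatures seen in the hunk (drm_bridge_attach() still takes the drm_device, and the bridge declarations live in drm_crtc.h); header placement differs on later kernels.

#include <linux/of.h>
#include <drm/drm_crtc.h>

static int hdmi_encoder_init(struct drm_device *drm, struct device_node *np,
			     struct drm_encoder *encoder,
			     const struct drm_encoder_funcs *funcs)
{
	struct drm_bridge *bridge;
	int ret;

	bridge = of_drm_find_bridge(np);
	if (!bridge)
		return -EPROBE_DEFER;	/* bridge driver not bound yet */

	encoder->possible_crtcs = 1;
	ret = drm_encoder_init(drm, encoder, funcs,
			       DRM_MODE_ENCODER_TMDS, NULL);
	if (ret)
		return ret;

	/* link bridge and encoder, then let the bridge register its connector */
	bridge->encoder = encoder;
	encoder->bridge = bridge;

	ret = drm_bridge_attach(drm, bridge);
	if (ret)
		drm_encoder_cleanup(encoder);

	return ret;
}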
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c index 48019ae22ddb..28341b32067f 100644 --- a/drivers/gpu/drm/arm/hdlcd_crtc.c +++ b/drivers/gpu/drm/arm/hdlcd_crtc.c | |||
@@ -150,15 +150,14 @@ static void hdlcd_crtc_enable(struct drm_crtc *crtc) | |||
150 | clk_prepare_enable(hdlcd->clk); | 150 | clk_prepare_enable(hdlcd->clk); |
151 | hdlcd_crtc_mode_set_nofb(crtc); | 151 | hdlcd_crtc_mode_set_nofb(crtc); |
152 | hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 1); | 152 | hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 1); |
153 | drm_crtc_vblank_on(crtc); | ||
153 | } | 154 | } |
154 | 155 | ||
155 | static void hdlcd_crtc_disable(struct drm_crtc *crtc) | 156 | static void hdlcd_crtc_disable(struct drm_crtc *crtc) |
156 | { | 157 | { |
157 | struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); | 158 | struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc); |
158 | 159 | ||
159 | if (!crtc->state->active) | 160 | drm_crtc_vblank_off(crtc); |
160 | return; | ||
161 | |||
162 | hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0); | 161 | hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0); |
163 | clk_disable_unprepare(hdlcd->clk); | 162 | clk_disable_unprepare(hdlcd->clk); |
164 | } | 163 | } |
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index e8fb6ef947ee..38eaa63afb31 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c | |||
@@ -1907,6 +1907,8 @@ err_disable_pm_runtime: | |||
1907 | err_hdmiphy: | 1907 | err_hdmiphy: |
1908 | if (hdata->hdmiphy_port) | 1908 | if (hdata->hdmiphy_port) |
1909 | put_device(&hdata->hdmiphy_port->dev); | 1909 | put_device(&hdata->hdmiphy_port->dev); |
1910 | if (hdata->regs_hdmiphy) | ||
1911 | iounmap(hdata->regs_hdmiphy); | ||
1910 | err_ddc: | 1912 | err_ddc: |
1911 | put_device(&hdata->ddc_adpt->dev); | 1913 | put_device(&hdata->ddc_adpt->dev); |
1912 | 1914 | ||
@@ -1929,6 +1931,9 @@ static int hdmi_remove(struct platform_device *pdev) | |||
1929 | if (hdata->hdmiphy_port) | 1931 | if (hdata->hdmiphy_port) |
1930 | put_device(&hdata->hdmiphy_port->dev); | 1932 | put_device(&hdata->hdmiphy_port->dev); |
1931 | 1933 | ||
1934 | if (hdata->regs_hdmiphy) | ||
1935 | iounmap(hdata->regs_hdmiphy); | ||
1936 | |||
1932 | put_device(&hdata->ddc_adpt->dev); | 1937 | put_device(&hdata->ddc_adpt->dev); |
1933 | 1938 | ||
1934 | return 0; | 1939 | return 0; |
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c index b2d5e188b1b8..deb57435cc89 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c | |||
@@ -25,8 +25,13 @@ | |||
25 | static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc, | 25 | static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc, |
26 | struct drm_crtc_state *old_crtc_state) | 26 | struct drm_crtc_state *old_crtc_state) |
27 | { | 27 | { |
28 | struct drm_device *dev = crtc->dev; | ||
29 | struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; | ||
28 | struct drm_pending_vblank_event *event = crtc->state->event; | 30 | struct drm_pending_vblank_event *event = crtc->state->event; |
29 | 31 | ||
32 | regmap_write(fsl_dev->regmap, | ||
33 | DCU_UPDATE_MODE, DCU_UPDATE_MODE_READREG); | ||
34 | |||
30 | if (event) { | 35 | if (event) { |
31 | crtc->state->event = NULL; | 36 | crtc->state->event = NULL; |
32 | 37 | ||
@@ -39,11 +44,15 @@ static void fsl_dcu_drm_crtc_atomic_flush(struct drm_crtc *crtc, | |||
39 | } | 44 | } |
40 | } | 45 | } |
41 | 46 | ||
42 | static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc) | 47 | static void fsl_dcu_drm_crtc_atomic_disable(struct drm_crtc *crtc, |
48 | struct drm_crtc_state *old_crtc_state) | ||
43 | { | 49 | { |
44 | struct drm_device *dev = crtc->dev; | 50 | struct drm_device *dev = crtc->dev; |
45 | struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; | 51 | struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; |
46 | 52 | ||
53 | /* always disable planes on the CRTC */ | ||
54 | drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, true); | ||
55 | |||
47 | drm_crtc_vblank_off(crtc); | 56 | drm_crtc_vblank_off(crtc); |
48 | 57 | ||
49 | regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE, | 58 | regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE, |
@@ -122,8 +131,8 @@ static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) | |||
122 | } | 131 | } |
123 | 132 | ||
124 | static const struct drm_crtc_helper_funcs fsl_dcu_drm_crtc_helper_funcs = { | 133 | static const struct drm_crtc_helper_funcs fsl_dcu_drm_crtc_helper_funcs = { |
134 | .atomic_disable = fsl_dcu_drm_crtc_atomic_disable, | ||
125 | .atomic_flush = fsl_dcu_drm_crtc_atomic_flush, | 135 | .atomic_flush = fsl_dcu_drm_crtc_atomic_flush, |
126 | .disable = fsl_dcu_drm_disable_crtc, | ||
127 | .enable = fsl_dcu_drm_crtc_enable, | 136 | .enable = fsl_dcu_drm_crtc_enable, |
128 | .mode_set_nofb = fsl_dcu_drm_crtc_mode_set_nofb, | 137 | .mode_set_nofb = fsl_dcu_drm_crtc_mode_set_nofb, |
129 | }; | 138 | }; |
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c index e04efbed1a54..cc2fde2ae5ef 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | |||
@@ -59,8 +59,6 @@ static int fsl_dcu_drm_irq_init(struct drm_device *dev) | |||
59 | 59 | ||
60 | regmap_write(fsl_dev->regmap, DCU_INT_STATUS, 0); | 60 | regmap_write(fsl_dev->regmap, DCU_INT_STATUS, 0); |
61 | regmap_write(fsl_dev->regmap, DCU_INT_MASK, ~0); | 61 | regmap_write(fsl_dev->regmap, DCU_INT_MASK, ~0); |
62 | regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, | ||
63 | DCU_UPDATE_MODE_READREG); | ||
64 | 62 | ||
65 | return ret; | 63 | return ret; |
66 | } | 64 | } |
@@ -139,8 +137,6 @@ static irqreturn_t fsl_dcu_drm_irq(int irq, void *arg) | |||
139 | drm_handle_vblank(dev, 0); | 137 | drm_handle_vblank(dev, 0); |
140 | 138 | ||
141 | regmap_write(fsl_dev->regmap, DCU_INT_STATUS, int_status); | 139 | regmap_write(fsl_dev->regmap, DCU_INT_STATUS, int_status); |
142 | regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, | ||
143 | DCU_UPDATE_MODE_READREG); | ||
144 | 140 | ||
145 | return IRQ_HANDLED; | 141 | return IRQ_HANDLED; |
146 | } | 142 | } |
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c index 9e6f7d8112b3..a99f48847420 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c | |||
@@ -160,11 +160,6 @@ static void fsl_dcu_drm_plane_atomic_update(struct drm_plane *plane, | |||
160 | DCU_LAYER_POST_SKIP(0) | | 160 | DCU_LAYER_POST_SKIP(0) | |
161 | DCU_LAYER_PRE_SKIP(0)); | 161 | DCU_LAYER_PRE_SKIP(0)); |
162 | } | 162 | } |
163 | regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE, | ||
164 | DCU_MODE_DCU_MODE_MASK, | ||
165 | DCU_MODE_DCU_MODE(DCU_MODE_NORMAL)); | ||
166 | regmap_write(fsl_dev->regmap, | ||
167 | DCU_UPDATE_MODE, DCU_UPDATE_MODE_READREG); | ||
168 | 163 | ||
169 | return; | 164 | return; |
170 | } | 165 | } |
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 7adb4c77cc7f..a218c2e395e7 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
@@ -1281,6 +1281,12 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file, | |||
1281 | return ctx; | 1281 | return ctx; |
1282 | } | 1282 | } |
1283 | 1283 | ||
1284 | static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj) | ||
1285 | { | ||
1286 | return !(obj->cache_level == I915_CACHE_NONE || | ||
1287 | obj->cache_level == I915_CACHE_WT); | ||
1288 | } | ||
1289 | |||
1284 | void i915_vma_move_to_active(struct i915_vma *vma, | 1290 | void i915_vma_move_to_active(struct i915_vma *vma, |
1285 | struct drm_i915_gem_request *req, | 1291 | struct drm_i915_gem_request *req, |
1286 | unsigned int flags) | 1292 | unsigned int flags) |
@@ -1311,6 +1317,8 @@ void i915_vma_move_to_active(struct i915_vma *vma, | |||
1311 | 1317 | ||
1312 | /* update for the implicit flush after a batch */ | 1318 | /* update for the implicit flush after a batch */ |
1313 | obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; | 1319 | obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; |
1320 | if (!obj->cache_dirty && gpu_write_needs_clflush(obj)) | ||
1321 | obj->cache_dirty = true; | ||
1314 | } | 1322 | } |
1315 | 1323 | ||
1316 | if (flags & EXEC_OBJECT_NEEDS_FENCE) | 1324 | if (flags & EXEC_OBJECT_NEEDS_FENCE) |
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 1f8af87c6294..cf2560708e03 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
@@ -1143,7 +1143,7 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, | |||
1143 | if (!child) | 1143 | if (!child) |
1144 | return; | 1144 | return; |
1145 | 1145 | ||
1146 | aux_channel = child->raw[25]; | 1146 | aux_channel = child->common.aux_channel; |
1147 | ddc_pin = child->common.ddc_pin; | 1147 | ddc_pin = child->common.ddc_pin; |
1148 | 1148 | ||
1149 | is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING; | 1149 | is_dvi = child->common.device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING; |
@@ -1673,7 +1673,8 @@ bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port) | |||
1673 | return false; | 1673 | return false; |
1674 | } | 1674 | } |
1675 | 1675 | ||
1676 | bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port) | 1676 | static bool child_dev_is_dp_dual_mode(const union child_device_config *p_child, |
1677 | enum port port) | ||
1677 | { | 1678 | { |
1678 | static const struct { | 1679 | static const struct { |
1679 | u16 dp, hdmi; | 1680 | u16 dp, hdmi; |
@@ -1687,22 +1688,35 @@ bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum por | |||
1687 | [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, }, | 1688 | [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, }, |
1688 | [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, }, | 1689 | [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, }, |
1689 | }; | 1690 | }; |
1690 | int i; | ||
1691 | 1691 | ||
1692 | if (port == PORT_A || port >= ARRAY_SIZE(port_mapping)) | 1692 | if (port == PORT_A || port >= ARRAY_SIZE(port_mapping)) |
1693 | return false; | 1693 | return false; |
1694 | 1694 | ||
1695 | if (!dev_priv->vbt.child_dev_num) | 1695 | if ((p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) != |
1696 | (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS)) | ||
1696 | return false; | 1697 | return false; |
1697 | 1698 | ||
1699 | if (p_child->common.dvo_port == port_mapping[port].dp) | ||
1700 | return true; | ||
1701 | |||
1702 | /* Only accept a HDMI dvo_port as DP++ if it has an AUX channel */ | ||
1703 | if (p_child->common.dvo_port == port_mapping[port].hdmi && | ||
1704 | p_child->common.aux_channel != 0) | ||
1705 | return true; | ||
1706 | |||
1707 | return false; | ||
1708 | } | ||
1709 | |||
1710 | bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, | ||
1711 | enum port port) | ||
1712 | { | ||
1713 | int i; | ||
1714 | |||
1698 | for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { | 1715 | for (i = 0; i < dev_priv->vbt.child_dev_num; i++) { |
1699 | const union child_device_config *p_child = | 1716 | const union child_device_config *p_child = |
1700 | &dev_priv->vbt.child_dev[i]; | 1717 | &dev_priv->vbt.child_dev[i]; |
1701 | 1718 | ||
1702 | if ((p_child->common.dvo_port == port_mapping[port].dp || | 1719 | if (child_dev_is_dp_dual_mode(p_child, port)) |
1703 | p_child->common.dvo_port == port_mapping[port].hdmi) && | ||
1704 | (p_child->common.device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) == | ||
1705 | (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS)) | ||
1706 | return true; | 1720 | return true; |
1707 | } | 1721 | } |
1708 | 1722 | ||
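Editor's note: the factored-out helper above tightens DP++ detection: a dual-mode child device counts for a port if its dvo_port is the DP variant, or if it is the HDMI variant and an AUX channel is wired up. A standalone sketch with a trimmed-down stand-in for the VBT child structure; the device_type decoding is assumed to have happened already.

#include <linux/types.h>

struct child_cfg {			/* stand-in for the VBT child config */
	u16  dvo_port;
	u8   aux_channel;
	bool dual_mode_device_type;	/* device_type bits already decoded */
};

static bool child_is_dp_dual_mode(const struct child_cfg *child,
				  u16 dp_port, u16 hdmi_port)
{
	if (!child->dual_mode_device_type)
		return false;
	if (child->dvo_port == dp_port)
		return true;
	/* an HDMI dvo_port only counts as DP++ if it has an AUX channel */
	return child->dvo_port == hdmi_port && child->aux_channel != 0;
}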
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 3581b5a7f716..bf344d08356a 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -4463,21 +4463,11 @@ static enum drm_connector_status | |||
4463 | intel_dp_detect(struct drm_connector *connector, bool force) | 4463 | intel_dp_detect(struct drm_connector *connector, bool force) |
4464 | { | 4464 | { |
4465 | struct intel_dp *intel_dp = intel_attached_dp(connector); | 4465 | struct intel_dp *intel_dp = intel_attached_dp(connector); |
4466 | struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); | ||
4467 | struct intel_encoder *intel_encoder = &intel_dig_port->base; | ||
4468 | enum drm_connector_status status = connector->status; | 4466 | enum drm_connector_status status = connector->status; |
4469 | 4467 | ||
4470 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", | 4468 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", |
4471 | connector->base.id, connector->name); | 4469 | connector->base.id, connector->name); |
4472 | 4470 | ||
4473 | if (intel_dp->is_mst) { | ||
4474 | /* MST devices are disconnected from a monitor POV */ | ||
4475 | intel_dp_unset_edid(intel_dp); | ||
4476 | if (intel_encoder->type != INTEL_OUTPUT_EDP) | ||
4477 | intel_encoder->type = INTEL_OUTPUT_DP; | ||
4478 | return connector_status_disconnected; | ||
4479 | } | ||
4480 | |||
4481 | /* If full detect is not performed yet, do a full detect */ | 4471 | /* If full detect is not performed yet, do a full detect */ |
4482 | if (!intel_dp->detect_done) | 4472 | if (!intel_dp->detect_done) |
4483 | status = intel_dp_long_pulse(intel_dp->attached_connector); | 4473 | status = intel_dp_long_pulse(intel_dp->attached_connector); |
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 73a521fdf1bd..dbed12c484c9 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c | |||
@@ -358,7 +358,7 @@ vlv_update_plane(struct drm_plane *dplane, | |||
358 | int plane = intel_plane->plane; | 358 | int plane = intel_plane->plane; |
359 | u32 sprctl; | 359 | u32 sprctl; |
360 | u32 sprsurf_offset, linear_offset; | 360 | u32 sprsurf_offset, linear_offset; |
361 | unsigned int rotation = dplane->state->rotation; | 361 | unsigned int rotation = plane_state->base.rotation; |
362 | const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; | 362 | const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; |
363 | int crtc_x = plane_state->base.dst.x1; | 363 | int crtc_x = plane_state->base.dst.x1; |
364 | int crtc_y = plane_state->base.dst.y1; | 364 | int crtc_y = plane_state->base.dst.y1; |
diff --git a/drivers/gpu/drm/i915/intel_vbt_defs.h b/drivers/gpu/drm/i915/intel_vbt_defs.h index 68db9621f1f0..8886cab19f98 100644 --- a/drivers/gpu/drm/i915/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/intel_vbt_defs.h | |||
@@ -280,7 +280,8 @@ struct common_child_dev_config { | |||
280 | u8 dp_support:1; | 280 | u8 dp_support:1; |
281 | u8 tmds_support:1; | 281 | u8 tmds_support:1; |
282 | u8 support_reserved:5; | 282 | u8 support_reserved:5; |
283 | u8 not_common3[12]; | 283 | u8 aux_channel; |
284 | u8 not_common3[11]; | ||
284 | u8 iboost_level; | 285 | u8 iboost_level; |
285 | } __packed; | 286 | } __packed; |
286 | 287 | ||
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c index 019b7ca392d7..c70310206ac5 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c | |||
@@ -80,6 +80,7 @@ static void mtk_ovl_enable_vblank(struct mtk_ddp_comp *comp, | |||
80 | ddp_comp); | 80 | ddp_comp); |
81 | 81 | ||
82 | priv->crtc = crtc; | 82 | priv->crtc = crtc; |
83 | writel(0x0, comp->regs + DISP_REG_OVL_INTSTA); | ||
83 | writel_relaxed(OVL_FME_CPL_INT, comp->regs + DISP_REG_OVL_INTEN); | 84 | writel_relaxed(OVL_FME_CPL_INT, comp->regs + DISP_REG_OVL_INTEN); |
84 | } | 85 | } |
85 | 86 | ||
@@ -250,13 +251,6 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev) | |||
250 | if (irq < 0) | 251 | if (irq < 0) |
251 | return irq; | 252 | return irq; |
252 | 253 | ||
253 | ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler, | ||
254 | IRQF_TRIGGER_NONE, dev_name(dev), priv); | ||
255 | if (ret < 0) { | ||
256 | dev_err(dev, "Failed to request irq %d: %d\n", irq, ret); | ||
257 | return ret; | ||
258 | } | ||
259 | |||
260 | comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DISP_OVL); | 254 | comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DISP_OVL); |
261 | if (comp_id < 0) { | 255 | if (comp_id < 0) { |
262 | dev_err(dev, "Failed to identify by alias: %d\n", comp_id); | 256 | dev_err(dev, "Failed to identify by alias: %d\n", comp_id); |
@@ -272,6 +266,13 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev) | |||
272 | 266 | ||
273 | platform_set_drvdata(pdev, priv); | 267 | platform_set_drvdata(pdev, priv); |
274 | 268 | ||
269 | ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler, | ||
270 | IRQF_TRIGGER_NONE, dev_name(dev), priv); | ||
271 | if (ret < 0) { | ||
272 | dev_err(dev, "Failed to request irq %d: %d\n", irq, ret); | ||
273 | return ret; | ||
274 | } | ||
275 | |||
275 | ret = component_add(dev, &mtk_disp_ovl_component_ops); | 276 | ret = component_add(dev, &mtk_disp_ovl_component_ops); |
276 | if (ret) | 277 | if (ret) |
277 | dev_err(dev, "Failed to add component: %d\n", ret); | 278 | dev_err(dev, "Failed to add component: %d\n", ret); |
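The OVL probe hunk moves the IRQ request after platform_set_drvdata(), so the handler can never run before the data it dereferences is in place. A hedged sketch of that ordering under assumed names (my_priv, my_irq_handler and my_probe are placeholders, not the mediatek symbols):

#include <linux/interrupt.h>
#include <linux/platform_device.h>

struct my_priv {
	void __iomem *regs;
};

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	struct my_priv *priv = dev_id;

	/* Everything priv points at was initialized before the IRQ was
	 * requested, so even an immediate interrupt is safe to handle. */
	(void)priv;
	return IRQ_HANDLED;
}

static int my_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct my_priv *priv;
	int irq, ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* Publish driver state first ... */
	platform_set_drvdata(pdev, priv);

	/* ... then wire up the interrupt, so the handler never observes a
	 * half-initialized device. */
	ret = devm_request_irq(dev, irq, my_irq_handler, IRQF_TRIGGER_NONE,
			       dev_name(dev), priv);
	if (ret < 0)
		return ret;

	return 0;
}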
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c index 0186e500d2a5..90fb831ef031 100644 --- a/drivers/gpu/drm/mediatek/mtk_dpi.c +++ b/drivers/gpu/drm/mediatek/mtk_dpi.c | |||
@@ -432,11 +432,16 @@ static int mtk_dpi_set_display_mode(struct mtk_dpi *dpi, | |||
432 | unsigned long pll_rate; | 432 | unsigned long pll_rate; |
433 | unsigned int factor; | 433 | unsigned int factor; |
434 | 434 | ||
435 | /* keep pll_rate within the valid range of tvdpll (1 GHz ~ 2 GHz) */ | ||
435 | pix_rate = 1000UL * mode->clock; | 436 | pix_rate = 1000UL * mode->clock; |
436 | if (mode->clock <= 74000) | 437 | if (mode->clock <= 27000) |
438 | factor = 16 * 3; | ||
439 | else if (mode->clock <= 84000) | ||
437 | factor = 8 * 3; | 440 | factor = 8 * 3; |
438 | else | 441 | else if (mode->clock <= 167000) |
439 | factor = 4 * 3; | 442 | factor = 4 * 3; |
443 | else | ||
444 | factor = 2 * 3; | ||
440 | pll_rate = pix_rate * factor; | 445 | pll_rate = pix_rate * factor; |
441 | 446 | ||
442 | dev_dbg(dpi->dev, "Want PLL %lu Hz, pixel clock %lu Hz\n", | 447 | dev_dbg(dpi->dev, "Want PLL %lu Hz, pixel clock %lu Hz\n", |
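The widened factor table above keeps pixel_clock * factor inside the TVDPLL's stated 1 GHz - 2 GHz window for low and high pixel clocks alike. A small runnable sketch of the same selection logic (the sample mode clocks are illustrative):

#include <stdio.h>

/* Pick the multiplication factor so that pixel_clock * factor lands in the
 * TVDPLL's working range, stated as 1 GHz - 2 GHz in the hunk's comment. */
static unsigned int pll_factor(int clock_khz)
{
	if (clock_khz <= 27000)
		return 16 * 3;
	else if (clock_khz <= 84000)
		return 8 * 3;
	else if (clock_khz <= 167000)
		return 4 * 3;
	return 2 * 3;
}

int main(void)
{
	const int modes_khz[] = { 27000, 74250, 148500, 297000 };

	for (unsigned int i = 0; i < sizeof(modes_khz) / sizeof(modes_khz[0]); i++) {
		unsigned long pix = 1000UL * modes_khz[i];
		unsigned long pll = pix * pll_factor(modes_khz[i]);

		printf("pixel %lu Hz -> pll %lu Hz\n", pix, pll);
	}
	return 0;
}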
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c index df33b3ca6ffd..48cc01fd20c7 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c | |||
@@ -123,7 +123,7 @@ static void mtk_od_config(struct mtk_ddp_comp *comp, unsigned int w, | |||
123 | unsigned int bpc) | 123 | unsigned int bpc) |
124 | { | 124 | { |
125 | writel(w << 16 | h, comp->regs + DISP_OD_SIZE); | 125 | writel(w << 16 | h, comp->regs + DISP_OD_SIZE); |
126 | writel(OD_RELAYMODE, comp->regs + OD_RELAYMODE); | 126 | writel(OD_RELAYMODE, comp->regs + DISP_OD_CFG); |
127 | mtk_dither_set(comp, bpc, DISP_OD_CFG); | 127 | mtk_dither_set(comp, bpc, DISP_OD_CFG); |
128 | } | 128 | } |
129 | 129 | ||
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 28b2044ed9f2..eaa5a2240c0c 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c | |||
@@ -86,7 +86,7 @@ | |||
86 | 86 | ||
87 | #define DSI_PHY_TIMECON0 0x110 | 87 | #define DSI_PHY_TIMECON0 0x110 |
88 | #define LPX (0xff << 0) | 88 | #define LPX (0xff << 0) |
89 | #define HS_PRPR (0xff << 8) | 89 | #define HS_PREP (0xff << 8) |
90 | #define HS_ZERO (0xff << 16) | 90 | #define HS_ZERO (0xff << 16) |
91 | #define HS_TRAIL (0xff << 24) | 91 | #define HS_TRAIL (0xff << 24) |
92 | 92 | ||
@@ -102,10 +102,16 @@ | |||
102 | #define CLK_TRAIL (0xff << 24) | 102 | #define CLK_TRAIL (0xff << 24) |
103 | 103 | ||
104 | #define DSI_PHY_TIMECON3 0x11c | 104 | #define DSI_PHY_TIMECON3 0x11c |
105 | #define CLK_HS_PRPR (0xff << 0) | 105 | #define CLK_HS_PREP (0xff << 0) |
106 | #define CLK_HS_POST (0xff << 8) | 106 | #define CLK_HS_POST (0xff << 8) |
107 | #define CLK_HS_EXIT (0xff << 16) | 107 | #define CLK_HS_EXIT (0xff << 16) |
108 | 108 | ||
109 | #define T_LPX 5 | ||
110 | #define T_HS_PREP 6 | ||
111 | #define T_HS_TRAIL 8 | ||
112 | #define T_HS_EXIT 7 | ||
113 | #define T_HS_ZERO 10 | ||
114 | |||
109 | #define NS_TO_CYCLE(n, c) ((n) / (c) + (((n) % (c)) ? 1 : 0)) | 115 | #define NS_TO_CYCLE(n, c) ((n) / (c) + (((n) % (c)) ? 1 : 0)) |
110 | 116 | ||
111 | struct phy; | 117 | struct phy; |
@@ -161,20 +167,18 @@ static void mtk_dsi_mask(struct mtk_dsi *dsi, u32 offset, u32 mask, u32 data) | |||
161 | static void dsi_phy_timconfig(struct mtk_dsi *dsi) | 167 | static void dsi_phy_timconfig(struct mtk_dsi *dsi) |
162 | { | 168 | { |
163 | u32 timcon0, timcon1, timcon2, timcon3; | 169 | u32 timcon0, timcon1, timcon2, timcon3; |
164 | unsigned int ui, cycle_time; | 170 | u32 ui, cycle_time; |
165 | unsigned int lpx; | ||
166 | 171 | ||
167 | ui = 1000 / dsi->data_rate + 0x01; | 172 | ui = 1000 / dsi->data_rate + 0x01; |
168 | cycle_time = 8000 / dsi->data_rate + 0x01; | 173 | cycle_time = 8000 / dsi->data_rate + 0x01; |
169 | lpx = 5; | ||
170 | 174 | ||
171 | timcon0 = (8 << 24) | (0xa << 16) | (0x6 << 8) | lpx; | 175 | timcon0 = T_LPX | T_HS_PREP << 8 | T_HS_ZERO << 16 | T_HS_TRAIL << 24; |
172 | timcon1 = (7 << 24) | (5 * lpx << 16) | ((3 * lpx) / 2) << 8 | | 176 | timcon1 = 4 * T_LPX | (3 * T_LPX / 2) << 8 | 5 * T_LPX << 16 | |
173 | (4 * lpx); | 177 | T_HS_EXIT << 24; |
174 | timcon2 = ((NS_TO_CYCLE(0x64, cycle_time) + 0xa) << 24) | | 178 | timcon2 = ((NS_TO_CYCLE(0x64, cycle_time) + 0xa) << 24) | |
175 | (NS_TO_CYCLE(0x150, cycle_time) << 16); | 179 | (NS_TO_CYCLE(0x150, cycle_time) << 16); |
176 | timcon3 = (2 * lpx) << 16 | NS_TO_CYCLE(80 + 52 * ui, cycle_time) << 8 | | 180 | timcon3 = NS_TO_CYCLE(0x40, cycle_time) | (2 * T_LPX) << 16 | |
177 | NS_TO_CYCLE(0x40, cycle_time); | 181 | NS_TO_CYCLE(80 + 52 * ui, cycle_time) << 8; |
178 | 182 | ||
179 | writel(timcon0, dsi->regs + DSI_PHY_TIMECON0); | 183 | writel(timcon0, dsi->regs + DSI_PHY_TIMECON0); |
180 | writel(timcon1, dsi->regs + DSI_PHY_TIMECON1); | 184 | writel(timcon1, dsi->regs + DSI_PHY_TIMECON1); |
@@ -202,19 +206,47 @@ static int mtk_dsi_poweron(struct mtk_dsi *dsi) | |||
202 | { | 206 | { |
203 | struct device *dev = dsi->dev; | 207 | struct device *dev = dsi->dev; |
204 | int ret; | 208 | int ret; |
209 | u64 pixel_clock, total_bits; | ||
210 | u32 htotal, htotal_bits, bit_per_pixel, overhead_cycles, overhead_bits; | ||
205 | 211 | ||
206 | if (++dsi->refcount != 1) | 212 | if (++dsi->refcount != 1) |
207 | return 0; | 213 | return 0; |
208 | 214 | ||
215 | switch (dsi->format) { | ||
216 | case MIPI_DSI_FMT_RGB565: | ||
217 | bit_per_pixel = 16; | ||
218 | break; | ||
219 | case MIPI_DSI_FMT_RGB666_PACKED: | ||
220 | bit_per_pixel = 18; | ||
221 | break; | ||
222 | case MIPI_DSI_FMT_RGB666: | ||
223 | case MIPI_DSI_FMT_RGB888: | ||
224 | default: | ||
225 | bit_per_pixel = 24; | ||
226 | break; | ||
227 | } | ||
228 | |||
209 | /** | 229 | /** |
210 | * data_rate = (pixel_clock / 1000) * pixel_dipth * mipi_ratio; | 230 | * vm.pixelclock is in kHz, pixel_clock unit is Hz, so multiply by 1000 |
211 | * pixel_clock unit is Khz, data_rata unit is MHz, so need divide 1000. | 231 | * htotal_time = htotal * byte_per_pixel / num_lanes |
212 | * mipi_ratio is mipi clk coefficient for balance the pixel clk in mipi. | 232 | * overhead_time = lpx + hs_prepare + hs_zero + hs_trail + hs_exit |
213 | * we set mipi_ratio is 1.05. | 233 | * mipi_ratio = (htotal_time + overhead_time) / htotal_time |
234 | * data_rate = pixel_clock * bit_per_pixel * mipi_ratio / num_lanes; | ||
214 | */ | 235 | */ |
215 | dsi->data_rate = dsi->vm.pixelclock * 3 * 21 / (1 * 1000 * 10); | 236 | pixel_clock = dsi->vm.pixelclock * 1000; |
237 | htotal = dsi->vm.hactive + dsi->vm.hback_porch + dsi->vm.hfront_porch + | ||
238 | dsi->vm.hsync_len; | ||
239 | htotal_bits = htotal * bit_per_pixel; | ||
240 | |||
241 | overhead_cycles = T_LPX + T_HS_PREP + T_HS_ZERO + T_HS_TRAIL + | ||
242 | T_HS_EXIT; | ||
243 | overhead_bits = overhead_cycles * dsi->lanes * 8; | ||
244 | total_bits = htotal_bits + overhead_bits; | ||
245 | |||
246 | dsi->data_rate = DIV_ROUND_UP_ULL(pixel_clock * total_bits, | ||
247 | htotal * dsi->lanes); | ||
216 | 248 | ||
217 | ret = clk_set_rate(dsi->hs_clk, dsi->data_rate * 1000000); | 249 | ret = clk_set_rate(dsi->hs_clk, dsi->data_rate); |
218 | if (ret < 0) { | 250 | if (ret < 0) { |
219 | dev_err(dev, "Failed to set data rate: %d\n", ret); | 251 | dev_err(dev, "Failed to set data rate: %d\n", ret); |
220 | goto err_refcount; | 252 | goto err_refcount; |
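The reworked data-rate formula above replaces the fixed 1.05 "mipi_ratio" with an explicit per-line overhead term built from the new T_* timing constants. A runnable user-space sketch of the same arithmetic, using an illustrative 148.5 MHz, 4-lane RGB888 mode (div_round_up stands in for the kernel's DIV_ROUND_UP_ULL):

#include <stdint.h>
#include <stdio.h>

#define T_LPX		5
#define T_HS_PREP	6
#define T_HS_TRAIL	8
#define T_HS_EXIT	7
#define T_HS_ZERO	10

/* Round-up division, standing in for DIV_ROUND_UP_ULL(). */
static uint64_t div_round_up(uint64_t n, uint64_t d)
{
	return (n + d - 1) / d;
}

int main(void)
{
	/* Illustrative 1080p-class video timing (kHz pixel clock, pixels). */
	uint64_t pixelclock_khz = 148500;
	uint32_t hactive = 1920, hfp = 88, hbp = 148, hsync = 44;
	uint32_t lanes = 4, bit_per_pixel = 24;	/* MIPI_DSI_FMT_RGB888 */

	uint64_t pixel_clock = pixelclock_khz * 1000;	/* Hz */
	uint32_t htotal = hactive + hfp + hbp + hsync;
	uint64_t htotal_bits = (uint64_t)htotal * bit_per_pixel;

	/* HS overhead per line, converted from byte-clock cycles to bits. */
	uint32_t overhead_cycles = T_LPX + T_HS_PREP + T_HS_ZERO +
				   T_HS_TRAIL + T_HS_EXIT;
	uint64_t overhead_bits = (uint64_t)overhead_cycles * lanes * 8;
	uint64_t total_bits = htotal_bits + overhead_bits;

	/* data_rate = pixel_clock * bits_per_line / (htotal * lanes), in Hz per lane */
	uint64_t data_rate = div_round_up(pixel_clock * total_bits,
					  (uint64_t)htotal * lanes);

	printf("data rate per lane: %llu Hz\n", (unsigned long long)data_rate);
	return 0;
}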
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index 71227deef21b..0e8c4d9af340 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c | |||
@@ -1133,12 +1133,6 @@ static int mtk_hdmi_output_set_display_mode(struct mtk_hdmi *hdmi, | |||
1133 | phy_power_on(hdmi->phy); | 1133 | phy_power_on(hdmi->phy); |
1134 | mtk_hdmi_aud_output_config(hdmi, mode); | 1134 | mtk_hdmi_aud_output_config(hdmi, mode); |
1135 | 1135 | ||
1136 | mtk_hdmi_setup_audio_infoframe(hdmi); | ||
1137 | mtk_hdmi_setup_avi_infoframe(hdmi, mode); | ||
1138 | mtk_hdmi_setup_spd_infoframe(hdmi, "mediatek", "On-chip HDMI"); | ||
1139 | if (mode->flags & DRM_MODE_FLAG_3D_MASK) | ||
1140 | mtk_hdmi_setup_vendor_specific_infoframe(hdmi, mode); | ||
1141 | |||
1142 | mtk_hdmi_hw_vid_black(hdmi, false); | 1136 | mtk_hdmi_hw_vid_black(hdmi, false); |
1143 | mtk_hdmi_hw_aud_unmute(hdmi); | 1137 | mtk_hdmi_hw_aud_unmute(hdmi); |
1144 | mtk_hdmi_hw_send_av_unmute(hdmi); | 1138 | mtk_hdmi_hw_send_av_unmute(hdmi); |
@@ -1401,6 +1395,16 @@ static void mtk_hdmi_bridge_pre_enable(struct drm_bridge *bridge) | |||
1401 | hdmi->powered = true; | 1395 | hdmi->powered = true; |
1402 | } | 1396 | } |
1403 | 1397 | ||
1398 | static void mtk_hdmi_send_infoframe(struct mtk_hdmi *hdmi, | ||
1399 | struct drm_display_mode *mode) | ||
1400 | { | ||
1401 | mtk_hdmi_setup_audio_infoframe(hdmi); | ||
1402 | mtk_hdmi_setup_avi_infoframe(hdmi, mode); | ||
1403 | mtk_hdmi_setup_spd_infoframe(hdmi, "mediatek", "On-chip HDMI"); | ||
1404 | if (mode->flags & DRM_MODE_FLAG_3D_MASK) | ||
1405 | mtk_hdmi_setup_vendor_specific_infoframe(hdmi, mode); | ||
1406 | } | ||
1407 | |||
1404 | static void mtk_hdmi_bridge_enable(struct drm_bridge *bridge) | 1408 | static void mtk_hdmi_bridge_enable(struct drm_bridge *bridge) |
1405 | { | 1409 | { |
1406 | struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); | 1410 | struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge); |
@@ -1409,6 +1413,7 @@ static void mtk_hdmi_bridge_enable(struct drm_bridge *bridge) | |||
1409 | clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]); | 1413 | clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PLL]); |
1410 | clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PIXEL]); | 1414 | clk_prepare_enable(hdmi->clk[MTK_HDMI_CLK_HDMI_PIXEL]); |
1411 | phy_power_on(hdmi->phy); | 1415 | phy_power_on(hdmi->phy); |
1416 | mtk_hdmi_send_infoframe(hdmi, &hdmi->mode); | ||
1412 | 1417 | ||
1413 | hdmi->enabled = true; | 1418 | hdmi->enabled = true; |
1414 | } | 1419 | } |
diff --git a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c index 8a24754b440f..51cb9cfb6646 100644 --- a/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c +++ b/drivers/gpu/drm/mediatek/mtk_mt8173_hdmi_phy.c | |||
@@ -265,6 +265,9 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate, | |||
265 | struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); | 265 | struct mtk_hdmi_phy *hdmi_phy = to_mtk_hdmi_phy(hw); |
266 | unsigned int pre_div; | 266 | unsigned int pre_div; |
267 | unsigned int div; | 267 | unsigned int div; |
268 | unsigned int pre_ibias; | ||
269 | unsigned int hdmi_ibias; | ||
270 | unsigned int imp_en; | ||
268 | 271 | ||
269 | dev_dbg(hdmi_phy->dev, "%s: %lu Hz, parent: %lu Hz\n", __func__, | 272 | dev_dbg(hdmi_phy->dev, "%s: %lu Hz, parent: %lu Hz\n", __func__, |
270 | rate, parent_rate); | 273 | rate, parent_rate); |
@@ -298,18 +301,31 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate, | |||
298 | (0x1 << PLL_BR_SHIFT), | 301 | (0x1 << PLL_BR_SHIFT), |
299 | RG_HDMITX_PLL_BP | RG_HDMITX_PLL_BC | | 302 | RG_HDMITX_PLL_BP | RG_HDMITX_PLL_BC | |
300 | RG_HDMITX_PLL_BR); | 303 | RG_HDMITX_PLL_BR); |
301 | mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, RG_HDMITX_PRD_IMP_EN); | 304 | if (rate < 165000000) { |
305 | mtk_hdmi_phy_clear_bits(hdmi_phy, HDMI_CON3, | ||
306 | RG_HDMITX_PRD_IMP_EN); | ||
307 | pre_ibias = 0x3; | ||
308 | imp_en = 0x0; | ||
309 | hdmi_ibias = hdmi_phy->ibias; | ||
310 | } else { | ||
311 | mtk_hdmi_phy_set_bits(hdmi_phy, HDMI_CON3, | ||
312 | RG_HDMITX_PRD_IMP_EN); | ||
313 | pre_ibias = 0x6; | ||
314 | imp_en = 0xf; | ||
315 | hdmi_ibias = hdmi_phy->ibias_up; | ||
316 | } | ||
302 | mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON4, | 317 | mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON4, |
303 | (0x3 << PRD_IBIAS_CLK_SHIFT) | | 318 | (pre_ibias << PRD_IBIAS_CLK_SHIFT) | |
304 | (0x3 << PRD_IBIAS_D2_SHIFT) | | 319 | (pre_ibias << PRD_IBIAS_D2_SHIFT) | |
305 | (0x3 << PRD_IBIAS_D1_SHIFT) | | 320 | (pre_ibias << PRD_IBIAS_D1_SHIFT) | |
306 | (0x3 << PRD_IBIAS_D0_SHIFT), | 321 | (pre_ibias << PRD_IBIAS_D0_SHIFT), |
307 | RG_HDMITX_PRD_IBIAS_CLK | | 322 | RG_HDMITX_PRD_IBIAS_CLK | |
308 | RG_HDMITX_PRD_IBIAS_D2 | | 323 | RG_HDMITX_PRD_IBIAS_D2 | |
309 | RG_HDMITX_PRD_IBIAS_D1 | | 324 | RG_HDMITX_PRD_IBIAS_D1 | |
310 | RG_HDMITX_PRD_IBIAS_D0); | 325 | RG_HDMITX_PRD_IBIAS_D0); |
311 | mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON3, | 326 | mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON3, |
312 | (0x0 << DRV_IMP_EN_SHIFT), RG_HDMITX_DRV_IMP_EN); | 327 | (imp_en << DRV_IMP_EN_SHIFT), |
328 | RG_HDMITX_DRV_IMP_EN); | ||
313 | mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, | 329 | mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON6, |
314 | (hdmi_phy->drv_imp_clk << DRV_IMP_CLK_SHIFT) | | 330 | (hdmi_phy->drv_imp_clk << DRV_IMP_CLK_SHIFT) | |
315 | (hdmi_phy->drv_imp_d2 << DRV_IMP_D2_SHIFT) | | 331 | (hdmi_phy->drv_imp_d2 << DRV_IMP_D2_SHIFT) | |
@@ -318,12 +334,14 @@ static int mtk_hdmi_pll_set_rate(struct clk_hw *hw, unsigned long rate, | |||
318 | RG_HDMITX_DRV_IMP_CLK | RG_HDMITX_DRV_IMP_D2 | | 334 | RG_HDMITX_DRV_IMP_CLK | RG_HDMITX_DRV_IMP_D2 | |
319 | RG_HDMITX_DRV_IMP_D1 | RG_HDMITX_DRV_IMP_D0); | 335 | RG_HDMITX_DRV_IMP_D1 | RG_HDMITX_DRV_IMP_D0); |
320 | mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON5, | 336 | mtk_hdmi_phy_mask(hdmi_phy, HDMI_CON5, |
321 | (hdmi_phy->ibias << DRV_IBIAS_CLK_SHIFT) | | 337 | (hdmi_ibias << DRV_IBIAS_CLK_SHIFT) | |
322 | (hdmi_phy->ibias << DRV_IBIAS_D2_SHIFT) | | 338 | (hdmi_ibias << DRV_IBIAS_D2_SHIFT) | |
323 | (hdmi_phy->ibias << DRV_IBIAS_D1_SHIFT) | | 339 | (hdmi_ibias << DRV_IBIAS_D1_SHIFT) | |
324 | (hdmi_phy->ibias << DRV_IBIAS_D0_SHIFT), | 340 | (hdmi_ibias << DRV_IBIAS_D0_SHIFT), |
325 | RG_HDMITX_DRV_IBIAS_CLK | RG_HDMITX_DRV_IBIAS_D2 | | 341 | RG_HDMITX_DRV_IBIAS_CLK | |
326 | RG_HDMITX_DRV_IBIAS_D1 | RG_HDMITX_DRV_IBIAS_D0); | 342 | RG_HDMITX_DRV_IBIAS_D2 | |
343 | RG_HDMITX_DRV_IBIAS_D1 | | ||
344 | RG_HDMITX_DRV_IBIAS_D0); | ||
327 | return 0; | 345 | return 0; |
328 | } | 346 | } |
329 | 347 | ||
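The PLL hunk above switches several drive settings on a 165 MHz threshold instead of hard-coding the low-rate values. A runnable sketch of that selection (the threshold and the 0x3/0x6/0xf codes come from the hunk; the container struct and the sample rates are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical settings container for the rate-dependent drive values. */
struct phy_drive {
	unsigned int pre_ibias;
	unsigned int imp_en;
	bool prd_imp_en;
};

static struct phy_drive pick_drive(unsigned long rate)
{
	if (rate < 165000000UL)
		return (struct phy_drive){ .pre_ibias = 0x3, .imp_en = 0x0,
					   .prd_imp_en = false };
	return (struct phy_drive){ .pre_ibias = 0x6, .imp_en = 0xf,
				   .prd_imp_en = true };
}

int main(void)
{
	unsigned long rates[] = { 74250000UL, 148500000UL, 297000000UL };

	for (unsigned int i = 0; i < 3; i++) {
		struct phy_drive d = pick_drive(rates[i]);

		printf("%lu Hz: pre_ibias=0x%x imp_en=0x%x prd_imp=%d\n",
		       rates[i], d.pre_ibias, d.imp_en, d.prd_imp_en);
	}
	return 0;
}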
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c index 2fdcd04bc93f..4129b12521a6 100644 --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c | |||
@@ -34,6 +34,7 @@ struct radeon_atpx { | |||
34 | 34 | ||
35 | static struct radeon_atpx_priv { | 35 | static struct radeon_atpx_priv { |
36 | bool atpx_detected; | 36 | bool atpx_detected; |
37 | bool bridge_pm_usable; | ||
37 | /* handle for device - and atpx */ | 38 | /* handle for device - and atpx */ |
38 | acpi_handle dhandle; | 39 | acpi_handle dhandle; |
39 | struct radeon_atpx atpx; | 40 | struct radeon_atpx atpx; |
@@ -203,7 +204,11 @@ static int radeon_atpx_validate(struct radeon_atpx *atpx) | |||
203 | atpx->is_hybrid = false; | 204 | atpx->is_hybrid = false; |
204 | if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) { | 205 | if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) { |
205 | printk("ATPX Hybrid Graphics\n"); | 206 | printk("ATPX Hybrid Graphics\n"); |
206 | atpx->functions.power_cntl = false; | 207 | /* |
208 | * Disable legacy PM methods only when pcie port PM is usable, | ||
209 | * otherwise the device might fail to power off or power on. | ||
210 | */ | ||
211 | atpx->functions.power_cntl = !radeon_atpx_priv.bridge_pm_usable; | ||
207 | atpx->is_hybrid = true; | 212 | atpx->is_hybrid = true; |
208 | } | 213 | } |
209 | 214 | ||
@@ -474,6 +479,7 @@ static int radeon_atpx_power_state(enum vga_switcheroo_client_id id, | |||
474 | */ | 479 | */ |
475 | static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev) | 480 | static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev) |
476 | { | 481 | { |
482 | struct pci_dev *parent_pdev = pci_upstream_bridge(pdev); | ||
477 | acpi_handle dhandle, atpx_handle; | 483 | acpi_handle dhandle, atpx_handle; |
478 | acpi_status status; | 484 | acpi_status status; |
479 | 485 | ||
@@ -487,6 +493,7 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev) | |||
487 | 493 | ||
488 | radeon_atpx_priv.dhandle = dhandle; | 494 | radeon_atpx_priv.dhandle = dhandle; |
489 | radeon_atpx_priv.atpx.handle = atpx_handle; | 495 | radeon_atpx_priv.atpx.handle = atpx_handle; |
496 | radeon_atpx_priv.bridge_pm_usable = parent_pdev && parent_pdev->bridge_d3; | ||
490 | return true; | 497 | return true; |
491 | } | 498 | } |
492 | 499 | ||
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index 0da9862ad8ed..70e9fd59c5a2 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c | |||
@@ -142,9 +142,9 @@ static int sun4i_drv_bind(struct device *dev) | |||
142 | 142 | ||
143 | /* Create our layers */ | 143 | /* Create our layers */ |
144 | drv->layers = sun4i_layers_init(drm); | 144 | drv->layers = sun4i_layers_init(drm); |
145 | if (!drv->layers) { | 145 | if (IS_ERR(drv->layers)) { |
146 | dev_err(drm->dev, "Couldn't create the planes\n"); | 146 | dev_err(drm->dev, "Couldn't create the planes\n"); |
147 | ret = -EINVAL; | 147 | ret = PTR_ERR(drv->layers); |
148 | goto free_drm; | 148 | goto free_drm; |
149 | } | 149 | } |
150 | 150 | ||
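sun4i_layers_init() reports failure with an encoded error pointer, so the NULL check above could never fire; the fix tests IS_ERR() and propagates PTR_ERR(). A runnable user-space sketch of that convention (the helpers below are simplified stand-ins for the kernel's err.h macros):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Minimal user-space stand-ins for the kernel's ERR_PTR helpers. */
#define MAX_ERRNO	4095
static inline void *ERR_PTR(long error) { return (void *)(intptr_t)error; }
static inline long PTR_ERR(const void *ptr) { return (long)(intptr_t)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* A function in the sun4i_layers_init() style: it never returns NULL,
 * it returns an encoded error pointer on failure. */
static void *layers_init(int fail)
{
	static int dummy_layer;

	if (fail)
		return ERR_PTR(-EINVAL);
	return &dummy_layer;
}

int main(void)
{
	void *layers = layers_init(1);

	/* A NULL check here would miss the failure entirely;
	 * IS_ERR()/PTR_ERR() propagate the real error code. */
	if (IS_ERR(layers)) {
		printf("init failed: %ld\n", PTR_ERR(layers));
		return 1;
	}
	return 0;
}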
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c index c3ff10f559cc..d198ad7e5323 100644 --- a/drivers/gpu/drm/sun4i/sun4i_rgb.c +++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c | |||
@@ -152,15 +152,13 @@ static void sun4i_rgb_encoder_enable(struct drm_encoder *encoder) | |||
152 | 152 | ||
153 | DRM_DEBUG_DRIVER("Enabling RGB output\n"); | 153 | DRM_DEBUG_DRIVER("Enabling RGB output\n"); |
154 | 154 | ||
155 | if (!IS_ERR(tcon->panel)) { | 155 | if (!IS_ERR(tcon->panel)) |
156 | drm_panel_prepare(tcon->panel); | 156 | drm_panel_prepare(tcon->panel); |
157 | drm_panel_enable(tcon->panel); | ||
158 | } | ||
159 | |||
160 | /* encoder->bridge can be NULL; drm_bridge_enable checks for it */ | ||
161 | drm_bridge_enable(encoder->bridge); | ||
162 | 157 | ||
163 | sun4i_tcon_channel_enable(tcon, 0); | 158 | sun4i_tcon_channel_enable(tcon, 0); |
159 | |||
160 | if (!IS_ERR(tcon->panel)) | ||
161 | drm_panel_enable(tcon->panel); | ||
164 | } | 162 | } |
165 | 163 | ||
166 | static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder) | 164 | static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder) |
@@ -171,15 +169,13 @@ static void sun4i_rgb_encoder_disable(struct drm_encoder *encoder) | |||
171 | 169 | ||
172 | DRM_DEBUG_DRIVER("Disabling RGB output\n"); | 170 | DRM_DEBUG_DRIVER("Disabling RGB output\n"); |
173 | 171 | ||
174 | sun4i_tcon_channel_disable(tcon, 0); | 172 | if (!IS_ERR(tcon->panel)) |
173 | drm_panel_disable(tcon->panel); | ||
175 | 174 | ||
176 | /* encoder->bridge can be NULL; drm_bridge_disable checks for it */ | 175 | sun4i_tcon_channel_disable(tcon, 0); |
177 | drm_bridge_disable(encoder->bridge); | ||
178 | 176 | ||
179 | if (!IS_ERR(tcon->panel)) { | 177 | if (!IS_ERR(tcon->panel)) |
180 | drm_panel_disable(tcon->panel); | ||
181 | drm_panel_unprepare(tcon->panel); | 178 | drm_panel_unprepare(tcon->panel); |
182 | } | ||
183 | } | 179 | } |
184 | 180 | ||
185 | static void sun4i_rgb_encoder_mode_set(struct drm_encoder *encoder, | 181 | static void sun4i_rgb_encoder_mode_set(struct drm_encoder *encoder, |
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c index 086d8a507157..60d30203a5fa 100644 --- a/drivers/hid/hid-cp2112.c +++ b/drivers/hid/hid-cp2112.c | |||
@@ -32,6 +32,11 @@ | |||
32 | #include <linux/usb/ch9.h> | 32 | #include <linux/usb/ch9.h> |
33 | #include "hid-ids.h" | 33 | #include "hid-ids.h" |
34 | 34 | ||
35 | #define CP2112_REPORT_MAX_LENGTH 64 | ||
36 | #define CP2112_GPIO_CONFIG_LENGTH 5 | ||
37 | #define CP2112_GPIO_GET_LENGTH 2 | ||
38 | #define CP2112_GPIO_SET_LENGTH 3 | ||
39 | |||
35 | enum { | 40 | enum { |
36 | CP2112_GPIO_CONFIG = 0x02, | 41 | CP2112_GPIO_CONFIG = 0x02, |
37 | CP2112_GPIO_GET = 0x03, | 42 | CP2112_GPIO_GET = 0x03, |
@@ -161,6 +166,8 @@ struct cp2112_device { | |||
161 | atomic_t read_avail; | 166 | atomic_t read_avail; |
162 | atomic_t xfer_avail; | 167 | atomic_t xfer_avail; |
163 | struct gpio_chip gc; | 168 | struct gpio_chip gc; |
169 | u8 *in_out_buffer; | ||
170 | spinlock_t lock; | ||
164 | }; | 171 | }; |
165 | 172 | ||
166 | static int gpio_push_pull = 0xFF; | 173 | static int gpio_push_pull = 0xFF; |
@@ -171,62 +178,86 @@ static int cp2112_gpio_direction_input(struct gpio_chip *chip, unsigned offset) | |||
171 | { | 178 | { |
172 | struct cp2112_device *dev = gpiochip_get_data(chip); | 179 | struct cp2112_device *dev = gpiochip_get_data(chip); |
173 | struct hid_device *hdev = dev->hdev; | 180 | struct hid_device *hdev = dev->hdev; |
174 | u8 buf[5]; | 181 | u8 *buf = dev->in_out_buffer; |
182 | unsigned long flags; | ||
175 | int ret; | 183 | int ret; |
176 | 184 | ||
185 | spin_lock_irqsave(&dev->lock, flags); | ||
186 | |||
177 | ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, | 187 | ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, |
178 | sizeof(buf), HID_FEATURE_REPORT, | 188 | CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT, |
179 | HID_REQ_GET_REPORT); | 189 | HID_REQ_GET_REPORT); |
180 | if (ret != sizeof(buf)) { | 190 | if (ret != CP2112_GPIO_CONFIG_LENGTH) { |
181 | hid_err(hdev, "error requesting GPIO config: %d\n", ret); | 191 | hid_err(hdev, "error requesting GPIO config: %d\n", ret); |
182 | return ret; | 192 | goto exit; |
183 | } | 193 | } |
184 | 194 | ||
185 | buf[1] &= ~(1 << offset); | 195 | buf[1] &= ~(1 << offset); |
186 | buf[2] = gpio_push_pull; | 196 | buf[2] = gpio_push_pull; |
187 | 197 | ||
188 | ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, sizeof(buf), | 198 | ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, |
189 | HID_FEATURE_REPORT, HID_REQ_SET_REPORT); | 199 | CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT, |
200 | HID_REQ_SET_REPORT); | ||
190 | if (ret < 0) { | 201 | if (ret < 0) { |
191 | hid_err(hdev, "error setting GPIO config: %d\n", ret); | 202 | hid_err(hdev, "error setting GPIO config: %d\n", ret); |
192 | return ret; | 203 | goto exit; |
193 | } | 204 | } |
194 | 205 | ||
195 | return 0; | 206 | ret = 0; |
207 | |||
208 | exit: | ||
209 | spin_unlock_irqrestore(&dev->lock, flags); | ||
210 | return ret <= 0 ? ret : -EIO; | ||
196 | } | 211 | } |
197 | 212 | ||
198 | static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value) | 213 | static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value) |
199 | { | 214 | { |
200 | struct cp2112_device *dev = gpiochip_get_data(chip); | 215 | struct cp2112_device *dev = gpiochip_get_data(chip); |
201 | struct hid_device *hdev = dev->hdev; | 216 | struct hid_device *hdev = dev->hdev; |
202 | u8 buf[3]; | 217 | u8 *buf = dev->in_out_buffer; |
218 | unsigned long flags; | ||
203 | int ret; | 219 | int ret; |
204 | 220 | ||
221 | spin_lock_irqsave(&dev->lock, flags); | ||
222 | |||
205 | buf[0] = CP2112_GPIO_SET; | 223 | buf[0] = CP2112_GPIO_SET; |
206 | buf[1] = value ? 0xff : 0; | 224 | buf[1] = value ? 0xff : 0; |
207 | buf[2] = 1 << offset; | 225 | buf[2] = 1 << offset; |
208 | 226 | ||
209 | ret = hid_hw_raw_request(hdev, CP2112_GPIO_SET, buf, sizeof(buf), | 227 | ret = hid_hw_raw_request(hdev, CP2112_GPIO_SET, buf, |
210 | HID_FEATURE_REPORT, HID_REQ_SET_REPORT); | 228 | CP2112_GPIO_SET_LENGTH, HID_FEATURE_REPORT, |
229 | HID_REQ_SET_REPORT); | ||
211 | if (ret < 0) | 230 | if (ret < 0) |
212 | hid_err(hdev, "error setting GPIO values: %d\n", ret); | 231 | hid_err(hdev, "error setting GPIO values: %d\n", ret); |
232 | |||
233 | spin_unlock_irqrestore(&dev->lock, flags); | ||
213 | } | 234 | } |
214 | 235 | ||
215 | static int cp2112_gpio_get(struct gpio_chip *chip, unsigned offset) | 236 | static int cp2112_gpio_get(struct gpio_chip *chip, unsigned offset) |
216 | { | 237 | { |
217 | struct cp2112_device *dev = gpiochip_get_data(chip); | 238 | struct cp2112_device *dev = gpiochip_get_data(chip); |
218 | struct hid_device *hdev = dev->hdev; | 239 | struct hid_device *hdev = dev->hdev; |
219 | u8 buf[2]; | 240 | u8 *buf = dev->in_out_buffer; |
241 | unsigned long flags; | ||
220 | int ret; | 242 | int ret; |
221 | 243 | ||
222 | ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf, sizeof(buf), | 244 | spin_lock_irqsave(&dev->lock, flags); |
223 | HID_FEATURE_REPORT, HID_REQ_GET_REPORT); | 245 | |
224 | if (ret != sizeof(buf)) { | 246 | ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf, |
247 | CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT, | ||
248 | HID_REQ_GET_REPORT); | ||
249 | if (ret != CP2112_GPIO_GET_LENGTH) { | ||
225 | hid_err(hdev, "error requesting GPIO values: %d\n", ret); | 250 | hid_err(hdev, "error requesting GPIO values: %d\n", ret); |
226 | return ret; | 251 | ret = ret < 0 ? ret : -EIO; |
252 | goto exit; | ||
227 | } | 253 | } |
228 | 254 | ||
229 | return (buf[1] >> offset) & 1; | 255 | ret = (buf[1] >> offset) & 1; |
256 | |||
257 | exit: | ||
258 | spin_unlock_irqrestore(&dev->lock, flags); | ||
259 | |||
260 | return ret; | ||
230 | } | 261 | } |
231 | 262 | ||
232 | static int cp2112_gpio_direction_output(struct gpio_chip *chip, | 263 | static int cp2112_gpio_direction_output(struct gpio_chip *chip, |
@@ -234,27 +265,33 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip, | |||
234 | { | 265 | { |
235 | struct cp2112_device *dev = gpiochip_get_data(chip); | 266 | struct cp2112_device *dev = gpiochip_get_data(chip); |
236 | struct hid_device *hdev = dev->hdev; | 267 | struct hid_device *hdev = dev->hdev; |
237 | u8 buf[5]; | 268 | u8 *buf = dev->in_out_buffer; |
269 | unsigned long flags; | ||
238 | int ret; | 270 | int ret; |
239 | 271 | ||
272 | spin_lock_irqsave(&dev->lock, flags); | ||
273 | |||
240 | ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, | 274 | ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, |
241 | sizeof(buf), HID_FEATURE_REPORT, | 275 | CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT, |
242 | HID_REQ_GET_REPORT); | 276 | HID_REQ_GET_REPORT); |
243 | if (ret != sizeof(buf)) { | 277 | if (ret != CP2112_GPIO_CONFIG_LENGTH) { |
244 | hid_err(hdev, "error requesting GPIO config: %d\n", ret); | 278 | hid_err(hdev, "error requesting GPIO config: %d\n", ret); |
245 | return ret; | 279 | goto fail; |
246 | } | 280 | } |
247 | 281 | ||
248 | buf[1] |= 1 << offset; | 282 | buf[1] |= 1 << offset; |
249 | buf[2] = gpio_push_pull; | 283 | buf[2] = gpio_push_pull; |
250 | 284 | ||
251 | ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, sizeof(buf), | 285 | ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, |
252 | HID_FEATURE_REPORT, HID_REQ_SET_REPORT); | 286 | CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT, |
287 | HID_REQ_SET_REPORT); | ||
253 | if (ret < 0) { | 288 | if (ret < 0) { |
254 | hid_err(hdev, "error setting GPIO config: %d\n", ret); | 289 | hid_err(hdev, "error setting GPIO config: %d\n", ret); |
255 | return ret; | 290 | goto fail; |
256 | } | 291 | } |
257 | 292 | ||
293 | spin_unlock_irqrestore(&dev->lock, flags); | ||
294 | |||
258 | /* | 295 | /* |
259 | * Set gpio value when output direction is already set, | 296 | * Set gpio value when output direction is already set, |
260 | * as specified in AN495, Rev. 0.2, cpt. 4.4 | 297 | * as specified in AN495, Rev. 0.2, cpt. 4.4 |
@@ -262,6 +299,10 @@ static int cp2112_gpio_direction_output(struct gpio_chip *chip, | |||
262 | cp2112_gpio_set(chip, offset, value); | 299 | cp2112_gpio_set(chip, offset, value); |
263 | 300 | ||
264 | return 0; | 301 | return 0; |
302 | |||
303 | fail: | ||
304 | spin_unlock_irqrestore(&dev->lock, flags); | ||
305 | return ret < 0 ? ret : -EIO; | ||
265 | } | 306 | } |
266 | 307 | ||
267 | static int cp2112_hid_get(struct hid_device *hdev, unsigned char report_number, | 308 | static int cp2112_hid_get(struct hid_device *hdev, unsigned char report_number, |
@@ -1007,6 +1048,17 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id) | |||
1007 | struct cp2112_smbus_config_report config; | 1048 | struct cp2112_smbus_config_report config; |
1008 | int ret; | 1049 | int ret; |
1009 | 1050 | ||
1051 | dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL); | ||
1052 | if (!dev) | ||
1053 | return -ENOMEM; | ||
1054 | |||
1055 | dev->in_out_buffer = devm_kzalloc(&hdev->dev, CP2112_REPORT_MAX_LENGTH, | ||
1056 | GFP_KERNEL); | ||
1057 | if (!dev->in_out_buffer) | ||
1058 | return -ENOMEM; | ||
1059 | |||
1060 | spin_lock_init(&dev->lock); | ||
1061 | |||
1010 | ret = hid_parse(hdev); | 1062 | ret = hid_parse(hdev); |
1011 | if (ret) { | 1063 | if (ret) { |
1012 | hid_err(hdev, "parse failed\n"); | 1064 | hid_err(hdev, "parse failed\n"); |
@@ -1063,12 +1115,6 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id) | |||
1063 | goto err_power_normal; | 1115 | goto err_power_normal; |
1064 | } | 1116 | } |
1065 | 1117 | ||
1066 | dev = kzalloc(sizeof(*dev), GFP_KERNEL); | ||
1067 | if (!dev) { | ||
1068 | ret = -ENOMEM; | ||
1069 | goto err_power_normal; | ||
1070 | } | ||
1071 | |||
1072 | hid_set_drvdata(hdev, (void *)dev); | 1118 | hid_set_drvdata(hdev, (void *)dev); |
1073 | dev->hdev = hdev; | 1119 | dev->hdev = hdev; |
1074 | dev->adap.owner = THIS_MODULE; | 1120 | dev->adap.owner = THIS_MODULE; |
@@ -1087,7 +1133,7 @@ static int cp2112_probe(struct hid_device *hdev, const struct hid_device_id *id) | |||
1087 | 1133 | ||
1088 | if (ret) { | 1134 | if (ret) { |
1089 | hid_err(hdev, "error registering i2c adapter\n"); | 1135 | hid_err(hdev, "error registering i2c adapter\n"); |
1090 | goto err_free_dev; | 1136 | goto err_power_normal; |
1091 | } | 1137 | } |
1092 | 1138 | ||
1093 | hid_dbg(hdev, "adapter registered\n"); | 1139 | hid_dbg(hdev, "adapter registered\n"); |
@@ -1123,8 +1169,6 @@ err_gpiochip_remove: | |||
1123 | gpiochip_remove(&dev->gc); | 1169 | gpiochip_remove(&dev->gc); |
1124 | err_free_i2c: | 1170 | err_free_i2c: |
1125 | i2c_del_adapter(&dev->adap); | 1171 | i2c_del_adapter(&dev->adap); |
1126 | err_free_dev: | ||
1127 | kfree(dev); | ||
1128 | err_power_normal: | 1172 | err_power_normal: |
1129 | hid_hw_power(hdev, PM_HINT_NORMAL); | 1173 | hid_hw_power(hdev, PM_HINT_NORMAL); |
1130 | err_hid_close: | 1174 | err_hid_close: |
@@ -1149,7 +1193,6 @@ static void cp2112_remove(struct hid_device *hdev) | |||
1149 | */ | 1193 | */ |
1150 | hid_hw_close(hdev); | 1194 | hid_hw_close(hdev); |
1151 | hid_hw_stop(hdev); | 1195 | hid_hw_stop(hdev); |
1152 | kfree(dev); | ||
1153 | } | 1196 | } |
1154 | 1197 | ||
1155 | static int cp2112_raw_event(struct hid_device *hdev, struct hid_report *report, | 1198 | static int cp2112_raw_event(struct hid_device *hdev, struct hid_report *report, |
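The cp2112 hunks replace on-stack report buffers with one preallocated, heap-backed buffer plus a lock, because buffers handed to hid_hw_raw_request() may be used for DMA and must not live on the stack. A hedged kernel-style sketch of the pattern (my_dev and my_get_feature are placeholders; a mutex is shown here for the serialization, while the hunk itself uses a spinlock):

#include <linux/hid.h>
#include <linux/mutex.h>

struct my_dev {
	struct hid_device *hdev;
	u8 *in_out_buffer;	/* devm_kzalloc()'d once at probe time */
	struct mutex lock;
};

static int my_get_feature(struct my_dev *dev, u8 report, size_t len)
{
	int ret;

	/* Serialize access to the shared, DMA-safe report buffer. */
	mutex_lock(&dev->lock);
	ret = hid_hw_raw_request(dev->hdev, report, dev->in_out_buffer, len,
				 HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
	mutex_unlock(&dev->lock);

	return ret == (int)len ? 0 : (ret < 0 ? ret : -EIO);
}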
diff --git a/drivers/hid/hid-lg.c b/drivers/hid/hid-lg.c index 76f644deb0a7..c5c5fbe9d605 100644 --- a/drivers/hid/hid-lg.c +++ b/drivers/hid/hid-lg.c | |||
@@ -756,11 +756,16 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id) | |||
756 | 756 | ||
757 | /* Setup wireless link with Logitech Wii wheel */ | 757 | /* Setup wireless link with Logitech Wii wheel */ |
758 | if (hdev->product == USB_DEVICE_ID_LOGITECH_WII_WHEEL) { | 758 | if (hdev->product == USB_DEVICE_ID_LOGITECH_WII_WHEEL) { |
759 | unsigned char buf[] = { 0x00, 0xAF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; | 759 | const unsigned char cbuf[] = { 0x00, 0xAF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; |
760 | u8 *buf = kmemdup(cbuf, sizeof(cbuf), GFP_KERNEL); | ||
760 | 761 | ||
761 | ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(buf), | 762 | if (!buf) { |
762 | HID_FEATURE_REPORT, HID_REQ_SET_REPORT); | 763 | ret = -ENOMEM; |
764 | goto err_free; | ||
765 | } | ||
763 | 766 | ||
767 | ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(cbuf), | ||
768 | HID_FEATURE_REPORT, HID_REQ_SET_REPORT); | ||
764 | if (ret >= 0) { | 769 | if (ret >= 0) { |
765 | /* insert a little delay of 10 jiffies ~ 40ms */ | 770 | /* insert a little delay of 10 jiffies ~ 40ms */ |
766 | wait_queue_head_t wait; | 771 | wait_queue_head_t wait; |
@@ -772,9 +777,10 @@ static int lg_probe(struct hid_device *hdev, const struct hid_device_id *id) | |||
772 | buf[1] = 0xB2; | 777 | buf[1] = 0xB2; |
773 | get_random_bytes(&buf[2], 2); | 778 | get_random_bytes(&buf[2], 2); |
774 | 779 | ||
775 | ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(buf), | 780 | ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(cbuf), |
776 | HID_FEATURE_REPORT, HID_REQ_SET_REPORT); | 781 | HID_FEATURE_REPORT, HID_REQ_SET_REPORT); |
777 | } | 782 | } |
783 | kfree(buf); | ||
778 | } | 784 | } |
779 | 785 | ||
780 | if (drv_data->quirks & LG_FF) | 786 | if (drv_data->quirks & LG_FF) |
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c index d6fa496d0ca2..20b40ad26325 100644 --- a/drivers/hid/hid-magicmouse.c +++ b/drivers/hid/hid-magicmouse.c | |||
@@ -493,7 +493,8 @@ static int magicmouse_input_configured(struct hid_device *hdev, | |||
493 | static int magicmouse_probe(struct hid_device *hdev, | 493 | static int magicmouse_probe(struct hid_device *hdev, |
494 | const struct hid_device_id *id) | 494 | const struct hid_device_id *id) |
495 | { | 495 | { |
496 | __u8 feature[] = { 0xd7, 0x01 }; | 496 | const u8 feature[] = { 0xd7, 0x01 }; |
497 | u8 *buf; | ||
497 | struct magicmouse_sc *msc; | 498 | struct magicmouse_sc *msc; |
498 | struct hid_report *report; | 499 | struct hid_report *report; |
499 | int ret; | 500 | int ret; |
@@ -544,6 +545,12 @@ static int magicmouse_probe(struct hid_device *hdev, | |||
544 | } | 545 | } |
545 | report->size = 6; | 546 | report->size = 6; |
546 | 547 | ||
548 | buf = kmemdup(feature, sizeof(feature), GFP_KERNEL); | ||
549 | if (!buf) { | ||
550 | ret = -ENOMEM; | ||
551 | goto err_stop_hw; | ||
552 | } | ||
553 | |||
547 | /* | 554 | /* |
548 | * Some devices repond with 'invalid report id' when feature | 555 | * Some devices repond with 'invalid report id' when feature |
549 | * report switching it into multitouch mode is sent to it. | 556 | * report switching it into multitouch mode is sent to it. |
@@ -552,8 +559,9 @@ static int magicmouse_probe(struct hid_device *hdev, | |||
552 | * but there seems to be no other way of switching the mode. | 559 | * but there seems to be no other way of switching the mode. |
553 | * Thus the super-ugly hacky success check below. | 560 | * Thus the super-ugly hacky success check below. |
554 | */ | 561 | */ |
555 | ret = hid_hw_raw_request(hdev, feature[0], feature, sizeof(feature), | 562 | ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(feature), |
556 | HID_FEATURE_REPORT, HID_REQ_SET_REPORT); | 563 | HID_FEATURE_REPORT, HID_REQ_SET_REPORT); |
564 | kfree(buf); | ||
557 | if (ret != -EIO && ret != sizeof(feature)) { | 565 | if (ret != -EIO && ret != sizeof(feature)) { |
558 | hid_err(hdev, "unable to request touch data (%d)\n", ret); | 566 | hid_err(hdev, "unable to request touch data (%d)\n", ret); |
559 | goto err_stop_hw; | 567 | goto err_stop_hw; |
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c index 9cd2ca34a6be..be89bcbf6a71 100644 --- a/drivers/hid/hid-rmi.c +++ b/drivers/hid/hid-rmi.c | |||
@@ -188,10 +188,16 @@ static int rmi_set_page(struct hid_device *hdev, u8 page) | |||
188 | static int rmi_set_mode(struct hid_device *hdev, u8 mode) | 188 | static int rmi_set_mode(struct hid_device *hdev, u8 mode) |
189 | { | 189 | { |
190 | int ret; | 190 | int ret; |
191 | u8 txbuf[2] = {RMI_SET_RMI_MODE_REPORT_ID, mode}; | 191 | const u8 txbuf[2] = {RMI_SET_RMI_MODE_REPORT_ID, mode}; |
192 | u8 *buf; | ||
192 | 193 | ||
193 | ret = hid_hw_raw_request(hdev, RMI_SET_RMI_MODE_REPORT_ID, txbuf, | 194 | buf = kmemdup(txbuf, sizeof(txbuf), GFP_KERNEL); |
195 | if (!buf) | ||
196 | return -ENOMEM; | ||
197 | |||
198 | ret = hid_hw_raw_request(hdev, RMI_SET_RMI_MODE_REPORT_ID, buf, | ||
194 | sizeof(txbuf), HID_FEATURE_REPORT, HID_REQ_SET_REPORT); | 199 | sizeof(txbuf), HID_FEATURE_REPORT, HID_REQ_SET_REPORT); |
200 | kfree(buf); | ||
195 | if (ret < 0) { | 201 | if (ret < 0) { |
196 | dev_err(&hdev->dev, "unable to set rmi mode to %d (%d)\n", mode, | 202 | dev_err(&hdev->dev, "unable to set rmi mode to %d (%d)\n", mode, |
197 | ret); | 203 | ret); |
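The hid-lg, hid-magicmouse and hid-rmi hunks all follow the same shape: duplicate the const/stack report template with kmemdup() into kmalloc'd memory, pass that to hid_hw_raw_request(), and free it afterwards. A hedged kernel-style sketch of that shape (MY_REPORT_ID and my_send_report are illustrative names):

#include <linux/hid.h>
#include <linux/slab.h>

#define MY_REPORT_ID	0x0f

static int my_send_report(struct hid_device *hdev, u8 value)
{
	const u8 tmpl[2] = { MY_REPORT_ID, value };
	u8 *buf;
	int ret;

	/* Raw requests need kmalloc'd memory, never const or stack data. */
	buf = kmemdup(tmpl, sizeof(tmpl), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(tmpl),
				 HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
	kfree(buf);

	return ret < 0 ? ret : 0;
}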
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c index c5c3d6111729..60875625cbdf 100644 --- a/drivers/hid/hid-sensor-hub.c +++ b/drivers/hid/hid-sensor-hub.c | |||
@@ -212,6 +212,7 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id, | |||
212 | __s32 value; | 212 | __s32 value; |
213 | int ret = 0; | 213 | int ret = 0; |
214 | 214 | ||
215 | memset(buffer, 0, buffer_size); | ||
215 | mutex_lock(&data->mutex); | 216 | mutex_lock(&data->mutex); |
216 | report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT); | 217 | report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT); |
217 | if (!report || (field_index >= report->maxfield)) { | 218 | if (!report || (field_index >= report->maxfield)) { |
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig index d223650a97e4..11edabf425ae 100644 --- a/drivers/i2c/Kconfig +++ b/drivers/i2c/Kconfig | |||
@@ -59,7 +59,6 @@ config I2C_CHARDEV | |||
59 | 59 | ||
60 | config I2C_MUX | 60 | config I2C_MUX |
61 | tristate "I2C bus multiplexing support" | 61 | tristate "I2C bus multiplexing support" |
62 | depends on HAS_IOMEM | ||
63 | help | 62 | help |
64 | Say Y here if you want the I2C core to support the ability to | 63 | Say Y here if you want the I2C core to support the ability to |
65 | handle multiplexed I2C bus topologies, by presenting each | 64 | handle multiplexed I2C bus topologies, by presenting each |
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c index 11e866d05368..b403fa5ecf49 100644 --- a/drivers/i2c/busses/i2c-designware-core.c +++ b/drivers/i2c/busses/i2c-designware-core.c | |||
@@ -91,9 +91,7 @@ | |||
91 | DW_IC_INTR_TX_ABRT | \ | 91 | DW_IC_INTR_TX_ABRT | \ |
92 | DW_IC_INTR_STOP_DET) | 92 | DW_IC_INTR_STOP_DET) |
93 | 93 | ||
94 | #define DW_IC_STATUS_ACTIVITY 0x1 | 94 | #define DW_IC_STATUS_ACTIVITY 0x1 |
95 | #define DW_IC_STATUS_TFE BIT(2) | ||
96 | #define DW_IC_STATUS_MST_ACTIVITY BIT(5) | ||
97 | 95 | ||
98 | #define DW_IC_SDA_HOLD_RX_SHIFT 16 | 96 | #define DW_IC_SDA_HOLD_RX_SHIFT 16 |
99 | #define DW_IC_SDA_HOLD_RX_MASK GENMASK(23, DW_IC_SDA_HOLD_RX_SHIFT) | 97 | #define DW_IC_SDA_HOLD_RX_MASK GENMASK(23, DW_IC_SDA_HOLD_RX_SHIFT) |
@@ -478,25 +476,9 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev) | |||
478 | { | 476 | { |
479 | struct i2c_msg *msgs = dev->msgs; | 477 | struct i2c_msg *msgs = dev->msgs; |
480 | u32 ic_tar = 0; | 478 | u32 ic_tar = 0; |
481 | bool enabled; | ||
482 | 479 | ||
483 | enabled = dw_readl(dev, DW_IC_ENABLE_STATUS) & 1; | 480 | /* Disable the adapter */ |
484 | 481 | __i2c_dw_enable_and_wait(dev, false); | |
485 | if (enabled) { | ||
486 | u32 ic_status; | ||
487 | |||
488 | /* | ||
489 | * Only disable adapter if ic_tar and ic_con can't be | ||
490 | * dynamically updated | ||
491 | */ | ||
492 | ic_status = dw_readl(dev, DW_IC_STATUS); | ||
493 | if (!dev->dynamic_tar_update_enabled || | ||
494 | (ic_status & DW_IC_STATUS_MST_ACTIVITY) || | ||
495 | !(ic_status & DW_IC_STATUS_TFE)) { | ||
496 | __i2c_dw_enable_and_wait(dev, false); | ||
497 | enabled = false; | ||
498 | } | ||
499 | } | ||
500 | 482 | ||
501 | /* if the slave address is ten bit address, enable 10BITADDR */ | 483 | /* if the slave address is ten bit address, enable 10BITADDR */ |
502 | if (dev->dynamic_tar_update_enabled) { | 484 | if (dev->dynamic_tar_update_enabled) { |
@@ -526,8 +508,8 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev) | |||
526 | /* enforce disabled interrupts (due to HW issues) */ | 508 | /* enforce disabled interrupts (due to HW issues) */ |
527 | i2c_dw_disable_int(dev); | 509 | i2c_dw_disable_int(dev); |
528 | 510 | ||
529 | if (!enabled) | 511 | /* Enable the adapter */ |
530 | __i2c_dw_enable(dev, true); | 512 | __i2c_dw_enable(dev, true); |
531 | 513 | ||
532 | /* Clear and enable interrupts */ | 514 | /* Clear and enable interrupts */ |
533 | dw_readl(dev, DW_IC_CLR_INTR); | 515 | dw_readl(dev, DW_IC_CLR_INTR); |
@@ -611,7 +593,7 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev) | |||
611 | if (msgs[dev->msg_write_idx].flags & I2C_M_RD) { | 593 | if (msgs[dev->msg_write_idx].flags & I2C_M_RD) { |
612 | 594 | ||
613 | /* avoid rx buffer overrun */ | 595 | /* avoid rx buffer overrun */ |
614 | if (rx_limit - dev->rx_outstanding <= 0) | 596 | if (dev->rx_outstanding >= dev->rx_fifo_depth) |
615 | break; | 597 | break; |
616 | 598 | ||
617 | dw_writel(dev, cmd | 0x100, DW_IC_DATA_CMD); | 599 | dw_writel(dev, cmd | 0x100, DW_IC_DATA_CMD); |
@@ -708,8 +690,7 @@ static int i2c_dw_handle_tx_abort(struct dw_i2c_dev *dev) | |||
708 | } | 690 | } |
709 | 691 | ||
710 | /* | 692 | /* |
711 | * Prepare controller for a transaction and start transfer by calling | 693 | * Prepare controller for a transaction and call i2c_dw_xfer_msg |
712 | * i2c_dw_xfer_init() | ||
713 | */ | 694 | */ |
714 | static int | 695 | static int |
715 | i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) | 696 | i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) |
@@ -752,13 +733,23 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) | |||
752 | goto done; | 733 | goto done; |
753 | } | 734 | } |
754 | 735 | ||
736 | /* | ||
737 | * We must disable the adapter before returning and signaling the end | ||
738 | * of the current transfer. Otherwise the hardware might continue | ||
739 | * generating interrupts which in turn causes a race condition with | ||
740 | * the following transfer. Needs some more investigation if the | ||
741 | * additional interrupts are a hardware bug or this driver doesn't | ||
742 | * handle them correctly yet. | ||
743 | */ | ||
744 | __i2c_dw_enable(dev, false); | ||
745 | |||
755 | if (dev->msg_err) { | 746 | if (dev->msg_err) { |
756 | ret = dev->msg_err; | 747 | ret = dev->msg_err; |
757 | goto done; | 748 | goto done; |
758 | } | 749 | } |
759 | 750 | ||
760 | /* no error */ | 751 | /* no error */ |
761 | if (likely(!dev->cmd_err)) { | 752 | if (likely(!dev->cmd_err && !dev->status)) { |
762 | ret = num; | 753 | ret = num; |
763 | goto done; | 754 | goto done; |
764 | } | 755 | } |
@@ -768,6 +759,11 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) | |||
768 | ret = i2c_dw_handle_tx_abort(dev); | 759 | ret = i2c_dw_handle_tx_abort(dev); |
769 | goto done; | 760 | goto done; |
770 | } | 761 | } |
762 | |||
763 | if (dev->status) | ||
764 | dev_err(dev->dev, | ||
765 | "transfer terminated early - interrupt latency too high?\n"); | ||
766 | |||
771 | ret = -EIO; | 767 | ret = -EIO; |
772 | 768 | ||
773 | done: | 769 | done: |
@@ -888,19 +884,9 @@ static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id) | |||
888 | */ | 884 | */ |
889 | 885 | ||
890 | tx_aborted: | 886 | tx_aborted: |
891 | if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) | 887 | if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err) |
892 | || dev->msg_err) { | ||
893 | /* | ||
894 | * We must disable interruts before returning and signaling | ||
895 | * the end of the current transfer. Otherwise the hardware | ||
896 | * might continue generating interrupts for non-existent | ||
897 | * transfers. | ||
898 | */ | ||
899 | i2c_dw_disable_int(dev); | ||
900 | dw_readl(dev, DW_IC_CLR_INTR); | ||
901 | |||
902 | complete(&dev->cmd_complete); | 888 | complete(&dev->cmd_complete); |
903 | } else if (unlikely(dev->accessor_flags & ACCESS_INTR_MASK)) { | 889 | else if (unlikely(dev->accessor_flags & ACCESS_INTR_MASK)) { |
904 | /* workaround to trigger pending interrupt */ | 890 | /* workaround to trigger pending interrupt */ |
905 | stat = dw_readl(dev, DW_IC_INTR_MASK); | 891 | stat = dw_readl(dev, DW_IC_INTR_MASK); |
906 | i2c_dw_disable_int(dev); | 892 | i2c_dw_disable_int(dev); |
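The designware RX guard now caps outstanding read commands at the RX FIFO depth rather than at a per-iteration rx_limit, so reads queued for earlier messages still count against the FIFO budget. A small runnable sketch of that accounting (the depth, drain size and command count are made-up numbers):

#include <stdio.h>

int main(void)
{
	const unsigned int rx_fifo_depth = 32;
	unsigned int rx_outstanding = 0;
	unsigned int queued = 0, wanted = 100;

	while (queued < wanted) {
		if (rx_outstanding >= rx_fifo_depth) {
			/* FIFO budget exhausted: stop issuing read commands
			 * and let the RX interrupt drain some bytes first. */
			printf("paused at %u queued, %u outstanding\n",
			       queued, rx_outstanding);
			rx_outstanding -= 16;	/* pretend the ISR drained 16 */
			continue;
		}
		rx_outstanding++;
		queued++;
	}
	printf("all %u read commands issued\n", queued);
	return 0;
}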
diff --git a/drivers/i2c/busses/i2c-digicolor.c b/drivers/i2c/busses/i2c-digicolor.c index 49f2084f7bb5..50813a24c541 100644 --- a/drivers/i2c/busses/i2c-digicolor.c +++ b/drivers/i2c/busses/i2c-digicolor.c | |||
@@ -347,7 +347,7 @@ static int dc_i2c_probe(struct platform_device *pdev) | |||
347 | 347 | ||
348 | ret = i2c_add_adapter(&i2c->adap); | 348 | ret = i2c_add_adapter(&i2c->adap); |
349 | if (ret < 0) { | 349 | if (ret < 0) { |
350 | clk_unprepare(i2c->clk); | 350 | clk_disable_unprepare(i2c->clk); |
351 | return ret; | 351 | return ret; |
352 | } | 352 | } |
353 | 353 | ||
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig index e280c8ecc0b5..96de9ce5669b 100644 --- a/drivers/i2c/muxes/Kconfig +++ b/drivers/i2c/muxes/Kconfig | |||
@@ -63,6 +63,7 @@ config I2C_MUX_PINCTRL | |||
63 | 63 | ||
64 | config I2C_MUX_REG | 64 | config I2C_MUX_REG |
65 | tristate "Register-based I2C multiplexer" | 65 | tristate "Register-based I2C multiplexer" |
66 | depends on HAS_IOMEM | ||
66 | help | 67 | help |
67 | If you say yes to this option, support will be included for a | 68 | If you say yes to this option, support will be included for a |
68 | register based I2C multiplexer. This driver provides access to | 69 | register based I2C multiplexer. This driver provides access to |
diff --git a/drivers/i2c/muxes/i2c-demux-pinctrl.c b/drivers/i2c/muxes/i2c-demux-pinctrl.c index b3893f6282ba..3e6fe1760d82 100644 --- a/drivers/i2c/muxes/i2c-demux-pinctrl.c +++ b/drivers/i2c/muxes/i2c-demux-pinctrl.c | |||
@@ -69,10 +69,28 @@ static int i2c_demux_activate_master(struct i2c_demux_pinctrl_priv *priv, u32 ne | |||
69 | goto err_with_revert; | 69 | goto err_with_revert; |
70 | } | 70 | } |
71 | 71 | ||
72 | p = devm_pinctrl_get_select(adap->dev.parent, priv->bus_name); | 72 | /* |
73 | * Check if there are pinctrl states at all. Note: we can't use | ||
74 | * devm_pinctrl_get_select() because we need to distinguish between | ||
75 | * the -ENODEV from devm_pinctrl_get() and pinctrl_lookup_state(). | ||
76 | */ | ||
77 | p = devm_pinctrl_get(adap->dev.parent); | ||
73 | if (IS_ERR(p)) { | 78 | if (IS_ERR(p)) { |
74 | ret = PTR_ERR(p); | 79 | ret = PTR_ERR(p); |
75 | goto err_with_put; | 80 | /* continue if just no pinctrl states (e.g. i2c-gpio), otherwise exit */ |
81 | if (ret != -ENODEV) | ||
82 | goto err_with_put; | ||
83 | } else { | ||
84 | /* there are states. check and use them */ | ||
85 | struct pinctrl_state *s = pinctrl_lookup_state(p, priv->bus_name); | ||
86 | |||
87 | if (IS_ERR(s)) { | ||
88 | ret = PTR_ERR(s); | ||
89 | goto err_with_put; | ||
90 | } | ||
91 | ret = pinctrl_select_state(p, s); | ||
92 | if (ret < 0) | ||
93 | goto err_with_put; | ||
76 | } | 94 | } |
77 | 95 | ||
78 | priv->chan[new_chan].parent_adap = adap; | 96 | priv->chan[new_chan].parent_adap = adap; |
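The demux change splits devm_pinctrl_get_select() into its two halves so that a missing pinctrl node (-ENODEV from devm_pinctrl_get()) can be tolerated while a missing state or a failed select still aborts. A hedged kernel-style sketch of that split (the function name and the state-name parameter are placeholders):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pinctrl/consumer.h>

static int select_optional_pinctrl_state(struct device *dev,
					 const char *state_name)
{
	struct pinctrl *p;
	struct pinctrl_state *s;

	p = devm_pinctrl_get(dev);
	if (IS_ERR(p))
		/* No pinctrl at all is fine (e.g. i2c-gpio); anything else is not. */
		return PTR_ERR(p) == -ENODEV ? 0 : PTR_ERR(p);

	s = pinctrl_lookup_state(p, state_name);
	if (IS_ERR(s))
		return PTR_ERR(s);

	return pinctrl_select_state(p, s);
}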
diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c index 1091346f2480..8bc3d36d2837 100644 --- a/drivers/i2c/muxes/i2c-mux-pca954x.c +++ b/drivers/i2c/muxes/i2c-mux-pca954x.c | |||
@@ -268,9 +268,9 @@ static int pca954x_probe(struct i2c_client *client, | |||
268 | /* discard unconfigured channels */ | 268 | /* discard unconfigured channels */ |
269 | break; | 269 | break; |
270 | idle_disconnect_pd = pdata->modes[num].deselect_on_exit; | 270 | idle_disconnect_pd = pdata->modes[num].deselect_on_exit; |
271 | data->deselect |= (idle_disconnect_pd | ||
272 | || idle_disconnect_dt) << num; | ||
273 | } | 271 | } |
272 | data->deselect |= (idle_disconnect_pd || | ||
273 | idle_disconnect_dt) << num; | ||
274 | 274 | ||
275 | ret = i2c_mux_add_adapter(muxc, force, num, class); | 275 | ret = i2c_mux_add_adapter(muxc, force, num, class); |
276 | 276 | ||
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index b136d3acc5bd..0f58f46dbad7 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c | |||
@@ -699,13 +699,16 @@ EXPORT_SYMBOL(rdma_addr_cancel); | |||
699 | struct resolve_cb_context { | 699 | struct resolve_cb_context { |
700 | struct rdma_dev_addr *addr; | 700 | struct rdma_dev_addr *addr; |
701 | struct completion comp; | 701 | struct completion comp; |
702 | int status; | ||
702 | }; | 703 | }; |
703 | 704 | ||
704 | static void resolve_cb(int status, struct sockaddr *src_addr, | 705 | static void resolve_cb(int status, struct sockaddr *src_addr, |
705 | struct rdma_dev_addr *addr, void *context) | 706 | struct rdma_dev_addr *addr, void *context) |
706 | { | 707 | { |
707 | memcpy(((struct resolve_cb_context *)context)->addr, addr, sizeof(struct | 708 | if (!status) |
708 | rdma_dev_addr)); | 709 | memcpy(((struct resolve_cb_context *)context)->addr, |
710 | addr, sizeof(struct rdma_dev_addr)); | ||
711 | ((struct resolve_cb_context *)context)->status = status; | ||
709 | complete(&((struct resolve_cb_context *)context)->comp); | 712 | complete(&((struct resolve_cb_context *)context)->comp); |
710 | } | 713 | } |
711 | 714 | ||
@@ -743,6 +746,10 @@ int rdma_addr_find_l2_eth_by_grh(const union ib_gid *sgid, | |||
743 | 746 | ||
744 | wait_for_completion(&ctx.comp); | 747 | wait_for_completion(&ctx.comp); |
745 | 748 | ||
749 | ret = ctx.status; | ||
750 | if (ret) | ||
751 | return ret; | ||
752 | |||
746 | memcpy(dmac, dev_addr.dst_dev_addr, ETH_ALEN); | 753 | memcpy(dmac, dev_addr.dst_dev_addr, ETH_ALEN); |
747 | dev = dev_get_by_index(&init_net, dev_addr.bound_dev_if); | 754 | dev = dev_get_by_index(&init_net, dev_addr.bound_dev_if); |
748 | if (!dev) | 755 | if (!dev) |
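The addr.c fix records the callback status in the completion context and checks it after wait_for_completion(), instead of blindly copying a result that may never have been written. A hedged kernel-style sketch of the pattern (names are illustrative):

#include <linux/completion.h>

struct resolve_ctx {
	struct completion comp;
	int status;
};

static void my_resolve_cb(int status, void *context)
{
	struct resolve_ctx *ctx = context;

	ctx->status = status;	/* remember failure, don't just complete */
	complete(&ctx->comp);
}

static int my_wait_for_resolve(struct resolve_ctx *ctx)
{
	wait_for_completion(&ctx->comp);
	return ctx->status;	/* caller bails out before using stale data */
}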
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index c99525512b34..71c7c4c328ef 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c | |||
@@ -80,6 +80,8 @@ static struct ib_cm { | |||
80 | __be32 random_id_operand; | 80 | __be32 random_id_operand; |
81 | struct list_head timewait_list; | 81 | struct list_head timewait_list; |
82 | struct workqueue_struct *wq; | 82 | struct workqueue_struct *wq; |
83 | /* Sync on cm change port state */ | ||
84 | spinlock_t state_lock; | ||
83 | } cm; | 85 | } cm; |
84 | 86 | ||
85 | /* Counter indexes ordered by attribute ID */ | 87 | /* Counter indexes ordered by attribute ID */ |
@@ -161,6 +163,8 @@ struct cm_port { | |||
161 | struct ib_mad_agent *mad_agent; | 163 | struct ib_mad_agent *mad_agent; |
162 | struct kobject port_obj; | 164 | struct kobject port_obj; |
163 | u8 port_num; | 165 | u8 port_num; |
166 | struct list_head cm_priv_prim_list; | ||
167 | struct list_head cm_priv_altr_list; | ||
164 | struct cm_counter_group counter_group[CM_COUNTER_GROUPS]; | 168 | struct cm_counter_group counter_group[CM_COUNTER_GROUPS]; |
165 | }; | 169 | }; |
166 | 170 | ||
@@ -241,6 +245,12 @@ struct cm_id_private { | |||
241 | u8 service_timeout; | 245 | u8 service_timeout; |
242 | u8 target_ack_delay; | 246 | u8 target_ack_delay; |
243 | 247 | ||
248 | struct list_head prim_list; | ||
249 | struct list_head altr_list; | ||
250 | /* Indicates that the send port mad is registered and av is set */ | ||
251 | int prim_send_port_not_ready; | ||
252 | int altr_send_port_not_ready; | ||
253 | |||
244 | struct list_head work_list; | 254 | struct list_head work_list; |
245 | atomic_t work_count; | 255 | atomic_t work_count; |
246 | }; | 256 | }; |
@@ -259,20 +269,47 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv, | |||
259 | struct ib_mad_agent *mad_agent; | 269 | struct ib_mad_agent *mad_agent; |
260 | struct ib_mad_send_buf *m; | 270 | struct ib_mad_send_buf *m; |
261 | struct ib_ah *ah; | 271 | struct ib_ah *ah; |
272 | struct cm_av *av; | ||
273 | unsigned long flags, flags2; | ||
274 | int ret = 0; | ||
262 | 275 | ||
276 | /* don't let the port be released till the agent is down */ | ||
277 | spin_lock_irqsave(&cm.state_lock, flags2); | ||
278 | spin_lock_irqsave(&cm.lock, flags); | ||
279 | if (!cm_id_priv->prim_send_port_not_ready) | ||
280 | av = &cm_id_priv->av; | ||
281 | else if (!cm_id_priv->altr_send_port_not_ready && | ||
282 | (cm_id_priv->alt_av.port)) | ||
283 | av = &cm_id_priv->alt_av; | ||
284 | else { | ||
285 | pr_info("%s: not valid CM id\n", __func__); | ||
286 | ret = -ENODEV; | ||
287 | spin_unlock_irqrestore(&cm.lock, flags); | ||
288 | goto out; | ||
289 | } | ||
290 | spin_unlock_irqrestore(&cm.lock, flags); | ||
291 | /* Make sure the port hasn't released the mad yet */ | ||
263 | mad_agent = cm_id_priv->av.port->mad_agent; | 292 | mad_agent = cm_id_priv->av.port->mad_agent; |
264 | ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr); | 293 | if (!mad_agent) { |
265 | if (IS_ERR(ah)) | 294 | pr_info("%s: not a valid MAD agent\n", __func__); |
266 | return PTR_ERR(ah); | 295 | ret = -ENODEV; |
296 | goto out; | ||
297 | } | ||
298 | ah = ib_create_ah(mad_agent->qp->pd, &av->ah_attr); | ||
299 | if (IS_ERR(ah)) { | ||
300 | ret = PTR_ERR(ah); | ||
301 | goto out; | ||
302 | } | ||
267 | 303 | ||
268 | m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn, | 304 | m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn, |
269 | cm_id_priv->av.pkey_index, | 305 | av->pkey_index, |
270 | 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, | 306 | 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, |
271 | GFP_ATOMIC, | 307 | GFP_ATOMIC, |
272 | IB_MGMT_BASE_VERSION); | 308 | IB_MGMT_BASE_VERSION); |
273 | if (IS_ERR(m)) { | 309 | if (IS_ERR(m)) { |
274 | ib_destroy_ah(ah); | 310 | ib_destroy_ah(ah); |
275 | return PTR_ERR(m); | 311 | ret = PTR_ERR(m); |
312 | goto out; | ||
276 | } | 313 | } |
277 | 314 | ||
278 | /* Timeout set by caller if response is expected. */ | 315 | /* Timeout set by caller if response is expected. */ |
@@ -282,7 +319,10 @@ static int cm_alloc_msg(struct cm_id_private *cm_id_priv, | |||
282 | atomic_inc(&cm_id_priv->refcount); | 319 | atomic_inc(&cm_id_priv->refcount); |
283 | m->context[0] = cm_id_priv; | 320 | m->context[0] = cm_id_priv; |
284 | *msg = m; | 321 | *msg = m; |
285 | return 0; | 322 | |
323 | out: | ||
324 | spin_unlock_irqrestore(&cm.state_lock, flags2); | ||
325 | return ret; | ||
286 | } | 326 | } |
287 | 327 | ||
288 | static int cm_alloc_response_msg(struct cm_port *port, | 328 | static int cm_alloc_response_msg(struct cm_port *port, |
@@ -352,7 +392,8 @@ static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc, | |||
352 | grh, &av->ah_attr); | 392 | grh, &av->ah_attr); |
353 | } | 393 | } |
354 | 394 | ||
355 | static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av) | 395 | static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av, |
396 | struct cm_id_private *cm_id_priv) | ||
356 | { | 397 | { |
357 | struct cm_device *cm_dev; | 398 | struct cm_device *cm_dev; |
358 | struct cm_port *port = NULL; | 399 | struct cm_port *port = NULL; |
@@ -387,7 +428,17 @@ static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av) | |||
387 | &av->ah_attr); | 428 | &av->ah_attr); |
388 | av->timeout = path->packet_life_time + 1; | 429 | av->timeout = path->packet_life_time + 1; |
389 | 430 | ||
390 | return 0; | 431 | spin_lock_irqsave(&cm.lock, flags); |
432 | if (&cm_id_priv->av == av) | ||
433 | list_add_tail(&cm_id_priv->prim_list, &port->cm_priv_prim_list); | ||
434 | else if (&cm_id_priv->alt_av == av) | ||
435 | list_add_tail(&cm_id_priv->altr_list, &port->cm_priv_altr_list); | ||
436 | else | ||
437 | ret = -EINVAL; | ||
438 | |||
439 | spin_unlock_irqrestore(&cm.lock, flags); | ||
440 | |||
441 | return ret; | ||
391 | } | 442 | } |
392 | 443 | ||
393 | static int cm_alloc_id(struct cm_id_private *cm_id_priv) | 444 | static int cm_alloc_id(struct cm_id_private *cm_id_priv) |
@@ -677,6 +728,8 @@ struct ib_cm_id *ib_create_cm_id(struct ib_device *device, | |||
677 | spin_lock_init(&cm_id_priv->lock); | 728 | spin_lock_init(&cm_id_priv->lock); |
678 | init_completion(&cm_id_priv->comp); | 729 | init_completion(&cm_id_priv->comp); |
679 | INIT_LIST_HEAD(&cm_id_priv->work_list); | 730 | INIT_LIST_HEAD(&cm_id_priv->work_list); |
731 | INIT_LIST_HEAD(&cm_id_priv->prim_list); | ||
732 | INIT_LIST_HEAD(&cm_id_priv->altr_list); | ||
680 | atomic_set(&cm_id_priv->work_count, -1); | 733 | atomic_set(&cm_id_priv->work_count, -1); |
681 | atomic_set(&cm_id_priv->refcount, 1); | 734 | atomic_set(&cm_id_priv->refcount, 1); |
682 | return &cm_id_priv->id; | 735 | return &cm_id_priv->id; |
@@ -892,6 +945,15 @@ retest: | |||
892 | break; | 945 | break; |
893 | } | 946 | } |
894 | 947 | ||
948 | spin_lock_irq(&cm.lock); | ||
949 | if (!list_empty(&cm_id_priv->altr_list) && | ||
950 | (!cm_id_priv->altr_send_port_not_ready)) | ||
951 | list_del(&cm_id_priv->altr_list); | ||
952 | if (!list_empty(&cm_id_priv->prim_list) && | ||
953 | (!cm_id_priv->prim_send_port_not_ready)) | ||
954 | list_del(&cm_id_priv->prim_list); | ||
955 | spin_unlock_irq(&cm.lock); | ||
956 | |||
895 | cm_free_id(cm_id->local_id); | 957 | cm_free_id(cm_id->local_id); |
896 | cm_deref_id(cm_id_priv); | 958 | cm_deref_id(cm_id_priv); |
897 | wait_for_completion(&cm_id_priv->comp); | 959 | wait_for_completion(&cm_id_priv->comp); |
@@ -1192,12 +1254,13 @@ int ib_send_cm_req(struct ib_cm_id *cm_id, | |||
1192 | goto out; | 1254 | goto out; |
1193 | } | 1255 | } |
1194 | 1256 | ||
1195 | ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av); | 1257 | ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av, |
1258 | cm_id_priv); | ||
1196 | if (ret) | 1259 | if (ret) |
1197 | goto error1; | 1260 | goto error1; |
1198 | if (param->alternate_path) { | 1261 | if (param->alternate_path) { |
1199 | ret = cm_init_av_by_path(param->alternate_path, | 1262 | ret = cm_init_av_by_path(param->alternate_path, |
1200 | &cm_id_priv->alt_av); | 1263 | &cm_id_priv->alt_av, cm_id_priv); |
1201 | if (ret) | 1264 | if (ret) |
1202 | goto error1; | 1265 | goto error1; |
1203 | } | 1266 | } |
@@ -1653,7 +1716,8 @@ static int cm_req_handler(struct cm_work *work) | |||
1653 | dev_put(gid_attr.ndev); | 1716 | dev_put(gid_attr.ndev); |
1654 | } | 1717 | } |
1655 | work->path[0].gid_type = gid_attr.gid_type; | 1718 | work->path[0].gid_type = gid_attr.gid_type; |
1656 | ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av); | 1719 | ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av, |
1720 | cm_id_priv); | ||
1657 | } | 1721 | } |
1658 | if (ret) { | 1722 | if (ret) { |
1659 | int err = ib_get_cached_gid(work->port->cm_dev->ib_device, | 1723 | int err = ib_get_cached_gid(work->port->cm_dev->ib_device, |
@@ -1672,7 +1736,8 @@ static int cm_req_handler(struct cm_work *work) | |||
1672 | goto rejected; | 1736 | goto rejected; |
1673 | } | 1737 | } |
1674 | if (req_msg->alt_local_lid) { | 1738 | if (req_msg->alt_local_lid) { |
1675 | ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av); | 1739 | ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av, |
1740 | cm_id_priv); | ||
1676 | if (ret) { | 1741 | if (ret) { |
1677 | ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID, | 1742 | ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID, |
1678 | &work->path[0].sgid, | 1743 | &work->path[0].sgid, |
@@ -2727,7 +2792,8 @@ int ib_send_cm_lap(struct ib_cm_id *cm_id, | |||
2727 | goto out; | 2792 | goto out; |
2728 | } | 2793 | } |
2729 | 2794 | ||
2730 | ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av); | 2795 | ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av, |
2796 | cm_id_priv); | ||
2731 | if (ret) | 2797 | if (ret) |
2732 | goto out; | 2798 | goto out; |
2733 | cm_id_priv->alt_av.timeout = | 2799 | cm_id_priv->alt_av.timeout = |
@@ -2839,7 +2905,8 @@ static int cm_lap_handler(struct cm_work *work) | |||
2839 | cm_init_av_for_response(work->port, work->mad_recv_wc->wc, | 2905 | cm_init_av_for_response(work->port, work->mad_recv_wc->wc, |
2840 | work->mad_recv_wc->recv_buf.grh, | 2906 | work->mad_recv_wc->recv_buf.grh, |
2841 | &cm_id_priv->av); | 2907 | &cm_id_priv->av); |
2842 | cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av); | 2908 | cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av, |
2909 | cm_id_priv); | ||
2843 | ret = atomic_inc_and_test(&cm_id_priv->work_count); | 2910 | ret = atomic_inc_and_test(&cm_id_priv->work_count); |
2844 | if (!ret) | 2911 | if (!ret) |
2845 | list_add_tail(&work->list, &cm_id_priv->work_list); | 2912 | list_add_tail(&work->list, &cm_id_priv->work_list); |
@@ -3031,7 +3098,7 @@ int ib_send_cm_sidr_req(struct ib_cm_id *cm_id, | |||
3031 | return -EINVAL; | 3098 | return -EINVAL; |
3032 | 3099 | ||
3033 | cm_id_priv = container_of(cm_id, struct cm_id_private, id); | 3100 | cm_id_priv = container_of(cm_id, struct cm_id_private, id); |
3034 | ret = cm_init_av_by_path(param->path, &cm_id_priv->av); | 3101 | ret = cm_init_av_by_path(param->path, &cm_id_priv->av, cm_id_priv); |
3035 | if (ret) | 3102 | if (ret) |
3036 | goto out; | 3103 | goto out; |
3037 | 3104 | ||
@@ -3468,7 +3535,9 @@ out: | |||
3468 | static int cm_migrate(struct ib_cm_id *cm_id) | 3535 | static int cm_migrate(struct ib_cm_id *cm_id) |
3469 | { | 3536 | { |
3470 | struct cm_id_private *cm_id_priv; | 3537 | struct cm_id_private *cm_id_priv; |
3538 | struct cm_av tmp_av; | ||
3471 | unsigned long flags; | 3539 | unsigned long flags; |
3540 | int tmp_send_port_not_ready; | ||
3472 | int ret = 0; | 3541 | int ret = 0; |
3473 | 3542 | ||
3474 | cm_id_priv = container_of(cm_id, struct cm_id_private, id); | 3543 | cm_id_priv = container_of(cm_id, struct cm_id_private, id); |
@@ -3477,7 +3546,14 @@ static int cm_migrate(struct ib_cm_id *cm_id) | |||
3477 | (cm_id->lap_state == IB_CM_LAP_UNINIT || | 3546 | (cm_id->lap_state == IB_CM_LAP_UNINIT || |
3478 | cm_id->lap_state == IB_CM_LAP_IDLE)) { | 3547 | cm_id->lap_state == IB_CM_LAP_IDLE)) { |
3479 | cm_id->lap_state = IB_CM_LAP_IDLE; | 3548 | cm_id->lap_state = IB_CM_LAP_IDLE; |
3549 | /* Swap address vector */ | ||
3550 | tmp_av = cm_id_priv->av; | ||
3480 | cm_id_priv->av = cm_id_priv->alt_av; | 3551 | cm_id_priv->av = cm_id_priv->alt_av; |
3552 | cm_id_priv->alt_av = tmp_av; | ||
3553 | /* Swap port send ready state */ | ||
3554 | tmp_send_port_not_ready = cm_id_priv->prim_send_port_not_ready; | ||
3555 | cm_id_priv->prim_send_port_not_ready = cm_id_priv->altr_send_port_not_ready; | ||
3556 | cm_id_priv->altr_send_port_not_ready = tmp_send_port_not_ready; | ||
3481 | } else | 3557 | } else |
3482 | ret = -EINVAL; | 3558 | ret = -EINVAL; |
3483 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); | 3559 | spin_unlock_irqrestore(&cm_id_priv->lock, flags); |
@@ -3888,6 +3964,9 @@ static void cm_add_one(struct ib_device *ib_device) | |||
3888 | port->cm_dev = cm_dev; | 3964 | port->cm_dev = cm_dev; |
3889 | port->port_num = i; | 3965 | port->port_num = i; |
3890 | 3966 | ||
3967 | INIT_LIST_HEAD(&port->cm_priv_prim_list); | ||
3968 | INIT_LIST_HEAD(&port->cm_priv_altr_list); | ||
3969 | |||
3891 | ret = cm_create_port_fs(port); | 3970 | ret = cm_create_port_fs(port); |
3892 | if (ret) | 3971 | if (ret) |
3893 | goto error1; | 3972 | goto error1; |
@@ -3945,6 +4024,8 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data) | |||
3945 | { | 4024 | { |
3946 | struct cm_device *cm_dev = client_data; | 4025 | struct cm_device *cm_dev = client_data; |
3947 | struct cm_port *port; | 4026 | struct cm_port *port; |
4027 | struct cm_id_private *cm_id_priv; | ||
4028 | struct ib_mad_agent *cur_mad_agent; | ||
3948 | struct ib_port_modify port_modify = { | 4029 | struct ib_port_modify port_modify = { |
3949 | .clr_port_cap_mask = IB_PORT_CM_SUP | 4030 | .clr_port_cap_mask = IB_PORT_CM_SUP |
3950 | }; | 4031 | }; |
@@ -3968,15 +4049,27 @@ static void cm_remove_one(struct ib_device *ib_device, void *client_data) | |||
3968 | 4049 | ||
3969 | port = cm_dev->port[i-1]; | 4050 | port = cm_dev->port[i-1]; |
3970 | ib_modify_port(ib_device, port->port_num, 0, &port_modify); | 4051 | ib_modify_port(ib_device, port->port_num, 0, &port_modify); |
4052 | /* Mark all the cm_id's as not valid */ | ||
4053 | spin_lock_irq(&cm.lock); | ||
4054 | list_for_each_entry(cm_id_priv, &port->cm_priv_altr_list, altr_list) | ||
4055 | cm_id_priv->altr_send_port_not_ready = 1; | ||
4056 | list_for_each_entry(cm_id_priv, &port->cm_priv_prim_list, prim_list) | ||
4057 | cm_id_priv->prim_send_port_not_ready = 1; | ||
4058 | spin_unlock_irq(&cm.lock); | ||
3971 | /* | 4059 | /* |
3972 | * We flush the queue here after going_down is set; this | 4060 | * We flush the queue here after going_down is set; this |
3973 | * verifies that no new work will be queued in the recv handler, | 4061 | * verifies that no new work will be queued in the recv handler, |
3974 | * and after that we can call unregister_mad_agent. | 4062 | * and after that we can call unregister_mad_agent. |
3975 | */ | 4063 | */ |
3976 | flush_workqueue(cm.wq); | 4064 | flush_workqueue(cm.wq); |
3977 | ib_unregister_mad_agent(port->mad_agent); | 4065 | spin_lock_irq(&cm.state_lock); |
4066 | cur_mad_agent = port->mad_agent; | ||
4067 | port->mad_agent = NULL; | ||
4068 | spin_unlock_irq(&cm.state_lock); | ||
4069 | ib_unregister_mad_agent(cur_mad_agent); | ||
3978 | cm_remove_port_fs(port); | 4070 | cm_remove_port_fs(port); |
3979 | } | 4071 | } |
4072 | |||
3980 | device_unregister(cm_dev->device); | 4073 | device_unregister(cm_dev->device); |
3981 | kfree(cm_dev); | 4074 | kfree(cm_dev); |
3982 | } | 4075 | } |
@@ -3989,6 +4082,7 @@ static int __init ib_cm_init(void) | |||
3989 | INIT_LIST_HEAD(&cm.device_list); | 4082 | INIT_LIST_HEAD(&cm.device_list); |
3990 | rwlock_init(&cm.device_lock); | 4083 | rwlock_init(&cm.device_lock); |
3991 | spin_lock_init(&cm.lock); | 4084 | spin_lock_init(&cm.lock); |
4085 | spin_lock_init(&cm.state_lock); | ||
3992 | cm.listen_service_table = RB_ROOT; | 4086 | cm.listen_service_table = RB_ROOT; |
3993 | cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID); | 4087 | cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID); |
3994 | cm.remote_id_table = RB_ROOT; | 4088 | cm.remote_id_table = RB_ROOT; |
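
The cm.c hunks above add per-port lists of cm_id's (prim_list/altr_list), a cm.state_lock around port->mad_agent, and "send_port_not_ready" flags, so that cm_remove_one() can mark every cm_id on a departing port and clear the MAD agent before unregistering it. Below is a minimal user-space model of that flag-under-lock handshake; the struct, lock and error code are stand-ins, not the kernel's.

/* Minimal user-space model of the "port not ready" check added in cm.c:
 * the flag is read under the same lock that device removal takes when it
 * marks every cm_id on the port as not ready. Names are illustrative. */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct fake_cm_id {
        int prim_send_port_not_ready;   /* set by the "remove" path under lock */
};

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

static int alloc_msg(struct fake_cm_id *id)
{
        int ret = 0;

        pthread_mutex_lock(&state_lock);
        if (id->prim_send_port_not_ready)
                ret = -EAGAIN;          /* port is going away, do not send */
        pthread_mutex_unlock(&state_lock);
        return ret;
}

static void remove_port(struct fake_cm_id *id)
{
        pthread_mutex_lock(&state_lock);
        id->prim_send_port_not_ready = 1;
        pthread_mutex_unlock(&state_lock);
}

int main(void)
{
        struct fake_cm_id id = { 0 };

        printf("before removal: %d\n", alloc_msg(&id));   /* 0 */
        remove_port(&id);
        printf("after removal:  %d\n", alloc_msg(&id));   /* -EAGAIN */
        return 0;
}
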
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 89a6b0546804..2a6fc47a1dfb 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c | |||
@@ -2438,6 +2438,18 @@ static int iboe_tos_to_sl(struct net_device *ndev, int tos) | |||
2438 | return 0; | 2438 | return 0; |
2439 | } | 2439 | } |
2440 | 2440 | ||
2441 | static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type, | ||
2442 | unsigned long supported_gids, | ||
2443 | enum ib_gid_type default_gid) | ||
2444 | { | ||
2445 | if ((network_type == RDMA_NETWORK_IPV4 || | ||
2446 | network_type == RDMA_NETWORK_IPV6) && | ||
2447 | test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids)) | ||
2448 | return IB_GID_TYPE_ROCE_UDP_ENCAP; | ||
2449 | |||
2450 | return default_gid; | ||
2451 | } | ||
2452 | |||
2441 | static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) | 2453 | static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) |
2442 | { | 2454 | { |
2443 | struct rdma_route *route = &id_priv->id.route; | 2455 | struct rdma_route *route = &id_priv->id.route; |
@@ -2463,6 +2475,8 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) | |||
2463 | route->num_paths = 1; | 2475 | route->num_paths = 1; |
2464 | 2476 | ||
2465 | if (addr->dev_addr.bound_dev_if) { | 2477 | if (addr->dev_addr.bound_dev_if) { |
2478 | unsigned long supported_gids; | ||
2479 | |||
2466 | ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if); | 2480 | ndev = dev_get_by_index(&init_net, addr->dev_addr.bound_dev_if); |
2467 | if (!ndev) { | 2481 | if (!ndev) { |
2468 | ret = -ENODEV; | 2482 | ret = -ENODEV; |
@@ -2486,7 +2500,12 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) | |||
2486 | 2500 | ||
2487 | route->path_rec->net = &init_net; | 2501 | route->path_rec->net = &init_net; |
2488 | route->path_rec->ifindex = ndev->ifindex; | 2502 | route->path_rec->ifindex = ndev->ifindex; |
2489 | route->path_rec->gid_type = id_priv->gid_type; | 2503 | supported_gids = roce_gid_type_mask_support(id_priv->id.device, |
2504 | id_priv->id.port_num); | ||
2505 | route->path_rec->gid_type = | ||
2506 | cma_route_gid_type(addr->dev_addr.network, | ||
2507 | supported_gids, | ||
2508 | id_priv->gid_type); | ||
2490 | } | 2509 | } |
2491 | if (!ndev) { | 2510 | if (!ndev) { |
2492 | ret = -ENODEV; | 2511 | ret = -ENODEV; |
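
cma_route_gid_type(), added above, prefers the RoCE v2 (UDP-encapsulated) GID type whenever the route was resolved over IPv4 or IPv6 and the port reports support for it, and otherwise keeps the id's default GID type. A standalone restatement of that selection with local stand-in enums (the real kernel enum values and mask helper differ):

/* Local stand-ins for the kernel enums so this compiles on its own. */
#include <assert.h>

enum net_type { NET_IB, NET_ROCE_V1, NET_IPV4, NET_IPV6 };
enum gid_type { GID_IB_ROCE_V1, GID_ROCE_UDP_ENCAP };

static enum gid_type route_gid_type(enum net_type net,
                                    unsigned long supported_mask,
                                    enum gid_type dflt)
{
        if ((net == NET_IPV4 || net == NET_IPV6) &&
            (supported_mask & (1UL << GID_ROCE_UDP_ENCAP)))
                return GID_ROCE_UDP_ENCAP;      /* prefer RoCE v2 for IP routes */
        return dflt;                            /* otherwise keep the default */
}

int main(void)
{
        unsigned long both = (1UL << GID_IB_ROCE_V1) | (1UL << GID_ROCE_UDP_ENCAP);

        assert(route_gid_type(NET_IPV4, both, GID_IB_ROCE_V1) == GID_ROCE_UDP_ENCAP);
        assert(route_gid_type(NET_ROCE_V1, both, GID_IB_ROCE_V1) == GID_IB_ROCE_V1);
        return 0;
}
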
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 224ad274ea0b..84b4eff90395 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c | |||
@@ -175,7 +175,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, | |||
175 | 175 | ||
176 | cur_base = addr & PAGE_MASK; | 176 | cur_base = addr & PAGE_MASK; |
177 | 177 | ||
178 | if (npages == 0) { | 178 | if (npages == 0 || npages > UINT_MAX) { |
179 | ret = -EINVAL; | 179 | ret = -EINVAL; |
180 | goto out; | 180 | goto out; |
181 | } | 181 | } |
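
The extra check in ib_umem_get() rejects page counts that cannot be represented in 32 bits as well as zero-length requests. How npages is derived is not shown in this hunk; the sketch below only illustrates the magnitude that the new "npages > UINT_MAX" guard is meant to catch.

/* A very large pin request produces a page count that does not fit in 32 bits. */
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ULL << PAGE_SHIFT)

int main(void)
{
        uint64_t size   = 1ULL << 46;                        /* 64 TiB request */
        uint64_t npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        printf("npages = %llu, UINT_MAX = %u\n",
               (unsigned long long)npages, UINT_MAX);
        return 0;
}
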
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 0012fa58c105..44b1104eb168 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c | |||
@@ -262,12 +262,9 @@ static int ib_uverbs_cleanup_ucontext(struct ib_uverbs_file *file, | |||
262 | container_of(uobj, struct ib_uqp_object, uevent.uobject); | 262 | container_of(uobj, struct ib_uqp_object, uevent.uobject); |
263 | 263 | ||
264 | idr_remove_uobj(&ib_uverbs_qp_idr, uobj); | 264 | idr_remove_uobj(&ib_uverbs_qp_idr, uobj); |
265 | if (qp != qp->real_qp) { | 265 | if (qp == qp->real_qp) |
266 | ib_close_qp(qp); | ||
267 | } else { | ||
268 | ib_uverbs_detach_umcast(qp, uqp); | 266 | ib_uverbs_detach_umcast(qp, uqp); |
269 | ib_destroy_qp(qp); | 267 | ib_destroy_qp(qp); |
270 | } | ||
271 | ib_uverbs_release_uevent(file, &uqp->uevent); | 268 | ib_uverbs_release_uevent(file, &uqp->uevent); |
272 | kfree(uqp); | 269 | kfree(uqp); |
273 | } | 270 | } |
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index 867b8cf82be8..19c6477af19f 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c | |||
@@ -666,18 +666,6 @@ skip_cqe: | |||
666 | return ret; | 666 | return ret; |
667 | } | 667 | } |
668 | 668 | ||
669 | static void invalidate_mr(struct c4iw_dev *rhp, u32 rkey) | ||
670 | { | ||
671 | struct c4iw_mr *mhp; | ||
672 | unsigned long flags; | ||
673 | |||
674 | spin_lock_irqsave(&rhp->lock, flags); | ||
675 | mhp = get_mhp(rhp, rkey >> 8); | ||
676 | if (mhp) | ||
677 | mhp->attr.state = 0; | ||
678 | spin_unlock_irqrestore(&rhp->lock, flags); | ||
679 | } | ||
680 | |||
681 | /* | 669 | /* |
682 | * Get one cq entry from c4iw and map it to openib. | 670 | * Get one cq entry from c4iw and map it to openib. |
683 | * | 671 | * |
@@ -733,7 +721,7 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc) | |||
733 | CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) { | 721 | CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) { |
734 | wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe); | 722 | wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe); |
735 | wc->wc_flags |= IB_WC_WITH_INVALIDATE; | 723 | wc->wc_flags |= IB_WC_WITH_INVALIDATE; |
736 | invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey); | 724 | c4iw_invalidate_mr(qhp->rhp, wc->ex.invalidate_rkey); |
737 | } | 725 | } |
738 | } else { | 726 | } else { |
739 | switch (CQE_OPCODE(&cqe)) { | 727 | switch (CQE_OPCODE(&cqe)) { |
@@ -762,7 +750,8 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc) | |||
762 | 750 | ||
763 | /* Invalidate the MR if the fastreg failed */ | 751 | /* Invalidate the MR if the fastreg failed */ |
764 | if (CQE_STATUS(&cqe) != T4_ERR_SUCCESS) | 752 | if (CQE_STATUS(&cqe) != T4_ERR_SUCCESS) |
765 | invalidate_mr(qhp->rhp, CQE_WRID_FR_STAG(&cqe)); | 753 | c4iw_invalidate_mr(qhp->rhp, |
754 | CQE_WRID_FR_STAG(&cqe)); | ||
766 | break; | 755 | break; |
767 | default: | 756 | default: |
768 | printk(KERN_ERR MOD "Unexpected opcode %d " | 757 | printk(KERN_ERR MOD "Unexpected opcode %d " |
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 7e7f79e55006..4788e1a46fde 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h | |||
@@ -999,6 +999,6 @@ extern int db_coalescing_threshold; | |||
999 | extern int use_dsgl; | 999 | extern int use_dsgl; |
1000 | void c4iw_drain_rq(struct ib_qp *qp); | 1000 | void c4iw_drain_rq(struct ib_qp *qp); |
1001 | void c4iw_drain_sq(struct ib_qp *qp); | 1001 | void c4iw_drain_sq(struct ib_qp *qp); |
1002 | 1002 | void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey); | |
1003 | 1003 | ||
1004 | #endif | 1004 | #endif |
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c index 80e27749420a..410408f886c1 100644 --- a/drivers/infiniband/hw/cxgb4/mem.c +++ b/drivers/infiniband/hw/cxgb4/mem.c | |||
@@ -770,3 +770,15 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr) | |||
770 | kfree(mhp); | 770 | kfree(mhp); |
771 | return 0; | 771 | return 0; |
772 | } | 772 | } |
773 | |||
774 | void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey) | ||
775 | { | ||
776 | struct c4iw_mr *mhp; | ||
777 | unsigned long flags; | ||
778 | |||
779 | spin_lock_irqsave(&rhp->lock, flags); | ||
780 | mhp = get_mhp(rhp, rkey >> 8); | ||
781 | if (mhp) | ||
782 | mhp->attr.state = 0; | ||
783 | spin_unlock_irqrestore(&rhp->lock, flags); | ||
784 | } | ||
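
c4iw_invalidate_mr(), moved here from cq.c so the CQ poll and post-send paths can share it, looks the MR up by the index part of its rkey and clears its state under the device lock. A user-space model with invented data structures, assuming the low 8 bits of the rkey are the key portion (as the "rkey >> 8" lookup suggests):

/* Illustrative only: a flat table stands in for the driver's MR lookup. */
#include <pthread.h>
#include <stdio.h>

#define NR_MRS 256

struct fake_mr { int valid; };

static struct fake_mr mr_table[NR_MRS];
static pthread_mutex_t tbl_lock = PTHREAD_MUTEX_INITIALIZER;

static void invalidate_mr(unsigned int rkey)
{
        unsigned int idx = (rkey >> 8) % NR_MRS;   /* drop the 8-bit key part */

        pthread_mutex_lock(&tbl_lock);
        mr_table[idx].valid = 0;                   /* mark the MR invalid */
        pthread_mutex_unlock(&tbl_lock);
}

int main(void)
{
        mr_table[3].valid = 1;
        invalidate_mr((3u << 8) | 0xab);           /* index 3, key 0xab */
        printf("mr[3].valid = %d\n", mr_table[3].valid);   /* 0 */
        return 0;
}
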
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index f57deba6717c..b7ac97b27c88 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c | |||
@@ -706,12 +706,8 @@ static int build_memreg(struct t4_sq *sq, union t4_wr *wqe, | |||
706 | return 0; | 706 | return 0; |
707 | } | 707 | } |
708 | 708 | ||
709 | static int build_inv_stag(struct c4iw_dev *dev, union t4_wr *wqe, | 709 | static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) |
710 | struct ib_send_wr *wr, u8 *len16) | ||
711 | { | 710 | { |
712 | struct c4iw_mr *mhp = get_mhp(dev, wr->ex.invalidate_rkey >> 8); | ||
713 | |||
714 | mhp->attr.state = 0; | ||
715 | wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey); | 711 | wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey); |
716 | wqe->inv.r2 = 0; | 712 | wqe->inv.r2 = 0; |
717 | *len16 = DIV_ROUND_UP(sizeof wqe->inv, 16); | 713 | *len16 = DIV_ROUND_UP(sizeof wqe->inv, 16); |
@@ -797,11 +793,13 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
797 | spin_lock_irqsave(&qhp->lock, flag); | 793 | spin_lock_irqsave(&qhp->lock, flag); |
798 | if (t4_wq_in_error(&qhp->wq)) { | 794 | if (t4_wq_in_error(&qhp->wq)) { |
799 | spin_unlock_irqrestore(&qhp->lock, flag); | 795 | spin_unlock_irqrestore(&qhp->lock, flag); |
796 | *bad_wr = wr; | ||
800 | return -EINVAL; | 797 | return -EINVAL; |
801 | } | 798 | } |
802 | num_wrs = t4_sq_avail(&qhp->wq); | 799 | num_wrs = t4_sq_avail(&qhp->wq); |
803 | if (num_wrs == 0) { | 800 | if (num_wrs == 0) { |
804 | spin_unlock_irqrestore(&qhp->lock, flag); | 801 | spin_unlock_irqrestore(&qhp->lock, flag); |
802 | *bad_wr = wr; | ||
805 | return -ENOMEM; | 803 | return -ENOMEM; |
806 | } | 804 | } |
807 | while (wr) { | 805 | while (wr) { |
@@ -840,10 +838,13 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
840 | case IB_WR_RDMA_READ_WITH_INV: | 838 | case IB_WR_RDMA_READ_WITH_INV: |
841 | fw_opcode = FW_RI_RDMA_READ_WR; | 839 | fw_opcode = FW_RI_RDMA_READ_WR; |
842 | swsqe->opcode = FW_RI_READ_REQ; | 840 | swsqe->opcode = FW_RI_READ_REQ; |
843 | if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) | 841 | if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) { |
842 | c4iw_invalidate_mr(qhp->rhp, | ||
843 | wr->sg_list[0].lkey); | ||
844 | fw_flags = FW_RI_RDMA_READ_INVALIDATE; | 844 | fw_flags = FW_RI_RDMA_READ_INVALIDATE; |
845 | else | 845 | } else { |
846 | fw_flags = 0; | 846 | fw_flags = 0; |
847 | } | ||
847 | err = build_rdma_read(wqe, wr, &len16); | 848 | err = build_rdma_read(wqe, wr, &len16); |
848 | if (err) | 849 | if (err) |
849 | break; | 850 | break; |
@@ -876,7 +877,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
876 | fw_flags |= FW_RI_LOCAL_FENCE_FLAG; | 877 | fw_flags |= FW_RI_LOCAL_FENCE_FLAG; |
877 | fw_opcode = FW_RI_INV_LSTAG_WR; | 878 | fw_opcode = FW_RI_INV_LSTAG_WR; |
878 | swsqe->opcode = FW_RI_LOCAL_INV; | 879 | swsqe->opcode = FW_RI_LOCAL_INV; |
879 | err = build_inv_stag(qhp->rhp, wqe, wr, &len16); | 880 | err = build_inv_stag(wqe, wr, &len16); |
881 | c4iw_invalidate_mr(qhp->rhp, wr->ex.invalidate_rkey); | ||
880 | break; | 882 | break; |
881 | default: | 883 | default: |
882 | PDBG("%s post of type=%d TBD!\n", __func__, | 884 | PDBG("%s post of type=%d TBD!\n", __func__, |
@@ -934,11 +936,13 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |||
934 | spin_lock_irqsave(&qhp->lock, flag); | 936 | spin_lock_irqsave(&qhp->lock, flag); |
935 | if (t4_wq_in_error(&qhp->wq)) { | 937 | if (t4_wq_in_error(&qhp->wq)) { |
936 | spin_unlock_irqrestore(&qhp->lock, flag); | 938 | spin_unlock_irqrestore(&qhp->lock, flag); |
939 | *bad_wr = wr; | ||
937 | return -EINVAL; | 940 | return -EINVAL; |
938 | } | 941 | } |
939 | num_wrs = t4_rq_avail(&qhp->wq); | 942 | num_wrs = t4_rq_avail(&qhp->wq); |
940 | if (num_wrs == 0) { | 943 | if (num_wrs == 0) { |
941 | spin_unlock_irqrestore(&qhp->lock, flag); | 944 | spin_unlock_irqrestore(&qhp->lock, flag); |
945 | *bad_wr = wr; | ||
942 | return -ENOMEM; | 946 | return -ENOMEM; |
943 | } | 947 | } |
944 | while (wr) { | 948 | while (wr) { |
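
The qp.c hunks above make c4iw_post_send()/c4iw_post_receive() set *bad_wr before the early error returns, and route MR invalidation for READ_WITH_INV and LOCAL_INV through the shared helper. The sketch below shows the caller-side *bad_wr convention being restored; the types are stand-ins, not libibverbs.

/* On error the provider must point bad_wr at the first work request that was
 * not posted, so the caller knows where the chain stopped. */
#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct send_wr { int id; struct send_wr *next; };

static int post_send(struct send_wr *wr, struct send_wr **bad_wr, int queue_full)
{
        if (queue_full) {
                *bad_wr = wr;                  /* nothing was posted */
                return -ENOMEM;
        }
        for (; wr; wr = wr->next)
                ;                              /* pretend each WR is posted */
        return 0;
}

int main(void)
{
        struct send_wr w2 = { 2, NULL }, w1 = { 1, &w2 };
        struct send_wr *bad = NULL;

        if (post_send(&w1, &bad, 1))
                printf("failed at wr %d\n", bad->id);   /* "failed at wr 1" */
        return 0;
}
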
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c index a26a9a0bfc41..67ea85a56945 100644 --- a/drivers/infiniband/hw/hfi1/affinity.c +++ b/drivers/infiniband/hw/hfi1/affinity.c | |||
@@ -775,75 +775,3 @@ void hfi1_put_proc_affinity(int cpu) | |||
775 | } | 775 | } |
776 | mutex_unlock(&affinity->lock); | 776 | mutex_unlock(&affinity->lock); |
777 | } | 777 | } |
778 | |||
779 | int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf, | ||
780 | size_t count) | ||
781 | { | ||
782 | struct hfi1_affinity_node *entry; | ||
783 | cpumask_var_t mask; | ||
784 | int ret, i; | ||
785 | |||
786 | mutex_lock(&node_affinity.lock); | ||
787 | entry = node_affinity_lookup(dd->node); | ||
788 | |||
789 | if (!entry) { | ||
790 | ret = -EINVAL; | ||
791 | goto unlock; | ||
792 | } | ||
793 | |||
794 | ret = zalloc_cpumask_var(&mask, GFP_KERNEL); | ||
795 | if (!ret) { | ||
796 | ret = -ENOMEM; | ||
797 | goto unlock; | ||
798 | } | ||
799 | |||
800 | ret = cpulist_parse(buf, mask); | ||
801 | if (ret) | ||
802 | goto out; | ||
803 | |||
804 | if (!cpumask_subset(mask, cpu_online_mask) || cpumask_empty(mask)) { | ||
805 | dd_dev_warn(dd, "Invalid CPU mask\n"); | ||
806 | ret = -EINVAL; | ||
807 | goto out; | ||
808 | } | ||
809 | |||
810 | /* reset the SDMA interrupt affinity details */ | ||
811 | init_cpu_mask_set(&entry->def_intr); | ||
812 | cpumask_copy(&entry->def_intr.mask, mask); | ||
813 | |||
814 | /* Reassign the affinity for each SDMA interrupt. */ | ||
815 | for (i = 0; i < dd->num_msix_entries; i++) { | ||
816 | struct hfi1_msix_entry *msix; | ||
817 | |||
818 | msix = &dd->msix_entries[i]; | ||
819 | if (msix->type != IRQ_SDMA) | ||
820 | continue; | ||
821 | |||
822 | ret = get_irq_affinity(dd, msix); | ||
823 | |||
824 | if (ret) | ||
825 | break; | ||
826 | } | ||
827 | out: | ||
828 | free_cpumask_var(mask); | ||
829 | unlock: | ||
830 | mutex_unlock(&node_affinity.lock); | ||
831 | return ret ? ret : strnlen(buf, PAGE_SIZE); | ||
832 | } | ||
833 | |||
834 | int hfi1_get_sdma_affinity(struct hfi1_devdata *dd, char *buf) | ||
835 | { | ||
836 | struct hfi1_affinity_node *entry; | ||
837 | |||
838 | mutex_lock(&node_affinity.lock); | ||
839 | entry = node_affinity_lookup(dd->node); | ||
840 | |||
841 | if (!entry) { | ||
842 | mutex_unlock(&node_affinity.lock); | ||
843 | return -EINVAL; | ||
844 | } | ||
845 | |||
846 | cpumap_print_to_pagebuf(true, buf, &entry->def_intr.mask); | ||
847 | mutex_unlock(&node_affinity.lock); | ||
848 | return strnlen(buf, PAGE_SIZE); | ||
849 | } | ||
diff --git a/drivers/infiniband/hw/hfi1/affinity.h b/drivers/infiniband/hw/hfi1/affinity.h index b89ea3c0ee1a..42e63316afd1 100644 --- a/drivers/infiniband/hw/hfi1/affinity.h +++ b/drivers/infiniband/hw/hfi1/affinity.h | |||
@@ -102,10 +102,6 @@ int hfi1_get_proc_affinity(int); | |||
102 | /* Release a CPU used by a user process. */ | 102 | /* Release a CPU used by a user process. */ |
103 | void hfi1_put_proc_affinity(int); | 103 | void hfi1_put_proc_affinity(int); |
104 | 104 | ||
105 | int hfi1_get_sdma_affinity(struct hfi1_devdata *dd, char *buf); | ||
106 | int hfi1_set_sdma_affinity(struct hfi1_devdata *dd, const char *buf, | ||
107 | size_t count); | ||
108 | |||
109 | struct hfi1_affinity_node { | 105 | struct hfi1_affinity_node { |
110 | int node; | 106 | int node; |
111 | struct cpu_mask_set def_intr; | 107 | struct cpu_mask_set def_intr; |
diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 9bf5f23544d4..24d0820873cf 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c | |||
@@ -6301,19 +6301,8 @@ void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf) | |||
6301 | /* leave shared count at zero for both global and VL15 */ | 6301 | /* leave shared count at zero for both global and VL15 */ |
6302 | write_global_credit(dd, vau, vl15buf, 0); | 6302 | write_global_credit(dd, vau, vl15buf, 0); |
6303 | 6303 | ||
6304 | /* We may need some credits for another VL when sending packets | 6304 | write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf |
6305 | * with the snoop interface. Dividing it down the middle for VL15 | 6305 | << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT); |
6306 | * and VL0 should suffice. | ||
6307 | */ | ||
6308 | if (unlikely(dd->hfi1_snoop.mode_flag == HFI1_PORT_SNOOP_MODE)) { | ||
6309 | write_csr(dd, SEND_CM_CREDIT_VL15, (u64)(vl15buf >> 1) | ||
6310 | << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT); | ||
6311 | write_csr(dd, SEND_CM_CREDIT_VL, (u64)(vl15buf >> 1) | ||
6312 | << SEND_CM_CREDIT_VL_DEDICATED_LIMIT_VL_SHIFT); | ||
6313 | } else { | ||
6314 | write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf | ||
6315 | << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT); | ||
6316 | } | ||
6317 | } | 6306 | } |
6318 | 6307 | ||
6319 | /* | 6308 | /* |
@@ -9915,9 +9904,6 @@ static void set_lidlmc(struct hfi1_pportdata *ppd) | |||
9915 | u32 mask = ~((1U << ppd->lmc) - 1); | 9904 | u32 mask = ~((1U << ppd->lmc) - 1); |
9916 | u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1); | 9905 | u64 c1 = read_csr(ppd->dd, DCC_CFG_PORT_CONFIG1); |
9917 | 9906 | ||
9918 | if (dd->hfi1_snoop.mode_flag) | ||
9919 | dd_dev_info(dd, "Set lid/lmc while snooping"); | ||
9920 | |||
9921 | c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK | 9907 | c1 &= ~(DCC_CFG_PORT_CONFIG1_TARGET_DLID_SMASK |
9922 | | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK); | 9908 | | DCC_CFG_PORT_CONFIG1_DLID_MASK_SMASK); |
9923 | c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK) | 9909 | c1 |= ((ppd->lid & DCC_CFG_PORT_CONFIG1_TARGET_DLID_MASK) |
@@ -12112,7 +12098,7 @@ static void update_synth_timer(unsigned long opaque) | |||
12112 | mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME); | 12098 | mod_timer(&dd->synth_stats_timer, jiffies + HZ * SYNTH_CNT_TIME); |
12113 | } | 12099 | } |
12114 | 12100 | ||
12115 | #define C_MAX_NAME 13 /* 12 chars + one for \0 */ | 12101 | #define C_MAX_NAME 16 /* 15 chars + one for \0 */ |
12116 | static int init_cntrs(struct hfi1_devdata *dd) | 12102 | static int init_cntrs(struct hfi1_devdata *dd) |
12117 | { | 12103 | { |
12118 | int i, rcv_ctxts, j; | 12104 | int i, rcv_ctxts, j; |
@@ -14463,7 +14449,7 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev, | |||
14463 | * Any error printing is already done by the init code. | 14449 | * Any error printing is already done by the init code. |
14464 | * On return, we have the chip mapped. | 14450 | * On return, we have the chip mapped. |
14465 | */ | 14451 | */ |
14466 | ret = hfi1_pcie_ddinit(dd, pdev, ent); | 14452 | ret = hfi1_pcie_ddinit(dd, pdev); |
14467 | if (ret < 0) | 14453 | if (ret < 0) |
14468 | goto bail_free; | 14454 | goto bail_free; |
14469 | 14455 | ||
@@ -14691,6 +14677,11 @@ struct hfi1_devdata *hfi1_init_dd(struct pci_dev *pdev, | |||
14691 | if (ret) | 14677 | if (ret) |
14692 | goto bail_free_cntrs; | 14678 | goto bail_free_cntrs; |
14693 | 14679 | ||
14680 | init_completion(&dd->user_comp); | ||
14681 | |||
14682 | /* The user refcount starts with one to indicate an active device */ | ||
14683 | atomic_set(&dd->user_refcount, 1); | ||
14684 | |||
14694 | goto bail; | 14685 | goto bail; |
14695 | 14686 | ||
14696 | bail_free_rcverr: | 14687 | bail_free_rcverr: |
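
Among the chip.c changes above, C_MAX_NAME grows from 13 to 16 bytes because counter names are formatted into a fixed-size buffer with snprintf() and longer names were being truncated. The actual name format is not shown in this hunk; the sketch below only demonstrates the truncation a 13-byte buffer produces for a hypothetical 13-character name.

#include <stdio.h>

int main(void)
{
        char small[13], big[16];

        snprintf(small, sizeof(small), "Ctxt%uRcvData", 12u);
        snprintf(big, sizeof(big), "Ctxt%uRcvData", 12u);
        printf("13-byte buffer: \"%s\"\n", small);   /* "Ctxt12RcvDat" (truncated) */
        printf("16-byte buffer: \"%s\"\n", big);     /* "Ctxt12RcvData" (fits) */
        return 0;
}
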
diff --git a/drivers/infiniband/hw/hfi1/chip.h b/drivers/infiniband/hw/hfi1/chip.h index 92345259a8f4..043fd21dc5f3 100644 --- a/drivers/infiniband/hw/hfi1/chip.h +++ b/drivers/infiniband/hw/hfi1/chip.h | |||
@@ -320,6 +320,9 @@ | |||
320 | /* DC_DC8051_CFG_MODE.GENERAL bits */ | 320 | /* DC_DC8051_CFG_MODE.GENERAL bits */ |
321 | #define DISABLE_SELF_GUID_CHECK 0x2 | 321 | #define DISABLE_SELF_GUID_CHECK 0x2 |
322 | 322 | ||
323 | /* Bad L2 frame error code */ | ||
324 | #define BAD_L2_ERR 0x6 | ||
325 | |||
323 | /* | 326 | /* |
324 | * Eager buffer minimum and maximum sizes supported by the hardware. | 327 | * Eager buffer minimum and maximum sizes supported by the hardware. |
325 | * All power-of-two sizes in between are supported as well. | 328 | * All power-of-two sizes in between are supported as well. |
diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c index 6563e4d38b80..c5efff29c147 100644 --- a/drivers/infiniband/hw/hfi1/driver.c +++ b/drivers/infiniband/hw/hfi1/driver.c | |||
@@ -599,7 +599,6 @@ static void __prescan_rxq(struct hfi1_packet *packet) | |||
599 | dd->rhf_offset; | 599 | dd->rhf_offset; |
600 | struct rvt_qp *qp; | 600 | struct rvt_qp *qp; |
601 | struct ib_header *hdr; | 601 | struct ib_header *hdr; |
602 | struct ib_other_headers *ohdr; | ||
603 | struct rvt_dev_info *rdi = &dd->verbs_dev.rdi; | 602 | struct rvt_dev_info *rdi = &dd->verbs_dev.rdi; |
604 | u64 rhf = rhf_to_cpu(rhf_addr); | 603 | u64 rhf = rhf_to_cpu(rhf_addr); |
605 | u32 etype = rhf_rcv_type(rhf), qpn, bth1; | 604 | u32 etype = rhf_rcv_type(rhf), qpn, bth1; |
@@ -615,18 +614,21 @@ static void __prescan_rxq(struct hfi1_packet *packet) | |||
615 | if (etype != RHF_RCV_TYPE_IB) | 614 | if (etype != RHF_RCV_TYPE_IB) |
616 | goto next; | 615 | goto next; |
617 | 616 | ||
618 | hdr = hfi1_get_msgheader(dd, rhf_addr); | 617 | packet->hdr = hfi1_get_msgheader(dd, rhf_addr); |
618 | hdr = packet->hdr; | ||
619 | 619 | ||
620 | lnh = be16_to_cpu(hdr->lrh[0]) & 3; | 620 | lnh = be16_to_cpu(hdr->lrh[0]) & 3; |
621 | 621 | ||
622 | if (lnh == HFI1_LRH_BTH) | 622 | if (lnh == HFI1_LRH_BTH) { |
623 | ohdr = &hdr->u.oth; | 623 | packet->ohdr = &hdr->u.oth; |
624 | else if (lnh == HFI1_LRH_GRH) | 624 | } else if (lnh == HFI1_LRH_GRH) { |
625 | ohdr = &hdr->u.l.oth; | 625 | packet->ohdr = &hdr->u.l.oth; |
626 | else | 626 | packet->rcv_flags |= HFI1_HAS_GRH; |
627 | } else { | ||
627 | goto next; /* just in case */ | 628 | goto next; /* just in case */ |
629 | } | ||
628 | 630 | ||
629 | bth1 = be32_to_cpu(ohdr->bth[1]); | 631 | bth1 = be32_to_cpu(packet->ohdr->bth[1]); |
630 | is_ecn = !!(bth1 & (HFI1_FECN_SMASK | HFI1_BECN_SMASK)); | 632 | is_ecn = !!(bth1 & (HFI1_FECN_SMASK | HFI1_BECN_SMASK)); |
631 | 633 | ||
632 | if (!is_ecn) | 634 | if (!is_ecn) |
@@ -646,7 +648,7 @@ static void __prescan_rxq(struct hfi1_packet *packet) | |||
646 | 648 | ||
647 | /* turn off BECN, FECN */ | 649 | /* turn off BECN, FECN */ |
648 | bth1 &= ~(HFI1_FECN_SMASK | HFI1_BECN_SMASK); | 650 | bth1 &= ~(HFI1_FECN_SMASK | HFI1_BECN_SMASK); |
649 | ohdr->bth[1] = cpu_to_be32(bth1); | 651 | packet->ohdr->bth[1] = cpu_to_be32(bth1); |
650 | next: | 652 | next: |
651 | update_ps_mdata(&mdata, rcd); | 653 | update_ps_mdata(&mdata, rcd); |
652 | } | 654 | } |
@@ -1360,12 +1362,25 @@ int process_receive_ib(struct hfi1_packet *packet) | |||
1360 | 1362 | ||
1361 | int process_receive_bypass(struct hfi1_packet *packet) | 1363 | int process_receive_bypass(struct hfi1_packet *packet) |
1362 | { | 1364 | { |
1365 | struct hfi1_devdata *dd = packet->rcd->dd; | ||
1366 | |||
1363 | if (unlikely(rhf_err_flags(packet->rhf))) | 1367 | if (unlikely(rhf_err_flags(packet->rhf))) |
1364 | handle_eflags(packet); | 1368 | handle_eflags(packet); |
1365 | 1369 | ||
1366 | dd_dev_err(packet->rcd->dd, | 1370 | dd_dev_err(dd, |
1367 | "Bypass packets are not supported in normal operation. Dropping\n"); | 1371 | "Bypass packets are not supported in normal operation. Dropping\n"); |
1368 | incr_cntr64(&packet->rcd->dd->sw_rcv_bypass_packet_errors); | 1372 | incr_cntr64(&dd->sw_rcv_bypass_packet_errors); |
1373 | if (!(dd->err_info_rcvport.status_and_code & OPA_EI_STATUS_SMASK)) { | ||
1374 | u64 *flits = packet->ebuf; | ||
1375 | |||
1376 | if (flits && !(packet->rhf & RHF_LEN_ERR)) { | ||
1377 | dd->err_info_rcvport.packet_flit1 = flits[0]; | ||
1378 | dd->err_info_rcvport.packet_flit2 = | ||
1379 | packet->tlen > sizeof(flits[0]) ? flits[1] : 0; | ||
1380 | } | ||
1381 | dd->err_info_rcvport.status_and_code |= | ||
1382 | (OPA_EI_STATUS_SMASK | BAD_L2_ERR); | ||
1383 | } | ||
1369 | return RHF_RCV_CONTINUE; | 1384 | return RHF_RCV_CONTINUE; |
1370 | } | 1385 | } |
1371 | 1386 | ||
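
process_receive_bypass() now records the first one or two flits of an unexpected bypass packet and latches a "bad L2" code into err_info_rcvport so management software can inspect the offender. Below is a user-space model of that capture logic; the status bit value and the structure are stand-ins for the OPA error-info registers.

/* Record the first flit always, the second only if the packet is long enough,
 * and latch a status bit so later bad packets do not overwrite the first one. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EI_STATUS_VALID  (1u << 7)   /* stand-in for OPA_EI_STATUS_SMASK */
#define EI_BAD_L2_CODE   0x6         /* stand-in for BAD_L2_ERR */

struct err_info { uint8_t status_and_code; uint64_t flit1, flit2; };

static void record_bad_l2(struct err_info *ei, const uint64_t *flits, size_t tlen)
{
        if (ei->status_and_code & EI_STATUS_VALID)
                return;                         /* keep the first offender */
        if (flits) {
                ei->flit1 = flits[0];
                ei->flit2 = tlen > sizeof(flits[0]) ? flits[1] : 0;
        }
        ei->status_and_code |= EI_STATUS_VALID | EI_BAD_L2_CODE;
}

int main(void)
{
        struct err_info ei;
        uint64_t pkt[2] = { 0x1111, 0x2222 };

        memset(&ei, 0, sizeof(ei));
        record_bad_l2(&ei, pkt, sizeof(pkt));
        printf("status=0x%x flit1=0x%llx flit2=0x%llx\n",
               (unsigned)ei.status_and_code,
               (unsigned long long)ei.flit1, (unsigned long long)ei.flit2);
        return 0;
}
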
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index 677efa0e8cd6..bd786b7bd30b 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c | |||
@@ -172,6 +172,9 @@ static int hfi1_file_open(struct inode *inode, struct file *fp) | |||
172 | struct hfi1_devdata, | 172 | struct hfi1_devdata, |
173 | user_cdev); | 173 | user_cdev); |
174 | 174 | ||
175 | if (!atomic_inc_not_zero(&dd->user_refcount)) | ||
176 | return -ENXIO; | ||
177 | |||
175 | /* Just take a ref now. Not all opens result in a context assign */ | 178 | /* Just take a ref now. Not all opens result in a context assign */ |
176 | kobject_get(&dd->kobj); | 179 | kobject_get(&dd->kobj); |
177 | 180 | ||
@@ -183,11 +186,17 @@ static int hfi1_file_open(struct inode *inode, struct file *fp) | |||
183 | fd->rec_cpu_num = -1; /* no cpu affinity by default */ | 186 | fd->rec_cpu_num = -1; /* no cpu affinity by default */ |
184 | fd->mm = current->mm; | 187 | fd->mm = current->mm; |
185 | atomic_inc(&fd->mm->mm_count); | 188 | atomic_inc(&fd->mm->mm_count); |
186 | } | 189 | fp->private_data = fd; |
190 | } else { | ||
191 | fp->private_data = NULL; | ||
192 | |||
193 | if (atomic_dec_and_test(&dd->user_refcount)) | ||
194 | complete(&dd->user_comp); | ||
187 | 195 | ||
188 | fp->private_data = fd; | 196 | return -ENOMEM; |
197 | } | ||
189 | 198 | ||
190 | return fd ? 0 : -ENOMEM; | 199 | return 0; |
191 | } | 200 | } |
192 | 201 | ||
193 | static long hfi1_file_ioctl(struct file *fp, unsigned int cmd, | 202 | static long hfi1_file_ioctl(struct file *fp, unsigned int cmd, |
@@ -798,6 +807,10 @@ static int hfi1_file_close(struct inode *inode, struct file *fp) | |||
798 | done: | 807 | done: |
799 | mmdrop(fdata->mm); | 808 | mmdrop(fdata->mm); |
800 | kobject_put(&dd->kobj); | 809 | kobject_put(&dd->kobj); |
810 | |||
811 | if (atomic_dec_and_test(&dd->user_refcount)) | ||
812 | complete(&dd->user_comp); | ||
813 | |||
801 | kfree(fdata); | 814 | kfree(fdata); |
802 | return 0; | 815 | return 0; |
803 | } | 816 | } |
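
The file_ops.c hunks tie every open of the hfi1 character device to dd->user_refcount: open succeeds only while the count is non-zero, and the last drop completes dd->user_comp so device removal can proceed. A pthread-based user-space model of that pattern, with atomic_inc_not_zero() and complete() replaced by a mutex and condition variable:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  all_closed = PTHREAD_COND_INITIALIZER;
static int user_refcount = 1;           /* "device is alive" reference */

static int dev_open(void)
{
        int ok;

        pthread_mutex_lock(&lock);
        ok = user_refcount > 0;         /* inc_not_zero(): fail once it hit zero */
        if (ok)
                user_refcount++;
        pthread_mutex_unlock(&lock);
        return ok ? 0 : -1;
}

static void dev_put(void)
{
        pthread_mutex_lock(&lock);
        if (--user_refcount == 0)
                pthread_cond_signal(&all_closed);   /* complete() */
        pthread_mutex_unlock(&lock);
}

static void dev_remove(void)
{
        pthread_mutex_lock(&lock);
        user_refcount--;                            /* drop the initial reference */
        while (user_refcount > 0)                   /* wait for open users */
                pthread_cond_wait(&all_closed, &lock);
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        printf("open: %s\n", dev_open() ? "failed" : "ok");             /* ok */
        dev_put();                                  /* the user closes the device */
        dev_remove();                               /* no users left, returns at once */
        printf("open after remove: %s\n", dev_open() ? "failed" : "ok"); /* failed */
        return 0;
}
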
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h index 7eef11b316ff..cc87fd4e534b 100644 --- a/drivers/infiniband/hw/hfi1/hfi.h +++ b/drivers/infiniband/hw/hfi1/hfi.h | |||
@@ -367,26 +367,6 @@ struct hfi1_packet { | |||
367 | u8 etype; | 367 | u8 etype; |
368 | }; | 368 | }; |
369 | 369 | ||
370 | /* | ||
371 | * Private data for snoop/capture support. | ||
372 | */ | ||
373 | struct hfi1_snoop_data { | ||
374 | int mode_flag; | ||
375 | struct cdev cdev; | ||
376 | struct device *class_dev; | ||
377 | /* protect snoop data */ | ||
378 | spinlock_t snoop_lock; | ||
379 | struct list_head queue; | ||
380 | wait_queue_head_t waitq; | ||
381 | void *filter_value; | ||
382 | int (*filter_callback)(void *hdr, void *data, void *value); | ||
383 | u64 dcc_cfg; /* saved value of DCC Cfg register */ | ||
384 | }; | ||
385 | |||
386 | /* snoop mode_flag values */ | ||
387 | #define HFI1_PORT_SNOOP_MODE 1U | ||
388 | #define HFI1_PORT_CAPTURE_MODE 2U | ||
389 | |||
390 | struct rvt_sge_state; | 370 | struct rvt_sge_state; |
391 | 371 | ||
392 | /* | 372 | /* |
@@ -613,8 +593,6 @@ struct hfi1_pportdata { | |||
613 | struct mutex hls_lock; | 593 | struct mutex hls_lock; |
614 | u32 host_link_state; | 594 | u32 host_link_state; |
615 | 595 | ||
616 | spinlock_t sdma_alllock ____cacheline_aligned_in_smp; | ||
617 | |||
618 | u32 lstate; /* logical link state */ | 596 | u32 lstate; /* logical link state */ |
619 | 597 | ||
620 | /* these are the "32 bit" regs */ | 598 | /* these are the "32 bit" regs */ |
@@ -1104,8 +1082,6 @@ struct hfi1_devdata { | |||
1104 | char *portcntrnames; | 1082 | char *portcntrnames; |
1105 | size_t portcntrnameslen; | 1083 | size_t portcntrnameslen; |
1106 | 1084 | ||
1107 | struct hfi1_snoop_data hfi1_snoop; | ||
1108 | |||
1109 | struct err_info_rcvport err_info_rcvport; | 1085 | struct err_info_rcvport err_info_rcvport; |
1110 | struct err_info_constraint err_info_rcv_constraint; | 1086 | struct err_info_constraint err_info_rcv_constraint; |
1111 | struct err_info_constraint err_info_xmit_constraint; | 1087 | struct err_info_constraint err_info_xmit_constraint; |
@@ -1141,8 +1117,8 @@ struct hfi1_devdata { | |||
1141 | rhf_rcv_function_ptr normal_rhf_rcv_functions[8]; | 1117 | rhf_rcv_function_ptr normal_rhf_rcv_functions[8]; |
1142 | 1118 | ||
1143 | /* | 1119 | /* |
1144 | * Handlers for outgoing data so that snoop/capture does not | 1120 | * Capability to have different send engines simply by changing a |
1145 | * have to have its hooks in the send path | 1121 | * pointer value. |
1146 | */ | 1122 | */ |
1147 | send_routine process_pio_send; | 1123 | send_routine process_pio_send; |
1148 | send_routine process_dma_send; | 1124 | send_routine process_dma_send; |
@@ -1174,6 +1150,10 @@ struct hfi1_devdata { | |||
1174 | spinlock_t aspm_lock; | 1150 | spinlock_t aspm_lock; |
1175 | /* Number of verbs contexts which have disabled ASPM */ | 1151 | /* Number of verbs contexts which have disabled ASPM */ |
1176 | atomic_t aspm_disabled_cnt; | 1152 | atomic_t aspm_disabled_cnt; |
1153 | /* Keeps track of user space clients */ | ||
1154 | atomic_t user_refcount; | ||
1155 | /* Used to wait for outstanding user space clients before dev removal */ | ||
1156 | struct completion user_comp; | ||
1177 | 1157 | ||
1178 | struct hfi1_affinity *affinity; | 1158 | struct hfi1_affinity *affinity; |
1179 | struct rhashtable sdma_rht; | 1159 | struct rhashtable sdma_rht; |
@@ -1221,8 +1201,6 @@ struct hfi1_devdata *hfi1_lookup(int unit); | |||
1221 | extern u32 hfi1_cpulist_count; | 1201 | extern u32 hfi1_cpulist_count; |
1222 | extern unsigned long *hfi1_cpulist; | 1202 | extern unsigned long *hfi1_cpulist; |
1223 | 1203 | ||
1224 | extern unsigned int snoop_drop_send; | ||
1225 | extern unsigned int snoop_force_capture; | ||
1226 | int hfi1_init(struct hfi1_devdata *, int); | 1204 | int hfi1_init(struct hfi1_devdata *, int); |
1227 | int hfi1_count_units(int *npresentp, int *nupp); | 1205 | int hfi1_count_units(int *npresentp, int *nupp); |
1228 | int hfi1_count_active_units(void); | 1206 | int hfi1_count_active_units(void); |
@@ -1557,13 +1535,6 @@ void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf); | |||
1557 | void reset_link_credits(struct hfi1_devdata *dd); | 1535 | void reset_link_credits(struct hfi1_devdata *dd); |
1558 | void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu); | 1536 | void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu); |
1559 | 1537 | ||
1560 | int snoop_recv_handler(struct hfi1_packet *packet); | ||
1561 | int snoop_send_dma_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps, | ||
1562 | u64 pbc); | ||
1563 | int snoop_send_pio_handler(struct rvt_qp *qp, struct hfi1_pkt_state *ps, | ||
1564 | u64 pbc); | ||
1565 | void snoop_inline_pio_send(struct hfi1_devdata *dd, struct pio_buf *pbuf, | ||
1566 | u64 pbc, const void *from, size_t count); | ||
1567 | int set_buffer_control(struct hfi1_pportdata *ppd, struct buffer_control *bc); | 1538 | int set_buffer_control(struct hfi1_pportdata *ppd, struct buffer_control *bc); |
1568 | 1539 | ||
1569 | static inline struct hfi1_devdata *dd_from_ppd(struct hfi1_pportdata *ppd) | 1540 | static inline struct hfi1_devdata *dd_from_ppd(struct hfi1_pportdata *ppd) |
@@ -1763,8 +1734,7 @@ int qsfp_dump(struct hfi1_pportdata *ppd, char *buf, int len); | |||
1763 | 1734 | ||
1764 | int hfi1_pcie_init(struct pci_dev *, const struct pci_device_id *); | 1735 | int hfi1_pcie_init(struct pci_dev *, const struct pci_device_id *); |
1765 | void hfi1_pcie_cleanup(struct pci_dev *); | 1736 | void hfi1_pcie_cleanup(struct pci_dev *); |
1766 | int hfi1_pcie_ddinit(struct hfi1_devdata *, struct pci_dev *, | 1737 | int hfi1_pcie_ddinit(struct hfi1_devdata *, struct pci_dev *); |
1767 | const struct pci_device_id *); | ||
1768 | void hfi1_pcie_ddcleanup(struct hfi1_devdata *); | 1738 | void hfi1_pcie_ddcleanup(struct hfi1_devdata *); |
1769 | void hfi1_pcie_flr(struct hfi1_devdata *); | 1739 | void hfi1_pcie_flr(struct hfi1_devdata *); |
1770 | int pcie_speeds(struct hfi1_devdata *); | 1740 | int pcie_speeds(struct hfi1_devdata *); |
@@ -1799,8 +1769,6 @@ int kdeth_process_expected(struct hfi1_packet *packet); | |||
1799 | int kdeth_process_eager(struct hfi1_packet *packet); | 1769 | int kdeth_process_eager(struct hfi1_packet *packet); |
1800 | int process_receive_invalid(struct hfi1_packet *packet); | 1770 | int process_receive_invalid(struct hfi1_packet *packet); |
1801 | 1771 | ||
1802 | extern rhf_rcv_function_ptr snoop_rhf_rcv_functions[8]; | ||
1803 | |||
1804 | void update_sge(struct rvt_sge_state *ss, u32 length); | 1772 | void update_sge(struct rvt_sge_state *ss, u32 length); |
1805 | 1773 | ||
1806 | /* global module parameter variables */ | 1774 | /* global module parameter variables */ |
@@ -1827,9 +1795,6 @@ extern struct mutex hfi1_mutex; | |||
1827 | #define DRIVER_NAME "hfi1" | 1795 | #define DRIVER_NAME "hfi1" |
1828 | #define HFI1_USER_MINOR_BASE 0 | 1796 | #define HFI1_USER_MINOR_BASE 0 |
1829 | #define HFI1_TRACE_MINOR 127 | 1797 | #define HFI1_TRACE_MINOR 127 |
1830 | #define HFI1_DIAGPKT_MINOR 128 | ||
1831 | #define HFI1_DIAG_MINOR_BASE 129 | ||
1832 | #define HFI1_SNOOP_CAPTURE_BASE 200 | ||
1833 | #define HFI1_NMINORS 255 | 1798 | #define HFI1_NMINORS 255 |
1834 | 1799 | ||
1835 | #define PCI_VENDOR_ID_INTEL 0x8086 | 1800 | #define PCI_VENDOR_ID_INTEL 0x8086 |
@@ -1848,7 +1813,13 @@ extern struct mutex hfi1_mutex; | |||
1848 | static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd, | 1813 | static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd, |
1849 | u16 ctxt_type) | 1814 | u16 ctxt_type) |
1850 | { | 1815 | { |
1851 | u64 base_sc_integrity = | 1816 | u64 base_sc_integrity; |
1817 | |||
1818 | /* No integrity checks if HFI1_CAP_NO_INTEGRITY is set */ | ||
1819 | if (HFI1_CAP_IS_KSET(NO_INTEGRITY)) | ||
1820 | return 0; | ||
1821 | |||
1822 | base_sc_integrity = | ||
1852 | SEND_CTXT_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK | 1823 | SEND_CTXT_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK |
1853 | | SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK | 1824 | | SEND_CTXT_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK |
1854 | | SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK | 1825 | | SEND_CTXT_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK |
@@ -1863,7 +1834,6 @@ static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd, | |||
1863 | | SEND_CTXT_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK | 1834 | | SEND_CTXT_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK |
1864 | | SEND_CTXT_CHECK_ENABLE_CHECK_OPCODE_SMASK | 1835 | | SEND_CTXT_CHECK_ENABLE_CHECK_OPCODE_SMASK |
1865 | | SEND_CTXT_CHECK_ENABLE_CHECK_SLID_SMASK | 1836 | | SEND_CTXT_CHECK_ENABLE_CHECK_SLID_SMASK |
1866 | | SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK | ||
1867 | | SEND_CTXT_CHECK_ENABLE_CHECK_VL_SMASK | 1837 | | SEND_CTXT_CHECK_ENABLE_CHECK_VL_SMASK |
1868 | | SEND_CTXT_CHECK_ENABLE_CHECK_ENABLE_SMASK; | 1838 | | SEND_CTXT_CHECK_ENABLE_CHECK_ENABLE_SMASK; |
1869 | 1839 | ||
@@ -1872,18 +1842,23 @@ static inline u64 hfi1_pkt_default_send_ctxt_mask(struct hfi1_devdata *dd, | |||
1872 | else | 1842 | else |
1873 | base_sc_integrity |= HFI1_PKT_KERNEL_SC_INTEGRITY; | 1843 | base_sc_integrity |= HFI1_PKT_KERNEL_SC_INTEGRITY; |
1874 | 1844 | ||
1875 | if (is_ax(dd)) | 1845 | /* turn on send-side job key checks if !A0 */ |
1876 | /* turn off send-side job key checks - A0 */ | 1846 | if (!is_ax(dd)) |
1877 | return base_sc_integrity & | 1847 | base_sc_integrity |= SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK; |
1878 | ~SEND_CTXT_CHECK_ENABLE_CHECK_JOB_KEY_SMASK; | 1848 | |
1879 | return base_sc_integrity; | 1849 | return base_sc_integrity; |
1880 | } | 1850 | } |
1881 | 1851 | ||
1882 | static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd) | 1852 | static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd) |
1883 | { | 1853 | { |
1884 | u64 base_sdma_integrity = | 1854 | u64 base_sdma_integrity; |
1855 | |||
1856 | /* No integrity checks if HFI1_CAP_NO_INTEGRITY is set */ | ||
1857 | if (HFI1_CAP_IS_KSET(NO_INTEGRITY)) | ||
1858 | return 0; | ||
1859 | |||
1860 | base_sdma_integrity = | ||
1885 | SEND_DMA_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK | 1861 | SEND_DMA_CHECK_ENABLE_DISALLOW_BYPASS_BAD_PKT_LEN_SMASK |
1886 | | SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK | ||
1887 | | SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK | 1862 | | SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_BYPASS_PACKETS_SMASK |
1888 | | SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_IB_PACKETS_SMASK | 1863 | | SEND_DMA_CHECK_ENABLE_DISALLOW_TOO_LONG_IB_PACKETS_SMASK |
1889 | | SEND_DMA_CHECK_ENABLE_DISALLOW_BAD_PKT_LEN_SMASK | 1864 | | SEND_DMA_CHECK_ENABLE_DISALLOW_BAD_PKT_LEN_SMASK |
@@ -1895,14 +1870,18 @@ static inline u64 hfi1_pkt_base_sdma_integrity(struct hfi1_devdata *dd) | |||
1895 | | SEND_DMA_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK | 1870 | | SEND_DMA_CHECK_ENABLE_CHECK_VL_MAPPING_SMASK |
1896 | | SEND_DMA_CHECK_ENABLE_CHECK_OPCODE_SMASK | 1871 | | SEND_DMA_CHECK_ENABLE_CHECK_OPCODE_SMASK |
1897 | | SEND_DMA_CHECK_ENABLE_CHECK_SLID_SMASK | 1872 | | SEND_DMA_CHECK_ENABLE_CHECK_SLID_SMASK |
1898 | | SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK | ||
1899 | | SEND_DMA_CHECK_ENABLE_CHECK_VL_SMASK | 1873 | | SEND_DMA_CHECK_ENABLE_CHECK_VL_SMASK |
1900 | | SEND_DMA_CHECK_ENABLE_CHECK_ENABLE_SMASK; | 1874 | | SEND_DMA_CHECK_ENABLE_CHECK_ENABLE_SMASK; |
1901 | 1875 | ||
1902 | if (is_ax(dd)) | 1876 | if (!HFI1_CAP_IS_KSET(STATIC_RATE_CTRL)) |
1903 | /* turn off send-side job key checks - A0 */ | 1877 | base_sdma_integrity |= |
1904 | return base_sdma_integrity & | 1878 | SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK; |
1905 | ~SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK; | 1879 | |
1880 | /* turn on send-side job key checks if !A0 */ | ||
1881 | if (!is_ax(dd)) | ||
1882 | base_sdma_integrity |= | ||
1883 | SEND_DMA_CHECK_ENABLE_CHECK_JOB_KEY_SMASK; | ||
1884 | |||
1906 | return base_sdma_integrity; | 1885 | return base_sdma_integrity; |
1907 | } | 1886 | } |
1908 | 1887 | ||
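
The rewritten mask helpers in hfi.h now return 0 up front when the NO_INTEGRITY capability is set, and OR the job-key check in only on non-A0 silicon, instead of building the full mask and masking bits back out. A compact restatement of that shape with made-up bit values:

#include <stdint.h>
#include <stdio.h>

#define CHK_PKT_LEN   (1u << 0)
#define CHK_OPCODE    (1u << 1)
#define CHK_JOB_KEY   (1u << 2)

static uint32_t sdma_integrity_mask(int no_integrity, int is_a0_silicon)
{
        uint32_t mask;

        if (no_integrity)
                return 0;                       /* everything disabled at once */

        mask = CHK_PKT_LEN | CHK_OPCODE;        /* always-on checks */
        if (!is_a0_silicon)
                mask |= CHK_JOB_KEY;            /* job-key check only past A0 */
        return mask;
}

int main(void)
{
        printf("A0:  0x%x\n", sdma_integrity_mask(0, 1));
        printf("B0+: 0x%x\n", sdma_integrity_mask(0, 0));
        printf("off: 0x%x\n", sdma_integrity_mask(1, 0));
        return 0;
}
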
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c index 60db61536fed..e3b5bc93bc70 100644 --- a/drivers/infiniband/hw/hfi1/init.c +++ b/drivers/infiniband/hw/hfi1/init.c | |||
@@ -144,6 +144,8 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd) | |||
144 | struct hfi1_ctxtdata *rcd; | 144 | struct hfi1_ctxtdata *rcd; |
145 | 145 | ||
146 | ppd = dd->pport + (i % dd->num_pports); | 146 | ppd = dd->pport + (i % dd->num_pports); |
147 | |||
148 | /* dd->rcd[i] gets assigned inside the callee */ | ||
147 | rcd = hfi1_create_ctxtdata(ppd, i, dd->node); | 149 | rcd = hfi1_create_ctxtdata(ppd, i, dd->node); |
148 | if (!rcd) { | 150 | if (!rcd) { |
149 | dd_dev_err(dd, | 151 | dd_dev_err(dd, |
@@ -169,8 +171,6 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd) | |||
169 | if (!rcd->sc) { | 171 | if (!rcd->sc) { |
170 | dd_dev_err(dd, | 172 | dd_dev_err(dd, |
171 | "Unable to allocate kernel send context, failing\n"); | 173 | "Unable to allocate kernel send context, failing\n"); |
172 | dd->rcd[rcd->ctxt] = NULL; | ||
173 | hfi1_free_ctxtdata(dd, rcd); | ||
174 | goto nomem; | 174 | goto nomem; |
175 | } | 175 | } |
176 | 176 | ||
@@ -178,9 +178,6 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd) | |||
178 | if (ret < 0) { | 178 | if (ret < 0) { |
179 | dd_dev_err(dd, | 179 | dd_dev_err(dd, |
180 | "Failed to setup kernel receive context, failing\n"); | 180 | "Failed to setup kernel receive context, failing\n"); |
181 | sc_free(rcd->sc); | ||
182 | dd->rcd[rcd->ctxt] = NULL; | ||
183 | hfi1_free_ctxtdata(dd, rcd); | ||
184 | ret = -EFAULT; | 181 | ret = -EFAULT; |
185 | goto bail; | 182 | goto bail; |
186 | } | 183 | } |
@@ -196,6 +193,10 @@ int hfi1_create_ctxts(struct hfi1_devdata *dd) | |||
196 | nomem: | 193 | nomem: |
197 | ret = -ENOMEM; | 194 | ret = -ENOMEM; |
198 | bail: | 195 | bail: |
196 | if (dd->rcd) { | ||
197 | for (i = 0; i < dd->num_rcv_contexts; ++i) | ||
198 | hfi1_free_ctxtdata(dd, dd->rcd[i]); | ||
199 | } | ||
199 | kfree(dd->rcd); | 200 | kfree(dd->rcd); |
200 | dd->rcd = NULL; | 201 | dd->rcd = NULL; |
201 | return ret; | 202 | return ret; |
@@ -216,7 +217,7 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt, | |||
216 | dd->num_rcv_contexts - dd->first_user_ctxt) | 217 | dd->num_rcv_contexts - dd->first_user_ctxt) |
217 | kctxt_ngroups = (dd->rcv_entries.nctxt_extra - | 218 | kctxt_ngroups = (dd->rcv_entries.nctxt_extra - |
218 | (dd->num_rcv_contexts - dd->first_user_ctxt)); | 219 | (dd->num_rcv_contexts - dd->first_user_ctxt)); |
219 | rcd = kzalloc(sizeof(*rcd), GFP_KERNEL); | 220 | rcd = kzalloc_node(sizeof(*rcd), GFP_KERNEL, numa); |
220 | if (rcd) { | 221 | if (rcd) { |
221 | u32 rcvtids, max_entries; | 222 | u32 rcvtids, max_entries; |
222 | 223 | ||
@@ -261,13 +262,6 @@ struct hfi1_ctxtdata *hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, u32 ctxt, | |||
261 | } | 262 | } |
262 | rcd->eager_base = base * dd->rcv_entries.group_size; | 263 | rcd->eager_base = base * dd->rcv_entries.group_size; |
263 | 264 | ||
264 | /* Validate and initialize Rcv Hdr Q variables */ | ||
265 | if (rcvhdrcnt % HDRQ_INCREMENT) { | ||
266 | dd_dev_err(dd, | ||
267 | "ctxt%u: header queue count %d must be divisible by %lu\n", | ||
268 | rcd->ctxt, rcvhdrcnt, HDRQ_INCREMENT); | ||
269 | goto bail; | ||
270 | } | ||
271 | rcd->rcvhdrq_cnt = rcvhdrcnt; | 265 | rcd->rcvhdrq_cnt = rcvhdrcnt; |
272 | rcd->rcvhdrqentsize = hfi1_hdrq_entsize; | 266 | rcd->rcvhdrqentsize = hfi1_hdrq_entsize; |
273 | /* | 267 | /* |
@@ -506,7 +500,6 @@ void hfi1_init_pportdata(struct pci_dev *pdev, struct hfi1_pportdata *ppd, | |||
506 | INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event); | 500 | INIT_WORK(&ppd->qsfp_info.qsfp_work, qsfp_event); |
507 | 501 | ||
508 | mutex_init(&ppd->hls_lock); | 502 | mutex_init(&ppd->hls_lock); |
509 | spin_lock_init(&ppd->sdma_alllock); | ||
510 | spin_lock_init(&ppd->qsfp_info.qsfp_lock); | 503 | spin_lock_init(&ppd->qsfp_info.qsfp_lock); |
511 | 504 | ||
512 | ppd->qsfp_info.ppd = ppd; | 505 | ppd->qsfp_info.ppd = ppd; |
@@ -1399,28 +1392,43 @@ static void postinit_cleanup(struct hfi1_devdata *dd) | |||
1399 | hfi1_free_devdata(dd); | 1392 | hfi1_free_devdata(dd); |
1400 | } | 1393 | } |
1401 | 1394 | ||
1395 | static int init_validate_rcvhdrcnt(struct device *dev, uint thecnt) | ||
1396 | { | ||
1397 | if (thecnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) { | ||
1398 | hfi1_early_err(dev, "Receive header queue count too small\n"); | ||
1399 | return -EINVAL; | ||
1400 | } | ||
1401 | |||
1402 | if (thecnt > HFI1_MAX_HDRQ_EGRBUF_CNT) { | ||
1403 | hfi1_early_err(dev, | ||
1404 | "Receive header queue count cannot be greater than %u\n", | ||
1405 | HFI1_MAX_HDRQ_EGRBUF_CNT); | ||
1406 | return -EINVAL; | ||
1407 | } | ||
1408 | |||
1409 | if (thecnt % HDRQ_INCREMENT) { | ||
1410 | hfi1_early_err(dev, "Receive header queue count %d must be divisible by %lu\n", | ||
1411 | thecnt, HDRQ_INCREMENT); | ||
1412 | return -EINVAL; | ||
1413 | } | ||
1414 | |||
1415 | return 0; | ||
1416 | } | ||
1417 | |||
1402 | static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | 1418 | static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) |
1403 | { | 1419 | { |
1404 | int ret = 0, j, pidx, initfail; | 1420 | int ret = 0, j, pidx, initfail; |
1405 | struct hfi1_devdata *dd = ERR_PTR(-EINVAL); | 1421 | struct hfi1_devdata *dd; |
1406 | struct hfi1_pportdata *ppd; | 1422 | struct hfi1_pportdata *ppd; |
1407 | 1423 | ||
1408 | /* First, lock the non-writable module parameters */ | 1424 | /* First, lock the non-writable module parameters */ |
1409 | HFI1_CAP_LOCK(); | 1425 | HFI1_CAP_LOCK(); |
1410 | 1426 | ||
1411 | /* Validate some global module parameters */ | 1427 | /* Validate some global module parameters */ |
1412 | if (rcvhdrcnt <= HFI1_MIN_HDRQ_EGRBUF_CNT) { | 1428 | ret = init_validate_rcvhdrcnt(&pdev->dev, rcvhdrcnt); |
1413 | hfi1_early_err(&pdev->dev, "Header queue count too small\n"); | 1429 | if (ret) |
1414 | ret = -EINVAL; | ||
1415 | goto bail; | ||
1416 | } | ||
1417 | if (rcvhdrcnt > HFI1_MAX_HDRQ_EGRBUF_CNT) { | ||
1418 | hfi1_early_err(&pdev->dev, | ||
1419 | "Receive header queue count cannot be greater than %u\n", | ||
1420 | HFI1_MAX_HDRQ_EGRBUF_CNT); | ||
1421 | ret = -EINVAL; | ||
1422 | goto bail; | 1430 | goto bail; |
1423 | } | 1431 | |
1424 | /* use the encoding function as a sanitization check */ | 1432 | /* use the encoding function as a sanitization check */ |
1425 | if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) { | 1433 | if (!encode_rcv_header_entry_size(hfi1_hdrq_entsize)) { |
1426 | hfi1_early_err(&pdev->dev, "Invalid HdrQ Entry size %u\n", | 1434 | hfi1_early_err(&pdev->dev, "Invalid HdrQ Entry size %u\n", |
@@ -1461,26 +1469,25 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1461 | if (ret) | 1469 | if (ret) |
1462 | goto bail; | 1470 | goto bail; |
1463 | 1471 | ||
1464 | /* | 1472 | if (!(ent->device == PCI_DEVICE_ID_INTEL0 || |
1465 | * Do device-specific initialization, function table setup, dd | 1473 | ent->device == PCI_DEVICE_ID_INTEL1)) { |
1466 | * allocation, etc. | ||
1467 | */ | ||
1468 | switch (ent->device) { | ||
1469 | case PCI_DEVICE_ID_INTEL0: | ||
1470 | case PCI_DEVICE_ID_INTEL1: | ||
1471 | dd = hfi1_init_dd(pdev, ent); | ||
1472 | break; | ||
1473 | default: | ||
1474 | hfi1_early_err(&pdev->dev, | 1474 | hfi1_early_err(&pdev->dev, |
1475 | "Failing on unknown Intel deviceid 0x%x\n", | 1475 | "Failing on unknown Intel deviceid 0x%x\n", |
1476 | ent->device); | 1476 | ent->device); |
1477 | ret = -ENODEV; | 1477 | ret = -ENODEV; |
1478 | goto clean_bail; | ||
1478 | } | 1479 | } |
1479 | 1480 | ||
1480 | if (IS_ERR(dd)) | 1481 | /* |
1482 | * Do device-specific initialization, function table setup, dd | ||
1483 | * allocation, etc. | ||
1484 | */ | ||
1485 | dd = hfi1_init_dd(pdev, ent); | ||
1486 | |||
1487 | if (IS_ERR(dd)) { | ||
1481 | ret = PTR_ERR(dd); | 1488 | ret = PTR_ERR(dd); |
1482 | if (ret) | ||
1483 | goto clean_bail; /* error already printed */ | 1489 | goto clean_bail; /* error already printed */ |
1490 | } | ||
1484 | 1491 | ||
1485 | ret = create_workqueues(dd); | 1492 | ret = create_workqueues(dd); |
1486 | if (ret) | 1493 | if (ret) |
@@ -1538,12 +1545,31 @@ bail: | |||
1538 | return ret; | 1545 | return ret; |
1539 | } | 1546 | } |
1540 | 1547 | ||
1548 | static void wait_for_clients(struct hfi1_devdata *dd) | ||
1549 | { | ||
1550 | /* | ||
1551 | * Remove the device init value and complete the device if there are | ||
1552 | * no clients, or wait for active clients to finish. | ||
1553 | */ | ||
1554 | if (atomic_dec_and_test(&dd->user_refcount)) | ||
1555 | complete(&dd->user_comp); | ||
1556 | |||
1557 | wait_for_completion(&dd->user_comp); | ||
1558 | } | ||
1559 | |||
1541 | static void remove_one(struct pci_dev *pdev) | 1560 | static void remove_one(struct pci_dev *pdev) |
1542 | { | 1561 | { |
1543 | struct hfi1_devdata *dd = pci_get_drvdata(pdev); | 1562 | struct hfi1_devdata *dd = pci_get_drvdata(pdev); |
1544 | 1563 | ||
1545 | /* close debugfs files before ib unregister */ | 1564 | /* close debugfs files before ib unregister */ |
1546 | hfi1_dbg_ibdev_exit(&dd->verbs_dev); | 1565 | hfi1_dbg_ibdev_exit(&dd->verbs_dev); |
1566 | |||
1567 | /* remove the /dev hfi1 interface */ | ||
1568 | hfi1_device_remove(dd); | ||
1569 | |||
1570 | /* wait for existing user space clients to finish */ | ||
1571 | wait_for_clients(dd); | ||
1572 | |||
1547 | /* unregister from IB core */ | 1573 | /* unregister from IB core */ |
1548 | hfi1_unregister_ib_device(dd); | 1574 | hfi1_unregister_ib_device(dd); |
1549 | 1575 | ||
@@ -1558,8 +1584,6 @@ static void remove_one(struct pci_dev *pdev) | |||
1558 | /* wait until all of our (qsfp) queue_work() calls complete */ | 1584 | /* wait until all of our (qsfp) queue_work() calls complete */ |
1559 | flush_workqueue(ib_wq); | 1585 | flush_workqueue(ib_wq); |
1560 | 1586 | ||
1561 | hfi1_device_remove(dd); | ||
1562 | |||
1563 | postinit_cleanup(dd); | 1587 | postinit_cleanup(dd); |
1564 | } | 1588 | } |
1565 | 1589 | ||
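
The new wait_for_clients() above pairs an atomic reference count with a completion: remove_one() first removes the /dev interface so no new clients can open the device, then drops the reference taken at init time and, if user space still holds references, blocks until the last client releases it. A minimal userspace sketch of that pattern, assuming C11 atomics and POSIX threads (put_ref() and the variable names are illustrative, not taken from the driver):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int user_refcount = 1;    /* starts at 1: the init-time reference */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t user_comp = PTHREAD_COND_INITIALIZER;
    static int completed;

    static void put_ref(void)
    {
            /* atomic_fetch_sub() returns the old value; 1 means this was the last reference */
            if (atomic_fetch_sub(&user_refcount, 1) == 1) {
                    pthread_mutex_lock(&lock);
                    completed = 1;
                    pthread_cond_broadcast(&user_comp);
                    pthread_mutex_unlock(&lock);
            }
    }

    static void wait_for_clients(void)
    {
            put_ref();                      /* drop the init-time reference */
            pthread_mutex_lock(&lock);
            while (!completed)              /* wait until the last client has gone */
                    pthread_cond_wait(&user_comp, &lock);
            pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
            wait_for_clients();             /* no open clients here, so this returns at once */
            printf("all clients gone\n");
            return 0;
    }

Built with cc -pthread; each client's release path would call put_ref(), and the one that drops the count to zero wakes the waiter. The reordering in remove_one() matters for the same reason: the /dev interface is torn down first so no new client can take a reference while the wait is in progress.
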
diff --git a/drivers/infiniband/hw/hfi1/pcie.c b/drivers/infiniband/hw/hfi1/pcie.c index 89c68da1c273..4ac8f330c5cb 100644 --- a/drivers/infiniband/hw/hfi1/pcie.c +++ b/drivers/infiniband/hw/hfi1/pcie.c | |||
@@ -157,8 +157,7 @@ void hfi1_pcie_cleanup(struct pci_dev *pdev) | |||
157 | * fields required to re-initialize after a chip reset, or for | 157 | * fields required to re-initialize after a chip reset, or for |
158 | * various other purposes | 158 | * various other purposes |
159 | */ | 159 | */ |
160 | int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev, | 160 | int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev) |
161 | const struct pci_device_id *ent) | ||
162 | { | 161 | { |
163 | unsigned long len; | 162 | unsigned long len; |
164 | resource_size_t addr; | 163 | resource_size_t addr; |
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c index 50a3a36d9363..d89b8745d4c1 100644 --- a/drivers/infiniband/hw/hfi1/pio.c +++ b/drivers/infiniband/hw/hfi1/pio.c | |||
@@ -668,19 +668,12 @@ void sc_set_cr_threshold(struct send_context *sc, u32 new_threshold) | |||
668 | void set_pio_integrity(struct send_context *sc) | 668 | void set_pio_integrity(struct send_context *sc) |
669 | { | 669 | { |
670 | struct hfi1_devdata *dd = sc->dd; | 670 | struct hfi1_devdata *dd = sc->dd; |
671 | u64 reg = 0; | ||
672 | u32 hw_context = sc->hw_context; | 671 | u32 hw_context = sc->hw_context; |
673 | int type = sc->type; | 672 | int type = sc->type; |
674 | 673 | ||
675 | /* | 674 | write_kctxt_csr(dd, hw_context, |
676 | * No integrity checks if HFI1_CAP_NO_INTEGRITY is set, or if | 675 | SC(CHECK_ENABLE), |
677 | * we're snooping. | 676 | hfi1_pkt_default_send_ctxt_mask(dd, type)); |
678 | */ | ||
679 | if (likely(!HFI1_CAP_IS_KSET(NO_INTEGRITY)) && | ||
680 | dd->hfi1_snoop.mode_flag != HFI1_PORT_SNOOP_MODE) | ||
681 | reg = hfi1_pkt_default_send_ctxt_mask(dd, type); | ||
682 | |||
683 | write_kctxt_csr(dd, hw_context, SC(CHECK_ENABLE), reg); | ||
684 | } | 677 | } |
685 | 678 | ||
686 | static u32 get_buffers_allocated(struct send_context *sc) | 679 | static u32 get_buffers_allocated(struct send_context *sc) |
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c index 8bc5013f39a1..83198a8a8797 100644 --- a/drivers/infiniband/hw/hfi1/rc.c +++ b/drivers/infiniband/hw/hfi1/rc.c | |||
@@ -89,7 +89,7 @@ void hfi1_add_rnr_timer(struct rvt_qp *qp, u32 to) | |||
89 | 89 | ||
90 | lockdep_assert_held(&qp->s_lock); | 90 | lockdep_assert_held(&qp->s_lock); |
91 | qp->s_flags |= RVT_S_WAIT_RNR; | 91 | qp->s_flags |= RVT_S_WAIT_RNR; |
92 | qp->s_timer.expires = jiffies + usecs_to_jiffies(to); | 92 | priv->s_rnr_timer.expires = jiffies + usecs_to_jiffies(to); |
93 | add_timer(&priv->s_rnr_timer); | 93 | add_timer(&priv->s_rnr_timer); |
94 | } | 94 | } |
95 | 95 | ||
diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c index fd39bcaa062d..9cbe52d21077 100644 --- a/drivers/infiniband/hw/hfi1/sdma.c +++ b/drivers/infiniband/hw/hfi1/sdma.c | |||
@@ -2009,11 +2009,6 @@ static void sdma_hw_start_up(struct sdma_engine *sde) | |||
2009 | write_sde_csr(sde, SD(ENG_ERR_CLEAR), reg); | 2009 | write_sde_csr(sde, SD(ENG_ERR_CLEAR), reg); |
2010 | } | 2010 | } |
2011 | 2011 | ||
2012 | #define CLEAR_STATIC_RATE_CONTROL_SMASK(r) \ | ||
2013 | (r &= ~SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK) | ||
2014 | |||
2015 | #define SET_STATIC_RATE_CONTROL_SMASK(r) \ | ||
2016 | (r |= SEND_DMA_CHECK_ENABLE_DISALLOW_PBC_STATIC_RATE_CONTROL_SMASK) | ||
2017 | /* | 2012 | /* |
2018 | * set_sdma_integrity | 2013 | * set_sdma_integrity |
2019 | * | 2014 | * |
@@ -2022,19 +2017,9 @@ static void sdma_hw_start_up(struct sdma_engine *sde) | |||
2022 | static void set_sdma_integrity(struct sdma_engine *sde) | 2017 | static void set_sdma_integrity(struct sdma_engine *sde) |
2023 | { | 2018 | { |
2024 | struct hfi1_devdata *dd = sde->dd; | 2019 | struct hfi1_devdata *dd = sde->dd; |
2025 | u64 reg; | ||
2026 | |||
2027 | if (unlikely(HFI1_CAP_IS_KSET(NO_INTEGRITY))) | ||
2028 | return; | ||
2029 | |||
2030 | reg = hfi1_pkt_base_sdma_integrity(dd); | ||
2031 | |||
2032 | if (HFI1_CAP_IS_KSET(STATIC_RATE_CTRL)) | ||
2033 | CLEAR_STATIC_RATE_CONTROL_SMASK(reg); | ||
2034 | else | ||
2035 | SET_STATIC_RATE_CONTROL_SMASK(reg); | ||
2036 | 2020 | ||
2037 | write_sde_csr(sde, SD(CHECK_ENABLE), reg); | 2021 | write_sde_csr(sde, SD(CHECK_ENABLE), |
2022 | hfi1_pkt_base_sdma_integrity(dd)); | ||
2038 | } | 2023 | } |
2039 | 2024 | ||
2040 | static void init_sdma_regs( | 2025 | static void init_sdma_regs( |
diff --git a/drivers/infiniband/hw/hfi1/sysfs.c b/drivers/infiniband/hw/hfi1/sysfs.c index edba22461a9c..919a5474e651 100644 --- a/drivers/infiniband/hw/hfi1/sysfs.c +++ b/drivers/infiniband/hw/hfi1/sysfs.c | |||
@@ -49,7 +49,6 @@ | |||
49 | #include "hfi.h" | 49 | #include "hfi.h" |
50 | #include "mad.h" | 50 | #include "mad.h" |
51 | #include "trace.h" | 51 | #include "trace.h" |
52 | #include "affinity.h" | ||
53 | 52 | ||
54 | /* | 53 | /* |
55 | * Start of per-port congestion control structures and support code | 54 | * Start of per-port congestion control structures and support code |
@@ -623,27 +622,6 @@ static ssize_t show_tempsense(struct device *device, | |||
623 | return ret; | 622 | return ret; |
624 | } | 623 | } |
625 | 624 | ||
626 | static ssize_t show_sdma_affinity(struct device *device, | ||
627 | struct device_attribute *attr, char *buf) | ||
628 | { | ||
629 | struct hfi1_ibdev *dev = | ||
630 | container_of(device, struct hfi1_ibdev, rdi.ibdev.dev); | ||
631 | struct hfi1_devdata *dd = dd_from_dev(dev); | ||
632 | |||
633 | return hfi1_get_sdma_affinity(dd, buf); | ||
634 | } | ||
635 | |||
636 | static ssize_t store_sdma_affinity(struct device *device, | ||
637 | struct device_attribute *attr, | ||
638 | const char *buf, size_t count) | ||
639 | { | ||
640 | struct hfi1_ibdev *dev = | ||
641 | container_of(device, struct hfi1_ibdev, rdi.ibdev.dev); | ||
642 | struct hfi1_devdata *dd = dd_from_dev(dev); | ||
643 | |||
644 | return hfi1_set_sdma_affinity(dd, buf, count); | ||
645 | } | ||
646 | |||
647 | /* | 625 | /* |
648 | * end of per-unit (or driver, in some cases, but replicated | 626 | * end of per-unit (or driver, in some cases, but replicated |
649 | * per unit) functions | 627 | * per unit) functions |
@@ -658,8 +636,6 @@ static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL); | |||
658 | static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL); | 636 | static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL); |
659 | static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL); | 637 | static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL); |
660 | static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset); | 638 | static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset); |
661 | static DEVICE_ATTR(sdma_affinity, S_IWUSR | S_IRUGO, show_sdma_affinity, | ||
662 | store_sdma_affinity); | ||
663 | 639 | ||
664 | static struct device_attribute *hfi1_attributes[] = { | 640 | static struct device_attribute *hfi1_attributes[] = { |
665 | &dev_attr_hw_rev, | 641 | &dev_attr_hw_rev, |
@@ -670,7 +646,6 @@ static struct device_attribute *hfi1_attributes[] = { | |||
670 | &dev_attr_boardversion, | 646 | &dev_attr_boardversion, |
671 | &dev_attr_tempsense, | 647 | &dev_attr_tempsense, |
672 | &dev_attr_chip_reset, | 648 | &dev_attr_chip_reset, |
673 | &dev_attr_sdma_affinity, | ||
674 | }; | 649 | }; |
675 | 650 | ||
676 | int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num, | 651 | int hfi1_create_port_files(struct ib_device *ibdev, u8 port_num, |
diff --git a/drivers/infiniband/hw/hfi1/trace_rx.h b/drivers/infiniband/hw/hfi1/trace_rx.h index 11e02b228922..f77e59fb43fe 100644 --- a/drivers/infiniband/hw/hfi1/trace_rx.h +++ b/drivers/infiniband/hw/hfi1/trace_rx.h | |||
@@ -253,66 +253,6 @@ TRACE_EVENT(hfi1_mmu_invalidate, | |||
253 | ) | 253 | ) |
254 | ); | 254 | ); |
255 | 255 | ||
256 | #define SNOOP_PRN \ | ||
257 | "slid %.4x dlid %.4x qpn 0x%.6x opcode 0x%.2x,%s " \ | ||
258 | "svc lvl %d pkey 0x%.4x [header = %d bytes] [data = %d bytes]" | ||
259 | |||
260 | TRACE_EVENT(snoop_capture, | ||
261 | TP_PROTO(struct hfi1_devdata *dd, | ||
262 | int hdr_len, | ||
263 | struct ib_header *hdr, | ||
264 | int data_len, | ||
265 | void *data), | ||
266 | TP_ARGS(dd, hdr_len, hdr, data_len, data), | ||
267 | TP_STRUCT__entry( | ||
268 | DD_DEV_ENTRY(dd) | ||
269 | __field(u16, slid) | ||
270 | __field(u16, dlid) | ||
271 | __field(u32, qpn) | ||
272 | __field(u8, opcode) | ||
273 | __field(u8, sl) | ||
274 | __field(u16, pkey) | ||
275 | __field(u32, hdr_len) | ||
276 | __field(u32, data_len) | ||
277 | __field(u8, lnh) | ||
278 | __dynamic_array(u8, raw_hdr, hdr_len) | ||
279 | __dynamic_array(u8, raw_pkt, data_len) | ||
280 | ), | ||
281 | TP_fast_assign( | ||
282 | struct ib_other_headers *ohdr; | ||
283 | |||
284 | __entry->lnh = (u8)(be16_to_cpu(hdr->lrh[0]) & 3); | ||
285 | if (__entry->lnh == HFI1_LRH_BTH) | ||
286 | ohdr = &hdr->u.oth; | ||
287 | else | ||
288 | ohdr = &hdr->u.l.oth; | ||
289 | DD_DEV_ASSIGN(dd); | ||
290 | __entry->slid = be16_to_cpu(hdr->lrh[3]); | ||
291 | __entry->dlid = be16_to_cpu(hdr->lrh[1]); | ||
292 | __entry->qpn = be32_to_cpu(ohdr->bth[1]) & RVT_QPN_MASK; | ||
293 | __entry->opcode = (be32_to_cpu(ohdr->bth[0]) >> 24) & 0xff; | ||
294 | __entry->sl = (u8)(be16_to_cpu(hdr->lrh[0]) >> 4) & 0xf; | ||
295 | __entry->pkey = be32_to_cpu(ohdr->bth[0]) & 0xffff; | ||
296 | __entry->hdr_len = hdr_len; | ||
297 | __entry->data_len = data_len; | ||
298 | memcpy(__get_dynamic_array(raw_hdr), hdr, hdr_len); | ||
299 | memcpy(__get_dynamic_array(raw_pkt), data, data_len); | ||
300 | ), | ||
301 | TP_printk( | ||
302 | "[%s] " SNOOP_PRN, | ||
303 | __get_str(dev), | ||
304 | __entry->slid, | ||
305 | __entry->dlid, | ||
306 | __entry->qpn, | ||
307 | __entry->opcode, | ||
308 | show_ib_opcode(__entry->opcode), | ||
309 | __entry->sl, | ||
310 | __entry->pkey, | ||
311 | __entry->hdr_len, | ||
312 | __entry->data_len | ||
313 | ) | ||
314 | ); | ||
315 | |||
316 | #endif /* __HFI1_TRACE_RX_H */ | 256 | #endif /* __HFI1_TRACE_RX_H */ |
317 | 257 | ||
318 | #undef TRACE_INCLUDE_PATH | 258 | #undef TRACE_INCLUDE_PATH |
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c index a761f804111e..77697d690f3e 100644 --- a/drivers/infiniband/hw/hfi1/user_sdma.c +++ b/drivers/infiniband/hw/hfi1/user_sdma.c | |||
@@ -1144,7 +1144,7 @@ static int pin_vector_pages(struct user_sdma_request *req, | |||
1144 | rb_node = hfi1_mmu_rb_extract(pq->handler, | 1144 | rb_node = hfi1_mmu_rb_extract(pq->handler, |
1145 | (unsigned long)iovec->iov.iov_base, | 1145 | (unsigned long)iovec->iov.iov_base, |
1146 | iovec->iov.iov_len); | 1146 | iovec->iov.iov_len); |
1147 | if (rb_node && !IS_ERR(rb_node)) | 1147 | if (rb_node) |
1148 | node = container_of(rb_node, struct sdma_mmu_node, rb); | 1148 | node = container_of(rb_node, struct sdma_mmu_node, rb); |
1149 | else | 1149 | else |
1150 | rb_node = NULL; | 1150 | rb_node = NULL; |
diff --git a/drivers/infiniband/hw/mlx4/ah.c b/drivers/infiniband/hw/mlx4/ah.c index 5fc623362731..b9bf0759f10a 100644 --- a/drivers/infiniband/hw/mlx4/ah.c +++ b/drivers/infiniband/hw/mlx4/ah.c | |||
@@ -102,7 +102,10 @@ static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr | |||
102 | if (vlan_tag < 0x1000) | 102 | if (vlan_tag < 0x1000) |
103 | vlan_tag |= (ah_attr->sl & 7) << 13; | 103 | vlan_tag |= (ah_attr->sl & 7) << 13; |
104 | ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24)); | 104 | ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24)); |
105 | ah->av.eth.gid_index = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index); | 105 | ret = mlx4_ib_gid_index_to_real_index(ibdev, ah_attr->port_num, ah_attr->grh.sgid_index); |
106 | if (ret < 0) | ||
107 | return ERR_PTR(ret); | ||
108 | ah->av.eth.gid_index = ret; | ||
106 | ah->av.eth.vlan = cpu_to_be16(vlan_tag); | 109 | ah->av.eth.vlan = cpu_to_be16(vlan_tag); |
107 | ah->av.eth.hop_limit = ah_attr->grh.hop_limit; | 110 | ah->av.eth.hop_limit = ah_attr->grh.hop_limit; |
108 | if (ah_attr->static_rate) { | 111 | if (ah_attr->static_rate) { |
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index 1ea686b9e0f9..6a0fec357dae 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c | |||
@@ -253,11 +253,14 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, | |||
253 | if (context) | 253 | if (context) |
254 | if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) { | 254 | if (ib_copy_to_udata(udata, &cq->mcq.cqn, sizeof (__u32))) { |
255 | err = -EFAULT; | 255 | err = -EFAULT; |
256 | goto err_dbmap; | 256 | goto err_cq_free; |
257 | } | 257 | } |
258 | 258 | ||
259 | return &cq->ibcq; | 259 | return &cq->ibcq; |
260 | 260 | ||
261 | err_cq_free: | ||
262 | mlx4_cq_free(dev->dev, &cq->mcq); | ||
263 | |||
261 | err_dbmap: | 264 | err_dbmap: |
262 | if (context) | 265 | if (context) |
263 | mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db); | 266 | mlx4_ib_db_unmap_user(to_mucontext(context), &cq->db); |
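
The mlx4_ib_create_cq() fix above is the usual goto-unwind idiom: each error label undoes exactly what was acquired before the failure point, in reverse order, so jumping to the wrong label (err_dbmap instead of the new err_cq_free) would leak the CQ that had already been allocated. A generic sketch of the idiom, with invented resource names:

    #include <stdio.h>
    #include <stdlib.h>

    /* 'fail_late' simulates a failure that happens only after both
     * resources have already been acquired. */
    static int setup(int fail_late)
    {
            char *a, *b;

            a = malloc(64);                 /* first resource */
            if (!a)
                    return -1;

            b = malloc(64);                 /* second resource */
            if (!b)
                    goto err_free_a;        /* only 'a' exists so far */

            if (fail_late)
                    goto err_free_b;        /* both 'a' and 'b' must now be undone */

            free(b);                        /* success: a real driver would keep these */
            free(a);
            return 0;

    err_free_b:                             /* labels run in reverse order of acquisition */
            free(b);
    err_free_a:
            free(a);
            return -1;
    }

    int main(void)
    {
            printf("late failure: %d\n", setup(1));
            printf("success:      %d\n", setup(0));
            return 0;
    }
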
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index 79d017baf6f4..fcd04b881ec1 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c | |||
@@ -932,8 +932,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, | |||
932 | if (err) | 932 | if (err) |
933 | goto err_create; | 933 | goto err_create; |
934 | } else { | 934 | } else { |
935 | /* for now choose 64 bytes till we have a proper interface */ | 935 | cqe_size = cache_line_size() == 128 ? 128 : 64; |
936 | cqe_size = 64; | ||
937 | err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb, | 936 | err = create_cq_kernel(dev, cq, entries, cqe_size, &cqb, |
938 | &index, &inlen); | 937 | &index, &inlen); |
939 | if (err) | 938 | if (err) |
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 63036c731626..32b09f059c84 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c | |||
@@ -2311,14 +2311,14 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context, | |||
2311 | { | 2311 | { |
2312 | struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context; | 2312 | struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context; |
2313 | struct ib_event ibev; | 2313 | struct ib_event ibev; |
2314 | 2314 | bool fatal = false; | |
2315 | u8 port = 0; | 2315 | u8 port = 0; |
2316 | 2316 | ||
2317 | switch (event) { | 2317 | switch (event) { |
2318 | case MLX5_DEV_EVENT_SYS_ERROR: | 2318 | case MLX5_DEV_EVENT_SYS_ERROR: |
2319 | ibdev->ib_active = false; | ||
2320 | ibev.event = IB_EVENT_DEVICE_FATAL; | 2319 | ibev.event = IB_EVENT_DEVICE_FATAL; |
2321 | mlx5_ib_handle_internal_error(ibdev); | 2320 | mlx5_ib_handle_internal_error(ibdev); |
2321 | fatal = true; | ||
2322 | break; | 2322 | break; |
2323 | 2323 | ||
2324 | case MLX5_DEV_EVENT_PORT_UP: | 2324 | case MLX5_DEV_EVENT_PORT_UP: |
@@ -2370,6 +2370,9 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context, | |||
2370 | 2370 | ||
2371 | if (ibdev->ib_active) | 2371 | if (ibdev->ib_active) |
2372 | ib_dispatch_event(&ibev); | 2372 | ib_dispatch_event(&ibev); |
2373 | |||
2374 | if (fatal) | ||
2375 | ibdev->ib_active = false; | ||
2373 | } | 2376 | } |
2374 | 2377 | ||
2375 | static void get_ext_port_caps(struct mlx5_ib_dev *dev) | 2378 | static void get_ext_port_caps(struct mlx5_ib_dev *dev) |
@@ -3115,7 +3118,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) | |||
3115 | } | 3118 | } |
3116 | err = init_node_data(dev); | 3119 | err = init_node_data(dev); |
3117 | if (err) | 3120 | if (err) |
3118 | goto err_dealloc; | 3121 | goto err_free_port; |
3119 | 3122 | ||
3120 | mutex_init(&dev->flow_db.lock); | 3123 | mutex_init(&dev->flow_db.lock); |
3121 | mutex_init(&dev->cap_mask_mutex); | 3124 | mutex_init(&dev->cap_mask_mutex); |
@@ -3125,7 +3128,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) | |||
3125 | if (ll == IB_LINK_LAYER_ETHERNET) { | 3128 | if (ll == IB_LINK_LAYER_ETHERNET) { |
3126 | err = mlx5_enable_roce(dev); | 3129 | err = mlx5_enable_roce(dev); |
3127 | if (err) | 3130 | if (err) |
3128 | goto err_dealloc; | 3131 | goto err_free_port; |
3129 | } | 3132 | } |
3130 | 3133 | ||
3131 | err = create_dev_resources(&dev->devr); | 3134 | err = create_dev_resources(&dev->devr); |
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index dcdcd195fe53..7d689903c87c 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h | |||
@@ -626,6 +626,8 @@ struct mlx5_ib_dev { | |||
626 | struct mlx5_ib_resources devr; | 626 | struct mlx5_ib_resources devr; |
627 | struct mlx5_mr_cache cache; | 627 | struct mlx5_mr_cache cache; |
628 | struct timer_list delay_timer; | 628 | struct timer_list delay_timer; |
629 | /* Prevents soft lock on massive reg MRs */ | ||
630 | struct mutex slow_path_mutex; | ||
629 | int fill_delay; | 631 | int fill_delay; |
630 | #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING | 632 | #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING |
631 | struct ib_odp_caps odp_caps; | 633 | struct ib_odp_caps odp_caps; |
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index d4ad672b905b..4e9012463c37 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c | |||
@@ -610,6 +610,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev) | |||
610 | int err; | 610 | int err; |
611 | int i; | 611 | int i; |
612 | 612 | ||
613 | mutex_init(&dev->slow_path_mutex); | ||
613 | cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM); | 614 | cache->wq = alloc_ordered_workqueue("mkey_cache", WQ_MEM_RECLAIM); |
614 | if (!cache->wq) { | 615 | if (!cache->wq) { |
615 | mlx5_ib_warn(dev, "failed to create work queue\n"); | 616 | mlx5_ib_warn(dev, "failed to create work queue\n"); |
@@ -1182,9 +1183,12 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
1182 | goto error; | 1183 | goto error; |
1183 | } | 1184 | } |
1184 | 1185 | ||
1185 | if (!mr) | 1186 | if (!mr) { |
1187 | mutex_lock(&dev->slow_path_mutex); | ||
1186 | mr = reg_create(NULL, pd, virt_addr, length, umem, ncont, | 1188 | mr = reg_create(NULL, pd, virt_addr, length, umem, ncont, |
1187 | page_shift, access_flags); | 1189 | page_shift, access_flags); |
1190 | mutex_unlock(&dev->slow_path_mutex); | ||
1191 | } | ||
1188 | 1192 | ||
1189 | if (IS_ERR(mr)) { | 1193 | if (IS_ERR(mr)) { |
1190 | err = PTR_ERR(mr); | 1194 | err = PTR_ERR(mr); |
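
The slow_path_mutex introduced above serializes only the fallback path: the common case (an MR taken from the cache) stays lock-free, while the expensive reg_create() path is entered by one thread at a time so that many concurrent large registrations cannot hog the CPU. A userspace sketch of the same shape, assuming POSIX threads (fast_alloc() and slow_create() are stand-ins, not driver functions):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    static pthread_mutex_t slow_path_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* Fast path: pretend the cache is empty so we always fall back. */
    static void *fast_alloc(void)
    {
            return NULL;
    }

    static void *slow_create(void)
    {
            usleep(1000);                   /* stand-in for the expensive work */
            return malloc(64);
    }

    static void *register_mr(void *arg)
    {
            void *mr = fast_alloc();

            if (!mr) {                      /* cache miss: serialize the slow path */
                    pthread_mutex_lock(&slow_path_mutex);
                    mr = slow_create();
                    pthread_mutex_unlock(&slow_path_mutex);
            }
            free(mr);
            return NULL;
    }

    int main(void)
    {
            pthread_t t[8];
            int i;

            for (i = 0; i < 8; i++)
                    pthread_create(&t[i], NULL, register_mr, NULL);
            for (i = 0; i < 8; i++)
                    pthread_join(t[i], NULL);
            printf("done\n");
            return 0;
    }
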
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index 7ce97daf26c6..d1e921816bfe 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c | |||
@@ -2051,8 +2051,8 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, | |||
2051 | 2051 | ||
2052 | mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n", | 2052 | mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n", |
2053 | qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn, | 2053 | qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn, |
2054 | to_mcq(init_attr->recv_cq)->mcq.cqn, | 2054 | init_attr->recv_cq ? to_mcq(init_attr->recv_cq)->mcq.cqn : -1, |
2055 | to_mcq(init_attr->send_cq)->mcq.cqn); | 2055 | init_attr->send_cq ? to_mcq(init_attr->send_cq)->mcq.cqn : -1); |
2056 | 2056 | ||
2057 | qp->trans_qp.xrcdn = xrcdn; | 2057 | qp->trans_qp.xrcdn = xrcdn; |
2058 | 2058 | ||
@@ -4814,6 +4814,14 @@ struct ib_rwq_ind_table *mlx5_ib_create_rwq_ind_table(struct ib_device *device, | |||
4814 | udata->inlen)) | 4814 | udata->inlen)) |
4815 | return ERR_PTR(-EOPNOTSUPP); | 4815 | return ERR_PTR(-EOPNOTSUPP); |
4816 | 4816 | ||
4817 | if (init_attr->log_ind_tbl_size > | ||
4818 | MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)) { | ||
4819 | mlx5_ib_dbg(dev, "log_ind_tbl_size = %d is bigger than supported = %d\n", | ||
4820 | init_attr->log_ind_tbl_size, | ||
4821 | MLX5_CAP_GEN(dev->mdev, log_max_rqt_size)); | ||
4822 | return ERR_PTR(-EINVAL); | ||
4823 | } | ||
4824 | |||
4817 | min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved); | 4825 | min_resp_len = offsetof(typeof(resp), reserved) + sizeof(resp.reserved); |
4818 | if (udata->outlen && udata->outlen < min_resp_len) | 4826 | if (udata->outlen && udata->outlen < min_resp_len) |
4819 | return ERR_PTR(-EINVAL); | 4827 | return ERR_PTR(-EINVAL); |
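
The log_ind_tbl_size check added above is a plain capability bound: a caller-supplied log2 table size is rejected with -EINVAL before anything is allocated if it exceeds what the device reports. A small standalone version of the same validation, with an invented value for the capability:

    #include <errno.h>
    #include <stdio.h>

    #define LOG_MAX_RQT_SIZE 5              /* assumed device capability: 2^5 entries */

    static int create_ind_table(unsigned int log_ind_tbl_size)
    {
            if (log_ind_tbl_size > LOG_MAX_RQT_SIZE) {
                    fprintf(stderr,
                            "log_ind_tbl_size = %u is bigger than supported = %u\n",
                            log_ind_tbl_size, LOG_MAX_RQT_SIZE);
                    return -EINVAL;
            }
            /* ... allocate 1 << log_ind_tbl_size entries ... */
            return 0;
    }

    int main(void)
    {
            printf("%d\n", create_ind_table(4));    /* accepted */
            printf("%d\n", create_ind_table(9));    /* rejected */
            return 0;
    }
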
diff --git a/drivers/infiniband/sw/rdmavt/dma.c b/drivers/infiniband/sw/rdmavt/dma.c index 01f71caa3ac4..f2cefb0d9180 100644 --- a/drivers/infiniband/sw/rdmavt/dma.c +++ b/drivers/infiniband/sw/rdmavt/dma.c | |||
@@ -90,9 +90,6 @@ static u64 rvt_dma_map_page(struct ib_device *dev, struct page *page, | |||
90 | if (WARN_ON(!valid_dma_direction(direction))) | 90 | if (WARN_ON(!valid_dma_direction(direction))) |
91 | return BAD_DMA_ADDRESS; | 91 | return BAD_DMA_ADDRESS; |
92 | 92 | ||
93 | if (offset + size > PAGE_SIZE) | ||
94 | return BAD_DMA_ADDRESS; | ||
95 | |||
96 | addr = (u64)page_address(page); | 93 | addr = (u64)page_address(page); |
97 | if (addr) | 94 | if (addr) |
98 | addr += offset; | 95 | addr += offset; |
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c index b8258e4f0aea..ffff5a54cb34 100644 --- a/drivers/infiniband/sw/rxe/rxe_net.c +++ b/drivers/infiniband/sw/rxe/rxe_net.c | |||
@@ -243,10 +243,8 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port, | |||
243 | { | 243 | { |
244 | int err; | 244 | int err; |
245 | struct socket *sock; | 245 | struct socket *sock; |
246 | struct udp_port_cfg udp_cfg; | 246 | struct udp_port_cfg udp_cfg = {0}; |
247 | struct udp_tunnel_sock_cfg tnl_cfg; | 247 | struct udp_tunnel_sock_cfg tnl_cfg = {0}; |
248 | |||
249 | memset(&udp_cfg, 0, sizeof(udp_cfg)); | ||
250 | 248 | ||
251 | if (ipv6) { | 249 | if (ipv6) { |
252 | udp_cfg.family = AF_INET6; | 250 | udp_cfg.family = AF_INET6; |
@@ -264,10 +262,8 @@ static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port, | |||
264 | return ERR_PTR(err); | 262 | return ERR_PTR(err); |
265 | } | 263 | } |
266 | 264 | ||
267 | tnl_cfg.sk_user_data = NULL; | ||
268 | tnl_cfg.encap_type = 1; | 265 | tnl_cfg.encap_type = 1; |
269 | tnl_cfg.encap_rcv = rxe_udp_encap_recv; | 266 | tnl_cfg.encap_rcv = rxe_udp_encap_recv; |
270 | tnl_cfg.encap_destroy = NULL; | ||
271 | 267 | ||
272 | /* Setup UDP tunnel */ | 268 | /* Setup UDP tunnel */ |
273 | setup_udp_tunnel_sock(net, sock, &tnl_cfg); | 269 | setup_udp_tunnel_sock(net, sock, &tnl_cfg); |
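
The rxe_setup_udp_tunnel() change above leans on C initialization rules: "= {0}" zero-initializes every member of the struct at its declaration, so the explicit memset() and the individual NULL assignments become redundant. A trivial demonstration with an invented struct:

    #include <stdio.h>
    #include <string.h>

    struct udp_cfg_demo {                   /* invented, not the kernel struct */
            int family;
            int use_udp_checksums;
            void *sk_user_data;
    };

    int main(void)
    {
            struct udp_cfg_demo a = {0};    /* every member starts out zero/NULL */
            struct udp_cfg_demo b;

            memset(&b, 0, sizeof(b));       /* the older, equivalent spelling */

            printf("a: %d %d %p\n", a.family, a.use_udp_checksums, a.sk_user_data);
            printf("b: %d %d %p\n", b.family, b.use_udp_checksums, b.sk_user_data);
            return 0;
    }
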
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c index b8036cfbce04..c3e60e4bde6e 100644 --- a/drivers/infiniband/sw/rxe/rxe_qp.c +++ b/drivers/infiniband/sw/rxe/rxe_qp.c | |||
@@ -522,6 +522,7 @@ static void rxe_qp_reset(struct rxe_qp *qp) | |||
522 | if (qp->sq.queue) { | 522 | if (qp->sq.queue) { |
523 | __rxe_do_task(&qp->comp.task); | 523 | __rxe_do_task(&qp->comp.task); |
524 | __rxe_do_task(&qp->req.task); | 524 | __rxe_do_task(&qp->req.task); |
525 | rxe_queue_reset(qp->sq.queue); | ||
525 | } | 526 | } |
526 | 527 | ||
527 | /* cleanup attributes */ | 528 | /* cleanup attributes */ |
@@ -573,6 +574,7 @@ void rxe_qp_error(struct rxe_qp *qp) | |||
573 | { | 574 | { |
574 | qp->req.state = QP_STATE_ERROR; | 575 | qp->req.state = QP_STATE_ERROR; |
575 | qp->resp.state = QP_STATE_ERROR; | 576 | qp->resp.state = QP_STATE_ERROR; |
577 | qp->attr.qp_state = IB_QPS_ERR; | ||
576 | 578 | ||
577 | /* drain work and packet queues */ | 579 | /* drain work and packet queues */ |
578 | rxe_run_task(&qp->resp.task, 1); | 580 | rxe_run_task(&qp->resp.task, 1); |
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c index 08274254eb88..d14bf496d62d 100644 --- a/drivers/infiniband/sw/rxe/rxe_queue.c +++ b/drivers/infiniband/sw/rxe/rxe_queue.c | |||
@@ -84,6 +84,15 @@ err1: | |||
84 | return -EINVAL; | 84 | return -EINVAL; |
85 | } | 85 | } |
86 | 86 | ||
87 | inline void rxe_queue_reset(struct rxe_queue *q) | ||
88 | { | ||
89 | /* the queue consists of a management header plus the memory | ||
90 | * for the actual queue entries; see "struct rxe_queue_buf" in rxe_queue.h. | ||
91 | * Reset only the queue itself, not the management header. | ||
92 | */ | ||
93 | memset(q->buf->data, 0, q->buf_size - sizeof(struct rxe_queue_buf)); | ||
94 | } | ||
95 | |||
87 | struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, | 96 | struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, |
88 | int *num_elem, | 97 | int *num_elem, |
89 | unsigned int elem_size) | 98 | unsigned int elem_size) |
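
rxe_queue_reset() above zeroes only the producer/consumer data that follows the rxe_queue_buf management header, leaving the header itself untouched. A sketch of that layout and the partial memset, using an invented header struct in place of struct rxe_queue_buf:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct queue_buf {                      /* stand-in for struct rxe_queue_buf */
            unsigned int log2_elem_size;
            unsigned int index_mask;
            unsigned char data[];           /* the actual queue follows the header */
    };

    int main(void)
    {
            size_t buf_size = sizeof(struct queue_buf) + 64;
            struct queue_buf *q = malloc(buf_size);

            if (!q)
                    return 1;
            q->log2_elem_size = 4;
            q->index_mask = 3;
            memset(q->data, 0xff, 64);      /* pretend there are stale entries */

            /* reset only the queue itself, not the management header */
            memset(q->data, 0, buf_size - sizeof(struct queue_buf));

            printf("header kept: %u %u, first data byte: %u\n",
                   q->log2_elem_size, q->index_mask, q->data[0]);
            free(q);
            return 0;
    }
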
diff --git a/drivers/infiniband/sw/rxe/rxe_queue.h b/drivers/infiniband/sw/rxe/rxe_queue.h index 239fd609c31e..8c8641c87817 100644 --- a/drivers/infiniband/sw/rxe/rxe_queue.h +++ b/drivers/infiniband/sw/rxe/rxe_queue.h | |||
@@ -84,6 +84,8 @@ int do_mmap_info(struct rxe_dev *rxe, | |||
84 | size_t buf_size, | 84 | size_t buf_size, |
85 | struct rxe_mmap_info **ip_p); | 85 | struct rxe_mmap_info **ip_p); |
86 | 86 | ||
87 | void rxe_queue_reset(struct rxe_queue *q); | ||
88 | |||
87 | struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, | 89 | struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, |
88 | int *num_elem, | 90 | int *num_elem, |
89 | unsigned int elem_size); | 91 | unsigned int elem_size); |
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c index 832846b73ea0..22bd9630dcd9 100644 --- a/drivers/infiniband/sw/rxe/rxe_req.c +++ b/drivers/infiniband/sw/rxe/rxe_req.c | |||
@@ -696,7 +696,8 @@ next_wqe: | |||
696 | qp->req.wqe_index); | 696 | qp->req.wqe_index); |
697 | wqe->state = wqe_state_done; | 697 | wqe->state = wqe_state_done; |
698 | wqe->status = IB_WC_SUCCESS; | 698 | wqe->status = IB_WC_SUCCESS; |
699 | goto complete; | 699 | __rxe_do_task(&qp->comp.task); |
700 | return 0; | ||
700 | } | 701 | } |
701 | payload = mtu; | 702 | payload = mtu; |
702 | } | 703 | } |
@@ -745,13 +746,17 @@ err: | |||
745 | wqe->status = IB_WC_LOC_PROT_ERR; | 746 | wqe->status = IB_WC_LOC_PROT_ERR; |
746 | wqe->state = wqe_state_error; | 747 | wqe->state = wqe_state_error; |
747 | 748 | ||
748 | complete: | 749 | /* |
749 | if (qp_type(qp) != IB_QPT_RC) { | 750 | * IBA Spec. Section 10.7.3.1 SIGNALED COMPLETIONS |
750 | while (rxe_completer(qp) == 0) | 751 | * ---------8<---------8<------------- |
751 | ; | 752 | * ...Note that if a completion error occurs, a Work Completion |
752 | } | 753 | * will always be generated, even if the signaling |
753 | 754 | * indicator requests an Unsignaled Completion. | |
754 | return 0; | 755 | * ---------8<---------8<------------- |
756 | */ | ||
757 | wqe->wr.send_flags |= IB_SEND_SIGNALED; | ||
758 | __rxe_do_task(&qp->comp.task); | ||
759 | return -EAGAIN; | ||
755 | 760 | ||
756 | exit: | 761 | exit: |
757 | return -EAGAIN; | 762 | return -EAGAIN; |
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c index 58470f5ced04..8c53748a769d 100644 --- a/drivers/iommu/dmar.c +++ b/drivers/iommu/dmar.c | |||
@@ -338,7 +338,9 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb, | |||
338 | struct pci_dev *pdev = to_pci_dev(data); | 338 | struct pci_dev *pdev = to_pci_dev(data); |
339 | struct dmar_pci_notify_info *info; | 339 | struct dmar_pci_notify_info *info; |
340 | 340 | ||
341 | /* Only care about add/remove events for physical functions */ | 341 | /* Only care about add/remove events for physical functions. |
342 | * For VFs we actually do the lookup based on the corresponding | ||
343 | * PF in device_to_iommu() anyway. */ | ||
342 | if (pdev->is_virtfn) | 344 | if (pdev->is_virtfn) |
343 | return NOTIFY_DONE; | 345 | return NOTIFY_DONE; |
344 | if (action != BUS_NOTIFY_ADD_DEVICE && | 346 | if (action != BUS_NOTIFY_ADD_DEVICE && |
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 3965e73db51c..d8376c2d18b3 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
@@ -892,7 +892,13 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf | |||
892 | return NULL; | 892 | return NULL; |
893 | 893 | ||
894 | if (dev_is_pci(dev)) { | 894 | if (dev_is_pci(dev)) { |
895 | struct pci_dev *pf_pdev; | ||
896 | |||
895 | pdev = to_pci_dev(dev); | 897 | pdev = to_pci_dev(dev); |
898 | /* VFs aren't listed in scope tables; we need to look up | ||
899 | * the PF instead to find the IOMMU. */ | ||
900 | pf_pdev = pci_physfn(pdev); | ||
901 | dev = &pf_pdev->dev; | ||
896 | segment = pci_domain_nr(pdev->bus); | 902 | segment = pci_domain_nr(pdev->bus); |
897 | } else if (has_acpi_companion(dev)) | 903 | } else if (has_acpi_companion(dev)) |
898 | dev = &ACPI_COMPANION(dev)->dev; | 904 | dev = &ACPI_COMPANION(dev)->dev; |
@@ -905,6 +911,13 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf | |||
905 | for_each_active_dev_scope(drhd->devices, | 911 | for_each_active_dev_scope(drhd->devices, |
906 | drhd->devices_cnt, i, tmp) { | 912 | drhd->devices_cnt, i, tmp) { |
907 | if (tmp == dev) { | 913 | if (tmp == dev) { |
914 | /* For a VF use its original BDF# not that of the PF | ||
915 | * which we used for the IOMMU lookup. Strictly speaking | ||
916 | * we could do this for all PCI devices; we only need to | ||
917 | * get the BDF# from the scope table for ACPI matches. */ | ||
918 | if (pdev->is_virtfn) | ||
919 | goto got_pdev; | ||
920 | |||
908 | *bus = drhd->devices[i].bus; | 921 | *bus = drhd->devices[i].bus; |
909 | *devfn = drhd->devices[i].devfn; | 922 | *devfn = drhd->devices[i].devfn; |
910 | goto out; | 923 | goto out; |
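
The intel-iommu change above separates two concerns: DMAR scope tables list only physical functions, so the IOMMU lookup is done on the device returned by pci_physfn(), while the bus/devfn handed back must still be the virtual function's own so the correct context entry is programmed. A much simplified userspace model of that lookup (the table, IDs and names are all invented):

    #include <stdio.h>

    struct dev {
            int id;                 /* stand-in for bus/devfn */
            int is_virtfn;
            struct dev *physfn;     /* parent PF for a VF, else NULL */
    };

    struct scope_entry {
            struct dev *dev;        /* only PFs appear in the scope table */
            const char *iommu;
    };

    /* analogue of pci_physfn(): VFs resolve to their parent PF */
    static struct dev *physfn(struct dev *d)
    {
            return d->is_virtfn ? d->physfn : d;
    }

    static const char *device_to_iommu(struct scope_entry *tbl, int n,
                                       struct dev *d, int *id_out)
    {
            struct dev *key = physfn(d);    /* look up by the PF ... */
            int i;

            for (i = 0; i < n; i++) {
                    if (tbl[i].dev == key) {
                            *id_out = d->id;        /* ... but report the VF's own id */
                            return tbl[i].iommu;
                    }
            }
            return NULL;
    }

    int main(void)
    {
            struct dev pf = { .id = 0x10, .is_virtfn = 0 };
            struct dev vf = { .id = 0x42, .is_virtfn = 1, .physfn = &pf };
            struct scope_entry tbl[] = { { &pf, "dmar0" } };
            int id = 0;
            const char *iommu = device_to_iommu(tbl, 1, &vf, &id);

            printf("iommu=%s id=0x%x\n", iommu ? iommu : "none", id);
            return 0;
    }
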
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c index 8ebb3530afa7..cb72e0011310 100644 --- a/drivers/iommu/intel-svm.c +++ b/drivers/iommu/intel-svm.c | |||
@@ -39,10 +39,18 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu) | |||
39 | struct page *pages; | 39 | struct page *pages; |
40 | int order; | 40 | int order; |
41 | 41 | ||
42 | order = ecap_pss(iommu->ecap) + 7 - PAGE_SHIFT; | 42 | /* Start at 2 because it's defined as 2^(1+PSS) */ |
43 | if (order < 0) | 43 | iommu->pasid_max = 2 << ecap_pss(iommu->ecap); |
44 | order = 0; | 44 | |
45 | 45 | /* Eventually I'm promised we will get a multi-level PASID table | |
46 | * and it won't have to be physically contiguous. Until then, | ||
47 | * limit the size because 8MiB contiguous allocations can be hard | ||
48 | * to come by. The limit of 0x20000, which is 1MiB for each of | ||
49 | * the PASID and PASID-state tables, is somewhat arbitrary. */ | ||
50 | if (iommu->pasid_max > 0x20000) | ||
51 | iommu->pasid_max = 0x20000; | ||
52 | |||
53 | order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max); | ||
46 | pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); | 54 | pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); |
47 | if (!pages) { | 55 | if (!pages) { |
48 | pr_warn("IOMMU: %s: Failed to allocate PASID table\n", | 56 | pr_warn("IOMMU: %s: Failed to allocate PASID table\n", |
@@ -53,6 +61,8 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu) | |||
53 | pr_info("%s: Allocated order %d PASID table.\n", iommu->name, order); | 61 | pr_info("%s: Allocated order %d PASID table.\n", iommu->name, order); |
54 | 62 | ||
55 | if (ecap_dis(iommu->ecap)) { | 63 | if (ecap_dis(iommu->ecap)) { |
64 | /* Just making it explicit... */ | ||
65 | BUILD_BUG_ON(sizeof(struct pasid_entry) != sizeof(struct pasid_state_entry)); | ||
56 | pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); | 66 | pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); |
57 | if (pages) | 67 | if (pages) |
58 | iommu->pasid_state_table = page_address(pages); | 68 | iommu->pasid_state_table = page_address(pages); |
@@ -68,11 +78,7 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu) | |||
68 | 78 | ||
69 | int intel_svm_free_pasid_tables(struct intel_iommu *iommu) | 79 | int intel_svm_free_pasid_tables(struct intel_iommu *iommu) |
70 | { | 80 | { |
71 | int order; | 81 | int order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max); |
72 | |||
73 | order = ecap_pss(iommu->ecap) + 7 - PAGE_SHIFT; | ||
74 | if (order < 0) | ||
75 | order = 0; | ||
76 | 82 | ||
77 | if (iommu->pasid_table) { | 83 | if (iommu->pasid_table) { |
78 | free_pages((unsigned long)iommu->pasid_table, order); | 84 | free_pages((unsigned long)iommu->pasid_table, order); |
@@ -371,8 +377,8 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ | |||
371 | } | 377 | } |
372 | svm->iommu = iommu; | 378 | svm->iommu = iommu; |
373 | 379 | ||
374 | if (pasid_max > 2 << ecap_pss(iommu->ecap)) | 380 | if (pasid_max > iommu->pasid_max) |
375 | pasid_max = 2 << ecap_pss(iommu->ecap); | 381 | pasid_max = iommu->pasid_max; |
376 | 382 | ||
377 | /* Do not use PASID 0 in caching mode (virtualised IOMMU) */ | 383 | /* Do not use PASID 0 in caching mode (virtualised IOMMU) */ |
378 | ret = idr_alloc(&iommu->pasid_idr, svm, | 384 | ret = idr_alloc(&iommu->pasid_idr, svm, |
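
The sizing above works as follows: the hardware reports PSS, the supported PASID count is 2^(1+PSS), the driver caps that count at 0x20000 entries, and the allocation order is then derived from entries times entry size rather than directly from PSS. A worked example, assuming 8-byte PASID entries and 4 KiB pages (get_order_demo() re-implements the kernel's get_order() just for the demonstration):

    #include <stdio.h>

    /* userspace stand-in for the kernel's get_order(): smallest 'order'
     * such that (4096 << order) >= size */
    static int get_order_demo(unsigned long size)
    {
            int order = 0;

            while ((4096UL << order) < size)
                    order++;
            return order;
    }

    int main(void)
    {
            unsigned int ecap_pss = 14;                     /* example value read from ECAP */
            unsigned long pasid_max = 2UL << ecap_pss;      /* 2^(1+PSS) = 32768 here */
            unsigned long entry_size = 8;                   /* assumed sizeof(struct pasid_entry) */

            if (pasid_max > 0x20000)                        /* cap at 1 MiB worth of entries */
                    pasid_max = 0x20000;

            printf("pasid_max = %lu, table = %lu bytes, order = %d\n",
                   pasid_max, pasid_max * entry_size,
                   get_order_demo(pasid_max * entry_size));
            return 0;
    }

With PSS = 19 the uncapped count would be 2^20 entries, i.e. 8 MiB of physically contiguous table per allocation, which is exactly the hard-to-satisfy allocation the cap is meant to avoid.
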
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c index 08c87fadca8c..1f32688c312d 100644 --- a/drivers/mailbox/pcc.c +++ b/drivers/mailbox/pcc.c | |||
@@ -65,6 +65,7 @@ | |||
65 | #include <linux/mailbox_controller.h> | 65 | #include <linux/mailbox_controller.h> |
66 | #include <linux/mailbox_client.h> | 66 | #include <linux/mailbox_client.h> |
67 | #include <linux/io-64-nonatomic-lo-hi.h> | 67 | #include <linux/io-64-nonatomic-lo-hi.h> |
68 | #include <acpi/pcc.h> | ||
68 | 69 | ||
69 | #include "mailbox.h" | 70 | #include "mailbox.h" |
70 | 71 | ||
@@ -267,6 +268,8 @@ struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl, | |||
267 | if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone) | 268 | if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone) |
268 | chan->txdone_method |= TXDONE_BY_ACK; | 269 | chan->txdone_method |= TXDONE_BY_ACK; |
269 | 270 | ||
271 | spin_unlock_irqrestore(&chan->lock, flags); | ||
272 | |||
270 | if (pcc_doorbell_irq[subspace_id] > 0) { | 273 | if (pcc_doorbell_irq[subspace_id] > 0) { |
271 | int rc; | 274 | int rc; |
272 | 275 | ||
@@ -275,12 +278,11 @@ struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl, | |||
275 | if (unlikely(rc)) { | 278 | if (unlikely(rc)) { |
276 | dev_err(dev, "failed to register PCC interrupt %d\n", | 279 | dev_err(dev, "failed to register PCC interrupt %d\n", |
277 | pcc_doorbell_irq[subspace_id]); | 280 | pcc_doorbell_irq[subspace_id]); |
281 | pcc_mbox_free_channel(chan); | ||
278 | chan = ERR_PTR(rc); | 282 | chan = ERR_PTR(rc); |
279 | } | 283 | } |
280 | } | 284 | } |
281 | 285 | ||
282 | spin_unlock_irqrestore(&chan->lock, flags); | ||
283 | |||
284 | return chan; | 286 | return chan; |
285 | } | 287 | } |
286 | EXPORT_SYMBOL_GPL(pcc_mbox_request_channel); | 288 | EXPORT_SYMBOL_GPL(pcc_mbox_request_channel); |
@@ -304,20 +306,19 @@ void pcc_mbox_free_channel(struct mbox_chan *chan) | |||
304 | return; | 306 | return; |
305 | } | 307 | } |
306 | 308 | ||
309 | if (pcc_doorbell_irq[id] > 0) | ||
310 | devm_free_irq(chan->mbox->dev, pcc_doorbell_irq[id], chan); | ||
311 | |||
307 | spin_lock_irqsave(&chan->lock, flags); | 312 | spin_lock_irqsave(&chan->lock, flags); |
308 | chan->cl = NULL; | 313 | chan->cl = NULL; |
309 | chan->active_req = NULL; | 314 | chan->active_req = NULL; |
310 | if (chan->txdone_method == (TXDONE_BY_POLL | TXDONE_BY_ACK)) | 315 | if (chan->txdone_method == (TXDONE_BY_POLL | TXDONE_BY_ACK)) |
311 | chan->txdone_method = TXDONE_BY_POLL; | 316 | chan->txdone_method = TXDONE_BY_POLL; |
312 | 317 | ||
313 | if (pcc_doorbell_irq[id] > 0) | ||
314 | devm_free_irq(chan->mbox->dev, pcc_doorbell_irq[id], chan); | ||
315 | |||
316 | spin_unlock_irqrestore(&chan->lock, flags); | 318 | spin_unlock_irqrestore(&chan->lock, flags); |
317 | } | 319 | } |
318 | EXPORT_SYMBOL_GPL(pcc_mbox_free_channel); | 320 | EXPORT_SYMBOL_GPL(pcc_mbox_free_channel); |
319 | 321 | ||
320 | |||
321 | /** | 322 | /** |
322 | * pcc_send_data - Called from Mailbox Controller code. Used | 323 | * pcc_send_data - Called from Mailbox Controller code. Used |
323 | * here only to ring the channel doorbell. The PCC client | 324 | * here only to ring the channel doorbell. The PCC client |
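
Both pcc.c hunks above move the same kind of call out of the spinlock-protected region: requesting and freeing the doorbell interrupt may block, so it has to happen after spin_unlock_irqrestore() (or before spin_lock_irqsave()), with only the quick channel bookkeeping left inside the critical section. A schematic userspace sketch of that restructuring, using a pthread mutex as the lock and a sleep as the stand-in for the blocking call (request_irq_demo() is invented):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    static pthread_mutex_t chan_lock = PTHREAD_MUTEX_INITIALIZER;
    static int chan_claimed;

    /* stand-in for devm_request_irq(): may block, so keep it outside the lock */
    static int request_irq_demo(void)
    {
            usleep(1000);
            return 0;
    }

    static int request_channel(void)
    {
            int rc;

            pthread_mutex_lock(&chan_lock);
            chan_claimed = 1;               /* only quick bookkeeping under the lock */
            pthread_mutex_unlock(&chan_lock);

            rc = request_irq_demo();        /* potentially blocking work, lock dropped */
            if (rc) {
                    pthread_mutex_lock(&chan_lock);
                    chan_claimed = 0;       /* undo the claim on failure */
                    pthread_mutex_unlock(&chan_lock);
            }
            return rc;
    }

    int main(void)
    {
            printf("request_channel() = %d\n", request_channel());
            return 0;
    }
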
diff --git a/drivers/media/dvb-frontends/gp8psk-fe.c b/drivers/media/dvb-frontends/gp8psk-fe.c index be19afeed7a9..93f59bfea092 100644 --- a/drivers/media/dvb-frontends/gp8psk-fe.c +++ b/drivers/media/dvb-frontends/gp8psk-fe.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* DVB USB compliant Linux driver for the | 1 | /* |
2 | * - GENPIX 8pks/qpsk/DCII USB2.0 DVB-S module | 2 | * Frontend driver for the GENPIX 8pks/qpsk/DCII USB2.0 DVB-S module |
3 | * | 3 | * |
4 | * Copyright (C) 2006,2007 Alan Nisota (alannisota@gmail.com) | 4 | * Copyright (C) 2006,2007 Alan Nisota (alannisota@gmail.com) |
5 | * Copyright (C) 2006,2007 Genpix Electronics (genpix@genpix-electronics.com) | 5 | * Copyright (C) 2006,2007 Genpix Electronics (genpix@genpix-electronics.com) |
@@ -8,11 +8,9 @@ | |||
8 | * | 8 | * |
9 | * This module is based off the vp7045 and vp702x modules | 9 | * This module is based off the vp7045 and vp702x modules |
10 | * | 10 | * |
11 | * This program is free software; you can redistribute it and/or modify it | 11 | * This program is free software; you can redistribute it and/or modify it |
12 | * under the terms of the GNU General Public License as published by the Free | 12 | * under the terms of the GNU General Public License as published by the Free |
13 | * Software Foundation, version 2. | 13 | * Software Foundation, version 2. |
14 | * | ||
15 | * see Documentation/dvb/README.dvb-usb for more information | ||
16 | */ | 14 | */ |
17 | 15 | ||
18 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | 16 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
@@ -395,3 +393,8 @@ static struct dvb_frontend_ops gp8psk_fe_ops = { | |||
395 | .dishnetwork_send_legacy_command = gp8psk_fe_send_legacy_dish_cmd, | 393 | .dishnetwork_send_legacy_command = gp8psk_fe_send_legacy_dish_cmd, |
396 | .enable_high_lnb_voltage = gp8psk_fe_enable_high_lnb_voltage | 394 | .enable_high_lnb_voltage = gp8psk_fe_enable_high_lnb_voltage |
397 | }; | 395 | }; |
396 | |||
397 | MODULE_AUTHOR("Alan Nisota <alannisota@gamil.com>"); | ||
398 | MODULE_DESCRIPTION("Frontend Driver for Genpix DVB-S"); | ||
399 | MODULE_VERSION("1.1"); | ||
400 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/media/tuners/tuner-xc2028.c b/drivers/media/tuners/tuner-xc2028.c index 317ef63ee789..8d96a22647b3 100644 --- a/drivers/media/tuners/tuner-xc2028.c +++ b/drivers/media/tuners/tuner-xc2028.c | |||
@@ -281,6 +281,14 @@ static void free_firmware(struct xc2028_data *priv) | |||
281 | int i; | 281 | int i; |
282 | tuner_dbg("%s called\n", __func__); | 282 | tuner_dbg("%s called\n", __func__); |
283 | 283 | ||
284 | /* free allocated f/w string */ | ||
285 | if (priv->fname != firmware_name) | ||
286 | kfree(priv->fname); | ||
287 | priv->fname = NULL; | ||
288 | |||
289 | priv->state = XC2028_NO_FIRMWARE; | ||
290 | memset(&priv->cur_fw, 0, sizeof(priv->cur_fw)); | ||
291 | |||
284 | if (!priv->firm) | 292 | if (!priv->firm) |
285 | return; | 293 | return; |
286 | 294 | ||
@@ -291,9 +299,6 @@ static void free_firmware(struct xc2028_data *priv) | |||
291 | 299 | ||
292 | priv->firm = NULL; | 300 | priv->firm = NULL; |
293 | priv->firm_size = 0; | 301 | priv->firm_size = 0; |
294 | priv->state = XC2028_NO_FIRMWARE; | ||
295 | |||
296 | memset(&priv->cur_fw, 0, sizeof(priv->cur_fw)); | ||
297 | } | 302 | } |
298 | 303 | ||
299 | static int load_all_firmwares(struct dvb_frontend *fe, | 304 | static int load_all_firmwares(struct dvb_frontend *fe, |
@@ -884,9 +889,8 @@ read_not_reliable: | |||
884 | return 0; | 889 | return 0; |
885 | 890 | ||
886 | fail: | 891 | fail: |
887 | priv->state = XC2028_NO_FIRMWARE; | 892 | free_firmware(priv); |
888 | 893 | ||
889 | memset(&priv->cur_fw, 0, sizeof(priv->cur_fw)); | ||
890 | if (retry_count < 8) { | 894 | if (retry_count < 8) { |
891 | msleep(50); | 895 | msleep(50); |
892 | retry_count++; | 896 | retry_count++; |
@@ -1332,11 +1336,8 @@ static int xc2028_dvb_release(struct dvb_frontend *fe) | |||
1332 | mutex_lock(&xc2028_list_mutex); | 1336 | mutex_lock(&xc2028_list_mutex); |
1333 | 1337 | ||
1334 | /* only perform final cleanup if this is the last instance */ | 1338 | /* only perform final cleanup if this is the last instance */ |
1335 | if (hybrid_tuner_report_instance_count(priv) == 1) { | 1339 | if (hybrid_tuner_report_instance_count(priv) == 1) |
1336 | free_firmware(priv); | 1340 | free_firmware(priv); |
1337 | kfree(priv->ctrl.fname); | ||
1338 | priv->ctrl.fname = NULL; | ||
1339 | } | ||
1340 | 1341 | ||
1341 | if (priv) | 1342 | if (priv) |
1342 | hybrid_tuner_release_state(priv); | 1343 | hybrid_tuner_release_state(priv); |
@@ -1399,19 +1400,8 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg) | |||
1399 | 1400 | ||
1400 | /* | 1401 | /* |
1401 | * Copy the config data. | 1402 | * Copy the config data. |
1402 | * For the firmware name, keep a local copy of the string, | ||
1403 | * in order to avoid troubles during device release. | ||
1404 | */ | 1403 | */ |
1405 | kfree(priv->ctrl.fname); | ||
1406 | priv->ctrl.fname = NULL; | ||
1407 | memcpy(&priv->ctrl, p, sizeof(priv->ctrl)); | 1404 | memcpy(&priv->ctrl, p, sizeof(priv->ctrl)); |
1408 | if (p->fname) { | ||
1409 | priv->ctrl.fname = kstrdup(p->fname, GFP_KERNEL); | ||
1410 | if (priv->ctrl.fname == NULL) { | ||
1411 | rc = -ENOMEM; | ||
1412 | goto unlock; | ||
1413 | } | ||
1414 | } | ||
1415 | 1405 | ||
1416 | /* | 1406 | /* |
1417 | * If firmware name changed, frees firmware. As free_firmware will | 1407 | * If firmware name changed, frees firmware. As free_firmware will |
@@ -1426,10 +1416,15 @@ static int xc2028_set_config(struct dvb_frontend *fe, void *priv_cfg) | |||
1426 | 1416 | ||
1427 | if (priv->state == XC2028_NO_FIRMWARE) { | 1417 | if (priv->state == XC2028_NO_FIRMWARE) { |
1428 | if (!firmware_name[0]) | 1418 | if (!firmware_name[0]) |
1429 | priv->fname = priv->ctrl.fname; | 1419 | priv->fname = kstrdup(p->fname, GFP_KERNEL); |
1430 | else | 1420 | else |
1431 | priv->fname = firmware_name; | 1421 | priv->fname = firmware_name; |
1432 | 1422 | ||
1423 | if (!priv->fname) { | ||
1424 | rc = -ENOMEM; | ||
1425 | goto unlock; | ||
1426 | } | ||
1427 | |||
1433 | rc = request_firmware_nowait(THIS_MODULE, 1, | 1428 | rc = request_firmware_nowait(THIS_MODULE, 1, |
1434 | priv->fname, | 1429 | priv->fname, |
1435 | priv->i2c_props.adap->dev.parent, | 1430 | priv->i2c_props.adap->dev.parent, |
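
The tuner-xc2028 changes above pin down ownership of the firmware-name string: the driver keeps a single pointer, duplicates the config-supplied name with kstrdup() only when the firmware_name module parameter is unset, and free_firmware() frees the string only when it does not point at the module parameter (which was never allocated). A userspace sketch of that rule, with strdup() standing in for kstrdup():

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static char firmware_name[30];          /* module-parameter-style static buffer */
    static char *fname;                     /* the single owning pointer */

    static void free_firmware_demo(void)
    {
            if (fname != firmware_name)     /* only free what we duplicated */
                    free(fname);
            fname = NULL;
    }

    static int set_config_demo(const char *cfg_name)
    {
            if (!firmware_name[0])          /* no module parameter: take a private copy */
                    fname = strdup(cfg_name);
            else
                    fname = firmware_name;  /* static storage, must never be freed */

            return fname ? 0 : -1;
    }

    int main(void)
    {
            set_config_demo("xc3028-v27.fw");
            printf("using %s\n", fname);
            free_firmware_demo();

            strcpy(firmware_name, "xc3028-override.fw");
            set_config_demo("ignored.fw");
            printf("using %s\n", fname);
            free_firmware_demo();           /* no free(): fname points at the static buffer */
            return 0;
    }
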
diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c index 3228fd182a99..9ff243970e93 100644 --- a/drivers/mfd/intel-lpss-pci.c +++ b/drivers/mfd/intel-lpss-pci.c | |||
@@ -123,19 +123,6 @@ static const struct intel_lpss_platform_info apl_i2c_info = { | |||
123 | .properties = apl_i2c_properties, | 123 | .properties = apl_i2c_properties, |
124 | }; | 124 | }; |
125 | 125 | ||
126 | static const struct intel_lpss_platform_info kbl_info = { | ||
127 | .clk_rate = 120000000, | ||
128 | }; | ||
129 | |||
130 | static const struct intel_lpss_platform_info kbl_uart_info = { | ||
131 | .clk_rate = 120000000, | ||
132 | .clk_con_id = "baudclk", | ||
133 | }; | ||
134 | |||
135 | static const struct intel_lpss_platform_info kbl_i2c_info = { | ||
136 | .clk_rate = 133000000, | ||
137 | }; | ||
138 | |||
139 | static const struct pci_device_id intel_lpss_pci_ids[] = { | 126 | static const struct pci_device_id intel_lpss_pci_ids[] = { |
140 | /* BXT A-Step */ | 127 | /* BXT A-Step */ |
141 | { PCI_VDEVICE(INTEL, 0x0aac), (kernel_ulong_t)&bxt_i2c_info }, | 128 | { PCI_VDEVICE(INTEL, 0x0aac), (kernel_ulong_t)&bxt_i2c_info }, |
@@ -207,15 +194,15 @@ static const struct pci_device_id intel_lpss_pci_ids[] = { | |||
207 | { PCI_VDEVICE(INTEL, 0xa161), (kernel_ulong_t)&spt_i2c_info }, | 194 | { PCI_VDEVICE(INTEL, 0xa161), (kernel_ulong_t)&spt_i2c_info }, |
208 | { PCI_VDEVICE(INTEL, 0xa166), (kernel_ulong_t)&spt_uart_info }, | 195 | { PCI_VDEVICE(INTEL, 0xa166), (kernel_ulong_t)&spt_uart_info }, |
209 | /* KBL-H */ | 196 | /* KBL-H */ |
210 | { PCI_VDEVICE(INTEL, 0xa2a7), (kernel_ulong_t)&kbl_uart_info }, | 197 | { PCI_VDEVICE(INTEL, 0xa2a7), (kernel_ulong_t)&spt_uart_info }, |
211 | { PCI_VDEVICE(INTEL, 0xa2a8), (kernel_ulong_t)&kbl_uart_info }, | 198 | { PCI_VDEVICE(INTEL, 0xa2a8), (kernel_ulong_t)&spt_uart_info }, |
212 | { PCI_VDEVICE(INTEL, 0xa2a9), (kernel_ulong_t)&kbl_info }, | 199 | { PCI_VDEVICE(INTEL, 0xa2a9), (kernel_ulong_t)&spt_info }, |
213 | { PCI_VDEVICE(INTEL, 0xa2aa), (kernel_ulong_t)&kbl_info }, | 200 | { PCI_VDEVICE(INTEL, 0xa2aa), (kernel_ulong_t)&spt_info }, |
214 | { PCI_VDEVICE(INTEL, 0xa2e0), (kernel_ulong_t)&kbl_i2c_info }, | 201 | { PCI_VDEVICE(INTEL, 0xa2e0), (kernel_ulong_t)&spt_i2c_info }, |
215 | { PCI_VDEVICE(INTEL, 0xa2e1), (kernel_ulong_t)&kbl_i2c_info }, | 202 | { PCI_VDEVICE(INTEL, 0xa2e1), (kernel_ulong_t)&spt_i2c_info }, |
216 | { PCI_VDEVICE(INTEL, 0xa2e2), (kernel_ulong_t)&kbl_i2c_info }, | 203 | { PCI_VDEVICE(INTEL, 0xa2e2), (kernel_ulong_t)&spt_i2c_info }, |
217 | { PCI_VDEVICE(INTEL, 0xa2e3), (kernel_ulong_t)&kbl_i2c_info }, | 204 | { PCI_VDEVICE(INTEL, 0xa2e3), (kernel_ulong_t)&spt_i2c_info }, |
218 | { PCI_VDEVICE(INTEL, 0xa2e6), (kernel_ulong_t)&kbl_uart_info }, | 205 | { PCI_VDEVICE(INTEL, 0xa2e6), (kernel_ulong_t)&spt_uart_info }, |
219 | { } | 206 | { } |
220 | }; | 207 | }; |
221 | MODULE_DEVICE_TABLE(pci, intel_lpss_pci_ids); | 208 | MODULE_DEVICE_TABLE(pci, intel_lpss_pci_ids); |
diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c index 41b113875d64..70c646b0097d 100644 --- a/drivers/mfd/intel-lpss.c +++ b/drivers/mfd/intel-lpss.c | |||
@@ -502,9 +502,6 @@ int intel_lpss_suspend(struct device *dev) | |||
502 | for (i = 0; i < LPSS_PRIV_REG_COUNT; i++) | 502 | for (i = 0; i < LPSS_PRIV_REG_COUNT; i++) |
503 | lpss->priv_ctx[i] = readl(lpss->priv + i * 4); | 503 | lpss->priv_ctx[i] = readl(lpss->priv + i * 4); |
504 | 504 | ||
505 | /* Put the device into reset state */ | ||
506 | writel(0, lpss->priv + LPSS_PRIV_RESETS); | ||
507 | |||
508 | return 0; | 505 | return 0; |
509 | } | 506 | } |
510 | EXPORT_SYMBOL_GPL(intel_lpss_suspend); | 507 | EXPORT_SYMBOL_GPL(intel_lpss_suspend); |
diff --git a/drivers/mfd/intel_soc_pmic_bxtwc.c b/drivers/mfd/intel_soc_pmic_bxtwc.c index 43e54b7e908f..f9a8c5203873 100644 --- a/drivers/mfd/intel_soc_pmic_bxtwc.c +++ b/drivers/mfd/intel_soc_pmic_bxtwc.c | |||
@@ -86,6 +86,7 @@ enum bxtwc_irqs_level2 { | |||
86 | BXTWC_THRM2_IRQ, | 86 | BXTWC_THRM2_IRQ, |
87 | BXTWC_BCU_IRQ, | 87 | BXTWC_BCU_IRQ, |
88 | BXTWC_ADC_IRQ, | 88 | BXTWC_ADC_IRQ, |
89 | BXTWC_USBC_IRQ, | ||
89 | BXTWC_CHGR0_IRQ, | 90 | BXTWC_CHGR0_IRQ, |
90 | BXTWC_CHGR1_IRQ, | 91 | BXTWC_CHGR1_IRQ, |
91 | BXTWC_GPIO0_IRQ, | 92 | BXTWC_GPIO0_IRQ, |
@@ -111,7 +112,8 @@ static const struct regmap_irq bxtwc_regmap_irqs_level2[] = { | |||
111 | REGMAP_IRQ_REG(BXTWC_THRM2_IRQ, 2, 0xff), | 112 | REGMAP_IRQ_REG(BXTWC_THRM2_IRQ, 2, 0xff), |
112 | REGMAP_IRQ_REG(BXTWC_BCU_IRQ, 3, 0x1f), | 113 | REGMAP_IRQ_REG(BXTWC_BCU_IRQ, 3, 0x1f), |
113 | REGMAP_IRQ_REG(BXTWC_ADC_IRQ, 4, 0xff), | 114 | REGMAP_IRQ_REG(BXTWC_ADC_IRQ, 4, 0xff), |
114 | REGMAP_IRQ_REG(BXTWC_CHGR0_IRQ, 5, 0x3f), | 115 | REGMAP_IRQ_REG(BXTWC_USBC_IRQ, 5, BIT(5)), |
116 | REGMAP_IRQ_REG(BXTWC_CHGR0_IRQ, 5, 0x1f), | ||
115 | REGMAP_IRQ_REG(BXTWC_CHGR1_IRQ, 6, 0x1f), | 117 | REGMAP_IRQ_REG(BXTWC_CHGR1_IRQ, 6, 0x1f), |
116 | REGMAP_IRQ_REG(BXTWC_GPIO0_IRQ, 7, 0xff), | 118 | REGMAP_IRQ_REG(BXTWC_GPIO0_IRQ, 7, 0xff), |
117 | REGMAP_IRQ_REG(BXTWC_GPIO1_IRQ, 8, 0x3f), | 119 | REGMAP_IRQ_REG(BXTWC_GPIO1_IRQ, 8, 0x3f), |
@@ -146,7 +148,7 @@ static struct resource adc_resources[] = { | |||
146 | }; | 148 | }; |
147 | 149 | ||
148 | static struct resource usbc_resources[] = { | 150 | static struct resource usbc_resources[] = { |
149 | DEFINE_RES_IRQ_NAMED(BXTWC_CHGR0_IRQ, "USBC"), | 151 | DEFINE_RES_IRQ(BXTWC_USBC_IRQ), |
150 | }; | 152 | }; |
151 | 153 | ||
152 | static struct resource charger_resources[] = { | 154 | static struct resource charger_resources[] = { |
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c index 3ac486a597f3..c57e407020f1 100644 --- a/drivers/mfd/mfd-core.c +++ b/drivers/mfd/mfd-core.c | |||
@@ -399,6 +399,8 @@ int mfd_clone_cell(const char *cell, const char **clones, size_t n_clones) | |||
399 | clones[i]); | 399 | clones[i]); |
400 | } | 400 | } |
401 | 401 | ||
402 | put_device(dev); | ||
403 | |||
402 | return 0; | 404 | return 0; |
403 | } | 405 | } |
404 | EXPORT_SYMBOL(mfd_clone_cell); | 406 | EXPORT_SYMBOL(mfd_clone_cell); |
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c index cfdae8a3d779..b0c7bcdaf5df 100644 --- a/drivers/mfd/stmpe.c +++ b/drivers/mfd/stmpe.c | |||
@@ -851,6 +851,8 @@ static int stmpe_reset(struct stmpe *stmpe) | |||
851 | if (ret < 0) | 851 | if (ret < 0) |
852 | return ret; | 852 | return ret; |
853 | 853 | ||
854 | msleep(10); | ||
855 | |||
854 | timeout = jiffies + msecs_to_jiffies(100); | 856 | timeout = jiffies + msecs_to_jiffies(100); |
855 | while (time_before(jiffies, timeout)) { | 857 | while (time_before(jiffies, timeout)) { |
856 | ret = __stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_SYS_CTRL]); | 858 | ret = __stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_SYS_CTRL]); |
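
The stmpe_reset() fix inserts a settle delay in front of the existing poll loop: the controller needs a moment after the soft-reset bit is written before the status register reads back anything meaningful, and the loop then polls until the bit clears or the 100 ms deadline expires. A userspace sketch of the delay-then-poll-with-timeout shape (fake_read_reset_bit() is an invented device model, and the timeout here is a coarse one-second stand-in):

    #include <stdio.h>
    #include <time.h>

    static int reads;

    /* invented device model: the reset bit stays set for the first few reads */
    static int fake_read_reset_bit(void)
    {
            return ++reads < 3;
    }

    static void sleep_ms(long ms)
    {
            struct timespec ts = { ms / 1000, (ms % 1000) * 1000000L };

            nanosleep(&ts, NULL);
    }

    int main(void)
    {
            time_t deadline;

            sleep_ms(10);                           /* let the device settle first */

            deadline = time(NULL) + 1;              /* coarse stand-in for the 100 ms timeout */
            while (time(NULL) < deadline) {
                    if (!fake_read_reset_bit()) {
                            printf("reset completed\n");
                            return 0;
                    }
                    sleep_ms(10);                   /* poll interval */
            }
            printf("reset timed out\n");
            return 1;
    }
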
diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c index 2f2225e845ef..b93fe4c4957a 100644 --- a/drivers/mfd/syscon.c +++ b/drivers/mfd/syscon.c | |||
@@ -73,8 +73,10 @@ static struct syscon *of_syscon_register(struct device_node *np) | |||
73 | /* Parse the device's DT node for an endianness specification */ | 73 | /* Parse the device's DT node for an endianness specification */ |
74 | if (of_property_read_bool(np, "big-endian")) | 74 | if (of_property_read_bool(np, "big-endian")) |
75 | syscon_config.val_format_endian = REGMAP_ENDIAN_BIG; | 75 | syscon_config.val_format_endian = REGMAP_ENDIAN_BIG; |
76 | else if (of_property_read_bool(np, "little-endian")) | 76 | else if (of_property_read_bool(np, "little-endian")) |
77 | syscon_config.val_format_endian = REGMAP_ENDIAN_LITTLE; | 77 | syscon_config.val_format_endian = REGMAP_ENDIAN_LITTLE; |
78 | else if (of_property_read_bool(np, "native-endian")) | ||
79 | syscon_config.val_format_endian = REGMAP_ENDIAN_NATIVE; | ||
78 | 80 | ||
79 | /* | 81 | /* |
80 | * search for reg-io-width property in DT. If it is not provided, | 82 | * search for reg-io-width property in DT. If it is not provided, |
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c index 7eec619a6023..8588dbad3301 100644 --- a/drivers/mfd/wm8994-core.c +++ b/drivers/mfd/wm8994-core.c | |||
@@ -393,8 +393,13 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq) | |||
393 | BUG(); | 393 | BUG(); |
394 | goto err; | 394 | goto err; |
395 | } | 395 | } |
396 | 396 | ||
397 | ret = devm_regulator_bulk_get(wm8994->dev, wm8994->num_supplies, | 397 | /* |
398 | * Can't use devres helper here as some of the supplies are provided by | ||
399 | * wm8994->dev's children (regulators) and those regulators are | ||
400 | * unregistered by the devres core before the supplies are freed. | ||
401 | */ | ||
402 | ret = regulator_bulk_get(wm8994->dev, wm8994->num_supplies, | ||
398 | wm8994->supplies); | 403 | wm8994->supplies); |
399 | if (ret != 0) { | 404 | if (ret != 0) { |
400 | dev_err(wm8994->dev, "Failed to get supplies: %d\n", ret); | 405 | dev_err(wm8994->dev, "Failed to get supplies: %d\n", ret); |
@@ -405,7 +410,7 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq) | |||
405 | wm8994->supplies); | 410 | wm8994->supplies); |
406 | if (ret != 0) { | 411 | if (ret != 0) { |
407 | dev_err(wm8994->dev, "Failed to enable supplies: %d\n", ret); | 412 | dev_err(wm8994->dev, "Failed to enable supplies: %d\n", ret); |
408 | goto err; | 413 | goto err_regulator_free; |
409 | } | 414 | } |
410 | 415 | ||
411 | ret = wm8994_reg_read(wm8994, WM8994_SOFTWARE_RESET); | 416 | ret = wm8994_reg_read(wm8994, WM8994_SOFTWARE_RESET); |
@@ -596,6 +601,8 @@ err_irq: | |||
596 | err_enable: | 601 | err_enable: |
597 | regulator_bulk_disable(wm8994->num_supplies, | 602 | regulator_bulk_disable(wm8994->num_supplies, |
598 | wm8994->supplies); | 603 | wm8994->supplies); |
604 | err_regulator_free: | ||
605 | regulator_bulk_free(wm8994->num_supplies, wm8994->supplies); | ||
599 | err: | 606 | err: |
600 | mfd_remove_devices(wm8994->dev); | 607 | mfd_remove_devices(wm8994->dev); |
601 | return ret; | 608 | return ret; |
@@ -604,10 +611,11 @@ err: | |||
604 | static void wm8994_device_exit(struct wm8994 *wm8994) | 611 | static void wm8994_device_exit(struct wm8994 *wm8994) |
605 | { | 612 | { |
606 | pm_runtime_disable(wm8994->dev); | 613 | pm_runtime_disable(wm8994->dev); |
607 | mfd_remove_devices(wm8994->dev); | ||
608 | wm8994_irq_exit(wm8994); | 614 | wm8994_irq_exit(wm8994); |
609 | regulator_bulk_disable(wm8994->num_supplies, | 615 | regulator_bulk_disable(wm8994->num_supplies, |
610 | wm8994->supplies); | 616 | wm8994->supplies); |
617 | regulator_bulk_free(wm8994->num_supplies, wm8994->supplies); | ||
618 | mfd_remove_devices(wm8994->dev); | ||
611 | } | 619 | } |
612 | 620 | ||
613 | static const struct of_device_id wm8994_of_match[] = { | 621 | static const struct of_device_id wm8994_of_match[] = { |
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 50a674be6655..df478ae72e23 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c | |||
@@ -1058,6 +1058,7 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data) | |||
1058 | spin_unlock_irqrestore(&host->irq_lock, irqflags); | 1058 | spin_unlock_irqrestore(&host->irq_lock, irqflags); |
1059 | 1059 | ||
1060 | if (host->dma_ops->start(host, sg_len)) { | 1060 | if (host->dma_ops->start(host, sg_len)) { |
1061 | host->dma_ops->stop(host); | ||
1061 | /* We can't do DMA, try PIO for this one */ | 1062 | /* We can't do DMA, try PIO for this one */ |
1062 | dev_dbg(host->dev, | 1063 | dev_dbg(host->dev, |
1063 | "%s: fall back to PIO mode for current transfer\n", | 1064 | "%s: fall back to PIO mode for current transfer\n", |
diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index fb71c866eacc..1bb11e4a9fe5 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c | |||
@@ -66,6 +66,20 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host, | |||
66 | return ret; | 66 | return ret; |
67 | } | 67 | } |
68 | } | 68 | } |
69 | /* | ||
70 | * The DAT[3:0] line signal levels and the CMD line signal level are | ||
71 | * not compatible with standard SDHC register. The line signal levels | ||
72 | * DAT[7:0] are at bits 31:24 and the command line signal level is at | ||
73 | * bit 23. All other bits are the same as in the standard SDHC | ||
74 | * register. | ||
75 | */ | ||
76 | if (spec_reg == SDHCI_PRESENT_STATE) { | ||
77 | ret = value & 0x000fffff; | ||
78 | ret |= (value >> 4) & SDHCI_DATA_LVL_MASK; | ||
79 | ret |= (value << 1) & SDHCI_CMD_LVL; | ||
80 | return ret; | ||
81 | } | ||
82 | |||
69 | ret = value; | 83 | ret = value; |
70 | return ret; | 84 | return ret; |
71 | } | 85 | } |
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index 766df17fb7eb..2570455b219a 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h | |||
@@ -73,6 +73,7 @@ | |||
73 | #define SDHCI_DATA_LVL_MASK 0x00F00000 | 73 | #define SDHCI_DATA_LVL_MASK 0x00F00000 |
74 | #define SDHCI_DATA_LVL_SHIFT 20 | 74 | #define SDHCI_DATA_LVL_SHIFT 20 |
75 | #define SDHCI_DATA_0_LVL_MASK 0x00100000 | 75 | #define SDHCI_DATA_0_LVL_MASK 0x00100000 |
76 | #define SDHCI_CMD_LVL 0x01000000 | ||
76 | 77 | ||
77 | #define SDHCI_HOST_CONTROL 0x28 | 78 | #define SDHCI_HOST_CONTROL 0x28 |
78 | #define SDHCI_CTRL_LED 0x01 | 79 | #define SDHCI_CTRL_LED 0x01 |
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c index 3eb7430dffbf..f8ff25c8ee2e 100644 --- a/drivers/net/can/sja1000/plx_pci.c +++ b/drivers/net/can/sja1000/plx_pci.c | |||
@@ -142,6 +142,9 @@ struct plx_pci_card { | |||
142 | #define CTI_PCI_VENDOR_ID 0x12c4 | 142 | #define CTI_PCI_VENDOR_ID 0x12c4 |
143 | #define CTI_PCI_DEVICE_ID_CRG001 0x0900 | 143 | #define CTI_PCI_DEVICE_ID_CRG001 0x0900 |
144 | 144 | ||
145 | #define MOXA_PCI_VENDOR_ID 0x1393 | ||
146 | #define MOXA_PCI_DEVICE_ID 0x0100 | ||
147 | |||
145 | static void plx_pci_reset_common(struct pci_dev *pdev); | 148 | static void plx_pci_reset_common(struct pci_dev *pdev); |
146 | static void plx9056_pci_reset_common(struct pci_dev *pdev); | 149 | static void plx9056_pci_reset_common(struct pci_dev *pdev); |
147 | static void plx_pci_reset_marathon_pci(struct pci_dev *pdev); | 150 | static void plx_pci_reset_marathon_pci(struct pci_dev *pdev); |
@@ -258,6 +261,14 @@ static struct plx_pci_card_info plx_pci_card_info_elcus = { | |||
258 | /* based on PLX9030 */ | 261 | /* based on PLX9030 */ |
259 | }; | 262 | }; |
260 | 263 | ||
264 | static struct plx_pci_card_info plx_pci_card_info_moxa = { | ||
265 | "MOXA", 2, | ||
266 | PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR, | ||
267 | {0, 0x00, 0x00}, { {0, 0x00, 0x80}, {1, 0x00, 0x80} }, | ||
268 | &plx_pci_reset_common | ||
269 | /* based on PLX9052 */ | ||
270 | }; | ||
271 | |||
261 | static const struct pci_device_id plx_pci_tbl[] = { | 272 | static const struct pci_device_id plx_pci_tbl[] = { |
262 | { | 273 | { |
263 | /* Adlink PCI-7841/cPCI-7841 */ | 274 | /* Adlink PCI-7841/cPCI-7841 */ |
@@ -357,6 +368,13 @@ static const struct pci_device_id plx_pci_tbl[] = { | |||
357 | 0, 0, | 368 | 0, 0, |
358 | (kernel_ulong_t)&plx_pci_card_info_elcus | 369 | (kernel_ulong_t)&plx_pci_card_info_elcus |
359 | }, | 370 | }, |
371 | { | ||
372 | /* moxa */ | ||
373 | MOXA_PCI_VENDOR_ID, MOXA_PCI_DEVICE_ID, | ||
374 | PCI_ANY_ID, PCI_ANY_ID, | ||
375 | 0, 0, | ||
376 | (kernel_ulong_t)&plx_pci_card_info_moxa | ||
377 | }, | ||
360 | { 0,} | 378 | { 0,} |
361 | }; | 379 | }; |
362 | MODULE_DEVICE_TABLE(pci, plx_pci_tbl); | 380 | MODULE_DEVICE_TABLE(pci, plx_pci_tbl); |
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 7717b19dc806..947adda3397d 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c | |||
@@ -962,9 +962,10 @@ static void b53_vlan_add(struct dsa_switch *ds, int port, | |||
962 | 962 | ||
963 | vl->members |= BIT(port) | BIT(cpu_port); | 963 | vl->members |= BIT(port) | BIT(cpu_port); |
964 | if (untagged) | 964 | if (untagged) |
965 | vl->untag |= BIT(port) | BIT(cpu_port); | 965 | vl->untag |= BIT(port); |
966 | else | 966 | else |
967 | vl->untag &= ~(BIT(port) | BIT(cpu_port)); | 967 | vl->untag &= ~BIT(port); |
968 | vl->untag &= ~BIT(cpu_port); | ||
968 | 969 | ||
969 | b53_set_vlan_entry(dev, vid, vl); | 970 | b53_set_vlan_entry(dev, vid, vl); |
970 | b53_fast_age_vlan(dev, vid); | 971 | b53_fast_age_vlan(dev, vid); |
@@ -973,8 +974,6 @@ static void b53_vlan_add(struct dsa_switch *ds, int port, | |||
973 | if (pvid) { | 974 | if (pvid) { |
974 | b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), | 975 | b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), |
975 | vlan->vid_end); | 976 | vlan->vid_end); |
976 | b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(cpu_port), | ||
977 | vlan->vid_end); | ||
978 | b53_fast_age_vlan(dev, vid); | 977 | b53_fast_age_vlan(dev, vid); |
979 | } | 978 | } |
980 | } | 979 | } |
@@ -984,7 +983,6 @@ static int b53_vlan_del(struct dsa_switch *ds, int port, | |||
984 | { | 983 | { |
985 | struct b53_device *dev = ds->priv; | 984 | struct b53_device *dev = ds->priv; |
986 | bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; | 985 | bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; |
987 | unsigned int cpu_port = dev->cpu_port; | ||
988 | struct b53_vlan *vl; | 986 | struct b53_vlan *vl; |
989 | u16 vid; | 987 | u16 vid; |
990 | u16 pvid; | 988 | u16 pvid; |
@@ -997,8 +995,6 @@ static int b53_vlan_del(struct dsa_switch *ds, int port, | |||
997 | b53_get_vlan_entry(dev, vid, vl); | 995 | b53_get_vlan_entry(dev, vid, vl); |
998 | 996 | ||
999 | vl->members &= ~BIT(port); | 997 | vl->members &= ~BIT(port); |
1000 | if ((vl->members & BIT(cpu_port)) == BIT(cpu_port)) | ||
1001 | vl->members = 0; | ||
1002 | 998 | ||
1003 | if (pvid == vid) { | 999 | if (pvid == vid) { |
1004 | if (is5325(dev) || is5365(dev)) | 1000 | if (is5325(dev) || is5365(dev)) |
@@ -1007,18 +1003,14 @@ static int b53_vlan_del(struct dsa_switch *ds, int port, | |||
1007 | pvid = 0; | 1003 | pvid = 0; |
1008 | } | 1004 | } |
1009 | 1005 | ||
1010 | if (untagged) { | 1006 | if (untagged) |
1011 | vl->untag &= ~(BIT(port)); | 1007 | vl->untag &= ~(BIT(port)); |
1012 | if ((vl->untag & BIT(cpu_port)) == BIT(cpu_port)) | ||
1013 | vl->untag = 0; | ||
1014 | } | ||
1015 | 1008 | ||
1016 | b53_set_vlan_entry(dev, vid, vl); | 1009 | b53_set_vlan_entry(dev, vid, vl); |
1017 | b53_fast_age_vlan(dev, vid); | 1010 | b53_fast_age_vlan(dev, vid); |
1018 | } | 1011 | } |
1019 | 1012 | ||
1020 | b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid); | 1013 | b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid); |
1021 | b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(cpu_port), pvid); | ||
1022 | b53_fast_age_vlan(dev, pvid); | 1014 | b53_fast_age_vlan(dev, pvid); |
1023 | 1015 | ||
1024 | return 0; | 1016 | return 0; |
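As a quick illustration of the VLAN bitmap handling after this change (standalone sketch; the port numbers are assumed for the demo, not taken from the driver): the CPU port stays a tagged member, and only the requested port's untag bit is touched.

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))

    int main(void)
    {
        uint32_t members = 0, untag = 0;
        int port = 2, cpu_port = 8;     /* example port numbers */
        int untagged = 1;               /* BRIDGE_VLAN_INFO_UNTAGGED requested */

        members |= BIT(port) | BIT(cpu_port);
        if (untagged)
            untag |= BIT(port);
        else
            untag &= ~BIT(port);
        untag &= ~BIT(cpu_port);        /* CPU port always keeps its tag */

        printf("members=0x%03x untag=0x%03x\n", (unsigned)members, (unsigned)untag); /* 0x104 / 0x004 */
        return 0;
    }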
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index e3ee27ce13dd..9ec33b51a0ed 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c | |||
@@ -588,6 +588,7 @@ static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port, | |||
588 | struct phy_device *phydev) | 588 | struct phy_device *phydev) |
589 | { | 589 | { |
590 | struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); | 590 | struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); |
591 | struct ethtool_eee *p = &priv->port_sts[port].eee; | ||
591 | u32 id_mode_dis = 0, port_mode; | 592 | u32 id_mode_dis = 0, port_mode; |
592 | const char *str = NULL; | 593 | const char *str = NULL; |
593 | u32 reg; | 594 | u32 reg; |
@@ -662,6 +663,9 @@ force_link: | |||
662 | reg |= DUPLX_MODE; | 663 | reg |= DUPLX_MODE; |
663 | 664 | ||
664 | core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port)); | 665 | core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port)); |
666 | |||
667 | if (!phydev->is_pseudo_fixed_link) | ||
668 | p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev); | ||
665 | } | 669 | } |
666 | 670 | ||
667 | static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port, | 671 | static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port, |
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index c481f104a8fe..5390ae89136c 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | |||
@@ -204,17 +204,6 @@ static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring) | |||
204 | return num_msgs; | 204 | return num_msgs; |
205 | } | 205 | } |
206 | 206 | ||
207 | static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring) | ||
208 | { | ||
209 | u32 data = 0x7777; | ||
210 | |||
211 | xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e); | ||
212 | xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data); | ||
213 | xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data << 16); | ||
214 | xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x40); | ||
215 | xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x80); | ||
216 | } | ||
217 | |||
218 | void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring, | 207 | void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring, |
219 | struct xgene_enet_pdata *pdata, | 208 | struct xgene_enet_pdata *pdata, |
220 | enum xgene_enet_err_code status) | 209 | enum xgene_enet_err_code status) |
@@ -929,5 +918,4 @@ struct xgene_ring_ops xgene_ring1_ops = { | |||
929 | .clear = xgene_enet_clear_ring, | 918 | .clear = xgene_enet_clear_ring, |
930 | .wr_cmd = xgene_enet_wr_cmd, | 919 | .wr_cmd = xgene_enet_wr_cmd, |
931 | .len = xgene_enet_ring_len, | 920 | .len = xgene_enet_ring_len, |
932 | .coalesce = xgene_enet_setup_coalescing, | ||
933 | }; | 921 | }; |
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h index 8456337a237d..06e598c8bc16 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h | |||
@@ -55,8 +55,10 @@ enum xgene_enet_rm { | |||
55 | #define PREFETCH_BUF_EN BIT(21) | 55 | #define PREFETCH_BUF_EN BIT(21) |
56 | #define CSR_RING_ID_BUF 0x000c | 56 | #define CSR_RING_ID_BUF 0x000c |
57 | #define CSR_PBM_COAL 0x0014 | 57 | #define CSR_PBM_COAL 0x0014 |
58 | #define CSR_PBM_CTICK0 0x0018 | ||
58 | #define CSR_PBM_CTICK1 0x001c | 59 | #define CSR_PBM_CTICK1 0x001c |
59 | #define CSR_PBM_CTICK2 0x0020 | 60 | #define CSR_PBM_CTICK2 0x0020 |
61 | #define CSR_PBM_CTICK3 0x0024 | ||
60 | #define CSR_THRESHOLD0_SET1 0x0030 | 62 | #define CSR_THRESHOLD0_SET1 0x0030 |
61 | #define CSR_THRESHOLD1_SET1 0x0034 | 63 | #define CSR_THRESHOLD1_SET1 0x0034 |
62 | #define CSR_RING_NE_INT_MODE 0x017c | 64 | #define CSR_RING_NE_INT_MODE 0x017c |
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 429f18fc5503..8158d4698734 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c | |||
@@ -1188,7 +1188,8 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev) | |||
1188 | tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring); | 1188 | tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring); |
1189 | } | 1189 | } |
1190 | 1190 | ||
1191 | pdata->ring_ops->coalesce(pdata->tx_ring[0]); | 1191 | if (pdata->ring_ops->coalesce) |
1192 | pdata->ring_ops->coalesce(pdata->tx_ring[0]); | ||
1192 | pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128; | 1193 | pdata->tx_qcnt_hi = pdata->tx_ring[0]->slots - 128; |
1193 | 1194 | ||
1194 | return 0; | 1195 | return 0; |
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c index 2b76732add5d..af51dd5844ce 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_ring2.c | |||
@@ -30,7 +30,7 @@ static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring) | |||
30 | ring_cfg[0] |= SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK); | 30 | ring_cfg[0] |= SET_VAL(X2_INTLINE, ring->id & RING_BUFNUM_MASK); |
31 | ring_cfg[3] |= SET_BIT(X2_DEQINTEN); | 31 | ring_cfg[3] |= SET_BIT(X2_DEQINTEN); |
32 | } | 32 | } |
33 | ring_cfg[0] |= SET_VAL(X2_CFGCRID, 1); | 33 | ring_cfg[0] |= SET_VAL(X2_CFGCRID, 2); |
34 | 34 | ||
35 | addr >>= 8; | 35 | addr >>= 8; |
36 | ring_cfg[2] |= QCOHERENT | SET_VAL(RINGADDRL, addr); | 36 | ring_cfg[2] |= QCOHERENT | SET_VAL(RINGADDRL, addr); |
@@ -192,13 +192,15 @@ static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring) | |||
192 | 192 | ||
193 | static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring) | 193 | static void xgene_enet_setup_coalescing(struct xgene_enet_desc_ring *ring) |
194 | { | 194 | { |
195 | u32 data = 0x7777; | 195 | u32 data = 0x77777777; |
196 | 196 | ||
197 | xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e); | 197 | xgene_enet_ring_wr32(ring, CSR_PBM_COAL, 0x8e); |
198 | xgene_enet_ring_wr32(ring, CSR_PBM_CTICK0, data); | ||
198 | xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data); | 199 | xgene_enet_ring_wr32(ring, CSR_PBM_CTICK1, data); |
199 | xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data << 16); | 200 | xgene_enet_ring_wr32(ring, CSR_PBM_CTICK2, data); |
200 | xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x40); | 201 | xgene_enet_ring_wr32(ring, CSR_PBM_CTICK3, data); |
201 | xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x80); | 202 | xgene_enet_ring_wr32(ring, CSR_THRESHOLD0_SET1, 0x08); |
203 | xgene_enet_ring_wr32(ring, CSR_THRESHOLD1_SET1, 0x10); | ||
202 | } | 204 | } |
203 | 205 | ||
204 | struct xgene_ring_ops xgene_ring2_ops = { | 206 | struct xgene_ring_ops xgene_ring2_ops = { |
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index b0da9693f28a..be865b4dada2 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c | |||
@@ -460,7 +460,7 @@ static void arc_emac_set_rx_mode(struct net_device *ndev) | |||
460 | if (ndev->flags & IFF_ALLMULTI) { | 460 | if (ndev->flags & IFF_ALLMULTI) { |
461 | arc_reg_set(priv, R_LAFL, ~0); | 461 | arc_reg_set(priv, R_LAFL, ~0); |
462 | arc_reg_set(priv, R_LAFH, ~0); | 462 | arc_reg_set(priv, R_LAFH, ~0); |
463 | } else { | 463 | } else if (ndev->flags & IFF_MULTICAST) { |
464 | struct netdev_hw_addr *ha; | 464 | struct netdev_hw_addr *ha; |
465 | unsigned int filter[2] = { 0, 0 }; | 465 | unsigned int filter[2] = { 0, 0 }; |
466 | int bit; | 466 | int bit; |
@@ -472,6 +472,9 @@ static void arc_emac_set_rx_mode(struct net_device *ndev) | |||
472 | 472 | ||
473 | arc_reg_set(priv, R_LAFL, filter[0]); | 473 | arc_reg_set(priv, R_LAFL, filter[0]); |
474 | arc_reg_set(priv, R_LAFH, filter[1]); | 474 | arc_reg_set(priv, R_LAFH, filter[1]); |
475 | } else { | ||
476 | arc_reg_set(priv, R_LAFL, 0); | ||
477 | arc_reg_set(priv, R_LAFH, 0); | ||
475 | } | 478 | } |
476 | } | 479 | } |
477 | } | 480 | } |
@@ -764,8 +767,6 @@ int arc_emac_probe(struct net_device *ndev, int interface) | |||
764 | ndev->netdev_ops = &arc_emac_netdev_ops; | 767 | ndev->netdev_ops = &arc_emac_netdev_ops; |
765 | ndev->ethtool_ops = &arc_emac_ethtool_ops; | 768 | ndev->ethtool_ops = &arc_emac_ethtool_ops; |
766 | ndev->watchdog_timeo = TX_TIMEOUT; | 769 | ndev->watchdog_timeo = TX_TIMEOUT; |
767 | /* FIXME :: no multicast support yet */ | ||
768 | ndev->flags &= ~IFF_MULTICAST; | ||
769 | 770 | ||
770 | priv = netdev_priv(ndev); | 771 | priv = netdev_priv(ndev); |
771 | priv->dev = dev; | 772 | priv->dev = dev; |
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 31ca204b38d2..49f4cafe5438 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c | |||
@@ -307,6 +307,10 @@ static void bgmac_dma_rx_enable(struct bgmac *bgmac, | |||
307 | u32 ctl; | 307 | u32 ctl; |
308 | 308 | ||
309 | ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL); | 309 | ctl = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL); |
310 | |||
311 | /* preserve ONLY bits 16-17 from current hardware value */ | ||
312 | ctl &= BGMAC_DMA_RX_ADDREXT_MASK; | ||
313 | |||
310 | if (bgmac->feature_flags & BGMAC_FEAT_RX_MASK_SETUP) { | 314 | if (bgmac->feature_flags & BGMAC_FEAT_RX_MASK_SETUP) { |
311 | ctl &= ~BGMAC_DMA_RX_BL_MASK; | 315 | ctl &= ~BGMAC_DMA_RX_BL_MASK; |
312 | ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT; | 316 | ctl |= BGMAC_DMA_RX_BL_128 << BGMAC_DMA_RX_BL_SHIFT; |
@@ -317,7 +321,6 @@ static void bgmac_dma_rx_enable(struct bgmac *bgmac, | |||
317 | ctl &= ~BGMAC_DMA_RX_PT_MASK; | 321 | ctl &= ~BGMAC_DMA_RX_PT_MASK; |
318 | ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT; | 322 | ctl |= BGMAC_DMA_RX_PT_1 << BGMAC_DMA_RX_PT_SHIFT; |
319 | } | 323 | } |
320 | ctl &= BGMAC_DMA_RX_ADDREXT_MASK; | ||
321 | ctl |= BGMAC_DMA_RX_ENABLE; | 324 | ctl |= BGMAC_DMA_RX_ENABLE; |
322 | ctl |= BGMAC_DMA_RX_PARITY_DISABLE; | 325 | ctl |= BGMAC_DMA_RX_PARITY_DISABLE; |
323 | ctl |= BGMAC_DMA_RX_OVERFLOW_CONT; | 326 | ctl |= BGMAC_DMA_RX_OVERFLOW_CONT; |
@@ -1046,9 +1049,9 @@ static void bgmac_enable(struct bgmac *bgmac) | |||
1046 | 1049 | ||
1047 | mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >> | 1050 | mode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) & BGMAC_DS_MM_MASK) >> |
1048 | BGMAC_DS_MM_SHIFT; | 1051 | BGMAC_DS_MM_SHIFT; |
1049 | if (!(bgmac->feature_flags & BGMAC_FEAT_CLKCTLST) || mode != 0) | 1052 | if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST || mode != 0) |
1050 | bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT); | 1053 | bgmac_set(bgmac, BCMA_CLKCTLST, BCMA_CLKCTLST_FORCEHT); |
1051 | if (bgmac->feature_flags & BGMAC_FEAT_CLKCTLST && mode == 2) | 1054 | if (!(bgmac->feature_flags & BGMAC_FEAT_CLKCTLST) && mode == 2) |
1052 | bgmac_cco_ctl_maskset(bgmac, 1, ~0, | 1055 | bgmac_cco_ctl_maskset(bgmac, 1, ~0, |
1053 | BGMAC_CHIPCTL_1_RXC_DLL_BYPASS); | 1056 | BGMAC_CHIPCTL_1_RXC_DLL_BYPASS); |
1054 | 1057 | ||
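For reference, a standalone sketch (not part of the patch) of what hoisting the ADDREXT mask to the top of bgmac_dma_rx_enable() achieves: every other field is rebuilt from zero instead of inheriting stale hardware state. The 0x00030000 mask and the enable-bit value are assumptions based on the "bits 16-17" comment, used here only for the demo.

    #include <stdint.h>
    #include <stdio.h>

    #define BGMAC_DMA_RX_ADDREXT_MASK 0x00030000u /* assumed: bits 16-17 */
    #define BGMAC_DMA_RX_ENABLE       0x00000001u /* assumed value, demo only */

    int main(void)
    {
        uint32_t ctl = 0xdeadbeefu;           /* whatever the register held before */

        ctl &= BGMAC_DMA_RX_ADDREXT_MASK;     /* keep only the address-extension bits */
        ctl |= BGMAC_DMA_RX_ENABLE;           /* then rebuild the rest of the word */

        printf("0x%08x\n", (unsigned)ctl);    /* 0x00020001 for this input */
        return 0;
    }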
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index b3791b394715..1f7034d739b0 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c | |||
@@ -49,6 +49,7 @@ | |||
49 | #include <linux/firmware.h> | 49 | #include <linux/firmware.h> |
50 | #include <linux/log2.h> | 50 | #include <linux/log2.h> |
51 | #include <linux/aer.h> | 51 | #include <linux/aer.h> |
52 | #include <linux/crash_dump.h> | ||
52 | 53 | ||
53 | #if IS_ENABLED(CONFIG_CNIC) | 54 | #if IS_ENABLED(CONFIG_CNIC) |
54 | #define BCM_CNIC 1 | 55 | #define BCM_CNIC 1 |
@@ -4764,15 +4765,16 @@ bnx2_setup_msix_tbl(struct bnx2 *bp) | |||
4764 | BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR); | 4765 | BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR); |
4765 | } | 4766 | } |
4766 | 4767 | ||
4767 | static int | 4768 | static void |
4768 | bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) | 4769 | bnx2_wait_dma_complete(struct bnx2 *bp) |
4769 | { | 4770 | { |
4770 | u32 val; | 4771 | u32 val; |
4771 | int i, rc = 0; | 4772 | int i; |
4772 | u8 old_port; | ||
4773 | 4773 | ||
4774 | /* Wait for the current PCI transaction to complete before | 4774 | /* |
4775 | * issuing a reset. */ | 4775 | * Wait for the current PCI transaction to complete before |
4776 | * issuing a reset. | ||
4777 | */ | ||
4776 | if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) || | 4778 | if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) || |
4777 | (BNX2_CHIP(bp) == BNX2_CHIP_5708)) { | 4779 | (BNX2_CHIP(bp) == BNX2_CHIP_5708)) { |
4778 | BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS, | 4780 | BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS, |
@@ -4796,6 +4798,21 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) | |||
4796 | } | 4798 | } |
4797 | } | 4799 | } |
4798 | 4800 | ||
4801 | return; | ||
4802 | } | ||
4803 | |||
4804 | |||
4805 | static int | ||
4806 | bnx2_reset_chip(struct bnx2 *bp, u32 reset_code) | ||
4807 | { | ||
4808 | u32 val; | ||
4809 | int i, rc = 0; | ||
4810 | u8 old_port; | ||
4811 | |||
4812 | /* Wait for the current PCI transaction to complete before | ||
4813 | * issuing a reset. */ | ||
4814 | bnx2_wait_dma_complete(bp); | ||
4815 | |||
4799 | /* Wait for the firmware to tell us it is ok to issue a reset. */ | 4816 | /* Wait for the firmware to tell us it is ok to issue a reset. */ |
4800 | bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1); | 4817 | bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1); |
4801 | 4818 | ||
@@ -6361,6 +6378,10 @@ bnx2_open(struct net_device *dev) | |||
6361 | struct bnx2 *bp = netdev_priv(dev); | 6378 | struct bnx2 *bp = netdev_priv(dev); |
6362 | int rc; | 6379 | int rc; |
6363 | 6380 | ||
6381 | rc = bnx2_request_firmware(bp); | ||
6382 | if (rc < 0) | ||
6383 | goto out; | ||
6384 | |||
6364 | netif_carrier_off(dev); | 6385 | netif_carrier_off(dev); |
6365 | 6386 | ||
6366 | bnx2_disable_int(bp); | 6387 | bnx2_disable_int(bp); |
@@ -6429,6 +6450,7 @@ open_err: | |||
6429 | bnx2_free_irq(bp); | 6450 | bnx2_free_irq(bp); |
6430 | bnx2_free_mem(bp); | 6451 | bnx2_free_mem(bp); |
6431 | bnx2_del_napi(bp); | 6452 | bnx2_del_napi(bp); |
6453 | bnx2_release_firmware(bp); | ||
6432 | goto out; | 6454 | goto out; |
6433 | } | 6455 | } |
6434 | 6456 | ||
@@ -8575,12 +8597,15 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
8575 | 8597 | ||
8576 | pci_set_drvdata(pdev, dev); | 8598 | pci_set_drvdata(pdev, dev); |
8577 | 8599 | ||
8578 | rc = bnx2_request_firmware(bp); | 8600 | /* |
8579 | if (rc < 0) | 8601 | * In-flight DMA from 1st kernel could continue going in kdump kernel. |
8580 | goto error; | 8602 | * A new io-page table has been created before bnx2 does its reset at open stage. |
8581 | 8603 | * We have to wait for the in-flight DMA to complete to avoid it doing lookups |
8604 | * into the newly created io-page table. | ||
8605 | */ | ||
8606 | if (is_kdump_kernel()) | ||
8607 | bnx2_wait_dma_complete(bp); | ||
8582 | 8608 | ||
8583 | bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET); | ||
8584 | memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN); | 8609 | memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN); |
8585 | 8610 | ||
8586 | dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | | 8611 | dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | |
@@ -8613,7 +8638,6 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
8613 | return 0; | 8638 | return 0; |
8614 | 8639 | ||
8615 | error: | 8640 | error: |
8616 | bnx2_release_firmware(bp); | ||
8617 | pci_iounmap(pdev, bp->regview); | 8641 | pci_iounmap(pdev, bp->regview); |
8618 | pci_release_regions(pdev); | 8642 | pci_release_regions(pdev); |
8619 | pci_disable_device(pdev); | 8643 | pci_disable_device(pdev); |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index a9f9f3738022..ee1a803aa11a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c | |||
@@ -1811,6 +1811,9 @@ static int bnxt_busy_poll(struct napi_struct *napi) | |||
1811 | if (atomic_read(&bp->intr_sem) != 0) | 1811 | if (atomic_read(&bp->intr_sem) != 0) |
1812 | return LL_FLUSH_FAILED; | 1812 | return LL_FLUSH_FAILED; |
1813 | 1813 | ||
1814 | if (!bp->link_info.link_up) | ||
1815 | return LL_FLUSH_FAILED; | ||
1816 | |||
1814 | if (!bnxt_lock_poll(bnapi)) | 1817 | if (!bnxt_lock_poll(bnapi)) |
1815 | return LL_FLUSH_BUSY; | 1818 | return LL_FLUSH_BUSY; |
1816 | 1819 | ||
@@ -3210,11 +3213,17 @@ static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port, | |||
3210 | goto err_out; | 3213 | goto err_out; |
3211 | } | 3214 | } |
3212 | 3215 | ||
3213 | if (tunnel_type & TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN) | 3216 | switch (tunnel_type) { |
3217 | case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN: | ||
3214 | bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id; | 3218 | bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id; |
3215 | 3219 | break; | |
3216 | else if (tunnel_type & TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE) | 3220 | case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE: |
3217 | bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id; | 3221 | bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id; |
3222 | break; | ||
3223 | default: | ||
3224 | break; | ||
3225 | } | ||
3226 | |||
3218 | err_out: | 3227 | err_out: |
3219 | mutex_unlock(&bp->hwrm_cmd_lock); | 3228 | mutex_unlock(&bp->hwrm_cmd_lock); |
3220 | return rc; | 3229 | return rc; |
@@ -4934,6 +4943,10 @@ static void bnxt_del_napi(struct bnxt *bp) | |||
4934 | napi_hash_del(&bnapi->napi); | 4943 | napi_hash_del(&bnapi->napi); |
4935 | netif_napi_del(&bnapi->napi); | 4944 | netif_napi_del(&bnapi->napi); |
4936 | } | 4945 | } |
4946 | /* We called napi_hash_del() before netif_napi_del(), we need | ||
4947 | * to respect an RCU grace period before freeing napi structures. | ||
4948 | */ | ||
4949 | synchronize_net(); | ||
4937 | } | 4950 | } |
4938 | 4951 | ||
4939 | static void bnxt_init_napi(struct bnxt *bp) | 4952 | static void bnxt_init_napi(struct bnxt *bp) |
@@ -6309,6 +6322,7 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto, | |||
6309 | struct tc_to_netdev *ntc) | 6322 | struct tc_to_netdev *ntc) |
6310 | { | 6323 | { |
6311 | struct bnxt *bp = netdev_priv(dev); | 6324 | struct bnxt *bp = netdev_priv(dev); |
6325 | bool sh = false; | ||
6312 | u8 tc; | 6326 | u8 tc; |
6313 | 6327 | ||
6314 | if (ntc->type != TC_SETUP_MQPRIO) | 6328 | if (ntc->type != TC_SETUP_MQPRIO) |
@@ -6325,12 +6339,11 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto, | |||
6325 | if (netdev_get_num_tc(dev) == tc) | 6339 | if (netdev_get_num_tc(dev) == tc) |
6326 | return 0; | 6340 | return 0; |
6327 | 6341 | ||
6342 | if (bp->flags & BNXT_FLAG_SHARED_RINGS) | ||
6343 | sh = true; | ||
6344 | |||
6328 | if (tc) { | 6345 | if (tc) { |
6329 | int max_rx_rings, max_tx_rings, rc; | 6346 | int max_rx_rings, max_tx_rings, rc; |
6330 | bool sh = false; | ||
6331 | |||
6332 | if (bp->flags & BNXT_FLAG_SHARED_RINGS) | ||
6333 | sh = true; | ||
6334 | 6347 | ||
6335 | rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh); | 6348 | rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh); |
6336 | if (rc || bp->tx_nr_rings_per_tc * tc > max_tx_rings) | 6349 | if (rc || bp->tx_nr_rings_per_tc * tc > max_tx_rings) |
@@ -6348,7 +6361,8 @@ static int bnxt_setup_tc(struct net_device *dev, u32 handle, __be16 proto, | |||
6348 | bp->tx_nr_rings = bp->tx_nr_rings_per_tc; | 6361 | bp->tx_nr_rings = bp->tx_nr_rings_per_tc; |
6349 | netdev_reset_tc(dev); | 6362 | netdev_reset_tc(dev); |
6350 | } | 6363 | } |
6351 | bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings); | 6364 | bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : |
6365 | bp->tx_nr_rings + bp->rx_nr_rings; | ||
6352 | bp->num_stat_ctxs = bp->cp_nr_rings; | 6366 | bp->num_stat_ctxs = bp->cp_nr_rings; |
6353 | 6367 | ||
6354 | if (netif_running(bp->dev)) | 6368 | if (netif_running(bp->dev)) |
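A small worked example (not driver code) of the completion-ring count fix at the end of bnxt_setup_tc(): with shared rings the count is the larger of TX and RX, otherwise their sum.

    #include <stdio.h>

    static int cp_rings(int tx, int rx, int shared)
    {
        return shared ? (tx > rx ? tx : rx) : tx + rx;
    }

    int main(void)
    {
        /* example ring counts, not taken from real hardware */
        printf("shared:     %d\n", cp_rings(8, 8, 1)); /* 8  */
        printf("not shared: %d\n", cp_rings(8, 8, 0)); /* 16 */
        return 0;
    }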
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index ec6cd18842c3..60e2af8678bd 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | |||
@@ -774,8 +774,8 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf) | |||
774 | 774 | ||
775 | if (vf->flags & BNXT_VF_LINK_UP) { | 775 | if (vf->flags & BNXT_VF_LINK_UP) { |
776 | /* if physical link is down, force link up on VF */ | 776 | /* if physical link is down, force link up on VF */ |
777 | if (phy_qcfg_resp.link == | 777 | if (phy_qcfg_resp.link != |
778 | PORT_PHY_QCFG_RESP_LINK_NO_LINK) { | 778 | PORT_PHY_QCFG_RESP_LINK_LINK) { |
779 | phy_qcfg_resp.link = | 779 | phy_qcfg_resp.link = |
780 | PORT_PHY_QCFG_RESP_LINK_LINK; | 780 | PORT_PHY_QCFG_RESP_LINK_LINK; |
781 | phy_qcfg_resp.link_speed = cpu_to_le16( | 781 | phy_qcfg_resp.link_speed = cpu_to_le16( |
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index f9df4b5ae90e..f42f672b0e7e 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c | |||
@@ -177,6 +177,7 @@ bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb) | |||
177 | return 0; | 177 | return 0; |
178 | 178 | ||
179 | hw_cons = *(tcb->hw_consumer_index); | 179 | hw_cons = *(tcb->hw_consumer_index); |
180 | rmb(); | ||
180 | cons = tcb->consumer_index; | 181 | cons = tcb->consumer_index; |
181 | q_depth = tcb->q_depth; | 182 | q_depth = tcb->q_depth; |
182 | 183 | ||
@@ -3094,7 +3095,7 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
3094 | BNA_QE_INDX_INC(prod, q_depth); | 3095 | BNA_QE_INDX_INC(prod, q_depth); |
3095 | tcb->producer_index = prod; | 3096 | tcb->producer_index = prod; |
3096 | 3097 | ||
3097 | smp_mb(); | 3098 | wmb(); |
3098 | 3099 | ||
3099 | if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) | 3100 | if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) |
3100 | return NETDEV_TX_OK; | 3101 | return NETDEV_TX_OK; |
@@ -3102,7 +3103,6 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
3102 | skb_tx_timestamp(skb); | 3103 | skb_tx_timestamp(skb); |
3103 | 3104 | ||
3104 | bna_txq_prod_indx_doorbell(tcb); | 3105 | bna_txq_prod_indx_doorbell(tcb); |
3105 | smp_mb(); | ||
3106 | 3106 | ||
3107 | return NETDEV_TX_OK; | 3107 | return NETDEV_TX_OK; |
3108 | } | 3108 | } |
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index b32444a3ed79..533653bd7aec 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c | |||
@@ -2673,6 +2673,12 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2673 | lp->skb_length = skb->len; | 2673 | lp->skb_length = skb->len; |
2674 | lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len, | 2674 | lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len, |
2675 | DMA_TO_DEVICE); | 2675 | DMA_TO_DEVICE); |
2676 | if (dma_mapping_error(NULL, lp->skb_physaddr)) { | ||
2677 | dev_kfree_skb_any(skb); | ||
2678 | dev->stats.tx_dropped++; | ||
2679 | netdev_err(dev, "%s: DMA mapping error\n", __func__); | ||
2680 | return NETDEV_TX_OK; | ||
2681 | } | ||
2676 | 2682 | ||
2677 | /* Set address of the data in the Transmit Address register */ | 2683 | /* Set address of the data in the Transmit Address register */ |
2678 | macb_writel(lp, TAR, lp->skb_physaddr); | 2684 | macb_writel(lp, TAR, lp->skb_physaddr); |
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h index 30426109711c..86bd93ce2ea3 100644 --- a/drivers/net/ethernet/cavium/thunder/nic.h +++ b/drivers/net/ethernet/cavium/thunder/nic.h | |||
@@ -47,7 +47,7 @@ | |||
47 | 47 | ||
48 | /* Min/Max packet size */ | 48 | /* Min/Max packet size */ |
49 | #define NIC_HW_MIN_FRS 64 | 49 | #define NIC_HW_MIN_FRS 64 |
50 | #define NIC_HW_MAX_FRS 9200 /* 9216 max packet including FCS */ | 50 | #define NIC_HW_MAX_FRS 9190 /* Excluding L2 header and FCS */ |
51 | 51 | ||
52 | /* Max pkinds */ | 52 | /* Max pkinds */ |
53 | #define NIC_MAX_PKIND 16 | 53 | #define NIC_MAX_PKIND 16 |
@@ -178,11 +178,11 @@ enum tx_stats_reg_offset { | |||
178 | 178 | ||
179 | struct nicvf_hw_stats { | 179 | struct nicvf_hw_stats { |
180 | u64 rx_bytes; | 180 | u64 rx_bytes; |
181 | u64 rx_frames; | ||
181 | u64 rx_ucast_frames; | 182 | u64 rx_ucast_frames; |
182 | u64 rx_bcast_frames; | 183 | u64 rx_bcast_frames; |
183 | u64 rx_mcast_frames; | 184 | u64 rx_mcast_frames; |
184 | u64 rx_fcs_errors; | 185 | u64 rx_drops; |
185 | u64 rx_l2_errors; | ||
186 | u64 rx_drop_red; | 186 | u64 rx_drop_red; |
187 | u64 rx_drop_red_bytes; | 187 | u64 rx_drop_red_bytes; |
188 | u64 rx_drop_overrun; | 188 | u64 rx_drop_overrun; |
@@ -191,6 +191,19 @@ struct nicvf_hw_stats { | |||
191 | u64 rx_drop_mcast; | 191 | u64 rx_drop_mcast; |
192 | u64 rx_drop_l3_bcast; | 192 | u64 rx_drop_l3_bcast; |
193 | u64 rx_drop_l3_mcast; | 193 | u64 rx_drop_l3_mcast; |
194 | u64 rx_fcs_errors; | ||
195 | u64 rx_l2_errors; | ||
196 | |||
197 | u64 tx_bytes; | ||
198 | u64 tx_frames; | ||
199 | u64 tx_ucast_frames; | ||
200 | u64 tx_bcast_frames; | ||
201 | u64 tx_mcast_frames; | ||
202 | u64 tx_drops; | ||
203 | }; | ||
204 | |||
205 | struct nicvf_drv_stats { | ||
206 | /* CQE Rx errs */ | ||
194 | u64 rx_bgx_truncated_pkts; | 207 | u64 rx_bgx_truncated_pkts; |
195 | u64 rx_jabber_errs; | 208 | u64 rx_jabber_errs; |
196 | u64 rx_fcs_errs; | 209 | u64 rx_fcs_errs; |
@@ -216,34 +229,30 @@ struct nicvf_hw_stats { | |||
216 | u64 rx_l4_pclp; | 229 | u64 rx_l4_pclp; |
217 | u64 rx_truncated_pkts; | 230 | u64 rx_truncated_pkts; |
218 | 231 | ||
219 | u64 tx_bytes_ok; | 232 | /* CQE Tx errs */ |
220 | u64 tx_ucast_frames_ok; | 233 | u64 tx_desc_fault; |
221 | u64 tx_bcast_frames_ok; | 234 | u64 tx_hdr_cons_err; |
222 | u64 tx_mcast_frames_ok; | 235 | u64 tx_subdesc_err; |
223 | u64 tx_drops; | 236 | u64 tx_max_size_exceeded; |
224 | }; | 237 | u64 tx_imm_size_oflow; |
225 | 238 | u64 tx_data_seq_err; | |
226 | struct nicvf_drv_stats { | 239 | u64 tx_mem_seq_err; |
227 | /* Rx */ | 240 | u64 tx_lock_viol; |
228 | u64 rx_frames_ok; | 241 | u64 tx_data_fault; |
229 | u64 rx_frames_64; | 242 | u64 tx_tstmp_conflict; |
230 | u64 rx_frames_127; | 243 | u64 tx_tstmp_timeout; |
231 | u64 rx_frames_255; | 244 | u64 tx_mem_fault; |
232 | u64 rx_frames_511; | 245 | u64 tx_csum_overlap; |
233 | u64 rx_frames_1023; | 246 | u64 tx_csum_overflow; |
234 | u64 rx_frames_1518; | 247 | |
235 | u64 rx_frames_jumbo; | 248 | /* driver debug stats */ |
236 | u64 rx_drops; | ||
237 | |||
238 | u64 rcv_buffer_alloc_failures; | 249 | u64 rcv_buffer_alloc_failures; |
239 | |||
240 | /* Tx */ | ||
241 | u64 tx_frames_ok; | ||
242 | u64 tx_drops; | ||
243 | u64 tx_tso; | 250 | u64 tx_tso; |
244 | u64 tx_timeout; | 251 | u64 tx_timeout; |
245 | u64 txq_stop; | 252 | u64 txq_stop; |
246 | u64 txq_wake; | 253 | u64 txq_wake; |
254 | |||
255 | struct u64_stats_sync syncp; | ||
247 | }; | 256 | }; |
248 | 257 | ||
249 | struct nicvf { | 258 | struct nicvf { |
@@ -282,7 +291,6 @@ struct nicvf { | |||
282 | 291 | ||
283 | u8 node; | 292 | u8 node; |
284 | u8 cpi_alg; | 293 | u8 cpi_alg; |
285 | u16 mtu; | ||
286 | bool link_up; | 294 | bool link_up; |
287 | u8 duplex; | 295 | u8 duplex; |
288 | u32 speed; | 296 | u32 speed; |
@@ -298,7 +306,7 @@ struct nicvf { | |||
298 | 306 | ||
299 | /* Stats */ | 307 | /* Stats */ |
300 | struct nicvf_hw_stats hw_stats; | 308 | struct nicvf_hw_stats hw_stats; |
301 | struct nicvf_drv_stats drv_stats; | 309 | struct nicvf_drv_stats __percpu *drv_stats; |
302 | struct bgx_stats bgx_stats; | 310 | struct bgx_stats bgx_stats; |
303 | 311 | ||
304 | /* MSI-X */ | 312 | /* MSI-X */ |
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c index 2bbf4cbf08b2..6677b96e1f3f 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/pci.h> | 11 | #include <linux/pci.h> |
12 | #include <linux/etherdevice.h> | 12 | #include <linux/etherdevice.h> |
13 | #include <linux/of.h> | 13 | #include <linux/of.h> |
14 | #include <linux/if_vlan.h> | ||
14 | 15 | ||
15 | #include "nic_reg.h" | 16 | #include "nic_reg.h" |
16 | #include "nic.h" | 17 | #include "nic.h" |
@@ -260,18 +261,31 @@ static void nic_get_bgx_stats(struct nicpf *nic, struct bgx_stats_msg *bgx) | |||
260 | /* Update hardware min/max frame size */ | 261 | /* Update hardware min/max frame size */ |
261 | static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf) | 262 | static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf) |
262 | { | 263 | { |
263 | if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS)) { | 264 | int bgx, lmac, lmac_cnt; |
264 | dev_err(&nic->pdev->dev, | 265 | u64 lmac_credits; |
265 | "Invalid MTU setting from VF%d rejected, should be between %d and %d\n", | 266 | |
266 | vf, NIC_HW_MIN_FRS, NIC_HW_MAX_FRS); | 267 | if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS)) |
267 | return 1; | 268 | return 1; |
268 | } | ||
269 | new_frs += ETH_HLEN; | ||
270 | if (new_frs <= nic->pkind.maxlen) | ||
271 | return 0; | ||
272 | 269 | ||
273 | nic->pkind.maxlen = new_frs; | 270 | bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); |
274 | nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *(u64 *)&nic->pkind); | 271 | lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]); |
272 | lmac += bgx * MAX_LMAC_PER_BGX; | ||
273 | |||
274 | new_frs += VLAN_ETH_HLEN + ETH_FCS_LEN + 4; | ||
275 | |||
276 | /* Update corresponding LMAC credits */ | ||
277 | lmac_cnt = bgx_get_lmac_count(nic->node, bgx); | ||
278 | lmac_credits = nic_reg_read(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8)); | ||
279 | lmac_credits &= ~(0xFFFFFULL << 12); | ||
280 | lmac_credits |= (((((48 * 1024) / lmac_cnt) - new_frs) / 16) << 12); | ||
281 | nic_reg_write(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8), lmac_credits); | ||
282 | |||
283 | /* Enforce MTU in HW | ||
284 | * This config is supported only from 88xx pass 2.0 onwards. | ||
285 | */ | ||
286 | if (!pass1_silicon(nic->pdev)) | ||
287 | nic_reg_write(nic, | ||
288 | NIC_PF_LMAC_0_7_CFG2 + (lmac * 8), new_frs); | ||
275 | return 0; | 289 | return 0; |
276 | } | 290 | } |
277 | 291 | ||
@@ -464,7 +478,7 @@ static int nic_init_hw(struct nicpf *nic) | |||
464 | 478 | ||
465 | /* PKIND configuration */ | 479 | /* PKIND configuration */ |
466 | nic->pkind.minlen = 0; | 480 | nic->pkind.minlen = 0; |
467 | nic->pkind.maxlen = NIC_HW_MAX_FRS + ETH_HLEN; | 481 | nic->pkind.maxlen = NIC_HW_MAX_FRS + VLAN_ETH_HLEN + ETH_FCS_LEN + 4; |
468 | nic->pkind.lenerr_en = 1; | 482 | nic->pkind.lenerr_en = 1; |
469 | nic->pkind.rx_hdr = 0; | 483 | nic->pkind.rx_hdr = 0; |
470 | nic->pkind.hdr_sl = 0; | 484 | nic->pkind.hdr_sl = 0; |
@@ -837,6 +851,7 @@ static int nic_reset_stat_counters(struct nicpf *nic, | |||
837 | nic_reg_write(nic, reg_addr, 0); | 851 | nic_reg_write(nic, reg_addr, 0); |
838 | } | 852 | } |
839 | } | 853 | } |
854 | |||
840 | return 0; | 855 | return 0; |
841 | } | 856 | } |
842 | 857 | ||
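A worked example of the credit arithmetic in the nic_update_hw_frs() hunk above (standalone; the 48 KB pool, divide-by-16 and shift-by-12 packing come from the hunk, while the LMAC count of 4 and the 9190-byte MTU are assumed for the demo).

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int lmac_cnt = 4;                         /* assumed LMACs on this BGX */
        int new_frs  = 9190 + 18 + 4 + 4;         /* MTU + VLAN_ETH_HLEN + ETH_FCS_LEN + 4 */
        uint64_t credits = ((48 * 1024) / lmac_cnt - new_frs) / 16;

        printf("credit field = %llu, packed = 0x%llx\n",
               (unsigned long long)credits,          /* 192     */
               (unsigned long long)(credits << 12)); /* 0xc0000 */
        return 0;
    }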
diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h index edf779f5a227..80d46337cf29 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_reg.h +++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h | |||
@@ -106,6 +106,7 @@ | |||
106 | #define NIC_PF_MPI_0_2047_CFG (0x210000) | 106 | #define NIC_PF_MPI_0_2047_CFG (0x210000) |
107 | #define NIC_PF_RSSI_0_4097_RQ (0x220000) | 107 | #define NIC_PF_RSSI_0_4097_RQ (0x220000) |
108 | #define NIC_PF_LMAC_0_7_CFG (0x240000) | 108 | #define NIC_PF_LMAC_0_7_CFG (0x240000) |
109 | #define NIC_PF_LMAC_0_7_CFG2 (0x240100) | ||
109 | #define NIC_PF_LMAC_0_7_SW_XOFF (0x242000) | 110 | #define NIC_PF_LMAC_0_7_SW_XOFF (0x242000) |
110 | #define NIC_PF_LMAC_0_7_CREDIT (0x244000) | 111 | #define NIC_PF_LMAC_0_7_CREDIT (0x244000) |
111 | #define NIC_PF_CHAN_0_255_TX_CFG (0x400000) | 112 | #define NIC_PF_CHAN_0_255_TX_CFG (0x400000) |
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c index ad4fddb55421..432bf6be57cb 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c | |||
@@ -36,11 +36,11 @@ struct nicvf_stat { | |||
36 | 36 | ||
37 | static const struct nicvf_stat nicvf_hw_stats[] = { | 37 | static const struct nicvf_stat nicvf_hw_stats[] = { |
38 | NICVF_HW_STAT(rx_bytes), | 38 | NICVF_HW_STAT(rx_bytes), |
39 | NICVF_HW_STAT(rx_frames), | ||
39 | NICVF_HW_STAT(rx_ucast_frames), | 40 | NICVF_HW_STAT(rx_ucast_frames), |
40 | NICVF_HW_STAT(rx_bcast_frames), | 41 | NICVF_HW_STAT(rx_bcast_frames), |
41 | NICVF_HW_STAT(rx_mcast_frames), | 42 | NICVF_HW_STAT(rx_mcast_frames), |
42 | NICVF_HW_STAT(rx_fcs_errors), | 43 | NICVF_HW_STAT(rx_drops), |
43 | NICVF_HW_STAT(rx_l2_errors), | ||
44 | NICVF_HW_STAT(rx_drop_red), | 44 | NICVF_HW_STAT(rx_drop_red), |
45 | NICVF_HW_STAT(rx_drop_red_bytes), | 45 | NICVF_HW_STAT(rx_drop_red_bytes), |
46 | NICVF_HW_STAT(rx_drop_overrun), | 46 | NICVF_HW_STAT(rx_drop_overrun), |
@@ -49,50 +49,59 @@ static const struct nicvf_stat nicvf_hw_stats[] = { | |||
49 | NICVF_HW_STAT(rx_drop_mcast), | 49 | NICVF_HW_STAT(rx_drop_mcast), |
50 | NICVF_HW_STAT(rx_drop_l3_bcast), | 50 | NICVF_HW_STAT(rx_drop_l3_bcast), |
51 | NICVF_HW_STAT(rx_drop_l3_mcast), | 51 | NICVF_HW_STAT(rx_drop_l3_mcast), |
52 | NICVF_HW_STAT(rx_bgx_truncated_pkts), | 52 | NICVF_HW_STAT(rx_fcs_errors), |
53 | NICVF_HW_STAT(rx_jabber_errs), | 53 | NICVF_HW_STAT(rx_l2_errors), |
54 | NICVF_HW_STAT(rx_fcs_errs), | 54 | NICVF_HW_STAT(tx_bytes), |
55 | NICVF_HW_STAT(rx_bgx_errs), | 55 | NICVF_HW_STAT(tx_frames), |
56 | NICVF_HW_STAT(rx_prel2_errs), | 56 | NICVF_HW_STAT(tx_ucast_frames), |
57 | NICVF_HW_STAT(rx_l2_hdr_malformed), | 57 | NICVF_HW_STAT(tx_bcast_frames), |
58 | NICVF_HW_STAT(rx_oversize), | 58 | NICVF_HW_STAT(tx_mcast_frames), |
59 | NICVF_HW_STAT(rx_undersize), | 59 | NICVF_HW_STAT(tx_drops), |
60 | NICVF_HW_STAT(rx_l2_len_mismatch), | ||
61 | NICVF_HW_STAT(rx_l2_pclp), | ||
62 | NICVF_HW_STAT(rx_ip_ver_errs), | ||
63 | NICVF_HW_STAT(rx_ip_csum_errs), | ||
64 | NICVF_HW_STAT(rx_ip_hdr_malformed), | ||
65 | NICVF_HW_STAT(rx_ip_payload_malformed), | ||
66 | NICVF_HW_STAT(rx_ip_ttl_errs), | ||
67 | NICVF_HW_STAT(rx_l3_pclp), | ||
68 | NICVF_HW_STAT(rx_l4_malformed), | ||
69 | NICVF_HW_STAT(rx_l4_csum_errs), | ||
70 | NICVF_HW_STAT(rx_udp_len_errs), | ||
71 | NICVF_HW_STAT(rx_l4_port_errs), | ||
72 | NICVF_HW_STAT(rx_tcp_flag_errs), | ||
73 | NICVF_HW_STAT(rx_tcp_offset_errs), | ||
74 | NICVF_HW_STAT(rx_l4_pclp), | ||
75 | NICVF_HW_STAT(rx_truncated_pkts), | ||
76 | NICVF_HW_STAT(tx_bytes_ok), | ||
77 | NICVF_HW_STAT(tx_ucast_frames_ok), | ||
78 | NICVF_HW_STAT(tx_bcast_frames_ok), | ||
79 | NICVF_HW_STAT(tx_mcast_frames_ok), | ||
80 | }; | 60 | }; |
81 | 61 | ||
82 | static const struct nicvf_stat nicvf_drv_stats[] = { | 62 | static const struct nicvf_stat nicvf_drv_stats[] = { |
83 | NICVF_DRV_STAT(rx_frames_ok), | 63 | NICVF_DRV_STAT(rx_bgx_truncated_pkts), |
84 | NICVF_DRV_STAT(rx_frames_64), | 64 | NICVF_DRV_STAT(rx_jabber_errs), |
85 | NICVF_DRV_STAT(rx_frames_127), | 65 | NICVF_DRV_STAT(rx_fcs_errs), |
86 | NICVF_DRV_STAT(rx_frames_255), | 66 | NICVF_DRV_STAT(rx_bgx_errs), |
87 | NICVF_DRV_STAT(rx_frames_511), | 67 | NICVF_DRV_STAT(rx_prel2_errs), |
88 | NICVF_DRV_STAT(rx_frames_1023), | 68 | NICVF_DRV_STAT(rx_l2_hdr_malformed), |
89 | NICVF_DRV_STAT(rx_frames_1518), | 69 | NICVF_DRV_STAT(rx_oversize), |
90 | NICVF_DRV_STAT(rx_frames_jumbo), | 70 | NICVF_DRV_STAT(rx_undersize), |
91 | NICVF_DRV_STAT(rx_drops), | 71 | NICVF_DRV_STAT(rx_l2_len_mismatch), |
72 | NICVF_DRV_STAT(rx_l2_pclp), | ||
73 | NICVF_DRV_STAT(rx_ip_ver_errs), | ||
74 | NICVF_DRV_STAT(rx_ip_csum_errs), | ||
75 | NICVF_DRV_STAT(rx_ip_hdr_malformed), | ||
76 | NICVF_DRV_STAT(rx_ip_payload_malformed), | ||
77 | NICVF_DRV_STAT(rx_ip_ttl_errs), | ||
78 | NICVF_DRV_STAT(rx_l3_pclp), | ||
79 | NICVF_DRV_STAT(rx_l4_malformed), | ||
80 | NICVF_DRV_STAT(rx_l4_csum_errs), | ||
81 | NICVF_DRV_STAT(rx_udp_len_errs), | ||
82 | NICVF_DRV_STAT(rx_l4_port_errs), | ||
83 | NICVF_DRV_STAT(rx_tcp_flag_errs), | ||
84 | NICVF_DRV_STAT(rx_tcp_offset_errs), | ||
85 | NICVF_DRV_STAT(rx_l4_pclp), | ||
86 | NICVF_DRV_STAT(rx_truncated_pkts), | ||
87 | |||
88 | NICVF_DRV_STAT(tx_desc_fault), | ||
89 | NICVF_DRV_STAT(tx_hdr_cons_err), | ||
90 | NICVF_DRV_STAT(tx_subdesc_err), | ||
91 | NICVF_DRV_STAT(tx_max_size_exceeded), | ||
92 | NICVF_DRV_STAT(tx_imm_size_oflow), | ||
93 | NICVF_DRV_STAT(tx_data_seq_err), | ||
94 | NICVF_DRV_STAT(tx_mem_seq_err), | ||
95 | NICVF_DRV_STAT(tx_lock_viol), | ||
96 | NICVF_DRV_STAT(tx_data_fault), | ||
97 | NICVF_DRV_STAT(tx_tstmp_conflict), | ||
98 | NICVF_DRV_STAT(tx_tstmp_timeout), | ||
99 | NICVF_DRV_STAT(tx_mem_fault), | ||
100 | NICVF_DRV_STAT(tx_csum_overlap), | ||
101 | NICVF_DRV_STAT(tx_csum_overflow), | ||
102 | |||
92 | NICVF_DRV_STAT(rcv_buffer_alloc_failures), | 103 | NICVF_DRV_STAT(rcv_buffer_alloc_failures), |
93 | NICVF_DRV_STAT(tx_frames_ok), | ||
94 | NICVF_DRV_STAT(tx_tso), | 104 | NICVF_DRV_STAT(tx_tso), |
95 | NICVF_DRV_STAT(tx_drops), | ||
96 | NICVF_DRV_STAT(tx_timeout), | 105 | NICVF_DRV_STAT(tx_timeout), |
97 | NICVF_DRV_STAT(txq_stop), | 106 | NICVF_DRV_STAT(txq_stop), |
98 | NICVF_DRV_STAT(txq_wake), | 107 | NICVF_DRV_STAT(txq_wake), |
@@ -278,8 +287,8 @@ static void nicvf_get_ethtool_stats(struct net_device *netdev, | |||
278 | struct ethtool_stats *stats, u64 *data) | 287 | struct ethtool_stats *stats, u64 *data) |
279 | { | 288 | { |
280 | struct nicvf *nic = netdev_priv(netdev); | 289 | struct nicvf *nic = netdev_priv(netdev); |
281 | int stat; | 290 | int stat, tmp_stats; |
282 | int sqs; | 291 | int sqs, cpu; |
283 | 292 | ||
284 | nicvf_update_stats(nic); | 293 | nicvf_update_stats(nic); |
285 | 294 | ||
@@ -289,9 +298,13 @@ static void nicvf_get_ethtool_stats(struct net_device *netdev, | |||
289 | for (stat = 0; stat < nicvf_n_hw_stats; stat++) | 298 | for (stat = 0; stat < nicvf_n_hw_stats; stat++) |
290 | *(data++) = ((u64 *)&nic->hw_stats) | 299 | *(data++) = ((u64 *)&nic->hw_stats) |
291 | [nicvf_hw_stats[stat].index]; | 300 | [nicvf_hw_stats[stat].index]; |
292 | for (stat = 0; stat < nicvf_n_drv_stats; stat++) | 301 | for (stat = 0; stat < nicvf_n_drv_stats; stat++) { |
293 | *(data++) = ((u64 *)&nic->drv_stats) | 302 | tmp_stats = 0; |
294 | [nicvf_drv_stats[stat].index]; | 303 | for_each_possible_cpu(cpu) |
304 | tmp_stats += ((u64 *)per_cpu_ptr(nic->drv_stats, cpu)) | ||
305 | [nicvf_drv_stats[stat].index]; | ||
306 | *(data++) = tmp_stats; | ||
307 | } | ||
295 | 308 | ||
296 | nicvf_get_qset_stats(nic, stats, &data); | 309 | nicvf_get_qset_stats(nic, stats, &data); |
297 | 310 | ||
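For illustration (userspace analogue, not driver code): the per-CPU counters introduced in this series are summed on read, which is what the new for_each_possible_cpu() loop in nicvf_get_ethtool_stats() does via per_cpu_ptr().

    #include <stdint.h>
    #include <stdio.h>

    #define NR_CPUS_DEMO 4 /* assumed CPU count for the demo */

    /* one counter slot per CPU; writers only ever touch their own slot */
    static uint64_t txq_stop_percpu[NR_CPUS_DEMO] = { 3, 0, 7, 1 };

    static uint64_t read_counter(const uint64_t *percpu, int ncpus)
    {
        uint64_t sum = 0;
        int cpu;

        for (cpu = 0; cpu < ncpus; cpu++)
            sum += percpu[cpu];
        return sum;
    }

    int main(void)
    {
        printf("txq_stop = %llu\n",
               (unsigned long long)read_counter(txq_stop_percpu, NR_CPUS_DEMO)); /* 11 */
        return 0;
    }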
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 45a13f718863..8a37012c9c89 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c | |||
@@ -69,25 +69,6 @@ static inline u8 nicvf_netdev_qidx(struct nicvf *nic, u8 qidx) | |||
69 | return qidx; | 69 | return qidx; |
70 | } | 70 | } |
71 | 71 | ||
72 | static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic, | ||
73 | struct sk_buff *skb) | ||
74 | { | ||
75 | if (skb->len <= 64) | ||
76 | nic->drv_stats.rx_frames_64++; | ||
77 | else if (skb->len <= 127) | ||
78 | nic->drv_stats.rx_frames_127++; | ||
79 | else if (skb->len <= 255) | ||
80 | nic->drv_stats.rx_frames_255++; | ||
81 | else if (skb->len <= 511) | ||
82 | nic->drv_stats.rx_frames_511++; | ||
83 | else if (skb->len <= 1023) | ||
84 | nic->drv_stats.rx_frames_1023++; | ||
85 | else if (skb->len <= 1518) | ||
86 | nic->drv_stats.rx_frames_1518++; | ||
87 | else | ||
88 | nic->drv_stats.rx_frames_jumbo++; | ||
89 | } | ||
90 | |||
91 | /* The Cavium ThunderX network controller can *only* be found in SoCs | 72 | /* The Cavium ThunderX network controller can *only* be found in SoCs |
92 | * containing the ThunderX ARM64 CPU implementation. All accesses to the device | 73 | * containing the ThunderX ARM64 CPU implementation. All accesses to the device |
93 | * registers on this platform are implicitly strongly ordered with respect | 74 | * registers on this platform are implicitly strongly ordered with respect |
@@ -492,9 +473,6 @@ int nicvf_set_real_num_queues(struct net_device *netdev, | |||
492 | static int nicvf_init_resources(struct nicvf *nic) | 473 | static int nicvf_init_resources(struct nicvf *nic) |
493 | { | 474 | { |
494 | int err; | 475 | int err; |
495 | union nic_mbx mbx = {}; | ||
496 | |||
497 | mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE; | ||
498 | 476 | ||
499 | /* Enable Qset */ | 477 | /* Enable Qset */ |
500 | nicvf_qset_config(nic, true); | 478 | nicvf_qset_config(nic, true); |
@@ -507,14 +485,10 @@ static int nicvf_init_resources(struct nicvf *nic) | |||
507 | return err; | 485 | return err; |
508 | } | 486 | } |
509 | 487 | ||
510 | /* Send VF config done msg to PF */ | ||
511 | nicvf_write_to_mbx(nic, &mbx); | ||
512 | |||
513 | return 0; | 488 | return 0; |
514 | } | 489 | } |
515 | 490 | ||
516 | static void nicvf_snd_pkt_handler(struct net_device *netdev, | 491 | static void nicvf_snd_pkt_handler(struct net_device *netdev, |
517 | struct cmp_queue *cq, | ||
518 | struct cqe_send_t *cqe_tx, | 492 | struct cqe_send_t *cqe_tx, |
519 | int cqe_type, int budget, | 493 | int cqe_type, int budget, |
520 | unsigned int *tx_pkts, unsigned int *tx_bytes) | 494 | unsigned int *tx_pkts, unsigned int *tx_bytes) |
@@ -536,7 +510,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev, | |||
536 | __func__, cqe_tx->sq_qs, cqe_tx->sq_idx, | 510 | __func__, cqe_tx->sq_qs, cqe_tx->sq_idx, |
537 | cqe_tx->sqe_ptr, hdr->subdesc_cnt); | 511 | cqe_tx->sqe_ptr, hdr->subdesc_cnt); |
538 | 512 | ||
539 | nicvf_check_cqe_tx_errs(nic, cq, cqe_tx); | 513 | nicvf_check_cqe_tx_errs(nic, cqe_tx); |
540 | skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr]; | 514 | skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr]; |
541 | if (skb) { | 515 | if (skb) { |
542 | /* Check for dummy descriptor used for HW TSO offload on 88xx */ | 516 | /* Check for dummy descriptor used for HW TSO offload on 88xx */ |
@@ -630,8 +604,6 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev, | |||
630 | return; | 604 | return; |
631 | } | 605 | } |
632 | 606 | ||
633 | nicvf_set_rx_frame_cnt(nic, skb); | ||
634 | |||
635 | nicvf_set_rxhash(netdev, cqe_rx, skb); | 607 | nicvf_set_rxhash(netdev, cqe_rx, skb); |
636 | 608 | ||
637 | skb_record_rx_queue(skb, rq_idx); | 609 | skb_record_rx_queue(skb, rq_idx); |
@@ -703,7 +675,7 @@ loop: | |||
703 | work_done++; | 675 | work_done++; |
704 | break; | 676 | break; |
705 | case CQE_TYPE_SEND: | 677 | case CQE_TYPE_SEND: |
706 | nicvf_snd_pkt_handler(netdev, cq, | 678 | nicvf_snd_pkt_handler(netdev, |
707 | (void *)cq_desc, CQE_TYPE_SEND, | 679 | (void *)cq_desc, CQE_TYPE_SEND, |
708 | budget, &tx_pkts, &tx_bytes); | 680 | budget, &tx_pkts, &tx_bytes); |
709 | tx_done++; | 681 | tx_done++; |
@@ -740,7 +712,7 @@ done: | |||
740 | nic = nic->pnicvf; | 712 | nic = nic->pnicvf; |
741 | if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) { | 713 | if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) { |
742 | netif_tx_start_queue(txq); | 714 | netif_tx_start_queue(txq); |
743 | nic->drv_stats.txq_wake++; | 715 | this_cpu_inc(nic->drv_stats->txq_wake); |
744 | if (netif_msg_tx_err(nic)) | 716 | if (netif_msg_tx_err(nic)) |
745 | netdev_warn(netdev, | 717 | netdev_warn(netdev, |
746 | "%s: Transmit queue wakeup SQ%d\n", | 718 | "%s: Transmit queue wakeup SQ%d\n", |
@@ -1084,7 +1056,7 @@ static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev) | |||
1084 | 1056 | ||
1085 | if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) { | 1057 | if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) { |
1086 | netif_tx_stop_queue(txq); | 1058 | netif_tx_stop_queue(txq); |
1087 | nic->drv_stats.txq_stop++; | 1059 | this_cpu_inc(nic->drv_stats->txq_stop); |
1088 | if (netif_msg_tx_err(nic)) | 1060 | if (netif_msg_tx_err(nic)) |
1089 | netdev_warn(netdev, | 1061 | netdev_warn(netdev, |
1090 | "%s: Transmit ring full, stopping SQ%d\n", | 1062 | "%s: Transmit ring full, stopping SQ%d\n", |
@@ -1189,14 +1161,24 @@ int nicvf_stop(struct net_device *netdev) | |||
1189 | return 0; | 1161 | return 0; |
1190 | } | 1162 | } |
1191 | 1163 | ||
1164 | static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu) | ||
1165 | { | ||
1166 | union nic_mbx mbx = {}; | ||
1167 | |||
1168 | mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS; | ||
1169 | mbx.frs.max_frs = mtu; | ||
1170 | mbx.frs.vf_id = nic->vf_id; | ||
1171 | |||
1172 | return nicvf_send_msg_to_pf(nic, &mbx); | ||
1173 | } | ||
1174 | |||
1192 | int nicvf_open(struct net_device *netdev) | 1175 | int nicvf_open(struct net_device *netdev) |
1193 | { | 1176 | { |
1194 | int err, qidx; | 1177 | int cpu, err, qidx; |
1195 | struct nicvf *nic = netdev_priv(netdev); | 1178 | struct nicvf *nic = netdev_priv(netdev); |
1196 | struct queue_set *qs = nic->qs; | 1179 | struct queue_set *qs = nic->qs; |
1197 | struct nicvf_cq_poll *cq_poll = NULL; | 1180 | struct nicvf_cq_poll *cq_poll = NULL; |
1198 | 1181 | union nic_mbx mbx = {}; | |
1199 | nic->mtu = netdev->mtu; | ||
1200 | 1182 | ||
1201 | netif_carrier_off(netdev); | 1183 | netif_carrier_off(netdev); |
1202 | 1184 | ||
@@ -1248,9 +1230,17 @@ int nicvf_open(struct net_device *netdev) | |||
1248 | if (nic->sqs_mode) | 1230 | if (nic->sqs_mode) |
1249 | nicvf_get_primary_vf_struct(nic); | 1231 | nicvf_get_primary_vf_struct(nic); |
1250 | 1232 | ||
1251 | /* Configure receive side scaling */ | 1233 | /* Configure receive side scaling and MTU */ |
1252 | if (!nic->sqs_mode) | 1234 | if (!nic->sqs_mode) { |
1253 | nicvf_rss_init(nic); | 1235 | nicvf_rss_init(nic); |
1236 | if (nicvf_update_hw_max_frs(nic, netdev->mtu)) | ||
1237 | goto cleanup; | ||
1238 | |||
1239 | /* Clear percpu stats */ | ||
1240 | for_each_possible_cpu(cpu) | ||
1241 | memset(per_cpu_ptr(nic->drv_stats, cpu), 0, | ||
1242 | sizeof(struct nicvf_drv_stats)); | ||
1243 | } | ||
1254 | 1244 | ||
1255 | err = nicvf_register_interrupts(nic); | 1245 | err = nicvf_register_interrupts(nic); |
1256 | if (err) | 1246 | if (err) |
@@ -1276,8 +1266,9 @@ int nicvf_open(struct net_device *netdev) | |||
1276 | for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) | 1266 | for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) |
1277 | nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx); | 1267 | nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx); |
1278 | 1268 | ||
1279 | nic->drv_stats.txq_stop = 0; | 1269 | /* Send VF config done msg to PF */ |
1280 | nic->drv_stats.txq_wake = 0; | 1270 | mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE; |
1271 | nicvf_write_to_mbx(nic, &mbx); | ||
1281 | 1272 | ||
1282 | return 0; | 1273 | return 0; |
1283 | cleanup: | 1274 | cleanup: |
@@ -1297,17 +1288,6 @@ napi_del: | |||
1297 | return err; | 1288 | return err; |
1298 | } | 1289 | } |
1299 | 1290 | ||
1300 | static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu) | ||
1301 | { | ||
1302 | union nic_mbx mbx = {}; | ||
1303 | |||
1304 | mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS; | ||
1305 | mbx.frs.max_frs = mtu; | ||
1306 | mbx.frs.vf_id = nic->vf_id; | ||
1307 | |||
1308 | return nicvf_send_msg_to_pf(nic, &mbx); | ||
1309 | } | ||
1310 | |||
1311 | static int nicvf_change_mtu(struct net_device *netdev, int new_mtu) | 1291 | static int nicvf_change_mtu(struct net_device *netdev, int new_mtu) |
1312 | { | 1292 | { |
1313 | struct nicvf *nic = netdev_priv(netdev); | 1293 | struct nicvf *nic = netdev_priv(netdev); |
@@ -1318,10 +1298,13 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu) | |||
1318 | if (new_mtu < NIC_HW_MIN_FRS) | 1298 | if (new_mtu < NIC_HW_MIN_FRS) |
1319 | return -EINVAL; | 1299 | return -EINVAL; |
1320 | 1300 | ||
1301 | netdev->mtu = new_mtu; | ||
1302 | |||
1303 | if (!netif_running(netdev)) | ||
1304 | return 0; | ||
1305 | |||
1321 | if (nicvf_update_hw_max_frs(nic, new_mtu)) | 1306 | if (nicvf_update_hw_max_frs(nic, new_mtu)) |
1322 | return -EINVAL; | 1307 | return -EINVAL; |
1323 | netdev->mtu = new_mtu; | ||
1324 | nic->mtu = new_mtu; | ||
1325 | 1308 | ||
1326 | return 0; | 1309 | return 0; |
1327 | } | 1310 | } |
@@ -1379,9 +1362,10 @@ void nicvf_update_lmac_stats(struct nicvf *nic) | |||
1379 | 1362 | ||
1380 | void nicvf_update_stats(struct nicvf *nic) | 1363 | void nicvf_update_stats(struct nicvf *nic) |
1381 | { | 1364 | { |
1382 | int qidx; | 1365 | int qidx, cpu; |
1366 | u64 tmp_stats = 0; | ||
1383 | struct nicvf_hw_stats *stats = &nic->hw_stats; | 1367 | struct nicvf_hw_stats *stats = &nic->hw_stats; |
1384 | struct nicvf_drv_stats *drv_stats = &nic->drv_stats; | 1368 | struct nicvf_drv_stats *drv_stats; |
1385 | struct queue_set *qs = nic->qs; | 1369 | struct queue_set *qs = nic->qs; |
1386 | 1370 | ||
1387 | #define GET_RX_STATS(reg) \ | 1371 | #define GET_RX_STATS(reg) \ |
@@ -1404,21 +1388,33 @@ void nicvf_update_stats(struct nicvf *nic) | |||
1404 | stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST); | 1388 | stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST); |
1405 | stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST); | 1389 | stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST); |
1406 | 1390 | ||
1407 | stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS); | 1391 | stats->tx_bytes = GET_TX_STATS(TX_OCTS); |
1408 | stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST); | 1392 | stats->tx_ucast_frames = GET_TX_STATS(TX_UCAST); |
1409 | stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST); | 1393 | stats->tx_bcast_frames = GET_TX_STATS(TX_BCAST); |
1410 | stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST); | 1394 | stats->tx_mcast_frames = GET_TX_STATS(TX_MCAST); |
1411 | stats->tx_drops = GET_TX_STATS(TX_DROP); | 1395 | stats->tx_drops = GET_TX_STATS(TX_DROP); |
1412 | 1396 | ||
1413 | drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok + | 1397 | /* On T88 pass 2.0, the dummy SQE added for TSO notification |
1414 | stats->tx_bcast_frames_ok + | 1398 | * via CQE has 'dont_send' set. Hence HW drops the pkt pointed |
1415 | stats->tx_mcast_frames_ok; | 1399 | * to by dummy SQE and results in tx_drops counter being |
1416 | drv_stats->rx_frames_ok = stats->rx_ucast_frames + | 1400 | * incremented. Subtracting it from tx_tso counter will give |
1417 | stats->rx_bcast_frames + | 1401 | * exact tx_drops counter. |
1418 | stats->rx_mcast_frames; | 1402 | */ |
1419 | drv_stats->rx_drops = stats->rx_drop_red + | 1403 | if (nic->t88 && nic->hw_tso) { |
1420 | stats->rx_drop_overrun; | 1404 | for_each_possible_cpu(cpu) { |
1421 | drv_stats->tx_drops = stats->tx_drops; | 1405 | drv_stats = per_cpu_ptr(nic->drv_stats, cpu); |
1406 | tmp_stats += drv_stats->tx_tso; | ||
1407 | } | ||
1408 | stats->tx_drops = tmp_stats - stats->tx_drops; | ||
1409 | } | ||
1410 | stats->tx_frames = stats->tx_ucast_frames + | ||
1411 | stats->tx_bcast_frames + | ||
1412 | stats->tx_mcast_frames; | ||
1413 | stats->rx_frames = stats->rx_ucast_frames + | ||
1414 | stats->rx_bcast_frames + | ||
1415 | stats->rx_mcast_frames; | ||
1416 | stats->rx_drops = stats->rx_drop_red + | ||
1417 | stats->rx_drop_overrun; | ||
1422 | 1418 | ||
1423 | /* Update RQ and SQ stats */ | 1419 | /* Update RQ and SQ stats */ |
1424 | for (qidx = 0; qidx < qs->rq_cnt; qidx++) | 1420 | for (qidx = 0; qidx < qs->rq_cnt; qidx++) |
@@ -1432,18 +1428,17 @@ static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev, | |||
1432 | { | 1428 | { |
1433 | struct nicvf *nic = netdev_priv(netdev); | 1429 | struct nicvf *nic = netdev_priv(netdev); |
1434 | struct nicvf_hw_stats *hw_stats = &nic->hw_stats; | 1430 | struct nicvf_hw_stats *hw_stats = &nic->hw_stats; |
1435 | struct nicvf_drv_stats *drv_stats = &nic->drv_stats; | ||
1436 | 1431 | ||
1437 | nicvf_update_stats(nic); | 1432 | nicvf_update_stats(nic); |
1438 | 1433 | ||
1439 | stats->rx_bytes = hw_stats->rx_bytes; | 1434 | stats->rx_bytes = hw_stats->rx_bytes; |
1440 | stats->rx_packets = drv_stats->rx_frames_ok; | 1435 | stats->rx_packets = hw_stats->rx_frames; |
1441 | stats->rx_dropped = drv_stats->rx_drops; | 1436 | stats->rx_dropped = hw_stats->rx_drops; |
1442 | stats->multicast = hw_stats->rx_mcast_frames; | 1437 | stats->multicast = hw_stats->rx_mcast_frames; |
1443 | 1438 | ||
1444 | stats->tx_bytes = hw_stats->tx_bytes_ok; | 1439 | stats->tx_bytes = hw_stats->tx_bytes; |
1445 | stats->tx_packets = drv_stats->tx_frames_ok; | 1440 | stats->tx_packets = hw_stats->tx_frames; |
1446 | stats->tx_dropped = drv_stats->tx_drops; | 1441 | stats->tx_dropped = hw_stats->tx_drops; |
1447 | 1442 | ||
1448 | return stats; | 1443 | return stats; |
1449 | } | 1444 | } |
@@ -1456,7 +1451,7 @@ static void nicvf_tx_timeout(struct net_device *dev) | |||
1456 | netdev_warn(dev, "%s: Transmit timed out, resetting\n", | 1451 | netdev_warn(dev, "%s: Transmit timed out, resetting\n", |
1457 | dev->name); | 1452 | dev->name); |
1458 | 1453 | ||
1459 | nic->drv_stats.tx_timeout++; | 1454 | this_cpu_inc(nic->drv_stats->tx_timeout); |
1460 | schedule_work(&nic->reset_task); | 1455 | schedule_work(&nic->reset_task); |
1461 | } | 1456 | } |
1462 | 1457 | ||
@@ -1590,6 +1585,12 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1590 | goto err_free_netdev; | 1585 | goto err_free_netdev; |
1591 | } | 1586 | } |
1592 | 1587 | ||
1588 | nic->drv_stats = netdev_alloc_pcpu_stats(struct nicvf_drv_stats); | ||
1589 | if (!nic->drv_stats) { | ||
1590 | err = -ENOMEM; | ||
1591 | goto err_free_netdev; | ||
1592 | } | ||
1593 | |||
1593 | err = nicvf_set_qset_resources(nic); | 1594 | err = nicvf_set_qset_resources(nic); |
1594 | if (err) | 1595 | if (err) |
1595 | goto err_free_netdev; | 1596 | goto err_free_netdev; |
@@ -1648,6 +1649,8 @@ err_unregister_interrupts: | |||
1648 | nicvf_unregister_interrupts(nic); | 1649 | nicvf_unregister_interrupts(nic); |
1649 | err_free_netdev: | 1650 | err_free_netdev: |
1650 | pci_set_drvdata(pdev, NULL); | 1651 | pci_set_drvdata(pdev, NULL); |
1652 | if (nic->drv_stats) | ||
1653 | free_percpu(nic->drv_stats); | ||
1651 | free_netdev(netdev); | 1654 | free_netdev(netdev); |
1652 | err_release_regions: | 1655 | err_release_regions: |
1653 | pci_release_regions(pdev); | 1656 | pci_release_regions(pdev); |
@@ -1675,6 +1678,8 @@ static void nicvf_remove(struct pci_dev *pdev) | |||
1675 | unregister_netdev(pnetdev); | 1678 | unregister_netdev(pnetdev); |
1676 | nicvf_unregister_interrupts(nic); | 1679 | nicvf_unregister_interrupts(nic); |
1677 | pci_set_drvdata(pdev, NULL); | 1680 | pci_set_drvdata(pdev, NULL); |
1681 | if (nic->drv_stats) | ||
1682 | free_percpu(nic->drv_stats); | ||
1678 | free_netdev(netdev); | 1683 | free_netdev(netdev); |
1679 | pci_release_regions(pdev); | 1684 | pci_release_regions(pdev); |
1680 | pci_disable_device(pdev); | 1685 | pci_disable_device(pdev); |
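
The nicvf_main.c changes above convert the VF driver's software counters from one shared struct to per-CPU storage: the stats block is allocated with netdev_alloc_pcpu_stats() in probe and released with free_percpu() on the error path and in remove, hot-path events are counted with this_cpu_inc(), and the per-CPU copies are folded together with for_each_possible_cpu()/per_cpu_ptr() when totals are reported (as done for tx_tso above). The following is a minimal sketch of that pattern, not the driver's code: struct, field and function names are illustrative, and plain alloc_percpu() is used for brevity where the driver uses netdev_alloc_pcpu_stats().

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/types.h>

/* Illustrative per-CPU counter block (loosely modeled on nicvf_drv_stats). */
struct demo_drv_stats {
	u64 tx_tso;
	u64 tx_timeout;
};

static struct demo_drv_stats __percpu *demo_stats;

static int demo_stats_init(void)
{
	demo_stats = alloc_percpu(struct demo_drv_stats);
	return demo_stats ? 0 : -ENOMEM;
}

/* Hot path: lock-free increment of this CPU's copy. */
static void demo_count_tso(void)
{
	this_cpu_inc(demo_stats->tx_tso);
}

/* Slow path (ethtool / ndo_get_stats64): fold every CPU's copy into a total. */
static u64 demo_total_tso(void)
{
	u64 total = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		total += per_cpu_ptr(demo_stats, cpu)->tx_tso;
	return total;
}

static void demo_stats_exit(void)
{
	free_percpu(demo_stats);
}
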
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c index a4fc50155881..747ef0882976 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c | |||
@@ -104,7 +104,8 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp, | |||
104 | nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, | 104 | nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, |
105 | order); | 105 | order); |
106 | if (!nic->rb_page) { | 106 | if (!nic->rb_page) { |
107 | nic->drv_stats.rcv_buffer_alloc_failures++; | 107 | this_cpu_inc(nic->pnicvf->drv_stats-> |
108 | rcv_buffer_alloc_failures); | ||
108 | return -ENOMEM; | 109 | return -ENOMEM; |
109 | } | 110 | } |
110 | nic->rb_page_offset = 0; | 111 | nic->rb_page_offset = 0; |
@@ -270,7 +271,8 @@ refill: | |||
270 | rbdr_idx, new_rb); | 271 | rbdr_idx, new_rb); |
271 | next_rbdr: | 272 | next_rbdr: |
272 | /* Re-enable RBDR interrupts only if buffer allocation succeeds */ | 273 | /* Re-enable RBDR interrupts only if buffer allocation succeeds */ |
273 | if (!nic->rb_alloc_fail && rbdr->enable) | 274 | if (!nic->rb_alloc_fail && rbdr->enable && |
275 | netif_running(nic->pnicvf->netdev)) | ||
274 | nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx); | 276 | nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx); |
275 | 277 | ||
276 | if (rbdr_idx) | 278 | if (rbdr_idx) |
@@ -361,6 +363,8 @@ static int nicvf_init_snd_queue(struct nicvf *nic, | |||
361 | 363 | ||
362 | static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq) | 364 | static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq) |
363 | { | 365 | { |
366 | struct sk_buff *skb; | ||
367 | |||
364 | if (!sq) | 368 | if (!sq) |
365 | return; | 369 | return; |
366 | if (!sq->dmem.base) | 370 | if (!sq->dmem.base) |
@@ -371,6 +375,15 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq) | |||
371 | sq->dmem.q_len * TSO_HEADER_SIZE, | 375 | sq->dmem.q_len * TSO_HEADER_SIZE, |
372 | sq->tso_hdrs, sq->tso_hdrs_phys); | 376 | sq->tso_hdrs, sq->tso_hdrs_phys); |
373 | 377 | ||
378 | /* Free pending skbs in the queue */ | ||
379 | smp_rmb(); | ||
380 | while (sq->head != sq->tail) { | ||
381 | skb = (struct sk_buff *)sq->skbuff[sq->head]; | ||
382 | if (skb) | ||
383 | dev_kfree_skb_any(skb); | ||
384 | sq->head++; | ||
385 | sq->head &= (sq->dmem.q_len - 1); | ||
386 | } | ||
374 | kfree(sq->skbuff); | 387 | kfree(sq->skbuff); |
375 | nicvf_free_q_desc_mem(nic, &sq->dmem); | 388 | nicvf_free_q_desc_mem(nic, &sq->dmem); |
376 | } | 389 | } |
@@ -483,9 +496,12 @@ static void nicvf_reset_rcv_queue_stats(struct nicvf *nic) | |||
483 | { | 496 | { |
484 | union nic_mbx mbx = {}; | 497 | union nic_mbx mbx = {}; |
485 | 498 | ||
486 | /* Reset all RXQ's stats */ | 499 | /* Reset all RQ/SQ and VF stats */ |
487 | mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER; | 500 | mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER; |
501 | mbx.reset_stat.rx_stat_mask = 0x3FFF; | ||
502 | mbx.reset_stat.tx_stat_mask = 0x1F; | ||
488 | mbx.reset_stat.rq_stat_mask = 0xFFFF; | 503 | mbx.reset_stat.rq_stat_mask = 0xFFFF; |
504 | mbx.reset_stat.sq_stat_mask = 0xFFFF; | ||
489 | nicvf_send_msg_to_pf(nic, &mbx); | 505 | nicvf_send_msg_to_pf(nic, &mbx); |
490 | } | 506 | } |
491 | 507 | ||
@@ -538,9 +554,12 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs, | |||
538 | mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8); | 554 | mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8); |
539 | nicvf_send_msg_to_pf(nic, &mbx); | 555 | nicvf_send_msg_to_pf(nic, &mbx); |
540 | 556 | ||
541 | nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00); | 557 | if (!nic->sqs_mode && (qidx == 0)) { |
542 | if (!nic->sqs_mode) | 558 | /* Enable checking L3/L4 length and TCP/UDP checksums */ |
559 | nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, | ||
560 | (BIT(24) | BIT(23) | BIT(21))); | ||
543 | nicvf_config_vlan_stripping(nic, nic->netdev->features); | 561 | nicvf_config_vlan_stripping(nic, nic->netdev->features); |
562 | } | ||
544 | 563 | ||
545 | /* Enable Receive queue */ | 564 | /* Enable Receive queue */ |
546 | memset(&rq_cfg, 0, sizeof(struct rq_cfg)); | 565 | memset(&rq_cfg, 0, sizeof(struct rq_cfg)); |
@@ -1029,7 +1048,7 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry, | |||
1029 | hdr->tso_max_paysize = skb_shinfo(skb)->gso_size; | 1048 | hdr->tso_max_paysize = skb_shinfo(skb)->gso_size; |
1030 | /* For non-tunneled pkts, point this to L2 ethertype */ | 1049 | /* For non-tunneled pkts, point this to L2 ethertype */ |
1031 | hdr->inner_l3_offset = skb_network_offset(skb) - 2; | 1050 | hdr->inner_l3_offset = skb_network_offset(skb) - 2; |
1032 | nic->drv_stats.tx_tso++; | 1051 | this_cpu_inc(nic->pnicvf->drv_stats->tx_tso); |
1033 | } | 1052 | } |
1034 | } | 1053 | } |
1035 | 1054 | ||
@@ -1161,7 +1180,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq, | |||
1161 | 1180 | ||
1162 | nicvf_sq_doorbell(nic, skb, sq_num, desc_cnt); | 1181 | nicvf_sq_doorbell(nic, skb, sq_num, desc_cnt); |
1163 | 1182 | ||
1164 | nic->drv_stats.tx_tso++; | 1183 | this_cpu_inc(nic->pnicvf->drv_stats->tx_tso); |
1165 | return 1; | 1184 | return 1; |
1166 | } | 1185 | } |
1167 | 1186 | ||
@@ -1422,8 +1441,6 @@ void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx) | |||
1422 | /* Check for errors in the receive cmp.queue entry */ | 1441 | /* Check for errors in the receive cmp.queue entry */ |
1423 | int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx) | 1442 | int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx) |
1424 | { | 1443 | { |
1425 | struct nicvf_hw_stats *stats = &nic->hw_stats; | ||
1426 | |||
1427 | if (!cqe_rx->err_level && !cqe_rx->err_opcode) | 1444 | if (!cqe_rx->err_level && !cqe_rx->err_opcode) |
1428 | return 0; | 1445 | return 0; |
1429 | 1446 | ||
@@ -1435,76 +1452,76 @@ int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx) | |||
1435 | 1452 | ||
1436 | switch (cqe_rx->err_opcode) { | 1453 | switch (cqe_rx->err_opcode) { |
1437 | case CQ_RX_ERROP_RE_PARTIAL: | 1454 | case CQ_RX_ERROP_RE_PARTIAL: |
1438 | stats->rx_bgx_truncated_pkts++; | 1455 | this_cpu_inc(nic->drv_stats->rx_bgx_truncated_pkts); |
1439 | break; | 1456 | break; |
1440 | case CQ_RX_ERROP_RE_JABBER: | 1457 | case CQ_RX_ERROP_RE_JABBER: |
1441 | stats->rx_jabber_errs++; | 1458 | this_cpu_inc(nic->drv_stats->rx_jabber_errs); |
1442 | break; | 1459 | break; |
1443 | case CQ_RX_ERROP_RE_FCS: | 1460 | case CQ_RX_ERROP_RE_FCS: |
1444 | stats->rx_fcs_errs++; | 1461 | this_cpu_inc(nic->drv_stats->rx_fcs_errs); |
1445 | break; | 1462 | break; |
1446 | case CQ_RX_ERROP_RE_RX_CTL: | 1463 | case CQ_RX_ERROP_RE_RX_CTL: |
1447 | stats->rx_bgx_errs++; | 1464 | this_cpu_inc(nic->drv_stats->rx_bgx_errs); |
1448 | break; | 1465 | break; |
1449 | case CQ_RX_ERROP_PREL2_ERR: | 1466 | case CQ_RX_ERROP_PREL2_ERR: |
1450 | stats->rx_prel2_errs++; | 1467 | this_cpu_inc(nic->drv_stats->rx_prel2_errs); |
1451 | break; | 1468 | break; |
1452 | case CQ_RX_ERROP_L2_MAL: | 1469 | case CQ_RX_ERROP_L2_MAL: |
1453 | stats->rx_l2_hdr_malformed++; | 1470 | this_cpu_inc(nic->drv_stats->rx_l2_hdr_malformed); |
1454 | break; | 1471 | break; |
1455 | case CQ_RX_ERROP_L2_OVERSIZE: | 1472 | case CQ_RX_ERROP_L2_OVERSIZE: |
1456 | stats->rx_oversize++; | 1473 | this_cpu_inc(nic->drv_stats->rx_oversize); |
1457 | break; | 1474 | break; |
1458 | case CQ_RX_ERROP_L2_UNDERSIZE: | 1475 | case CQ_RX_ERROP_L2_UNDERSIZE: |
1459 | stats->rx_undersize++; | 1476 | this_cpu_inc(nic->drv_stats->rx_undersize); |
1460 | break; | 1477 | break; |
1461 | case CQ_RX_ERROP_L2_LENMISM: | 1478 | case CQ_RX_ERROP_L2_LENMISM: |
1462 | stats->rx_l2_len_mismatch++; | 1479 | this_cpu_inc(nic->drv_stats->rx_l2_len_mismatch); |
1463 | break; | 1480 | break; |
1464 | case CQ_RX_ERROP_L2_PCLP: | 1481 | case CQ_RX_ERROP_L2_PCLP: |
1465 | stats->rx_l2_pclp++; | 1482 | this_cpu_inc(nic->drv_stats->rx_l2_pclp); |
1466 | break; | 1483 | break; |
1467 | case CQ_RX_ERROP_IP_NOT: | 1484 | case CQ_RX_ERROP_IP_NOT: |
1468 | stats->rx_ip_ver_errs++; | 1485 | this_cpu_inc(nic->drv_stats->rx_ip_ver_errs); |
1469 | break; | 1486 | break; |
1470 | case CQ_RX_ERROP_IP_CSUM_ERR: | 1487 | case CQ_RX_ERROP_IP_CSUM_ERR: |
1471 | stats->rx_ip_csum_errs++; | 1488 | this_cpu_inc(nic->drv_stats->rx_ip_csum_errs); |
1472 | break; | 1489 | break; |
1473 | case CQ_RX_ERROP_IP_MAL: | 1490 | case CQ_RX_ERROP_IP_MAL: |
1474 | stats->rx_ip_hdr_malformed++; | 1491 | this_cpu_inc(nic->drv_stats->rx_ip_hdr_malformed); |
1475 | break; | 1492 | break; |
1476 | case CQ_RX_ERROP_IP_MALD: | 1493 | case CQ_RX_ERROP_IP_MALD: |
1477 | stats->rx_ip_payload_malformed++; | 1494 | this_cpu_inc(nic->drv_stats->rx_ip_payload_malformed); |
1478 | break; | 1495 | break; |
1479 | case CQ_RX_ERROP_IP_HOP: | 1496 | case CQ_RX_ERROP_IP_HOP: |
1480 | stats->rx_ip_ttl_errs++; | 1497 | this_cpu_inc(nic->drv_stats->rx_ip_ttl_errs); |
1481 | break; | 1498 | break; |
1482 | case CQ_RX_ERROP_L3_PCLP: | 1499 | case CQ_RX_ERROP_L3_PCLP: |
1483 | stats->rx_l3_pclp++; | 1500 | this_cpu_inc(nic->drv_stats->rx_l3_pclp); |
1484 | break; | 1501 | break; |
1485 | case CQ_RX_ERROP_L4_MAL: | 1502 | case CQ_RX_ERROP_L4_MAL: |
1486 | stats->rx_l4_malformed++; | 1503 | this_cpu_inc(nic->drv_stats->rx_l4_malformed); |
1487 | break; | 1504 | break; |
1488 | case CQ_RX_ERROP_L4_CHK: | 1505 | case CQ_RX_ERROP_L4_CHK: |
1489 | stats->rx_l4_csum_errs++; | 1506 | this_cpu_inc(nic->drv_stats->rx_l4_csum_errs); |
1490 | break; | 1507 | break; |
1491 | case CQ_RX_ERROP_UDP_LEN: | 1508 | case CQ_RX_ERROP_UDP_LEN: |
1492 | stats->rx_udp_len_errs++; | 1509 | this_cpu_inc(nic->drv_stats->rx_udp_len_errs); |
1493 | break; | 1510 | break; |
1494 | case CQ_RX_ERROP_L4_PORT: | 1511 | case CQ_RX_ERROP_L4_PORT: |
1495 | stats->rx_l4_port_errs++; | 1512 | this_cpu_inc(nic->drv_stats->rx_l4_port_errs); |
1496 | break; | 1513 | break; |
1497 | case CQ_RX_ERROP_TCP_FLAG: | 1514 | case CQ_RX_ERROP_TCP_FLAG: |
1498 | stats->rx_tcp_flag_errs++; | 1515 | this_cpu_inc(nic->drv_stats->rx_tcp_flag_errs); |
1499 | break; | 1516 | break; |
1500 | case CQ_RX_ERROP_TCP_OFFSET: | 1517 | case CQ_RX_ERROP_TCP_OFFSET: |
1501 | stats->rx_tcp_offset_errs++; | 1518 | this_cpu_inc(nic->drv_stats->rx_tcp_offset_errs); |
1502 | break; | 1519 | break; |
1503 | case CQ_RX_ERROP_L4_PCLP: | 1520 | case CQ_RX_ERROP_L4_PCLP: |
1504 | stats->rx_l4_pclp++; | 1521 | this_cpu_inc(nic->drv_stats->rx_l4_pclp); |
1505 | break; | 1522 | break; |
1506 | case CQ_RX_ERROP_RBDR_TRUNC: | 1523 | case CQ_RX_ERROP_RBDR_TRUNC: |
1507 | stats->rx_truncated_pkts++; | 1524 | this_cpu_inc(nic->drv_stats->rx_truncated_pkts); |
1508 | break; | 1525 | break; |
1509 | } | 1526 | } |
1510 | 1527 | ||
@@ -1512,53 +1529,52 @@ int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx) | |||
1512 | } | 1529 | } |
1513 | 1530 | ||
1514 | /* Check for errors in the send cmp.queue entry */ | 1531 | /* Check for errors in the send cmp.queue entry */ |
1515 | int nicvf_check_cqe_tx_errs(struct nicvf *nic, | 1532 | int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx) |
1516 | struct cmp_queue *cq, struct cqe_send_t *cqe_tx) | ||
1517 | { | 1533 | { |
1518 | struct cmp_queue_stats *stats = &cq->stats; | ||
1519 | |||
1520 | switch (cqe_tx->send_status) { | 1534 | switch (cqe_tx->send_status) { |
1521 | case CQ_TX_ERROP_GOOD: | 1535 | case CQ_TX_ERROP_GOOD: |
1522 | stats->tx.good++; | ||
1523 | return 0; | 1536 | return 0; |
1524 | case CQ_TX_ERROP_DESC_FAULT: | 1537 | case CQ_TX_ERROP_DESC_FAULT: |
1525 | stats->tx.desc_fault++; | 1538 | this_cpu_inc(nic->drv_stats->tx_desc_fault); |
1526 | break; | 1539 | break; |
1527 | case CQ_TX_ERROP_HDR_CONS_ERR: | 1540 | case CQ_TX_ERROP_HDR_CONS_ERR: |
1528 | stats->tx.hdr_cons_err++; | 1541 | this_cpu_inc(nic->drv_stats->tx_hdr_cons_err); |
1529 | break; | 1542 | break; |
1530 | case CQ_TX_ERROP_SUBDC_ERR: | 1543 | case CQ_TX_ERROP_SUBDC_ERR: |
1531 | stats->tx.subdesc_err++; | 1544 | this_cpu_inc(nic->drv_stats->tx_subdesc_err); |
1545 | break; | ||
1546 | case CQ_TX_ERROP_MAX_SIZE_VIOL: | ||
1547 | this_cpu_inc(nic->drv_stats->tx_max_size_exceeded); | ||
1532 | break; | 1548 | break; |
1533 | case CQ_TX_ERROP_IMM_SIZE_OFLOW: | 1549 | case CQ_TX_ERROP_IMM_SIZE_OFLOW: |
1534 | stats->tx.imm_size_oflow++; | 1550 | this_cpu_inc(nic->drv_stats->tx_imm_size_oflow); |
1535 | break; | 1551 | break; |
1536 | case CQ_TX_ERROP_DATA_SEQUENCE_ERR: | 1552 | case CQ_TX_ERROP_DATA_SEQUENCE_ERR: |
1537 | stats->tx.data_seq_err++; | 1553 | this_cpu_inc(nic->drv_stats->tx_data_seq_err); |
1538 | break; | 1554 | break; |
1539 | case CQ_TX_ERROP_MEM_SEQUENCE_ERR: | 1555 | case CQ_TX_ERROP_MEM_SEQUENCE_ERR: |
1540 | stats->tx.mem_seq_err++; | 1556 | this_cpu_inc(nic->drv_stats->tx_mem_seq_err); |
1541 | break; | 1557 | break; |
1542 | case CQ_TX_ERROP_LOCK_VIOL: | 1558 | case CQ_TX_ERROP_LOCK_VIOL: |
1543 | stats->tx.lock_viol++; | 1559 | this_cpu_inc(nic->drv_stats->tx_lock_viol); |
1544 | break; | 1560 | break; |
1545 | case CQ_TX_ERROP_DATA_FAULT: | 1561 | case CQ_TX_ERROP_DATA_FAULT: |
1546 | stats->tx.data_fault++; | 1562 | this_cpu_inc(nic->drv_stats->tx_data_fault); |
1547 | break; | 1563 | break; |
1548 | case CQ_TX_ERROP_TSTMP_CONFLICT: | 1564 | case CQ_TX_ERROP_TSTMP_CONFLICT: |
1549 | stats->tx.tstmp_conflict++; | 1565 | this_cpu_inc(nic->drv_stats->tx_tstmp_conflict); |
1550 | break; | 1566 | break; |
1551 | case CQ_TX_ERROP_TSTMP_TIMEOUT: | 1567 | case CQ_TX_ERROP_TSTMP_TIMEOUT: |
1552 | stats->tx.tstmp_timeout++; | 1568 | this_cpu_inc(nic->drv_stats->tx_tstmp_timeout); |
1553 | break; | 1569 | break; |
1554 | case CQ_TX_ERROP_MEM_FAULT: | 1570 | case CQ_TX_ERROP_MEM_FAULT: |
1555 | stats->tx.mem_fault++; | 1571 | this_cpu_inc(nic->drv_stats->tx_mem_fault); |
1556 | break; | 1572 | break; |
1557 | case CQ_TX_ERROP_CK_OVERLAP: | 1573 | case CQ_TX_ERROP_CK_OVERLAP: |
1558 | stats->tx.csum_overlap++; | 1574 | this_cpu_inc(nic->drv_stats->tx_csum_overlap); |
1559 | break; | 1575 | break; |
1560 | case CQ_TX_ERROP_CK_OFLOW: | 1576 | case CQ_TX_ERROP_CK_OFLOW: |
1561 | stats->tx.csum_overflow++; | 1577 | this_cpu_inc(nic->drv_stats->tx_csum_overflow); |
1562 | break; | 1578 | break; |
1563 | } | 1579 | } |
1564 | 1580 | ||
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h index 869f3386028b..2e3c940c1093 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h +++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h | |||
@@ -158,6 +158,7 @@ enum CQ_TX_ERROP_E { | |||
158 | CQ_TX_ERROP_DESC_FAULT = 0x10, | 158 | CQ_TX_ERROP_DESC_FAULT = 0x10, |
159 | CQ_TX_ERROP_HDR_CONS_ERR = 0x11, | 159 | CQ_TX_ERROP_HDR_CONS_ERR = 0x11, |
160 | CQ_TX_ERROP_SUBDC_ERR = 0x12, | 160 | CQ_TX_ERROP_SUBDC_ERR = 0x12, |
161 | CQ_TX_ERROP_MAX_SIZE_VIOL = 0x13, | ||
161 | CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80, | 162 | CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80, |
162 | CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81, | 163 | CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81, |
163 | CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82, | 164 | CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82, |
@@ -171,25 +172,6 @@ enum CQ_TX_ERROP_E { | |||
171 | CQ_TX_ERROP_ENUM_LAST = 0x8a, | 172 | CQ_TX_ERROP_ENUM_LAST = 0x8a, |
172 | }; | 173 | }; |
173 | 174 | ||
174 | struct cmp_queue_stats { | ||
175 | struct tx_stats { | ||
176 | u64 good; | ||
177 | u64 desc_fault; | ||
178 | u64 hdr_cons_err; | ||
179 | u64 subdesc_err; | ||
180 | u64 imm_size_oflow; | ||
181 | u64 data_seq_err; | ||
182 | u64 mem_seq_err; | ||
183 | u64 lock_viol; | ||
184 | u64 data_fault; | ||
185 | u64 tstmp_conflict; | ||
186 | u64 tstmp_timeout; | ||
187 | u64 mem_fault; | ||
188 | u64 csum_overlap; | ||
189 | u64 csum_overflow; | ||
190 | } tx; | ||
191 | } ____cacheline_aligned_in_smp; | ||
192 | |||
193 | enum RQ_SQ_STATS { | 175 | enum RQ_SQ_STATS { |
194 | RQ_SQ_STATS_OCTS, | 176 | RQ_SQ_STATS_OCTS, |
195 | RQ_SQ_STATS_PKTS, | 177 | RQ_SQ_STATS_PKTS, |
@@ -241,7 +223,6 @@ struct cmp_queue { | |||
241 | spinlock_t lock; /* lock to serialize processing CQEs */ | 223 | spinlock_t lock; /* lock to serialize processing CQEs */ |
242 | void *desc; | 224 | void *desc; |
243 | struct q_desc_mem dmem; | 225 | struct q_desc_mem dmem; |
244 | struct cmp_queue_stats stats; | ||
245 | int irq; | 226 | int irq; |
246 | } ____cacheline_aligned_in_smp; | 227 | } ____cacheline_aligned_in_smp; |
247 | 228 | ||
@@ -336,6 +317,5 @@ u64 nicvf_queue_reg_read(struct nicvf *nic, | |||
336 | void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx); | 317 | void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx); |
337 | void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx); | 318 | void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx); |
338 | int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx); | 319 | int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx); |
339 | int nicvf_check_cqe_tx_errs(struct nicvf *nic, | 320 | int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx); |
340 | struct cmp_queue *cq, struct cqe_send_t *cqe_tx); | ||
341 | #endif /* NICVF_QUEUES_H */ | 321 | #endif /* NICVF_QUEUES_H */ |
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index 8bbaedbb7b94..050e21fbb147 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c | |||
@@ -1242,8 +1242,8 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
1242 | 1242 | ||
1243 | pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid); | 1243 | pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid); |
1244 | if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) { | 1244 | if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) { |
1245 | bgx->bgx_id = | 1245 | bgx->bgx_id = (pci_resource_start(pdev, |
1246 | (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1; | 1246 | PCI_CFG_REG_BAR_NUM) >> 24) & BGX_ID_MASK; |
1247 | bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_NODE; | 1247 | bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_NODE; |
1248 | bgx->max_lmac = MAX_LMAC_PER_BGX; | 1248 | bgx->max_lmac = MAX_LMAC_PER_BGX; |
1249 | bgx_vnic[bgx->bgx_id] = bgx; | 1249 | bgx_vnic[bgx->bgx_id] = bgx; |
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h index d59c71e4a000..01cc7c859131 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h | |||
@@ -28,6 +28,8 @@ | |||
28 | #define MAX_DMAC_PER_LMAC 8 | 28 | #define MAX_DMAC_PER_LMAC 8 |
29 | #define MAX_FRAME_SIZE 9216 | 29 | #define MAX_FRAME_SIZE 9216 |
30 | 30 | ||
31 | #define BGX_ID_MASK 0x3 | ||
32 | |||
31 | #define MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE 2 | 33 | #define MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE 2 |
32 | 34 | ||
33 | /* Registers */ | 35 | /* Registers */ |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 1e74fd6085df..e19a0ca8e5dd 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c | |||
@@ -2951,7 +2951,6 @@ void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, | |||
2951 | rq->cntxt_id, fl_id, 0xffff); | 2951 | rq->cntxt_id, fl_id, 0xffff); |
2952 | dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len, | 2952 | dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len, |
2953 | rq->desc, rq->phys_addr); | 2953 | rq->desc, rq->phys_addr); |
2954 | napi_hash_del(&rq->napi); | ||
2955 | netif_napi_del(&rq->napi); | 2954 | netif_napi_del(&rq->napi); |
2956 | rq->netdev = NULL; | 2955 | rq->netdev = NULL; |
2957 | rq->cntxt_id = rq->abs_id = 0; | 2956 | rq->cntxt_id = rq->abs_id = 0; |
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index 50812a1d67bd..df1573c4a659 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h | |||
@@ -178,9 +178,9 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN | |||
178 | CH_PCI_ID_TABLE_FENTRY(0x6005), | 178 | CH_PCI_ID_TABLE_FENTRY(0x6005), |
179 | CH_PCI_ID_TABLE_FENTRY(0x6006), | 179 | CH_PCI_ID_TABLE_FENTRY(0x6006), |
180 | CH_PCI_ID_TABLE_FENTRY(0x6007), | 180 | CH_PCI_ID_TABLE_FENTRY(0x6007), |
181 | CH_PCI_ID_TABLE_FENTRY(0x6008), | ||
181 | CH_PCI_ID_TABLE_FENTRY(0x6009), | 182 | CH_PCI_ID_TABLE_FENTRY(0x6009), |
182 | CH_PCI_ID_TABLE_FENTRY(0x600d), | 183 | CH_PCI_ID_TABLE_FENTRY(0x600d), |
183 | CH_PCI_ID_TABLE_FENTRY(0x6010), | ||
184 | CH_PCI_ID_TABLE_FENTRY(0x6011), | 184 | CH_PCI_ID_TABLE_FENTRY(0x6011), |
185 | CH_PCI_ID_TABLE_FENTRY(0x6014), | 185 | CH_PCI_ID_TABLE_FENTRY(0x6014), |
186 | CH_PCI_ID_TABLE_FENTRY(0x6015), | 186 | CH_PCI_ID_TABLE_FENTRY(0x6015), |
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index cece8a08edca..93aa2939142a 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c | |||
@@ -2813,7 +2813,6 @@ static void be_evt_queues_destroy(struct be_adapter *adapter) | |||
2813 | if (eqo->q.created) { | 2813 | if (eqo->q.created) { |
2814 | be_eq_clean(eqo); | 2814 | be_eq_clean(eqo); |
2815 | be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ); | 2815 | be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ); |
2816 | napi_hash_del(&eqo->napi); | ||
2817 | netif_napi_del(&eqo->napi); | 2816 | netif_napi_del(&eqo->napi); |
2818 | free_cpumask_var(eqo->affinity_mask); | 2817 | free_cpumask_var(eqo->affinity_mask); |
2819 | } | 2818 | } |
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c index efabb04a1ae8..4b0f3a50b293 100644 --- a/drivers/net/ethernet/freescale/fman/fman_tgec.c +++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c | |||
@@ -722,9 +722,6 @@ int tgec_free(struct fman_mac *tgec) | |||
722 | { | 722 | { |
723 | free_init_resources(tgec); | 723 | free_init_resources(tgec); |
724 | 724 | ||
725 | if (tgec->cfg) | ||
726 | tgec->cfg = NULL; | ||
727 | |||
728 | kfree(tgec->cfg); | 725 | kfree(tgec->cfg); |
729 | kfree(tgec); | 726 | kfree(tgec); |
730 | 727 | ||
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c index c54c6fac0d1d..b6ed818f78ff 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.c +++ b/drivers/net/ethernet/hisilicon/hns/hnae.c | |||
@@ -332,8 +332,10 @@ struct hnae_handle *hnae_get_handle(struct device *owner_dev, | |||
332 | return ERR_PTR(-ENODEV); | 332 | return ERR_PTR(-ENODEV); |
333 | 333 | ||
334 | handle = dev->ops->get_handle(dev, port_id); | 334 | handle = dev->ops->get_handle(dev, port_id); |
335 | if (IS_ERR(handle)) | 335 | if (IS_ERR(handle)) { |
336 | put_device(&dev->cls_dev); | ||
336 | return handle; | 337 | return handle; |
338 | } | ||
337 | 339 | ||
338 | handle->dev = dev; | 340 | handle->dev = dev; |
339 | handle->owner_dev = owner_dev; | 341 | handle->owner_dev = owner_dev; |
@@ -356,6 +358,8 @@ out_when_init_queue: | |||
356 | for (j = i - 1; j >= 0; j--) | 358 | for (j = i - 1; j >= 0; j--) |
357 | hnae_fini_queue(handle->qs[j]); | 359 | hnae_fini_queue(handle->qs[j]); |
358 | 360 | ||
361 | put_device(&dev->cls_dev); | ||
362 | |||
359 | return ERR_PTR(-ENOMEM); | 363 | return ERR_PTR(-ENOMEM); |
360 | } | 364 | } |
361 | EXPORT_SYMBOL(hnae_get_handle); | 365 | EXPORT_SYMBOL(hnae_get_handle); |
@@ -377,6 +381,8 @@ void hnae_put_handle(struct hnae_handle *h) | |||
377 | dev->ops->put_handle(h); | 381 | dev->ops->put_handle(h); |
378 | 382 | ||
379 | module_put(dev->owner); | 383 | module_put(dev->owner); |
384 | |||
385 | put_device(&dev->cls_dev); | ||
380 | } | 386 | } |
381 | EXPORT_SYMBOL(hnae_put_handle); | 387 | EXPORT_SYMBOL(hnae_put_handle); |
382 | 388 | ||
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 54efa9a5167b..bd719e25dd76 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c | |||
@@ -2446,6 +2446,8 @@ static int ehea_open(struct net_device *dev) | |||
2446 | 2446 | ||
2447 | netif_info(port, ifup, dev, "enabling port\n"); | 2447 | netif_info(port, ifup, dev, "enabling port\n"); |
2448 | 2448 | ||
2449 | netif_carrier_off(dev); | ||
2450 | |||
2449 | ret = ehea_up(dev); | 2451 | ret = ehea_up(dev); |
2450 | if (!ret) { | 2452 | if (!ret) { |
2451 | port_napi_enable(port); | 2453 | port_napi_enable(port); |
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 5f44c5520fbc..0fbf686f5e7c 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c | |||
@@ -74,7 +74,6 @@ | |||
74 | #include <asm/iommu.h> | 74 | #include <asm/iommu.h> |
75 | #include <linux/uaccess.h> | 75 | #include <linux/uaccess.h> |
76 | #include <asm/firmware.h> | 76 | #include <asm/firmware.h> |
77 | #include <linux/seq_file.h> | ||
78 | #include <linux/workqueue.h> | 77 | #include <linux/workqueue.h> |
79 | 78 | ||
80 | #include "ibmvnic.h" | 79 | #include "ibmvnic.h" |
@@ -1505,9 +1504,8 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry) | |||
1505 | adapter->max_rx_add_entries_per_subcrq > entries_page ? | 1504 | adapter->max_rx_add_entries_per_subcrq > entries_page ? |
1506 | entries_page : adapter->max_rx_add_entries_per_subcrq; | 1505 | entries_page : adapter->max_rx_add_entries_per_subcrq; |
1507 | 1506 | ||
1508 | /* Choosing the maximum number of queues supported by firmware*/ | 1507 | adapter->req_tx_queues = adapter->opt_tx_comp_sub_queues; |
1509 | adapter->req_tx_queues = adapter->max_tx_queues; | 1508 | adapter->req_rx_queues = adapter->opt_rx_comp_queues; |
1510 | adapter->req_rx_queues = adapter->max_rx_queues; | ||
1511 | adapter->req_rx_add_queues = adapter->max_rx_add_queues; | 1509 | adapter->req_rx_add_queues = adapter->max_rx_add_queues; |
1512 | 1510 | ||
1513 | adapter->req_mtu = adapter->max_mtu; | 1511 | adapter->req_mtu = adapter->max_mtu; |
@@ -3706,7 +3704,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id) | |||
3706 | struct net_device *netdev; | 3704 | struct net_device *netdev; |
3707 | unsigned char *mac_addr_p; | 3705 | unsigned char *mac_addr_p; |
3708 | struct dentry *ent; | 3706 | struct dentry *ent; |
3709 | char buf[16]; /* debugfs name buf */ | 3707 | char buf[17]; /* debugfs name buf */ |
3710 | int rc; | 3708 | int rc; |
3711 | 3709 | ||
3712 | dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n", | 3710 | dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n", |
@@ -3845,6 +3843,9 @@ static int ibmvnic_remove(struct vio_dev *dev) | |||
3845 | if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir)) | 3843 | if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir)) |
3846 | debugfs_remove_recursive(adapter->debugfs_dir); | 3844 | debugfs_remove_recursive(adapter->debugfs_dir); |
3847 | 3845 | ||
3846 | dma_unmap_single(&dev->dev, adapter->stats_token, | ||
3847 | sizeof(struct ibmvnic_statistics), DMA_FROM_DEVICE); | ||
3848 | |||
3848 | if (adapter->ras_comps) | 3849 | if (adapter->ras_comps) |
3849 | dma_free_coherent(&dev->dev, | 3850 | dma_free_coherent(&dev->dev, |
3850 | adapter->ras_comp_num * | 3851 | adapter->ras_comp_num * |
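
The ibmvnic_remove() hunk above adds the dma_unmap_single() that balances the earlier dma_map_single() of the adapter's statistics buffer, using the same size and direction, so the streaming mapping is released when the adapter is removed. A minimal sketch of that map/check/unmap pairing (device-specific names here are illustrative):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/types.h>

struct demo_stats {
	u64 counters[16];
};

static int demo_map_stats(struct device *dev, struct demo_stats *s,
			  dma_addr_t *token)
{
	*token = dma_map_single(dev, s, sizeof(*s), DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *token))
		return -ENOMEM;
	return 0;
}

static void demo_unmap_stats(struct device *dev, dma_addr_t token,
			     struct demo_stats *s)
{
	/* Must mirror the size and direction used by the map call. */
	dma_unmap_single(dev, token, sizeof(*s), DMA_FROM_DEVICE);
}
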
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index bf5cc55ba24c..5b12022adf1f 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c | |||
@@ -1381,6 +1381,7 @@ static unsigned int get_rx_coal(struct mv643xx_eth_private *mp) | |||
1381 | temp = (val & 0x003fff00) >> 8; | 1381 | temp = (val & 0x003fff00) >> 8; |
1382 | 1382 | ||
1383 | temp *= 64000000; | 1383 | temp *= 64000000; |
1384 | temp += mp->t_clk / 2; | ||
1384 | do_div(temp, mp->t_clk); | 1385 | do_div(temp, mp->t_clk); |
1385 | 1386 | ||
1386 | return (unsigned int)temp; | 1387 | return (unsigned int)temp; |
@@ -1417,6 +1418,7 @@ static unsigned int get_tx_coal(struct mv643xx_eth_private *mp) | |||
1417 | 1418 | ||
1418 | temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4; | 1419 | temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4; |
1419 | temp *= 64000000; | 1420 | temp *= 64000000; |
1421 | temp += mp->t_clk / 2; | ||
1420 | do_div(temp, mp->t_clk); | 1422 | do_div(temp, mp->t_clk); |
1421 | 1423 | ||
1422 | return (unsigned int)temp; | 1424 | return (unsigned int)temp; |
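
Both mv643xx_eth hunks add half of t_clk to the numerator before the do_div(), so the coalescing conversion rounds to the nearest value instead of always truncating. The idiom in isolation, which is essentially the rounding DIV_ROUND_CLOSEST_ULL() in linux/kernel.h provides (a sketch, not the driver's helper):

#include <asm/div64.h>
#include <linux/types.h>

/*
 * 64-by-32 division rounded to nearest: bias the dividend by half the
 * divisor, then divide. do_div() divides the u64 in place by a 32-bit
 * divisor and evaluates to the remainder.
 */
static u64 demo_div_round_closest(u64 dividend, u32 divisor)
{
	dividend += divisor / 2;
	do_div(dividend, divisor);
	return dividend;
}
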
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 5cb07c2017bf..0c0a45af950f 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c | |||
@@ -4151,7 +4151,7 @@ static int mvneta_probe(struct platform_device *pdev) | |||
4151 | dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; | 4151 | dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO; |
4152 | dev->hw_features |= dev->features; | 4152 | dev->hw_features |= dev->features; |
4153 | dev->vlan_features |= dev->features; | 4153 | dev->vlan_features |= dev->features; |
4154 | dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE; | 4154 | dev->priv_flags |= IFF_LIVE_ADDR_CHANGE; |
4155 | dev->gso_max_segs = MVNETA_MAX_TSO_SEGS; | 4155 | dev->gso_max_segs = MVNETA_MAX_TSO_SEGS; |
4156 | 4156 | ||
4157 | err = register_netdev(dev); | 4157 | err = register_netdev(dev); |
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 60227a3452a4..1026c452e39d 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c | |||
@@ -3293,7 +3293,7 @@ static void mvpp2_cls_init(struct mvpp2 *priv) | |||
3293 | mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK); | 3293 | mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK); |
3294 | 3294 | ||
3295 | /* Clear classifier flow table */ | 3295 | /* Clear classifier flow table */ |
3296 | memset(&fe.data, 0, MVPP2_CLS_FLOWS_TBL_DATA_WORDS); | 3296 | memset(&fe.data, 0, sizeof(fe.data)); |
3297 | for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) { | 3297 | for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) { |
3298 | fe.index = index; | 3298 | fe.index = index; |
3299 | mvpp2_cls_flow_write(priv, &fe); | 3299 | mvpp2_cls_flow_write(priv, &fe); |
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index f05ea56dcff2..941c8e2c944e 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c | |||
@@ -5220,6 +5220,19 @@ static SIMPLE_DEV_PM_OPS(sky2_pm_ops, sky2_suspend, sky2_resume); | |||
5220 | 5220 | ||
5221 | static void sky2_shutdown(struct pci_dev *pdev) | 5221 | static void sky2_shutdown(struct pci_dev *pdev) |
5222 | { | 5222 | { |
5223 | struct sky2_hw *hw = pci_get_drvdata(pdev); | ||
5224 | int port; | ||
5225 | |||
5226 | for (port = 0; port < hw->ports; port++) { | ||
5227 | struct net_device *ndev = hw->dev[port]; | ||
5228 | |||
5229 | rtnl_lock(); | ||
5230 | if (netif_running(ndev)) { | ||
5231 | dev_close(ndev); | ||
5232 | netif_device_detach(ndev); | ||
5233 | } | ||
5234 | rtnl_unlock(); | ||
5235 | } | ||
5223 | sky2_suspend(&pdev->dev); | 5236 | sky2_suspend(&pdev->dev); |
5224 | pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev)); | 5237 | pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev)); |
5225 | pci_set_power_state(pdev, PCI_D3hot); | 5238 | pci_set_power_state(pdev, PCI_D3hot); |
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 12c99a2655f2..a60f635da78b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c | |||
@@ -129,6 +129,9 @@ static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto) | |||
129 | } | 129 | } |
130 | }; | 130 | }; |
131 | 131 | ||
132 | /* Must not acquire state_lock, as its corresponding work_sync | ||
133 | * is done under it. | ||
134 | */ | ||
132 | static void mlx4_en_filter_work(struct work_struct *work) | 135 | static void mlx4_en_filter_work(struct work_struct *work) |
133 | { | 136 | { |
134 | struct mlx4_en_filter *filter = container_of(work, | 137 | struct mlx4_en_filter *filter = container_of(work, |
@@ -2189,20 +2192,19 @@ void mlx4_en_destroy_netdev(struct net_device *dev) | |||
2189 | mutex_lock(&mdev->state_lock); | 2192 | mutex_lock(&mdev->state_lock); |
2190 | mdev->pndev[priv->port] = NULL; | 2193 | mdev->pndev[priv->port] = NULL; |
2191 | mdev->upper[priv->port] = NULL; | 2194 | mdev->upper[priv->port] = NULL; |
2192 | mutex_unlock(&mdev->state_lock); | ||
2193 | 2195 | ||
2194 | #ifdef CONFIG_RFS_ACCEL | 2196 | #ifdef CONFIG_RFS_ACCEL |
2195 | mlx4_en_cleanup_filters(priv); | 2197 | mlx4_en_cleanup_filters(priv); |
2196 | #endif | 2198 | #endif |
2197 | 2199 | ||
2198 | mlx4_en_free_resources(priv); | 2200 | mlx4_en_free_resources(priv); |
2201 | mutex_unlock(&mdev->state_lock); | ||
2199 | 2202 | ||
2200 | kfree(priv->tx_ring); | 2203 | kfree(priv->tx_ring); |
2201 | kfree(priv->tx_cq); | 2204 | kfree(priv->tx_cq); |
2202 | 2205 | ||
2203 | if (!shutdown) | 2206 | if (!shutdown) |
2204 | free_netdev(dev); | 2207 | free_netdev(dev); |
2205 | dev->ethtool_ops = NULL; | ||
2206 | } | 2208 | } |
2207 | 2209 | ||
2208 | static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) | 2210 | static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) |
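
The comment added above mlx4_en_filter_work() documents a classic flush-versus-lock deadlock: the filter work is synced while state_lock is held, so the handler itself must never take state_lock, or the sync would wait on a handler that is waiting on the lock. A generic sketch of the hazard follows (hypothetical names, not mlx4 code); mlx4 resolves it by keeping the lock out of the handler, while the sketch shows the alternative of syncing the work only after the lock is dropped.

#include <linux/mutex.h>
#include <linux/workqueue.h>

static DEFINE_MUTEX(demo_state_lock);

static void demo_work_fn(struct work_struct *work)
{
	mutex_lock(&demo_state_lock);	/* handler takes the state lock */
	/* ... update shared state ... */
	mutex_unlock(&demo_state_lock);
}

static DECLARE_WORK(demo_work, demo_work_fn);

static void demo_teardown(void)
{
	mutex_lock(&demo_state_lock);
	/*
	 * Calling cancel_work_sync(&demo_work) here would deadlock: it
	 * waits for demo_work_fn(), which is blocked on the lock this
	 * context already holds.
	 */
	/* ... detach shared state ... */
	mutex_unlock(&demo_state_lock);

	cancel_work_sync(&demo_work);	/* safe once the lock is released */
}
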
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index f4c687ce4c59..84e8b250e2af 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c | |||
@@ -1445,6 +1445,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, | |||
1445 | c->netdev = priv->netdev; | 1445 | c->netdev = priv->netdev; |
1446 | c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); | 1446 | c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key); |
1447 | c->num_tc = priv->params.num_tc; | 1447 | c->num_tc = priv->params.num_tc; |
1448 | c->xdp = !!priv->xdp_prog; | ||
1448 | 1449 | ||
1449 | if (priv->params.rx_am_enabled) | 1450 | if (priv->params.rx_am_enabled) |
1450 | rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode); | 1451 | rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode); |
@@ -1468,6 +1469,12 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, | |||
1468 | if (err) | 1469 | if (err) |
1469 | goto err_close_tx_cqs; | 1470 | goto err_close_tx_cqs; |
1470 | 1471 | ||
1472 | /* XDP SQ CQ params are same as normal TXQ sq CQ params */ | ||
1473 | err = c->xdp ? mlx5e_open_cq(c, &cparam->tx_cq, &c->xdp_sq.cq, | ||
1474 | priv->params.tx_cq_moderation) : 0; | ||
1475 | if (err) | ||
1476 | goto err_close_rx_cq; | ||
1477 | |||
1471 | napi_enable(&c->napi); | 1478 | napi_enable(&c->napi); |
1472 | 1479 | ||
1473 | err = mlx5e_open_sq(c, 0, &cparam->icosq, &c->icosq); | 1480 | err = mlx5e_open_sq(c, 0, &cparam->icosq, &c->icosq); |
@@ -1488,21 +1495,10 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, | |||
1488 | } | 1495 | } |
1489 | } | 1496 | } |
1490 | 1497 | ||
1491 | if (priv->xdp_prog) { | 1498 | err = c->xdp ? mlx5e_open_sq(c, 0, &cparam->xdp_sq, &c->xdp_sq) : 0; |
1492 | /* XDP SQ CQ params are same as normal TXQ sq CQ params */ | 1499 | if (err) |
1493 | err = mlx5e_open_cq(c, &cparam->tx_cq, &c->xdp_sq.cq, | 1500 | goto err_close_sqs; |
1494 | priv->params.tx_cq_moderation); | ||
1495 | if (err) | ||
1496 | goto err_close_sqs; | ||
1497 | |||
1498 | err = mlx5e_open_sq(c, 0, &cparam->xdp_sq, &c->xdp_sq); | ||
1499 | if (err) { | ||
1500 | mlx5e_close_cq(&c->xdp_sq.cq); | ||
1501 | goto err_close_sqs; | ||
1502 | } | ||
1503 | } | ||
1504 | 1501 | ||
1505 | c->xdp = !!priv->xdp_prog; | ||
1506 | err = mlx5e_open_rq(c, &cparam->rq, &c->rq); | 1502 | err = mlx5e_open_rq(c, &cparam->rq, &c->rq); |
1507 | if (err) | 1503 | if (err) |
1508 | goto err_close_xdp_sq; | 1504 | goto err_close_xdp_sq; |
@@ -1512,7 +1508,8 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, | |||
1512 | 1508 | ||
1513 | return 0; | 1509 | return 0; |
1514 | err_close_xdp_sq: | 1510 | err_close_xdp_sq: |
1515 | mlx5e_close_sq(&c->xdp_sq); | 1511 | if (c->xdp) |
1512 | mlx5e_close_sq(&c->xdp_sq); | ||
1516 | 1513 | ||
1517 | err_close_sqs: | 1514 | err_close_sqs: |
1518 | mlx5e_close_sqs(c); | 1515 | mlx5e_close_sqs(c); |
@@ -1522,6 +1519,10 @@ err_close_icosq: | |||
1522 | 1519 | ||
1523 | err_disable_napi: | 1520 | err_disable_napi: |
1524 | napi_disable(&c->napi); | 1521 | napi_disable(&c->napi); |
1522 | if (c->xdp) | ||
1523 | mlx5e_close_cq(&c->xdp_sq.cq); | ||
1524 | |||
1525 | err_close_rx_cq: | ||
1525 | mlx5e_close_cq(&c->rq.cq); | 1526 | mlx5e_close_cq(&c->rq.cq); |
1526 | 1527 | ||
1527 | err_close_tx_cqs: | 1528 | err_close_tx_cqs: |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 7fe6559e4ab3..bf1c09ca73c0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | |||
@@ -308,7 +308,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev) | |||
308 | netdev->switchdev_ops = &mlx5e_rep_switchdev_ops; | 308 | netdev->switchdev_ops = &mlx5e_rep_switchdev_ops; |
309 | #endif | 309 | #endif |
310 | 310 | ||
311 | netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC; | 311 | netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL; |
312 | netdev->hw_features |= NETIF_F_HW_TC; | 312 | netdev->hw_features |= NETIF_F_HW_TC; |
313 | 313 | ||
314 | eth_hw_addr_random(netdev); | 314 | eth_hw_addr_random(netdev); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index ce8c54d18906..6bb21b31cfeb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | |||
@@ -237,12 +237,15 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec | |||
237 | skb_flow_dissector_target(f->dissector, | 237 | skb_flow_dissector_target(f->dissector, |
238 | FLOW_DISSECTOR_KEY_VLAN, | 238 | FLOW_DISSECTOR_KEY_VLAN, |
239 | f->mask); | 239 | f->mask); |
240 | if (mask->vlan_id) { | 240 | if (mask->vlan_id || mask->vlan_priority) { |
241 | MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1); | 241 | MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1); |
242 | MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1); | 242 | MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1); |
243 | 243 | ||
244 | MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id); | 244 | MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id); |
245 | MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id); | 245 | MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id); |
246 | |||
247 | MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority); | ||
248 | MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority); | ||
246 | } | 249 | } |
247 | } | 250 | } |
248 | 251 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index c55ad8d00c05..d239f5d0ea36 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | |||
@@ -57,7 +57,8 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, | |||
57 | if (esw->mode != SRIOV_OFFLOADS) | 57 | if (esw->mode != SRIOV_OFFLOADS) |
58 | return ERR_PTR(-EOPNOTSUPP); | 58 | return ERR_PTR(-EOPNOTSUPP); |
59 | 59 | ||
60 | action = attr->action; | 60 | /* per flow vlan pop/push is emulated, don't set that into the firmware */ |
61 | action = attr->action & ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP); | ||
61 | 62 | ||
62 | if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { | 63 | if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { |
63 | dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; | 64 | dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 89696048b045..914e5466f729 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | |||
@@ -1690,7 +1690,7 @@ static int init_root_ns(struct mlx5_flow_steering *steering) | |||
1690 | { | 1690 | { |
1691 | 1691 | ||
1692 | steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX); | 1692 | steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX); |
1693 | if (IS_ERR_OR_NULL(steering->root_ns)) | 1693 | if (!steering->root_ns) |
1694 | goto cleanup; | 1694 | goto cleanup; |
1695 | 1695 | ||
1696 | if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node)) | 1696 | if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node)) |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index d5433c49b2b0..3b7c6a9f2b5f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c | |||
@@ -46,7 +46,6 @@ | |||
46 | #include <linux/mlx5/srq.h> | 46 | #include <linux/mlx5/srq.h> |
47 | #include <linux/debugfs.h> | 47 | #include <linux/debugfs.h> |
48 | #include <linux/kmod.h> | 48 | #include <linux/kmod.h> |
49 | #include <linux/delay.h> | ||
50 | #include <linux/mlx5/mlx5_ifc.h> | 49 | #include <linux/mlx5/mlx5_ifc.h> |
51 | #ifdef CONFIG_RFS_ACCEL | 50 | #ifdef CONFIG_RFS_ACCEL |
52 | #include <linux/cpu_rmap.h> | 51 | #include <linux/cpu_rmap.h> |
@@ -1226,6 +1225,9 @@ static int init_one(struct pci_dev *pdev, | |||
1226 | 1225 | ||
1227 | pci_set_drvdata(pdev, dev); | 1226 | pci_set_drvdata(pdev, dev); |
1228 | 1227 | ||
1228 | dev->pdev = pdev; | ||
1229 | dev->event = mlx5_core_event; | ||
1230 | |||
1229 | if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profile)) { | 1231 | if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profile)) { |
1230 | mlx5_core_warn(dev, | 1232 | mlx5_core_warn(dev, |
1231 | "selected profile out of range, selecting default (%d)\n", | 1233 | "selected profile out of range, selecting default (%d)\n", |
@@ -1233,8 +1235,6 @@ static int init_one(struct pci_dev *pdev, | |||
1233 | prof_sel = MLX5_DEFAULT_PROF; | 1235 | prof_sel = MLX5_DEFAULT_PROF; |
1234 | } | 1236 | } |
1235 | dev->profile = &profile[prof_sel]; | 1237 | dev->profile = &profile[prof_sel]; |
1236 | dev->pdev = pdev; | ||
1237 | dev->event = mlx5_core_event; | ||
1238 | 1238 | ||
1239 | INIT_LIST_HEAD(&priv->ctx_list); | 1239 | INIT_LIST_HEAD(&priv->ctx_list); |
1240 | spin_lock_init(&priv->ctx_lock); | 1240 | spin_lock_init(&priv->ctx_lock); |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 1ec0a4ce3c46..dda5761e91bc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c | |||
@@ -231,7 +231,7 @@ mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port) | |||
231 | 231 | ||
232 | span_entry->used = true; | 232 | span_entry->used = true; |
233 | span_entry->id = index; | 233 | span_entry->id = index; |
234 | span_entry->ref_count = 0; | 234 | span_entry->ref_count = 1; |
235 | span_entry->local_port = local_port; | 235 | span_entry->local_port = local_port; |
236 | return span_entry; | 236 | return span_entry; |
237 | } | 237 | } |
@@ -270,6 +270,7 @@ static struct mlxsw_sp_span_entry | |||
270 | 270 | ||
271 | span_entry = mlxsw_sp_span_entry_find(port); | 271 | span_entry = mlxsw_sp_span_entry_find(port); |
272 | if (span_entry) { | 272 | if (span_entry) { |
273 | /* Already exists, just take a reference */ | ||
273 | span_entry->ref_count++; | 274 | span_entry->ref_count++; |
274 | return span_entry; | 275 | return span_entry; |
275 | } | 276 | } |
@@ -280,6 +281,7 @@ static struct mlxsw_sp_span_entry | |||
280 | static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp, | 281 | static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp, |
281 | struct mlxsw_sp_span_entry *span_entry) | 282 | struct mlxsw_sp_span_entry *span_entry) |
282 | { | 283 | { |
284 | WARN_ON(!span_entry->ref_count); | ||
283 | if (--span_entry->ref_count == 0) | 285 | if (--span_entry->ref_count == 0) |
284 | mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry); | 286 | mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry); |
285 | return 0; | 287 | return 0; |
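
The spectrum.c hunks above tighten the SPAN entry reference counting: a freshly created entry now starts at ref_count = 1 (the creator's reference), reusing an existing entry just takes another reference, and the put path warns if the count would underflow before destroying the entry when it reaches zero. A generic sketch of that get/put discipline, assuming the caller already serializes access (types and names are illustrative):

#include <linux/bug.h>
#include <linux/slab.h>

struct demo_entry {
	unsigned int ref_count;
	/* ... mirrored-port state ... */
};

/* Reuse an existing entry if one is passed in, otherwise create one. */
static struct demo_entry *demo_entry_get(struct demo_entry *existing)
{
	struct demo_entry *e = existing;

	if (e) {
		e->ref_count++;		/* already exists, just take a ref */
		return e;
	}
	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e)
		e->ref_count = 1;	/* creator holds the first reference */
	return e;
}

static void demo_entry_put(struct demo_entry *e)
{
	WARN_ON(!e->ref_count);
	if (--e->ref_count == 0)
		kfree(e);		/* last reference dropped: tear down */
}
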
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 9b22863a924b..97bbc1d21df8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h | |||
@@ -115,7 +115,7 @@ struct mlxsw_sp_rif { | |||
115 | struct mlxsw_sp_mid { | 115 | struct mlxsw_sp_mid { |
116 | struct list_head list; | 116 | struct list_head list; |
117 | unsigned char addr[ETH_ALEN]; | 117 | unsigned char addr[ETH_ALEN]; |
118 | u16 vid; | 118 | u16 fid; |
119 | u16 mid; | 119 | u16 mid; |
120 | unsigned int ref_count; | 120 | unsigned int ref_count; |
121 | }; | 121 | }; |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 4573da2c5560..e83072da6272 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | |||
@@ -594,21 +594,22 @@ static int mlxsw_sp_vrs_init(struct mlxsw_sp *mlxsw_sp) | |||
594 | return 0; | 594 | return 0; |
595 | } | 595 | } |
596 | 596 | ||
597 | static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp); | ||
598 | |||
597 | static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp) | 599 | static void mlxsw_sp_vrs_fini(struct mlxsw_sp *mlxsw_sp) |
598 | { | 600 | { |
601 | mlxsw_sp_router_fib_flush(mlxsw_sp); | ||
599 | kfree(mlxsw_sp->router.vrs); | 602 | kfree(mlxsw_sp->router.vrs); |
600 | } | 603 | } |
601 | 604 | ||
602 | struct mlxsw_sp_neigh_key { | 605 | struct mlxsw_sp_neigh_key { |
603 | unsigned char addr[sizeof(struct in6_addr)]; | 606 | struct neighbour *n; |
604 | struct net_device *dev; | ||
605 | }; | 607 | }; |
606 | 608 | ||
607 | struct mlxsw_sp_neigh_entry { | 609 | struct mlxsw_sp_neigh_entry { |
608 | struct rhash_head ht_node; | 610 | struct rhash_head ht_node; |
609 | struct mlxsw_sp_neigh_key key; | 611 | struct mlxsw_sp_neigh_key key; |
610 | u16 rif; | 612 | u16 rif; |
611 | struct neighbour *n; | ||
612 | bool offloaded; | 613 | bool offloaded; |
613 | struct delayed_work dw; | 614 | struct delayed_work dw; |
614 | struct mlxsw_sp_port *mlxsw_sp_port; | 615 | struct mlxsw_sp_port *mlxsw_sp_port; |
@@ -646,19 +647,15 @@ mlxsw_sp_neigh_entry_remove(struct mlxsw_sp *mlxsw_sp, | |||
646 | static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work); | 647 | static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work); |
647 | 648 | ||
648 | static struct mlxsw_sp_neigh_entry * | 649 | static struct mlxsw_sp_neigh_entry * |
649 | mlxsw_sp_neigh_entry_create(const void *addr, size_t addr_len, | 650 | mlxsw_sp_neigh_entry_create(struct neighbour *n, u16 rif) |
650 | struct net_device *dev, u16 rif, | ||
651 | struct neighbour *n) | ||
652 | { | 651 | { |
653 | struct mlxsw_sp_neigh_entry *neigh_entry; | 652 | struct mlxsw_sp_neigh_entry *neigh_entry; |
654 | 653 | ||
655 | neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_ATOMIC); | 654 | neigh_entry = kzalloc(sizeof(*neigh_entry), GFP_ATOMIC); |
656 | if (!neigh_entry) | 655 | if (!neigh_entry) |
657 | return NULL; | 656 | return NULL; |
658 | memcpy(neigh_entry->key.addr, addr, addr_len); | 657 | neigh_entry->key.n = n; |
659 | neigh_entry->key.dev = dev; | ||
660 | neigh_entry->rif = rif; | 658 | neigh_entry->rif = rif; |
661 | neigh_entry->n = n; | ||
662 | INIT_DELAYED_WORK(&neigh_entry->dw, mlxsw_sp_router_neigh_update_hw); | 659 | INIT_DELAYED_WORK(&neigh_entry->dw, mlxsw_sp_router_neigh_update_hw); |
663 | INIT_LIST_HEAD(&neigh_entry->nexthop_list); | 660 | INIT_LIST_HEAD(&neigh_entry->nexthop_list); |
664 | return neigh_entry; | 661 | return neigh_entry; |
@@ -671,13 +668,11 @@ mlxsw_sp_neigh_entry_destroy(struct mlxsw_sp_neigh_entry *neigh_entry) | |||
671 | } | 668 | } |
672 | 669 | ||
673 | static struct mlxsw_sp_neigh_entry * | 670 | static struct mlxsw_sp_neigh_entry * |
674 | mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, const void *addr, | 671 | mlxsw_sp_neigh_entry_lookup(struct mlxsw_sp *mlxsw_sp, struct neighbour *n) |
675 | size_t addr_len, struct net_device *dev) | ||
676 | { | 672 | { |
677 | struct mlxsw_sp_neigh_key key = {{ 0 } }; | 673 | struct mlxsw_sp_neigh_key key; |
678 | 674 | ||
679 | memcpy(key.addr, addr, addr_len); | 675 | key.n = n; |
680 | key.dev = dev; | ||
681 | return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht, | 676 | return rhashtable_lookup_fast(&mlxsw_sp->router.neigh_ht, |
682 | &key, mlxsw_sp_neigh_ht_params); | 677 | &key, mlxsw_sp_neigh_ht_params); |
683 | } | 678 | } |
@@ -689,26 +684,20 @@ int mlxsw_sp_router_neigh_construct(struct net_device *dev, | |||
689 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; | 684 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; |
690 | struct mlxsw_sp_neigh_entry *neigh_entry; | 685 | struct mlxsw_sp_neigh_entry *neigh_entry; |
691 | struct mlxsw_sp_rif *r; | 686 | struct mlxsw_sp_rif *r; |
692 | u32 dip; | ||
693 | int err; | 687 | int err; |
694 | 688 | ||
695 | if (n->tbl != &arp_tbl) | 689 | if (n->tbl != &arp_tbl) |
696 | return 0; | 690 | return 0; |
697 | 691 | ||
698 | dip = ntohl(*((__be32 *) n->primary_key)); | 692 | neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); |
699 | neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &dip, sizeof(dip), | 693 | if (neigh_entry) |
700 | n->dev); | ||
701 | if (neigh_entry) { | ||
702 | WARN_ON(neigh_entry->n != n); | ||
703 | return 0; | 694 | return 0; |
704 | } | ||
705 | 695 | ||
706 | r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev); | 696 | r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev); |
707 | if (WARN_ON(!r)) | 697 | if (WARN_ON(!r)) |
708 | return -EINVAL; | 698 | return -EINVAL; |
709 | 699 | ||
710 | neigh_entry = mlxsw_sp_neigh_entry_create(&dip, sizeof(dip), n->dev, | 700 | neigh_entry = mlxsw_sp_neigh_entry_create(n, r->rif); |
711 | r->rif, n); | ||
712 | if (!neigh_entry) | 701 | if (!neigh_entry) |
713 | return -ENOMEM; | 702 | return -ENOMEM; |
714 | err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry); | 703 | err = mlxsw_sp_neigh_entry_insert(mlxsw_sp, neigh_entry); |
@@ -727,14 +716,11 @@ void mlxsw_sp_router_neigh_destroy(struct net_device *dev, | |||
727 | struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); | 716 | struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); |
728 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; | 717 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; |
729 | struct mlxsw_sp_neigh_entry *neigh_entry; | 718 | struct mlxsw_sp_neigh_entry *neigh_entry; |
730 | u32 dip; | ||
731 | 719 | ||
732 | if (n->tbl != &arp_tbl) | 720 | if (n->tbl != &arp_tbl) |
733 | return; | 721 | return; |
734 | 722 | ||
735 | dip = ntohl(*((__be32 *) n->primary_key)); | 723 | neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); |
736 | neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &dip, sizeof(dip), | ||
737 | n->dev); | ||
738 | if (!neigh_entry) | 724 | if (!neigh_entry) |
739 | return; | 725 | return; |
740 | mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry); | 726 | mlxsw_sp_neigh_entry_remove(mlxsw_sp, neigh_entry); |
@@ -817,6 +803,26 @@ static void mlxsw_sp_router_neigh_rec_process(struct mlxsw_sp *mlxsw_sp, | |||
817 | } | 803 | } |
818 | } | 804 | } |
819 | 805 | ||
806 | static bool mlxsw_sp_router_rauhtd_is_full(char *rauhtd_pl) | ||
807 | { | ||
808 | u8 num_rec, last_rec_index, num_entries; | ||
809 | |||
810 | num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl); | ||
811 | last_rec_index = num_rec - 1; | ||
812 | |||
813 | if (num_rec < MLXSW_REG_RAUHTD_REC_MAX_NUM) | ||
814 | return false; | ||
815 | if (mlxsw_reg_rauhtd_rec_type_get(rauhtd_pl, last_rec_index) == | ||
816 | MLXSW_REG_RAUHTD_TYPE_IPV6) | ||
817 | return true; | ||
818 | |||
819 | num_entries = mlxsw_reg_rauhtd_ipv4_rec_num_entries_get(rauhtd_pl, | ||
820 | last_rec_index); | ||
821 | if (++num_entries == MLXSW_REG_RAUHTD_IPV4_ENT_PER_REC) | ||
822 | return true; | ||
823 | return false; | ||
824 | } | ||
825 | |||
820 | static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp) | 826 | static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp) |
821 | { | 827 | { |
822 | char *rauhtd_pl; | 828 | char *rauhtd_pl; |
@@ -843,7 +849,7 @@ static int mlxsw_sp_router_neighs_update_rauhtd(struct mlxsw_sp *mlxsw_sp) | |||
843 | for (i = 0; i < num_rec; i++) | 849 | for (i = 0; i < num_rec; i++) |
844 | mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl, | 850 | mlxsw_sp_router_neigh_rec_process(mlxsw_sp, rauhtd_pl, |
845 | i); | 851 | i); |
846 | } while (num_rec); | 852 | } while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl)); |
847 | rtnl_unlock(); | 853 | rtnl_unlock(); |
848 | 854 | ||
849 | kfree(rauhtd_pl); | 855 | kfree(rauhtd_pl); |
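The loop above now keys its termination on whether the previous RAUHTD response came back completely full, rather than on whether it returned any records at all. Condensed, the control flow after this change looks roughly as follows; query_rauhtd() stands in for the register query the real function performs and is not an actual mlxsw symbol:

/* Condensed sketch of the dump loop after this change. */
static int neighs_update_rauhtd_sketch(struct mlxsw_sp *mlxsw_sp,
				       char *rauhtd_pl)
{
	u8 num_rec;
	int i, err;

	do {
		err = query_rauhtd(mlxsw_sp, rauhtd_pl);	/* placeholder */
		if (err)
			return err;
		num_rec = mlxsw_reg_rauhtd_num_rec_get(rauhtd_pl);
		for (i = 0; i < num_rec; i++)
			mlxsw_sp_router_neigh_rec_process(mlxsw_sp,
							  rauhtd_pl, i);
		/* Keep dumping only while the previous response was
		 * completely full; a partially filled one means the
		 * activity records are exhausted.
		 */
	} while (mlxsw_sp_router_rauhtd_is_full(rauhtd_pl));

	return 0;
}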
@@ -862,7 +868,7 @@ static void mlxsw_sp_router_neighs_update_nh(struct mlxsw_sp *mlxsw_sp) | |||
862 | * is active regardless of the traffic. | 868 | * is active regardless of the traffic. |
863 | */ | 869 | */ |
864 | if (!list_empty(&neigh_entry->nexthop_list)) | 870 | if (!list_empty(&neigh_entry->nexthop_list)) |
865 | neigh_event_send(neigh_entry->n, NULL); | 871 | neigh_event_send(neigh_entry->key.n, NULL); |
866 | } | 872 | } |
867 | rtnl_unlock(); | 873 | rtnl_unlock(); |
868 | } | 874 | } |
@@ -908,9 +914,9 @@ static void mlxsw_sp_router_probe_unresolved_nexthops(struct work_struct *work) | |||
908 | rtnl_lock(); | 914 | rtnl_lock(); |
909 | list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list, | 915 | list_for_each_entry(neigh_entry, &mlxsw_sp->router.nexthop_neighs_list, |
910 | nexthop_neighs_list_node) { | 916 | nexthop_neighs_list_node) { |
911 | if (!(neigh_entry->n->nud_state & NUD_VALID) && | 917 | if (!(neigh_entry->key.n->nud_state & NUD_VALID) && |
912 | !list_empty(&neigh_entry->nexthop_list)) | 918 | !list_empty(&neigh_entry->nexthop_list)) |
913 | neigh_event_send(neigh_entry->n, NULL); | 919 | neigh_event_send(neigh_entry->key.n, NULL); |
914 | } | 920 | } |
915 | rtnl_unlock(); | 921 | rtnl_unlock(); |
916 | 922 | ||
@@ -927,7 +933,7 @@ static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work) | |||
927 | { | 933 | { |
928 | struct mlxsw_sp_neigh_entry *neigh_entry = | 934 | struct mlxsw_sp_neigh_entry *neigh_entry = |
929 | container_of(work, struct mlxsw_sp_neigh_entry, dw.work); | 935 | container_of(work, struct mlxsw_sp_neigh_entry, dw.work); |
930 | struct neighbour *n = neigh_entry->n; | 936 | struct neighbour *n = neigh_entry->key.n; |
931 | struct mlxsw_sp_port *mlxsw_sp_port = neigh_entry->mlxsw_sp_port; | 937 | struct mlxsw_sp_port *mlxsw_sp_port = neigh_entry->mlxsw_sp_port; |
932 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; | 938 | struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; |
933 | char rauht_pl[MLXSW_REG_RAUHT_LEN]; | 939 | char rauht_pl[MLXSW_REG_RAUHT_LEN]; |
@@ -1030,11 +1036,8 @@ int mlxsw_sp_router_netevent_event(struct notifier_block *unused, | |||
1030 | 1036 | ||
1031 | mlxsw_sp = mlxsw_sp_port->mlxsw_sp; | 1037 | mlxsw_sp = mlxsw_sp_port->mlxsw_sp; |
1032 | dip = ntohl(*((__be32 *) n->primary_key)); | 1038 | dip = ntohl(*((__be32 *) n->primary_key)); |
1033 | neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, | 1039 | neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); |
1034 | &dip, | 1040 | if (WARN_ON(!neigh_entry)) { |
1035 | sizeof(__be32), | ||
1036 | dev); | ||
1037 | if (WARN_ON(!neigh_entry) || WARN_ON(neigh_entry->n != n)) { | ||
1038 | mlxsw_sp_port_dev_put(mlxsw_sp_port); | 1041 | mlxsw_sp_port_dev_put(mlxsw_sp_port); |
1039 | return NOTIFY_DONE; | 1042 | return NOTIFY_DONE; |
1040 | } | 1043 | } |
@@ -1343,33 +1346,26 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp, | |||
1343 | struct fib_nh *fib_nh) | 1346 | struct fib_nh *fib_nh) |
1344 | { | 1347 | { |
1345 | struct mlxsw_sp_neigh_entry *neigh_entry; | 1348 | struct mlxsw_sp_neigh_entry *neigh_entry; |
1346 | u32 gwip = ntohl(fib_nh->nh_gw); | ||
1347 | struct net_device *dev = fib_nh->nh_dev; | 1349 | struct net_device *dev = fib_nh->nh_dev; |
1348 | struct neighbour *n; | 1350 | struct neighbour *n; |
1349 | u8 nud_state; | 1351 | u8 nud_state; |
1350 | 1352 | ||
1351 | neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &gwip, | 1353 | /* Take a reference of neigh here ensuring that neigh would |
1352 | sizeof(gwip), dev); | 1354 | * not be destructed before the nexthop entry is finished. |
1353 | if (!neigh_entry) { | 1355 | * The reference is taken either in neigh_lookup() or |
1354 | __be32 gwipn = htonl(gwip); | 1356 | * in neigh_create() in case n is not found. |
1355 | 1357 | */ | |
1356 | n = neigh_create(&arp_tbl, &gwipn, dev); | 1358 | n = neigh_lookup(&arp_tbl, &fib_nh->nh_gw, dev); |
1359 | if (!n) { | ||
1360 | n = neigh_create(&arp_tbl, &fib_nh->nh_gw, dev); | ||
1357 | if (IS_ERR(n)) | 1361 | if (IS_ERR(n)) |
1358 | return PTR_ERR(n); | 1362 | return PTR_ERR(n); |
1359 | neigh_event_send(n, NULL); | 1363 | neigh_event_send(n, NULL); |
1360 | neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, &gwip, | 1364 | } |
1361 | sizeof(gwip), dev); | 1365 | neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n); |
1362 | if (!neigh_entry) { | 1366 | if (!neigh_entry) { |
1363 | neigh_release(n); | 1367 | neigh_release(n); |
1364 | return -EINVAL; | 1368 | return -EINVAL; |
1365 | } | ||
1366 | } else { | ||
1367 | /* Take a reference of neigh here ensuring that neigh would | ||
1368 | * not be detructed before the nexthop entry is finished. | ||
1369 | * The second branch takes the reference in neith_create() | ||
1370 | */ | ||
1371 | n = neigh_entry->n; | ||
1372 | neigh_clone(n); | ||
1373 | } | 1369 | } |
1374 | 1370 | ||
1375 | /* If that is the first nexthop connected to that neigh, add to | 1371 | /* If that is the first nexthop connected to that neigh, add to |
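The rewritten block relies on the neighbour core's reference convention: both neigh_lookup() and neigh_create() return a referenced neighbour, so every later exit path has to pair with neigh_release(), as the error path above does. Stripped of the driver specifics, the pattern is the following; nh_neigh_get() is a hypothetical name used only for illustration:

/* Hypothetical helper showing the lookup-or-create pattern used above;
 * on success the caller owns one reference and must neigh_release() it.
 */
static struct neighbour *nh_neigh_get(__be32 *gw, struct net_device *dev)
{
	struct neighbour *n;

	n = neigh_lookup(&arp_tbl, gw, dev);
	if (!n) {
		n = neigh_create(&arp_tbl, gw, dev);
		if (IS_ERR(n))
			return n;
		neigh_event_send(n, NULL);	/* start resolving the new entry */
	}
	return n;
}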
@@ -1403,7 +1399,7 @@ static void mlxsw_sp_nexthop_fini(struct mlxsw_sp *mlxsw_sp, | |||
1403 | if (list_empty(&nh->neigh_entry->nexthop_list)) | 1399 | if (list_empty(&nh->neigh_entry->nexthop_list)) |
1404 | list_del(&nh->neigh_entry->nexthop_neighs_list_node); | 1400 | list_del(&nh->neigh_entry->nexthop_neighs_list_node); |
1405 | 1401 | ||
1406 | neigh_release(neigh_entry->n); | 1402 | neigh_release(neigh_entry->key.n); |
1407 | } | 1403 | } |
1408 | 1404 | ||
1409 | static struct mlxsw_sp_nexthop_group * | 1405 | static struct mlxsw_sp_nexthop_group * |
@@ -1463,11 +1459,11 @@ static bool mlxsw_sp_nexthop_match(struct mlxsw_sp_nexthop *nh, | |||
1463 | 1459 | ||
1464 | for (i = 0; i < fi->fib_nhs; i++) { | 1460 | for (i = 0; i < fi->fib_nhs; i++) { |
1465 | struct fib_nh *fib_nh = &fi->fib_nh[i]; | 1461 | struct fib_nh *fib_nh = &fi->fib_nh[i]; |
1466 | u32 gwip = ntohl(fib_nh->nh_gw); | 1462 | struct neighbour *n = nh->neigh_entry->key.n; |
1467 | 1463 | ||
1468 | if (memcmp(nh->neigh_entry->key.addr, | 1464 | if (memcmp(n->primary_key, &fib_nh->nh_gw, |
1469 | &gwip, sizeof(u32)) == 0 && | 1465 | sizeof(fib_nh->nh_gw)) == 0 && |
1470 | nh->neigh_entry->key.dev == fib_nh->nh_dev) | 1466 | n->dev == fib_nh->nh_dev) |
1471 | return true; | 1467 | return true; |
1472 | } | 1468 | } |
1473 | return false; | 1469 | return false; |
@@ -1874,18 +1870,18 @@ static int mlxsw_sp_router_set_abort_trap(struct mlxsw_sp *mlxsw_sp) | |||
1874 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); | 1870 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ralue), ralue_pl); |
1875 | } | 1871 | } |
1876 | 1872 | ||
1877 | static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp) | 1873 | static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp) |
1878 | { | 1874 | { |
1879 | struct mlxsw_resources *resources; | 1875 | struct mlxsw_resources *resources; |
1880 | struct mlxsw_sp_fib_entry *fib_entry; | 1876 | struct mlxsw_sp_fib_entry *fib_entry; |
1881 | struct mlxsw_sp_fib_entry *tmp; | 1877 | struct mlxsw_sp_fib_entry *tmp; |
1882 | struct mlxsw_sp_vr *vr; | 1878 | struct mlxsw_sp_vr *vr; |
1883 | int i; | 1879 | int i; |
1884 | int err; | ||
1885 | 1880 | ||
1886 | resources = mlxsw_core_resources_get(mlxsw_sp->core); | 1881 | resources = mlxsw_core_resources_get(mlxsw_sp->core); |
1887 | for (i = 0; i < resources->max_virtual_routers; i++) { | 1882 | for (i = 0; i < resources->max_virtual_routers; i++) { |
1888 | vr = &mlxsw_sp->router.vrs[i]; | 1883 | vr = &mlxsw_sp->router.vrs[i]; |
1884 | |||
1889 | if (!vr->used) | 1885 | if (!vr->used) |
1890 | continue; | 1886 | continue; |
1891 | 1887 | ||
@@ -1901,6 +1897,13 @@ static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp) | |||
1901 | break; | 1897 | break; |
1902 | } | 1898 | } |
1903 | } | 1899 | } |
1900 | } | ||
1901 | |||
1902 | static void mlxsw_sp_router_fib4_abort(struct mlxsw_sp *mlxsw_sp) | ||
1903 | { | ||
1904 | int err; | ||
1905 | |||
1906 | mlxsw_sp_router_fib_flush(mlxsw_sp); | ||
1904 | mlxsw_sp->router.aborted = true; | 1907 | mlxsw_sp->router.aborted = true; |
1905 | err = mlxsw_sp_router_set_abort_trap(mlxsw_sp); | 1908 | err = mlxsw_sp_router_set_abort_trap(mlxsw_sp); |
1906 | if (err) | 1909 | if (err) |
@@ -1958,6 +1961,9 @@ static int mlxsw_sp_router_fib_event(struct notifier_block *nb, | |||
1958 | struct fib_entry_notifier_info *fen_info = ptr; | 1961 | struct fib_entry_notifier_info *fen_info = ptr; |
1959 | int err; | 1962 | int err; |
1960 | 1963 | ||
1964 | if (!net_eq(fen_info->info.net, &init_net)) | ||
1965 | return NOTIFY_DONE; | ||
1966 | |||
1961 | switch (event) { | 1967 | switch (event) { |
1962 | case FIB_EVENT_ENTRY_ADD: | 1968 | case FIB_EVENT_ENTRY_ADD: |
1963 | err = mlxsw_sp_router_fib4_add(mlxsw_sp, fen_info); | 1969 | err = mlxsw_sp_router_fib4_add(mlxsw_sp, fen_info); |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 5e00c79e8133..1e2c8eca3af1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | |||
@@ -929,12 +929,12 @@ static int mlxsw_sp_port_smid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mid, | |||
929 | 929 | ||
930 | static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp, | 930 | static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp, |
931 | const unsigned char *addr, | 931 | const unsigned char *addr, |
932 | u16 vid) | 932 | u16 fid) |
933 | { | 933 | { |
934 | struct mlxsw_sp_mid *mid; | 934 | struct mlxsw_sp_mid *mid; |
935 | 935 | ||
936 | list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) { | 936 | list_for_each_entry(mid, &mlxsw_sp->br_mids.list, list) { |
937 | if (ether_addr_equal(mid->addr, addr) && mid->vid == vid) | 937 | if (ether_addr_equal(mid->addr, addr) && mid->fid == fid) |
938 | return mid; | 938 | return mid; |
939 | } | 939 | } |
940 | return NULL; | 940 | return NULL; |
@@ -942,7 +942,7 @@ static struct mlxsw_sp_mid *__mlxsw_sp_mc_get(struct mlxsw_sp *mlxsw_sp, | |||
942 | 942 | ||
943 | static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp, | 943 | static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp, |
944 | const unsigned char *addr, | 944 | const unsigned char *addr, |
945 | u16 vid) | 945 | u16 fid) |
946 | { | 946 | { |
947 | struct mlxsw_sp_mid *mid; | 947 | struct mlxsw_sp_mid *mid; |
948 | u16 mid_idx; | 948 | u16 mid_idx; |
@@ -958,7 +958,7 @@ static struct mlxsw_sp_mid *__mlxsw_sp_mc_alloc(struct mlxsw_sp *mlxsw_sp, | |||
958 | 958 | ||
959 | set_bit(mid_idx, mlxsw_sp->br_mids.mapped); | 959 | set_bit(mid_idx, mlxsw_sp->br_mids.mapped); |
960 | ether_addr_copy(mid->addr, addr); | 960 | ether_addr_copy(mid->addr, addr); |
961 | mid->vid = vid; | 961 | mid->fid = fid; |
962 | mid->mid = mid_idx; | 962 | mid->mid = mid_idx; |
963 | mid->ref_count = 0; | 963 | mid->ref_count = 0; |
964 | list_add_tail(&mid->list, &mlxsw_sp->br_mids.list); | 964 | list_add_tail(&mid->list, &mlxsw_sp->br_mids.list); |
@@ -991,9 +991,9 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port, | |||
991 | if (switchdev_trans_ph_prepare(trans)) | 991 | if (switchdev_trans_ph_prepare(trans)) |
992 | return 0; | 992 | return 0; |
993 | 993 | ||
994 | mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid); | 994 | mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid); |
995 | if (!mid) { | 995 | if (!mid) { |
996 | mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, mdb->vid); | 996 | mid = __mlxsw_sp_mc_alloc(mlxsw_sp, mdb->addr, fid); |
997 | if (!mid) { | 997 | if (!mid) { |
998 | netdev_err(dev, "Unable to allocate MC group\n"); | 998 | netdev_err(dev, "Unable to allocate MC group\n"); |
999 | return -ENOMEM; | 999 | return -ENOMEM; |
@@ -1137,7 +1137,7 @@ static int mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port, | |||
1137 | u16 mid_idx; | 1137 | u16 mid_idx; |
1138 | int err = 0; | 1138 | int err = 0; |
1139 | 1139 | ||
1140 | mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, mdb->vid); | 1140 | mid = __mlxsw_sp_mc_get(mlxsw_sp, mdb->addr, fid); |
1141 | if (!mid) { | 1141 | if (!mid) { |
1142 | netdev_err(dev, "Unable to remove port from MC DB\n"); | 1142 | netdev_err(dev, "Unable to remove port from MC DB\n"); |
1143 | return -EINVAL; | 1143 | return -EINVAL; |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index 72eee29c677f..2777d5bb4380 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h | |||
@@ -727,9 +727,6 @@ struct core_tx_bd_flags { | |||
727 | #define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT 6 | 727 | #define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT 6 |
728 | #define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK 0x1 | 728 | #define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK 0x1 |
729 | #define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7 | 729 | #define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7 |
730 | #define CORE_TX_BD_FLAGS_ROCE_FLAV_MASK 0x1 | ||
731 | #define CORE_TX_BD_FLAGS_ROCE_FLAV_SHIFT 12 | ||
732 | |||
733 | }; | 730 | }; |
734 | 731 | ||
735 | struct core_tx_bd { | 732 | struct core_tx_bd { |
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index 63e1a1b0ef8e..f95385cbbd40 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c | |||
@@ -1119,6 +1119,7 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn, | |||
1119 | start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK << | 1119 | start_bd->bd_flags.as_bitfield |= CORE_TX_BD_FLAGS_START_BD_MASK << |
1120 | CORE_TX_BD_FLAGS_START_BD_SHIFT; | 1120 | CORE_TX_BD_FLAGS_START_BD_SHIFT; |
1121 | SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds); | 1121 | SET_FIELD(start_bd->bitfield0, CORE_TX_BD_NBDS, num_of_bds); |
1122 | SET_FIELD(start_bd->bitfield0, CORE_TX_BD_ROCE_FLAV, type); | ||
1122 | DMA_REGPAIR_LE(start_bd->addr, first_frag); | 1123 | DMA_REGPAIR_LE(start_bd->addr, first_frag); |
1123 | start_bd->nbytes = cpu_to_le16(first_frag_len); | 1124 | start_bd->nbytes = cpu_to_le16(first_frag_len); |
1124 | 1125 | ||
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index c418360ba02a..333c7442e48a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c | |||
@@ -839,20 +839,19 @@ static void qed_update_pf_params(struct qed_dev *cdev, | |||
839 | { | 839 | { |
840 | int i; | 840 | int i; |
841 | 841 | ||
842 | if (IS_ENABLED(CONFIG_QED_RDMA)) { | ||
843 | params->rdma_pf_params.num_qps = QED_ROCE_QPS; | ||
844 | params->rdma_pf_params.min_dpis = QED_ROCE_DPIS; | ||
845 | /* divide by 3 the MRs to avoid MF ILT overflow */ | ||
846 | params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS; | ||
847 | params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX; | ||
848 | } | ||
849 | |||
842 | for (i = 0; i < cdev->num_hwfns; i++) { | 850 | for (i = 0; i < cdev->num_hwfns; i++) { |
843 | struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; | 851 | struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; |
844 | 852 | ||
845 | p_hwfn->pf_params = *params; | 853 | p_hwfn->pf_params = *params; |
846 | } | 854 | } |
847 | |||
848 | if (!IS_ENABLED(CONFIG_QED_RDMA)) | ||
849 | return; | ||
850 | |||
851 | params->rdma_pf_params.num_qps = QED_ROCE_QPS; | ||
852 | params->rdma_pf_params.min_dpis = QED_ROCE_DPIS; | ||
853 | /* divide by 3 the MRs to avoid MF ILT overflow */ | ||
854 | params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS; | ||
855 | params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX; | ||
856 | } | 855 | } |
857 | 856 | ||
858 | static int qed_slowpath_start(struct qed_dev *cdev, | 857 | static int qed_slowpath_start(struct qed_dev *cdev, |
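The qed_main.c hunk moves the RDMA parameter setup ahead of the loop that copies *params into each hwfn; with the old ordering the rdma_pf_params fields were only filled in after the copies had been taken, so they never reached p_hwfn->pf_params. A minimal user-space illustration of that copy-ordering pitfall (the structure and values are made up):

#include <stdio.h>

struct pf_params { int num_qps; };	/* stand-in, not the real structure */

int main(void)
{
	struct pf_params params = { 0 };
	struct pf_params hwfn_copy;

	/* Old ordering: copy first, then populate the source. */
	hwfn_copy = params;
	params.num_qps = 8;
	printf("copy-then-set: hwfn sees %d qps\n", hwfn_copy.num_qps);	/* prints 0 */

	/* New ordering: populate the source, then copy. */
	params.num_qps = 8;
	hwfn_copy = params;
	printf("set-then-copy: hwfn sees %d qps\n", hwfn_copy.num_qps);	/* prints 8 */
	return 0;
}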
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 12251a1032d1..7567cc464b88 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c | |||
@@ -175,16 +175,23 @@ static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf) | |||
175 | for (i = 0, k = 0; i < QEDE_QUEUE_CNT(edev); i++) { | 175 | for (i = 0, k = 0; i < QEDE_QUEUE_CNT(edev); i++) { |
176 | int tc; | 176 | int tc; |
177 | 177 | ||
178 | for (j = 0; j < QEDE_NUM_RQSTATS; j++) | 178 | if (edev->fp_array[i].type & QEDE_FASTPATH_RX) { |
179 | sprintf(buf + (k + j) * ETH_GSTRING_LEN, | 179 | for (j = 0; j < QEDE_NUM_RQSTATS; j++) |
180 | "%d: %s", i, qede_rqstats_arr[j].string); | ||
181 | k += QEDE_NUM_RQSTATS; | ||
182 | for (tc = 0; tc < edev->num_tc; tc++) { | ||
183 | for (j = 0; j < QEDE_NUM_TQSTATS; j++) | ||
184 | sprintf(buf + (k + j) * ETH_GSTRING_LEN, | 180 | sprintf(buf + (k + j) * ETH_GSTRING_LEN, |
185 | "%d.%d: %s", i, tc, | 181 | "%d: %s", i, |
186 | qede_tqstats_arr[j].string); | 182 | qede_rqstats_arr[j].string); |
187 | k += QEDE_NUM_TQSTATS; | 183 | k += QEDE_NUM_RQSTATS; |
184 | } | ||
185 | |||
186 | if (edev->fp_array[i].type & QEDE_FASTPATH_TX) { | ||
187 | for (tc = 0; tc < edev->num_tc; tc++) { | ||
188 | for (j = 0; j < QEDE_NUM_TQSTATS; j++) | ||
189 | sprintf(buf + (k + j) * | ||
190 | ETH_GSTRING_LEN, | ||
191 | "%d.%d: %s", i, tc, | ||
192 | qede_tqstats_arr[j].string); | ||
193 | k += QEDE_NUM_TQSTATS; | ||
194 | } | ||
188 | } | 195 | } |
189 | } | 196 | } |
190 | 197 | ||
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 7def29aaf65c..85f46dbecd5b 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c | |||
@@ -2839,7 +2839,7 @@ static int qede_alloc_sge_mem(struct qede_dev *edev, struct qede_rx_queue *rxq) | |||
2839 | } | 2839 | } |
2840 | 2840 | ||
2841 | mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0, | 2841 | mapping = dma_map_page(&edev->pdev->dev, replace_buf->data, 0, |
2842 | rxq->rx_buf_size, DMA_FROM_DEVICE); | 2842 | PAGE_SIZE, DMA_FROM_DEVICE); |
2843 | if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) { | 2843 | if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) { |
2844 | DP_NOTICE(edev, | 2844 | DP_NOTICE(edev, |
2845 | "Failed to map TPA replacement buffer\n"); | 2845 | "Failed to map TPA replacement buffer\n"); |
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c index 6fb3bee904d3..0b4deb31e742 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c | |||
@@ -575,10 +575,11 @@ void emac_mac_start(struct emac_adapter *adpt) | |||
575 | 575 | ||
576 | mac |= TXEN | RXEN; /* enable RX/TX */ | 576 | mac |= TXEN | RXEN; /* enable RX/TX */ |
577 | 577 | ||
578 | /* We don't have ethtool support yet, so force flow-control mode | 578 | /* Configure MAC flow control to match the PHY's settings. */ |
579 | * to 'full' always. | 579 | if (phydev->pause) |
580 | */ | 580 | mac |= RXFC; |
581 | mac |= TXFC | RXFC; | 581 | if (phydev->pause != phydev->asym_pause) |
582 | mac |= TXFC; | ||
582 | 583 | ||
583 | /* setup link speed */ | 584 | /* setup link speed */ |
584 | mac &= ~SPEED_MASK; | 585 | mac &= ~SPEED_MASK; |
@@ -1003,6 +1004,12 @@ int emac_mac_up(struct emac_adapter *adpt) | |||
1003 | writel((u32)~DIS_INT, adpt->base + EMAC_INT_STATUS); | 1004 | writel((u32)~DIS_INT, adpt->base + EMAC_INT_STATUS); |
1004 | writel(adpt->irq.mask, adpt->base + EMAC_INT_MASK); | 1005 | writel(adpt->irq.mask, adpt->base + EMAC_INT_MASK); |
1005 | 1006 | ||
1007 | /* Enable pause frames. Without this feature, the EMAC has been shown | ||
1008 | * to receive (and drop) frames with FCS errors at gigabit connections. | ||
1009 | */ | ||
1010 | adpt->phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; | ||
1011 | adpt->phydev->advertising |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; | ||
1012 | |||
1006 | adpt->phydev->irq = PHY_IGNORE_INTERRUPT; | 1013 | adpt->phydev->irq = PHY_IGNORE_INTERRUPT; |
1007 | phy_start(adpt->phydev); | 1014 | phy_start(adpt->phydev); |
1008 | 1015 | ||
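The emac-mac.c hunks stop forcing full flow control and instead derive the MAC's RX/TX flow-control enables from the PHY's resolved pause and asym_pause bits, which the second hunk now also advertises. The mapping implemented by the two new if statements works out as in this small user-space table (names are illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Same tests as in emac_mac_start(): pause enables RX flow control,
 * and pause != asym_pause additionally enables TX flow control.
 */
static void resolve_fc(bool pause, bool asym_pause)
{
	bool rxfc = pause;
	bool txfc = pause != asym_pause;

	printf("pause=%d asym_pause=%d -> RXFC=%d TXFC=%d\n",
	       pause, asym_pause, rxfc, txfc);
}

int main(void)
{
	resolve_fc(false, false);	/* no flow control */
	resolve_fc(false, true);	/* TX flow control only */
	resolve_fc(true, false);	/* symmetric: RX and TX */
	resolve_fc(true, true);		/* RX flow control only */
	return 0;
}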
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c index 75c1b530e39e..72fe343c7a36 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c | |||
@@ -421,7 +421,7 @@ static const struct emac_reg_write sgmii_v2_laned[] = { | |||
421 | /* CDR Settings */ | 421 | /* CDR Settings */ |
422 | {EMAC_SGMII_LN_UCDR_FO_GAIN_MODE0, | 422 | {EMAC_SGMII_LN_UCDR_FO_GAIN_MODE0, |
423 | UCDR_STEP_BY_TWO_MODE0 | UCDR_xO_GAIN_MODE(10)}, | 423 | UCDR_STEP_BY_TWO_MODE0 | UCDR_xO_GAIN_MODE(10)}, |
424 | {EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0, UCDR_xO_GAIN_MODE(6)}, | 424 | {EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0, UCDR_xO_GAIN_MODE(0)}, |
425 | {EMAC_SGMII_LN_UCDR_SO_CONFIG, UCDR_ENABLE | UCDR_SO_SATURATION(12)}, | 425 | {EMAC_SGMII_LN_UCDR_SO_CONFIG, UCDR_ENABLE | UCDR_SO_SATURATION(12)}, |
426 | 426 | ||
427 | /* TX/RX Settings */ | 427 | /* TX/RX Settings */ |
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 3cf3557106c2..6b89e4a7b164 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c | |||
@@ -485,6 +485,9 @@ efx_copy_channel(const struct efx_channel *old_channel) | |||
485 | *channel = *old_channel; | 485 | *channel = *old_channel; |
486 | 486 | ||
487 | channel->napi_dev = NULL; | 487 | channel->napi_dev = NULL; |
488 | INIT_HLIST_NODE(&channel->napi_str.napi_hash_node); | ||
489 | channel->napi_str.napi_id = 0; | ||
490 | channel->napi_str.state = 0; | ||
488 | memset(&channel->eventq, 0, sizeof(channel->eventq)); | 491 | memset(&channel->eventq, 0, sizeof(channel->eventq)); |
489 | 492 | ||
490 | for (j = 0; j < EFX_TXQ_TYPES; j++) { | 493 | for (j = 0; j < EFX_TXQ_TYPES; j++) { |
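The three lines added to efx_copy_channel() are needed because the structure assignment just above them copies the embedded NAPI state, including its hash-list linkage, verbatim from the old channel, so the copy must be reset to clean per-instance state before it is registered again. A tiny user-space sketch of why copied linkage has to be re-initialized (types and values are made up):

#include <stdio.h>

struct list_link { struct list_link *next; };	/* stand-in for hlist_node */
struct channel_sketch {
	struct list_link hash_node;
	int eventq;
};

int main(void)
{
	struct channel_sketch old = {
		.hash_node = { .next = (struct list_link *)0x1 },	/* "linked" */
		.eventq = 42,
	};
	struct channel_sketch copy = old;	/* linkage pointer copied verbatim */

	printf("stale next after copy: %p\n", (void *)copy.hash_node.next);
	copy.hash_node = (struct list_link){ 0 };	/* re-init, as the hunk does */
	printf("clean next after re-init: %p\n", (void *)copy.hash_node.next);
	return 0;
}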
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index 3818c5e06eba..4b78168a5f3c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig | |||
@@ -107,7 +107,7 @@ config DWMAC_STI | |||
107 | config DWMAC_STM32 | 107 | config DWMAC_STM32 |
108 | tristate "STM32 DWMAC support" | 108 | tristate "STM32 DWMAC support" |
109 | default ARCH_STM32 | 109 | default ARCH_STM32 |
110 | depends on OF && HAS_IOMEM | 110 | depends on OF && HAS_IOMEM && (ARCH_STM32 || COMPILE_TEST) |
111 | select MFD_SYSCON | 111 | select MFD_SYSCON |
112 | ---help--- | 112 | ---help--- |
113 | Support for ethernet controller on STM32 SOCs. | 113 | Support for ethernet controller on STM32 SOCs. |
diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c index 2920e2ee3864..489ef146201e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c +++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c | |||
@@ -63,8 +63,8 @@ | |||
63 | #define TSE_PCS_SGMII_LINK_TIMER_0 0x0D40 | 63 | #define TSE_PCS_SGMII_LINK_TIMER_0 0x0D40 |
64 | #define TSE_PCS_SGMII_LINK_TIMER_1 0x0003 | 64 | #define TSE_PCS_SGMII_LINK_TIMER_1 0x0003 |
65 | #define TSE_PCS_SW_RESET_TIMEOUT 100 | 65 | #define TSE_PCS_SW_RESET_TIMEOUT 100 |
66 | #define TSE_PCS_USE_SGMII_AN_MASK BIT(2) | 66 | #define TSE_PCS_USE_SGMII_AN_MASK BIT(1) |
67 | #define TSE_PCS_USE_SGMII_ENA BIT(1) | 67 | #define TSE_PCS_USE_SGMII_ENA BIT(0) |
68 | 68 | ||
69 | #define SGMII_ADAPTER_CTRL_REG 0x00 | 69 | #define SGMII_ADAPTER_CTRL_REG 0x00 |
70 | #define SGMII_ADAPTER_DISABLE 0x0001 | 70 | #define SGMII_ADAPTER_DISABLE 0x0001 |
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index d3292c4a6eda..6d2de4e01f6d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h | |||
@@ -120,14 +120,17 @@ struct stmmac_extra_stats { | |||
120 | unsigned long ip_csum_bypassed; | 120 | unsigned long ip_csum_bypassed; |
121 | unsigned long ipv4_pkt_rcvd; | 121 | unsigned long ipv4_pkt_rcvd; |
122 | unsigned long ipv6_pkt_rcvd; | 122 | unsigned long ipv6_pkt_rcvd; |
123 | unsigned long rx_msg_type_ext_no_ptp; | 123 | unsigned long no_ptp_rx_msg_type_ext; |
124 | unsigned long rx_msg_type_sync; | 124 | unsigned long ptp_rx_msg_type_sync; |
125 | unsigned long rx_msg_type_follow_up; | 125 | unsigned long ptp_rx_msg_type_follow_up; |
126 | unsigned long rx_msg_type_delay_req; | 126 | unsigned long ptp_rx_msg_type_delay_req; |
127 | unsigned long rx_msg_type_delay_resp; | 127 | unsigned long ptp_rx_msg_type_delay_resp; |
128 | unsigned long rx_msg_type_pdelay_req; | 128 | unsigned long ptp_rx_msg_type_pdelay_req; |
129 | unsigned long rx_msg_type_pdelay_resp; | 129 | unsigned long ptp_rx_msg_type_pdelay_resp; |
130 | unsigned long rx_msg_type_pdelay_follow_up; | 130 | unsigned long ptp_rx_msg_type_pdelay_follow_up; |
131 | unsigned long ptp_rx_msg_type_announce; | ||
132 | unsigned long ptp_rx_msg_type_management; | ||
133 | unsigned long ptp_rx_msg_pkt_reserved_type; | ||
131 | unsigned long ptp_frame_type; | 134 | unsigned long ptp_frame_type; |
132 | unsigned long ptp_ver; | 135 | unsigned long ptp_ver; |
133 | unsigned long timestamp_dropped; | 136 | unsigned long timestamp_dropped; |
@@ -482,11 +485,12 @@ struct stmmac_ops { | |||
482 | /* PTP and HW Timer helpers */ | 485 | /* PTP and HW Timer helpers */ |
483 | struct stmmac_hwtimestamp { | 486 | struct stmmac_hwtimestamp { |
484 | void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data); | 487 | void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data); |
485 | u32 (*config_sub_second_increment) (void __iomem *ioaddr, u32 clk_rate); | 488 | u32 (*config_sub_second_increment)(void __iomem *ioaddr, u32 ptp_clock, |
489 | int gmac4); | ||
486 | int (*init_systime) (void __iomem *ioaddr, u32 sec, u32 nsec); | 490 | int (*init_systime) (void __iomem *ioaddr, u32 sec, u32 nsec); |
487 | int (*config_addend) (void __iomem *ioaddr, u32 addend); | 491 | int (*config_addend) (void __iomem *ioaddr, u32 addend); |
488 | int (*adjust_systime) (void __iomem *ioaddr, u32 sec, u32 nsec, | 492 | int (*adjust_systime) (void __iomem *ioaddr, u32 sec, u32 nsec, |
489 | int add_sub); | 493 | int add_sub, int gmac4); |
490 | u64(*get_systime) (void __iomem *ioaddr); | 494 | u64(*get_systime) (void __iomem *ioaddr); |
491 | }; | 495 | }; |
492 | 496 | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h index 2e4c171a2b41..e3c86d422109 100644 --- a/drivers/net/ethernet/stmicro/stmmac/descs.h +++ b/drivers/net/ethernet/stmicro/stmmac/descs.h | |||
@@ -155,14 +155,18 @@ | |||
155 | #define ERDES4_L3_L4_FILT_NO_MATCH_MASK GENMASK(27, 26) | 155 | #define ERDES4_L3_L4_FILT_NO_MATCH_MASK GENMASK(27, 26) |
156 | 156 | ||
157 | /* Extended RDES4 message type definitions */ | 157 | /* Extended RDES4 message type definitions */ |
158 | #define RDES_EXT_NO_PTP 0 | 158 | #define RDES_EXT_NO_PTP 0x0 |
159 | #define RDES_EXT_SYNC 1 | 159 | #define RDES_EXT_SYNC 0x1 |
160 | #define RDES_EXT_FOLLOW_UP 2 | 160 | #define RDES_EXT_FOLLOW_UP 0x2 |
161 | #define RDES_EXT_DELAY_REQ 3 | 161 | #define RDES_EXT_DELAY_REQ 0x3 |
162 | #define RDES_EXT_DELAY_RESP 4 | 162 | #define RDES_EXT_DELAY_RESP 0x4 |
163 | #define RDES_EXT_PDELAY_REQ 5 | 163 | #define RDES_EXT_PDELAY_REQ 0x5 |
164 | #define RDES_EXT_PDELAY_RESP 6 | 164 | #define RDES_EXT_PDELAY_RESP 0x6 |
165 | #define RDES_EXT_PDELAY_FOLLOW_UP 7 | 165 | #define RDES_EXT_PDELAY_FOLLOW_UP 0x7 |
166 | #define RDES_PTP_ANNOUNCE 0x8 | ||
167 | #define RDES_PTP_MANAGEMENT 0x9 | ||
168 | #define RDES_PTP_SIGNALING 0xa | ||
169 | #define RDES_PTP_PKT_RESERVED_TYPE 0xf | ||
166 | 170 | ||
167 | /* Basic descriptor structure for normal and alternate descriptors */ | 171 | /* Basic descriptor structure for normal and alternate descriptors */ |
168 | struct dma_desc { | 172 | struct dma_desc { |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index a1b17cd7886b..a601f8d43b75 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | |||
@@ -123,22 +123,29 @@ static int dwmac4_wrback_get_rx_status(void *data, struct stmmac_extra_stats *x, | |||
123 | x->ipv4_pkt_rcvd++; | 123 | x->ipv4_pkt_rcvd++; |
124 | if (rdes1 & RDES1_IPV6_HEADER) | 124 | if (rdes1 & RDES1_IPV6_HEADER) |
125 | x->ipv6_pkt_rcvd++; | 125 | x->ipv6_pkt_rcvd++; |
126 | if (message_type == RDES_EXT_SYNC) | 126 | |
127 | x->rx_msg_type_sync++; | 127 | if (message_type == RDES_EXT_NO_PTP) |
128 | x->no_ptp_rx_msg_type_ext++; | ||
129 | else if (message_type == RDES_EXT_SYNC) | ||
130 | x->ptp_rx_msg_type_sync++; | ||
128 | else if (message_type == RDES_EXT_FOLLOW_UP) | 131 | else if (message_type == RDES_EXT_FOLLOW_UP) |
129 | x->rx_msg_type_follow_up++; | 132 | x->ptp_rx_msg_type_follow_up++; |
130 | else if (message_type == RDES_EXT_DELAY_REQ) | 133 | else if (message_type == RDES_EXT_DELAY_REQ) |
131 | x->rx_msg_type_delay_req++; | 134 | x->ptp_rx_msg_type_delay_req++; |
132 | else if (message_type == RDES_EXT_DELAY_RESP) | 135 | else if (message_type == RDES_EXT_DELAY_RESP) |
133 | x->rx_msg_type_delay_resp++; | 136 | x->ptp_rx_msg_type_delay_resp++; |
134 | else if (message_type == RDES_EXT_PDELAY_REQ) | 137 | else if (message_type == RDES_EXT_PDELAY_REQ) |
135 | x->rx_msg_type_pdelay_req++; | 138 | x->ptp_rx_msg_type_pdelay_req++; |
136 | else if (message_type == RDES_EXT_PDELAY_RESP) | 139 | else if (message_type == RDES_EXT_PDELAY_RESP) |
137 | x->rx_msg_type_pdelay_resp++; | 140 | x->ptp_rx_msg_type_pdelay_resp++; |
138 | else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP) | 141 | else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP) |
139 | x->rx_msg_type_pdelay_follow_up++; | 142 | x->ptp_rx_msg_type_pdelay_follow_up++; |
140 | else | 143 | else if (message_type == RDES_PTP_ANNOUNCE) |
141 | x->rx_msg_type_ext_no_ptp++; | 144 | x->ptp_rx_msg_type_announce++; |
145 | else if (message_type == RDES_PTP_MANAGEMENT) | ||
146 | x->ptp_rx_msg_type_management++; | ||
147 | else if (message_type == RDES_PTP_PKT_RESERVED_TYPE) | ||
148 | x->ptp_rx_msg_pkt_reserved_type++; | ||
142 | 149 | ||
143 | if (rdes1 & RDES1_PTP_PACKET_TYPE) | 150 | if (rdes1 & RDES1_PTP_PACKET_TYPE) |
144 | x->ptp_frame_type++; | 151 | x->ptp_frame_type++; |
@@ -204,14 +211,18 @@ static void dwmac4_rd_enable_tx_timestamp(struct dma_desc *p) | |||
204 | 211 | ||
205 | static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p) | 212 | static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p) |
206 | { | 213 | { |
207 | return (p->des3 & TDES3_TIMESTAMP_STATUS) | 214 | /* Context type from W/B descriptor must be zero */ |
208 | >> TDES3_TIMESTAMP_STATUS_SHIFT; | 215 | if (p->des3 & TDES3_CONTEXT_TYPE) |
216 | return -EINVAL; | ||
217 | |||
218 | /* Tx Timestamp Status is 1 so des0 and des1'll have valid values */ | ||
219 | if (p->des3 & TDES3_TIMESTAMP_STATUS) | ||
220 | return 0; | ||
221 | |||
222 | return 1; | ||
209 | } | 223 | } |
210 | 224 | ||
211 | /* NOTE: For RX CTX bit has to be checked before | 225 | static inline u64 dwmac4_get_timestamp(void *desc, u32 ats) |
212 | * HAVE a specific function for TX and another one for RX | ||
213 | */ | ||
214 | static u64 dwmac4_wrback_get_timestamp(void *desc, u32 ats) | ||
215 | { | 226 | { |
216 | struct dma_desc *p = (struct dma_desc *)desc; | 227 | struct dma_desc *p = (struct dma_desc *)desc; |
217 | u64 ns; | 228 | u64 ns; |
@@ -223,12 +234,54 @@ static u64 dwmac4_wrback_get_timestamp(void *desc, u32 ats) | |||
223 | return ns; | 234 | return ns; |
224 | } | 235 | } |
225 | 236 | ||
226 | static int dwmac4_context_get_rx_timestamp_status(void *desc, u32 ats) | 237 | static int dwmac4_rx_check_timestamp(void *desc) |
238 | { | ||
239 | struct dma_desc *p = (struct dma_desc *)desc; | ||
240 | u32 own, ctxt; | ||
241 | int ret = 1; | ||
242 | |||
243 | own = p->des3 & RDES3_OWN; | ||
244 | ctxt = ((p->des3 & RDES3_CONTEXT_DESCRIPTOR) | ||
245 | >> RDES3_CONTEXT_DESCRIPTOR_SHIFT); | ||
246 | |||
247 | if (likely(!own && ctxt)) { | ||
248 | if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff)) | ||
249 | /* Corrupted value */ | ||
250 | ret = -EINVAL; | ||
251 | else | ||
252 | /* A valid Timestamp is ready to be read */ | ||
253 | ret = 0; | ||
254 | } | ||
255 | |||
256 | /* Timestamp not ready */ | ||
257 | return ret; | ||
258 | } | ||
259 | |||
260 | static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats) | ||
227 | { | 261 | { |
228 | struct dma_desc *p = (struct dma_desc *)desc; | 262 | struct dma_desc *p = (struct dma_desc *)desc; |
263 | int ret = -EINVAL; | ||
264 | |||
265 | /* Get the status from normal w/b descriptor */ | ||
266 | if (likely(p->des3 & TDES3_RS1V)) { | ||
267 | if (likely(p->des1 & RDES1_TIMESTAMP_AVAILABLE)) { | ||
268 | int i = 0; | ||
269 | |||
270 | /* Check if timestamp is OK from context descriptor */ | ||
271 | do { | ||
272 | ret = dwmac4_rx_check_timestamp(desc); | ||
273 | if (ret < 0) | ||
274 | goto exit; | ||
275 | i++; | ||
229 | 276 | ||
230 | return (p->des1 & RDES1_TIMESTAMP_AVAILABLE) | 277 | } while ((ret == 1) || (i < 10)); |
231 | >> RDES1_TIMESTAMP_AVAILABLE_SHIFT; | 278 | |
279 | if (i == 10) | ||
280 | ret = -EBUSY; | ||
281 | } | ||
282 | } | ||
283 | exit: | ||
284 | return ret; | ||
232 | } | 285 | } |
233 | 286 | ||
234 | static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic, | 287 | static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic, |
@@ -373,8 +426,8 @@ const struct stmmac_desc_ops dwmac4_desc_ops = { | |||
373 | .get_rx_frame_len = dwmac4_wrback_get_rx_frame_len, | 426 | .get_rx_frame_len = dwmac4_wrback_get_rx_frame_len, |
374 | .enable_tx_timestamp = dwmac4_rd_enable_tx_timestamp, | 427 | .enable_tx_timestamp = dwmac4_rd_enable_tx_timestamp, |
375 | .get_tx_timestamp_status = dwmac4_wrback_get_tx_timestamp_status, | 428 | .get_tx_timestamp_status = dwmac4_wrback_get_tx_timestamp_status, |
376 | .get_timestamp = dwmac4_wrback_get_timestamp, | 429 | .get_rx_timestamp_status = dwmac4_wrback_get_rx_timestamp_status, |
377 | .get_rx_timestamp_status = dwmac4_context_get_rx_timestamp_status, | 430 | .get_timestamp = dwmac4_get_timestamp, |
378 | .set_tx_ic = dwmac4_rd_set_tx_ic, | 431 | .set_tx_ic = dwmac4_rd_set_tx_ic, |
379 | .prepare_tx_desc = dwmac4_rd_prepare_tx_desc, | 432 | .prepare_tx_desc = dwmac4_rd_prepare_tx_desc, |
380 | .prepare_tso_tx_desc = dwmac4_rd_prepare_tso_tx_desc, | 433 | .prepare_tso_tx_desc = dwmac4_rd_prepare_tso_tx_desc, |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h index 0902a2edeaa9..9736c505211a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h | |||
@@ -59,10 +59,13 @@ | |||
59 | #define TDES3_CTXT_TCMSSV BIT(26) | 59 | #define TDES3_CTXT_TCMSSV BIT(26) |
60 | 60 | ||
61 | /* TDES3 Common */ | 61 | /* TDES3 Common */ |
62 | #define TDES3_RS1V BIT(26) | ||
63 | #define TDES3_RS1V_SHIFT 26 | ||
62 | #define TDES3_LAST_DESCRIPTOR BIT(28) | 64 | #define TDES3_LAST_DESCRIPTOR BIT(28) |
63 | #define TDES3_LAST_DESCRIPTOR_SHIFT 28 | 65 | #define TDES3_LAST_DESCRIPTOR_SHIFT 28 |
64 | #define TDES3_FIRST_DESCRIPTOR BIT(29) | 66 | #define TDES3_FIRST_DESCRIPTOR BIT(29) |
65 | #define TDES3_CONTEXT_TYPE BIT(30) | 67 | #define TDES3_CONTEXT_TYPE BIT(30) |
68 | #define TDES3_CONTEXT_TYPE_SHIFT 30 | ||
66 | 69 | ||
67 | /* TDS3 use for both format (read and write back) */ | 70 | /* TDS3 use for both format (read and write back) */ |
68 | #define TDES3_OWN BIT(31) | 71 | #define TDES3_OWN BIT(31) |
@@ -117,6 +120,7 @@ | |||
117 | #define RDES3_LAST_DESCRIPTOR BIT(28) | 120 | #define RDES3_LAST_DESCRIPTOR BIT(28) |
118 | #define RDES3_FIRST_DESCRIPTOR BIT(29) | 121 | #define RDES3_FIRST_DESCRIPTOR BIT(29) |
119 | #define RDES3_CONTEXT_DESCRIPTOR BIT(30) | 122 | #define RDES3_CONTEXT_DESCRIPTOR BIT(30) |
123 | #define RDES3_CONTEXT_DESCRIPTOR_SHIFT 30 | ||
120 | 124 | ||
121 | /* RDES3 (read format) */ | 125 | /* RDES3 (read format) */ |
122 | #define RDES3_BUFFER1_VALID_ADDR BIT(24) | 126 | #define RDES3_BUFFER1_VALID_ADDR BIT(24) |
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c index 38f19c99cf59..e75549327c34 100644 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c | |||
@@ -150,22 +150,30 @@ static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x, | |||
150 | x->ipv4_pkt_rcvd++; | 150 | x->ipv4_pkt_rcvd++; |
151 | if (rdes4 & ERDES4_IPV6_PKT_RCVD) | 151 | if (rdes4 & ERDES4_IPV6_PKT_RCVD) |
152 | x->ipv6_pkt_rcvd++; | 152 | x->ipv6_pkt_rcvd++; |
153 | if (message_type == RDES_EXT_SYNC) | 153 | |
154 | x->rx_msg_type_sync++; | 154 | if (message_type == RDES_EXT_NO_PTP) |
155 | x->no_ptp_rx_msg_type_ext++; | ||
156 | else if (message_type == RDES_EXT_SYNC) | ||
157 | x->ptp_rx_msg_type_sync++; | ||
155 | else if (message_type == RDES_EXT_FOLLOW_UP) | 158 | else if (message_type == RDES_EXT_FOLLOW_UP) |
156 | x->rx_msg_type_follow_up++; | 159 | x->ptp_rx_msg_type_follow_up++; |
157 | else if (message_type == RDES_EXT_DELAY_REQ) | 160 | else if (message_type == RDES_EXT_DELAY_REQ) |
158 | x->rx_msg_type_delay_req++; | 161 | x->ptp_rx_msg_type_delay_req++; |
159 | else if (message_type == RDES_EXT_DELAY_RESP) | 162 | else if (message_type == RDES_EXT_DELAY_RESP) |
160 | x->rx_msg_type_delay_resp++; | 163 | x->ptp_rx_msg_type_delay_resp++; |
161 | else if (message_type == RDES_EXT_PDELAY_REQ) | 164 | else if (message_type == RDES_EXT_PDELAY_REQ) |
162 | x->rx_msg_type_pdelay_req++; | 165 | x->ptp_rx_msg_type_pdelay_req++; |
163 | else if (message_type == RDES_EXT_PDELAY_RESP) | 166 | else if (message_type == RDES_EXT_PDELAY_RESP) |
164 | x->rx_msg_type_pdelay_resp++; | 167 | x->ptp_rx_msg_type_pdelay_resp++; |
165 | else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP) | 168 | else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP) |
166 | x->rx_msg_type_pdelay_follow_up++; | 169 | x->ptp_rx_msg_type_pdelay_follow_up++; |
167 | else | 170 | else if (message_type == RDES_PTP_ANNOUNCE) |
168 | x->rx_msg_type_ext_no_ptp++; | 171 | x->ptp_rx_msg_type_announce++; |
172 | else if (message_type == RDES_PTP_MANAGEMENT) | ||
173 | x->ptp_rx_msg_type_management++; | ||
174 | else if (message_type == RDES_PTP_PKT_RESERVED_TYPE) | ||
175 | x->ptp_rx_msg_pkt_reserved_type++; | ||
176 | |||
169 | if (rdes4 & ERDES4_PTP_FRAME_TYPE) | 177 | if (rdes4 & ERDES4_PTP_FRAME_TYPE) |
170 | x->ptp_frame_type++; | 178 | x->ptp_frame_type++; |
171 | if (rdes4 & ERDES4_PTP_VER) | 179 | if (rdes4 & ERDES4_PTP_VER) |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index b15fc55f1b96..4d2a759b8465 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h | |||
@@ -129,6 +129,7 @@ struct stmmac_priv { | |||
129 | int irq_wake; | 129 | int irq_wake; |
130 | spinlock_t ptp_lock; | 130 | spinlock_t ptp_lock; |
131 | void __iomem *mmcaddr; | 131 | void __iomem *mmcaddr; |
132 | void __iomem *ptpaddr; | ||
132 | u32 rx_tail_addr; | 133 | u32 rx_tail_addr; |
133 | u32 tx_tail_addr; | 134 | u32 tx_tail_addr; |
134 | u32 mss; | 135 | u32 mss; |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 1e06173fc9d7..c5d0142adda2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | |||
@@ -115,14 +115,17 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = { | |||
115 | STMMAC_STAT(ip_csum_bypassed), | 115 | STMMAC_STAT(ip_csum_bypassed), |
116 | STMMAC_STAT(ipv4_pkt_rcvd), | 116 | STMMAC_STAT(ipv4_pkt_rcvd), |
117 | STMMAC_STAT(ipv6_pkt_rcvd), | 117 | STMMAC_STAT(ipv6_pkt_rcvd), |
118 | STMMAC_STAT(rx_msg_type_ext_no_ptp), | 118 | STMMAC_STAT(no_ptp_rx_msg_type_ext), |
119 | STMMAC_STAT(rx_msg_type_sync), | 119 | STMMAC_STAT(ptp_rx_msg_type_sync), |
120 | STMMAC_STAT(rx_msg_type_follow_up), | 120 | STMMAC_STAT(ptp_rx_msg_type_follow_up), |
121 | STMMAC_STAT(rx_msg_type_delay_req), | 121 | STMMAC_STAT(ptp_rx_msg_type_delay_req), |
122 | STMMAC_STAT(rx_msg_type_delay_resp), | 122 | STMMAC_STAT(ptp_rx_msg_type_delay_resp), |
123 | STMMAC_STAT(rx_msg_type_pdelay_req), | 123 | STMMAC_STAT(ptp_rx_msg_type_pdelay_req), |
124 | STMMAC_STAT(rx_msg_type_pdelay_resp), | 124 | STMMAC_STAT(ptp_rx_msg_type_pdelay_resp), |
125 | STMMAC_STAT(rx_msg_type_pdelay_follow_up), | 125 | STMMAC_STAT(ptp_rx_msg_type_pdelay_follow_up), |
126 | STMMAC_STAT(ptp_rx_msg_type_announce), | ||
127 | STMMAC_STAT(ptp_rx_msg_type_management), | ||
128 | STMMAC_STAT(ptp_rx_msg_pkt_reserved_type), | ||
126 | STMMAC_STAT(ptp_frame_type), | 129 | STMMAC_STAT(ptp_frame_type), |
127 | STMMAC_STAT(ptp_ver), | 130 | STMMAC_STAT(ptp_ver), |
128 | STMMAC_STAT(timestamp_dropped), | 131 | STMMAC_STAT(timestamp_dropped), |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c index a77f68918010..10d6059b2f26 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c | |||
@@ -34,21 +34,29 @@ static void stmmac_config_hw_tstamping(void __iomem *ioaddr, u32 data) | |||
34 | } | 34 | } |
35 | 35 | ||
36 | static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr, | 36 | static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr, |
37 | u32 ptp_clock) | 37 | u32 ptp_clock, int gmac4) |
38 | { | 38 | { |
39 | u32 value = readl(ioaddr + PTP_TCR); | 39 | u32 value = readl(ioaddr + PTP_TCR); |
40 | unsigned long data; | 40 | unsigned long data; |
41 | 41 | ||
42 | /* Convert the ptp_clock to nano second | 42 | /* For GMAC3.x, 4.x versions, convert the ptp_clock to nano second |
43 | * formula = (2/ptp_clock) * 1000000000 | 43 | * formula = (1/ptp_clock) * 1000000000 |
44 | * where, ptp_clock = 50MHz. | 44 | * where ptp_clock is 50MHz if fine method is used to update system |
45 | */ | 45 | */ |
46 | data = (2000000000ULL / ptp_clock); | 46 | if (value & PTP_TCR_TSCFUPDT) |
47 | data = (1000000000ULL / 50000000); | ||
48 | else | ||
49 | data = (1000000000ULL / ptp_clock); | ||
47 | 50 | ||
48 | /* 0.465ns accuracy */ | 51 | /* 0.465ns accuracy */ |
49 | if (!(value & PTP_TCR_TSCTRLSSR)) | 52 | if (!(value & PTP_TCR_TSCTRLSSR)) |
50 | data = (data * 1000) / 465; | 53 | data = (data * 1000) / 465; |
51 | 54 | ||
55 | data &= PTP_SSIR_SSINC_MASK; | ||
56 | |||
57 | if (gmac4) | ||
58 | data = data << GMAC4_PTP_SSIR_SSINC_SHIFT; | ||
59 | |||
52 | writel(data, ioaddr + PTP_SSIR); | 60 | writel(data, ioaddr + PTP_SSIR); |
53 | 61 | ||
54 | return data; | 62 | return data; |
@@ -104,14 +112,30 @@ static int stmmac_config_addend(void __iomem *ioaddr, u32 addend) | |||
104 | } | 112 | } |
105 | 113 | ||
106 | static int stmmac_adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec, | 114 | static int stmmac_adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec, |
107 | int add_sub) | 115 | int add_sub, int gmac4) |
108 | { | 116 | { |
109 | u32 value; | 117 | u32 value; |
110 | int limit; | 118 | int limit; |
111 | 119 | ||
120 | if (add_sub) { | ||
121 | /* If the new sec value needs to be subtracted with | ||
122 | * the system time, then MAC_STSUR reg should be | ||
123 | * programmed with (2^32 – <new_sec_value>) | ||
124 | */ | ||
125 | if (gmac4) | ||
126 | sec = (100000000ULL - sec); | ||
127 | |||
128 | value = readl(ioaddr + PTP_TCR); | ||
129 | if (value & PTP_TCR_TSCTRLSSR) | ||
130 | nsec = (PTP_DIGITAL_ROLLOVER_MODE - nsec); | ||
131 | else | ||
132 | nsec = (PTP_BINARY_ROLLOVER_MODE - nsec); | ||
133 | } | ||
134 | |||
112 | writel(sec, ioaddr + PTP_STSUR); | 135 | writel(sec, ioaddr + PTP_STSUR); |
113 | writel(((add_sub << PTP_STNSUR_ADDSUB_SHIFT) | nsec), | 136 | value = (add_sub << PTP_STNSUR_ADDSUB_SHIFT) | nsec; |
114 | ioaddr + PTP_STNSUR); | 137 | writel(value, ioaddr + PTP_STNSUR); |
138 | |||
115 | /* issue command to initialize the system time value */ | 139 | /* issue command to initialize the system time value */ |
116 | value = readl(ioaddr + PTP_TCR); | 140 | value = readl(ioaddr + PTP_TCR); |
117 | value |= PTP_TCR_TSUPDT; | 141 | value |= PTP_TCR_TSUPDT; |
@@ -134,8 +158,9 @@ static u64 stmmac_get_systime(void __iomem *ioaddr) | |||
134 | { | 158 | { |
135 | u64 ns; | 159 | u64 ns; |
136 | 160 | ||
161 | /* Get the TSSS value */ | ||
137 | ns = readl(ioaddr + PTP_STNSR); | 162 | ns = readl(ioaddr + PTP_STNSR); |
138 | /* convert sec time value to nanosecond */ | 163 | /* Get the TSS and convert sec time value to nanosecond */ |
139 | ns += readl(ioaddr + PTP_STSR) * 1000000000ULL; | 164 | ns += readl(ioaddr + PTP_STSR) * 1000000000ULL; |
140 | 165 | ||
141 | return ns; | 166 | return ns; |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 48e71fad4210..1f9ec02fa7f8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -340,18 +340,17 @@ out: | |||
340 | 340 | ||
341 | /* stmmac_get_tx_hwtstamp - get HW TX timestamps | 341 | /* stmmac_get_tx_hwtstamp - get HW TX timestamps |
342 | * @priv: driver private structure | 342 | * @priv: driver private structure |
343 | * @entry : descriptor index to be used. | 343 | * @p : descriptor pointer |
344 | * @skb : the socket buffer | 344 | * @skb : the socket buffer |
345 | * Description : | 345 | * Description : |
346 | * This function will read timestamp from the descriptor & pass it to stack. | 346 | * This function will read timestamp from the descriptor & pass it to stack. |
347 | * and also perform some sanity checks. | 347 | * and also perform some sanity checks. |
348 | */ | 348 | */ |
349 | static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, | 349 | static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, |
350 | unsigned int entry, struct sk_buff *skb) | 350 | struct dma_desc *p, struct sk_buff *skb) |
351 | { | 351 | { |
352 | struct skb_shared_hwtstamps shhwtstamp; | 352 | struct skb_shared_hwtstamps shhwtstamp; |
353 | u64 ns; | 353 | u64 ns; |
354 | void *desc = NULL; | ||
355 | 354 | ||
356 | if (!priv->hwts_tx_en) | 355 | if (!priv->hwts_tx_en) |
357 | return; | 356 | return; |
@@ -360,58 +359,55 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv, | |||
360 | if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))) | 359 | if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))) |
361 | return; | 360 | return; |
362 | 361 | ||
363 | if (priv->adv_ts) | ||
364 | desc = (priv->dma_etx + entry); | ||
365 | else | ||
366 | desc = (priv->dma_tx + entry); | ||
367 | |||
368 | /* check tx tstamp status */ | 362 | /* check tx tstamp status */ |
369 | if (!priv->hw->desc->get_tx_timestamp_status((struct dma_desc *)desc)) | 363 | if (!priv->hw->desc->get_tx_timestamp_status(p)) { |
370 | return; | 364 | /* get the valid tstamp */ |
365 | ns = priv->hw->desc->get_timestamp(p, priv->adv_ts); | ||
371 | 366 | ||
372 | /* get the valid tstamp */ | 367 | memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); |
373 | ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts); | 368 | shhwtstamp.hwtstamp = ns_to_ktime(ns); |
374 | 369 | ||
375 | memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); | 370 | netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns); |
376 | shhwtstamp.hwtstamp = ns_to_ktime(ns); | 371 | /* pass tstamp to stack */ |
377 | /* pass tstamp to stack */ | 372 | skb_tstamp_tx(skb, &shhwtstamp); |
378 | skb_tstamp_tx(skb, &shhwtstamp); | 373 | } |
379 | 374 | ||
380 | return; | 375 | return; |
381 | } | 376 | } |
382 | 377 | ||
383 | /* stmmac_get_rx_hwtstamp - get HW RX timestamps | 378 | /* stmmac_get_rx_hwtstamp - get HW RX timestamps |
384 | * @priv: driver private structure | 379 | * @priv: driver private structure |
385 | * @entry : descriptor index to be used. | 380 | * @p : descriptor pointer |
381 | * @np : next descriptor pointer | ||
386 | * @skb : the socket buffer | 382 | * @skb : the socket buffer |
387 | * Description : | 383 | * Description : |
388 | * This function will read received packet's timestamp from the descriptor | 384 | * This function will read received packet's timestamp from the descriptor |
389 | * and pass it to stack. It also perform some sanity checks. | 385 | * and pass it to stack. It also perform some sanity checks. |
390 | */ | 386 | */ |
391 | static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, | 387 | static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, |
392 | unsigned int entry, struct sk_buff *skb) | 388 | struct dma_desc *np, struct sk_buff *skb) |
393 | { | 389 | { |
394 | struct skb_shared_hwtstamps *shhwtstamp = NULL; | 390 | struct skb_shared_hwtstamps *shhwtstamp = NULL; |
395 | u64 ns; | 391 | u64 ns; |
396 | void *desc = NULL; | ||
397 | 392 | ||
398 | if (!priv->hwts_rx_en) | 393 | if (!priv->hwts_rx_en) |
399 | return; | 394 | return; |
400 | 395 | ||
401 | if (priv->adv_ts) | 396 | /* Check if timestamp is available */ |
402 | desc = (priv->dma_erx + entry); | 397 | if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) { |
403 | else | 398 | /* For GMAC4, the valid timestamp is from CTX next desc. */ |
404 | desc = (priv->dma_rx + entry); | 399 | if (priv->plat->has_gmac4) |
405 | 400 | ns = priv->hw->desc->get_timestamp(np, priv->adv_ts); | |
406 | /* exit if rx tstamp is not valid */ | 401 | else |
407 | if (!priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts)) | 402 | ns = priv->hw->desc->get_timestamp(p, priv->adv_ts); |
408 | return; | ||
409 | 403 | ||
410 | /* get valid tstamp */ | 404 | netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns); |
411 | ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts); | 405 | shhwtstamp = skb_hwtstamps(skb); |
412 | shhwtstamp = skb_hwtstamps(skb); | 406 | memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); |
413 | memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); | 407 | shhwtstamp->hwtstamp = ns_to_ktime(ns); |
414 | shhwtstamp->hwtstamp = ns_to_ktime(ns); | 408 | } else { |
409 | netdev_err(priv->dev, "cannot get RX hw timestamp\n"); | ||
410 | } | ||
415 | } | 411 | } |
416 | 412 | ||
417 | /** | 413 | /** |
@@ -598,17 +594,18 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
598 | priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON; | 594 | priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON; |
599 | 595 | ||
600 | if (!priv->hwts_tx_en && !priv->hwts_rx_en) | 596 | if (!priv->hwts_tx_en && !priv->hwts_rx_en) |
601 | priv->hw->ptp->config_hw_tstamping(priv->ioaddr, 0); | 597 | priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0); |
602 | else { | 598 | else { |
603 | value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR | | 599 | value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR | |
604 | tstamp_all | ptp_v2 | ptp_over_ethernet | | 600 | tstamp_all | ptp_v2 | ptp_over_ethernet | |
605 | ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en | | 601 | ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en | |
606 | ts_master_en | snap_type_sel); | 602 | ts_master_en | snap_type_sel); |
607 | priv->hw->ptp->config_hw_tstamping(priv->ioaddr, value); | 603 | priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value); |
608 | 604 | ||
609 | /* program Sub Second Increment reg */ | 605 | /* program Sub Second Increment reg */ |
610 | sec_inc = priv->hw->ptp->config_sub_second_increment( | 606 | sec_inc = priv->hw->ptp->config_sub_second_increment( |
611 | priv->ioaddr, priv->clk_ptp_rate); | 607 | priv->ptpaddr, priv->clk_ptp_rate, |
608 | priv->plat->has_gmac4); | ||
612 | temp = div_u64(1000000000ULL, sec_inc); | 609 | temp = div_u64(1000000000ULL, sec_inc); |
613 | 610 | ||
614 | /* calculate default added value: | 611 | /* calculate default added value: |
@@ -618,14 +615,14 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr) | |||
618 | */ | 615 | */ |
619 | temp = (u64)(temp << 32); | 616 | temp = (u64)(temp << 32); |
620 | priv->default_addend = div_u64(temp, priv->clk_ptp_rate); | 617 | priv->default_addend = div_u64(temp, priv->clk_ptp_rate); |
621 | priv->hw->ptp->config_addend(priv->ioaddr, | 618 | priv->hw->ptp->config_addend(priv->ptpaddr, |
622 | priv->default_addend); | 619 | priv->default_addend); |
623 | 620 | ||
624 | /* initialize system time */ | 621 | /* initialize system time */ |
625 | ktime_get_real_ts64(&now); | 622 | ktime_get_real_ts64(&now); |
626 | 623 | ||
627 | /* lower 32 bits of tv_sec are safe until y2106 */ | 624 | /* lower 32 bits of tv_sec are safe until y2106 */ |
628 | priv->hw->ptp->init_systime(priv->ioaddr, (u32)now.tv_sec, | 625 | priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec, |
629 | now.tv_nsec); | 626 | now.tv_nsec); |
630 | } | 627 | } |
631 | 628 | ||
@@ -880,6 +877,13 @@ static int stmmac_init_phy(struct net_device *dev) | |||
880 | return -ENODEV; | 877 | return -ENODEV; |
881 | } | 878 | } |
882 | 879 | ||
880 | /* stmmac_adjust_link will change this to PHY_IGNORE_INTERRUPT to avoid | ||
881 | * subsequent PHY polling, make sure we force a link transition if | ||
882 | * we have a UP/DOWN/UP transition | ||
883 | */ | ||
884 | if (phydev->is_pseudo_fixed_link) | ||
885 | phydev->irq = PHY_POLL; | ||
886 | |||
883 | pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)" | 887 | pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)" |
884 | " Link = %d\n", dev->name, phydev->phy_id, phydev->link); | 888 | " Link = %d\n", dev->name, phydev->phy_id, phydev->link); |
885 | 889 | ||
@@ -1333,7 +1337,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv) | |||
1333 | priv->dev->stats.tx_packets++; | 1337 | priv->dev->stats.tx_packets++; |
1334 | priv->xstats.tx_pkt_n++; | 1338 | priv->xstats.tx_pkt_n++; |
1335 | } | 1339 | } |
1336 | stmmac_get_tx_hwtstamp(priv, entry, skb); | 1340 | stmmac_get_tx_hwtstamp(priv, p, skb); |
1337 | } | 1341 | } |
1338 | 1342 | ||
1339 | if (likely(priv->tx_skbuff_dma[entry].buf)) { | 1343 | if (likely(priv->tx_skbuff_dma[entry].buf)) { |
@@ -1479,10 +1483,13 @@ static void stmmac_mmc_setup(struct stmmac_priv *priv) | |||
1479 | unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | | 1483 | unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET | |
1480 | MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; | 1484 | MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET; |
1481 | 1485 | ||
1482 | if (priv->synopsys_id >= DWMAC_CORE_4_00) | 1486 | if (priv->synopsys_id >= DWMAC_CORE_4_00) { |
1487 | priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET; | ||
1483 | priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET; | 1488 | priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET; |
1484 | else | 1489 | } else { |
1490 | priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET; | ||
1485 | priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET; | 1491 | priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET; |
1492 | } | ||
1486 | 1493 | ||
1487 | dwmac_mmc_intr_all_mask(priv->mmcaddr); | 1494 | dwmac_mmc_intr_all_mask(priv->mmcaddr); |
1488 | 1495 | ||
@@ -2477,7 +2484,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) | |||
2477 | if (netif_msg_rx_status(priv)) { | 2484 | if (netif_msg_rx_status(priv)) { |
2478 | void *rx_head; | 2485 | void *rx_head; |
2479 | 2486 | ||
2480 | pr_debug("%s: descriptor ring:\n", __func__); | 2487 | pr_info(">>>>>> %s: descriptor ring:\n", __func__); |
2481 | if (priv->extend_desc) | 2488 | if (priv->extend_desc) |
2482 | rx_head = (void *)priv->dma_erx; | 2489 | rx_head = (void *)priv->dma_erx; |
2483 | else | 2490 | else |
@@ -2488,6 +2495,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) | |||
2488 | while (count < limit) { | 2495 | while (count < limit) { |
2489 | int status; | 2496 | int status; |
2490 | struct dma_desc *p; | 2497 | struct dma_desc *p; |
2498 | struct dma_desc *np; | ||
2491 | 2499 | ||
2492 | if (priv->extend_desc) | 2500 | if (priv->extend_desc) |
2493 | p = (struct dma_desc *)(priv->dma_erx + entry); | 2501 | p = (struct dma_desc *)(priv->dma_erx + entry); |
@@ -2507,9 +2515,11 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) | |||
2507 | next_entry = priv->cur_rx; | 2515 | next_entry = priv->cur_rx; |
2508 | 2516 | ||
2509 | if (priv->extend_desc) | 2517 | if (priv->extend_desc) |
2510 | prefetch(priv->dma_erx + next_entry); | 2518 | np = (struct dma_desc *)(priv->dma_erx + next_entry); |
2511 | else | 2519 | else |
2512 | prefetch(priv->dma_rx + next_entry); | 2520 | np = priv->dma_rx + next_entry; |
2521 | |||
2522 | prefetch(np); | ||
2513 | 2523 | ||
2514 | if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status)) | 2524 | if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status)) |
2515 | priv->hw->desc->rx_extended_status(&priv->dev->stats, | 2525 | priv->hw->desc->rx_extended_status(&priv->dev->stats, |
@@ -2561,7 +2571,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) | |||
2561 | frame_len -= ETH_FCS_LEN; | 2571 | frame_len -= ETH_FCS_LEN; |
2562 | 2572 | ||
2563 | if (netif_msg_rx_status(priv)) { | 2573 | if (netif_msg_rx_status(priv)) { |
2564 | pr_debug("\tdesc: %p [entry %d] buff=0x%x\n", | 2574 | pr_info("\tdesc: %p [entry %d] buff=0x%x\n", |
2565 | p, entry, des); | 2575 | p, entry, des); |
2566 | if (frame_len > ETH_FRAME_LEN) | 2576 | if (frame_len > ETH_FRAME_LEN) |
2567 | pr_debug("\tframe size %d, COE: %d\n", | 2577 | pr_debug("\tframe size %d, COE: %d\n", |
@@ -2618,13 +2628,13 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit) | |||
2618 | DMA_FROM_DEVICE); | 2628 | DMA_FROM_DEVICE); |
2619 | } | 2629 | } |
2620 | 2630 | ||
2621 | stmmac_get_rx_hwtstamp(priv, entry, skb); | ||
2622 | |||
2623 | if (netif_msg_pktdata(priv)) { | 2631 | if (netif_msg_pktdata(priv)) { |
2624 | pr_debug("frame received (%dbytes)", frame_len); | 2632 | pr_debug("frame received (%dbytes)", frame_len); |
2625 | print_pkt(skb->data, frame_len); | 2633 | print_pkt(skb->data, frame_len); |
2626 | } | 2634 | } |
2627 | 2635 | ||
2636 | stmmac_get_rx_hwtstamp(priv, p, np, skb); | ||
2637 | |||
2628 | stmmac_rx_vlan(priv->dev, skb); | 2638 | stmmac_rx_vlan(priv->dev, skb); |
2629 | 2639 | ||
2630 | skb->protocol = eth_type_trans(skb, priv->dev); | 2640 | skb->protocol = eth_type_trans(skb, priv->dev); |
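Several hunks above switch the PTP callbacks from priv->ioaddr to a dedicated priv->ptpaddr, which stmmac_mmc_setup() now derives from the MAC base using PTP_GMAC4_OFFSET or PTP_GMAC3_X_OFFSET. A minimal user-space sketch of that base-address selection; the MAC base value and the is_gmac4 flag below are made-up placeholders, not driver data:

#include <stdint.h>
#include <stdio.h>

#define PTP_GMAC4_OFFSET   0xb00
#define PTP_GMAC3_X_OFFSET 0x700

/* Pick the PTP register block relative to the MAC base, as the patch does. */
static uintptr_t ptp_base(uintptr_t ioaddr, int is_gmac4)
{
        return ioaddr + (is_gmac4 ? PTP_GMAC4_OFFSET : PTP_GMAC3_X_OFFSET);
}

int main(void)
{
        uintptr_t ioaddr = 0x1e780000;          /* hypothetical MAC base address */

        printf("gmac4 PTP base:   0x%lx\n", (unsigned long)ptp_base(ioaddr, 1));
        printf("gmac3.x PTP base: 0x%lx\n", (unsigned long)ptp_base(ioaddr, 0));
        return 0;
}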
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index 1477471f8d44..3eb281d1db08 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | |||
@@ -54,7 +54,7 @@ static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb) | |||
54 | 54 | ||
55 | spin_lock_irqsave(&priv->ptp_lock, flags); | 55 | spin_lock_irqsave(&priv->ptp_lock, flags); |
56 | 56 | ||
57 | priv->hw->ptp->config_addend(priv->ioaddr, addend); | 57 | priv->hw->ptp->config_addend(priv->ptpaddr, addend); |
58 | 58 | ||
59 | spin_unlock_irqrestore(&priv->ptp_lock, flags); | 59 | spin_unlock_irqrestore(&priv->ptp_lock, flags); |
60 | 60 | ||
@@ -89,7 +89,8 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta) | |||
89 | 89 | ||
90 | spin_lock_irqsave(&priv->ptp_lock, flags); | 90 | spin_lock_irqsave(&priv->ptp_lock, flags); |
91 | 91 | ||
92 | priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj); | 92 | priv->hw->ptp->adjust_systime(priv->ptpaddr, sec, nsec, neg_adj, |
93 | priv->plat->has_gmac4); | ||
93 | 94 | ||
94 | spin_unlock_irqrestore(&priv->ptp_lock, flags); | 95 | spin_unlock_irqrestore(&priv->ptp_lock, flags); |
95 | 96 | ||
@@ -114,7 +115,7 @@ static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts) | |||
114 | 115 | ||
115 | spin_lock_irqsave(&priv->ptp_lock, flags); | 116 | spin_lock_irqsave(&priv->ptp_lock, flags); |
116 | 117 | ||
117 | ns = priv->hw->ptp->get_systime(priv->ioaddr); | 118 | ns = priv->hw->ptp->get_systime(priv->ptpaddr); |
118 | 119 | ||
119 | spin_unlock_irqrestore(&priv->ptp_lock, flags); | 120 | spin_unlock_irqrestore(&priv->ptp_lock, flags); |
120 | 121 | ||
@@ -141,7 +142,7 @@ static int stmmac_set_time(struct ptp_clock_info *ptp, | |||
141 | 142 | ||
142 | spin_lock_irqsave(&priv->ptp_lock, flags); | 143 | spin_lock_irqsave(&priv->ptp_lock, flags); |
143 | 144 | ||
144 | priv->hw->ptp->init_systime(priv->ioaddr, ts->tv_sec, ts->tv_nsec); | 145 | priv->hw->ptp->init_systime(priv->ptpaddr, ts->tv_sec, ts->tv_nsec); |
145 | 146 | ||
146 | spin_unlock_irqrestore(&priv->ptp_lock, flags); | 147 | spin_unlock_irqrestore(&priv->ptp_lock, flags); |
147 | 148 | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h index 4535df37c227..c06938c47af5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h | |||
@@ -22,51 +22,53 @@ | |||
22 | Author: Rayagond Kokatanur <rayagond@vayavyalabs.com> | 22 | Author: Rayagond Kokatanur <rayagond@vayavyalabs.com> |
23 | ******************************************************************************/ | 23 | ******************************************************************************/ |
24 | 24 | ||
25 | #ifndef __STMMAC_PTP_H__ | 25 | #ifndef __STMMAC_PTP_H__ |
26 | #define __STMMAC_PTP_H__ | 26 | #define __STMMAC_PTP_H__ |
27 | 27 | ||
28 | /* IEEE 1588 PTP register offsets */ | 28 | #define PTP_GMAC4_OFFSET 0xb00 |
29 | #define PTP_TCR 0x0700 /* Timestamp Control Reg */ | 29 | #define PTP_GMAC3_X_OFFSET 0x700 |
30 | #define PTP_SSIR 0x0704 /* Sub-Second Increment Reg */ | ||
31 | #define PTP_STSR 0x0708 /* System Time – Seconds Regr */ | ||
32 | #define PTP_STNSR 0x070C /* System Time – Nanoseconds Reg */ | ||
33 | #define PTP_STSUR 0x0710 /* System Time – Seconds Update Reg */ | ||
34 | #define PTP_STNSUR 0x0714 /* System Time – Nanoseconds Update Reg */ | ||
35 | #define PTP_TAR 0x0718 /* Timestamp Addend Reg */ | ||
36 | #define PTP_TTSR 0x071C /* Target Time Seconds Reg */ | ||
37 | #define PTP_TTNSR 0x0720 /* Target Time Nanoseconds Reg */ | ||
38 | #define PTP_STHWSR 0x0724 /* System Time - Higher Word Seconds Reg */ | ||
39 | #define PTP_TSR 0x0728 /* Timestamp Status */ | ||
40 | 30 | ||
41 | #define PTP_STNSUR_ADDSUB_SHIFT 31 | 31 | /* IEEE 1588 PTP register offsets */ |
32 | #define PTP_TCR 0x00 /* Timestamp Control Reg */ | ||
33 | #define PTP_SSIR 0x04 /* Sub-Second Increment Reg */ | ||
34 | #define PTP_STSR 0x08 /* System Time – Seconds Regr */ | ||
35 | #define PTP_STNSR 0x0c /* System Time – Nanoseconds Reg */ | ||
36 | #define PTP_STSUR 0x10 /* System Time – Seconds Update Reg */ | ||
37 | #define PTP_STNSUR 0x14 /* System Time – Nanoseconds Update Reg */ | ||
38 | #define PTP_TAR 0x18 /* Timestamp Addend Reg */ | ||
42 | 39 | ||
43 | /* PTP TCR defines */ | 40 | #define PTP_STNSUR_ADDSUB_SHIFT 31 |
44 | #define PTP_TCR_TSENA 0x00000001 /* Timestamp Enable */ | 41 | #define PTP_DIGITAL_ROLLOVER_MODE 0x3B9ACA00 /* 10e9-1 ns */ |
45 | #define PTP_TCR_TSCFUPDT 0x00000002 /* Timestamp Fine/Coarse Update */ | 42 | #define PTP_BINARY_ROLLOVER_MODE 0x80000000 /* ~0.466 ns */ |
46 | #define PTP_TCR_TSINIT 0x00000004 /* Timestamp Initialize */ | ||
47 | #define PTP_TCR_TSUPDT 0x00000008 /* Timestamp Update */ | ||
48 | /* Timestamp Interrupt Trigger Enable */ | ||
49 | #define PTP_TCR_TSTRIG 0x00000010 | ||
50 | #define PTP_TCR_TSADDREG 0x00000020 /* Addend Reg Update */ | ||
51 | #define PTP_TCR_TSENALL 0x00000100 /* Enable Timestamp for All Frames */ | ||
52 | /* Timestamp Digital or Binary Rollover Control */ | ||
53 | #define PTP_TCR_TSCTRLSSR 0x00000200 | ||
54 | 43 | ||
44 | /* PTP Timestamp control register defines */ | ||
45 | #define PTP_TCR_TSENA BIT(0) /* Timestamp Enable */ | ||
46 | #define PTP_TCR_TSCFUPDT BIT(1) /* Timestamp Fine/Coarse Update */ | ||
47 | #define PTP_TCR_TSINIT BIT(2) /* Timestamp Initialize */ | ||
48 | #define PTP_TCR_TSUPDT BIT(3) /* Timestamp Update */ | ||
49 | #define PTP_TCR_TSTRIG BIT(4) /* Timestamp Interrupt Trigger Enable */ | ||
50 | #define PTP_TCR_TSADDREG BIT(5) /* Addend Reg Update */ | ||
51 | #define PTP_TCR_TSENALL BIT(8) /* Enable Timestamp for All Frames */ | ||
52 | #define PTP_TCR_TSCTRLSSR BIT(9) /* Digital or Binary Rollover Control */ | ||
55 | /* Enable PTP packet Processing for Version 2 Format */ | 53 | /* Enable PTP packet Processing for Version 2 Format */ |
56 | #define PTP_TCR_TSVER2ENA 0x00000400 | 54 | #define PTP_TCR_TSVER2ENA BIT(10) |
57 | /* Enable Processing of PTP over Ethernet Frames */ | 55 | /* Enable Processing of PTP over Ethernet Frames */ |
58 | #define PTP_TCR_TSIPENA 0x00000800 | 56 | #define PTP_TCR_TSIPENA BIT(11) |
59 | /* Enable Processing of PTP Frames Sent over IPv6-UDP */ | 57 | /* Enable Processing of PTP Frames Sent over IPv6-UDP */ |
60 | #define PTP_TCR_TSIPV6ENA 0x00001000 | 58 | #define PTP_TCR_TSIPV6ENA BIT(12) |
61 | /* Enable Processing of PTP Frames Sent over IPv4-UDP */ | 59 | /* Enable Processing of PTP Frames Sent over IPv4-UDP */ |
62 | #define PTP_TCR_TSIPV4ENA 0x00002000 | 60 | #define PTP_TCR_TSIPV4ENA BIT(13) |
63 | /* Enable Timestamp Snapshot for Event Messages */ | 61 | /* Enable Timestamp Snapshot for Event Messages */ |
64 | #define PTP_TCR_TSEVNTENA 0x00004000 | 62 | #define PTP_TCR_TSEVNTENA BIT(14) |
65 | /* Enable Snapshot for Messages Relevant to Master */ | 63 | /* Enable Snapshot for Messages Relevant to Master */ |
66 | #define PTP_TCR_TSMSTRENA 0x00008000 | 64 | #define PTP_TCR_TSMSTRENA BIT(15) |
67 | /* Select PTP packets for Taking Snapshots */ | 65 | /* Select PTP packets for Taking Snapshots */ |
68 | #define PTP_TCR_SNAPTYPSEL_1 0x00010000 | 66 | #define PTP_TCR_SNAPTYPSEL_1 GENMASK(17, 16) |
69 | /* Enable MAC address for PTP Frame Filtering */ | 67 | /* Enable MAC address for PTP Frame Filtering */ |
70 | #define PTP_TCR_TSENMACADDR 0x00040000 | 68 | #define PTP_TCR_TSENMACADDR BIT(18) |
69 | |||
70 | /* SSIR defines */ | ||
71 | #define PTP_SSIR_SSINC_MASK 0xff | ||
72 | #define GMAC4_PTP_SSIR_SSINC_SHIFT 16 | ||
71 | 73 | ||
72 | #endif /* __STMMAC_PTP_H__ */ | 74 | #endif /* __STMMAC_PTP_H__ */ |
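The header rewrite above replaces literal masks with BIT()/GENMASK() and makes the register offsets relative to the new per-variant base. A standalone check of a few of the new definitions against the old literals; BIT() and GENMASK() are restated locally (the kernel provides them in its own headers) so the snippet compiles on its own:

#include <stdio.h>

#define BIT(n)          (1UL << (n))
#define GENMASK(h, l)   (((~0UL) << (l)) & (~0UL >> (8 * sizeof(unsigned long) - 1 - (h))))

int main(void)
{
        printf("PTP_TCR_TSENALL      = 0x%08lx\n", BIT(8));           /* old literal 0x00000100 */
        printf("PTP_TCR_TSENMACADDR  = 0x%08lx\n", BIT(18));          /* old literal 0x00040000 */
        /* GENMASK(17, 16) = 0x00030000 covers the full two-bit SNAPTYPSEL field,
         * where the old define was only the single value 0x00010000. */
        printf("PTP_TCR_SNAPTYPSEL_1 = 0x%08lx\n", GENMASK(17, 16));
        return 0;
}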
diff --git a/drivers/net/ethernet/sun/sunbmac.c b/drivers/net/ethernet/sun/sunbmac.c index aa4f9d2d8fa9..02f452730d52 100644 --- a/drivers/net/ethernet/sun/sunbmac.c +++ b/drivers/net/ethernet/sun/sunbmac.c | |||
@@ -623,6 +623,7 @@ static int bigmac_init_hw(struct bigmac *bp, int from_irq) | |||
623 | void __iomem *gregs = bp->gregs; | 623 | void __iomem *gregs = bp->gregs; |
624 | void __iomem *cregs = bp->creg; | 624 | void __iomem *cregs = bp->creg; |
625 | void __iomem *bregs = bp->bregs; | 625 | void __iomem *bregs = bp->bregs; |
626 | __u32 bblk_dvma = (__u32)bp->bblock_dvma; | ||
626 | unsigned char *e = &bp->dev->dev_addr[0]; | 627 | unsigned char *e = &bp->dev->dev_addr[0]; |
627 | 628 | ||
628 | /* Latch current counters into statistics. */ | 629 | /* Latch current counters into statistics. */ |
@@ -671,9 +672,9 @@ static int bigmac_init_hw(struct bigmac *bp, int from_irq) | |||
671 | bregs + BMAC_XIFCFG); | 672 | bregs + BMAC_XIFCFG); |
672 | 673 | ||
673 | /* Tell the QEC where the ring descriptors are. */ | 674 | /* Tell the QEC where the ring descriptors are. */ |
674 | sbus_writel(bp->bblock_dvma + bib_offset(be_rxd, 0), | 675 | sbus_writel(bblk_dvma + bib_offset(be_rxd, 0), |
675 | cregs + CREG_RXDS); | 676 | cregs + CREG_RXDS); |
676 | sbus_writel(bp->bblock_dvma + bib_offset(be_txd, 0), | 677 | sbus_writel(bblk_dvma + bib_offset(be_txd, 0), |
677 | cregs + CREG_TXDS); | 678 | cregs + CREG_TXDS); |
678 | 679 | ||
679 | /* Setup the FIFO pointers into QEC local memory. */ | 680 | /* Setup the FIFO pointers into QEC local memory. */ |
diff --git a/drivers/net/ethernet/sun/sunbmac.h b/drivers/net/ethernet/sun/sunbmac.h index 06dd21707353..532fc56830cf 100644 --- a/drivers/net/ethernet/sun/sunbmac.h +++ b/drivers/net/ethernet/sun/sunbmac.h | |||
@@ -291,7 +291,7 @@ struct bigmac { | |||
291 | void __iomem *bregs; /* BigMAC Registers */ | 291 | void __iomem *bregs; /* BigMAC Registers */ |
292 | void __iomem *tregs; /* BigMAC Transceiver */ | 292 | void __iomem *tregs; /* BigMAC Transceiver */ |
293 | struct bmac_init_block *bmac_block; /* RX and TX descriptors */ | 293 | struct bmac_init_block *bmac_block; /* RX and TX descriptors */ |
294 | __u32 bblock_dvma; /* RX and TX descriptors */ | 294 | dma_addr_t bblock_dvma; /* RX and TX descriptors */ |
295 | 295 | ||
296 | spinlock_t lock; | 296 | spinlock_t lock; |
297 | 297 | ||
diff --git a/drivers/net/ethernet/sun/sunqe.c b/drivers/net/ethernet/sun/sunqe.c index 9b825780b3be..9582948145c1 100644 --- a/drivers/net/ethernet/sun/sunqe.c +++ b/drivers/net/ethernet/sun/sunqe.c | |||
@@ -124,7 +124,7 @@ static void qe_init_rings(struct sunqe *qep) | |||
124 | { | 124 | { |
125 | struct qe_init_block *qb = qep->qe_block; | 125 | struct qe_init_block *qb = qep->qe_block; |
126 | struct sunqe_buffers *qbufs = qep->buffers; | 126 | struct sunqe_buffers *qbufs = qep->buffers; |
127 | __u32 qbufs_dvma = qep->buffers_dvma; | 127 | __u32 qbufs_dvma = (__u32)qep->buffers_dvma; |
128 | int i; | 128 | int i; |
129 | 129 | ||
130 | qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0; | 130 | qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0; |
@@ -144,6 +144,7 @@ static int qe_init(struct sunqe *qep, int from_irq) | |||
144 | void __iomem *mregs = qep->mregs; | 144 | void __iomem *mregs = qep->mregs; |
145 | void __iomem *gregs = qecp->gregs; | 145 | void __iomem *gregs = qecp->gregs; |
146 | unsigned char *e = &qep->dev->dev_addr[0]; | 146 | unsigned char *e = &qep->dev->dev_addr[0]; |
147 | __u32 qblk_dvma = (__u32)qep->qblock_dvma; | ||
147 | u32 tmp; | 148 | u32 tmp; |
148 | int i; | 149 | int i; |
149 | 150 | ||
@@ -152,8 +153,8 @@ static int qe_init(struct sunqe *qep, int from_irq) | |||
152 | return -EAGAIN; | 153 | return -EAGAIN; |
153 | 154 | ||
154 | /* Setup initial rx/tx init block pointers. */ | 155 | /* Setup initial rx/tx init block pointers. */ |
155 | sbus_writel(qep->qblock_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS); | 156 | sbus_writel(qblk_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS); |
156 | sbus_writel(qep->qblock_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS); | 157 | sbus_writel(qblk_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS); |
157 | 158 | ||
158 | /* Enable/mask the various irq's. */ | 159 | /* Enable/mask the various irq's. */ |
159 | sbus_writel(0, cregs + CREG_RIMASK); | 160 | sbus_writel(0, cregs + CREG_RIMASK); |
@@ -413,7 +414,7 @@ static void qe_rx(struct sunqe *qep) | |||
413 | struct net_device *dev = qep->dev; | 414 | struct net_device *dev = qep->dev; |
414 | struct qe_rxd *this; | 415 | struct qe_rxd *this; |
415 | struct sunqe_buffers *qbufs = qep->buffers; | 416 | struct sunqe_buffers *qbufs = qep->buffers; |
416 | __u32 qbufs_dvma = qep->buffers_dvma; | 417 | __u32 qbufs_dvma = (__u32)qep->buffers_dvma; |
417 | int elem = qep->rx_new; | 418 | int elem = qep->rx_new; |
418 | u32 flags; | 419 | u32 flags; |
419 | 420 | ||
@@ -572,7 +573,7 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
572 | { | 573 | { |
573 | struct sunqe *qep = netdev_priv(dev); | 574 | struct sunqe *qep = netdev_priv(dev); |
574 | struct sunqe_buffers *qbufs = qep->buffers; | 575 | struct sunqe_buffers *qbufs = qep->buffers; |
575 | __u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma; | 576 | __u32 txbuf_dvma, qbufs_dvma = (__u32)qep->buffers_dvma; |
576 | unsigned char *txbuf; | 577 | unsigned char *txbuf; |
577 | int len, entry; | 578 | int len, entry; |
578 | 579 | ||
diff --git a/drivers/net/ethernet/sun/sunqe.h b/drivers/net/ethernet/sun/sunqe.h index 581781b6b2fa..ae190b77431b 100644 --- a/drivers/net/ethernet/sun/sunqe.h +++ b/drivers/net/ethernet/sun/sunqe.h | |||
@@ -334,12 +334,12 @@ struct sunqe { | |||
334 | void __iomem *qcregs; /* QEC per-channel Registers */ | 334 | void __iomem *qcregs; /* QEC per-channel Registers */ |
335 | void __iomem *mregs; /* Per-channel MACE Registers */ | 335 | void __iomem *mregs; /* Per-channel MACE Registers */ |
336 | struct qe_init_block *qe_block; /* RX and TX descriptors */ | 336 | struct qe_init_block *qe_block; /* RX and TX descriptors */ |
337 | __u32 qblock_dvma; /* RX and TX descriptors */ | 337 | dma_addr_t qblock_dvma; /* RX and TX descriptors */ |
338 | spinlock_t lock; /* Protects txfull state */ | 338 | spinlock_t lock; /* Protects txfull state */ |
339 | int rx_new, rx_old; /* RX ring extents */ | 339 | int rx_new, rx_old; /* RX ring extents */ |
340 | int tx_new, tx_old; /* TX ring extents */ | 340 | int tx_new, tx_old; /* TX ring extents */ |
341 | struct sunqe_buffers *buffers; /* CPU visible address. */ | 341 | struct sunqe_buffers *buffers; /* CPU visible address. */ |
342 | __u32 buffers_dvma; /* DVMA visible address. */ | 342 | dma_addr_t buffers_dvma; /* DVMA visible address. */ |
343 | struct sunqec *parent; | 343 | struct sunqec *parent; |
344 | u8 mconfig; /* Base MACE mconfig value */ | 344 | u8 mconfig; /* Base MACE mconfig value */ |
345 | struct platform_device *op; /* QE's OF device struct */ | 345 | struct platform_device *op; /* QE's OF device struct */ |
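The sunbmac/sunqe hunks above keep the descriptor DMA handles in dma_addr_t and narrow them to __u32 only at the sbus_writel() call sites. A tiny sketch of that narrowing, with dma_addr_t stood in by uint64_t and a made-up address:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;    /* stand-in for the kernel type */

int main(void)
{
        dma_addr_t bblock_dvma = 0xfffff80012345000ULL;   /* hypothetical DMA handle */
        uint32_t bblk_dvma = (uint32_t)bblock_dvma;        /* what the 32-bit register write takes */

        printf("stored handle:  0x%016llx\n", (unsigned long long)bblock_dvma);
        printf("register value: 0x%08x\n", bblk_dvma);
        return 0;
}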
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c index 5eedac495077..4ba2421e625d 100644 --- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c +++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c | |||
@@ -33,7 +33,6 @@ | |||
33 | #include <linux/stat.h> | 33 | #include <linux/stat.h> |
34 | #include <linux/types.h> | 34 | #include <linux/types.h> |
35 | 35 | ||
36 | #include <linux/types.h> | ||
37 | #include <linux/slab.h> | 36 | #include <linux/slab.h> |
38 | #include <linux/delay.h> | 37 | #include <linux/delay.h> |
39 | #include <linux/mm.h> | 38 | #include <linux/mm.h> |
@@ -43,7 +42,6 @@ | |||
43 | 42 | ||
44 | #include <linux/phy.h> | 43 | #include <linux/phy.h> |
45 | #include <linux/mii.h> | 44 | #include <linux/mii.h> |
46 | #include <linux/delay.h> | ||
47 | #include <linux/dma-mapping.h> | 45 | #include <linux/dma-mapping.h> |
48 | #include <linux/vmalloc.h> | 46 | #include <linux/vmalloc.h> |
49 | 47 | ||
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c index 054a8dd23dae..ba1e45ff6aae 100644 --- a/drivers/net/ethernet/ti/cpsw-phy-sel.c +++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c | |||
@@ -176,9 +176,12 @@ void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave) | |||
176 | } | 176 | } |
177 | 177 | ||
178 | dev = bus_find_device(&platform_bus_type, NULL, node, match); | 178 | dev = bus_find_device(&platform_bus_type, NULL, node, match); |
179 | of_node_put(node); | ||
179 | priv = dev_get_drvdata(dev); | 180 | priv = dev_get_drvdata(dev); |
180 | 181 | ||
181 | priv->cpsw_phy_sel(priv, phy_mode, slave); | 182 | priv->cpsw_phy_sel(priv, phy_mode, slave); |
183 | |||
184 | put_device(dev); | ||
182 | } | 185 | } |
183 | EXPORT_SYMBOL_GPL(cpsw_phy_sel); | 186 | EXPORT_SYMBOL_GPL(cpsw_phy_sel); |
184 | 187 | ||
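The cpsw_phy_sel() hunk adds of_node_put() for the node lookup and put_device() for the device returned by bus_find_device(), which hands back a held reference. A toy refcount model of that pairing (the toy_* names are invented for the sketch; they are not kernel APIs):

#include <stdio.h>

struct toy_device { int refcount; };

/* Find helpers return a device with an extra reference held. */
static struct toy_device *toy_find_device(struct toy_device *d)
{
        d->refcount++;
        return d;
}

static void toy_put_device(struct toy_device *d)
{
        d->refcount--;
}

int main(void)
{
        struct toy_device dev = { .refcount = 1 };
        struct toy_device *found = toy_find_device(&dev);

        /* ... use the found device's driver data here ... */

        toy_put_device(found);          /* the put_device() the patch adds */
        printf("refcount back to %d\n", dev.refcount);
        return 0;
}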
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index c6cff3d2ff05..58947aae31c7 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c | |||
@@ -2375,8 +2375,11 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, | |||
2375 | * to the PHY is the Ethernet MAC DT node. | 2375 | * to the PHY is the Ethernet MAC DT node. |
2376 | */ | 2376 | */ |
2377 | ret = of_phy_register_fixed_link(slave_node); | 2377 | ret = of_phy_register_fixed_link(slave_node); |
2378 | if (ret) | 2378 | if (ret) { |
2379 | if (ret != -EPROBE_DEFER) | ||
2380 | dev_err(&pdev->dev, "failed to register fixed-link phy: %d\n", ret); | ||
2379 | return ret; | 2381 | return ret; |
2382 | } | ||
2380 | slave_data->phy_node = of_node_get(slave_node); | 2383 | slave_data->phy_node = of_node_get(slave_node); |
2381 | } else if (parp) { | 2384 | } else if (parp) { |
2382 | u32 phyid; | 2385 | u32 phyid; |
@@ -2397,6 +2400,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, | |||
2397 | } | 2400 | } |
2398 | snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), | 2401 | snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), |
2399 | PHY_ID_FMT, mdio->name, phyid); | 2402 | PHY_ID_FMT, mdio->name, phyid); |
2403 | put_device(&mdio->dev); | ||
2400 | } else { | 2404 | } else { |
2401 | dev_err(&pdev->dev, | 2405 | dev_err(&pdev->dev, |
2402 | "No slave[%d] phy_id, phy-handle, or fixed-link property\n", | 2406 | "No slave[%d] phy_id, phy-handle, or fixed-link property\n", |
@@ -2440,6 +2444,46 @@ no_phy_slave: | |||
2440 | return 0; | 2444 | return 0; |
2441 | } | 2445 | } |
2442 | 2446 | ||
2447 | static void cpsw_remove_dt(struct platform_device *pdev) | ||
2448 | { | ||
2449 | struct net_device *ndev = platform_get_drvdata(pdev); | ||
2450 | struct cpsw_common *cpsw = ndev_to_cpsw(ndev); | ||
2451 | struct cpsw_platform_data *data = &cpsw->data; | ||
2452 | struct device_node *node = pdev->dev.of_node; | ||
2453 | struct device_node *slave_node; | ||
2454 | int i = 0; | ||
2455 | |||
2456 | for_each_available_child_of_node(node, slave_node) { | ||
2457 | struct cpsw_slave_data *slave_data = &data->slave_data[i]; | ||
2458 | |||
2459 | if (strcmp(slave_node->name, "slave")) | ||
2460 | continue; | ||
2461 | |||
2462 | if (of_phy_is_fixed_link(slave_node)) { | ||
2463 | struct phy_device *phydev; | ||
2464 | |||
2465 | phydev = of_phy_find_device(slave_node); | ||
2466 | if (phydev) { | ||
2467 | fixed_phy_unregister(phydev); | ||
2468 | /* Put references taken by | ||
2469 | * of_phy_find_device() and | ||
2470 | * of_phy_register_fixed_link(). | ||
2471 | */ | ||
2472 | phy_device_free(phydev); | ||
2473 | phy_device_free(phydev); | ||
2474 | } | ||
2475 | } | ||
2476 | |||
2477 | of_node_put(slave_data->phy_node); | ||
2478 | |||
2479 | i++; | ||
2480 | if (i == data->slaves) | ||
2481 | break; | ||
2482 | } | ||
2483 | |||
2484 | of_platform_depopulate(&pdev->dev); | ||
2485 | } | ||
2486 | |||
2443 | static int cpsw_probe_dual_emac(struct cpsw_priv *priv) | 2487 | static int cpsw_probe_dual_emac(struct cpsw_priv *priv) |
2444 | { | 2488 | { |
2445 | struct cpsw_common *cpsw = priv->cpsw; | 2489 | struct cpsw_common *cpsw = priv->cpsw; |
@@ -2547,6 +2591,9 @@ static int cpsw_probe(struct platform_device *pdev) | |||
2547 | int irq; | 2591 | int irq; |
2548 | 2592 | ||
2549 | cpsw = devm_kzalloc(&pdev->dev, sizeof(struct cpsw_common), GFP_KERNEL); | 2593 | cpsw = devm_kzalloc(&pdev->dev, sizeof(struct cpsw_common), GFP_KERNEL); |
2594 | if (!cpsw) | ||
2595 | return -ENOMEM; | ||
2596 | |||
2550 | cpsw->dev = &pdev->dev; | 2597 | cpsw->dev = &pdev->dev; |
2551 | 2598 | ||
2552 | ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES); | 2599 | ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES); |
@@ -2584,11 +2631,19 @@ static int cpsw_probe(struct platform_device *pdev) | |||
2584 | /* Select default pin state */ | 2631 | /* Select default pin state */ |
2585 | pinctrl_pm_select_default_state(&pdev->dev); | 2632 | pinctrl_pm_select_default_state(&pdev->dev); |
2586 | 2633 | ||
2587 | if (cpsw_probe_dt(&cpsw->data, pdev)) { | 2634 | /* Need to enable clocks with runtime PM api to access module |
2588 | dev_err(&pdev->dev, "cpsw: platform data missing\n"); | 2635 | * registers |
2589 | ret = -ENODEV; | 2636 | */ |
2637 | ret = pm_runtime_get_sync(&pdev->dev); | ||
2638 | if (ret < 0) { | ||
2639 | pm_runtime_put_noidle(&pdev->dev); | ||
2590 | goto clean_runtime_disable_ret; | 2640 | goto clean_runtime_disable_ret; |
2591 | } | 2641 | } |
2642 | |||
2643 | ret = cpsw_probe_dt(&cpsw->data, pdev); | ||
2644 | if (ret) | ||
2645 | goto clean_dt_ret; | ||
2646 | |||
2592 | data = &cpsw->data; | 2647 | data = &cpsw->data; |
2593 | cpsw->rx_ch_num = 1; | 2648 | cpsw->rx_ch_num = 1; |
2594 | cpsw->tx_ch_num = 1; | 2649 | cpsw->tx_ch_num = 1; |
@@ -2608,7 +2663,7 @@ static int cpsw_probe(struct platform_device *pdev) | |||
2608 | GFP_KERNEL); | 2663 | GFP_KERNEL); |
2609 | if (!cpsw->slaves) { | 2664 | if (!cpsw->slaves) { |
2610 | ret = -ENOMEM; | 2665 | ret = -ENOMEM; |
2611 | goto clean_runtime_disable_ret; | 2666 | goto clean_dt_ret; |
2612 | } | 2667 | } |
2613 | for (i = 0; i < data->slaves; i++) | 2668 | for (i = 0; i < data->slaves; i++) |
2614 | cpsw->slaves[i].slave_num = i; | 2669 | cpsw->slaves[i].slave_num = i; |
@@ -2620,7 +2675,7 @@ static int cpsw_probe(struct platform_device *pdev) | |||
2620 | if (IS_ERR(clk)) { | 2675 | if (IS_ERR(clk)) { |
2621 | dev_err(priv->dev, "fck is not found\n"); | 2676 | dev_err(priv->dev, "fck is not found\n"); |
2622 | ret = -ENODEV; | 2677 | ret = -ENODEV; |
2623 | goto clean_runtime_disable_ret; | 2678 | goto clean_dt_ret; |
2624 | } | 2679 | } |
2625 | cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000; | 2680 | cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000; |
2626 | 2681 | ||
@@ -2628,26 +2683,17 @@ static int cpsw_probe(struct platform_device *pdev) | |||
2628 | ss_regs = devm_ioremap_resource(&pdev->dev, ss_res); | 2683 | ss_regs = devm_ioremap_resource(&pdev->dev, ss_res); |
2629 | if (IS_ERR(ss_regs)) { | 2684 | if (IS_ERR(ss_regs)) { |
2630 | ret = PTR_ERR(ss_regs); | 2685 | ret = PTR_ERR(ss_regs); |
2631 | goto clean_runtime_disable_ret; | 2686 | goto clean_dt_ret; |
2632 | } | 2687 | } |
2633 | cpsw->regs = ss_regs; | 2688 | cpsw->regs = ss_regs; |
2634 | 2689 | ||
2635 | /* Need to enable clocks with runtime PM api to access module | ||
2636 | * registers | ||
2637 | */ | ||
2638 | ret = pm_runtime_get_sync(&pdev->dev); | ||
2639 | if (ret < 0) { | ||
2640 | pm_runtime_put_noidle(&pdev->dev); | ||
2641 | goto clean_runtime_disable_ret; | ||
2642 | } | ||
2643 | cpsw->version = readl(&cpsw->regs->id_ver); | 2690 | cpsw->version = readl(&cpsw->regs->id_ver); |
2644 | pm_runtime_put_sync(&pdev->dev); | ||
2645 | 2691 | ||
2646 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | 2692 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
2647 | cpsw->wr_regs = devm_ioremap_resource(&pdev->dev, res); | 2693 | cpsw->wr_regs = devm_ioremap_resource(&pdev->dev, res); |
2648 | if (IS_ERR(cpsw->wr_regs)) { | 2694 | if (IS_ERR(cpsw->wr_regs)) { |
2649 | ret = PTR_ERR(cpsw->wr_regs); | 2695 | ret = PTR_ERR(cpsw->wr_regs); |
2650 | goto clean_runtime_disable_ret; | 2696 | goto clean_dt_ret; |
2651 | } | 2697 | } |
2652 | 2698 | ||
2653 | memset(&dma_params, 0, sizeof(dma_params)); | 2699 | memset(&dma_params, 0, sizeof(dma_params)); |
@@ -2684,7 +2730,7 @@ static int cpsw_probe(struct platform_device *pdev) | |||
2684 | default: | 2730 | default: |
2685 | dev_err(priv->dev, "unknown version 0x%08x\n", cpsw->version); | 2731 | dev_err(priv->dev, "unknown version 0x%08x\n", cpsw->version); |
2686 | ret = -ENODEV; | 2732 | ret = -ENODEV; |
2687 | goto clean_runtime_disable_ret; | 2733 | goto clean_dt_ret; |
2688 | } | 2734 | } |
2689 | for (i = 0; i < cpsw->data.slaves; i++) { | 2735 | for (i = 0; i < cpsw->data.slaves; i++) { |
2690 | struct cpsw_slave *slave = &cpsw->slaves[i]; | 2736 | struct cpsw_slave *slave = &cpsw->slaves[i]; |
@@ -2713,7 +2759,7 @@ static int cpsw_probe(struct platform_device *pdev) | |||
2713 | if (!cpsw->dma) { | 2759 | if (!cpsw->dma) { |
2714 | dev_err(priv->dev, "error initializing dma\n"); | 2760 | dev_err(priv->dev, "error initializing dma\n"); |
2715 | ret = -ENOMEM; | 2761 | ret = -ENOMEM; |
2716 | goto clean_runtime_disable_ret; | 2762 | goto clean_dt_ret; |
2717 | } | 2763 | } |
2718 | 2764 | ||
2719 | cpsw->txch[0] = cpdma_chan_create(cpsw->dma, 0, cpsw_tx_handler, 0); | 2765 | cpsw->txch[0] = cpdma_chan_create(cpsw->dma, 0, cpsw_tx_handler, 0); |
@@ -2811,16 +2857,23 @@ static int cpsw_probe(struct platform_device *pdev) | |||
2811 | ret = cpsw_probe_dual_emac(priv); | 2857 | ret = cpsw_probe_dual_emac(priv); |
2812 | if (ret) { | 2858 | if (ret) { |
2813 | cpsw_err(priv, probe, "error probe slave 2 emac interface\n"); | 2859 | cpsw_err(priv, probe, "error probe slave 2 emac interface\n"); |
2814 | goto clean_ale_ret; | 2860 | goto clean_unregister_netdev_ret; |
2815 | } | 2861 | } |
2816 | } | 2862 | } |
2817 | 2863 | ||
2864 | pm_runtime_put(&pdev->dev); | ||
2865 | |||
2818 | return 0; | 2866 | return 0; |
2819 | 2867 | ||
2868 | clean_unregister_netdev_ret: | ||
2869 | unregister_netdev(ndev); | ||
2820 | clean_ale_ret: | 2870 | clean_ale_ret: |
2821 | cpsw_ale_destroy(cpsw->ale); | 2871 | cpsw_ale_destroy(cpsw->ale); |
2822 | clean_dma_ret: | 2872 | clean_dma_ret: |
2823 | cpdma_ctlr_destroy(cpsw->dma); | 2873 | cpdma_ctlr_destroy(cpsw->dma); |
2874 | clean_dt_ret: | ||
2875 | cpsw_remove_dt(pdev); | ||
2876 | pm_runtime_put_sync(&pdev->dev); | ||
2824 | clean_runtime_disable_ret: | 2877 | clean_runtime_disable_ret: |
2825 | pm_runtime_disable(&pdev->dev); | 2878 | pm_runtime_disable(&pdev->dev); |
2826 | clean_ndev_ret: | 2879 | clean_ndev_ret: |
@@ -2846,7 +2899,7 @@ static int cpsw_remove(struct platform_device *pdev) | |||
2846 | 2899 | ||
2847 | cpsw_ale_destroy(cpsw->ale); | 2900 | cpsw_ale_destroy(cpsw->ale); |
2848 | cpdma_ctlr_destroy(cpsw->dma); | 2901 | cpdma_ctlr_destroy(cpsw->dma); |
2849 | of_platform_depopulate(&pdev->dev); | 2902 | cpsw_remove_dt(pdev); |
2850 | pm_runtime_put_sync(&pdev->dev); | 2903 | pm_runtime_put_sync(&pdev->dev); |
2851 | pm_runtime_disable(&pdev->dev); | 2904 | pm_runtime_disable(&pdev->dev); |
2852 | if (cpsw->data.dual_emac) | 2905 | if (cpsw->data.dual_emac) |
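The probe rework above takes pm_runtime_get_sync() before any register access and reroutes most failure paths through clean_dt_ret, so DT state and the runtime-PM reference are dropped in reverse order of acquisition (a failed get still needs the put_noidle() shown in the hunk). A minimal sketch of that unwind ordering, with invented acquire()/release() stand-ins rather than the real cpsw resources:

#include <stdio.h>

static int acquire(const char *what, int fail)
{
        printf("get %s%s\n", what, fail ? " -> fails" : "");
        return fail ? -1 : 0;
}

static void release(const char *what)
{
        printf("put %s\n", what);
}

/* fail_at selects which acquisition fails; the labels then release,
 * in reverse order, exactly what was already acquired. */
static int probe(int fail_at)
{
        if (acquire("runtime pm", fail_at == 1))
                goto out;
        if (acquire("dt resources", fail_at == 2))
                goto err_rpm;
        if (acquire("ioremap", fail_at == 3))
                goto err_dt;

        return 0;                       /* success: resources stay held */

err_dt:
        release("dt resources");
err_rpm:
        release("runtime pm");
out:
        return -1;
}

int main(void)
{
        probe(3);
        return 0;
}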
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 2fd94a5bc1f3..84fbe5714f8b 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c | |||
@@ -1410,6 +1410,7 @@ static int emac_dev_open(struct net_device *ndev) | |||
1410 | int i = 0; | 1410 | int i = 0; |
1411 | struct emac_priv *priv = netdev_priv(ndev); | 1411 | struct emac_priv *priv = netdev_priv(ndev); |
1412 | struct phy_device *phydev = NULL; | 1412 | struct phy_device *phydev = NULL; |
1413 | struct device *phy = NULL; | ||
1413 | 1414 | ||
1414 | ret = pm_runtime_get_sync(&priv->pdev->dev); | 1415 | ret = pm_runtime_get_sync(&priv->pdev->dev); |
1415 | if (ret < 0) { | 1416 | if (ret < 0) { |
@@ -1488,19 +1489,20 @@ static int emac_dev_open(struct net_device *ndev) | |||
1488 | 1489 | ||
1489 | /* use the first phy on the bus if pdata did not give us a phy id */ | 1490 | /* use the first phy on the bus if pdata did not give us a phy id */ |
1490 | if (!phydev && !priv->phy_id) { | 1491 | if (!phydev && !priv->phy_id) { |
1491 | struct device *phy; | ||
1492 | |||
1493 | phy = bus_find_device(&mdio_bus_type, NULL, NULL, | 1492 | phy = bus_find_device(&mdio_bus_type, NULL, NULL, |
1494 | match_first_device); | 1493 | match_first_device); |
1495 | if (phy) | 1494 | if (phy) { |
1496 | priv->phy_id = dev_name(phy); | 1495 | priv->phy_id = dev_name(phy); |
1496 | if (!priv->phy_id || !*priv->phy_id) | ||
1497 | put_device(phy); | ||
1498 | } | ||
1497 | } | 1499 | } |
1498 | 1500 | ||
1499 | if (!phydev && priv->phy_id && *priv->phy_id) { | 1501 | if (!phydev && priv->phy_id && *priv->phy_id) { |
1500 | phydev = phy_connect(ndev, priv->phy_id, | 1502 | phydev = phy_connect(ndev, priv->phy_id, |
1501 | &emac_adjust_link, | 1503 | &emac_adjust_link, |
1502 | PHY_INTERFACE_MODE_MII); | 1504 | PHY_INTERFACE_MODE_MII); |
1503 | 1505 | put_device(phy); /* reference taken by bus_find_device */ | |
1504 | if (IS_ERR(phydev)) { | 1506 | if (IS_ERR(phydev)) { |
1505 | dev_err(emac_dev, "could not connect to phy %s\n", | 1507 | dev_err(emac_dev, "could not connect to phy %s\n", |
1506 | priv->phy_id); | 1508 | priv->phy_id); |
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c index 446ea580ad42..928c1dca2673 100644 --- a/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c +++ b/drivers/net/ethernet/toshiba/ps3_gelic_wireless.c | |||
@@ -1694,7 +1694,7 @@ struct gelic_wl_scan_info *gelic_wl_find_best_bss(struct gelic_wl_info *wl) | |||
1694 | pr_debug("%s: bssid matched\n", __func__); | 1694 | pr_debug("%s: bssid matched\n", __func__); |
1695 | break; | 1695 | break; |
1696 | } else { | 1696 | } else { |
1697 | pr_debug("%s: bssid unmached\n", __func__); | 1697 | pr_debug("%s: bssid unmatched\n", __func__); |
1698 | continue; | 1698 | continue; |
1699 | } | 1699 | } |
1700 | } | 1700 | } |
diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index 7f127dc1b7ba..fa32391720fe 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c | |||
@@ -708,8 +708,7 @@ static int eth_poll(struct napi_struct *napi, int budget) | |||
708 | if (!qmgr_stat_below_low_watermark(rxq) && | 708 | if (!qmgr_stat_below_low_watermark(rxq) && |
709 | napi_reschedule(napi)) { /* not empty again */ | 709 | napi_reschedule(napi)) { /* not empty again */ |
710 | #if DEBUG_RX | 710 | #if DEBUG_RX |
711 | printk(KERN_DEBUG "%s: eth_poll" | 711 | printk(KERN_DEBUG "%s: eth_poll napi_reschedule succeeded\n", |
712 | " napi_reschedule successed\n", | ||
713 | dev->name); | 712 | dev->name); |
714 | #endif | 713 | #endif |
715 | qmgr_disable_irq(rxq); | 714 | qmgr_disable_irq(rxq); |
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c index 9fa7ac9f8e68..f355df7cf84a 100644 --- a/drivers/net/ieee802154/adf7242.c +++ b/drivers/net/ieee802154/adf7242.c | |||
@@ -20,7 +20,6 @@ | |||
20 | #include <linux/skbuff.h> | 20 | #include <linux/skbuff.h> |
21 | #include <linux/of.h> | 21 | #include <linux/of.h> |
22 | #include <linux/irq.h> | 22 | #include <linux/irq.h> |
23 | #include <linux/delay.h> | ||
24 | #include <linux/debugfs.h> | 23 | #include <linux/debugfs.h> |
25 | #include <linux/bitops.h> | 24 | #include <linux/bitops.h> |
26 | #include <linux/ieee802154.h> | 25 | #include <linux/ieee802154.h> |
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 3234fcdea317..26d6f0bbe14b 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
@@ -623,7 +623,8 @@ hash_add: | |||
623 | return 0; | 623 | return 0; |
624 | 624 | ||
625 | clear_multi: | 625 | clear_multi: |
626 | dev_set_allmulti(lowerdev, -1); | 626 | if (dev->flags & IFF_ALLMULTI) |
627 | dev_set_allmulti(lowerdev, -1); | ||
627 | del_unicast: | 628 | del_unicast: |
628 | dev_uc_del(lowerdev, dev->dev_addr); | 629 | dev_uc_del(lowerdev, dev->dev_addr); |
629 | out: | 630 | out: |
@@ -1278,6 +1279,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, | |||
1278 | struct net_device *lowerdev; | 1279 | struct net_device *lowerdev; |
1279 | int err; | 1280 | int err; |
1280 | int macmode; | 1281 | int macmode; |
1282 | bool create = false; | ||
1281 | 1283 | ||
1282 | if (!tb[IFLA_LINK]) | 1284 | if (!tb[IFLA_LINK]) |
1283 | return -EINVAL; | 1285 | return -EINVAL; |
@@ -1304,12 +1306,18 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, | |||
1304 | err = macvlan_port_create(lowerdev); | 1306 | err = macvlan_port_create(lowerdev); |
1305 | if (err < 0) | 1307 | if (err < 0) |
1306 | return err; | 1308 | return err; |
1309 | create = true; | ||
1307 | } | 1310 | } |
1308 | port = macvlan_port_get_rtnl(lowerdev); | 1311 | port = macvlan_port_get_rtnl(lowerdev); |
1309 | 1312 | ||
1310 | /* Only 1 macvlan device can be created in passthru mode */ | 1313 | /* Only 1 macvlan device can be created in passthru mode */ |
1311 | if (port->passthru) | 1314 | if (port->passthru) { |
1312 | return -EINVAL; | 1315 | /* The macvlan port must be not created this time, |
1316 | * still goto destroy_macvlan_port for readability. | ||
1317 | */ | ||
1318 | err = -EINVAL; | ||
1319 | goto destroy_macvlan_port; | ||
1320 | } | ||
1313 | 1321 | ||
1314 | vlan->lowerdev = lowerdev; | 1322 | vlan->lowerdev = lowerdev; |
1315 | vlan->dev = dev; | 1323 | vlan->dev = dev; |
@@ -1325,24 +1333,28 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, | |||
1325 | vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]); | 1333 | vlan->flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]); |
1326 | 1334 | ||
1327 | if (vlan->mode == MACVLAN_MODE_PASSTHRU) { | 1335 | if (vlan->mode == MACVLAN_MODE_PASSTHRU) { |
1328 | if (port->count) | 1336 | if (port->count) { |
1329 | return -EINVAL; | 1337 | err = -EINVAL; |
1338 | goto destroy_macvlan_port; | ||
1339 | } | ||
1330 | port->passthru = true; | 1340 | port->passthru = true; |
1331 | eth_hw_addr_inherit(dev, lowerdev); | 1341 | eth_hw_addr_inherit(dev, lowerdev); |
1332 | } | 1342 | } |
1333 | 1343 | ||
1334 | if (data && data[IFLA_MACVLAN_MACADDR_MODE]) { | 1344 | if (data && data[IFLA_MACVLAN_MACADDR_MODE]) { |
1335 | if (vlan->mode != MACVLAN_MODE_SOURCE) | 1345 | if (vlan->mode != MACVLAN_MODE_SOURCE) { |
1336 | return -EINVAL; | 1346 | err = -EINVAL; |
1347 | goto destroy_macvlan_port; | ||
1348 | } | ||
1337 | macmode = nla_get_u32(data[IFLA_MACVLAN_MACADDR_MODE]); | 1349 | macmode = nla_get_u32(data[IFLA_MACVLAN_MACADDR_MODE]); |
1338 | err = macvlan_changelink_sources(vlan, macmode, data); | 1350 | err = macvlan_changelink_sources(vlan, macmode, data); |
1339 | if (err) | 1351 | if (err) |
1340 | return err; | 1352 | goto destroy_macvlan_port; |
1341 | } | 1353 | } |
1342 | 1354 | ||
1343 | err = register_netdevice(dev); | 1355 | err = register_netdevice(dev); |
1344 | if (err < 0) | 1356 | if (err < 0) |
1345 | return err; | 1357 | goto destroy_macvlan_port; |
1346 | 1358 | ||
1347 | dev->priv_flags |= IFF_MACVLAN; | 1359 | dev->priv_flags |= IFF_MACVLAN; |
1348 | err = netdev_upper_dev_link(lowerdev, dev); | 1360 | err = netdev_upper_dev_link(lowerdev, dev); |
@@ -1357,7 +1369,9 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, | |||
1357 | 1369 | ||
1358 | unregister_netdev: | 1370 | unregister_netdev: |
1359 | unregister_netdevice(dev); | 1371 | unregister_netdevice(dev); |
1360 | 1372 | destroy_macvlan_port: | |
1373 | if (create) | ||
1374 | macvlan_port_destroy(port->dev); | ||
1361 | return err; | 1375 | return err; |
1362 | } | 1376 | } |
1363 | EXPORT_SYMBOL_GPL(macvlan_common_newlink); | 1377 | EXPORT_SYMBOL_GPL(macvlan_common_newlink); |
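The macvlan hunks convert early returns into goto destroy_macvlan_port and gate the teardown on a local create flag, so a port that pre-existed this call is never destroyed on failure. A small self-contained sketch of that pattern (port_create()/port_destroy() and the -22 error value are placeholders):

#include <stdbool.h>
#include <stdio.h>

static bool port_exists;

static int port_create(void) { port_exists = true; return 0; }
static void port_destroy(void) { port_exists = false; }

static int newlink(bool fail_later)
{
        bool create = false;
        int err;

        if (!port_exists) {
                if ((err = port_create()))
                        return err;
                create = true;          /* remember this call created it */
        }

        if (fail_later) {
                err = -22;              /* -EINVAL stand-in */
                goto destroy_port;
        }

        return 0;

destroy_port:
        if (create)
                port_destroy();         /* never tear down a pre-existing port */
        return err;
}

int main(void)
{
        printf("attempt: %d, port left behind: %d\n", newlink(true), port_exists);
        return 0;
}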
diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c index c649c101bbab..eb5167210681 100644 --- a/drivers/net/phy/fixed_phy.c +++ b/drivers/net/phy/fixed_phy.c | |||
@@ -279,7 +279,7 @@ EXPORT_SYMBOL_GPL(fixed_phy_register); | |||
279 | void fixed_phy_unregister(struct phy_device *phy) | 279 | void fixed_phy_unregister(struct phy_device *phy) |
280 | { | 280 | { |
281 | phy_device_remove(phy); | 281 | phy_device_remove(phy); |
282 | 282 | of_node_put(phy->mdio.dev.of_node); | |
283 | fixed_phy_del(phy->mdio.addr); | 283 | fixed_phy_del(phy->mdio.addr); |
284 | } | 284 | } |
285 | EXPORT_SYMBOL_GPL(fixed_phy_unregister); | 285 | EXPORT_SYMBOL_GPL(fixed_phy_unregister); |
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 081df68d2ce1..ea92d524d5a8 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c | |||
@@ -318,12 +318,12 @@ static int ksz8041_config_init(struct phy_device *phydev) | |||
318 | /* Limit supported and advertised modes in fiber mode */ | 318 | /* Limit supported and advertised modes in fiber mode */ |
319 | if (of_property_read_bool(of_node, "micrel,fiber-mode")) { | 319 | if (of_property_read_bool(of_node, "micrel,fiber-mode")) { |
320 | phydev->dev_flags |= MICREL_PHY_FXEN; | 320 | phydev->dev_flags |= MICREL_PHY_FXEN; |
321 | phydev->supported &= SUPPORTED_FIBRE | | 321 | phydev->supported &= SUPPORTED_100baseT_Full | |
322 | SUPPORTED_100baseT_Full | | ||
323 | SUPPORTED_100baseT_Half; | 322 | SUPPORTED_100baseT_Half; |
324 | phydev->advertising &= ADVERTISED_FIBRE | | 323 | phydev->supported |= SUPPORTED_FIBRE; |
325 | ADVERTISED_100baseT_Full | | 324 | phydev->advertising &= ADVERTISED_100baseT_Full | |
326 | ADVERTISED_100baseT_Half; | 325 | ADVERTISED_100baseT_Half; |
326 | phydev->advertising |= ADVERTISED_FIBRE; | ||
327 | phydev->autoneg = AUTONEG_DISABLE; | 327 | phydev->autoneg = AUTONEG_DISABLE; |
328 | } | 328 | } |
329 | 329 | ||
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index e977ba931878..1a4bf8acad78 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c | |||
@@ -723,6 +723,7 @@ struct phy_device *phy_connect(struct net_device *dev, const char *bus_id, | |||
723 | phydev = to_phy_device(d); | 723 | phydev = to_phy_device(d); |
724 | 724 | ||
725 | rc = phy_connect_direct(dev, phydev, handler, interface); | 725 | rc = phy_connect_direct(dev, phydev, handler, interface); |
726 | put_device(d); | ||
726 | if (rc) | 727 | if (rc) |
727 | return ERR_PTR(rc); | 728 | return ERR_PTR(rc); |
728 | 729 | ||
@@ -953,6 +954,7 @@ struct phy_device *phy_attach(struct net_device *dev, const char *bus_id, | |||
953 | phydev = to_phy_device(d); | 954 | phydev = to_phy_device(d); |
954 | 955 | ||
955 | rc = phy_attach_direct(dev, phydev, phydev->dev_flags, interface); | 956 | rc = phy_attach_direct(dev, phydev, phydev->dev_flags, interface); |
957 | put_device(d); | ||
956 | if (rc) | 958 | if (rc) |
957 | return ERR_PTR(rc); | 959 | return ERR_PTR(rc); |
958 | 960 | ||
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c index 2e37eb337d48..24b4a09468dd 100644 --- a/drivers/net/phy/vitesse.c +++ b/drivers/net/phy/vitesse.c | |||
@@ -62,6 +62,10 @@ | |||
62 | /* Vitesse Extended Page Access Register */ | 62 | /* Vitesse Extended Page Access Register */ |
63 | #define MII_VSC82X4_EXT_PAGE_ACCESS 0x1f | 63 | #define MII_VSC82X4_EXT_PAGE_ACCESS 0x1f |
64 | 64 | ||
65 | /* Vitesse VSC8601 Extended PHY Control Register 1 */ | ||
66 | #define MII_VSC8601_EPHY_CTL 0x17 | ||
67 | #define MII_VSC8601_EPHY_CTL_RGMII_SKEW (1 << 8) | ||
68 | |||
65 | #define PHY_ID_VSC8234 0x000fc620 | 69 | #define PHY_ID_VSC8234 0x000fc620 |
66 | #define PHY_ID_VSC8244 0x000fc6c0 | 70 | #define PHY_ID_VSC8244 0x000fc6c0 |
67 | #define PHY_ID_VSC8514 0x00070670 | 71 | #define PHY_ID_VSC8514 0x00070670 |
@@ -111,6 +115,34 @@ static int vsc824x_config_init(struct phy_device *phydev) | |||
111 | return err; | 115 | return err; |
112 | } | 116 | } |
113 | 117 | ||
118 | /* This adds a skew for both TX and RX clocks, so the skew should only be | ||
119 | * applied to "rgmii-id" interfaces. It may not work as expected | ||
120 | * on "rgmii-txid", "rgmii-rxid" or "rgmii" interfaces. */ | ||
121 | static int vsc8601_add_skew(struct phy_device *phydev) | ||
122 | { | ||
123 | int ret; | ||
124 | |||
125 | ret = phy_read(phydev, MII_VSC8601_EPHY_CTL); | ||
126 | if (ret < 0) | ||
127 | return ret; | ||
128 | |||
129 | ret |= MII_VSC8601_EPHY_CTL_RGMII_SKEW; | ||
130 | return phy_write(phydev, MII_VSC8601_EPHY_CTL, ret); | ||
131 | } | ||
132 | |||
133 | static int vsc8601_config_init(struct phy_device *phydev) | ||
134 | { | ||
135 | int ret = 0; | ||
136 | |||
137 | if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) | ||
138 | ret = vsc8601_add_skew(phydev); | ||
139 | |||
140 | if (ret < 0) | ||
141 | return ret; | ||
142 | |||
143 | return genphy_config_init(phydev); | ||
144 | } | ||
145 | |||
114 | static int vsc824x_ack_interrupt(struct phy_device *phydev) | 146 | static int vsc824x_ack_interrupt(struct phy_device *phydev) |
115 | { | 147 | { |
116 | int err = 0; | 148 | int err = 0; |
@@ -275,7 +307,7 @@ static struct phy_driver vsc82xx_driver[] = { | |||
275 | .phy_id_mask = 0x000ffff0, | 307 | .phy_id_mask = 0x000ffff0, |
276 | .features = PHY_GBIT_FEATURES, | 308 | .features = PHY_GBIT_FEATURES, |
277 | .flags = PHY_HAS_INTERRUPT, | 309 | .flags = PHY_HAS_INTERRUPT, |
278 | .config_init = &genphy_config_init, | 310 | .config_init = &vsc8601_config_init, |
279 | .config_aneg = &genphy_config_aneg, | 311 | .config_aneg = &genphy_config_aneg, |
280 | .read_status = &genphy_read_status, | 312 | .read_status = &genphy_read_status, |
281 | .ack_interrupt = &vsc824x_ack_interrupt, | 313 | .ack_interrupt = &vsc824x_ack_interrupt, |
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index e6338c16081a..8a6675d92b98 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c | |||
@@ -1656,6 +1656,19 @@ static const struct driver_info ax88178a_info = { | |||
1656 | .tx_fixup = ax88179_tx_fixup, | 1656 | .tx_fixup = ax88179_tx_fixup, |
1657 | }; | 1657 | }; |
1658 | 1658 | ||
1659 | static const struct driver_info cypress_GX3_info = { | ||
1660 | .description = "Cypress GX3 SuperSpeed to Gigabit Ethernet Controller", | ||
1661 | .bind = ax88179_bind, | ||
1662 | .unbind = ax88179_unbind, | ||
1663 | .status = ax88179_status, | ||
1664 | .link_reset = ax88179_link_reset, | ||
1665 | .reset = ax88179_reset, | ||
1666 | .stop = ax88179_stop, | ||
1667 | .flags = FLAG_ETHER | FLAG_FRAMING_AX, | ||
1668 | .rx_fixup = ax88179_rx_fixup, | ||
1669 | .tx_fixup = ax88179_tx_fixup, | ||
1670 | }; | ||
1671 | |||
1659 | static const struct driver_info dlink_dub1312_info = { | 1672 | static const struct driver_info dlink_dub1312_info = { |
1660 | .description = "D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter", | 1673 | .description = "D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter", |
1661 | .bind = ax88179_bind, | 1674 | .bind = ax88179_bind, |
@@ -1718,6 +1731,10 @@ static const struct usb_device_id products[] = { | |||
1718 | USB_DEVICE(0x0b95, 0x178a), | 1731 | USB_DEVICE(0x0b95, 0x178a), |
1719 | .driver_info = (unsigned long)&ax88178a_info, | 1732 | .driver_info = (unsigned long)&ax88178a_info, |
1720 | }, { | 1733 | }, { |
1734 | /* Cypress GX3 SuperSpeed to Gigabit Ethernet Bridge Controller */ | ||
1735 | USB_DEVICE(0x04b4, 0x3610), | ||
1736 | .driver_info = (unsigned long)&cypress_GX3_info, | ||
1737 | }, { | ||
1721 | /* D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter */ | 1738 | /* D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter */ |
1722 | USB_DEVICE(0x2001, 0x4a00), | 1739 | USB_DEVICE(0x2001, 0x4a00), |
1723 | .driver_info = (unsigned long)&dlink_dub1312_info, | 1740 | .driver_info = (unsigned long)&dlink_dub1312_info, |
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 44d439f50961..efb84f092492 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
@@ -1730,7 +1730,7 @@ static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc) | |||
1730 | u8 checksum = CHECKSUM_NONE; | 1730 | u8 checksum = CHECKSUM_NONE; |
1731 | u32 opts2, opts3; | 1731 | u32 opts2, opts3; |
1732 | 1732 | ||
1733 | if (tp->version == RTL_VER_01) | 1733 | if (tp->version == RTL_VER_01 || tp->version == RTL_VER_02) |
1734 | goto return_result; | 1734 | goto return_result; |
1735 | 1735 | ||
1736 | opts2 = le32_to_cpu(rx_desc->opts2); | 1736 | opts2 = le32_to_cpu(rx_desc->opts2); |
@@ -1745,7 +1745,7 @@ static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc) | |||
1745 | checksum = CHECKSUM_NONE; | 1745 | checksum = CHECKSUM_NONE; |
1746 | else | 1746 | else |
1747 | checksum = CHECKSUM_UNNECESSARY; | 1747 | checksum = CHECKSUM_UNNECESSARY; |
1748 | } else if (RD_IPV6_CS) { | 1748 | } else if (opts2 & RD_IPV6_CS) { |
1749 | if ((opts2 & RD_UDP_CS) && !(opts3 & UDPF)) | 1749 | if ((opts2 & RD_UDP_CS) && !(opts3 & UDPF)) |
1750 | checksum = CHECKSUM_UNNECESSARY; | 1750 | checksum = CHECKSUM_UNNECESSARY; |
1751 | else if ((opts2 & RD_TCP_CS) && !(opts3 & TCPF)) | 1751 | else if ((opts2 & RD_TCP_CS) && !(opts3 & TCPF)) |
@@ -3266,10 +3266,8 @@ static int rtl8152_open(struct net_device *netdev) | |||
3266 | goto out; | 3266 | goto out; |
3267 | 3267 | ||
3268 | res = usb_autopm_get_interface(tp->intf); | 3268 | res = usb_autopm_get_interface(tp->intf); |
3269 | if (res < 0) { | 3269 | if (res < 0) |
3270 | free_all_mem(tp); | 3270 | goto out_free; |
3271 | goto out; | ||
3272 | } | ||
3273 | 3271 | ||
3274 | mutex_lock(&tp->control); | 3272 | mutex_lock(&tp->control); |
3275 | 3273 | ||
@@ -3285,10 +3283,9 @@ static int rtl8152_open(struct net_device *netdev) | |||
3285 | netif_device_detach(tp->netdev); | 3283 | netif_device_detach(tp->netdev); |
3286 | netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n", | 3284 | netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n", |
3287 | res); | 3285 | res); |
3288 | free_all_mem(tp); | 3286 | goto out_unlock; |
3289 | } else { | ||
3290 | napi_enable(&tp->napi); | ||
3291 | } | 3287 | } |
3288 | napi_enable(&tp->napi); | ||
3292 | 3289 | ||
3293 | mutex_unlock(&tp->control); | 3290 | mutex_unlock(&tp->control); |
3294 | 3291 | ||
@@ -3297,7 +3294,13 @@ static int rtl8152_open(struct net_device *netdev) | |||
3297 | tp->pm_notifier.notifier_call = rtl_notifier; | 3294 | tp->pm_notifier.notifier_call = rtl_notifier; |
3298 | register_pm_notifier(&tp->pm_notifier); | 3295 | register_pm_notifier(&tp->pm_notifier); |
3299 | #endif | 3296 | #endif |
3297 | return 0; | ||
3300 | 3298 | ||
3299 | out_unlock: | ||
3300 | mutex_unlock(&tp->control); | ||
3301 | usb_autopm_put_interface(tp->intf); | ||
3302 | out_free: | ||
3303 | free_all_mem(tp); | ||
3301 | out: | 3304 | out: |
3302 | return res; | 3305 | return res; |
3303 | } | 3306 | } |
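The r8152 checksum hunk fixes a test that masked nothing: RD_IPV6_CS is a constant flag, so the old "else if (RD_IPV6_CS)" branch was always taken, and the fix tests opts2 & RD_IPV6_CS instead. A standalone illustration; the bit position used for RD_IPV6_CS below is a placeholder, not the driver's real value:

#include <stdio.h>

#define RD_IPV6_CS (1u << 7)            /* placeholder bit position */

int main(void)
{
        unsigned int opts2 = 0;         /* descriptor word without the IPv6 bit set */

        if (RD_IPV6_CS)
                printf("old test: always taken, regardless of the descriptor\n");
        if (opts2 & RD_IPV6_CS)
                printf("new test: taken\n");
        else
                printf("new test: correctly skipped for this descriptor\n");
        return 0;
}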
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index fad84f3f4109..7276d5a95bd0 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -1497,6 +1497,11 @@ static void virtnet_free_queues(struct virtnet_info *vi) | |||
1497 | netif_napi_del(&vi->rq[i].napi); | 1497 | netif_napi_del(&vi->rq[i].napi); |
1498 | } | 1498 | } |
1499 | 1499 | ||
1500 | /* We called napi_hash_del() before netif_napi_del(), | ||
1501 | * we need to respect an RCU grace period before freeing vi->rq | ||
1502 | */ | ||
1503 | synchronize_net(); | ||
1504 | |||
1500 | kfree(vi->rq); | 1505 | kfree(vi->rq); |
1501 | kfree(vi->sq); | 1506 | kfree(vi->sq); |
1502 | } | 1507 | } |
@@ -2038,23 +2043,33 @@ static struct virtio_device_id id_table[] = { | |||
2038 | { 0 }, | 2043 | { 0 }, |
2039 | }; | 2044 | }; |
2040 | 2045 | ||
2046 | #define VIRTNET_FEATURES \ | ||
2047 | VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, \ | ||
2048 | VIRTIO_NET_F_MAC, \ | ||
2049 | VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, \ | ||
2050 | VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, \ | ||
2051 | VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, \ | ||
2052 | VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, \ | ||
2053 | VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, \ | ||
2054 | VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \ | ||
2055 | VIRTIO_NET_F_CTRL_MAC_ADDR, \ | ||
2056 | VIRTIO_NET_F_MTU | ||
2057 | |||
2041 | static unsigned int features[] = { | 2058 | static unsigned int features[] = { |
2042 | VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, | 2059 | VIRTNET_FEATURES, |
2043 | VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, | 2060 | }; |
2044 | VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, | 2061 | |
2045 | VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, | 2062 | static unsigned int features_legacy[] = { |
2046 | VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, | 2063 | VIRTNET_FEATURES, |
2047 | VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, | 2064 | VIRTIO_NET_F_GSO, |
2048 | VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, | ||
2049 | VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, | ||
2050 | VIRTIO_NET_F_CTRL_MAC_ADDR, | ||
2051 | VIRTIO_F_ANY_LAYOUT, | 2065 | VIRTIO_F_ANY_LAYOUT, |
2052 | VIRTIO_NET_F_MTU, | ||
2053 | }; | 2066 | }; |
2054 | 2067 | ||
2055 | static struct virtio_driver virtio_net_driver = { | 2068 | static struct virtio_driver virtio_net_driver = { |
2056 | .feature_table = features, | 2069 | .feature_table = features, |
2057 | .feature_table_size = ARRAY_SIZE(features), | 2070 | .feature_table_size = ARRAY_SIZE(features), |
2071 | .feature_table_legacy = features_legacy, | ||
2072 | .feature_table_size_legacy = ARRAY_SIZE(features_legacy), | ||
2058 | .driver.name = KBUILD_MODNAME, | 2073 | .driver.name = KBUILD_MODNAME, |
2059 | .driver.owner = THIS_MODULE, | 2074 | .driver.owner = THIS_MODULE, |
2060 | .id_table = id_table, | 2075 | .id_table = id_table, |
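The virtio_net hunk splits the feature list into a modern table and a legacy table that appends VIRTIO_NET_F_GSO and VIRTIO_F_ANY_LAYOUT, sharing the common entries through a macro. A sketch of that expansion with made-up feature numbers standing in for the VIRTIO_NET_F_* constants:

#include <stdio.h>

#define COMMON_FEATURES 1, 2, 3         /* stand-ins for the shared feature bits */

static unsigned int features[]        = { COMMON_FEATURES };
static unsigned int features_legacy[] = { COMMON_FEATURES, 40, 41 };   /* legacy-only extras */

int main(void)
{
        printf("modern table: %zu entries, legacy table: %zu entries\n",
               sizeof(features) / sizeof(features[0]),
               sizeof(features_legacy) / sizeof(features_legacy[0]));
        return 0;
}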
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index f3c2fa3ab0d5..24532cdebb00 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
@@ -944,7 +944,9 @@ static bool vxlan_group_used(struct vxlan_net *vn, struct vxlan_dev *dev) | |||
944 | { | 944 | { |
945 | struct vxlan_dev *vxlan; | 945 | struct vxlan_dev *vxlan; |
946 | struct vxlan_sock *sock4; | 946 | struct vxlan_sock *sock4; |
947 | struct vxlan_sock *sock6 = NULL; | 947 | #if IS_ENABLED(CONFIG_IPV6) |
948 | struct vxlan_sock *sock6; | ||
949 | #endif | ||
948 | unsigned short family = dev->default_dst.remote_ip.sa.sa_family; | 950 | unsigned short family = dev->default_dst.remote_ip.sa.sa_family; |
949 | 951 | ||
950 | sock4 = rtnl_dereference(dev->vn4_sock); | 952 | sock4 = rtnl_dereference(dev->vn4_sock); |
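The vxlan hunk declares the sock6 local only under IS_ENABLED(CONFIG_IPV6), matching the code that uses it. A standalone approximation; IS_ENABLED() is redefined here in a simplified form (the kernel's version in kconfig.h is more elaborate) just so the pattern compiles outside the kernel:

#include <stdio.h>

#define CONFIG_IPV6 1                   /* flip to 0 to mimic an IPv4-only build */
#define IS_ENABLED(x) (x)               /* simplified stand-in */

int main(void)
{
#if IS_ENABLED(CONFIG_IPV6)
        const char *sock6 = "vn6_sock";
        printf("IPv6 enabled, would also check %s\n", sock6);
#endif
        printf("always check vn4_sock\n");
        return 0;
}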
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index b777e1b2f87a..78d9966a3957 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | |||
@@ -4516,7 +4516,7 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev, | |||
4516 | /* store current 11d setting */ | 4516 | /* store current 11d setting */ |
4517 | if (brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_REGULATORY, | 4517 | if (brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_REGULATORY, |
4518 | &ifp->vif->is_11d)) { | 4518 | &ifp->vif->is_11d)) { |
4519 | supports_11d = false; | 4519 | is_11d = supports_11d = false; |
4520 | } else { | 4520 | } else { |
4521 | country_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail, | 4521 | country_ie = brcmf_parse_tlvs((u8 *)settings->beacon.tail, |
4522 | settings->beacon.tail_len, | 4522 | settings->beacon.tail_len, |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 4fdc3dad3e85..b88e2048ae0b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c | |||
@@ -1087,6 +1087,15 @@ iwl_mvm_netdetect_config(struct iwl_mvm *mvm, | |||
1087 | ret = iwl_mvm_switch_to_d3(mvm); | 1087 | ret = iwl_mvm_switch_to_d3(mvm); |
1088 | if (ret) | 1088 | if (ret) |
1089 | return ret; | 1089 | return ret; |
1090 | } else { | ||
1091 | /* In theory, we wouldn't have to stop a running sched | ||
1092 | * scan in order to start another one (for | ||
1093 | * net-detect). But in practice this doesn't seem to | ||
1094 | * work properly, so stop any running sched_scan now. | ||
1095 | */ | ||
1096 | ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); | ||
1097 | if (ret) | ||
1098 | return ret; | ||
1090 | } | 1099 | } |
1091 | 1100 | ||
1092 | /* rfkill release can be either for wowlan or netdetect */ | 1101 | /* rfkill release can be either for wowlan or netdetect */ |
@@ -1254,7 +1263,10 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, | |||
1254 | out: | 1263 | out: |
1255 | if (ret < 0) { | 1264 | if (ret < 0) { |
1256 | iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); | 1265 | iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN); |
1257 | ieee80211_restart_hw(mvm->hw); | 1266 | if (mvm->restart_fw > 0) { |
1267 | mvm->restart_fw--; | ||
1268 | ieee80211_restart_hw(mvm->hw); | ||
1269 | } | ||
1258 | iwl_mvm_free_nd(mvm); | 1270 | iwl_mvm_free_nd(mvm); |
1259 | } | 1271 | } |
1260 | out_noreset: | 1272 | out_noreset: |
@@ -2088,6 +2100,16 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) | |||
2088 | iwl_mvm_update_changed_regdom(mvm); | 2100 | iwl_mvm_update_changed_regdom(mvm); |
2089 | 2101 | ||
2090 | if (mvm->net_detect) { | 2102 | if (mvm->net_detect) { |
2103 | /* If this is a non-unified image, we restart the FW, | ||
2104 | * so no need to stop the netdetect scan. If that | ||
2105 | * fails, continue and try to get the wake-up reasons, | ||
2106 | * but trigger a HW restart by keeping a failure code | ||
2107 | * in ret. | ||
2108 | */ | ||
2109 | if (unified_image) | ||
2110 | ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_NETDETECT, | ||
2111 | false); | ||
2112 | |||
2091 | iwl_mvm_query_netdetect_reasons(mvm, vif); | 2113 | iwl_mvm_query_netdetect_reasons(mvm, vif); |
2092 | /* has unlocked the mutex, so skip that */ | 2114 | /* has unlocked the mutex, so skip that */ |
2093 | goto out; | 2115 | goto out; |
@@ -2271,7 +2293,8 @@ static void iwl_mvm_d3_test_disconn_work_iter(void *_data, u8 *mac, | |||
2271 | static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file) | 2293 | static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file) |
2272 | { | 2294 | { |
2273 | struct iwl_mvm *mvm = inode->i_private; | 2295 | struct iwl_mvm *mvm = inode->i_private; |
2274 | int remaining_time = 10; | 2296 | bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, |
2297 | IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); | ||
2275 | 2298 | ||
2276 | mvm->d3_test_active = false; | 2299 | mvm->d3_test_active = false; |
2277 | 2300 | ||
@@ -2282,17 +2305,21 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file) | |||
2282 | mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; | 2305 | mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; |
2283 | 2306 | ||
2284 | iwl_abort_notification_waits(&mvm->notif_wait); | 2307 | iwl_abort_notification_waits(&mvm->notif_wait); |
2285 | ieee80211_restart_hw(mvm->hw); | 2308 | if (!unified_image) { |
2309 | int remaining_time = 10; | ||
2286 | 2310 | ||
2287 | /* wait for restart and disconnect all interfaces */ | 2311 | ieee80211_restart_hw(mvm->hw); |
2288 | while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && | 2312 | |
2289 | remaining_time > 0) { | 2313 | /* wait for restart and disconnect all interfaces */ |
2290 | remaining_time--; | 2314 | while (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && |
2291 | msleep(1000); | 2315 | remaining_time > 0) { |
2292 | } | 2316 | remaining_time--; |
2317 | msleep(1000); | ||
2318 | } | ||
2293 | 2319 | ||
2294 | if (remaining_time == 0) | 2320 | if (remaining_time == 0) |
2295 | IWL_ERR(mvm, "Timed out waiting for HW restart to finish!\n"); | 2321 | IWL_ERR(mvm, "Timed out waiting for HW restart!\n"); |
2322 | } | ||
2296 | 2323 | ||
2297 | ieee80211_iterate_active_interfaces_atomic( | 2324 | ieee80211_iterate_active_interfaces_atomic( |
2298 | mvm->hw, IEEE80211_IFACE_ITER_NORMAL, | 2325 | mvm->hw, IEEE80211_IFACE_ITER_NORMAL, |
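All of the d3.c hunks key off the same firmware capability: with a consolidated D3/D0 ("unified") image the firmware keeps running across suspend, so the driver has to stop scheduled and net-detect scans explicitly and must not force a full HW restart on the D3 test path. A condensed sketch of the gate used throughout these hunks:

    bool unified_image = fw_has_capa(&mvm->fw->ucode_capa,
                                     IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG);

    if (!unified_image)
            ieee80211_restart_hw(mvm->hw);  /* only a non-unified image needs the restart */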
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 07da4efe8458..7b7d2a146e30 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c | |||
@@ -1529,8 +1529,8 @@ static ssize_t iwl_dbgfs_mem_read(struct file *file, char __user *user_buf, | |||
1529 | .data = { &cmd, }, | 1529 | .data = { &cmd, }, |
1530 | .len = { sizeof(cmd) }, | 1530 | .len = { sizeof(cmd) }, |
1531 | }; | 1531 | }; |
1532 | size_t delta, len; | 1532 | size_t delta; |
1533 | ssize_t ret; | 1533 | ssize_t ret, len; |
1534 | 1534 | ||
1535 | hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR, | 1535 | hcmd.id = iwl_cmd_id(*ppos >> 24 ? UMAC_RD_WR : LMAC_RD_WR, |
1536 | DEBUG_GROUP, 0); | 1536 | DEBUG_GROUP, 0); |
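The debugfs type change matters because read handlers return ssize_t and report failures as negative errno values; storing such a value in a size_t silently turns it into a huge positive length. A small stand-alone illustration of the pitfall (not driver code):

    #include <stdio.h>
    #include <sys/types.h>

    int main(void)
    {
            size_t ulen = -22;      /* an errno such as -EINVAL forced into an unsigned type */
            ssize_t slen = -22;

            printf("%zu\n", ulen);  /* enormous positive value, the error is lost */
            printf("%zd\n", slen);  /* -22, the caller can still detect the failure */
            return 0;
    }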
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 318efd814037..1db1dc13e988 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | |||
@@ -4121,7 +4121,6 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, | |||
4121 | struct iwl_mvm_internal_rxq_notif *notif, | 4121 | struct iwl_mvm_internal_rxq_notif *notif, |
4122 | u32 size) | 4122 | u32 size) |
4123 | { | 4123 | { |
4124 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(notif_waitq); | ||
4125 | u32 qmask = BIT(mvm->trans->num_rx_queues) - 1; | 4124 | u32 qmask = BIT(mvm->trans->num_rx_queues) - 1; |
4126 | int ret; | 4125 | int ret; |
4127 | 4126 | ||
@@ -4143,7 +4142,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, | |||
4143 | } | 4142 | } |
4144 | 4143 | ||
4145 | if (notif->sync) | 4144 | if (notif->sync) |
4146 | ret = wait_event_timeout(notif_waitq, | 4145 | ret = wait_event_timeout(mvm->rx_sync_waitq, |
4147 | atomic_read(&mvm->queue_sync_counter) == 0, | 4146 | atomic_read(&mvm->queue_sync_counter) == 0, |
4148 | HZ); | 4147 | HZ); |
4149 | WARN_ON_ONCE(!ret); | 4148 | WARN_ON_ONCE(!ret); |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index d17cbf603f7c..c60703e0c246 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | |||
@@ -937,6 +937,7 @@ struct iwl_mvm { | |||
937 | /* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */ | 937 | /* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */ |
938 | spinlock_t d0i3_tx_lock; | 938 | spinlock_t d0i3_tx_lock; |
939 | wait_queue_head_t d0i3_exit_waitq; | 939 | wait_queue_head_t d0i3_exit_waitq; |
940 | wait_queue_head_t rx_sync_waitq; | ||
940 | 941 | ||
941 | /* BT-Coex */ | 942 | /* BT-Coex */ |
942 | struct iwl_bt_coex_profile_notif last_bt_notif; | 943 | struct iwl_bt_coex_profile_notif last_bt_notif; |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 05fe6dd1a2c8..4d35deb628bc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c | |||
@@ -619,6 +619,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, | |||
619 | spin_lock_init(&mvm->refs_lock); | 619 | spin_lock_init(&mvm->refs_lock); |
620 | skb_queue_head_init(&mvm->d0i3_tx); | 620 | skb_queue_head_init(&mvm->d0i3_tx); |
621 | init_waitqueue_head(&mvm->d0i3_exit_waitq); | 621 | init_waitqueue_head(&mvm->d0i3_exit_waitq); |
622 | init_waitqueue_head(&mvm->rx_sync_waitq); | ||
622 | 623 | ||
623 | atomic_set(&mvm->queue_sync_counter, 0); | 624 | atomic_set(&mvm->queue_sync_counter, 0); |
624 | 625 | ||
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index a57c6ef5bc14..6c802cee900c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | |||
@@ -547,7 +547,8 @@ void iwl_mvm_rx_queue_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb, | |||
547 | "Received expired RX queue sync message\n"); | 547 | "Received expired RX queue sync message\n"); |
548 | return; | 548 | return; |
549 | } | 549 | } |
550 | atomic_dec(&mvm->queue_sync_counter); | 550 | if (!atomic_dec_return(&mvm->queue_sync_counter)) |
551 | wake_up(&mvm->rx_sync_waitq); | ||
551 | } | 552 | } |
552 | 553 | ||
553 | switch (internal_notif->type) { | 554 | switch (internal_notif->type) { |
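The mac80211.c, mvm.h, ops.c and rxmq.c hunks work together: the old code waited on an on-stack wait queue that nothing ever woke, so the RX-queue sync always slept for the full HZ timeout. Moving the wait queue into struct iwl_mvm lets the notification path wake the waiter as soon as the last queue has answered. The resulting pairing, condensed from the hunks:

    /* waiter (iwl_mvm_sync_rx_queues_internal) */
    ret = wait_event_timeout(mvm->rx_sync_waitq,
                             atomic_read(&mvm->queue_sync_counter) == 0, HZ);

    /* notifier (iwl_mvm_rx_queue_notif): wake only on the final decrement */
    if (!atomic_dec_return(&mvm->queue_sync_counter))
            wake_up(&mvm->rx_sync_waitq);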
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index f279fdd6eb44..fa9743205491 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c | |||
@@ -1199,6 +1199,9 @@ static int iwl_mvm_num_scans(struct iwl_mvm *mvm) | |||
1199 | 1199 | ||
1200 | static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type) | 1200 | static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type) |
1201 | { | 1201 | { |
1202 | bool unified_image = fw_has_capa(&mvm->fw->ucode_capa, | ||
1203 | IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); | ||
1204 | |||
1202 | /* This looks a bit arbitrary, but the idea is that if we run | 1205 | /* This looks a bit arbitrary, but the idea is that if we run |
1203 | * out of possible simultaneous scans and the userspace is | 1206 | * out of possible simultaneous scans and the userspace is |
1204 | * trying to run a scan type that is already running, we | 1207 | * trying to run a scan type that is already running, we |
@@ -1225,12 +1228,30 @@ static int iwl_mvm_check_running_scans(struct iwl_mvm *mvm, int type) | |||
1225 | return -EBUSY; | 1228 | return -EBUSY; |
1226 | return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); | 1229 | return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); |
1227 | case IWL_MVM_SCAN_NETDETECT: | 1230 | case IWL_MVM_SCAN_NETDETECT: |
1228 | /* No need to stop anything for net-detect since the | 1231 | /* For non-unified images, there's no need to stop |
1229 | * firmware is restarted anyway. This way, any sched | 1232 | * anything for net-detect since the firmware is |
1230 | * scans that were running will be restarted when we | 1233 | * restarted anyway. This way, any sched scans that |
1231 | * resume. | 1234 | * were running will be restarted when we resume. |
1232 | */ | 1235 | */ |
1233 | return 0; | 1236 | if (!unified_image) |
1237 | return 0; | ||
1238 | |||
1239 | /* If this is a unified image and we ran out of scans, | ||
1240 | * we need to stop something. Prefer stopping regular | ||
1241 | * scans, because the results are useless at this | ||
1242 | * point, and we should be able to keep running | ||
1243 | * another scheduled scan while suspended. | ||
1244 | */ | ||
1245 | if (mvm->scan_status & IWL_MVM_SCAN_REGULAR_MASK) | ||
1246 | return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, | ||
1247 | true); | ||
1248 | if (mvm->scan_status & IWL_MVM_SCAN_SCHED_MASK) | ||
1249 | return iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, | ||
1250 | true); | ||
1251 | |||
1252 | /* fall through, something is wrong if no scan was | ||
1253 | * running but we ran out of scans. | ||
1254 | */ | ||
1234 | default: | 1255 | default: |
1235 | WARN_ON(1); | 1256 | WARN_ON(1); |
1236 | break; | 1257 | break; |
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 001be406a3d3..2f8134b2a504 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c | |||
@@ -541,48 +541,64 @@ static const struct pci_device_id iwl_hw_card_ids[] = { | |||
541 | MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); | 541 | MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids); |
542 | 542 | ||
543 | #ifdef CONFIG_ACPI | 543 | #ifdef CONFIG_ACPI |
544 | #define SPL_METHOD "SPLC" | 544 | #define ACPI_SPLC_METHOD "SPLC" |
545 | #define SPL_DOMAINTYPE_MODULE BIT(0) | 545 | #define ACPI_SPLC_DOMAIN_WIFI (0x07) |
546 | #define SPL_DOMAINTYPE_WIFI BIT(1) | ||
547 | #define SPL_DOMAINTYPE_WIGIG BIT(2) | ||
548 | #define SPL_DOMAINTYPE_RFEM BIT(3) | ||
549 | 546 | ||
550 | static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx) | 547 | static u64 splc_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splc) |
551 | { | 548 | { |
552 | union acpi_object *limits, *domain_type, *power_limit; | 549 | union acpi_object *data_pkg, *dflt_pwr_limit; |
553 | 550 | int i; | |
554 | if (splx->type != ACPI_TYPE_PACKAGE || | 551 | |
555 | splx->package.count != 2 || | 552 | /* We need at least two elements, one for the revision and one |
556 | splx->package.elements[0].type != ACPI_TYPE_INTEGER || | 553 | * for the data itself. Also check that the revision is |
557 | splx->package.elements[0].integer.value != 0) { | 554 | * supported (currently only revision 0). |
558 | IWL_ERR(trans, "Unsupported splx structure\n"); | 555 | */ |
556 | if (splc->type != ACPI_TYPE_PACKAGE || | ||
557 | splc->package.count < 2 || | ||
558 | splc->package.elements[0].type != ACPI_TYPE_INTEGER || | ||
559 | splc->package.elements[0].integer.value != 0) { | ||
560 | IWL_DEBUG_INFO(trans, | ||
561 | "Unsupported structure returned by the SPLC method. Ignoring.\n"); | ||
559 | return 0; | 562 | return 0; |
560 | } | 563 | } |
561 | 564 | ||
562 | limits = &splx->package.elements[1]; | 565 | /* loop through all the packages to find the one for WiFi */ |
563 | if (limits->type != ACPI_TYPE_PACKAGE || | 566 | for (i = 1; i < splc->package.count; i++) { |
564 | limits->package.count < 2 || | 567 | union acpi_object *domain; |
565 | limits->package.elements[0].type != ACPI_TYPE_INTEGER || | 568 | |
566 | limits->package.elements[1].type != ACPI_TYPE_INTEGER) { | 569 | data_pkg = &splc->package.elements[i]; |
567 | IWL_ERR(trans, "Invalid limits element\n"); | 570 | |
568 | return 0; | 571 | /* Skip anything that is not a package with the right |
572 | * amount of elements (i.e. at least 2 integers). | ||
573 | */ | ||
574 | if (data_pkg->type != ACPI_TYPE_PACKAGE || | ||
575 | data_pkg->package.count < 2 || | ||
576 | data_pkg->package.elements[0].type != ACPI_TYPE_INTEGER || | ||
577 | data_pkg->package.elements[1].type != ACPI_TYPE_INTEGER) | ||
578 | continue; | ||
579 | |||
580 | domain = &data_pkg->package.elements[0]; | ||
581 | if (domain->integer.value == ACPI_SPLC_DOMAIN_WIFI) | ||
582 | break; | ||
583 | |||
584 | data_pkg = NULL; | ||
569 | } | 585 | } |
570 | 586 | ||
571 | domain_type = &limits->package.elements[0]; | 587 | if (!data_pkg) { |
572 | power_limit = &limits->package.elements[1]; | 588 | IWL_DEBUG_INFO(trans, |
573 | if (!(domain_type->integer.value & SPL_DOMAINTYPE_WIFI)) { | 589 | "No element for the WiFi domain returned by the SPLC method.\n"); |
574 | IWL_DEBUG_INFO(trans, "WiFi power is not limited\n"); | ||
575 | return 0; | 590 | return 0; |
576 | } | 591 | } |
577 | 592 | ||
578 | return power_limit->integer.value; | 593 | dflt_pwr_limit = &data_pkg->package.elements[1]; |
594 | return dflt_pwr_limit->integer.value; | ||
579 | } | 595 | } |
580 | 596 | ||
581 | static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev) | 597 | static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev) |
582 | { | 598 | { |
583 | acpi_handle pxsx_handle; | 599 | acpi_handle pxsx_handle; |
584 | acpi_handle handle; | 600 | acpi_handle handle; |
585 | struct acpi_buffer splx = {ACPI_ALLOCATE_BUFFER, NULL}; | 601 | struct acpi_buffer splc = {ACPI_ALLOCATE_BUFFER, NULL}; |
586 | acpi_status status; | 602 | acpi_status status; |
587 | 603 | ||
588 | pxsx_handle = ACPI_HANDLE(&pdev->dev); | 604 | pxsx_handle = ACPI_HANDLE(&pdev->dev); |
@@ -593,23 +609,24 @@ static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev) | |||
593 | } | 609 | } |
594 | 610 | ||
595 | /* Get the method's handle */ | 611 | /* Get the method's handle */ |
596 | status = acpi_get_handle(pxsx_handle, (acpi_string)SPL_METHOD, &handle); | 612 | status = acpi_get_handle(pxsx_handle, (acpi_string)ACPI_SPLC_METHOD, |
613 | &handle); | ||
597 | if (ACPI_FAILURE(status)) { | 614 | if (ACPI_FAILURE(status)) { |
598 | IWL_DEBUG_INFO(trans, "SPL method not found\n"); | 615 | IWL_DEBUG_INFO(trans, "SPLC method not found\n"); |
599 | return; | 616 | return; |
600 | } | 617 | } |
601 | 618 | ||
602 | /* Call SPLC with no arguments */ | 619 | /* Call SPLC with no arguments */ |
603 | status = acpi_evaluate_object(handle, NULL, NULL, &splx); | 620 | status = acpi_evaluate_object(handle, NULL, NULL, &splc); |
604 | if (ACPI_FAILURE(status)) { | 621 | if (ACPI_FAILURE(status)) { |
605 | IWL_ERR(trans, "SPLC invocation failed (0x%x)\n", status); | 622 | IWL_ERR(trans, "SPLC invocation failed (0x%x)\n", status); |
606 | return; | 623 | return; |
607 | } | 624 | } |
608 | 625 | ||
609 | trans->dflt_pwr_limit = splx_get_pwr_limit(trans, splx.pointer); | 626 | trans->dflt_pwr_limit = splc_get_pwr_limit(trans, splc.pointer); |
610 | IWL_DEBUG_INFO(trans, "Default power limit set to %lld\n", | 627 | IWL_DEBUG_INFO(trans, "Default power limit set to %lld\n", |
611 | trans->dflt_pwr_limit); | 628 | trans->dflt_pwr_limit); |
612 | kfree(splx.pointer); | 629 | kfree(splc.pointer); |
613 | } | 630 | } |
614 | 631 | ||
615 | #else /* CONFIG_ACPI */ | 632 | #else /* CONFIG_ACPI */ |
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index e9a278b60dfd..5f840f16f40b 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c | |||
@@ -592,6 +592,7 @@ error: | |||
592 | static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, | 592 | static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, |
593 | int slots_num, u32 txq_id) | 593 | int slots_num, u32 txq_id) |
594 | { | 594 | { |
595 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
595 | int ret; | 596 | int ret; |
596 | 597 | ||
597 | txq->need_update = false; | 598 | txq->need_update = false; |
@@ -606,6 +607,13 @@ static int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, | |||
606 | return ret; | 607 | return ret; |
607 | 608 | ||
608 | spin_lock_init(&txq->lock); | 609 | spin_lock_init(&txq->lock); |
610 | |||
611 | if (txq_id == trans_pcie->cmd_queue) { | ||
612 | static struct lock_class_key iwl_pcie_cmd_queue_lock_class; | ||
613 | |||
614 | lockdep_set_class(&txq->lock, &iwl_pcie_cmd_queue_lock_class); | ||
615 | } | ||
616 | |||
609 | __skb_queue_head_init(&txq->overflow_q); | 617 | __skb_queue_head_init(&txq->overflow_q); |
610 | 618 | ||
611 | /* | 619 | /* |
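lockdep groups locks by the call site that initializes them, so every txq->lock created in iwl_pcie_txq_init() lands in one class. Giving the command queue's lock its own static lock_class_key lets lockdep tell cmd-queue locking apart from data-queue locking and avoids false-positive deadlock reports when the two nest. The pattern in isolation (a sketch, not the full driver context):

    static struct lock_class_key cmd_queue_lock_class;  /* one key per distinct lock class */

    spin_lock_init(&txq->lock);
    if (txq_id == trans_pcie->cmd_queue)
            lockdep_set_class(&txq->lock, &cmd_queue_lock_class);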
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 431f13b4faf6..d3bad5779376 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c | |||
@@ -826,7 +826,7 @@ static void mac80211_hwsim_set_tsf(struct ieee80211_hw *hw, | |||
826 | data->bcn_delta = do_div(delta, bcn_int); | 826 | data->bcn_delta = do_div(delta, bcn_int); |
827 | } else { | 827 | } else { |
828 | data->tsf_offset -= delta; | 828 | data->tsf_offset -= delta; |
829 | data->bcn_delta = -do_div(delta, bcn_int); | 829 | data->bcn_delta = -(s64)do_div(delta, bcn_int); |
830 | } | 830 | } |
831 | } | 831 | } |
832 | 832 | ||
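The one-character hwsim fix is about integer promotion: do_div() hands back an unsigned 32-bit remainder, and negating it before the value is widened wraps around instead of going negative. A stand-alone demonstration of the same trap:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t rem = 3;               /* stands in for the do_div() remainder */
            int64_t wrong = -rem;           /* negated while still 32-bit unsigned: 4294967293 */
            int64_t right = -(int64_t)rem;  /* widen first, then negate: -3 */

            printf("%lld %lld\n", (long long)wrong, (long long)right);
            return 0;
    }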
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index e17879dd5d5a..bf2744e1e3db 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c | |||
@@ -304,7 +304,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue) | |||
304 | queue->rx_skbs[id] = skb; | 304 | queue->rx_skbs[id] = skb; |
305 | 305 | ||
306 | ref = gnttab_claim_grant_reference(&queue->gref_rx_head); | 306 | ref = gnttab_claim_grant_reference(&queue->gref_rx_head); |
307 | BUG_ON((signed short)ref < 0); | 307 | WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref)); |
308 | queue->grant_rx_ref[id] = ref; | 308 | queue->grant_rx_ref[id] = ref; |
309 | 309 | ||
310 | page = skb_frag_page(&skb_shinfo(skb)->frags[0]); | 310 | page = skb_frag_page(&skb_shinfo(skb)->frags[0]); |
@@ -428,7 +428,7 @@ static void xennet_tx_setup_grant(unsigned long gfn, unsigned int offset, | |||
428 | id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs); | 428 | id = get_id_from_freelist(&queue->tx_skb_freelist, queue->tx_skbs); |
429 | tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++); | 429 | tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++); |
430 | ref = gnttab_claim_grant_reference(&queue->gref_tx_head); | 430 | ref = gnttab_claim_grant_reference(&queue->gref_tx_head); |
431 | BUG_ON((signed short)ref < 0); | 431 | WARN_ON_ONCE(IS_ERR_VALUE((unsigned long)(int)ref)); |
432 | 432 | ||
433 | gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id, | 433 | gnttab_grant_foreign_access_ref(ref, queue->info->xbdev->otherend_id, |
434 | gfn, GNTMAP_readonly); | 434 | gfn, GNTMAP_readonly); |
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.c b/drivers/ntb/hw/intel/ntb_hw_intel.c index 0d5c29ae51de..7310a261c858 100644 --- a/drivers/ntb/hw/intel/ntb_hw_intel.c +++ b/drivers/ntb/hw/intel/ntb_hw_intel.c | |||
@@ -112,17 +112,17 @@ MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64, | |||
112 | 112 | ||
113 | module_param_named(xeon_b2b_usd_bar4_addr64, | 113 | module_param_named(xeon_b2b_usd_bar4_addr64, |
114 | xeon_b2b_usd_addr.bar4_addr64, ullong, 0644); | 114 | xeon_b2b_usd_addr.bar4_addr64, ullong, 0644); |
115 | MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64, | 115 | MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr64, |
116 | "XEON B2B USD BAR 4 64-bit address"); | 116 | "XEON B2B USD BAR 4 64-bit address"); |
117 | 117 | ||
118 | module_param_named(xeon_b2b_usd_bar4_addr32, | 118 | module_param_named(xeon_b2b_usd_bar4_addr32, |
119 | xeon_b2b_usd_addr.bar4_addr32, ullong, 0644); | 119 | xeon_b2b_usd_addr.bar4_addr32, ullong, 0644); |
120 | MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64, | 120 | MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr32, |
121 | "XEON B2B USD split-BAR 4 32-bit address"); | 121 | "XEON B2B USD split-BAR 4 32-bit address"); |
122 | 122 | ||
123 | module_param_named(xeon_b2b_usd_bar5_addr32, | 123 | module_param_named(xeon_b2b_usd_bar5_addr32, |
124 | xeon_b2b_usd_addr.bar5_addr32, ullong, 0644); | 124 | xeon_b2b_usd_addr.bar5_addr32, ullong, 0644); |
125 | MODULE_PARM_DESC(xeon_b2b_usd_bar2_addr64, | 125 | MODULE_PARM_DESC(xeon_b2b_usd_bar5_addr32, |
126 | "XEON B2B USD split-BAR 5 32-bit address"); | 126 | "XEON B2B USD split-BAR 5 32-bit address"); |
127 | 127 | ||
128 | module_param_named(xeon_b2b_dsd_bar2_addr64, | 128 | module_param_named(xeon_b2b_dsd_bar2_addr64, |
@@ -132,17 +132,17 @@ MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64, | |||
132 | 132 | ||
133 | module_param_named(xeon_b2b_dsd_bar4_addr64, | 133 | module_param_named(xeon_b2b_dsd_bar4_addr64, |
134 | xeon_b2b_dsd_addr.bar4_addr64, ullong, 0644); | 134 | xeon_b2b_dsd_addr.bar4_addr64, ullong, 0644); |
135 | MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64, | 135 | MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr64, |
136 | "XEON B2B DSD BAR 4 64-bit address"); | 136 | "XEON B2B DSD BAR 4 64-bit address"); |
137 | 137 | ||
138 | module_param_named(xeon_b2b_dsd_bar4_addr32, | 138 | module_param_named(xeon_b2b_dsd_bar4_addr32, |
139 | xeon_b2b_dsd_addr.bar4_addr32, ullong, 0644); | 139 | xeon_b2b_dsd_addr.bar4_addr32, ullong, 0644); |
140 | MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64, | 140 | MODULE_PARM_DESC(xeon_b2b_dsd_bar4_addr32, |
141 | "XEON B2B DSD split-BAR 4 32-bit address"); | 141 | "XEON B2B DSD split-BAR 4 32-bit address"); |
142 | 142 | ||
143 | module_param_named(xeon_b2b_dsd_bar5_addr32, | 143 | module_param_named(xeon_b2b_dsd_bar5_addr32, |
144 | xeon_b2b_dsd_addr.bar5_addr32, ullong, 0644); | 144 | xeon_b2b_dsd_addr.bar5_addr32, ullong, 0644); |
145 | MODULE_PARM_DESC(xeon_b2b_dsd_bar2_addr64, | 145 | MODULE_PARM_DESC(xeon_b2b_dsd_bar5_addr32, |
146 | "XEON B2B DSD split-BAR 5 32-bit address"); | 146 | "XEON B2B DSD split-BAR 5 32-bit address"); |
147 | 147 | ||
148 | #ifndef ioread64 | 148 | #ifndef ioread64 |
@@ -1755,6 +1755,8 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev, | |||
1755 | XEON_B2B_MIN_SIZE); | 1755 | XEON_B2B_MIN_SIZE); |
1756 | if (!ndev->peer_mmio) | 1756 | if (!ndev->peer_mmio) |
1757 | return -EIO; | 1757 | return -EIO; |
1758 | |||
1759 | ndev->peer_addr = pci_resource_start(pdev, b2b_bar); | ||
1758 | } | 1760 | } |
1759 | 1761 | ||
1760 | return 0; | 1762 | return 0; |
@@ -2019,6 +2021,7 @@ static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev) | |||
2019 | goto err_mmio; | 2021 | goto err_mmio; |
2020 | } | 2022 | } |
2021 | ndev->peer_mmio = ndev->self_mmio; | 2023 | ndev->peer_mmio = ndev->self_mmio; |
2024 | ndev->peer_addr = pci_resource_start(pdev, 0); | ||
2022 | 2025 | ||
2023 | return 0; | 2026 | return 0; |
2024 | 2027 | ||
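The MODULE_PARM_DESC() fixes in ntb_hw_intel.c are copy-paste repairs: the macro keys the help text to the parameter name given as its first argument, so a mismatched name attaches the description to the wrong entry in modinfo output. The corrected pairing looks like:

    module_param_named(xeon_b2b_usd_bar4_addr64,
                       xeon_b2b_usd_addr.bar4_addr64, ullong, 0644);
    MODULE_PARM_DESC(xeon_b2b_usd_bar4_addr64,  /* must match the name registered above */
                     "XEON B2B USD BAR 4 64-bit address");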
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index 8601c10acf74..4eb8adb34508 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c | |||
@@ -257,7 +257,7 @@ enum { | |||
257 | #define NTB_QP_DEF_NUM_ENTRIES 100 | 257 | #define NTB_QP_DEF_NUM_ENTRIES 100 |
258 | #define NTB_LINK_DOWN_TIMEOUT 10 | 258 | #define NTB_LINK_DOWN_TIMEOUT 10 |
259 | #define DMA_RETRIES 20 | 259 | #define DMA_RETRIES 20 |
260 | #define DMA_OUT_RESOURCE_TO 50 | 260 | #define DMA_OUT_RESOURCE_TO msecs_to_jiffies(50) |
261 | 261 | ||
262 | static void ntb_transport_rxc_db(unsigned long data); | 262 | static void ntb_transport_rxc_db(unsigned long data); |
263 | static const struct ntb_ctx_ops ntb_transport_ops; | 263 | static const struct ntb_ctx_ops ntb_transport_ops; |
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c index 6a50f20bf1cd..e75d4fdc0866 100644 --- a/drivers/ntb/test/ntb_perf.c +++ b/drivers/ntb/test/ntb_perf.c | |||
@@ -72,7 +72,7 @@ | |||
72 | #define MAX_THREADS 32 | 72 | #define MAX_THREADS 32 |
73 | #define MAX_TEST_SIZE SZ_1M | 73 | #define MAX_TEST_SIZE SZ_1M |
74 | #define MAX_SRCS 32 | 74 | #define MAX_SRCS 32 |
75 | #define DMA_OUT_RESOURCE_TO 50 | 75 | #define DMA_OUT_RESOURCE_TO msecs_to_jiffies(50) |
76 | #define DMA_RETRIES 20 | 76 | #define DMA_RETRIES 20 |
77 | #define SZ_4G (1ULL << 32) | 77 | #define SZ_4G (1ULL << 32) |
78 | #define MAX_SEG_ORDER 20 /* no larger than 1M for kmalloc buffer */ | 78 | #define MAX_SEG_ORDER 20 /* no larger than 1M for kmalloc buffer */ |
@@ -589,7 +589,7 @@ static ssize_t debugfs_run_read(struct file *filp, char __user *ubuf, | |||
589 | return -ENOMEM; | 589 | return -ENOMEM; |
590 | 590 | ||
591 | if (mutex_is_locked(&perf->run_mutex)) { | 591 | if (mutex_is_locked(&perf->run_mutex)) { |
592 | out_off = snprintf(buf, 64, "running\n"); | 592 | out_off = scnprintf(buf, 64, "running\n"); |
593 | goto read_from_buf; | 593 | goto read_from_buf; |
594 | } | 594 | } |
595 | 595 | ||
@@ -600,14 +600,14 @@ static ssize_t debugfs_run_read(struct file *filp, char __user *ubuf, | |||
600 | break; | 600 | break; |
601 | 601 | ||
602 | if (pctx->status) { | 602 | if (pctx->status) { |
603 | out_off += snprintf(buf + out_off, 1024 - out_off, | 603 | out_off += scnprintf(buf + out_off, 1024 - out_off, |
604 | "%d: error %d\n", i, | 604 | "%d: error %d\n", i, |
605 | pctx->status); | 605 | pctx->status); |
606 | continue; | 606 | continue; |
607 | } | 607 | } |
608 | 608 | ||
609 | rate = div64_u64(pctx->copied, pctx->diff_us); | 609 | rate = div64_u64(pctx->copied, pctx->diff_us); |
610 | out_off += snprintf(buf + out_off, 1024 - out_off, | 610 | out_off += scnprintf(buf + out_off, 1024 - out_off, |
611 | "%d: copied %llu bytes in %llu usecs, %llu MBytes/s\n", | 611 | "%d: copied %llu bytes in %llu usecs, %llu MBytes/s\n", |
612 | i, pctx->copied, pctx->diff_us, rate); | 612 | i, pctx->copied, pctx->diff_us, rate); |
613 | } | 613 | } |
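snprintf() returns the length the output would have had, which can exceed the buffer; feeding that back into buf + out_off can push the write pointer past the end. scnprintf() returns the number of bytes actually stored, so the offset stays bounded. A stand-alone demonstration of the difference in return value:

    #include <stdio.h>

    int main(void)
    {
            char buf[8];
            int n = snprintf(buf, sizeof(buf), "%s", "0123456789");

            /* n is 10 (the untruncated length), even though only 7 chars plus NUL fit */
            printf("returned %d, stored \"%s\"\n", n, buf);
            return 0;
    }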
diff --git a/drivers/ntb/test/ntb_pingpong.c b/drivers/ntb/test/ntb_pingpong.c index 7d311799fca1..435861189d97 100644 --- a/drivers/ntb/test/ntb_pingpong.c +++ b/drivers/ntb/test/ntb_pingpong.c | |||
@@ -88,7 +88,7 @@ MODULE_PARM_DESC(delay_ms, "Milliseconds to delay the response to peer"); | |||
88 | 88 | ||
89 | static unsigned long db_init = 0x7; | 89 | static unsigned long db_init = 0x7; |
90 | module_param(db_init, ulong, 0644); | 90 | module_param(db_init, ulong, 0644); |
91 | MODULE_PARM_DESC(delay_ms, "Initial doorbell bits to ring on the peer"); | 91 | MODULE_PARM_DESC(db_init, "Initial doorbell bits to ring on the peer"); |
92 | 92 | ||
93 | struct pp_ctx { | 93 | struct pp_ctx { |
94 | struct ntb_dev *ntb; | 94 | struct ntb_dev *ntb; |
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 0248d0e21fee..5e52034ab010 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c | |||
@@ -1242,20 +1242,16 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev) | |||
1242 | 1242 | ||
1243 | result = nvme_enable_ctrl(&dev->ctrl, cap); | 1243 | result = nvme_enable_ctrl(&dev->ctrl, cap); |
1244 | if (result) | 1244 | if (result) |
1245 | goto free_nvmeq; | 1245 | return result; |
1246 | 1246 | ||
1247 | nvmeq->cq_vector = 0; | 1247 | nvmeq->cq_vector = 0; |
1248 | result = queue_request_irq(nvmeq); | 1248 | result = queue_request_irq(nvmeq); |
1249 | if (result) { | 1249 | if (result) { |
1250 | nvmeq->cq_vector = -1; | 1250 | nvmeq->cq_vector = -1; |
1251 | goto free_nvmeq; | 1251 | return result; |
1252 | } | 1252 | } |
1253 | 1253 | ||
1254 | return result; | 1254 | return result; |
1255 | |||
1256 | free_nvmeq: | ||
1257 | nvme_free_queues(dev, 0); | ||
1258 | return result; | ||
1259 | } | 1255 | } |
1260 | 1256 | ||
1261 | static bool nvme_should_reset(struct nvme_dev *dev, u32 csts) | 1257 | static bool nvme_should_reset(struct nvme_dev *dev, u32 csts) |
@@ -1317,10 +1313,8 @@ static int nvme_create_io_queues(struct nvme_dev *dev) | |||
1317 | max = min(dev->max_qid, dev->queue_count - 1); | 1313 | max = min(dev->max_qid, dev->queue_count - 1); |
1318 | for (i = dev->online_queues; i <= max; i++) { | 1314 | for (i = dev->online_queues; i <= max; i++) { |
1319 | ret = nvme_create_queue(dev->queues[i], i); | 1315 | ret = nvme_create_queue(dev->queues[i], i); |
1320 | if (ret) { | 1316 | if (ret) |
1321 | nvme_free_queues(dev, i); | ||
1322 | break; | 1317 | break; |
1323 | } | ||
1324 | } | 1318 | } |
1325 | 1319 | ||
1326 | /* | 1320 | /* |
@@ -1460,13 +1454,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) | |||
1460 | result = queue_request_irq(adminq); | 1454 | result = queue_request_irq(adminq); |
1461 | if (result) { | 1455 | if (result) { |
1462 | adminq->cq_vector = -1; | 1456 | adminq->cq_vector = -1; |
1463 | goto free_queues; | 1457 | return result; |
1464 | } | 1458 | } |
1465 | return nvme_create_io_queues(dev); | 1459 | return nvme_create_io_queues(dev); |
1466 | |||
1467 | free_queues: | ||
1468 | nvme_free_queues(dev, 1); | ||
1469 | return result; | ||
1470 | } | 1460 | } |
1471 | 1461 | ||
1472 | static void nvme_del_queue_end(struct request *req, int error) | 1462 | static void nvme_del_queue_end(struct request *req, int error) |
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 5a8388177959..3d25add36d91 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c | |||
@@ -83,6 +83,7 @@ enum nvme_rdma_queue_flags { | |||
83 | NVME_RDMA_Q_CONNECTED = (1 << 0), | 83 | NVME_RDMA_Q_CONNECTED = (1 << 0), |
84 | NVME_RDMA_IB_QUEUE_ALLOCATED = (1 << 1), | 84 | NVME_RDMA_IB_QUEUE_ALLOCATED = (1 << 1), |
85 | NVME_RDMA_Q_DELETING = (1 << 2), | 85 | NVME_RDMA_Q_DELETING = (1 << 2), |
86 | NVME_RDMA_Q_LIVE = (1 << 3), | ||
86 | }; | 87 | }; |
87 | 88 | ||
88 | struct nvme_rdma_queue { | 89 | struct nvme_rdma_queue { |
@@ -624,10 +625,18 @@ static int nvme_rdma_connect_io_queues(struct nvme_rdma_ctrl *ctrl) | |||
624 | 625 | ||
625 | for (i = 1; i < ctrl->queue_count; i++) { | 626 | for (i = 1; i < ctrl->queue_count; i++) { |
626 | ret = nvmf_connect_io_queue(&ctrl->ctrl, i); | 627 | ret = nvmf_connect_io_queue(&ctrl->ctrl, i); |
627 | if (ret) | 628 | if (ret) { |
628 | break; | 629 | dev_info(ctrl->ctrl.device, |
630 | "failed to connect i/o queue: %d\n", ret); | ||
631 | goto out_free_queues; | ||
632 | } | ||
633 | set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags); | ||
629 | } | 634 | } |
630 | 635 | ||
636 | return 0; | ||
637 | |||
638 | out_free_queues: | ||
639 | nvme_rdma_free_io_queues(ctrl); | ||
631 | return ret; | 640 | return ret; |
632 | } | 641 | } |
633 | 642 | ||
@@ -712,6 +721,8 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work) | |||
712 | if (ret) | 721 | if (ret) |
713 | goto stop_admin_q; | 722 | goto stop_admin_q; |
714 | 723 | ||
724 | set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags); | ||
725 | |||
715 | ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); | 726 | ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); |
716 | if (ret) | 727 | if (ret) |
717 | goto stop_admin_q; | 728 | goto stop_admin_q; |
@@ -761,8 +772,10 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work) | |||
761 | 772 | ||
762 | nvme_stop_keep_alive(&ctrl->ctrl); | 773 | nvme_stop_keep_alive(&ctrl->ctrl); |
763 | 774 | ||
764 | for (i = 0; i < ctrl->queue_count; i++) | 775 | for (i = 0; i < ctrl->queue_count; i++) { |
765 | clear_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[i].flags); | 776 | clear_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[i].flags); |
777 | clear_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags); | ||
778 | } | ||
766 | 779 | ||
767 | if (ctrl->queue_count > 1) | 780 | if (ctrl->queue_count > 1) |
768 | nvme_stop_queues(&ctrl->ctrl); | 781 | nvme_stop_queues(&ctrl->ctrl); |
@@ -1378,6 +1391,24 @@ nvme_rdma_timeout(struct request *rq, bool reserved) | |||
1378 | return BLK_EH_HANDLED; | 1391 | return BLK_EH_HANDLED; |
1379 | } | 1392 | } |
1380 | 1393 | ||
1394 | /* | ||
1395 | * We cannot accept any other command until the Connect command has completed. | ||
1396 | */ | ||
1397 | static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, | ||
1398 | struct request *rq) | ||
1399 | { | ||
1400 | if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) { | ||
1401 | struct nvme_command *cmd = (struct nvme_command *)rq->cmd; | ||
1402 | |||
1403 | if (rq->cmd_type != REQ_TYPE_DRV_PRIV || | ||
1404 | cmd->common.opcode != nvme_fabrics_command || | ||
1405 | cmd->fabrics.fctype != nvme_fabrics_type_connect) | ||
1406 | return false; | ||
1407 | } | ||
1408 | |||
1409 | return true; | ||
1410 | } | ||
1411 | |||
1381 | static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, | 1412 | static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, |
1382 | const struct blk_mq_queue_data *bd) | 1413 | const struct blk_mq_queue_data *bd) |
1383 | { | 1414 | { |
@@ -1394,6 +1425,9 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
1394 | 1425 | ||
1395 | WARN_ON_ONCE(rq->tag < 0); | 1426 | WARN_ON_ONCE(rq->tag < 0); |
1396 | 1427 | ||
1428 | if (!nvme_rdma_queue_is_ready(queue, rq)) | ||
1429 | return BLK_MQ_RQ_QUEUE_BUSY; | ||
1430 | |||
1397 | dev = queue->device->dev; | 1431 | dev = queue->device->dev; |
1398 | ib_dma_sync_single_for_cpu(dev, sqe->dma, | 1432 | ib_dma_sync_single_for_cpu(dev, sqe->dma, |
1399 | sizeof(struct nvme_command), DMA_TO_DEVICE); | 1433 | sizeof(struct nvme_command), DMA_TO_DEVICE); |
@@ -1544,6 +1578,8 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl) | |||
1544 | if (error) | 1578 | if (error) |
1545 | goto out_cleanup_queue; | 1579 | goto out_cleanup_queue; |
1546 | 1580 | ||
1581 | set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags); | ||
1582 | |||
1547 | error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap); | 1583 | error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap); |
1548 | if (error) { | 1584 | if (error) { |
1549 | dev_err(ctrl->ctrl.device, | 1585 | dev_err(ctrl->ctrl.device, |
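The NVME_RDMA_Q_LIVE flag implements a simple gate: it is set only after the fabrics Connect on a queue succeeds, cleared during error recovery, and checked in queue_rq() so that any non-Connect command arriving early is returned as BLK_MQ_RQ_QUEUE_BUSY and requeued by blk-mq instead of being sent to an unready queue. Condensed lifecycle (is_connect_cmd() is a stand-in for the opcode checks in nvme_rdma_queue_is_ready()):

    set_bit(NVME_RDMA_Q_LIVE, &queue->flags);   /* after nvmf_connect_io_queue() succeeds */

    clear_bit(NVME_RDMA_Q_LIVE, &queue->flags); /* in error recovery, before reconnect */

    if (!test_bit(NVME_RDMA_Q_LIVE, &queue->flags) && !is_connect_cmd(rq))
            return BLK_MQ_RQ_QUEUE_BUSY;        /* requeued, not failed */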
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index b4cacb6f0258..a21437a33adb 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c | |||
@@ -838,9 +838,13 @@ static void nvmet_fatal_error_handler(struct work_struct *work) | |||
838 | 838 | ||
839 | void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl) | 839 | void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl) |
840 | { | 840 | { |
841 | ctrl->csts |= NVME_CSTS_CFS; | 841 | mutex_lock(&ctrl->lock); |
842 | INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler); | 842 | if (!(ctrl->csts & NVME_CSTS_CFS)) { |
843 | schedule_work(&ctrl->fatal_err_work); | 843 | ctrl->csts |= NVME_CSTS_CFS; |
844 | INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler); | ||
845 | schedule_work(&ctrl->fatal_err_work); | ||
846 | } | ||
847 | mutex_unlock(&ctrl->lock); | ||
844 | } | 848 | } |
845 | EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error); | 849 | EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error); |
846 | 850 | ||
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c index f8d23999e0f2..005ef5d17a19 100644 --- a/drivers/nvme/target/rdma.c +++ b/drivers/nvme/target/rdma.c | |||
@@ -951,6 +951,7 @@ err_destroy_cq: | |||
951 | 951 | ||
952 | static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue) | 952 | static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue) |
953 | { | 953 | { |
954 | ib_drain_qp(queue->cm_id->qp); | ||
954 | rdma_destroy_qp(queue->cm_id); | 955 | rdma_destroy_qp(queue->cm_id); |
955 | ib_free_cq(queue->cq); | 956 | ib_free_cq(queue->cq); |
956 | } | 957 | } |
@@ -1066,6 +1067,7 @@ nvmet_rdma_alloc_queue(struct nvmet_rdma_device *ndev, | |||
1066 | spin_lock_init(&queue->rsp_wr_wait_lock); | 1067 | spin_lock_init(&queue->rsp_wr_wait_lock); |
1067 | INIT_LIST_HEAD(&queue->free_rsps); | 1068 | INIT_LIST_HEAD(&queue->free_rsps); |
1068 | spin_lock_init(&queue->rsps_lock); | 1069 | spin_lock_init(&queue->rsps_lock); |
1070 | INIT_LIST_HEAD(&queue->queue_list); | ||
1069 | 1071 | ||
1070 | queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL); | 1072 | queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL); |
1071 | if (queue->idx < 0) { | 1073 | if (queue->idx < 0) { |
@@ -1244,7 +1246,6 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue) | |||
1244 | 1246 | ||
1245 | if (disconnect) { | 1247 | if (disconnect) { |
1246 | rdma_disconnect(queue->cm_id); | 1248 | rdma_disconnect(queue->cm_id); |
1247 | ib_drain_qp(queue->cm_id->qp); | ||
1248 | schedule_work(&queue->release_work); | 1249 | schedule_work(&queue->release_work); |
1249 | } | 1250 | } |
1250 | } | 1251 | } |
@@ -1269,7 +1270,12 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id, | |||
1269 | { | 1270 | { |
1270 | WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING); | 1271 | WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING); |
1271 | 1272 | ||
1272 | pr_err("failed to connect queue\n"); | 1273 | mutex_lock(&nvmet_rdma_queue_mutex); |
1274 | if (!list_empty(&queue->queue_list)) | ||
1275 | list_del_init(&queue->queue_list); | ||
1276 | mutex_unlock(&nvmet_rdma_queue_mutex); | ||
1277 | |||
1278 | pr_err("failed to connect queue %d\n", queue->idx); | ||
1273 | schedule_work(&queue->release_work); | 1279 | schedule_work(&queue->release_work); |
1274 | } | 1280 | } |
1275 | 1281 | ||
@@ -1352,7 +1358,13 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id, | |||
1352 | case RDMA_CM_EVENT_ADDR_CHANGE: | 1358 | case RDMA_CM_EVENT_ADDR_CHANGE: |
1353 | case RDMA_CM_EVENT_DISCONNECTED: | 1359 | case RDMA_CM_EVENT_DISCONNECTED: |
1354 | case RDMA_CM_EVENT_TIMEWAIT_EXIT: | 1360 | case RDMA_CM_EVENT_TIMEWAIT_EXIT: |
1355 | nvmet_rdma_queue_disconnect(queue); | 1361 | /* |
1362 | * We might end up here when we already freed the qp | ||
1363 | * which means queue release sequence is in progress, | ||
1364 | * so don't get in the way... | ||
1365 | */ | ||
1366 | if (queue) | ||
1367 | nvmet_rdma_queue_disconnect(queue); | ||
1356 | break; | 1368 | break; |
1357 | case RDMA_CM_EVENT_DEVICE_REMOVAL: | 1369 | case RDMA_CM_EVENT_DEVICE_REMOVAL: |
1358 | ret = nvmet_rdma_device_removal(cm_id, queue); | 1370 | ret = nvmet_rdma_device_removal(cm_id, queue); |
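Initializing queue->queue_list up front is what makes the later connect-failure path safe: a node set up with INIT_LIST_HEAD() reports list_empty() as true, so the cleanup can probe it even when the queue was never added to the global list. The idiom:

    INIT_LIST_HEAD(&queue->queue_list);         /* node starts out self-linked ("empty") */

    mutex_lock(&nvmet_rdma_queue_mutex);
    if (!list_empty(&queue->queue_list))        /* only true if the queue was actually enqueued */
            list_del_init(&queue->queue_list);
    mutex_unlock(&nvmet_rdma_queue_mutex);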
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index b470f7e3521d..5a3145a02547 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c | |||
@@ -292,6 +292,7 @@ struct phy_device *of_phy_find_device(struct device_node *phy_np) | |||
292 | mdiodev = to_mdio_device(d); | 292 | mdiodev = to_mdio_device(d); |
293 | if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY) | 293 | if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY) |
294 | return to_phy_device(d); | 294 | return to_phy_device(d); |
295 | put_device(d); | ||
295 | } | 296 | } |
296 | 297 | ||
297 | return NULL; | 298 | return NULL; |
@@ -456,8 +457,11 @@ int of_phy_register_fixed_link(struct device_node *np) | |||
456 | status.link = 1; | 457 | status.link = 1; |
457 | status.duplex = of_property_read_bool(fixed_link_node, | 458 | status.duplex = of_property_read_bool(fixed_link_node, |
458 | "full-duplex"); | 459 | "full-duplex"); |
459 | if (of_property_read_u32(fixed_link_node, "speed", &status.speed)) | 460 | if (of_property_read_u32(fixed_link_node, "speed", |
461 | &status.speed)) { | ||
462 | of_node_put(fixed_link_node); | ||
460 | return -EINVAL; | 463 | return -EINVAL; |
464 | } | ||
461 | status.pause = of_property_read_bool(fixed_link_node, "pause"); | 465 | status.pause = of_property_read_bool(fixed_link_node, "pause"); |
462 | status.asym_pause = of_property_read_bool(fixed_link_node, | 466 | status.asym_pause = of_property_read_bool(fixed_link_node, |
463 | "asym-pause"); | 467 | "asym-pause"); |
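Both of_mdio.c hunks are reference-count fixes: bus/class device lookups and OF node lookups return their result with an elevated refcount, and every exit path that does not hand that reference to the caller must drop it. The general shape (lookup name illustrative):

    struct device_node *child = of_get_child_by_name(np, "fixed-link");    /* takes a reference */

    if (!child)
            return -ENODEV;
    if (of_property_read_u32(child, "speed", &speed)) {
            of_node_put(child);     /* drop the reference on the error path too */
            return -EINVAL;
    }
    /* ... use child ... */
    of_node_put(child);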
diff --git a/drivers/pci/pci-mid.c b/drivers/pci/pci-mid.c index 55f453de562e..c7f3408e3148 100644 --- a/drivers/pci/pci-mid.c +++ b/drivers/pci/pci-mid.c | |||
@@ -29,6 +29,11 @@ static int mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state) | |||
29 | return intel_mid_pci_set_power_state(pdev, state); | 29 | return intel_mid_pci_set_power_state(pdev, state); |
30 | } | 30 | } |
31 | 31 | ||
32 | static pci_power_t mid_pci_get_power_state(struct pci_dev *pdev) | ||
33 | { | ||
34 | return intel_mid_pci_get_power_state(pdev); | ||
35 | } | ||
36 | |||
32 | static pci_power_t mid_pci_choose_state(struct pci_dev *pdev) | 37 | static pci_power_t mid_pci_choose_state(struct pci_dev *pdev) |
33 | { | 38 | { |
34 | return PCI_D3hot; | 39 | return PCI_D3hot; |
@@ -52,6 +57,7 @@ static bool mid_pci_need_resume(struct pci_dev *dev) | |||
52 | static struct pci_platform_pm_ops mid_pci_platform_pm = { | 57 | static struct pci_platform_pm_ops mid_pci_platform_pm = { |
53 | .is_manageable = mid_pci_power_manageable, | 58 | .is_manageable = mid_pci_power_manageable, |
54 | .set_state = mid_pci_set_power_state, | 59 | .set_state = mid_pci_set_power_state, |
60 | .get_state = mid_pci_get_power_state, | ||
55 | .choose_state = mid_pci_choose_state, | 61 | .choose_state = mid_pci_choose_state, |
56 | .sleep_wake = mid_pci_sleep_wake, | 62 | .sleep_wake = mid_pci_sleep_wake, |
57 | .run_wake = mid_pci_run_wake, | 63 | .run_wake = mid_pci_run_wake, |
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c index 87e6334eab93..547ca7b3f098 100644 --- a/drivers/phy/phy-twl4030-usb.c +++ b/drivers/phy/phy-twl4030-usb.c | |||
@@ -459,8 +459,6 @@ static int twl4030_phy_power_off(struct phy *phy) | |||
459 | struct twl4030_usb *twl = phy_get_drvdata(phy); | 459 | struct twl4030_usb *twl = phy_get_drvdata(phy); |
460 | 460 | ||
461 | dev_dbg(twl->dev, "%s\n", __func__); | 461 | dev_dbg(twl->dev, "%s\n", __func__); |
462 | pm_runtime_mark_last_busy(twl->dev); | ||
463 | pm_runtime_put_autosuspend(twl->dev); | ||
464 | 462 | ||
465 | return 0; | 463 | return 0; |
466 | } | 464 | } |
@@ -472,6 +470,8 @@ static int twl4030_phy_power_on(struct phy *phy) | |||
472 | dev_dbg(twl->dev, "%s\n", __func__); | 470 | dev_dbg(twl->dev, "%s\n", __func__); |
473 | pm_runtime_get_sync(twl->dev); | 471 | pm_runtime_get_sync(twl->dev); |
474 | schedule_delayed_work(&twl->id_workaround_work, HZ); | 472 | schedule_delayed_work(&twl->id_workaround_work, HZ); |
473 | pm_runtime_mark_last_busy(twl->dev); | ||
474 | pm_runtime_put_autosuspend(twl->dev); | ||
475 | 475 | ||
476 | return 0; | 476 | return 0; |
477 | } | 477 | } |
diff --git a/drivers/rtc/rtc-asm9260.c b/drivers/rtc/rtc-asm9260.c index 18a93d3e3f93..d36534965635 100644 --- a/drivers/rtc/rtc-asm9260.c +++ b/drivers/rtc/rtc-asm9260.c | |||
@@ -327,6 +327,7 @@ static const struct of_device_id asm9260_dt_ids[] = { | |||
327 | { .compatible = "alphascale,asm9260-rtc", }, | 327 | { .compatible = "alphascale,asm9260-rtc", }, |
328 | {} | 328 | {} |
329 | }; | 329 | }; |
330 | MODULE_DEVICE_TABLE(of, asm9260_dt_ids); | ||
330 | 331 | ||
331 | static struct platform_driver asm9260_rtc_driver = { | 332 | static struct platform_driver asm9260_rtc_driver = { |
332 | .probe = asm9260_rtc_probe, | 333 | .probe = asm9260_rtc_probe, |
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index dd3d59806ffa..7030d7cd3861 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c | |||
@@ -776,7 +776,7 @@ static void cmos_do_shutdown(int rtc_irq) | |||
776 | spin_unlock_irq(&rtc_lock); | 776 | spin_unlock_irq(&rtc_lock); |
777 | } | 777 | } |
778 | 778 | ||
779 | static void __exit cmos_do_remove(struct device *dev) | 779 | static void cmos_do_remove(struct device *dev) |
780 | { | 780 | { |
781 | struct cmos_rtc *cmos = dev_get_drvdata(dev); | 781 | struct cmos_rtc *cmos = dev_get_drvdata(dev); |
782 | struct resource *ports; | 782 | struct resource *ports; |
@@ -996,8 +996,9 @@ static u32 rtc_handler(void *context) | |||
996 | struct cmos_rtc *cmos = dev_get_drvdata(dev); | 996 | struct cmos_rtc *cmos = dev_get_drvdata(dev); |
997 | unsigned char rtc_control = 0; | 997 | unsigned char rtc_control = 0; |
998 | unsigned char rtc_intr; | 998 | unsigned char rtc_intr; |
999 | unsigned long flags; | ||
999 | 1000 | ||
1000 | spin_lock_irq(&rtc_lock); | 1001 | spin_lock_irqsave(&rtc_lock, flags); |
1001 | if (cmos_rtc.suspend_ctrl) | 1002 | if (cmos_rtc.suspend_ctrl) |
1002 | rtc_control = CMOS_READ(RTC_CONTROL); | 1003 | rtc_control = CMOS_READ(RTC_CONTROL); |
1003 | if (rtc_control & RTC_AIE) { | 1004 | if (rtc_control & RTC_AIE) { |
@@ -1006,7 +1007,7 @@ static u32 rtc_handler(void *context) | |||
1006 | rtc_intr = CMOS_READ(RTC_INTR_FLAGS); | 1007 | rtc_intr = CMOS_READ(RTC_INTR_FLAGS); |
1007 | rtc_update_irq(cmos->rtc, 1, rtc_intr); | 1008 | rtc_update_irq(cmos->rtc, 1, rtc_intr); |
1008 | } | 1009 | } |
1009 | spin_unlock_irq(&rtc_lock); | 1010 | spin_unlock_irqrestore(&rtc_lock, flags); |
1010 | 1011 | ||
1011 | pm_wakeup_event(dev, 0); | 1012 | pm_wakeup_event(dev, 0); |
1012 | acpi_clear_event(ACPI_EVENT_RTC); | 1013 | acpi_clear_event(ACPI_EVENT_RTC); |
@@ -1129,7 +1130,7 @@ static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) | |||
1129 | pnp_irq(pnp, 0)); | 1130 | pnp_irq(pnp, 0)); |
1130 | } | 1131 | } |
1131 | 1132 | ||
1132 | static void __exit cmos_pnp_remove(struct pnp_dev *pnp) | 1133 | static void cmos_pnp_remove(struct pnp_dev *pnp) |
1133 | { | 1134 | { |
1134 | cmos_do_remove(&pnp->dev); | 1135 | cmos_do_remove(&pnp->dev); |
1135 | } | 1136 | } |
@@ -1161,7 +1162,7 @@ static struct pnp_driver cmos_pnp_driver = { | |||
1161 | .name = (char *) driver_name, | 1162 | .name = (char *) driver_name, |
1162 | .id_table = rtc_ids, | 1163 | .id_table = rtc_ids, |
1163 | .probe = cmos_pnp_probe, | 1164 | .probe = cmos_pnp_probe, |
1164 | .remove = __exit_p(cmos_pnp_remove), | 1165 | .remove = cmos_pnp_remove, |
1165 | .shutdown = cmos_pnp_shutdown, | 1166 | .shutdown = cmos_pnp_shutdown, |
1166 | 1167 | ||
1167 | /* flag ensures resume() gets called, and stops syslog spam */ | 1168 | /* flag ensures resume() gets called, and stops syslog spam */ |
@@ -1238,7 +1239,7 @@ static int __init cmos_platform_probe(struct platform_device *pdev) | |||
1238 | return cmos_do_probe(&pdev->dev, resource, irq); | 1239 | return cmos_do_probe(&pdev->dev, resource, irq); |
1239 | } | 1240 | } |
1240 | 1241 | ||
1241 | static int __exit cmos_platform_remove(struct platform_device *pdev) | 1242 | static int cmos_platform_remove(struct platform_device *pdev) |
1242 | { | 1243 | { |
1243 | cmos_do_remove(&pdev->dev); | 1244 | cmos_do_remove(&pdev->dev); |
1244 | return 0; | 1245 | return 0; |
@@ -1263,7 +1264,7 @@ static void cmos_platform_shutdown(struct platform_device *pdev) | |||
1263 | MODULE_ALIAS("platform:rtc_cmos"); | 1264 | MODULE_ALIAS("platform:rtc_cmos"); |
1264 | 1265 | ||
1265 | static struct platform_driver cmos_platform_driver = { | 1266 | static struct platform_driver cmos_platform_driver = { |
1266 | .remove = __exit_p(cmos_platform_remove), | 1267 | .remove = cmos_platform_remove, |
1267 | .shutdown = cmos_platform_shutdown, | 1268 | .shutdown = cmos_platform_shutdown, |
1268 | .driver = { | 1269 | .driver = { |
1269 | .name = driver_name, | 1270 | .name = driver_name, |
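rtc_handler() runs as an ACPI RTC event handler and cannot assume interrupts are enabled on entry. spin_lock_irq()/spin_unlock_irq() would unconditionally re-enable them on unlock; the irqsave variant records the previous state and restores exactly that:

    unsigned long flags;

    spin_lock_irqsave(&rtc_lock, flags);        /* safe whether or not IRQs were already off */
    /* ... read and clear the CMOS RTC status ... */
    spin_unlock_irqrestore(&rtc_lock, flags);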
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c index b04ea9b5ae67..51e52446eacb 100644 --- a/drivers/rtc/rtc-omap.c +++ b/drivers/rtc/rtc-omap.c | |||
@@ -113,6 +113,7 @@ | |||
113 | /* OMAP_RTC_OSC_REG bit fields: */ | 113 | /* OMAP_RTC_OSC_REG bit fields: */ |
114 | #define OMAP_RTC_OSC_32KCLK_EN BIT(6) | 114 | #define OMAP_RTC_OSC_32KCLK_EN BIT(6) |
115 | #define OMAP_RTC_OSC_SEL_32KCLK_SRC BIT(3) | 115 | #define OMAP_RTC_OSC_SEL_32KCLK_SRC BIT(3) |
116 | #define OMAP_RTC_OSC_OSC32K_GZ_DISABLE BIT(4) | ||
116 | 117 | ||
117 | /* OMAP_RTC_IRQWAKEEN bit fields: */ | 118 | /* OMAP_RTC_IRQWAKEEN bit fields: */ |
118 | #define OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN BIT(1) | 119 | #define OMAP_RTC_IRQWAKEEN_ALARM_WAKEEN BIT(1) |
@@ -146,6 +147,7 @@ struct omap_rtc { | |||
146 | u8 interrupts_reg; | 147 | u8 interrupts_reg; |
147 | bool is_pmic_controller; | 148 | bool is_pmic_controller; |
148 | bool has_ext_clk; | 149 | bool has_ext_clk; |
150 | bool is_suspending; | ||
149 | const struct omap_rtc_device_type *type; | 151 | const struct omap_rtc_device_type *type; |
150 | struct pinctrl_dev *pctldev; | 152 | struct pinctrl_dev *pctldev; |
151 | }; | 153 | }; |
@@ -786,8 +788,9 @@ static int omap_rtc_probe(struct platform_device *pdev) | |||
786 | */ | 788 | */ |
787 | if (rtc->has_ext_clk) { | 789 | if (rtc->has_ext_clk) { |
788 | reg = rtc_read(rtc, OMAP_RTC_OSC_REG); | 790 | reg = rtc_read(rtc, OMAP_RTC_OSC_REG); |
789 | rtc_write(rtc, OMAP_RTC_OSC_REG, | 791 | reg &= ~OMAP_RTC_OSC_OSC32K_GZ_DISABLE; |
790 | reg | OMAP_RTC_OSC_SEL_32KCLK_SRC); | 792 | reg |= OMAP_RTC_OSC_32KCLK_EN | OMAP_RTC_OSC_SEL_32KCLK_SRC; |
793 | rtc_writel(rtc, OMAP_RTC_OSC_REG, reg); | ||
791 | } | 794 | } |
792 | 795 | ||
793 | rtc->type->lock(rtc); | 796 | rtc->type->lock(rtc); |
@@ -898,8 +901,7 @@ static int omap_rtc_suspend(struct device *dev) | |||
898 | rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, 0); | 901 | rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, 0); |
899 | rtc->type->lock(rtc); | 902 | rtc->type->lock(rtc); |
900 | 903 | ||
901 | /* Disable the clock/module */ | 904 | rtc->is_suspending = true; |
902 | pm_runtime_put_sync(dev); | ||
903 | 905 | ||
904 | return 0; | 906 | return 0; |
905 | } | 907 | } |
@@ -908,9 +910,6 @@ static int omap_rtc_resume(struct device *dev) | |||
908 | { | 910 | { |
909 | struct omap_rtc *rtc = dev_get_drvdata(dev); | 911 | struct omap_rtc *rtc = dev_get_drvdata(dev); |
910 | 912 | ||
911 | /* Enable the clock/module so that we can access the registers */ | ||
912 | pm_runtime_get_sync(dev); | ||
913 | |||
914 | rtc->type->unlock(rtc); | 913 | rtc->type->unlock(rtc); |
915 | if (device_may_wakeup(dev)) | 914 | if (device_may_wakeup(dev)) |
916 | disable_irq_wake(rtc->irq_alarm); | 915 | disable_irq_wake(rtc->irq_alarm); |
@@ -918,11 +917,34 @@ static int omap_rtc_resume(struct device *dev) | |||
918 | rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, rtc->interrupts_reg); | 917 | rtc_write(rtc, OMAP_RTC_INTERRUPTS_REG, rtc->interrupts_reg); |
919 | rtc->type->lock(rtc); | 918 | rtc->type->lock(rtc); |
920 | 919 | ||
920 | rtc->is_suspending = false; | ||
921 | |||
921 | return 0; | 922 | return 0; |
922 | } | 923 | } |
923 | #endif | 924 | #endif |
924 | 925 | ||
925 | static SIMPLE_DEV_PM_OPS(omap_rtc_pm_ops, omap_rtc_suspend, omap_rtc_resume); | 926 | #ifdef CONFIG_PM |
927 | static int omap_rtc_runtime_suspend(struct device *dev) | ||
928 | { | ||
929 | struct omap_rtc *rtc = dev_get_drvdata(dev); | ||
930 | |||
931 | if (rtc->is_suspending && !rtc->has_ext_clk) | ||
932 | return -EBUSY; | ||
933 | |||
934 | return 0; | ||
935 | } | ||
936 | |||
937 | static int omap_rtc_runtime_resume(struct device *dev) | ||
938 | { | ||
939 | return 0; | ||
940 | } | ||
941 | #endif | ||
942 | |||
943 | static const struct dev_pm_ops omap_rtc_pm_ops = { | ||
944 | SET_SYSTEM_SLEEP_PM_OPS(omap_rtc_suspend, omap_rtc_resume) | ||
945 | SET_RUNTIME_PM_OPS(omap_rtc_runtime_suspend, | ||
946 | omap_rtc_runtime_resume, NULL) | ||
947 | }; | ||
926 | 948 | ||
927 | static void omap_rtc_shutdown(struct platform_device *pdev) | 949 | static void omap_rtc_shutdown(struct platform_device *pdev) |
928 | { | 950 | { |
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 8aa769a2d919..91b70bc46e7f 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c | |||
@@ -4010,7 +4010,10 @@ _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status) | |||
4010 | SAM_STAT_CHECK_CONDITION; | 4010 | SAM_STAT_CHECK_CONDITION; |
4011 | } | 4011 | } |
4012 | 4012 | ||
4013 | 4013 | static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd) | |
4014 | { | ||
4015 | return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16); | ||
4016 | } | ||
4014 | 4017 | ||
4015 | /** | 4018 | /** |
4016 | * scsih_qcmd - main scsi request entry point | 4019 | * scsih_qcmd - main scsi request entry point |
@@ -4038,6 +4041,13 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) | |||
4038 | if (ioc->logging_level & MPT_DEBUG_SCSI) | 4041 | if (ioc->logging_level & MPT_DEBUG_SCSI) |
4039 | scsi_print_command(scmd); | 4042 | scsi_print_command(scmd); |
4040 | 4043 | ||
4044 | /* | ||
4045 | * Lock the device for any subsequent command until command is | ||
4046 | * done. | ||
4047 | */ | ||
4048 | if (ata_12_16_cmd(scmd)) | ||
4049 | scsi_internal_device_block(scmd->device); | ||
4050 | |||
4041 | sas_device_priv_data = scmd->device->hostdata; | 4051 | sas_device_priv_data = scmd->device->hostdata; |
4042 | if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { | 4052 | if (!sas_device_priv_data || !sas_device_priv_data->sas_target) { |
4043 | scmd->result = DID_NO_CONNECT << 16; | 4053 | scmd->result = DID_NO_CONNECT << 16; |
@@ -4613,6 +4623,9 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) | |||
4613 | if (scmd == NULL) | 4623 | if (scmd == NULL) |
4614 | return 1; | 4624 | return 1; |
4615 | 4625 | ||
4626 | if (ata_12_16_cmd(scmd)) | ||
4627 | scsi_internal_device_unblock(scmd->device, SDEV_RUNNING); | ||
4628 | |||
4616 | mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); | 4629 | mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); |
4617 | 4630 | ||
4618 | if (mpi_reply == NULL) { | 4631 | if (mpi_reply == NULL) { |
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 567fa080e261..56d6142852a5 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
@@ -1456,15 +1456,20 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) | |||
1456 | for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { | 1456 | for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { |
1457 | sp = req->outstanding_cmds[cnt]; | 1457 | sp = req->outstanding_cmds[cnt]; |
1458 | if (sp) { | 1458 | if (sp) { |
1459 | /* Get a reference to the sp and drop the lock. | 1459 | /* Don't abort commands in adapter during EEH |
1460 | * The reference ensures this sp->done() call | 1460 | * recovery as it's not accessible/responding. |
1461 | * - and not the call in qla2xxx_eh_abort() - | ||
1462 | * ends the SCSI command (with result 'res'). | ||
1463 | */ | 1461 | */ |
1464 | sp_get(sp); | 1462 | if (!ha->flags.eeh_busy) { |
1465 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | 1463 | /* Get a reference to the sp and drop the lock. |
1466 | qla2xxx_eh_abort(GET_CMD_SP(sp)); | 1464 | * The reference ensures this sp->done() call |
1467 | spin_lock_irqsave(&ha->hardware_lock, flags); | 1465 | * - and not the call in qla2xxx_eh_abort() - |
1466 | * ends the SCSI command (with result 'res'). | ||
1467 | */ | ||
1468 | sp_get(sp); | ||
1469 | spin_unlock_irqrestore(&ha->hardware_lock, flags); | ||
1470 | qla2xxx_eh_abort(GET_CMD_SP(sp)); | ||
1471 | spin_lock_irqsave(&ha->hardware_lock, flags); | ||
1472 | } | ||
1468 | req->outstanding_cmds[cnt] = NULL; | 1473 | req->outstanding_cmds[cnt] = NULL; |
1469 | sp->done(vha, sp, res); | 1474 | sp->done(vha, sp, res); |
1470 | } | 1475 | } |
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c index 7a223074df3d..afada655f861 100644 --- a/drivers/thermal/intel_powerclamp.c +++ b/drivers/thermal/intel_powerclamp.c | |||
@@ -669,9 +669,16 @@ static struct thermal_cooling_device_ops powerclamp_cooling_ops = { | |||
669 | .set_cur_state = powerclamp_set_cur_state, | 669 | .set_cur_state = powerclamp_set_cur_state, |
670 | }; | 670 | }; |
671 | 671 | ||
672 | static const struct x86_cpu_id __initconst intel_powerclamp_ids[] = { | ||
673 | { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_MWAIT }, | ||
674 | {} | ||
675 | }; | ||
676 | MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids); | ||
677 | |||
672 | static int __init powerclamp_probe(void) | 678 | static int __init powerclamp_probe(void) |
673 | { | 679 | { |
674 | if (!boot_cpu_has(X86_FEATURE_MWAIT)) { | 680 | |
681 | if (!x86_match_cpu(intel_powerclamp_ids)) { | ||
675 | pr_err("CPU does not support MWAIT"); | 682 | pr_err("CPU does not support MWAIT"); |
676 | return -ENODEV; | 683 | return -ENODEV; |
677 | } | 684 | } |
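The powerclamp hunk above swaps an open-coded boot_cpu_has(X86_FEATURE_MWAIT) test for an x86_cpu_id table checked with x86_match_cpu(), and publishes the table with MODULE_DEVICE_TABLE(x86cpu, ...) so the module can be autoloaded on matching CPUs. A minimal sketch of the same pattern, with my_cpu_ids/my_driver_init as illustrative names rather than anything in the driver:

#include <linux/module.h>
#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>

/* Match any Intel CPU advertising MWAIT; the empty entry terminates the
 * table, and MODULE_DEVICE_TABLE() emits the modalias used for autoload. */
static const struct x86_cpu_id my_cpu_ids[] = {
	{ X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_MWAIT },
	{}
};
MODULE_DEVICE_TABLE(x86cpu, my_cpu_ids);

static int __init my_driver_init(void)
{
	if (!x86_match_cpu(my_cpu_ids))
		return -ENODEV;		/* refuse to load on unsupported CPUs */
	return 0;
}
module_init(my_driver_init);
MODULE_LICENSE("GPL");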
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c index 69426e644d17..3dbb4a21ab44 100644 --- a/drivers/usb/chipidea/core.c +++ b/drivers/usb/chipidea/core.c | |||
@@ -914,6 +914,7 @@ static int ci_hdrc_probe(struct platform_device *pdev) | |||
914 | if (!ci) | 914 | if (!ci) |
915 | return -ENOMEM; | 915 | return -ENOMEM; |
916 | 916 | ||
917 | spin_lock_init(&ci->lock); | ||
917 | ci->dev = dev; | 918 | ci->dev = dev; |
918 | ci->platdata = dev_get_platdata(dev); | 919 | ci->platdata = dev_get_platdata(dev); |
919 | ci->imx28_write_fix = !!(ci->platdata->flags & | 920 | ci->imx28_write_fix = !!(ci->platdata->flags & |
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index a7b383dc3d07..f5320d66019a 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c | |||
@@ -1889,8 +1889,6 @@ static int udc_start(struct ci_hdrc *ci) | |||
1889 | struct usb_otg_caps *otg_caps = &ci->platdata->ci_otg_caps; | 1889 | struct usb_otg_caps *otg_caps = &ci->platdata->ci_otg_caps; |
1890 | int retval = 0; | 1890 | int retval = 0; |
1891 | 1891 | ||
1892 | spin_lock_init(&ci->lock); | ||
1893 | |||
1894 | ci->gadget.ops = &usb_gadget_ops; | 1892 | ci->gadget.ops = &usb_gadget_ops; |
1895 | ci->gadget.speed = USB_SPEED_UNKNOWN; | 1893 | ci->gadget.speed = USB_SPEED_UNKNOWN; |
1896 | ci->gadget.max_speed = USB_SPEED_HIGH; | 1894 | ci->gadget.max_speed = USB_SPEED_HIGH; |
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index dff72a873297..0780d8311ec6 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c | |||
@@ -3225,11 +3225,11 @@ static bool ffs_func_req_match(struct usb_function *f, | |||
3225 | 3225 | ||
3226 | switch (creq->bRequestType & USB_RECIP_MASK) { | 3226 | switch (creq->bRequestType & USB_RECIP_MASK) { |
3227 | case USB_RECIP_INTERFACE: | 3227 | case USB_RECIP_INTERFACE: |
3228 | return ffs_func_revmap_intf(func, | 3228 | return (ffs_func_revmap_intf(func, |
3229 | le16_to_cpu(creq->wIndex) >= 0); | 3229 | le16_to_cpu(creq->wIndex)) >= 0); |
3230 | case USB_RECIP_ENDPOINT: | 3230 | case USB_RECIP_ENDPOINT: |
3231 | return ffs_func_revmap_ep(func, | 3231 | return (ffs_func_revmap_ep(func, |
3232 | le16_to_cpu(creq->wIndex) >= 0); | 3232 | le16_to_cpu(creq->wIndex)) >= 0); |
3233 | default: | 3233 | default: |
3234 | return (bool) (func->ffs->user_flags & | 3234 | return (bool) (func->ffs->user_flags & |
3235 | FUNCTIONFS_ALL_CTRL_RECIP); | 3235 | FUNCTIONFS_ALL_CTRL_RECIP); |
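The f_fs change above is purely a parenthesis fix: in the old code the ">= 0" comparison was evaluated first and passed as the second argument, so the reverse-map helpers were always called with 0 or 1 instead of the wIndex value. A small stand-alone C illustration of the two forms, where revmap() is a hypothetical stand-in for ffs_func_revmap_intf()/ffs_func_revmap_ep():

#include <stdio.h>

/* Returns the mapped value for idx, or -1 when there is no mapping. */
static int revmap(const int *table, int n, int idx)
{
	return (idx >= 0 && idx < n) ? table[idx] : -1;
}

int main(void)
{
	const int table[] = { 7, 3 };
	int wIndex = 1;

	/* Old form: "wIndex >= 0" is computed first, so revmap() is always
	 * asked about index 0 or 1, whatever the real wIndex was. */
	int old_result = revmap(table, 2, wIndex >= 0);

	/* Fixed form: map wIndex first, then test whether a mapping exists. */
	int new_result = (revmap(table, 2, wIndex) >= 0);

	printf("old=%d new=%d\n", old_result, new_result);	/* old=3 new=1 */
	return 0;
}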
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index a109f22fefcd..9e226468a13e 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c | |||
@@ -1002,7 +1002,7 @@ b_host: | |||
1002 | } | 1002 | } |
1003 | #endif | 1003 | #endif |
1004 | 1004 | ||
1005 | schedule_work(&musb->irq_work); | 1005 | schedule_delayed_work(&musb->irq_work, 0); |
1006 | 1006 | ||
1007 | return handled; | 1007 | return handled; |
1008 | } | 1008 | } |
@@ -1871,14 +1871,23 @@ static void musb_pm_runtime_check_session(struct musb *musb) | |||
1871 | MUSB_DEVCTL_HR; | 1871 | MUSB_DEVCTL_HR; |
1872 | switch (devctl & ~s) { | 1872 | switch (devctl & ~s) { |
1873 | case MUSB_QUIRK_B_INVALID_VBUS_91: | 1873 | case MUSB_QUIRK_B_INVALID_VBUS_91: |
1874 | if (!musb->session && !musb->quirk_invalid_vbus) { | 1874 | if (musb->quirk_retries--) { |
1875 | musb->quirk_invalid_vbus = true; | ||
1876 | musb_dbg(musb, | 1875 | musb_dbg(musb, |
1877 | "First invalid vbus, assume no session"); | 1876 | "Poll devctl on invalid vbus, assume no session"); |
1877 | schedule_delayed_work(&musb->irq_work, | ||
1878 | msecs_to_jiffies(1000)); | ||
1879 | |||
1878 | return; | 1880 | return; |
1879 | } | 1881 | } |
1880 | break; | ||
1881 | case MUSB_QUIRK_A_DISCONNECT_19: | 1882 | case MUSB_QUIRK_A_DISCONNECT_19: |
1883 | if (musb->quirk_retries--) { | ||
1884 | musb_dbg(musb, | ||
1885 | "Poll devctl on possible host mode disconnect"); | ||
1886 | schedule_delayed_work(&musb->irq_work, | ||
1887 | msecs_to_jiffies(1000)); | ||
1888 | |||
1889 | return; | ||
1890 | } | ||
1882 | if (!musb->session) | 1891 | if (!musb->session) |
1883 | break; | 1892 | break; |
1884 | musb_dbg(musb, "Allow PM on possible host mode disconnect"); | 1893 | musb_dbg(musb, "Allow PM on possible host mode disconnect"); |
@@ -1902,9 +1911,9 @@ static void musb_pm_runtime_check_session(struct musb *musb) | |||
1902 | if (error < 0) | 1911 | if (error < 0) |
1903 | dev_err(musb->controller, "Could not enable: %i\n", | 1912 | dev_err(musb->controller, "Could not enable: %i\n", |
1904 | error); | 1913 | error); |
1914 | musb->quirk_retries = 3; | ||
1905 | } else { | 1915 | } else { |
1906 | musb_dbg(musb, "Allow PM with no session: %02x", devctl); | 1916 | musb_dbg(musb, "Allow PM with no session: %02x", devctl); |
1907 | musb->quirk_invalid_vbus = false; | ||
1908 | pm_runtime_mark_last_busy(musb->controller); | 1917 | pm_runtime_mark_last_busy(musb->controller); |
1909 | pm_runtime_put_autosuspend(musb->controller); | 1918 | pm_runtime_put_autosuspend(musb->controller); |
1910 | } | 1919 | } |
@@ -1915,7 +1924,7 @@ static void musb_pm_runtime_check_session(struct musb *musb) | |||
1915 | /* Only used to provide driver mode change events */ | 1924 | /* Only used to provide driver mode change events */ |
1916 | static void musb_irq_work(struct work_struct *data) | 1925 | static void musb_irq_work(struct work_struct *data) |
1917 | { | 1926 | { |
1918 | struct musb *musb = container_of(data, struct musb, irq_work); | 1927 | struct musb *musb = container_of(data, struct musb, irq_work.work); |
1919 | 1928 | ||
1920 | musb_pm_runtime_check_session(musb); | 1929 | musb_pm_runtime_check_session(musb); |
1921 | 1930 | ||
@@ -1985,6 +1994,7 @@ static struct musb *allocate_instance(struct device *dev, | |||
1985 | INIT_LIST_HEAD(&musb->control); | 1994 | INIT_LIST_HEAD(&musb->control); |
1986 | INIT_LIST_HEAD(&musb->in_bulk); | 1995 | INIT_LIST_HEAD(&musb->in_bulk); |
1987 | INIT_LIST_HEAD(&musb->out_bulk); | 1996 | INIT_LIST_HEAD(&musb->out_bulk); |
1997 | INIT_LIST_HEAD(&musb->pending_list); | ||
1988 | 1998 | ||
1989 | musb->vbuserr_retry = VBUSERR_RETRY_COUNT; | 1999 | musb->vbuserr_retry = VBUSERR_RETRY_COUNT; |
1990 | musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON; | 2000 | musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON; |
@@ -2034,6 +2044,84 @@ static void musb_free(struct musb *musb) | |||
2034 | musb_host_free(musb); | 2044 | musb_host_free(musb); |
2035 | } | 2045 | } |
2036 | 2046 | ||
2047 | struct musb_pending_work { | ||
2048 | int (*callback)(struct musb *musb, void *data); | ||
2049 | void *data; | ||
2050 | struct list_head node; | ||
2051 | }; | ||
2052 | |||
2053 | /* | ||
2054 | * Called from musb_runtime_resume(), musb_resume(), and | ||
2055 | * musb_queue_resume_work(). Callers must take musb->lock. | ||
2056 | */ | ||
2057 | static int musb_run_resume_work(struct musb *musb) | ||
2058 | { | ||
2059 | struct musb_pending_work *w, *_w; | ||
2060 | unsigned long flags; | ||
2061 | int error = 0; | ||
2062 | |||
2063 | spin_lock_irqsave(&musb->list_lock, flags); | ||
2064 | list_for_each_entry_safe(w, _w, &musb->pending_list, node) { | ||
2065 | if (w->callback) { | ||
2066 | error = w->callback(musb, w->data); | ||
2067 | if (error < 0) { | ||
2068 | dev_err(musb->controller, | ||
2069 | "resume callback %p failed: %i\n", | ||
2070 | w->callback, error); | ||
2071 | } | ||
2072 | } | ||
2073 | list_del(&w->node); | ||
2074 | devm_kfree(musb->controller, w); | ||
2075 | } | ||
2076 | spin_unlock_irqrestore(&musb->list_lock, flags); | ||
2077 | |||
2078 | return error; | ||
2079 | } | ||
2080 | |||
2081 | /* | ||
2082 | * Called to run work if device is active or else queue the work to happen | ||
2083 | * on resume. Caller must take musb->lock and must hold an RPM reference. | ||
2084 | * | ||
2085 | * Note that we cowardly refuse queuing work after musb PM runtime | ||
2086 | * resume is done calling musb_run_resume_work() and return -EINPROGRESS | ||
2087 | * instead. | ||
2088 | */ | ||
2089 | int musb_queue_resume_work(struct musb *musb, | ||
2090 | int (*callback)(struct musb *musb, void *data), | ||
2091 | void *data) | ||
2092 | { | ||
2093 | struct musb_pending_work *w; | ||
2094 | unsigned long flags; | ||
2095 | int error; | ||
2096 | |||
2097 | if (WARN_ON(!callback)) | ||
2098 | return -EINVAL; | ||
2099 | |||
2100 | if (pm_runtime_active(musb->controller)) | ||
2101 | return callback(musb, data); | ||
2102 | |||
2103 | w = devm_kzalloc(musb->controller, sizeof(*w), GFP_ATOMIC); | ||
2104 | if (!w) | ||
2105 | return -ENOMEM; | ||
2106 | |||
2107 | w->callback = callback; | ||
2108 | w->data = data; | ||
2109 | spin_lock_irqsave(&musb->list_lock, flags); | ||
2110 | if (musb->is_runtime_suspended) { | ||
2111 | list_add_tail(&w->node, &musb->pending_list); | ||
2112 | error = 0; | ||
2113 | } else { | ||
2114 | dev_err(musb->controller, "could not add resume work %p\n", | ||
2115 | callback); | ||
2116 | devm_kfree(musb->controller, w); | ||
2117 | error = -EINPROGRESS; | ||
2118 | } | ||
2119 | spin_unlock_irqrestore(&musb->list_lock, flags); | ||
2120 | |||
2121 | return error; | ||
2122 | } | ||
2123 | EXPORT_SYMBOL_GPL(musb_queue_resume_work); | ||
2124 | |||
2037 | static void musb_deassert_reset(struct work_struct *work) | 2125 | static void musb_deassert_reset(struct work_struct *work) |
2038 | { | 2126 | { |
2039 | struct musb *musb; | 2127 | struct musb *musb; |
@@ -2081,6 +2169,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) | |||
2081 | } | 2169 | } |
2082 | 2170 | ||
2083 | spin_lock_init(&musb->lock); | 2171 | spin_lock_init(&musb->lock); |
2172 | spin_lock_init(&musb->list_lock); | ||
2084 | musb->board_set_power = plat->set_power; | 2173 | musb->board_set_power = plat->set_power; |
2085 | musb->min_power = plat->min_power; | 2174 | musb->min_power = plat->min_power; |
2086 | musb->ops = plat->platform_ops; | 2175 | musb->ops = plat->platform_ops; |
@@ -2224,7 +2313,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) | |||
2224 | musb_generic_disable(musb); | 2313 | musb_generic_disable(musb); |
2225 | 2314 | ||
2226 | /* Init IRQ workqueue before request_irq */ | 2315 | /* Init IRQ workqueue before request_irq */ |
2227 | INIT_WORK(&musb->irq_work, musb_irq_work); | 2316 | INIT_DELAYED_WORK(&musb->irq_work, musb_irq_work); |
2228 | INIT_DELAYED_WORK(&musb->deassert_reset_work, musb_deassert_reset); | 2317 | INIT_DELAYED_WORK(&musb->deassert_reset_work, musb_deassert_reset); |
2229 | INIT_DELAYED_WORK(&musb->finish_resume_work, musb_host_finish_resume); | 2318 | INIT_DELAYED_WORK(&musb->finish_resume_work, musb_host_finish_resume); |
2230 | 2319 | ||
@@ -2307,6 +2396,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl) | |||
2307 | if (status) | 2396 | if (status) |
2308 | goto fail5; | 2397 | goto fail5; |
2309 | 2398 | ||
2399 | musb->is_initialized = 1; | ||
2310 | pm_runtime_mark_last_busy(musb->controller); | 2400 | pm_runtime_mark_last_busy(musb->controller); |
2311 | pm_runtime_put_autosuspend(musb->controller); | 2401 | pm_runtime_put_autosuspend(musb->controller); |
2312 | 2402 | ||
@@ -2320,7 +2410,7 @@ fail4: | |||
2320 | musb_host_cleanup(musb); | 2410 | musb_host_cleanup(musb); |
2321 | 2411 | ||
2322 | fail3: | 2412 | fail3: |
2323 | cancel_work_sync(&musb->irq_work); | 2413 | cancel_delayed_work_sync(&musb->irq_work); |
2324 | cancel_delayed_work_sync(&musb->finish_resume_work); | 2414 | cancel_delayed_work_sync(&musb->finish_resume_work); |
2325 | cancel_delayed_work_sync(&musb->deassert_reset_work); | 2415 | cancel_delayed_work_sync(&musb->deassert_reset_work); |
2326 | if (musb->dma_controller) | 2416 | if (musb->dma_controller) |
@@ -2388,7 +2478,7 @@ static int musb_remove(struct platform_device *pdev) | |||
2388 | */ | 2478 | */ |
2389 | musb_exit_debugfs(musb); | 2479 | musb_exit_debugfs(musb); |
2390 | 2480 | ||
2391 | cancel_work_sync(&musb->irq_work); | 2481 | cancel_delayed_work_sync(&musb->irq_work); |
2392 | cancel_delayed_work_sync(&musb->finish_resume_work); | 2482 | cancel_delayed_work_sync(&musb->finish_resume_work); |
2393 | cancel_delayed_work_sync(&musb->deassert_reset_work); | 2483 | cancel_delayed_work_sync(&musb->deassert_reset_work); |
2394 | pm_runtime_get_sync(musb->controller); | 2484 | pm_runtime_get_sync(musb->controller); |
@@ -2574,6 +2664,7 @@ static int musb_suspend(struct device *dev) | |||
2574 | 2664 | ||
2575 | musb_platform_disable(musb); | 2665 | musb_platform_disable(musb); |
2576 | musb_generic_disable(musb); | 2666 | musb_generic_disable(musb); |
2667 | WARN_ON(!list_empty(&musb->pending_list)); | ||
2577 | 2668 | ||
2578 | spin_lock_irqsave(&musb->lock, flags); | 2669 | spin_lock_irqsave(&musb->lock, flags); |
2579 | 2670 | ||
@@ -2595,9 +2686,11 @@ static int musb_suspend(struct device *dev) | |||
2595 | 2686 | ||
2596 | static int musb_resume(struct device *dev) | 2687 | static int musb_resume(struct device *dev) |
2597 | { | 2688 | { |
2598 | struct musb *musb = dev_to_musb(dev); | 2689 | struct musb *musb = dev_to_musb(dev); |
2599 | u8 devctl; | 2690 | unsigned long flags; |
2600 | u8 mask; | 2691 | int error; |
2692 | u8 devctl; | ||
2693 | u8 mask; | ||
2601 | 2694 | ||
2602 | /* | 2695 | /* |
2603 | * For static cmos like DaVinci, register values were preserved | 2696 | * For static cmos like DaVinci, register values were preserved |
@@ -2631,6 +2724,13 @@ static int musb_resume(struct device *dev) | |||
2631 | 2724 | ||
2632 | musb_start(musb); | 2725 | musb_start(musb); |
2633 | 2726 | ||
2727 | spin_lock_irqsave(&musb->lock, flags); | ||
2728 | error = musb_run_resume_work(musb); | ||
2729 | if (error) | ||
2730 | dev_err(musb->controller, "resume work failed with %i\n", | ||
2731 | error); | ||
2732 | spin_unlock_irqrestore(&musb->lock, flags); | ||
2733 | |||
2634 | return 0; | 2734 | return 0; |
2635 | } | 2735 | } |
2636 | 2736 | ||
@@ -2639,14 +2739,16 @@ static int musb_runtime_suspend(struct device *dev) | |||
2639 | struct musb *musb = dev_to_musb(dev); | 2739 | struct musb *musb = dev_to_musb(dev); |
2640 | 2740 | ||
2641 | musb_save_context(musb); | 2741 | musb_save_context(musb); |
2742 | musb->is_runtime_suspended = 1; | ||
2642 | 2743 | ||
2643 | return 0; | 2744 | return 0; |
2644 | } | 2745 | } |
2645 | 2746 | ||
2646 | static int musb_runtime_resume(struct device *dev) | 2747 | static int musb_runtime_resume(struct device *dev) |
2647 | { | 2748 | { |
2648 | struct musb *musb = dev_to_musb(dev); | 2749 | struct musb *musb = dev_to_musb(dev); |
2649 | static int first = 1; | 2750 | unsigned long flags; |
2751 | int error; | ||
2650 | 2752 | ||
2651 | /* | 2753 | /* |
2652 | * When pm_runtime_get_sync called for the first time in driver | 2754 | * When pm_runtime_get_sync called for the first time in driver |
@@ -2657,9 +2759,10 @@ static int musb_runtime_resume(struct device *dev) | |||
2657 | * Also context restore without save does not make | 2759 | * Also context restore without save does not make |
2658 | * any sense | 2760 | * any sense |
2659 | */ | 2761 | */ |
2660 | if (!first) | 2762 | if (!musb->is_initialized) |
2661 | musb_restore_context(musb); | 2763 | return 0; |
2662 | first = 0; | 2764 | |
2765 | musb_restore_context(musb); | ||
2663 | 2766 | ||
2664 | if (musb->need_finish_resume) { | 2767 | if (musb->need_finish_resume) { |
2665 | musb->need_finish_resume = 0; | 2768 | musb->need_finish_resume = 0; |
@@ -2667,6 +2770,14 @@ static int musb_runtime_resume(struct device *dev) | |||
2667 | msecs_to_jiffies(USB_RESUME_TIMEOUT)); | 2770 | msecs_to_jiffies(USB_RESUME_TIMEOUT)); |
2668 | } | 2771 | } |
2669 | 2772 | ||
2773 | spin_lock_irqsave(&musb->lock, flags); | ||
2774 | error = musb_run_resume_work(musb); | ||
2775 | if (error) | ||
2776 | dev_err(musb->controller, "resume work failed with %i\n", | ||
2777 | error); | ||
2778 | musb->is_runtime_suspended = 0; | ||
2779 | spin_unlock_irqrestore(&musb->lock, flags); | ||
2780 | |||
2670 | return 0; | 2781 | return 0; |
2671 | } | 2782 | } |
2672 | 2783 | ||
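The musb_core.c changes above convert irq_work to delayed work and introduce a small deferral mechanism: musb_queue_resume_work() runs its callback immediately when the controller is runtime-active, otherwise it parks the callback on pending_list (under list_lock) so that musb_runtime_resume() and musb_resume() can replay it through musb_run_resume_work(). A rough sketch of the caller contract, with my_glue_check()/my_glue_poll() as hypothetical names (the dsps and gadget hunks below are the in-tree users):

/* Invoked with musb->lock held, either immediately or from the resume
 * path once the hardware is accessible again. */
static int my_glue_check(struct musb *musb, void *data)
{
	/* poke registers, rearm timers, restart queued transfers, ... */
	return 0;
}

static void my_glue_poll(struct musb *musb)
{
	unsigned long flags;
	int err;

	/* Asynchronous get: safe in atomic context; -EINPROGRESS just means
	 * the resume is still pending and the work will run later. */
	err = pm_runtime_get(musb->controller);
	if (err != -EINPROGRESS && err < 0) {
		pm_runtime_put_noidle(musb->controller);
		return;
	}

	spin_lock_irqsave(&musb->lock, flags);
	err = musb_queue_resume_work(musb, my_glue_check, NULL);
	spin_unlock_irqrestore(&musb->lock, flags);
	if (err < 0)
		dev_err(musb->controller, "resume work: %i\n", err);

	pm_runtime_mark_last_busy(musb->controller);
	pm_runtime_put_autosuspend(musb->controller);
}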
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h index 76f00f61b874..a611e2f67bdc 100644 --- a/drivers/usb/musb/musb_core.h +++ b/drivers/usb/musb/musb_core.h | |||
@@ -303,13 +303,14 @@ struct musb_context_registers { | |||
303 | struct musb { | 303 | struct musb { |
304 | /* device lock */ | 304 | /* device lock */ |
305 | spinlock_t lock; | 305 | spinlock_t lock; |
306 | spinlock_t list_lock; /* resume work list lock */ | ||
306 | 307 | ||
307 | struct musb_io io; | 308 | struct musb_io io; |
308 | const struct musb_platform_ops *ops; | 309 | const struct musb_platform_ops *ops; |
309 | struct musb_context_registers context; | 310 | struct musb_context_registers context; |
310 | 311 | ||
311 | irqreturn_t (*isr)(int, void *); | 312 | irqreturn_t (*isr)(int, void *); |
312 | struct work_struct irq_work; | 313 | struct delayed_work irq_work; |
313 | struct delayed_work deassert_reset_work; | 314 | struct delayed_work deassert_reset_work; |
314 | struct delayed_work finish_resume_work; | 315 | struct delayed_work finish_resume_work; |
315 | struct delayed_work gadget_work; | 316 | struct delayed_work gadget_work; |
@@ -337,6 +338,7 @@ struct musb { | |||
337 | struct list_head control; /* of musb_qh */ | 338 | struct list_head control; /* of musb_qh */ |
338 | struct list_head in_bulk; /* of musb_qh */ | 339 | struct list_head in_bulk; /* of musb_qh */ |
339 | struct list_head out_bulk; /* of musb_qh */ | 340 | struct list_head out_bulk; /* of musb_qh */ |
341 | struct list_head pending_list; /* pending work list */ | ||
340 | 342 | ||
341 | struct timer_list otg_timer; | 343 | struct timer_list otg_timer; |
342 | struct notifier_block nb; | 344 | struct notifier_block nb; |
@@ -379,12 +381,15 @@ struct musb { | |||
379 | 381 | ||
380 | int port_mode; /* MUSB_PORT_MODE_* */ | 382 | int port_mode; /* MUSB_PORT_MODE_* */ |
381 | bool session; | 383 | bool session; |
382 | bool quirk_invalid_vbus; | 384 | unsigned long quirk_retries; |
383 | bool is_host; | 385 | bool is_host; |
384 | 386 | ||
385 | int a_wait_bcon; /* VBUS timeout in msecs */ | 387 | int a_wait_bcon; /* VBUS timeout in msecs */ |
386 | unsigned long idle_timeout; /* Next timeout in jiffies */ | 388 | unsigned long idle_timeout; /* Next timeout in jiffies */ |
387 | 389 | ||
390 | unsigned is_initialized:1; | ||
391 | unsigned is_runtime_suspended:1; | ||
392 | |||
388 | /* active means connected and not suspended */ | 393 | /* active means connected and not suspended */ |
389 | unsigned is_active:1; | 394 | unsigned is_active:1; |
390 | 395 | ||
@@ -540,6 +545,10 @@ extern irqreturn_t musb_interrupt(struct musb *); | |||
540 | 545 | ||
541 | extern void musb_hnp_stop(struct musb *musb); | 546 | extern void musb_hnp_stop(struct musb *musb); |
542 | 547 | ||
548 | int musb_queue_resume_work(struct musb *musb, | ||
549 | int (*callback)(struct musb *musb, void *data), | ||
550 | void *data); | ||
551 | |||
543 | static inline void musb_platform_set_vbus(struct musb *musb, int is_on) | 552 | static inline void musb_platform_set_vbus(struct musb *musb, int is_on) |
544 | { | 553 | { |
545 | if (musb->ops->set_vbus) | 554 | if (musb->ops->set_vbus) |
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c index 0f17d2140db6..feae1561b9ab 100644 --- a/drivers/usb/musb/musb_dsps.c +++ b/drivers/usb/musb/musb_dsps.c | |||
@@ -185,24 +185,19 @@ static void dsps_musb_disable(struct musb *musb) | |||
185 | musb_writel(reg_base, wrp->coreintr_clear, wrp->usb_bitmap); | 185 | musb_writel(reg_base, wrp->coreintr_clear, wrp->usb_bitmap); |
186 | musb_writel(reg_base, wrp->epintr_clear, | 186 | musb_writel(reg_base, wrp->epintr_clear, |
187 | wrp->txep_bitmap | wrp->rxep_bitmap); | 187 | wrp->txep_bitmap | wrp->rxep_bitmap); |
188 | del_timer_sync(&glue->timer); | ||
188 | musb_writeb(musb->mregs, MUSB_DEVCTL, 0); | 189 | musb_writeb(musb->mregs, MUSB_DEVCTL, 0); |
189 | } | 190 | } |
190 | 191 | ||
191 | static void otg_timer(unsigned long _musb) | 192 | /* Caller must take musb->lock */ |
193 | static int dsps_check_status(struct musb *musb, void *unused) | ||
192 | { | 194 | { |
193 | struct musb *musb = (void *)_musb; | ||
194 | void __iomem *mregs = musb->mregs; | 195 | void __iomem *mregs = musb->mregs; |
195 | struct device *dev = musb->controller; | 196 | struct device *dev = musb->controller; |
196 | struct dsps_glue *glue = dev_get_drvdata(dev->parent); | 197 | struct dsps_glue *glue = dev_get_drvdata(dev->parent); |
197 | const struct dsps_musb_wrapper *wrp = glue->wrp; | 198 | const struct dsps_musb_wrapper *wrp = glue->wrp; |
198 | u8 devctl; | 199 | u8 devctl; |
199 | unsigned long flags; | ||
200 | int skip_session = 0; | 200 | int skip_session = 0; |
201 | int err; | ||
202 | |||
203 | err = pm_runtime_get_sync(dev); | ||
204 | if (err < 0) | ||
205 | dev_err(dev, "Poll could not pm_runtime_get: %i\n", err); | ||
206 | 201 | ||
207 | /* | 202 | /* |
208 | * We poll because DSPS IP's won't expose several OTG-critical | 203 | * We poll because DSPS IP's won't expose several OTG-critical |
@@ -212,7 +207,6 @@ static void otg_timer(unsigned long _musb) | |||
212 | dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl, | 207 | dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl, |
213 | usb_otg_state_string(musb->xceiv->otg->state)); | 208 | usb_otg_state_string(musb->xceiv->otg->state)); |
214 | 209 | ||
215 | spin_lock_irqsave(&musb->lock, flags); | ||
216 | switch (musb->xceiv->otg->state) { | 210 | switch (musb->xceiv->otg->state) { |
217 | case OTG_STATE_A_WAIT_VRISE: | 211 | case OTG_STATE_A_WAIT_VRISE: |
218 | mod_timer(&glue->timer, jiffies + | 212 | mod_timer(&glue->timer, jiffies + |
@@ -245,8 +239,30 @@ static void otg_timer(unsigned long _musb) | |||
245 | default: | 239 | default: |
246 | break; | 240 | break; |
247 | } | 241 | } |
248 | spin_unlock_irqrestore(&musb->lock, flags); | ||
249 | 242 | ||
243 | return 0; | ||
244 | } | ||
245 | |||
246 | static void otg_timer(unsigned long _musb) | ||
247 | { | ||
248 | struct musb *musb = (void *)_musb; | ||
249 | struct device *dev = musb->controller; | ||
250 | unsigned long flags; | ||
251 | int err; | ||
252 | |||
253 | err = pm_runtime_get(dev); | ||
254 | if ((err != -EINPROGRESS) && err < 0) { | ||
255 | dev_err(dev, "Poll could not pm_runtime_get: %i\n", err); | ||
256 | pm_runtime_put_noidle(dev); | ||
257 | |||
258 | return; | ||
259 | } | ||
260 | |||
261 | spin_lock_irqsave(&musb->lock, flags); | ||
262 | err = musb_queue_resume_work(musb, dsps_check_status, NULL); | ||
263 | if (err < 0) | ||
264 | dev_err(dev, "%s resume work: %i\n", __func__, err); | ||
265 | spin_unlock_irqrestore(&musb->lock, flags); | ||
250 | pm_runtime_mark_last_busy(dev); | 266 | pm_runtime_mark_last_busy(dev); |
251 | pm_runtime_put_autosuspend(dev); | 267 | pm_runtime_put_autosuspend(dev); |
252 | } | 268 | } |
@@ -767,28 +783,13 @@ static int dsps_probe(struct platform_device *pdev) | |||
767 | 783 | ||
768 | platform_set_drvdata(pdev, glue); | 784 | platform_set_drvdata(pdev, glue); |
769 | pm_runtime_enable(&pdev->dev); | 785 | pm_runtime_enable(&pdev->dev); |
770 | pm_runtime_use_autosuspend(&pdev->dev); | ||
771 | pm_runtime_set_autosuspend_delay(&pdev->dev, 200); | ||
772 | |||
773 | ret = pm_runtime_get_sync(&pdev->dev); | ||
774 | if (ret < 0) { | ||
775 | dev_err(&pdev->dev, "pm_runtime_get_sync FAILED"); | ||
776 | goto err2; | ||
777 | } | ||
778 | |||
779 | ret = dsps_create_musb_pdev(glue, pdev); | 786 | ret = dsps_create_musb_pdev(glue, pdev); |
780 | if (ret) | 787 | if (ret) |
781 | goto err3; | 788 | goto err; |
782 | |||
783 | pm_runtime_mark_last_busy(&pdev->dev); | ||
784 | pm_runtime_put_autosuspend(&pdev->dev); | ||
785 | 789 | ||
786 | return 0; | 790 | return 0; |
787 | 791 | ||
788 | err3: | 792 | err: |
789 | pm_runtime_put_sync(&pdev->dev); | ||
790 | err2: | ||
791 | pm_runtime_dont_use_autosuspend(&pdev->dev); | ||
792 | pm_runtime_disable(&pdev->dev); | 793 | pm_runtime_disable(&pdev->dev); |
793 | return ret; | 794 | return ret; |
794 | } | 795 | } |
@@ -799,9 +800,6 @@ static int dsps_remove(struct platform_device *pdev) | |||
799 | 800 | ||
800 | platform_device_unregister(glue->musb); | 801 | platform_device_unregister(glue->musb); |
801 | 802 | ||
802 | /* disable usbss clocks */ | ||
803 | pm_runtime_dont_use_autosuspend(&pdev->dev); | ||
804 | pm_runtime_put_sync(&pdev->dev); | ||
805 | pm_runtime_disable(&pdev->dev); | 803 | pm_runtime_disable(&pdev->dev); |
806 | 804 | ||
807 | return 0; | 805 | return 0; |
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c index 47304560f105..1acc4864f9f6 100644 --- a/drivers/usb/musb/musb_gadget.c +++ b/drivers/usb/musb/musb_gadget.c | |||
@@ -1114,7 +1114,7 @@ static int musb_gadget_enable(struct usb_ep *ep, | |||
1114 | musb_ep->dma ? "dma, " : "", | 1114 | musb_ep->dma ? "dma, " : "", |
1115 | musb_ep->packet_sz); | 1115 | musb_ep->packet_sz); |
1116 | 1116 | ||
1117 | schedule_work(&musb->irq_work); | 1117 | schedule_delayed_work(&musb->irq_work, 0); |
1118 | 1118 | ||
1119 | fail: | 1119 | fail: |
1120 | spin_unlock_irqrestore(&musb->lock, flags); | 1120 | spin_unlock_irqrestore(&musb->lock, flags); |
@@ -1158,7 +1158,7 @@ static int musb_gadget_disable(struct usb_ep *ep) | |||
1158 | musb_ep->desc = NULL; | 1158 | musb_ep->desc = NULL; |
1159 | musb_ep->end_point.desc = NULL; | 1159 | musb_ep->end_point.desc = NULL; |
1160 | 1160 | ||
1161 | schedule_work(&musb->irq_work); | 1161 | schedule_delayed_work(&musb->irq_work, 0); |
1162 | 1162 | ||
1163 | spin_unlock_irqrestore(&(musb->lock), flags); | 1163 | spin_unlock_irqrestore(&(musb->lock), flags); |
1164 | 1164 | ||
@@ -1222,13 +1222,22 @@ void musb_ep_restart(struct musb *musb, struct musb_request *req) | |||
1222 | rxstate(musb, req); | 1222 | rxstate(musb, req); |
1223 | } | 1223 | } |
1224 | 1224 | ||
1225 | static int musb_ep_restart_resume_work(struct musb *musb, void *data) | ||
1226 | { | ||
1227 | struct musb_request *req = data; | ||
1228 | |||
1229 | musb_ep_restart(musb, req); | ||
1230 | |||
1231 | return 0; | ||
1232 | } | ||
1233 | |||
1225 | static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, | 1234 | static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, |
1226 | gfp_t gfp_flags) | 1235 | gfp_t gfp_flags) |
1227 | { | 1236 | { |
1228 | struct musb_ep *musb_ep; | 1237 | struct musb_ep *musb_ep; |
1229 | struct musb_request *request; | 1238 | struct musb_request *request; |
1230 | struct musb *musb; | 1239 | struct musb *musb; |
1231 | int status = 0; | 1240 | int status; |
1232 | unsigned long lockflags; | 1241 | unsigned long lockflags; |
1233 | 1242 | ||
1234 | if (!ep || !req) | 1243 | if (!ep || !req) |
@@ -1245,6 +1254,17 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, | |||
1245 | if (request->ep != musb_ep) | 1254 | if (request->ep != musb_ep) |
1246 | return -EINVAL; | 1255 | return -EINVAL; |
1247 | 1256 | ||
1257 | status = pm_runtime_get(musb->controller); | ||
1258 | if ((status != -EINPROGRESS) && status < 0) { | ||
1259 | dev_err(musb->controller, | ||
1260 | "pm runtime get failed in %s\n", | ||
1261 | __func__); | ||
1262 | pm_runtime_put_noidle(musb->controller); | ||
1263 | |||
1264 | return status; | ||
1265 | } | ||
1266 | status = 0; | ||
1267 | |||
1248 | trace_musb_req_enq(request); | 1268 | trace_musb_req_enq(request); |
1249 | 1269 | ||
1250 | /* request is mine now... */ | 1270 | /* request is mine now... */ |
@@ -1255,7 +1275,6 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, | |||
1255 | 1275 | ||
1256 | map_dma_buffer(request, musb, musb_ep); | 1276 | map_dma_buffer(request, musb, musb_ep); |
1257 | 1277 | ||
1258 | pm_runtime_get_sync(musb->controller); | ||
1259 | spin_lock_irqsave(&musb->lock, lockflags); | 1278 | spin_lock_irqsave(&musb->lock, lockflags); |
1260 | 1279 | ||
1261 | /* don't queue if the ep is down */ | 1280 | /* don't queue if the ep is down */ |
@@ -1271,8 +1290,14 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req, | |||
1271 | list_add_tail(&request->list, &musb_ep->req_list); | 1290 | list_add_tail(&request->list, &musb_ep->req_list); |
1272 | 1291 | ||
1273 | /* if this is the head of the queue, start i/o ... */ | 1292 | /* if this is the head of the queue, start i/o ... */ |
1274 | if (!musb_ep->busy && &request->list == musb_ep->req_list.next) | 1293 | if (!musb_ep->busy && &request->list == musb_ep->req_list.next) { |
1275 | musb_ep_restart(musb, request); | 1294 | status = musb_queue_resume_work(musb, |
1295 | musb_ep_restart_resume_work, | ||
1296 | request); | ||
1297 | if (status < 0) | ||
1298 | dev_err(musb->controller, "%s resume work: %i\n", | ||
1299 | __func__, status); | ||
1300 | } | ||
1276 | 1301 | ||
1277 | unlock: | 1302 | unlock: |
1278 | spin_unlock_irqrestore(&musb->lock, lockflags); | 1303 | spin_unlock_irqrestore(&musb->lock, lockflags); |
@@ -1969,7 +1994,7 @@ static int musb_gadget_stop(struct usb_gadget *g) | |||
1969 | */ | 1994 | */ |
1970 | 1995 | ||
1971 | /* Force check of devctl register for PM runtime */ | 1996 | /* Force check of devctl register for PM runtime */ |
1972 | schedule_work(&musb->irq_work); | 1997 | schedule_delayed_work(&musb->irq_work, 0); |
1973 | 1998 | ||
1974 | pm_runtime_mark_last_busy(musb->controller); | 1999 | pm_runtime_mark_last_busy(musb->controller); |
1975 | pm_runtime_put_autosuspend(musb->controller); | 2000 | pm_runtime_put_autosuspend(musb->controller); |
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c index 7c9aa5e78666..8b73214a9ea3 100644 --- a/drivers/usb/musb/omap2430.c +++ b/drivers/usb/musb/omap2430.c | |||
@@ -513,17 +513,18 @@ static int omap2430_probe(struct platform_device *pdev) | |||
513 | } | 513 | } |
514 | 514 | ||
515 | pm_runtime_enable(glue->dev); | 515 | pm_runtime_enable(glue->dev); |
516 | pm_runtime_use_autosuspend(glue->dev); | ||
517 | pm_runtime_set_autosuspend_delay(glue->dev, 100); | ||
518 | 516 | ||
519 | ret = platform_device_add(musb); | 517 | ret = platform_device_add(musb); |
520 | if (ret) { | 518 | if (ret) { |
521 | dev_err(&pdev->dev, "failed to register musb device\n"); | 519 | dev_err(&pdev->dev, "failed to register musb device\n"); |
522 | goto err2; | 520 | goto err3; |
523 | } | 521 | } |
524 | 522 | ||
525 | return 0; | 523 | return 0; |
526 | 524 | ||
525 | err3: | ||
526 | pm_runtime_disable(glue->dev); | ||
527 | |||
527 | err2: | 528 | err2: |
528 | platform_device_put(musb); | 529 | platform_device_put(musb); |
529 | 530 | ||
@@ -535,10 +536,7 @@ static int omap2430_remove(struct platform_device *pdev) | |||
535 | { | 536 | { |
536 | struct omap2430_glue *glue = platform_get_drvdata(pdev); | 537 | struct omap2430_glue *glue = platform_get_drvdata(pdev); |
537 | 538 | ||
538 | pm_runtime_get_sync(glue->dev); | ||
539 | platform_device_unregister(glue->musb); | 539 | platform_device_unregister(glue->musb); |
540 | pm_runtime_put_sync(glue->dev); | ||
541 | pm_runtime_dont_use_autosuspend(glue->dev); | ||
542 | pm_runtime_disable(glue->dev); | 540 | pm_runtime_disable(glue->dev); |
543 | 541 | ||
544 | return 0; | 542 | return 0; |
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c index df7c9f46be54..e85cc8e4e7a9 100644 --- a/drivers/usb/musb/tusb6010.c +++ b/drivers/usb/musb/tusb6010.c | |||
@@ -724,7 +724,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase) | |||
724 | dev_dbg(musb->controller, "vbus change, %s, otg %03x\n", | 724 | dev_dbg(musb->controller, "vbus change, %s, otg %03x\n", |
725 | usb_otg_state_string(musb->xceiv->otg->state), otg_stat); | 725 | usb_otg_state_string(musb->xceiv->otg->state), otg_stat); |
726 | idle_timeout = jiffies + (1 * HZ); | 726 | idle_timeout = jiffies + (1 * HZ); |
727 | schedule_work(&musb->irq_work); | 727 | schedule_delayed_work(&musb->irq_work, 0); |
728 | 728 | ||
729 | } else /* A-dev state machine */ { | 729 | } else /* A-dev state machine */ { |
730 | dev_dbg(musb->controller, "vbus change, %s, otg %03x\n", | 730 | dev_dbg(musb->controller, "vbus change, %s, otg %03x\n", |
@@ -814,7 +814,7 @@ tusb_otg_ints(struct musb *musb, u32 int_src, void __iomem *tbase) | |||
814 | break; | 814 | break; |
815 | } | 815 | } |
816 | } | 816 | } |
817 | schedule_work(&musb->irq_work); | 817 | schedule_delayed_work(&musb->irq_work, 0); |
818 | 818 | ||
819 | return idle_timeout; | 819 | return idle_timeout; |
820 | } | 820 | } |
@@ -864,7 +864,7 @@ static irqreturn_t tusb_musb_interrupt(int irq, void *__hci) | |||
864 | musb_writel(tbase, TUSB_PRCM_WAKEUP_CLEAR, reg); | 864 | musb_writel(tbase, TUSB_PRCM_WAKEUP_CLEAR, reg); |
865 | if (reg & ~TUSB_PRCM_WNORCS) { | 865 | if (reg & ~TUSB_PRCM_WNORCS) { |
866 | musb->is_active = 1; | 866 | musb->is_active = 1; |
867 | schedule_work(&musb->irq_work); | 867 | schedule_delayed_work(&musb->irq_work, 0); |
868 | } | 868 | } |
869 | dev_dbg(musb->controller, "wake %sactive %02x\n", | 869 | dev_dbg(musb->controller, "wake %sactive %02x\n", |
870 | musb->is_active ? "" : "in", reg); | 870 | musb->is_active ? "" : "in", reg); |
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index f61477bed3a8..243ac5ebe46a 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c | |||
@@ -131,6 +131,7 @@ static const struct usb_device_id id_table[] = { | |||
131 | { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */ | 131 | { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */ |
132 | { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */ | 132 | { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */ |
133 | { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ | 133 | { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ |
134 | { USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */ | ||
134 | { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */ | 135 | { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */ |
135 | { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */ | 136 | { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */ |
136 | { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */ | 137 | { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */ |
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 0ff7f38d7800..6e9fc8bcc285 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -1012,6 +1012,8 @@ static const struct usb_device_id id_table_combined[] = { | |||
1012 | { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) }, | 1012 | { USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) }, |
1013 | { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) }, | 1013 | { USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) }, |
1014 | { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) }, | 1014 | { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) }, |
1015 | { USB_DEVICE(TI_VID, TI_CC3200_LAUNCHPAD_PID), | ||
1016 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | ||
1015 | { } /* Terminating entry */ | 1017 | { } /* Terminating entry */ |
1016 | }; | 1018 | }; |
1017 | 1019 | ||
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 21011c0a4c64..48ee04c94a75 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h | |||
@@ -596,6 +596,12 @@ | |||
596 | #define STK541_PID 0x2109 /* Zigbee Controller */ | 596 | #define STK541_PID 0x2109 /* Zigbee Controller */ |
597 | 597 | ||
598 | /* | 598 | /* |
599 | * Texas Instruments | ||
600 | */ | ||
601 | #define TI_VID 0x0451 | ||
602 | #define TI_CC3200_LAUNCHPAD_PID 0xC32A /* SimpleLink Wi-Fi CC3200 LaunchPad */ | ||
603 | |||
604 | /* | ||
599 | * Blackfin gnICE JTAG | 605 | * Blackfin gnICE JTAG |
600 | * http://docs.blackfin.uclinux.org/doku.php?id=hw:jtag:gnice | 606 | * http://docs.blackfin.uclinux.org/doku.php?id=hw:jtag:gnice |
601 | */ | 607 | */ |
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c index ffd086733421..1a59f335b063 100644 --- a/drivers/usb/storage/transport.c +++ b/drivers/usb/storage/transport.c | |||
@@ -954,10 +954,15 @@ int usb_stor_CB_transport(struct scsi_cmnd *srb, struct us_data *us) | |||
954 | 954 | ||
955 | /* COMMAND STAGE */ | 955 | /* COMMAND STAGE */ |
956 | /* let's send the command via the control pipe */ | 956 | /* let's send the command via the control pipe */ |
957 | /* | ||
958 | * The command is sometimes (e.g. after scsi_eh_prep_cmnd) on the stack. | ||
959 | * The stack may be vmalloc'ed, so no DMA for us. Make a copy. | ||
960 | */ | ||
961 | memcpy(us->iobuf, srb->cmnd, srb->cmd_len); | ||
957 | result = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe, | 962 | result = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe, |
958 | US_CBI_ADSC, | 963 | US_CBI_ADSC, |
959 | USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, | 964 | USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0, |
960 | us->ifnum, srb->cmnd, srb->cmd_len); | 965 | us->ifnum, us->iobuf, srb->cmd_len); |
961 | 966 | ||
962 | /* check the return code for the command */ | 967 | /* check the return code for the command */ |
963 | usb_stor_dbg(us, "Call to usb_stor_ctrl_transfer() returned %d\n", | 968 | usb_stor_dbg(us, "Call to usb_stor_ctrl_transfer() returned %d\n", |
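The usb-storage hunk above bounces the CDB through the driver's preallocated us->iobuf because srb->cmnd may point into the calling thread's kernel stack (scsi_eh_prep_cmnd builds commands there), and with virtually mapped stacks (CONFIG_VMAP_STACK) such memory cannot be handed to the USB/DMA layer. Reduced to its essentials, with send_cdb() as an illustrative wrapper rather than a function in the driver:

/* Sketch only: copy a possibly stack-resident command block into a
 * DMA-safe buffer before starting the USB control transfer. */
static int send_cdb(struct us_data *us, struct scsi_cmnd *srb)
{
	memcpy(us->iobuf, srb->cmnd, srb->cmd_len);	/* us->iobuf is DMA-able */

	return usb_stor_ctrl_transfer(us, us->send_ctrl_pipe, US_CBI_ADSC,
				      USB_TYPE_CLASS | USB_RECIP_INTERFACE,
				      0, us->ifnum, us->iobuf, srb->cmd_len);
}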
diff --git a/drivers/video/fbdev/amba-clcd-versatile.c b/drivers/video/fbdev/amba-clcd-versatile.c index 19ad8645d93c..e5d9bfc1703a 100644 --- a/drivers/video/fbdev/amba-clcd-versatile.c +++ b/drivers/video/fbdev/amba-clcd-versatile.c | |||
@@ -526,8 +526,8 @@ int versatile_clcd_init_panel(struct clcd_fb *fb, | |||
526 | np = of_find_matching_node_and_match(NULL, versatile_clcd_of_match, | 526 | np = of_find_matching_node_and_match(NULL, versatile_clcd_of_match, |
527 | &clcd_id); | 527 | &clcd_id); |
528 | if (!np) { | 528 | if (!np) { |
529 | dev_err(dev, "no Versatile syscon node\n"); | 529 | /* Vexpress does not have this */ |
530 | return -ENODEV; | 530 | return 0; |
531 | } | 531 | } |
532 | versatile_clcd_type = (enum versatile_clcd)clcd_id->data; | 532 | versatile_clcd_type = (enum versatile_clcd)clcd_id->data; |
533 | 533 | ||
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index fdd3228e0678..3eb58cb51e56 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig | |||
@@ -155,6 +155,7 @@ config TANGOX_WATCHDOG | |||
155 | config WDAT_WDT | 155 | config WDAT_WDT |
156 | tristate "ACPI Watchdog Action Table (WDAT)" | 156 | tristate "ACPI Watchdog Action Table (WDAT)" |
157 | depends on ACPI | 157 | depends on ACPI |
158 | select WATCHDOG_CORE | ||
158 | select ACPI_WATCHDOG | 159 | select ACPI_WATCHDOG |
159 | help | 160 | help |
160 | This driver adds support for systems with ACPI Watchdog Action | 161 | This driver adds support for systems with ACPI Watchdog Action |
diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c index 9a28133ac3b8..9b774f4b50c8 100644 --- a/fs/crypto/fname.c +++ b/fs/crypto/fname.c | |||
@@ -39,65 +39,54 @@ static void fname_crypt_complete(struct crypto_async_request *req, int res) | |||
39 | static int fname_encrypt(struct inode *inode, | 39 | static int fname_encrypt(struct inode *inode, |
40 | const struct qstr *iname, struct fscrypt_str *oname) | 40 | const struct qstr *iname, struct fscrypt_str *oname) |
41 | { | 41 | { |
42 | u32 ciphertext_len; | ||
43 | struct skcipher_request *req = NULL; | 42 | struct skcipher_request *req = NULL; |
44 | DECLARE_FS_COMPLETION_RESULT(ecr); | 43 | DECLARE_FS_COMPLETION_RESULT(ecr); |
45 | struct fscrypt_info *ci = inode->i_crypt_info; | 44 | struct fscrypt_info *ci = inode->i_crypt_info; |
46 | struct crypto_skcipher *tfm = ci->ci_ctfm; | 45 | struct crypto_skcipher *tfm = ci->ci_ctfm; |
47 | int res = 0; | 46 | int res = 0; |
48 | char iv[FS_CRYPTO_BLOCK_SIZE]; | 47 | char iv[FS_CRYPTO_BLOCK_SIZE]; |
49 | struct scatterlist src_sg, dst_sg; | 48 | struct scatterlist sg; |
50 | int padding = 4 << (ci->ci_flags & FS_POLICY_FLAGS_PAD_MASK); | 49 | int padding = 4 << (ci->ci_flags & FS_POLICY_FLAGS_PAD_MASK); |
51 | char *workbuf, buf[32], *alloc_buf = NULL; | 50 | unsigned int lim; |
52 | unsigned lim; | 51 | unsigned int cryptlen; |
53 | 52 | ||
54 | lim = inode->i_sb->s_cop->max_namelen(inode); | 53 | lim = inode->i_sb->s_cop->max_namelen(inode); |
55 | if (iname->len <= 0 || iname->len > lim) | 54 | if (iname->len <= 0 || iname->len > lim) |
56 | return -EIO; | 55 | return -EIO; |
57 | 56 | ||
58 | ciphertext_len = max(iname->len, (u32)FS_CRYPTO_BLOCK_SIZE); | 57 | /* |
59 | ciphertext_len = round_up(ciphertext_len, padding); | 58 | * Copy the filename to the output buffer for encrypting in-place and |
60 | ciphertext_len = min(ciphertext_len, lim); | 59 | * pad it with the needed number of NUL bytes. |
60 | */ | ||
61 | cryptlen = max_t(unsigned int, iname->len, FS_CRYPTO_BLOCK_SIZE); | ||
62 | cryptlen = round_up(cryptlen, padding); | ||
63 | cryptlen = min(cryptlen, lim); | ||
64 | memcpy(oname->name, iname->name, iname->len); | ||
65 | memset(oname->name + iname->len, 0, cryptlen - iname->len); | ||
61 | 66 | ||
62 | if (ciphertext_len <= sizeof(buf)) { | 67 | /* Initialize the IV */ |
63 | workbuf = buf; | 68 | memset(iv, 0, FS_CRYPTO_BLOCK_SIZE); |
64 | } else { | ||
65 | alloc_buf = kmalloc(ciphertext_len, GFP_NOFS); | ||
66 | if (!alloc_buf) | ||
67 | return -ENOMEM; | ||
68 | workbuf = alloc_buf; | ||
69 | } | ||
70 | 69 | ||
71 | /* Allocate request */ | 70 | /* Set up the encryption request */ |
72 | req = skcipher_request_alloc(tfm, GFP_NOFS); | 71 | req = skcipher_request_alloc(tfm, GFP_NOFS); |
73 | if (!req) { | 72 | if (!req) { |
74 | printk_ratelimited(KERN_ERR | 73 | printk_ratelimited(KERN_ERR |
75 | "%s: crypto_request_alloc() failed\n", __func__); | 74 | "%s: skcipher_request_alloc() failed\n", __func__); |
76 | kfree(alloc_buf); | ||
77 | return -ENOMEM; | 75 | return -ENOMEM; |
78 | } | 76 | } |
79 | skcipher_request_set_callback(req, | 77 | skcipher_request_set_callback(req, |
80 | CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, | 78 | CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP, |
81 | fname_crypt_complete, &ecr); | 79 | fname_crypt_complete, &ecr); |
80 | sg_init_one(&sg, oname->name, cryptlen); | ||
81 | skcipher_request_set_crypt(req, &sg, &sg, cryptlen, iv); | ||
82 | 82 | ||
83 | /* Copy the input */ | 83 | /* Do the encryption */ |
84 | memcpy(workbuf, iname->name, iname->len); | ||
85 | if (iname->len < ciphertext_len) | ||
86 | memset(workbuf + iname->len, 0, ciphertext_len - iname->len); | ||
87 | |||
88 | /* Initialize IV */ | ||
89 | memset(iv, 0, FS_CRYPTO_BLOCK_SIZE); | ||
90 | |||
91 | /* Create encryption request */ | ||
92 | sg_init_one(&src_sg, workbuf, ciphertext_len); | ||
93 | sg_init_one(&dst_sg, oname->name, ciphertext_len); | ||
94 | skcipher_request_set_crypt(req, &src_sg, &dst_sg, ciphertext_len, iv); | ||
95 | res = crypto_skcipher_encrypt(req); | 84 | res = crypto_skcipher_encrypt(req); |
96 | if (res == -EINPROGRESS || res == -EBUSY) { | 85 | if (res == -EINPROGRESS || res == -EBUSY) { |
86 | /* Request is being completed asynchronously; wait for it */ | ||
97 | wait_for_completion(&ecr.completion); | 87 | wait_for_completion(&ecr.completion); |
98 | res = ecr.res; | 88 | res = ecr.res; |
99 | } | 89 | } |
100 | kfree(alloc_buf); | ||
101 | skcipher_request_free(req); | 90 | skcipher_request_free(req); |
102 | if (res < 0) { | 91 | if (res < 0) { |
103 | printk_ratelimited(KERN_ERR | 92 | printk_ratelimited(KERN_ERR |
@@ -105,7 +94,7 @@ static int fname_encrypt(struct inode *inode, | |||
105 | return res; | 94 | return res; |
106 | } | 95 | } |
107 | 96 | ||
108 | oname->len = ciphertext_len; | 97 | oname->len = cryptlen; |
109 | return 0; | 98 | return 0; |
110 | } | 99 | } |
111 | 100 | ||
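The fname.c rewrite above drops the temporary workbuf and its kmalloc fallback: the padded name is copied straight into oname->name and encrypted in place through one scatterlist that acts as both source and destination. The ciphertext length calculation is unchanged: at least one cipher block, rounded up to the policy's padding granularity, clamped to the filesystem's name limit. A stand-alone sketch of just that length computation (FS_CRYPTO_BLOCK_SIZE is 16; the padding values come from the 2-bit policy field, i.e. 4, 8, 16 or 32):

#include <stdio.h>

/* padding must be a power of two for the round-up trick to be valid. */
static unsigned int padded_name_len(unsigned int name_len,
				    unsigned int padding,
				    unsigned int max_len)
{
	unsigned int len = name_len < 16 ? 16 : name_len;	/* >= one block */

	len = (len + padding - 1) & ~(padding - 1);	/* round_up(len, padding) */
	return len < max_len ? len : max_len;		/* cap at max_namelen() */
}

int main(void)
{
	printf("%u %u %u\n",
	       padded_name_len(3, 4, 255),	/* -> 16 */
	       padded_name_len(17, 16, 255),	/* -> 32 */
	       padded_name_len(250, 32, 255));	/* -> 255 (clamped) */
	return 0;
}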
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c index 82f0285f5d08..67fb6d8876d0 100644 --- a/fs/crypto/keyinfo.c +++ b/fs/crypto/keyinfo.c | |||
@@ -185,7 +185,7 @@ int get_crypt_info(struct inode *inode) | |||
185 | struct crypto_skcipher *ctfm; | 185 | struct crypto_skcipher *ctfm; |
186 | const char *cipher_str; | 186 | const char *cipher_str; |
187 | int keysize; | 187 | int keysize; |
188 | u8 raw_key[FS_MAX_KEY_SIZE]; | 188 | u8 *raw_key = NULL; |
189 | int res; | 189 | int res; |
190 | 190 | ||
191 | res = fscrypt_initialize(); | 191 | res = fscrypt_initialize(); |
@@ -238,6 +238,15 @@ retry: | |||
238 | if (res) | 238 | if (res) |
239 | goto out; | 239 | goto out; |
240 | 240 | ||
241 | /* | ||
242 | * This cannot be a stack buffer because it is passed to the scatterlist | ||
243 | * crypto API as part of key derivation. | ||
244 | */ | ||
245 | res = -ENOMEM; | ||
246 | raw_key = kmalloc(FS_MAX_KEY_SIZE, GFP_NOFS); | ||
247 | if (!raw_key) | ||
248 | goto out; | ||
249 | |||
241 | if (fscrypt_dummy_context_enabled(inode)) { | 250 | if (fscrypt_dummy_context_enabled(inode)) { |
242 | memset(raw_key, 0x42, FS_AES_256_XTS_KEY_SIZE); | 251 | memset(raw_key, 0x42, FS_AES_256_XTS_KEY_SIZE); |
243 | goto got_key; | 252 | goto got_key; |
@@ -276,7 +285,8 @@ got_key: | |||
276 | if (res) | 285 | if (res) |
277 | goto out; | 286 | goto out; |
278 | 287 | ||
279 | memzero_explicit(raw_key, sizeof(raw_key)); | 288 | kzfree(raw_key); |
289 | raw_key = NULL; | ||
280 | if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) != NULL) { | 290 | if (cmpxchg(&inode->i_crypt_info, NULL, crypt_info) != NULL) { |
281 | put_crypt_info(crypt_info); | 291 | put_crypt_info(crypt_info); |
282 | goto retry; | 292 | goto retry; |
@@ -287,7 +297,7 @@ out: | |||
287 | if (res == -ENOKEY) | 297 | if (res == -ENOKEY) |
288 | res = 0; | 298 | res = 0; |
289 | put_crypt_info(crypt_info); | 299 | put_crypt_info(crypt_info); |
290 | memzero_explicit(raw_key, sizeof(raw_key)); | 300 | kzfree(raw_key); |
291 | return res; | 301 | return res; |
292 | } | 302 | } |
293 | 303 | ||
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 282a51b07c57..a8a750f59621 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h | |||
@@ -235,6 +235,7 @@ struct ext4_io_submit { | |||
235 | #define EXT4_MAX_BLOCK_SIZE 65536 | 235 | #define EXT4_MAX_BLOCK_SIZE 65536 |
236 | #define EXT4_MIN_BLOCK_LOG_SIZE 10 | 236 | #define EXT4_MIN_BLOCK_LOG_SIZE 10 |
237 | #define EXT4_MAX_BLOCK_LOG_SIZE 16 | 237 | #define EXT4_MAX_BLOCK_LOG_SIZE 16 |
238 | #define EXT4_MAX_CLUSTER_LOG_SIZE 30 | ||
238 | #ifdef __KERNEL__ | 239 | #ifdef __KERNEL__ |
239 | # define EXT4_BLOCK_SIZE(s) ((s)->s_blocksize) | 240 | # define EXT4_BLOCK_SIZE(s) ((s)->s_blocksize) |
240 | #else | 241 | #else |
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 20da99da0a34..52b0530c5d65 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c | |||
@@ -3565,7 +3565,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
3565 | if (blocksize < EXT4_MIN_BLOCK_SIZE || | 3565 | if (blocksize < EXT4_MIN_BLOCK_SIZE || |
3566 | blocksize > EXT4_MAX_BLOCK_SIZE) { | 3566 | blocksize > EXT4_MAX_BLOCK_SIZE) { |
3567 | ext4_msg(sb, KERN_ERR, | 3567 | ext4_msg(sb, KERN_ERR, |
3568 | "Unsupported filesystem blocksize %d", blocksize); | 3568 | "Unsupported filesystem blocksize %d (%d log_block_size)", |
3569 | blocksize, le32_to_cpu(es->s_log_block_size)); | ||
3570 | goto failed_mount; | ||
3571 | } | ||
3572 | if (le32_to_cpu(es->s_log_block_size) > | ||
3573 | (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) { | ||
3574 | ext4_msg(sb, KERN_ERR, | ||
3575 | "Invalid log block size: %u", | ||
3576 | le32_to_cpu(es->s_log_block_size)); | ||
3569 | goto failed_mount; | 3577 | goto failed_mount; |
3570 | } | 3578 | } |
3571 | 3579 | ||
@@ -3697,6 +3705,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
3697 | "block size (%d)", clustersize, blocksize); | 3705 | "block size (%d)", clustersize, blocksize); |
3698 | goto failed_mount; | 3706 | goto failed_mount; |
3699 | } | 3707 | } |
3708 | if (le32_to_cpu(es->s_log_cluster_size) > | ||
3709 | (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) { | ||
3710 | ext4_msg(sb, KERN_ERR, | ||
3711 | "Invalid log cluster size: %u", | ||
3712 | le32_to_cpu(es->s_log_cluster_size)); | ||
3713 | goto failed_mount; | ||
3714 | } | ||
3700 | sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) - | 3715 | sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) - |
3701 | le32_to_cpu(es->s_log_block_size); | 3716 | le32_to_cpu(es->s_log_block_size); |
3702 | sbi->s_clusters_per_group = | 3717 | sbi->s_clusters_per_group = |
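The two new ext4 checks above refuse to mount when the on-disk s_log_block_size or s_log_cluster_size field is out of range, so the shift-based size and cluster-bits arithmetic later in ext4_fill_super() only ever sees sane values from a possibly crafted image. A small stand-alone illustration of the block-size half of it (the EXT4_*_LOG_SIZE values are the kernel's):

#include <stdio.h>
#include <stdint.h>

#define EXT4_MIN_BLOCK_LOG_SIZE 10	/* 1024-byte blocks */
#define EXT4_MAX_BLOCK_LOG_SIZE 16	/* 65536-byte blocks */

/* Returns the block size implied by the on-disk field, or 0 when the
 * field is out of range and the mount should be rejected. */
static uint32_t blocksize_from_sb(uint32_t s_log_block_size)
{
	if (s_log_block_size >
	    EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)
		return 0;
	return 1024u << s_log_block_size;	/* EXT4_MIN_BLOCK_SIZE << field */
}

int main(void)
{
	printf("%u %u %u\n",
	       blocksize_from_sb(0),	/* 1024 */
	       blocksize_from_sb(2),	/* 4096 */
	       blocksize_from_sb(42));	/* 0: 1024 << 42 would be undefined */
	return 0;
}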
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 6a4d0e5418a1..b3ebe512d64c 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c | |||
@@ -286,6 +286,11 @@ const struct dentry_operations fuse_dentry_operations = { | |||
286 | .d_release = fuse_dentry_release, | 286 | .d_release = fuse_dentry_release, |
287 | }; | 287 | }; |
288 | 288 | ||
289 | const struct dentry_operations fuse_root_dentry_operations = { | ||
290 | .d_init = fuse_dentry_init, | ||
291 | .d_release = fuse_dentry_release, | ||
292 | }; | ||
293 | |||
289 | int fuse_valid_type(int m) | 294 | int fuse_valid_type(int m) |
290 | { | 295 | { |
291 | return S_ISREG(m) || S_ISDIR(m) || S_ISLNK(m) || S_ISCHR(m) || | 296 | return S_ISREG(m) || S_ISDIR(m) || S_ISLNK(m) || S_ISCHR(m) || |
diff --git a/fs/fuse/file.c b/fs/fuse/file.c index abc66a6237fd..2401c5dabb2a 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c | |||
@@ -1985,6 +1985,10 @@ static int fuse_write_end(struct file *file, struct address_space *mapping, | |||
1985 | { | 1985 | { |
1986 | struct inode *inode = page->mapping->host; | 1986 | struct inode *inode = page->mapping->host; |
1987 | 1987 | ||
1988 | /* Haven't copied anything? Skip zeroing, size extending, dirtying. */ | ||
1989 | if (!copied) | ||
1990 | goto unlock; | ||
1991 | |||
1988 | if (!PageUptodate(page)) { | 1992 | if (!PageUptodate(page)) { |
1989 | /* Zero any unwritten bytes at the end of the page */ | 1993 | /* Zero any unwritten bytes at the end of the page */ |
1990 | size_t endoff = (pos + copied) & ~PAGE_MASK; | 1994 | size_t endoff = (pos + copied) & ~PAGE_MASK; |
@@ -1995,6 +1999,8 @@ static int fuse_write_end(struct file *file, struct address_space *mapping, | |||
1995 | 1999 | ||
1996 | fuse_write_update_size(inode, pos + copied); | 2000 | fuse_write_update_size(inode, pos + copied); |
1997 | set_page_dirty(page); | 2001 | set_page_dirty(page); |
2002 | |||
2003 | unlock: | ||
1998 | unlock_page(page); | 2004 | unlock_page(page); |
1999 | put_page(page); | 2005 | put_page(page); |
2000 | 2006 | ||
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 0dfbb136e59a..91307940c8ac 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h | |||
@@ -692,6 +692,7 @@ static inline u64 get_node_id(struct inode *inode) | |||
692 | extern const struct file_operations fuse_dev_operations; | 692 | extern const struct file_operations fuse_dev_operations; |
693 | 693 | ||
694 | extern const struct dentry_operations fuse_dentry_operations; | 694 | extern const struct dentry_operations fuse_dentry_operations; |
695 | extern const struct dentry_operations fuse_root_dentry_operations; | ||
695 | 696 | ||
696 | /** | 697 | /** |
697 | * Inode to nodeid comparison. | 698 | * Inode to nodeid comparison. |
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 17141099f2e7..6fe6a88ecb4a 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c | |||
@@ -1131,10 +1131,11 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) | |||
1131 | 1131 | ||
1132 | err = -ENOMEM; | 1132 | err = -ENOMEM; |
1133 | root = fuse_get_root_inode(sb, d.rootmode); | 1133 | root = fuse_get_root_inode(sb, d.rootmode); |
1134 | sb->s_d_op = &fuse_root_dentry_operations; | ||
1134 | root_dentry = d_make_root(root); | 1135 | root_dentry = d_make_root(root); |
1135 | if (!root_dentry) | 1136 | if (!root_dentry) |
1136 | goto err_dev_free; | 1137 | goto err_dev_free; |
1137 | /* only now - we want root dentry with NULL ->d_op */ | 1138 | /* Root dentry doesn't have .d_revalidate */ |
1138 | sb->s_d_op = &fuse_dentry_operations; | 1139 | sb->s_d_op = &fuse_dentry_operations; |
1139 | 1140 | ||
1140 | init_req = fuse_request_alloc(0); | 1141 | init_req = fuse_request_alloc(0); |
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index 532d8e242d4d..484bebc20bca 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c | |||
@@ -197,7 +197,7 @@ static int nfs_callback_up_net(int minorversion, struct svc_serv *serv, | |||
197 | } | 197 | } |
198 | 198 | ||
199 | ret = -EPROTONOSUPPORT; | 199 | ret = -EPROTONOSUPPORT; |
200 | if (minorversion == 0) | 200 | if (!IS_ENABLED(CONFIG_NFS_V4_1) || minorversion == 0) |
201 | ret = nfs4_callback_up_net(serv, net); | 201 | ret = nfs4_callback_up_net(serv, net); |
202 | else if (xprt->ops->bc_up) | 202 | else if (xprt->ops->bc_up) |
203 | ret = xprt->ops->bc_up(serv, net); | 203 | ret = xprt->ops->bc_up(serv, net); |
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 9b3a82abab07..1452177c822d 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h | |||
@@ -542,6 +542,13 @@ static inline bool nfs4_valid_open_stateid(const struct nfs4_state *state) | |||
542 | return test_bit(NFS_STATE_RECOVERY_FAILED, &state->flags) == 0; | 542 | return test_bit(NFS_STATE_RECOVERY_FAILED, &state->flags) == 0; |
543 | } | 543 | } |
544 | 544 | ||
545 | static inline bool nfs4_state_match_open_stateid_other(const struct nfs4_state *state, | ||
546 | const nfs4_stateid *stateid) | ||
547 | { | ||
548 | return test_bit(NFS_OPEN_STATE, &state->flags) && | ||
549 | nfs4_stateid_match_other(&state->open_stateid, stateid); | ||
550 | } | ||
551 | |||
545 | #else | 552 | #else |
546 | 553 | ||
547 | #define nfs4_close_state(a, b) do { } while (0) | 554 | #define nfs4_close_state(a, b) do { } while (0) |
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 7897826d7c51..241da19b7da4 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
@@ -1451,7 +1451,6 @@ static void nfs_resync_open_stateid_locked(struct nfs4_state *state) | |||
1451 | } | 1451 | } |
1452 | 1452 | ||
1453 | static void nfs_clear_open_stateid_locked(struct nfs4_state *state, | 1453 | static void nfs_clear_open_stateid_locked(struct nfs4_state *state, |
1454 | nfs4_stateid *arg_stateid, | ||
1455 | nfs4_stateid *stateid, fmode_t fmode) | 1454 | nfs4_stateid *stateid, fmode_t fmode) |
1456 | { | 1455 | { |
1457 | clear_bit(NFS_O_RDWR_STATE, &state->flags); | 1456 | clear_bit(NFS_O_RDWR_STATE, &state->flags); |
@@ -1469,10 +1468,9 @@ static void nfs_clear_open_stateid_locked(struct nfs4_state *state, | |||
1469 | } | 1468 | } |
1470 | if (stateid == NULL) | 1469 | if (stateid == NULL) |
1471 | return; | 1470 | return; |
1472 | /* Handle races with OPEN */ | 1471 | /* Handle OPEN+OPEN_DOWNGRADE races */ |
1473 | if (!nfs4_stateid_match_other(arg_stateid, &state->open_stateid) || | 1472 | if (nfs4_stateid_match_other(stateid, &state->open_stateid) && |
1474 | (nfs4_stateid_match_other(stateid, &state->open_stateid) && | 1473 | !nfs4_stateid_is_newer(stateid, &state->open_stateid)) { |
1475 | !nfs4_stateid_is_newer(stateid, &state->open_stateid))) { | ||
1476 | nfs_resync_open_stateid_locked(state); | 1474 | nfs_resync_open_stateid_locked(state); |
1477 | return; | 1475 | return; |
1478 | } | 1476 | } |
@@ -1486,7 +1484,9 @@ static void nfs_clear_open_stateid(struct nfs4_state *state, | |||
1486 | nfs4_stateid *stateid, fmode_t fmode) | 1484 | nfs4_stateid *stateid, fmode_t fmode) |
1487 | { | 1485 | { |
1488 | write_seqlock(&state->seqlock); | 1486 | write_seqlock(&state->seqlock); |
1489 | nfs_clear_open_stateid_locked(state, arg_stateid, stateid, fmode); | 1487 | /* Ignore if the CLOSE argument doesn't match the current stateid */ |
1488 | if (nfs4_state_match_open_stateid_other(state, arg_stateid)) | ||
1489 | nfs_clear_open_stateid_locked(state, stateid, fmode); | ||
1490 | write_sequnlock(&state->seqlock); | 1490 | write_sequnlock(&state->seqlock); |
1491 | if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) | 1491 | if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags)) |
1492 | nfs4_schedule_state_manager(state->owner->so_server->nfs_client); | 1492 | nfs4_schedule_state_manager(state->owner->so_server->nfs_client); |
@@ -2564,15 +2564,23 @@ static void nfs41_check_delegation_stateid(struct nfs4_state *state) | |||
2564 | static int nfs41_check_expired_locks(struct nfs4_state *state) | 2564 | static int nfs41_check_expired_locks(struct nfs4_state *state) |
2565 | { | 2565 | { |
2566 | int status, ret = NFS_OK; | 2566 | int status, ret = NFS_OK; |
2567 | struct nfs4_lock_state *lsp; | 2567 | struct nfs4_lock_state *lsp, *prev = NULL; |
2568 | struct nfs_server *server = NFS_SERVER(state->inode); | 2568 | struct nfs_server *server = NFS_SERVER(state->inode); |
2569 | 2569 | ||
2570 | if (!test_bit(LK_STATE_IN_USE, &state->flags)) | 2570 | if (!test_bit(LK_STATE_IN_USE, &state->flags)) |
2571 | goto out; | 2571 | goto out; |
2572 | |||
2573 | spin_lock(&state->state_lock); | ||
2572 | list_for_each_entry(lsp, &state->lock_states, ls_locks) { | 2574 | list_for_each_entry(lsp, &state->lock_states, ls_locks) { |
2573 | if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) { | 2575 | if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) { |
2574 | struct rpc_cred *cred = lsp->ls_state->owner->so_cred; | 2576 | struct rpc_cred *cred = lsp->ls_state->owner->so_cred; |
2575 | 2577 | ||
2578 | atomic_inc(&lsp->ls_count); | ||
2579 | spin_unlock(&state->state_lock); | ||
2580 | |||
2581 | nfs4_put_lock_state(prev); | ||
2582 | prev = lsp; | ||
2583 | |||
2576 | status = nfs41_test_and_free_expired_stateid(server, | 2584 | status = nfs41_test_and_free_expired_stateid(server, |
2577 | &lsp->ls_stateid, | 2585 | &lsp->ls_stateid, |
2578 | cred); | 2586 | cred); |
@@ -2585,10 +2593,14 @@ static int nfs41_check_expired_locks(struct nfs4_state *state) | |||
2585 | set_bit(NFS_LOCK_LOST, &lsp->ls_flags); | 2593 | set_bit(NFS_LOCK_LOST, &lsp->ls_flags); |
2586 | } else if (status != NFS_OK) { | 2594 | } else if (status != NFS_OK) { |
2587 | ret = status; | 2595 | ret = status; |
2588 | break; | 2596 | nfs4_put_lock_state(prev); |
2597 | goto out; | ||
2589 | } | 2598 | } |
2599 | spin_lock(&state->state_lock); | ||
2590 | } | 2600 | } |
2591 | }; | 2601 | } |
2602 | spin_unlock(&state->state_lock); | ||
2603 | nfs4_put_lock_state(prev); | ||
2592 | out: | 2604 | out: |
2593 | return ret; | 2605 | return ret; |
2594 | } | 2606 | } |
@@ -3122,7 +3134,8 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data) | |||
3122 | } else if (is_rdwr) | 3134 | } else if (is_rdwr) |
3123 | calldata->arg.fmode |= FMODE_READ|FMODE_WRITE; | 3135 | calldata->arg.fmode |= FMODE_READ|FMODE_WRITE; |
3124 | 3136 | ||
3125 | if (!nfs4_valid_open_stateid(state)) | 3137 | if (!nfs4_valid_open_stateid(state) || |
3138 | test_bit(NFS_OPEN_STATE, &state->flags) == 0) | ||
3126 | call_close = 0; | 3139 | call_close = 0; |
3127 | spin_unlock(&state->owner->so_lock); | 3140 | spin_unlock(&state->owner->so_lock); |
3128 | 3141 | ||
@@ -5569,6 +5582,7 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) | |||
5569 | switch (task->tk_status) { | 5582 | switch (task->tk_status) { |
5570 | case 0: | 5583 | case 0: |
5571 | renew_lease(data->res.server, data->timestamp); | 5584 | renew_lease(data->res.server, data->timestamp); |
5585 | break; | ||
5572 | case -NFS4ERR_ADMIN_REVOKED: | 5586 | case -NFS4ERR_ADMIN_REVOKED: |
5573 | case -NFS4ERR_DELEG_REVOKED: | 5587 | case -NFS4ERR_DELEG_REVOKED: |
5574 | case -NFS4ERR_EXPIRED: | 5588 | case -NFS4ERR_EXPIRED: |
@@ -5579,8 +5593,6 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) | |||
5579 | case -NFS4ERR_OLD_STATEID: | 5593 | case -NFS4ERR_OLD_STATEID: |
5580 | case -NFS4ERR_STALE_STATEID: | 5594 | case -NFS4ERR_STALE_STATEID: |
5581 | task->tk_status = 0; | 5595 | task->tk_status = 0; |
5582 | if (data->roc) | ||
5583 | pnfs_roc_set_barrier(data->inode, data->roc_barrier); | ||
5584 | break; | 5596 | break; |
5585 | default: | 5597 | default: |
5586 | if (nfs4_async_handle_error(task, data->res.server, | 5598 | if (nfs4_async_handle_error(task, data->res.server, |
@@ -5590,6 +5602,8 @@ static void nfs4_delegreturn_done(struct rpc_task *task, void *calldata) | |||
5590 | } | 5602 | } |
5591 | } | 5603 | } |
5592 | data->rpc_status = task->tk_status; | 5604 | data->rpc_status = task->tk_status; |
5605 | if (data->roc && data->rpc_status == 0) | ||
5606 | pnfs_roc_set_barrier(data->inode, data->roc_barrier); | ||
5593 | } | 5607 | } |
5594 | 5608 | ||
5595 | static void nfs4_delegreturn_release(void *calldata) | 5609 | static void nfs4_delegreturn_release(void *calldata) |
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 5f4281ec5f72..0959c9661662 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c | |||
@@ -1547,6 +1547,7 @@ restart: | |||
1547 | ssleep(1); | 1547 | ssleep(1); |
1548 | case -NFS4ERR_ADMIN_REVOKED: | 1548 | case -NFS4ERR_ADMIN_REVOKED: |
1549 | case -NFS4ERR_STALE_STATEID: | 1549 | case -NFS4ERR_STALE_STATEID: |
1550 | case -NFS4ERR_OLD_STATEID: | ||
1550 | case -NFS4ERR_BAD_STATEID: | 1551 | case -NFS4ERR_BAD_STATEID: |
1551 | case -NFS4ERR_RECLAIM_BAD: | 1552 | case -NFS4ERR_RECLAIM_BAD: |
1552 | case -NFS4ERR_RECLAIM_CONFLICT: | 1553 | case -NFS4ERR_RECLAIM_CONFLICT: |
diff --git a/fs/orangefs/orangefs-debugfs.c b/fs/orangefs/orangefs-debugfs.c index d484068ca716..38887cc5577f 100644 --- a/fs/orangefs/orangefs-debugfs.c +++ b/fs/orangefs/orangefs-debugfs.c | |||
@@ -114,6 +114,7 @@ static const struct seq_operations help_debug_ops = { | |||
114 | }; | 114 | }; |
115 | 115 | ||
116 | const struct file_operations debug_help_fops = { | 116 | const struct file_operations debug_help_fops = { |
117 | .owner = THIS_MODULE, | ||
117 | .open = orangefs_debug_help_open, | 118 | .open = orangefs_debug_help_open, |
118 | .read = seq_read, | 119 | .read = seq_read, |
119 | .release = seq_release, | 120 | .release = seq_release, |
@@ -121,6 +122,7 @@ const struct file_operations debug_help_fops = { | |||
121 | }; | 122 | }; |
122 | 123 | ||
123 | static const struct file_operations kernel_debug_fops = { | 124 | static const struct file_operations kernel_debug_fops = { |
125 | .owner = THIS_MODULE, | ||
124 | .open = orangefs_debug_open, | 126 | .open = orangefs_debug_open, |
125 | .read = orangefs_debug_read, | 127 | .read = orangefs_debug_read, |
126 | .write = orangefs_debug_write, | 128 | .write = orangefs_debug_write, |
diff --git a/fs/splice.c b/fs/splice.c index dcaf185a5731..5a7750bd2eea 100644 --- a/fs/splice.c +++ b/fs/splice.c | |||
@@ -408,7 +408,8 @@ static ssize_t default_file_splice_read(struct file *in, loff_t *ppos, | |||
408 | if (res <= 0) | 408 | if (res <= 0) |
409 | return -ENOMEM; | 409 | return -ENOMEM; |
410 | 410 | ||
411 | nr_pages = res / PAGE_SIZE; | 411 | BUG_ON(dummy); |
412 | nr_pages = DIV_ROUND_UP(res, PAGE_SIZE); | ||
412 | 413 | ||
413 | vec = __vec; | 414 | vec = __vec; |
414 | if (nr_pages > PIPE_DEF_BUFFERS) { | 415 | if (nr_pages > PIPE_DEF_BUFFERS) { |
diff --git a/fs/xattr.c b/fs/xattr.c index 3368659c471e..2d13b4e62fae 100644 --- a/fs/xattr.c +++ b/fs/xattr.c | |||
@@ -170,7 +170,7 @@ int __vfs_setxattr_noperm(struct dentry *dentry, const char *name, | |||
170 | const void *value, size_t size, int flags) | 170 | const void *value, size_t size, int flags) |
171 | { | 171 | { |
172 | struct inode *inode = dentry->d_inode; | 172 | struct inode *inode = dentry->d_inode; |
173 | int error = -EOPNOTSUPP; | 173 | int error = -EAGAIN; |
174 | int issec = !strncmp(name, XATTR_SECURITY_PREFIX, | 174 | int issec = !strncmp(name, XATTR_SECURITY_PREFIX, |
175 | XATTR_SECURITY_PREFIX_LEN); | 175 | XATTR_SECURITY_PREFIX_LEN); |
176 | 176 | ||
@@ -183,15 +183,21 @@ int __vfs_setxattr_noperm(struct dentry *dentry, const char *name, | |||
183 | security_inode_post_setxattr(dentry, name, value, | 183 | security_inode_post_setxattr(dentry, name, value, |
184 | size, flags); | 184 | size, flags); |
185 | } | 185 | } |
186 | } else if (issec) { | 186 | } else { |
187 | const char *suffix = name + XATTR_SECURITY_PREFIX_LEN; | ||
188 | |||
189 | if (unlikely(is_bad_inode(inode))) | 187 | if (unlikely(is_bad_inode(inode))) |
190 | return -EIO; | 188 | return -EIO; |
191 | error = security_inode_setsecurity(inode, suffix, value, | 189 | } |
192 | size, flags); | 190 | if (error == -EAGAIN) { |
193 | if (!error) | 191 | error = -EOPNOTSUPP; |
194 | fsnotify_xattr(dentry); | 192 | |
193 | if (issec) { | ||
194 | const char *suffix = name + XATTR_SECURITY_PREFIX_LEN; | ||
195 | |||
196 | error = security_inode_setsecurity(inode, suffix, value, | ||
197 | size, flags); | ||
198 | if (!error) | ||
199 | fsnotify_xattr(dentry); | ||
200 | } | ||
195 | } | 201 | } |
196 | 202 | ||
197 | return error; | 203 | return error; |
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h index 1b949e08015c..c19700e2a2fe 100644 --- a/include/acpi/actbl.h +++ b/include/acpi/actbl.h | |||
@@ -230,72 +230,62 @@ struct acpi_table_facs { | |||
230 | /* Fields common to all versions of the FADT */ | 230 | /* Fields common to all versions of the FADT */ |
231 | 231 | ||
232 | struct acpi_table_fadt { | 232 | struct acpi_table_fadt { |
233 | struct acpi_table_header header; /* [V1] Common ACPI table header */ | 233 | struct acpi_table_header header; /* Common ACPI table header */ |
234 | u32 facs; /* [V1] 32-bit physical address of FACS */ | 234 | u32 facs; /* 32-bit physical address of FACS */ |
235 | u32 dsdt; /* [V1] 32-bit physical address of DSDT */ | 235 | u32 dsdt; /* 32-bit physical address of DSDT */ |
236 | u8 model; /* [V1] System Interrupt Model (ACPI 1.0) - not used in ACPI 2.0+ */ | 236 | u8 model; /* System Interrupt Model (ACPI 1.0) - not used in ACPI 2.0+ */ |
237 | u8 preferred_profile; /* [V1] Conveys preferred power management profile to OSPM. */ | 237 | u8 preferred_profile; /* Conveys preferred power management profile to OSPM. */ |
238 | u16 sci_interrupt; /* [V1] System vector of SCI interrupt */ | 238 | u16 sci_interrupt; /* System vector of SCI interrupt */ |
239 | u32 smi_command; /* [V1] 32-bit Port address of SMI command port */ | 239 | u32 smi_command; /* 32-bit Port address of SMI command port */ |
240 | u8 acpi_enable; /* [V1] Value to write to SMI_CMD to enable ACPI */ | 240 | u8 acpi_enable; /* Value to write to SMI_CMD to enable ACPI */ |
241 | u8 acpi_disable; /* [V1] Value to write to SMI_CMD to disable ACPI */ | 241 | u8 acpi_disable; /* Value to write to SMI_CMD to disable ACPI */ |
242 | u8 s4_bios_request; /* [V1] Value to write to SMI_CMD to enter S4BIOS state */ | 242 | u8 s4_bios_request; /* Value to write to SMI_CMD to enter S4BIOS state */ |
243 | u8 pstate_control; /* [V1] Processor performance state control */ | 243 | u8 pstate_control; /* Processor performance state control */ |
244 | u32 pm1a_event_block; /* [V1] 32-bit port address of Power Mgt 1a Event Reg Blk */ | 244 | u32 pm1a_event_block; /* 32-bit port address of Power Mgt 1a Event Reg Blk */ |
245 | u32 pm1b_event_block; /* [V1] 32-bit port address of Power Mgt 1b Event Reg Blk */ | 245 | u32 pm1b_event_block; /* 32-bit port address of Power Mgt 1b Event Reg Blk */ |
246 | u32 pm1a_control_block; /* [V1] 32-bit port address of Power Mgt 1a Control Reg Blk */ | 246 | u32 pm1a_control_block; /* 32-bit port address of Power Mgt 1a Control Reg Blk */ |
247 | u32 pm1b_control_block; /* [V1] 32-bit port address of Power Mgt 1b Control Reg Blk */ | 247 | u32 pm1b_control_block; /* 32-bit port address of Power Mgt 1b Control Reg Blk */ |
248 | u32 pm2_control_block; /* [V1] 32-bit port address of Power Mgt 2 Control Reg Blk */ | 248 | u32 pm2_control_block; /* 32-bit port address of Power Mgt 2 Control Reg Blk */ |
249 | u32 pm_timer_block; /* [V1] 32-bit port address of Power Mgt Timer Ctrl Reg Blk */ | 249 | u32 pm_timer_block; /* 32-bit port address of Power Mgt Timer Ctrl Reg Blk */ |
250 | u32 gpe0_block; /* [V1] 32-bit port address of General Purpose Event 0 Reg Blk */ | 250 | u32 gpe0_block; /* 32-bit port address of General Purpose Event 0 Reg Blk */ |
251 | u32 gpe1_block; /* [V1] 32-bit port address of General Purpose Event 1 Reg Blk */ | 251 | u32 gpe1_block; /* 32-bit port address of General Purpose Event 1 Reg Blk */ |
252 | u8 pm1_event_length; /* [V1] Byte Length of ports at pm1x_event_block */ | 252 | u8 pm1_event_length; /* Byte Length of ports at pm1x_event_block */ |
253 | u8 pm1_control_length; /* [V1] Byte Length of ports at pm1x_control_block */ | 253 | u8 pm1_control_length; /* Byte Length of ports at pm1x_control_block */ |
254 | u8 pm2_control_length; /* [V1] Byte Length of ports at pm2_control_block */ | 254 | u8 pm2_control_length; /* Byte Length of ports at pm2_control_block */ |
255 | u8 pm_timer_length; /* [V1] Byte Length of ports at pm_timer_block */ | 255 | u8 pm_timer_length; /* Byte Length of ports at pm_timer_block */ |
256 | u8 gpe0_block_length; /* [V1] Byte Length of ports at gpe0_block */ | 256 | u8 gpe0_block_length; /* Byte Length of ports at gpe0_block */ |
257 | u8 gpe1_block_length; /* [V1] Byte Length of ports at gpe1_block */ | 257 | u8 gpe1_block_length; /* Byte Length of ports at gpe1_block */ |
258 | u8 gpe1_base; /* [V1] Offset in GPE number space where GPE1 events start */ | 258 | u8 gpe1_base; /* Offset in GPE number space where GPE1 events start */ |
259 | u8 cst_control; /* [V1] Support for the _CST object and C-States change notification */ | 259 | u8 cst_control; /* Support for the _CST object and C-States change notification */ |
260 | u16 c2_latency; /* [V1] Worst case HW latency to enter/exit C2 state */ | 260 | u16 c2_latency; /* Worst case HW latency to enter/exit C2 state */ |
261 | u16 c3_latency; /* [V1] Worst case HW latency to enter/exit C3 state */ | 261 | u16 c3_latency; /* Worst case HW latency to enter/exit C3 state */ |
262 | u16 flush_size; /* [V1] Processor memory cache line width, in bytes */ | 262 | u16 flush_size; /* Processor memory cache line width, in bytes */ |
263 | u16 flush_stride; /* [V1] Number of flush strides that need to be read */ | 263 | u16 flush_stride; /* Number of flush strides that need to be read */ |
264 | u8 duty_offset; /* [V1] Processor duty cycle index in processor P_CNT reg */ | 264 | u8 duty_offset; /* Processor duty cycle index in processor P_CNT reg */ |
265 | u8 duty_width; /* [V1] Processor duty cycle value bit width in P_CNT register */ | 265 | u8 duty_width; /* Processor duty cycle value bit width in P_CNT register */ |
266 | u8 day_alarm; /* [V1] Index to day-of-month alarm in RTC CMOS RAM */ | 266 | u8 day_alarm; /* Index to day-of-month alarm in RTC CMOS RAM */ |
267 | u8 month_alarm; /* [V1] Index to month-of-year alarm in RTC CMOS RAM */ | 267 | u8 month_alarm; /* Index to month-of-year alarm in RTC CMOS RAM */ |
268 | u8 century; /* [V1] Index to century in RTC CMOS RAM */ | 268 | u8 century; /* Index to century in RTC CMOS RAM */ |
269 | u16 boot_flags; /* [V3] IA-PC Boot Architecture Flags (see below for individual flags) */ | 269 | u16 boot_flags; /* IA-PC Boot Architecture Flags (see below for individual flags) */ |
270 | u8 reserved; /* [V1] Reserved, must be zero */ | 270 | u8 reserved; /* Reserved, must be zero */ |
271 | u32 flags; /* [V1] Miscellaneous flag bits (see below for individual flags) */ | 271 | u32 flags; /* Miscellaneous flag bits (see below for individual flags) */ |
272 | /* End of Version 1 FADT fields (ACPI 1.0) */ | 272 | struct acpi_generic_address reset_register; /* 64-bit address of the Reset register */ |
273 | 273 | u8 reset_value; /* Value to write to the reset_register port to reset the system */ | |
274 | struct acpi_generic_address reset_register; /* [V3] 64-bit address of the Reset register */ | 274 | u16 arm_boot_flags; /* ARM-Specific Boot Flags (see below for individual flags) (ACPI 5.1) */ |
275 | u8 reset_value; /* [V3] Value to write to the reset_register port to reset the system */ | 275 | u8 minor_revision; /* FADT Minor Revision (ACPI 5.1) */ |
276 | u16 arm_boot_flags; /* [V5] ARM-Specific Boot Flags (see below for individual flags) (ACPI 5.1) */ | 276 | u64 Xfacs; /* 64-bit physical address of FACS */ |
277 | u8 minor_revision; /* [V5] FADT Minor Revision (ACPI 5.1) */ | 277 | u64 Xdsdt; /* 64-bit physical address of DSDT */ |
278 | u64 Xfacs; /* [V3] 64-bit physical address of FACS */ | 278 | struct acpi_generic_address xpm1a_event_block; /* 64-bit Extended Power Mgt 1a Event Reg Blk address */ |
279 | u64 Xdsdt; /* [V3] 64-bit physical address of DSDT */ | 279 | struct acpi_generic_address xpm1b_event_block; /* 64-bit Extended Power Mgt 1b Event Reg Blk address */ |
280 | struct acpi_generic_address xpm1a_event_block; /* [V3] 64-bit Extended Power Mgt 1a Event Reg Blk address */ | 280 | struct acpi_generic_address xpm1a_control_block; /* 64-bit Extended Power Mgt 1a Control Reg Blk address */ |
281 | struct acpi_generic_address xpm1b_event_block; /* [V3] 64-bit Extended Power Mgt 1b Event Reg Blk address */ | 281 | struct acpi_generic_address xpm1b_control_block; /* 64-bit Extended Power Mgt 1b Control Reg Blk address */ |
282 | struct acpi_generic_address xpm1a_control_block; /* [V3] 64-bit Extended Power Mgt 1a Control Reg Blk address */ | 282 | struct acpi_generic_address xpm2_control_block; /* 64-bit Extended Power Mgt 2 Control Reg Blk address */ |
283 | struct acpi_generic_address xpm1b_control_block; /* [V3] 64-bit Extended Power Mgt 1b Control Reg Blk address */ | 283 | struct acpi_generic_address xpm_timer_block; /* 64-bit Extended Power Mgt Timer Ctrl Reg Blk address */ |
284 | struct acpi_generic_address xpm2_control_block; /* [V3] 64-bit Extended Power Mgt 2 Control Reg Blk address */ | 284 | struct acpi_generic_address xgpe0_block; /* 64-bit Extended General Purpose Event 0 Reg Blk address */ |
285 | struct acpi_generic_address xpm_timer_block; /* [V3] 64-bit Extended Power Mgt Timer Ctrl Reg Blk address */ | 285 | struct acpi_generic_address xgpe1_block; /* 64-bit Extended General Purpose Event 1 Reg Blk address */ |
286 | struct acpi_generic_address xgpe0_block; /* [V3] 64-bit Extended General Purpose Event 0 Reg Blk address */ | 286 | struct acpi_generic_address sleep_control; /* 64-bit Sleep Control register (ACPI 5.0) */ |
287 | struct acpi_generic_address xgpe1_block; /* [V3] 64-bit Extended General Purpose Event 1 Reg Blk address */ | 287 | struct acpi_generic_address sleep_status; /* 64-bit Sleep Status register (ACPI 5.0) */ |
288 | /* End of Version 3 FADT fields (ACPI 2.0) */ | 288 | u64 hypervisor_id; /* Hypervisor Vendor ID (ACPI 6.0) */ |
289 | |||
290 | struct acpi_generic_address sleep_control; /* [V4] 64-bit Sleep Control register (ACPI 5.0) */ | ||
291 | /* End of Version 4 FADT fields (ACPI 3.0 and ACPI 4.0) (Field was originally reserved in ACPI 3.0) */ | ||
292 | |||
293 | struct acpi_generic_address sleep_status; /* [V5] 64-bit Sleep Status register (ACPI 5.0) */ | ||
294 | /* End of Version 5 FADT fields (ACPI 5.0) */ | ||
295 | |||
296 | u64 hypervisor_id; /* [V6] Hypervisor Vendor ID (ACPI 6.0) */ | ||
297 | /* End of Version 6 FADT fields (ACPI 6.0) */ | ||
298 | |||
299 | }; | 289 | }; |
300 | 290 | ||
301 | /* Masks for FADT IA-PC Boot Architecture Flags (boot_flags) [Vx]=Introduced in this FADT revision */ | 291 | /* Masks for FADT IA-PC Boot Architecture Flags (boot_flags) [Vx]=Introduced in this FADT revision */ |
@@ -311,8 +301,8 @@ struct acpi_table_fadt { | |||
311 | 301 | ||
312 | /* Masks for FADT ARM Boot Architecture Flags (arm_boot_flags) ACPI 5.1 */ | 302 | /* Masks for FADT ARM Boot Architecture Flags (arm_boot_flags) ACPI 5.1 */ |
313 | 303 | ||
314 | #define ACPI_FADT_PSCI_COMPLIANT (1) /* 00: [V5] PSCI 0.2+ is implemented */ | 304 | #define ACPI_FADT_PSCI_COMPLIANT (1) /* 00: [V5+] PSCI 0.2+ is implemented */ |
315 | #define ACPI_FADT_PSCI_USE_HVC (1<<1) /* 01: [V5] HVC must be used instead of SMC as the PSCI conduit */ | 305 | #define ACPI_FADT_PSCI_USE_HVC (1<<1) /* 01: [V5+] HVC must be used instead of SMC as the PSCI conduit */ |
316 | 306 | ||
317 | /* Masks for FADT flags */ | 307 | /* Masks for FADT flags */ |
318 | 308 | ||
@@ -409,34 +399,20 @@ struct acpi_table_desc { | |||
409 | * match the expected length. In other words, the length of the | 399 | * match the expected length. In other words, the length of the |
410 | * FADT is the bottom line as to what the version really is. | 400 | * FADT is the bottom line as to what the version really is. |
411 | * | 401 | * |
412 | * NOTE: There is no officialy released V2 of the FADT. This | 402 | * For reference, the values below are as follows: |
413 | * version was used only for prototyping and testing during the | 403 | * FADT V1 size: 0x074 |
414 | * 32-bit to 64-bit transition. V3 was the first official 64-bit | 404 | * FADT V2 size: 0x084 |
415 | * version of the FADT. | 405 | * FADT V3 size: 0x0F4 |
416 | * | 406 | * FADT V4 size: 0x0F4 |
417 | * Update this list of defines when a new version of the FADT is | 407 | * FADT V5 size: 0x10C |
418 | * added to the ACPI specification. Note that the FADT version is | 408 | * FADT V6 size: 0x114 |
419 | * only incremented when new fields are appended to the existing | ||
420 | * version. Therefore, the FADT version is competely independent | ||
421 | * from the version of the ACPI specification where it is | ||
422 | * defined. | ||
423 | * | ||
424 | * For reference, the various FADT lengths are as follows: | ||
425 | * FADT V1 size: 0x074 ACPI 1.0 | ||
426 | * FADT V3 size: 0x0F4 ACPI 2.0 | ||
427 | * FADT V4 size: 0x100 ACPI 3.0 and ACPI 4.0 | ||
428 | * FADT V5 size: 0x10C ACPI 5.0 | ||
429 | * FADT V6 size: 0x114 ACPI 6.0 | ||
430 | */ | 409 | */ |
431 | #define ACPI_FADT_V1_SIZE (u32) (ACPI_FADT_OFFSET (flags) + 4) /* ACPI 1.0 */ | 410 | #define ACPI_FADT_V1_SIZE (u32) (ACPI_FADT_OFFSET (flags) + 4) |
432 | #define ACPI_FADT_V3_SIZE (u32) (ACPI_FADT_OFFSET (sleep_control)) /* ACPI 2.0 */ | 411 | #define ACPI_FADT_V2_SIZE (u32) (ACPI_FADT_OFFSET (minor_revision) + 1) |
433 | #define ACPI_FADT_V4_SIZE (u32) (ACPI_FADT_OFFSET (sleep_status)) /* ACPI 3.0 and ACPI 4.0 */ | 412 | #define ACPI_FADT_V3_SIZE (u32) (ACPI_FADT_OFFSET (sleep_control)) |
434 | #define ACPI_FADT_V5_SIZE (u32) (ACPI_FADT_OFFSET (hypervisor_id)) /* ACPI 5.0 */ | 413 | #define ACPI_FADT_V5_SIZE (u32) (ACPI_FADT_OFFSET (hypervisor_id)) |
435 | #define ACPI_FADT_V6_SIZE (u32) (sizeof (struct acpi_table_fadt)) /* ACPI 6.0 */ | 414 | #define ACPI_FADT_V6_SIZE (u32) (sizeof (struct acpi_table_fadt)) |
436 | |||
437 | /* Update these when new FADT versions are added */ | ||
438 | 415 | ||
439 | #define ACPI_FADT_MAX_VERSION 6 | ||
440 | #define ACPI_FADT_CONFORMANCE "ACPI 6.1 (FADT version 6)" | 416 | #define ACPI_FADT_CONFORMANCE "ACPI 6.1 (FADT version 6)" |
441 | 417 | ||
442 | #endif /* __ACTBL_H__ */ | 418 | #endif /* __ACTBL_H__ */ |
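The reworked comment in this header states that the table length, not the revision byte, decides which FADT fields are actually present. A hedged sketch of how a consumer could apply the size macros for that check (the helper names are illustrative and not part of ACPICA; it assumes struct acpi_table_fadt and the ACPI_FADT_V*_SIZE macros exactly as defined above):

	/* Illustrative only: field presence derived from header.length,
	 * as the comment above recommends. */
	static bool fadt_has_sleep_status(const struct acpi_table_fadt *fadt)
	{
		/* A V5-sized table ends right before hypervisor_id, so every
		 * field up to and including sleep_status is present. */
		return fadt->header.length >= ACPI_FADT_V5_SIZE;
	}

	static bool fadt_has_hypervisor_id(const struct acpi_table_fadt *fadt)
	{
		/* Only a full V6-sized table carries hypervisor_id. */
		return fadt->header.length >= ACPI_FADT_V6_SIZE;
	}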
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h index a5d98d171866..e861a24f06f2 100644 --- a/include/acpi/platform/aclinux.h +++ b/include/acpi/platform/aclinux.h | |||
@@ -191,6 +191,9 @@ | |||
191 | #ifndef __init | 191 | #ifndef __init |
192 | #define __init | 192 | #define __init |
193 | #endif | 193 | #endif |
194 | #ifndef __iomem | ||
195 | #define __iomem | ||
196 | #endif | ||
194 | 197 | ||
195 | /* Host-dependent types and defines for user-space ACPICA */ | 198 | /* Host-dependent types and defines for user-space ACPICA */ |
196 | 199 | ||
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 7035b997aaa5..6aaf425cebc3 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h | |||
@@ -14,7 +14,7 @@ | |||
14 | * are obviously wrong for any sort of memory access. | 14 | * are obviously wrong for any sort of memory access. |
15 | */ | 15 | */ |
16 | #define BPF_REGISTER_MAX_RANGE (1024 * 1024 * 1024) | 16 | #define BPF_REGISTER_MAX_RANGE (1024 * 1024 * 1024) |
17 | #define BPF_REGISTER_MIN_RANGE -(1024 * 1024 * 1024) | 17 | #define BPF_REGISTER_MIN_RANGE -1 |
18 | 18 | ||
19 | struct bpf_reg_state { | 19 | struct bpf_reg_state { |
20 | enum bpf_reg_type type; | 20 | enum bpf_reg_type type; |
@@ -22,7 +22,8 @@ struct bpf_reg_state { | |||
22 | * Used to determine if any memory access using this register will | 22 | * Used to determine if any memory access using this register will |
23 | * result in a bad access. | 23 | * result in a bad access. |
24 | */ | 24 | */ |
25 | u64 min_value, max_value; | 25 | s64 min_value; |
26 | u64 max_value; | ||
26 | union { | 27 | union { |
27 | /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */ | 28 | /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */ |
28 | s64 imm; | 29 | s64 imm; |
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 9b9f65d99873..e35e6de633b9 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h | |||
@@ -22,7 +22,7 @@ extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, | |||
22 | unsigned char *vec); | 22 | unsigned char *vec); |
23 | extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, | 23 | extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, |
24 | unsigned long new_addr, unsigned long old_end, | 24 | unsigned long new_addr, unsigned long old_end, |
25 | pmd_t *old_pmd, pmd_t *new_pmd); | 25 | pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush); |
26 | extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, | 26 | extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, |
27 | unsigned long addr, pgprot_t newprot, | 27 | unsigned long addr, pgprot_t newprot, |
28 | int prot_numa); | 28 | int prot_numa); |
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 2d9b650047a5..d49e26c6cdc7 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h | |||
@@ -429,6 +429,7 @@ struct intel_iommu { | |||
429 | struct page_req_dsc *prq; | 429 | struct page_req_dsc *prq; |
430 | unsigned char prq_name[16]; /* Name for PRQ interrupt */ | 430 | unsigned char prq_name[16]; /* Name for PRQ interrupt */ |
431 | struct idr pasid_idr; | 431 | struct idr pasid_idr; |
432 | u32 pasid_max; | ||
432 | #endif | 433 | #endif |
433 | struct q_inval *qi; /* Queued invalidation info */ | 434 | struct q_inval *qi; /* Queued invalidation info */ |
434 | u32 *iommu_state; /* Store iommu states between suspend and resume.*/ | 435 | u32 *iommu_state; /* Store iommu states between suspend and resume.*/ |
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index ca1ad9ebbc92..a0649973ee5b 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h | |||
@@ -149,7 +149,7 @@ static inline bool inet6_exact_dif_match(struct net *net, struct sk_buff *skb) | |||
149 | { | 149 | { |
150 | #if defined(CONFIG_NET_L3_MASTER_DEV) | 150 | #if defined(CONFIG_NET_L3_MASTER_DEV) |
151 | if (!net->ipv4.sysctl_tcp_l3mdev_accept && | 151 | if (!net->ipv4.sysctl_tcp_l3mdev_accept && |
152 | ipv6_l3mdev_skb(IP6CB(skb)->flags)) | 152 | skb && ipv6_l3mdev_skb(IP6CB(skb)->flags)) |
153 | return true; | 153 | return true; |
154 | #endif | 154 | #endif |
155 | return false; | 155 | return false; |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 91ee3643ccc8..e16a2a980ea8 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -1619,7 +1619,7 @@ enum netdev_priv_flags { | |||
1619 | * @dcbnl_ops: Data Center Bridging netlink ops | 1619 | * @dcbnl_ops: Data Center Bridging netlink ops |
1620 | * @num_tc: Number of traffic classes in the net device | 1620 | * @num_tc: Number of traffic classes in the net device |
1621 | * @tc_to_txq: XXX: need comments on this one | 1621 | * @tc_to_txq: XXX: need comments on this one |
1622 | * @prio_tc_map XXX: need comments on this one | 1622 | * @prio_tc_map: XXX: need comments on this one |
1623 | * | 1623 | * |
1624 | * @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp | 1624 | * @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp |
1625 | * | 1625 | * |
@@ -3354,6 +3354,21 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); | |||
3354 | bool is_skb_forwardable(const struct net_device *dev, | 3354 | bool is_skb_forwardable(const struct net_device *dev, |
3355 | const struct sk_buff *skb); | 3355 | const struct sk_buff *skb); |
3356 | 3356 | ||
3357 | static __always_inline int ____dev_forward_skb(struct net_device *dev, | ||
3358 | struct sk_buff *skb) | ||
3359 | { | ||
3360 | if (skb_orphan_frags(skb, GFP_ATOMIC) || | ||
3361 | unlikely(!is_skb_forwardable(dev, skb))) { | ||
3362 | atomic_long_inc(&dev->rx_dropped); | ||
3363 | kfree_skb(skb); | ||
3364 | return NET_RX_DROP; | ||
3365 | } | ||
3366 | |||
3367 | skb_scrub_packet(skb, true); | ||
3368 | skb->priority = 0; | ||
3369 | return 0; | ||
3370 | } | ||
3371 | |||
3357 | void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); | 3372 | void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev); |
3358 | 3373 | ||
3359 | extern int netdev_budget; | 3374 | extern int netdev_budget; |
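The new ____dev_forward_skb() helper above centralizes the drop path: on failure it frees the skb, bumps rx_dropped and returns NET_RX_DROP; on success it scrubs the packet, clears its priority and returns 0. A hypothetical caller sketch, shown only to illustrate that contract (this is not the in-tree dev_forward_skb(); a non-zero return means the skb is already gone):

	static int example_forward(struct net_device *dev, struct sk_buff *skb)
	{
		int ret = ____dev_forward_skb(dev, skb);

		if (ret)
			return ret;	/* NET_RX_DROP: skb already freed and counted */

		skb->protocol = eth_type_trans(skb, dev);
		return netif_rx(skb);
	}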
diff --git a/include/linux/sched.h b/include/linux/sched.h index 348f51b0ec92..e9c009dc3a4a 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -2567,6 +2567,7 @@ extern void sched_autogroup_create_attach(struct task_struct *p); | |||
2567 | extern void sched_autogroup_detach(struct task_struct *p); | 2567 | extern void sched_autogroup_detach(struct task_struct *p); |
2568 | extern void sched_autogroup_fork(struct signal_struct *sig); | 2568 | extern void sched_autogroup_fork(struct signal_struct *sig); |
2569 | extern void sched_autogroup_exit(struct signal_struct *sig); | 2569 | extern void sched_autogroup_exit(struct signal_struct *sig); |
2570 | extern void sched_autogroup_exit_task(struct task_struct *p); | ||
2570 | #ifdef CONFIG_PROC_FS | 2571 | #ifdef CONFIG_PROC_FS |
2571 | extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m); | 2572 | extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m); |
2572 | extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice); | 2573 | extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice); |
@@ -2576,6 +2577,7 @@ static inline void sched_autogroup_create_attach(struct task_struct *p) { } | |||
2576 | static inline void sched_autogroup_detach(struct task_struct *p) { } | 2577 | static inline void sched_autogroup_detach(struct task_struct *p) { } |
2577 | static inline void sched_autogroup_fork(struct signal_struct *sig) { } | 2578 | static inline void sched_autogroup_fork(struct signal_struct *sig) { } |
2578 | static inline void sched_autogroup_exit(struct signal_struct *sig) { } | 2579 | static inline void sched_autogroup_exit(struct signal_struct *sig) { } |
2580 | static inline void sched_autogroup_exit_task(struct task_struct *p) { } | ||
2579 | #endif | 2581 | #endif |
2580 | 2582 | ||
2581 | extern int yield_to(struct task_struct *p, bool preempt); | 2583 | extern int yield_to(struct task_struct *p, bool preempt); |
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h index ab02a457da1f..e5d193440374 100644 --- a/include/linux/sunrpc/svc_xprt.h +++ b/include/linux/sunrpc/svc_xprt.h | |||
@@ -25,6 +25,7 @@ struct svc_xprt_ops { | |||
25 | void (*xpo_detach)(struct svc_xprt *); | 25 | void (*xpo_detach)(struct svc_xprt *); |
26 | void (*xpo_free)(struct svc_xprt *); | 26 | void (*xpo_free)(struct svc_xprt *); |
27 | int (*xpo_secure_port)(struct svc_rqst *); | 27 | int (*xpo_secure_port)(struct svc_rqst *); |
28 | void (*xpo_kill_temp_xprt)(struct svc_xprt *); | ||
28 | }; | 29 | }; |
29 | 30 | ||
30 | struct svc_xprt_class { | 31 | struct svc_xprt_class { |
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index f00bf667ec33..554671c81f4a 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h | |||
@@ -1018,7 +1018,7 @@ static inline void hci_set_drvdata(struct hci_dev *hdev, void *data) | |||
1018 | } | 1018 | } |
1019 | 1019 | ||
1020 | struct hci_dev *hci_dev_get(int index); | 1020 | struct hci_dev *hci_dev_get(int index); |
1021 | struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src); | 1021 | struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, u8 src_type); |
1022 | 1022 | ||
1023 | struct hci_dev *hci_alloc_dev(void); | 1023 | struct hci_dev *hci_alloc_dev(void); |
1024 | void hci_free_dev(struct hci_dev *hdev); | 1024 | void hci_free_dev(struct hci_dev *hdev); |
diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h index d15214d673b2..2a1abbf8da74 100644 --- a/include/net/gro_cells.h +++ b/include/net/gro_cells.h | |||
@@ -68,6 +68,9 @@ static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *de | |||
68 | struct gro_cell *cell = per_cpu_ptr(gcells->cells, i); | 68 | struct gro_cell *cell = per_cpu_ptr(gcells->cells, i); |
69 | 69 | ||
70 | __skb_queue_head_init(&cell->napi_skbs); | 70 | __skb_queue_head_init(&cell->napi_skbs); |
71 | |||
72 | set_bit(NAPI_STATE_NO_BUSY_POLL, &cell->napi.state); | ||
73 | |||
71 | netif_napi_add(dev, &cell->napi, gro_cell_poll, 64); | 74 | netif_napi_add(dev, &cell->napi, gro_cell_poll, 64); |
72 | napi_enable(&cell->napi); | 75 | napi_enable(&cell->napi); |
73 | } | 76 | } |
diff --git a/include/net/ip.h b/include/net/ip.h index 5413883ac47f..d3a107850a41 100644 --- a/include/net/ip.h +++ b/include/net/ip.h | |||
@@ -47,8 +47,7 @@ struct inet_skb_parm { | |||
47 | #define IPSKB_REROUTED BIT(4) | 47 | #define IPSKB_REROUTED BIT(4) |
48 | #define IPSKB_DOREDIRECT BIT(5) | 48 | #define IPSKB_DOREDIRECT BIT(5) |
49 | #define IPSKB_FRAG_PMTU BIT(6) | 49 | #define IPSKB_FRAG_PMTU BIT(6) |
50 | #define IPSKB_FRAG_SEGS BIT(7) | 50 | #define IPSKB_L3SLAVE BIT(7) |
51 | #define IPSKB_L3SLAVE BIT(8) | ||
52 | 51 | ||
53 | u16 frag_max_size; | 52 | u16 frag_max_size; |
54 | }; | 53 | }; |
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h index 20ed9699fcd4..1b1cf33cbfb0 100644 --- a/include/net/ip6_tunnel.h +++ b/include/net/ip6_tunnel.h | |||
@@ -146,6 +146,7 @@ static inline void ip6tunnel_xmit(struct sock *sk, struct sk_buff *skb, | |||
146 | { | 146 | { |
147 | int pkt_len, err; | 147 | int pkt_len, err; |
148 | 148 | ||
149 | memset(skb->cb, 0, sizeof(struct inet6_skb_parm)); | ||
149 | pkt_len = skb->len - skb_inner_network_offset(skb); | 150 | pkt_len = skb->len - skb_inner_network_offset(skb); |
150 | err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb); | 151 | err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb); |
151 | if (unlikely(net_xmit_eval(err))) | 152 | if (unlikely(net_xmit_eval(err))) |
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index b9314b48e39f..f390c3bb05c5 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h | |||
@@ -243,6 +243,7 @@ int fib_table_dump(struct fib_table *table, struct sk_buff *skb, | |||
243 | struct netlink_callback *cb); | 243 | struct netlink_callback *cb); |
244 | int fib_table_flush(struct net *net, struct fib_table *table); | 244 | int fib_table_flush(struct net *net, struct fib_table *table); |
245 | struct fib_table *fib_trie_unmerge(struct fib_table *main_tb); | 245 | struct fib_table *fib_trie_unmerge(struct fib_table *main_tb); |
246 | void fib_table_flush_external(struct fib_table *table); | ||
246 | void fib_free_table(struct fib_table *tb); | 247 | void fib_free_table(struct fib_table *tb); |
247 | 248 | ||
248 | #ifndef CONFIG_IP_MULTIPLE_TABLES | 249 | #ifndef CONFIG_IP_MULTIPLE_TABLES |
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index fc4f757107df..0940598c002f 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h | |||
@@ -170,7 +170,7 @@ static inline struct net *copy_net_ns(unsigned long flags, | |||
170 | extern struct list_head net_namespace_list; | 170 | extern struct list_head net_namespace_list; |
171 | 171 | ||
172 | struct net *get_net_ns_by_pid(pid_t pid); | 172 | struct net *get_net_ns_by_pid(pid_t pid); |
173 | struct net *get_net_ns_by_fd(int pid); | 173 | struct net *get_net_ns_by_fd(int fd); |
174 | 174 | ||
175 | #ifdef CONFIG_SYSCTL | 175 | #ifdef CONFIG_SYSCTL |
176 | void ipx_register_sysctl(void); | 176 | void ipx_register_sysctl(void); |
diff --git a/include/net/netfilter/nf_conntrack_labels.h b/include/net/netfilter/nf_conntrack_labels.h index 498814626e28..1723a67c0b0a 100644 --- a/include/net/netfilter/nf_conntrack_labels.h +++ b/include/net/netfilter/nf_conntrack_labels.h | |||
@@ -30,8 +30,7 @@ static inline struct nf_conn_labels *nf_ct_labels_ext_add(struct nf_conn *ct) | |||
30 | if (net->ct.labels_used == 0) | 30 | if (net->ct.labels_used == 0) |
31 | return NULL; | 31 | return NULL; |
32 | 32 | ||
33 | return nf_ct_ext_add_length(ct, NF_CT_EXT_LABELS, | 33 | return nf_ct_ext_add(ct, NF_CT_EXT_LABELS, GFP_ATOMIC); |
34 | sizeof(struct nf_conn_labels), GFP_ATOMIC); | ||
35 | #else | 34 | #else |
36 | return NULL; | 35 | return NULL; |
37 | #endif | 36 | #endif |
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 5031e072567b..d79d1e9b9546 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h | |||
@@ -145,7 +145,7 @@ static inline enum nft_registers nft_type_to_reg(enum nft_data_types type) | |||
145 | return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE; | 145 | return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE; |
146 | } | 146 | } |
147 | 147 | ||
148 | unsigned int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest); | 148 | int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest); |
149 | unsigned int nft_parse_register(const struct nlattr *attr); | 149 | unsigned int nft_parse_register(const struct nlattr *attr); |
150 | int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg); | 150 | int nft_dump_register(struct sk_buff *skb, unsigned int attr, unsigned int reg); |
151 | 151 | ||
@@ -542,7 +542,8 @@ void *nft_set_elem_init(const struct nft_set *set, | |||
542 | const struct nft_set_ext_tmpl *tmpl, | 542 | const struct nft_set_ext_tmpl *tmpl, |
543 | const u32 *key, const u32 *data, | 543 | const u32 *key, const u32 *data, |
544 | u64 timeout, gfp_t gfp); | 544 | u64 timeout, gfp_t gfp); |
545 | void nft_set_elem_destroy(const struct nft_set *set, void *elem); | 545 | void nft_set_elem_destroy(const struct nft_set *set, void *elem, |
546 | bool destroy_expr); | ||
546 | 547 | ||
547 | /** | 548 | /** |
548 | * struct nft_set_gc_batch_head - nf_tables set garbage collection batch | 549 | * struct nft_set_gc_batch_head - nf_tables set garbage collection batch |
@@ -693,7 +694,6 @@ static inline int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src) | |||
693 | { | 694 | { |
694 | int err; | 695 | int err; |
695 | 696 | ||
696 | __module_get(src->ops->type->owner); | ||
697 | if (src->ops->clone) { | 697 | if (src->ops->clone) { |
698 | dst->ops = src->ops; | 698 | dst->ops = src->ops; |
699 | err = src->ops->clone(dst, src); | 699 | err = src->ops->clone(dst, src); |
@@ -702,6 +702,8 @@ static inline int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src) | |||
702 | } else { | 702 | } else { |
703 | memcpy(dst, src, src->ops->size); | 703 | memcpy(dst, src, src->ops->size); |
704 | } | 704 | } |
705 | |||
706 | __module_get(src->ops->type->owner); | ||
705 | return 0; | 707 | return 0; |
706 | } | 708 | } |
707 | 709 | ||
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 87a7f42e7639..31acc3f4f132 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h | |||
@@ -152,7 +152,7 @@ void sctp_unhash_endpoint(struct sctp_endpoint *); | |||
152 | struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *, | 152 | struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *, |
153 | struct sctphdr *, struct sctp_association **, | 153 | struct sctphdr *, struct sctp_association **, |
154 | struct sctp_transport **); | 154 | struct sctp_transport **); |
155 | void sctp_err_finish(struct sock *, struct sctp_association *); | 155 | void sctp_err_finish(struct sock *, struct sctp_transport *); |
156 | void sctp_icmp_frag_needed(struct sock *, struct sctp_association *, | 156 | void sctp_icmp_frag_needed(struct sock *, struct sctp_association *, |
157 | struct sctp_transport *t, __u32 pmtu); | 157 | struct sctp_transport *t, __u32 pmtu); |
158 | void sctp_icmp_redirect(struct sock *, struct sctp_transport *, | 158 | void sctp_icmp_redirect(struct sock *, struct sctp_transport *, |
diff --git a/include/net/sock.h b/include/net/sock.h index 73c6b008f1b7..92b269709b9a 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
@@ -1596,11 +1596,11 @@ static inline void sock_put(struct sock *sk) | |||
1596 | void sock_gen_put(struct sock *sk); | 1596 | void sock_gen_put(struct sock *sk); |
1597 | 1597 | ||
1598 | int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested, | 1598 | int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested, |
1599 | unsigned int trim_cap); | 1599 | unsigned int trim_cap, bool refcounted); |
1600 | static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb, | 1600 | static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb, |
1601 | const int nested) | 1601 | const int nested) |
1602 | { | 1602 | { |
1603 | return __sk_receive_skb(sk, skb, nested, 1); | 1603 | return __sk_receive_skb(sk, skb, nested, 1, true); |
1604 | } | 1604 | } |
1605 | 1605 | ||
1606 | static inline void sk_tx_queue_set(struct sock *sk, int tx_queue) | 1606 | static inline void sk_tx_queue_set(struct sock *sk, int tx_queue) |
diff --git a/include/net/tcp.h b/include/net/tcp.h index 5b82d4d94834..123979fe12bf 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h | |||
@@ -805,7 +805,7 @@ static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb) | |||
805 | { | 805 | { |
806 | #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) | 806 | #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) |
807 | if (!net->ipv4.sysctl_tcp_l3mdev_accept && | 807 | if (!net->ipv4.sysctl_tcp_l3mdev_accept && |
808 | ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags)) | 808 | skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags)) |
809 | return true; | 809 | return true; |
810 | #endif | 810 | #endif |
811 | return false; | 811 | return false; |
@@ -1220,6 +1220,7 @@ static inline void tcp_prequeue_init(struct tcp_sock *tp) | |||
1220 | 1220 | ||
1221 | bool tcp_prequeue(struct sock *sk, struct sk_buff *skb); | 1221 | bool tcp_prequeue(struct sock *sk, struct sk_buff *skb); |
1222 | bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb); | 1222 | bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb); |
1223 | int tcp_filter(struct sock *sk, struct sk_buff *skb); | ||
1223 | 1224 | ||
1224 | #undef STATE_TRACE | 1225 | #undef STATE_TRACE |
1225 | 1226 | ||
diff --git a/include/uapi/linux/atm_zatm.h b/include/uapi/linux/atm_zatm.h index 5cd4d4d2dd1d..9c9c6ad55f14 100644 --- a/include/uapi/linux/atm_zatm.h +++ b/include/uapi/linux/atm_zatm.h | |||
@@ -14,7 +14,6 @@ | |||
14 | 14 | ||
15 | #include <linux/atmapi.h> | 15 | #include <linux/atmapi.h> |
16 | #include <linux/atmioc.h> | 16 | #include <linux/atmioc.h> |
17 | #include <linux/time.h> | ||
18 | 17 | ||
19 | #define ZATM_GETPOOL _IOW('a',ATMIOC_SARPRV+1,struct atmif_sioc) | 18 | #define ZATM_GETPOOL _IOW('a',ATMIOC_SARPRV+1,struct atmif_sioc) |
20 | /* get pool statistics */ | 19 | /* get pool statistics */ |
diff --git a/include/uapi/linux/bpqether.h b/include/uapi/linux/bpqether.h index a6c35e1a89ad..05865edaefda 100644 --- a/include/uapi/linux/bpqether.h +++ b/include/uapi/linux/bpqether.h | |||
@@ -5,9 +5,7 @@ | |||
5 | * Defines for the BPQETHER pseudo device driver | 5 | * Defines for the BPQETHER pseudo device driver |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #ifndef __LINUX_IF_ETHER_H | ||
9 | #include <linux/if_ether.h> | 8 | #include <linux/if_ether.h> |
10 | #endif | ||
11 | 9 | ||
12 | #define SIOCSBPQETHOPT (SIOCDEVPRIVATE+0) /* reserved */ | 10 | #define SIOCSBPQETHOPT (SIOCDEVPRIVATE+0) /* reserved */ |
13 | #define SIOCSBPQETHADDR (SIOCDEVPRIVATE+1) | 11 | #define SIOCSBPQETHADDR (SIOCDEVPRIVATE+1) |
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 300ef255d1e0..4ee67cb99143 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h | |||
@@ -972,12 +972,19 @@ struct kvm_irqfd { | |||
972 | __u8 pad[16]; | 972 | __u8 pad[16]; |
973 | }; | 973 | }; |
974 | 974 | ||
975 | /* For KVM_CAP_ADJUST_CLOCK */ | ||
976 | |||
977 | /* Do not use 1, KVM_CHECK_EXTENSION returned it before we had flags. */ | ||
978 | #define KVM_CLOCK_TSC_STABLE 2 | ||
979 | |||
975 | struct kvm_clock_data { | 980 | struct kvm_clock_data { |
976 | __u64 clock; | 981 | __u64 clock; |
977 | __u32 flags; | 982 | __u32 flags; |
978 | __u32 pad[9]; | 983 | __u32 pad[9]; |
979 | }; | 984 | }; |
980 | 985 | ||
986 | /* For KVM_CAP_SW_TLB */ | ||
987 | |||
981 | #define KVM_MMU_FSL_BOOKE_NOHV 0 | 988 | #define KVM_MMU_FSL_BOOKE_NOHV 0 |
982 | #define KVM_MMU_FSL_BOOKE_HV 1 | 989 | #define KVM_MMU_FSL_BOOKE_HV 1 |
983 | 990 | ||
diff --git a/init/Kconfig b/init/Kconfig index 34407f15e6d3..c4fbc1e55c25 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
@@ -1945,6 +1945,7 @@ config MODULE_FORCE_UNLOAD | |||
1945 | 1945 | ||
1946 | config MODVERSIONS | 1946 | config MODVERSIONS |
1947 | bool "Module versioning support" | 1947 | bool "Module versioning support" |
1948 | depends on BROKEN | ||
1948 | help | 1949 | help |
1949 | Usually, you have to use modules compiled with your kernel. | 1950 | Usually, you have to use modules compiled with your kernel. |
1950 | Saying Y here makes it sometimes possible to use modules | 1951 | Saying Y here makes it sometimes possible to use modules |
diff --git a/init/do_mounts_rd.c b/init/do_mounts_rd.c index 8a09b32e07d6..dd4104c9aa12 100644 --- a/init/do_mounts_rd.c +++ b/init/do_mounts_rd.c | |||
@@ -272,7 +272,7 @@ int __init rd_load_image(char *from) | |||
272 | sys_write(out_fd, buf, BLOCK_SIZE); | 272 | sys_write(out_fd, buf, BLOCK_SIZE); |
273 | #if !defined(CONFIG_S390) | 273 | #if !defined(CONFIG_S390) |
274 | if (!(i % 16)) { | 274 | if (!(i % 16)) { |
275 | printk("%c\b", rotator[rotate & 0x3]); | 275 | pr_cont("%c\b", rotator[rotate & 0x3]); |
276 | rotate++; | 276 | rotate++; |
277 | } | 277 | } |
278 | #endif | 278 | #endif |
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 570eeca7bdfa..ad1bc67aff1b 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c | |||
@@ -687,7 +687,8 @@ static void delete_all_elements(struct bpf_htab *htab) | |||
687 | 687 | ||
688 | hlist_for_each_entry_safe(l, n, head, hash_node) { | 688 | hlist_for_each_entry_safe(l, n, head, hash_node) { |
689 | hlist_del_rcu(&l->hash_node); | 689 | hlist_del_rcu(&l->hash_node); |
690 | htab_elem_free(htab, l); | 690 | if (l->state != HTAB_EXTRA_ELEM_USED) |
691 | htab_elem_free(htab, l); | ||
691 | } | 692 | } |
692 | } | 693 | } |
693 | } | 694 | } |
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 228f962447a5..237f3d6a7ddc 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c | |||
@@ -194,7 +194,7 @@ static int map_create(union bpf_attr *attr) | |||
194 | 194 | ||
195 | err = bpf_map_charge_memlock(map); | 195 | err = bpf_map_charge_memlock(map); |
196 | if (err) | 196 | if (err) |
197 | goto free_map; | 197 | goto free_map_nouncharge; |
198 | 198 | ||
199 | err = bpf_map_new_fd(map); | 199 | err = bpf_map_new_fd(map); |
200 | if (err < 0) | 200 | if (err < 0) |
@@ -204,6 +204,8 @@ static int map_create(union bpf_attr *attr) | |||
204 | return err; | 204 | return err; |
205 | 205 | ||
206 | free_map: | 206 | free_map: |
207 | bpf_map_uncharge_memlock(map); | ||
208 | free_map_nouncharge: | ||
207 | map->ops->map_free(map); | 209 | map->ops->map_free(map); |
208 | return err; | 210 | return err; |
209 | } | 211 | } |
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 99a7e5b388f2..6a936159c6e0 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c | |||
@@ -216,8 +216,8 @@ static void print_verifier_state(struct bpf_verifier_state *state) | |||
216 | reg->map_ptr->key_size, | 216 | reg->map_ptr->key_size, |
217 | reg->map_ptr->value_size); | 217 | reg->map_ptr->value_size); |
218 | if (reg->min_value != BPF_REGISTER_MIN_RANGE) | 218 | if (reg->min_value != BPF_REGISTER_MIN_RANGE) |
219 | verbose(",min_value=%llu", | 219 | verbose(",min_value=%lld", |
220 | (unsigned long long)reg->min_value); | 220 | (long long)reg->min_value); |
221 | if (reg->max_value != BPF_REGISTER_MAX_RANGE) | 221 | if (reg->max_value != BPF_REGISTER_MAX_RANGE) |
222 | verbose(",max_value=%llu", | 222 | verbose(",max_value=%llu", |
223 | (unsigned long long)reg->max_value); | 223 | (unsigned long long)reg->max_value); |
@@ -758,7 +758,7 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off, | |||
758 | * index'es we need to make sure that whatever we use | 758 | * index'es we need to make sure that whatever we use |
759 | * will have a set floor within our range. | 759 | * will have a set floor within our range. |
760 | */ | 760 | */ |
761 | if ((s64)reg->min_value < 0) { | 761 | if (reg->min_value < 0) { |
762 | verbose("R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", | 762 | verbose("R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", |
763 | regno); | 763 | regno); |
764 | return -EACCES; | 764 | return -EACCES; |
@@ -1468,7 +1468,8 @@ static void check_reg_overflow(struct bpf_reg_state *reg) | |||
1468 | { | 1468 | { |
1469 | if (reg->max_value > BPF_REGISTER_MAX_RANGE) | 1469 | if (reg->max_value > BPF_REGISTER_MAX_RANGE) |
1470 | reg->max_value = BPF_REGISTER_MAX_RANGE; | 1470 | reg->max_value = BPF_REGISTER_MAX_RANGE; |
1471 | if ((s64)reg->min_value < BPF_REGISTER_MIN_RANGE) | 1471 | if (reg->min_value < BPF_REGISTER_MIN_RANGE || |
1472 | reg->min_value > BPF_REGISTER_MAX_RANGE) | ||
1472 | reg->min_value = BPF_REGISTER_MIN_RANGE; | 1473 | reg->min_value = BPF_REGISTER_MIN_RANGE; |
1473 | } | 1474 | } |
1474 | 1475 | ||
@@ -1476,7 +1477,8 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env, | |||
1476 | struct bpf_insn *insn) | 1477 | struct bpf_insn *insn) |
1477 | { | 1478 | { |
1478 | struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg; | 1479 | struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg; |
1479 | u64 min_val = BPF_REGISTER_MIN_RANGE, max_val = BPF_REGISTER_MAX_RANGE; | 1480 | s64 min_val = BPF_REGISTER_MIN_RANGE; |
1481 | u64 max_val = BPF_REGISTER_MAX_RANGE; | ||
1480 | bool min_set = false, max_set = false; | 1482 | bool min_set = false, max_set = false; |
1481 | u8 opcode = BPF_OP(insn->code); | 1483 | u8 opcode = BPF_OP(insn->code); |
1482 | 1484 | ||
@@ -1512,22 +1514,43 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env, | |||
1512 | return; | 1514 | return; |
1513 | } | 1515 | } |
1514 | 1516 | ||
1517 | /* If one of our values was at the end of our ranges then we can't just | ||
1518 | * do our normal operations to the register, we need to set the values | ||
1519 | * to the min/max since they are undefined. | ||
1520 | */ | ||
1521 | if (min_val == BPF_REGISTER_MIN_RANGE) | ||
1522 | dst_reg->min_value = BPF_REGISTER_MIN_RANGE; | ||
1523 | if (max_val == BPF_REGISTER_MAX_RANGE) | ||
1524 | dst_reg->max_value = BPF_REGISTER_MAX_RANGE; | ||
1525 | |||
1515 | switch (opcode) { | 1526 | switch (opcode) { |
1516 | case BPF_ADD: | 1527 | case BPF_ADD: |
1517 | dst_reg->min_value += min_val; | 1528 | if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) |
1518 | dst_reg->max_value += max_val; | 1529 | dst_reg->min_value += min_val; |
1530 | if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) | ||
1531 | dst_reg->max_value += max_val; | ||
1519 | break; | 1532 | break; |
1520 | case BPF_SUB: | 1533 | case BPF_SUB: |
1521 | dst_reg->min_value -= min_val; | 1534 | if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) |
1522 | dst_reg->max_value -= max_val; | 1535 | dst_reg->min_value -= min_val; |
1536 | if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) | ||
1537 | dst_reg->max_value -= max_val; | ||
1523 | break; | 1538 | break; |
1524 | case BPF_MUL: | 1539 | case BPF_MUL: |
1525 | dst_reg->min_value *= min_val; | 1540 | if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) |
1526 | dst_reg->max_value *= max_val; | 1541 | dst_reg->min_value *= min_val; |
1542 | if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) | ||
1543 | dst_reg->max_value *= max_val; | ||
1527 | break; | 1544 | break; |
1528 | case BPF_AND: | 1545 | case BPF_AND: |
1529 | /* & is special since it could end up with 0 bits set. */ | 1546 | /* Disallow AND'ing of negative numbers, ain't nobody got time |
1530 | dst_reg->min_value &= min_val; | 1547 | * for that. Otherwise the minimum is 0 and the max is the max |
1548 | * value we could AND against. | ||
1549 | */ | ||
1550 | if (min_val < 0) | ||
1551 | dst_reg->min_value = BPF_REGISTER_MIN_RANGE; | ||
1552 | else | ||
1553 | dst_reg->min_value = 0; | ||
1531 | dst_reg->max_value = max_val; | 1554 | dst_reg->max_value = max_val; |
1532 | break; | 1555 | break; |
1533 | case BPF_LSH: | 1556 | case BPF_LSH: |
@@ -1537,24 +1560,25 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env, | |||
1537 | */ | 1560 | */ |
1538 | if (min_val > ilog2(BPF_REGISTER_MAX_RANGE)) | 1561 | if (min_val > ilog2(BPF_REGISTER_MAX_RANGE)) |
1539 | dst_reg->min_value = BPF_REGISTER_MIN_RANGE; | 1562 | dst_reg->min_value = BPF_REGISTER_MIN_RANGE; |
1540 | else | 1563 | else if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE) |
1541 | dst_reg->min_value <<= min_val; | 1564 | dst_reg->min_value <<= min_val; |
1542 | 1565 | ||
1543 | if (max_val > ilog2(BPF_REGISTER_MAX_RANGE)) | 1566 | if (max_val > ilog2(BPF_REGISTER_MAX_RANGE)) |
1544 | dst_reg->max_value = BPF_REGISTER_MAX_RANGE; | 1567 | dst_reg->max_value = BPF_REGISTER_MAX_RANGE; |
1545 | else | 1568 | else if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) |
1546 | dst_reg->max_value <<= max_val; | 1569 | dst_reg->max_value <<= max_val; |
1547 | break; | 1570 | break; |
1548 | case BPF_RSH: | 1571 | case BPF_RSH: |
1549 | dst_reg->min_value >>= min_val; | 1572 | /* RSH by a negative number is undefined, and the BPF_RSH is an |
1550 | dst_reg->max_value >>= max_val; | 1573 | * unsigned shift, so make the appropriate casts. |
1551 | break; | ||
1552 | case BPF_MOD: | ||
1553 | /* % is special since it is an unsigned modulus, so the floor | ||
1554 | * will always be 0. | ||
1555 | */ | 1574 | */ |
1556 | dst_reg->min_value = 0; | 1575 | if (min_val < 0 || dst_reg->min_value < 0) |
1557 | dst_reg->max_value = max_val - 1; | 1576 | dst_reg->min_value = BPF_REGISTER_MIN_RANGE; |
1577 | else | ||
1578 | dst_reg->min_value = | ||
1579 | (u64)(dst_reg->min_value) >> min_val; | ||
1580 | if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE) | ||
1581 | dst_reg->max_value >>= max_val; | ||
1558 | break; | 1582 | break; |
1559 | default: | 1583 | default: |
1560 | reset_reg_range_values(regs, insn->dst_reg); | 1584 | reset_reg_range_values(regs, insn->dst_reg); |
diff --git a/kernel/events/core.c b/kernel/events/core.c index 0e292132efac..6ee1febdf6ff 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -902,6 +902,17 @@ list_update_cgroup_event(struct perf_event *event, | |||
902 | * this will always be called from the right CPU. | 902 | * this will always be called from the right CPU. |
903 | */ | 903 | */ |
904 | cpuctx = __get_cpu_context(ctx); | 904 | cpuctx = __get_cpu_context(ctx); |
905 | |||
906 | /* Only set/clear cpuctx->cgrp if current task uses event->cgrp. */ | ||
907 | if (perf_cgroup_from_task(current, ctx) != event->cgrp) { | ||
908 | /* | ||
909 | * We are removing the last cpu event in this context. | ||
910 | * If that event is not active in this cpu, cpuctx->cgrp | ||
911 | * should've been cleared by perf_cgroup_switch. | ||
912 | */ | ||
913 | WARN_ON_ONCE(!add && cpuctx->cgrp); | ||
914 | return; | ||
915 | } | ||
905 | cpuctx->cgrp = add ? event->cgrp : NULL; | 916 | cpuctx->cgrp = add ? event->cgrp : NULL; |
906 | } | 917 | } |
907 | 918 | ||
@@ -8018,6 +8029,7 @@ restart: | |||
8018 | * if <size> is not specified, the range is treated as a single address. | 8029 | * if <size> is not specified, the range is treated as a single address. |
8019 | */ | 8030 | */ |
8020 | enum { | 8031 | enum { |
8032 | IF_ACT_NONE = -1, | ||
8021 | IF_ACT_FILTER, | 8033 | IF_ACT_FILTER, |
8022 | IF_ACT_START, | 8034 | IF_ACT_START, |
8023 | IF_ACT_STOP, | 8035 | IF_ACT_STOP, |
@@ -8041,6 +8053,7 @@ static const match_table_t if_tokens = { | |||
8041 | { IF_SRC_KERNEL, "%u/%u" }, | 8053 | { IF_SRC_KERNEL, "%u/%u" }, |
8042 | { IF_SRC_FILEADDR, "%u@%s" }, | 8054 | { IF_SRC_FILEADDR, "%u@%s" }, |
8043 | { IF_SRC_KERNELADDR, "%u" }, | 8055 | { IF_SRC_KERNELADDR, "%u" }, |
8056 | { IF_ACT_NONE, NULL }, | ||
8044 | }; | 8057 | }; |
8045 | 8058 | ||
8046 | /* | 8059 | /* |
diff --git a/kernel/exit.c b/kernel/exit.c index 9d68c45ebbe3..3076f3089919 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
@@ -836,6 +836,7 @@ void __noreturn do_exit(long code) | |||
836 | */ | 836 | */ |
837 | perf_event_exit_task(tsk); | 837 | perf_event_exit_task(tsk); |
838 | 838 | ||
839 | sched_autogroup_exit_task(tsk); | ||
839 | cgroup_exit(tsk); | 840 | cgroup_exit(tsk); |
840 | 841 | ||
841 | /* | 842 | /* |
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 9c4d30483264..6b669593e7eb 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
@@ -1341,12 +1341,12 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
1341 | 1341 | ||
1342 | } else if (new->flags & IRQF_TRIGGER_MASK) { | 1342 | } else if (new->flags & IRQF_TRIGGER_MASK) { |
1343 | unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK; | 1343 | unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK; |
1344 | unsigned int omsk = irq_settings_get_trigger_mask(desc); | 1344 | unsigned int omsk = irqd_get_trigger_type(&desc->irq_data); |
1345 | 1345 | ||
1346 | if (nmsk != omsk) | 1346 | if (nmsk != omsk) |
1347 | /* hope the handler works with current trigger mode */ | 1347 | /* hope the handler works with current trigger mode */ |
1348 | pr_warn("irq %d uses trigger mode %u; requested %u\n", | 1348 | pr_warn("irq %d uses trigger mode %u; requested %u\n", |
1349 | irq, nmsk, omsk); | 1349 | irq, omsk, nmsk); |
1350 | } | 1350 | } |
1351 | 1351 | ||
1352 | *old_ptr = new; | 1352 | *old_ptr = new; |
diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h index 51c4b24b6328..c2b88490d857 100644 --- a/kernel/locking/lockdep_internals.h +++ b/kernel/locking/lockdep_internals.h | |||
@@ -46,6 +46,14 @@ enum { | |||
46 | (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ) | 46 | (LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ) |
47 | 47 | ||
48 | /* | 48 | /* |
49 | * CONFIG_PROVE_LOCKING_SMALL is defined for sparc. Sparc requires .text, | ||
50 | * .data and .bss to fit in required 32MB limit for the kernel. With | ||
51 | * PROVE_LOCKING we could go over this limit and cause system boot-up problems. | ||
52 | * So, reduce the static allocations for lockdeps related structures so that | ||
53 | * everything fits in current required size limit. | ||
54 | */ | ||
55 | #ifdef CONFIG_PROVE_LOCKING_SMALL | ||
56 | /* | ||
49 | * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies | 57 | * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies |
50 | * we track. | 58 | * we track. |
51 | * | 59 | * |
@@ -54,18 +62,24 @@ enum { | |||
54 | * table (if it's not there yet), and we check it for lock order | 62 | * table (if it's not there yet), and we check it for lock order |
55 | * conflicts and deadlocks. | 63 | * conflicts and deadlocks. |
56 | */ | 64 | */ |
65 | #define MAX_LOCKDEP_ENTRIES 16384UL | ||
66 | #define MAX_LOCKDEP_CHAINS_BITS 15 | ||
67 | #define MAX_STACK_TRACE_ENTRIES 262144UL | ||
68 | #else | ||
57 | #define MAX_LOCKDEP_ENTRIES 32768UL | 69 | #define MAX_LOCKDEP_ENTRIES 32768UL |
58 | 70 | ||
59 | #define MAX_LOCKDEP_CHAINS_BITS 16 | 71 | #define MAX_LOCKDEP_CHAINS_BITS 16 |
60 | #define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS) | ||
61 | |||
62 | #define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5) | ||
63 | 72 | ||
64 | /* | 73 | /* |
65 | * Stack-trace: tightly packed array of stack backtrace | 74 | * Stack-trace: tightly packed array of stack backtrace |
66 | * addresses. Protected by the hash_lock. | 75 | * addresses. Protected by the hash_lock. |
67 | */ | 76 | */ |
68 | #define MAX_STACK_TRACE_ENTRIES 524288UL | 77 | #define MAX_STACK_TRACE_ENTRIES 524288UL |
78 | #endif | ||
79 | |||
80 | #define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS) | ||
81 | |||
82 | #define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5) | ||
69 | 83 | ||
70 | extern struct list_head all_lock_classes; | 84 | extern struct list_head all_lock_classes; |
71 | extern struct lock_chain lock_chains[]; | 85 | extern struct lock_chain lock_chains[]; |
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 5028f4fd504a..f7a55e9ff2f7 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c | |||
@@ -783,8 +783,6 @@ static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from) | |||
783 | return ret; | 783 | return ret; |
784 | } | 784 | } |
785 | 785 | ||
786 | static void cont_flush(void); | ||
787 | |||
788 | static ssize_t devkmsg_read(struct file *file, char __user *buf, | 786 | static ssize_t devkmsg_read(struct file *file, char __user *buf, |
789 | size_t count, loff_t *ppos) | 787 | size_t count, loff_t *ppos) |
790 | { | 788 | { |
@@ -800,7 +798,6 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf, | |||
800 | if (ret) | 798 | if (ret) |
801 | return ret; | 799 | return ret; |
802 | raw_spin_lock_irq(&logbuf_lock); | 800 | raw_spin_lock_irq(&logbuf_lock); |
803 | cont_flush(); | ||
804 | while (user->seq == log_next_seq) { | 801 | while (user->seq == log_next_seq) { |
805 | if (file->f_flags & O_NONBLOCK) { | 802 | if (file->f_flags & O_NONBLOCK) { |
806 | ret = -EAGAIN; | 803 | ret = -EAGAIN; |
@@ -863,7 +860,6 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence) | |||
863 | return -ESPIPE; | 860 | return -ESPIPE; |
864 | 861 | ||
865 | raw_spin_lock_irq(&logbuf_lock); | 862 | raw_spin_lock_irq(&logbuf_lock); |
866 | cont_flush(); | ||
867 | switch (whence) { | 863 | switch (whence) { |
868 | case SEEK_SET: | 864 | case SEEK_SET: |
869 | /* the first record */ | 865 | /* the first record */ |
@@ -902,7 +898,6 @@ static unsigned int devkmsg_poll(struct file *file, poll_table *wait) | |||
902 | poll_wait(file, &log_wait, wait); | 898 | poll_wait(file, &log_wait, wait); |
903 | 899 | ||
904 | raw_spin_lock_irq(&logbuf_lock); | 900 | raw_spin_lock_irq(&logbuf_lock); |
905 | cont_flush(); | ||
906 | if (user->seq < log_next_seq) { | 901 | if (user->seq < log_next_seq) { |
907 | /* return error when data has vanished underneath us */ | 902 | /* return error when data has vanished underneath us */ |
908 | if (user->seq < log_first_seq) | 903 | if (user->seq < log_first_seq) |
@@ -1289,7 +1284,6 @@ static int syslog_print(char __user *buf, int size) | |||
1289 | size_t skip; | 1284 | size_t skip; |
1290 | 1285 | ||
1291 | raw_spin_lock_irq(&logbuf_lock); | 1286 | raw_spin_lock_irq(&logbuf_lock); |
1292 | cont_flush(); | ||
1293 | if (syslog_seq < log_first_seq) { | 1287 | if (syslog_seq < log_first_seq) { |
1294 | /* messages are gone, move to first one */ | 1288 | /* messages are gone, move to first one */ |
1295 | syslog_seq = log_first_seq; | 1289 | syslog_seq = log_first_seq; |
@@ -1349,7 +1343,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear) | |||
1349 | return -ENOMEM; | 1343 | return -ENOMEM; |
1350 | 1344 | ||
1351 | raw_spin_lock_irq(&logbuf_lock); | 1345 | raw_spin_lock_irq(&logbuf_lock); |
1352 | cont_flush(); | ||
1353 | if (buf) { | 1346 | if (buf) { |
1354 | u64 next_seq; | 1347 | u64 next_seq; |
1355 | u64 seq; | 1348 | u64 seq; |
@@ -1511,7 +1504,6 @@ int do_syslog(int type, char __user *buf, int len, int source) | |||
1511 | /* Number of chars in the log buffer */ | 1504 | /* Number of chars in the log buffer */ |
1512 | case SYSLOG_ACTION_SIZE_UNREAD: | 1505 | case SYSLOG_ACTION_SIZE_UNREAD: |
1513 | raw_spin_lock_irq(&logbuf_lock); | 1506 | raw_spin_lock_irq(&logbuf_lock); |
1514 | cont_flush(); | ||
1515 | if (syslog_seq < log_first_seq) { | 1507 | if (syslog_seq < log_first_seq) { |
1516 | /* messages are gone, move to first one */ | 1508 | /* messages are gone, move to first one */ |
1517 | syslog_seq = log_first_seq; | 1509 | syslog_seq = log_first_seq; |
@@ -3028,7 +3020,6 @@ void kmsg_dump(enum kmsg_dump_reason reason) | |||
3028 | dumper->active = true; | 3020 | dumper->active = true; |
3029 | 3021 | ||
3030 | raw_spin_lock_irqsave(&logbuf_lock, flags); | 3022 | raw_spin_lock_irqsave(&logbuf_lock, flags); |
3031 | cont_flush(); | ||
3032 | dumper->cur_seq = clear_seq; | 3023 | dumper->cur_seq = clear_seq; |
3033 | dumper->cur_idx = clear_idx; | 3024 | dumper->cur_idx = clear_idx; |
3034 | dumper->next_seq = log_next_seq; | 3025 | dumper->next_seq = log_next_seq; |
@@ -3119,7 +3110,6 @@ bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog, | |||
3119 | bool ret; | 3110 | bool ret; |
3120 | 3111 | ||
3121 | raw_spin_lock_irqsave(&logbuf_lock, flags); | 3112 | raw_spin_lock_irqsave(&logbuf_lock, flags); |
3122 | cont_flush(); | ||
3123 | ret = kmsg_dump_get_line_nolock(dumper, syslog, line, size, len); | 3113 | ret = kmsg_dump_get_line_nolock(dumper, syslog, line, size, len); |
3124 | raw_spin_unlock_irqrestore(&logbuf_lock, flags); | 3114 | raw_spin_unlock_irqrestore(&logbuf_lock, flags); |
3125 | 3115 | ||
@@ -3162,7 +3152,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog, | |||
3162 | goto out; | 3152 | goto out; |
3163 | 3153 | ||
3164 | raw_spin_lock_irqsave(&logbuf_lock, flags); | 3154 | raw_spin_lock_irqsave(&logbuf_lock, flags); |
3165 | cont_flush(); | ||
3166 | if (dumper->cur_seq < log_first_seq) { | 3155 | if (dumper->cur_seq < log_first_seq) { |
3167 | /* messages are gone, move to first available one */ | 3156 | /* messages are gone, move to first available one */ |
3168 | dumper->cur_seq = log_first_seq; | 3157 | dumper->cur_seq = log_first_seq; |
diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c index a5d966cb8891..f1c8fd566246 100644 --- a/kernel/sched/auto_group.c +++ b/kernel/sched/auto_group.c | |||
@@ -111,10 +111,13 @@ bool task_wants_autogroup(struct task_struct *p, struct task_group *tg) | |||
111 | { | 111 | { |
112 | if (tg != &root_task_group) | 112 | if (tg != &root_task_group) |
113 | return false; | 113 | return false; |
114 | |||
115 | /* | 114 | /* |
116 | * We can only assume the task group can't go away on us if | 115 | * If we race with autogroup_move_group() the caller can use the old |
117 | * autogroup_move_group() can see us on ->thread_group list. | 116 | * value of signal->autogroup but in this case sched_move_task() will |
117 | * be called again before autogroup_kref_put(). | ||
118 | * | ||
119 | * However, there is no way sched_autogroup_exit_task() could tell us | ||
120 | * to avoid autogroup->tg, so we abuse PF_EXITING flag for this case. | ||
118 | */ | 121 | */ |
119 | if (p->flags & PF_EXITING) | 122 | if (p->flags & PF_EXITING) |
120 | return false; | 123 | return false; |
@@ -122,6 +125,16 @@ bool task_wants_autogroup(struct task_struct *p, struct task_group *tg) | |||
122 | return true; | 125 | return true; |
123 | } | 126 | } |
124 | 127 | ||
128 | void sched_autogroup_exit_task(struct task_struct *p) | ||
129 | { | ||
130 | /* | ||
131 | * We are going to call exit_notify() and autogroup_move_group() can't | ||
132 | * see this thread after that: we can no longer use signal->autogroup. | ||
133 | * See the PF_EXITING check in task_wants_autogroup(). | ||
134 | */ | ||
135 | sched_move_task(p); | ||
136 | } | ||
137 | |||
125 | static void | 138 | static void |
126 | autogroup_move_group(struct task_struct *p, struct autogroup *ag) | 139 | autogroup_move_group(struct task_struct *p, struct autogroup *ag) |
127 | { | 140 | { |
@@ -138,13 +151,20 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag) | |||
138 | } | 151 | } |
139 | 152 | ||
140 | p->signal->autogroup = autogroup_kref_get(ag); | 153 | p->signal->autogroup = autogroup_kref_get(ag); |
141 | 154 | /* | |
142 | if (!READ_ONCE(sysctl_sched_autogroup_enabled)) | 155 | * We can't avoid sched_move_task() after we changed signal->autogroup, |
143 | goto out; | 156 | * this process can already run with task_group() == prev->tg or we can |
144 | 157 | * race with cgroup code which can read autogroup = prev under rq->lock. | |
158 | * In the latter case for_each_thread() can not miss a migrating thread, | ||
159 | * cpu_cgroup_attach() must not be possible after cgroup_exit() and it | ||
160 | * can't be removed from thread list, we hold ->siglock. | ||
161 | * | ||
162 | * If an exiting thread was already removed from thread list we rely on | ||
163 | * sched_autogroup_exit_task(). | ||
164 | */ | ||
145 | for_each_thread(p, t) | 165 | for_each_thread(p, t) |
146 | sched_move_task(t); | 166 | sched_move_task(t); |
147 | out: | 167 | |
148 | unlock_task_sighand(p, &flags); | 168 | unlock_task_sighand(p, &flags); |
149 | autogroup_kref_put(prev); | 169 | autogroup_kref_put(prev); |
150 | } | 170 | } |
diff --git a/kernel/taskstats.c b/kernel/taskstats.c index b3f05ee20d18..cbb387a265db 100644 --- a/kernel/taskstats.c +++ b/kernel/taskstats.c | |||
@@ -54,7 +54,11 @@ static const struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1 | |||
54 | [TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING }, | 54 | [TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING }, |
55 | [TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },}; | 55 | [TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },}; |
56 | 56 | ||
57 | static const struct nla_policy cgroupstats_cmd_get_policy[CGROUPSTATS_CMD_ATTR_MAX+1] = { | 57 | /* |
58 | * We have to use TASKSTATS_CMD_ATTR_MAX here, it is the maxattr in the family. | ||
59 | * Make sure they are always aligned. | ||
60 | */ | ||
61 | static const struct nla_policy cgroupstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = { | ||
58 | [CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 }, | 62 | [CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 }, |
59 | }; | 63 | }; |
60 | 64 | ||
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 2050a7652a86..da87b3cba5b3 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -1862,6 +1862,10 @@ static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops, | |||
1862 | 1862 | ||
1863 | /* Update rec->flags */ | 1863 | /* Update rec->flags */ |
1864 | do_for_each_ftrace_rec(pg, rec) { | 1864 | do_for_each_ftrace_rec(pg, rec) { |
1865 | |||
1866 | if (rec->flags & FTRACE_FL_DISABLED) | ||
1867 | continue; | ||
1868 | |||
1865 | /* We need to update only differences of filter_hash */ | 1869 | /* We need to update only differences of filter_hash */ |
1866 | in_old = !!ftrace_lookup_ip(old_hash, rec->ip); | 1870 | in_old = !!ftrace_lookup_ip(old_hash, rec->ip); |
1867 | in_new = !!ftrace_lookup_ip(new_hash, rec->ip); | 1871 | in_new = !!ftrace_lookup_ip(new_hash, rec->ip); |
@@ -1884,6 +1888,10 @@ rollback: | |||
1884 | 1888 | ||
1885 | /* Roll back what we did above */ | 1889 | /* Roll back what we did above */ |
1886 | do_for_each_ftrace_rec(pg, rec) { | 1890 | do_for_each_ftrace_rec(pg, rec) { |
1891 | |||
1892 | if (rec->flags & FTRACE_FL_DISABLED) | ||
1893 | continue; | ||
1894 | |||
1887 | if (rec == end) | 1895 | if (rec == end) |
1888 | goto err_out; | 1896 | goto err_out; |
1889 | 1897 | ||
@@ -2397,6 +2405,10 @@ void __weak ftrace_replace_code(int enable) | |||
2397 | return; | 2405 | return; |
2398 | 2406 | ||
2399 | do_for_each_ftrace_rec(pg, rec) { | 2407 | do_for_each_ftrace_rec(pg, rec) { |
2408 | |||
2409 | if (rec->flags & FTRACE_FL_DISABLED) | ||
2410 | continue; | ||
2411 | |||
2400 | failed = __ftrace_replace_code(rec, enable); | 2412 | failed = __ftrace_replace_code(rec, enable); |
2401 | if (failed) { | 2413 | if (failed) { |
2402 | ftrace_bug(failed, rec); | 2414 | ftrace_bug(failed, rec); |
@@ -2763,7 +2775,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command) | |||
2763 | struct dyn_ftrace *rec; | 2775 | struct dyn_ftrace *rec; |
2764 | 2776 | ||
2765 | do_for_each_ftrace_rec(pg, rec) { | 2777 | do_for_each_ftrace_rec(pg, rec) { |
2766 | if (FTRACE_WARN_ON_ONCE(rec->flags)) | 2778 | if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED)) |
2767 | pr_warn(" %pS flags:%lx\n", | 2779 | pr_warn(" %pS flags:%lx\n", |
2768 | (void *)rec->ip, rec->flags); | 2780 | (void *)rec->ip, rec->flags); |
2769 | } while_for_each_ftrace_rec(); | 2781 | } while_for_each_ftrace_rec(); |
@@ -3598,6 +3610,10 @@ match_records(struct ftrace_hash *hash, char *func, int len, char *mod) | |||
3598 | goto out_unlock; | 3610 | goto out_unlock; |
3599 | 3611 | ||
3600 | do_for_each_ftrace_rec(pg, rec) { | 3612 | do_for_each_ftrace_rec(pg, rec) { |
3613 | |||
3614 | if (rec->flags & FTRACE_FL_DISABLED) | ||
3615 | continue; | ||
3616 | |||
3601 | if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) { | 3617 | if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) { |
3602 | ret = enter_record(hash, rec, clear_filter); | 3618 | ret = enter_record(hash, rec, clear_filter); |
3603 | if (ret < 0) { | 3619 | if (ret < 0) { |
@@ -3793,6 +3809,9 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | |||
3793 | 3809 | ||
3794 | do_for_each_ftrace_rec(pg, rec) { | 3810 | do_for_each_ftrace_rec(pg, rec) { |
3795 | 3811 | ||
3812 | if (rec->flags & FTRACE_FL_DISABLED) | ||
3813 | continue; | ||
3814 | |||
3796 | if (!ftrace_match_record(rec, &func_g, NULL, 0)) | 3815 | if (!ftrace_match_record(rec, &func_g, NULL, 0)) |
3797 | continue; | 3816 | continue; |
3798 | 3817 | ||
@@ -4685,6 +4704,9 @@ ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer) | |||
4685 | 4704 | ||
4686 | do_for_each_ftrace_rec(pg, rec) { | 4705 | do_for_each_ftrace_rec(pg, rec) { |
4687 | 4706 | ||
4707 | if (rec->flags & FTRACE_FL_DISABLED) | ||
4708 | continue; | ||
4709 | |||
4688 | if (ftrace_match_record(rec, &func_g, NULL, 0)) { | 4710 | if (ftrace_match_record(rec, &func_g, NULL, 0)) { |
4689 | /* if it is in the array */ | 4711 | /* if it is in the array */ |
4690 | exists = false; | 4712 | exists = false; |
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index b01e547d4d04..a6c8db1d62f6 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
@@ -1085,6 +1085,9 @@ config PROVE_LOCKING | |||
1085 | 1085 | ||
1086 | For more details, see Documentation/locking/lockdep-design.txt. | 1086 | For more details, see Documentation/locking/lockdep-design.txt. |
1087 | 1087 | ||
1088 | config PROVE_LOCKING_SMALL | ||
1089 | bool | ||
1090 | |||
1088 | config LOCKDEP | 1091 | config LOCKDEP |
1089 | bool | 1092 | bool |
1090 | depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT | 1093 | depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT |
diff --git a/lib/iov_iter.c b/lib/iov_iter.c index f0c7f1481bae..f2bd21b93dfc 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c | |||
@@ -683,10 +683,11 @@ static void pipe_advance(struct iov_iter *i, size_t size) | |||
683 | struct pipe_inode_info *pipe = i->pipe; | 683 | struct pipe_inode_info *pipe = i->pipe; |
684 | struct pipe_buffer *buf; | 684 | struct pipe_buffer *buf; |
685 | int idx = i->idx; | 685 | int idx = i->idx; |
686 | size_t off = i->iov_offset; | 686 | size_t off = i->iov_offset, orig_sz; |
687 | 687 | ||
688 | if (unlikely(i->count < size)) | 688 | if (unlikely(i->count < size)) |
689 | size = i->count; | 689 | size = i->count; |
690 | orig_sz = size; | ||
690 | 691 | ||
691 | if (size) { | 692 | if (size) { |
692 | if (off) /* make it relative to the beginning of buffer */ | 693 | if (off) /* make it relative to the beginning of buffer */ |
@@ -713,6 +714,7 @@ static void pipe_advance(struct iov_iter *i, size_t size) | |||
713 | pipe->nrbufs--; | 714 | pipe->nrbufs--; |
714 | } | 715 | } |
715 | } | 716 | } |
717 | i->count -= orig_sz; | ||
716 | } | 718 | } |
717 | 719 | ||
718 | void iov_iter_advance(struct iov_iter *i, size_t size) | 720 | void iov_iter_advance(struct iov_iter *i, size_t size) |
diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c index 5464c8744ea9..e24388a863a7 100644 --- a/lib/mpi/mpi-pow.c +++ b/lib/mpi/mpi-pow.c | |||
@@ -64,8 +64,13 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod) | |||
64 | if (!esize) { | 64 | if (!esize) { |
65 | /* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0 | 65 | /* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0 |
66 | * depending on if MOD equals 1. */ | 66 | * depending on if MOD equals 1. */ |
67 | rp[0] = 1; | ||
68 | res->nlimbs = (msize == 1 && mod->d[0] == 1) ? 0 : 1; | 67 | res->nlimbs = (msize == 1 && mod->d[0] == 1) ? 0 : 1; |
68 | if (res->nlimbs) { | ||
69 | if (mpi_resize(res, 1) < 0) | ||
70 | goto enomem; | ||
71 | rp = res->d; | ||
72 | rp[0] = 1; | ||
73 | } | ||
69 | res->sign = 0; | 74 | res->sign = 0; |
70 | goto leave; | 75 | goto leave; |
71 | } | 76 | } |
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index cdcd25cb30fe..eff3de359d50 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
@@ -1426,11 +1426,12 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, | |||
1426 | 1426 | ||
1427 | bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, | 1427 | bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, |
1428 | unsigned long new_addr, unsigned long old_end, | 1428 | unsigned long new_addr, unsigned long old_end, |
1429 | pmd_t *old_pmd, pmd_t *new_pmd) | 1429 | pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush) |
1430 | { | 1430 | { |
1431 | spinlock_t *old_ptl, *new_ptl; | 1431 | spinlock_t *old_ptl, *new_ptl; |
1432 | pmd_t pmd; | 1432 | pmd_t pmd; |
1433 | struct mm_struct *mm = vma->vm_mm; | 1433 | struct mm_struct *mm = vma->vm_mm; |
1434 | bool force_flush = false; | ||
1434 | 1435 | ||
1435 | if ((old_addr & ~HPAGE_PMD_MASK) || | 1436 | if ((old_addr & ~HPAGE_PMD_MASK) || |
1436 | (new_addr & ~HPAGE_PMD_MASK) || | 1437 | (new_addr & ~HPAGE_PMD_MASK) || |
@@ -1455,6 +1456,8 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, | |||
1455 | new_ptl = pmd_lockptr(mm, new_pmd); | 1456 | new_ptl = pmd_lockptr(mm, new_pmd); |
1456 | if (new_ptl != old_ptl) | 1457 | if (new_ptl != old_ptl) |
1457 | spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); | 1458 | spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); |
1459 | if (pmd_present(*old_pmd) && pmd_dirty(*old_pmd)) | ||
1460 | force_flush = true; | ||
1458 | pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd); | 1461 | pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd); |
1459 | VM_BUG_ON(!pmd_none(*new_pmd)); | 1462 | VM_BUG_ON(!pmd_none(*new_pmd)); |
1460 | 1463 | ||
@@ -1467,6 +1470,10 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, | |||
1467 | set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd)); | 1470 | set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd)); |
1468 | if (new_ptl != old_ptl) | 1471 | if (new_ptl != old_ptl) |
1469 | spin_unlock(new_ptl); | 1472 | spin_unlock(new_ptl); |
1473 | if (force_flush) | ||
1474 | flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE); | ||
1475 | else | ||
1476 | *need_flush = true; | ||
1470 | spin_unlock(old_ptl); | 1477 | spin_unlock(old_ptl); |
1471 | return true; | 1478 | return true; |
1472 | } | 1479 | } |
diff --git a/mm/mremap.c b/mm/mremap.c index da22ad2a5678..6ccecc03f56a 100644 --- a/mm/mremap.c +++ b/mm/mremap.c | |||
@@ -104,11 +104,13 @@ static pte_t move_soft_dirty_pte(pte_t pte) | |||
104 | static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, | 104 | static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, |
105 | unsigned long old_addr, unsigned long old_end, | 105 | unsigned long old_addr, unsigned long old_end, |
106 | struct vm_area_struct *new_vma, pmd_t *new_pmd, | 106 | struct vm_area_struct *new_vma, pmd_t *new_pmd, |
107 | unsigned long new_addr, bool need_rmap_locks) | 107 | unsigned long new_addr, bool need_rmap_locks, bool *need_flush) |
108 | { | 108 | { |
109 | struct mm_struct *mm = vma->vm_mm; | 109 | struct mm_struct *mm = vma->vm_mm; |
110 | pte_t *old_pte, *new_pte, pte; | 110 | pte_t *old_pte, *new_pte, pte; |
111 | spinlock_t *old_ptl, *new_ptl; | 111 | spinlock_t *old_ptl, *new_ptl; |
112 | bool force_flush = false; | ||
113 | unsigned long len = old_end - old_addr; | ||
112 | 114 | ||
113 | /* | 115 | /* |
114 | * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma | 116 | * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma |
@@ -146,6 +148,14 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, | |||
146 | new_pte++, new_addr += PAGE_SIZE) { | 148 | new_pte++, new_addr += PAGE_SIZE) { |
147 | if (pte_none(*old_pte)) | 149 | if (pte_none(*old_pte)) |
148 | continue; | 150 | continue; |
151 | |||
152 | /* | ||
153 | * We are remapping a dirty PTE, make sure to | ||
154 | * flush TLB before we drop the PTL for the | ||
155 | * old PTE or we may race with page_mkclean(). | ||
156 | */ | ||
157 | if (pte_present(*old_pte) && pte_dirty(*old_pte)) | ||
158 | force_flush = true; | ||
149 | pte = ptep_get_and_clear(mm, old_addr, old_pte); | 159 | pte = ptep_get_and_clear(mm, old_addr, old_pte); |
150 | pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr); | 160 | pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr); |
151 | pte = move_soft_dirty_pte(pte); | 161 | pte = move_soft_dirty_pte(pte); |
@@ -156,6 +166,10 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, | |||
156 | if (new_ptl != old_ptl) | 166 | if (new_ptl != old_ptl) |
157 | spin_unlock(new_ptl); | 167 | spin_unlock(new_ptl); |
158 | pte_unmap(new_pte - 1); | 168 | pte_unmap(new_pte - 1); |
169 | if (force_flush) | ||
170 | flush_tlb_range(vma, old_end - len, old_end); | ||
171 | else | ||
172 | *need_flush = true; | ||
159 | pte_unmap_unlock(old_pte - 1, old_ptl); | 173 | pte_unmap_unlock(old_pte - 1, old_ptl); |
160 | if (need_rmap_locks) | 174 | if (need_rmap_locks) |
161 | drop_rmap_locks(vma); | 175 | drop_rmap_locks(vma); |
@@ -201,13 +215,12 @@ unsigned long move_page_tables(struct vm_area_struct *vma, | |||
201 | if (need_rmap_locks) | 215 | if (need_rmap_locks) |
202 | take_rmap_locks(vma); | 216 | take_rmap_locks(vma); |
203 | moved = move_huge_pmd(vma, old_addr, new_addr, | 217 | moved = move_huge_pmd(vma, old_addr, new_addr, |
204 | old_end, old_pmd, new_pmd); | 218 | old_end, old_pmd, new_pmd, |
219 | &need_flush); | ||
205 | if (need_rmap_locks) | 220 | if (need_rmap_locks) |
206 | drop_rmap_locks(vma); | 221 | drop_rmap_locks(vma); |
207 | if (moved) { | 222 | if (moved) |
208 | need_flush = true; | ||
209 | continue; | 223 | continue; |
210 | } | ||
211 | } | 224 | } |
212 | split_huge_pmd(vma, old_pmd, old_addr); | 225 | split_huge_pmd(vma, old_pmd, old_addr); |
213 | if (pmd_trans_unstable(old_pmd)) | 226 | if (pmd_trans_unstable(old_pmd)) |
@@ -220,11 +233,10 @@ unsigned long move_page_tables(struct vm_area_struct *vma, | |||
220 | extent = next - new_addr; | 233 | extent = next - new_addr; |
221 | if (extent > LATENCY_LIMIT) | 234 | if (extent > LATENCY_LIMIT) |
222 | extent = LATENCY_LIMIT; | 235 | extent = LATENCY_LIMIT; |
223 | move_ptes(vma, old_pmd, old_addr, old_addr + extent, | 236 | move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma, |
224 | new_vma, new_pmd, new_addr, need_rmap_locks); | 237 | new_pmd, new_addr, need_rmap_locks, &need_flush); |
225 | need_flush = true; | ||
226 | } | 238 | } |
227 | if (likely(need_flush)) | 239 | if (need_flush) |
228 | flush_tlb_range(vma, old_end-len, old_addr); | 240 | flush_tlb_range(vma, old_end-len, old_addr); |
229 | 241 | ||
230 | mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); | 242 | mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); |
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c index e034afbd1bb0..08ce36147c4c 100644 --- a/net/batman-adv/hard-interface.c +++ b/net/batman-adv/hard-interface.c | |||
@@ -652,6 +652,7 @@ void batadv_hardif_disable_interface(struct batadv_hard_iface *hard_iface, | |||
652 | batadv_softif_destroy_sysfs(hard_iface->soft_iface); | 652 | batadv_softif_destroy_sysfs(hard_iface->soft_iface); |
653 | } | 653 | } |
654 | 654 | ||
655 | hard_iface->soft_iface = NULL; | ||
655 | batadv_hardif_put(hard_iface); | 656 | batadv_hardif_put(hard_iface); |
656 | 657 | ||
657 | out: | 658 | out: |
diff --git a/net/batman-adv/tp_meter.c b/net/batman-adv/tp_meter.c index 2333777f919d..8af1611b8ab2 100644 --- a/net/batman-adv/tp_meter.c +++ b/net/batman-adv/tp_meter.c | |||
@@ -837,6 +837,7 @@ static int batadv_tp_send(void *arg) | |||
837 | primary_if = batadv_primary_if_get_selected(bat_priv); | 837 | primary_if = batadv_primary_if_get_selected(bat_priv); |
838 | if (unlikely(!primary_if)) { | 838 | if (unlikely(!primary_if)) { |
839 | err = BATADV_TP_REASON_DST_UNREACHABLE; | 839 | err = BATADV_TP_REASON_DST_UNREACHABLE; |
840 | tp_vars->reason = err; | ||
840 | goto out; | 841 | goto out; |
841 | } | 842 | } |
842 | 843 | ||
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c index d020299baba4..1904a93f47d5 100644 --- a/net/bluetooth/6lowpan.c +++ b/net/bluetooth/6lowpan.c | |||
@@ -1090,7 +1090,6 @@ static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type, | |||
1090 | { | 1090 | { |
1091 | struct hci_conn *hcon; | 1091 | struct hci_conn *hcon; |
1092 | struct hci_dev *hdev; | 1092 | struct hci_dev *hdev; |
1093 | bdaddr_t *src = BDADDR_ANY; | ||
1094 | int n; | 1093 | int n; |
1095 | 1094 | ||
1096 | n = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu", | 1095 | n = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu", |
@@ -1101,7 +1100,8 @@ static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type, | |||
1101 | if (n < 7) | 1100 | if (n < 7) |
1102 | return -EINVAL; | 1101 | return -EINVAL; |
1103 | 1102 | ||
1104 | hdev = hci_get_route(addr, src); | 1103 | /* The LE_PUBLIC address type is ignored because of BDADDR_ANY */ |
1104 | hdev = hci_get_route(addr, BDADDR_ANY, BDADDR_LE_PUBLIC); | ||
1105 | if (!hdev) | 1105 | if (!hdev) |
1106 | return -ENOENT; | 1106 | return -ENOENT; |
1107 | 1107 | ||
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 3809617aa98d..dc59eae54717 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c | |||
@@ -613,7 +613,7 @@ int hci_conn_del(struct hci_conn *conn) | |||
613 | return 0; | 613 | return 0; |
614 | } | 614 | } |
615 | 615 | ||
616 | struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src) | 616 | struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type) |
617 | { | 617 | { |
618 | int use_src = bacmp(src, BDADDR_ANY); | 618 | int use_src = bacmp(src, BDADDR_ANY); |
619 | struct hci_dev *hdev = NULL, *d; | 619 | struct hci_dev *hdev = NULL, *d; |
@@ -634,7 +634,29 @@ struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src) | |||
634 | */ | 634 | */ |
635 | 635 | ||
636 | if (use_src) { | 636 | if (use_src) { |
637 | if (!bacmp(&d->bdaddr, src)) { | 637 | bdaddr_t id_addr; |
638 | u8 id_addr_type; | ||
639 | |||
640 | if (src_type == BDADDR_BREDR) { | ||
641 | if (!lmp_bredr_capable(d)) | ||
642 | continue; | ||
643 | bacpy(&id_addr, &d->bdaddr); | ||
644 | id_addr_type = BDADDR_BREDR; | ||
645 | } else { | ||
646 | if (!lmp_le_capable(d)) | ||
647 | continue; | ||
648 | |||
649 | hci_copy_identity_address(d, &id_addr, | ||
650 | &id_addr_type); | ||
651 | |||
652 | /* Convert from HCI to three-value type */ | ||
653 | if (id_addr_type == ADDR_LE_DEV_PUBLIC) | ||
654 | id_addr_type = BDADDR_LE_PUBLIC; | ||
655 | else | ||
656 | id_addr_type = BDADDR_LE_RANDOM; | ||
657 | } | ||
658 | |||
659 | if (!bacmp(&id_addr, src) && id_addr_type == src_type) { | ||
638 | hdev = d; break; | 660 | hdev = d; break; |
639 | } | 661 | } |
640 | } else { | 662 | } else { |
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index d4cad29b033f..577f1c01454a 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c | |||
@@ -7060,7 +7060,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, | |||
7060 | BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst, | 7060 | BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst, |
7061 | dst_type, __le16_to_cpu(psm)); | 7061 | dst_type, __le16_to_cpu(psm)); |
7062 | 7062 | ||
7063 | hdev = hci_get_route(dst, &chan->src); | 7063 | hdev = hci_get_route(dst, &chan->src, chan->src_type); |
7064 | if (!hdev) | 7064 | if (!hdev) |
7065 | return -EHOSTUNREACH; | 7065 | return -EHOSTUNREACH; |
7066 | 7066 | ||
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index 8e385a0ae60e..2f2cb5e27cdd 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c | |||
@@ -178,7 +178,7 @@ static void rfcomm_reparent_device(struct rfcomm_dev *dev) | |||
178 | struct hci_dev *hdev; | 178 | struct hci_dev *hdev; |
179 | struct hci_conn *conn; | 179 | struct hci_conn *conn; |
180 | 180 | ||
181 | hdev = hci_get_route(&dev->dst, &dev->src); | 181 | hdev = hci_get_route(&dev->dst, &dev->src, BDADDR_BREDR); |
182 | if (!hdev) | 182 | if (!hdev) |
183 | return; | 183 | return; |
184 | 184 | ||
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index f52bcbf2e58c..3125ce670c2f 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c | |||
@@ -219,7 +219,7 @@ static int sco_connect(struct sock *sk) | |||
219 | 219 | ||
220 | BT_DBG("%pMR -> %pMR", &sco_pi(sk)->src, &sco_pi(sk)->dst); | 220 | BT_DBG("%pMR -> %pMR", &sco_pi(sk)->src, &sco_pi(sk)->dst); |
221 | 221 | ||
222 | hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src); | 222 | hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src, BDADDR_BREDR); |
223 | if (!hdev) | 223 | if (!hdev) |
224 | return -EHOSTUNREACH; | 224 | return -EHOSTUNREACH; |
225 | 225 | ||
diff --git a/net/can/bcm.c b/net/can/bcm.c index 8e999ffdf28b..436a7537e6a9 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c | |||
@@ -77,7 +77,7 @@ | |||
77 | (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \ | 77 | (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \ |
78 | (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG)) | 78 | (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG)) |
79 | 79 | ||
80 | #define CAN_BCM_VERSION "20160617" | 80 | #define CAN_BCM_VERSION "20161123" |
81 | 81 | ||
82 | MODULE_DESCRIPTION("PF_CAN broadcast manager protocol"); | 82 | MODULE_DESCRIPTION("PF_CAN broadcast manager protocol"); |
83 | MODULE_LICENSE("Dual BSD/GPL"); | 83 | MODULE_LICENSE("Dual BSD/GPL"); |
@@ -109,8 +109,9 @@ struct bcm_op { | |||
109 | u32 count; | 109 | u32 count; |
110 | u32 nframes; | 110 | u32 nframes; |
111 | u32 currframe; | 111 | u32 currframe; |
112 | struct canfd_frame *frames; | 112 | /* void pointers to arrays of struct can[fd]_frame */ |
113 | struct canfd_frame *last_frames; | 113 | void *frames; |
114 | void *last_frames; | ||
114 | struct canfd_frame sframe; | 115 | struct canfd_frame sframe; |
115 | struct canfd_frame last_sframe; | 116 | struct canfd_frame last_sframe; |
116 | struct sock *sk; | 117 | struct sock *sk; |
@@ -681,7 +682,7 @@ static void bcm_rx_handler(struct sk_buff *skb, void *data) | |||
681 | 682 | ||
682 | if (op->flags & RX_FILTER_ID) { | 683 | if (op->flags & RX_FILTER_ID) { |
683 | /* the easiest case */ | 684 | /* the easiest case */ |
684 | bcm_rx_update_and_send(op, &op->last_frames[0], rxframe); | 685 | bcm_rx_update_and_send(op, op->last_frames, rxframe); |
685 | goto rx_starttimer; | 686 | goto rx_starttimer; |
686 | } | 687 | } |
687 | 688 | ||
@@ -1068,7 +1069,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, | |||
1068 | 1069 | ||
1069 | if (msg_head->nframes) { | 1070 | if (msg_head->nframes) { |
1070 | /* update CAN frames content */ | 1071 | /* update CAN frames content */ |
1071 | err = memcpy_from_msg((u8 *)op->frames, msg, | 1072 | err = memcpy_from_msg(op->frames, msg, |
1072 | msg_head->nframes * op->cfsiz); | 1073 | msg_head->nframes * op->cfsiz); |
1073 | if (err < 0) | 1074 | if (err < 0) |
1074 | return err; | 1075 | return err; |
@@ -1118,7 +1119,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, | |||
1118 | } | 1119 | } |
1119 | 1120 | ||
1120 | if (msg_head->nframes) { | 1121 | if (msg_head->nframes) { |
1121 | err = memcpy_from_msg((u8 *)op->frames, msg, | 1122 | err = memcpy_from_msg(op->frames, msg, |
1122 | msg_head->nframes * op->cfsiz); | 1123 | msg_head->nframes * op->cfsiz); |
1123 | if (err < 0) { | 1124 | if (err < 0) { |
1124 | if (op->frames != &op->sframe) | 1125 | if (op->frames != &op->sframe) |
@@ -1163,6 +1164,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, | |||
1163 | /* check flags */ | 1164 | /* check flags */ |
1164 | 1165 | ||
1165 | if (op->flags & RX_RTR_FRAME) { | 1166 | if (op->flags & RX_RTR_FRAME) { |
1167 | struct canfd_frame *frame0 = op->frames; | ||
1166 | 1168 | ||
1167 | /* no timers in RTR-mode */ | 1169 | /* no timers in RTR-mode */ |
1168 | hrtimer_cancel(&op->thrtimer); | 1170 | hrtimer_cancel(&op->thrtimer); |
@@ -1174,8 +1176,8 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, | |||
1174 | * prevent a full-load-loopback-test ... ;-] | 1176 | * prevent a full-load-loopback-test ... ;-] |
1175 | */ | 1177 | */ |
1176 | if ((op->flags & TX_CP_CAN_ID) || | 1178 | if ((op->flags & TX_CP_CAN_ID) || |
1177 | (op->frames[0].can_id == op->can_id)) | 1179 | (frame0->can_id == op->can_id)) |
1178 | op->frames[0].can_id = op->can_id & ~CAN_RTR_FLAG; | 1180 | frame0->can_id = op->can_id & ~CAN_RTR_FLAG; |
1179 | 1181 | ||
1180 | } else { | 1182 | } else { |
1181 | if (op->flags & SETTIMER) { | 1183 | if (op->flags & SETTIMER) { |
@@ -1549,24 +1551,31 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len, | |||
1549 | struct sockaddr_can *addr = (struct sockaddr_can *)uaddr; | 1551 | struct sockaddr_can *addr = (struct sockaddr_can *)uaddr; |
1550 | struct sock *sk = sock->sk; | 1552 | struct sock *sk = sock->sk; |
1551 | struct bcm_sock *bo = bcm_sk(sk); | 1553 | struct bcm_sock *bo = bcm_sk(sk); |
1554 | int ret = 0; | ||
1552 | 1555 | ||
1553 | if (len < sizeof(*addr)) | 1556 | if (len < sizeof(*addr)) |
1554 | return -EINVAL; | 1557 | return -EINVAL; |
1555 | 1558 | ||
1556 | if (bo->bound) | 1559 | lock_sock(sk); |
1557 | return -EISCONN; | 1560 | |
1561 | if (bo->bound) { | ||
1562 | ret = -EISCONN; | ||
1563 | goto fail; | ||
1564 | } | ||
1558 | 1565 | ||
1559 | /* bind a device to this socket */ | 1566 | /* bind a device to this socket */ |
1560 | if (addr->can_ifindex) { | 1567 | if (addr->can_ifindex) { |
1561 | struct net_device *dev; | 1568 | struct net_device *dev; |
1562 | 1569 | ||
1563 | dev = dev_get_by_index(&init_net, addr->can_ifindex); | 1570 | dev = dev_get_by_index(&init_net, addr->can_ifindex); |
1564 | if (!dev) | 1571 | if (!dev) { |
1565 | return -ENODEV; | 1572 | ret = -ENODEV; |
1566 | 1573 | goto fail; | |
1574 | } | ||
1567 | if (dev->type != ARPHRD_CAN) { | 1575 | if (dev->type != ARPHRD_CAN) { |
1568 | dev_put(dev); | 1576 | dev_put(dev); |
1569 | return -ENODEV; | 1577 | ret = -ENODEV; |
1578 | goto fail; | ||
1570 | } | 1579 | } |
1571 | 1580 | ||
1572 | bo->ifindex = dev->ifindex; | 1581 | bo->ifindex = dev->ifindex; |
@@ -1577,17 +1586,24 @@ static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len, | |||
1577 | bo->ifindex = 0; | 1586 | bo->ifindex = 0; |
1578 | } | 1587 | } |
1579 | 1588 | ||
1580 | bo->bound = 1; | ||
1581 | |||
1582 | if (proc_dir) { | 1589 | if (proc_dir) { |
1583 | /* unique socket address as filename */ | 1590 | /* unique socket address as filename */ |
1584 | sprintf(bo->procname, "%lu", sock_i_ino(sk)); | 1591 | sprintf(bo->procname, "%lu", sock_i_ino(sk)); |
1585 | bo->bcm_proc_read = proc_create_data(bo->procname, 0644, | 1592 | bo->bcm_proc_read = proc_create_data(bo->procname, 0644, |
1586 | proc_dir, | 1593 | proc_dir, |
1587 | &bcm_proc_fops, sk); | 1594 | &bcm_proc_fops, sk); |
1595 | if (!bo->bcm_proc_read) { | ||
1596 | ret = -ENOMEM; | ||
1597 | goto fail; | ||
1598 | } | ||
1588 | } | 1599 | } |
1589 | 1600 | ||
1590 | return 0; | 1601 | bo->bound = 1; |
1602 | |||
1603 | fail: | ||
1604 | release_sock(sk); | ||
1605 | |||
1606 | return ret; | ||
1591 | } | 1607 | } |
1592 | 1608 | ||
1593 | static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, | 1609 | static int bcm_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, |
diff --git a/net/core/dev.c b/net/core/dev.c index 820bac239738..6666b28b6815 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -1766,19 +1766,14 @@ EXPORT_SYMBOL_GPL(is_skb_forwardable); | |||
1766 | 1766 | ||
1767 | int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb) | 1767 | int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb) |
1768 | { | 1768 | { |
1769 | if (skb_orphan_frags(skb, GFP_ATOMIC) || | 1769 | int ret = ____dev_forward_skb(dev, skb); |
1770 | unlikely(!is_skb_forwardable(dev, skb))) { | ||
1771 | atomic_long_inc(&dev->rx_dropped); | ||
1772 | kfree_skb(skb); | ||
1773 | return NET_RX_DROP; | ||
1774 | } | ||
1775 | 1770 | ||
1776 | skb_scrub_packet(skb, true); | 1771 | if (likely(!ret)) { |
1777 | skb->priority = 0; | 1772 | skb->protocol = eth_type_trans(skb, dev); |
1778 | skb->protocol = eth_type_trans(skb, dev); | 1773 | skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); |
1779 | skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN); | 1774 | } |
1780 | 1775 | ||
1781 | return 0; | 1776 | return ret; |
1782 | } | 1777 | } |
1783 | EXPORT_SYMBOL_GPL(__dev_forward_skb); | 1778 | EXPORT_SYMBOL_GPL(__dev_forward_skb); |
1784 | 1779 | ||
@@ -2484,7 +2479,7 @@ int skb_checksum_help(struct sk_buff *skb) | |||
2484 | goto out; | 2479 | goto out; |
2485 | } | 2480 | } |
2486 | 2481 | ||
2487 | *(__sum16 *)(skb->data + offset) = csum_fold(csum); | 2482 | *(__sum16 *)(skb->data + offset) = csum_fold(csum) ?: CSUM_MANGLED_0; |
2488 | out_set_summed: | 2483 | out_set_summed: |
2489 | skb->ip_summed = CHECKSUM_NONE; | 2484 | skb->ip_summed = CHECKSUM_NONE; |
2490 | out: | 2485 | out: |
diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 977489820eb9..047a1752ece1 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c | |||
@@ -2479,6 +2479,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) | |||
2479 | case ETHTOOL_GET_TS_INFO: | 2479 | case ETHTOOL_GET_TS_INFO: |
2480 | case ETHTOOL_GEEE: | 2480 | case ETHTOOL_GEEE: |
2481 | case ETHTOOL_GTUNABLE: | 2481 | case ETHTOOL_GTUNABLE: |
2482 | case ETHTOOL_GLINKSETTINGS: | ||
2482 | break; | 2483 | break; |
2483 | default: | 2484 | default: |
2484 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) | 2485 | if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) |
diff --git a/net/core/filter.c b/net/core/filter.c index 00351cdf7d0c..b391209838ef 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
@@ -1628,6 +1628,19 @@ static inline int __bpf_rx_skb(struct net_device *dev, struct sk_buff *skb) | |||
1628 | return dev_forward_skb(dev, skb); | 1628 | return dev_forward_skb(dev, skb); |
1629 | } | 1629 | } |
1630 | 1630 | ||
1631 | static inline int __bpf_rx_skb_no_mac(struct net_device *dev, | ||
1632 | struct sk_buff *skb) | ||
1633 | { | ||
1634 | int ret = ____dev_forward_skb(dev, skb); | ||
1635 | |||
1636 | if (likely(!ret)) { | ||
1637 | skb->dev = dev; | ||
1638 | ret = netif_rx(skb); | ||
1639 | } | ||
1640 | |||
1641 | return ret; | ||
1642 | } | ||
1643 | |||
1631 | static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb) | 1644 | static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb) |
1632 | { | 1645 | { |
1633 | int ret; | 1646 | int ret; |
@@ -1647,6 +1660,51 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb) | |||
1647 | return ret; | 1660 | return ret; |
1648 | } | 1661 | } |
1649 | 1662 | ||
1663 | static int __bpf_redirect_no_mac(struct sk_buff *skb, struct net_device *dev, | ||
1664 | u32 flags) | ||
1665 | { | ||
1666 | /* skb->mac_len is not set on normal egress */ | ||
1667 | unsigned int mlen = skb->network_header - skb->mac_header; | ||
1668 | |||
1669 | __skb_pull(skb, mlen); | ||
1670 | |||
1671 | /* At ingress, the mac header has already been pulled once. | ||
1672 | * At egress, skb_pospull_rcsum has to be done in case that | ||
1673 | * the skb is originated from ingress (i.e. a forwarded skb) | ||
1674 | * to ensure that rcsum starts at net header. | ||
1675 | */ | ||
1676 | if (!skb_at_tc_ingress(skb)) | ||
1677 | skb_postpull_rcsum(skb, skb_mac_header(skb), mlen); | ||
1678 | skb_pop_mac_header(skb); | ||
1679 | skb_reset_mac_len(skb); | ||
1680 | return flags & BPF_F_INGRESS ? | ||
1681 | __bpf_rx_skb_no_mac(dev, skb) : __bpf_tx_skb(dev, skb); | ||
1682 | } | ||
1683 | |||
1684 | static int __bpf_redirect_common(struct sk_buff *skb, struct net_device *dev, | ||
1685 | u32 flags) | ||
1686 | { | ||
1687 | bpf_push_mac_rcsum(skb); | ||
1688 | return flags & BPF_F_INGRESS ? | ||
1689 | __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb); | ||
1690 | } | ||
1691 | |||
1692 | static int __bpf_redirect(struct sk_buff *skb, struct net_device *dev, | ||
1693 | u32 flags) | ||
1694 | { | ||
1695 | switch (dev->type) { | ||
1696 | case ARPHRD_TUNNEL: | ||
1697 | case ARPHRD_TUNNEL6: | ||
1698 | case ARPHRD_SIT: | ||
1699 | case ARPHRD_IPGRE: | ||
1700 | case ARPHRD_VOID: | ||
1701 | case ARPHRD_NONE: | ||
1702 | return __bpf_redirect_no_mac(skb, dev, flags); | ||
1703 | default: | ||
1704 | return __bpf_redirect_common(skb, dev, flags); | ||
1705 | } | ||
1706 | } | ||
1707 | |||
1650 | BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags) | 1708 | BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags) |
1651 | { | 1709 | { |
1652 | struct net_device *dev; | 1710 | struct net_device *dev; |
@@ -1675,10 +1733,7 @@ BPF_CALL_3(bpf_clone_redirect, struct sk_buff *, skb, u32, ifindex, u64, flags) | |||
1675 | return -ENOMEM; | 1733 | return -ENOMEM; |
1676 | } | 1734 | } |
1677 | 1735 | ||
1678 | bpf_push_mac_rcsum(clone); | 1736 | return __bpf_redirect(clone, dev, flags); |
1679 | |||
1680 | return flags & BPF_F_INGRESS ? | ||
1681 | __bpf_rx_skb(dev, clone) : __bpf_tx_skb(dev, clone); | ||
1682 | } | 1737 | } |
1683 | 1738 | ||
1684 | static const struct bpf_func_proto bpf_clone_redirect_proto = { | 1739 | static const struct bpf_func_proto bpf_clone_redirect_proto = { |
@@ -1722,10 +1777,7 @@ int skb_do_redirect(struct sk_buff *skb) | |||
1722 | return -EINVAL; | 1777 | return -EINVAL; |
1723 | } | 1778 | } |
1724 | 1779 | ||
1725 | bpf_push_mac_rcsum(skb); | 1780 | return __bpf_redirect(skb, dev, ri->flags); |
1726 | |||
1727 | return ri->flags & BPF_F_INGRESS ? | ||
1728 | __bpf_rx_skb(dev, skb) : __bpf_tx_skb(dev, skb); | ||
1729 | } | 1781 | } |
1730 | 1782 | ||
1731 | static const struct bpf_func_proto bpf_redirect_proto = { | 1783 | static const struct bpf_func_proto bpf_redirect_proto = { |
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index ab193e5def07..c6d8207ffa7e 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c | |||
@@ -122,7 +122,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb, | |||
122 | struct flow_dissector_key_keyid *key_keyid; | 122 | struct flow_dissector_key_keyid *key_keyid; |
123 | bool skip_vlan = false; | 123 | bool skip_vlan = false; |
124 | u8 ip_proto = 0; | 124 | u8 ip_proto = 0; |
125 | bool ret = false; | 125 | bool ret; |
126 | 126 | ||
127 | if (!data) { | 127 | if (!data) { |
128 | data = skb->data; | 128 | data = skb->data; |
@@ -549,12 +549,17 @@ ip_proto_again: | |||
549 | out_good: | 549 | out_good: |
550 | ret = true; | 550 | ret = true; |
551 | 551 | ||
552 | out_bad: | 552 | key_control->thoff = (u16)nhoff; |
553 | out: | ||
553 | key_basic->n_proto = proto; | 554 | key_basic->n_proto = proto; |
554 | key_basic->ip_proto = ip_proto; | 555 | key_basic->ip_proto = ip_proto; |
555 | key_control->thoff = (u16)nhoff; | ||
556 | 556 | ||
557 | return ret; | 557 | return ret; |
558 | |||
559 | out_bad: | ||
560 | ret = false; | ||
561 | key_control->thoff = min_t(u16, nhoff, skb ? skb->len : hlen); | ||
562 | goto out; | ||
558 | } | 563 | } |
559 | EXPORT_SYMBOL(__skb_flow_dissect); | 564 | EXPORT_SYMBOL(__skb_flow_dissect); |
560 | 565 | ||
@@ -1008,4 +1013,4 @@ static int __init init_default_flow_dissectors(void) | |||
1008 | return 0; | 1013 | return 0; |
1009 | } | 1014 | } |
1010 | 1015 | ||
1011 | late_initcall_sync(init_default_flow_dissectors); | 1016 | core_initcall(init_default_flow_dissectors); |
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index f61c0e02a413..7001da910c6b 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c | |||
@@ -219,6 +219,8 @@ int peernet2id_alloc(struct net *net, struct net *peer) | |||
219 | bool alloc; | 219 | bool alloc; |
220 | int id; | 220 | int id; |
221 | 221 | ||
222 | if (atomic_read(&net->count) == 0) | ||
223 | return NETNSA_NSID_NOT_ASSIGNED; | ||
222 | spin_lock_irqsave(&net->nsid_lock, flags); | 224 | spin_lock_irqsave(&net->nsid_lock, flags); |
223 | alloc = atomic_read(&peer->count) == 0 ? false : true; | 225 | alloc = atomic_read(&peer->count) == 0 ? false : true; |
224 | id = __peernet2id_alloc(net, peer, &alloc); | 226 | id = __peernet2id_alloc(net, peer, &alloc); |
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index fb7348f13501..deb35acbefd0 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -275,6 +275,7 @@ int rtnl_unregister(int protocol, int msgtype) | |||
275 | 275 | ||
276 | rtnl_msg_handlers[protocol][msgindex].doit = NULL; | 276 | rtnl_msg_handlers[protocol][msgindex].doit = NULL; |
277 | rtnl_msg_handlers[protocol][msgindex].dumpit = NULL; | 277 | rtnl_msg_handlers[protocol][msgindex].dumpit = NULL; |
278 | rtnl_msg_handlers[protocol][msgindex].calcit = NULL; | ||
278 | 279 | ||
279 | return 0; | 280 | return 0; |
280 | } | 281 | } |
@@ -839,18 +840,20 @@ static inline int rtnl_vfinfo_size(const struct net_device *dev, | |||
839 | if (dev->dev.parent && dev_is_pci(dev->dev.parent) && | 840 | if (dev->dev.parent && dev_is_pci(dev->dev.parent) && |
840 | (ext_filter_mask & RTEXT_FILTER_VF)) { | 841 | (ext_filter_mask & RTEXT_FILTER_VF)) { |
841 | int num_vfs = dev_num_vf(dev->dev.parent); | 842 | int num_vfs = dev_num_vf(dev->dev.parent); |
842 | size_t size = nla_total_size(sizeof(struct nlattr)); | 843 | size_t size = nla_total_size(0); |
843 | size += nla_total_size(num_vfs * sizeof(struct nlattr)); | ||
844 | size += num_vfs * | 844 | size += num_vfs * |
845 | (nla_total_size(sizeof(struct ifla_vf_mac)) + | 845 | (nla_total_size(0) + |
846 | nla_total_size(MAX_VLAN_LIST_LEN * | 846 | nla_total_size(sizeof(struct ifla_vf_mac)) + |
847 | sizeof(struct nlattr)) + | 847 | nla_total_size(sizeof(struct ifla_vf_vlan)) + |
848 | nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */ | ||
848 | nla_total_size(MAX_VLAN_LIST_LEN * | 849 | nla_total_size(MAX_VLAN_LIST_LEN * |
849 | sizeof(struct ifla_vf_vlan_info)) + | 850 | sizeof(struct ifla_vf_vlan_info)) + |
850 | nla_total_size(sizeof(struct ifla_vf_spoofchk)) + | 851 | nla_total_size(sizeof(struct ifla_vf_spoofchk)) + |
852 | nla_total_size(sizeof(struct ifla_vf_tx_rate)) + | ||
851 | nla_total_size(sizeof(struct ifla_vf_rate)) + | 853 | nla_total_size(sizeof(struct ifla_vf_rate)) + |
852 | nla_total_size(sizeof(struct ifla_vf_link_state)) + | 854 | nla_total_size(sizeof(struct ifla_vf_link_state)) + |
853 | nla_total_size(sizeof(struct ifla_vf_rss_query_en)) + | 855 | nla_total_size(sizeof(struct ifla_vf_rss_query_en)) + |
856 | nla_total_size(0) + /* nest IFLA_VF_STATS */ | ||
854 | /* IFLA_VF_STATS_RX_PACKETS */ | 857 | /* IFLA_VF_STATS_RX_PACKETS */ |
855 | nla_total_size_64bit(sizeof(__u64)) + | 858 | nla_total_size_64bit(sizeof(__u64)) + |
856 | /* IFLA_VF_STATS_TX_PACKETS */ | 859 | /* IFLA_VF_STATS_TX_PACKETS */ |
@@ -898,7 +901,8 @@ static size_t rtnl_port_size(const struct net_device *dev, | |||
898 | 901 | ||
899 | static size_t rtnl_xdp_size(const struct net_device *dev) | 902 | static size_t rtnl_xdp_size(const struct net_device *dev) |
900 | { | 903 | { |
901 | size_t xdp_size = nla_total_size(1); /* XDP_ATTACHED */ | 904 | size_t xdp_size = nla_total_size(0) + /* nest IFLA_XDP */ |
905 | nla_total_size(1); /* XDP_ATTACHED */ | ||
902 | 906 | ||
903 | if (!dev->netdev_ops->ndo_xdp) | 907 | if (!dev->netdev_ops->ndo_xdp) |
904 | return 0; | 908 | return 0; |
@@ -1605,7 +1609,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) | |||
1605 | head = &net->dev_index_head[h]; | 1609 | head = &net->dev_index_head[h]; |
1606 | hlist_for_each_entry(dev, head, index_hlist) { | 1610 | hlist_for_each_entry(dev, head, index_hlist) { |
1607 | if (link_dump_filtered(dev, master_idx, kind_ops)) | 1611 | if (link_dump_filtered(dev, master_idx, kind_ops)) |
1608 | continue; | 1612 | goto cont; |
1609 | if (idx < s_idx) | 1613 | if (idx < s_idx) |
1610 | goto cont; | 1614 | goto cont; |
1611 | err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK, | 1615 | err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK, |
@@ -2733,7 +2737,7 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh) | |||
2733 | ext_filter_mask)); | 2737 | ext_filter_mask)); |
2734 | } | 2738 | } |
2735 | 2739 | ||
2736 | return min_ifinfo_dump_size; | 2740 | return nlmsg_total_size(min_ifinfo_dump_size); |
2737 | } | 2741 | } |
2738 | 2742 | ||
2739 | static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb) | 2743 | static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb) |
@@ -2848,7 +2852,10 @@ nla_put_failure: | |||
2848 | 2852 | ||
2849 | static inline size_t rtnl_fdb_nlmsg_size(void) | 2853 | static inline size_t rtnl_fdb_nlmsg_size(void) |
2850 | { | 2854 | { |
2851 | return NLMSG_ALIGN(sizeof(struct ndmsg)) + nla_total_size(ETH_ALEN); | 2855 | return NLMSG_ALIGN(sizeof(struct ndmsg)) + |
2856 | nla_total_size(ETH_ALEN) + /* NDA_LLADDR */ | ||
2857 | nla_total_size(sizeof(u16)) + /* NDA_VLAN */ | ||
2858 | 0; | ||
2852 | } | 2859 | } |
2853 | 2860 | ||
2854 | static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type, | 2861 | static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type, |
diff --git a/net/core/sock.c b/net/core/sock.c index c73e28fc9c2a..5e3ca414357e 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -453,7 +453,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | |||
453 | EXPORT_SYMBOL(sock_queue_rcv_skb); | 453 | EXPORT_SYMBOL(sock_queue_rcv_skb); |
454 | 454 | ||
455 | int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, | 455 | int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, |
456 | const int nested, unsigned int trim_cap) | 456 | const int nested, unsigned int trim_cap, bool refcounted) |
457 | { | 457 | { |
458 | int rc = NET_RX_SUCCESS; | 458 | int rc = NET_RX_SUCCESS; |
459 | 459 | ||
@@ -487,7 +487,8 @@ int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, | |||
487 | 487 | ||
488 | bh_unlock_sock(sk); | 488 | bh_unlock_sock(sk); |
489 | out: | 489 | out: |
490 | sock_put(sk); | 490 | if (refcounted) |
491 | sock_put(sk); | ||
491 | return rc; | 492 | return rc; |
492 | discard_and_relse: | 493 | discard_and_relse: |
493 | kfree_skb(skb); | 494 | kfree_skb(skb); |
@@ -1543,6 +1544,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) | |||
1543 | RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL); | 1544 | RCU_INIT_POINTER(newsk->sk_reuseport_cb, NULL); |
1544 | 1545 | ||
1545 | newsk->sk_err = 0; | 1546 | newsk->sk_err = 0; |
1547 | newsk->sk_err_soft = 0; | ||
1546 | newsk->sk_priority = 0; | 1548 | newsk->sk_priority = 0; |
1547 | newsk->sk_incoming_cpu = raw_smp_processor_id(); | 1549 | newsk->sk_incoming_cpu = raw_smp_processor_id(); |
1548 | atomic64_set(&newsk->sk_cookie, 0); | 1550 | atomic64_set(&newsk->sk_cookie, 0); |
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 345a3aeb8c7e..b567c8725aea 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c | |||
@@ -235,7 +235,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info) | |||
235 | { | 235 | { |
236 | const struct iphdr *iph = (struct iphdr *)skb->data; | 236 | const struct iphdr *iph = (struct iphdr *)skb->data; |
237 | const u8 offset = iph->ihl << 2; | 237 | const u8 offset = iph->ihl << 2; |
238 | const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset); | 238 | const struct dccp_hdr *dh; |
239 | struct dccp_sock *dp; | 239 | struct dccp_sock *dp; |
240 | struct inet_sock *inet; | 240 | struct inet_sock *inet; |
241 | const int type = icmp_hdr(skb)->type; | 241 | const int type = icmp_hdr(skb)->type; |
@@ -245,11 +245,13 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info) | |||
245 | int err; | 245 | int err; |
246 | struct net *net = dev_net(skb->dev); | 246 | struct net *net = dev_net(skb->dev); |
247 | 247 | ||
248 | if (skb->len < offset + sizeof(*dh) || | 248 | /* Only need dccph_dport & dccph_sport which are the first |
249 | skb->len < offset + __dccp_basic_hdr_len(dh)) { | 249 | * 4 bytes in dccp header. |
250 | __ICMP_INC_STATS(net, ICMP_MIB_INERRORS); | 250 | * Our caller (icmp_socket_deliver()) already pulled 8 bytes for us. |
251 | return; | 251 | */ |
252 | } | 252 | BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8); |
253 | BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8); | ||
254 | dh = (struct dccp_hdr *)(skb->data + offset); | ||
253 | 255 | ||
254 | sk = __inet_lookup_established(net, &dccp_hashinfo, | 256 | sk = __inet_lookup_established(net, &dccp_hashinfo, |
255 | iph->daddr, dh->dccph_dport, | 257 | iph->daddr, dh->dccph_dport, |
@@ -868,7 +870,7 @@ lookup: | |||
868 | goto discard_and_relse; | 870 | goto discard_and_relse; |
869 | nf_reset(skb); | 871 | nf_reset(skb); |
870 | 872 | ||
871 | return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4); | 873 | return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4, refcounted); |
872 | 874 | ||
873 | no_dccp_socket: | 875 | no_dccp_socket: |
874 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) | 876 | if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) |
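In dccp_v4_err() (and the IPv6 twin below) the runtime length check becomes a compile-time one: the ICMP error handler has already pulled 8 bytes of the inner transport header, and the only fields read before the socket lookup are the ports, which offsetofend() proves lie inside those 8 bytes. A standalone sketch of that style of assertion, using a toy struct rather than the real dccp_hdr:

#include <stddef.h>
#include <stdint.h>

#define offsetofend(TYPE, MEMBER) \
    (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

struct toy_hdr {
    uint16_t sport;   /* bytes 0-1 */
    uint16_t dport;   /* bytes 2-3 */
    uint8_t  rest[16];
};

/* Fails to compile if someone later moves the ports past the pulled area. */
_Static_assert(offsetofend(struct toy_hdr, sport) <= 8, "sport within first 8 bytes");
_Static_assert(offsetofend(struct toy_hdr, dport) <= 8, "dport within first 8 bytes");

int main(void) { return 0; }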
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 3828f94b234c..715e5d1dc107 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c | |||
@@ -70,7 +70,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
70 | u8 type, u8 code, int offset, __be32 info) | 70 | u8 type, u8 code, int offset, __be32 info) |
71 | { | 71 | { |
72 | const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data; | 72 | const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data; |
73 | const struct dccp_hdr *dh = (struct dccp_hdr *)(skb->data + offset); | 73 | const struct dccp_hdr *dh; |
74 | struct dccp_sock *dp; | 74 | struct dccp_sock *dp; |
75 | struct ipv6_pinfo *np; | 75 | struct ipv6_pinfo *np; |
76 | struct sock *sk; | 76 | struct sock *sk; |
@@ -78,12 +78,13 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
78 | __u64 seq; | 78 | __u64 seq; |
79 | struct net *net = dev_net(skb->dev); | 79 | struct net *net = dev_net(skb->dev); |
80 | 80 | ||
81 | if (skb->len < offset + sizeof(*dh) || | 81 | /* Only need dccph_dport & dccph_sport which are the first |
82 | skb->len < offset + __dccp_basic_hdr_len(dh)) { | 82 | * 4 bytes in dccp header. |
83 | __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), | 83 | * Our caller (icmpv6_notify()) already pulled 8 bytes for us. |
84 | ICMP6_MIB_INERRORS); | 84 | */ |
85 | return; | 85 | BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_sport) > 8); |
86 | } | 86 | BUILD_BUG_ON(offsetofend(struct dccp_hdr, dccph_dport) > 8); |
87 | dh = (struct dccp_hdr *)(skb->data + offset); | ||
87 | 88 | ||
88 | sk = __inet6_lookup_established(net, &dccp_hashinfo, | 89 | sk = __inet6_lookup_established(net, &dccp_hashinfo, |
89 | &hdr->daddr, dh->dccph_dport, | 90 | &hdr->daddr, dh->dccph_dport, |
@@ -738,7 +739,8 @@ lookup: | |||
738 | if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) | 739 | if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) |
739 | goto discard_and_relse; | 740 | goto discard_and_relse; |
740 | 741 | ||
741 | return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4) ? -1 : 0; | 742 | return __sk_receive_skb(sk, skb, 1, dh->dccph_doff * 4, |
743 | refcounted) ? -1 : 0; | ||
742 | 744 | ||
743 | no_dccp_socket: | 745 | no_dccp_socket: |
744 | if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) | 746 | if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) |
@@ -956,6 +958,7 @@ static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = { | |||
956 | .getsockopt = ipv6_getsockopt, | 958 | .getsockopt = ipv6_getsockopt, |
957 | .addr2sockaddr = inet6_csk_addr2sockaddr, | 959 | .addr2sockaddr = inet6_csk_addr2sockaddr, |
958 | .sockaddr_len = sizeof(struct sockaddr_in6), | 960 | .sockaddr_len = sizeof(struct sockaddr_in6), |
961 | .bind_conflict = inet6_csk_bind_conflict, | ||
959 | #ifdef CONFIG_COMPAT | 962 | #ifdef CONFIG_COMPAT |
960 | .compat_setsockopt = compat_ipv6_setsockopt, | 963 | .compat_setsockopt = compat_ipv6_setsockopt, |
961 | .compat_getsockopt = compat_ipv6_getsockopt, | 964 | .compat_getsockopt = compat_ipv6_getsockopt, |
diff --git a/net/dccp/proto.c b/net/dccp/proto.c index 41e65804ddf5..9fe25bf63296 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c | |||
@@ -1009,6 +1009,10 @@ void dccp_close(struct sock *sk, long timeout) | |||
1009 | __kfree_skb(skb); | 1009 | __kfree_skb(skb); |
1010 | } | 1010 | } |
1011 | 1011 | ||
1012 | /* If socket has been already reset kill it. */ | ||
1013 | if (sk->sk_state == DCCP_CLOSED) | ||
1014 | goto adjudge_to_death; | ||
1015 | |||
1012 | if (data_was_unread) { | 1016 | if (data_was_unread) { |
1013 | /* Unread data was tossed, send an appropriate Reset Code */ | 1017 | /* Unread data was tossed, send an appropriate Reset Code */ |
1014 | DCCP_WARN("ABORT with %u bytes unread\n", data_was_unread); | 1018 | DCCP_WARN("ABORT with %u bytes unread\n", data_was_unread); |
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 9648c97e541f..5ddf5cda07f4 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c | |||
@@ -533,9 +533,9 @@ EXPORT_SYMBOL(inet_dgram_connect); | |||
533 | 533 | ||
534 | static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias) | 534 | static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias) |
535 | { | 535 | { |
536 | DEFINE_WAIT(wait); | 536 | DEFINE_WAIT_FUNC(wait, woken_wake_function); |
537 | 537 | ||
538 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); | 538 | add_wait_queue(sk_sleep(sk), &wait); |
539 | sk->sk_write_pending += writebias; | 539 | sk->sk_write_pending += writebias; |
540 | 540 | ||
541 | /* Basic assumption: if someone sets sk->sk_err, he _must_ | 541 | /* Basic assumption: if someone sets sk->sk_err, he _must_ |
@@ -545,13 +545,12 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias) | |||
545 | */ | 545 | */ |
546 | while ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { | 546 | while ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { |
547 | release_sock(sk); | 547 | release_sock(sk); |
548 | timeo = schedule_timeout(timeo); | 548 | timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo); |
549 | lock_sock(sk); | 549 | lock_sock(sk); |
550 | if (signal_pending(current) || !timeo) | 550 | if (signal_pending(current) || !timeo) |
551 | break; | 551 | break; |
552 | prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); | ||
553 | } | 552 | } |
554 | finish_wait(sk_sleep(sk), &wait); | 553 | remove_wait_queue(sk_sleep(sk), &wait); |
555 | sk->sk_write_pending -= writebias; | 554 | sk->sk_write_pending -= writebias; |
556 | return timeo; | 555 | return timeo; |
557 | } | 556 | } |
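inet_wait_for_connect() is reworked around DEFINE_WAIT_FUNC()/wait_woken(), which keeps the task runnable between condition checks while still guaranteeing that a wakeup arriving before the sleep is not lost. The pthread program below is only a loose userspace analogy for that discipline; with a condition variable the equivalent rule is to re-check the predicate under the lock before every sleep.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static bool connected;                 /* stands in for the socket state */

static void *waker(void *arg)
{
    (void)arg;
    usleep(1000);
    pthread_mutex_lock(&lock);
    connected = true;                  /* the event */
    pthread_cond_signal(&cond);        /* wake the waiter */
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, waker, NULL);

    pthread_mutex_lock(&lock);
    while (!connected)                 /* predicate re-checked before sleeping */
        pthread_cond_wait(&cond, &lock);
    pthread_mutex_unlock(&lock);

    pthread_join(t, NULL);
    puts("connect completed");
    return 0;
}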
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index c3b80478226e..161fc0f0d752 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c | |||
@@ -151,7 +151,7 @@ static void fib_replace_table(struct net *net, struct fib_table *old, | |||
151 | 151 | ||
152 | int fib_unmerge(struct net *net) | 152 | int fib_unmerge(struct net *net) |
153 | { | 153 | { |
154 | struct fib_table *old, *new; | 154 | struct fib_table *old, *new, *main_table; |
155 | 155 | ||
156 | /* attempt to fetch local table if it has been allocated */ | 156 | /* attempt to fetch local table if it has been allocated */ |
157 | old = fib_get_table(net, RT_TABLE_LOCAL); | 157 | old = fib_get_table(net, RT_TABLE_LOCAL); |
@@ -162,11 +162,21 @@ int fib_unmerge(struct net *net) | |||
162 | if (!new) | 162 | if (!new) |
163 | return -ENOMEM; | 163 | return -ENOMEM; |
164 | 164 | ||
165 | /* table is already unmerged */ | ||
166 | if (new == old) | ||
167 | return 0; | ||
168 | |||
165 | /* replace merged table with clean table */ | 169 | /* replace merged table with clean table */ |
166 | if (new != old) { | 170 | fib_replace_table(net, old, new); |
167 | fib_replace_table(net, old, new); | 171 | fib_free_table(old); |
168 | fib_free_table(old); | 172 | |
169 | } | 173 | /* attempt to fetch main table if it has been allocated */ |
174 | main_table = fib_get_table(net, RT_TABLE_MAIN); | ||
175 | if (!main_table) | ||
176 | return 0; | ||
177 | |||
178 | /* flush local entries from main table */ | ||
179 | fib_table_flush_external(main_table); | ||
170 | 180 | ||
171 | return 0; | 181 | return 0; |
172 | } | 182 | } |
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 31cef3602585..026f309c51e9 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c | |||
@@ -1743,8 +1743,10 @@ struct fib_table *fib_trie_unmerge(struct fib_table *oldtb) | |||
1743 | local_l = fib_find_node(lt, &local_tp, l->key); | 1743 | local_l = fib_find_node(lt, &local_tp, l->key); |
1744 | 1744 | ||
1745 | if (fib_insert_alias(lt, local_tp, local_l, new_fa, | 1745 | if (fib_insert_alias(lt, local_tp, local_l, new_fa, |
1746 | NULL, l->key)) | 1746 | NULL, l->key)) { |
1747 | kmem_cache_free(fn_alias_kmem, new_fa); | ||
1747 | goto out; | 1748 | goto out; |
1749 | } | ||
1748 | } | 1750 | } |
1749 | 1751 | ||
1750 | /* stop loop if key wrapped back to 0 */ | 1752 | /* stop loop if key wrapped back to 0 */ |
@@ -1760,6 +1762,71 @@ out: | |||
1760 | return NULL; | 1762 | return NULL; |
1761 | } | 1763 | } |
1762 | 1764 | ||
1765 | /* Caller must hold RTNL */ | ||
1766 | void fib_table_flush_external(struct fib_table *tb) | ||
1767 | { | ||
1768 | struct trie *t = (struct trie *)tb->tb_data; | ||
1769 | struct key_vector *pn = t->kv; | ||
1770 | unsigned long cindex = 1; | ||
1771 | struct hlist_node *tmp; | ||
1772 | struct fib_alias *fa; | ||
1773 | |||
1774 | /* walk trie in reverse order */ | ||
1775 | for (;;) { | ||
1776 | unsigned char slen = 0; | ||
1777 | struct key_vector *n; | ||
1778 | |||
1779 | if (!(cindex--)) { | ||
1780 | t_key pkey = pn->key; | ||
1781 | |||
1782 | /* cannot resize the trie vector */ | ||
1783 | if (IS_TRIE(pn)) | ||
1784 | break; | ||
1785 | |||
1786 | /* resize completed node */ | ||
1787 | pn = resize(t, pn); | ||
1788 | cindex = get_index(pkey, pn); | ||
1789 | |||
1790 | continue; | ||
1791 | } | ||
1792 | |||
1793 | /* grab the next available node */ | ||
1794 | n = get_child(pn, cindex); | ||
1795 | if (!n) | ||
1796 | continue; | ||
1797 | |||
1798 | if (IS_TNODE(n)) { | ||
1799 | /* record pn and cindex for leaf walking */ | ||
1800 | pn = n; | ||
1801 | cindex = 1ul << n->bits; | ||
1802 | |||
1803 | continue; | ||
1804 | } | ||
1805 | |||
1806 | hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) { | ||
1807 | /* if alias was cloned to local then we just | ||
1808 | * need to remove the local copy from main | ||
1809 | */ | ||
1810 | if (tb->tb_id != fa->tb_id) { | ||
1811 | hlist_del_rcu(&fa->fa_list); | ||
1812 | alias_free_mem_rcu(fa); | ||
1813 | continue; | ||
1814 | } | ||
1815 | |||
1816 | /* record local slen */ | ||
1817 | slen = fa->fa_slen; | ||
1818 | } | ||
1819 | |||
1820 | /* update leaf slen */ | ||
1821 | n->slen = slen; | ||
1822 | |||
1823 | if (hlist_empty(&n->leaf)) { | ||
1824 | put_child_root(pn, n->key, NULL); | ||
1825 | node_free(n); | ||
1826 | } | ||
1827 | } | ||
1828 | } | ||
1829 | |||
1763 | /* Caller must hold RTNL. */ | 1830 | /* Caller must hold RTNL. */ |
1764 | int fib_table_flush(struct net *net, struct fib_table *tb) | 1831 | int fib_table_flush(struct net *net, struct fib_table *tb) |
1765 | { | 1832 | { |
@@ -2413,22 +2480,19 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter, | |||
2413 | struct key_vector *l, **tp = &iter->tnode; | 2480 | struct key_vector *l, **tp = &iter->tnode; |
2414 | t_key key; | 2481 | t_key key; |
2415 | 2482 | ||
2416 | /* use cache location of next-to-find key */ | 2483 | /* use cached location of previously found key */ |
2417 | if (iter->pos > 0 && pos >= iter->pos) { | 2484 | if (iter->pos > 0 && pos >= iter->pos) { |
2418 | pos -= iter->pos; | ||
2419 | key = iter->key; | 2485 | key = iter->key; |
2420 | } else { | 2486 | } else { |
2421 | iter->pos = 0; | 2487 | iter->pos = 1; |
2422 | key = 0; | 2488 | key = 0; |
2423 | } | 2489 | } |
2424 | 2490 | ||
2425 | while ((l = leaf_walk_rcu(tp, key)) != NULL) { | 2491 | pos -= iter->pos; |
2492 | |||
2493 | while ((l = leaf_walk_rcu(tp, key)) && (pos-- > 0)) { | ||
2426 | key = l->key + 1; | 2494 | key = l->key + 1; |
2427 | iter->pos++; | 2495 | iter->pos++; |
2428 | |||
2429 | if (--pos <= 0) | ||
2430 | break; | ||
2431 | |||
2432 | l = NULL; | 2496 | l = NULL; |
2433 | 2497 | ||
2434 | /* handle unlikely case of a key wrap */ | 2498 | /* handle unlikely case of a key wrap */ |
@@ -2437,7 +2501,7 @@ static struct key_vector *fib_route_get_idx(struct fib_route_iter *iter, | |||
2437 | } | 2501 | } |
2438 | 2502 | ||
2439 | if (l) | 2503 | if (l) |
2440 | iter->key = key; /* remember it */ | 2504 | iter->key = l->key; /* remember it */ |
2441 | else | 2505 | else |
2442 | iter->pos = 0; /* forget it */ | 2506 | iter->pos = 0; /* forget it */ |
2443 | 2507 | ||
@@ -2465,7 +2529,7 @@ static void *fib_route_seq_start(struct seq_file *seq, loff_t *pos) | |||
2465 | return fib_route_get_idx(iter, *pos); | 2529 | return fib_route_get_idx(iter, *pos); |
2466 | 2530 | ||
2467 | iter->pos = 0; | 2531 | iter->pos = 0; |
2468 | iter->key = 0; | 2532 | iter->key = KEY_MAX; |
2469 | 2533 | ||
2470 | return SEQ_START_TOKEN; | 2534 | return SEQ_START_TOKEN; |
2471 | } | 2535 | } |
@@ -2474,7 +2538,7 @@ static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
2474 | { | 2538 | { |
2475 | struct fib_route_iter *iter = seq->private; | 2539 | struct fib_route_iter *iter = seq->private; |
2476 | struct key_vector *l = NULL; | 2540 | struct key_vector *l = NULL; |
2477 | t_key key = iter->key; | 2541 | t_key key = iter->key + 1; |
2478 | 2542 | ||
2479 | ++*pos; | 2543 | ++*pos; |
2480 | 2544 | ||
@@ -2483,7 +2547,7 @@ static void *fib_route_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |||
2483 | l = leaf_walk_rcu(&iter->tnode, key); | 2547 | l = leaf_walk_rcu(&iter->tnode, key); |
2484 | 2548 | ||
2485 | if (l) { | 2549 | if (l) { |
2486 | iter->key = l->key + 1; | 2550 | iter->key = l->key; |
2487 | iter->pos++; | 2551 | iter->pos++; |
2488 | } else { | 2552 | } else { |
2489 | iter->pos = 0; | 2553 | iter->pos = 0; |
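The fib_route_get_idx()/fib_route_seq_next() changes make the /proc route dump remember the key of the entry it actually returned and resume strictly after it, instead of caching key + 1 with an off-by-one position. A simplified resumable iterator over a sorted array, with invented names, shows the convention:

#include <stdio.h>

static const unsigned int keys[] = { 1, 4, 9, 16, 25 };
#define NKEYS (sizeof(keys) / sizeof(keys[0]))

struct route_iter {
    unsigned int last_key;   /* key of the last entry handed out */
    int          valid;      /* do we have a cached position at all? */
};

/* Return the first key strictly greater than 'from', or 0 if none. */
static unsigned int walk_from(unsigned int from)
{
    for (size_t i = 0; i < NKEYS; i++)
        if (keys[i] > from)
            return keys[i];
    return 0;
}

static unsigned int iter_next(struct route_iter *it)
{
    unsigned int k = walk_from(it->valid ? it->last_key : 0);

    if (k) {
        it->last_key = k;    /* remember what was returned, not k + 1 */
        it->valid = 1;
    }
    return k;
}

int main(void)
{
    struct route_iter it = { 0, 0 };
    unsigned int k;

    while ((k = iter_next(&it)))
        printf("%u\n", k);
    return 0;
}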
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 38abe70e595f..48734ee6293f 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c | |||
@@ -477,7 +477,7 @@ static struct rtable *icmp_route_lookup(struct net *net, | |||
477 | fl4->flowi4_proto = IPPROTO_ICMP; | 477 | fl4->flowi4_proto = IPPROTO_ICMP; |
478 | fl4->fl4_icmp_type = type; | 478 | fl4->fl4_icmp_type = type; |
479 | fl4->fl4_icmp_code = code; | 479 | fl4->fl4_icmp_code = code; |
480 | fl4->flowi4_oif = l3mdev_master_ifindex(skb_in->dev); | 480 | fl4->flowi4_oif = l3mdev_master_ifindex(skb_dst(skb_in)->dev); |
481 | 481 | ||
482 | security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4)); | 482 | security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4)); |
483 | rt = __ip_route_output_key_hash(net, fl4, | 483 | rt = __ip_route_output_key_hash(net, fl4, |
@@ -502,7 +502,7 @@ static struct rtable *icmp_route_lookup(struct net *net, | |||
502 | if (err) | 502 | if (err) |
503 | goto relookup_failed; | 503 | goto relookup_failed; |
504 | 504 | ||
505 | if (inet_addr_type_dev_table(net, skb_in->dev, | 505 | if (inet_addr_type_dev_table(net, skb_dst(skb_in)->dev, |
506 | fl4_dec.saddr) == RTN_LOCAL) { | 506 | fl4_dec.saddr) == RTN_LOCAL) { |
507 | rt2 = __ip_route_output_key(net, &fl4_dec); | 507 | rt2 = __ip_route_output_key(net, &fl4_dec); |
508 | if (IS_ERR(rt2)) | 508 | if (IS_ERR(rt2)) |
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index 606cc3e85d2b..15db786d50ed 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c | |||
@@ -162,7 +162,7 @@ static int unsolicited_report_interval(struct in_device *in_dev) | |||
162 | } | 162 | } |
163 | 163 | ||
164 | static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im); | 164 | static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im); |
165 | static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr); | 165 | static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im); |
166 | static void igmpv3_clear_delrec(struct in_device *in_dev); | 166 | static void igmpv3_clear_delrec(struct in_device *in_dev); |
167 | static int sf_setstate(struct ip_mc_list *pmc); | 167 | static int sf_setstate(struct ip_mc_list *pmc); |
168 | static void sf_markstate(struct ip_mc_list *pmc); | 168 | static void sf_markstate(struct ip_mc_list *pmc); |
@@ -1130,10 +1130,15 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im) | |||
1130 | spin_unlock_bh(&in_dev->mc_tomb_lock); | 1130 | spin_unlock_bh(&in_dev->mc_tomb_lock); |
1131 | } | 1131 | } |
1132 | 1132 | ||
1133 | static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr) | 1133 | /* |
1134 | * restore ip_mc_list deleted records | ||
1135 | */ | ||
1136 | static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im) | ||
1134 | { | 1137 | { |
1135 | struct ip_mc_list *pmc, *pmc_prev; | 1138 | struct ip_mc_list *pmc, *pmc_prev; |
1136 | struct ip_sf_list *psf, *psf_next; | 1139 | struct ip_sf_list *psf; |
1140 | struct net *net = dev_net(in_dev->dev); | ||
1141 | __be32 multiaddr = im->multiaddr; | ||
1137 | 1142 | ||
1138 | spin_lock_bh(&in_dev->mc_tomb_lock); | 1143 | spin_lock_bh(&in_dev->mc_tomb_lock); |
1139 | pmc_prev = NULL; | 1144 | pmc_prev = NULL; |
@@ -1149,16 +1154,26 @@ static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr) | |||
1149 | in_dev->mc_tomb = pmc->next; | 1154 | in_dev->mc_tomb = pmc->next; |
1150 | } | 1155 | } |
1151 | spin_unlock_bh(&in_dev->mc_tomb_lock); | 1156 | spin_unlock_bh(&in_dev->mc_tomb_lock); |
1157 | |||
1158 | spin_lock_bh(&im->lock); | ||
1152 | if (pmc) { | 1159 | if (pmc) { |
1153 | for (psf = pmc->tomb; psf; psf = psf_next) { | 1160 | im->interface = pmc->interface; |
1154 | psf_next = psf->sf_next; | 1161 | im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv; |
1155 | kfree(psf); | 1162 | im->sfmode = pmc->sfmode; |
1163 | if (pmc->sfmode == MCAST_INCLUDE) { | ||
1164 | im->tomb = pmc->tomb; | ||
1165 | im->sources = pmc->sources; | ||
1166 | for (psf = im->sources; psf; psf = psf->sf_next) | ||
1167 | psf->sf_crcount = im->crcount; | ||
1156 | } | 1168 | } |
1157 | in_dev_put(pmc->interface); | 1169 | in_dev_put(pmc->interface); |
1158 | kfree(pmc); | ||
1159 | } | 1170 | } |
1171 | spin_unlock_bh(&im->lock); | ||
1160 | } | 1172 | } |
1161 | 1173 | ||
1174 | /* | ||
1175 | * flush ip_mc_list deleted records | ||
1176 | */ | ||
1162 | static void igmpv3_clear_delrec(struct in_device *in_dev) | 1177 | static void igmpv3_clear_delrec(struct in_device *in_dev) |
1163 | { | 1178 | { |
1164 | struct ip_mc_list *pmc, *nextpmc; | 1179 | struct ip_mc_list *pmc, *nextpmc; |
@@ -1366,7 +1381,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr) | |||
1366 | ip_mc_hash_add(in_dev, im); | 1381 | ip_mc_hash_add(in_dev, im); |
1367 | 1382 | ||
1368 | #ifdef CONFIG_IP_MULTICAST | 1383 | #ifdef CONFIG_IP_MULTICAST |
1369 | igmpv3_del_delrec(in_dev, im->multiaddr); | 1384 | igmpv3_del_delrec(in_dev, im); |
1370 | #endif | 1385 | #endif |
1371 | igmp_group_added(im); | 1386 | igmp_group_added(im); |
1372 | if (!in_dev->dead) | 1387 | if (!in_dev->dead) |
@@ -1626,8 +1641,12 @@ void ip_mc_remap(struct in_device *in_dev) | |||
1626 | 1641 | ||
1627 | ASSERT_RTNL(); | 1642 | ASSERT_RTNL(); |
1628 | 1643 | ||
1629 | for_each_pmc_rtnl(in_dev, pmc) | 1644 | for_each_pmc_rtnl(in_dev, pmc) { |
1645 | #ifdef CONFIG_IP_MULTICAST | ||
1646 | igmpv3_del_delrec(in_dev, pmc); | ||
1647 | #endif | ||
1630 | igmp_group_added(pmc); | 1648 | igmp_group_added(pmc); |
1649 | } | ||
1631 | } | 1650 | } |
1632 | 1651 | ||
1633 | /* Device going down */ | 1652 | /* Device going down */ |
@@ -1648,7 +1667,6 @@ void ip_mc_down(struct in_device *in_dev) | |||
1648 | in_dev->mr_gq_running = 0; | 1667 | in_dev->mr_gq_running = 0; |
1649 | if (del_timer(&in_dev->mr_gq_timer)) | 1668 | if (del_timer(&in_dev->mr_gq_timer)) |
1650 | __in_dev_put(in_dev); | 1669 | __in_dev_put(in_dev); |
1651 | igmpv3_clear_delrec(in_dev); | ||
1652 | #endif | 1670 | #endif |
1653 | 1671 | ||
1654 | ip_mc_dec_group(in_dev, IGMP_ALL_HOSTS); | 1672 | ip_mc_dec_group(in_dev, IGMP_ALL_HOSTS); |
@@ -1688,8 +1706,12 @@ void ip_mc_up(struct in_device *in_dev) | |||
1688 | #endif | 1706 | #endif |
1689 | ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS); | 1707 | ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS); |
1690 | 1708 | ||
1691 | for_each_pmc_rtnl(in_dev, pmc) | 1709 | for_each_pmc_rtnl(in_dev, pmc) { |
1710 | #ifdef CONFIG_IP_MULTICAST | ||
1711 | igmpv3_del_delrec(in_dev, pmc); | ||
1712 | #endif | ||
1692 | igmp_group_added(pmc); | 1713 | igmp_group_added(pmc); |
1714 | } | ||
1693 | } | 1715 | } |
1694 | 1716 | ||
1695 | /* | 1717 | /* |
@@ -1704,13 +1726,13 @@ void ip_mc_destroy_dev(struct in_device *in_dev) | |||
1704 | 1726 | ||
1705 | /* Deactivate timers */ | 1727 | /* Deactivate timers */ |
1706 | ip_mc_down(in_dev); | 1728 | ip_mc_down(in_dev); |
1729 | #ifdef CONFIG_IP_MULTICAST | ||
1730 | igmpv3_clear_delrec(in_dev); | ||
1731 | #endif | ||
1707 | 1732 | ||
1708 | while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) { | 1733 | while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) { |
1709 | in_dev->mc_list = i->next_rcu; | 1734 | in_dev->mc_list = i->next_rcu; |
1710 | in_dev->mc_count--; | 1735 | in_dev->mc_count--; |
1711 | |||
1712 | /* We've dropped the groups in ip_mc_down already */ | ||
1713 | ip_mc_clear_src(i); | ||
1714 | ip_ma_put(i); | 1736 | ip_ma_put(i); |
1715 | } | 1737 | } |
1716 | } | 1738 | } |
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c index 8b4ffd216839..9f0a7b96646f 100644 --- a/net/ipv4/ip_forward.c +++ b/net/ipv4/ip_forward.c | |||
@@ -117,7 +117,7 @@ int ip_forward(struct sk_buff *skb) | |||
117 | if (opt->is_strictroute && rt->rt_uses_gateway) | 117 | if (opt->is_strictroute && rt->rt_uses_gateway) |
118 | goto sr_failed; | 118 | goto sr_failed; |
119 | 119 | ||
120 | IPCB(skb)->flags |= IPSKB_FORWARDED | IPSKB_FRAG_SEGS; | 120 | IPCB(skb)->flags |= IPSKB_FORWARDED; |
121 | mtu = ip_dst_mtu_maybe_forward(&rt->dst, true); | 121 | mtu = ip_dst_mtu_maybe_forward(&rt->dst, true); |
122 | if (ip_exceeds_mtu(skb, mtu)) { | 122 | if (ip_exceeds_mtu(skb, mtu)) { |
123 | IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS); | 123 | IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS); |
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 03e7f7310423..105908d841a3 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c | |||
@@ -239,19 +239,23 @@ static int ip_finish_output_gso(struct net *net, struct sock *sk, | |||
239 | struct sk_buff *segs; | 239 | struct sk_buff *segs; |
240 | int ret = 0; | 240 | int ret = 0; |
241 | 241 | ||
242 | /* common case: fragmentation of segments is not allowed, | 242 | /* common case: seglen is <= mtu |
243 | * or seglen is <= mtu | ||
244 | */ | 243 | */ |
245 | if (((IPCB(skb)->flags & IPSKB_FRAG_SEGS) == 0) || | 244 | if (skb_gso_validate_mtu(skb, mtu)) |
246 | skb_gso_validate_mtu(skb, mtu)) | ||
247 | return ip_finish_output2(net, sk, skb); | 245 | return ip_finish_output2(net, sk, skb); |
248 | 246 | ||
249 | /* Slowpath - GSO segment length is exceeding the dst MTU. | 247 | /* Slowpath - GSO segment length exceeds the egress MTU. |
250 | * | 248 | * |
251 | * This can happen in two cases: | 249 | * This can happen in several cases: |
252 | * 1) TCP GRO packet, DF bit not set | 250 | * - Forwarding of a TCP GRO skb, when DF flag is not set. |
253 | * 2) skb arrived via virtio-net, we thus get TSO/GSO skbs directly | 251 | * - Forwarding of an skb that arrived on a virtualization interface |
254 | * from host network stack. | 252 | * (virtio-net/vhost/tap) with TSO/GSO size set by other network |
253 | * stack. | ||
254 | * - Local GSO skb transmitted on an NETIF_F_TSO tunnel stacked over an | ||
255 | * interface with a smaller MTU. | ||
256 | * - Arriving GRO skb (or GSO skb in a virtualized environment) that is | ||
257 | * bridged to a NETIF_F_TSO tunnel stacked over an interface with an | ||
258 | * insufficient MTU. | ||
255 | */ | 259 | */ |
256 | features = netif_skb_features(skb); | 260 | features = netif_skb_features(skb); |
257 | BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET); | 261 | BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET); |
@@ -1579,7 +1583,8 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, | |||
1579 | } | 1583 | } |
1580 | 1584 | ||
1581 | oif = arg->bound_dev_if; | 1585 | oif = arg->bound_dev_if; |
1582 | oif = oif ? : skb->skb_iif; | 1586 | if (!oif && netif_index_is_l3_master(net, skb->skb_iif)) |
1587 | oif = skb->skb_iif; | ||
1583 | 1588 | ||
1584 | flowi4_init_output(&fl4, oif, | 1589 | flowi4_init_output(&fl4, oif, |
1585 | IP4_REPLY_MARK(net, skb->mark), | 1590 | IP4_REPLY_MARK(net, skb->mark), |
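ip_finish_output_gso() now always falls back to software segmentation whenever the GSO segments would not fit the egress MTU, instead of gating that on the removed IPSKB_FRAG_SEGS flag. The toy decision function below models skb_gso_validate_mtu() as a plain size comparison, which glosses over the real per-segment accounting; the numbers are made up.

#include <stdbool.h>
#include <stdio.h>

/* Analogue of skb_gso_validate_mtu(): does one MSS-sized segment plus its
 * network/transport headers fit into the MTU? */
static bool gso_segments_fit(unsigned int gso_size, unsigned int hdr_len,
                             unsigned int mtu)
{
    return gso_size + hdr_len <= mtu;
}

int main(void)
{
    /* e.g. a GRO'd TCP skb being forwarded out of a 1400-byte MTU link */
    unsigned int mss = 1448, hdrs = 52, mtu = 1400;

    if (gso_segments_fit(mss, hdrs, mtu))
        puts("fast path: hand the GSO skb to the device as-is");
    else
        puts("slow path: segment in software, then fragment if DF is clear");
    return 0;
}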
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index 777bc1883870..fed3d29f9eb3 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c | |||
@@ -63,7 +63,6 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, | |||
63 | int pkt_len = skb->len - skb_inner_network_offset(skb); | 63 | int pkt_len = skb->len - skb_inner_network_offset(skb); |
64 | struct net *net = dev_net(rt->dst.dev); | 64 | struct net *net = dev_net(rt->dst.dev); |
65 | struct net_device *dev = skb->dev; | 65 | struct net_device *dev = skb->dev; |
66 | int skb_iif = skb->skb_iif; | ||
67 | struct iphdr *iph; | 66 | struct iphdr *iph; |
68 | int err; | 67 | int err; |
69 | 68 | ||
@@ -73,16 +72,6 @@ void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb, | |||
73 | skb_dst_set(skb, &rt->dst); | 72 | skb_dst_set(skb, &rt->dst); |
74 | memset(IPCB(skb), 0, sizeof(*IPCB(skb))); | 73 | memset(IPCB(skb), 0, sizeof(*IPCB(skb))); |
75 | 74 | ||
76 | if (skb_iif && !(df & htons(IP_DF))) { | ||
77 | /* Arrived from an ingress interface, got encapsulated, with | ||
78 | * fragmentation of encapulating frames allowed. | ||
79 | * If skb is gso, the resulting encapsulated network segments | ||
80 | * may exceed dst mtu. | ||
81 | * Allow IP Fragmentation of segments. | ||
82 | */ | ||
83 | IPCB(skb)->flags |= IPSKB_FRAG_SEGS; | ||
84 | } | ||
85 | |||
86 | /* Push down and install the IP header. */ | 75 | /* Push down and install the IP header. */ |
87 | skb_push(skb, sizeof(struct iphdr)); | 76 | skb_push(skb, sizeof(struct iphdr)); |
88 | skb_reset_network_header(skb); | 77 | skb_reset_network_header(skb); |
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index 5f006e13de56..27089f5ebbb1 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c | |||
@@ -1749,7 +1749,7 @@ static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt, | |||
1749 | vif->dev->stats.tx_bytes += skb->len; | 1749 | vif->dev->stats.tx_bytes += skb->len; |
1750 | } | 1750 | } |
1751 | 1751 | ||
1752 | IPCB(skb)->flags |= IPSKB_FORWARDED | IPSKB_FRAG_SEGS; | 1752 | IPCB(skb)->flags |= IPSKB_FORWARDED; |
1753 | 1753 | ||
1754 | /* RFC1584 teaches, that DVMRP/PIM router must deliver packets locally | 1754 | /* RFC1584 teaches, that DVMRP/PIM router must deliver packets locally |
1755 | * not only before forwarding, but after forwarding on all output | 1755 | * not only before forwarding, but after forwarding on all output |
diff --git a/net/ipv4/netfilter/nft_dup_ipv4.c b/net/ipv4/netfilter/nft_dup_ipv4.c index bf855e64fc45..0c01a270bf9f 100644 --- a/net/ipv4/netfilter/nft_dup_ipv4.c +++ b/net/ipv4/netfilter/nft_dup_ipv4.c | |||
@@ -28,7 +28,7 @@ static void nft_dup_ipv4_eval(const struct nft_expr *expr, | |||
28 | struct in_addr gw = { | 28 | struct in_addr gw = { |
29 | .s_addr = (__force __be32)regs->data[priv->sreg_addr], | 29 | .s_addr = (__force __be32)regs->data[priv->sreg_addr], |
30 | }; | 30 | }; |
31 | int oif = regs->data[priv->sreg_dev]; | 31 | int oif = priv->sreg_dev ? regs->data[priv->sreg_dev] : -1; |
32 | 32 | ||
33 | nf_dup_ipv4(pkt->net, pkt->skb, pkt->hook, &gw, oif); | 33 | nf_dup_ipv4(pkt->net, pkt->skb, pkt->hook, &gw, oif); |
34 | } | 34 | } |
@@ -59,7 +59,9 @@ static int nft_dup_ipv4_dump(struct sk_buff *skb, const struct nft_expr *expr) | |||
59 | { | 59 | { |
60 | struct nft_dup_ipv4 *priv = nft_expr_priv(expr); | 60 | struct nft_dup_ipv4 *priv = nft_expr_priv(expr); |
61 | 61 | ||
62 | if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr) || | 62 | if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr)) |
63 | goto nla_put_failure; | ||
64 | if (priv->sreg_dev && | ||
63 | nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev)) | 65 | nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev)) |
64 | goto nla_put_failure; | 66 | goto nla_put_failure; |
65 | 67 | ||
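This nft_dup_ipv4 change (mirrored for IPv6 further down) treats the device register as optional: an unset register becomes oif = -1 at evaluation time and the corresponding attribute is skipped when dumping. A compact sketch of that optional-field pattern, with printf standing in for the netlink dump helpers:

#include <stdio.h>

struct dup_priv {
    unsigned int sreg_addr;   /* always configured */
    unsigned int sreg_dev;    /* 0 means: no device register configured */
};

static void eval(const struct dup_priv *priv, const int *regs)
{
    int oif = priv->sreg_dev ? regs[priv->sreg_dev] : -1; /* -1: any oif */

    printf("duplicate via oif %d\n", oif);
}

static void dump(const struct dup_priv *priv)
{
    printf("addr register: %u\n", priv->sreg_addr);
    if (priv->sreg_dev)                 /* emit the attribute only if set */
        printf("dev register:  %u\n", priv->sreg_dev);
}

int main(void)
{
    int regs[8] = { [2] = 3 };          /* register 2 holds ifindex 3 */
    struct dup_priv with_dev = { .sreg_addr = 1, .sreg_dev = 2 };
    struct dup_priv no_dev   = { .sreg_addr = 1, .sreg_dev = 0 };

    eval(&with_dev, regs); dump(&with_dev);
    eval(&no_dev, regs);   dump(&no_dev);
    return 0;
}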
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 62d4d90c1389..2a57566e6e91 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -753,7 +753,9 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow | |||
753 | goto reject_redirect; | 753 | goto reject_redirect; |
754 | } | 754 | } |
755 | 755 | ||
756 | n = ipv4_neigh_lookup(&rt->dst, NULL, &new_gw); | 756 | n = __ipv4_neigh_lookup(rt->dst.dev, new_gw); |
757 | if (!n) | ||
758 | n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev); | ||
757 | if (!IS_ERR(n)) { | 759 | if (!IS_ERR(n)) { |
758 | if (!(n->nud_state & NUD_VALID)) { | 760 | if (!(n->nud_state & NUD_VALID)) { |
759 | neigh_event_send(n, NULL); | 761 | neigh_event_send(n, NULL); |
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 3251fe71f39f..814af89c1bd3 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c | |||
@@ -1164,7 +1164,7 @@ restart: | |||
1164 | 1164 | ||
1165 | err = -EPIPE; | 1165 | err = -EPIPE; |
1166 | if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) | 1166 | if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) |
1167 | goto out_err; | 1167 | goto do_error; |
1168 | 1168 | ||
1169 | sg = !!(sk->sk_route_caps & NETIF_F_SG); | 1169 | sg = !!(sk->sk_route_caps & NETIF_F_SG); |
1170 | 1170 | ||
@@ -1241,7 +1241,7 @@ new_segment: | |||
1241 | 1241 | ||
1242 | if (!skb_can_coalesce(skb, i, pfrag->page, | 1242 | if (!skb_can_coalesce(skb, i, pfrag->page, |
1243 | pfrag->offset)) { | 1243 | pfrag->offset)) { |
1244 | if (i == sysctl_max_skb_frags || !sg) { | 1244 | if (i >= sysctl_max_skb_frags || !sg) { |
1245 | tcp_mark_push(tp, skb); | 1245 | tcp_mark_push(tp, skb); |
1246 | goto new_segment; | 1246 | goto new_segment; |
1247 | } | 1247 | } |
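The fragment-count test in the send path now uses ">=" because sysctl_max_skb_frags can be lowered at runtime below the number of fragments the current skb already carries; with "==" the limit would then never trigger. A tiny illustration:

#include <stdio.h>

static int limit = 17;              /* stands in for sysctl_max_skb_frags */

static int must_start_new_segment(int nr_frags)
{
    return nr_frags >= limit;       /* "== limit" would miss nr_frags > limit */
}

int main(void)
{
    int nr_frags = 17;

    limit = 8;                      /* admin lowers the sysctl at runtime */
    printf("start new segment: %s\n",
           must_start_new_segment(nr_frags) ? "yes" : "no (bug with ==)");
    return 0;
}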
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index 1294af4e0127..f9038d6b109e 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c | |||
@@ -200,8 +200,10 @@ static void tcp_reinit_congestion_control(struct sock *sk, | |||
200 | icsk->icsk_ca_ops = ca; | 200 | icsk->icsk_ca_ops = ca; |
201 | icsk->icsk_ca_setsockopt = 1; | 201 | icsk->icsk_ca_setsockopt = 1; |
202 | 202 | ||
203 | if (sk->sk_state != TCP_CLOSE) | 203 | if (sk->sk_state != TCP_CLOSE) { |
204 | memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv)); | ||
204 | tcp_init_congestion_control(sk); | 205 | tcp_init_congestion_control(sk); |
206 | } | ||
205 | } | 207 | } |
206 | 208 | ||
207 | /* Manage refcounts on socket close. */ | 209 | /* Manage refcounts on socket close. */ |
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c index 10d728b6804c..ab37c6775630 100644 --- a/net/ipv4/tcp_dctcp.c +++ b/net/ipv4/tcp_dctcp.c | |||
@@ -56,6 +56,7 @@ struct dctcp { | |||
56 | u32 next_seq; | 56 | u32 next_seq; |
57 | u32 ce_state; | 57 | u32 ce_state; |
58 | u32 delayed_ack_reserved; | 58 | u32 delayed_ack_reserved; |
59 | u32 loss_cwnd; | ||
59 | }; | 60 | }; |
60 | 61 | ||
61 | static unsigned int dctcp_shift_g __read_mostly = 4; /* g = 1/2^4 */ | 62 | static unsigned int dctcp_shift_g __read_mostly = 4; /* g = 1/2^4 */ |
@@ -96,6 +97,7 @@ static void dctcp_init(struct sock *sk) | |||
96 | ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA); | 97 | ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA); |
97 | 98 | ||
98 | ca->delayed_ack_reserved = 0; | 99 | ca->delayed_ack_reserved = 0; |
100 | ca->loss_cwnd = 0; | ||
99 | ca->ce_state = 0; | 101 | ca->ce_state = 0; |
100 | 102 | ||
101 | dctcp_reset(tp, ca); | 103 | dctcp_reset(tp, ca); |
@@ -111,9 +113,10 @@ static void dctcp_init(struct sock *sk) | |||
111 | 113 | ||
112 | static u32 dctcp_ssthresh(struct sock *sk) | 114 | static u32 dctcp_ssthresh(struct sock *sk) |
113 | { | 115 | { |
114 | const struct dctcp *ca = inet_csk_ca(sk); | 116 | struct dctcp *ca = inet_csk_ca(sk); |
115 | struct tcp_sock *tp = tcp_sk(sk); | 117 | struct tcp_sock *tp = tcp_sk(sk); |
116 | 118 | ||
119 | ca->loss_cwnd = tp->snd_cwnd; | ||
117 | return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U); | 120 | return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U); |
118 | } | 121 | } |
119 | 122 | ||
@@ -308,12 +311,20 @@ static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr, | |||
308 | return 0; | 311 | return 0; |
309 | } | 312 | } |
310 | 313 | ||
314 | static u32 dctcp_cwnd_undo(struct sock *sk) | ||
315 | { | ||
316 | const struct dctcp *ca = inet_csk_ca(sk); | ||
317 | |||
318 | return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd); | ||
319 | } | ||
320 | |||
311 | static struct tcp_congestion_ops dctcp __read_mostly = { | 321 | static struct tcp_congestion_ops dctcp __read_mostly = { |
312 | .init = dctcp_init, | 322 | .init = dctcp_init, |
313 | .in_ack_event = dctcp_update_alpha, | 323 | .in_ack_event = dctcp_update_alpha, |
314 | .cwnd_event = dctcp_cwnd_event, | 324 | .cwnd_event = dctcp_cwnd_event, |
315 | .ssthresh = dctcp_ssthresh, | 325 | .ssthresh = dctcp_ssthresh, |
316 | .cong_avoid = tcp_reno_cong_avoid, | 326 | .cong_avoid = tcp_reno_cong_avoid, |
327 | .undo_cwnd = dctcp_cwnd_undo, | ||
317 | .set_state = dctcp_state, | 328 | .set_state = dctcp_state, |
318 | .get_info = dctcp_get_info, | 329 | .get_info = dctcp_get_info, |
319 | .flags = TCP_CONG_NEEDS_ECN, | 330 | .flags = TCP_CONG_NEEDS_ECN, |
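The DCTCP hunks add loss_cwnd bookkeeping and an undo_cwnd hook: ssthresh() records the congestion window in force when the reduction was taken, and the undo path restores at least that value if the loss later proves spurious. A self-contained toy version of the arithmetic, not a congestion-control module:

#include <stdio.h>

struct toy_ca {
    unsigned int snd_cwnd;    /* current congestion window (segments) */
    unsigned int loss_cwnd;   /* window saved when ssthresh() ran */
    unsigned int alpha;       /* DCTCP congestion estimate, 0..1024 */
};

static unsigned int toy_ssthresh(struct toy_ca *ca)
{
    ca->loss_cwnd = ca->snd_cwnd;                 /* the newly added line */
    unsigned int reduced = ca->snd_cwnd - ((ca->snd_cwnd * ca->alpha) >> 11);
    return reduced > 2 ? reduced : 2;
}

static unsigned int toy_cwnd_undo(const struct toy_ca *ca)
{
    return ca->snd_cwnd > ca->loss_cwnd ? ca->snd_cwnd : ca->loss_cwnd;
}

int main(void)
{
    struct toy_ca ca = { .snd_cwnd = 100, .loss_cwnd = 0, .alpha = 512 };

    ca.snd_cwnd = toy_ssthresh(&ca);              /* 100 - 100*512/2048 = 75 */
    printf("after loss: cwnd=%u\n", ca.snd_cwnd);
    printf("after undo: cwnd=%u\n", toy_cwnd_undo(&ca));
    return 0;
}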
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 61b7be303eec..2259114c7242 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -1564,6 +1564,21 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb) | |||
1564 | } | 1564 | } |
1565 | EXPORT_SYMBOL(tcp_add_backlog); | 1565 | EXPORT_SYMBOL(tcp_add_backlog); |
1566 | 1566 | ||
1567 | int tcp_filter(struct sock *sk, struct sk_buff *skb) | ||
1568 | { | ||
1569 | struct tcphdr *th = (struct tcphdr *)skb->data; | ||
1570 | unsigned int eaten = skb->len; | ||
1571 | int err; | ||
1572 | |||
1573 | err = sk_filter_trim_cap(sk, skb, th->doff * 4); | ||
1574 | if (!err) { | ||
1575 | eaten -= skb->len; | ||
1576 | TCP_SKB_CB(skb)->end_seq -= eaten; | ||
1577 | } | ||
1578 | return err; | ||
1579 | } | ||
1580 | EXPORT_SYMBOL(tcp_filter); | ||
1581 | |||
1567 | /* | 1582 | /* |
1568 | * From tcp_input.c | 1583 | * From tcp_input.c |
1569 | */ | 1584 | */ |
@@ -1676,8 +1691,10 @@ process: | |||
1676 | 1691 | ||
1677 | nf_reset(skb); | 1692 | nf_reset(skb); |
1678 | 1693 | ||
1679 | if (sk_filter(sk, skb)) | 1694 | if (tcp_filter(sk, skb)) |
1680 | goto discard_and_relse; | 1695 | goto discard_and_relse; |
1696 | th = (const struct tcphdr *)skb->data; | ||
1697 | iph = ip_hdr(skb); | ||
1681 | 1698 | ||
1682 | skb->dev = NULL; | 1699 | skb->dev = NULL; |
1683 | 1700 | ||
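tcp_filter() wraps sk_filter_trim_cap() so a socket filter may trim the skb down to just the TCP header, and the receive paths then re-read their header pointers because the packet data may have been reallocated underneath them. The userspace analogy below uses realloc() for the "buffer may move" part; everything else is invented.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct pkt {
    char  *data;
    size_t len;
};

/* May shrink and relocate the buffer, like trimming a cloned skb. */
static void filter_and_trim(struct pkt *p, size_t new_len)
{
    char *nd = realloc(p->data, new_len);

    if (nd)
        p->data = nd;
    p->len = new_len;
}

int main(void)
{
    struct pkt p = { .len = 64 };

    p.data = malloc(p.len);
    if (!p.data)
        return 1;
    memcpy(p.data, "HDRpayload", 10);

    char *hdr = p.data;           /* cached header pointer */
    filter_and_trim(&p, 16);      /* the buffer may have moved... */
    hdr = p.data;                 /* ...so refresh the pointer before use */

    printf("%.3s\n", hdr);
    free(p.data);
    return 0;
}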
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index d123d68f4d1d..5bab6c3f7a2f 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -1455,7 +1455,7 @@ static void udp_v4_rehash(struct sock *sk) | |||
1455 | udp_lib_rehash(sk, new_hash); | 1455 | udp_lib_rehash(sk, new_hash); |
1456 | } | 1456 | } |
1457 | 1457 | ||
1458 | static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | 1458 | int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) |
1459 | { | 1459 | { |
1460 | int rc; | 1460 | int rc; |
1461 | 1461 | ||
@@ -1652,10 +1652,10 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb, | |||
1652 | 1652 | ||
1653 | if (use_hash2) { | 1653 | if (use_hash2) { |
1654 | hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) & | 1654 | hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) & |
1655 | udp_table.mask; | 1655 | udptable->mask; |
1656 | hash2 = udp4_portaddr_hash(net, daddr, hnum) & udp_table.mask; | 1656 | hash2 = udp4_portaddr_hash(net, daddr, hnum) & udptable->mask; |
1657 | start_lookup: | 1657 | start_lookup: |
1658 | hslot = &udp_table.hash2[hash2]; | 1658 | hslot = &udptable->hash2[hash2]; |
1659 | offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node); | 1659 | offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node); |
1660 | } | 1660 | } |
1661 | 1661 | ||
diff --git a/net/ipv4/udp_impl.h b/net/ipv4/udp_impl.h index 7e0fe4bdd967..feb50a16398d 100644 --- a/net/ipv4/udp_impl.h +++ b/net/ipv4/udp_impl.h | |||
@@ -25,7 +25,7 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, | |||
25 | int flags, int *addr_len); | 25 | int flags, int *addr_len); |
26 | int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, | 26 | int udp_sendpage(struct sock *sk, struct page *page, int offset, size_t size, |
27 | int flags); | 27 | int flags); |
28 | int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); | 28 | int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); |
29 | void udp_destroy_sock(struct sock *sk); | 29 | void udp_destroy_sock(struct sock *sk); |
30 | 30 | ||
31 | #ifdef CONFIG_PROC_FS | 31 | #ifdef CONFIG_PROC_FS |
diff --git a/net/ipv4/udplite.c b/net/ipv4/udplite.c index af817158d830..ff450c2aad9b 100644 --- a/net/ipv4/udplite.c +++ b/net/ipv4/udplite.c | |||
@@ -50,7 +50,7 @@ struct proto udplite_prot = { | |||
50 | .sendmsg = udp_sendmsg, | 50 | .sendmsg = udp_sendmsg, |
51 | .recvmsg = udp_recvmsg, | 51 | .recvmsg = udp_recvmsg, |
52 | .sendpage = udp_sendpage, | 52 | .sendpage = udp_sendpage, |
53 | .backlog_rcv = udp_queue_rcv_skb, | 53 | .backlog_rcv = __udp_queue_rcv_skb, |
54 | .hash = udp_lib_hash, | 54 | .hash = udp_lib_hash, |
55 | .unhash = udp_lib_unhash, | 55 | .unhash = udp_lib_unhash, |
56 | .get_port = udp_v4_get_port, | 56 | .get_port = udp_v4_get_port, |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 060dd9922018..4bc5ba3ae452 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -183,7 +183,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx, | |||
183 | 183 | ||
184 | static void addrconf_dad_start(struct inet6_ifaddr *ifp); | 184 | static void addrconf_dad_start(struct inet6_ifaddr *ifp); |
185 | static void addrconf_dad_work(struct work_struct *w); | 185 | static void addrconf_dad_work(struct work_struct *w); |
186 | static void addrconf_dad_completed(struct inet6_ifaddr *ifp); | 186 | static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id); |
187 | static void addrconf_dad_run(struct inet6_dev *idev); | 187 | static void addrconf_dad_run(struct inet6_dev *idev); |
188 | static void addrconf_rs_timer(unsigned long data); | 188 | static void addrconf_rs_timer(unsigned long data); |
189 | static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa); | 189 | static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa); |
@@ -2898,6 +2898,7 @@ static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr, | |||
2898 | spin_lock_bh(&ifp->lock); | 2898 | spin_lock_bh(&ifp->lock); |
2899 | ifp->flags &= ~IFA_F_TENTATIVE; | 2899 | ifp->flags &= ~IFA_F_TENTATIVE; |
2900 | spin_unlock_bh(&ifp->lock); | 2900 | spin_unlock_bh(&ifp->lock); |
2901 | rt_genid_bump_ipv6(dev_net(idev->dev)); | ||
2901 | ipv6_ifa_notify(RTM_NEWADDR, ifp); | 2902 | ipv6_ifa_notify(RTM_NEWADDR, ifp); |
2902 | in6_ifa_put(ifp); | 2903 | in6_ifa_put(ifp); |
2903 | } | 2904 | } |
@@ -3740,7 +3741,7 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp) | |||
3740 | { | 3741 | { |
3741 | struct inet6_dev *idev = ifp->idev; | 3742 | struct inet6_dev *idev = ifp->idev; |
3742 | struct net_device *dev = idev->dev; | 3743 | struct net_device *dev = idev->dev; |
3743 | bool notify = false; | 3744 | bool bump_id, notify = false; |
3744 | 3745 | ||
3745 | addrconf_join_solict(dev, &ifp->addr); | 3746 | addrconf_join_solict(dev, &ifp->addr); |
3746 | 3747 | ||
@@ -3755,11 +3756,12 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp) | |||
3755 | idev->cnf.accept_dad < 1 || | 3756 | idev->cnf.accept_dad < 1 || |
3756 | !(ifp->flags&IFA_F_TENTATIVE) || | 3757 | !(ifp->flags&IFA_F_TENTATIVE) || |
3757 | ifp->flags & IFA_F_NODAD) { | 3758 | ifp->flags & IFA_F_NODAD) { |
3759 | bump_id = ifp->flags & IFA_F_TENTATIVE; | ||
3758 | ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED); | 3760 | ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED); |
3759 | spin_unlock(&ifp->lock); | 3761 | spin_unlock(&ifp->lock); |
3760 | read_unlock_bh(&idev->lock); | 3762 | read_unlock_bh(&idev->lock); |
3761 | 3763 | ||
3762 | addrconf_dad_completed(ifp); | 3764 | addrconf_dad_completed(ifp, bump_id); |
3763 | return; | 3765 | return; |
3764 | } | 3766 | } |
3765 | 3767 | ||
@@ -3819,8 +3821,8 @@ static void addrconf_dad_work(struct work_struct *w) | |||
3819 | struct inet6_ifaddr, | 3821 | struct inet6_ifaddr, |
3820 | dad_work); | 3822 | dad_work); |
3821 | struct inet6_dev *idev = ifp->idev; | 3823 | struct inet6_dev *idev = ifp->idev; |
3824 | bool bump_id, disable_ipv6 = false; | ||
3822 | struct in6_addr mcaddr; | 3825 | struct in6_addr mcaddr; |
3823 | bool disable_ipv6 = false; | ||
3824 | 3826 | ||
3825 | enum { | 3827 | enum { |
3826 | DAD_PROCESS, | 3828 | DAD_PROCESS, |
@@ -3890,11 +3892,12 @@ static void addrconf_dad_work(struct work_struct *w) | |||
3890 | * DAD was successful | 3892 | * DAD was successful |
3891 | */ | 3893 | */ |
3892 | 3894 | ||
3895 | bump_id = ifp->flags & IFA_F_TENTATIVE; | ||
3893 | ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED); | 3896 | ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED); |
3894 | spin_unlock(&ifp->lock); | 3897 | spin_unlock(&ifp->lock); |
3895 | write_unlock_bh(&idev->lock); | 3898 | write_unlock_bh(&idev->lock); |
3896 | 3899 | ||
3897 | addrconf_dad_completed(ifp); | 3900 | addrconf_dad_completed(ifp, bump_id); |
3898 | 3901 | ||
3899 | goto out; | 3902 | goto out; |
3900 | } | 3903 | } |
@@ -3931,7 +3934,7 @@ static bool ipv6_lonely_lladdr(struct inet6_ifaddr *ifp) | |||
3931 | return true; | 3934 | return true; |
3932 | } | 3935 | } |
3933 | 3936 | ||
3934 | static void addrconf_dad_completed(struct inet6_ifaddr *ifp) | 3937 | static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id) |
3935 | { | 3938 | { |
3936 | struct net_device *dev = ifp->idev->dev; | 3939 | struct net_device *dev = ifp->idev->dev; |
3937 | struct in6_addr lladdr; | 3940 | struct in6_addr lladdr; |
@@ -3983,6 +3986,9 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp) | |||
3983 | spin_unlock(&ifp->lock); | 3986 | spin_unlock(&ifp->lock); |
3984 | write_unlock_bh(&ifp->idev->lock); | 3987 | write_unlock_bh(&ifp->idev->lock); |
3985 | } | 3988 | } |
3989 | |||
3990 | if (bump_id) | ||
3991 | rt_genid_bump_ipv6(dev_net(dev)); | ||
3986 | } | 3992 | } |
3987 | 3993 | ||
3988 | static void addrconf_dad_run(struct inet6_dev *idev) | 3994 | static void addrconf_dad_run(struct inet6_dev *idev) |
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index bd59c343d35f..7370ad2e693a 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c | |||
@@ -448,7 +448,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info, | |||
448 | if (__ipv6_addr_needs_scope_id(addr_type)) | 448 | if (__ipv6_addr_needs_scope_id(addr_type)) |
449 | iif = skb->dev->ifindex; | 449 | iif = skb->dev->ifindex; |
450 | else | 450 | else |
451 | iif = l3mdev_master_ifindex(skb->dev); | 451 | iif = l3mdev_master_ifindex(skb_dst(skb)->dev); |
452 | 452 | ||
453 | /* | 453 | /* |
454 | * Must not send error if the source does not uniquely | 454 | * Must not send error if the source does not uniquely |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 6001e781164e..59eb4ed99ce8 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -1366,7 +1366,7 @@ emsgsize: | |||
1366 | if (((length > mtu) || | 1366 | if (((length > mtu) || |
1367 | (skb && skb_is_gso(skb))) && | 1367 | (skb && skb_is_gso(skb))) && |
1368 | (sk->sk_protocol == IPPROTO_UDP) && | 1368 | (sk->sk_protocol == IPPROTO_UDP) && |
1369 | (rt->dst.dev->features & NETIF_F_UFO) && | 1369 | (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len && |
1370 | (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) { | 1370 | (sk->sk_type == SOCK_DGRAM) && !udp_get_no_check6_tx(sk)) { |
1371 | err = ip6_ufo_append_data(sk, queue, getfrag, from, length, | 1371 | err = ip6_ufo_append_data(sk, queue, getfrag, from, length, |
1372 | hh_len, fragheaderlen, exthdrlen, | 1372 | hh_len, fragheaderlen, exthdrlen, |
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 87784560dc46..0a4759b89da2 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -1034,6 +1034,7 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield, | |||
1034 | int mtu; | 1034 | int mtu; |
1035 | unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen; | 1035 | unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen; |
1036 | unsigned int max_headroom = psh_hlen; | 1036 | unsigned int max_headroom = psh_hlen; |
1037 | bool use_cache = false; | ||
1037 | u8 hop_limit; | 1038 | u8 hop_limit; |
1038 | int err = -1; | 1039 | int err = -1; |
1039 | 1040 | ||
@@ -1066,7 +1067,15 @@ int ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev, __u8 dsfield, | |||
1066 | 1067 | ||
1067 | memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr)); | 1068 | memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr)); |
1068 | neigh_release(neigh); | 1069 | neigh_release(neigh); |
1069 | } else if (!fl6->flowi6_mark) | 1070 | } else if (!(t->parms.flags & |
1071 | (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) { | ||
1072 | /* enable the cache only only if the routing decision does | ||
1073 | * not depend on the current inner header value | ||
1074 | */ | ||
1075 | use_cache = true; | ||
1076 | } | ||
1077 | |||
1078 | if (use_cache) | ||
1070 | dst = dst_cache_get(&t->dst_cache); | 1079 | dst = dst_cache_get(&t->dst_cache); |
1071 | 1080 | ||
1072 | if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr)) | 1081 | if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr)) |
@@ -1150,7 +1159,7 @@ route_lookup: | |||
1150 | if (t->encap.type != TUNNEL_ENCAP_NONE) | 1159 | if (t->encap.type != TUNNEL_ENCAP_NONE) |
1151 | goto tx_err_dst_release; | 1160 | goto tx_err_dst_release; |
1152 | } else { | 1161 | } else { |
1153 | if (!fl6->flowi6_mark && ndst) | 1162 | if (use_cache && ndst) |
1154 | dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr); | 1163 | dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr); |
1155 | } | 1164 | } |
1156 | skb_dst_set(skb, dst); | 1165 | skb_dst_set(skb, dst); |
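The ip6_tnl_xmit() change enables the per-tunnel dst cache only when neither the traffic class nor the fwmark is inherited from the inner packet, i.e. only when the routing key cannot vary per packet. A simplified cache-validity model follows; all names and the pretend lookup cost are made up.

#include <stdbool.h>
#include <stdio.h>

struct tnl {
    bool inherit_tclass;   /* IP6_TNL_F_USE_ORIG_TCLASS */
    bool inherit_fwmark;   /* IP6_TNL_F_USE_ORIG_FWMARK */
    int  cached_route;     /* 0 = no cached route */
};

static int route_lookup(int tclass, int mark)
{
    return 1000 + tclass + mark;          /* pretend this is expensive */
}

static int tnl_route(struct tnl *t, int pkt_tclass, int pkt_mark)
{
    bool use_cache = !t->inherit_tclass && !t->inherit_fwmark;

    if (use_cache && t->cached_route)
        return t->cached_route;           /* safe: the key cannot vary per packet */

    int rt = route_lookup(t->inherit_tclass ? pkt_tclass : 0,
                          t->inherit_fwmark ? pkt_mark : 0);
    if (use_cache)
        t->cached_route = rt;             /* only populate a cache that stays valid */
    return rt;
}

int main(void)
{
    struct tnl plain   = { false, false, 0 };
    struct tnl inherit = { true,  false, 0 };

    printf("%d %d\n", tnl_route(&plain, 3, 7), tnl_route(&plain, 5, 9));
    printf("%d %d\n", tnl_route(&inherit, 3, 7), tnl_route(&inherit, 5, 9));
    return 0;
}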
diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c index a7520528ecd2..b283f293ee4a 100644 --- a/net/ipv6/ip6_udp_tunnel.c +++ b/net/ipv6/ip6_udp_tunnel.c | |||
@@ -88,9 +88,6 @@ int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk, | |||
88 | 88 | ||
89 | uh->len = htons(skb->len); | 89 | uh->len = htons(skb->len); |
90 | 90 | ||
91 | memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); | ||
92 | IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | ||
93 | | IPSKB_REROUTED); | ||
94 | skb_dst_set(skb, dst); | 91 | skb_dst_set(skb, dst); |
95 | 92 | ||
96 | udp6_set_csum(nocheck, skb, saddr, daddr, skb->len); | 93 | udp6_set_csum(nocheck, skb, saddr, daddr, skb->len); |
diff --git a/net/ipv6/netfilter/nft_dup_ipv6.c b/net/ipv6/netfilter/nft_dup_ipv6.c index 8bfd470cbe72..831f86e1ec08 100644 --- a/net/ipv6/netfilter/nft_dup_ipv6.c +++ b/net/ipv6/netfilter/nft_dup_ipv6.c | |||
@@ -26,7 +26,7 @@ static void nft_dup_ipv6_eval(const struct nft_expr *expr, | |||
26 | { | 26 | { |
27 | struct nft_dup_ipv6 *priv = nft_expr_priv(expr); | 27 | struct nft_dup_ipv6 *priv = nft_expr_priv(expr); |
28 | struct in6_addr *gw = (struct in6_addr *)®s->data[priv->sreg_addr]; | 28 | struct in6_addr *gw = (struct in6_addr *)®s->data[priv->sreg_addr]; |
29 | int oif = regs->data[priv->sreg_dev]; | 29 | int oif = priv->sreg_dev ? regs->data[priv->sreg_dev] : -1; |
30 | 30 | ||
31 | nf_dup_ipv6(pkt->net, pkt->skb, pkt->hook, gw, oif); | 31 | nf_dup_ipv6(pkt->net, pkt->skb, pkt->hook, gw, oif); |
32 | } | 32 | } |
@@ -57,7 +57,9 @@ static int nft_dup_ipv6_dump(struct sk_buff *skb, const struct nft_expr *expr) | |||
57 | { | 57 | { |
58 | struct nft_dup_ipv6 *priv = nft_expr_priv(expr); | 58 | struct nft_dup_ipv6 *priv = nft_expr_priv(expr); |
59 | 59 | ||
60 | if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr) || | 60 | if (nft_dump_register(skb, NFTA_DUP_SREG_ADDR, priv->sreg_addr)) |
61 | goto nla_put_failure; | ||
62 | if (priv->sreg_dev && | ||
61 | nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev)) | 63 | nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev)) |
62 | goto nla_put_failure; | 64 | goto nla_put_failure; |
63 | 65 | ||
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 947ed1ded026..1b57e11e6e0d 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -1364,6 +1364,9 @@ static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk, | |||
1364 | if (rt6->rt6i_flags & RTF_LOCAL) | 1364 | if (rt6->rt6i_flags & RTF_LOCAL) |
1365 | return; | 1365 | return; |
1366 | 1366 | ||
1367 | if (dst_metric_locked(dst, RTAX_MTU)) | ||
1368 | return; | ||
1369 | |||
1367 | dst_confirm(dst); | 1370 | dst_confirm(dst); |
1368 | mtu = max_t(u32, mtu, IPV6_MIN_MTU); | 1371 | mtu = max_t(u32, mtu, IPV6_MIN_MTU); |
1369 | if (mtu >= dst_mtu(dst)) | 1372 | if (mtu >= dst_mtu(dst)) |
@@ -2758,6 +2761,7 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg) | |||
2758 | PMTU discovery. | 2761 | PMTU discovery. |
2759 | */ | 2762 | */ |
2760 | if (rt->dst.dev == arg->dev && | 2763 | if (rt->dst.dev == arg->dev && |
2764 | dst_metric_raw(&rt->dst, RTAX_MTU) && | ||
2761 | !dst_metric_locked(&rt->dst, RTAX_MTU)) { | 2765 | !dst_metric_locked(&rt->dst, RTAX_MTU)) { |
2762 | if (rt->rt6i_flags & RTF_CACHE) { | 2766 | if (rt->rt6i_flags & RTF_CACHE) { |
2763 | /* For RTF_CACHE with rt6i_pmtu == 0 | 2767 | /* For RTF_CACHE with rt6i_pmtu == 0 |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 5a27ab4eab39..b9f1fee9a886 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -818,8 +818,12 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 | |||
818 | fl6.flowi6_proto = IPPROTO_TCP; | 818 | fl6.flowi6_proto = IPPROTO_TCP; |
819 | if (rt6_need_strict(&fl6.daddr) && !oif) | 819 | if (rt6_need_strict(&fl6.daddr) && !oif) |
820 | fl6.flowi6_oif = tcp_v6_iif(skb); | 820 | fl6.flowi6_oif = tcp_v6_iif(skb); |
821 | else | 821 | else { |
822 | fl6.flowi6_oif = oif ? : skb->skb_iif; | 822 | if (!oif && netif_index_is_l3_master(net, skb->skb_iif)) |
823 | oif = skb->skb_iif; | ||
824 | |||
825 | fl6.flowi6_oif = oif; | ||
826 | } | ||
823 | 827 | ||
824 | fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark); | 828 | fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark); |
825 | fl6.fl6_dport = t1->dest; | 829 | fl6.fl6_dport = t1->dest; |
@@ -1225,7 +1229,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb) | |||
1225 | if (skb->protocol == htons(ETH_P_IP)) | 1229 | if (skb->protocol == htons(ETH_P_IP)) |
1226 | return tcp_v4_do_rcv(sk, skb); | 1230 | return tcp_v4_do_rcv(sk, skb); |
1227 | 1231 | ||
1228 | if (sk_filter(sk, skb)) | 1232 | if (tcp_filter(sk, skb)) |
1229 | goto discard; | 1233 | goto discard; |
1230 | 1234 | ||
1231 | /* | 1235 | /* |
@@ -1453,8 +1457,10 @@ process: | |||
1453 | if (tcp_v6_inbound_md5_hash(sk, skb)) | 1457 | if (tcp_v6_inbound_md5_hash(sk, skb)) |
1454 | goto discard_and_relse; | 1458 | goto discard_and_relse; |
1455 | 1459 | ||
1456 | if (sk_filter(sk, skb)) | 1460 | if (tcp_filter(sk, skb)) |
1457 | goto discard_and_relse; | 1461 | goto discard_and_relse; |
1462 | th = (const struct tcphdr *)skb->data; | ||
1463 | hdr = ipv6_hdr(skb); | ||
1458 | 1464 | ||
1459 | skb->dev = NULL; | 1465 | skb->dev = NULL; |
1460 | 1466 | ||
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index b2ef061e6836..e4a8000d59ad 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -514,7 +514,7 @@ out: | |||
514 | return; | 514 | return; |
515 | } | 515 | } |
516 | 516 | ||
517 | static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | 517 | int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) |
518 | { | 518 | { |
519 | int rc; | 519 | int rc; |
520 | 520 | ||
@@ -706,10 +706,10 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb, | |||
706 | 706 | ||
707 | if (use_hash2) { | 707 | if (use_hash2) { |
708 | hash2_any = udp6_portaddr_hash(net, &in6addr_any, hnum) & | 708 | hash2_any = udp6_portaddr_hash(net, &in6addr_any, hnum) & |
709 | udp_table.mask; | 709 | udptable->mask; |
710 | hash2 = udp6_portaddr_hash(net, daddr, hnum) & udp_table.mask; | 710 | hash2 = udp6_portaddr_hash(net, daddr, hnum) & udptable->mask; |
711 | start_lookup: | 711 | start_lookup: |
712 | hslot = &udp_table.hash2[hash2]; | 712 | hslot = &udptable->hash2[hash2]; |
713 | offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node); | 713 | offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node); |
714 | } | 714 | } |
715 | 715 | ||
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h index f6eb1ab34f4b..e78bdc76dcc3 100644 --- a/net/ipv6/udp_impl.h +++ b/net/ipv6/udp_impl.h | |||
@@ -26,7 +26,7 @@ int compat_udpv6_getsockopt(struct sock *sk, int level, int optname, | |||
26 | int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len); | 26 | int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len); |
27 | int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, | 27 | int udpv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int noblock, |
28 | int flags, int *addr_len); | 28 | int flags, int *addr_len); |
29 | int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); | 29 | int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb); |
30 | void udpv6_destroy_sock(struct sock *sk); | 30 | void udpv6_destroy_sock(struct sock *sk); |
31 | 31 | ||
32 | #ifdef CONFIG_PROC_FS | 32 | #ifdef CONFIG_PROC_FS |
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c index 47d0d2b87106..2f5101a12283 100644 --- a/net/ipv6/udplite.c +++ b/net/ipv6/udplite.c | |||
@@ -45,7 +45,7 @@ struct proto udplitev6_prot = { | |||
45 | .getsockopt = udpv6_getsockopt, | 45 | .getsockopt = udpv6_getsockopt, |
46 | .sendmsg = udpv6_sendmsg, | 46 | .sendmsg = udpv6_sendmsg, |
47 | .recvmsg = udpv6_recvmsg, | 47 | .recvmsg = udpv6_recvmsg, |
48 | .backlog_rcv = udpv6_queue_rcv_skb, | 48 | .backlog_rcv = __udpv6_queue_rcv_skb, |
49 | .hash = udp_lib_hash, | 49 | .hash = udp_lib_hash, |
50 | .unhash = udp_lib_unhash, | 50 | .unhash = udp_lib_unhash, |
51 | .get_port = udp_v6_get_port, | 51 | .get_port = udp_v6_get_port, |
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index fce25afb652a..982f6c44ea01 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c | |||
@@ -251,8 +251,6 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
251 | int ret; | 251 | int ret; |
252 | int chk_addr_ret; | 252 | int chk_addr_ret; |
253 | 253 | ||
254 | if (!sock_flag(sk, SOCK_ZAPPED)) | ||
255 | return -EINVAL; | ||
256 | if (addr_len < sizeof(struct sockaddr_l2tpip)) | 254 | if (addr_len < sizeof(struct sockaddr_l2tpip)) |
257 | return -EINVAL; | 255 | return -EINVAL; |
258 | if (addr->l2tp_family != AF_INET) | 256 | if (addr->l2tp_family != AF_INET) |
@@ -267,6 +265,9 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
267 | read_unlock_bh(&l2tp_ip_lock); | 265 | read_unlock_bh(&l2tp_ip_lock); |
268 | 266 | ||
269 | lock_sock(sk); | 267 | lock_sock(sk); |
268 | if (!sock_flag(sk, SOCK_ZAPPED)) | ||
269 | goto out; | ||
270 | |||
270 | if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip)) | 271 | if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip)) |
271 | goto out; | 272 | goto out; |
272 | 273 | ||
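Both l2tp bind() fixes move the SOCK_ZAPPED test under lock_sock(), so the "not yet bound" state is read under the same lock that protects it against a concurrent bind or connect. A small pthread sketch of that check-under-the-lock rule, with toy types and a single-threaded demonstration:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_sock {
    pthread_mutex_t lock;
    bool zapped;        /* "not yet bound/connected" flag */
    int  bound_port;
};

static int toy_bind(struct toy_sock *sk, int port)
{
    int err = -1;

    pthread_mutex_lock(&sk->lock);
    if (!sk->zapped)            /* checked under the lock, not before taking it */
        goto out;
    sk->bound_port = port;
    sk->zapped = false;
    err = 0;
out:
    pthread_mutex_unlock(&sk->lock);
    return err;
}

int main(void)
{
    static struct toy_sock sk = { PTHREAD_MUTEX_INITIALIZER, true, 0 };

    printf("first bind:  %d\n", toy_bind(&sk, 1701));   /* succeeds */
    printf("second bind: %d\n", toy_bind(&sk, 1702));   /* rejected */
    return 0;
}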
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index ad3468c32b53..9978d01ba0ba 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c | |||
@@ -269,8 +269,6 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
269 | int addr_type; | 269 | int addr_type; |
270 | int err; | 270 | int err; |
271 | 271 | ||
272 | if (!sock_flag(sk, SOCK_ZAPPED)) | ||
273 | return -EINVAL; | ||
274 | if (addr->l2tp_family != AF_INET6) | 272 | if (addr->l2tp_family != AF_INET6) |
275 | return -EINVAL; | 273 | return -EINVAL; |
276 | if (addr_len < sizeof(*addr)) | 274 | if (addr_len < sizeof(*addr)) |
@@ -296,6 +294,9 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
296 | lock_sock(sk); | 294 | lock_sock(sk); |
297 | 295 | ||
298 | err = -EINVAL; | 296 | err = -EINVAL; |
297 | if (!sock_flag(sk, SOCK_ZAPPED)) | ||
298 | goto out_unlock; | ||
299 | |||
299 | if (sk->sk_state != TCP_CLOSE) | 300 | if (sk->sk_state != TCP_CLOSE) |
300 | goto out_unlock; | 301 | goto out_unlock; |
301 | 302 | ||
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index 78e9ecbc96e6..8e05032689f0 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c | |||
@@ -688,7 +688,7 @@ static void __sta_info_recalc_tim(struct sta_info *sta, bool ignore_pending) | |||
688 | } | 688 | } |
689 | 689 | ||
690 | /* No need to do anything if the driver does all */ | 690 | /* No need to do anything if the driver does all */ |
691 | if (!local->ops->set_tim) | 691 | if (ieee80211_hw_check(&local->hw, AP_LINK_PS)) |
692 | return; | 692 | return; |
693 | 693 | ||
694 | if (sta->dead) | 694 | if (sta->dead) |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 1c56abc49627..bd5f4be89435 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -1501,7 +1501,6 @@ static bool ieee80211_queue_skb(struct ieee80211_local *local, | |||
1501 | struct sta_info *sta, | 1501 | struct sta_info *sta, |
1502 | struct sk_buff *skb) | 1502 | struct sk_buff *skb) |
1503 | { | 1503 | { |
1504 | struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); | ||
1505 | struct fq *fq = &local->fq; | 1504 | struct fq *fq = &local->fq; |
1506 | struct ieee80211_vif *vif; | 1505 | struct ieee80211_vif *vif; |
1507 | struct txq_info *txqi; | 1506 | struct txq_info *txqi; |
@@ -1526,8 +1525,6 @@ static bool ieee80211_queue_skb(struct ieee80211_local *local, | |||
1526 | if (!txqi) | 1525 | if (!txqi) |
1527 | return false; | 1526 | return false; |
1528 | 1527 | ||
1529 | info->control.vif = vif; | ||
1530 | |||
1531 | spin_lock_bh(&fq->lock); | 1528 | spin_lock_bh(&fq->lock); |
1532 | ieee80211_txq_enqueue(local, txqi, skb); | 1529 | ieee80211_txq_enqueue(local, txqi, skb); |
1533 | spin_unlock_bh(&fq->lock); | 1530 | spin_unlock_bh(&fq->lock); |
@@ -3213,7 +3210,6 @@ static void ieee80211_xmit_fast_finish(struct ieee80211_sub_if_data *sdata, | |||
3213 | 3210 | ||
3214 | if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) { | 3211 | if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) { |
3215 | tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; | 3212 | tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; |
3216 | *ieee80211_get_qos_ctl(hdr) = tid; | ||
3217 | hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid); | 3213 | hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid); |
3218 | } else { | 3214 | } else { |
3219 | info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ; | 3215 | info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ; |
@@ -3338,6 +3334,11 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata, | |||
3338 | (tid_tx ? IEEE80211_TX_CTL_AMPDU : 0); | 3334 | (tid_tx ? IEEE80211_TX_CTL_AMPDU : 0); |
3339 | info->control.flags = IEEE80211_TX_CTRL_FAST_XMIT; | 3335 | info->control.flags = IEEE80211_TX_CTRL_FAST_XMIT; |
3340 | 3336 | ||
3337 | if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) { | ||
3338 | tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK; | ||
3339 | *ieee80211_get_qos_ctl(hdr) = tid; | ||
3340 | } | ||
3341 | |||
3341 | __skb_queue_head_init(&tx.skbs); | 3342 | __skb_queue_head_init(&tx.skbs); |
3342 | 3343 | ||
3343 | tx.flags = IEEE80211_TX_UNICAST; | 3344 | tx.flags = IEEE80211_TX_UNICAST; |
@@ -3426,6 +3427,11 @@ begin: | |||
3426 | goto begin; | 3427 | goto begin; |
3427 | } | 3428 | } |
3428 | 3429 | ||
3430 | if (test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags)) | ||
3431 | info->flags |= IEEE80211_TX_CTL_AMPDU; | ||
3432 | else | ||
3433 | info->flags &= ~IEEE80211_TX_CTL_AMPDU; | ||
3434 | |||
3429 | if (info->control.flags & IEEE80211_TX_CTRL_FAST_XMIT) { | 3435 | if (info->control.flags & IEEE80211_TX_CTRL_FAST_XMIT) { |
3430 | struct sta_info *sta = container_of(txq->sta, struct sta_info, | 3436 | struct sta_info *sta = container_of(txq->sta, struct sta_info, |
3431 | sta); | 3437 | sta); |
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c index ee715764a828..6832bf6ab69f 100644 --- a/net/mac80211/vht.c +++ b/net/mac80211/vht.c | |||
@@ -270,6 +270,22 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, | |||
270 | vht_cap->vht_mcs.tx_mcs_map |= cpu_to_le16(peer_tx << i * 2); | 270 | vht_cap->vht_mcs.tx_mcs_map |= cpu_to_le16(peer_tx << i * 2); |
271 | } | 271 | } |
272 | 272 | ||
273 | /* | ||
274 | * This is a workaround for VHT-enabled STAs which break the spec | ||
275 | * and have the VHT-MCS Rx map filled in with value 3 for all eight | ||
276 | * spatial streams; an example is the AR9462. | ||
277 | * | ||
278 | * As per spec, in section 22.1.1 Introduction to the VHT PHY | ||
279 | * A VHT STA shall support at least single spatial stream VHT-MCSs | ||
280 | * 0 to 7 (transmit and receive) in all supported channel widths. | ||
281 | */ | ||
282 | if (vht_cap->vht_mcs.rx_mcs_map == cpu_to_le16(0xFFFF)) { | ||
283 | vht_cap->vht_supported = false; | ||
284 | sdata_info(sdata, "Ignoring VHT IE from %pM due to invalid rx_mcs_map\n", | ||
285 | sta->addr); | ||
286 | return; | ||
287 | } | ||
288 | |||
273 | /* finally set up the bandwidth */ | 289 | /* finally set up the bandwidth */ |
274 | switch (vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { | 290 | switch (vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { |
275 | case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: | 291 | case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: |
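The check above works because the VHT MCS map packs two bits per spatial stream and the value 3 means "stream not supported", so a map of 0xFFFF advertises no usable streams at all. The sketch below is a minimal userspace model of that decoding; the function and constant choices are illustrative, not the mac80211 API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* 2 bits per spatial stream; 0..2 select the highest MCS, 3 = unsupported. */
static bool vht_mcs_map_has_usable_stream(uint16_t mcs_map)
{
	int ss;

	for (ss = 0; ss < 8; ss++) {
		unsigned int val = (mcs_map >> (ss * 2)) & 0x3;

		if (val != 3)	/* at least MCS 0-7 on this stream */
			return true;
	}
	return false;
}

int main(void)
{
	/* 0xFFFF: every stream marked unsupported -> the VHT IE is rejected. */
	printf("0xFFFF usable: %d\n", vht_mcs_map_has_usable_stream(0xFFFF));
	/* 0xFFFE: stream 1 supports MCS 0-7 -> VHT is kept. */
	printf("0xFFFE usable: %d\n", vht_mcs_map_has_usable_stream(0xFFFE));
	return 0;
}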
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index c3c809b2e712..a6e44ef2ec9a 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c | |||
@@ -2845,7 +2845,7 @@ static struct genl_family ip_vs_genl_family = { | |||
2845 | .hdrsize = 0, | 2845 | .hdrsize = 0, |
2846 | .name = IPVS_GENL_NAME, | 2846 | .name = IPVS_GENL_NAME, |
2847 | .version = IPVS_GENL_VERSION, | 2847 | .version = IPVS_GENL_VERSION, |
2848 | .maxattr = IPVS_CMD_MAX, | 2848 | .maxattr = IPVS_CMD_ATTR_MAX, |
2849 | .netnsok = true, /* Make ipvsadm to work on netns */ | 2849 | .netnsok = true, /* Make ipvsadm to work on netns */ |
2850 | }; | 2850 | }; |
2851 | 2851 | ||
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index 1b07578bedf3..9350530c16c1 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c | |||
@@ -283,6 +283,7 @@ struct ip_vs_sync_buff { | |||
283 | */ | 283 | */ |
284 | static void ntoh_seq(struct ip_vs_seq *no, struct ip_vs_seq *ho) | 284 | static void ntoh_seq(struct ip_vs_seq *no, struct ip_vs_seq *ho) |
285 | { | 285 | { |
286 | memset(ho, 0, sizeof(*ho)); | ||
286 | ho->init_seq = get_unaligned_be32(&no->init_seq); | 287 | ho->init_seq = get_unaligned_be32(&no->init_seq); |
287 | ho->delta = get_unaligned_be32(&no->delta); | 288 | ho->delta = get_unaligned_be32(&no->delta); |
288 | ho->previous_delta = get_unaligned_be32(&no->previous_delta); | 289 | ho->previous_delta = get_unaligned_be32(&no->previous_delta); |
@@ -917,8 +918,10 @@ static void ip_vs_proc_conn(struct netns_ipvs *ipvs, struct ip_vs_conn_param *pa | |||
917 | kfree(param->pe_data); | 918 | kfree(param->pe_data); |
918 | } | 919 | } |
919 | 920 | ||
920 | if (opt) | 921 | if (opt) { |
921 | memcpy(&cp->in_seq, opt, sizeof(*opt)); | 922 | cp->in_seq = opt->in_seq; |
923 | cp->out_seq = opt->out_seq; | ||
924 | } | ||
922 | atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs)); | 925 | atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs)); |
923 | cp->state = state; | 926 | cp->state = state; |
924 | cp->old_state = cp->state; | 927 | cp->old_state = cp->state; |
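Both hunks address the same class of problem: ntoh_seq() now zeroes the on-stack destination before filling individual fields, and ip_vs_proc_conn() copies only the two sequence members it needs rather than memcpy()ing a larger option block. A rough userspace model of the memset-before-partial-fill pattern follows; the struct and field names are invented for illustration and byte-swapping is omitted.

#include <stdio.h>
#include <string.h>

struct seq_state {
	unsigned int init_seq;
	unsigned int delta;
	unsigned int previous_delta;
	/* padding or future fields would otherwise stay uninitialized */
};

static void convert(const struct seq_state *wire, struct seq_state *host)
{
	/* Zero first so any bytes not explicitly written are well defined. */
	memset(host, 0, sizeof(*host));
	host->init_seq = wire->init_seq;
	host->delta = wire->delta;
	host->previous_delta = wire->previous_delta;
}

int main(void)
{
	struct seq_state in = { 1, 2, 3 }, out;

	convert(&in, &out);
	printf("%u %u %u\n", out.init_seq, out.delta, out.previous_delta);
	return 0;
}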
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index df2f5a3901df..0f87e5d21be7 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c | |||
@@ -76,6 +76,7 @@ struct conntrack_gc_work { | |||
76 | struct delayed_work dwork; | 76 | struct delayed_work dwork; |
77 | u32 last_bucket; | 77 | u32 last_bucket; |
78 | bool exiting; | 78 | bool exiting; |
79 | long next_gc_run; | ||
79 | }; | 80 | }; |
80 | 81 | ||
81 | static __read_mostly struct kmem_cache *nf_conntrack_cachep; | 82 | static __read_mostly struct kmem_cache *nf_conntrack_cachep; |
@@ -83,9 +84,11 @@ static __read_mostly spinlock_t nf_conntrack_locks_all_lock; | |||
83 | static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock); | 84 | static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock); |
84 | static __read_mostly bool nf_conntrack_locks_all; | 85 | static __read_mostly bool nf_conntrack_locks_all; |
85 | 86 | ||
87 | /* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */ | ||
86 | #define GC_MAX_BUCKETS_DIV 64u | 88 | #define GC_MAX_BUCKETS_DIV 64u |
87 | #define GC_MAX_BUCKETS 8192u | 89 | /* upper bound of scan intervals */ |
88 | #define GC_INTERVAL (5 * HZ) | 90 | #define GC_INTERVAL_MAX (2 * HZ) |
91 | /* maximum conntracks to evict per gc run */ | ||
89 | #define GC_MAX_EVICTS 256u | 92 | #define GC_MAX_EVICTS 256u |
90 | 93 | ||
91 | static struct conntrack_gc_work conntrack_gc_work; | 94 | static struct conntrack_gc_work conntrack_gc_work; |
@@ -936,13 +939,13 @@ static noinline int early_drop(struct net *net, unsigned int _hash) | |||
936 | static void gc_worker(struct work_struct *work) | 939 | static void gc_worker(struct work_struct *work) |
937 | { | 940 | { |
938 | unsigned int i, goal, buckets = 0, expired_count = 0; | 941 | unsigned int i, goal, buckets = 0, expired_count = 0; |
939 | unsigned long next_run = GC_INTERVAL; | ||
940 | unsigned int ratio, scanned = 0; | ||
941 | struct conntrack_gc_work *gc_work; | 942 | struct conntrack_gc_work *gc_work; |
943 | unsigned int ratio, scanned = 0; | ||
944 | unsigned long next_run; | ||
942 | 945 | ||
943 | gc_work = container_of(work, struct conntrack_gc_work, dwork.work); | 946 | gc_work = container_of(work, struct conntrack_gc_work, dwork.work); |
944 | 947 | ||
945 | goal = min(nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV, GC_MAX_BUCKETS); | 948 | goal = nf_conntrack_htable_size / GC_MAX_BUCKETS_DIV; |
946 | i = gc_work->last_bucket; | 949 | i = gc_work->last_bucket; |
947 | 950 | ||
948 | do { | 951 | do { |
@@ -982,17 +985,47 @@ static void gc_worker(struct work_struct *work) | |||
982 | if (gc_work->exiting) | 985 | if (gc_work->exiting) |
983 | return; | 986 | return; |
984 | 987 | ||
988 | /* | ||
989 | * Eviction will normally happen from the packet path, and not | ||
990 | * from this gc worker. | ||
991 | * | ||
992 | * This worker is only here to reap expired entries when the system went | ||
993 | * idle after a busy period. | ||
994 | * | ||
995 | * The heuristics below are supposed to balance conflicting goals: | ||
996 | * | ||
997 | * 1. Minimize time until we notice a stale entry | ||
998 | * 2. Maximize scan intervals to not waste cycles | ||
999 | * | ||
1000 | * Normally, expired_count will be 0; this increases the next_run time | ||
1001 | * to prioritize 2) above. | ||
1002 | * | ||
1003 | * As soon as a timed-out entry is found, move towards 1) and increase | ||
1004 | * the scan frequency. | ||
1005 | * In case we have lots of evictions, the next scan is done immediately. | ||
1006 | */ | ||
985 | ratio = scanned ? expired_count * 100 / scanned : 0; | 1007 | ratio = scanned ? expired_count * 100 / scanned : 0; |
986 | if (ratio >= 90 || expired_count == GC_MAX_EVICTS) | 1008 | if (ratio >= 90 || expired_count == GC_MAX_EVICTS) { |
1009 | gc_work->next_gc_run = 0; | ||
987 | next_run = 0; | 1010 | next_run = 0; |
1011 | } else if (expired_count) { | ||
1012 | gc_work->next_gc_run /= 2U; | ||
1013 | next_run = msecs_to_jiffies(1); | ||
1014 | } else { | ||
1015 | if (gc_work->next_gc_run < GC_INTERVAL_MAX) | ||
1016 | gc_work->next_gc_run += msecs_to_jiffies(1); | ||
1017 | |||
1018 | next_run = gc_work->next_gc_run; | ||
1019 | } | ||
988 | 1020 | ||
989 | gc_work->last_bucket = i; | 1021 | gc_work->last_bucket = i; |
990 | schedule_delayed_work(&gc_work->dwork, next_run); | 1022 | queue_delayed_work(system_long_wq, &gc_work->dwork, next_run); |
991 | } | 1023 | } |
992 | 1024 | ||
993 | static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work) | 1025 | static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work) |
994 | { | 1026 | { |
995 | INIT_DELAYED_WORK(&gc_work->dwork, gc_worker); | 1027 | INIT_DELAYED_WORK(&gc_work->dwork, gc_worker); |
1028 | gc_work->next_gc_run = GC_INTERVAL_MAX; | ||
996 | gc_work->exiting = false; | 1029 | gc_work->exiting = false; |
997 | } | 1030 | } |
998 | 1031 | ||
@@ -1885,7 +1918,7 @@ int nf_conntrack_init_start(void) | |||
1885 | nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED); | 1918 | nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED); |
1886 | 1919 | ||
1887 | conntrack_gc_work_init(&conntrack_gc_work); | 1920 | conntrack_gc_work_init(&conntrack_gc_work); |
1888 | schedule_delayed_work(&conntrack_gc_work.dwork, GC_INTERVAL); | 1921 | queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, GC_INTERVAL_MAX); |
1889 | 1922 | ||
1890 | return 0; | 1923 | return 0; |
1891 | 1924 | ||
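The new next_gc_run field turns the old fixed 5-second schedule into an adaptive one: rounds that find nothing slowly back the interval off toward GC_INTERVAL_MAX, any expired entry halves it, and a round dominated by stale entries reschedules immediately. The standalone program below isolates just that heuristic, with milliseconds standing in for jiffies; it is a model of the logic, not the kernel code.

#include <stdio.h>

#define GC_INTERVAL_MAX_MS	2000	/* upper bound of scan interval */
#define GC_MAX_EVICTS		256u

static long next_gc_run_ms = GC_INTERVAL_MAX_MS;

/* Returns the delay (ms) before the next scan given this round's stats. */
static long pick_next_run(unsigned int scanned, unsigned int expired)
{
	unsigned int ratio = scanned ? expired * 100 / scanned : 0;

	if (ratio >= 90 || expired == GC_MAX_EVICTS) {
		next_gc_run_ms = 0;		/* mostly stale: rescan at once */
		return 0;
	}
	if (expired) {
		next_gc_run_ms /= 2;		/* found some: speed up */
		return 1;
	}
	if (next_gc_run_ms < GC_INTERVAL_MAX_MS)
		next_gc_run_ms += 1;		/* idle: slowly back off */
	return next_gc_run_ms;
}

int main(void)
{
	printf("idle round:   %ld ms\n", pick_next_run(1000, 0));
	printf("some expired: %ld ms\n", pick_next_run(1000, 10));
	printf("mostly stale: %ld ms\n", pick_next_run(100, 95));
	return 0;
}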
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index 336e21559e01..7341adf7059d 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c | |||
@@ -138,9 +138,14 @@ __nf_conntrack_helper_find(const char *name, u16 l3num, u8 protonum) | |||
138 | 138 | ||
139 | for (i = 0; i < nf_ct_helper_hsize; i++) { | 139 | for (i = 0; i < nf_ct_helper_hsize; i++) { |
140 | hlist_for_each_entry_rcu(h, &nf_ct_helper_hash[i], hnode) { | 140 | hlist_for_each_entry_rcu(h, &nf_ct_helper_hash[i], hnode) { |
141 | if (!strcmp(h->name, name) && | 141 | if (strcmp(h->name, name)) |
142 | h->tuple.src.l3num == l3num && | 142 | continue; |
143 | h->tuple.dst.protonum == protonum) | 143 | |
144 | if (h->tuple.src.l3num != NFPROTO_UNSPEC && | ||
145 | h->tuple.src.l3num != l3num) | ||
146 | continue; | ||
147 | |||
148 | if (h->tuple.dst.protonum == protonum) | ||
144 | return h; | 149 | return h; |
145 | } | 150 | } |
146 | } | 151 | } |
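The rewritten lookup lets a helper registered with NFPROTO_UNSPEC match any layer-3 family instead of requiring an exact l3num. A small userspace model of the new predicate is shown below; the NFPROTO_* values and struct layout are stand-ins, not the netfilter headers.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NFPROTO_UNSPEC	0
#define NFPROTO_IPV4	2
#define NFPROTO_IPV6	10

struct helper {
	const char *name;
	unsigned short l3num;	/* NFPROTO_UNSPEC acts as a wildcard */
	unsigned char protonum;
};

static bool helper_matches(const struct helper *h, const char *name,
			   unsigned short l3num, unsigned char protonum)
{
	if (strcmp(h->name, name))
		return false;
	if (h->l3num != NFPROTO_UNSPEC && h->l3num != l3num)
		return false;
	return h->protonum == protonum;
}

int main(void)
{
	struct helper ftp = { "ftp", NFPROTO_UNSPEC, 6 /* TCP */ };

	printf("%d\n", helper_matches(&ftp, "ftp", NFPROTO_IPV4, 6)); /* 1 */
	printf("%d\n", helper_matches(&ftp, "ftp", NFPROTO_IPV6, 6)); /* 1 */
	return 0;
}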
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index 621b81c7bddc..c3fc14e021ec 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c | |||
@@ -1436,9 +1436,12 @@ static int process_sip_request(struct sk_buff *skb, unsigned int protoff, | |||
1436 | handler = &sip_handlers[i]; | 1436 | handler = &sip_handlers[i]; |
1437 | if (handler->request == NULL) | 1437 | if (handler->request == NULL) |
1438 | continue; | 1438 | continue; |
1439 | if (*datalen < handler->len || | 1439 | if (*datalen < handler->len + 2 || |
1440 | strncasecmp(*dptr, handler->method, handler->len)) | 1440 | strncasecmp(*dptr, handler->method, handler->len)) |
1441 | continue; | 1441 | continue; |
1442 | if ((*dptr)[handler->len] != ' ' || | ||
1443 | !isalpha((*dptr)[handler->len+1])) | ||
1444 | continue; | ||
1442 | 1445 | ||
1443 | if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CSEQ, | 1446 | if (ct_sip_get_header(ct, *dptr, 0, *datalen, SIP_HDR_CSEQ, |
1444 | &matchoff, &matchlen) <= 0) { | 1447 | &matchoff, &matchlen) <= 0) { |
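The stricter test refuses to treat a request method as matched when it is merely a prefix of the buffer: after the method there must be a space and then an alphabetic character (the start of the next token of the request line), and the length check accounts for those two extra bytes. A hedged userspace rendition of that check follows; the helper name and sample messages are invented.

#include <ctype.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>

/* Accept "INVITE x..." but reject "INVITES..." or a bare "INVITE". */
static bool sip_method_matches(const char *data, size_t datalen,
			       const char *method)
{
	size_t mlen = strlen(method);

	if (datalen < mlen + 2)
		return false;
	if (strncasecmp(data, method, mlen))
		return false;
	return data[mlen] == ' ' && isalpha((unsigned char)data[mlen + 1]);
}

int main(void)
{
	const char *good = "INVITE sip:user@example.org SIP/2.0";
	const char *bad  = "INVITES sip:user@example.org SIP/2.0";

	printf("%d %d\n", sip_method_matches(good, strlen(good), "INVITE"),
	       sip_method_matches(bad, strlen(bad), "INVITE"));
	return 0;
}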
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 24db22257586..026581b04ea8 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
@@ -2956,12 +2956,14 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk, | |||
2956 | 2956 | ||
2957 | err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set); | 2957 | err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set); |
2958 | if (err < 0) | 2958 | if (err < 0) |
2959 | goto err2; | 2959 | goto err3; |
2960 | 2960 | ||
2961 | list_add_tail_rcu(&set->list, &table->sets); | 2961 | list_add_tail_rcu(&set->list, &table->sets); |
2962 | table->use++; | 2962 | table->use++; |
2963 | return 0; | 2963 | return 0; |
2964 | 2964 | ||
2965 | err3: | ||
2966 | ops->destroy(set); | ||
2965 | err2: | 2967 | err2: |
2966 | kfree(set); | 2968 | kfree(set); |
2967 | err1: | 2969 | err1: |
@@ -3452,14 +3454,15 @@ void *nft_set_elem_init(const struct nft_set *set, | |||
3452 | return elem; | 3454 | return elem; |
3453 | } | 3455 | } |
3454 | 3456 | ||
3455 | void nft_set_elem_destroy(const struct nft_set *set, void *elem) | 3457 | void nft_set_elem_destroy(const struct nft_set *set, void *elem, |
3458 | bool destroy_expr) | ||
3456 | { | 3459 | { |
3457 | struct nft_set_ext *ext = nft_set_elem_ext(set, elem); | 3460 | struct nft_set_ext *ext = nft_set_elem_ext(set, elem); |
3458 | 3461 | ||
3459 | nft_data_uninit(nft_set_ext_key(ext), NFT_DATA_VALUE); | 3462 | nft_data_uninit(nft_set_ext_key(ext), NFT_DATA_VALUE); |
3460 | if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA)) | 3463 | if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA)) |
3461 | nft_data_uninit(nft_set_ext_data(ext), set->dtype); | 3464 | nft_data_uninit(nft_set_ext_data(ext), set->dtype); |
3462 | if (nft_set_ext_exists(ext, NFT_SET_EXT_EXPR)) | 3465 | if (destroy_expr && nft_set_ext_exists(ext, NFT_SET_EXT_EXPR)) |
3463 | nf_tables_expr_destroy(NULL, nft_set_ext_expr(ext)); | 3466 | nf_tables_expr_destroy(NULL, nft_set_ext_expr(ext)); |
3464 | 3467 | ||
3465 | kfree(elem); | 3468 | kfree(elem); |
@@ -3565,6 +3568,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, | |||
3565 | dreg = nft_type_to_reg(set->dtype); | 3568 | dreg = nft_type_to_reg(set->dtype); |
3566 | list_for_each_entry(binding, &set->bindings, list) { | 3569 | list_for_each_entry(binding, &set->bindings, list) { |
3567 | struct nft_ctx bind_ctx = { | 3570 | struct nft_ctx bind_ctx = { |
3571 | .net = ctx->net, | ||
3568 | .afi = ctx->afi, | 3572 | .afi = ctx->afi, |
3569 | .table = ctx->table, | 3573 | .table = ctx->table, |
3570 | .chain = (struct nft_chain *)binding->chain, | 3574 | .chain = (struct nft_chain *)binding->chain, |
@@ -3812,7 +3816,7 @@ void nft_set_gc_batch_release(struct rcu_head *rcu) | |||
3812 | 3816 | ||
3813 | gcb = container_of(rcu, struct nft_set_gc_batch, head.rcu); | 3817 | gcb = container_of(rcu, struct nft_set_gc_batch, head.rcu); |
3814 | for (i = 0; i < gcb->head.cnt; i++) | 3818 | for (i = 0; i < gcb->head.cnt; i++) |
3815 | nft_set_elem_destroy(gcb->head.set, gcb->elems[i]); | 3819 | nft_set_elem_destroy(gcb->head.set, gcb->elems[i], true); |
3816 | kfree(gcb); | 3820 | kfree(gcb); |
3817 | } | 3821 | } |
3818 | EXPORT_SYMBOL_GPL(nft_set_gc_batch_release); | 3822 | EXPORT_SYMBOL_GPL(nft_set_gc_batch_release); |
@@ -4030,7 +4034,7 @@ static void nf_tables_commit_release(struct nft_trans *trans) | |||
4030 | break; | 4034 | break; |
4031 | case NFT_MSG_DELSETELEM: | 4035 | case NFT_MSG_DELSETELEM: |
4032 | nft_set_elem_destroy(nft_trans_elem_set(trans), | 4036 | nft_set_elem_destroy(nft_trans_elem_set(trans), |
4033 | nft_trans_elem(trans).priv); | 4037 | nft_trans_elem(trans).priv, true); |
4034 | break; | 4038 | break; |
4035 | } | 4039 | } |
4036 | kfree(trans); | 4040 | kfree(trans); |
@@ -4171,7 +4175,7 @@ static void nf_tables_abort_release(struct nft_trans *trans) | |||
4171 | break; | 4175 | break; |
4172 | case NFT_MSG_NEWSETELEM: | 4176 | case NFT_MSG_NEWSETELEM: |
4173 | nft_set_elem_destroy(nft_trans_elem_set(trans), | 4177 | nft_set_elem_destroy(nft_trans_elem_set(trans), |
4174 | nft_trans_elem(trans).priv); | 4178 | nft_trans_elem(trans).priv, true); |
4175 | break; | 4179 | break; |
4176 | } | 4180 | } |
4177 | kfree(trans); | 4181 | kfree(trans); |
@@ -4421,7 +4425,7 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx, | |||
4421 | * Otherwise a 0 is returned and the attribute value is stored in the | 4425 | * Otherwise a 0 is returned and the attribute value is stored in the |
4422 | * destination variable. | 4426 | * destination variable. |
4423 | */ | 4427 | */ |
4424 | unsigned int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest) | 4428 | int nft_parse_u32_check(const struct nlattr *attr, int max, u32 *dest) |
4425 | { | 4429 | { |
4426 | u32 val; | 4430 | u32 val; |
4427 | 4431 | ||
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c index 517f08767a3c..31ca94793aa9 100644 --- a/net/netfilter/nft_dynset.c +++ b/net/netfilter/nft_dynset.c | |||
@@ -44,18 +44,22 @@ static void *nft_dynset_new(struct nft_set *set, const struct nft_expr *expr, | |||
44 | ®s->data[priv->sreg_key], | 44 | ®s->data[priv->sreg_key], |
45 | ®s->data[priv->sreg_data], | 45 | ®s->data[priv->sreg_data], |
46 | timeout, GFP_ATOMIC); | 46 | timeout, GFP_ATOMIC); |
47 | if (elem == NULL) { | 47 | if (elem == NULL) |
48 | if (set->size) | 48 | goto err1; |
49 | atomic_dec(&set->nelems); | ||
50 | return NULL; | ||
51 | } | ||
52 | 49 | ||
53 | ext = nft_set_elem_ext(set, elem); | 50 | ext = nft_set_elem_ext(set, elem); |
54 | if (priv->expr != NULL && | 51 | if (priv->expr != NULL && |
55 | nft_expr_clone(nft_set_ext_expr(ext), priv->expr) < 0) | 52 | nft_expr_clone(nft_set_ext_expr(ext), priv->expr) < 0) |
56 | return NULL; | 53 | goto err2; |
57 | 54 | ||
58 | return elem; | 55 | return elem; |
56 | |||
57 | err2: | ||
58 | nft_set_elem_destroy(set, elem, false); | ||
59 | err1: | ||
60 | if (set->size) | ||
61 | atomic_dec(&set->nelems); | ||
62 | return NULL; | ||
59 | } | 63 | } |
60 | 64 | ||
61 | static void nft_dynset_eval(const struct nft_expr *expr, | 65 | static void nft_dynset_eval(const struct nft_expr *expr, |
@@ -139,6 +143,9 @@ static int nft_dynset_init(const struct nft_ctx *ctx, | |||
139 | return PTR_ERR(set); | 143 | return PTR_ERR(set); |
140 | } | 144 | } |
141 | 145 | ||
146 | if (set->ops->update == NULL) | ||
147 | return -EOPNOTSUPP; | ||
148 | |||
142 | if (set->flags & NFT_SET_CONSTANT) | 149 | if (set->flags & NFT_SET_CONSTANT) |
143 | return -EBUSY; | 150 | return -EBUSY; |
144 | 151 | ||
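The reworked error path ensures that a partially built element is released with nft_set_elem_destroy(..., false) (its expression was never cloned) and that the nelems count taken for it is dropped on every failure; the added ->update check simply rejects set backends that cannot insert at runtime. Below is a generic sketch of the goto-based unwind pattern, with invented names and malloc()/free() standing in for the element allocator.

#include <stdio.h>
#include <stdlib.h>

struct elem { int key; };

static int nelems;	/* models the set's element counter */

static struct elem *new_elem(int key, int fail_clone)
{
	struct elem *e;

	nelems++;			/* account for the element up front */

	e = malloc(sizeof(*e));
	if (!e)
		goto err1;
	e->key = key;

	if (fail_clone)			/* models nft_expr_clone() failing */
		goto err2;

	return e;

err2:
	free(e);			/* models nft_set_elem_destroy(.., false) */
err1:
	nelems--;			/* undo the accounting on every failure */
	return NULL;
}

int main(void)
{
	free(new_elem(1, 1));		/* failed insert, free(NULL) is a no-op */
	printf("nelems after failed insert: %d\n", nelems);	/* 0 */
	return 0;
}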
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c index 3794cb2fc788..a3dface3e6e6 100644 --- a/net/netfilter/nft_set_hash.c +++ b/net/netfilter/nft_set_hash.c | |||
@@ -98,7 +98,7 @@ static bool nft_hash_update(struct nft_set *set, const u32 *key, | |||
98 | const struct nft_set_ext **ext) | 98 | const struct nft_set_ext **ext) |
99 | { | 99 | { |
100 | struct nft_hash *priv = nft_set_priv(set); | 100 | struct nft_hash *priv = nft_set_priv(set); |
101 | struct nft_hash_elem *he; | 101 | struct nft_hash_elem *he, *prev; |
102 | struct nft_hash_cmp_arg arg = { | 102 | struct nft_hash_cmp_arg arg = { |
103 | .genmask = NFT_GENMASK_ANY, | 103 | .genmask = NFT_GENMASK_ANY, |
104 | .set = set, | 104 | .set = set, |
@@ -112,15 +112,24 @@ static bool nft_hash_update(struct nft_set *set, const u32 *key, | |||
112 | he = new(set, expr, regs); | 112 | he = new(set, expr, regs); |
113 | if (he == NULL) | 113 | if (he == NULL) |
114 | goto err1; | 114 | goto err1; |
115 | if (rhashtable_lookup_insert_key(&priv->ht, &arg, &he->node, | 115 | |
116 | nft_hash_params)) | 116 | prev = rhashtable_lookup_get_insert_key(&priv->ht, &arg, &he->node, |
117 | nft_hash_params); | ||
118 | if (IS_ERR(prev)) | ||
117 | goto err2; | 119 | goto err2; |
120 | |||
121 | /* Another cpu may race to insert the element with the same key */ | ||
122 | if (prev) { | ||
123 | nft_set_elem_destroy(set, he, true); | ||
124 | he = prev; | ||
125 | } | ||
126 | |||
118 | out: | 127 | out: |
119 | *ext = &he->ext; | 128 | *ext = &he->ext; |
120 | return true; | 129 | return true; |
121 | 130 | ||
122 | err2: | 131 | err2: |
123 | nft_set_elem_destroy(set, he); | 132 | nft_set_elem_destroy(set, he, true); |
124 | err1: | 133 | err1: |
125 | return false; | 134 | return false; |
126 | } | 135 | } |
@@ -332,7 +341,7 @@ static int nft_hash_init(const struct nft_set *set, | |||
332 | 341 | ||
333 | static void nft_hash_elem_destroy(void *ptr, void *arg) | 342 | static void nft_hash_elem_destroy(void *ptr, void *arg) |
334 | { | 343 | { |
335 | nft_set_elem_destroy((const struct nft_set *)arg, ptr); | 344 | nft_set_elem_destroy((const struct nft_set *)arg, ptr, true); |
336 | } | 345 | } |
337 | 346 | ||
338 | static void nft_hash_destroy(const struct nft_set *set) | 347 | static void nft_hash_destroy(const struct nft_set *set) |
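nft_hash_update() now uses the lookup-and-insert primitive so that when two CPUs race to add the same key, the loser destroys its own element and continues with the winner's. The following single-threaded model shows only that decision; the real code relies on rhashtable_lookup_get_insert_key(), which the one-slot table here merely imitates.

#include <stdio.h>
#include <stdlib.h>

struct elem { int key; };

/* A one-slot "table": returns NULL if we inserted, or the existing element. */
static struct elem *table_slot;

static struct elem *lookup_insert(struct elem *e)
{
	if (table_slot && table_slot->key == e->key)
		return table_slot;	/* someone else got there first */
	table_slot = e;
	return NULL;
}

static struct elem *update(int key)
{
	struct elem *he = malloc(sizeof(*he)), *prev;

	if (!he)
		return NULL;
	he->key = key;

	prev = lookup_insert(he);
	if (prev) {			/* lost the race: drop ours, use theirs */
		free(he);
		he = prev;
	}
	return he;
}

int main(void)
{
	struct elem *a = update(7), *b = update(7);

	printf("same element reused: %d\n", a == b);	/* 1 */
	free(a);
	return 0;
}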
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c index 38b5bda242f8..36493a7cae88 100644 --- a/net/netfilter/nft_set_rbtree.c +++ b/net/netfilter/nft_set_rbtree.c | |||
@@ -266,7 +266,7 @@ static void nft_rbtree_destroy(const struct nft_set *set) | |||
266 | while ((node = priv->root.rb_node) != NULL) { | 266 | while ((node = priv->root.rb_node) != NULL) { |
267 | rb_erase(node, &priv->root); | 267 | rb_erase(node, &priv->root); |
268 | rbe = rb_entry(node, struct nft_rbtree_elem, node); | 268 | rbe = rb_entry(node, struct nft_rbtree_elem, node); |
269 | nft_set_elem_destroy(set, rbe); | 269 | nft_set_elem_destroy(set, rbe, true); |
270 | } | 270 | } |
271 | } | 271 | } |
272 | 272 | ||
diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c index 69f78e96fdb4..b83e158e116a 100644 --- a/net/netfilter/xt_connmark.c +++ b/net/netfilter/xt_connmark.c | |||
@@ -44,7 +44,7 @@ connmark_tg(struct sk_buff *skb, const struct xt_action_param *par) | |||
44 | u_int32_t newmark; | 44 | u_int32_t newmark; |
45 | 45 | ||
46 | ct = nf_ct_get(skb, &ctinfo); | 46 | ct = nf_ct_get(skb, &ctinfo); |
47 | if (ct == NULL) | 47 | if (ct == NULL || nf_ct_is_untracked(ct)) |
48 | return XT_CONTINUE; | 48 | return XT_CONTINUE; |
49 | 49 | ||
50 | switch (info->mode) { | 50 | switch (info->mode) { |
@@ -97,7 +97,7 @@ connmark_mt(const struct sk_buff *skb, struct xt_action_param *par) | |||
97 | const struct nf_conn *ct; | 97 | const struct nf_conn *ct; |
98 | 98 | ||
99 | ct = nf_ct_get(skb, &ctinfo); | 99 | ct = nf_ct_get(skb, &ctinfo); |
100 | if (ct == NULL) | 100 | if (ct == NULL || nf_ct_is_untracked(ct)) |
101 | return false; | 101 | return false; |
102 | 102 | ||
103 | return ((ct->mark & info->mask) == info->mark) ^ info->invert; | 103 | return ((ct->mark & info->mask) == info->mark) ^ info->invert; |
diff --git a/net/netlink/diag.c b/net/netlink/diag.c index b2f0e986a6f4..a5546249fb10 100644 --- a/net/netlink/diag.c +++ b/net/netlink/diag.c | |||
@@ -178,11 +178,8 @@ static int netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
178 | } | 178 | } |
179 | cb->args[1] = i; | 179 | cb->args[1] = i; |
180 | } else { | 180 | } else { |
181 | if (req->sdiag_protocol >= MAX_LINKS) { | 181 | if (req->sdiag_protocol >= MAX_LINKS) |
182 | read_unlock(&nl_table_lock); | ||
183 | rcu_read_unlock(); | ||
184 | return -ENOENT; | 182 | return -ENOENT; |
185 | } | ||
186 | 183 | ||
187 | err = __netlink_diag_dump(skb, cb, req->sdiag_protocol, s_num); | 184 | err = __netlink_diag_dump(skb, cb, req->sdiag_protocol, s_num); |
188 | } | 185 | } |
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index 23cc12639ba7..49c28e8ef01b 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c | |||
@@ -404,7 +404,7 @@ int __genl_register_family(struct genl_family *family) | |||
404 | 404 | ||
405 | err = genl_validate_assign_mc_groups(family); | 405 | err = genl_validate_assign_mc_groups(family); |
406 | if (err) | 406 | if (err) |
407 | goto errout_locked; | 407 | goto errout_free; |
408 | 408 | ||
409 | list_add_tail(&family->family_list, genl_family_chain(family->id)); | 409 | list_add_tail(&family->family_list, genl_family_chain(family->id)); |
410 | genl_unlock_all(); | 410 | genl_unlock_all(); |
@@ -417,6 +417,8 @@ int __genl_register_family(struct genl_family *family) | |||
417 | 417 | ||
418 | return 0; | 418 | return 0; |
419 | 419 | ||
420 | errout_free: | ||
421 | kfree(family->attrbuf); | ||
420 | errout_locked: | 422 | errout_locked: |
421 | genl_unlock_all(); | 423 | genl_unlock_all(); |
422 | errout: | 424 | errout: |
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 2b2a7974e4bb..b05d4a2155b0 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c | |||
@@ -430,7 +430,8 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb, | |||
430 | if (!skb) | 430 | if (!skb) |
431 | return -ENOBUFS; | 431 | return -ENOBUFS; |
432 | 432 | ||
433 | if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq, 0, event) <= 0) { | 433 | if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq, |
434 | n->nlmsg_flags, event) <= 0) { | ||
434 | kfree_skb(skb); | 435 | kfree_skb(skb); |
435 | return -EINVAL; | 436 | return -EINVAL; |
436 | } | 437 | } |
diff --git a/net/sctp/input.c b/net/sctp/input.c index a2ea1d1cc06a..a01a56ec8b8c 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c | |||
@@ -181,9 +181,10 @@ int sctp_rcv(struct sk_buff *skb) | |||
181 | * bound to another interface, via SO_BINDTODEVICE, treat it as OOTB | 181 | * bound to another interface, via SO_BINDTODEVICE, treat it as OOTB |
182 | */ | 182 | */ |
183 | if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb))) { | 183 | if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb))) { |
184 | if (asoc) { | 184 | if (transport) { |
185 | sctp_association_put(asoc); | 185 | sctp_transport_put(transport); |
186 | asoc = NULL; | 186 | asoc = NULL; |
187 | transport = NULL; | ||
187 | } else { | 188 | } else { |
188 | sctp_endpoint_put(ep); | 189 | sctp_endpoint_put(ep); |
189 | ep = NULL; | 190 | ep = NULL; |
@@ -269,8 +270,8 @@ int sctp_rcv(struct sk_buff *skb) | |||
269 | bh_unlock_sock(sk); | 270 | bh_unlock_sock(sk); |
270 | 271 | ||
271 | /* Release the asoc/ep ref we took in the lookup calls. */ | 272 | /* Release the asoc/ep ref we took in the lookup calls. */ |
272 | if (asoc) | 273 | if (transport) |
273 | sctp_association_put(asoc); | 274 | sctp_transport_put(transport); |
274 | else | 275 | else |
275 | sctp_endpoint_put(ep); | 276 | sctp_endpoint_put(ep); |
276 | 277 | ||
@@ -283,8 +284,8 @@ discard_it: | |||
283 | 284 | ||
284 | discard_release: | 285 | discard_release: |
285 | /* Release the asoc/ep ref we took in the lookup calls. */ | 286 | /* Release the asoc/ep ref we took in the lookup calls. */ |
286 | if (asoc) | 287 | if (transport) |
287 | sctp_association_put(asoc); | 288 | sctp_transport_put(transport); |
288 | else | 289 | else |
289 | sctp_endpoint_put(ep); | 290 | sctp_endpoint_put(ep); |
290 | 291 | ||
@@ -300,6 +301,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb) | |||
300 | { | 301 | { |
301 | struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; | 302 | struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; |
302 | struct sctp_inq *inqueue = &chunk->rcvr->inqueue; | 303 | struct sctp_inq *inqueue = &chunk->rcvr->inqueue; |
304 | struct sctp_transport *t = chunk->transport; | ||
303 | struct sctp_ep_common *rcvr = NULL; | 305 | struct sctp_ep_common *rcvr = NULL; |
304 | int backloged = 0; | 306 | int backloged = 0; |
305 | 307 | ||
@@ -351,7 +353,7 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb) | |||
351 | done: | 353 | done: |
352 | /* Release the refs we took in sctp_add_backlog */ | 354 | /* Release the refs we took in sctp_add_backlog */ |
353 | if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type) | 355 | if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type) |
354 | sctp_association_put(sctp_assoc(rcvr)); | 356 | sctp_transport_put(t); |
355 | else if (SCTP_EP_TYPE_SOCKET == rcvr->type) | 357 | else if (SCTP_EP_TYPE_SOCKET == rcvr->type) |
356 | sctp_endpoint_put(sctp_ep(rcvr)); | 358 | sctp_endpoint_put(sctp_ep(rcvr)); |
357 | else | 359 | else |
@@ -363,6 +365,7 @@ done: | |||
363 | static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb) | 365 | static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb) |
364 | { | 366 | { |
365 | struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; | 367 | struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; |
368 | struct sctp_transport *t = chunk->transport; | ||
366 | struct sctp_ep_common *rcvr = chunk->rcvr; | 369 | struct sctp_ep_common *rcvr = chunk->rcvr; |
367 | int ret; | 370 | int ret; |
368 | 371 | ||
@@ -373,7 +376,7 @@ static int sctp_add_backlog(struct sock *sk, struct sk_buff *skb) | |||
373 | * from us | 376 | * from us |
374 | */ | 377 | */ |
375 | if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type) | 378 | if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type) |
376 | sctp_association_hold(sctp_assoc(rcvr)); | 379 | sctp_transport_hold(t); |
377 | else if (SCTP_EP_TYPE_SOCKET == rcvr->type) | 380 | else if (SCTP_EP_TYPE_SOCKET == rcvr->type) |
378 | sctp_endpoint_hold(sctp_ep(rcvr)); | 381 | sctp_endpoint_hold(sctp_ep(rcvr)); |
379 | else | 382 | else |
@@ -537,15 +540,15 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb, | |||
537 | return sk; | 540 | return sk; |
538 | 541 | ||
539 | out: | 542 | out: |
540 | sctp_association_put(asoc); | 543 | sctp_transport_put(transport); |
541 | return NULL; | 544 | return NULL; |
542 | } | 545 | } |
543 | 546 | ||
544 | /* Common cleanup code for icmp/icmpv6 error handler. */ | 547 | /* Common cleanup code for icmp/icmpv6 error handler. */ |
545 | void sctp_err_finish(struct sock *sk, struct sctp_association *asoc) | 548 | void sctp_err_finish(struct sock *sk, struct sctp_transport *t) |
546 | { | 549 | { |
547 | bh_unlock_sock(sk); | 550 | bh_unlock_sock(sk); |
548 | sctp_association_put(asoc); | 551 | sctp_transport_put(t); |
549 | } | 552 | } |
550 | 553 | ||
551 | /* | 554 | /* |
@@ -641,7 +644,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info) | |||
641 | } | 644 | } |
642 | 645 | ||
643 | out_unlock: | 646 | out_unlock: |
644 | sctp_err_finish(sk, asoc); | 647 | sctp_err_finish(sk, transport); |
645 | } | 648 | } |
646 | 649 | ||
647 | /* | 650 | /* |
@@ -952,11 +955,8 @@ static struct sctp_association *__sctp_lookup_association( | |||
952 | goto out; | 955 | goto out; |
953 | 956 | ||
954 | asoc = t->asoc; | 957 | asoc = t->asoc; |
955 | sctp_association_hold(asoc); | ||
956 | *pt = t; | 958 | *pt = t; |
957 | 959 | ||
958 | sctp_transport_put(t); | ||
959 | |||
960 | out: | 960 | out: |
961 | return asoc; | 961 | return asoc; |
962 | } | 962 | } |
@@ -986,7 +986,7 @@ int sctp_has_association(struct net *net, | |||
986 | struct sctp_transport *transport; | 986 | struct sctp_transport *transport; |
987 | 987 | ||
988 | if ((asoc = sctp_lookup_association(net, laddr, paddr, &transport))) { | 988 | if ((asoc = sctp_lookup_association(net, laddr, paddr, &transport))) { |
989 | sctp_association_put(asoc); | 989 | sctp_transport_put(transport); |
990 | return 1; | 990 | return 1; |
991 | } | 991 | } |
992 | 992 | ||
@@ -1021,7 +1021,6 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct net *net, | |||
1021 | struct sctphdr *sh = sctp_hdr(skb); | 1021 | struct sctphdr *sh = sctp_hdr(skb); |
1022 | union sctp_params params; | 1022 | union sctp_params params; |
1023 | sctp_init_chunk_t *init; | 1023 | sctp_init_chunk_t *init; |
1024 | struct sctp_transport *transport; | ||
1025 | struct sctp_af *af; | 1024 | struct sctp_af *af; |
1026 | 1025 | ||
1027 | /* | 1026 | /* |
@@ -1052,7 +1051,7 @@ static struct sctp_association *__sctp_rcv_init_lookup(struct net *net, | |||
1052 | 1051 | ||
1053 | af->from_addr_param(paddr, params.addr, sh->source, 0); | 1052 | af->from_addr_param(paddr, params.addr, sh->source, 0); |
1054 | 1053 | ||
1055 | asoc = __sctp_lookup_association(net, laddr, paddr, &transport); | 1054 | asoc = __sctp_lookup_association(net, laddr, paddr, transportp); |
1056 | if (asoc) | 1055 | if (asoc) |
1057 | return asoc; | 1056 | return asoc; |
1058 | } | 1057 | } |
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index f473779e8b1c..176af3080a2b 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c | |||
@@ -198,7 +198,7 @@ static void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
198 | } | 198 | } |
199 | 199 | ||
200 | out_unlock: | 200 | out_unlock: |
201 | sctp_err_finish(sk, asoc); | 201 | sctp_err_finish(sk, transport); |
202 | out: | 202 | out: |
203 | if (likely(idev != NULL)) | 203 | if (likely(idev != NULL)) |
204 | in6_dev_put(idev); | 204 | in6_dev_put(idev); |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 9fbb6feb8c27..f23ad913dc7a 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -1214,9 +1214,12 @@ static int __sctp_connect(struct sock *sk, | |||
1214 | 1214 | ||
1215 | timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK); | 1215 | timeo = sock_sndtimeo(sk, f_flags & O_NONBLOCK); |
1216 | 1216 | ||
1217 | err = sctp_wait_for_connect(asoc, &timeo); | 1217 | if (assoc_id) |
1218 | if ((err == 0 || err == -EINPROGRESS) && assoc_id) | ||
1219 | *assoc_id = asoc->assoc_id; | 1218 | *assoc_id = asoc->assoc_id; |
1219 | err = sctp_wait_for_connect(asoc, &timeo); | ||
1220 | /* Note: the asoc may be freed after the return of | ||
1221 | * sctp_wait_for_connect. | ||
1222 | */ | ||
1220 | 1223 | ||
1221 | /* Don't free association on exit. */ | 1224 | /* Don't free association on exit. */ |
1222 | asoc = NULL; | 1225 | asoc = NULL; |
@@ -4282,19 +4285,18 @@ static void sctp_shutdown(struct sock *sk, int how) | |||
4282 | { | 4285 | { |
4283 | struct net *net = sock_net(sk); | 4286 | struct net *net = sock_net(sk); |
4284 | struct sctp_endpoint *ep; | 4287 | struct sctp_endpoint *ep; |
4285 | struct sctp_association *asoc; | ||
4286 | 4288 | ||
4287 | if (!sctp_style(sk, TCP)) | 4289 | if (!sctp_style(sk, TCP)) |
4288 | return; | 4290 | return; |
4289 | 4291 | ||
4290 | if (how & SEND_SHUTDOWN) { | 4292 | ep = sctp_sk(sk)->ep; |
4293 | if (how & SEND_SHUTDOWN && !list_empty(&ep->asocs)) { | ||
4294 | struct sctp_association *asoc; | ||
4295 | |||
4291 | sk->sk_state = SCTP_SS_CLOSING; | 4296 | sk->sk_state = SCTP_SS_CLOSING; |
4292 | ep = sctp_sk(sk)->ep; | 4297 | asoc = list_entry(ep->asocs.next, |
4293 | if (!list_empty(&ep->asocs)) { | 4298 | struct sctp_association, asocs); |
4294 | asoc = list_entry(ep->asocs.next, | 4299 | sctp_primitive_SHUTDOWN(net, asoc, NULL); |
4295 | struct sctp_association, asocs); | ||
4296 | sctp_primitive_SHUTDOWN(net, asoc, NULL); | ||
4297 | } | ||
4298 | } | 4300 | } |
4299 | } | 4301 | } |
4300 | 4302 | ||
@@ -4480,12 +4482,9 @@ int sctp_transport_lookup_process(int (*cb)(struct sctp_transport *, void *), | |||
4480 | if (!transport || !sctp_transport_hold(transport)) | 4482 | if (!transport || !sctp_transport_hold(transport)) |
4481 | goto out; | 4483 | goto out; |
4482 | 4484 | ||
4483 | sctp_association_hold(transport->asoc); | ||
4484 | sctp_transport_put(transport); | ||
4485 | |||
4486 | rcu_read_unlock(); | 4485 | rcu_read_unlock(); |
4487 | err = cb(transport, p); | 4486 | err = cb(transport, p); |
4488 | sctp_association_put(transport->asoc); | 4487 | sctp_transport_put(transport); |
4489 | 4488 | ||
4490 | out: | 4489 | out: |
4491 | return err; | 4490 | return err; |
diff --git a/net/socket.c b/net/socket.c index 5a9bf5ee2464..73dc69f9681e 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -341,8 +341,23 @@ static const struct xattr_handler sockfs_xattr_handler = { | |||
341 | .get = sockfs_xattr_get, | 341 | .get = sockfs_xattr_get, |
342 | }; | 342 | }; |
343 | 343 | ||
344 | static int sockfs_security_xattr_set(const struct xattr_handler *handler, | ||
345 | struct dentry *dentry, struct inode *inode, | ||
346 | const char *suffix, const void *value, | ||
347 | size_t size, int flags) | ||
348 | { | ||
349 | /* Handled by LSM. */ | ||
350 | return -EAGAIN; | ||
351 | } | ||
352 | |||
353 | static const struct xattr_handler sockfs_security_xattr_handler = { | ||
354 | .prefix = XATTR_SECURITY_PREFIX, | ||
355 | .set = sockfs_security_xattr_set, | ||
356 | }; | ||
357 | |||
344 | static const struct xattr_handler *sockfs_xattr_handlers[] = { | 358 | static const struct xattr_handler *sockfs_xattr_handlers[] = { |
345 | &sockfs_xattr_handler, | 359 | &sockfs_xattr_handler, |
360 | &sockfs_security_xattr_handler, | ||
346 | NULL | 361 | NULL |
347 | }; | 362 | }; |
348 | 363 | ||
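Registering a handler for the security. prefix that returns -EAGAIN tells the VFS to fall through to the LSM's own setxattr handling instead of rejecting the attribute on sockfs. A toy model of that dispatch is sketched below; the function names and the prefix test are invented for illustration.

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Returning -EAGAIN from a prefix handler means "defer to the LSM". */
static int sockfs_security_set(const char *name, const char *value)
{
	(void)name; (void)value;
	return -EAGAIN;
}

static int lsm_set(const char *name, const char *value)
{
	printf("LSM stored %s=%s\n", name, value);
	return 0;
}

static int vfs_setxattr(const char *name, const char *value)
{
	int err = -EOPNOTSUPP;

	if (!strncmp(name, "security.", 9))
		err = sockfs_security_set(name, value);
	if (err == -EAGAIN)		/* handler deferred: let the LSM decide */
		err = lsm_set(name, value);
	return err;
}

int main(void)
{
	return vfs_setxattr("security.selinux", "label") ? 1 : 0;
}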
@@ -2038,6 +2053,8 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, | |||
2038 | if (err) | 2053 | if (err) |
2039 | break; | 2054 | break; |
2040 | ++datagrams; | 2055 | ++datagrams; |
2056 | if (msg_data_left(&msg_sys)) | ||
2057 | break; | ||
2041 | cond_resched(); | 2058 | cond_resched(); |
2042 | } | 2059 | } |
2043 | 2060 | ||
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index c3f652395a80..3bc1d61694cb 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c | |||
@@ -1002,14 +1002,8 @@ static void svc_age_temp_xprts(unsigned long closure) | |||
1002 | void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr) | 1002 | void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr) |
1003 | { | 1003 | { |
1004 | struct svc_xprt *xprt; | 1004 | struct svc_xprt *xprt; |
1005 | struct svc_sock *svsk; | ||
1006 | struct socket *sock; | ||
1007 | struct list_head *le, *next; | 1005 | struct list_head *le, *next; |
1008 | LIST_HEAD(to_be_closed); | 1006 | LIST_HEAD(to_be_closed); |
1009 | struct linger no_linger = { | ||
1010 | .l_onoff = 1, | ||
1011 | .l_linger = 0, | ||
1012 | }; | ||
1013 | 1007 | ||
1014 | spin_lock_bh(&serv->sv_lock); | 1008 | spin_lock_bh(&serv->sv_lock); |
1015 | list_for_each_safe(le, next, &serv->sv_tempsocks) { | 1009 | list_for_each_safe(le, next, &serv->sv_tempsocks) { |
@@ -1027,10 +1021,7 @@ void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr) | |||
1027 | list_del_init(le); | 1021 | list_del_init(le); |
1028 | xprt = list_entry(le, struct svc_xprt, xpt_list); | 1022 | xprt = list_entry(le, struct svc_xprt, xpt_list); |
1029 | dprintk("svc_age_temp_xprts_now: closing %p\n", xprt); | 1023 | dprintk("svc_age_temp_xprts_now: closing %p\n", xprt); |
1030 | svsk = container_of(xprt, struct svc_sock, sk_xprt); | 1024 | xprt->xpt_ops->xpo_kill_temp_xprt(xprt); |
1031 | sock = svsk->sk_sock; | ||
1032 | kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER, | ||
1033 | (char *)&no_linger, sizeof(no_linger)); | ||
1034 | svc_close_xprt(xprt); | 1025 | svc_close_xprt(xprt); |
1035 | } | 1026 | } |
1036 | } | 1027 | } |
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 57625f64efd5..a4bc98265d88 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c | |||
@@ -438,6 +438,21 @@ static int svc_tcp_has_wspace(struct svc_xprt *xprt) | |||
438 | return !test_bit(SOCK_NOSPACE, &svsk->sk_sock->flags); | 438 | return !test_bit(SOCK_NOSPACE, &svsk->sk_sock->flags); |
439 | } | 439 | } |
440 | 440 | ||
441 | static void svc_tcp_kill_temp_xprt(struct svc_xprt *xprt) | ||
442 | { | ||
443 | struct svc_sock *svsk; | ||
444 | struct socket *sock; | ||
445 | struct linger no_linger = { | ||
446 | .l_onoff = 1, | ||
447 | .l_linger = 0, | ||
448 | }; | ||
449 | |||
450 | svsk = container_of(xprt, struct svc_sock, sk_xprt); | ||
451 | sock = svsk->sk_sock; | ||
452 | kernel_setsockopt(sock, SOL_SOCKET, SO_LINGER, | ||
453 | (char *)&no_linger, sizeof(no_linger)); | ||
454 | } | ||
455 | |||
441 | /* | 456 | /* |
442 | * See net/ipv6/ip_sockglue.c : ip_cmsg_recv_pktinfo | 457 | * See net/ipv6/ip_sockglue.c : ip_cmsg_recv_pktinfo |
443 | */ | 458 | */ |
@@ -648,6 +663,10 @@ static struct svc_xprt *svc_udp_accept(struct svc_xprt *xprt) | |||
648 | return NULL; | 663 | return NULL; |
649 | } | 664 | } |
650 | 665 | ||
666 | static void svc_udp_kill_temp_xprt(struct svc_xprt *xprt) | ||
667 | { | ||
668 | } | ||
669 | |||
651 | static struct svc_xprt *svc_udp_create(struct svc_serv *serv, | 670 | static struct svc_xprt *svc_udp_create(struct svc_serv *serv, |
652 | struct net *net, | 671 | struct net *net, |
653 | struct sockaddr *sa, int salen, | 672 | struct sockaddr *sa, int salen, |
@@ -667,6 +686,7 @@ static struct svc_xprt_ops svc_udp_ops = { | |||
667 | .xpo_has_wspace = svc_udp_has_wspace, | 686 | .xpo_has_wspace = svc_udp_has_wspace, |
668 | .xpo_accept = svc_udp_accept, | 687 | .xpo_accept = svc_udp_accept, |
669 | .xpo_secure_port = svc_sock_secure_port, | 688 | .xpo_secure_port = svc_sock_secure_port, |
689 | .xpo_kill_temp_xprt = svc_udp_kill_temp_xprt, | ||
670 | }; | 690 | }; |
671 | 691 | ||
672 | static struct svc_xprt_class svc_udp_class = { | 692 | static struct svc_xprt_class svc_udp_class = { |
@@ -1242,6 +1262,7 @@ static struct svc_xprt_ops svc_tcp_ops = { | |||
1242 | .xpo_has_wspace = svc_tcp_has_wspace, | 1262 | .xpo_has_wspace = svc_tcp_has_wspace, |
1243 | .xpo_accept = svc_tcp_accept, | 1263 | .xpo_accept = svc_tcp_accept, |
1244 | .xpo_secure_port = svc_sock_secure_port, | 1264 | .xpo_secure_port = svc_sock_secure_port, |
1265 | .xpo_kill_temp_xprt = svc_tcp_kill_temp_xprt, | ||
1245 | }; | 1266 | }; |
1246 | 1267 | ||
1247 | static struct svc_xprt_class svc_tcp_class = { | 1268 | static struct svc_xprt_class svc_tcp_class = { |
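The SO_LINGER trick now lives behind a per-transport ->xpo_kill_temp_xprt() op, so the generic aging code no longer assumes every temporary transport is a TCP socket; UDP and RDMA supply empty implementations. The userspace sketch below shows the abort-on-close linger setting itself, with setsockopt() standing in for kernel_setsockopt().

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

/* l_onoff=1, l_linger=0 makes close() reset the connection instead of lingering. */
static int kill_temp_socket(int fd)
{
	struct linger no_linger = { .l_onoff = 1, .l_linger = 0 };

	return setsockopt(fd, SOL_SOCKET, SO_LINGER,
			  &no_linger, sizeof(no_linger));
}

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0 || kill_temp_socket(fd) < 0)
		perror("kill_temp_socket");
	if (fd >= 0)
		close(fd);
	return 0;
}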
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index 6864fb967038..1334de2715c2 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c | |||
@@ -67,6 +67,7 @@ static void svc_rdma_detach(struct svc_xprt *xprt); | |||
67 | static void svc_rdma_free(struct svc_xprt *xprt); | 67 | static void svc_rdma_free(struct svc_xprt *xprt); |
68 | static int svc_rdma_has_wspace(struct svc_xprt *xprt); | 68 | static int svc_rdma_has_wspace(struct svc_xprt *xprt); |
69 | static int svc_rdma_secure_port(struct svc_rqst *); | 69 | static int svc_rdma_secure_port(struct svc_rqst *); |
70 | static void svc_rdma_kill_temp_xprt(struct svc_xprt *); | ||
70 | 71 | ||
71 | static struct svc_xprt_ops svc_rdma_ops = { | 72 | static struct svc_xprt_ops svc_rdma_ops = { |
72 | .xpo_create = svc_rdma_create, | 73 | .xpo_create = svc_rdma_create, |
@@ -79,6 +80,7 @@ static struct svc_xprt_ops svc_rdma_ops = { | |||
79 | .xpo_has_wspace = svc_rdma_has_wspace, | 80 | .xpo_has_wspace = svc_rdma_has_wspace, |
80 | .xpo_accept = svc_rdma_accept, | 81 | .xpo_accept = svc_rdma_accept, |
81 | .xpo_secure_port = svc_rdma_secure_port, | 82 | .xpo_secure_port = svc_rdma_secure_port, |
83 | .xpo_kill_temp_xprt = svc_rdma_kill_temp_xprt, | ||
82 | }; | 84 | }; |
83 | 85 | ||
84 | struct svc_xprt_class svc_rdma_class = { | 86 | struct svc_xprt_class svc_rdma_class = { |
@@ -1317,6 +1319,10 @@ static int svc_rdma_secure_port(struct svc_rqst *rqstp) | |||
1317 | return 1; | 1319 | return 1; |
1318 | } | 1320 | } |
1319 | 1321 | ||
1322 | static void svc_rdma_kill_temp_xprt(struct svc_xprt *xprt) | ||
1323 | { | ||
1324 | } | ||
1325 | |||
1320 | int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr) | 1326 | int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr) |
1321 | { | 1327 | { |
1322 | struct ib_send_wr *bad_wr, *n_wr; | 1328 | struct ib_send_wr *bad_wr, *n_wr; |
diff --git a/net/tipc/link.c b/net/tipc/link.c index 1055164c6232..ecc12411155e 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
@@ -1492,8 +1492,9 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, | |||
1492 | if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) | 1492 | if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) |
1493 | l->tolerance = peers_tol; | 1493 | l->tolerance = peers_tol; |
1494 | 1494 | ||
1495 | if (peers_prio && in_range(peers_prio, TIPC_MIN_LINK_PRI, | 1495 | /* Update own prio if peer indicates a different value */ |
1496 | TIPC_MAX_LINK_PRI)) { | 1496 | if ((peers_prio != l->priority) && |
1497 | in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) { | ||
1497 | l->priority = peers_prio; | 1498 | l->priority = peers_prio; |
1498 | rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT); | 1499 | rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT); |
1499 | } | 1500 | } |
diff --git a/net/tipc/monitor.c b/net/tipc/monitor.c index ed97a5876ebe..9e109bb1a207 100644 --- a/net/tipc/monitor.c +++ b/net/tipc/monitor.c | |||
@@ -455,14 +455,14 @@ void tipc_mon_rcv(struct net *net, void *data, u16 dlen, u32 addr, | |||
455 | int i, applied_bef; | 455 | int i, applied_bef; |
456 | 456 | ||
457 | state->probing = false; | 457 | state->probing = false; |
458 | if (!dlen) | ||
459 | return; | ||
460 | 458 | ||
461 | /* Sanity check received domain record */ | 459 | /* Sanity check received domain record */ |
462 | if ((dlen < new_dlen) || ntohs(arrv_dom->len) != new_dlen) { | 460 | if (dlen < dom_rec_len(arrv_dom, 0)) |
463 | pr_warn_ratelimited("Received illegal domain record\n"); | 461 | return; |
462 | if (dlen != dom_rec_len(arrv_dom, new_member_cnt)) | ||
463 | return; | ||
464 | if ((dlen < new_dlen) || ntohs(arrv_dom->len) != new_dlen) | ||
464 | return; | 465 | return; |
465 | } | ||
466 | 466 | ||
467 | /* Synch generation numbers with peer if link just came up */ | 467 | /* Synch generation numbers with peer if link just came up */ |
468 | if (!state->synched) { | 468 | if (!state->synched) { |
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index f9f5f3c3dab5..41f013888f07 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * net/tipc/socket.c: TIPC socket API | 2 | * net/tipc/socket.c: TIPC socket API |
3 | * | 3 | * |
4 | * Copyright (c) 2001-2007, 2012-2015, Ericsson AB | 4 | * Copyright (c) 2001-2007, 2012-2016, Ericsson AB |
5 | * Copyright (c) 2004-2008, 2010-2013, Wind River Systems | 5 | * Copyright (c) 2004-2008, 2010-2013, Wind River Systems |
6 | * All rights reserved. | 6 | * All rights reserved. |
7 | * | 7 | * |
@@ -129,54 +129,8 @@ static const struct proto_ops packet_ops; | |||
129 | static const struct proto_ops stream_ops; | 129 | static const struct proto_ops stream_ops; |
130 | static const struct proto_ops msg_ops; | 130 | static const struct proto_ops msg_ops; |
131 | static struct proto tipc_proto; | 131 | static struct proto tipc_proto; |
132 | |||
133 | static const struct rhashtable_params tsk_rht_params; | 132 | static const struct rhashtable_params tsk_rht_params; |
134 | 133 | ||
135 | /* | ||
136 | * Revised TIPC socket locking policy: | ||
137 | * | ||
138 | * Most socket operations take the standard socket lock when they start | ||
139 | * and hold it until they finish (or until they need to sleep). Acquiring | ||
140 | * this lock grants the owner exclusive access to the fields of the socket | ||
141 | * data structures, with the exception of the backlog queue. A few socket | ||
142 | * operations can be done without taking the socket lock because they only | ||
143 | * read socket information that never changes during the life of the socket. | ||
144 | * | ||
145 | * Socket operations may acquire the lock for the associated TIPC port if they | ||
146 | * need to perform an operation on the port. If any routine needs to acquire | ||
147 | * both the socket lock and the port lock it must take the socket lock first | ||
148 | * to avoid the risk of deadlock. | ||
149 | * | ||
150 | * The dispatcher handling incoming messages cannot grab the socket lock in | ||
151 | * the standard fashion, since invoked it runs at the BH level and cannot block. | ||
152 | * Instead, it checks to see if the socket lock is currently owned by someone, | ||
153 | * and either handles the message itself or adds it to the socket's backlog | ||
154 | * queue; in the latter case the queued message is processed once the process | ||
155 | * owning the socket lock releases it. | ||
156 | * | ||
157 | * NOTE: Releasing the socket lock while an operation is sleeping overcomes | ||
158 | * the problem of a blocked socket operation preventing any other operations | ||
159 | * from occurring. However, applications must be careful if they have | ||
160 | * multiple threads trying to send (or receive) on the same socket, as these | ||
161 | * operations might interfere with each other. For example, doing a connect | ||
162 | * and a receive at the same time might allow the receive to consume the | ||
163 | * ACK message meant for the connect. While additional work could be done | ||
164 | * to try and overcome this, it doesn't seem to be worthwhile at the present. | ||
165 | * | ||
166 | * NOTE: Releasing the socket lock while an operation is sleeping also ensures | ||
167 | * that another operation that must be performed in a non-blocking manner is | ||
168 | * not delayed for very long because the lock has already been taken. | ||
169 | * | ||
170 | * NOTE: This code assumes that certain fields of a port/socket pair are | ||
171 | * constant over its lifetime; such fields can be examined without taking | ||
172 | * the socket lock and/or port lock, and do not need to be re-read even | ||
173 | * after resuming processing after waiting. These fields include: | ||
174 | * - socket type | ||
175 | * - pointer to socket sk structure (aka tipc_sock structure) | ||
176 | * - pointer to port structure | ||
177 | * - port reference | ||
178 | */ | ||
179 | |||
180 | static u32 tsk_own_node(struct tipc_sock *tsk) | 134 | static u32 tsk_own_node(struct tipc_sock *tsk) |
181 | { | 135 | { |
182 | return msg_prevnode(&tsk->phdr); | 136 | return msg_prevnode(&tsk->phdr); |
@@ -232,7 +186,7 @@ static struct tipc_sock *tipc_sk(const struct sock *sk) | |||
232 | 186 | ||
233 | static bool tsk_conn_cong(struct tipc_sock *tsk) | 187 | static bool tsk_conn_cong(struct tipc_sock *tsk) |
234 | { | 188 | { |
235 | return tsk->snt_unacked >= tsk->snd_win; | 189 | return tsk->snt_unacked > tsk->snd_win; |
236 | } | 190 | } |
237 | 191 | ||
238 | /* tsk_blocks(): translate a buffer size in bytes to number of | 192 | /* tsk_blocks(): translate a buffer size in bytes to number of |
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 145082e2ba36..2358f2690ec5 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -2199,7 +2199,8 @@ out: | |||
2199 | * Sleep until more data has arrived. But check for races.. | 2199 | * Sleep until more data has arrived. But check for races.. |
2200 | */ | 2200 | */ |
2201 | static long unix_stream_data_wait(struct sock *sk, long timeo, | 2201 | static long unix_stream_data_wait(struct sock *sk, long timeo, |
2202 | struct sk_buff *last, unsigned int last_len) | 2202 | struct sk_buff *last, unsigned int last_len, |
2203 | bool freezable) | ||
2203 | { | 2204 | { |
2204 | struct sk_buff *tail; | 2205 | struct sk_buff *tail; |
2205 | DEFINE_WAIT(wait); | 2206 | DEFINE_WAIT(wait); |
@@ -2220,7 +2221,10 @@ static long unix_stream_data_wait(struct sock *sk, long timeo, | |||
2220 | 2221 | ||
2221 | sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); | 2222 | sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); |
2222 | unix_state_unlock(sk); | 2223 | unix_state_unlock(sk); |
2223 | timeo = freezable_schedule_timeout(timeo); | 2224 | if (freezable) |
2225 | timeo = freezable_schedule_timeout(timeo); | ||
2226 | else | ||
2227 | timeo = schedule_timeout(timeo); | ||
2224 | unix_state_lock(sk); | 2228 | unix_state_lock(sk); |
2225 | 2229 | ||
2226 | if (sock_flag(sk, SOCK_DEAD)) | 2230 | if (sock_flag(sk, SOCK_DEAD)) |
@@ -2250,7 +2254,8 @@ struct unix_stream_read_state { | |||
2250 | unsigned int splice_flags; | 2254 | unsigned int splice_flags; |
2251 | }; | 2255 | }; |
2252 | 2256 | ||
2253 | static int unix_stream_read_generic(struct unix_stream_read_state *state) | 2257 | static int unix_stream_read_generic(struct unix_stream_read_state *state, |
2258 | bool freezable) | ||
2254 | { | 2259 | { |
2255 | struct scm_cookie scm; | 2260 | struct scm_cookie scm; |
2256 | struct socket *sock = state->socket; | 2261 | struct socket *sock = state->socket; |
@@ -2330,7 +2335,7 @@ again: | |||
2330 | mutex_unlock(&u->iolock); | 2335 | mutex_unlock(&u->iolock); |
2331 | 2336 | ||
2332 | timeo = unix_stream_data_wait(sk, timeo, last, | 2337 | timeo = unix_stream_data_wait(sk, timeo, last, |
2333 | last_len); | 2338 | last_len, freezable); |
2334 | 2339 | ||
2335 | if (signal_pending(current)) { | 2340 | if (signal_pending(current)) { |
2336 | err = sock_intr_errno(timeo); | 2341 | err = sock_intr_errno(timeo); |
@@ -2472,7 +2477,7 @@ static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg, | |||
2472 | .flags = flags | 2477 | .flags = flags |
2473 | }; | 2478 | }; |
2474 | 2479 | ||
2475 | return unix_stream_read_generic(&state); | 2480 | return unix_stream_read_generic(&state, true); |
2476 | } | 2481 | } |
2477 | 2482 | ||
2478 | static int unix_stream_splice_actor(struct sk_buff *skb, | 2483 | static int unix_stream_splice_actor(struct sk_buff *skb, |
@@ -2503,7 +2508,7 @@ static ssize_t unix_stream_splice_read(struct socket *sock, loff_t *ppos, | |||
2503 | flags & SPLICE_F_NONBLOCK) | 2508 | flags & SPLICE_F_NONBLOCK) |
2504 | state.flags = MSG_DONTWAIT; | 2509 | state.flags = MSG_DONTWAIT; |
2505 | 2510 | ||
2506 | return unix_stream_read_generic(&state); | 2511 | return unix_stream_read_generic(&state, false); |
2507 | } | 2512 | } |
2508 | 2513 | ||
2509 | static int unix_shutdown(struct socket *sock, int mode) | 2514 | static int unix_shutdown(struct socket *sock, int mode) |
@@ -2812,7 +2817,8 @@ static int unix_seq_show(struct seq_file *seq, void *v) | |||
2812 | i++; | 2817 | i++; |
2813 | } | 2818 | } |
2814 | for ( ; i < len; i++) | 2819 | for ( ; i < len; i++) |
2815 | seq_putc(seq, u->addr->name->sun_path[i]); | 2820 | seq_putc(seq, u->addr->name->sun_path[i] ?: |
2821 | '@'); | ||
2816 | } | 2822 | } |
2817 | unix_state_unlock(s); | 2823 | unix_state_unlock(s); |
2818 | seq_putc(seq, '\n'); | 2824 | seq_putc(seq, '\n'); |
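The af_unix change above threads a freezable flag from the two read entry points down to unix_stream_data_wait(), so only the recvmsg() path sleeps in a freezer-friendly way; the splice path, which can sleep while holding the pipe lock, schedules normally. A minimal kernel-style sketch of that pattern (illustrative only; the helper name is made up and the caller is assumed to have set the task state via prepare_to_wait() already):

    #include <linux/freezer.h>
    #include <linux/sched.h>
    #include <linux/types.h>

    /* Sketch: choose a freezer-aware sleep only when the caller holds no
     * locks that would block the freezer (the splice path holds the pipe
     * lock, so it passes freezable == false). The caller has already set
     * the task state via prepare_to_wait(). */
    static long example_data_wait(long timeo, bool freezable)
    {
            if (freezable)
                    timeo = freezable_schedule_timeout(timeo);
            else
                    timeo = schedule_timeout(timeo);
            return timeo;
    }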
diff --git a/net/wireless/core.h b/net/wireless/core.h index 08d2e948c9ad..f0c0c8a48c92 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h | |||
@@ -71,6 +71,7 @@ struct cfg80211_registered_device { | |||
71 | struct list_head bss_list; | 71 | struct list_head bss_list; |
72 | struct rb_root bss_tree; | 72 | struct rb_root bss_tree; |
73 | u32 bss_generation; | 73 | u32 bss_generation; |
74 | u32 bss_entries; | ||
74 | struct cfg80211_scan_request *scan_req; /* protected by RTNL */ | 75 | struct cfg80211_scan_request *scan_req; /* protected by RTNL */ |
75 | struct sk_buff *scan_msg; | 76 | struct sk_buff *scan_msg; |
76 | struct cfg80211_sched_scan_request __rcu *sched_scan_req; | 77 | struct cfg80211_sched_scan_request __rcu *sched_scan_req; |
diff --git a/net/wireless/scan.c b/net/wireless/scan.c index b5bd58d0f731..35ad69fd0838 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c | |||
@@ -57,6 +57,19 @@ | |||
57 | * also linked into the probe response struct. | 57 | * also linked into the probe response struct. |
58 | */ | 58 | */ |
59 | 59 | ||
60 | /* | ||
61 | * Limit the number of BSS entries stored in mac80211. Each one is | ||
62 | * a bit over 4k at most, so this limits to roughly 4-5M of memory. | ||
63 | * If somebody wants to really attack this though, they'd likely | ||
64 | * use small beacons, and only one type of frame, limiting each of | ||
65 | * the entries to a much smaller size (in order to generate more | ||
66 | * entries in total, so overhead is bigger.) | ||
67 | */ | ||
68 | static int bss_entries_limit = 1000; | ||
69 | module_param(bss_entries_limit, int, 0644); | ||
70 | MODULE_PARM_DESC(bss_entries_limit, | ||
71 | "limit to number of scan BSS entries (per wiphy, default 1000)"); | ||
72 | |||
60 | #define IEEE80211_SCAN_RESULT_EXPIRE (30 * HZ) | 73 | #define IEEE80211_SCAN_RESULT_EXPIRE (30 * HZ) |
61 | 74 | ||
62 | static void bss_free(struct cfg80211_internal_bss *bss) | 75 | static void bss_free(struct cfg80211_internal_bss *bss) |
@@ -137,6 +150,10 @@ static bool __cfg80211_unlink_bss(struct cfg80211_registered_device *rdev, | |||
137 | 150 | ||
138 | list_del_init(&bss->list); | 151 | list_del_init(&bss->list); |
139 | rb_erase(&bss->rbn, &rdev->bss_tree); | 152 | rb_erase(&bss->rbn, &rdev->bss_tree); |
153 | rdev->bss_entries--; | ||
154 | WARN_ONCE((rdev->bss_entries == 0) ^ list_empty(&rdev->bss_list), | ||
155 | "rdev bss entries[%d]/list[empty:%d] corruption\n", | ||
156 | rdev->bss_entries, list_empty(&rdev->bss_list)); | ||
140 | bss_ref_put(rdev, bss); | 157 | bss_ref_put(rdev, bss); |
141 | return true; | 158 | return true; |
142 | } | 159 | } |
@@ -163,6 +180,40 @@ static void __cfg80211_bss_expire(struct cfg80211_registered_device *rdev, | |||
163 | rdev->bss_generation++; | 180 | rdev->bss_generation++; |
164 | } | 181 | } |
165 | 182 | ||
183 | static bool cfg80211_bss_expire_oldest(struct cfg80211_registered_device *rdev) | ||
184 | { | ||
185 | struct cfg80211_internal_bss *bss, *oldest = NULL; | ||
186 | bool ret; | ||
187 | |||
188 | lockdep_assert_held(&rdev->bss_lock); | ||
189 | |||
190 | list_for_each_entry(bss, &rdev->bss_list, list) { | ||
191 | if (atomic_read(&bss->hold)) | ||
192 | continue; | ||
193 | |||
194 | if (!list_empty(&bss->hidden_list) && | ||
195 | !bss->pub.hidden_beacon_bss) | ||
196 | continue; | ||
197 | |||
198 | if (oldest && time_before(oldest->ts, bss->ts)) | ||
199 | continue; | ||
200 | oldest = bss; | ||
201 | } | ||
202 | |||
203 | if (WARN_ON(!oldest)) | ||
204 | return false; | ||
205 | |||
206 | /* | ||
207 | * The callers make sure to increase rdev->bss_generation if anything | ||
208 | * gets removed (and a new entry added), so there's no need to also do | ||
209 | * it here. | ||
210 | */ | ||
211 | |||
212 | ret = __cfg80211_unlink_bss(rdev, oldest); | ||
213 | WARN_ON(!ret); | ||
214 | return ret; | ||
215 | } | ||
216 | |||
166 | void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, | 217 | void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, |
167 | bool send_message) | 218 | bool send_message) |
168 | { | 219 | { |
@@ -689,6 +740,7 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev, | |||
689 | const u8 *ie; | 740 | const u8 *ie; |
690 | int i, ssidlen; | 741 | int i, ssidlen; |
691 | u8 fold = 0; | 742 | u8 fold = 0; |
743 | u32 n_entries = 0; | ||
692 | 744 | ||
693 | ies = rcu_access_pointer(new->pub.beacon_ies); | 745 | ies = rcu_access_pointer(new->pub.beacon_ies); |
694 | if (WARN_ON(!ies)) | 746 | if (WARN_ON(!ies)) |
@@ -712,6 +764,12 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev, | |||
712 | /* This is the bad part ... */ | 764 | /* This is the bad part ... */ |
713 | 765 | ||
714 | list_for_each_entry(bss, &rdev->bss_list, list) { | 766 | list_for_each_entry(bss, &rdev->bss_list, list) { |
767 | /* | ||
768 | * we're iterating all the entries anyway, so take the | ||
769 | * opportunity to validate the list length accounting | ||
770 | */ | ||
771 | n_entries++; | ||
772 | |||
715 | if (!ether_addr_equal(bss->pub.bssid, new->pub.bssid)) | 773 | if (!ether_addr_equal(bss->pub.bssid, new->pub.bssid)) |
716 | continue; | 774 | continue; |
717 | if (bss->pub.channel != new->pub.channel) | 775 | if (bss->pub.channel != new->pub.channel) |
@@ -740,6 +798,10 @@ static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev, | |||
740 | new->pub.beacon_ies); | 798 | new->pub.beacon_ies); |
741 | } | 799 | } |
742 | 800 | ||
801 | WARN_ONCE(n_entries != rdev->bss_entries, | ||
802 | "rdev bss entries[%d]/list[len:%d] corruption\n", | ||
803 | rdev->bss_entries, n_entries); | ||
804 | |||
743 | return true; | 805 | return true; |
744 | } | 806 | } |
745 | 807 | ||
@@ -894,7 +956,14 @@ cfg80211_bss_update(struct cfg80211_registered_device *rdev, | |||
894 | } | 956 | } |
895 | } | 957 | } |
896 | 958 | ||
959 | if (rdev->bss_entries >= bss_entries_limit && | ||
960 | !cfg80211_bss_expire_oldest(rdev)) { | ||
961 | kfree(new); | ||
962 | goto drop; | ||
963 | } | ||
964 | |||
897 | list_add_tail(&new->list, &rdev->bss_list); | 965 | list_add_tail(&new->list, &rdev->bss_list); |
966 | rdev->bss_entries++; | ||
898 | rb_insert_bss(rdev, new); | 967 | rb_insert_bss(rdev, new); |
899 | found = new; | 968 | found = new; |
900 | } | 969 | } |
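The scan.c changes above cap the per-wiphy BSS list at bss_entries_limit (1000 by default) and, once the cap is reached, evict the oldest entry that is neither held nor the parent of a hidden-SSID entry. Since the limit is declared with module_param(..., 0644), it should also be adjustable at runtime; a small userspace sketch, assuming the conventional /sys/module/cfg80211/parameters/ path for the knob:

    #include <stdio.h>
    #include <stdlib.h>

    /* Read, or with an argument (and root) write, the scan-entry cap added
     * above. The sysfs path is an assumption based on how module_param()
     * normally exposes parameters of the cfg80211 module. */
    int main(int argc, char **argv)
    {
            const char *path = "/sys/module/cfg80211/parameters/bss_entries_limit";
            char buf[32];
            FILE *f = fopen(path, argc > 1 ? "w" : "r");

            if (!f) {
                    perror(path);
                    return 1;
            }
            if (argc > 1)
                    fprintf(f, "%d\n", atoi(argv[1]));
            else if (fgets(buf, sizeof(buf), f))
                    printf("bss_entries_limit = %s", buf);
            fclose(f);
            return 0;
    }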
diff --git a/net/wireless/util.c b/net/wireless/util.c index 5ea12afc7706..659b507b347d 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c | |||
@@ -1158,7 +1158,8 @@ static u32 cfg80211_calculate_bitrate_vht(struct rate_info *rate) | |||
1158 | 58500000, | 1158 | 58500000, |
1159 | 65000000, | 1159 | 65000000, |
1160 | 78000000, | 1160 | 78000000, |
1161 | 0, | 1161 | /* not in the spec, but some devices use this: */ |
1162 | 86500000, | ||
1162 | }, | 1163 | }, |
1163 | { 13500000, | 1164 | { 13500000, |
1164 | 27000000, | 1165 | 27000000, |
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile index 12b7304d55dc..72c58675973e 100644 --- a/samples/bpf/Makefile +++ b/samples/bpf/Makefile | |||
@@ -27,6 +27,7 @@ hostprogs-y += xdp2 | |||
27 | hostprogs-y += test_current_task_under_cgroup | 27 | hostprogs-y += test_current_task_under_cgroup |
28 | hostprogs-y += trace_event | 28 | hostprogs-y += trace_event |
29 | hostprogs-y += sampleip | 29 | hostprogs-y += sampleip |
30 | hostprogs-y += tc_l2_redirect | ||
30 | 31 | ||
31 | test_verifier-objs := test_verifier.o libbpf.o | 32 | test_verifier-objs := test_verifier.o libbpf.o |
32 | test_maps-objs := test_maps.o libbpf.o | 33 | test_maps-objs := test_maps.o libbpf.o |
@@ -56,6 +57,7 @@ test_current_task_under_cgroup-objs := bpf_load.o libbpf.o \ | |||
56 | test_current_task_under_cgroup_user.o | 57 | test_current_task_under_cgroup_user.o |
57 | trace_event-objs := bpf_load.o libbpf.o trace_event_user.o | 58 | trace_event-objs := bpf_load.o libbpf.o trace_event_user.o |
58 | sampleip-objs := bpf_load.o libbpf.o sampleip_user.o | 59 | sampleip-objs := bpf_load.o libbpf.o sampleip_user.o |
60 | tc_l2_redirect-objs := bpf_load.o libbpf.o tc_l2_redirect_user.o | ||
59 | 61 | ||
60 | # Tell kbuild to always build the programs | 62 | # Tell kbuild to always build the programs |
61 | always := $(hostprogs-y) | 63 | always := $(hostprogs-y) |
@@ -72,6 +74,7 @@ always += test_probe_write_user_kern.o | |||
72 | always += trace_output_kern.o | 74 | always += trace_output_kern.o |
73 | always += tcbpf1_kern.o | 75 | always += tcbpf1_kern.o |
74 | always += tcbpf2_kern.o | 76 | always += tcbpf2_kern.o |
77 | always += tc_l2_redirect_kern.o | ||
75 | always += lathist_kern.o | 78 | always += lathist_kern.o |
76 | always += offwaketime_kern.o | 79 | always += offwaketime_kern.o |
77 | always += spintest_kern.o | 80 | always += spintest_kern.o |
@@ -111,6 +114,7 @@ HOSTLOADLIBES_xdp2 += -lelf | |||
111 | HOSTLOADLIBES_test_current_task_under_cgroup += -lelf | 114 | HOSTLOADLIBES_test_current_task_under_cgroup += -lelf |
112 | HOSTLOADLIBES_trace_event += -lelf | 115 | HOSTLOADLIBES_trace_event += -lelf |
113 | HOSTLOADLIBES_sampleip += -lelf | 116 | HOSTLOADLIBES_sampleip += -lelf |
117 | HOSTLOADLIBES_tc_l2_redirect += -l elf | ||
114 | 118 | ||
115 | # Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline: | 119 | # Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline: |
116 | # make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang | 120 | # make samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang |
diff --git a/samples/bpf/tc_l2_redirect.sh b/samples/bpf/tc_l2_redirect.sh new file mode 100755 index 000000000000..80a05591a140 --- /dev/null +++ b/samples/bpf/tc_l2_redirect.sh | |||
@@ -0,0 +1,173 @@ | |||
1 | #!/bin/bash | ||
2 | |||
3 | [[ -z $TC ]] && TC='tc' | ||
4 | [[ -z $IP ]] && IP='ip' | ||
5 | |||
6 | REDIRECT_USER='./tc_l2_redirect' | ||
7 | REDIRECT_BPF='./tc_l2_redirect_kern.o' | ||
8 | |||
9 | RP_FILTER=$(< /proc/sys/net/ipv4/conf/all/rp_filter) | ||
10 | IPV6_FORWARDING=$(< /proc/sys/net/ipv6/conf/all/forwarding) | ||
11 | |||
12 | function config_common { | ||
13 | local tun_type=$1 | ||
14 | |||
15 | $IP netns add ns1 | ||
16 | $IP netns add ns2 | ||
17 | $IP link add ve1 type veth peer name vens1 | ||
18 | $IP link add ve2 type veth peer name vens2 | ||
19 | $IP link set dev ve1 up | ||
20 | $IP link set dev ve2 up | ||
21 | $IP link set dev ve1 mtu 1500 | ||
22 | $IP link set dev ve2 mtu 1500 | ||
23 | $IP link set dev vens1 netns ns1 | ||
24 | $IP link set dev vens2 netns ns2 | ||
25 | |||
26 | $IP -n ns1 link set dev lo up | ||
27 | $IP -n ns1 link set dev vens1 up | ||
28 | $IP -n ns1 addr add 10.1.1.101/24 dev vens1 | ||
29 | $IP -n ns1 addr add 2401:db01::65/64 dev vens1 nodad | ||
30 | $IP -n ns1 route add default via 10.1.1.1 dev vens1 | ||
31 | $IP -n ns1 route add default via 2401:db01::1 dev vens1 | ||
32 | |||
33 | $IP -n ns2 link set dev lo up | ||
34 | $IP -n ns2 link set dev vens2 up | ||
35 | $IP -n ns2 addr add 10.2.1.102/24 dev vens2 | ||
36 | $IP -n ns2 addr add 2401:db02::66/64 dev vens2 nodad | ||
37 | $IP -n ns2 addr add 10.10.1.102 dev lo | ||
38 | $IP -n ns2 addr add 2401:face::66/64 dev lo nodad | ||
39 | $IP -n ns2 link add ipt2 type ipip local 10.2.1.102 remote 10.2.1.1 | ||
40 | $IP -n ns2 link add ip6t2 type ip6tnl mode any local 2401:db02::66 remote 2401:db02::1 | ||
41 | $IP -n ns2 link set dev ipt2 up | ||
42 | $IP -n ns2 link set dev ip6t2 up | ||
43 | $IP netns exec ns2 $TC qdisc add dev vens2 clsact | ||
44 | $IP netns exec ns2 $TC filter add dev vens2 ingress bpf da obj $REDIRECT_BPF sec drop_non_tun_vip | ||
45 | if [[ $tun_type == "ipip" ]]; then | ||
46 | $IP -n ns2 route add 10.1.1.0/24 dev ipt2 | ||
47 | $IP netns exec ns2 sysctl -q -w net.ipv4.conf.all.rp_filter=0 | ||
48 | $IP netns exec ns2 sysctl -q -w net.ipv4.conf.ipt2.rp_filter=0 | ||
49 | else | ||
50 | $IP -n ns2 route add 10.1.1.0/24 dev ip6t2 | ||
51 | $IP -n ns2 route add 2401:db01::/64 dev ip6t2 | ||
52 | $IP netns exec ns2 sysctl -q -w net.ipv4.conf.all.rp_filter=0 | ||
53 | $IP netns exec ns2 sysctl -q -w net.ipv4.conf.ip6t2.rp_filter=0 | ||
54 | fi | ||
55 | |||
56 | $IP addr add 10.1.1.1/24 dev ve1 | ||
57 | $IP addr add 2401:db01::1/64 dev ve1 nodad | ||
58 | $IP addr add 10.2.1.1/24 dev ve2 | ||
59 | $IP addr add 2401:db02::1/64 dev ve2 nodad | ||
60 | |||
61 | $TC qdisc add dev ve2 clsact | ||
62 | $TC filter add dev ve2 ingress bpf da obj $REDIRECT_BPF sec l2_to_iptun_ingress_forward | ||
63 | |||
64 | sysctl -q -w net.ipv4.conf.all.rp_filter=0 | ||
65 | sysctl -q -w net.ipv6.conf.all.forwarding=1 | ||
66 | } | ||
67 | |||
68 | function cleanup { | ||
69 | set +e | ||
70 | [[ -z $DEBUG ]] || set +x | ||
71 | $IP netns delete ns1 >& /dev/null | ||
72 | $IP netns delete ns2 >& /dev/null | ||
73 | $IP link del ve1 >& /dev/null | ||
74 | $IP link del ve2 >& /dev/null | ||
75 | $IP link del ipt >& /dev/null | ||
76 | $IP link del ip6t >& /dev/null | ||
77 | sysctl -q -w net.ipv4.conf.all.rp_filter=$RP_FILTER | ||
78 | sysctl -q -w net.ipv6.conf.all.forwarding=$IPV6_FORWARDING | ||
79 | rm -f /sys/fs/bpf/tc/globals/tun_iface | ||
80 | [[ -z $DEBUG ]] || set -x | ||
81 | set -e | ||
82 | } | ||
83 | |||
84 | function l2_to_ipip { | ||
85 | echo -n "l2_to_ipip $1: " | ||
86 | |||
87 | local dir=$1 | ||
88 | |||
89 | config_common ipip | ||
90 | |||
91 | $IP link add ipt type ipip external | ||
92 | $IP link set dev ipt up | ||
93 | sysctl -q -w net.ipv4.conf.ipt.rp_filter=0 | ||
94 | sysctl -q -w net.ipv4.conf.ipt.forwarding=1 | ||
95 | |||
96 | if [[ $dir == "egress" ]]; then | ||
97 | $IP route add 10.10.1.0/24 via 10.2.1.102 dev ve2 | ||
98 | $TC filter add dev ve2 egress bpf da obj $REDIRECT_BPF sec l2_to_iptun_ingress_redirect | ||
99 | sysctl -q -w net.ipv4.conf.ve1.forwarding=1 | ||
100 | else | ||
101 | $TC qdisc add dev ve1 clsact | ||
102 | $TC filter add dev ve1 ingress bpf da obj $REDIRECT_BPF sec l2_to_iptun_ingress_redirect | ||
103 | fi | ||
104 | |||
105 | $REDIRECT_USER -U /sys/fs/bpf/tc/globals/tun_iface -i $(< /sys/class/net/ipt/ifindex) | ||
106 | |||
107 | $IP netns exec ns1 ping -c1 10.10.1.102 >& /dev/null | ||
108 | |||
109 | if [[ $dir == "egress" ]]; then | ||
110 | # test direct egress to ve2 (i.e. not forwarding from | ||
111 | # ve1 to ve2). | ||
112 | ping -c1 10.10.1.102 >& /dev/null | ||
113 | fi | ||
114 | |||
115 | cleanup | ||
116 | |||
117 | echo "OK" | ||
118 | } | ||
119 | |||
120 | function l2_to_ip6tnl { | ||
121 | echo -n "l2_to_ip6tnl $1: " | ||
122 | |||
123 | local dir=$1 | ||
124 | |||
125 | config_common ip6tnl | ||
126 | |||
127 | $IP link add ip6t type ip6tnl mode any external | ||
128 | $IP link set dev ip6t up | ||
129 | sysctl -q -w net.ipv4.conf.ip6t.rp_filter=0 | ||
130 | sysctl -q -w net.ipv4.conf.ip6t.forwarding=1 | ||
131 | |||
132 | if [[ $dir == "egress" ]]; then | ||
133 | $IP route add 10.10.1.0/24 via 10.2.1.102 dev ve2 | ||
134 | $IP route add 2401:face::/64 via 2401:db02::66 dev ve2 | ||
135 | $TC filter add dev ve2 egress bpf da obj $REDIRECT_BPF sec l2_to_ip6tun_ingress_redirect | ||
136 | sysctl -q -w net.ipv4.conf.ve1.forwarding=1 | ||
137 | else | ||
138 | $TC qdisc add dev ve1 clsact | ||
139 | $TC filter add dev ve1 ingress bpf da obj $REDIRECT_BPF sec l2_to_ip6tun_ingress_redirect | ||
140 | fi | ||
141 | |||
142 | $REDIRECT_USER -U /sys/fs/bpf/tc/globals/tun_iface -i $(< /sys/class/net/ip6t/ifindex) | ||
143 | |||
144 | $IP netns exec ns1 ping -c1 10.10.1.102 >& /dev/null | ||
145 | $IP netns exec ns1 ping -6 -c1 2401:face::66 >& /dev/null | ||
146 | |||
147 | if [[ $dir == "egress" ]]; then | ||
148 | # test direct egress to ve2 (i.e. not forwarding from | ||
149 | # ve1 to ve2). | ||
150 | ping -c1 10.10.1.102 >& /dev/null | ||
151 | ping -6 -c1 2401:face::66 >& /dev/null | ||
152 | fi | ||
153 | |||
154 | cleanup | ||
155 | |||
156 | echo "OK" | ||
157 | } | ||
158 | |||
159 | cleanup | ||
160 | test_names="l2_to_ipip l2_to_ip6tnl" | ||
161 | test_dirs="ingress egress" | ||
162 | if [[ $# -ge 2 ]]; then | ||
163 | test_names=$1 | ||
164 | test_dirs=$2 | ||
165 | elif [[ $# -ge 1 ]]; then | ||
166 | test_names=$1 | ||
167 | fi | ||
168 | |||
169 | for t in $test_names; do | ||
170 | for d in $test_dirs; do | ||
171 | $t $d | ||
172 | done | ||
173 | done | ||
diff --git a/samples/bpf/tc_l2_redirect_kern.c b/samples/bpf/tc_l2_redirect_kern.c new file mode 100644 index 000000000000..92a44729dbe4 --- /dev/null +++ b/samples/bpf/tc_l2_redirect_kern.c | |||
@@ -0,0 +1,236 @@ | |||
1 | /* Copyright (c) 2016 Facebook | ||
2 | * | ||
3 | * This program is free software; you can redistribute it and/or | ||
4 | * modify it under the terms of version 2 of the GNU General Public | ||
5 | * License as published by the Free Software Foundation. | ||
6 | */ | ||
7 | #include <uapi/linux/bpf.h> | ||
8 | #include <uapi/linux/if_ether.h> | ||
9 | #include <uapi/linux/if_packet.h> | ||
10 | #include <uapi/linux/ip.h> | ||
11 | #include <uapi/linux/ipv6.h> | ||
12 | #include <uapi/linux/in.h> | ||
13 | #include <uapi/linux/tcp.h> | ||
14 | #include <uapi/linux/filter.h> | ||
15 | #include <uapi/linux/pkt_cls.h> | ||
16 | #include <net/ipv6.h> | ||
17 | #include "bpf_helpers.h" | ||
18 | |||
19 | #define _htonl __builtin_bswap32 | ||
20 | |||
21 | #define PIN_GLOBAL_NS 2 | ||
22 | struct bpf_elf_map { | ||
23 | __u32 type; | ||
24 | __u32 size_key; | ||
25 | __u32 size_value; | ||
26 | __u32 max_elem; | ||
27 | __u32 flags; | ||
28 | __u32 id; | ||
29 | __u32 pinning; | ||
30 | }; | ||
31 | |||
32 | /* copy of 'struct ethhdr' without __packed */ | ||
33 | struct eth_hdr { | ||
34 | unsigned char h_dest[ETH_ALEN]; | ||
35 | unsigned char h_source[ETH_ALEN]; | ||
36 | unsigned short h_proto; | ||
37 | }; | ||
38 | |||
39 | struct bpf_elf_map SEC("maps") tun_iface = { | ||
40 | .type = BPF_MAP_TYPE_ARRAY, | ||
41 | .size_key = sizeof(int), | ||
42 | .size_value = sizeof(int), | ||
43 | .pinning = PIN_GLOBAL_NS, | ||
44 | .max_elem = 1, | ||
45 | }; | ||
46 | |||
47 | static __always_inline bool is_vip_addr(__be16 eth_proto, __be32 daddr) | ||
48 | { | ||
49 | if (eth_proto == htons(ETH_P_IP)) | ||
50 | return (_htonl(0xffffff00) & daddr) == _htonl(0x0a0a0100); | ||
51 | else if (eth_proto == htons(ETH_P_IPV6)) | ||
52 | return (daddr == _htonl(0x2401face)); | ||
53 | |||
54 | return false; | ||
55 | } | ||
56 | |||
57 | SEC("l2_to_iptun_ingress_forward") | ||
58 | int _l2_to_iptun_ingress_forward(struct __sk_buff *skb) | ||
59 | { | ||
60 | struct bpf_tunnel_key tkey = {}; | ||
61 | void *data = (void *)(long)skb->data; | ||
62 | struct eth_hdr *eth = data; | ||
63 | void *data_end = (void *)(long)skb->data_end; | ||
64 | int key = 0, *ifindex; | ||
65 | |||
66 | int ret; | ||
67 | |||
68 | if (data + sizeof(*eth) > data_end) | ||
69 | return TC_ACT_OK; | ||
70 | |||
71 | ifindex = bpf_map_lookup_elem(&tun_iface, &key); | ||
72 | if (!ifindex) | ||
73 | return TC_ACT_OK; | ||
74 | |||
75 | if (eth->h_proto == htons(ETH_P_IP)) { | ||
76 | char fmt4[] = "ingress forward to ifindex:%d daddr4:%x\n"; | ||
77 | struct iphdr *iph = data + sizeof(*eth); | ||
78 | |||
79 | if (data + sizeof(*eth) + sizeof(*iph) > data_end) | ||
80 | return TC_ACT_OK; | ||
81 | |||
82 | if (iph->protocol != IPPROTO_IPIP) | ||
83 | return TC_ACT_OK; | ||
84 | |||
85 | bpf_trace_printk(fmt4, sizeof(fmt4), *ifindex, | ||
86 | _htonl(iph->daddr)); | ||
87 | return bpf_redirect(*ifindex, BPF_F_INGRESS); | ||
88 | } else if (eth->h_proto == htons(ETH_P_IPV6)) { | ||
89 | char fmt6[] = "ingress forward to ifindex:%d daddr6:%x::%x\n"; | ||
90 | struct ipv6hdr *ip6h = data + sizeof(*eth); | ||
91 | |||
92 | if (data + sizeof(*eth) + sizeof(*ip6h) > data_end) | ||
93 | return TC_ACT_OK; | ||
94 | |||
95 | if (ip6h->nexthdr != IPPROTO_IPIP && | ||
96 | ip6h->nexthdr != IPPROTO_IPV6) | ||
97 | return TC_ACT_OK; | ||
98 | |||
99 | bpf_trace_printk(fmt6, sizeof(fmt6), *ifindex, | ||
100 | _htonl(ip6h->daddr.s6_addr32[0]), | ||
101 | _htonl(ip6h->daddr.s6_addr32[3])); | ||
102 | return bpf_redirect(*ifindex, BPF_F_INGRESS); | ||
103 | } | ||
104 | |||
105 | return TC_ACT_OK; | ||
106 | } | ||
107 | |||
108 | SEC("l2_to_iptun_ingress_redirect") | ||
109 | int _l2_to_iptun_ingress_redirect(struct __sk_buff *skb) | ||
110 | { | ||
111 | struct bpf_tunnel_key tkey = {}; | ||
112 | void *data = (void *)(long)skb->data; | ||
113 | struct eth_hdr *eth = data; | ||
114 | void *data_end = (void *)(long)skb->data_end; | ||
115 | int key = 0, *ifindex; | ||
116 | |||
117 | int ret; | ||
118 | |||
119 | if (data + sizeof(*eth) > data_end) | ||
120 | return TC_ACT_OK; | ||
121 | |||
122 | ifindex = bpf_map_lookup_elem(&tun_iface, &key); | ||
123 | if (!ifindex) | ||
124 | return TC_ACT_OK; | ||
125 | |||
126 | if (eth->h_proto == htons(ETH_P_IP)) { | ||
127 | char fmt4[] = "e/ingress redirect daddr4:%x to ifindex:%d\n"; | ||
128 | struct iphdr *iph = data + sizeof(*eth); | ||
129 | __be32 daddr = iph->daddr; | ||
130 | |||
131 | if (data + sizeof(*eth) + sizeof(*iph) > data_end) | ||
132 | return TC_ACT_OK; | ||
133 | |||
134 | if (!is_vip_addr(eth->h_proto, daddr)) | ||
135 | return TC_ACT_OK; | ||
136 | |||
137 | bpf_trace_printk(fmt4, sizeof(fmt4), _htonl(daddr), *ifindex); | ||
138 | } else { | ||
139 | return TC_ACT_OK; | ||
140 | } | ||
141 | |||
142 | tkey.tunnel_id = 10000; | ||
143 | tkey.tunnel_ttl = 64; | ||
144 | tkey.remote_ipv4 = 0x0a020166; /* 10.2.1.102 */ | ||
145 | bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), 0); | ||
146 | return bpf_redirect(*ifindex, 0); | ||
147 | } | ||
148 | |||
149 | SEC("l2_to_ip6tun_ingress_redirect") | ||
150 | int _l2_to_ip6tun_ingress_redirect(struct __sk_buff *skb) | ||
151 | { | ||
152 | struct bpf_tunnel_key tkey = {}; | ||
153 | void *data = (void *)(long)skb->data; | ||
154 | struct eth_hdr *eth = data; | ||
155 | void *data_end = (void *)(long)skb->data_end; | ||
156 | int key = 0, *ifindex; | ||
157 | |||
158 | if (data + sizeof(*eth) > data_end) | ||
159 | return TC_ACT_OK; | ||
160 | |||
161 | ifindex = bpf_map_lookup_elem(&tun_iface, &key); | ||
162 | if (!ifindex) | ||
163 | return TC_ACT_OK; | ||
164 | |||
165 | if (eth->h_proto == htons(ETH_P_IP)) { | ||
166 | char fmt4[] = "e/ingress redirect daddr4:%x to ifindex:%d\n"; | ||
167 | struct iphdr *iph = data + sizeof(*eth); | ||
168 | |||
169 | if (data + sizeof(*eth) + sizeof(*iph) > data_end) | ||
170 | return TC_ACT_OK; | ||
171 | |||
172 | if (!is_vip_addr(eth->h_proto, iph->daddr)) | ||
173 | return TC_ACT_OK; | ||
174 | |||
175 | bpf_trace_printk(fmt4, sizeof(fmt4), _htonl(iph->daddr), | ||
176 | *ifindex); | ||
177 | } else if (eth->h_proto == htons(ETH_P_IPV6)) { | ||
178 | char fmt6[] = "e/ingress redirect daddr6:%x to ifindex:%d\n"; | ||
179 | struct ipv6hdr *ip6h = data + sizeof(*eth); | ||
180 | |||
181 | if (data + sizeof(*eth) + sizeof(*ip6h) > data_end) | ||
182 | return TC_ACT_OK; | ||
183 | |||
184 | if (!is_vip_addr(eth->h_proto, ip6h->daddr.s6_addr32[0])) | ||
185 | return TC_ACT_OK; | ||
186 | |||
187 | bpf_trace_printk(fmt6, sizeof(fmt6), | ||
188 | _htonl(ip6h->daddr.s6_addr32[0]), *ifindex); | ||
189 | } else { | ||
190 | return TC_ACT_OK; | ||
191 | } | ||
192 | |||
193 | tkey.tunnel_id = 10000; | ||
194 | tkey.tunnel_ttl = 64; | ||
195 | /* 2401:db02:0:0:0:0:0:66 */ | ||
196 | tkey.remote_ipv6[0] = _htonl(0x2401db02); | ||
197 | tkey.remote_ipv6[1] = 0; | ||
198 | tkey.remote_ipv6[2] = 0; | ||
199 | tkey.remote_ipv6[3] = _htonl(0x00000066); | ||
200 | bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), BPF_F_TUNINFO_IPV6); | ||
201 | return bpf_redirect(*ifindex, 0); | ||
202 | } | ||
203 | |||
204 | SEC("drop_non_tun_vip") | ||
205 | int _drop_non_tun_vip(struct __sk_buff *skb) | ||
206 | { | ||
207 | struct bpf_tunnel_key tkey = {}; | ||
208 | void *data = (void *)(long)skb->data; | ||
209 | struct eth_hdr *eth = data; | ||
210 | void *data_end = (void *)(long)skb->data_end; | ||
211 | |||
212 | if (data + sizeof(*eth) > data_end) | ||
213 | return TC_ACT_OK; | ||
214 | |||
215 | if (eth->h_proto == htons(ETH_P_IP)) { | ||
216 | struct iphdr *iph = data + sizeof(*eth); | ||
217 | |||
218 | if (data + sizeof(*eth) + sizeof(*iph) > data_end) | ||
219 | return TC_ACT_OK; | ||
220 | |||
221 | if (is_vip_addr(eth->h_proto, iph->daddr)) | ||
222 | return TC_ACT_SHOT; | ||
223 | } else if (eth->h_proto == htons(ETH_P_IPV6)) { | ||
224 | struct ipv6hdr *ip6h = data + sizeof(*eth); | ||
225 | |||
226 | if (data + sizeof(*eth) + sizeof(*ip6h) > data_end) | ||
227 | return TC_ACT_OK; | ||
228 | |||
229 | if (is_vip_addr(eth->h_proto, ip6h->daddr.s6_addr32[0])) | ||
230 | return TC_ACT_SHOT; | ||
231 | } | ||
232 | |||
233 | return TC_ACT_OK; | ||
234 | } | ||
235 | |||
236 | char _license[] SEC("license") = "GPL"; | ||
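is_vip_addr() above matches the 10.10.1.0/24 VIP range (and the 2401:face:: prefix for IPv6) by masking the destination address while it is still in network byte order. A standalone userspace sketch of the IPv4 comparison, using standard htonl() and hypothetical test addresses:

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    /* Same /24 prefix test as is_vip_addr(): mask and compare the address
     * in network byte order, so daddr needs no conversion first. */
    static int is_vip_v4(uint32_t daddr_be)
    {
            /* 0xffffff00 is the /24 mask, 0x0a0a0100 is 10.10.1.0 (host order) */
            return (htonl(0xffffff00) & daddr_be) == htonl(0x0a0a0100);
    }

    int main(void)
    {
            struct in_addr a;

            inet_pton(AF_INET, "10.10.1.102", &a);
            printf("10.10.1.102 -> %d\n", is_vip_v4(a.s_addr));  /* 1: inside the VIP /24 */
            inet_pton(AF_INET, "10.2.1.102", &a);
            printf("10.2.1.102  -> %d\n", is_vip_v4(a.s_addr));  /* 0: outside the prefix */
            return 0;
    }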
diff --git a/samples/bpf/tc_l2_redirect_user.c b/samples/bpf/tc_l2_redirect_user.c new file mode 100644 index 000000000000..4013c5337b91 --- /dev/null +++ b/samples/bpf/tc_l2_redirect_user.c | |||
@@ -0,0 +1,73 @@ | |||
1 | /* Copyright (c) 2016 Facebook | ||
2 | * | ||
3 | * This program is free software; you can redistribute it and/or | ||
4 | * modify it under the terms of version 2 of the GNU General Public | ||
5 | * License as published by the Free Software Foundation. | ||
6 | */ | ||
7 | #include <linux/unistd.h> | ||
8 | #include <linux/bpf.h> | ||
9 | |||
10 | #include <stdlib.h> | ||
11 | #include <stdio.h> | ||
12 | #include <unistd.h> | ||
13 | #include <string.h> | ||
14 | #include <errno.h> | ||
15 | |||
16 | #include "libbpf.h" | ||
17 | |||
18 | static void usage(void) | ||
19 | { | ||
20 | printf("Usage: tc_l2_ipip_redirect [...]\n"); | ||
21 | printf(" -U <file> Update an already pinned BPF array\n"); | ||
22 | printf(" -i <ifindex> Interface index\n"); | ||
23 | printf(" -h Display this help\n"); | ||
24 | } | ||
25 | |||
26 | int main(int argc, char **argv) | ||
27 | { | ||
28 | const char *pinned_file = NULL; | ||
29 | int ifindex = -1; | ||
30 | int array_key = 0; | ||
31 | int array_fd = -1; | ||
32 | int ret = -1; | ||
33 | int opt; | ||
34 | |||
35 | while ((opt = getopt(argc, argv, "F:U:i:")) != -1) { | ||
36 | switch (opt) { | ||
37 | /* General args */ | ||
38 | case 'U': | ||
39 | pinned_file = optarg; | ||
40 | break; | ||
41 | case 'i': | ||
42 | ifindex = atoi(optarg); | ||
43 | break; | ||
44 | default: | ||
45 | usage(); | ||
46 | goto out; | ||
47 | } | ||
48 | } | ||
49 | |||
50 | if (ifindex < 0 || !pinned_file) { | ||
51 | usage(); | ||
52 | goto out; | ||
53 | } | ||
54 | |||
55 | array_fd = bpf_obj_get(pinned_file); | ||
56 | if (array_fd < 0) { | ||
57 | fprintf(stderr, "bpf_obj_get(%s): %s(%d)\n", | ||
58 | pinned_file, strerror(errno), errno); | ||
59 | goto out; | ||
60 | } | ||
61 | |||
62 | /* bpf_tunnel_key.remote_ipv4 expects host byte order */ | ||
63 | ret = bpf_update_elem(array_fd, &array_key, &ifindex, 0); | ||
64 | if (ret) { | ||
65 | perror("bpf_update_elem"); | ||
66 | goto out; | ||
67 | } | ||
68 | |||
69 | out: | ||
70 | if (array_fd != -1) | ||
71 | close(array_fd); | ||
72 | return ret; | ||
73 | } | ||
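tc_l2_redirect_user.c uses the sample's local libbpf helpers bpf_obj_get() and bpf_update_elem(), both thin wrappers around the bpf(2) syscall. A hedged sketch of the same update done with the raw syscall, assuming the map was pinned by tc at the path the test script uses and that the kernel headers provide BPF_OBJ_GET:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/bpf.h>

    static int sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
    {
            return syscall(__NR_bpf, cmd, attr, size);
    }

    int main(void)
    {
            union bpf_attr attr;
            int key = 0, ifindex = 2, fd;   /* ifindex 2 is a placeholder */

            /* BPF_OBJ_GET: reopen the array that tc pinned for the section above */
            memset(&attr, 0, sizeof(attr));
            attr.pathname = (unsigned long)"/sys/fs/bpf/tc/globals/tun_iface";
            fd = sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr));
            if (fd < 0) {
                    perror("BPF_OBJ_GET");
                    return 1;
            }

            /* BPF_MAP_UPDATE_ELEM: store the tunnel ifindex at key 0 */
            memset(&attr, 0, sizeof(attr));
            attr.map_fd = fd;
            attr.key = (unsigned long)&key;
            attr.value = (unsigned long)&ifindex;
            attr.flags = BPF_ANY;
            if (sys_bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr))) {
                    perror("BPF_MAP_UPDATE_ELEM");
                    close(fd);
                    return 1;
            }
            close(fd);
            return 0;
    }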
diff --git a/scripts/Makefile.build b/scripts/Makefile.build index de46ab03f063..7675d11ee65e 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build | |||
@@ -159,7 +159,8 @@ cmd_cpp_i_c = $(CPP) $(c_flags) -o $@ $< | |||
159 | $(obj)/%.i: $(src)/%.c FORCE | 159 | $(obj)/%.i: $(src)/%.c FORCE |
160 | $(call if_changed_dep,cpp_i_c) | 160 | $(call if_changed_dep,cpp_i_c) |
161 | 161 | ||
162 | cmd_gensymtypes = \ | 162 | # These mirror gensymtypes_S and co below, keep them in synch. |
163 | cmd_gensymtypes_c = \ | ||
163 | $(CPP) -D__GENKSYMS__ $(c_flags) $< | \ | 164 | $(CPP) -D__GENKSYMS__ $(c_flags) $< | \ |
164 | $(GENKSYMS) $(if $(1), -T $(2)) \ | 165 | $(GENKSYMS) $(if $(1), -T $(2)) \ |
165 | $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX)) \ | 166 | $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX)) \ |
@@ -169,7 +170,7 @@ cmd_gensymtypes = \ | |||
169 | quiet_cmd_cc_symtypes_c = SYM $(quiet_modtag) $@ | 170 | quiet_cmd_cc_symtypes_c = SYM $(quiet_modtag) $@ |
170 | cmd_cc_symtypes_c = \ | 171 | cmd_cc_symtypes_c = \ |
171 | set -e; \ | 172 | set -e; \ |
172 | $(call cmd_gensymtypes,true,$@) >/dev/null; \ | 173 | $(call cmd_gensymtypes_c,true,$@) >/dev/null; \ |
173 | test -s $@ || rm -f $@ | 174 | test -s $@ || rm -f $@ |
174 | 175 | ||
175 | $(obj)/%.symtypes : $(src)/%.c FORCE | 176 | $(obj)/%.symtypes : $(src)/%.c FORCE |
@@ -198,9 +199,10 @@ else | |||
198 | # the actual value of the checksum generated by genksyms | 199 | # the actual value of the checksum generated by genksyms |
199 | 200 | ||
200 | cmd_cc_o_c = $(CC) $(c_flags) -c -o $(@D)/.tmp_$(@F) $< | 201 | cmd_cc_o_c = $(CC) $(c_flags) -c -o $(@D)/.tmp_$(@F) $< |
201 | cmd_modversions = \ | 202 | |
203 | cmd_modversions_c = \ | ||
202 | if $(OBJDUMP) -h $(@D)/.tmp_$(@F) | grep -q __ksymtab; then \ | 204 | if $(OBJDUMP) -h $(@D)/.tmp_$(@F) | grep -q __ksymtab; then \ |
203 | $(call cmd_gensymtypes,$(KBUILD_SYMTYPES),$(@:.o=.symtypes)) \ | 205 | $(call cmd_gensymtypes_c,$(KBUILD_SYMTYPES),$(@:.o=.symtypes)) \ |
204 | > $(@D)/.tmp_$(@F:.o=.ver); \ | 206 | > $(@D)/.tmp_$(@F:.o=.ver); \ |
205 | \ | 207 | \ |
206 | $(LD) $(LDFLAGS) -r -o $@ $(@D)/.tmp_$(@F) \ | 208 | $(LD) $(LDFLAGS) -r -o $@ $(@D)/.tmp_$(@F) \ |
@@ -268,13 +270,14 @@ endif # CONFIG_STACK_VALIDATION | |||
268 | define rule_cc_o_c | 270 | define rule_cc_o_c |
269 | $(call echo-cmd,checksrc) $(cmd_checksrc) \ | 271 | $(call echo-cmd,checksrc) $(cmd_checksrc) \ |
270 | $(call cmd_and_fixdep,cc_o_c) \ | 272 | $(call cmd_and_fixdep,cc_o_c) \ |
271 | $(cmd_modversions) \ | 273 | $(cmd_modversions_c) \ |
272 | $(cmd_objtool) \ | 274 | $(cmd_objtool) \ |
273 | $(call echo-cmd,record_mcount) $(cmd_record_mcount) | 275 | $(call echo-cmd,record_mcount) $(cmd_record_mcount) |
274 | endef | 276 | endef |
275 | 277 | ||
276 | define rule_as_o_S | 278 | define rule_as_o_S |
277 | $(call cmd_and_fixdep,as_o_S) \ | 279 | $(call cmd_and_fixdep,as_o_S) \ |
280 | $(cmd_modversions_S) \ | ||
278 | $(cmd_objtool) | 281 | $(cmd_objtool) |
279 | endef | 282 | endef |
280 | 283 | ||
@@ -314,6 +317,39 @@ modkern_aflags := $(KBUILD_AFLAGS_KERNEL) $(AFLAGS_KERNEL) | |||
314 | $(real-objs-m) : modkern_aflags := $(KBUILD_AFLAGS_MODULE) $(AFLAGS_MODULE) | 317 | $(real-objs-m) : modkern_aflags := $(KBUILD_AFLAGS_MODULE) $(AFLAGS_MODULE) |
315 | $(real-objs-m:.o=.s): modkern_aflags := $(KBUILD_AFLAGS_MODULE) $(AFLAGS_MODULE) | 318 | $(real-objs-m:.o=.s): modkern_aflags := $(KBUILD_AFLAGS_MODULE) $(AFLAGS_MODULE) |
316 | 319 | ||
320 | # .S file exports must have their C prototypes defined in asm/asm-prototypes.h | ||
321 | # or a file that it includes, in order to get versioned symbols. We build a | ||
322 | # dummy C file that includes asm-prototypes and the EXPORT_SYMBOL lines from | ||
323 | # the .S file (with trailing ';'), and run genksyms on that, to extract the symbol versions. | ||
324 | # | ||
325 | # This is convoluted. The .S file must first be preprocessed to run guards and | ||
326 | # expand names, then the resulting exports must be constructed into plain | ||
327 | # EXPORT_SYMBOL(symbol); to build our dummy C file, and that gets preprocessed | ||
328 | # to make the genksyms input. | ||
329 | # | ||
330 | # These mirror gensymtypes_c and co above, keep them in synch. | ||
331 | cmd_gensymtypes_S = \ | ||
332 | (echo "\#include <linux/kernel.h>" ; \ | ||
333 | echo "\#include <asm/asm-prototypes.h>" ; \ | ||
334 | $(CPP) $(a_flags) $< | \ | ||
335 | grep "\<___EXPORT_SYMBOL\>" | \ | ||
336 | sed 's/.*___EXPORT_SYMBOL[[:space:]]*\([a-zA-Z0-9_]*\)[[:space:]]*,.*/EXPORT_SYMBOL(\1);/' ) | \ | ||
337 | $(CPP) -D__GENKSYMS__ $(c_flags) -xc - | \ | ||
338 | $(GENKSYMS) $(if $(1), -T $(2)) \ | ||
339 | $(patsubst y,-s _,$(CONFIG_HAVE_UNDERSCORE_SYMBOL_PREFIX)) \ | ||
340 | $(if $(KBUILD_PRESERVE),-p) \ | ||
341 | -r $(firstword $(wildcard $(2:.symtypes=.symref) /dev/null)) | ||
342 | |||
343 | quiet_cmd_cc_symtypes_S = SYM $(quiet_modtag) $@ | ||
344 | cmd_cc_symtypes_S = \ | ||
345 | set -e; \ | ||
346 | $(call cmd_gensymtypes_S,true,$@) >/dev/null; \ | ||
347 | test -s $@ || rm -f $@ | ||
348 | |||
349 | $(obj)/%.symtypes : $(src)/%.S FORCE | ||
350 | $(call cmd,cc_symtypes_S) | ||
351 | |||
352 | |||
317 | quiet_cmd_cpp_s_S = CPP $(quiet_modtag) $@ | 353 | quiet_cmd_cpp_s_S = CPP $(quiet_modtag) $@ |
318 | cmd_cpp_s_S = $(CPP) $(a_flags) -o $@ $< | 354 | cmd_cpp_s_S = $(CPP) $(a_flags) -o $@ $< |
319 | 355 | ||
@@ -321,7 +357,37 @@ $(obj)/%.s: $(src)/%.S FORCE | |||
321 | $(call if_changed_dep,cpp_s_S) | 357 | $(call if_changed_dep,cpp_s_S) |
322 | 358 | ||
323 | quiet_cmd_as_o_S = AS $(quiet_modtag) $@ | 359 | quiet_cmd_as_o_S = AS $(quiet_modtag) $@ |
324 | cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $< | 360 | |
361 | ifndef CONFIG_MODVERSIONS | ||
362 | cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $< | ||
363 | |||
364 | else | ||
365 | |||
366 | ASM_PROTOTYPES := $(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/asm-prototypes.h) | ||
367 | |||
368 | ifeq ($(ASM_PROTOTYPES),) | ||
369 | cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $< | ||
370 | |||
371 | else | ||
372 | |||
373 | # versioning matches the C process described above, with the difference | ||
374 | # that we parse the asm-prototypes.h C header to get function prototypes. | ||
375 | |||
376 | cmd_as_o_S = $(CC) $(a_flags) -c -o $(@D)/.tmp_$(@F) $< | ||
377 | |||
378 | cmd_modversions_S = \ | ||
379 | if $(OBJDUMP) -h $(@D)/.tmp_$(@F) | grep -q __ksymtab; then \ | ||
380 | $(call cmd_gensymtypes_S,$(KBUILD_SYMTYPES),$(@:.o=.symtypes)) \ | ||
381 | > $(@D)/.tmp_$(@F:.o=.ver); \ | ||
382 | \ | ||
383 | $(LD) $(LDFLAGS) -r -o $@ $(@D)/.tmp_$(@F) \ | ||
384 | -T $(@D)/.tmp_$(@F:.o=.ver); \ | ||
385 | rm -f $(@D)/.tmp_$(@F) $(@D)/.tmp_$(@F:.o=.ver); \ | ||
386 | else \ | ||
387 | mv -f $(@D)/.tmp_$(@F) $@; \ | ||
388 | fi; | ||
389 | endif | ||
390 | endif | ||
325 | 391 | ||
326 | $(obj)/%.o: $(src)/%.S $(objtool_obj) FORCE | 392 | $(obj)/%.o: $(src)/%.S $(objtool_obj) FORCE |
327 | $(call if_changed_rule,as_o_S) | 393 | $(call if_changed_rule,as_o_S) |
@@ -430,6 +496,9 @@ cmd_export_list = $(OBJDUMP) -h $< | \ | |||
430 | 496 | ||
431 | $(obj)/lib-ksyms.o: $(lib-target) FORCE | 497 | $(obj)/lib-ksyms.o: $(lib-target) FORCE |
432 | $(call if_changed,export_list) | 498 | $(call if_changed,export_list) |
499 | |||
500 | targets += $(obj)/lib-ksyms.o | ||
501 | |||
433 | endif | 502 | endif |
434 | 503 | ||
435 | # | 504 | # |
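The new cmd_gensymtypes_S / cmd_modversions_S rules above only produce CRCs for assembly exports whose C prototypes are reachable from asm/asm-prototypes.h. Purely as an illustration (the real header is per-architecture and will differ), such a header could look like:

    /* Illustrative asm/asm-prototypes.h sketch -- not an actual kernel header.
     * It only needs to make a C prototype visible for every symbol exported
     * from a .S file, so the generated dummy C file lets genksyms compute CRCs. */
    #include <linux/types.h>
    #include <asm/string.h>            /* e.g. memcpy()/memset() from string.S */

    extern void *__memcpy(void *dest, const void *src, size_t n);
    extern void *__memset(void *s, int c, size_t n);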
diff --git a/scripts/gcc-x86_64-has-stack-protector.sh b/scripts/gcc-x86_64-has-stack-protector.sh index 973e8c141567..17867e723a51 100755 --- a/scripts/gcc-x86_64-has-stack-protector.sh +++ b/scripts/gcc-x86_64-has-stack-protector.sh | |||
@@ -1,6 +1,6 @@ | |||
1 | #!/bin/sh | 1 | #!/bin/sh |
2 | 2 | ||
3 | echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fstack-protector - -o - 2> /dev/null | grep -q "%gs" | 3 | echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs" |
4 | if [ "$?" -eq "0" ] ; then | 4 | if [ "$?" -eq "0" ] ; then |
5 | echo y | 5 | echo y |
6 | else | 6 | else |
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c index fc3036b34e51..a4d90aa1045a 100644 --- a/security/apparmor/domain.c +++ b/security/apparmor/domain.c | |||
@@ -621,8 +621,8 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest) | |||
621 | /* released below */ | 621 | /* released below */ |
622 | cred = get_current_cred(); | 622 | cred = get_current_cred(); |
623 | cxt = cred_cxt(cred); | 623 | cxt = cred_cxt(cred); |
624 | profile = aa_cred_profile(cred); | 624 | profile = aa_get_newest_profile(aa_cred_profile(cred)); |
625 | previous_profile = cxt->previous; | 625 | previous_profile = aa_get_newest_profile(cxt->previous); |
626 | 626 | ||
627 | if (unconfined(profile)) { | 627 | if (unconfined(profile)) { |
628 | info = "unconfined"; | 628 | info = "unconfined"; |
@@ -718,6 +718,8 @@ audit: | |||
718 | out: | 718 | out: |
719 | aa_put_profile(hat); | 719 | aa_put_profile(hat); |
720 | kfree(name); | 720 | kfree(name); |
721 | aa_put_profile(profile); | ||
722 | aa_put_profile(previous_profile); | ||
721 | put_cred(cred); | 723 | put_cred(cred); |
722 | 724 | ||
723 | return error; | 725 | return error; |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 2f909dd8b7b8..ea81c08ddc7a 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -6907,8 +6907,6 @@ static const struct hda_fixup alc662_fixups[] = { | |||
6907 | .v.pins = (const struct hda_pintbl[]) { | 6907 | .v.pins = (const struct hda_pintbl[]) { |
6908 | { 0x15, 0x40f000f0 }, /* disabled */ | 6908 | { 0x15, 0x40f000f0 }, /* disabled */ |
6909 | { 0x16, 0x40f000f0 }, /* disabled */ | 6909 | { 0x16, 0x40f000f0 }, /* disabled */ |
6910 | { 0x18, 0x01014011 }, /* LO */ | ||
6911 | { 0x1a, 0x01014012 }, /* LO */ | ||
6912 | { } | 6910 | { } |
6913 | } | 6911 | } |
6914 | }, | 6912 | }, |
diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c index 6a23302297c9..4d9d320a7971 100644 --- a/sound/pci/hda/thinkpad_helper.c +++ b/sound/pci/hda/thinkpad_helper.c | |||
@@ -13,7 +13,8 @@ static void (*old_vmaster_hook)(void *, int); | |||
13 | static bool is_thinkpad(struct hda_codec *codec) | 13 | static bool is_thinkpad(struct hda_codec *codec) |
14 | { | 14 | { |
15 | return (codec->core.subsystem_id >> 16 == 0x17aa) && | 15 | return (codec->core.subsystem_id >> 16 == 0x17aa) && |
16 | (acpi_dev_found("LEN0068") || acpi_dev_found("IBM0068")); | 16 | (acpi_dev_found("LEN0068") || acpi_dev_found("LEN0268") || |
17 | acpi_dev_found("IBM0068")); | ||
17 | } | 18 | } |
18 | 19 | ||
19 | static void update_tpacpi_mute_led(void *private_data, int enabled) | 20 | static void update_tpacpi_mute_led(void *private_data, int enabled) |
diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c index 07000f53db44..b392e51de94d 100644 --- a/sound/soc/qcom/lpass-platform.c +++ b/sound/soc/qcom/lpass-platform.c | |||
@@ -75,6 +75,7 @@ static int lpass_platform_pcmops_open(struct snd_pcm_substream *substream) | |||
75 | data->i2s_port = cpu_dai->driver->id; | 75 | data->i2s_port = cpu_dai->driver->id; |
76 | runtime->private_data = data; | 76 | runtime->private_data = data; |
77 | 77 | ||
78 | dma_ch = 0; | ||
78 | if (v->alloc_dma_channel) | 79 | if (v->alloc_dma_channel) |
79 | dma_ch = v->alloc_dma_channel(drvdata, dir); | 80 | dma_ch = v->alloc_dma_channel(drvdata, dir); |
80 | if (dma_ch < 0) | 81 | if (dma_ch < 0) |
diff --git a/sound/usb/card.c b/sound/usb/card.c index 9e5276d6dda0..2ddc034673a8 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c | |||
@@ -315,7 +315,8 @@ static int snd_usb_audio_free(struct snd_usb_audio *chip) | |||
315 | snd_usb_endpoint_free(ep); | 315 | snd_usb_endpoint_free(ep); |
316 | 316 | ||
317 | mutex_destroy(&chip->mutex); | 317 | mutex_destroy(&chip->mutex); |
318 | dev_set_drvdata(&chip->dev->dev, NULL); | 318 | if (!atomic_read(&chip->shutdown)) |
319 | dev_set_drvdata(&chip->dev->dev, NULL); | ||
319 | kfree(chip); | 320 | kfree(chip); |
320 | return 0; | 321 | return 0; |
321 | } | 322 | } |
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c index 4ffff7be9299..a53fef0c673b 100644 --- a/tools/perf/ui/browsers/hists.c +++ b/tools/perf/ui/browsers/hists.c | |||
@@ -1337,8 +1337,8 @@ static int hist_browser__show_hierarchy_entry(struct hist_browser *browser, | |||
1337 | } | 1337 | } |
1338 | 1338 | ||
1339 | if (first) { | 1339 | if (first) { |
1340 | ui_browser__printf(&browser->b, "%c", folded_sign); | 1340 | ui_browser__printf(&browser->b, "%c ", folded_sign); |
1341 | width--; | 1341 | width -= 2; |
1342 | first = false; | 1342 | first = false; |
1343 | } else { | 1343 | } else { |
1344 | ui_browser__printf(&browser->b, " "); | 1344 | ui_browser__printf(&browser->b, " "); |
@@ -1361,8 +1361,10 @@ static int hist_browser__show_hierarchy_entry(struct hist_browser *browser, | |||
1361 | width -= hpp.buf - s; | 1361 | width -= hpp.buf - s; |
1362 | } | 1362 | } |
1363 | 1363 | ||
1364 | ui_browser__write_nstring(&browser->b, "", hierarchy_indent); | 1364 | if (!first) { |
1365 | width -= hierarchy_indent; | 1365 | ui_browser__write_nstring(&browser->b, "", hierarchy_indent); |
1366 | width -= hierarchy_indent; | ||
1367 | } | ||
1366 | 1368 | ||
1367 | if (column >= browser->b.horiz_scroll) { | 1369 | if (column >= browser->b.horiz_scroll) { |
1368 | char s[2048]; | 1370 | char s[2048]; |
@@ -1381,7 +1383,13 @@ static int hist_browser__show_hierarchy_entry(struct hist_browser *browser, | |||
1381 | } | 1383 | } |
1382 | 1384 | ||
1383 | perf_hpp_list__for_each_format(entry->hpp_list, fmt) { | 1385 | perf_hpp_list__for_each_format(entry->hpp_list, fmt) { |
1384 | ui_browser__write_nstring(&browser->b, "", 2); | 1386 | if (first) { |
1387 | ui_browser__printf(&browser->b, "%c ", folded_sign); | ||
1388 | first = false; | ||
1389 | } else { | ||
1390 | ui_browser__write_nstring(&browser->b, "", 2); | ||
1391 | } | ||
1392 | |||
1385 | width -= 2; | 1393 | width -= 2; |
1386 | 1394 | ||
1387 | /* | 1395 | /* |
@@ -1555,10 +1563,11 @@ static int hists_browser__scnprintf_hierarchy_headers(struct hist_browser *brows | |||
1555 | int indent = hists->nr_hpp_node - 2; | 1563 | int indent = hists->nr_hpp_node - 2; |
1556 | bool first_node, first_col; | 1564 | bool first_node, first_col; |
1557 | 1565 | ||
1558 | ret = scnprintf(buf, size, " "); | 1566 | ret = scnprintf(buf, size, " "); |
1559 | if (advance_hpp_check(&dummy_hpp, ret)) | 1567 | if (advance_hpp_check(&dummy_hpp, ret)) |
1560 | return ret; | 1568 | return ret; |
1561 | 1569 | ||
1570 | first_node = true; | ||
1562 | /* the first hpp_list_node is for overhead columns */ | 1571 | /* the first hpp_list_node is for overhead columns */ |
1563 | fmt_node = list_first_entry(&hists->hpp_formats, | 1572 | fmt_node = list_first_entry(&hists->hpp_formats, |
1564 | struct perf_hpp_list_node, list); | 1573 | struct perf_hpp_list_node, list); |
@@ -1573,12 +1582,16 @@ static int hists_browser__scnprintf_hierarchy_headers(struct hist_browser *brows | |||
1573 | ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, " "); | 1582 | ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, " "); |
1574 | if (advance_hpp_check(&dummy_hpp, ret)) | 1583 | if (advance_hpp_check(&dummy_hpp, ret)) |
1575 | break; | 1584 | break; |
1585 | |||
1586 | first_node = false; | ||
1576 | } | 1587 | } |
1577 | 1588 | ||
1578 | ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, "%*s", | 1589 | if (!first_node) { |
1579 | indent * HIERARCHY_INDENT, ""); | 1590 | ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, "%*s", |
1580 | if (advance_hpp_check(&dummy_hpp, ret)) | 1591 | indent * HIERARCHY_INDENT, ""); |
1581 | return ret; | 1592 | if (advance_hpp_check(&dummy_hpp, ret)) |
1593 | return ret; | ||
1594 | } | ||
1582 | 1595 | ||
1583 | first_node = true; | 1596 | first_node = true; |
1584 | list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) { | 1597 | list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) { |
@@ -2076,8 +2089,21 @@ void hist_browser__init(struct hist_browser *browser, | |||
2076 | browser->b.use_navkeypressed = true; | 2089 | browser->b.use_navkeypressed = true; |
2077 | browser->show_headers = symbol_conf.show_hist_headers; | 2090 | browser->show_headers = symbol_conf.show_hist_headers; |
2078 | 2091 | ||
2079 | hists__for_each_format(hists, fmt) | 2092 | if (symbol_conf.report_hierarchy) { |
2093 | struct perf_hpp_list_node *fmt_node; | ||
2094 | |||
2095 | /* count overhead columns (in the first node) */ | ||
2096 | fmt_node = list_first_entry(&hists->hpp_formats, | ||
2097 | struct perf_hpp_list_node, list); | ||
2098 | perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) | ||
2099 | ++browser->b.columns; | ||
2100 | |||
2101 | /* add a single column for whole hierarchy sort keys */ ||
2080 | ++browser->b.columns; | 2102 | ++browser->b.columns; |
2103 | } else { | ||
2104 | hists__for_each_format(hists, fmt) | ||
2105 | ++browser->b.columns; | ||
2106 | } | ||
2081 | 2107 | ||
2082 | hists__reset_column_width(hists); | 2108 | hists__reset_column_width(hists); |
2083 | } | 2109 | } |
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index b02992efb513..a69f027368ef 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c | |||
@@ -1600,18 +1600,18 @@ static void hists__hierarchy_output_resort(struct hists *hists, | |||
1600 | if (prog) | 1600 | if (prog) |
1601 | ui_progress__update(prog, 1); | 1601 | ui_progress__update(prog, 1); |
1602 | 1602 | ||
1603 | hists->nr_entries++; | ||
1604 | if (!he->filtered) { | ||
1605 | hists->nr_non_filtered_entries++; | ||
1606 | hists__calc_col_len(hists, he); | ||
1607 | } | ||
1608 | |||
1603 | if (!he->leaf) { | 1609 | if (!he->leaf) { |
1604 | hists__hierarchy_output_resort(hists, prog, | 1610 | hists__hierarchy_output_resort(hists, prog, |
1605 | &he->hroot_in, | 1611 | &he->hroot_in, |
1606 | &he->hroot_out, | 1612 | &he->hroot_out, |
1607 | min_callchain_hits, | 1613 | min_callchain_hits, |
1608 | use_callchain); | 1614 | use_callchain); |
1609 | hists->nr_entries++; | ||
1610 | if (!he->filtered) { | ||
1611 | hists->nr_non_filtered_entries++; | ||
1612 | hists__calc_col_len(hists, he); | ||
1613 | } | ||
1614 | |||
1615 | continue; | 1615 | continue; |
1616 | } | 1616 | } |
1617 | 1617 | ||
diff --git a/tools/power/acpi/Makefile.config b/tools/power/acpi/Makefile.config index a538ff44b108..a1883bbb0144 100644 --- a/tools/power/acpi/Makefile.config +++ b/tools/power/acpi/Makefile.config | |||
@@ -8,18 +8,19 @@ | |||
8 | # as published by the Free Software Foundation; version 2 | 8 | # as published by the Free Software Foundation; version 2 |
9 | # of the License. | 9 | # of the License. |
10 | 10 | ||
11 | include ../../../../scripts/Makefile.include | 11 | ifeq ($(srctree),) |
12 | 12 | srctree := $(patsubst %/,%,$(dir $(shell pwd))) | |
13 | OUTPUT=./ | 13 | srctree := $(patsubst %/,%,$(dir $(srctree))) |
14 | ifeq ("$(origin O)", "command line") | 14 | #$(info Determined 'srctree' to be $(srctree)) |
15 | OUTPUT := $(O)/ | ||
16 | endif | 15 | endif |
17 | 16 | ||
18 | ifneq ($(OUTPUT),) | 17 | include $(srctree)/../../scripts/Makefile.include |
19 | # check that the output directory actually exists | 18 | |
20 | OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd) | 19 | OUTPUT=$(srctree)/ |
21 | $(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist)) | 20 | ifeq ("$(origin O)", "command line") |
21 | OUTPUT := $(O)/power/acpi/ | ||
22 | endif | 22 | endif |
23 | #$(info Determined 'OUTPUT' to be $(OUTPUT)) | ||
23 | 24 | ||
24 | # --- CONFIGURATION BEGIN --- | 25 | # --- CONFIGURATION BEGIN --- |
25 | 26 | ||
@@ -70,8 +71,8 @@ WARNINGS := -Wall | |||
70 | WARNINGS += $(call cc-supports,-Wstrict-prototypes) | 71 | WARNINGS += $(call cc-supports,-Wstrict-prototypes) |
71 | WARNINGS += $(call cc-supports,-Wdeclaration-after-statement) | 72 | WARNINGS += $(call cc-supports,-Wdeclaration-after-statement) |
72 | 73 | ||
73 | KERNEL_INCLUDE := ../../../include | 74 | KERNEL_INCLUDE := $(OUTPUT)include |
74 | ACPICA_INCLUDE := ../../../drivers/acpi/acpica | 75 | ACPICA_INCLUDE := $(srctree)/../../../drivers/acpi/acpica |
75 | CFLAGS += -D_LINUX -I$(KERNEL_INCLUDE) -I$(ACPICA_INCLUDE) | 76 | CFLAGS += -D_LINUX -I$(KERNEL_INCLUDE) -I$(ACPICA_INCLUDE) |
76 | CFLAGS += $(WARNINGS) | 77 | CFLAGS += $(WARNINGS) |
77 | 78 | ||
diff --git a/tools/power/acpi/Makefile.rules b/tools/power/acpi/Makefile.rules index ec87a9e562c0..373738338f51 100644 --- a/tools/power/acpi/Makefile.rules +++ b/tools/power/acpi/Makefile.rules | |||
@@ -8,28 +8,42 @@ | |||
8 | # as published by the Free Software Foundation; version 2 | 8 | # as published by the Free Software Foundation; version 2 |
9 | # of the License. | 9 | # of the License. |
10 | 10 | ||
11 | $(OUTPUT)$(TOOL): $(TOOL_OBJS) FORCE | 11 | objdir := $(OUTPUT)tools/$(TOOL)/ |
12 | $(ECHO) " LD " $@ | 12 | toolobjs := $(addprefix $(objdir),$(TOOL_OBJS)) |
13 | $(QUIET) $(LD) $(CFLAGS) $(LDFLAGS) $(TOOL_OBJS) -L$(OUTPUT) -o $@ | 13 | $(OUTPUT)$(TOOL): $(toolobjs) FORCE |
14 | $(ECHO) " LD " $(subst $(OUTPUT),,$@) | ||
15 | $(QUIET) $(LD) $(CFLAGS) $(LDFLAGS) $(toolobjs) -L$(OUTPUT) -o $@ | ||
16 | $(ECHO) " STRIP " $(subst $(OUTPUT),,$@) | ||
14 | $(QUIET) $(STRIPCMD) $@ | 17 | $(QUIET) $(STRIPCMD) $@ |
15 | 18 | ||
16 | $(OUTPUT)%.o: %.c | 19 | $(KERNEL_INCLUDE): |
17 | $(ECHO) " CC " $@ | 20 | $(ECHO) " MKDIR " $(subst $(OUTPUT),,$@) |
21 | $(QUIET) mkdir -p $(KERNEL_INCLUDE) | ||
22 | $(ECHO) " CP " $(subst $(OUTPUT),,$@) | ||
23 | $(QUIET) cp -rf $(srctree)/../../../include/acpi $(KERNEL_INCLUDE)/ | ||
24 | |||
25 | $(objdir)%.o: %.c $(KERNEL_INCLUDE) | ||
26 | $(ECHO) " CC " $(subst $(OUTPUT),,$@) | ||
18 | $(QUIET) $(CC) -c $(CFLAGS) -o $@ $< | 27 | $(QUIET) $(CC) -c $(CFLAGS) -o $@ $< |
19 | 28 | ||
20 | all: $(OUTPUT)$(TOOL) | 29 | all: $(OUTPUT)$(TOOL) |
21 | clean: | 30 | clean: |
22 | -find $(OUTPUT) \( -not -type d \) \ | 31 | $(ECHO) " RMOBJ " $(subst $(OUTPUT),,$(objdir)) |
23 | -and \( -name '*~' -o -name '*.[oas]' \) \ | 32 | $(QUIET) find $(objdir) \( -not -type d \)\ |
24 | -type f -print \ | 33 | -and \( -name '*~' -o -name '*.[oas]' \)\ |
25 | | xargs rm -f | 34 | -type f -print | xargs rm -f |
26 | -rm -f $(OUTPUT)$(TOOL) | 35 | $(ECHO) " RM " $(TOOL) |
36 | $(QUIET) rm -f $(OUTPUT)$(TOOL) | ||
37 | $(ECHO) " RMINC " $(subst $(OUTPUT),,$(KERNEL_INCLUDE)) | ||
38 | $(QUIET) rm -rf $(KERNEL_INCLUDE) | ||
27 | 39 | ||
28 | install-tools: | 40 | install-tools: |
29 | $(INSTALL) -d $(DESTDIR)${sbindir} | 41 | $(ECHO) " INST " $(TOOL) |
30 | $(INSTALL_PROGRAM) $(OUTPUT)$(TOOL) $(DESTDIR)${sbindir} | 42 | $(QUIET) $(INSTALL) -d $(DESTDIR)$(sbindir) |
43 | $(QUIET) $(INSTALL_PROGRAM) $(OUTPUT)$(TOOL) $(DESTDIR)$(sbindir) | ||
31 | uninstall-tools: | 44 | uninstall-tools: |
32 | - rm -f $(DESTDIR)${sbindir}/$(TOOL) | 45 | $(ECHO) " UNINST " $(TOOL) |
46 | $(QUIET) rm -f $(DESTDIR)$(sbindir)/$(TOOL) | ||
33 | 47 | ||
34 | install: all install-tools $(EXTRA_INSTALL) | 48 | install: all install-tools $(EXTRA_INSTALL) |
35 | uninstall: uninstall-tools $(EXTRA_UNINSTALL) | 49 | uninstall: uninstall-tools $(EXTRA_UNINSTALL) |
diff --git a/tools/power/acpi/tools/acpidbg/Makefile b/tools/power/acpi/tools/acpidbg/Makefile index 352df4b41ae9..f2d06e773eb4 100644 --- a/tools/power/acpi/tools/acpidbg/Makefile +++ b/tools/power/acpi/tools/acpidbg/Makefile | |||
@@ -17,9 +17,7 @@ vpath %.c \ | |||
17 | ../../os_specific/service_layers\ | 17 | ../../os_specific/service_layers\ |
18 | . | 18 | . |
19 | CFLAGS += -DACPI_APPLICATION -DACPI_SINGLE_THREAD -DACPI_DEBUGGER\ | 19 | CFLAGS += -DACPI_APPLICATION -DACPI_SINGLE_THREAD -DACPI_DEBUGGER\ |
20 | -I.\ | 20 | -I. |
21 | -I../../../../../drivers/acpi/acpica\ | ||
22 | -I../../../../../include | ||
23 | LDFLAGS += -lpthread | 21 | LDFLAGS += -lpthread |
24 | TOOL_OBJS = \ | 22 | TOOL_OBJS = \ |
25 | acpidbg.o | 23 | acpidbg.o |
diff --git a/tools/power/acpi/tools/acpidbg/acpidbg.c b/tools/power/acpi/tools/acpidbg/acpidbg.c index a88ac45b7756..4308362d7068 100644 --- a/tools/power/acpi/tools/acpidbg/acpidbg.c +++ b/tools/power/acpi/tools/acpidbg/acpidbg.c | |||
@@ -12,10 +12,16 @@ | |||
12 | #include <acpi/acpi.h> | 12 | #include <acpi/acpi.h> |
13 | 13 | ||
14 | /* Headers not included by include/acpi/platform/aclinux.h */ | 14 | /* Headers not included by include/acpi/platform/aclinux.h */ |
15 | #include <unistd.h> | ||
16 | #include <stdio.h> | ||
17 | #include <stdlib.h> | ||
18 | #include <string.h> | ||
19 | #include <error.h> | ||
15 | #include <stdbool.h> | 20 | #include <stdbool.h> |
16 | #include <fcntl.h> | 21 | #include <fcntl.h> |
17 | #include <assert.h> | 22 | #include <assert.h> |
18 | #include <linux/circ_buf.h> | 23 | #include <sys/select.h> |
24 | #include "../../../../../include/linux/circ_buf.h" | ||
19 | 25 | ||
20 | #define ACPI_AML_FILE "/sys/kernel/debug/acpi/acpidbg" | 26 | #define ACPI_AML_FILE "/sys/kernel/debug/acpi/acpidbg" |
21 | #define ACPI_AML_SEC_TICK 1 | 27 | #define ACPI_AML_SEC_TICK 1 |
diff --git a/tools/power/acpi/tools/acpidump/Makefile b/tools/power/acpi/tools/acpidump/Makefile index 04b5db7c7c0b..f7c7af1f9258 100644 --- a/tools/power/acpi/tools/acpidump/Makefile +++ b/tools/power/acpi/tools/acpidump/Makefile | |||
@@ -19,9 +19,7 @@ vpath %.c \ | |||
19 | ./\ | 19 | ./\ |
20 | ../../common\ | 20 | ../../common\ |
21 | ../../os_specific/service_layers | 21 | ../../os_specific/service_layers |
22 | CFLAGS += -DACPI_DUMP_APP -I.\ | 22 | CFLAGS += -DACPI_DUMP_APP -I. |
23 | -I../../../../../drivers/acpi/acpica\ | ||
24 | -I../../../../../include | ||
25 | TOOL_OBJS = \ | 23 | TOOL_OBJS = \ |
26 | apdump.o\ | 24 | apdump.o\ |
27 | apfiles.o\ | 25 | apfiles.o\ |
@@ -49,7 +47,9 @@ TOOL_OBJS = \ | |||
49 | 47 | ||
50 | include ../../Makefile.rules | 48 | include ../../Makefile.rules |
51 | 49 | ||
52 | install-man: ../../man/acpidump.8 | 50 | install-man: $(srctree)/man/acpidump.8 |
53 | $(INSTALL_DATA) -D $< $(DESTDIR)${mandir}/man8/acpidump.8 | 51 | $(ECHO) " INST " acpidump.8 |
52 | $(QUIET) $(INSTALL_DATA) -D $< $(DESTDIR)$(mandir)/man8/acpidump.8 | ||
54 | uninstall-man: | 53 | uninstall-man: |
55 | - rm -f $(DESTDIR)${mandir}/man8/acpidump.8 | 54 | $(ECHO) " UNINST " acpidump.8 |
55 | $(QUIET) rm -f $(DESTDIR)$(mandir)/man8/acpidump.8 | ||
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c index 6e9c40eea208..69ccce308458 100644 --- a/virt/kvm/arm/pmu.c +++ b/virt/kvm/arm/pmu.c | |||
@@ -305,7 +305,7 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) | |||
305 | continue; | 305 | continue; |
306 | type = vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i) | 306 | type = vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i) |
307 | & ARMV8_PMU_EVTYPE_EVENT; | 307 | & ARMV8_PMU_EVTYPE_EVENT; |
308 | if ((type == ARMV8_PMU_EVTYPE_EVENT_SW_INCR) | 308 | if ((type == ARMV8_PMUV3_PERFCTR_SW_INCR) |
309 | && (enable & BIT(i))) { | 309 | && (enable & BIT(i))) { |
310 | reg = vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1; | 310 | reg = vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1; |
311 | reg = lower_32_bits(reg); | 311 | reg = lower_32_bits(reg); |
@@ -379,7 +379,8 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, | |||
379 | eventsel = data & ARMV8_PMU_EVTYPE_EVENT; | 379 | eventsel = data & ARMV8_PMU_EVTYPE_EVENT; |
380 | 380 | ||
381 | /* Software increment event doesn't need to be backed by a perf event */ | 381 | /* Software increment event doesn't need to be backed by a perf event */ |
382 | if (eventsel == ARMV8_PMU_EVTYPE_EVENT_SW_INCR) | 382 | if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR && |
383 | select_idx != ARMV8_PMU_CYCLE_IDX) | ||
383 | return; | 384 | return; |
384 | 385 | ||
385 | memset(&attr, 0, sizeof(struct perf_event_attr)); | 386 | memset(&attr, 0, sizeof(struct perf_event_attr)); |
@@ -391,7 +392,8 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, | |||
391 | attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0; | 392 | attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0; |
392 | attr.exclude_hv = 1; /* Don't count EL2 events */ | 393 | attr.exclude_hv = 1; /* Don't count EL2 events */ |
393 | attr.exclude_host = 1; /* Don't count host events */ | 394 | attr.exclude_host = 1; /* Don't count host events */ |
394 | attr.config = eventsel; | 395 | attr.config = (select_idx == ARMV8_PMU_CYCLE_IDX) ? |
396 | ARMV8_PMUV3_PERFCTR_CPU_CYCLES : eventsel; | ||
395 | 397 | ||
396 | counter = kvm_pmu_get_counter_value(vcpu, select_idx); | 398 | counter = kvm_pmu_get_counter_value(vcpu, select_idx); |
397 | /* The initial sample period (overflow count) of an event. */ | 399 | /* The initial sample period (overflow count) of an event. */ |
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c index 8035cc1eb955..efeceb0a222d 100644 --- a/virt/kvm/async_pf.c +++ b/virt/kvm/async_pf.c | |||
@@ -91,6 +91,7 @@ static void async_pf_execute(struct work_struct *work) | |||
91 | 91 | ||
92 | spin_lock(&vcpu->async_pf.lock); | 92 | spin_lock(&vcpu->async_pf.lock); |
93 | list_add_tail(&apf->link, &vcpu->async_pf.done); | 93 | list_add_tail(&apf->link, &vcpu->async_pf.done); |
94 | apf->vcpu = NULL; | ||
94 | spin_unlock(&vcpu->async_pf.lock); | 95 | spin_unlock(&vcpu->async_pf.lock); |
95 | 96 | ||
96 | /* | 97 | /* |
@@ -113,6 +114,8 @@ static void async_pf_execute(struct work_struct *work) | |||
113 | 114 | ||
114 | void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu) | 115 | void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu) |
115 | { | 116 | { |
117 | spin_lock(&vcpu->async_pf.lock); | ||
118 | |||
116 | /* cancel outstanding work queue item */ | 119 | /* cancel outstanding work queue item */ |
117 | while (!list_empty(&vcpu->async_pf.queue)) { | 120 | while (!list_empty(&vcpu->async_pf.queue)) { |
118 | struct kvm_async_pf *work = | 121 | struct kvm_async_pf *work = |
@@ -120,6 +123,14 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu) | |||
120 | typeof(*work), queue); | 123 | typeof(*work), queue); |
121 | list_del(&work->queue); | 124 | list_del(&work->queue); |
122 | 125 | ||
126 | /* | ||
127 | * We know it's present in vcpu->async_pf.done, do | ||
128 | * nothing here. | ||
129 | */ | ||
130 | if (!work->vcpu) | ||
131 | continue; | ||
132 | |||
133 | spin_unlock(&vcpu->async_pf.lock); | ||
123 | #ifdef CONFIG_KVM_ASYNC_PF_SYNC | 134 | #ifdef CONFIG_KVM_ASYNC_PF_SYNC |
124 | flush_work(&work->work); | 135 | flush_work(&work->work); |
125 | #else | 136 | #else |
@@ -129,9 +140,9 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu) | |||
129 | kmem_cache_free(async_pf_cache, work); | 140 | kmem_cache_free(async_pf_cache, work); |
130 | } | 141 | } |
131 | #endif | 142 | #endif |
143 | spin_lock(&vcpu->async_pf.lock); | ||
132 | } | 144 | } |
133 | 145 | ||
134 | spin_lock(&vcpu->async_pf.lock); | ||
135 | while (!list_empty(&vcpu->async_pf.done)) { | 146 | while (!list_empty(&vcpu->async_pf.done)) { |
136 | struct kvm_async_pf *work = | 147 | struct kvm_async_pf *work = |
137 | list_first_entry(&vcpu->async_pf.done, | 148 | list_first_entry(&vcpu->async_pf.done, |