author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2015-07-27 14:12:39 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2015-07-27 14:12:39 -0400
commit     92311e46ecf2298d87f175c8449ab4d8c400a38e
tree       92bfbbc23c3acfb035a88bf257849465b26aa156
parent     d684779335856d8177514b42a801d46088d897b0
parent     cbfe8fa6cd672011c755c3cd85c9ffd4e2d10a6f
Merge 4.2-rc4 into tty-next
Other serial driver work wants to build on patches that are now in
4.2-rc4, so merge that branch here to let this happen properly.
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
274 files changed, 2107 insertions, 1580 deletions
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt b/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
index c03eec116872..3443e0f838df 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
@@ -35,3 +35,6 @@ the PCIe specification.
 
 NOTE: this only applies to the SMMU itself, not
 masters connected upstream of the SMMU.
+
+- hisilicon,broken-prefetch-cmd
+                    : Avoid sending CMD_PREFETCH_* commands to the SMMU.
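For context, here is a minimal sketch of how a consumer device tree might use the property documented above. The node name, unit address and register size are placeholders invented for illustration; they are not taken from this commit or from any particular platform:

	/* hypothetical SMMUv3 node opting out of prefetch commands */
	smmu@2b400000 {
		compatible = "arm,smmu-v3";
		reg = <0x2b400000 0x20000>;	/* placeholder MMIO window */
		hisilicon,broken-prefetch-cmd;	/* driver will not issue CMD_PREFETCH_* */
	};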
diff --git a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
index 5d0376b8f202..211e7785f4d2 100644
--- a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
+++ b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
@@ -17,7 +17,6 @@ Required properties:
   "fsl,imx6sx-usdhc"
 
 Optional properties:
-- fsl,cd-controller : Indicate to use controller internal card detection
 - fsl,wp-controller : Indicate to use controller internal write protection
 - fsl,delay-line : Specify the number of delay cells for override mode.
   This is used to set the clock delay for DLL(Delay Line) on override mode
@@ -35,7 +34,6 @@ esdhc@70004000 {
 	compatible = "fsl,imx51-esdhc";
 	reg = <0x70004000 0x4000>;
 	interrupts = <1>;
-	fsl,cd-controller;
 	fsl,wp-controller;
 };
 
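As a quick illustration of the binding after this change (mirroring the imx25-pdk.dts hunk later in this diff, so the GPIO numbers are that board's, not a general recommendation), card detect and write protect are now described with explicit GPIO_ACTIVE_* flags from <dt-bindings/gpio/gpio.h> rather than the removed fsl,cd-controller property:

	#include <dt-bindings/gpio/gpio.h>

	&esdhc1 {
		pinctrl-names = "default";
		pinctrl-0 = <&pinctrl_esdhc1>;
		cd-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;	/* card-detect line, active low */
		wp-gpios = <&gpio2 0 GPIO_ACTIVE_HIGH>;	/* write-protect line, active high */
		status = "okay";
	};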
diff --git a/MAINTAINERS b/MAINTAINERS
index a2264167791a..9289ecb57b68 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5899,7 +5899,6 @@ S:	Supported
 F:	Documentation/s390/kvm.txt
 F:	arch/s390/include/asm/kvm*
 F:	arch/s390/kvm/
-F:	drivers/s390/kvm/
 
 KERNEL VIRTUAL MACHINE (KVM) FOR ARM
 M:	Christoffer Dall <christoffer.dall@linaro.org>
@@ -6839,6 +6838,12 @@ T:	git git://linuxtv.org/anttip/media_tree.git
 S:	Maintained
 F:	drivers/media/usb/msi2500/
 
+MSYSTEMS DISKONCHIP G3 MTD DRIVER
+M:	Robert Jarzmik <robert.jarzmik@free.fr>
+L:	linux-mtd@lists.infradead.org
+S:	Maintained
+F:	drivers/mtd/devices/docg3*
+
 MT9M032 APTINA SENSOR DRIVER
 M:	Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 L:	linux-media@vger.kernel.org
@@ -10896,6 +10901,15 @@ F:	drivers/block/virtio_blk.c
 F:	include/linux/virtio_*.h
 F:	include/uapi/linux/virtio_*.h
 
+VIRTIO DRIVERS FOR S390
+M:	Christian Borntraeger <borntraeger@de.ibm.com>
+M:	Cornelia Huck <cornelia.huck@de.ibm.com>
+L:	linux-s390@vger.kernel.org
+L:	virtualization@lists.linux-foundation.org
+L:	kvm@vger.kernel.org
+S:	Supported
+F:	drivers/s390/virtio/
+
 VIRTIO GPU DRIVER
 M:	David Airlie <airlied@linux.ie>
 M:	Gerd Hoffmann <kraxel@redhat.com>
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/imx25-pdk.dts b/arch/arm/boot/dts/imx25-pdk.dts
index dd45e6971bc3..9351296356dc 100644
--- a/arch/arm/boot/dts/imx25-pdk.dts
+++ b/arch/arm/boot/dts/imx25-pdk.dts
@@ -10,6 +10,7 @@
  */
 
 /dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
 #include <dt-bindings/input/input.h>
 #include "imx25.dtsi"
 
@@ -114,8 +115,8 @@
 &esdhc1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_esdhc1>;
-	cd-gpios = <&gpio2 1 0>;
-	wp-gpios = <&gpio2 0 0>;
+	cd-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio2 0 GPIO_ACTIVE_HIGH>;
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/imx51-apf51dev.dts b/arch/arm/boot/dts/imx51-apf51dev.dts
index 93d3ea12328c..0f3fe29b816e 100644
--- a/arch/arm/boot/dts/imx51-apf51dev.dts
+++ b/arch/arm/boot/dts/imx51-apf51dev.dts
@@ -98,7 +98,7 @@
 &esdhc1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_esdhc1>;
-	cd-gpios = <&gpio2 29 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio2 29 GPIO_ACTIVE_LOW>;
 	bus-width = <4>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx53-ard.dts b/arch/arm/boot/dts/imx53-ard.dts
index e9337ad52f59..3bc18835fb4b 100644
--- a/arch/arm/boot/dts/imx53-ard.dts
+++ b/arch/arm/boot/dts/imx53-ard.dts
@@ -103,8 +103,8 @@
 &esdhc1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_esdhc1>;
-	cd-gpios = <&gpio1 1 0>;
-	wp-gpios = <&gpio1 9 0>;
+	cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>;
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/imx53-m53evk.dts b/arch/arm/boot/dts/imx53-m53evk.dts
index d0e0f57eb432..53f40885c530 100644
--- a/arch/arm/boot/dts/imx53-m53evk.dts
+++ b/arch/arm/boot/dts/imx53-m53evk.dts
@@ -124,8 +124,8 @@
 &esdhc1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_esdhc1>;
-	cd-gpios = <&gpio1 1 0>;
-	wp-gpios = <&gpio1 9 0>;
+	cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>;
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/imx53-qsb-common.dtsi b/arch/arm/boot/dts/imx53-qsb-common.dtsi
index ab4ba39f2ed9..b0d5542ac829 100644
--- a/arch/arm/boot/dts/imx53-qsb-common.dtsi
+++ b/arch/arm/boot/dts/imx53-qsb-common.dtsi
@@ -147,8 +147,8 @@
 &esdhc3 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_esdhc3>;
-	cd-gpios = <&gpio3 11 0>;
-	wp-gpios = <&gpio3 12 0>;
+	cd-gpios = <&gpio3 11 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio3 12 GPIO_ACTIVE_HIGH>;
 	bus-width = <8>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx53-smd.dts b/arch/arm/boot/dts/imx53-smd.dts
index 1d325576bcc0..fc89ce1e5763 100644
--- a/arch/arm/boot/dts/imx53-smd.dts
+++ b/arch/arm/boot/dts/imx53-smd.dts
@@ -41,8 +41,8 @@
 &esdhc1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_esdhc1>;
-	cd-gpios = <&gpio3 13 0>;
-	wp-gpios = <&gpio4 11 0>;
+	cd-gpios = <&gpio3 13 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio4 11 GPIO_ACTIVE_HIGH>;
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/imx53-tqma53.dtsi b/arch/arm/boot/dts/imx53-tqma53.dtsi
index 4f1f0e2868bf..e03373a58760 100644
--- a/arch/arm/boot/dts/imx53-tqma53.dtsi
+++ b/arch/arm/boot/dts/imx53-tqma53.dtsi
@@ -41,8 +41,8 @@
 	pinctrl-0 = <&pinctrl_esdhc2>,
 		    <&pinctrl_esdhc2_cdwp>;
 	vmmc-supply = <&reg_3p3v>;
-	wp-gpios = <&gpio1 2 0>;
-	cd-gpios = <&gpio1 4 0>;
+	wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
 	status = "disabled";
 };
 
diff --git a/arch/arm/boot/dts/imx53-tx53.dtsi b/arch/arm/boot/dts/imx53-tx53.dtsi
index 704bd72cbfec..d3e50b22064f 100644
--- a/arch/arm/boot/dts/imx53-tx53.dtsi
+++ b/arch/arm/boot/dts/imx53-tx53.dtsi
@@ -183,7 +183,7 @@
 };
 
 &esdhc1 {
-	cd-gpios = <&gpio3 24 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio3 24 GPIO_ACTIVE_LOW>;
 	fsl,wp-controller;
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_esdhc1>;
@@ -191,7 +191,7 @@
 };
 
 &esdhc2 {
-	cd-gpios = <&gpio3 25 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio3 25 GPIO_ACTIVE_LOW>;
 	fsl,wp-controller;
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_esdhc2>;
diff --git a/arch/arm/boot/dts/imx53-voipac-bsb.dts b/arch/arm/boot/dts/imx53-voipac-bsb.dts
index c17d3ad6dba5..fc51b87ad208 100644
--- a/arch/arm/boot/dts/imx53-voipac-bsb.dts
+++ b/arch/arm/boot/dts/imx53-voipac-bsb.dts
@@ -119,8 +119,8 @@
 &esdhc2 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_esdhc2>;
-	cd-gpios = <&gpio3 25 0>;
-	wp-gpios = <&gpio2 19 0>;
+	cd-gpios = <&gpio3 25 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio2 19 GPIO_ACTIVE_HIGH>;
 	vmmc-supply = <&reg_3p3v>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6dl-riotboard.dts b/arch/arm/boot/dts/imx6dl-riotboard.dts
index 43cb3fd76be7..5111f5170d53 100644
--- a/arch/arm/boot/dts/imx6dl-riotboard.dts
+++ b/arch/arm/boot/dts/imx6dl-riotboard.dts
@@ -305,8 +305,8 @@
 &usdhc2 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc2>;
-	cd-gpios = <&gpio1 4 0>;
-	wp-gpios = <&gpio1 2 0>;
+	cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
 	vmmc-supply = <&reg_3p3v>;
 	status = "okay";
 };
@@ -314,8 +314,8 @@
 &usdhc3 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc3>;
-	cd-gpios = <&gpio7 0 0>;
-	wp-gpios = <&gpio7 1 0>;
+	cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>;
 	vmmc-supply = <&reg_3p3v>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6q-arm2.dts b/arch/arm/boot/dts/imx6q-arm2.dts
index 78df05e9d1ce..d6515f7a56c4 100644
--- a/arch/arm/boot/dts/imx6q-arm2.dts
+++ b/arch/arm/boot/dts/imx6q-arm2.dts
@@ -11,6 +11,7 @@
  */
 
 /dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
 #include "imx6q.dtsi"
 
 / {
@@ -196,8 +197,8 @@
 };
 
 &usdhc3 {
-	cd-gpios = <&gpio6 11 0>;
-	wp-gpios = <&gpio6 14 0>;
+	cd-gpios = <&gpio6 11 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio6 14 GPIO_ACTIVE_HIGH>;
 	vmmc-supply = <&reg_3p3v>;
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc3
diff --git a/arch/arm/boot/dts/imx6q-gk802.dts b/arch/arm/boot/dts/imx6q-gk802.dts
index 703539cf36d3..00bd63e63d0c 100644
--- a/arch/arm/boot/dts/imx6q-gk802.dts
+++ b/arch/arm/boot/dts/imx6q-gk802.dts
@@ -7,6 +7,7 @@
  */
 
 /dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
 #include "imx6q.dtsi"
 
 / {
@@ -161,7 +162,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc3>;
 	bus-width = <4>;
-	cd-gpios = <&gpio6 11 0>;
+	cd-gpios = <&gpio6 11 GPIO_ACTIVE_LOW>;
 	vmmc-supply = <&reg_3p3v>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6q-tbs2910.dts b/arch/arm/boot/dts/imx6q-tbs2910.dts
index a43abfa21e33..5645d52850a7 100644
--- a/arch/arm/boot/dts/imx6q-tbs2910.dts
+++ b/arch/arm/boot/dts/imx6q-tbs2910.dts
@@ -251,7 +251,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc2>;
 	bus-width = <4>;
-	cd-gpios = <&gpio2 2 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
 	vmmc-supply = <&reg_3p3v>;
 	status = "okay";
 };
@@ -260,7 +260,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc3>;
 	bus-width = <4>;
-	cd-gpios = <&gpio2 0 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
 	wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
 	vmmc-supply = <&reg_3p3v>;
 	status = "okay";
diff --git a/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi b/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi
index e6d9195a1da7..f4d6ae564ead 100644
--- a/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi
@@ -173,7 +173,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc1>;
 	vmmc-supply = <&reg_3p3v>;
-	cd-gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio4 7 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
 
@@ -181,7 +181,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc2>;
 	vmmc-supply = <&reg_3p3v>;
-	cd-gpios = <&gpio4 8 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio4 8 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi b/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi
index 1d85de2befb3..a47a0399a172 100644
--- a/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi
@@ -392,7 +392,7 @@
 &usdhc1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc1>;
-	cd-gpios = <&gpio1 27 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>;
 	no-1-8-v;
 	status = "okay";
 };
@@ -400,7 +400,7 @@
 &usdhc2 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc2>;
-	cd-gpios = <&gpio4 5 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio4 5 GPIO_ACTIVE_LOW>;
 	wp-gpios = <&gpio2 10 GPIO_ACTIVE_HIGH>;
 	no-1-8-v;
 	status = "okay";
diff --git a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
index 59e5d15e3ec4..ff41f83551de 100644
--- a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
@@ -258,6 +258,6 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_cubox_i_usdhc2_aux &pinctrl_cubox_i_usdhc2>;
 	vmmc-supply = <&reg_3p3v>;
-	cd-gpios = <&gpio1 4 0>;
+	cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi b/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi
index 2c253d6d20bd..45e7c39e80d5 100644
--- a/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi
@@ -1,3 +1,5 @@
+#include <dt-bindings/gpio/gpio.h>
+
 / {
 	regulators {
 		compatible = "simple-bus";
@@ -181,7 +183,7 @@
 &usdhc2 { /* module slot */
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc2>;
-	cd-gpios = <&gpio2 2 0>;
+	cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
index b5756c21ea1d..4493f6e99330 100644
--- a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
@@ -318,7 +318,7 @@
 &usdhc3 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc3>;
-	cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
 	vmmc-supply = <&reg_3p3v>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
index 86f03c1b147c..a857d1294609 100644
--- a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
@@ -324,7 +324,7 @@
 &usdhc3 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc3>;
-	cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
 	vmmc-supply = <&reg_3p3v>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
index 4a8d97f47759..1afe3385e2d2 100644
--- a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
@@ -417,7 +417,7 @@
 &usdhc3 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc3>;
-	cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
 	vmmc-supply = <&reg_3p3v>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi b/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi
index 62a82f3eba88..6dd0b764e036 100644
--- a/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi
@@ -299,6 +299,6 @@
 		&pinctrl_hummingboard_usdhc2
 	>;
 	vmmc-supply = <&reg_3p3v>;
-	cd-gpios = <&gpio1 4 0>;
+	cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
index 3af16dfe417b..d7fe6672d00c 100644
--- a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
@@ -453,7 +453,7 @@
 &usdhc3 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc3>;
-	cd-gpios = <&gpio7 0 0>;
+	cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
 	vmmc-supply = <&reg_3p3v>;
 	status = "okay";
 };
@@ -461,7 +461,7 @@
 &usdhc4 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc4>;
-	cd-gpios = <&gpio2 6 0>;
+	cd-gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
 	vmmc-supply = <&reg_3p3v>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
index 1ce6133b67f5..9e6ecd99b472 100644
--- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
@@ -409,8 +409,8 @@
 &usdhc2 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc2>;
-	cd-gpios = <&gpio1 4 0>;
-	wp-gpios = <&gpio1 2 0>;
+	cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
 	status = "disabled";
 };
 
@@ -418,7 +418,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc3
 		     &pinctrl_usdhc3_cdwp>;
-	cd-gpios = <&gpio1 27 0>;
-	wp-gpios = <&gpio1 29 0>;
+	cd-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio1 29 GPIO_ACTIVE_HIGH>;
 	status = "disabled";
 };
diff --git a/arch/arm/boot/dts/imx6qdl-rex.dtsi b/arch/arm/boot/dts/imx6qdl-rex.dtsi
index 488a640796ac..3373fd958e95 100644
--- a/arch/arm/boot/dts/imx6qdl-rex.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-rex.dtsi
@@ -342,7 +342,7 @@
 	pinctrl-0 = <&pinctrl_usdhc2>;
 	bus-width = <4>;
 	cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
-	wp-gpios = <&gpio2 3 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>;
 	status = "okay";
 };
 
@@ -351,6 +351,6 @@
 	pinctrl-0 = <&pinctrl_usdhc3>;
 	bus-width = <4>;
 	cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
-	wp-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
index 3b24b12651b2..e329ca5c3322 100644
--- a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
@@ -467,8 +467,8 @@
 	pinctrl-0 = <&pinctrl_usdhc3>;
 	pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
 	pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
-	cd-gpios = <&gpio6 15 0>;
-	wp-gpios = <&gpio1 13 0>;
+	cd-gpios = <&gpio6 15 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>;
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi b/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
index e00c44f6a0df..782379320517 100644
--- a/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
@@ -448,8 +448,8 @@
 &usdhc3 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc3>;
-	cd-gpios = <&gpio7 0 0>;
-	wp-gpios = <&gpio7 1 0>;
+	cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>;
 	vmmc-supply = <&reg_3p3v>;
 	status = "okay";
 };
@@ -457,7 +457,7 @@
 &usdhc4 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc4>;
-	cd-gpios = <&gpio2 6 0>;
+	cd-gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
 	vmmc-supply = <&reg_3p3v>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
index a626e6dd8022..944eb81cb2b8 100644
--- a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
@@ -562,8 +562,8 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc2>;
 	bus-width = <8>;
-	cd-gpios = <&gpio2 2 0>;
-	wp-gpios = <&gpio2 3 0>;
+	cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>;
 	status = "okay";
 };
 
@@ -571,8 +571,8 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc3>;
 	bus-width = <8>;
-	cd-gpios = <&gpio2 0 0>;
-	wp-gpios = <&gpio2 1 0>;
+	cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
 	status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/imx6qdl-tx6.dtsi b/arch/arm/boot/dts/imx6qdl-tx6.dtsi
index f02b80b41d4f..da08de324e9e 100644
--- a/arch/arm/boot/dts/imx6qdl-tx6.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-tx6.dtsi
@@ -680,7 +680,7 @@
 	pinctrl-0 = <&pinctrl_usdhc1>;
 	bus-width = <4>;
 	no-1-8-v;
-	cd-gpios = <&gpio7 2 0>;
+	cd-gpios = <&gpio7 2 GPIO_ACTIVE_LOW>;
 	fsl,wp-controller;
 	status = "okay";
 };
@@ -690,7 +690,7 @@
 	pinctrl-0 = <&pinctrl_usdhc2>;
 	bus-width = <4>;
 	no-1-8-v;
-	cd-gpios = <&gpio7 3 0>;
+	cd-gpios = <&gpio7 3 GPIO_ACTIVE_LOW>;
 	fsl,wp-controller;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
index 5fb091675582..9e096d811bed 100644
--- a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
@@ -9,6 +9,8 @@
  *
  */
 
+#include <dt-bindings/gpio/gpio.h>
+
 / {
 	regulators {
 		compatible = "simple-bus";
@@ -250,13 +252,13 @@
 &usdhc1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc1>;
-	cd-gpios = <&gpio1 2 0>;
+	cd-gpios = <&gpio1 2 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
 
 &usdhc3 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc3>;
-	cd-gpios = <&gpio3 9 0>;
+	cd-gpios = <&gpio3 9 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6sl-evk.dts b/arch/arm/boot/dts/imx6sl-evk.dts
index 945887d3fdb3..b84dff2e94ea 100644
--- a/arch/arm/boot/dts/imx6sl-evk.dts
+++ b/arch/arm/boot/dts/imx6sl-evk.dts
@@ -617,8 +617,8 @@
 	pinctrl-1 = <&pinctrl_usdhc1_100mhz>;
 	pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
 	bus-width = <8>;
-	cd-gpios = <&gpio4 7 0>;
-	wp-gpios = <&gpio4 6 0>;
+	cd-gpios = <&gpio4 7 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>;
 	status = "okay";
 };
 
@@ -627,8 +627,8 @@
 	pinctrl-0 = <&pinctrl_usdhc2>;
 	pinctrl-1 = <&pinctrl_usdhc2_100mhz>;
 	pinctrl-2 = <&pinctrl_usdhc2_200mhz>;
-	cd-gpios = <&gpio5 0 0>;
-	wp-gpios = <&gpio4 29 0>;
+	cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio4 29 GPIO_ACTIVE_HIGH>;
 	status = "okay";
 };
 
@@ -637,6 +637,6 @@
 	pinctrl-0 = <&pinctrl_usdhc3>;
 	pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
 	pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
-	cd-gpios = <&gpio3 22 0>;
+	cd-gpios = <&gpio3 22 GPIO_ACTIVE_LOW>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx6sx-sabreauto.dts b/arch/arm/boot/dts/imx6sx-sabreauto.dts
index e3c0b63c2205..115f3fd78971 100644
--- a/arch/arm/boot/dts/imx6sx-sabreauto.dts
+++ b/arch/arm/boot/dts/imx6sx-sabreauto.dts
@@ -49,7 +49,7 @@
 	pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
 	pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
 	bus-width = <8>;
-	cd-gpios = <&gpio7 10 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio7 10 GPIO_ACTIVE_LOW>;
 	wp-gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>;
 	keep-power-in-suspend;
 	enable-sdio-wakeup;
@@ -61,7 +61,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc4>;
 	bus-width = <8>;
-	cd-gpios = <&gpio7 11 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio7 11 GPIO_ACTIVE_LOW>;
 	no-1-8-v;
 	keep-power-in-suspend;
 	enable-sdio-wakup;
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dtsi b/arch/arm/boot/dts/imx6sx-sdb.dtsi
index cef04cef3a80..ac88c3467078 100644
--- a/arch/arm/boot/dts/imx6sx-sdb.dtsi
+++ b/arch/arm/boot/dts/imx6sx-sdb.dtsi
@@ -293,7 +293,7 @@
 	pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
 	pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
 	bus-width = <8>;
-	cd-gpios = <&gpio2 10 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio2 10 GPIO_ACTIVE_LOW>;
 	wp-gpios = <&gpio2 15 GPIO_ACTIVE_HIGH>;
 	keep-power-in-suspend;
 	enable-sdio-wakeup;
@@ -304,7 +304,7 @@
 &usdhc4 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc4>;
-	cd-gpios = <&gpio6 21 GPIO_ACTIVE_HIGH>;
+	cd-gpios = <&gpio6 21 GPIO_ACTIVE_LOW>;
 	wp-gpios = <&gpio6 20 GPIO_ACTIVE_HIGH>;
 	status = "okay";
 };
diff --git a/arch/arm/boot/dts/imx7d-sdb.dts b/arch/arm/boot/dts/imx7d-sdb.dts
index 4d1a4b977d84..fdd1d7c9a5cc 100644
--- a/arch/arm/boot/dts/imx7d-sdb.dts
+++ b/arch/arm/boot/dts/imx7d-sdb.dts
@@ -234,8 +234,8 @@
 &usdhc1 {
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_usdhc1>;
-	cd-gpios = <&gpio5 0 0>;
-	wp-gpios = <&gpio5 1 0>;
+	cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio5 1 GPIO_ACTIVE_HIGH>;
 	enable-sdio-wakeup;
 	keep-power-in-suspend;
 	status = "okay";
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 4550d247e308..c011e2296cb1 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -74,32 +74,52 @@ struct jit_ctx {
 
 int bpf_jit_enable __read_mostly;
 
-static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
+static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
+				  unsigned int size)
+{
+	void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);
+
+	if (!ptr)
+		return -EFAULT;
+	memcpy(ret, ptr, size);
+	return 0;
+}
+
+static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
 {
 	u8 ret;
 	int err;
 
-	err = skb_copy_bits(skb, offset, &ret, 1);
+	if (offset < 0)
+		err = call_neg_helper(skb, offset, &ret, 1);
+	else
+		err = skb_copy_bits(skb, offset, &ret, 1);
 
 	return (u64)err << 32 | ret;
 }
 
-static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset)
+static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
 {
 	u16 ret;
 	int err;
 
-	err = skb_copy_bits(skb, offset, &ret, 2);
+	if (offset < 0)
+		err = call_neg_helper(skb, offset, &ret, 2);
+	else
+		err = skb_copy_bits(skb, offset, &ret, 2);
 
 	return (u64)err << 32 | ntohs(ret);
 }
 
-static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
+static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
 {
 	u32 ret;
 	int err;
 
-	err = skb_copy_bits(skb, offset, &ret, 4);
+	if (offset < 0)
+		err = call_neg_helper(skb, offset, &ret, 4);
+	else
+		err = skb_copy_bits(skb, offset, &ret, 4);
 
 	return (u64)err << 32 | ntohl(ret);
 }
@@ -536,9 +556,6 @@ static int build_body(struct jit_ctx *ctx)
 		case BPF_LD | BPF_B | BPF_ABS:
 			load_order = 0;
 load:
-			/* the interpreter will deal with the negative K */
-			if ((int)k < 0)
-				return -ENOTSUPP;
 			emit_mov_i(r_off, k, ctx);
 load_common:
 			ctx->seen |= SEEN_DATA | SEEN_CALL;
@@ -547,12 +564,24 @@ load_common:
 				emit(ARM_SUB_I(r_scratch, r_skb_hl,
 					       1 << load_order), ctx);
 				emit(ARM_CMP_R(r_scratch, r_off), ctx);
-				condt = ARM_COND_HS;
+				condt = ARM_COND_GE;
 			} else {
 				emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
 				condt = ARM_COND_HI;
 			}
 
+			/*
+			 * test for negative offset, only if we are
+			 * currently scheduled to take the fast
+			 * path. this will update the flags so that
+			 * the slowpath instruction are ignored if the
+			 * offset is negative.
+			 *
+			 * for loard_order == 0 the HI condition will
+			 * make loads at offset 0 take the slow path too.
+			 */
+			_emit(condt, ARM_CMP_I(r_off, 0), ctx);
+
 			_emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
 			      ctx);
 
@@ -860,9 +889,11 @@ b_epilogue:
 			off = offsetof(struct sk_buff, vlan_tci);
 			emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
 			if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
-				OP_IMM3(ARM_AND, r_A, r_A, VLAN_VID_MASK, ctx);
-			else
-				OP_IMM3(ARM_AND, r_A, r_A, VLAN_TAG_PRESENT, ctx);
+				OP_IMM3(ARM_AND, r_A, r_A, ~VLAN_TAG_PRESENT, ctx);
+			else {
+				OP_IMM3(ARM_LSR, r_A, r_A, 12, ctx);
+				OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx);
+			}
 			break;
 		case BPF_ANC | SKF_AD_QUEUE:
 			ctx->seen |= SEEN_SKB;
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index f860bfda454a..e16351819fed 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -585,7 +585,8 @@ ENDPROC(el0_irq)
  *
  */
 ENTRY(cpu_switch_to)
-	add	x8, x0, #THREAD_CPU_CONTEXT
+	mov	x10, #THREAD_CPU_CONTEXT
+	add	x8, x0, x10
 	mov	x9, sp
 	stp	x19, x20, [x8], #16		// store callee-saved registers
 	stp	x21, x22, [x8], #16
@@ -594,7 +595,7 @@ ENTRY(cpu_switch_to)
 	stp	x27, x28, [x8], #16
 	stp	x29, x9, [x8], #16
 	str	lr, [x8]
-	add	x8, x1, #THREAD_CPU_CONTEXT
+	add	x8, x1, x10
 	ldp	x19, x20, [x8], #16		// restore callee-saved registers
 	ldp	x21, x22, [x8], #16
 	ldp	x23, x24, [x8], #16
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 240b75c0e94f..463fa2e7e34c 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -61,7 +61,7 @@ void __init init_IRQ(void)
 static bool migrate_one_irq(struct irq_desc *desc)
 {
 	struct irq_data *d = irq_desc_get_irq_data(desc);
-	const struct cpumask *affinity = d->affinity;
+	const struct cpumask *affinity = irq_data_get_affinity_mask(d);
 	struct irq_chip *c;
 	bool ret = false;
 
@@ -81,7 +81,7 @@ static bool migrate_one_irq(struct irq_desc *desc)
 	if (!c->irq_set_affinity)
 		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
 	else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
-		cpumask_copy(d->affinity, affinity);
+		cpumask_copy(irq_data_get_affinity_mask(d), affinity);
 
 	return ret;
 }
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c
index d0f771be9e96..a124c55733db 100644
--- a/arch/avr32/kernel/time.c
+++ b/arch/avr32/kernel/time.c
@@ -18,6 +18,7 @@
 
 #include <mach/pm.h>
 
+static bool disable_cpu_idle_poll;
 
 static cycle_t read_cycle_count(struct clocksource *cs)
 {
@@ -80,45 +81,45 @@ static int comparator_next_event(unsigned long delta,
 	return 0;
 }
 
-static void comparator_mode(enum clock_event_mode mode,
-			    struct clock_event_device *evdev)
+static int comparator_shutdown(struct clock_event_device *evdev)
 {
-	switch (mode) {
-	case CLOCK_EVT_MODE_ONESHOT:
-		pr_debug("%s: start\n", evdev->name);
-		/* FALLTHROUGH */
-	case CLOCK_EVT_MODE_RESUME:
+	pr_debug("%s: %s\n", __func__, evdev->name);
+	sysreg_write(COMPARE, 0);
+
+	if (disable_cpu_idle_poll) {
+		disable_cpu_idle_poll = false;
 		/*
-		 * If we're using the COUNT and COMPARE registers we
-		 * need to force idle poll.
+		 * Only disable idle poll if we have forced that
+		 * in a previous call.
 		 */
-		cpu_idle_poll_ctrl(true);
-		break;
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		sysreg_write(COMPARE, 0);
-		pr_debug("%s: stop\n", evdev->name);
-		if (evdev->mode == CLOCK_EVT_MODE_ONESHOT ||
-		    evdev->mode == CLOCK_EVT_MODE_RESUME) {
-			/*
-			 * Only disable idle poll if we have forced that
-			 * in a previous call.
-			 */
-			cpu_idle_poll_ctrl(false);
-		}
-		break;
-	default:
-		BUG();
+		cpu_idle_poll_ctrl(false);
 	}
+	return 0;
+}
+
+static int comparator_set_oneshot(struct clock_event_device *evdev)
+{
+	pr_debug("%s: %s\n", __func__, evdev->name);
+
+	disable_cpu_idle_poll = true;
+	/*
+	 * If we're using the COUNT and COMPARE registers we
+	 * need to force idle poll.
+	 */
+	cpu_idle_poll_ctrl(true);
+
+	return 0;
 }
 
 static struct clock_event_device comparator = {
 	.name = "avr32_comparator",
 	.features = CLOCK_EVT_FEAT_ONESHOT,
 	.shift = 16,
 	.rating = 50,
 	.set_next_event = comparator_next_event,
-	.set_mode = comparator_mode,
+	.set_state_shutdown = comparator_shutdown,
+	.set_state_oneshot = comparator_set_oneshot,
+	.tick_resume = comparator_set_oneshot,
 };
 
 void read_persistent_clock(struct timespec *ts)
diff --git a/arch/m32r/include/asm/io.h b/arch/m32r/include/asm/io.h
index 0c3f25ee3381..f8de767ce2bc 100644
--- a/arch/m32r/include/asm/io.h
+++ b/arch/m32r/include/asm/io.h
@@ -174,6 +174,11 @@ static inline void _writel(unsigned long l, unsigned long addr)
 #define iowrite16 writew
 #define iowrite32 writel
 
+#define ioread16be(addr) be16_to_cpu(readw(addr))
+#define ioread32be(addr) be32_to_cpu(readl(addr))
+#define iowrite16be(v, addr) writew(cpu_to_be16(v), (addr))
+#define iowrite32be(v, addr) writel(cpu_to_be32(v), (addr))
+
 #define mmiowb()
 
 #define flush_write_buffers() do { } while (0)  /* M32R_FIXME */
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index c7d1b9d09011..a2da259d9327 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -23,15 +23,15 @@
 
 int main(void)
 {
-	DEFINE(__THREAD_info, offsetof(struct task_struct, stack));
-	DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp));
-	DEFINE(__THREAD_mm_segment, offsetof(struct task_struct, thread.mm_segment));
-	BLANK();
+	DEFINE(__TASK_thread_info, offsetof(struct task_struct, stack));
+	DEFINE(__TASK_thread, offsetof(struct task_struct, thread));
 	DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
 	BLANK();
-	DEFINE(__THREAD_per_cause, offsetof(struct task_struct, thread.per_event.cause));
-	DEFINE(__THREAD_per_address, offsetof(struct task_struct, thread.per_event.address));
-	DEFINE(__THREAD_per_paid, offsetof(struct task_struct, thread.per_event.paid));
+	DEFINE(__THREAD_ksp, offsetof(struct thread_struct, ksp));
+	DEFINE(__THREAD_per_cause, offsetof(struct thread_struct, per_event.cause));
+	DEFINE(__THREAD_per_address, offsetof(struct thread_struct, per_event.address));
+	DEFINE(__THREAD_per_paid, offsetof(struct thread_struct, per_event.paid));
+	DEFINE(__THREAD_trap_tdb, offsetof(struct thread_struct, trap_tdb));
 	BLANK();
 	DEFINE(__TI_task, offsetof(struct thread_info, task));
 	DEFINE(__TI_flags, offsetof(struct thread_info, flags));
@@ -176,7 +176,6 @@ int main(void)
 	DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
 	DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
 	DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb));
-	DEFINE(__THREAD_trap_tdb, offsetof(struct task_struct, thread.trap_tdb));
 	DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce));
 	DEFINE(__SIE_PROG0C, offsetof(struct kvm_s390_sie_block, prog0c));
 	DEFINE(__SIE_PROG20, offsetof(struct kvm_s390_sie_block, prog20));
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 3238893c9d4f..84062e7a77da 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -178,17 +178,21 @@ _PIF_WORK = (_PIF_PER_TRAP)
  */
 ENTRY(__switch_to)
 	stmg	%r6,%r15,__SF_GPRS(%r15)	# store gprs of prev task
-	stg	%r15,__THREAD_ksp(%r2)		# store kernel stack of prev
-	lg	%r4,__THREAD_info(%r2)		# get thread_info of prev
-	lg	%r5,__THREAD_info(%r3)		# get thread_info of next
+	lgr	%r1,%r2
+	aghi	%r1,__TASK_thread		# thread_struct of prev task
+	lg	%r4,__TASK_thread_info(%r2)	# get thread_info of prev
+	lg	%r5,__TASK_thread_info(%r3)	# get thread_info of next
+	stg	%r15,__THREAD_ksp(%r1)		# store kernel stack of prev
+	lgr	%r1,%r3
+	aghi	%r1,__TASK_thread		# thread_struct of next task
 	lgr	%r15,%r5
 	aghi	%r15,STACK_INIT			# end of kernel stack of next
 	stg	%r3,__LC_CURRENT		# store task struct of next
 	stg	%r5,__LC_THREAD_INFO		# store thread info of next
 	stg	%r15,__LC_KERNEL_STACK		# store end of kernel stack
+	lg	%r15,__THREAD_ksp(%r1)		# load kernel stack of next
 	lctl	%c4,%c4,__TASK_pid(%r3)		# load pid to control reg. 4
 	mvc	__LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next
-	lg	%r15,__THREAD_ksp(%r3)		# load kernel stack of next
 	lmg	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
 	br	%r14
 
@@ -417,6 +421,7 @@ ENTRY(pgm_check_handler)
 	LAST_BREAK %r14
 	lg	%r15,__LC_KERNEL_STACK
 	lg	%r14,__TI_task(%r12)
+	aghi	%r14,__TASK_thread	# pointer to thread_struct
 	lghi	%r13,__LC_PGM_TDB
 	tm	__LC_PGM_ILC+2,0x02	# check for transaction abort
 	jz	2f
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 4d96c9f53455..7bea81d8a363 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -259,7 +259,7 @@ void vector_exception(struct pt_regs *regs)
 	}
 
 	/* get vector interrupt code from fpc */
-	asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
+	asm volatile("stfpc %0" : "=Q" (current->thread.fp_regs.fpc));
 	vic = (current->thread.fp_regs.fpc & 0xf00) >> 8;
 	switch (vic) {
 	case 1: /* invalid vector operation */
@@ -297,7 +297,7 @@ void data_exception(struct pt_regs *regs)
 
 	location = get_trap_ip(regs);
 
-	asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));
+	asm volatile("stfpc %0" : "=Q" (current->thread.fp_regs.fpc));
 	/* Check for vector register enablement */
 	if (MACHINE_HAS_VX && !current->thread.vxrs &&
 	    (current->thread.fp_regs.fpc & FPC_DXC_MASK) == 0xfe00) {
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 99c9ff87e018..6b755d125783 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -1139,7 +1139,7 @@ static void __init load_hv_initrd(void)
 
 void __init free_initrd_mem(unsigned long begin, unsigned long end)
 {
-	free_bootmem(__pa(begin), end - begin);
+	free_bootmem_late(__pa(begin), end - begin);
 }
 
 static int __init setup_initrd(char *str)
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index bb187a6a877c..5a1844765a7a 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -205,7 +205,6 @@ sysexit_from_sys_call:
 	movl	RDX(%rsp), %edx		/* arg3 */
 	movl	RSI(%rsp), %ecx		/* arg4 */
 	movl	RDI(%rsp), %r8d		/* arg5 */
-	movl	%ebp, %r9d		/* arg6 */
 	.endm
 
 	.macro auditsys_exit exit
@@ -236,6 +235,7 @@ sysexit_from_sys_call:
 
 sysenter_auditsys:
 	auditsys_entry_common
+	movl	%ebp, %r9d		/* reload 6th syscall arg */
 	jmp	sysenter_dispatch
 
 sysexit_audit:
@@ -336,7 +336,7 @@ ENTRY(entry_SYSCALL_compat)
 	 * 32-bit zero extended:
 	 */
 	ASM_STAC
-1:	movl	(%r8), %ebp
+1:	movl	(%r8), %r9d
 	_ASM_EXTABLE(1b, ia32_badarg)
 	ASM_CLAC
 	orl	$TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
@@ -346,7 +346,7 @@ ENTRY(entry_SYSCALL_compat)
 cstar_do_call:
 	/* 32-bit syscall -> 64-bit C ABI argument conversion */
 	movl	%edi, %r8d		/* arg5 */
-	movl	%ebp, %r9d		/* arg6 */
+	/* r9 already loaded */		/* arg6 */
 	xchg	%ecx, %esi		/* rsi:arg2, rcx:arg4 */
 	movl	%ebx, %edi		/* arg1 */
 	movl	%edx, %edx		/* arg3 (zero extension) */
@@ -358,7 +358,6 @@ cstar_dispatch:
 	call	*ia32_sys_call_table(, %rax, 8)
 	movq	%rax, RAX(%rsp)
 1:
-	movl	RCX(%rsp), %ebp
 	DISABLE_INTERRUPTS(CLBR_NONE)
 	TRACE_IRQS_OFF
 	testl	$_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
@@ -392,7 +391,9 @@ sysretl_from_sys_call:
 
 #ifdef CONFIG_AUDITSYSCALL
 cstar_auditsys:
+	movl	%r9d, R9(%rsp)		/* register to be clobbered by call */
 	auditsys_entry_common
+	movl	R9(%rsp), %r9d		/* reload 6th syscall arg */
 	jmp	cstar_dispatch
 
 sysretl_audit:
@@ -404,14 +405,16 @@ cstar_tracesys:
 	testl	$(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
 	jz	cstar_auditsys
 #endif
+	xchgl	%r9d, %ebp
 	SAVE_EXTRA_REGS
 	xorl	%eax, %eax		/* Do not leak kernel information */
 	movq	%rax, R11(%rsp)
 	movq	%rax, R10(%rsp)
-	movq	%rax, R9(%rsp)
+	movq	%r9, R9(%rsp)
 	movq	%rax, R8(%rsp)
 	movq	%rsp, %rdi		/* &pt_regs -> arg1 */
 	call	syscall_trace_enter
+	movl	R9(%rsp), %r9d
 
 	/* Reload arg registers from stack. (see sysenter_tracesys) */
 	movl	RCX(%rsp), %ecx
@@ -421,6 +424,7 @@ cstar_tracesys:
 	movl	%eax, %eax		/* zero extension */
 
 	RESTORE_EXTRA_REGS
+	xchgl	%ebp, %r9d
 	jmp	cstar_do_call
 END(entry_SYSCALL_compat)
 
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h index a4ae82eb82aa..cd54147cb365 100644 --- a/arch/x86/include/uapi/asm/kvm.h +++ b/arch/x86/include/uapi/asm/kvm.h | |||
@@ -354,7 +354,7 @@ struct kvm_xcrs { | |||
354 | struct kvm_sync_regs { | 354 | struct kvm_sync_regs { |
355 | }; | 355 | }; |
356 | 356 | ||
357 | #define KVM_QUIRK_LINT0_REENABLED (1 << 0) | 357 | #define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0) |
358 | #define KVM_QUIRK_CD_NW_CLEARED (1 << 1) | 358 | #define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1) |
359 | 359 | ||
360 | #endif /* _ASM_X86_KVM_H */ | 360 | #endif /* _ASM_X86_KVM_H */ |
diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c index 188076161c1b..63eb68b73589 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c +++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c | |||
@@ -952,6 +952,14 @@ static u64 intel_cqm_event_count(struct perf_event *event) | |||
952 | return 0; | 952 | return 0; |
953 | 953 | ||
954 | /* | 954 | /* |
955 | * Getting up-to-date values requires an SMP IPI which is not | ||
956 | * possible if we're being called in interrupt context. Return | ||
957 | * the cached values instead. | ||
958 | */ | ||
959 | if (unlikely(in_interrupt())) | ||
960 | goto out; | ||
961 | |||
962 | /* | ||
955 | * Notice that we don't perform the reading of an RMID | 963 | * Notice that we don't perform the reading of an RMID |
956 | * atomically, because we can't hold a spin lock across the | 964 | * atomically, because we can't hold a spin lock across the |
957 | * IPIs. | 965 | * IPIs. |
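The intel_cqm hunk above adds an early exit because an up-to-date RMID count requires sending an IPI, which cannot be done from interrupt context. A minimal sketch of the resulting shape of the read path (what the 'out' label ultimately returns is an assumption based on the surrounding code):

    /* sketch: fresh counts need a cross-CPU IPI; from IRQ context we can
     * only hand back whatever value was cached at the last update */
    if (unlikely(in_interrupt()))
        goto out;        /* 'out' falls through to the cached count */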
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c index 0b39173dd971..1e173f6285c7 100644 --- a/arch/x86/kernel/fpu/init.c +++ b/arch/x86/kernel/fpu/init.c | |||
@@ -351,9 +351,15 @@ static int __init x86_noxsave_setup(char *s) | |||
351 | 351 | ||
352 | setup_clear_cpu_cap(X86_FEATURE_XSAVE); | 352 | setup_clear_cpu_cap(X86_FEATURE_XSAVE); |
353 | setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT); | 353 | setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT); |
354 | setup_clear_cpu_cap(X86_FEATURE_XSAVEC); | ||
354 | setup_clear_cpu_cap(X86_FEATURE_XSAVES); | 355 | setup_clear_cpu_cap(X86_FEATURE_XSAVES); |
355 | setup_clear_cpu_cap(X86_FEATURE_AVX); | 356 | setup_clear_cpu_cap(X86_FEATURE_AVX); |
356 | setup_clear_cpu_cap(X86_FEATURE_AVX2); | 357 | setup_clear_cpu_cap(X86_FEATURE_AVX2); |
358 | setup_clear_cpu_cap(X86_FEATURE_AVX512F); | ||
359 | setup_clear_cpu_cap(X86_FEATURE_AVX512PF); | ||
360 | setup_clear_cpu_cap(X86_FEATURE_AVX512ER); | ||
361 | setup_clear_cpu_cap(X86_FEATURE_AVX512CD); | ||
362 | setup_clear_cpu_cap(X86_FEATURE_MPX); | ||
357 | 363 | ||
358 | return 1; | 364 | return 1; |
359 | } | 365 | } |
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 954e98a8c2e3..2a5ca97c263b 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c | |||
@@ -1595,7 +1595,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event) | |||
1595 | for (i = 0; i < APIC_LVT_NUM; i++) | 1595 | for (i = 0; i < APIC_LVT_NUM; i++) |
1596 | apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED); | 1596 | apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED); |
1597 | apic_update_lvtt(apic); | 1597 | apic_update_lvtt(apic); |
1598 | if (!(vcpu->kvm->arch.disabled_quirks & KVM_QUIRK_LINT0_REENABLED)) | 1598 | if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED)) |
1599 | apic_set_reg(apic, APIC_LVT0, | 1599 | apic_set_reg(apic, APIC_LVT0, |
1600 | SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT)); | 1600 | SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT)); |
1601 | apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0)); | 1601 | apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0)); |
diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c index de1d2d8062e2..dc0a84a6f309 100644 --- a/arch/x86/kvm/mtrr.c +++ b/arch/x86/kvm/mtrr.c | |||
@@ -120,6 +120,16 @@ static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state) | |||
120 | return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK; | 120 | return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK; |
121 | } | 121 | } |
122 | 122 | ||
123 | static u8 mtrr_disabled_type(void) | ||
124 | { | ||
125 | /* | ||
126 | * Intel SDM 11.11.2.2: all MTRRs are disabled when | ||
127 | * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC | ||
128 | * memory type is applied to all of physical memory. | ||
129 | */ | ||
130 | return MTRR_TYPE_UNCACHABLE; | ||
131 | } | ||
132 | |||
123 | /* | 133 | /* |
124 | * Three terms are used in the following code: | 134 | * Three terms are used in the following code: |
125 | * - segment, it indicates the address segments covered by fixed MTRRs. | 135 | * - segment, it indicates the address segments covered by fixed MTRRs. |
@@ -434,6 +444,8 @@ struct mtrr_iter { | |||
434 | 444 | ||
435 | /* output fields. */ | 445 | /* output fields. */ |
436 | int mem_type; | 446 | int mem_type; |
447 | /* mtrr is completely disabled? */ | ||
448 | bool mtrr_disabled; | ||
437 | /* [start, end) is not fully covered in MTRRs? */ | 449 | /* [start, end) is not fully covered in MTRRs? */ |
438 | bool partial_map; | 450 | bool partial_map; |
439 | 451 | ||
@@ -549,7 +561,7 @@ static void mtrr_lookup_var_next(struct mtrr_iter *iter) | |||
549 | static void mtrr_lookup_start(struct mtrr_iter *iter) | 561 | static void mtrr_lookup_start(struct mtrr_iter *iter) |
550 | { | 562 | { |
551 | if (!mtrr_is_enabled(iter->mtrr_state)) { | 563 | if (!mtrr_is_enabled(iter->mtrr_state)) { |
552 | iter->partial_map = true; | 564 | iter->mtrr_disabled = true; |
553 | return; | 565 | return; |
554 | } | 566 | } |
555 | 567 | ||
@@ -563,6 +575,7 @@ static void mtrr_lookup_init(struct mtrr_iter *iter, | |||
563 | iter->mtrr_state = mtrr_state; | 575 | iter->mtrr_state = mtrr_state; |
564 | iter->start = start; | 576 | iter->start = start; |
565 | iter->end = end; | 577 | iter->end = end; |
578 | iter->mtrr_disabled = false; | ||
566 | iter->partial_map = false; | 579 | iter->partial_map = false; |
567 | iter->fixed = false; | 580 | iter->fixed = false; |
568 | iter->range = NULL; | 581 | iter->range = NULL; |
@@ -656,15 +669,19 @@ u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn) | |||
656 | return MTRR_TYPE_WRBACK; | 669 | return MTRR_TYPE_WRBACK; |
657 | } | 670 | } |
658 | 671 | ||
659 | /* It is not covered by MTRRs. */ | 672 | if (iter.mtrr_disabled) |
660 | if (iter.partial_map) { | 673 | return mtrr_disabled_type(); |
661 | /* | 674 | |
662 | * We just check one page, partially covered by MTRRs is | 675 | /* |
663 | * impossible. | 676 | * We just check one page, partially covered by MTRRs is |
664 | */ | 677 | * impossible. |
665 | WARN_ON(type != -1); | 678 | */ |
666 | type = mtrr_default_type(mtrr_state); | 679 | WARN_ON(iter.partial_map); |
667 | } | 680 | |
681 | /* not contained in any MTRRs. */ | ||
682 | if (type == -1) | ||
683 | return mtrr_default_type(mtrr_state); | ||
684 | |||
668 | return type; | 685 | return type; |
669 | } | 686 | } |
670 | EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type); | 687 | EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type); |
@@ -689,6 +706,9 @@ bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, | |||
689 | return false; | 706 | return false; |
690 | } | 707 | } |
691 | 708 | ||
709 | if (iter.mtrr_disabled) | ||
710 | return true; | ||
711 | |||
692 | if (!iter.partial_map) | 712 | if (!iter.partial_map) |
693 | return true; | 713 | return true; |
694 | 714 | ||
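Taken together, the mtrr.c hunks distinguish three cases that the old code folded into one. A compact, hypothetical summary of the decision order in kvm_mtrr_get_guest_memory_type() after this patch (simplified; constants are the ones used in the hunks):

    /* illustrative summary only */
    if (iter.mtrr_disabled)
        return MTRR_TYPE_UNCACHABLE;           /* SDM 11.11.2.2: E bit clear => UC */
    WARN_ON(iter.partial_map);                 /* a single page can't be half-covered */
    if (type == -1)
        return mtrr_default_type(mtrr_state);  /* enabled, but no range matched */
    return type;                               /* covered by a fixed or variable MTRR */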
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index bbc678a66b18..8e0c0844c6b9 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -1672,7 +1672,7 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) | |||
1672 | * does not do it - this results in some delay at | 1672 | * does not do it - this results in some delay at |
1673 | * reboot | 1673 | * reboot |
1674 | */ | 1674 | */ |
1675 | if (!(vcpu->kvm->arch.disabled_quirks & KVM_QUIRK_CD_NW_CLEARED)) | 1675 | if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) |
1676 | cr0 &= ~(X86_CR0_CD | X86_CR0_NW); | 1676 | cr0 &= ~(X86_CR0_CD | X86_CR0_NW); |
1677 | svm->vmcb->save.cr0 = cr0; | 1677 | svm->vmcb->save.cr0 = cr0; |
1678 | mark_dirty(svm->vmcb, VMCB_CR); | 1678 | mark_dirty(svm->vmcb, VMCB_CR); |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 5b4e9384717a..83b7b5cd75d5 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -8650,7 +8650,10 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio) | |||
8650 | 8650 | ||
8651 | if (kvm_read_cr0(vcpu) & X86_CR0_CD) { | 8651 | if (kvm_read_cr0(vcpu) & X86_CR0_CD) { |
8652 | ipat = VMX_EPT_IPAT_BIT; | 8652 | ipat = VMX_EPT_IPAT_BIT; |
8653 | cache = MTRR_TYPE_UNCACHABLE; | 8653 | if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED)) |
8654 | cache = MTRR_TYPE_WRBACK; | ||
8655 | else | ||
8656 | cache = MTRR_TYPE_UNCACHABLE; | ||
8654 | goto exit; | 8657 | goto exit; |
8655 | } | 8658 | } |
8656 | 8659 | ||
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index edc8cdcd786b..0ca2f3e4803c 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h | |||
@@ -147,6 +147,11 @@ static inline void kvm_register_writel(struct kvm_vcpu *vcpu, | |||
147 | return kvm_register_write(vcpu, reg, val); | 147 | return kvm_register_write(vcpu, reg, val); |
148 | } | 148 | } |
149 | 149 | ||
150 | static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk) | ||
151 | { | ||
152 | return !(kvm->arch.disabled_quirks & quirk); | ||
153 | } | ||
154 | |||
150 | void kvm_before_handle_nmi(struct kvm_vcpu *vcpu); | 155 | void kvm_before_handle_nmi(struct kvm_vcpu *vcpu); |
151 | void kvm_after_handle_nmi(struct kvm_vcpu *vcpu); | 156 | void kvm_after_handle_nmi(struct kvm_vcpu *vcpu); |
152 | void kvm_set_pending_timer(struct kvm_vcpu *vcpu); | 157 | void kvm_set_pending_timer(struct kvm_vcpu *vcpu); |
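The new kvm_check_has_quirk() helper inverts the old open-coded test: it returns true while a legacy quirk is still in effect, i.e. while userspace has not listed it in disabled_quirks. A minimal usage sketch mirroring the lapic.c and svm.c call sites above:

    /* true while the quirk is active (not disabled by userspace) */
    if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
        cr0 &= ~(X86_CR0_CD | X86_CR0_NW);   /* keep the legacy behaviour */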
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index cc5ccc415cc0..b9c78f3bcd67 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c | |||
@@ -63,8 +63,6 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages, | |||
63 | !PageReserved(pfn_to_page(start_pfn + i))) | 63 | !PageReserved(pfn_to_page(start_pfn + i))) |
64 | return 1; | 64 | return 1; |
65 | 65 | ||
66 | WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn); | ||
67 | |||
68 | return 0; | 66 | return 0; |
69 | } | 67 | } |
70 | 68 | ||
@@ -94,7 +92,6 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr, | |||
94 | pgprot_t prot; | 92 | pgprot_t prot; |
95 | int retval; | 93 | int retval; |
96 | void __iomem *ret_addr; | 94 | void __iomem *ret_addr; |
97 | int ram_region; | ||
98 | 95 | ||
99 | /* Don't allow wraparound or zero size */ | 96 | /* Don't allow wraparound or zero size */ |
100 | last_addr = phys_addr + size - 1; | 97 | last_addr = phys_addr + size - 1; |
@@ -117,23 +114,15 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr, | |||
117 | /* | 114 | /* |
118 | * Don't allow anybody to remap normal RAM that we're using.. | 115 | * Don't allow anybody to remap normal RAM that we're using.. |
119 | */ | 116 | */ |
120 | /* First check if whole region can be identified as RAM or not */ | 117 | pfn = phys_addr >> PAGE_SHIFT; |
121 | ram_region = region_is_ram(phys_addr, size); | 118 | last_pfn = last_addr >> PAGE_SHIFT; |
122 | if (ram_region > 0) { | 119 | if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL, |
123 | WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n", | 120 | __ioremap_check_ram) == 1) { |
124 | (unsigned long int)phys_addr, | 121 | WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n", |
125 | (unsigned long int)last_addr); | 122 | &phys_addr, &last_addr); |
126 | return NULL; | 123 | return NULL; |
127 | } | 124 | } |
128 | 125 | ||
129 | /* If could not be identified(-1), check page by page */ | ||
130 | if (ram_region < 0) { | ||
131 | pfn = phys_addr >> PAGE_SHIFT; | ||
132 | last_pfn = last_addr >> PAGE_SHIFT; | ||
133 | if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL, | ||
134 | __ioremap_check_ram) == 1) | ||
135 | return NULL; | ||
136 | } | ||
137 | /* | 126 | /* |
138 | * Mappings have to be page-aligned | 127 | * Mappings have to be page-aligned |
139 | */ | 128 | */ |
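The ioremap.c hunk drops the region_is_ram() fast path and always walks the resource tree page by page, warning and refusing the mapping as soon as any page in the range is usable RAM. A condensed sketch of the check as it stands after the patch (taken directly from the hunk):

    /* refuse to ioremap anything that overlaps non-reserved System RAM */
    pfn      = phys_addr >> PAGE_SHIFT;
    last_pfn = last_addr >> PAGE_SHIFT;
    if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
                              __ioremap_check_ram) == 1) {
        WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n", &phys_addr, &last_addr);
        return NULL;
    }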
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index 9d518d693b4b..844b06d67df4 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c | |||
@@ -126,3 +126,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm) | |||
126 | mm->get_unmapped_area = arch_get_unmapped_area_topdown; | 126 | mm->get_unmapped_area = arch_get_unmapped_area_topdown; |
127 | } | 127 | } |
128 | } | 128 | } |
129 | |||
130 | const char *arch_vma_name(struct vm_area_struct *vma) | ||
131 | { | ||
132 | if (vma->vm_flags & VM_MPX) | ||
133 | return "[mpx]"; | ||
134 | return NULL; | ||
135 | } | ||
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c index 7a657f58bbea..db1b0bc5017c 100644 --- a/arch/x86/mm/mpx.c +++ b/arch/x86/mm/mpx.c | |||
@@ -20,20 +20,6 @@ | |||
20 | #define CREATE_TRACE_POINTS | 20 | #define CREATE_TRACE_POINTS |
21 | #include <asm/trace/mpx.h> | 21 | #include <asm/trace/mpx.h> |
22 | 22 | ||
23 | static const char *mpx_mapping_name(struct vm_area_struct *vma) | ||
24 | { | ||
25 | return "[mpx]"; | ||
26 | } | ||
27 | |||
28 | static struct vm_operations_struct mpx_vma_ops = { | ||
29 | .name = mpx_mapping_name, | ||
30 | }; | ||
31 | |||
32 | static int is_mpx_vma(struct vm_area_struct *vma) | ||
33 | { | ||
34 | return (vma->vm_ops == &mpx_vma_ops); | ||
35 | } | ||
36 | |||
37 | static inline unsigned long mpx_bd_size_bytes(struct mm_struct *mm) | 23 | static inline unsigned long mpx_bd_size_bytes(struct mm_struct *mm) |
38 | { | 24 | { |
39 | if (is_64bit_mm(mm)) | 25 | if (is_64bit_mm(mm)) |
@@ -53,9 +39,6 @@ static inline unsigned long mpx_bt_size_bytes(struct mm_struct *mm) | |||
53 | /* | 39 | /* |
54 | * This is really a simplified "vm_mmap". it only handles MPX | 40 | * This is really a simplified "vm_mmap". it only handles MPX |
55 | * bounds tables (the bounds directory is user-allocated). | 41 | * bounds tables (the bounds directory is user-allocated). |
56 | * | ||
57 | * Later on, we use the vma->vm_ops to uniquely identify these | ||
58 | * VMAs. | ||
59 | */ | 42 | */ |
60 | static unsigned long mpx_mmap(unsigned long len) | 43 | static unsigned long mpx_mmap(unsigned long len) |
61 | { | 44 | { |
@@ -101,7 +84,6 @@ static unsigned long mpx_mmap(unsigned long len) | |||
101 | ret = -ENOMEM; | 84 | ret = -ENOMEM; |
102 | goto out; | 85 | goto out; |
103 | } | 86 | } |
104 | vma->vm_ops = &mpx_vma_ops; | ||
105 | 87 | ||
106 | if (vm_flags & VM_LOCKED) { | 88 | if (vm_flags & VM_LOCKED) { |
107 | up_write(&mm->mmap_sem); | 89 | up_write(&mm->mmap_sem); |
@@ -812,7 +794,7 @@ static noinline int zap_bt_entries_mapping(struct mm_struct *mm, | |||
812 | * so stop immediately and return an error. This | 794 | * so stop immediately and return an error. This |
813 | * probably results in a SIGSEGV. | 795 | * probably results in a SIGSEGV. |
814 | */ | 796 | */ |
815 | if (!is_mpx_vma(vma)) | 797 | if (!(vma->vm_flags & VM_MPX)) |
816 | return -EINVAL; | 798 | return -EINVAL; |
817 | 799 | ||
818 | len = min(vma->vm_end, end) - addr; | 800 | len = min(vma->vm_end, end) - addr; |
@@ -945,9 +927,9 @@ static int try_unmap_single_bt(struct mm_struct *mm, | |||
945 | * lots of tables even though we have no actual table | 927 | * lots of tables even though we have no actual table |
946 | * entries in use. | 928 | * entries in use. |
947 | */ | 929 | */ |
948 | while (next && is_mpx_vma(next)) | 930 | while (next && (next->vm_flags & VM_MPX)) |
949 | next = next->vm_next; | 931 | next = next->vm_next; |
950 | while (prev && is_mpx_vma(prev)) | 932 | while (prev && (prev->vm_flags & VM_MPX)) |
951 | prev = prev->vm_prev; | 933 | prev = prev->vm_prev; |
952 | /* | 934 | /* |
953 | * We know 'start' and 'end' lie within an area controlled | 935 | * We know 'start' and 'end' lie within an area controlled |
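With mpx_vma_ops removed above, an MPX bounds-table VMA is now identified purely by its VM_MPX flag (and reported as "[mpx]" by the new arch_vma_name()). A minimal sketch of the predicate the open-coded checks replace (the helper name is the one the patch deletes, shown only for illustration):

    /* illustrative: the flag, not vma->vm_ops, now marks MPX bounds tables */
    static inline bool is_mpx_vma(struct vm_area_struct *vma)
    {
        return !!(vma->vm_flags & VM_MPX);
    }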
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 3250f2371aea..90b924acd982 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c | |||
@@ -117,7 +117,7 @@ static void flush_tlb_func(void *info) | |||
117 | } else { | 117 | } else { |
118 | unsigned long addr; | 118 | unsigned long addr; |
119 | unsigned long nr_pages = | 119 | unsigned long nr_pages = |
120 | f->flush_end - f->flush_start / PAGE_SIZE; | 120 | (f->flush_end - f->flush_start) / PAGE_SIZE; |
121 | addr = f->flush_start; | 121 | addr = f->flush_start; |
122 | while (addr < f->flush_end) { | 122 | while (addr < f->flush_end) { |
123 | __flush_tlb_single(addr); | 123 | __flush_tlb_single(addr); |
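The tlb.c change is a pure operator-precedence fix: '/' binds tighter than '-', so the old expression divided only flush_start by PAGE_SIZE. A worked illustration with hypothetical values:

    /* with flush_start = 0x2000, flush_end = 0x5000, PAGE_SIZE = 0x1000:
     *   old: 0x5000 - 0x2000 / 0x1000 = 0x5000 - 2 = 0x4ffe   (bogus)
     *   new: (0x5000 - 0x2000) / 0x1000             = 3 pages (correct) */
    unsigned long nr_pages = (f->flush_end - f->flush_start) / PAGE_SIZE;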
diff --git a/block/bio.c b/block/bio.c index 2a00d349cd68..d6e5ba3399f0 100644 --- a/block/bio.c +++ b/block/bio.c | |||
@@ -1831,8 +1831,9 @@ EXPORT_SYMBOL(bio_endio); | |||
1831 | * Allocates and returns a new bio which represents @sectors from the start of | 1831 | * Allocates and returns a new bio which represents @sectors from the start of |
1832 | * @bio, and updates @bio to represent the remaining sectors. | 1832 | * @bio, and updates @bio to represent the remaining sectors. |
1833 | * | 1833 | * |
1834 | * The newly allocated bio will point to @bio's bi_io_vec; it is the caller's | 1834 | * Unless this is a discard request the newly allocated bio will point |
1835 | * responsibility to ensure that @bio is not freed before the split. | 1835 | * to @bio's bi_io_vec; it is the caller's responsibility to ensure that |
1836 | * @bio is not freed before the split. | ||
1836 | */ | 1837 | */ |
1837 | struct bio *bio_split(struct bio *bio, int sectors, | 1838 | struct bio *bio_split(struct bio *bio, int sectors, |
1838 | gfp_t gfp, struct bio_set *bs) | 1839 | gfp_t gfp, struct bio_set *bs) |
@@ -1842,7 +1843,15 @@ struct bio *bio_split(struct bio *bio, int sectors, | |||
1842 | BUG_ON(sectors <= 0); | 1843 | BUG_ON(sectors <= 0); |
1843 | BUG_ON(sectors >= bio_sectors(bio)); | 1844 | BUG_ON(sectors >= bio_sectors(bio)); |
1844 | 1845 | ||
1845 | split = bio_clone_fast(bio, gfp, bs); | 1846 | /* |
1847 | * Discards need a mutable bio_vec to accommodate the payload | ||
1848 | * required by the DSM TRIM and UNMAP commands. | ||
1849 | */ | ||
1850 | if (bio->bi_rw & REQ_DISCARD) | ||
1851 | split = bio_clone_bioset(bio, gfp, bs); | ||
1852 | else | ||
1853 | split = bio_clone_fast(bio, gfp, bs); | ||
1854 | |||
1846 | if (!split) | 1855 | if (!split) |
1847 | return NULL; | 1856 | return NULL; |
1848 | 1857 | ||
@@ -2009,6 +2018,7 @@ int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css) | |||
2009 | bio->bi_css = blkcg_css; | 2018 | bio->bi_css = blkcg_css; |
2010 | return 0; | 2019 | return 0; |
2011 | } | 2020 | } |
2021 | EXPORT_SYMBOL_GPL(bio_associate_blkcg); | ||
2012 | 2022 | ||
2013 | /** | 2023 | /** |
2014 | * bio_associate_current - associate a bio with %current | 2024 | * bio_associate_current - associate a bio with %current |
@@ -2039,6 +2049,7 @@ int bio_associate_current(struct bio *bio) | |||
2039 | bio->bi_css = task_get_css(current, blkio_cgrp_id); | 2049 | bio->bi_css = task_get_css(current, blkio_cgrp_id); |
2040 | return 0; | 2050 | return 0; |
2041 | } | 2051 | } |
2052 | EXPORT_SYMBOL_GPL(bio_associate_current); | ||
2042 | 2053 | ||
2043 | /** | 2054 | /** |
2044 | * bio_disassociate_task - undo bio_associate_current() | 2055 | * bio_disassociate_task - undo bio_associate_current() |
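The bio_split() hunk special-cases discards: a discard bio needs its own mutable bio_vec for the DSM TRIM/UNMAP payload, so it cannot share the parent's bi_io_vec the way bio_clone_fast() does. A minimal sketch of the resulting choice, identical in substance to the hunk above:

    /* discards get a full clone with their own bio_vec; everything else
     * keeps the cheap clone that shares the parent's bi_io_vec */
    if (bio->bi_rw & REQ_DISCARD)
        split = bio_clone_bioset(bio, gfp, bs);
    else
        split = bio_clone_fast(bio, gfp, bs);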
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 9da02c021ebe..d6283b3f5db5 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c | |||
@@ -718,8 +718,12 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol, | |||
718 | return -EINVAL; | 718 | return -EINVAL; |
719 | 719 | ||
720 | disk = get_gendisk(MKDEV(major, minor), &part); | 720 | disk = get_gendisk(MKDEV(major, minor), &part); |
721 | if (!disk || part) | 721 | if (!disk) |
722 | return -EINVAL; | 722 | return -EINVAL; |
723 | if (part) { | ||
724 | put_disk(disk); | ||
725 | return -EINVAL; | ||
726 | } | ||
723 | 727 | ||
724 | rcu_read_lock(); | 728 | rcu_read_lock(); |
725 | spin_lock_irq(disk->queue->queue_lock); | 729 | spin_lock_irq(disk->queue->queue_lock); |
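The blkg_conf_prep() hunk fixes a reference leak: get_gendisk() returns the disk with a reference held, and the old combined "!disk || part" test bailed out without dropping it when the caller named a partition. A minimal sketch of the corrected pairing:

    disk = get_gendisk(MKDEV(major, minor), &part);
    if (!disk)
        return -EINVAL;        /* nothing was taken, nothing to drop */
    if (part) {
        put_disk(disk);        /* policy applies to whole devices only */
        return -EINVAL;
    }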
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index e83fc3d0da9c..db5d9f79a247 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
@@ -2478,6 +2478,10 @@ int ata_dev_configure(struct ata_device *dev) | |||
2478 | dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, | 2478 | dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, |
2479 | dev->max_sectors); | 2479 | dev->max_sectors); |
2480 | 2480 | ||
2481 | if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024) | ||
2482 | dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024, | ||
2483 | dev->max_sectors); | ||
2484 | |||
2481 | if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48) | 2485 | if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48) |
2482 | dev->max_sectors = ATA_MAX_SECTORS_LBA48; | 2486 | dev->max_sectors = ATA_MAX_SECTORS_LBA48; |
2483 | 2487 | ||
@@ -4146,6 +4150,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
4146 | { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, | 4150 | { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, |
4147 | { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, | 4151 | { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, |
4148 | 4152 | ||
4153 | /* | ||
4154 | * Causes silent data corruption with higher max sects. | ||
4155 | * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com | ||
4156 | */ | ||
4157 | { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 }, | ||
4158 | |||
4149 | /* Devices we expect to fail diagnostics */ | 4159 | /* Devices we expect to fail diagnostics */ |
4150 | 4160 | ||
4151 | /* Devices where NCQ should be avoided */ | 4161 | /* Devices where NCQ should be avoided */ |
@@ -4174,9 +4184,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
4174 | { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | | 4184 | { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | |
4175 | ATA_HORKAGE_FIRMWARE_WARN }, | 4185 | ATA_HORKAGE_FIRMWARE_WARN }, |
4176 | 4186 | ||
4177 | /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */ | 4187 | /* drives which fail FPDMA_AA activation (some may freeze afterwards) */ |
4178 | { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA }, | 4188 | { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA }, |
4179 | { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA }, | 4189 | { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA }, |
4190 | { "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA }, | ||
4180 | 4191 | ||
4181 | /* Blacklist entries taken from Silicon Image 3124/3132 | 4192 | /* Blacklist entries taken from Silicon Image 3124/3132 |
4182 | Windows driver .inf file - also several Linux problem reports */ | 4193 | Windows driver .inf file - also several Linux problem reports */ |
@@ -4229,7 +4240,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
4229 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, | 4240 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, |
4230 | { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | | 4241 | { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | |
4231 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, | 4242 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, |
4232 | { "Micron_M5[15]0*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | | 4243 | { "Micron_M5[15]0_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | |
4233 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, | 4244 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, |
4234 | { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | | 4245 | { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | |
4235 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, | 4246 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, |
@@ -4238,6 +4249,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { | |||
4238 | { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | | 4249 | { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | |
4239 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, | 4250 | ATA_HORKAGE_ZERO_AFTER_TRIM, }, |
4240 | 4251 | ||
4252 | /* devices that don't properly handle TRIM commands */ | ||
4253 | { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, }, | ||
4254 | |||
4241 | /* | 4255 | /* |
4242 | * As defined, the DRAT (Deterministic Read After Trim) and RZAT | 4256 | * As defined, the DRAT (Deterministic Read After Trim) and RZAT |
4243 | * (Return Zero After Trim) flags in the ATA Command Set are | 4257 | * (Return Zero After Trim) flags in the ATA Command Set are |
@@ -4501,7 +4515,8 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev) | |||
4501 | else /* In the ancient relic department - skip all of this */ | 4515 | else /* In the ancient relic department - skip all of this */ |
4502 | return 0; | 4516 | return 0; |
4503 | 4517 | ||
4504 | err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); | 4518 | /* On some disks, this command causes spin-up, so we need longer timeout */ |
4519 | err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000); | ||
4505 | 4520 | ||
4506 | DPRINTK("EXIT, err_mask=%x\n", err_mask); | 4521 | DPRINTK("EXIT, err_mask=%x\n", err_mask); |
4507 | return err_mask; | 4522 | return err_mask; |
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c index 7ccc084bf1df..85aa76116a30 100644 --- a/drivers/ata/libata-pmp.c +++ b/drivers/ata/libata-pmp.c | |||
@@ -460,6 +460,13 @@ static void sata_pmp_quirks(struct ata_port *ap) | |||
460 | ATA_LFLAG_NO_SRST | | 460 | ATA_LFLAG_NO_SRST | |
461 | ATA_LFLAG_ASSUME_ATA; | 461 | ATA_LFLAG_ASSUME_ATA; |
462 | } | 462 | } |
463 | } else if (vendor == 0x11ab && devid == 0x4140) { | ||
464 | /* Marvell 4140 quirks */ | ||
465 | ata_for_each_link(link, ap, EDGE) { | ||
466 | /* port 4 is for SEMB device and it doesn't like SRST */ | ||
467 | if (link->pmp == 4) | ||
468 | link->flags |= ATA_LFLAG_DISABLED; | ||
469 | } | ||
463 | } | 470 | } |
464 | } | 471 | } |
465 | 472 | ||
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 3131adcc1f87..641a61a59e89 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c | |||
@@ -2568,7 +2568,8 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf) | |||
2568 | rbuf[14] = (lowest_aligned >> 8) & 0x3f; | 2568 | rbuf[14] = (lowest_aligned >> 8) & 0x3f; |
2569 | rbuf[15] = lowest_aligned; | 2569 | rbuf[15] = lowest_aligned; |
2570 | 2570 | ||
2571 | if (ata_id_has_trim(args->id)) { | 2571 | if (ata_id_has_trim(args->id) && |
2572 | !(dev->horkage & ATA_HORKAGE_NOTRIM)) { | ||
2572 | rbuf[14] |= 0x80; /* LBPME */ | 2573 | rbuf[14] |= 0x80; /* LBPME */ |
2573 | 2574 | ||
2574 | if (ata_id_has_zero_after_trim(args->id) && | 2575 | if (ata_id_has_zero_after_trim(args->id) && |
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c index d6c37bcd416d..e2d94972962d 100644 --- a/drivers/ata/libata-transport.c +++ b/drivers/ata/libata-transport.c | |||
@@ -569,6 +569,8 @@ show_ata_dev_trim(struct device *dev, | |||
569 | 569 | ||
570 | if (!ata_id_has_trim(ata_dev->id)) | 570 | if (!ata_id_has_trim(ata_dev->id)) |
571 | mode = "unsupported"; | 571 | mode = "unsupported"; |
572 | else if (ata_dev->horkage & ATA_HORKAGE_NOTRIM) | ||
573 | mode = "forced_unsupported"; | ||
572 | else if (ata_dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) | 574 | else if (ata_dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) |
573 | mode = "forced_unqueued"; | 575 | mode = "forced_unqueued"; |
574 | else if (ata_fpdma_dsm_supported(ata_dev)) | 576 | else if (ata_fpdma_dsm_supported(ata_dev)) |
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 69de41a87b74..3177b245d2bd 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c | |||
@@ -240,19 +240,19 @@ static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer) | |||
240 | while ((entry = llist_del_all(&cq->list)) != NULL) { | 240 | while ((entry = llist_del_all(&cq->list)) != NULL) { |
241 | entry = llist_reverse_order(entry); | 241 | entry = llist_reverse_order(entry); |
242 | do { | 242 | do { |
243 | struct request_queue *q = NULL; | ||
244 | |||
243 | cmd = container_of(entry, struct nullb_cmd, ll_list); | 245 | cmd = container_of(entry, struct nullb_cmd, ll_list); |
244 | entry = entry->next; | 246 | entry = entry->next; |
247 | if (cmd->rq) | ||
248 | q = cmd->rq->q; | ||
245 | end_cmd(cmd); | 249 | end_cmd(cmd); |
246 | 250 | ||
247 | if (cmd->rq) { | 251 | if (q && !q->mq_ops && blk_queue_stopped(q)) { |
248 | struct request_queue *q = cmd->rq->q; | 252 | spin_lock(q->queue_lock); |
249 | 253 | if (blk_queue_stopped(q)) | |
250 | if (!q->mq_ops && blk_queue_stopped(q)) { | 254 | blk_start_queue(q); |
251 | spin_lock(q->queue_lock); | 255 | spin_unlock(q->queue_lock); |
252 | if (blk_queue_stopped(q)) | ||
253 | blk_start_queue(q); | ||
254 | spin_unlock(q->queue_lock); | ||
255 | } | ||
256 | } | 256 | } |
257 | } while (entry); | 257 | } while (entry); |
258 | } | 258 | } |
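The null_blk hunk reorders the completion path so the request queue pointer is captured before end_cmd() runs, since end_cmd() may free the command (and with it the request) that the old code still dereferenced afterwards. A minimal sketch of the safe ordering:

    /* grab the queue first; 'cmd' and 'cmd->rq' may be gone after end_cmd() */
    struct request_queue *q = cmd->rq ? cmd->rq->q : NULL;

    end_cmd(cmd);

    if (q && !q->mq_ops && blk_queue_stopped(q)) {
        spin_lock(q->queue_lock);
        if (blk_queue_stopped(q))
            blk_start_queue(q);    /* restart a queue stopped while full */
        spin_unlock(q->queue_lock);
    }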
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c index 1e1a4323a71f..9ceb8ac68fdc 100644 --- a/drivers/bluetooth/btbcm.c +++ b/drivers/bluetooth/btbcm.c | |||
@@ -472,12 +472,11 @@ int btbcm_setup_apple(struct hci_dev *hdev) | |||
472 | 472 | ||
473 | /* Read Verbose Config Version Info */ | 473 | /* Read Verbose Config Version Info */ |
474 | skb = btbcm_read_verbose_config(hdev); | 474 | skb = btbcm_read_verbose_config(hdev); |
475 | if (IS_ERR(skb)) | 475 | if (!IS_ERR(skb)) { |
476 | return PTR_ERR(skb); | 476 | BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1], |
477 | 477 | get_unaligned_le16(skb->data + 5)); | |
478 | BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1], | 478 | kfree_skb(skb); |
479 | get_unaligned_le16(skb->data + 5)); | 479 | } |
480 | kfree_skb(skb); | ||
481 | 480 | ||
482 | set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); | 481 | set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); |
483 | 482 | ||
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c index 4fd9961d552e..d42537425438 100644 --- a/drivers/firmware/efi/cper.c +++ b/drivers/firmware/efi/cper.c | |||
@@ -305,10 +305,17 @@ const char *cper_mem_err_unpack(struct trace_seq *p, | |||
305 | return ret; | 305 | return ret; |
306 | } | 306 | } |
307 | 307 | ||
308 | static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem) | 308 | static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem, |
309 | int len) | ||
309 | { | 310 | { |
310 | struct cper_mem_err_compact cmem; | 311 | struct cper_mem_err_compact cmem; |
311 | 312 | ||
313 | /* Don't trust UEFI 2.1/2.2 structure with bad validation bits */ | ||
314 | if (len == sizeof(struct cper_sec_mem_err_old) && | ||
315 | (mem->validation_bits & ~(CPER_MEM_VALID_RANK_NUMBER - 1))) { | ||
316 | pr_err(FW_WARN "valid bits set for fields beyond structure\n"); | ||
317 | return; | ||
318 | } | ||
312 | if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS) | 319 | if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS) |
313 | printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status); | 320 | printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status); |
314 | if (mem->validation_bits & CPER_MEM_VALID_PA) | 321 | if (mem->validation_bits & CPER_MEM_VALID_PA) |
@@ -405,8 +412,10 @@ static void cper_estatus_print_section( | |||
405 | } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) { | 412 | } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) { |
406 | struct cper_sec_mem_err *mem_err = (void *)(gdata + 1); | 413 | struct cper_sec_mem_err *mem_err = (void *)(gdata + 1); |
407 | printk("%s""section_type: memory error\n", newpfx); | 414 | printk("%s""section_type: memory error\n", newpfx); |
408 | if (gdata->error_data_length >= sizeof(*mem_err)) | 415 | if (gdata->error_data_length >= |
409 | cper_print_mem(newpfx, mem_err); | 416 | sizeof(struct cper_sec_mem_err_old)) |
417 | cper_print_mem(newpfx, mem_err, | ||
418 | gdata->error_data_length); | ||
410 | else | 419 | else |
411 | goto err_section_too_small; | 420 | goto err_section_too_small; |
412 | } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PCIE)) { | 421 | } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PCIE)) { |
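The cper.c change guards against UEFI 2.1/2.2 firmware that ships the shorter, older memory-error section yet sets validation bits for fields the structure does not contain; decoding those bits would read past the record. A condensed sketch of the added sanity check, taken directly from the hunk:

    /* an old-format record must not claim validity for fields it lacks */
    if (len == sizeof(struct cper_sec_mem_err_old) &&
        (mem->validation_bits & ~(CPER_MEM_VALID_RANK_NUMBER - 1))) {
        pr_err(FW_WARN "valid bits set for fields beyond structure\n");
        return;
    }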
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 01657830b470..e9fde72cf038 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h | |||
@@ -1614,6 +1614,9 @@ struct amdgpu_uvd { | |||
1614 | #define AMDGPU_MAX_VCE_HANDLES 16 | 1614 | #define AMDGPU_MAX_VCE_HANDLES 16 |
1615 | #define AMDGPU_VCE_FIRMWARE_OFFSET 256 | 1615 | #define AMDGPU_VCE_FIRMWARE_OFFSET 256 |
1616 | 1616 | ||
1617 | #define AMDGPU_VCE_HARVEST_VCE0 (1 << 0) | ||
1618 | #define AMDGPU_VCE_HARVEST_VCE1 (1 << 1) | ||
1619 | |||
1617 | struct amdgpu_vce { | 1620 | struct amdgpu_vce { |
1618 | struct amdgpu_bo *vcpu_bo; | 1621 | struct amdgpu_bo *vcpu_bo; |
1619 | uint64_t gpu_addr; | 1622 | uint64_t gpu_addr; |
@@ -1626,6 +1629,7 @@ struct amdgpu_vce { | |||
1626 | const struct firmware *fw; /* VCE firmware */ | 1629 | const struct firmware *fw; /* VCE firmware */ |
1627 | struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS]; | 1630 | struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS]; |
1628 | struct amdgpu_irq_src irq; | 1631 | struct amdgpu_irq_src irq; |
1632 | unsigned harvest_config; | ||
1629 | }; | 1633 | }; |
1630 | 1634 | ||
1631 | /* | 1635 | /* |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 5533434c7a8f..31ad444c6386 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | |||
@@ -459,6 +459,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file | |||
459 | memcpy(&dev_info.cu_bitmap[0], &cu_info.bitmap[0], sizeof(cu_info.bitmap)); | 459 | memcpy(&dev_info.cu_bitmap[0], &cu_info.bitmap[0], sizeof(cu_info.bitmap)); |
460 | dev_info.vram_type = adev->mc.vram_type; | 460 | dev_info.vram_type = adev->mc.vram_type; |
461 | dev_info.vram_bit_width = adev->mc.vram_width; | 461 | dev_info.vram_bit_width = adev->mc.vram_width; |
462 | dev_info.vce_harvest_config = adev->vce.harvest_config; | ||
462 | 463 | ||
463 | return copy_to_user(out, &dev_info, | 464 | return copy_to_user(out, &dev_info, |
464 | min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0; | 465 | min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0; |
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c index 1a2d419cbf16..ace870afc7d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c | |||
@@ -494,29 +494,67 @@ static void cz_dpm_fini(struct amdgpu_device *adev) | |||
494 | amdgpu_free_extended_power_table(adev); | 494 | amdgpu_free_extended_power_table(adev); |
495 | } | 495 | } |
496 | 496 | ||
497 | #define ixSMUSVI_NB_CURRENTVID 0xD8230044 | ||
498 | #define CURRENT_NB_VID_MASK 0xff000000 | ||
499 | #define CURRENT_NB_VID__SHIFT 24 | ||
500 | #define ixSMUSVI_GFX_CURRENTVID 0xD8230048 | ||
501 | #define CURRENT_GFX_VID_MASK 0xff000000 | ||
502 | #define CURRENT_GFX_VID__SHIFT 24 | ||
503 | |||
497 | static void | 504 | static void |
498 | cz_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, | 505 | cz_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, |
499 | struct seq_file *m) | 506 | struct seq_file *m) |
500 | { | 507 | { |
508 | struct cz_power_info *pi = cz_get_pi(adev); | ||
501 | struct amdgpu_clock_voltage_dependency_table *table = | 509 | struct amdgpu_clock_voltage_dependency_table *table = |
502 | &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; | 510 | &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; |
503 | u32 current_index = | 511 | struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table = |
504 | (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) & | 512 | &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; |
505 | TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >> | 513 | struct amdgpu_vce_clock_voltage_dependency_table *vce_table = |
506 | TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT; | 514 | &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; |
507 | u32 sclk, tmp; | 515 | u32 sclk_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX), |
508 | u16 vddc; | 516 | TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX); |
509 | 517 | u32 uvd_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2), | |
510 | if (current_index >= NUM_SCLK_LEVELS) { | 518 | TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX); |
511 | seq_printf(m, "invalid dpm profile %d\n", current_index); | 519 | u32 vce_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2), |
520 | TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX); | ||
521 | u32 sclk, vclk, dclk, ecclk, tmp; | ||
522 | u16 vddnb, vddgfx; | ||
523 | |||
524 | if (sclk_index >= NUM_SCLK_LEVELS) { | ||
525 | seq_printf(m, "invalid sclk dpm profile %d\n", sclk_index); | ||
512 | } else { | 526 | } else { |
513 | sclk = table->entries[current_index].clk; | 527 | sclk = table->entries[sclk_index].clk; |
514 | tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) & | 528 | seq_printf(m, "%u sclk: %u\n", sclk_index, sclk); |
515 | SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >> | 529 | } |
516 | SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT; | 530 | |
517 | vddc = cz_convert_8bit_index_to_voltage(adev, (u16)tmp); | 531 | tmp = (RREG32_SMC(ixSMUSVI_NB_CURRENTVID) & |
518 | seq_printf(m, "power level %d sclk: %u vddc: %u\n", | 532 | CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT; |
519 | current_index, sclk, vddc); | 533 | vddnb = cz_convert_8bit_index_to_voltage(adev, (u16)tmp); |
534 | tmp = (RREG32_SMC(ixSMUSVI_GFX_CURRENTVID) & | ||
535 | CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT; | ||
536 | vddgfx = cz_convert_8bit_index_to_voltage(adev, (u16)tmp); | ||
537 | seq_printf(m, "vddnb: %u vddgfx: %u\n", vddnb, vddgfx); | ||
538 | |||
539 | seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en"); | ||
540 | if (!pi->uvd_power_gated) { | ||
541 | if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) { | ||
542 | seq_printf(m, "invalid uvd dpm level %d\n", uvd_index); | ||
543 | } else { | ||
544 | vclk = uvd_table->entries[uvd_index].vclk; | ||
545 | dclk = uvd_table->entries[uvd_index].dclk; | ||
546 | seq_printf(m, "%u uvd vclk: %u dclk: %u\n", uvd_index, vclk, dclk); | ||
547 | } | ||
548 | } | ||
549 | |||
550 | seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en"); | ||
551 | if (!pi->vce_power_gated) { | ||
552 | if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) { | ||
553 | seq_printf(m, "invalid vce dpm level %d\n", vce_index); | ||
554 | } else { | ||
555 | ecclk = vce_table->entries[vce_index].ecclk; | ||
556 | seq_printf(m, "%u vce ecclk: %u\n", vce_index, ecclk); | ||
557 | } | ||
520 | } | 558 | } |
521 | } | 559 | } |
522 | 560 | ||
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 6e77964f1b64..e70a26f587a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | |||
@@ -2632,6 +2632,7 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2632 | struct drm_device *dev = crtc->dev; | 2632 | struct drm_device *dev = crtc->dev; |
2633 | struct amdgpu_device *adev = dev->dev_private; | 2633 | struct amdgpu_device *adev = dev->dev_private; |
2634 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | 2634 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); |
2635 | unsigned type; | ||
2635 | 2636 | ||
2636 | switch (mode) { | 2637 | switch (mode) { |
2637 | case DRM_MODE_DPMS_ON: | 2638 | case DRM_MODE_DPMS_ON: |
@@ -2640,6 +2641,9 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2640 | dce_v10_0_vga_enable(crtc, true); | 2641 | dce_v10_0_vga_enable(crtc, true); |
2641 | amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); | 2642 | amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); |
2642 | dce_v10_0_vga_enable(crtc, false); | 2643 | dce_v10_0_vga_enable(crtc, false); |
2644 | /* Make sure VBLANK interrupt is still enabled */ | ||
2645 | type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); | ||
2646 | amdgpu_irq_update(adev, &adev->crtc_irq, type); | ||
2643 | drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); | 2647 | drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); |
2644 | dce_v10_0_crtc_load_lut(crtc); | 2648 | dce_v10_0_crtc_load_lut(crtc); |
2645 | break; | 2649 | break; |
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 7f7abb0e0be5..dcb402ee048a 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | |||
@@ -2631,6 +2631,7 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2631 | struct drm_device *dev = crtc->dev; | 2631 | struct drm_device *dev = crtc->dev; |
2632 | struct amdgpu_device *adev = dev->dev_private; | 2632 | struct amdgpu_device *adev = dev->dev_private; |
2633 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); | 2633 | struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); |
2634 | unsigned type; | ||
2634 | 2635 | ||
2635 | switch (mode) { | 2636 | switch (mode) { |
2636 | case DRM_MODE_DPMS_ON: | 2637 | case DRM_MODE_DPMS_ON: |
@@ -2639,6 +2640,9 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
2639 | dce_v11_0_vga_enable(crtc, true); | 2640 | dce_v11_0_vga_enable(crtc, true); |
2640 | amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); | 2641 | amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); |
2641 | dce_v11_0_vga_enable(crtc, false); | 2642 | dce_v11_0_vga_enable(crtc, false); |
2643 | /* Make sure VBLANK interrupt is still enabled */ | ||
2644 | type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id); | ||
2645 | amdgpu_irq_update(adev, &adev->crtc_irq, type); | ||
2642 | drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); | 2646 | drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); |
2643 | dce_v11_0_crtc_load_lut(crtc); | 2647 | dce_v11_0_crtc_load_lut(crtc); |
2644 | break; | 2648 | break; |
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c index d62c4002e39c..d1064ca3670e 100644 --- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | |||
@@ -35,6 +35,8 @@ | |||
35 | #include "oss/oss_2_0_d.h" | 35 | #include "oss/oss_2_0_d.h" |
36 | #include "oss/oss_2_0_sh_mask.h" | 36 | #include "oss/oss_2_0_sh_mask.h" |
37 | #include "gca/gfx_8_0_d.h" | 37 | #include "gca/gfx_8_0_d.h" |
38 | #include "smu/smu_7_1_2_d.h" | ||
39 | #include "smu/smu_7_1_2_sh_mask.h" | ||
38 | 40 | ||
39 | #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04 | 41 | #define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04 |
40 | #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10 | 42 | #define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10 |
@@ -112,6 +114,10 @@ static int vce_v3_0_start(struct amdgpu_device *adev) | |||
112 | 114 | ||
113 | mutex_lock(&adev->grbm_idx_mutex); | 115 | mutex_lock(&adev->grbm_idx_mutex); |
114 | for (idx = 0; idx < 2; ++idx) { | 116 | for (idx = 0; idx < 2; ++idx) { |
117 | |||
118 | if (adev->vce.harvest_config & (1 << idx)) | ||
119 | continue; | ||
120 | |||
115 | if(idx == 0) | 121 | if(idx == 0) |
116 | WREG32_P(mmGRBM_GFX_INDEX, 0, | 122 | WREG32_P(mmGRBM_GFX_INDEX, 0, |
117 | ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); | 123 | ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); |
@@ -190,10 +196,52 @@ static int vce_v3_0_start(struct amdgpu_device *adev) | |||
190 | return 0; | 196 | return 0; |
191 | } | 197 | } |
192 | 198 | ||
199 | #define ixVCE_HARVEST_FUSE_MACRO__ADDRESS 0xC0014074 | ||
200 | #define VCE_HARVEST_FUSE_MACRO__SHIFT 27 | ||
201 | #define VCE_HARVEST_FUSE_MACRO__MASK 0x18000000 | ||
202 | |||
203 | static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev) | ||
204 | { | ||
205 | u32 tmp; | ||
206 | unsigned ret; | ||
207 | |||
208 | if (adev->flags & AMDGPU_IS_APU) | ||
209 | tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) & | ||
210 | VCE_HARVEST_FUSE_MACRO__MASK) >> | ||
211 | VCE_HARVEST_FUSE_MACRO__SHIFT; | ||
212 | else | ||
213 | tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) & | ||
214 | CC_HARVEST_FUSES__VCE_DISABLE_MASK) >> | ||
215 | CC_HARVEST_FUSES__VCE_DISABLE__SHIFT; | ||
216 | |||
217 | switch (tmp) { | ||
218 | case 1: | ||
219 | ret = AMDGPU_VCE_HARVEST_VCE0; | ||
220 | break; | ||
221 | case 2: | ||
222 | ret = AMDGPU_VCE_HARVEST_VCE1; | ||
223 | break; | ||
224 | case 3: | ||
225 | ret = AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1; | ||
226 | break; | ||
227 | default: | ||
228 | ret = 0; | ||
229 | } | ||
230 | |||
231 | return ret; | ||
232 | } | ||
233 | |||
193 | static int vce_v3_0_early_init(void *handle) | 234 | static int vce_v3_0_early_init(void *handle) |
194 | { | 235 | { |
195 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 236 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
196 | 237 | ||
238 | adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev); | ||
239 | |||
240 | if ((adev->vce.harvest_config & | ||
241 | (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) == | ||
242 | (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) | ||
243 | return -ENOENT; | ||
244 | |||
197 | vce_v3_0_set_ring_funcs(adev); | 245 | vce_v3_0_set_ring_funcs(adev); |
198 | vce_v3_0_set_irq_funcs(adev); | 246 | vce_v3_0_set_irq_funcs(adev); |
199 | 247 | ||
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c index f69b92535505..5ae5c6923128 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c | |||
@@ -355,6 +355,7 @@ int atmel_hlcdc_crtc_create(struct drm_device *dev) | |||
355 | planes->overlays[i]->base.possible_crtcs = 1 << crtc->id; | 355 | planes->overlays[i]->base.possible_crtcs = 1 << crtc->id; |
356 | 356 | ||
357 | drm_crtc_helper_add(&crtc->base, &lcdc_crtc_helper_funcs); | 357 | drm_crtc_helper_add(&crtc->base, &lcdc_crtc_helper_funcs); |
358 | drm_crtc_vblank_reset(&crtc->base); | ||
358 | 359 | ||
359 | dc->crtc = &crtc->base; | 360 | dc->crtc = &crtc->base; |
360 | 361 | ||
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c index 60b0c13d7ff5..6fad1f9648f3 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c | |||
@@ -313,20 +313,20 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev) | |||
313 | 313 | ||
314 | pm_runtime_enable(dev->dev); | 314 | pm_runtime_enable(dev->dev); |
315 | 315 | ||
316 | ret = atmel_hlcdc_dc_modeset_init(dev); | 316 | ret = drm_vblank_init(dev, 1); |
317 | if (ret < 0) { | 317 | if (ret < 0) { |
318 | dev_err(dev->dev, "failed to initialize mode setting\n"); | 318 | dev_err(dev->dev, "failed to initialize vblank\n"); |
319 | goto err_periph_clk_disable; | 319 | goto err_periph_clk_disable; |
320 | } | 320 | } |
321 | 321 | ||
322 | drm_mode_config_reset(dev); | 322 | ret = atmel_hlcdc_dc_modeset_init(dev); |
323 | |||
324 | ret = drm_vblank_init(dev, 1); | ||
325 | if (ret < 0) { | 323 | if (ret < 0) { |
326 | dev_err(dev->dev, "failed to initialize vblank\n"); | 324 | dev_err(dev->dev, "failed to initialize mode setting\n"); |
327 | goto err_periph_clk_disable; | 325 | goto err_periph_clk_disable; |
328 | } | 326 | } |
329 | 327 | ||
328 | drm_mode_config_reset(dev); | ||
329 | |||
330 | pm_runtime_get_sync(dev->dev); | 330 | pm_runtime_get_sync(dev->dev); |
331 | ret = drm_irq_install(dev, dc->hlcdc->irq); | 331 | ret = drm_irq_install(dev, dc->hlcdc->irq); |
332 | pm_runtime_put_sync(dev->dev); | 332 | pm_runtime_put_sync(dev->dev); |
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 357bd04a173b..fed748311b92 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c | |||
@@ -5398,12 +5398,9 @@ void drm_mode_config_reset(struct drm_device *dev) | |||
5398 | if (encoder->funcs->reset) | 5398 | if (encoder->funcs->reset) |
5399 | encoder->funcs->reset(encoder); | 5399 | encoder->funcs->reset(encoder); |
5400 | 5400 | ||
5401 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | 5401 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) |
5402 | connector->status = connector_status_unknown; | ||
5403 | |||
5404 | if (connector->funcs->reset) | 5402 | if (connector->funcs->reset) |
5405 | connector->funcs->reset(connector); | 5403 | connector->funcs->reset(connector); |
5406 | } | ||
5407 | } | 5404 | } |
5408 | EXPORT_SYMBOL(drm_mode_config_reset); | 5405 | EXPORT_SYMBOL(drm_mode_config_reset); |
5409 | 5406 | ||
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index a6d8a3ee7750..260389acfb77 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c | |||
@@ -1274,10 +1274,12 @@ int i915_reg_read_ioctl(struct drm_device *dev, | |||
1274 | struct drm_i915_private *dev_priv = dev->dev_private; | 1274 | struct drm_i915_private *dev_priv = dev->dev_private; |
1275 | struct drm_i915_reg_read *reg = data; | 1275 | struct drm_i915_reg_read *reg = data; |
1276 | struct register_whitelist const *entry = whitelist; | 1276 | struct register_whitelist const *entry = whitelist; |
1277 | unsigned size; | ||
1278 | u64 offset; | ||
1277 | int i, ret = 0; | 1279 | int i, ret = 0; |
1278 | 1280 | ||
1279 | for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) { | 1281 | for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) { |
1280 | if (entry->offset == reg->offset && | 1282 | if (entry->offset == (reg->offset & -entry->size) && |
1281 | (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask)) | 1283 | (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask)) |
1282 | break; | 1284 | break; |
1283 | } | 1285 | } |
@@ -1285,23 +1287,33 @@ int i915_reg_read_ioctl(struct drm_device *dev, | |||
1285 | if (i == ARRAY_SIZE(whitelist)) | 1287 | if (i == ARRAY_SIZE(whitelist)) |
1286 | return -EINVAL; | 1288 | return -EINVAL; |
1287 | 1289 | ||
1290 | /* We use the low bits to encode extra flags as the register should | ||
1291 | * be naturally aligned (and those that are not so aligned merely | ||
1292 | * limit the available flags for that register). | ||
1293 | */ | ||
1294 | offset = entry->offset; | ||
1295 | size = entry->size; | ||
1296 | size |= reg->offset ^ offset; | ||
1297 | |||
1288 | intel_runtime_pm_get(dev_priv); | 1298 | intel_runtime_pm_get(dev_priv); |
1289 | 1299 | ||
1290 | switch (entry->size) { | 1300 | switch (size) { |
1301 | case 8 | 1: | ||
1302 | reg->val = I915_READ64_2x32(offset, offset+4); | ||
1303 | break; | ||
1291 | case 8: | 1304 | case 8: |
1292 | reg->val = I915_READ64(reg->offset); | 1305 | reg->val = I915_READ64(offset); |
1293 | break; | 1306 | break; |
1294 | case 4: | 1307 | case 4: |
1295 | reg->val = I915_READ(reg->offset); | 1308 | reg->val = I915_READ(offset); |
1296 | break; | 1309 | break; |
1297 | case 2: | 1310 | case 2: |
1298 | reg->val = I915_READ16(reg->offset); | 1311 | reg->val = I915_READ16(offset); |
1299 | break; | 1312 | break; |
1300 | case 1: | 1313 | case 1: |
1301 | reg->val = I915_READ8(reg->offset); | 1314 | reg->val = I915_READ8(offset); |
1302 | break; | 1315 | break; |
1303 | default: | 1316 | default: |
1304 | MISSING_CASE(entry->size); | ||
1305 | ret = -EINVAL; | 1317 | ret = -EINVAL; |
1306 | goto out; | 1318 | goto out; |
1307 | } | 1319 | } |
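The i915_reg_read_ioctl() hunk packs extra flags into the low bits of the user-supplied offset: whitelisted registers are size-aligned, so masking with -entry->size recovers the real offset, and XOR-ing the two values recovers the flag bits, which are folded into the switch selector (8 | 1 means "read the 64-bit register as two 32-bit halves"). A compact sketch of the encoding, mirroring the hunk:

    /* entry->offset is naturally aligned to entry->size, so the low bits of
     * reg->offset are free to carry per-call flags */
    offset = entry->offset;            /* aligned base address          */
    size   = entry->size;              /* 1, 2, 4 or 8 bytes            */
    size  |= reg->offset ^ offset;     /* fold the flag bits back in    */
    if (size == (8 | 1))               /* 64-bit register, read as 2x32 */
        reg->val = I915_READ64_2x32(offset, offset + 4);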
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 882cccdad272..ac6fe40b99f7 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c | |||
@@ -490,7 +490,8 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp) | |||
490 | else if (boot_cpu_data.x86 > 3) | 490 | else if (boot_cpu_data.x86 > 3) |
491 | tmp = pgprot_noncached(tmp); | 491 | tmp = pgprot_noncached(tmp); |
492 | #endif | 492 | #endif |
493 | #if defined(__ia64__) || defined(__arm__) || defined(__powerpc__) | 493 | #if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \ |
494 | defined(__powerpc__) | ||
494 | if (caching_flags & TTM_PL_FLAG_WC) | 495 | if (caching_flags & TTM_PL_FLAG_WC) |
495 | tmp = pgprot_writecombine(tmp); | 496 | tmp = pgprot_writecombine(tmp); |
496 | else | 497 | else |
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c index 3318de690e00..a2dbbbe0d8d7 100644 --- a/drivers/hid/hid-cp2112.c +++ b/drivers/hid/hid-cp2112.c | |||
@@ -356,6 +356,8 @@ static int cp2112_read(struct cp2112_device *dev, u8 *data, size_t size) | |||
356 | struct cp2112_force_read_report report; | 356 | struct cp2112_force_read_report report; |
357 | int ret; | 357 | int ret; |
358 | 358 | ||
359 | if (size > sizeof(dev->read_data)) | ||
360 | size = sizeof(dev->read_data); | ||
359 | report.report = CP2112_DATA_READ_FORCE_SEND; | 361 | report.report = CP2112_DATA_READ_FORCE_SEND; |
360 | report.length = cpu_to_be16(size); | 362 | report.length = cpu_to_be16(size); |
361 | 363 | ||
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 6a9b05b328a9..7c811252c1ce 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c | |||
@@ -778,9 +778,16 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi, | |||
778 | /* | 778 | /* |
779 | * some egalax touchscreens have "application == HID_DG_TOUCHSCREEN" | 779 | * some egalax touchscreens have "application == HID_DG_TOUCHSCREEN" |
780 | * for the stylus. | 780 | * for the stylus. |
781 | * The check for mt_report_id ensures we don't process | ||
782 | * HID_DG_CONTACTCOUNT from the pen report as it is outside the physical | ||
783 | * collection, but within the report ID. | ||
781 | */ | 784 | */ |
782 | if (field->physical == HID_DG_STYLUS) | 785 | if (field->physical == HID_DG_STYLUS) |
783 | return 0; | 786 | return 0; |
787 | else if ((field->physical == 0) && | ||
788 | (field->report->id != td->mt_report_id) && | ||
789 | (td->mt_report_id != -1)) | ||
790 | return 0; | ||
784 | 791 | ||
785 | if (field->application == HID_DG_TOUCHSCREEN || | 792 | if (field->application == HID_DG_TOUCHSCREEN || |
786 | field->application == HID_DG_TOUCHPAD) | 793 | field->application == HID_DG_TOUCHPAD) |
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index 53e7de7cb9e2..20f9a653444c 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c | |||
@@ -87,6 +87,9 @@ static const struct hid_blacklist { | |||
87 | { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A, HID_QUIRK_ALWAYS_POLL }, | 87 | { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A, HID_QUIRK_ALWAYS_POLL }, |
88 | { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A, HID_QUIRK_ALWAYS_POLL }, | 88 | { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A, HID_QUIRK_ALWAYS_POLL }, |
89 | { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, | 89 | { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, |
90 | { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2, HID_QUIRK_NO_INIT_REPORTS }, | ||
91 | { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2, HID_QUIRK_NO_INIT_REPORTS }, | ||
92 | { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2, HID_QUIRK_NO_INIT_REPORTS }, | ||
90 | { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS }, | 93 | { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS }, |
91 | { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS }, | 94 | { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS }, |
92 | { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER, HID_QUIRK_NO_INIT_REPORTS }, | 95 | { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER, HID_QUIRK_NO_INIT_REPORTS }, |
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index 4c0ffca97bef..44958d79d598 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c | |||
@@ -1271,11 +1271,13 @@ fail_leds: | |||
1271 | pad_input_dev = NULL; | 1271 | pad_input_dev = NULL; |
1272 | wacom_wac->pad_registered = false; | 1272 | wacom_wac->pad_registered = false; |
1273 | fail_register_pad_input: | 1273 | fail_register_pad_input: |
1274 | input_unregister_device(touch_input_dev); | 1274 | if (touch_input_dev) |
1275 | input_unregister_device(touch_input_dev); | ||
1275 | wacom_wac->touch_input = NULL; | 1276 | wacom_wac->touch_input = NULL; |
1276 | wacom_wac->touch_registered = false; | 1277 | wacom_wac->touch_registered = false; |
1277 | fail_register_touch_input: | 1278 | fail_register_touch_input: |
1278 | input_unregister_device(pen_input_dev); | 1279 | if (pen_input_dev) |
1280 | input_unregister_device(pen_input_dev); | ||
1279 | wacom_wac->pen_input = NULL; | 1281 | wacom_wac->pen_input = NULL; |
1280 | wacom_wac->pen_registered = false; | 1282 | wacom_wac->pen_registered = false; |
1281 | fail_register_pen_input: | 1283 | fail_register_pen_input: |
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 232da89f4e88..0d244239e55d 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c | |||
@@ -2213,6 +2213,9 @@ void wacom_setup_device_quirks(struct wacom *wacom) | |||
2213 | features->x_max = 4096; | 2213 | features->x_max = 4096; |
2214 | features->y_max = 4096; | 2214 | features->y_max = 4096; |
2215 | } | 2215 | } |
2216 | else if (features->pktlen == WACOM_PKGLEN_BBTOUCH) { | ||
2217 | features->device_type |= WACOM_DEVICETYPE_PAD; | ||
2218 | } | ||
2216 | } | 2219 | } |
2217 | 2220 | ||
2218 | /* | 2221 | /* |
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c index e8e2077c7244..13ea1ea23328 100644 --- a/drivers/iio/accel/mma8452.c +++ b/drivers/iio/accel/mma8452.c | |||
@@ -557,21 +557,21 @@ static void mma8452_transient_interrupt(struct iio_dev *indio_dev) | |||
557 | if (src & MMA8452_TRANSIENT_SRC_XTRANSE) | 557 | if (src & MMA8452_TRANSIENT_SRC_XTRANSE) |
558 | iio_push_event(indio_dev, | 558 | iio_push_event(indio_dev, |
559 | IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X, | 559 | IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X, |
560 | IIO_EV_TYPE_THRESH, | 560 | IIO_EV_TYPE_MAG, |
561 | IIO_EV_DIR_RISING), | 561 | IIO_EV_DIR_RISING), |
562 | ts); | 562 | ts); |
563 | 563 | ||
564 | if (src & MMA8452_TRANSIENT_SRC_YTRANSE) | 564 | if (src & MMA8452_TRANSIENT_SRC_YTRANSE) |
565 | iio_push_event(indio_dev, | 565 | iio_push_event(indio_dev, |
566 | IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Y, | 566 | IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Y, |
567 | IIO_EV_TYPE_THRESH, | 567 | IIO_EV_TYPE_MAG, |
568 | IIO_EV_DIR_RISING), | 568 | IIO_EV_DIR_RISING), |
569 | ts); | 569 | ts); |
570 | 570 | ||
571 | if (src & MMA8452_TRANSIENT_SRC_ZTRANSE) | 571 | if (src & MMA8452_TRANSIENT_SRC_ZTRANSE) |
572 | iio_push_event(indio_dev, | 572 | iio_push_event(indio_dev, |
573 | IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Z, | 573 | IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Z, |
574 | IIO_EV_TYPE_THRESH, | 574 | IIO_EV_TYPE_MAG, |
575 | IIO_EV_DIR_RISING), | 575 | IIO_EV_DIR_RISING), |
576 | ts); | 576 | ts); |
577 | } | 577 | } |
@@ -644,7 +644,7 @@ static int mma8452_reg_access_dbg(struct iio_dev *indio_dev, | |||
644 | 644 | ||
645 | static const struct iio_event_spec mma8452_transient_event[] = { | 645 | static const struct iio_event_spec mma8452_transient_event[] = { |
646 | { | 646 | { |
647 | .type = IIO_EV_TYPE_THRESH, | 647 | .type = IIO_EV_TYPE_MAG, |
648 | .dir = IIO_EV_DIR_RISING, | 648 | .dir = IIO_EV_DIR_RISING, |
649 | .mask_separate = BIT(IIO_EV_INFO_ENABLE), | 649 | .mask_separate = BIT(IIO_EV_INFO_ENABLE), |
650 | .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) | | 650 | .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) | |
diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c index 8d9c9b9215dd..d819823f7257 100644 --- a/drivers/iio/adc/mcp320x.c +++ b/drivers/iio/adc/mcp320x.c | |||
@@ -299,6 +299,8 @@ static int mcp320x_probe(struct spi_device *spi) | |||
299 | indio_dev->channels = chip_info->channels; | 299 | indio_dev->channels = chip_info->channels; |
300 | indio_dev->num_channels = chip_info->num_channels; | 300 | indio_dev->num_channels = chip_info->num_channels; |
301 | 301 | ||
302 | adc->chip_info = chip_info; | ||
303 | |||
302 | adc->transfer[0].tx_buf = &adc->tx_buf; | 304 | adc->transfer[0].tx_buf = &adc->tx_buf; |
303 | adc->transfer[0].len = sizeof(adc->tx_buf); | 305 | adc->transfer[0].len = sizeof(adc->tx_buf); |
304 | adc->transfer[1].rx_buf = adc->rx_buf; | 306 | adc->transfer[1].rx_buf = adc->rx_buf; |
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c index 480f335a0f9f..819632bf1fda 100644 --- a/drivers/iio/adc/vf610_adc.c +++ b/drivers/iio/adc/vf610_adc.c | |||
@@ -635,7 +635,7 @@ static int vf610_adc_reg_access(struct iio_dev *indio_dev, | |||
635 | struct vf610_adc *info = iio_priv(indio_dev); | 635 | struct vf610_adc *info = iio_priv(indio_dev); |
636 | 636 | ||
637 | if ((readval == NULL) || | 637 | if ((readval == NULL) || |
638 | (!(reg % 4) || (reg > VF610_REG_ADC_PCTL))) | 638 | ((reg % 4) || (reg > VF610_REG_ADC_PCTL))) |
639 | return -EINVAL; | 639 | return -EINVAL; |
640 | 640 | ||
641 | *readval = readl(info->regs + reg); | 641 | *readval = readl(info->regs + reg); |
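
The corrected predicate above now rejects a debugfs access only when the offset is not 32-bit aligned or lies beyond the last register; the old expression inverted the alignment test and refused every properly aligned offset. A tiny sketch of the intended check, with a made-up value for the last register offset:

#include <stdbool.h>
#include <stdio.h>

#define VF610_REG_ADC_PCTL 0x30		/* hypothetical offset of the last register */

static bool reg_access_ok(unsigned int reg)
{
	return !(reg % 4) && reg <= VF610_REG_ADC_PCTL;
}

int main(void)
{
	printf("0x08 ok? %d\n", reg_access_ok(0x08));	/* 1: aligned and in range */
	printf("0x0a ok? %d\n", reg_access_ok(0x0a));	/* 0: not 32-bit aligned */
	printf("0x40 ok? %d\n", reg_access_ok(0x40));	/* 0: past the last register */
	return 0;
}
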
diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c index c1a218236be5..11a027adc204 100644 --- a/drivers/iio/light/stk3310.c +++ b/drivers/iio/light/stk3310.c | |||
@@ -200,7 +200,7 @@ static int stk3310_read_event(struct iio_dev *indio_dev, | |||
200 | int *val, int *val2) | 200 | int *val, int *val2) |
201 | { | 201 | { |
202 | u8 reg; | 202 | u8 reg; |
203 | u16 buf; | 203 | __be16 buf; |
204 | int ret; | 204 | int ret; |
205 | struct stk3310_data *data = iio_priv(indio_dev); | 205 | struct stk3310_data *data = iio_priv(indio_dev); |
206 | 206 | ||
@@ -222,7 +222,7 @@ static int stk3310_read_event(struct iio_dev *indio_dev, | |||
222 | dev_err(&data->client->dev, "register read failed\n"); | 222 | dev_err(&data->client->dev, "register read failed\n"); |
223 | return ret; | 223 | return ret; |
224 | } | 224 | } |
225 | *val = swab16(buf); | 225 | *val = be16_to_cpu(buf); |
226 | 226 | ||
227 | return IIO_VAL_INT; | 227 | return IIO_VAL_INT; |
228 | } | 228 | } |
@@ -235,7 +235,7 @@ static int stk3310_write_event(struct iio_dev *indio_dev, | |||
235 | int val, int val2) | 235 | int val, int val2) |
236 | { | 236 | { |
237 | u8 reg; | 237 | u8 reg; |
238 | u16 buf; | 238 | __be16 buf; |
239 | int ret; | 239 | int ret; |
240 | unsigned int index; | 240 | unsigned int index; |
241 | struct stk3310_data *data = iio_priv(indio_dev); | 241 | struct stk3310_data *data = iio_priv(indio_dev); |
@@ -252,7 +252,7 @@ static int stk3310_write_event(struct iio_dev *indio_dev, | |||
252 | else | 252 | else |
253 | return -EINVAL; | 253 | return -EINVAL; |
254 | 254 | ||
255 | buf = swab16(val); | 255 | buf = cpu_to_be16(val); |
256 | ret = regmap_bulk_write(data->regmap, reg, &buf, 2); | 256 | ret = regmap_bulk_write(data->regmap, reg, &buf, 2); |
257 | if (ret < 0) | 257 | if (ret < 0) |
258 | dev_err(&client->dev, "failed to set PS threshold!\n"); | 258 | dev_err(&client->dev, "failed to set PS threshold!\n"); |
@@ -301,7 +301,7 @@ static int stk3310_read_raw(struct iio_dev *indio_dev, | |||
301 | int *val, int *val2, long mask) | 301 | int *val, int *val2, long mask) |
302 | { | 302 | { |
303 | u8 reg; | 303 | u8 reg; |
304 | u16 buf; | 304 | __be16 buf; |
305 | int ret; | 305 | int ret; |
306 | unsigned int index; | 306 | unsigned int index; |
307 | struct stk3310_data *data = iio_priv(indio_dev); | 307 | struct stk3310_data *data = iio_priv(indio_dev); |
@@ -322,7 +322,7 @@ static int stk3310_read_raw(struct iio_dev *indio_dev, | |||
322 | mutex_unlock(&data->lock); | 322 | mutex_unlock(&data->lock); |
323 | return ret; | 323 | return ret; |
324 | } | 324 | } |
325 | *val = swab16(buf); | 325 | *val = be16_to_cpu(buf); |
326 | mutex_unlock(&data->lock); | 326 | mutex_unlock(&data->lock); |
327 | return IIO_VAL_INT; | 327 | return IIO_VAL_INT; |
328 | case IIO_CHAN_INFO_INT_TIME: | 328 | case IIO_CHAN_INFO_INT_TIME: |
@@ -608,13 +608,7 @@ static int stk3310_probe(struct i2c_client *client, | |||
608 | if (ret < 0) | 608 | if (ret < 0) |
609 | return ret; | 609 | return ret; |
610 | 610 | ||
611 | ret = iio_device_register(indio_dev); | 611 | if (client->irq < 0) |
612 | if (ret < 0) { | ||
613 | dev_err(&client->dev, "device_register failed\n"); | ||
614 | stk3310_set_state(data, STK3310_STATE_STANDBY); | ||
615 | } | ||
616 | |||
617 | if (client->irq <= 0) | ||
618 | client->irq = stk3310_gpio_probe(client); | 612 | client->irq = stk3310_gpio_probe(client); |
619 | 613 | ||
620 | if (client->irq >= 0) { | 614 | if (client->irq >= 0) { |
@@ -629,6 +623,12 @@ static int stk3310_probe(struct i2c_client *client, | |||
629 | client->irq); | 623 | client->irq); |
630 | } | 624 | } |
631 | 625 | ||
626 | ret = iio_device_register(indio_dev); | ||
627 | if (ret < 0) { | ||
628 | dev_err(&client->dev, "device_register failed\n"); | ||
629 | stk3310_set_state(data, STK3310_STATE_STANDBY); | ||
630 | } | ||
631 | |||
632 | return ret; | 632 | return ret; |
633 | } | 633 | } |
634 | 634 | ||
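
The stk3310 threshold registers hold big-endian 16-bit values, so be16_to_cpu()/cpu_to_be16() is the portable conversion; the replaced swab16() only gave the right answer on little-endian hosts. A userspace sketch of the same idea, using ntohs() as a rough stand-in for be16_to_cpu():

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>		/* ntohs() stands in for the kernel's be16_to_cpu() */

int main(void)
{
	uint8_t buf[2] = { 0x12, 0x34 };	/* bytes as the sensor sends them, MSB first */
	uint16_t val;

	memcpy(&val, buf, sizeof(val));		/* raw host representation of the two bytes */
	printf("threshold = %#x\n", (unsigned)ntohs(val));	/* 0x1234 on any host */
	return 0;
}
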
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig index dcadfc4f0661..efb9350b0d76 100644 --- a/drivers/iio/magnetometer/Kconfig +++ b/drivers/iio/magnetometer/Kconfig | |||
@@ -90,6 +90,7 @@ config IIO_ST_MAGN_SPI_3AXIS | |||
90 | config BMC150_MAGN | 90 | config BMC150_MAGN |
91 | tristate "Bosch BMC150 Magnetometer Driver" | 91 | tristate "Bosch BMC150 Magnetometer Driver" |
92 | depends on I2C | 92 | depends on I2C |
93 | select REGMAP_I2C | ||
93 | select IIO_BUFFER | 94 | select IIO_BUFFER |
94 | select IIO_TRIGGERED_BUFFER | 95 | select IIO_TRIGGERED_BUFFER |
95 | help | 96 | help |
diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c index d4c178869991..1347a1f2e46f 100644 --- a/drivers/iio/magnetometer/bmc150_magn.c +++ b/drivers/iio/magnetometer/bmc150_magn.c | |||
@@ -706,11 +706,11 @@ static int bmc150_magn_init(struct bmc150_magn_data *data) | |||
706 | goto err_poweroff; | 706 | goto err_poweroff; |
707 | } | 707 | } |
708 | if (chip_id != BMC150_MAGN_CHIP_ID_VAL) { | 708 | if (chip_id != BMC150_MAGN_CHIP_ID_VAL) { |
709 | dev_err(&data->client->dev, "Invalid chip id 0x%x\n", ret); | 709 | dev_err(&data->client->dev, "Invalid chip id 0x%x\n", chip_id); |
710 | ret = -ENODEV; | 710 | ret = -ENODEV; |
711 | goto err_poweroff; | 711 | goto err_poweroff; |
712 | } | 712 | } |
713 | dev_dbg(&data->client->dev, "Chip id %x\n", ret); | 713 | dev_dbg(&data->client->dev, "Chip id %x\n", chip_id); |
714 | 714 | ||
715 | preset = bmc150_magn_presets_table[BMC150_MAGN_DEFAULT_PRESET]; | 715 | preset = bmc150_magn_presets_table[BMC150_MAGN_DEFAULT_PRESET]; |
716 | ret = bmc150_magn_set_odr(data, preset.odr); | 716 | ret = bmc150_magn_set_odr(data, preset.odr); |
diff --git a/drivers/iio/magnetometer/mmc35240.c b/drivers/iio/magnetometer/mmc35240.c index d927397a6ef7..706ebfd6297f 100644 --- a/drivers/iio/magnetometer/mmc35240.c +++ b/drivers/iio/magnetometer/mmc35240.c | |||
@@ -202,8 +202,8 @@ static int mmc35240_hw_set(struct mmc35240_data *data, bool set) | |||
202 | coil_bit = MMC35240_CTRL0_RESET_BIT; | 202 | coil_bit = MMC35240_CTRL0_RESET_BIT; |
203 | 203 | ||
204 | return regmap_update_bits(data->regmap, MMC35240_REG_CTRL0, | 204 | return regmap_update_bits(data->regmap, MMC35240_REG_CTRL0, |
205 | MMC35240_CTRL0_REFILL_BIT, | 205 | coil_bit, coil_bit); |
206 | coil_bit); | 206 | |
207 | } | 207 | } |
208 | 208 | ||
209 | static int mmc35240_init(struct mmc35240_data *data) | 209 | static int mmc35240_init(struct mmc35240_data *data) |
@@ -222,14 +222,15 @@ static int mmc35240_init(struct mmc35240_data *data) | |||
222 | 222 | ||
223 | /* | 223 | /* |
224 | * make sure we restore sensor characteristics, by doing | 224 | * make sure we restore sensor characteristics, by doing |
225 | * a RESET/SET sequence | 225 | * a SET/RESET sequence, the axis polarity being naturally |
226 | * aligned after RESET | ||
226 | */ | 227 | */ |
227 | ret = mmc35240_hw_set(data, false); | 228 | ret = mmc35240_hw_set(data, true); |
228 | if (ret < 0) | 229 | if (ret < 0) |
229 | return ret; | 230 | return ret; |
230 | usleep_range(MMC53240_WAIT_SET_RESET, MMC53240_WAIT_SET_RESET + 1); | 231 | usleep_range(MMC53240_WAIT_SET_RESET, MMC53240_WAIT_SET_RESET + 1); |
231 | 232 | ||
232 | ret = mmc35240_hw_set(data, true); | 233 | ret = mmc35240_hw_set(data, false); |
233 | if (ret < 0) | 234 | if (ret < 0) |
234 | return ret; | 235 | return ret; |
235 | 236 | ||
@@ -503,6 +504,7 @@ static int mmc35240_probe(struct i2c_client *client, | |||
503 | } | 504 | } |
504 | 505 | ||
505 | data = iio_priv(indio_dev); | 506 | data = iio_priv(indio_dev); |
507 | i2c_set_clientdata(client, indio_dev); | ||
506 | data->client = client; | 508 | data->client = client; |
507 | data->regmap = regmap; | 509 | data->regmap = regmap; |
508 | data->res = MMC35240_16_BITS_SLOW; | 510 | data->res = MMC35240_16_BITS_SLOW; |
diff --git a/drivers/iio/temperature/mlx90614.c b/drivers/iio/temperature/mlx90614.c index cb2e8ad8bfdc..7a2b639eaa96 100644 --- a/drivers/iio/temperature/mlx90614.c +++ b/drivers/iio/temperature/mlx90614.c | |||
@@ -204,7 +204,7 @@ static int mlx90614_read_raw(struct iio_dev *indio_dev, | |||
204 | *val = ret; | 204 | *val = ret; |
205 | return IIO_VAL_INT; | 205 | return IIO_VAL_INT; |
206 | case IIO_CHAN_INFO_OFFSET: | 206 | case IIO_CHAN_INFO_OFFSET: |
207 | *val = 13657; | 207 | *val = -13657; |
208 | *val2 = 500000; | 208 | *val2 = 500000; |
209 | return IIO_VAL_INT_PLUS_MICRO; | 209 | return IIO_VAL_INT_PLUS_MICRO; |
210 | case IIO_CHAN_INFO_SCALE: | 210 | case IIO_CHAN_INFO_SCALE: |
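
The sign flip above matters because IIO consumers compute the processed value as (raw + offset) * scale: the sensor counts in 0.02 K steps, so 0 degrees Celsius sits at raw 273.15 / 0.02 = 13657.5, and the offset has to be negative for the sum to come out in Celsius. A quick check of the arithmetic, with an invented raw reading and the scale expressed in kelvin per count:

#include <stdio.h>

int main(void)
{
	double raw    = 14657.5;	/* hypothetical object-temperature reading */
	double offset = -13657.5;	/* -(273.15 K / 0.02 K per count) */
	double scale  = 0.02;		/* kelvin per count */

	printf("%.2f degC\n", (raw + offset) * scale);	/* prints 20.00 */
	return 0;
}
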
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c index 2d7e503d13cb..871dbe56216a 100644 --- a/drivers/infiniband/hw/ipath/ipath_driver.c +++ b/drivers/infiniband/hw/ipath/ipath_driver.c | |||
@@ -31,6 +31,8 @@ | |||
31 | * SOFTWARE. | 31 | * SOFTWARE. |
32 | */ | 32 | */ |
33 | 33 | ||
34 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
35 | |||
34 | #include <linux/sched.h> | 36 | #include <linux/sched.h> |
35 | #include <linux/spinlock.h> | 37 | #include <linux/spinlock.h> |
36 | #include <linux/idr.h> | 38 | #include <linux/idr.h> |
@@ -399,8 +401,8 @@ static int ipath_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
399 | u32 bar0 = 0, bar1 = 0; | 401 | u32 bar0 = 0, bar1 = 0; |
400 | 402 | ||
401 | #ifdef CONFIG_X86_64 | 403 | #ifdef CONFIG_X86_64 |
402 | if (WARN(pat_enabled(), | 404 | if (pat_enabled()) { |
403 | "ipath needs PAT disabled, boot with nopat kernel parameter\n")) { | 405 | pr_warn("ipath needs PAT disabled, boot with nopat kernel parameter\n"); |
404 | ret = -ENODEV; | 406 | ret = -ENODEV; |
405 | goto bail; | 407 | goto bail; |
406 | } | 408 | } |
diff --git a/drivers/input/input-leds.c b/drivers/input/input-leds.c index 074a65ed17bb..766bf2660116 100644 --- a/drivers/input/input-leds.c +++ b/drivers/input/input-leds.c | |||
@@ -71,6 +71,18 @@ static void input_leds_event(struct input_handle *handle, unsigned int type, | |||
71 | { | 71 | { |
72 | } | 72 | } |
73 | 73 | ||
74 | static int input_leds_get_count(struct input_dev *dev) | ||
75 | { | ||
76 | unsigned int led_code; | ||
77 | int count = 0; | ||
78 | |||
79 | for_each_set_bit(led_code, dev->ledbit, LED_CNT) | ||
80 | if (input_led_info[led_code].name) | ||
81 | count++; | ||
82 | |||
83 | return count; | ||
84 | } | ||
85 | |||
74 | static int input_leds_connect(struct input_handler *handler, | 86 | static int input_leds_connect(struct input_handler *handler, |
75 | struct input_dev *dev, | 87 | struct input_dev *dev, |
76 | const struct input_device_id *id) | 88 | const struct input_device_id *id) |
@@ -81,7 +93,7 @@ static int input_leds_connect(struct input_handler *handler, | |||
81 | int led_no; | 93 | int led_no; |
82 | int error; | 94 | int error; |
83 | 95 | ||
84 | num_leds = bitmap_weight(dev->ledbit, LED_CNT); | 96 | num_leds = input_leds_get_count(dev); |
85 | if (!num_leds) | 97 | if (!num_leds) |
86 | return -ENXIO; | 98 | return -ENXIO; |
87 | 99 | ||
@@ -112,7 +124,7 @@ static int input_leds_connect(struct input_handler *handler, | |||
112 | led->handle = &leds->handle; | 124 | led->handle = &leds->handle; |
113 | led->code = led_code; | 125 | led->code = led_code; |
114 | 126 | ||
115 | if (WARN_ON(!input_led_info[led_code].name)) | 127 | if (!input_led_info[led_code].name) |
116 | continue; | 128 | continue; |
117 | 129 | ||
118 | led->cdev.name = kasprintf(GFP_KERNEL, "%s::%s", | 130 | led->cdev.name = kasprintf(GFP_KERNEL, "%s::%s", |
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index ce3d40004458..22b9ca901f4e 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c | |||
@@ -1167,7 +1167,7 @@ static int elantech_set_input_params(struct psmouse *psmouse) | |||
1167 | struct input_dev *dev = psmouse->dev; | 1167 | struct input_dev *dev = psmouse->dev; |
1168 | struct elantech_data *etd = psmouse->private; | 1168 | struct elantech_data *etd = psmouse->private; |
1169 | unsigned int x_min = 0, y_min = 0, x_max = 0, y_max = 0, width = 0; | 1169 | unsigned int x_min = 0, y_min = 0, x_max = 0, y_max = 0, width = 0; |
1170 | unsigned int x_res = 0, y_res = 0; | 1170 | unsigned int x_res = 31, y_res = 31; |
1171 | 1171 | ||
1172 | if (elantech_set_range(psmouse, &x_min, &y_min, &x_max, &y_max, &width)) | 1172 | if (elantech_set_range(psmouse, &x_min, &y_min, &x_max, &y_max, &width)) |
1173 | return -1; | 1173 | return -1; |
@@ -1232,8 +1232,6 @@ static int elantech_set_input_params(struct psmouse *psmouse) | |||
1232 | /* For X to recognize me as touchpad. */ | 1232 | /* For X to recognize me as touchpad. */ |
1233 | input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0); | 1233 | input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0); |
1234 | input_set_abs_params(dev, ABS_Y, y_min, y_max, 0, 0); | 1234 | input_set_abs_params(dev, ABS_Y, y_min, y_max, 0, 0); |
1235 | input_abs_set_res(dev, ABS_X, x_res); | ||
1236 | input_abs_set_res(dev, ABS_Y, y_res); | ||
1237 | /* | 1235 | /* |
1238 | * range of pressure and width is the same as v2, | 1236 | * range of pressure and width is the same as v2, |
1239 | * report ABS_PRESSURE, ABS_TOOL_WIDTH for compatibility. | 1237 | * report ABS_PRESSURE, ABS_TOOL_WIDTH for compatibility. |
@@ -1246,8 +1244,6 @@ static int elantech_set_input_params(struct psmouse *psmouse) | |||
1246 | input_mt_init_slots(dev, ETP_MAX_FINGERS, 0); | 1244 | input_mt_init_slots(dev, ETP_MAX_FINGERS, 0); |
1247 | input_set_abs_params(dev, ABS_MT_POSITION_X, x_min, x_max, 0, 0); | 1245 | input_set_abs_params(dev, ABS_MT_POSITION_X, x_min, x_max, 0, 0); |
1248 | input_set_abs_params(dev, ABS_MT_POSITION_Y, y_min, y_max, 0, 0); | 1246 | input_set_abs_params(dev, ABS_MT_POSITION_Y, y_min, y_max, 0, 0); |
1249 | input_abs_set_res(dev, ABS_MT_POSITION_X, x_res); | ||
1250 | input_abs_set_res(dev, ABS_MT_POSITION_Y, y_res); | ||
1251 | input_set_abs_params(dev, ABS_MT_PRESSURE, ETP_PMIN_V2, | 1247 | input_set_abs_params(dev, ABS_MT_PRESSURE, ETP_PMIN_V2, |
1252 | ETP_PMAX_V2, 0, 0); | 1248 | ETP_PMAX_V2, 0, 0); |
1253 | /* | 1249 | /* |
@@ -1259,6 +1255,13 @@ static int elantech_set_input_params(struct psmouse *psmouse) | |||
1259 | break; | 1255 | break; |
1260 | } | 1256 | } |
1261 | 1257 | ||
1258 | input_abs_set_res(dev, ABS_X, x_res); | ||
1259 | input_abs_set_res(dev, ABS_Y, y_res); | ||
1260 | if (etd->hw_version > 1) { | ||
1261 | input_abs_set_res(dev, ABS_MT_POSITION_X, x_res); | ||
1262 | input_abs_set_res(dev, ABS_MT_POSITION_Y, y_res); | ||
1263 | } | ||
1264 | |||
1262 | etd->y_max = y_max; | 1265 | etd->y_max = y_max; |
1263 | etd->width = width; | 1266 | etd->width = width; |
1264 | 1267 | ||
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c index b4d12e29abff..e36162b28c2a 100644 --- a/drivers/input/touchscreen/goodix.c +++ b/drivers/input/touchscreen/goodix.c | |||
@@ -15,6 +15,7 @@ | |||
15 | */ | 15 | */ |
16 | 16 | ||
17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
18 | #include <linux/dmi.h> | ||
18 | #include <linux/i2c.h> | 19 | #include <linux/i2c.h> |
19 | #include <linux/input.h> | 20 | #include <linux/input.h> |
20 | #include <linux/input/mt.h> | 21 | #include <linux/input/mt.h> |
@@ -34,6 +35,7 @@ struct goodix_ts_data { | |||
34 | int abs_y_max; | 35 | int abs_y_max; |
35 | unsigned int max_touch_num; | 36 | unsigned int max_touch_num; |
36 | unsigned int int_trigger_type; | 37 | unsigned int int_trigger_type; |
38 | bool rotated_screen; | ||
37 | }; | 39 | }; |
38 | 40 | ||
39 | #define GOODIX_MAX_HEIGHT 4096 | 41 | #define GOODIX_MAX_HEIGHT 4096 |
@@ -60,6 +62,30 @@ static const unsigned long goodix_irq_flags[] = { | |||
60 | IRQ_TYPE_LEVEL_HIGH, | 62 | IRQ_TYPE_LEVEL_HIGH, |
61 | }; | 63 | }; |
62 | 64 | ||
65 | /* | ||
66 | * Those tablets have their coordinates origin at the bottom right | ||
67 | * of the tablet, as if rotated 180 degrees | ||
68 | */ | ||
69 | static const struct dmi_system_id rotated_screen[] = { | ||
70 | #if defined(CONFIG_DMI) && defined(CONFIG_X86) | ||
71 | { | ||
72 | .ident = "WinBook TW100", | ||
73 | .matches = { | ||
74 | DMI_MATCH(DMI_SYS_VENDOR, "WinBook"), | ||
75 | DMI_MATCH(DMI_PRODUCT_NAME, "TW100") | ||
76 | } | ||
77 | }, | ||
78 | { | ||
79 | .ident = "WinBook TW700", | ||
80 | .matches = { | ||
81 | DMI_MATCH(DMI_SYS_VENDOR, "WinBook"), | ||
82 | DMI_MATCH(DMI_PRODUCT_NAME, "TW700") | ||
83 | }, | ||
84 | }, | ||
85 | #endif | ||
86 | {} | ||
87 | }; | ||
88 | |||
63 | /** | 89 | /** |
64 | * goodix_i2c_read - read data from a register of the i2c slave device. | 90 | * goodix_i2c_read - read data from a register of the i2c slave device. |
65 | * | 91 | * |
@@ -129,6 +155,11 @@ static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data) | |||
129 | int input_y = get_unaligned_le16(&coor_data[3]); | 155 | int input_y = get_unaligned_le16(&coor_data[3]); |
130 | int input_w = get_unaligned_le16(&coor_data[5]); | 156 | int input_w = get_unaligned_le16(&coor_data[5]); |
131 | 157 | ||
158 | if (ts->rotated_screen) { | ||
159 | input_x = ts->abs_x_max - input_x; | ||
160 | input_y = ts->abs_y_max - input_y; | ||
161 | } | ||
162 | |||
132 | input_mt_slot(ts->input_dev, id); | 163 | input_mt_slot(ts->input_dev, id); |
133 | input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, true); | 164 | input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, true); |
134 | input_report_abs(ts->input_dev, ABS_MT_POSITION_X, input_x); | 165 | input_report_abs(ts->input_dev, ABS_MT_POSITION_X, input_x); |
@@ -223,6 +254,11 @@ static void goodix_read_config(struct goodix_ts_data *ts) | |||
223 | ts->abs_y_max = GOODIX_MAX_HEIGHT; | 254 | ts->abs_y_max = GOODIX_MAX_HEIGHT; |
224 | ts->max_touch_num = GOODIX_MAX_CONTACTS; | 255 | ts->max_touch_num = GOODIX_MAX_CONTACTS; |
225 | } | 256 | } |
257 | |||
258 | ts->rotated_screen = dmi_check_system(rotated_screen); | ||
259 | if (ts->rotated_screen) | ||
260 | dev_dbg(&ts->client->dev, | ||
261 | "Applying '180 degrees rotated screen' quirk\n"); | ||
226 | } | 262 | } |
227 | 263 | ||
228 | /** | 264 | /** |
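
For the quirked WinBook tablets the controller reports coordinates from the opposite corner, so the report handler above simply reflects both axes about their maxima. A standalone sketch of that transform with invented panel dimensions:

#include <stdio.h>

int main(void)
{
	int abs_x_max = 1024, abs_y_max = 600;	/* invented panel maxima */
	int input_x = 100, input_y = 50;	/* raw point from the controller */

	/* a 180-degree rotation is a reflection of both axes about their maxima */
	int x = abs_x_max - input_x;
	int y = abs_y_max - input_y;

	printf("raw (%d, %d) -> reported (%d, %d)\n", input_x, input_y, x, y);
	return 0;
}
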
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c index f2c6c352c55a..2c41107240de 100644 --- a/drivers/input/touchscreen/usbtouchscreen.c +++ b/drivers/input/touchscreen/usbtouchscreen.c | |||
@@ -627,6 +627,9 @@ static int dmc_tsc10_init(struct usbtouch_usb *usbtouch) | |||
627 | goto err_out; | 627 | goto err_out; |
628 | } | 628 | } |
629 | 629 | ||
630 | /* TSC-25 data sheet specifies a delay after the RESET command */ | ||
631 | msleep(150); | ||
632 | |||
630 | /* set coordinate output rate */ | 633 | /* set coordinate output rate */ |
631 | buf[0] = buf[1] = 0xFF; | 634 | buf[0] = buf[1] = 0xFF; |
632 | ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0), | 635 | ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0), |
diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c index f58a196521a9..80285c71786e 100644 --- a/drivers/input/touchscreen/zforce_ts.c +++ b/drivers/input/touchscreen/zforce_ts.c | |||
@@ -429,7 +429,7 @@ static int zforce_read_packet(struct zforce_ts *ts, u8 *buf) | |||
429 | goto unlock; | 429 | goto unlock; |
430 | } | 430 | } |
431 | 431 | ||
432 | if (buf[PAYLOAD_LENGTH] == 0) { | 432 | if (buf[PAYLOAD_LENGTH] == 0 || buf[PAYLOAD_LENGTH] > FRAME_MAXSIZE) { |
433 | dev_err(&client->dev, "invalid payload length: %d\n", | 433 | dev_err(&client->dev, "invalid payload length: %d\n", |
434 | buf[PAYLOAD_LENGTH]); | 434 | buf[PAYLOAD_LENGTH]); |
435 | ret = -EIO; | 435 | ret = -EIO; |
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 8e9ec81ce4bb..da902baaa794 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c | |||
@@ -199,9 +199,10 @@ | |||
199 | * Stream table. | 199 | * Stream table. |
200 | * | 200 | * |
201 | * Linear: Enough to cover 1 << IDR1.SIDSIZE entries | 201 | * Linear: Enough to cover 1 << IDR1.SIDSIZE entries |
202 | * 2lvl: 8k L1 entries, 256 lazy entries per table (each table covers a PCI bus) | 202 | * 2lvl: 128k L1 entries, |
203 | * 256 lazy entries per table (each table covers a PCI bus) | ||
203 | */ | 204 | */ |
204 | #define STRTAB_L1_SZ_SHIFT 16 | 205 | #define STRTAB_L1_SZ_SHIFT 20 |
205 | #define STRTAB_SPLIT 8 | 206 | #define STRTAB_SPLIT 8 |
206 | 207 | ||
207 | #define STRTAB_L1_DESC_DWORDS 1 | 208 | #define STRTAB_L1_DESC_DWORDS 1 |
@@ -269,10 +270,10 @@ | |||
269 | #define ARM64_TCR_TG0_SHIFT 14 | 270 | #define ARM64_TCR_TG0_SHIFT 14 |
270 | #define ARM64_TCR_TG0_MASK 0x3UL | 271 | #define ARM64_TCR_TG0_MASK 0x3UL |
271 | #define CTXDESC_CD_0_TCR_IRGN0_SHIFT 8 | 272 | #define CTXDESC_CD_0_TCR_IRGN0_SHIFT 8 |
272 | #define ARM64_TCR_IRGN0_SHIFT 24 | 273 | #define ARM64_TCR_IRGN0_SHIFT 8 |
273 | #define ARM64_TCR_IRGN0_MASK 0x3UL | 274 | #define ARM64_TCR_IRGN0_MASK 0x3UL |
274 | #define CTXDESC_CD_0_TCR_ORGN0_SHIFT 10 | 275 | #define CTXDESC_CD_0_TCR_ORGN0_SHIFT 10 |
275 | #define ARM64_TCR_ORGN0_SHIFT 26 | 276 | #define ARM64_TCR_ORGN0_SHIFT 10 |
276 | #define ARM64_TCR_ORGN0_MASK 0x3UL | 277 | #define ARM64_TCR_ORGN0_MASK 0x3UL |
277 | #define CTXDESC_CD_0_TCR_SH0_SHIFT 12 | 278 | #define CTXDESC_CD_0_TCR_SH0_SHIFT 12 |
278 | #define ARM64_TCR_SH0_SHIFT 12 | 279 | #define ARM64_TCR_SH0_SHIFT 12 |
@@ -542,6 +543,9 @@ struct arm_smmu_device { | |||
542 | #define ARM_SMMU_FEAT_HYP (1 << 12) | 543 | #define ARM_SMMU_FEAT_HYP (1 << 12) |
543 | u32 features; | 544 | u32 features; |
544 | 545 | ||
546 | #define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0) | ||
547 | u32 options; | ||
548 | |||
545 | struct arm_smmu_cmdq cmdq; | 549 | struct arm_smmu_cmdq cmdq; |
546 | struct arm_smmu_evtq evtq; | 550 | struct arm_smmu_evtq evtq; |
547 | struct arm_smmu_priq priq; | 551 | struct arm_smmu_priq priq; |
@@ -602,11 +606,35 @@ struct arm_smmu_domain { | |||
602 | static DEFINE_SPINLOCK(arm_smmu_devices_lock); | 606 | static DEFINE_SPINLOCK(arm_smmu_devices_lock); |
603 | static LIST_HEAD(arm_smmu_devices); | 607 | static LIST_HEAD(arm_smmu_devices); |
604 | 608 | ||
609 | struct arm_smmu_option_prop { | ||
610 | u32 opt; | ||
611 | const char *prop; | ||
612 | }; | ||
613 | |||
614 | static struct arm_smmu_option_prop arm_smmu_options[] = { | ||
615 | { ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" }, | ||
616 | { 0, NULL}, | ||
617 | }; | ||
618 | |||
605 | static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom) | 619 | static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom) |
606 | { | 620 | { |
607 | return container_of(dom, struct arm_smmu_domain, domain); | 621 | return container_of(dom, struct arm_smmu_domain, domain); |
608 | } | 622 | } |
609 | 623 | ||
624 | static void parse_driver_options(struct arm_smmu_device *smmu) | ||
625 | { | ||
626 | int i = 0; | ||
627 | |||
628 | do { | ||
629 | if (of_property_read_bool(smmu->dev->of_node, | ||
630 | arm_smmu_options[i].prop)) { | ||
631 | smmu->options |= arm_smmu_options[i].opt; | ||
632 | dev_notice(smmu->dev, "option %s\n", | ||
633 | arm_smmu_options[i].prop); | ||
634 | } | ||
635 | } while (arm_smmu_options[++i].opt); | ||
636 | } | ||
637 | |||
610 | /* Low-level queue manipulation functions */ | 638 | /* Low-level queue manipulation functions */ |
611 | static bool queue_full(struct arm_smmu_queue *q) | 639 | static bool queue_full(struct arm_smmu_queue *q) |
612 | { | 640 | { |
@@ -1036,7 +1064,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid, | |||
1036 | arm_smmu_sync_ste_for_sid(smmu, sid); | 1064 | arm_smmu_sync_ste_for_sid(smmu, sid); |
1037 | 1065 | ||
1038 | /* It's likely that we'll want to use the new STE soon */ | 1066 | /* It's likely that we'll want to use the new STE soon */ |
1039 | arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd); | 1067 | if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH)) |
1068 | arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd); | ||
1040 | } | 1069 | } |
1041 | 1070 | ||
1042 | static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent) | 1071 | static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent) |
@@ -1064,7 +1093,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid) | |||
1064 | return 0; | 1093 | return 0; |
1065 | 1094 | ||
1066 | size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3); | 1095 | size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3); |
1067 | strtab = &cfg->strtab[sid >> STRTAB_SPLIT << STRTAB_L1_DESC_DWORDS]; | 1096 | strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS]; |
1068 | 1097 | ||
1069 | desc->span = STRTAB_SPLIT + 1; | 1098 | desc->span = STRTAB_SPLIT + 1; |
1070 | desc->l2ptr = dma_zalloc_coherent(smmu->dev, size, &desc->l2ptr_dma, | 1099 | desc->l2ptr = dma_zalloc_coherent(smmu->dev, size, &desc->l2ptr_dma, |
@@ -2020,21 +2049,23 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu) | |||
2020 | { | 2049 | { |
2021 | void *strtab; | 2050 | void *strtab; |
2022 | u64 reg; | 2051 | u64 reg; |
2023 | u32 size; | 2052 | u32 size, l1size; |
2024 | int ret; | 2053 | int ret; |
2025 | struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; | 2054 | struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; |
2026 | 2055 | ||
2027 | /* Calculate the L1 size, capped to the SIDSIZE */ | 2056 | /* Calculate the L1 size, capped to the SIDSIZE */ |
2028 | size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3); | 2057 | size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3); |
2029 | size = min(size, smmu->sid_bits - STRTAB_SPLIT); | 2058 | size = min(size, smmu->sid_bits - STRTAB_SPLIT); |
2030 | if (size + STRTAB_SPLIT < smmu->sid_bits) | 2059 | cfg->num_l1_ents = 1 << size; |
2060 | |||
2061 | size += STRTAB_SPLIT; | ||
2062 | if (size < smmu->sid_bits) | ||
2031 | dev_warn(smmu->dev, | 2063 | dev_warn(smmu->dev, |
2032 | "2-level strtab only covers %u/%u bits of SID\n", | 2064 | "2-level strtab only covers %u/%u bits of SID\n", |
2033 | size + STRTAB_SPLIT, smmu->sid_bits); | 2065 | size, smmu->sid_bits); |
2034 | 2066 | ||
2035 | cfg->num_l1_ents = 1 << size; | 2067 | l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3); |
2036 | size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3); | 2068 | strtab = dma_zalloc_coherent(smmu->dev, l1size, &cfg->strtab_dma, |
2037 | strtab = dma_zalloc_coherent(smmu->dev, size, &cfg->strtab_dma, | ||
2038 | GFP_KERNEL); | 2069 | GFP_KERNEL); |
2039 | if (!strtab) { | 2070 | if (!strtab) { |
2040 | dev_err(smmu->dev, | 2071 | dev_err(smmu->dev, |
@@ -2055,8 +2086,7 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu) | |||
2055 | ret = arm_smmu_init_l1_strtab(smmu); | 2086 | ret = arm_smmu_init_l1_strtab(smmu); |
2056 | if (ret) | 2087 | if (ret) |
2057 | dma_free_coherent(smmu->dev, | 2088 | dma_free_coherent(smmu->dev, |
2058 | cfg->num_l1_ents * | 2089 | l1size, |
2059 | (STRTAB_L1_DESC_DWORDS << 3), | ||
2060 | strtab, | 2090 | strtab, |
2061 | cfg->strtab_dma); | 2091 | cfg->strtab_dma); |
2062 | return ret; | 2092 | return ret; |
@@ -2573,6 +2603,8 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) | |||
2573 | if (irq > 0) | 2603 | if (irq > 0) |
2574 | smmu->gerr_irq = irq; | 2604 | smmu->gerr_irq = irq; |
2575 | 2605 | ||
2606 | parse_driver_options(smmu); | ||
2607 | |||
2576 | /* Probe the h/w */ | 2608 | /* Probe the h/w */ |
2577 | ret = arm_smmu_device_probe(smmu); | 2609 | ret = arm_smmu_device_probe(smmu); |
2578 | if (ret) | 2610 | if (ret) |
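
Of the stream-table changes above, the L2 descriptor lookup fix is easiest to see with numbers: each L1 entry is a single 64-bit descriptor, so the index has to be multiplied by STRTAB_L1_DESC_DWORDS (which is 1), not shifted by it, and the old shift silently doubled the index. A throwaway sketch with an arbitrary stream ID:

#include <stdio.h>

#define STRTAB_SPLIT		8
#define STRTAB_L1_DESC_DWORDS	1

int main(void)
{
	unsigned int sid = 0x345;	/* arbitrary stream ID */

	unsigned int old_idx = sid >> STRTAB_SPLIT << STRTAB_L1_DESC_DWORDS;	/* shift: 3 << 1 = 6 */
	unsigned int new_idx = (sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS;	/* multiply: 3 * 1 = 3 */

	printf("old index = %u, fixed index = %u\n", old_idx, new_idx);
	return 0;
}
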
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index a98a7b27aca1..0649b94f5958 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
@@ -1830,8 +1830,9 @@ static int domain_init(struct dmar_domain *domain, int guest_width) | |||
1830 | 1830 | ||
1831 | static void domain_exit(struct dmar_domain *domain) | 1831 | static void domain_exit(struct dmar_domain *domain) |
1832 | { | 1832 | { |
1833 | struct dmar_drhd_unit *drhd; | ||
1834 | struct intel_iommu *iommu; | ||
1833 | struct page *freelist = NULL; | 1835 | struct page *freelist = NULL; |
1834 | int i; | ||
1835 | 1836 | ||
1836 | /* Domain 0 is reserved, so dont process it */ | 1837 | /* Domain 0 is reserved, so dont process it */ |
1837 | if (!domain) | 1838 | if (!domain) |
@@ -1851,8 +1852,10 @@ static void domain_exit(struct dmar_domain *domain) | |||
1851 | 1852 | ||
1852 | /* clear attached or cached domains */ | 1853 | /* clear attached or cached domains */ |
1853 | rcu_read_lock(); | 1854 | rcu_read_lock(); |
1854 | for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) | 1855 | for_each_active_iommu(iommu, drhd) |
1855 | iommu_detach_domain(domain, g_iommus[i]); | 1856 | if (domain_type_is_vm(domain) || |
1857 | test_bit(iommu->seq_id, domain->iommu_bmp)) | ||
1858 | iommu_detach_domain(domain, iommu); | ||
1856 | rcu_read_unlock(); | 1859 | rcu_read_unlock(); |
1857 | 1860 | ||
1858 | dma_free_pagelist(freelist); | 1861 | dma_free_pagelist(freelist); |
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c index 8c91fd5eb6fd..375be509e95f 100644 --- a/drivers/isdn/gigaset/ser-gigaset.c +++ b/drivers/isdn/gigaset/ser-gigaset.c | |||
@@ -524,9 +524,18 @@ gigaset_tty_open(struct tty_struct *tty) | |||
524 | cs->hw.ser->tty = tty; | 524 | cs->hw.ser->tty = tty; |
525 | atomic_set(&cs->hw.ser->refcnt, 1); | 525 | atomic_set(&cs->hw.ser->refcnt, 1); |
526 | init_completion(&cs->hw.ser->dead_cmp); | 526 | init_completion(&cs->hw.ser->dead_cmp); |
527 | |||
528 | tty->disc_data = cs; | 527 | tty->disc_data = cs; |
529 | 528 | ||
529 | /* Set the amount of data we're willing to receive per call | ||
530 | * from the hardware driver to half of the input buffer size | ||
531 | * to leave some reserve. | ||
532 | * Note: We don't do flow control towards the hardware driver. | ||
533 | * If more data is received than will fit into the input buffer, | ||
534 | * it will be dropped and an error will be logged. This should | ||
535 | * never happen as the device is slow and the buffer size ample. | ||
536 | */ | ||
537 | tty->receive_room = RBUFSIZE/2; | ||
538 | |||
530 | /* OK.. Initialization of the datastructures and the HW is done.. Now | 539 | /* OK.. Initialization of the datastructures and the HW is done.. Now |
531 | * startup system and notify the LL that we are ready to run | 540 | * startup system and notify the LL that we are ready to run |
532 | */ | 541 | */ |
@@ -598,28 +607,6 @@ static int gigaset_tty_hangup(struct tty_struct *tty) | |||
598 | } | 607 | } |
599 | 608 | ||
600 | /* | 609 | /* |
601 | * Read on the tty. | ||
602 | * Unused, received data goes only to the Gigaset driver. | ||
603 | */ | ||
604 | static ssize_t | ||
605 | gigaset_tty_read(struct tty_struct *tty, struct file *file, | ||
606 | unsigned char __user *buf, size_t count) | ||
607 | { | ||
608 | return -EAGAIN; | ||
609 | } | ||
610 | |||
611 | /* | ||
612 | * Write on the tty. | ||
613 | * Unused, transmit data comes only from the Gigaset driver. | ||
614 | */ | ||
615 | static ssize_t | ||
616 | gigaset_tty_write(struct tty_struct *tty, struct file *file, | ||
617 | const unsigned char *buf, size_t count) | ||
618 | { | ||
619 | return -EAGAIN; | ||
620 | } | ||
621 | |||
622 | /* | ||
623 | * Ioctl on the tty. | 610 | * Ioctl on the tty. |
624 | * Called in process context only. | 611 | * Called in process context only. |
625 | * May be re-entered by multiple ioctl calling threads. | 612 | * May be re-entered by multiple ioctl calling threads. |
@@ -752,8 +739,6 @@ static struct tty_ldisc_ops gigaset_ldisc = { | |||
752 | .open = gigaset_tty_open, | 739 | .open = gigaset_tty_open, |
753 | .close = gigaset_tty_close, | 740 | .close = gigaset_tty_close, |
754 | .hangup = gigaset_tty_hangup, | 741 | .hangup = gigaset_tty_hangup, |
755 | .read = gigaset_tty_read, | ||
756 | .write = gigaset_tty_write, | ||
757 | .ioctl = gigaset_tty_ioctl, | 742 | .ioctl = gigaset_tty_ioctl, |
758 | .receive_buf = gigaset_tty_receive, | 743 | .receive_buf = gigaset_tty_receive, |
759 | .write_wakeup = gigaset_tty_wakeup, | 744 | .write_wakeup = gigaset_tty_wakeup, |
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index ed2346ddf4c9..e51de52eeb94 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c | |||
@@ -494,7 +494,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap) | |||
494 | bitmap_super_t *sb; | 494 | bitmap_super_t *sb; |
495 | unsigned long chunksize, daemon_sleep, write_behind; | 495 | unsigned long chunksize, daemon_sleep, write_behind; |
496 | 496 | ||
497 | bitmap->storage.sb_page = alloc_page(GFP_KERNEL); | 497 | bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO); |
498 | if (bitmap->storage.sb_page == NULL) | 498 | if (bitmap->storage.sb_page == NULL) |
499 | return -ENOMEM; | 499 | return -ENOMEM; |
500 | bitmap->storage.sb_page->index = 0; | 500 | bitmap->storage.sb_page->index = 0; |
@@ -541,6 +541,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap) | |||
541 | sb->state = cpu_to_le32(bitmap->flags); | 541 | sb->state = cpu_to_le32(bitmap->flags); |
542 | bitmap->events_cleared = bitmap->mddev->events; | 542 | bitmap->events_cleared = bitmap->mddev->events; |
543 | sb->events_cleared = cpu_to_le64(bitmap->mddev->events); | 543 | sb->events_cleared = cpu_to_le64(bitmap->mddev->events); |
544 | bitmap->mddev->bitmap_info.nodes = 0; | ||
544 | 545 | ||
545 | kunmap_atomic(sb); | 546 | kunmap_atomic(sb); |
546 | 547 | ||
@@ -558,6 +559,7 @@ static int bitmap_read_sb(struct bitmap *bitmap) | |||
558 | unsigned long sectors_reserved = 0; | 559 | unsigned long sectors_reserved = 0; |
559 | int err = -EINVAL; | 560 | int err = -EINVAL; |
560 | struct page *sb_page; | 561 | struct page *sb_page; |
562 | loff_t offset = bitmap->mddev->bitmap_info.offset; | ||
561 | 563 | ||
562 | if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) { | 564 | if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) { |
563 | chunksize = 128 * 1024 * 1024; | 565 | chunksize = 128 * 1024 * 1024; |
@@ -584,9 +586,9 @@ re_read: | |||
584 | bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t); | 586 | bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t); |
585 | /* to 4k blocks */ | 587 | /* to 4k blocks */ |
586 | bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096); | 588 | bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096); |
587 | bitmap->mddev->bitmap_info.offset += bitmap->cluster_slot * (bm_blocks << 3); | 589 | offset = bitmap->mddev->bitmap_info.offset + (bitmap->cluster_slot * (bm_blocks << 3)); |
588 | pr_info("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__, | 590 | pr_info("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__, |
589 | bitmap->cluster_slot, (unsigned long long)bitmap->mddev->bitmap_info.offset); | 591 | bitmap->cluster_slot, offset); |
590 | } | 592 | } |
591 | 593 | ||
592 | if (bitmap->storage.file) { | 594 | if (bitmap->storage.file) { |
@@ -597,7 +599,7 @@ re_read: | |||
597 | bitmap, bytes, sb_page); | 599 | bitmap, bytes, sb_page); |
598 | } else { | 600 | } else { |
599 | err = read_sb_page(bitmap->mddev, | 601 | err = read_sb_page(bitmap->mddev, |
600 | bitmap->mddev->bitmap_info.offset, | 602 | offset, |
601 | sb_page, | 603 | sb_page, |
602 | 0, sizeof(bitmap_super_t)); | 604 | 0, sizeof(bitmap_super_t)); |
603 | } | 605 | } |
@@ -611,8 +613,16 @@ re_read: | |||
611 | daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ; | 613 | daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ; |
612 | write_behind = le32_to_cpu(sb->write_behind); | 614 | write_behind = le32_to_cpu(sb->write_behind); |
613 | sectors_reserved = le32_to_cpu(sb->sectors_reserved); | 615 | sectors_reserved = le32_to_cpu(sb->sectors_reserved); |
614 | nodes = le32_to_cpu(sb->nodes); | 616 | /* XXX: This is a hack to ensure that we don't use clustering |
615 | strlcpy(bitmap->mddev->bitmap_info.cluster_name, sb->cluster_name, 64); | 617 | * in case: |
618 | * - dm-raid is in use and | ||
619 | * - the nodes written in bitmap_sb is erroneous. | ||
620 | */ | ||
621 | if (!bitmap->mddev->sync_super) { | ||
622 | nodes = le32_to_cpu(sb->nodes); | ||
623 | strlcpy(bitmap->mddev->bitmap_info.cluster_name, | ||
624 | sb->cluster_name, 64); | ||
625 | } | ||
616 | 626 | ||
617 | /* verify that the bitmap-specific fields are valid */ | 627 | /* verify that the bitmap-specific fields are valid */ |
618 | if (sb->magic != cpu_to_le32(BITMAP_MAGIC)) | 628 | if (sb->magic != cpu_to_le32(BITMAP_MAGIC)) |
@@ -671,7 +681,7 @@ out: | |||
671 | kunmap_atomic(sb); | 681 | kunmap_atomic(sb); |
672 | /* Assiging chunksize is required for "re_read" */ | 682 | /* Assiging chunksize is required for "re_read" */ |
673 | bitmap->mddev->bitmap_info.chunksize = chunksize; | 683 | bitmap->mddev->bitmap_info.chunksize = chunksize; |
674 | if (nodes && (bitmap->cluster_slot < 0)) { | 684 | if (err == 0 && nodes && (bitmap->cluster_slot < 0)) { |
675 | err = md_setup_cluster(bitmap->mddev, nodes); | 685 | err = md_setup_cluster(bitmap->mddev, nodes); |
676 | if (err) { | 686 | if (err) { |
677 | pr_err("%s: Could not setup cluster service (%d)\n", | 687 | pr_err("%s: Could not setup cluster service (%d)\n", |
@@ -1866,10 +1876,6 @@ int bitmap_copy_from_slot(struct mddev *mddev, int slot, | |||
1866 | if (IS_ERR(bitmap)) | 1876 | if (IS_ERR(bitmap)) |
1867 | return PTR_ERR(bitmap); | 1877 | return PTR_ERR(bitmap); |
1868 | 1878 | ||
1869 | rv = bitmap_read_sb(bitmap); | ||
1870 | if (rv) | ||
1871 | goto err; | ||
1872 | |||
1873 | rv = bitmap_init_from_disk(bitmap, 0); | 1879 | rv = bitmap_init_from_disk(bitmap, 0); |
1874 | if (rv) | 1880 | if (rv) |
1875 | goto err; | 1881 | goto err; |
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c index fcfc4b9b2672..0072190515e0 100644 --- a/drivers/md/md-cluster.c +++ b/drivers/md/md-cluster.c | |||
@@ -44,6 +44,7 @@ struct resync_info { | |||
44 | 44 | ||
45 | /* md_cluster_info flags */ | 45 | /* md_cluster_info flags */ |
46 | #define MD_CLUSTER_WAITING_FOR_NEWDISK 1 | 46 | #define MD_CLUSTER_WAITING_FOR_NEWDISK 1 |
47 | #define MD_CLUSTER_SUSPEND_READ_BALANCING 2 | ||
47 | 48 | ||
48 | 49 | ||
49 | struct md_cluster_info { | 50 | struct md_cluster_info { |
@@ -275,6 +276,9 @@ clear_bit: | |||
275 | 276 | ||
276 | static void recover_prep(void *arg) | 277 | static void recover_prep(void *arg) |
277 | { | 278 | { |
279 | struct mddev *mddev = arg; | ||
280 | struct md_cluster_info *cinfo = mddev->cluster_info; | ||
281 | set_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state); | ||
278 | } | 282 | } |
279 | 283 | ||
280 | static void recover_slot(void *arg, struct dlm_slot *slot) | 284 | static void recover_slot(void *arg, struct dlm_slot *slot) |
@@ -307,6 +311,7 @@ static void recover_done(void *arg, struct dlm_slot *slots, | |||
307 | 311 | ||
308 | cinfo->slot_number = our_slot; | 312 | cinfo->slot_number = our_slot; |
309 | complete(&cinfo->completion); | 313 | complete(&cinfo->completion); |
314 | clear_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state); | ||
310 | } | 315 | } |
311 | 316 | ||
312 | static const struct dlm_lockspace_ops md_ls_ops = { | 317 | static const struct dlm_lockspace_ops md_ls_ops = { |
@@ -816,12 +821,17 @@ static void resync_finish(struct mddev *mddev) | |||
816 | resync_send(mddev, RESYNCING, 0, 0); | 821 | resync_send(mddev, RESYNCING, 0, 0); |
817 | } | 822 | } |
818 | 823 | ||
819 | static int area_resyncing(struct mddev *mddev, sector_t lo, sector_t hi) | 824 | static int area_resyncing(struct mddev *mddev, int direction, |
825 | sector_t lo, sector_t hi) | ||
820 | { | 826 | { |
821 | struct md_cluster_info *cinfo = mddev->cluster_info; | 827 | struct md_cluster_info *cinfo = mddev->cluster_info; |
822 | int ret = 0; | 828 | int ret = 0; |
823 | struct suspend_info *s; | 829 | struct suspend_info *s; |
824 | 830 | ||
831 | if ((direction == READ) && | ||
832 | test_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state)) | ||
833 | return 1; | ||
834 | |||
825 | spin_lock_irq(&cinfo->suspend_lock); | 835 | spin_lock_irq(&cinfo->suspend_lock); |
826 | if (list_empty(&cinfo->suspend_list)) | 836 | if (list_empty(&cinfo->suspend_list)) |
827 | goto out; | 837 | goto out; |
diff --git a/drivers/md/md-cluster.h b/drivers/md/md-cluster.h index 6817ee00e053..00defe2badbc 100644 --- a/drivers/md/md-cluster.h +++ b/drivers/md/md-cluster.h | |||
@@ -18,7 +18,7 @@ struct md_cluster_operations { | |||
18 | int (*metadata_update_start)(struct mddev *mddev); | 18 | int (*metadata_update_start)(struct mddev *mddev); |
19 | int (*metadata_update_finish)(struct mddev *mddev); | 19 | int (*metadata_update_finish)(struct mddev *mddev); |
20 | int (*metadata_update_cancel)(struct mddev *mddev); | 20 | int (*metadata_update_cancel)(struct mddev *mddev); |
21 | int (*area_resyncing)(struct mddev *mddev, sector_t lo, sector_t hi); | 21 | int (*area_resyncing)(struct mddev *mddev, int direction, sector_t lo, sector_t hi); |
22 | int (*add_new_disk_start)(struct mddev *mddev, struct md_rdev *rdev); | 22 | int (*add_new_disk_start)(struct mddev *mddev, struct md_rdev *rdev); |
23 | int (*add_new_disk_finish)(struct mddev *mddev); | 23 | int (*add_new_disk_finish)(struct mddev *mddev); |
24 | int (*new_disk_ack)(struct mddev *mddev, bool ack); | 24 | int (*new_disk_ack)(struct mddev *mddev, bool ack); |
diff --git a/drivers/md/md.c b/drivers/md/md.c index d429c30cd514..0c2a4e8b873c 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -5382,6 +5382,8 @@ static void __md_stop(struct mddev *mddev) | |||
5382 | { | 5382 | { |
5383 | struct md_personality *pers = mddev->pers; | 5383 | struct md_personality *pers = mddev->pers; |
5384 | mddev_detach(mddev); | 5384 | mddev_detach(mddev); |
5385 | /* Ensure ->event_work is done */ | ||
5386 | flush_workqueue(md_misc_wq); | ||
5385 | spin_lock(&mddev->lock); | 5387 | spin_lock(&mddev->lock); |
5386 | mddev->ready = 0; | 5388 | mddev->ready = 0; |
5387 | mddev->pers = NULL; | 5389 | mddev->pers = NULL; |
@@ -7437,7 +7439,7 @@ int md_setup_cluster(struct mddev *mddev, int nodes) | |||
7437 | err = request_module("md-cluster"); | 7439 | err = request_module("md-cluster"); |
7438 | if (err) { | 7440 | if (err) { |
7439 | pr_err("md-cluster module not found.\n"); | 7441 | pr_err("md-cluster module not found.\n"); |
7440 | return err; | 7442 | return -ENOENT; |
7441 | } | 7443 | } |
7442 | 7444 | ||
7443 | spin_lock(&pers_lock); | 7445 | spin_lock(&pers_lock); |
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index f80f1af61ce7..94f5b55069e0 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -336,7 +336,7 @@ static void raid1_end_read_request(struct bio *bio, int error) | |||
336 | spin_lock_irqsave(&conf->device_lock, flags); | 336 | spin_lock_irqsave(&conf->device_lock, flags); |
337 | if (r1_bio->mddev->degraded == conf->raid_disks || | 337 | if (r1_bio->mddev->degraded == conf->raid_disks || |
338 | (r1_bio->mddev->degraded == conf->raid_disks-1 && | 338 | (r1_bio->mddev->degraded == conf->raid_disks-1 && |
339 | !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags))) | 339 | test_bit(In_sync, &conf->mirrors[mirror].rdev->flags))) |
340 | uptodate = 1; | 340 | uptodate = 1; |
341 | spin_unlock_irqrestore(&conf->device_lock, flags); | 341 | spin_unlock_irqrestore(&conf->device_lock, flags); |
342 | } | 342 | } |
@@ -541,7 +541,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect | |||
541 | 541 | ||
542 | if ((conf->mddev->recovery_cp < this_sector + sectors) || | 542 | if ((conf->mddev->recovery_cp < this_sector + sectors) || |
543 | (mddev_is_clustered(conf->mddev) && | 543 | (mddev_is_clustered(conf->mddev) && |
544 | md_cluster_ops->area_resyncing(conf->mddev, this_sector, | 544 | md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector, |
545 | this_sector + sectors))) | 545 | this_sector + sectors))) |
546 | choose_first = 1; | 546 | choose_first = 1; |
547 | else | 547 | else |
@@ -1111,7 +1111,8 @@ static void make_request(struct mddev *mddev, struct bio * bio) | |||
1111 | ((bio_end_sector(bio) > mddev->suspend_lo && | 1111 | ((bio_end_sector(bio) > mddev->suspend_lo && |
1112 | bio->bi_iter.bi_sector < mddev->suspend_hi) || | 1112 | bio->bi_iter.bi_sector < mddev->suspend_hi) || |
1113 | (mddev_is_clustered(mddev) && | 1113 | (mddev_is_clustered(mddev) && |
1114 | md_cluster_ops->area_resyncing(mddev, bio->bi_iter.bi_sector, bio_end_sector(bio))))) { | 1114 | md_cluster_ops->area_resyncing(mddev, WRITE, |
1115 | bio->bi_iter.bi_sector, bio_end_sector(bio))))) { | ||
1115 | /* As the suspend_* range is controlled by | 1116 | /* As the suspend_* range is controlled by |
1116 | * userspace, we want an interruptible | 1117 | * userspace, we want an interruptible |
1117 | * wait. | 1118 | * wait. |
@@ -1124,7 +1125,7 @@ static void make_request(struct mddev *mddev, struct bio * bio) | |||
1124 | if (bio_end_sector(bio) <= mddev->suspend_lo || | 1125 | if (bio_end_sector(bio) <= mddev->suspend_lo || |
1125 | bio->bi_iter.bi_sector >= mddev->suspend_hi || | 1126 | bio->bi_iter.bi_sector >= mddev->suspend_hi || |
1126 | (mddev_is_clustered(mddev) && | 1127 | (mddev_is_clustered(mddev) && |
1127 | !md_cluster_ops->area_resyncing(mddev, | 1128 | !md_cluster_ops->area_resyncing(mddev, WRITE, |
1128 | bio->bi_iter.bi_sector, bio_end_sector(bio)))) | 1129 | bio->bi_iter.bi_sector, bio_end_sector(bio)))) |
1129 | break; | 1130 | break; |
1130 | schedule(); | 1131 | schedule(); |
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 940f2f365461..38c58e19cfce 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -3556,6 +3556,7 @@ static struct r10conf *setup_conf(struct mddev *mddev) | |||
3556 | /* far_copies must be 1 */ | 3556 | /* far_copies must be 1 */ |
3557 | conf->prev.stride = conf->dev_sectors; | 3557 | conf->prev.stride = conf->dev_sectors; |
3558 | } | 3558 | } |
3559 | conf->reshape_safe = conf->reshape_progress; | ||
3559 | spin_lock_init(&conf->device_lock); | 3560 | spin_lock_init(&conf->device_lock); |
3560 | INIT_LIST_HEAD(&conf->retry_list); | 3561 | INIT_LIST_HEAD(&conf->retry_list); |
3561 | 3562 | ||
@@ -3760,7 +3761,6 @@ static int run(struct mddev *mddev) | |||
3760 | } | 3761 | } |
3761 | conf->offset_diff = min_offset_diff; | 3762 | conf->offset_diff = min_offset_diff; |
3762 | 3763 | ||
3763 | conf->reshape_safe = conf->reshape_progress; | ||
3764 | clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); | 3764 | clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); |
3765 | clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); | 3765 | clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); |
3766 | set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); | 3766 | set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); |
@@ -4103,6 +4103,7 @@ static int raid10_start_reshape(struct mddev *mddev) | |||
4103 | conf->reshape_progress = size; | 4103 | conf->reshape_progress = size; |
4104 | } else | 4104 | } else |
4105 | conf->reshape_progress = 0; | 4105 | conf->reshape_progress = 0; |
4106 | conf->reshape_safe = conf->reshape_progress; | ||
4106 | spin_unlock_irq(&conf->device_lock); | 4107 | spin_unlock_irq(&conf->device_lock); |
4107 | 4108 | ||
4108 | if (mddev->delta_disks && mddev->bitmap) { | 4109 | if (mddev->delta_disks && mddev->bitmap) { |
@@ -4170,6 +4171,7 @@ abort: | |||
4170 | rdev->new_data_offset = rdev->data_offset; | 4171 | rdev->new_data_offset = rdev->data_offset; |
4171 | smp_wmb(); | 4172 | smp_wmb(); |
4172 | conf->reshape_progress = MaxSector; | 4173 | conf->reshape_progress = MaxSector; |
4174 | conf->reshape_safe = MaxSector; | ||
4173 | mddev->reshape_position = MaxSector; | 4175 | mddev->reshape_position = MaxSector; |
4174 | spin_unlock_irq(&conf->device_lock); | 4176 | spin_unlock_irq(&conf->device_lock); |
4175 | return ret; | 4177 | return ret; |
@@ -4524,6 +4526,7 @@ static void end_reshape(struct r10conf *conf) | |||
4524 | md_finish_reshape(conf->mddev); | 4526 | md_finish_reshape(conf->mddev); |
4525 | smp_wmb(); | 4527 | smp_wmb(); |
4526 | conf->reshape_progress = MaxSector; | 4528 | conf->reshape_progress = MaxSector; |
4529 | conf->reshape_safe = MaxSector; | ||
4527 | spin_unlock_irq(&conf->device_lock); | 4530 | spin_unlock_irq(&conf->device_lock); |
4528 | 4531 | ||
4529 | /* read-ahead size must cover two whole stripes, which is | 4532 | /* read-ahead size must cover two whole stripes, which is |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 59e44e99eef3..643d217bfa13 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -2162,6 +2162,9 @@ static int resize_stripes(struct r5conf *conf, int newsize) | |||
2162 | if (!sc) | 2162 | if (!sc) |
2163 | return -ENOMEM; | 2163 | return -ENOMEM; |
2164 | 2164 | ||
2165 | /* Need to ensure auto-resizing doesn't interfere */ | ||
2166 | mutex_lock(&conf->cache_size_mutex); | ||
2167 | |||
2165 | for (i = conf->max_nr_stripes; i; i--) { | 2168 | for (i = conf->max_nr_stripes; i; i--) { |
2166 | nsh = alloc_stripe(sc, GFP_KERNEL); | 2169 | nsh = alloc_stripe(sc, GFP_KERNEL); |
2167 | if (!nsh) | 2170 | if (!nsh) |
@@ -2178,6 +2181,7 @@ static int resize_stripes(struct r5conf *conf, int newsize) | |||
2178 | kmem_cache_free(sc, nsh); | 2181 | kmem_cache_free(sc, nsh); |
2179 | } | 2182 | } |
2180 | kmem_cache_destroy(sc); | 2183 | kmem_cache_destroy(sc); |
2184 | mutex_unlock(&conf->cache_size_mutex); | ||
2181 | return -ENOMEM; | 2185 | return -ENOMEM; |
2182 | } | 2186 | } |
2183 | /* Step 2 - Must use GFP_NOIO now. | 2187 | /* Step 2 - Must use GFP_NOIO now. |
@@ -2224,6 +2228,7 @@ static int resize_stripes(struct r5conf *conf, int newsize) | |||
2224 | } else | 2228 | } else |
2225 | err = -ENOMEM; | 2229 | err = -ENOMEM; |
2226 | 2230 | ||
2231 | mutex_unlock(&conf->cache_size_mutex); | ||
2227 | /* Step 4, return new stripes to service */ | 2232 | /* Step 4, return new stripes to service */ |
2228 | while(!list_empty(&newstripes)) { | 2233 | while(!list_empty(&newstripes)) { |
2229 | nsh = list_entry(newstripes.next, struct stripe_head, lru); | 2234 | nsh = list_entry(newstripes.next, struct stripe_head, lru); |
@@ -4061,8 +4066,10 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) | |||
4061 | &first_bad, &bad_sectors)) | 4066 | &first_bad, &bad_sectors)) |
4062 | set_bit(R5_ReadRepl, &dev->flags); | 4067 | set_bit(R5_ReadRepl, &dev->flags); |
4063 | else { | 4068 | else { |
4064 | if (rdev) | 4069 | if (rdev && !test_bit(Faulty, &rdev->flags)) |
4065 | set_bit(R5_NeedReplace, &dev->flags); | 4070 | set_bit(R5_NeedReplace, &dev->flags); |
4071 | else | ||
4072 | clear_bit(R5_NeedReplace, &dev->flags); | ||
4066 | rdev = rcu_dereference(conf->disks[i].rdev); | 4073 | rdev = rcu_dereference(conf->disks[i].rdev); |
4067 | clear_bit(R5_ReadRepl, &dev->flags); | 4074 | clear_bit(R5_ReadRepl, &dev->flags); |
4068 | } | 4075 | } |
@@ -5857,12 +5864,14 @@ static void raid5d(struct md_thread *thread) | |||
5857 | pr_debug("%d stripes handled\n", handled); | 5864 | pr_debug("%d stripes handled\n", handled); |
5858 | 5865 | ||
5859 | spin_unlock_irq(&conf->device_lock); | 5866 | spin_unlock_irq(&conf->device_lock); |
5860 | if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state)) { | 5867 | if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state) && |
5868 | mutex_trylock(&conf->cache_size_mutex)) { | ||
5861 | grow_one_stripe(conf, __GFP_NOWARN); | 5869 | grow_one_stripe(conf, __GFP_NOWARN); |
5862 | /* Set flag even if allocation failed. This helps | 5870 | /* Set flag even if allocation failed. This helps |
5863 | * slow down allocation requests when mem is short | 5871 | * slow down allocation requests when mem is short |
5864 | */ | 5872 | */ |
5865 | set_bit(R5_DID_ALLOC, &conf->cache_state); | 5873 | set_bit(R5_DID_ALLOC, &conf->cache_state); |
5874 | mutex_unlock(&conf->cache_size_mutex); | ||
5866 | } | 5875 | } |
5867 | 5876 | ||
5868 | async_tx_issue_pending_all(); | 5877 | async_tx_issue_pending_all(); |
@@ -5894,18 +5903,22 @@ raid5_set_cache_size(struct mddev *mddev, int size) | |||
5894 | return -EINVAL; | 5903 | return -EINVAL; |
5895 | 5904 | ||
5896 | conf->min_nr_stripes = size; | 5905 | conf->min_nr_stripes = size; |
5906 | mutex_lock(&conf->cache_size_mutex); | ||
5897 | while (size < conf->max_nr_stripes && | 5907 | while (size < conf->max_nr_stripes && |
5898 | drop_one_stripe(conf)) | 5908 | drop_one_stripe(conf)) |
5899 | ; | 5909 | ; |
5910 | mutex_unlock(&conf->cache_size_mutex); | ||
5900 | 5911 | ||
5901 | 5912 | ||
5902 | err = md_allow_write(mddev); | 5913 | err = md_allow_write(mddev); |
5903 | if (err) | 5914 | if (err) |
5904 | return err; | 5915 | return err; |
5905 | 5916 | ||
5917 | mutex_lock(&conf->cache_size_mutex); | ||
5906 | while (size > conf->max_nr_stripes) | 5918 | while (size > conf->max_nr_stripes) |
5907 | if (!grow_one_stripe(conf, GFP_KERNEL)) | 5919 | if (!grow_one_stripe(conf, GFP_KERNEL)) |
5908 | break; | 5920 | break; |
5921 | mutex_unlock(&conf->cache_size_mutex); | ||
5909 | 5922 | ||
5910 | return 0; | 5923 | return 0; |
5911 | } | 5924 | } |
@@ -6371,11 +6384,18 @@ static unsigned long raid5_cache_scan(struct shrinker *shrink, | |||
6371 | struct shrink_control *sc) | 6384 | struct shrink_control *sc) |
6372 | { | 6385 | { |
6373 | struct r5conf *conf = container_of(shrink, struct r5conf, shrinker); | 6386 | struct r5conf *conf = container_of(shrink, struct r5conf, shrinker); |
6374 | int ret = 0; | 6387 | unsigned long ret = SHRINK_STOP; |
6375 | while (ret < sc->nr_to_scan) { | 6388 | |
6376 | if (drop_one_stripe(conf) == 0) | 6389 | if (mutex_trylock(&conf->cache_size_mutex)) { |
6377 | return SHRINK_STOP; | 6390 | ret = 0; |
6378 | ret++; | 6391 | while (ret < sc->nr_to_scan) { |
6392 | if (drop_one_stripe(conf) == 0) { | ||
6393 | ret = SHRINK_STOP; | ||
6394 | break; | ||
6395 | } | ||
6396 | ret++; | ||
6397 | } | ||
6398 | mutex_unlock(&conf->cache_size_mutex); | ||
6379 | } | 6399 | } |
6380 | return ret; | 6400 | return ret; |
6381 | } | 6401 | } |
@@ -6444,6 +6464,7 @@ static struct r5conf *setup_conf(struct mddev *mddev) | |||
6444 | goto abort; | 6464 | goto abort; |
6445 | spin_lock_init(&conf->device_lock); | 6465 | spin_lock_init(&conf->device_lock); |
6446 | seqcount_init(&conf->gen_lock); | 6466 | seqcount_init(&conf->gen_lock); |
6467 | mutex_init(&conf->cache_size_mutex); | ||
6447 | init_waitqueue_head(&conf->wait_for_quiescent); | 6468 | init_waitqueue_head(&conf->wait_for_quiescent); |
6448 | for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) { | 6469 | for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) { |
6449 | init_waitqueue_head(&conf->wait_for_stripe[i]); | 6470 | init_waitqueue_head(&conf->wait_for_stripe[i]); |
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 02c3bf8fbfe7..d05144278690 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h | |||
@@ -482,7 +482,8 @@ struct r5conf { | |||
482 | */ | 482 | */ |
483 | int active_name; | 483 | int active_name; |
484 | char cache_name[2][32]; | 484 | char cache_name[2][32]; |
485 | struct kmem_cache *slab_cache; /* for allocating stripes */ | 485 | struct kmem_cache *slab_cache; /* for allocating stripes */ |
486 | struct mutex cache_size_mutex; /* Protect changes to cache size */ | ||
486 | 487 | ||
487 | int seq_flush, seq_write; | 488 | int seq_flush, seq_write; |
488 | int quiesce; | 489 | int quiesce; |
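
The raid5 hunks above introduce cache_size_mutex: the sysfs resize path and resize_stripes() take it unconditionally, while raid5d and the shrinker only mutex_trylock() so memory reclaim backs off instead of blocking behind a resize. A minimal userspace sketch of that lock-versus-trylock split (pthreads stand in for the kernel mutex; the names below are illustrative, not the md API):

/* Resize takes the lock outright; the scan path only trylocks so a
 * reclaim caller never sleeps waiting for a resize in progress.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cache_size_lock = PTHREAD_MUTEX_INITIALIZER;
static int nr_stripes = 256;

static void resize_cache(int new_size)
{
	pthread_mutex_lock(&cache_size_lock);	/* resize may block */
	nr_stripes = new_size;
	pthread_mutex_unlock(&cache_size_lock);
}

static long cache_scan(long nr_to_scan)
{
	long freed = -1;			/* stands in for SHRINK_STOP */

	if (pthread_mutex_trylock(&cache_size_lock) == 0) {
		freed = 0;
		while (freed < nr_to_scan && nr_stripes > 0) {
			nr_stripes--;		/* stands in for drop_one_stripe() */
			freed++;
		}
		pthread_mutex_unlock(&cache_size_lock);
	}
	return freed;				/* -1: lock busy, try again later */
}

int main(void)
{
	long freed;

	resize_cache(512);
	freed = cache_scan(128);
	printf("freed %ld, %d stripes left\n", freed, nr_stripes);
	return 0;
}
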
diff --git a/drivers/media/pci/ivtv/ivtvfb.c b/drivers/media/pci/ivtv/ivtvfb.c index 4cb365d4ffdc..8b95eefb610b 100644 --- a/drivers/media/pci/ivtv/ivtvfb.c +++ b/drivers/media/pci/ivtv/ivtvfb.c | |||
@@ -38,6 +38,8 @@ | |||
38 | Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 38 | Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
39 | */ | 39 | */ |
40 | 40 | ||
41 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
42 | |||
41 | #include <linux/module.h> | 43 | #include <linux/module.h> |
42 | #include <linux/kernel.h> | 44 | #include <linux/kernel.h> |
43 | #include <linux/fb.h> | 45 | #include <linux/fb.h> |
@@ -1171,6 +1173,13 @@ static int ivtvfb_init_card(struct ivtv *itv) | |||
1171 | { | 1173 | { |
1172 | int rc; | 1174 | int rc; |
1173 | 1175 | ||
1176 | #ifdef CONFIG_X86_64 | ||
1177 | if (pat_enabled()) { | ||
1178 | pr_warn("ivtvfb needs PAT disabled, boot with nopat kernel parameter\n"); | ||
1179 | return -ENODEV; | ||
1180 | } | ||
1181 | #endif | ||
1182 | |||
1174 | if (itv->osd_info) { | 1183 | if (itv->osd_info) { |
1175 | IVTVFB_ERR("Card %d already initialised\n", ivtvfb_card_id); | 1184 | IVTVFB_ERR("Card %d already initialised\n", ivtvfb_card_id); |
1176 | return -EBUSY; | 1185 | return -EBUSY; |
@@ -1265,12 +1274,6 @@ static int __init ivtvfb_init(void) | |||
1265 | int registered = 0; | 1274 | int registered = 0; |
1266 | int err; | 1275 | int err; |
1267 | 1276 | ||
1268 | #ifdef CONFIG_X86_64 | ||
1269 | if (WARN(pat_enabled(), | ||
1270 | "ivtvfb needs PAT disabled, boot with nopat kernel parameter\n")) { | ||
1271 | return -ENODEV; | ||
1272 | } | ||
1273 | #endif | ||
1274 | 1277 | ||
1275 | if (ivtvfb_card_id < -1 || ivtvfb_card_id >= IVTV_MAX_CARDS) { | 1278 | if (ivtvfb_card_id < -1 || ivtvfb_card_id >= IVTV_MAX_CARDS) { |
1276 | printk(KERN_ERR "ivtvfb: ivtvfb_card_id parameter is out of range (valid range: -1 - %d)\n", | 1279 | printk(KERN_ERR "ivtvfb: ivtvfb_card_id parameter is out of range (valid range: -1 - %d)\n", |
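
The ivtvfb hunks above move the x86-64 PAT check from module init into ivtvfb_init_card() and add a pr_fmt definition so every pr_*() call in the file is prefixed with the module name. A userspace analogue of that prefix-via-macro idea, assuming nothing beyond standard C plus the ##__VA_ARGS__ extension the kernel itself relies on (printf stands in for printk):

#include <stdio.h>

#define MODNAME "ivtvfb"
#define pr_fmt(fmt) MODNAME ": " fmt
/* Each pr_warn() call site gets the module-name prefix glued onto its
 * format string at compile time, exactly like the kernel's pr_fmt hook.
 */
#define pr_warn(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_warn("needs PAT disabled, boot with nopat kernel parameter\n");
	return 0;
}
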
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c index 8eb0a9500a90..e9513d651cd3 100644 --- a/drivers/misc/mei/main.c +++ b/drivers/misc/mei/main.c | |||
@@ -682,7 +682,7 @@ int mei_register(struct mei_device *dev, struct device *parent) | |||
682 | /* Fill in the data structures */ | 682 | /* Fill in the data structures */ |
683 | devno = MKDEV(MAJOR(mei_devt), dev->minor); | 683 | devno = MKDEV(MAJOR(mei_devt), dev->minor); |
684 | cdev_init(&dev->cdev, &mei_fops); | 684 | cdev_init(&dev->cdev, &mei_fops); |
685 | dev->cdev.owner = mei_fops.owner; | 685 | dev->cdev.owner = parent->driver->owner; |
686 | 686 | ||
687 | /* Add the device */ | 687 | /* Add the device */ |
688 | ret = cdev_add(&dev->cdev, devno, 1); | 688 | ret = cdev_add(&dev->cdev, devno, 1); |
diff --git a/drivers/misc/mic/scif/scif_nodeqp.c b/drivers/misc/mic/scif/scif_nodeqp.c index 41e3bdb10061..6dfdae3452d6 100644 --- a/drivers/misc/mic/scif/scif_nodeqp.c +++ b/drivers/misc/mic/scif/scif_nodeqp.c | |||
@@ -357,7 +357,7 @@ static void scif_p2p_freesg(struct scatterlist *sg) | |||
357 | } | 357 | } |
358 | 358 | ||
359 | static struct scatterlist * | 359 | static struct scatterlist * |
360 | scif_p2p_setsg(void __iomem *va, int page_size, int page_cnt) | 360 | scif_p2p_setsg(phys_addr_t pa, int page_size, int page_cnt) |
361 | { | 361 | { |
362 | struct scatterlist *sg; | 362 | struct scatterlist *sg; |
363 | struct page *page; | 363 | struct page *page; |
@@ -368,16 +368,11 @@ scif_p2p_setsg(void __iomem *va, int page_size, int page_cnt) | |||
368 | return NULL; | 368 | return NULL; |
369 | sg_init_table(sg, page_cnt); | 369 | sg_init_table(sg, page_cnt); |
370 | for (i = 0; i < page_cnt; i++) { | 370 | for (i = 0; i < page_cnt; i++) { |
371 | page = vmalloc_to_page((void __force *)va); | 371 | page = pfn_to_page(pa >> PAGE_SHIFT); |
372 | if (!page) | ||
373 | goto p2p_sg_err; | ||
374 | sg_set_page(&sg[i], page, page_size, 0); | 372 | sg_set_page(&sg[i], page, page_size, 0); |
375 | va += page_size; | 373 | pa += page_size; |
376 | } | 374 | } |
377 | return sg; | 375 | return sg; |
378 | p2p_sg_err: | ||
379 | kfree(sg); | ||
380 | return NULL; | ||
381 | } | 376 | } |
382 | 377 | ||
383 | /* Init p2p mappings required to access peerdev from scifdev */ | 378 | /* Init p2p mappings required to access peerdev from scifdev */ |
@@ -395,14 +390,14 @@ scif_init_p2p_info(struct scif_dev *scifdev, struct scif_dev *peerdev) | |||
395 | p2p = kzalloc(sizeof(*p2p), GFP_KERNEL); | 390 | p2p = kzalloc(sizeof(*p2p), GFP_KERNEL); |
396 | if (!p2p) | 391 | if (!p2p) |
397 | return NULL; | 392 | return NULL; |
398 | p2p->ppi_sg[SCIF_PPI_MMIO] = scif_p2p_setsg(psdev->mmio->va, | 393 | p2p->ppi_sg[SCIF_PPI_MMIO] = scif_p2p_setsg(psdev->mmio->pa, |
399 | PAGE_SIZE, num_mmio_pages); | 394 | PAGE_SIZE, num_mmio_pages); |
400 | if (!p2p->ppi_sg[SCIF_PPI_MMIO]) | 395 | if (!p2p->ppi_sg[SCIF_PPI_MMIO]) |
401 | goto free_p2p; | 396 | goto free_p2p; |
402 | p2p->sg_nentries[SCIF_PPI_MMIO] = num_mmio_pages; | 397 | p2p->sg_nentries[SCIF_PPI_MMIO] = num_mmio_pages; |
403 | sg_page_shift = get_order(min(psdev->aper->len, (u64)(1 << 30))); | 398 | sg_page_shift = get_order(min(psdev->aper->len, (u64)(1 << 30))); |
404 | num_aper_chunks = num_aper_pages >> (sg_page_shift - PAGE_SHIFT); | 399 | num_aper_chunks = num_aper_pages >> (sg_page_shift - PAGE_SHIFT); |
405 | p2p->ppi_sg[SCIF_PPI_APER] = scif_p2p_setsg(psdev->aper->va, | 400 | p2p->ppi_sg[SCIF_PPI_APER] = scif_p2p_setsg(psdev->aper->pa, |
406 | 1 << sg_page_shift, | 401 | 1 << sg_page_shift, |
407 | num_aper_chunks); | 402 | num_aper_chunks); |
408 | p2p->sg_nentries[SCIF_PPI_APER] = num_aper_chunks; | 403 | p2p->sg_nentries[SCIF_PPI_APER] = num_aper_chunks; |
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index c9c3d20b784b..a1b820fcb2a6 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c | |||
@@ -208,6 +208,8 @@ static ssize_t power_ro_lock_show(struct device *dev, | |||
208 | 208 | ||
209 | ret = snprintf(buf, PAGE_SIZE, "%d\n", locked); | 209 | ret = snprintf(buf, PAGE_SIZE, "%d\n", locked); |
210 | 210 | ||
211 | mmc_blk_put(md); | ||
212 | |||
211 | return ret; | 213 | return ret; |
212 | } | 214 | } |
213 | 215 | ||
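
The mmc block hunk adds the missing mmc_blk_put() so the reference taken earlier in power_ro_lock_show() is dropped before the function returns; without it every sysfs read of the attribute leaked a reference. A plain-C sketch of the same get/put discipline (the structure and helpers below are stand-ins, not the mmc API):

#include <stdio.h>

struct blk_data { int refcnt; int ro_lock; };

static struct blk_data *blk_get(struct blk_data *md) { md->refcnt++; return md; }
static void blk_put(struct blk_data *md) { md->refcnt--; }

static int ro_lock_show(struct blk_data *dev, char *buf, size_t len)
{
	struct blk_data *md = blk_get(dev);
	int ret = snprintf(buf, len, "%d\n", md->ro_lock);

	blk_put(md);		/* the balancing put the patch above adds */
	return ret;
}

int main(void)
{
	struct blk_data d = { .refcnt = 0, .ro_lock = 1 };
	char buf[16];

	ro_lock_show(&d, buf, sizeof(buf));
	printf("%srefcnt after show: %d\n", buf, d.refcnt);	/* stays at 0 */
	return 0;
}
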
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index fd9a58e216a5..6a0f9c79be26 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig | |||
@@ -779,6 +779,7 @@ config MMC_TOSHIBA_PCI | |||
779 | 779 | ||
780 | config MMC_MTK | 780 | config MMC_MTK |
781 | tristate "MediaTek SD/MMC Card Interface support" | 781 | tristate "MediaTek SD/MMC Card Interface support" |
782 | depends on HAS_DMA | ||
782 | help | 783 | help |
783 | This selects the MediaTek(R) Secure digital and Multimedia card Interface. | 784 | This selects the MediaTek(R) Secure digital and Multimedia card Interface. |
784 | If you have a machine with an integrated SD/MMC card reader, say Y or M here. | 785 | If you have a machine with an integrated SD/MMC card reader, say Y or M here. |
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index b2b411da297b..4d1203236890 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c | |||
@@ -1062,9 +1062,14 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status) | |||
1062 | 1062 | ||
1063 | if (status & (CTO_EN | CCRC_EN)) | 1063 | if (status & (CTO_EN | CCRC_EN)) |
1064 | end_cmd = 1; | 1064 | end_cmd = 1; |
1065 | if (host->data || host->response_busy) { | ||
1066 | end_trans = !end_cmd; | ||
1067 | host->response_busy = 0; | ||
1068 | } | ||
1065 | if (status & (CTO_EN | DTO_EN)) | 1069 | if (status & (CTO_EN | DTO_EN)) |
1066 | hsmmc_command_incomplete(host, -ETIMEDOUT, end_cmd); | 1070 | hsmmc_command_incomplete(host, -ETIMEDOUT, end_cmd); |
1067 | else if (status & (CCRC_EN | DCRC_EN)) | 1071 | else if (status & (CCRC_EN | DCRC_EN | DEB_EN | CEB_EN | |
1072 | BADA_EN)) | ||
1068 | hsmmc_command_incomplete(host, -EILSEQ, end_cmd); | 1073 | hsmmc_command_incomplete(host, -EILSEQ, end_cmd); |
1069 | 1074 | ||
1070 | if (status & ACE_EN) { | 1075 | if (status & ACE_EN) { |
@@ -1081,10 +1086,6 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status) | |||
1081 | } | 1086 | } |
1082 | dev_dbg(mmc_dev(host->mmc), "AC12 err: 0x%x\n", ac12); | 1087 | dev_dbg(mmc_dev(host->mmc), "AC12 err: 0x%x\n", ac12); |
1083 | } | 1088 | } |
1084 | if (host->data || host->response_busy) { | ||
1085 | end_trans = !end_cmd; | ||
1086 | host->response_busy = 0; | ||
1087 | } | ||
1088 | } | 1089 | } |
1089 | 1090 | ||
1090 | OMAP_HSMMC_WRITE(host->base, STAT, status); | 1091 | OMAP_HSMMC_WRITE(host->base, STAT, status); |
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index faf0cb910c96..c6b9f6492e1a 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c | |||
@@ -581,13 +581,8 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg) | |||
581 | static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host) | 581 | static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host) |
582 | { | 582 | { |
583 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); | 583 | struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); |
584 | struct pltfm_imx_data *imx_data = pltfm_host->priv; | ||
585 | struct esdhc_platform_data *boarddata = &imx_data->boarddata; | ||
586 | 584 | ||
587 | if (boarddata->f_max && (boarddata->f_max < pltfm_host->clock)) | 585 | return pltfm_host->clock; |
588 | return boarddata->f_max; | ||
589 | else | ||
590 | return pltfm_host->clock; | ||
591 | } | 586 | } |
592 | 587 | ||
593 | static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host) | 588 | static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host) |
@@ -878,34 +873,19 @@ static const struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { | |||
878 | static int | 873 | static int |
879 | sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, | 874 | sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, |
880 | struct sdhci_host *host, | 875 | struct sdhci_host *host, |
881 | struct esdhc_platform_data *boarddata) | 876 | struct pltfm_imx_data *imx_data) |
882 | { | 877 | { |
883 | struct device_node *np = pdev->dev.of_node; | 878 | struct device_node *np = pdev->dev.of_node; |
884 | 879 | struct esdhc_platform_data *boarddata = &imx_data->boarddata; | |
885 | if (!np) | 880 | int ret; |
886 | return -ENODEV; | ||
887 | |||
888 | if (of_get_property(np, "non-removable", NULL)) | ||
889 | boarddata->cd_type = ESDHC_CD_PERMANENT; | ||
890 | |||
891 | if (of_get_property(np, "fsl,cd-controller", NULL)) | ||
892 | boarddata->cd_type = ESDHC_CD_CONTROLLER; | ||
893 | 881 | ||
894 | if (of_get_property(np, "fsl,wp-controller", NULL)) | 882 | if (of_get_property(np, "fsl,wp-controller", NULL)) |
895 | boarddata->wp_type = ESDHC_WP_CONTROLLER; | 883 | boarddata->wp_type = ESDHC_WP_CONTROLLER; |
896 | 884 | ||
897 | boarddata->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0); | ||
898 | if (gpio_is_valid(boarddata->cd_gpio)) | ||
899 | boarddata->cd_type = ESDHC_CD_GPIO; | ||
900 | |||
901 | boarddata->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0); | 885 | boarddata->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0); |
902 | if (gpio_is_valid(boarddata->wp_gpio)) | 886 | if (gpio_is_valid(boarddata->wp_gpio)) |
903 | boarddata->wp_type = ESDHC_WP_GPIO; | 887 | boarddata->wp_type = ESDHC_WP_GPIO; |
904 | 888 | ||
905 | of_property_read_u32(np, "bus-width", &boarddata->max_bus_width); | ||
906 | |||
907 | of_property_read_u32(np, "max-frequency", &boarddata->f_max); | ||
908 | |||
909 | if (of_find_property(np, "no-1-8-v", NULL)) | 889 | if (of_find_property(np, "no-1-8-v", NULL)) |
910 | boarddata->support_vsel = false; | 890 | boarddata->support_vsel = false; |
911 | else | 891 | else |
@@ -916,29 +896,119 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, | |||
916 | 896 | ||
917 | mmc_of_parse_voltage(np, &host->ocr_mask); | 897 | mmc_of_parse_voltage(np, &host->ocr_mask); |
918 | 898 | ||
899 | /* sdr50 and sdr104 needs work on 1.8v signal voltage */ | ||
900 | if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data) && | ||
901 | !IS_ERR(imx_data->pins_default)) { | ||
902 | imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl, | ||
903 | ESDHC_PINCTRL_STATE_100MHZ); | ||
904 | imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl, | ||
905 | ESDHC_PINCTRL_STATE_200MHZ); | ||
906 | if (IS_ERR(imx_data->pins_100mhz) || | ||
907 | IS_ERR(imx_data->pins_200mhz)) { | ||
908 | dev_warn(mmc_dev(host->mmc), | ||
909 | "could not get ultra high speed state, work on normal mode\n"); | ||
910 | /* | ||
911 | * fall back to not supporting uhs by specifying the no-1.8v quirk |||
912 | */ | ||
913 | host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; | ||
914 | } | ||
915 | } else { | ||
916 | host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; | ||
917 | } | ||
918 | |||
919 | /* call to generic mmc_of_parse to support additional capabilities */ | 919 | /* call to generic mmc_of_parse to support additional capabilities */ |
920 | return mmc_of_parse(host->mmc); | 920 | ret = mmc_of_parse(host->mmc); |
921 | if (ret) | ||
922 | return ret; | ||
923 | |||
924 | if (!IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc))) | ||
925 | host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; | ||
926 | |||
927 | return 0; | ||
921 | } | 928 | } |
922 | #else | 929 | #else |
923 | static inline int | 930 | static inline int |
924 | sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, | 931 | sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, |
925 | struct sdhci_host *host, | 932 | struct sdhci_host *host, |
926 | struct esdhc_platform_data *boarddata) | 933 | struct pltfm_imx_data *imx_data) |
927 | { | 934 | { |
928 | return -ENODEV; | 935 | return -ENODEV; |
929 | } | 936 | } |
930 | #endif | 937 | #endif |
931 | 938 | ||
939 | static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev, | ||
940 | struct sdhci_host *host, | ||
941 | struct pltfm_imx_data *imx_data) | ||
942 | { | ||
943 | struct esdhc_platform_data *boarddata = &imx_data->boarddata; | ||
944 | int err; | ||
945 | |||
946 | if (!host->mmc->parent->platform_data) { | ||
947 | dev_err(mmc_dev(host->mmc), "no board data!\n"); | ||
948 | return -EINVAL; | ||
949 | } | ||
950 | |||
951 | imx_data->boarddata = *((struct esdhc_platform_data *) | ||
952 | host->mmc->parent->platform_data); | ||
953 | /* write_protect */ | ||
954 | if (boarddata->wp_type == ESDHC_WP_GPIO) { | ||
955 | err = mmc_gpio_request_ro(host->mmc, boarddata->wp_gpio); | ||
956 | if (err) { | ||
957 | dev_err(mmc_dev(host->mmc), | ||
958 | "failed to request write-protect gpio!\n"); | ||
959 | return err; | ||
960 | } | ||
961 | host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; | ||
962 | } | ||
963 | |||
964 | /* card_detect */ | ||
965 | switch (boarddata->cd_type) { | ||
966 | case ESDHC_CD_GPIO: | ||
967 | err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio, 0); | ||
968 | if (err) { | ||
969 | dev_err(mmc_dev(host->mmc), | ||
970 | "failed to request card-detect gpio!\n"); | ||
971 | return err; | ||
972 | } | ||
973 | /* fall through */ | ||
974 | |||
975 | case ESDHC_CD_CONTROLLER: | ||
976 | /* we have a working card_detect back */ | ||
977 | host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; | ||
978 | break; | ||
979 | |||
980 | case ESDHC_CD_PERMANENT: | ||
981 | host->mmc->caps |= MMC_CAP_NONREMOVABLE; | ||
982 | break; | ||
983 | |||
984 | case ESDHC_CD_NONE: | ||
985 | break; | ||
986 | } | ||
987 | |||
988 | switch (boarddata->max_bus_width) { | ||
989 | case 8: | ||
990 | host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA; | ||
991 | break; | ||
992 | case 4: | ||
993 | host->mmc->caps |= MMC_CAP_4_BIT_DATA; | ||
994 | break; | ||
995 | case 1: | ||
996 | default: | ||
997 | host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA; | ||
998 | break; | ||
999 | } | ||
1000 | |||
1001 | return 0; | ||
1002 | } | ||
1003 | |||
932 | static int sdhci_esdhc_imx_probe(struct platform_device *pdev) | 1004 | static int sdhci_esdhc_imx_probe(struct platform_device *pdev) |
933 | { | 1005 | { |
934 | const struct of_device_id *of_id = | 1006 | const struct of_device_id *of_id = |
935 | of_match_device(imx_esdhc_dt_ids, &pdev->dev); | 1007 | of_match_device(imx_esdhc_dt_ids, &pdev->dev); |
936 | struct sdhci_pltfm_host *pltfm_host; | 1008 | struct sdhci_pltfm_host *pltfm_host; |
937 | struct sdhci_host *host; | 1009 | struct sdhci_host *host; |
938 | struct esdhc_platform_data *boarddata; | ||
939 | int err; | 1010 | int err; |
940 | struct pltfm_imx_data *imx_data; | 1011 | struct pltfm_imx_data *imx_data; |
941 | bool dt = true; | ||
942 | 1012 | ||
943 | host = sdhci_pltfm_init(pdev, &sdhci_esdhc_imx_pdata, 0); | 1013 | host = sdhci_pltfm_init(pdev, &sdhci_esdhc_imx_pdata, 0); |
944 | if (IS_ERR(host)) | 1014 | if (IS_ERR(host)) |
@@ -1030,84 +1100,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) | |||
1030 | if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536) | 1100 | if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536) |
1031 | host->quirks |= SDHCI_QUIRK_BROKEN_ADMA; | 1101 | host->quirks |= SDHCI_QUIRK_BROKEN_ADMA; |
1032 | 1102 | ||
1033 | boarddata = &imx_data->boarddata; | 1103 | if (of_id) |
1034 | if (sdhci_esdhc_imx_probe_dt(pdev, host, boarddata) < 0) { | 1104 | err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data); |
1035 | if (!host->mmc->parent->platform_data) { | 1105 | else |
1036 | dev_err(mmc_dev(host->mmc), "no board data!\n"); | 1106 | err = sdhci_esdhc_imx_probe_nondt(pdev, host, imx_data); |
1037 | err = -EINVAL; | 1107 | if (err) |
1038 | goto disable_clk; | 1108 | goto disable_clk; |
1039 | } | ||
1040 | imx_data->boarddata = *((struct esdhc_platform_data *) | ||
1041 | host->mmc->parent->platform_data); | ||
1042 | dt = false; | ||
1043 | } | ||
1044 | /* write_protect */ | ||
1045 | if (boarddata->wp_type == ESDHC_WP_GPIO && !dt) { | ||
1046 | err = mmc_gpio_request_ro(host->mmc, boarddata->wp_gpio); | ||
1047 | if (err) { | ||
1048 | dev_err(mmc_dev(host->mmc), | ||
1049 | "failed to request write-protect gpio!\n"); | ||
1050 | goto disable_clk; | ||
1051 | } | ||
1052 | host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; | ||
1053 | } | ||
1054 | |||
1055 | /* card_detect */ | ||
1056 | switch (boarddata->cd_type) { | ||
1057 | case ESDHC_CD_GPIO: | ||
1058 | if (dt) | ||
1059 | break; | ||
1060 | err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio, 0); | ||
1061 | if (err) { | ||
1062 | dev_err(mmc_dev(host->mmc), | ||
1063 | "failed to request card-detect gpio!\n"); | ||
1064 | goto disable_clk; | ||
1065 | } | ||
1066 | /* fall through */ | ||
1067 | |||
1068 | case ESDHC_CD_CONTROLLER: | ||
1069 | /* we have a working card_detect back */ | ||
1070 | host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; | ||
1071 | break; | ||
1072 | |||
1073 | case ESDHC_CD_PERMANENT: | ||
1074 | host->mmc->caps |= MMC_CAP_NONREMOVABLE; | ||
1075 | break; | ||
1076 | |||
1077 | case ESDHC_CD_NONE: | ||
1078 | break; | ||
1079 | } | ||
1080 | |||
1081 | switch (boarddata->max_bus_width) { | ||
1082 | case 8: | ||
1083 | host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA; | ||
1084 | break; | ||
1085 | case 4: | ||
1086 | host->mmc->caps |= MMC_CAP_4_BIT_DATA; | ||
1087 | break; | ||
1088 | case 1: | ||
1089 | default: | ||
1090 | host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA; | ||
1091 | break; | ||
1092 | } | ||
1093 | |||
1094 | /* sdr50 and sdr104 needs work on 1.8v signal voltage */ | ||
1095 | if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data) && | ||
1096 | !IS_ERR(imx_data->pins_default)) { | ||
1097 | imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl, | ||
1098 | ESDHC_PINCTRL_STATE_100MHZ); | ||
1099 | imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl, | ||
1100 | ESDHC_PINCTRL_STATE_200MHZ); | ||
1101 | if (IS_ERR(imx_data->pins_100mhz) || | ||
1102 | IS_ERR(imx_data->pins_200mhz)) { | ||
1103 | dev_warn(mmc_dev(host->mmc), | ||
1104 | "could not get ultra high speed state, work on normal mode\n"); | ||
1105 | /* fall back to not support uhs by specify no 1.8v quirk */ | ||
1106 | host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; | ||
1107 | } | ||
1108 | } else { | ||
1109 | host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; | ||
1110 | } | ||
1111 | 1109 | ||
1112 | err = sdhci_add_host(host); | 1110 | err = sdhci_add_host(host); |
1113 | if (err) | 1111 | if (err) |
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h index 3497cfaf683c..a870c42731d7 100644 --- a/drivers/mmc/host/sdhci-esdhc.h +++ b/drivers/mmc/host/sdhci-esdhc.h | |||
@@ -45,6 +45,6 @@ | |||
45 | #define ESDHC_DMA_SYSCTL 0x40c | 45 | #define ESDHC_DMA_SYSCTL 0x40c |
46 | #define ESDHC_DMA_SNOOP 0x00000040 | 46 | #define ESDHC_DMA_SNOOP 0x00000040 |
47 | 47 | ||
48 | #define ESDHC_HOST_CONTROL_RES 0x05 | 48 | #define ESDHC_HOST_CONTROL_RES 0x01 |
49 | 49 | ||
50 | #endif /* _DRIVERS_MMC_SDHCI_ESDHC_H */ | 50 | #endif /* _DRIVERS_MMC_SDHCI_ESDHC_H */ |
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c index 9cd5fc62f130..946d37f94a31 100644 --- a/drivers/mmc/host/sdhci-pxav3.c +++ b/drivers/mmc/host/sdhci-pxav3.c | |||
@@ -411,6 +411,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev) | |||
411 | goto err_of_parse; | 411 | goto err_of_parse; |
412 | sdhci_get_of_property(pdev); | 412 | sdhci_get_of_property(pdev); |
413 | pdata = pxav3_get_mmc_pdata(dev); | 413 | pdata = pxav3_get_mmc_pdata(dev); |
414 | pdev->dev.platform_data = pdata; | ||
414 | } else if (pdata) { | 415 | } else if (pdata) { |
415 | /* on-chip device */ | 416 | /* on-chip device */ |
416 | if (pdata->flags & PXA_FLAG_CARD_PERMANENT) | 417 | if (pdata->flags & PXA_FLAG_CARD_PERMANENT) |
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index bc1445238fb3..1dbe93232030 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c | |||
@@ -2866,6 +2866,7 @@ int sdhci_add_host(struct sdhci_host *host) | |||
2866 | u32 max_current_caps; | 2866 | u32 max_current_caps; |
2867 | unsigned int ocr_avail; | 2867 | unsigned int ocr_avail; |
2868 | unsigned int override_timeout_clk; | 2868 | unsigned int override_timeout_clk; |
2869 | u32 max_clk; | ||
2869 | int ret; | 2870 | int ret; |
2870 | 2871 | ||
2871 | WARN_ON(host == NULL); | 2872 | WARN_ON(host == NULL); |
@@ -2978,8 +2979,11 @@ int sdhci_add_host(struct sdhci_host *host) | |||
2978 | GFP_KERNEL); | 2979 | GFP_KERNEL); |
2979 | host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL); | 2980 | host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL); |
2980 | if (!host->adma_table || !host->align_buffer) { | 2981 | if (!host->adma_table || !host->align_buffer) { |
2981 | dma_free_coherent(mmc_dev(mmc), host->adma_table_sz, | 2982 | if (host->adma_table) |
2982 | host->adma_table, host->adma_addr); | 2983 | dma_free_coherent(mmc_dev(mmc), |
2984 | host->adma_table_sz, | ||
2985 | host->adma_table, | ||
2986 | host->adma_addr); | ||
2983 | kfree(host->align_buffer); | 2987 | kfree(host->align_buffer); |
2984 | pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n", | 2988 | pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n", |
2985 | mmc_hostname(mmc)); | 2989 | mmc_hostname(mmc)); |
@@ -3047,18 +3051,22 @@ int sdhci_add_host(struct sdhci_host *host) | |||
3047 | * Set host parameters. | 3051 | * Set host parameters. |
3048 | */ | 3052 | */ |
3049 | mmc->ops = &sdhci_ops; | 3053 | mmc->ops = &sdhci_ops; |
3050 | mmc->f_max = host->max_clk; | 3054 | max_clk = host->max_clk; |
3055 | |||
3051 | if (host->ops->get_min_clock) | 3056 | if (host->ops->get_min_clock) |
3052 | mmc->f_min = host->ops->get_min_clock(host); | 3057 | mmc->f_min = host->ops->get_min_clock(host); |
3053 | else if (host->version >= SDHCI_SPEC_300) { | 3058 | else if (host->version >= SDHCI_SPEC_300) { |
3054 | if (host->clk_mul) { | 3059 | if (host->clk_mul) { |
3055 | mmc->f_min = (host->max_clk * host->clk_mul) / 1024; | 3060 | mmc->f_min = (host->max_clk * host->clk_mul) / 1024; |
3056 | mmc->f_max = host->max_clk * host->clk_mul; | 3061 | max_clk = host->max_clk * host->clk_mul; |
3057 | } else | 3062 | } else |
3058 | mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; | 3063 | mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; |
3059 | } else | 3064 | } else |
3060 | mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; | 3065 | mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; |
3061 | 3066 | ||
3067 | if (!mmc->f_max || (mmc->f_max && (mmc->f_max > max_clk))) | ||
3068 | mmc->f_max = max_clk; | ||
3069 | |||
3062 | if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) { | 3070 | if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) { |
3063 | host->timeout_clk = (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> | 3071 | host->timeout_clk = (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> |
3064 | SDHCI_TIMEOUT_CLK_SHIFT; | 3072 | SDHCI_TIMEOUT_CLK_SHIFT; |
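
The last sdhci.c hunk stops overwriting an f_max supplied by firmware or platform data and instead caps it at what the controller can generate: the condition (!mmc->f_max || (mmc->f_max && mmc->f_max > max_clk)) reduces to "unset, or above the hardware limit". A small standalone illustration of that clamp (illustrative names, not the sdhci API):

#include <stdio.h>

static unsigned int clamp_f_max(unsigned int requested, unsigned int max_clk)
{
	/* keep the requested frequency only if it is non-zero and
	 * does not exceed the controller maximum
	 */
	if (!requested || requested > max_clk)
		return max_clk;
	return requested;
}

int main(void)
{
	printf("%u\n", clamp_f_max(0, 200000000));		/* 200000000 */
	printf("%u\n", clamp_f_max(50000000, 200000000));	/* 50000000  */
	printf("%u\n", clamp_f_max(400000000, 200000000));	/* 200000000 */
	return 0;
}
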
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 317a49480475..e1ccefce9a9d 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -625,6 +625,23 @@ static void bond_set_dev_addr(struct net_device *bond_dev, | |||
625 | call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev); | 625 | call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev); |
626 | } | 626 | } |
627 | 627 | ||
628 | static struct slave *bond_get_old_active(struct bonding *bond, | ||
629 | struct slave *new_active) | ||
630 | { | ||
631 | struct slave *slave; | ||
632 | struct list_head *iter; | ||
633 | |||
634 | bond_for_each_slave(bond, slave, iter) { | ||
635 | if (slave == new_active) | ||
636 | continue; | ||
637 | |||
638 | if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr)) | ||
639 | return slave; | ||
640 | } | ||
641 | |||
642 | return NULL; | ||
643 | } | ||
644 | |||
628 | /* bond_do_fail_over_mac | 645 | /* bond_do_fail_over_mac |
629 | * | 646 | * |
630 | * Perform special MAC address swapping for fail_over_mac settings | 647 | * Perform special MAC address swapping for fail_over_mac settings |
@@ -652,6 +669,9 @@ static void bond_do_fail_over_mac(struct bonding *bond, | |||
652 | if (!new_active) | 669 | if (!new_active) |
653 | return; | 670 | return; |
654 | 671 | ||
672 | if (!old_active) | ||
673 | old_active = bond_get_old_active(bond, new_active); | ||
674 | |||
655 | if (old_active) { | 675 | if (old_active) { |
656 | ether_addr_copy(tmp_mac, new_active->dev->dev_addr); | 676 | ether_addr_copy(tmp_mac, new_active->dev->dev_addr); |
657 | ether_addr_copy(saddr.sa_data, | 677 | ether_addr_copy(saddr.sa_data, |
@@ -1725,9 +1745,16 @@ err_free: | |||
1725 | 1745 | ||
1726 | err_undo_flags: | 1746 | err_undo_flags: |
1727 | /* Enslave of first slave has failed and we need to fix master's mac */ | 1747 | /* Enslave of first slave has failed and we need to fix master's mac */ |
1728 | if (!bond_has_slaves(bond) && | 1748 | if (!bond_has_slaves(bond)) { |
1729 | ether_addr_equal_64bits(bond_dev->dev_addr, slave_dev->dev_addr)) | 1749 | if (ether_addr_equal_64bits(bond_dev->dev_addr, |
1730 | eth_hw_addr_random(bond_dev); | 1750 | slave_dev->dev_addr)) |
1751 | eth_hw_addr_random(bond_dev); | ||
1752 | if (bond_dev->type != ARPHRD_ETHER) { | ||
1753 | ether_setup(bond_dev); | ||
1754 | bond_dev->flags |= IFF_MASTER; | ||
1755 | bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING; | ||
1756 | } | ||
1757 | } | ||
1731 | 1758 | ||
1732 | return res; | 1759 | return res; |
1733 | } | 1760 | } |
@@ -1916,6 +1943,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev, | |||
1916 | bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; | 1943 | bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; |
1917 | netdev_info(bond_dev, "Destroying bond %s\n", | 1944 | netdev_info(bond_dev, "Destroying bond %s\n", |
1918 | bond_dev->name); | 1945 | bond_dev->name); |
1946 | bond_remove_proc_entry(bond); | ||
1919 | unregister_netdevice(bond_dev); | 1947 | unregister_netdevice(bond_dev); |
1920 | } | 1948 | } |
1921 | return ret; | 1949 | return ret; |
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index f4e40aa4d2a2..945c0955a967 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c | |||
@@ -577,10 +577,10 @@ static void at91_rx_overflow_err(struct net_device *dev) | |||
577 | 577 | ||
578 | cf->can_id |= CAN_ERR_CRTL; | 578 | cf->can_id |= CAN_ERR_CRTL; |
579 | cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; | 579 | cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; |
580 | netif_receive_skb(skb); | ||
581 | 580 | ||
582 | stats->rx_packets++; | 581 | stats->rx_packets++; |
583 | stats->rx_bytes += cf->can_dlc; | 582 | stats->rx_bytes += cf->can_dlc; |
583 | netif_receive_skb(skb); | ||
584 | } | 584 | } |
585 | 585 | ||
586 | /** | 586 | /** |
@@ -642,10 +642,10 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb) | |||
642 | } | 642 | } |
643 | 643 | ||
644 | at91_read_mb(dev, mb, cf); | 644 | at91_read_mb(dev, mb, cf); |
645 | netif_receive_skb(skb); | ||
646 | 645 | ||
647 | stats->rx_packets++; | 646 | stats->rx_packets++; |
648 | stats->rx_bytes += cf->can_dlc; | 647 | stats->rx_bytes += cf->can_dlc; |
648 | netif_receive_skb(skb); | ||
649 | 649 | ||
650 | can_led_event(dev, CAN_LED_EVENT_RX); | 650 | can_led_event(dev, CAN_LED_EVENT_RX); |
651 | } | 651 | } |
@@ -802,10 +802,10 @@ static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr) | |||
802 | return 0; | 802 | return 0; |
803 | 803 | ||
804 | at91_poll_err_frame(dev, cf, reg_sr); | 804 | at91_poll_err_frame(dev, cf, reg_sr); |
805 | netif_receive_skb(skb); | ||
806 | 805 | ||
807 | dev->stats.rx_packets++; | 806 | dev->stats.rx_packets++; |
808 | dev->stats.rx_bytes += cf->can_dlc; | 807 | dev->stats.rx_bytes += cf->can_dlc; |
808 | netif_receive_skb(skb); | ||
809 | 809 | ||
810 | return 1; | 810 | return 1; |
811 | } | 811 | } |
@@ -1067,10 +1067,10 @@ static void at91_irq_err(struct net_device *dev) | |||
1067 | return; | 1067 | return; |
1068 | 1068 | ||
1069 | at91_irq_err_state(dev, cf, new_state); | 1069 | at91_irq_err_state(dev, cf, new_state); |
1070 | netif_rx(skb); | ||
1071 | 1070 | ||
1072 | dev->stats.rx_packets++; | 1071 | dev->stats.rx_packets++; |
1073 | dev->stats.rx_bytes += cf->can_dlc; | 1072 | dev->stats.rx_bytes += cf->can_dlc; |
1073 | netif_rx(skb); | ||
1074 | 1074 | ||
1075 | priv->can.state = new_state; | 1075 | priv->can.state = new_state; |
1076 | } | 1076 | } |
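
The same reordering repeats through the CAN driver hunks that follow (bfin_can, cc770, flexcan, grcan, sja1000, slcan, ti_hecc and the USB adapters): the byte count must be read from the frame before the skb is handed to the stack, because netif_rx()/netif_receive_skb() may consume and free it immediately. A userspace sketch of why the order matters, with free() standing in for the skb being consumed and all names illustrative:

#include <stdlib.h>
#include <stdio.h>

struct can_frame { unsigned char can_dlc; unsigned char data[8]; };
struct stats { unsigned long rx_packets, rx_bytes; };

static void deliver(struct can_frame *cf)
{
	free(cf);			/* ~netif_rx(): the frame is gone after this */
}

static void rx_one(struct stats *st)
{
	struct can_frame *cf = calloc(1, sizeof(*cf));

	cf->can_dlc = 8;
	st->rx_packets++;
	st->rx_bytes += cf->can_dlc;	/* read can_dlc while cf is still valid */
	deliver(cf);			/* only then give the frame away */
}

int main(void)
{
	struct stats st = { 0, 0 };

	rx_one(&st);
	printf("packets=%lu bytes=%lu\n", st.rx_packets, st.rx_bytes);
	return 0;
}
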
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c index 27ad312e7abf..57dadd52b428 100644 --- a/drivers/net/can/bfin_can.c +++ b/drivers/net/can/bfin_can.c | |||
@@ -424,10 +424,9 @@ static void bfin_can_rx(struct net_device *dev, u16 isrc) | |||
424 | cf->data[6 - i] = (6 - i) < cf->can_dlc ? (val >> 8) : 0; | 424 | cf->data[6 - i] = (6 - i) < cf->can_dlc ? (val >> 8) : 0; |
425 | } | 425 | } |
426 | 426 | ||
427 | netif_rx(skb); | ||
428 | |||
429 | stats->rx_packets++; | 427 | stats->rx_packets++; |
430 | stats->rx_bytes += cf->can_dlc; | 428 | stats->rx_bytes += cf->can_dlc; |
429 | netif_rx(skb); | ||
431 | } | 430 | } |
432 | 431 | ||
433 | static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status) | 432 | static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status) |
@@ -508,10 +507,9 @@ static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status) | |||
508 | 507 | ||
509 | priv->can.state = state; | 508 | priv->can.state = state; |
510 | 509 | ||
511 | netif_rx(skb); | ||
512 | |||
513 | stats->rx_packets++; | 510 | stats->rx_packets++; |
514 | stats->rx_bytes += cf->can_dlc; | 511 | stats->rx_bytes += cf->can_dlc; |
512 | netif_rx(skb); | ||
515 | 513 | ||
516 | return 0; | 514 | return 0; |
517 | } | 515 | } |
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c index c11d44984036..70a8cbb29e75 100644 --- a/drivers/net/can/cc770/cc770.c +++ b/drivers/net/can/cc770/cc770.c | |||
@@ -504,10 +504,10 @@ static void cc770_rx(struct net_device *dev, unsigned int mo, u8 ctrl1) | |||
504 | for (i = 0; i < cf->can_dlc; i++) | 504 | for (i = 0; i < cf->can_dlc; i++) |
505 | cf->data[i] = cc770_read_reg(priv, msgobj[mo].data[i]); | 505 | cf->data[i] = cc770_read_reg(priv, msgobj[mo].data[i]); |
506 | } | 506 | } |
507 | netif_rx(skb); | ||
508 | 507 | ||
509 | stats->rx_packets++; | 508 | stats->rx_packets++; |
510 | stats->rx_bytes += cf->can_dlc; | 509 | stats->rx_bytes += cf->can_dlc; |
510 | netif_rx(skb); | ||
511 | } | 511 | } |
512 | 512 | ||
513 | static int cc770_err(struct net_device *dev, u8 status) | 513 | static int cc770_err(struct net_device *dev, u8 status) |
@@ -584,10 +584,10 @@ static int cc770_err(struct net_device *dev, u8 status) | |||
584 | } | 584 | } |
585 | } | 585 | } |
586 | 586 | ||
587 | netif_rx(skb); | ||
588 | 587 | ||
589 | stats->rx_packets++; | 588 | stats->rx_packets++; |
590 | stats->rx_bytes += cf->can_dlc; | 589 | stats->rx_bytes += cf->can_dlc; |
590 | netif_rx(skb); | ||
591 | 591 | ||
592 | return 0; | 592 | return 0; |
593 | } | 593 | } |
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 6201c5a1a884..b1e8d729851c 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c | |||
@@ -577,10 +577,10 @@ static int flexcan_poll_bus_err(struct net_device *dev, u32 reg_esr) | |||
577 | return 0; | 577 | return 0; |
578 | 578 | ||
579 | do_bus_err(dev, cf, reg_esr); | 579 | do_bus_err(dev, cf, reg_esr); |
580 | netif_receive_skb(skb); | ||
581 | 580 | ||
582 | dev->stats.rx_packets++; | 581 | dev->stats.rx_packets++; |
583 | dev->stats.rx_bytes += cf->can_dlc; | 582 | dev->stats.rx_bytes += cf->can_dlc; |
583 | netif_receive_skb(skb); | ||
584 | 584 | ||
585 | return 1; | 585 | return 1; |
586 | } | 586 | } |
@@ -622,10 +622,9 @@ static int flexcan_poll_state(struct net_device *dev, u32 reg_esr) | |||
622 | if (unlikely(new_state == CAN_STATE_BUS_OFF)) | 622 | if (unlikely(new_state == CAN_STATE_BUS_OFF)) |
623 | can_bus_off(dev); | 623 | can_bus_off(dev); |
624 | 624 | ||
625 | netif_receive_skb(skb); | ||
626 | |||
627 | dev->stats.rx_packets++; | 625 | dev->stats.rx_packets++; |
628 | dev->stats.rx_bytes += cf->can_dlc; | 626 | dev->stats.rx_bytes += cf->can_dlc; |
627 | netif_receive_skb(skb); | ||
629 | 628 | ||
630 | return 1; | 629 | return 1; |
631 | } | 630 | } |
@@ -670,10 +669,10 @@ static int flexcan_read_frame(struct net_device *dev) | |||
670 | } | 669 | } |
671 | 670 | ||
672 | flexcan_read_fifo(dev, cf); | 671 | flexcan_read_fifo(dev, cf); |
673 | netif_receive_skb(skb); | ||
674 | 672 | ||
675 | stats->rx_packets++; | 673 | stats->rx_packets++; |
676 | stats->rx_bytes += cf->can_dlc; | 674 | stats->rx_bytes += cf->can_dlc; |
675 | netif_receive_skb(skb); | ||
677 | 676 | ||
678 | can_led_event(dev, CAN_LED_EVENT_RX); | 677 | can_led_event(dev, CAN_LED_EVENT_RX); |
679 | 678 | ||
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c index e3d7e22a4fa0..db9538d4b358 100644 --- a/drivers/net/can/grcan.c +++ b/drivers/net/can/grcan.c | |||
@@ -1216,11 +1216,12 @@ static int grcan_receive(struct net_device *dev, int budget) | |||
1216 | cf->data[i] = (u8)(slot[j] >> shift); | 1216 | cf->data[i] = (u8)(slot[j] >> shift); |
1217 | } | 1217 | } |
1218 | } | 1218 | } |
1219 | netif_receive_skb(skb); | ||
1220 | 1219 | ||
1221 | /* Update statistics and read pointer */ | 1220 | /* Update statistics and read pointer */ |
1222 | stats->rx_packets++; | 1221 | stats->rx_packets++; |
1223 | stats->rx_bytes += cf->can_dlc; | 1222 | stats->rx_bytes += cf->can_dlc; |
1223 | netif_receive_skb(skb); | ||
1224 | |||
1224 | rd = grcan_ring_add(rd, GRCAN_MSG_SIZE, dma->rx.size); | 1225 | rd = grcan_ring_add(rd, GRCAN_MSG_SIZE, dma->rx.size); |
1225 | } | 1226 | } |
1226 | 1227 | ||
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index 32bd7f451aa4..7b92e911a616 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c | |||
@@ -377,10 +377,9 @@ static void sja1000_rx(struct net_device *dev) | |||
377 | /* release receive buffer */ | 377 | /* release receive buffer */ |
378 | sja1000_write_cmdreg(priv, CMD_RRB); | 378 | sja1000_write_cmdreg(priv, CMD_RRB); |
379 | 379 | ||
380 | netif_rx(skb); | ||
381 | |||
382 | stats->rx_packets++; | 380 | stats->rx_packets++; |
383 | stats->rx_bytes += cf->can_dlc; | 381 | stats->rx_bytes += cf->can_dlc; |
382 | netif_rx(skb); | ||
384 | 383 | ||
385 | can_led_event(dev, CAN_LED_EVENT_RX); | 384 | can_led_event(dev, CAN_LED_EVENT_RX); |
386 | } | 385 | } |
@@ -484,10 +483,9 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) | |||
484 | can_bus_off(dev); | 483 | can_bus_off(dev); |
485 | } | 484 | } |
486 | 485 | ||
487 | netif_rx(skb); | ||
488 | |||
489 | stats->rx_packets++; | 486 | stats->rx_packets++; |
490 | stats->rx_bytes += cf->can_dlc; | 487 | stats->rx_bytes += cf->can_dlc; |
488 | netif_rx(skb); | ||
491 | 489 | ||
492 | return 0; | 490 | return 0; |
493 | } | 491 | } |
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index a23a7af8eb9a..9a3f15cb7ef4 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c | |||
@@ -218,10 +218,10 @@ static void slc_bump(struct slcan *sl) | |||
218 | 218 | ||
219 | memcpy(skb_put(skb, sizeof(struct can_frame)), | 219 | memcpy(skb_put(skb, sizeof(struct can_frame)), |
220 | &cf, sizeof(struct can_frame)); | 220 | &cf, sizeof(struct can_frame)); |
221 | netif_rx_ni(skb); | ||
222 | 221 | ||
223 | sl->dev->stats.rx_packets++; | 222 | sl->dev->stats.rx_packets++; |
224 | sl->dev->stats.rx_bytes += cf.can_dlc; | 223 | sl->dev->stats.rx_bytes += cf.can_dlc; |
224 | netif_rx_ni(skb); | ||
225 | } | 225 | } |
226 | 226 | ||
227 | /* parse tty input stream */ | 227 | /* parse tty input stream */ |
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c index c1a95a34d62e..b7e83c212023 100644 --- a/drivers/net/can/spi/mcp251x.c +++ b/drivers/net/can/spi/mcp251x.c | |||
@@ -1086,8 +1086,8 @@ static int mcp251x_can_probe(struct spi_device *spi) | |||
1086 | if (ret) | 1086 | if (ret) |
1087 | goto out_clk; | 1087 | goto out_clk; |
1088 | 1088 | ||
1089 | priv->power = devm_regulator_get(&spi->dev, "vdd"); | 1089 | priv->power = devm_regulator_get_optional(&spi->dev, "vdd"); |
1090 | priv->transceiver = devm_regulator_get(&spi->dev, "xceiver"); | 1090 | priv->transceiver = devm_regulator_get_optional(&spi->dev, "xceiver"); |
1091 | if ((PTR_ERR(priv->power) == -EPROBE_DEFER) || | 1091 | if ((PTR_ERR(priv->power) == -EPROBE_DEFER) || |
1092 | (PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) { | 1092 | (PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) { |
1093 | ret = -EPROBE_DEFER; | 1093 | ret = -EPROBE_DEFER; |
@@ -1222,17 +1222,16 @@ static int __maybe_unused mcp251x_can_resume(struct device *dev) | |||
1222 | struct spi_device *spi = to_spi_device(dev); | 1222 | struct spi_device *spi = to_spi_device(dev); |
1223 | struct mcp251x_priv *priv = spi_get_drvdata(spi); | 1223 | struct mcp251x_priv *priv = spi_get_drvdata(spi); |
1224 | 1224 | ||
1225 | if (priv->after_suspend & AFTER_SUSPEND_POWER) { | 1225 | if (priv->after_suspend & AFTER_SUSPEND_POWER) |
1226 | mcp251x_power_enable(priv->power, 1); | 1226 | mcp251x_power_enable(priv->power, 1); |
1227 | |||
1228 | if (priv->after_suspend & AFTER_SUSPEND_UP) { | ||
1229 | mcp251x_power_enable(priv->transceiver, 1); | ||
1227 | queue_work(priv->wq, &priv->restart_work); | 1230 | queue_work(priv->wq, &priv->restart_work); |
1228 | } else { | 1231 | } else { |
1229 | if (priv->after_suspend & AFTER_SUSPEND_UP) { | 1232 | priv->after_suspend = 0; |
1230 | mcp251x_power_enable(priv->transceiver, 1); | ||
1231 | queue_work(priv->wq, &priv->restart_work); | ||
1232 | } else { | ||
1233 | priv->after_suspend = 0; | ||
1234 | } | ||
1235 | } | 1233 | } |
1234 | |||
1236 | priv->force_quit = 0; | 1235 | priv->force_quit = 0; |
1237 | enable_irq(spi->irq); | 1236 | enable_irq(spi->irq); |
1238 | return 0; | 1237 | return 0; |
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c index e95a9e1a889f..cf345cbfe819 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c | |||
@@ -747,9 +747,9 @@ static int ti_hecc_error(struct net_device *ndev, int int_status, | |||
747 | } | 747 | } |
748 | } | 748 | } |
749 | 749 | ||
750 | netif_rx(skb); | ||
751 | stats->rx_packets++; | 750 | stats->rx_packets++; |
752 | stats->rx_bytes += cf->can_dlc; | 751 | stats->rx_bytes += cf->can_dlc; |
752 | netif_rx(skb); | ||
753 | 753 | ||
754 | return 0; | 754 | return 0; |
755 | } | 755 | } |
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index 866bac0ae7e9..2d390384ef3b 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c | |||
@@ -324,10 +324,9 @@ static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg) | |||
324 | cf->data[i] = msg->msg.can_msg.msg[i]; | 324 | cf->data[i] = msg->msg.can_msg.msg[i]; |
325 | } | 325 | } |
326 | 326 | ||
327 | netif_rx(skb); | ||
328 | |||
329 | stats->rx_packets++; | 327 | stats->rx_packets++; |
330 | stats->rx_bytes += cf->can_dlc; | 328 | stats->rx_bytes += cf->can_dlc; |
329 | netif_rx(skb); | ||
331 | } | 330 | } |
332 | 331 | ||
333 | static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg) | 332 | static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg) |
@@ -400,10 +399,9 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg) | |||
400 | stats->rx_errors++; | 399 | stats->rx_errors++; |
401 | } | 400 | } |
402 | 401 | ||
403 | netif_rx(skb); | ||
404 | |||
405 | stats->rx_packets++; | 402 | stats->rx_packets++; |
406 | stats->rx_bytes += cf->can_dlc; | 403 | stats->rx_bytes += cf->can_dlc; |
404 | netif_rx(skb); | ||
407 | } | 405 | } |
408 | 406 | ||
409 | /* | 407 | /* |
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c index 411c1af92c62..0e5a4493ba4f 100644 --- a/drivers/net/can/usb/esd_usb2.c +++ b/drivers/net/can/usb/esd_usb2.c | |||
@@ -301,13 +301,12 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv, | |||
301 | cf->data[7] = rxerr; | 301 | cf->data[7] = rxerr; |
302 | } | 302 | } |
303 | 303 | ||
304 | netif_rx(skb); | ||
305 | |||
306 | priv->bec.txerr = txerr; | 304 | priv->bec.txerr = txerr; |
307 | priv->bec.rxerr = rxerr; | 305 | priv->bec.rxerr = rxerr; |
308 | 306 | ||
309 | stats->rx_packets++; | 307 | stats->rx_packets++; |
310 | stats->rx_bytes += cf->can_dlc; | 308 | stats->rx_bytes += cf->can_dlc; |
309 | netif_rx(skb); | ||
311 | } | 310 | } |
312 | } | 311 | } |
313 | 312 | ||
@@ -347,10 +346,9 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv, | |||
347 | cf->data[i] = msg->msg.rx.data[i]; | 346 | cf->data[i] = msg->msg.rx.data[i]; |
348 | } | 347 | } |
349 | 348 | ||
350 | netif_rx(skb); | ||
351 | |||
352 | stats->rx_packets++; | 349 | stats->rx_packets++; |
353 | stats->rx_bytes += cf->can_dlc; | 350 | stats->rx_bytes += cf->can_dlc; |
351 | netif_rx(skb); | ||
354 | } | 352 | } |
355 | 353 | ||
356 | return; | 354 | return; |
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c index 72427f21edff..6b94007ae052 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb.c | |||
@@ -526,9 +526,9 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n, | |||
526 | hwts->hwtstamp = timeval_to_ktime(tv); | 526 | hwts->hwtstamp = timeval_to_ktime(tv); |
527 | } | 527 | } |
528 | 528 | ||
529 | netif_rx(skb); | ||
530 | mc->netdev->stats.rx_packets++; | 529 | mc->netdev->stats.rx_packets++; |
531 | mc->netdev->stats.rx_bytes += cf->can_dlc; | 530 | mc->netdev->stats.rx_bytes += cf->can_dlc; |
531 | netif_rx(skb); | ||
532 | 532 | ||
533 | return 0; | 533 | return 0; |
534 | } | 534 | } |
@@ -659,12 +659,11 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len) | |||
659 | hwts = skb_hwtstamps(skb); | 659 | hwts = skb_hwtstamps(skb); |
660 | hwts->hwtstamp = timeval_to_ktime(tv); | 660 | hwts->hwtstamp = timeval_to_ktime(tv); |
661 | 661 | ||
662 | /* push the skb */ | ||
663 | netif_rx(skb); | ||
664 | |||
665 | /* update statistics */ | 662 | /* update statistics */ |
666 | mc->netdev->stats.rx_packets++; | 663 | mc->netdev->stats.rx_packets++; |
667 | mc->netdev->stats.rx_bytes += cf->can_dlc; | 664 | mc->netdev->stats.rx_bytes += cf->can_dlc; |
665 | /* push the skb */ | ||
666 | netif_rx(skb); | ||
668 | 667 | ||
669 | return 0; | 668 | return 0; |
670 | 669 | ||
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c index dec51717635e..7d61b3279798 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c | |||
@@ -553,9 +553,9 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if, | |||
553 | hwts = skb_hwtstamps(skb); | 553 | hwts = skb_hwtstamps(skb); |
554 | hwts->hwtstamp = timeval_to_ktime(tv); | 554 | hwts->hwtstamp = timeval_to_ktime(tv); |
555 | 555 | ||
556 | netif_rx(skb); | ||
557 | netdev->stats.rx_packets++; | 556 | netdev->stats.rx_packets++; |
558 | netdev->stats.rx_bytes += can_frame->can_dlc; | 557 | netdev->stats.rx_bytes += can_frame->can_dlc; |
558 | netif_rx(skb); | ||
559 | 559 | ||
560 | return 0; | 560 | return 0; |
561 | } | 561 | } |
@@ -670,9 +670,9 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if, | |||
670 | peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(er->ts32), &tv); | 670 | peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(er->ts32), &tv); |
671 | hwts = skb_hwtstamps(skb); | 671 | hwts = skb_hwtstamps(skb); |
672 | hwts->hwtstamp = timeval_to_ktime(tv); | 672 | hwts->hwtstamp = timeval_to_ktime(tv); |
673 | netif_rx(skb); | ||
674 | netdev->stats.rx_packets++; | 673 | netdev->stats.rx_packets++; |
675 | netdev->stats.rx_bytes += can_frame->can_dlc; | 674 | netdev->stats.rx_bytes += can_frame->can_dlc; |
675 | netif_rx(skb); | ||
676 | 676 | ||
677 | return 0; | 677 | return 0; |
678 | } | 678 | } |
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c index dd52c7a4c80d..de95b1ccba3e 100644 --- a/drivers/net/can/usb/usb_8dev.c +++ b/drivers/net/can/usb/usb_8dev.c | |||
@@ -461,10 +461,9 @@ static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv, | |||
461 | priv->bec.txerr = txerr; | 461 | priv->bec.txerr = txerr; |
462 | priv->bec.rxerr = rxerr; | 462 | priv->bec.rxerr = rxerr; |
463 | 463 | ||
464 | netif_rx(skb); | ||
465 | |||
466 | stats->rx_packets++; | 464 | stats->rx_packets++; |
467 | stats->rx_bytes += cf->can_dlc; | 465 | stats->rx_bytes += cf->can_dlc; |
466 | netif_rx(skb); | ||
468 | } | 467 | } |
469 | 468 | ||
470 | /* Read data and status frames */ | 469 | /* Read data and status frames */ |
@@ -494,10 +493,9 @@ static void usb_8dev_rx_can_msg(struct usb_8dev_priv *priv, | |||
494 | else | 493 | else |
495 | memcpy(cf->data, msg->data, cf->can_dlc); | 494 | memcpy(cf->data, msg->data, cf->can_dlc); |
496 | 495 | ||
497 | netif_rx(skb); | ||
498 | |||
499 | stats->rx_packets++; | 496 | stats->rx_packets++; |
500 | stats->rx_bytes += cf->can_dlc; | 497 | stats->rx_bytes += cf->can_dlc; |
498 | netif_rx(skb); | ||
501 | 499 | ||
502 | can_led_event(priv->netdev, CAN_LED_EVENT_RX); | 500 | can_led_event(priv->netdev, CAN_LED_EVENT_RX); |
503 | } else { | 501 | } else { |
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 972982f8bea7..079897b3a955 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c | |||
@@ -696,9 +696,20 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds) | |||
696 | } | 696 | } |
697 | 697 | ||
698 | /* Include the pseudo-PHY address and the broadcast PHY address to | 698 | /* Include the pseudo-PHY address and the broadcast PHY address to |
699 | * divert reads towards our workaround | 699 | * divert reads towards our workaround. This is only required for |
700 | * 7445D0, since 7445E0 disconnects the internal switch pseudo-PHY such | ||
701 | * that we can use the regular SWITCH_MDIO master controller instead. | ||
702 | * | ||
703 | * By default, DSA initializes ds->phys_mii_mask to ds->phys_port_mask | ||
704 | * to have a 1:1 mapping between Port address and PHY address in order | ||
705 | * to utilize the slave_mii_bus instance to read from Port PHYs. This is | ||
706 | * not what we want here, so we initialize phys_mii_mask to 0 to always |||
707 | * utilize the "master" MDIO bus backed by the "mdio-unimac" driver. | ||
700 | */ | 708 | */ |
701 | ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0)); | 709 | if (of_machine_is_compatible("brcm,bcm7445d0")) |
710 | ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0)); | ||
711 | else | ||
712 | ds->phys_mii_mask = 0; | ||
702 | 713 | ||
703 | rev = reg_readl(priv, REG_SWITCH_REVISION); | 714 | rev = reg_readl(priv, REG_SWITCH_REVISION); |
704 | priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) & | 715 | priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) & |
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c index fd8547c2b79d..561342466076 100644 --- a/drivers/net/dsa/mv88e6xxx.c +++ b/drivers/net/dsa/mv88e6xxx.c | |||
@@ -1163,7 +1163,7 @@ int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask) | |||
1163 | 1163 | ||
1164 | newfid = __ffs(ps->fid_mask); | 1164 | newfid = __ffs(ps->fid_mask); |
1165 | ps->fid[port] = newfid; | 1165 | ps->fid[port] = newfid; |
1166 | ps->fid_mask &= (1 << newfid); | 1166 | ps->fid_mask &= ~(1 << newfid); |
1167 | ps->bridge_mask[fid] &= ~(1 << port); | 1167 | ps->bridge_mask[fid] &= ~(1 << port); |
1168 | ps->bridge_mask[newfid] = 1 << port; | 1168 | ps->bridge_mask[newfid] = 1 << port; |
1169 | 1169 | ||
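The one-line mv88e6xxx change replaces '&= (1 << newfid)' with '&= ~(1 << newfid)': the intent is to clear only the newly claimed FID bit from the free mask, whereas AND-ing with the non-inverted bit throws away every other free FID. A tiny standalone demonstration of the difference (plain C, no driver types):

#include <stdio.h>

int main(void)
{
	unsigned int mask = 0xff;	/* pretend FIDs 0..7 are free */
	unsigned int fid = 3;		/* FID we just claimed */

	unsigned int buggy = mask & (1u << fid);	/* 0x08: all other bits lost */
	unsigned int fixed = mask & ~(1u << fid);	/* 0xf7: only bit 3 cleared */

	printf("buggy=0x%02x fixed=0x%02x\n", buggy, fixed);
	return 0;
}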
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 42e20e5385ac..1f89c59b4353 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
@@ -24,7 +24,6 @@ | |||
24 | #include <linux/module.h> | 24 | #include <linux/module.h> |
25 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> |
26 | #include <linux/string.h> | 26 | #include <linux/string.h> |
27 | #include <linux/pm_runtime.h> | ||
28 | #include <linux/ptrace.h> | 27 | #include <linux/ptrace.h> |
29 | #include <linux/errno.h> | 28 | #include <linux/errno.h> |
30 | #include <linux/ioport.h> | 29 | #include <linux/ioport.h> |
@@ -78,7 +77,6 @@ static void fec_enet_itr_coal_init(struct net_device *ndev); | |||
78 | #define FEC_ENET_RAEM_V 0x8 | 77 | #define FEC_ENET_RAEM_V 0x8 |
79 | #define FEC_ENET_RAFL_V 0x8 | 78 | #define FEC_ENET_RAFL_V 0x8 |
80 | #define FEC_ENET_OPD_V 0xFFF0 | 79 | #define FEC_ENET_OPD_V 0xFFF0 |
81 | #define FEC_MDIO_PM_TIMEOUT 100 /* ms */ | ||
82 | 80 | ||
83 | static struct platform_device_id fec_devtype[] = { | 81 | static struct platform_device_id fec_devtype[] = { |
84 | { | 82 | { |
@@ -1769,13 +1767,7 @@ static void fec_enet_adjust_link(struct net_device *ndev) | |||
1769 | static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) | 1767 | static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) |
1770 | { | 1768 | { |
1771 | struct fec_enet_private *fep = bus->priv; | 1769 | struct fec_enet_private *fep = bus->priv; |
1772 | struct device *dev = &fep->pdev->dev; | ||
1773 | unsigned long time_left; | 1770 | unsigned long time_left; |
1774 | int ret = 0; | ||
1775 | |||
1776 | ret = pm_runtime_get_sync(dev); | ||
1777 | if (IS_ERR_VALUE(ret)) | ||
1778 | return ret; | ||
1779 | 1771 | ||
1780 | fep->mii_timeout = 0; | 1772 | fep->mii_timeout = 0; |
1781 | init_completion(&fep->mdio_done); | 1773 | init_completion(&fep->mdio_done); |
@@ -1791,30 +1783,18 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) | |||
1791 | if (time_left == 0) { | 1783 | if (time_left == 0) { |
1792 | fep->mii_timeout = 1; | 1784 | fep->mii_timeout = 1; |
1793 | netdev_err(fep->netdev, "MDIO read timeout\n"); | 1785 | netdev_err(fep->netdev, "MDIO read timeout\n"); |
1794 | ret = -ETIMEDOUT; | 1786 | return -ETIMEDOUT; |
1795 | goto out; | ||
1796 | } | 1787 | } |
1797 | 1788 | ||
1798 | ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); | 1789 | /* return value */ |
1799 | 1790 | return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA)); | |
1800 | out: | ||
1801 | pm_runtime_mark_last_busy(dev); | ||
1802 | pm_runtime_put_autosuspend(dev); | ||
1803 | |||
1804 | return ret; | ||
1805 | } | 1791 | } |
1806 | 1792 | ||
1807 | static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, | 1793 | static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, |
1808 | u16 value) | 1794 | u16 value) |
1809 | { | 1795 | { |
1810 | struct fec_enet_private *fep = bus->priv; | 1796 | struct fec_enet_private *fep = bus->priv; |
1811 | struct device *dev = &fep->pdev->dev; | ||
1812 | unsigned long time_left; | 1797 | unsigned long time_left; |
1813 | int ret = 0; | ||
1814 | |||
1815 | ret = pm_runtime_get_sync(dev); | ||
1816 | if (IS_ERR_VALUE(ret)) | ||
1817 | return ret; | ||
1818 | 1798 | ||
1819 | fep->mii_timeout = 0; | 1799 | fep->mii_timeout = 0; |
1820 | init_completion(&fep->mdio_done); | 1800 | init_completion(&fep->mdio_done); |
@@ -1831,13 +1811,10 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, | |||
1831 | if (time_left == 0) { | 1811 | if (time_left == 0) { |
1832 | fep->mii_timeout = 1; | 1812 | fep->mii_timeout = 1; |
1833 | netdev_err(fep->netdev, "MDIO write timeout\n"); | 1813 | netdev_err(fep->netdev, "MDIO write timeout\n"); |
1834 | ret = -ETIMEDOUT; | 1814 | return -ETIMEDOUT; |
1835 | } | 1815 | } |
1836 | 1816 | ||
1837 | pm_runtime_mark_last_busy(dev); | 1817 | return 0; |
1838 | pm_runtime_put_autosuspend(dev); | ||
1839 | |||
1840 | return ret; | ||
1841 | } | 1818 | } |
1842 | 1819 | ||
1843 | static int fec_enet_clk_enable(struct net_device *ndev, bool enable) | 1820 | static int fec_enet_clk_enable(struct net_device *ndev, bool enable) |
@@ -1849,6 +1826,9 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) | |||
1849 | ret = clk_prepare_enable(fep->clk_ahb); | 1826 | ret = clk_prepare_enable(fep->clk_ahb); |
1850 | if (ret) | 1827 | if (ret) |
1851 | return ret; | 1828 | return ret; |
1829 | ret = clk_prepare_enable(fep->clk_ipg); | ||
1830 | if (ret) | ||
1831 | goto failed_clk_ipg; | ||
1852 | if (fep->clk_enet_out) { | 1832 | if (fep->clk_enet_out) { |
1853 | ret = clk_prepare_enable(fep->clk_enet_out); | 1833 | ret = clk_prepare_enable(fep->clk_enet_out); |
1854 | if (ret) | 1834 | if (ret) |
@@ -1872,6 +1852,7 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) | |||
1872 | } | 1852 | } |
1873 | } else { | 1853 | } else { |
1874 | clk_disable_unprepare(fep->clk_ahb); | 1854 | clk_disable_unprepare(fep->clk_ahb); |
1855 | clk_disable_unprepare(fep->clk_ipg); | ||
1875 | if (fep->clk_enet_out) | 1856 | if (fep->clk_enet_out) |
1876 | clk_disable_unprepare(fep->clk_enet_out); | 1857 | clk_disable_unprepare(fep->clk_enet_out); |
1877 | if (fep->clk_ptp) { | 1858 | if (fep->clk_ptp) { |
@@ -1893,6 +1874,8 @@ failed_clk_ptp: | |||
1893 | if (fep->clk_enet_out) | 1874 | if (fep->clk_enet_out) |
1894 | clk_disable_unprepare(fep->clk_enet_out); | 1875 | clk_disable_unprepare(fep->clk_enet_out); |
1895 | failed_clk_enet_out: | 1876 | failed_clk_enet_out: |
1877 | clk_disable_unprepare(fep->clk_ipg); | ||
1878 | failed_clk_ipg: | ||
1896 | clk_disable_unprepare(fep->clk_ahb); | 1879 | clk_disable_unprepare(fep->clk_ahb); |
1897 | 1880 | ||
1898 | return ret; | 1881 | return ret; |
@@ -2864,14 +2847,10 @@ fec_enet_open(struct net_device *ndev) | |||
2864 | struct fec_enet_private *fep = netdev_priv(ndev); | 2847 | struct fec_enet_private *fep = netdev_priv(ndev); |
2865 | int ret; | 2848 | int ret; |
2866 | 2849 | ||
2867 | ret = pm_runtime_get_sync(&fep->pdev->dev); | ||
2868 | if (IS_ERR_VALUE(ret)) | ||
2869 | return ret; | ||
2870 | |||
2871 | pinctrl_pm_select_default_state(&fep->pdev->dev); | 2850 | pinctrl_pm_select_default_state(&fep->pdev->dev); |
2872 | ret = fec_enet_clk_enable(ndev, true); | 2851 | ret = fec_enet_clk_enable(ndev, true); |
2873 | if (ret) | 2852 | if (ret) |
2874 | goto clk_enable; | 2853 | return ret; |
2875 | 2854 | ||
2876 | /* I should reset the ring buffers here, but I don't yet know | 2855 | /* I should reset the ring buffers here, but I don't yet know |
2877 | * a simple way to do that. | 2856 | * a simple way to do that. |
@@ -2902,9 +2881,6 @@ err_enet_mii_probe: | |||
2902 | fec_enet_free_buffers(ndev); | 2881 | fec_enet_free_buffers(ndev); |
2903 | err_enet_alloc: | 2882 | err_enet_alloc: |
2904 | fec_enet_clk_enable(ndev, false); | 2883 | fec_enet_clk_enable(ndev, false); |
2905 | clk_enable: | ||
2906 | pm_runtime_mark_last_busy(&fep->pdev->dev); | ||
2907 | pm_runtime_put_autosuspend(&fep->pdev->dev); | ||
2908 | pinctrl_pm_select_sleep_state(&fep->pdev->dev); | 2884 | pinctrl_pm_select_sleep_state(&fep->pdev->dev); |
2909 | return ret; | 2885 | return ret; |
2910 | } | 2886 | } |
@@ -2927,9 +2903,6 @@ fec_enet_close(struct net_device *ndev) | |||
2927 | 2903 | ||
2928 | fec_enet_clk_enable(ndev, false); | 2904 | fec_enet_clk_enable(ndev, false); |
2929 | pinctrl_pm_select_sleep_state(&fep->pdev->dev); | 2905 | pinctrl_pm_select_sleep_state(&fep->pdev->dev); |
2930 | pm_runtime_mark_last_busy(&fep->pdev->dev); | ||
2931 | pm_runtime_put_autosuspend(&fep->pdev->dev); | ||
2932 | |||
2933 | fec_enet_free_buffers(ndev); | 2906 | fec_enet_free_buffers(ndev); |
2934 | 2907 | ||
2935 | return 0; | 2908 | return 0; |
@@ -3415,10 +3388,6 @@ fec_probe(struct platform_device *pdev) | |||
3415 | if (ret) | 3388 | if (ret) |
3416 | goto failed_clk; | 3389 | goto failed_clk; |
3417 | 3390 | ||
3418 | ret = clk_prepare_enable(fep->clk_ipg); | ||
3419 | if (ret) | ||
3420 | goto failed_clk_ipg; | ||
3421 | |||
3422 | fep->reg_phy = devm_regulator_get(&pdev->dev, "phy"); | 3391 | fep->reg_phy = devm_regulator_get(&pdev->dev, "phy"); |
3423 | if (!IS_ERR(fep->reg_phy)) { | 3392 | if (!IS_ERR(fep->reg_phy)) { |
3424 | ret = regulator_enable(fep->reg_phy); | 3393 | ret = regulator_enable(fep->reg_phy); |
@@ -3465,8 +3434,6 @@ fec_probe(struct platform_device *pdev) | |||
3465 | netif_carrier_off(ndev); | 3434 | netif_carrier_off(ndev); |
3466 | fec_enet_clk_enable(ndev, false); | 3435 | fec_enet_clk_enable(ndev, false); |
3467 | pinctrl_pm_select_sleep_state(&pdev->dev); | 3436 | pinctrl_pm_select_sleep_state(&pdev->dev); |
3468 | pm_runtime_set_active(&pdev->dev); | ||
3469 | pm_runtime_enable(&pdev->dev); | ||
3470 | 3437 | ||
3471 | ret = register_netdev(ndev); | 3438 | ret = register_netdev(ndev); |
3472 | if (ret) | 3439 | if (ret) |
@@ -3480,12 +3447,6 @@ fec_probe(struct platform_device *pdev) | |||
3480 | 3447 | ||
3481 | fep->rx_copybreak = COPYBREAK_DEFAULT; | 3448 | fep->rx_copybreak = COPYBREAK_DEFAULT; |
3482 | INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work); | 3449 | INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work); |
3483 | |||
3484 | pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT); | ||
3485 | pm_runtime_use_autosuspend(&pdev->dev); | ||
3486 | pm_runtime_mark_last_busy(&pdev->dev); | ||
3487 | pm_runtime_put_autosuspend(&pdev->dev); | ||
3488 | |||
3489 | return 0; | 3450 | return 0; |
3490 | 3451 | ||
3491 | failed_register: | 3452 | failed_register: |
@@ -3496,8 +3457,6 @@ failed_init: | |||
3496 | if (fep->reg_phy) | 3457 | if (fep->reg_phy) |
3497 | regulator_disable(fep->reg_phy); | 3458 | regulator_disable(fep->reg_phy); |
3498 | failed_regulator: | 3459 | failed_regulator: |
3499 | clk_disable_unprepare(fep->clk_ipg); | ||
3500 | failed_clk_ipg: | ||
3501 | fec_enet_clk_enable(ndev, false); | 3460 | fec_enet_clk_enable(ndev, false); |
3502 | failed_clk: | 3461 | failed_clk: |
3503 | failed_phy: | 3462 | failed_phy: |
@@ -3609,28 +3568,7 @@ failed_clk: | |||
3609 | return ret; | 3568 | return ret; |
3610 | } | 3569 | } |
3611 | 3570 | ||
3612 | static int __maybe_unused fec_runtime_suspend(struct device *dev) | 3571 | static SIMPLE_DEV_PM_OPS(fec_pm_ops, fec_suspend, fec_resume); |
3613 | { | ||
3614 | struct net_device *ndev = dev_get_drvdata(dev); | ||
3615 | struct fec_enet_private *fep = netdev_priv(ndev); | ||
3616 | |||
3617 | clk_disable_unprepare(fep->clk_ipg); | ||
3618 | |||
3619 | return 0; | ||
3620 | } | ||
3621 | |||
3622 | static int __maybe_unused fec_runtime_resume(struct device *dev) | ||
3623 | { | ||
3624 | struct net_device *ndev = dev_get_drvdata(dev); | ||
3625 | struct fec_enet_private *fep = netdev_priv(ndev); | ||
3626 | |||
3627 | return clk_prepare_enable(fep->clk_ipg); | ||
3628 | } | ||
3629 | |||
3630 | static const struct dev_pm_ops fec_pm_ops = { | ||
3631 | SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume) | ||
3632 | SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL) | ||
3633 | }; | ||
3634 | 3572 | ||
3635 | static struct platform_driver fec_driver = { | 3573 | static struct platform_driver fec_driver = { |
3636 | .driver = { | 3574 | .driver = { |
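The fec_main hunks back out the MDIO runtime-PM handling and fold clk_ipg back into fec_enet_clk_enable(), which relies on the usual goto-unwind error path: each newly enabled clock adds one label, and a failure releases everything acquired so far in reverse order. A standalone sketch of that pattern with invented acquire()/release() helpers (not the kernel clk API):

#include <stdio.h>

/* Invented stand-ins for clk_prepare_enable()/clk_disable_unprepare(). */
static int acquire(const char *name)
{
	printf("acquire %s\n", name);
	return 0;
}

static void release(const char *name)
{
	printf("release %s\n", name);
}

static int enable_all(void)
{
	int ret;

	ret = acquire("ahb");
	if (ret)
		return ret;
	ret = acquire("ipg");
	if (ret)
		goto failed_ipg;
	ret = acquire("ptp");
	if (ret)
		goto failed_ptp;

	return 0;

failed_ptp:
	release("ipg");		/* undo in reverse order of acquisition */
failed_ipg:
	release("ahb");
	return ret;
}

int main(void)
{
	return enable_all();
}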
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 370e20ed224c..62e48bc0cb23 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c | |||
@@ -1462,7 +1462,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, | |||
1462 | struct mvneta_rx_queue *rxq) | 1462 | struct mvneta_rx_queue *rxq) |
1463 | { | 1463 | { |
1464 | struct net_device *dev = pp->dev; | 1464 | struct net_device *dev = pp->dev; |
1465 | int rx_done, rx_filled; | 1465 | int rx_done; |
1466 | u32 rcvd_pkts = 0; | 1466 | u32 rcvd_pkts = 0; |
1467 | u32 rcvd_bytes = 0; | 1467 | u32 rcvd_bytes = 0; |
1468 | 1468 | ||
@@ -1473,7 +1473,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, | |||
1473 | rx_todo = rx_done; | 1473 | rx_todo = rx_done; |
1474 | 1474 | ||
1475 | rx_done = 0; | 1475 | rx_done = 0; |
1476 | rx_filled = 0; | ||
1477 | 1476 | ||
1478 | /* Fairness NAPI loop */ | 1477 | /* Fairness NAPI loop */ |
1479 | while (rx_done < rx_todo) { | 1478 | while (rx_done < rx_todo) { |
@@ -1484,7 +1483,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, | |||
1484 | int rx_bytes, err; | 1483 | int rx_bytes, err; |
1485 | 1484 | ||
1486 | rx_done++; | 1485 | rx_done++; |
1487 | rx_filled++; | ||
1488 | rx_status = rx_desc->status; | 1486 | rx_status = rx_desc->status; |
1489 | rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); | 1487 | rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); |
1490 | data = (unsigned char *)rx_desc->buf_cookie; | 1488 | data = (unsigned char *)rx_desc->buf_cookie; |
@@ -1524,6 +1522,14 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, | |||
1524 | continue; | 1522 | continue; |
1525 | } | 1523 | } |
1526 | 1524 | ||
1525 | /* Refill processing */ | ||
1526 | err = mvneta_rx_refill(pp, rx_desc); | ||
1527 | if (err) { | ||
1528 | netdev_err(dev, "Linux processing - Can't refill\n"); | ||
1529 | rxq->missed++; | ||
1530 | goto err_drop_frame; | ||
1531 | } | ||
1532 | |||
1527 | skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size); | 1533 | skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size); |
1528 | if (!skb) | 1534 | if (!skb) |
1529 | goto err_drop_frame; | 1535 | goto err_drop_frame; |
@@ -1543,14 +1549,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, | |||
1543 | mvneta_rx_csum(pp, rx_status, skb); | 1549 | mvneta_rx_csum(pp, rx_status, skb); |
1544 | 1550 | ||
1545 | napi_gro_receive(&pp->napi, skb); | 1551 | napi_gro_receive(&pp->napi, skb); |
1546 | |||
1547 | /* Refill processing */ | ||
1548 | err = mvneta_rx_refill(pp, rx_desc); | ||
1549 | if (err) { | ||
1550 | netdev_err(dev, "Linux processing - Can't refill\n"); | ||
1551 | rxq->missed++; | ||
1552 | rx_filled--; | ||
1553 | } | ||
1554 | } | 1552 | } |
1555 | 1553 | ||
1556 | if (rcvd_pkts) { | 1554 | if (rcvd_pkts) { |
@@ -1563,7 +1561,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo, | |||
1563 | } | 1561 | } |
1564 | 1562 | ||
1565 | /* Update rxq management counters */ | 1563 | /* Update rxq management counters */ |
1566 | mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled); | 1564 | mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); |
1567 | 1565 | ||
1568 | return rx_done; | 1566 | return rx_done; |
1569 | } | 1567 | } |
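The mvneta hunks move the refill attempt ahead of build_skb(): a replacement buffer is secured before the current one is handed away, and if the refill fails the frame is dropped while its buffer stays in the ring, which is why the separate rx_filled counter becomes redundant. A small standalone sketch of the "replace before you hand off" idea; struct slot, deliver() and rx_one() are made up for the example.

#include <stdio.h>
#include <stdlib.h>

#define BUF_SZ 256

/* One ring slot owns exactly one buffer at all times. */
struct slot { void *buf; };

/* Stand-in for building an skb and passing it up: consumes the buffer. */
static int deliver(void *buf)
{
	free(buf);
	return 0;
}

static int rx_one(struct slot *s)
{
	void *fresh = malloc(BUF_SZ);

	if (!fresh)
		return -1;	/* drop the frame: old buffer stays in the ring */

	deliver(s->buf);	/* safe: the replacement is already secured */
	s->buf = fresh;
	return 0;
}

int main(void)
{
	struct slot s = { .buf = malloc(BUF_SZ) };

	if (!s.buf)
		return 1;
	if (rx_one(&s))
		puts("frame dropped, buffer reused");
	free(s.buf);
	return 0;
}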
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index fd9745714d90..78849dd4ef8e 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c | |||
@@ -228,9 +228,7 @@ static void ravb_ring_format(struct net_device *ndev, int q) | |||
228 | struct ravb_desc *desc = NULL; | 228 | struct ravb_desc *desc = NULL; |
229 | int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q]; | 229 | int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q]; |
230 | int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q]; | 230 | int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q]; |
231 | struct sk_buff *skb; | ||
232 | dma_addr_t dma_addr; | 231 | dma_addr_t dma_addr; |
233 | void *buffer; | ||
234 | int i; | 232 | int i; |
235 | 233 | ||
236 | priv->cur_rx[q] = 0; | 234 | priv->cur_rx[q] = 0; |
@@ -241,41 +239,28 @@ static void ravb_ring_format(struct net_device *ndev, int q) | |||
241 | memset(priv->rx_ring[q], 0, rx_ring_size); | 239 | memset(priv->rx_ring[q], 0, rx_ring_size); |
242 | /* Build RX ring buffer */ | 240 | /* Build RX ring buffer */ |
243 | for (i = 0; i < priv->num_rx_ring[q]; i++) { | 241 | for (i = 0; i < priv->num_rx_ring[q]; i++) { |
244 | priv->rx_skb[q][i] = NULL; | ||
245 | skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1); | ||
246 | if (!skb) | ||
247 | break; | ||
248 | ravb_set_buffer_align(skb); | ||
249 | /* RX descriptor */ | 242 | /* RX descriptor */ |
250 | rx_desc = &priv->rx_ring[q][i]; | 243 | rx_desc = &priv->rx_ring[q][i]; |
251 | /* The size of the buffer should be on 16-byte boundary. */ | 244 | /* The size of the buffer should be on 16-byte boundary. */ |
252 | rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16)); | 245 | rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16)); |
253 | dma_addr = dma_map_single(&ndev->dev, skb->data, | 246 | dma_addr = dma_map_single(&ndev->dev, priv->rx_skb[q][i]->data, |
254 | ALIGN(PKT_BUF_SZ, 16), | 247 | ALIGN(PKT_BUF_SZ, 16), |
255 | DMA_FROM_DEVICE); | 248 | DMA_FROM_DEVICE); |
256 | if (dma_mapping_error(&ndev->dev, dma_addr)) { | 249 | /* We just set the data size to 0 for a failed mapping which |
257 | dev_kfree_skb(skb); | 250 | * should prevent DMA from happening... |
258 | break; | 251 | */ |
259 | } | 252 | if (dma_mapping_error(&ndev->dev, dma_addr)) |
260 | priv->rx_skb[q][i] = skb; | 253 | rx_desc->ds_cc = cpu_to_le16(0); |
261 | rx_desc->dptr = cpu_to_le32(dma_addr); | 254 | rx_desc->dptr = cpu_to_le32(dma_addr); |
262 | rx_desc->die_dt = DT_FEMPTY; | 255 | rx_desc->die_dt = DT_FEMPTY; |
263 | } | 256 | } |
264 | rx_desc = &priv->rx_ring[q][i]; | 257 | rx_desc = &priv->rx_ring[q][i]; |
265 | rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]); | 258 | rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]); |
266 | rx_desc->die_dt = DT_LINKFIX; /* type */ | 259 | rx_desc->die_dt = DT_LINKFIX; /* type */ |
267 | priv->dirty_rx[q] = (u32)(i - priv->num_rx_ring[q]); | ||
268 | 260 | ||
269 | memset(priv->tx_ring[q], 0, tx_ring_size); | 261 | memset(priv->tx_ring[q], 0, tx_ring_size); |
270 | /* Build TX ring buffer */ | 262 | /* Build TX ring buffer */ |
271 | for (i = 0; i < priv->num_tx_ring[q]; i++) { | 263 | for (i = 0; i < priv->num_tx_ring[q]; i++) { |
272 | priv->tx_skb[q][i] = NULL; | ||
273 | priv->tx_buffers[q][i] = NULL; | ||
274 | buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL); | ||
275 | if (!buffer) | ||
276 | break; | ||
277 | /* Aligned TX buffer */ | ||
278 | priv->tx_buffers[q][i] = buffer; | ||
279 | tx_desc = &priv->tx_ring[q][i]; | 264 | tx_desc = &priv->tx_ring[q][i]; |
280 | tx_desc->die_dt = DT_EEMPTY; | 265 | tx_desc->die_dt = DT_EEMPTY; |
281 | } | 266 | } |
@@ -298,7 +283,10 @@ static void ravb_ring_format(struct net_device *ndev, int q) | |||
298 | static int ravb_ring_init(struct net_device *ndev, int q) | 283 | static int ravb_ring_init(struct net_device *ndev, int q) |
299 | { | 284 | { |
300 | struct ravb_private *priv = netdev_priv(ndev); | 285 | struct ravb_private *priv = netdev_priv(ndev); |
286 | struct sk_buff *skb; | ||
301 | int ring_size; | 287 | int ring_size; |
288 | void *buffer; | ||
289 | int i; | ||
302 | 290 | ||
303 | /* Allocate RX and TX skb rings */ | 291 | /* Allocate RX and TX skb rings */ |
304 | priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q], | 292 | priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q], |
@@ -308,12 +296,28 @@ static int ravb_ring_init(struct net_device *ndev, int q) | |||
308 | if (!priv->rx_skb[q] || !priv->tx_skb[q]) | 296 | if (!priv->rx_skb[q] || !priv->tx_skb[q]) |
309 | goto error; | 297 | goto error; |
310 | 298 | ||
299 | for (i = 0; i < priv->num_rx_ring[q]; i++) { | ||
300 | skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1); | ||
301 | if (!skb) | ||
302 | goto error; | ||
303 | ravb_set_buffer_align(skb); | ||
304 | priv->rx_skb[q][i] = skb; | ||
305 | } | ||
306 | |||
311 | /* Allocate rings for the aligned buffers */ | 307 | /* Allocate rings for the aligned buffers */ |
312 | priv->tx_buffers[q] = kcalloc(priv->num_tx_ring[q], | 308 | priv->tx_buffers[q] = kcalloc(priv->num_tx_ring[q], |
313 | sizeof(*priv->tx_buffers[q]), GFP_KERNEL); | 309 | sizeof(*priv->tx_buffers[q]), GFP_KERNEL); |
314 | if (!priv->tx_buffers[q]) | 310 | if (!priv->tx_buffers[q]) |
315 | goto error; | 311 | goto error; |
316 | 312 | ||
313 | for (i = 0; i < priv->num_tx_ring[q]; i++) { | ||
314 | buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL); | ||
315 | if (!buffer) | ||
316 | goto error; | ||
317 | /* Aligned TX buffer */ | ||
318 | priv->tx_buffers[q][i] = buffer; | ||
319 | } | ||
320 | |||
317 | /* Allocate all RX descriptors. */ | 321 | /* Allocate all RX descriptors. */ |
318 | ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1); | 322 | ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1); |
319 | priv->rx_ring[q] = dma_alloc_coherent(NULL, ring_size, | 323 | priv->rx_ring[q] = dma_alloc_coherent(NULL, ring_size, |
@@ -524,6 +528,10 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q) | |||
524 | if (--boguscnt < 0) | 528 | if (--boguscnt < 0) |
525 | break; | 529 | break; |
526 | 530 | ||
531 | /* We use 0-byte descriptors to mark the DMA mapping errors */ | ||
532 | if (!pkt_len) | ||
533 | continue; | ||
534 | |||
527 | if (desc_status & MSC_MC) | 535 | if (desc_status & MSC_MC) |
528 | stats->multicast++; | 536 | stats->multicast++; |
529 | 537 | ||
@@ -543,10 +551,9 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q) | |||
543 | 551 | ||
544 | skb = priv->rx_skb[q][entry]; | 552 | skb = priv->rx_skb[q][entry]; |
545 | priv->rx_skb[q][entry] = NULL; | 553 | priv->rx_skb[q][entry] = NULL; |
546 | dma_sync_single_for_cpu(&ndev->dev, | 554 | dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr), |
547 | le32_to_cpu(desc->dptr), | 555 | ALIGN(PKT_BUF_SZ, 16), |
548 | ALIGN(PKT_BUF_SZ, 16), | 556 | DMA_FROM_DEVICE); |
549 | DMA_FROM_DEVICE); | ||
550 | get_ts &= (q == RAVB_NC) ? | 557 | get_ts &= (q == RAVB_NC) ? |
551 | RAVB_RXTSTAMP_TYPE_V2_L2_EVENT : | 558 | RAVB_RXTSTAMP_TYPE_V2_L2_EVENT : |
552 | ~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT; | 559 | ~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT; |
@@ -584,17 +591,15 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q) | |||
584 | if (!skb) | 591 | if (!skb) |
585 | break; /* Better luck next round. */ | 592 | break; /* Better luck next round. */ |
586 | ravb_set_buffer_align(skb); | 593 | ravb_set_buffer_align(skb); |
587 | dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr), | ||
588 | ALIGN(PKT_BUF_SZ, 16), | ||
589 | DMA_FROM_DEVICE); | ||
590 | dma_addr = dma_map_single(&ndev->dev, skb->data, | 594 | dma_addr = dma_map_single(&ndev->dev, skb->data, |
591 | le16_to_cpu(desc->ds_cc), | 595 | le16_to_cpu(desc->ds_cc), |
592 | DMA_FROM_DEVICE); | 596 | DMA_FROM_DEVICE); |
593 | skb_checksum_none_assert(skb); | 597 | skb_checksum_none_assert(skb); |
594 | if (dma_mapping_error(&ndev->dev, dma_addr)) { | 598 | /* We just set the data size to 0 for a failed mapping |
595 | dev_kfree_skb_any(skb); | 599 | * which should prevent DMA from happening... |
596 | break; | 600 | */ |
597 | } | 601 | if (dma_mapping_error(&ndev->dev, dma_addr)) |
602 | desc->ds_cc = cpu_to_le16(0); | ||
598 | desc->dptr = cpu_to_le32(dma_addr); | 603 | desc->dptr = cpu_to_le32(dma_addr); |
599 | priv->rx_skb[q][entry] = skb; | 604 | priv->rx_skb[q][entry] = skb; |
600 | } | 605 | } |
@@ -1279,7 +1284,6 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
1279 | u32 dma_addr; | 1284 | u32 dma_addr; |
1280 | void *buffer; | 1285 | void *buffer; |
1281 | u32 entry; | 1286 | u32 entry; |
1282 | u32 tccr; | ||
1283 | 1287 | ||
1284 | spin_lock_irqsave(&priv->lock, flags); | 1288 | spin_lock_irqsave(&priv->lock, flags); |
1285 | if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q]) { | 1289 | if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q]) { |
@@ -1328,9 +1332,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) | |||
1328 | dma_wmb(); | 1332 | dma_wmb(); |
1329 | desc->die_dt = DT_FSINGLE; | 1333 | desc->die_dt = DT_FSINGLE; |
1330 | 1334 | ||
1331 | tccr = ravb_read(ndev, TCCR); | 1335 | ravb_write(ndev, ravb_read(ndev, TCCR) | (TCCR_TSRQ0 << q), TCCR); |
1332 | if (!(tccr & (TCCR_TSRQ0 << q))) | ||
1333 | ravb_write(ndev, tccr | (TCCR_TSRQ0 << q), TCCR); | ||
1334 | 1336 | ||
1335 | priv->cur_tx[q]++; | 1337 | priv->cur_tx[q]++; |
1336 | if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q] && | 1338 | if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q] && |
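Several of the ravb hunks adopt one convention: when dma_map_single() fails, the descriptor's data size is set to 0 so the hardware has nothing to DMA into, and the RX completion loop skips zero-length descriptors. A standalone sketch of that sentinel idea with invented types and a fake mapping helper:

#include <stdio.h>

struct rx_desc {
	unsigned short len;
	unsigned int addr;
};

/* Invented stand-in for dma_map_single(); made to fail for odd slots. */
static int map_buffer(int slot, unsigned int *addr)
{
	if (slot & 1)
		return -1;
	*addr = 0x1000u + (unsigned int)slot;
	return 0;
}

int main(void)
{
	struct rx_desc ring[4];
	int i;

	for (i = 0; i < 4; i++) {
		ring[i].len = 128;
		ring[i].addr = 0;
		if (map_buffer(i, &ring[i].addr))
			ring[i].len = 0;	/* sentinel: no DMA, skip later */
	}

	for (i = 0; i < 4; i++) {
		if (!ring[i].len)	/* mapping failed earlier: nothing to do */
			continue;
		printf("slot %d: %u bytes at 0x%x\n", i, ring[i].len, ring[i].addr);
	}
	return 0;
}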
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 50f7a7a26821..864b476f7fd5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -2843,7 +2843,7 @@ int stmmac_dvr_probe(struct device *device, | |||
2843 | if (res->mac) | 2843 | if (res->mac) |
2844 | memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN); | 2844 | memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN); |
2845 | 2845 | ||
2846 | dev_set_drvdata(device, priv); | 2846 | dev_set_drvdata(device, priv->dev); |
2847 | 2847 | ||
2848 | /* Verify driver arguments */ | 2848 | /* Verify driver arguments */ |
2849 | stmmac_verify_args(); | 2849 | stmmac_verify_args(); |
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index f335bf119ab5..d155bf2573cd 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c | |||
@@ -793,9 +793,7 @@ static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id) | |||
793 | static int cpsw_poll(struct napi_struct *napi, int budget) | 793 | static int cpsw_poll(struct napi_struct *napi, int budget) |
794 | { | 794 | { |
795 | struct cpsw_priv *priv = napi_to_priv(napi); | 795 | struct cpsw_priv *priv = napi_to_priv(napi); |
796 | int num_tx, num_rx; | 796 | int num_rx; |
797 | |||
798 | num_tx = cpdma_chan_process(priv->txch, 128); | ||
799 | 797 | ||
800 | num_rx = cpdma_chan_process(priv->rxch, budget); | 798 | num_rx = cpdma_chan_process(priv->rxch, budget); |
801 | if (num_rx < budget) { | 799 | if (num_rx < budget) { |
@@ -810,9 +808,8 @@ static int cpsw_poll(struct napi_struct *napi, int budget) | |||
810 | } | 808 | } |
811 | } | 809 | } |
812 | 810 | ||
813 | if (num_rx || num_tx) | 811 | if (num_rx) |
814 | cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n", | 812 | cpsw_dbg(priv, intr, "poll %d rx pkts\n", num_rx); |
815 | num_rx, num_tx); | ||
816 | 813 | ||
817 | return num_rx; | 814 | return num_rx; |
818 | } | 815 | } |
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 5ec4ed3f6c8d..ec8ed30196f3 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c | |||
@@ -1617,11 +1617,11 @@ static int netcp_ndo_open(struct net_device *ndev) | |||
1617 | } | 1617 | } |
1618 | mutex_unlock(&netcp_modules_lock); | 1618 | mutex_unlock(&netcp_modules_lock); |
1619 | 1619 | ||
1620 | netcp_rxpool_refill(netcp); | ||
1621 | napi_enable(&netcp->rx_napi); | 1620 | napi_enable(&netcp->rx_napi); |
1622 | napi_enable(&netcp->tx_napi); | 1621 | napi_enable(&netcp->tx_napi); |
1623 | knav_queue_enable_notify(netcp->tx_compl_q); | 1622 | knav_queue_enable_notify(netcp->tx_compl_q); |
1624 | knav_queue_enable_notify(netcp->rx_queue); | 1623 | knav_queue_enable_notify(netcp->rx_queue); |
1624 | netcp_rxpool_refill(netcp); | ||
1625 | netif_tx_wake_all_queues(ndev); | 1625 | netif_tx_wake_all_queues(ndev); |
1626 | dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name); | 1626 | dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name); |
1627 | return 0; | 1627 | return 0; |
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h index 953a97492fab..9542b7bac61a 100644 --- a/drivers/net/ipvlan/ipvlan.h +++ b/drivers/net/ipvlan/ipvlan.h | |||
@@ -67,8 +67,6 @@ struct ipvl_dev { | |||
67 | struct ipvl_port *port; | 67 | struct ipvl_port *port; |
68 | struct net_device *phy_dev; | 68 | struct net_device *phy_dev; |
69 | struct list_head addrs; | 69 | struct list_head addrs; |
70 | int ipv4cnt; | ||
71 | int ipv6cnt; | ||
72 | struct ipvl_pcpu_stats __percpu *pcpu_stats; | 70 | struct ipvl_pcpu_stats __percpu *pcpu_stats; |
73 | DECLARE_BITMAP(mac_filters, IPVLAN_MAC_FILTER_SIZE); | 71 | DECLARE_BITMAP(mac_filters, IPVLAN_MAC_FILTER_SIZE); |
74 | netdev_features_t sfeatures; | 72 | netdev_features_t sfeatures; |
@@ -106,6 +104,11 @@ static inline struct ipvl_port *ipvlan_port_get_rcu(const struct net_device *d) | |||
106 | return rcu_dereference(d->rx_handler_data); | 104 | return rcu_dereference(d->rx_handler_data); |
107 | } | 105 | } |
108 | 106 | ||
107 | static inline struct ipvl_port *ipvlan_port_get_rcu_bh(const struct net_device *d) | ||
108 | { | ||
109 | return rcu_dereference_bh(d->rx_handler_data); | ||
110 | } | ||
111 | |||
109 | static inline struct ipvl_port *ipvlan_port_get_rtnl(const struct net_device *d) | 112 | static inline struct ipvl_port *ipvlan_port_get_rtnl(const struct net_device *d) |
110 | { | 113 | { |
111 | return rtnl_dereference(d->rx_handler_data); | 114 | return rtnl_dereference(d->rx_handler_data); |
@@ -124,5 +127,5 @@ struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan, | |||
124 | bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6); | 127 | bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6); |
125 | struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port, | 128 | struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port, |
126 | const void *iaddr, bool is_v6); | 129 | const void *iaddr, bool is_v6); |
127 | void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync); | 130 | void ipvlan_ht_addr_del(struct ipvl_addr *addr); |
128 | #endif /* __IPVLAN_H */ | 131 | #endif /* __IPVLAN_H */ |
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c index 8afbedad620d..207f62e8de9a 100644 --- a/drivers/net/ipvlan/ipvlan_core.c +++ b/drivers/net/ipvlan/ipvlan_core.c | |||
@@ -85,11 +85,9 @@ void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr) | |||
85 | hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]); | 85 | hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]); |
86 | } | 86 | } |
87 | 87 | ||
88 | void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync) | 88 | void ipvlan_ht_addr_del(struct ipvl_addr *addr) |
89 | { | 89 | { |
90 | hlist_del_init_rcu(&addr->hlnode); | 90 | hlist_del_init_rcu(&addr->hlnode); |
91 | if (sync) | ||
92 | synchronize_rcu(); | ||
93 | } | 91 | } |
94 | 92 | ||
95 | struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan, | 93 | struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan, |
@@ -531,7 +529,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev) | |||
531 | int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) | 529 | int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) |
532 | { | 530 | { |
533 | struct ipvl_dev *ipvlan = netdev_priv(dev); | 531 | struct ipvl_dev *ipvlan = netdev_priv(dev); |
534 | struct ipvl_port *port = ipvlan_port_get_rcu(ipvlan->phy_dev); | 532 | struct ipvl_port *port = ipvlan_port_get_rcu_bh(ipvlan->phy_dev); |
535 | 533 | ||
536 | if (!port) | 534 | if (!port) |
537 | goto out; | 535 | goto out; |
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 1acc283160d9..20b58bdecf75 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c | |||
@@ -153,10 +153,9 @@ static int ipvlan_open(struct net_device *dev) | |||
153 | else | 153 | else |
154 | dev->flags &= ~IFF_NOARP; | 154 | dev->flags &= ~IFF_NOARP; |
155 | 155 | ||
156 | if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { | 156 | list_for_each_entry(addr, &ipvlan->addrs, anode) |
157 | list_for_each_entry(addr, &ipvlan->addrs, anode) | 157 | ipvlan_ht_addr_add(ipvlan, addr); |
158 | ipvlan_ht_addr_add(ipvlan, addr); | 158 | |
159 | } | ||
160 | return dev_uc_add(phy_dev, phy_dev->dev_addr); | 159 | return dev_uc_add(phy_dev, phy_dev->dev_addr); |
161 | } | 160 | } |
162 | 161 | ||
@@ -171,10 +170,9 @@ static int ipvlan_stop(struct net_device *dev) | |||
171 | 170 | ||
172 | dev_uc_del(phy_dev, phy_dev->dev_addr); | 171 | dev_uc_del(phy_dev, phy_dev->dev_addr); |
173 | 172 | ||
174 | if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { | 173 | list_for_each_entry(addr, &ipvlan->addrs, anode) |
175 | list_for_each_entry(addr, &ipvlan->addrs, anode) | 174 | ipvlan_ht_addr_del(addr); |
176 | ipvlan_ht_addr_del(addr, !dev->dismantle); | 175 | |
177 | } | ||
178 | return 0; | 176 | return 0; |
179 | } | 177 | } |
180 | 178 | ||
@@ -471,8 +469,6 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev, | |||
471 | ipvlan->port = port; | 469 | ipvlan->port = port; |
472 | ipvlan->sfeatures = IPVLAN_FEATURES; | 470 | ipvlan->sfeatures = IPVLAN_FEATURES; |
473 | INIT_LIST_HEAD(&ipvlan->addrs); | 471 | INIT_LIST_HEAD(&ipvlan->addrs); |
474 | ipvlan->ipv4cnt = 0; | ||
475 | ipvlan->ipv6cnt = 0; | ||
476 | 472 | ||
477 | /* TODO Probably put random address here to be presented to the | 473 | /* TODO Probably put random address here to be presented to the |
478 | * world but keep using the physical-dev address for the outgoing | 474 | * world but keep using the physical-dev address for the outgoing |
@@ -508,12 +504,12 @@ static void ipvlan_link_delete(struct net_device *dev, struct list_head *head) | |||
508 | struct ipvl_dev *ipvlan = netdev_priv(dev); | 504 | struct ipvl_dev *ipvlan = netdev_priv(dev); |
509 | struct ipvl_addr *addr, *next; | 505 | struct ipvl_addr *addr, *next; |
510 | 506 | ||
511 | if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { | 507 | list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) { |
512 | list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) { | 508 | ipvlan_ht_addr_del(addr); |
513 | ipvlan_ht_addr_del(addr, !dev->dismantle); | 509 | list_del(&addr->anode); |
514 | list_del(&addr->anode); | 510 | kfree_rcu(addr, rcu); |
515 | } | ||
516 | } | 511 | } |
512 | |||
517 | list_del_rcu(&ipvlan->pnode); | 513 | list_del_rcu(&ipvlan->pnode); |
518 | unregister_netdevice_queue(dev, head); | 514 | unregister_netdevice_queue(dev, head); |
519 | netdev_upper_dev_unlink(ipvlan->phy_dev, dev); | 515 | netdev_upper_dev_unlink(ipvlan->phy_dev, dev); |
@@ -627,7 +623,7 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr) | |||
627 | memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr)); | 623 | memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr)); |
628 | addr->atype = IPVL_IPV6; | 624 | addr->atype = IPVL_IPV6; |
629 | list_add_tail(&addr->anode, &ipvlan->addrs); | 625 | list_add_tail(&addr->anode, &ipvlan->addrs); |
630 | ipvlan->ipv6cnt++; | 626 | |
631 | /* If the interface is not up, the address will be added to the hash | 627 | /* If the interface is not up, the address will be added to the hash |
632 | * list by ipvlan_open. | 628 | * list by ipvlan_open. |
633 | */ | 629 | */ |
@@ -645,10 +641,8 @@ static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr) | |||
645 | if (!addr) | 641 | if (!addr) |
646 | return; | 642 | return; |
647 | 643 | ||
648 | ipvlan_ht_addr_del(addr, true); | 644 | ipvlan_ht_addr_del(addr); |
649 | list_del(&addr->anode); | 645 | list_del(&addr->anode); |
650 | ipvlan->ipv6cnt--; | ||
651 | WARN_ON(ipvlan->ipv6cnt < 0); | ||
652 | kfree_rcu(addr, rcu); | 646 | kfree_rcu(addr, rcu); |
653 | 647 | ||
654 | return; | 648 | return; |
@@ -661,6 +655,10 @@ static int ipvlan_addr6_event(struct notifier_block *unused, | |||
661 | struct net_device *dev = (struct net_device *)if6->idev->dev; | 655 | struct net_device *dev = (struct net_device *)if6->idev->dev; |
662 | struct ipvl_dev *ipvlan = netdev_priv(dev); | 656 | struct ipvl_dev *ipvlan = netdev_priv(dev); |
663 | 657 | ||
658 | /* FIXME IPv6 autoconf calls us from bh without RTNL */ | ||
659 | if (in_softirq()) | ||
660 | return NOTIFY_DONE; | ||
661 | |||
664 | if (!netif_is_ipvlan(dev)) | 662 | if (!netif_is_ipvlan(dev)) |
665 | return NOTIFY_DONE; | 663 | return NOTIFY_DONE; |
666 | 664 | ||
@@ -699,7 +697,7 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr) | |||
699 | memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr)); | 697 | memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr)); |
700 | addr->atype = IPVL_IPV4; | 698 | addr->atype = IPVL_IPV4; |
701 | list_add_tail(&addr->anode, &ipvlan->addrs); | 699 | list_add_tail(&addr->anode, &ipvlan->addrs); |
702 | ipvlan->ipv4cnt++; | 700 | |
703 | /* If the interface is not up, the address will be added to the hash | 701 | /* If the interface is not up, the address will be added to the hash |
704 | * list by ipvlan_open. | 702 | * list by ipvlan_open. |
705 | */ | 703 | */ |
@@ -717,10 +715,8 @@ static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr) | |||
717 | if (!addr) | 715 | if (!addr) |
718 | return; | 716 | return; |
719 | 717 | ||
720 | ipvlan_ht_addr_del(addr, true); | 718 | ipvlan_ht_addr_del(addr); |
721 | list_del(&addr->anode); | 719 | list_del(&addr->anode); |
722 | ipvlan->ipv4cnt--; | ||
723 | WARN_ON(ipvlan->ipv4cnt < 0); | ||
724 | kfree_rcu(addr, rcu); | 720 | kfree_rcu(addr, rcu); |
725 | 721 | ||
726 | return; | 722 | return; |
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index c7a12e2e07b7..8a3bf5469892 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c | |||
@@ -164,7 +164,7 @@ static int dp83867_config_init(struct phy_device *phydev) | |||
164 | return ret; | 164 | return ret; |
165 | } | 165 | } |
166 | 166 | ||
167 | if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) || | 167 | if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) && |
168 | (phydev->interface <= PHY_INTERFACE_MODE_RGMII_RXID)) { | 168 | (phydev->interface <= PHY_INTERFACE_MODE_RGMII_RXID)) { |
169 | val = phy_read_mmd_indirect(phydev, DP83867_RGMIICTL, | 169 | val = phy_read_mmd_indirect(phydev, DP83867_RGMIICTL, |
170 | DP83867_DEVADDR, phydev->addr); | 170 | DP83867_DEVADDR, phydev->addr); |
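The dp83867 one-liner turns '||' into '&&' in the RGMII range check. With '||' the test "iface >= RGMII_ID || iface <= RGMII_RXID" is true for every interface value, since one of the two halves always holds, so the RGMII-specific register accesses presumably ran even for non-RGMII modes. A tiny standalone demonstration of why a closed range needs '&&'; the enum values are invented:

#include <stdio.h>

enum { MODE_MII = 1, MODE_RGMII_ID = 7, MODE_RGMII_RXID = 9 };

int main(void)
{
	int iface = MODE_MII;

	int buggy = (iface >= MODE_RGMII_ID) || (iface <= MODE_RGMII_RXID); /* 1: always true */
	int fixed = (iface >= MODE_RGMII_ID) && (iface <= MODE_RGMII_RXID); /* 0: correctly excluded */

	printf("buggy=%d fixed=%d\n", buggy, fixed);
	return 0;
}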
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 095ef3fe369a..46a14cbb0215 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c | |||
@@ -421,6 +421,8 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv) | |||
421 | { | 421 | { |
422 | struct phy_device *phydev = to_phy_device(dev); | 422 | struct phy_device *phydev = to_phy_device(dev); |
423 | struct phy_driver *phydrv = to_phy_driver(drv); | 423 | struct phy_driver *phydrv = to_phy_driver(drv); |
424 | const int num_ids = ARRAY_SIZE(phydev->c45_ids.device_ids); | ||
425 | int i; | ||
424 | 426 | ||
425 | if (of_driver_match_device(dev, drv)) | 427 | if (of_driver_match_device(dev, drv)) |
426 | return 1; | 428 | return 1; |
@@ -428,8 +430,21 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv) | |||
428 | if (phydrv->match_phy_device) | 430 | if (phydrv->match_phy_device) |
429 | return phydrv->match_phy_device(phydev); | 431 | return phydrv->match_phy_device(phydev); |
430 | 432 | ||
431 | return (phydrv->phy_id & phydrv->phy_id_mask) == | 433 | if (phydev->is_c45) { |
432 | (phydev->phy_id & phydrv->phy_id_mask); | 434 | for (i = 1; i < num_ids; i++) { |
435 | if (!(phydev->c45_ids.devices_in_package & (1 << i))) | ||
436 | continue; | ||
437 | |||
438 | if ((phydrv->phy_id & phydrv->phy_id_mask) == | ||
439 | (phydev->c45_ids.device_ids[i] & | ||
440 | phydrv->phy_id_mask)) | ||
441 | return 1; | ||
442 | } | ||
443 | return 0; | ||
444 | } else { | ||
445 | return (phydrv->phy_id & phydrv->phy_id_mask) == | ||
446 | (phydev->phy_id & phydrv->phy_id_mask); | ||
447 | } | ||
433 | } | 448 | } |
434 | 449 | ||
435 | #ifdef CONFIG_PM | 450 | #ifdef CONFIG_PM |
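The mdio_bus change lets clause-45 PHYs match a driver against the per-MMD device IDs under the driver's mask, rather than against the top-level phy_id that a clause-45 device may not populate. A standalone sketch of masked-ID matching over a small ID table; the IDs, mask and table layout are invented (the kernel loop also starts at index 1, which the sketch does not reproduce):

#include <stdio.h>

#define NUM_IDS 4

/* Return 1 if any present device ID matches the driver's (id & mask). */
static int match_c45(const unsigned int *ids, unsigned int present_mask,
		     unsigned int drv_id, unsigned int drv_mask)
{
	int i;

	for (i = 0; i < NUM_IDS; i++) {
		if (!(present_mask & (1u << i)))
			continue;	/* this MMD is not in the package */
		if ((drv_id & drv_mask) == (ids[i] & drv_mask))
			return 1;
	}
	return 0;
}

int main(void)
{
	unsigned int ids[NUM_IDS] = { 0, 0x20005043, 0, 0x20005043 };
	unsigned int present = (1u << 1) | (1u << 3);

	printf("match=%d\n", match_c45(ids, present, 0x20005040, 0xfffffff0));
	return 0;
}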
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index f603f362504b..9d43460ce3c7 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
@@ -757,6 +757,7 @@ static const struct usb_device_id products[] = { | |||
757 | {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ | 757 | {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ |
758 | {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */ | 758 | {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */ |
759 | {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */ | 759 | {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */ |
760 | {QMI_FIXED_INTF(0x1199, 0x9041, 10)}, /* Sierra Wireless MC7305/MC7355 */ | ||
760 | {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */ | 761 | {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */ |
761 | {QMI_FIXED_INTF(0x1199, 0x9053, 8)}, /* Sierra Wireless Modem */ | 762 | {QMI_FIXED_INTF(0x1199, 0x9053, 8)}, /* Sierra Wireless Modem */ |
762 | {QMI_FIXED_INTF(0x1199, 0x9054, 8)}, /* Sierra Wireless Modem */ | 763 | {QMI_FIXED_INTF(0x1199, 0x9054, 8)}, /* Sierra Wireless Modem */ |
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 63c7810e1545..7fbca37a1adf 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -1828,7 +1828,8 @@ static int virtnet_probe(struct virtio_device *vdev) | |||
1828 | else | 1828 | else |
1829 | vi->hdr_len = sizeof(struct virtio_net_hdr); | 1829 | vi->hdr_len = sizeof(struct virtio_net_hdr); |
1830 | 1830 | ||
1831 | if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT)) | 1831 | if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) || |
1832 | virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) | ||
1832 | vi->any_header_sg = true; | 1833 | vi->any_header_sg = true; |
1833 | 1834 | ||
1834 | if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) | 1835 | if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) |
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index 5e15e8e10ed3..a31a6804dc34 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c | |||
@@ -279,6 +279,7 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah) | |||
279 | return; | 279 | return; |
280 | case AR9300_DEVID_QCA956X: | 280 | case AR9300_DEVID_QCA956X: |
281 | ah->hw_version.macVersion = AR_SREV_VERSION_9561; | 281 | ah->hw_version.macVersion = AR_SREV_VERSION_9561; |
282 | return; | ||
282 | } | 283 | } |
283 | 284 | ||
284 | val = REG_READ(ah, AR_SREV) & AR_SREV_ID; | 285 | val = REG_READ(ah, AR_SREV) & AR_SREV_ID; |
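The ath9k fix adds the return the QCA956X case was missing; without it execution falls out of the switch into the generic AR_SREV-based detection below, which can override the macVersion that was just set. A minimal standalone illustration of how a missing early return lets later code undo a case's work (device IDs and version numbers invented):

#include <stdio.h>

static int classify(int devid, int fallthrough_bug)
{
	int version = 0;

	switch (devid) {
	case 0x56:
		version = 9561;
		if (!fallthrough_bug)
			return version;	/* the early return added by the fix */
		/* fall through */
	default:
		version = -1;		/* generic path overwrites the value */
	}
	return version;
}

int main(void)
{
	printf("buggy=%d fixed=%d\n", classify(0x56, 1), classify(0x56, 0));
	return 0;
}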
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h index d56064861a9c..d45dc021cda2 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fh.h +++ b/drivers/net/wireless/iwlwifi/iwl-fh.h | |||
@@ -438,6 +438,12 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl) | |||
438 | #define RX_QUEUE_MASK 255 | 438 | #define RX_QUEUE_MASK 255 |
439 | #define RX_QUEUE_SIZE_LOG 8 | 439 | #define RX_QUEUE_SIZE_LOG 8 |
440 | 440 | ||
441 | /* | ||
442 | * RX related structures and functions | ||
443 | */ | ||
444 | #define RX_FREE_BUFFERS 64 | ||
445 | #define RX_LOW_WATERMARK 8 | ||
446 | |||
441 | /** | 447 | /** |
442 | * struct iwl_rb_status - reserve buffer status | 448 | * struct iwl_rb_status - reserve buffer status |
443 | * host memory mapped FH registers | 449 | * host memory mapped FH registers |
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c index 80fefe7d7b8c..3b8e85e51002 100644 --- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c | |||
@@ -540,13 +540,11 @@ static void iwl_set_hw_address_family_8000(struct device *dev, | |||
540 | hw_addr = (const u8 *)(mac_override + | 540 | hw_addr = (const u8 *)(mac_override + |
541 | MAC_ADDRESS_OVERRIDE_FAMILY_8000); | 541 | MAC_ADDRESS_OVERRIDE_FAMILY_8000); |
542 | 542 | ||
543 | /* The byte order is little endian 16 bit, meaning 214365 */ | 543 | /* |
544 | data->hw_addr[0] = hw_addr[1]; | 544 | * Store the MAC address from MAO section. |
545 | data->hw_addr[1] = hw_addr[0]; | 545 | * No byte swapping is required in MAO section |
546 | data->hw_addr[2] = hw_addr[3]; | 546 | */ |
547 | data->hw_addr[3] = hw_addr[2]; | 547 | memcpy(data->hw_addr, hw_addr, ETH_ALEN); |
548 | data->hw_addr[4] = hw_addr[5]; | ||
549 | data->hw_addr[5] = hw_addr[4]; | ||
550 | 548 | ||
551 | /* | 549 | /* |
552 | * Force the use of the OTP MAC address in case of reserved MAC | 550 | * Force the use of the OTP MAC address in case of reserved MAC |
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h index 5e4cbdb44c60..737774a01c74 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h | |||
@@ -660,7 +660,8 @@ struct iwl_scan_config { | |||
660 | * iwl_umac_scan_flags | 660 | * iwl_umac_scan_flags |
661 | *@IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request | 661 | *@IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request |
662 | * can be preempted by other scan requests with higher priority. | 662 | * can be preempted by other scan requests with higher priority. |
663 | * The low priority scan is aborted. | 663 | * The low priority scan will be resumed when the higher priority scan is |
664 | * completed. | ||
664 | *@IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver | 665 | *@IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver |
665 | * when scan starts. | 666 | * when scan starts. |
666 | */ | 667 | */ |
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index 5de144968723..5000bfcded61 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c | |||
@@ -1109,6 +1109,9 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, | |||
1109 | cmd->uid = cpu_to_le32(uid); | 1109 | cmd->uid = cpu_to_le32(uid); |
1110 | cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params)); | 1110 | cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params)); |
1111 | 1111 | ||
1112 | if (type == IWL_MVM_SCAN_SCHED) | ||
1113 | cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE); | ||
1114 | |||
1112 | if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations)) | 1115 | if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations)) |
1113 | cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS | | 1116 | cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS | |
1114 | IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | | 1117 | IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | |
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c index d68dc697a4a0..26f076e82149 100644 --- a/drivers/net/wireless/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/iwlwifi/mvm/sta.c | |||
@@ -1401,6 +1401,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, | |||
1401 | bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); | 1401 | bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); |
1402 | u8 sta_id; | 1402 | u8 sta_id; |
1403 | int ret; | 1403 | int ret; |
1404 | static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0}; | ||
1404 | 1405 | ||
1405 | lockdep_assert_held(&mvm->mutex); | 1406 | lockdep_assert_held(&mvm->mutex); |
1406 | 1407 | ||
@@ -1467,7 +1468,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, | |||
1467 | end: | 1468 | end: |
1468 | IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n", | 1469 | IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n", |
1469 | keyconf->cipher, keyconf->keylen, keyconf->keyidx, | 1470 | keyconf->cipher, keyconf->keylen, keyconf->keyidx, |
1470 | sta->addr, ret); | 1471 | sta ? sta->addr : zero_addr, ret); |
1471 | return ret; | 1472 | return ret; |
1472 | } | 1473 | } |
1473 | 1474 | ||
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c index d24b6a83e68c..e472729e5f14 100644 --- a/drivers/net/wireless/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c | |||
@@ -86,7 +86,7 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm, | |||
86 | { | 86 | { |
87 | lockdep_assert_held(&mvm->time_event_lock); | 87 | lockdep_assert_held(&mvm->time_event_lock); |
88 | 88 | ||
89 | if (te_data->id == TE_MAX) | 89 | if (!te_data->vif) |
90 | return; | 90 | return; |
91 | 91 | ||
92 | list_del(&te_data->list); | 92 | list_del(&te_data->list); |
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index 7ba7a118ff5c..89116864d2a0 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c | |||
@@ -252,7 +252,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, | |||
252 | 252 | ||
253 | if (info->band == IEEE80211_BAND_2GHZ && | 253 | if (info->band == IEEE80211_BAND_2GHZ && |
254 | !iwl_mvm_bt_coex_is_shared_ant_avail(mvm)) | 254 | !iwl_mvm_bt_coex_is_shared_ant_avail(mvm)) |
255 | rate_flags = BIT(mvm->cfg->non_shared_ant) << RATE_MCS_ANT_POS; | 255 | rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS; |
256 | else | 256 | else |
257 | rate_flags = | 257 | rate_flags = |
258 | BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS; | 258 | BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS; |
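The mvm/tx.c hunk drops the BIT() wrapper because, by the look of the change, non_shared_ant already holds an antenna bitmask while mgmt_last_antenna_idx is an index; applying BIT() to a value that is already a mask shifts by the mask value and selects the wrong antenna bits. A small standalone example of that double-shift mistake; BIT() and the field position are defined locally for the sketch and are not taken from the driver headers:

#include <stdio.h>

#define BIT(n)		(1u << (n))
#define ANT_FIELD_POS	14	/* illustrative field offset */

int main(void)
{
	unsigned int ant_mask = BIT(1);	/* "antenna B", already a mask */

	unsigned int buggy = BIT(ant_mask) << ANT_FIELD_POS;	/* shifts by the mask value */
	unsigned int fixed = ant_mask << ANT_FIELD_POS;

	printf("buggy=0x%x fixed=0x%x\n", buggy, fixed);
	return 0;
}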
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index 2ed1e4d2774d..9f65c1cff1b1 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c | |||
@@ -368,12 +368,14 @@ static const struct pci_device_id iwl_hw_card_ids[] = { | |||
368 | /* 3165 Series */ | 368 | /* 3165 Series */ |
369 | {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)}, | 369 | {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)}, |
370 | {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)}, | 370 | {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)}, |
371 | {IWL_PCI_DEVICE(0x3166, 0x4212, iwl3165_2ac_cfg)}, | ||
371 | {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)}, | 372 | {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)}, |
372 | {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)}, | 373 | {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)}, |
373 | {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)}, | 374 | {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)}, |
374 | {IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)}, | 375 | {IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)}, |
375 | {IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)}, | 376 | {IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)}, |
376 | {IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)}, | 377 | {IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)}, |
378 | {IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)}, | ||
377 | 379 | ||
378 | /* 7265 Series */ | 380 | /* 7265 Series */ |
379 | {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, | 381 | {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, |
@@ -426,9 +428,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = { | |||
426 | {IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)}, | 428 | {IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)}, |
427 | {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)}, | 429 | {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)}, |
428 | {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)}, | 430 | {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)}, |
431 | {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)}, | ||
429 | {IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)}, | 432 | {IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)}, |
430 | {IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)}, | ||
431 | {IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)}, | ||
432 | {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)}, | 433 | {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)}, |
433 | {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)}, | 434 | {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)}, |
434 | {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)}, | 435 | {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)}, |
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h index 31f72a61cc3f..376b84e54ad7 100644 --- a/drivers/net/wireless/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/iwlwifi/pcie/internal.h | |||
@@ -44,15 +44,6 @@ | |||
44 | #include "iwl-io.h" | 44 | #include "iwl-io.h" |
45 | #include "iwl-op-mode.h" | 45 | #include "iwl-op-mode.h" |
46 | 46 | ||
47 | /* | ||
48 | * RX related structures and functions | ||
49 | */ | ||
50 | #define RX_NUM_QUEUES 1 | ||
51 | #define RX_POST_REQ_ALLOC 2 | ||
52 | #define RX_CLAIM_REQ_ALLOC 8 | ||
53 | #define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES) | ||
54 | #define RX_LOW_WATERMARK 8 | ||
55 | |||
56 | struct iwl_host_cmd; | 47 | struct iwl_host_cmd; |
57 | 48 | ||
58 | /*This file includes the declaration that are internal to the | 49 | /*This file includes the declaration that are internal to the |
@@ -86,29 +77,29 @@ struct isr_statistics { | |||
86 | * struct iwl_rxq - Rx queue | 77 | * struct iwl_rxq - Rx queue |
87 | * @bd: driver's pointer to buffer of receive buffer descriptors (rbd) | 78 | * @bd: driver's pointer to buffer of receive buffer descriptors (rbd) |
88 | * @bd_dma: bus address of buffer of receive buffer descriptors (rbd) | 79 | * @bd_dma: bus address of buffer of receive buffer descriptors (rbd) |
80 | * @pool: | ||
81 | * @queue: | ||
89 | * @read: Shared index to newest available Rx buffer | 82 | * @read: Shared index to newest available Rx buffer |
90 | * @write: Shared index to oldest written Rx packet | 83 | * @write: Shared index to oldest written Rx packet |
91 | * @free_count: Number of pre-allocated buffers in rx_free | 84 | * @free_count: Number of pre-allocated buffers in rx_free |
92 | * @used_count: Number of RBDs handled to allocator to use for allocation | ||
93 | * @write_actual: | 85 | * @write_actual: |
94 | * @rx_free: list of RBDs with allocated RB ready for use | 86 | * @rx_free: list of free SKBs for use |
95 | * @rx_used: list of RBDs with no RB attached | 87 | * @rx_used: List of Rx buffers with no SKB |
96 | * @need_update: flag to indicate we need to update read/write index | 88 | * @need_update: flag to indicate we need to update read/write index |
97 | * @rb_stts: driver's pointer to receive buffer status | 89 | * @rb_stts: driver's pointer to receive buffer status |
98 | * @rb_stts_dma: bus address of receive buffer status | 90 | * @rb_stts_dma: bus address of receive buffer status |
99 | * @lock: | 91 | * @lock: |
100 | * @pool: initial pool of iwl_rx_mem_buffer for the queue | ||
101 | * @queue: actual rx queue | ||
102 | * | 92 | * |
103 | * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers | 93 | * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers |
104 | */ | 94 | */ |
105 | struct iwl_rxq { | 95 | struct iwl_rxq { |
106 | __le32 *bd; | 96 | __le32 *bd; |
107 | dma_addr_t bd_dma; | 97 | dma_addr_t bd_dma; |
98 | struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS]; | ||
99 | struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; | ||
108 | u32 read; | 100 | u32 read; |
109 | u32 write; | 101 | u32 write; |
110 | u32 free_count; | 102 | u32 free_count; |
111 | u32 used_count; | ||
112 | u32 write_actual; | 103 | u32 write_actual; |
113 | struct list_head rx_free; | 104 | struct list_head rx_free; |
114 | struct list_head rx_used; | 105 | struct list_head rx_used; |
@@ -116,32 +107,6 @@ struct iwl_rxq { | |||
116 | struct iwl_rb_status *rb_stts; | 107 | struct iwl_rb_status *rb_stts; |
117 | dma_addr_t rb_stts_dma; | 108 | dma_addr_t rb_stts_dma; |
118 | spinlock_t lock; | 109 | spinlock_t lock; |
119 | struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE]; | ||
120 | struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE]; | ||
121 | }; | ||
122 | |||
123 | /** | ||
124 | * struct iwl_rb_allocator - Rx allocator | ||
125 | * @pool: initial pool of allocator | ||
126 | * @req_pending: number of requests the allocator had not processed yet | ||
127 | * @req_ready: number of requests honored and ready for claiming | ||
128 | * @rbd_allocated: RBDs with pages allocated and ready to be handled to | ||
129 | * the queue. This is a list of &struct iwl_rx_mem_buffer | ||
130 | * @rbd_empty: RBDs with no page attached for allocator use. This is a list | ||
131 | * of &struct iwl_rx_mem_buffer | ||
132 | * @lock: protects the rbd_allocated and rbd_empty lists | ||
133 | * @alloc_wq: work queue for background calls | ||
134 | * @rx_alloc: work struct for background calls | ||
135 | */ | ||
136 | struct iwl_rb_allocator { | ||
137 | struct iwl_rx_mem_buffer pool[RX_POOL_SIZE]; | ||
138 | atomic_t req_pending; | ||
139 | atomic_t req_ready; | ||
140 | struct list_head rbd_allocated; | ||
141 | struct list_head rbd_empty; | ||
142 | spinlock_t lock; | ||
143 | struct workqueue_struct *alloc_wq; | ||
144 | struct work_struct rx_alloc; | ||
145 | }; | 110 | }; |
146 | 111 | ||
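The NOTE above leans on the rx_free / rx_used convention without spelling it out. As a rough, stand-alone illustration of that recycling scheme (the types, sizes and helper names below are invented for the example and are not the driver's), buffers with no backing page wait on a used list, an allocation pass attaches pages and moves them to a free list, and the hardware queue is restocked from the free list:

/* Minimal two-list recycling sketch; names and sizes are illustrative,
 * not iwlwifi's. */
#include <stdio.h>
#include <stdlib.h>

struct rxb {
	int id;
	void *page;		/* NULL while the buffer has no backing page */
	struct rxb *next;
};

struct fifo { struct rxb *head, *tail; };

static void fifo_push(struct fifo *q, struct rxb *b)
{
	b->next = NULL;
	if (q->tail)
		q->tail->next = b;
	else
		q->head = b;
	q->tail = b;
}

static struct rxb *fifo_pop(struct fifo *q)
{
	struct rxb *b = q->head;

	if (!b)
		return NULL;
	q->head = b->next;
	if (!q->head)
		q->tail = NULL;
	return b;
}

int main(void)
{
	struct fifo rx_used = { 0 }, rx_free = { 0 };
	struct rxb pool[4];
	struct rxb *b;
	int i;

	/* Init: every descriptor starts on rx_used with no page attached. */
	for (i = 0; i < 4; i++) {
		pool[i].id = i;
		pool[i].page = NULL;
		fifo_push(&rx_used, &pool[i]);
	}

	/* "Replenish": attach a page, move the buffer to rx_free. */
	while ((b = fifo_pop(&rx_used))) {
		b->page = malloc(4096);
		fifo_push(&rx_free, b);
	}

	/* "Restock" + receive: hand one buffer to HW, then recycle it. */
	b = fifo_pop(&rx_free);
	printf("buffer %d handed to HW\n", b->id);
	free(b->page);			/* page stolen by the stack */
	b->page = NULL;
	fifo_push(&rx_used, b);		/* back to the used list */

	while ((b = fifo_pop(&rx_free)))	/* tidy up the demo */
		free(b->page);
	return 0;
}

The real driver uses the kernel's intrusive list_head lists and DMA-mapped pages rather than malloc, but the movement between the two lists is the same idea.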
147 | struct iwl_dma_ptr { | 112 | struct iwl_dma_ptr { |
@@ -285,7 +250,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx) | |||
285 | /** | 250 | /** |
286 | * struct iwl_trans_pcie - PCIe transport specific data | 251 | * struct iwl_trans_pcie - PCIe transport specific data |
287 | * @rxq: all the RX queue data | 252 | * @rxq: all the RX queue data |
288 | * @rba: allocator for RX replenishing | 253 | * @rx_replenish: work that will be called when buffers need to be allocated |
289 | * @drv - pointer to iwl_drv | 254 | * @drv - pointer to iwl_drv |
290 | * @trans: pointer to the generic transport area | 255 | * @trans: pointer to the generic transport area |
291 | * @scd_base_addr: scheduler sram base address in SRAM | 256 | * @scd_base_addr: scheduler sram base address in SRAM |
@@ -308,7 +273,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx) | |||
308 | */ | 273 | */ |
309 | struct iwl_trans_pcie { | 274 | struct iwl_trans_pcie { |
310 | struct iwl_rxq rxq; | 275 | struct iwl_rxq rxq; |
311 | struct iwl_rb_allocator rba; | 276 | struct work_struct rx_replenish; |
312 | struct iwl_trans *trans; | 277 | struct iwl_trans *trans; |
313 | struct iwl_drv *drv; | 278 | struct iwl_drv *drv; |
314 | 279 | ||
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c index a3fbaa0ef5e0..adad8d0fae7f 100644 --- a/drivers/net/wireless/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/iwlwifi/pcie/rx.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /****************************************************************************** | 1 | /****************************************************************************** |
2 | * | 2 | * |
3 | * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. | 3 | * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. |
4 | * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH | 4 | * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH |
5 | * | 5 | * |
6 | * Portions of this file are derived from the ipw3945 project, as well | 6 | * Portions of this file are derived from the ipw3945 project, as well |
7 | * as portions of the ieee80211 subsystem header files. | 7 | * as portions of the ieee80211 subsystem header files. |
@@ -74,29 +74,16 @@ | |||
74 | * resets the Rx queue buffers with new memory. | 74 | * resets the Rx queue buffers with new memory. |
75 | * | 75 | * |
76 | * The management in the driver is as follows: | 76 | * The management in the driver is as follows: |
77 | * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free. | 77 | * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When |
78 | * When the interrupt handler is called, the request is processed. | 78 | * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled |
79 | * The page is either stolen - transferred to the upper layer | 79 | * to replenish the iwl->rxq->rx_free. |
80 | * or reused - added immediately to the iwl->rxq->rx_free list. | 80 | * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the |
81 | * + When the page is stolen - the driver updates the matching queue's used | 81 | * iwl->rxq is replenished and the READ INDEX is updated (updating the |
82 | * count, detaches the RBD and transfers it to the queue used list. | 82 | * 'processed' and 'read' driver indexes as well) |
83 | * When there are two used RBDs - they are transferred to the allocator empty | ||
84 | * list. Work is then scheduled for the allocator to start allocating | ||
85 | * eight buffers. | ||
86 | * When there are another 6 used RBDs - they are transferred to the allocator | ||
87 | * empty list and the driver tries to claim the pre-allocated buffers and | ||
88 | * add them to iwl->rxq->rx_free. If it fails - it keeps trying to claim them | ||
89 | * until they are ready. | ||
90 | * When there are 8+ buffers in the free list - either from allocation or from | ||
91 | * 8 reused unstolen pages - restock is called to update the FW and indexes. | ||
92 | * + In order to make sure the allocator always has RBDs to use for allocation | ||
93 | * the allocator has an initial pool of size num_queues*(8-2) - the | ||
94 | * maximum missing RBDs per allocation request (request posted with 2 | ||
95 | * empty RBDs, there is no guarantee when the other 6 RBDs are supplied). | ||
96 | * The queues supply the recycling of the rest of the RBDs. | ||
97 | * + A received packet is processed and handed to the kernel network stack, | 83 | * + A received packet is processed and handed to the kernel network stack, |
98 | * detached from the iwl->rxq. The driver 'processed' index is updated. | 84 | * detached from the iwl->rxq. The driver 'processed' index is updated. |
99 | * + If there are no allocated buffers in iwl->rxq->rx_free, | 85 | * + The Host/Firmware iwl->rxq is replenished at irq thread time from the |
86 | * rx_free list. If there are no allocated buffers in iwl->rxq->rx_free, | ||
100 | * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set. | 87 | * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set. |
101 | * If there were enough free buffers and RX_STALLED is set it is cleared. | 88 | * If there were enough free buffers and RX_STALLED is set it is cleared. |
102 | * | 89 | * |
@@ -105,32 +92,18 @@ | |||
105 | * | 92 | * |
106 | * iwl_rxq_alloc() Allocates rx_free | 93 | * iwl_rxq_alloc() Allocates rx_free |
107 | * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls | 94 | * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls |
108 | * iwl_pcie_rxq_restock. | 95 | * iwl_pcie_rxq_restock |
109 | * Used only during initialization. | ||
110 | * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx | 96 | * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx |
111 | * queue, updates firmware pointers, and updates | 97 | * queue, updates firmware pointers, and updates |
112 | * the WRITE index. | 98 | * the WRITE index. If insufficient rx_free buffers |
113 | * iwl_pcie_rx_allocator() Background work for allocating pages. | 99 | * are available, schedules iwl_pcie_rx_replenish |
114 | * | 100 | * |
115 | * -- enable interrupts -- | 101 | * -- enable interrupts -- |
116 | * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the | 102 | * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the |
117 | * READ INDEX, detaching the SKB from the pool. | 103 | * READ INDEX, detaching the SKB from the pool. |
118 | * Moves the packet buffer from queue to rx_used. | 104 | * Moves the packet buffer from queue to rx_used. |
119 | * Posts and claims requests to the allocator. | ||
120 | * Calls iwl_pcie_rxq_restock to refill any empty | 105 | * Calls iwl_pcie_rxq_restock to refill any empty |
121 | * slots. | 106 | * slots. |
122 | * | ||
123 | * RBD life-cycle: | ||
124 | * | ||
125 | * Init: | ||
126 | * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue | ||
127 | * | ||
128 | * Regular Receive interrupt: | ||
129 | * Page Stolen: | ||
130 | * rxq.queue -> rxq.rx_used -> allocator.rbd_empty -> | ||
131 | * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue | ||
132 | * Page not Stolen: | ||
133 | * rxq.queue -> rxq.rx_free -> rxq.queue | ||
134 | * ... | 107 | * ... |
135 | * | 108 | * |
136 | */ | 109 | */ |
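For readers skimming the two descriptions, the practical difference on the right-hand side boils down to a single low-watermark test in the restock path. A minimal sketch of that decision, with made-up constants and a stand-in for schedule_work():

/* Watermark-driven replenish decision; a sketch, not the driver code. */
#include <stdio.h>

#define RX_QUEUE_SIZE    256
#define RX_LOW_WATERMARK 8

static int free_count = RX_QUEUE_SIZE;
static int replenish_scheduled;

/* Stand-in for schedule_work(&trans_pcie->rx_replenish). */
static void schedule_replenish(void)
{
	replenish_scheduled = 1;
}

/* One buffer handed to the hardware during restock. */
static void restock_one(void)
{
	free_count--;
	if (free_count <= RX_LOW_WATERMARK)
		schedule_replenish();
}

int main(void)
{
	while (!replenish_scheduled)
		restock_one();
	printf("replenish scheduled with %d free buffers left\n", free_count);
	return 0;
}

Everything heavier - actually allocating and mapping pages - then happens later in process context, which is the point of deferring it to a work item.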
@@ -267,6 +240,10 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans) | |||
267 | rxq->free_count--; | 240 | rxq->free_count--; |
268 | } | 241 | } |
269 | spin_unlock(&rxq->lock); | 242 | spin_unlock(&rxq->lock); |
243 | /* If the pre-allocated buffer pool is dropping low, schedule to | ||
244 | * refill it */ | ||
245 | if (rxq->free_count <= RX_LOW_WATERMARK) | ||
246 | schedule_work(&trans_pcie->rx_replenish); | ||
270 | 247 | ||
271 | /* If we've added more space for the firmware to place data, tell it. | 248 | /* If we've added more space for the firmware to place data, tell it. |
272 | * Increment device's write pointer in multiples of 8. */ | 249 | * Increment device's write pointer in multiples of 8. */ |
@@ -278,44 +255,6 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans) | |||
278 | } | 255 | } |
279 | 256 | ||
280 | /* | 257 | /* |
281 | * iwl_pcie_rx_alloc_page - allocates and returns a page. | ||
282 | * | ||
283 | */ | ||
284 | static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans) | ||
285 | { | ||
286 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
287 | struct iwl_rxq *rxq = &trans_pcie->rxq; | ||
288 | struct page *page; | ||
289 | gfp_t gfp_mask = GFP_KERNEL; | ||
290 | |||
291 | if (rxq->free_count > RX_LOW_WATERMARK) | ||
292 | gfp_mask |= __GFP_NOWARN; | ||
293 | |||
294 | if (trans_pcie->rx_page_order > 0) | ||
295 | gfp_mask |= __GFP_COMP; | ||
296 | |||
297 | /* Alloc a new receive buffer */ | ||
298 | page = alloc_pages(gfp_mask, trans_pcie->rx_page_order); | ||
299 | if (!page) { | ||
300 | if (net_ratelimit()) | ||
301 | IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n", | ||
302 | trans_pcie->rx_page_order); | ||
303 | /* Issue an error if the hardware has consumed more than half | ||
304 | * of its free buffer list and we don't have enough | ||
305 | * pre-allocated buffers. | ||
306 | */ | ||
307 | if (rxq->free_count <= RX_LOW_WATERMARK && | ||
308 | iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) && | ||
309 | net_ratelimit()) | ||
310 | IWL_CRIT(trans, | ||
311 | "Failed to alloc_pages with GFP_KERNEL. Only %u free buffers remaining.\n", | ||
312 | rxq->free_count); | ||
313 | return NULL; | ||
314 | } | ||
315 | return page; | ||
316 | } | ||
317 | |||
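The policy of the removed helper reduces to a small mask computation: suppress allocation warnings while buffers are plentiful, and request a compound page whenever the buffer spans more than one page. The flag values below are illustrative placeholders, not the kernel's real GFP bits:

/* GFP-mask selection sketch; flag values are placeholders, not the
 * kernel's real GFP bits. */
#include <stdio.h>

#define GFP_KERNEL	 0x01u
#define GFP_NOWARN	 0x02u	/* stand-in for __GFP_NOWARN */
#define GFP_COMP	 0x04u	/* stand-in for __GFP_COMP */
#define RX_LOW_WATERMARK 8

static unsigned int rx_gfp_mask(unsigned int free_count, int page_order)
{
	unsigned int mask = GFP_KERNEL;

	/* Plenty of buffers left: a transient failure is not worth a splat. */
	if (free_count > RX_LOW_WATERMARK)
		mask |= GFP_NOWARN;

	/* Multi-page buffers must be allocated as a compound page. */
	if (page_order > 0)
		mask |= GFP_COMP;

	return mask;
}

int main(void)
{
	printf("mask(free=64, order=1) = %#x\n", rx_gfp_mask(64, 1));
	printf("mask(free=4,  order=0) = %#x\n", rx_gfp_mask(4, 0));
	return 0;
}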
318 | /* | ||
319 | * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD | 258 | * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD |
320 | * | 259 | * |
321 | * A used RBD is an Rx buffer that has been given to the stack. To use it again | 260 | * A used RBD is an Rx buffer that has been given to the stack. To use it again |
@@ -324,12 +263,13 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans) | |||
324 | * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly | 263 | * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly |
325 | * allocated buffers. | 264 | * allocated buffers. |
326 | */ | 265 | */ |
327 | static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans) | 266 | static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority) |
328 | { | 267 | { |
329 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 268 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
330 | struct iwl_rxq *rxq = &trans_pcie->rxq; | 269 | struct iwl_rxq *rxq = &trans_pcie->rxq; |
331 | struct iwl_rx_mem_buffer *rxb; | 270 | struct iwl_rx_mem_buffer *rxb; |
332 | struct page *page; | 271 | struct page *page; |
272 | gfp_t gfp_mask = priority; | ||
333 | 273 | ||
334 | while (1) { | 274 | while (1) { |
335 | spin_lock(&rxq->lock); | 275 | spin_lock(&rxq->lock); |
@@ -339,10 +279,32 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans) | |||
339 | } | 279 | } |
340 | spin_unlock(&rxq->lock); | 280 | spin_unlock(&rxq->lock); |
341 | 281 | ||
282 | if (rxq->free_count > RX_LOW_WATERMARK) | ||
283 | gfp_mask |= __GFP_NOWARN; | ||
284 | |||
285 | if (trans_pcie->rx_page_order > 0) | ||
286 | gfp_mask |= __GFP_COMP; | ||
287 | |||
342 | /* Alloc a new receive buffer */ | 288 | /* Alloc a new receive buffer */ |
343 | page = iwl_pcie_rx_alloc_page(trans); | 289 | page = alloc_pages(gfp_mask, trans_pcie->rx_page_order); |
344 | if (!page) | 290 | if (!page) { |
291 | if (net_ratelimit()) | ||
292 | IWL_DEBUG_INFO(trans, "alloc_pages failed, " | ||
293 | "order: %d\n", | ||
294 | trans_pcie->rx_page_order); | ||
295 | |||
296 | if ((rxq->free_count <= RX_LOW_WATERMARK) && | ||
297 | net_ratelimit()) | ||
298 | IWL_CRIT(trans, "Failed to alloc_pages with %s." | ||
299 | "Only %u free buffers remaining.\n", | ||
300 | priority == GFP_ATOMIC ? | ||
301 | "GFP_ATOMIC" : "GFP_KERNEL", | ||
302 | rxq->free_count); | ||
303 | /* We don't reschedule replenish work here -- we will | ||
304 | * call the restock method and if it still needs | ||
305 | * more buffers it will schedule replenish */ | ||
345 | return; | 306 | return; |
307 | } | ||
346 | 308 | ||
347 | spin_lock(&rxq->lock); | 309 | spin_lock(&rxq->lock); |
348 | 310 | ||
@@ -393,7 +355,7 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans) | |||
393 | 355 | ||
394 | lockdep_assert_held(&rxq->lock); | 356 | lockdep_assert_held(&rxq->lock); |
395 | 357 | ||
396 | for (i = 0; i < RX_QUEUE_SIZE; i++) { | 358 | for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { |
397 | if (!rxq->pool[i].page) | 359 | if (!rxq->pool[i].page) |
398 | continue; | 360 | continue; |
399 | dma_unmap_page(trans->dev, rxq->pool[i].page_dma, | 361 | dma_unmap_page(trans->dev, rxq->pool[i].page_dma, |
@@ -410,144 +372,32 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans) | |||
410 | * When moving to rx_free, a page is allocated for the slot. | 372 | * When moving to rx_free, a page is allocated for the slot. |
411 | * | 373 | * |
412 | * Also restock the Rx queue via iwl_pcie_rxq_restock. | 374 | * Also restock the Rx queue via iwl_pcie_rxq_restock. |
413 | * This is called only during initialization | 375 | * This is called as a scheduled work item (except during initialization) |
414 | */ | 376 | */ |
415 | static void iwl_pcie_rx_replenish(struct iwl_trans *trans) | 377 | static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp) |
416 | { | 378 | { |
417 | iwl_pcie_rxq_alloc_rbs(trans); | 379 | iwl_pcie_rxq_alloc_rbs(trans, gfp); |
418 | 380 | ||
419 | iwl_pcie_rxq_restock(trans); | 381 | iwl_pcie_rxq_restock(trans); |
420 | } | 382 | } |
421 | 383 | ||
422 | /* | 384 | static void iwl_pcie_rx_replenish_work(struct work_struct *data) |
423 | * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues | ||
424 | * | ||
425 | * Allocates for each received request 8 pages | ||
426 | * Called as a scheduled work item. | ||
427 | */ | ||
428 | static void iwl_pcie_rx_allocator(struct iwl_trans *trans) | ||
429 | { | ||
430 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
431 | struct iwl_rb_allocator *rba = &trans_pcie->rba; | ||
432 | |||
433 | while (atomic_read(&rba->req_pending)) { | ||
434 | int i; | ||
435 | struct list_head local_empty; | ||
436 | struct list_head local_allocated; | ||
437 | |||
438 | INIT_LIST_HEAD(&local_allocated); | ||
439 | spin_lock(&rba->lock); | ||
440 | /* swap out the entire rba->rbd_empty to a local list */ | ||
441 | list_replace_init(&rba->rbd_empty, &local_empty); | ||
442 | spin_unlock(&rba->lock); | ||
443 | |||
444 | for (i = 0; i < RX_CLAIM_REQ_ALLOC;) { | ||
445 | struct iwl_rx_mem_buffer *rxb; | ||
446 | struct page *page; | ||
447 | |||
448 | /* List should never be empty - each reused RBD is | ||
449 | * returned to the list, and the initial pool covers any | ||
450 | * possible gap between the time the page is allocated | ||
451 | * and the time the RBD is added. | ||
452 | */ | ||
453 | BUG_ON(list_empty(&local_empty)); | ||
454 | /* Get the first rxb from the rbd list */ | ||
455 | rxb = list_first_entry(&local_empty, | ||
456 | struct iwl_rx_mem_buffer, list); | ||
457 | BUG_ON(rxb->page); | ||
458 | |||
459 | /* Alloc a new receive buffer */ | ||
460 | page = iwl_pcie_rx_alloc_page(trans); | ||
461 | if (!page) | ||
462 | continue; | ||
463 | rxb->page = page; | ||
464 | |||
465 | /* Get physical address of the RB */ | ||
466 | rxb->page_dma = dma_map_page(trans->dev, page, 0, | ||
467 | PAGE_SIZE << trans_pcie->rx_page_order, | ||
468 | DMA_FROM_DEVICE); | ||
469 | if (dma_mapping_error(trans->dev, rxb->page_dma)) { | ||
470 | rxb->page = NULL; | ||
471 | __free_pages(page, trans_pcie->rx_page_order); | ||
472 | continue; | ||
473 | } | ||
474 | /* dma address must be no more than 36 bits */ | ||
475 | BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36)); | ||
476 | /* and also 256 byte aligned! */ | ||
477 | BUG_ON(rxb->page_dma & DMA_BIT_MASK(8)); | ||
478 | |||
479 | /* move the allocated entry to the out list */ | ||
480 | list_move(&rxb->list, &local_allocated); | ||
481 | i++; | ||
482 | } | ||
483 | |||
484 | spin_lock(&rba->lock); | ||
485 | /* add the allocated rbds to the allocator allocated list */ | ||
486 | list_splice_tail(&local_allocated, &rba->rbd_allocated); | ||
487 | /* add the unused rbds back to the allocator empty list */ | ||
488 | list_splice_tail(&local_empty, &rba->rbd_empty); | ||
489 | spin_unlock(&rba->lock); | ||
490 | |||
491 | atomic_dec(&rba->req_pending); | ||
492 | atomic_inc(&rba->req_ready); | ||
493 | } | ||
494 | } | ||
495 | |||
496 | /* | ||
497 | * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages | ||
498 | * | ||
499 | * Called by the queue when it has posted an allocation request and | ||
500 | * has freed 8 RBDs in order to restock itself. | ||
501 | */ | ||
502 | static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans, | ||
503 | struct iwl_rx_mem_buffer | ||
504 | *out[RX_CLAIM_REQ_ALLOC]) | ||
505 | { | ||
506 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
507 | struct iwl_rb_allocator *rba = &trans_pcie->rba; | ||
508 | int i; | ||
509 | |||
510 | if (atomic_dec_return(&rba->req_ready) < 0) { | ||
511 | atomic_inc(&rba->req_ready); | ||
512 | IWL_DEBUG_RX(trans, | ||
513 | "Allocation request not ready, pending requests = %d\n", | ||
514 | atomic_read(&rba->req_pending)); | ||
515 | return -ENOMEM; | ||
516 | } | ||
517 | |||
518 | spin_lock(&rba->lock); | ||
519 | for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) { | ||
520 | /* Get next free Rx buffer, remove it from free list */ | ||
521 | out[i] = list_first_entry(&rba->rbd_allocated, | ||
522 | struct iwl_rx_mem_buffer, list); | ||
523 | list_del(&out[i]->list); | ||
524 | } | ||
525 | spin_unlock(&rba->lock); | ||
526 | |||
527 | return 0; | ||
528 | } | ||
529 | |||
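The removed allocator coordinates the queue and the background worker with two counters: req_pending counts posted-but-unprocessed requests, req_ready counts completed ones, and a claim backs off if the decrement would go negative. A single-threaded sketch of just that accounting (no real concurrency, only the arithmetic):

/* req_pending / req_ready accounting sketch (single-threaded illustration). */
#include <stdio.h>
#include <stdatomic.h>

static atomic_int req_pending;
static atomic_int req_ready;

static void post_request(void)		/* queue side */
{
	atomic_fetch_add(&req_pending, 1);
}

static void allocator_complete(void)	/* worker side */
{
	atomic_fetch_sub(&req_pending, 1);
	atomic_fetch_add(&req_ready, 1);
}

static int try_claim(void)		/* queue side */
{
	/* Decrement, and undo it if the counter would go negative. */
	if (atomic_fetch_sub(&req_ready, 1) <= 0) {
		atomic_fetch_add(&req_ready, 1);
		return -1;		/* nothing ready yet */
	}
	return 0;
}

int main(void)
{
	post_request();
	printf("claim before completion: %d\n", try_claim());
	allocator_complete();
	printf("claim after completion:  %d\n", try_claim());
	return 0;
}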
530 | static void iwl_pcie_rx_allocator_work(struct work_struct *data) | ||
531 | { | 385 | { |
532 | struct iwl_rb_allocator *rba_p = | ||
533 | container_of(data, struct iwl_rb_allocator, rx_alloc); | ||
534 | struct iwl_trans_pcie *trans_pcie = | 386 | struct iwl_trans_pcie *trans_pcie = |
535 | container_of(rba_p, struct iwl_trans_pcie, rba); | 387 | container_of(data, struct iwl_trans_pcie, rx_replenish); |
536 | 388 | ||
537 | iwl_pcie_rx_allocator(trans_pcie->trans); | 389 | iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL); |
538 | } | 390 | } |
539 | 391 | ||
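Both work callbacks in this file recover their parent structure from the embedded work_struct with container_of(). A compact stand-alone illustration of that idiom, with types invented for the example:

/* The container_of() idiom used by the work callbacks; illustrative types. */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_item {
	void (*fn)(struct work_item *w);
};

struct transport {
	int id;
	struct work_item replenish;	/* embedded, like rx_replenish */
};

static void replenish_fn(struct work_item *w)
{
	/* Step back from the embedded member to the enclosing object. */
	struct transport *t = container_of(w, struct transport, replenish);

	printf("replenish for transport %d\n", t->id);
}

int main(void)
{
	struct transport t = { .id = 7, .replenish = { .fn = replenish_fn } };

	t.replenish.fn(&t.replenish);	/* what the workqueue would do */
	return 0;
}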
540 | static int iwl_pcie_rx_alloc(struct iwl_trans *trans) | 392 | static int iwl_pcie_rx_alloc(struct iwl_trans *trans) |
541 | { | 393 | { |
542 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 394 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
543 | struct iwl_rxq *rxq = &trans_pcie->rxq; | 395 | struct iwl_rxq *rxq = &trans_pcie->rxq; |
544 | struct iwl_rb_allocator *rba = &trans_pcie->rba; | ||
545 | struct device *dev = trans->dev; | 396 | struct device *dev = trans->dev; |
546 | 397 | ||
547 | memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq)); | 398 | memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq)); |
548 | 399 | ||
549 | spin_lock_init(&rxq->lock); | 400 | spin_lock_init(&rxq->lock); |
550 | spin_lock_init(&rba->lock); | ||
551 | 401 | ||
552 | if (WARN_ON(rxq->bd || rxq->rb_stts)) | 402 | if (WARN_ON(rxq->bd || rxq->rb_stts)) |
553 | return -EINVAL; | 403 | return -EINVAL; |
@@ -637,49 +487,15 @@ static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) | |||
637 | INIT_LIST_HEAD(&rxq->rx_free); | 487 | INIT_LIST_HEAD(&rxq->rx_free); |
638 | INIT_LIST_HEAD(&rxq->rx_used); | 488 | INIT_LIST_HEAD(&rxq->rx_used); |
639 | rxq->free_count = 0; | 489 | rxq->free_count = 0; |
640 | rxq->used_count = 0; | ||
641 | 490 | ||
642 | for (i = 0; i < RX_QUEUE_SIZE; i++) | 491 | for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) |
643 | list_add(&rxq->pool[i].list, &rxq->rx_used); | 492 | list_add(&rxq->pool[i].list, &rxq->rx_used); |
644 | } | 493 | } |
645 | 494 | ||
646 | static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba) | ||
647 | { | ||
648 | int i; | ||
649 | |||
650 | lockdep_assert_held(&rba->lock); | ||
651 | |||
652 | INIT_LIST_HEAD(&rba->rbd_allocated); | ||
653 | INIT_LIST_HEAD(&rba->rbd_empty); | ||
654 | |||
655 | for (i = 0; i < RX_POOL_SIZE; i++) | ||
656 | list_add(&rba->pool[i].list, &rba->rbd_empty); | ||
657 | } | ||
658 | |||
659 | static void iwl_pcie_rx_free_rba(struct iwl_trans *trans) | ||
660 | { | ||
661 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
662 | struct iwl_rb_allocator *rba = &trans_pcie->rba; | ||
663 | int i; | ||
664 | |||
665 | lockdep_assert_held(&rba->lock); | ||
666 | |||
667 | for (i = 0; i < RX_POOL_SIZE; i++) { | ||
668 | if (!rba->pool[i].page) | ||
669 | continue; | ||
670 | dma_unmap_page(trans->dev, rba->pool[i].page_dma, | ||
671 | PAGE_SIZE << trans_pcie->rx_page_order, | ||
672 | DMA_FROM_DEVICE); | ||
673 | __free_pages(rba->pool[i].page, trans_pcie->rx_page_order); | ||
674 | rba->pool[i].page = NULL; | ||
675 | } | ||
676 | } | ||
677 | |||
678 | int iwl_pcie_rx_init(struct iwl_trans *trans) | 495 | int iwl_pcie_rx_init(struct iwl_trans *trans) |
679 | { | 496 | { |
680 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 497 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
681 | struct iwl_rxq *rxq = &trans_pcie->rxq; | 498 | struct iwl_rxq *rxq = &trans_pcie->rxq; |
682 | struct iwl_rb_allocator *rba = &trans_pcie->rba; | ||
683 | int i, err; | 499 | int i, err; |
684 | 500 | ||
685 | if (!rxq->bd) { | 501 | if (!rxq->bd) { |
@@ -687,21 +503,11 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) | |||
687 | if (err) | 503 | if (err) |
688 | return err; | 504 | return err; |
689 | } | 505 | } |
690 | if (!rba->alloc_wq) | ||
691 | rba->alloc_wq = alloc_workqueue("rb_allocator", | ||
692 | WQ_HIGHPRI | WQ_UNBOUND, 1); | ||
693 | INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work); | ||
694 | |||
695 | spin_lock(&rba->lock); | ||
696 | atomic_set(&rba->req_pending, 0); | ||
697 | atomic_set(&rba->req_ready, 0); | ||
698 | /* free all first - we might be reconfigured for a different size */ | ||
699 | iwl_pcie_rx_free_rba(trans); | ||
700 | iwl_pcie_rx_init_rba(rba); | ||
701 | spin_unlock(&rba->lock); | ||
702 | 506 | ||
703 | spin_lock(&rxq->lock); | 507 | spin_lock(&rxq->lock); |
704 | 508 | ||
509 | INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work); | ||
510 | |||
705 | /* free all first - we might be reconfigured for a different size */ | 511 | /* free all first - we might be reconfigured for a different size */ |
706 | iwl_pcie_rxq_free_rbs(trans); | 512 | iwl_pcie_rxq_free_rbs(trans); |
707 | iwl_pcie_rx_init_rxb_lists(rxq); | 513 | iwl_pcie_rx_init_rxb_lists(rxq); |
@@ -716,7 +522,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans) | |||
716 | memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); | 522 | memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); |
717 | spin_unlock(&rxq->lock); | 523 | spin_unlock(&rxq->lock); |
718 | 524 | ||
719 | iwl_pcie_rx_replenish(trans); | 525 | iwl_pcie_rx_replenish(trans, GFP_KERNEL); |
720 | 526 | ||
721 | iwl_pcie_rx_hw_init(trans, rxq); | 527 | iwl_pcie_rx_hw_init(trans, rxq); |
722 | 528 | ||
@@ -731,7 +537,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) | |||
731 | { | 537 | { |
732 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 538 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
733 | struct iwl_rxq *rxq = &trans_pcie->rxq; | 539 | struct iwl_rxq *rxq = &trans_pcie->rxq; |
734 | struct iwl_rb_allocator *rba = &trans_pcie->rba; | ||
735 | 540 | ||
736 | /* if rxq->bd is NULL, it means that nothing has been allocated, | 541 | /* if rxq->bd is NULL, it means that nothing has been allocated, |
737 | * exit now */ | 542 | * exit now */ |
@@ -740,15 +545,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) | |||
740 | return; | 545 | return; |
741 | } | 546 | } |
742 | 547 | ||
743 | cancel_work_sync(&rba->rx_alloc); | 548 | cancel_work_sync(&trans_pcie->rx_replenish); |
744 | if (rba->alloc_wq) { | ||
745 | destroy_workqueue(rba->alloc_wq); | ||
746 | rba->alloc_wq = NULL; | ||
747 | } | ||
748 | |||
749 | spin_lock(&rba->lock); | ||
750 | iwl_pcie_rx_free_rba(trans); | ||
751 | spin_unlock(&rba->lock); | ||
752 | 549 | ||
753 | spin_lock(&rxq->lock); | 550 | spin_lock(&rxq->lock); |
754 | iwl_pcie_rxq_free_rbs(trans); | 551 | iwl_pcie_rxq_free_rbs(trans); |
@@ -769,43 +566,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) | |||
769 | rxq->rb_stts = NULL; | 566 | rxq->rb_stts = NULL; |
770 | } | 567 | } |
771 | 568 | ||
772 | /* | ||
773 | * iwl_pcie_rx_reuse_rbd - Recycle used RBDs | ||
774 | * | ||
775 | * Called when a RBD can be reused. The RBD is transferred to the allocator. | ||
776 | * When there are 2 empty RBDs - a request for allocation is posted | ||
777 | */ | ||
778 | static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans, | ||
779 | struct iwl_rx_mem_buffer *rxb, | ||
780 | struct iwl_rxq *rxq) | ||
781 | { | ||
782 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | ||
783 | struct iwl_rb_allocator *rba = &trans_pcie->rba; | ||
784 | |||
785 | /* Count the used RBDs */ | ||
786 | rxq->used_count++; | ||
787 | |||
788 | /* Move the RBD to the used list, will be moved to allocator in batches | ||
789 | * before claiming or posting a request */ | ||
790 | list_add_tail(&rxb->list, &rxq->rx_used); | ||
791 | |||
792 | /* If we have RX_POST_REQ_ALLOC new released rx buffers - | ||
793 | * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is | ||
794 | * used for the case where we failed to claim RX_CLAIM_REQ_ALLOC | ||
795 | * earlier but still need to post another request. | ||
796 | */ | ||
797 | if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) { | ||
798 | /* Move the 2 RBDs to the allocator's ownership. | ||
799 | Allocator has another 6 from the pool for the request completion */ | ||
800 | spin_lock(&rba->lock); | ||
801 | list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); | ||
802 | spin_unlock(&rba->lock); | ||
803 | |||
804 | atomic_inc(&rba->req_pending); | ||
805 | queue_work(rba->alloc_wq, &rba->rx_alloc); | ||
806 | } | ||
807 | } | ||
808 | |||
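The posting condition in the removed iwl_pcie_rx_reuse_rbd() is easy to misread. With the constants implied by the description earlier in this file (post with 2 empty RBDs, claim in batches of 8), the modulo test fires at used counts 2, 10, 18, and so on; a tiny check:

/* When does the queue post an allocation request?  The constants follow the
 * description earlier in this file: post with 2 empty RBDs, claim in batches
 * of 8. */
#include <stdio.h>

#define RX_POST_REQ_ALLOC  2
#define RX_CLAIM_REQ_ALLOC 8

int main(void)
{
	unsigned int used_count;

	for (used_count = 1; used_count <= 20; used_count++)
		if ((used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC)
			printf("used_count=%u -> post a request\n", used_count);
	return 0;	/* prints 2, 10 and 18 */
}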
809 | static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, | 569 | static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, |
810 | struct iwl_rx_mem_buffer *rxb) | 570 | struct iwl_rx_mem_buffer *rxb) |
811 | { | 571 | { |
@@ -928,13 +688,13 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, | |||
928 | */ | 688 | */ |
929 | __free_pages(rxb->page, trans_pcie->rx_page_order); | 689 | __free_pages(rxb->page, trans_pcie->rx_page_order); |
930 | rxb->page = NULL; | 690 | rxb->page = NULL; |
931 | iwl_pcie_rx_reuse_rbd(trans, rxb, rxq); | 691 | list_add_tail(&rxb->list, &rxq->rx_used); |
932 | } else { | 692 | } else { |
933 | list_add_tail(&rxb->list, &rxq->rx_free); | 693 | list_add_tail(&rxb->list, &rxq->rx_free); |
934 | rxq->free_count++; | 694 | rxq->free_count++; |
935 | } | 695 | } |
936 | } else | 696 | } else |
937 | iwl_pcie_rx_reuse_rbd(trans, rxb, rxq); | 697 | list_add_tail(&rxb->list, &rxq->rx_used); |
938 | } | 698 | } |
939 | 699 | ||
940 | /* | 700 | /* |
@@ -944,7 +704,10 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans) | |||
944 | { | 704 | { |
945 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); | 705 | struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); |
946 | struct iwl_rxq *rxq = &trans_pcie->rxq; | 706 | struct iwl_rxq *rxq = &trans_pcie->rxq; |
947 | u32 r, i, j; | 707 | u32 r, i; |
708 | u8 fill_rx = 0; | ||
709 | u32 count = 8; | ||
710 | int total_empty; | ||
948 | 711 | ||
949 | restart: | 712 | restart: |
950 | spin_lock(&rxq->lock); | 713 | spin_lock(&rxq->lock); |
@@ -957,6 +720,14 @@ restart: | |||
957 | if (i == r) | 720 | if (i == r) |
958 | IWL_DEBUG_RX(trans, "HW = SW = %d\n", r); | 721 | IWL_DEBUG_RX(trans, "HW = SW = %d\n", r); |
959 | 722 | ||
723 | /* calculate total frames that need to be restocked after handling RX */ | ||
724 | total_empty = r - rxq->write_actual; | ||
725 | if (total_empty < 0) | ||
726 | total_empty += RX_QUEUE_SIZE; | ||
727 | |||
728 | if (total_empty > (RX_QUEUE_SIZE / 2)) | ||
729 | fill_rx = 1; | ||
730 | |||
960 | while (i != r) { | 731 | while (i != r) { |
961 | struct iwl_rx_mem_buffer *rxb; | 732 | struct iwl_rx_mem_buffer *rxb; |
962 | 733 | ||
@@ -968,48 +739,29 @@ restart: | |||
968 | iwl_pcie_rx_handle_rb(trans, rxb); | 739 | iwl_pcie_rx_handle_rb(trans, rxb); |
969 | 740 | ||
970 | i = (i + 1) & RX_QUEUE_MASK; | 741 | i = (i + 1) & RX_QUEUE_MASK; |
971 | 742 | /* If there are a lot of unused frames, | |
972 | /* If we have RX_CLAIM_REQ_ALLOC released rx buffers - | 743 | * restock the Rx queue so ucode won't assert. */ |
973 | * try to claim the pre-allocated buffers from the allocator */ | 744 | if (fill_rx) { |
974 | if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) { | 745 | count++; |
975 | struct iwl_rb_allocator *rba = &trans_pcie->rba; | 746 | if (count >= 8) { |
976 | struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC]; | 747 | rxq->read = i; |
977 | 748 | spin_unlock(&rxq->lock); | |
978 | /* Add the remaining 6 empty RBDs for allocator use */ | 749 | iwl_pcie_rx_replenish(trans, GFP_ATOMIC); |
979 | spin_lock(&rba->lock); | 750 | count = 0; |
980 | list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); | 751 | goto restart; |
981 | spin_unlock(&rba->lock); | ||
982 | |||
983 | /* If not ready - continue, will try to reclaim later. | ||
984 | * No need to reschedule work - allocator exits only on | ||
985 | * success */ | ||
986 | if (!iwl_pcie_rx_allocator_get(trans, out)) { | ||
987 | /* If success - then RX_CLAIM_REQ_ALLOC | ||
988 | * buffers were retrieved and should be added | ||
989 | * to free list */ | ||
990 | rxq->used_count -= RX_CLAIM_REQ_ALLOC; | ||
991 | for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) { | ||
992 | list_add_tail(&out[j]->list, | ||
993 | &rxq->rx_free); | ||
994 | rxq->free_count++; | ||
995 | } | ||
996 | } | 752 | } |
997 | } | 753 | } |
998 | /* handle restock for two cases: | ||
999 | * - we just pulled buffers from the allocator | ||
1000 | * - we have 8+ unstolen pages accumulated */ | ||
1001 | if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) { | ||
1002 | rxq->read = i; | ||
1003 | spin_unlock(&rxq->lock); | ||
1004 | iwl_pcie_rxq_restock(trans); | ||
1005 | goto restart; | ||
1006 | } | ||
1007 | } | 754 | } |
1008 | 755 | ||
1009 | /* Backtrack one entry */ | 756 | /* Backtrack one entry */ |
1010 | rxq->read = i; | 757 | rxq->read = i; |
1011 | spin_unlock(&rxq->lock); | 758 | spin_unlock(&rxq->lock); |
1012 | 759 | ||
760 | if (fill_rx) | ||
761 | iwl_pcie_rx_replenish(trans, GFP_ATOMIC); | ||
762 | else | ||
763 | iwl_pcie_rxq_restock(trans); | ||
764 | |||
1013 | if (trans_pcie->napi.poll) | 765 | if (trans_pcie->napi.poll) |
1014 | napi_gro_flush(&trans_pcie->napi, false); | 766 | napi_gro_flush(&trans_pcie->napi, false); |
1015 | } | 767 | } |
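The total_empty computation added to iwl_pcie_rx_handle() is ordinary ring arithmetic: the distance from the last position written back to hardware to the closed receive pointer, wrapped at the queue size, with index advance done by masking against a power-of-two size. A self-contained sketch, using an assumed queue size of 256:

/* Ring-distance sketch matching the total_empty computation above;
 * the queue size is assumed, not taken from the driver. */
#include <stdio.h>

#define RX_QUEUE_SIZE 256
#define RX_QUEUE_MASK (RX_QUEUE_SIZE - 1)

static int ring_distance(int r, int write_actual)
{
	int total_empty = r - write_actual;

	if (total_empty < 0)		/* the pointer wrapped past the end */
		total_empty += RX_QUEUE_SIZE;
	return total_empty;
}

int main(void)
{
	printf("%d\n", ring_distance(200, 50));		/* no wrap: 150 */
	printf("%d\n", ring_distance(10, 250));		/* wrapped: 16 */
	printf("%d\n", (255 + 1) & RX_QUEUE_MASK);	/* index advance: 0 */
	return 0;
}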
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c index 43ae658af6ec..6203c4ad9bba 100644 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c | |||
@@ -182,7 +182,7 @@ static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val) | |||
182 | 182 | ||
183 | static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux) | 183 | static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux) |
184 | { | 184 | { |
185 | if (!trans->cfg->apmg_not_supported) | 185 | if (trans->cfg->apmg_not_supported) |
186 | return; | 186 | return; |
187 | 187 | ||
188 | if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold)) | 188 | if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold)) |
@@ -2459,7 +2459,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, | |||
2459 | struct iwl_trans_pcie *trans_pcie; | 2459 | struct iwl_trans_pcie *trans_pcie; |
2460 | struct iwl_trans *trans; | 2460 | struct iwl_trans *trans; |
2461 | u16 pci_cmd; | 2461 | u16 pci_cmd; |
2462 | int err; | 2462 | int ret; |
2463 | 2463 | ||
2464 | trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), | 2464 | trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), |
2465 | &pdev->dev, cfg, &trans_ops_pcie, 0); | 2465 | &pdev->dev, cfg, &trans_ops_pcie, 0); |
@@ -2474,8 +2474,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, | |||
2474 | spin_lock_init(&trans_pcie->ref_lock); | 2474 | spin_lock_init(&trans_pcie->ref_lock); |
2475 | init_waitqueue_head(&trans_pcie->ucode_write_waitq); | 2475 | init_waitqueue_head(&trans_pcie->ucode_write_waitq); |
2476 | 2476 | ||
2477 | err = pci_enable_device(pdev); | 2477 | ret = pci_enable_device(pdev); |
2478 | if (err) | 2478 | if (ret) |
2479 | goto out_no_pci; | 2479 | goto out_no_pci; |
2480 | 2480 | ||
2481 | if (!cfg->base_params->pcie_l1_allowed) { | 2481 | if (!cfg->base_params->pcie_l1_allowed) { |
@@ -2491,23 +2491,23 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, | |||
2491 | 2491 | ||
2492 | pci_set_master(pdev); | 2492 | pci_set_master(pdev); |
2493 | 2493 | ||
2494 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); | 2494 | ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); |
2495 | if (!err) | 2495 | if (!ret) |
2496 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); | 2496 | ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); |
2497 | if (err) { | 2497 | if (ret) { |
2498 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | 2498 | ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); |
2499 | if (!err) | 2499 | if (!ret) |
2500 | err = pci_set_consistent_dma_mask(pdev, | 2500 | ret = pci_set_consistent_dma_mask(pdev, |
2501 | DMA_BIT_MASK(32)); | 2501 | DMA_BIT_MASK(32)); |
2502 | /* both attempts failed: */ | 2502 | /* both attempts failed: */ |
2503 | if (err) { | 2503 | if (ret) { |
2504 | dev_err(&pdev->dev, "No suitable DMA available\n"); | 2504 | dev_err(&pdev->dev, "No suitable DMA available\n"); |
2505 | goto out_pci_disable_device; | 2505 | goto out_pci_disable_device; |
2506 | } | 2506 | } |
2507 | } | 2507 | } |
2508 | 2508 | ||
2509 | err = pci_request_regions(pdev, DRV_NAME); | 2509 | ret = pci_request_regions(pdev, DRV_NAME); |
2510 | if (err) { | 2510 | if (ret) { |
2511 | dev_err(&pdev->dev, "pci_request_regions failed\n"); | 2511 | dev_err(&pdev->dev, "pci_request_regions failed\n"); |
2512 | goto out_pci_disable_device; | 2512 | goto out_pci_disable_device; |
2513 | } | 2513 | } |
@@ -2515,7 +2515,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, | |||
2515 | trans_pcie->hw_base = pci_ioremap_bar(pdev, 0); | 2515 | trans_pcie->hw_base = pci_ioremap_bar(pdev, 0); |
2516 | if (!trans_pcie->hw_base) { | 2516 | if (!trans_pcie->hw_base) { |
2517 | dev_err(&pdev->dev, "pci_ioremap_bar failed\n"); | 2517 | dev_err(&pdev->dev, "pci_ioremap_bar failed\n"); |
2518 | err = -ENODEV; | 2518 | ret = -ENODEV; |
2519 | goto out_pci_release_regions; | 2519 | goto out_pci_release_regions; |
2520 | } | 2520 | } |
2521 | 2521 | ||
@@ -2527,9 +2527,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, | |||
2527 | trans_pcie->pci_dev = pdev; | 2527 | trans_pcie->pci_dev = pdev; |
2528 | iwl_disable_interrupts(trans); | 2528 | iwl_disable_interrupts(trans); |
2529 | 2529 | ||
2530 | err = pci_enable_msi(pdev); | 2530 | ret = pci_enable_msi(pdev); |
2531 | if (err) { | 2531 | if (ret) { |
2532 | dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err); | 2532 | dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret); |
2533 | /* enable rfkill interrupt: hw bug w/a */ | 2533 | /* enable rfkill interrupt: hw bug w/a */ |
2534 | pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); | 2534 | pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); |
2535 | if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { | 2535 | if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { |
@@ -2547,11 +2547,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, | |||
2547 | */ | 2547 | */ |
2548 | if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) { | 2548 | if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) { |
2549 | unsigned long flags; | 2549 | unsigned long flags; |
2550 | int ret; | ||
2551 | 2550 | ||
2552 | trans->hw_rev = (trans->hw_rev & 0xfff0) | | 2551 | trans->hw_rev = (trans->hw_rev & 0xfff0) | |
2553 | (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2); | 2552 | (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2); |
2554 | 2553 | ||
2554 | ret = iwl_pcie_prepare_card_hw(trans); | ||
2555 | if (ret) { | ||
2556 | IWL_WARN(trans, "Exit HW not ready\n"); | ||
2557 | goto out_pci_disable_msi; | ||
2558 | } | ||
2559 | |||
2555 | /* | 2560 | /* |
2556 | * in-order to recognize C step driver should read chip version | 2561 | * in-order to recognize C step driver should read chip version |
2557 | * id located at the AUX bus MISC address space. | 2562 | * id located at the AUX bus MISC address space. |
@@ -2591,13 +2596,14 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, | |||
2591 | /* Initialize the wait queue for commands */ | 2596 | /* Initialize the wait queue for commands */ |
2592 | init_waitqueue_head(&trans_pcie->wait_command_queue); | 2597 | init_waitqueue_head(&trans_pcie->wait_command_queue); |
2593 | 2598 | ||
2594 | if (iwl_pcie_alloc_ict(trans)) | 2599 | ret = iwl_pcie_alloc_ict(trans); |
2600 | if (ret) | ||
2595 | goto out_pci_disable_msi; | 2601 | goto out_pci_disable_msi; |
2596 | 2602 | ||
2597 | err = request_threaded_irq(pdev->irq, iwl_pcie_isr, | 2603 | ret = request_threaded_irq(pdev->irq, iwl_pcie_isr, |
2598 | iwl_pcie_irq_handler, | 2604 | iwl_pcie_irq_handler, |
2599 | IRQF_SHARED, DRV_NAME, trans); | 2605 | IRQF_SHARED, DRV_NAME, trans); |
2600 | if (err) { | 2606 | if (ret) { |
2601 | IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq); | 2607 | IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq); |
2602 | goto out_free_ict; | 2608 | goto out_free_ict; |
2603 | } | 2609 | } |
@@ -2617,5 +2623,5 @@ out_pci_disable_device: | |||
2617 | pci_disable_device(pdev); | 2623 | pci_disable_device(pdev); |
2618 | out_no_pci: | 2624 | out_no_pci: |
2619 | iwl_trans_free(trans); | 2625 | iwl_trans_free(trans); |
2620 | return ERR_PTR(err); | 2626 | return ERR_PTR(ret); |
2621 | } | 2627 | } |
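The DMA setup in the probe path above tries a 36-bit mask first and falls back to 32 bits, reusing one return variable throughout (the reason for the err to ret rename). A stand-alone sketch of that fallback shape, with a stub standing in for the PCI calls:

/* 36-bit-then-32-bit DMA mask fallback; a stub replaces pci_set_dma_mask(). */
#include <stdio.h>
#include <stdint.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* Pretend the platform only supports 32-bit DMA. */
static int stub_set_dma_mask(uint64_t mask)
{
	return mask > DMA_BIT_MASK(32) ? -5 /* -EIO */ : 0;
}

int main(void)
{
	int ret;

	ret = stub_set_dma_mask(DMA_BIT_MASK(36));
	if (ret) {
		ret = stub_set_dma_mask(DMA_BIT_MASK(32));
		if (ret) {
			fprintf(stderr, "No suitable DMA available\n");
			return 1;
		}
	}
	printf("DMA mask configured (ret=%d)\n", ret);
	return 0;
}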
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 880d0d63e872..7d50711476fe 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c | |||
@@ -1566,13 +1566,13 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue) | |||
1566 | smp_rmb(); | 1566 | smp_rmb(); |
1567 | 1567 | ||
1568 | while (dc != dp) { | 1568 | while (dc != dp) { |
1569 | BUG_ON(gop - queue->tx_unmap_ops > MAX_PENDING_REQS); | 1569 | BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS); |
1570 | pending_idx = | 1570 | pending_idx = |
1571 | queue->dealloc_ring[pending_index(dc++)]; | 1571 | queue->dealloc_ring[pending_index(dc++)]; |
1572 | 1572 | ||
1573 | pending_idx_release[gop-queue->tx_unmap_ops] = | 1573 | pending_idx_release[gop - queue->tx_unmap_ops] = |
1574 | pending_idx; | 1574 | pending_idx; |
1575 | queue->pages_to_unmap[gop-queue->tx_unmap_ops] = | 1575 | queue->pages_to_unmap[gop - queue->tx_unmap_ops] = |
1576 | queue->mmap_pages[pending_idx]; | 1576 | queue->mmap_pages[pending_idx]; |
1577 | gnttab_set_unmap_op(gop, | 1577 | gnttab_set_unmap_op(gop, |
1578 | idx_to_kaddr(queue, pending_idx), | 1578 | idx_to_kaddr(queue, pending_idx), |
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c index a5233422f9dc..7384455792bf 100644 --- a/drivers/nvdimm/region_devs.c +++ b/drivers/nvdimm/region_devs.c | |||
@@ -458,10 +458,15 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus, | |||
458 | nvdimm_bus_unlock(dev); | 458 | nvdimm_bus_unlock(dev); |
459 | } | 459 | } |
460 | if (is_nd_btt(dev) && probe) { | 460 | if (is_nd_btt(dev) && probe) { |
461 | struct nd_btt *nd_btt = to_nd_btt(dev); | ||
462 | |||
461 | nd_region = to_nd_region(dev->parent); | 463 | nd_region = to_nd_region(dev->parent); |
462 | nvdimm_bus_lock(dev); | 464 | nvdimm_bus_lock(dev); |
463 | if (nd_region->btt_seed == dev) | 465 | if (nd_region->btt_seed == dev) |
464 | nd_region_create_btt_seed(nd_region); | 466 | nd_region_create_btt_seed(nd_region); |
467 | if (nd_region->ns_seed == &nd_btt->ndns->dev && | ||
468 | is_nd_blk(dev->parent)) | ||
469 | nd_region_create_blk_seed(nd_region); | ||
465 | nvdimm_bus_unlock(dev); | 470 | nvdimm_bus_unlock(dev); |
466 | } | 471 | } |
467 | } | 472 | } |
diff --git a/drivers/parport/share.c b/drivers/parport/share.c index 8067f54ce050..5ce5ef211bdb 100644 --- a/drivers/parport/share.c +++ b/drivers/parport/share.c | |||
@@ -891,8 +891,10 @@ parport_register_dev_model(struct parport *port, const char *name, | |||
891 | par_dev->dev.release = free_pardevice; | 891 | par_dev->dev.release = free_pardevice; |
892 | par_dev->devmodel = true; | 892 | par_dev->devmodel = true; |
893 | ret = device_register(&par_dev->dev); | 893 | ret = device_register(&par_dev->dev); |
894 | if (ret) | 894 | if (ret) { |
895 | goto err_put_dev; | 895 | put_device(&par_dev->dev); |
896 | goto err_put_port; | ||
897 | } | ||
896 | 898 | ||
897 | /* Chain this onto the list */ | 899 | /* Chain this onto the list */ |
898 | par_dev->prev = NULL; | 900 | par_dev->prev = NULL; |
@@ -907,7 +909,8 @@ parport_register_dev_model(struct parport *port, const char *name, | |||
907 | spin_unlock(&port->physport->pardevice_lock); | 909 | spin_unlock(&port->physport->pardevice_lock); |
908 | pr_debug("%s: cannot grant exclusive access for device %s\n", | 910 | pr_debug("%s: cannot grant exclusive access for device %s\n", |
909 | port->name, name); | 911 | port->name, name); |
910 | goto err_put_dev; | 912 | device_unregister(&par_dev->dev); |
913 | goto err_put_port; | ||
911 | } | 914 | } |
912 | port->flags |= PARPORT_FLAG_EXCL; | 915 | port->flags |= PARPORT_FLAG_EXCL; |
913 | } | 916 | } |
@@ -938,8 +941,6 @@ parport_register_dev_model(struct parport *port, const char *name, | |||
938 | 941 | ||
939 | return par_dev; | 942 | return par_dev; |
940 | 943 | ||
941 | err_put_dev: | ||
942 | put_device(&par_dev->dev); | ||
943 | err_free_devname: | 944 | err_free_devname: |
944 | kfree(devname); | 945 | kfree(devname); |
945 | err_free_par_dev: | 946 | err_free_par_dev: |
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index c0e6ede3e27d..6b8dd162f644 100644 --- a/drivers/phy/Kconfig +++ b/drivers/phy/Kconfig | |||
@@ -56,6 +56,7 @@ config PHY_EXYNOS_MIPI_VIDEO | |||
56 | 56 | ||
57 | config PHY_PXA_28NM_HSIC | 57 | config PHY_PXA_28NM_HSIC |
58 | tristate "Marvell USB HSIC 28nm PHY Driver" | 58 | tristate "Marvell USB HSIC 28nm PHY Driver" |
59 | depends on HAS_IOMEM | ||
59 | select GENERIC_PHY | 60 | select GENERIC_PHY |
60 | help | 61 | help |
61 | Enable this to support Marvell USB HSIC PHY driver for Marvell | 62 | Enable this to support Marvell USB HSIC PHY driver for Marvell |
@@ -66,6 +67,7 @@ config PHY_PXA_28NM_HSIC | |||
66 | 67 | ||
67 | config PHY_PXA_28NM_USB2 | 68 | config PHY_PXA_28NM_USB2 |
68 | tristate "Marvell USB 2.0 28nm PHY Driver" | 69 | tristate "Marvell USB 2.0 28nm PHY Driver" |
70 | depends on HAS_IOMEM | ||
69 | select GENERIC_PHY | 71 | select GENERIC_PHY |
70 | help | 72 | help |
71 | Enable this to support Marvell USB 2.0 PHY driver for Marvell | 73 | Enable this to support Marvell USB 2.0 PHY driver for Marvell |
diff --git a/drivers/phy/phy-berlin-usb.c b/drivers/phy/phy-berlin-usb.c index c6fc95b53083..335e06d66ed9 100644 --- a/drivers/phy/phy-berlin-usb.c +++ b/drivers/phy/phy-berlin-usb.c | |||
@@ -105,9 +105,9 @@ | |||
105 | 105 | ||
106 | static const u32 phy_berlin_pll_dividers[] = { | 106 | static const u32 phy_berlin_pll_dividers[] = { |
107 | /* Berlin 2 */ | 107 | /* Berlin 2 */ |
108 | CLK_REF_DIV(0xc) | FEEDBACK_CLK_DIV(0x54), | ||
109 | /* Berlin 2CD */ | ||
110 | CLK_REF_DIV(0x6) | FEEDBACK_CLK_DIV(0x55), | 108 | CLK_REF_DIV(0x6) | FEEDBACK_CLK_DIV(0x55), |
109 | /* Berlin 2CD/Q */ | ||
110 | CLK_REF_DIV(0xc) | FEEDBACK_CLK_DIV(0x54), | ||
111 | }; | 111 | }; |
112 | 112 | ||
113 | struct phy_berlin_usb_priv { | 113 | struct phy_berlin_usb_priv { |
diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c index 53f295c1bab1..3510b81db3fa 100644 --- a/drivers/phy/phy-ti-pipe3.c +++ b/drivers/phy/phy-ti-pipe3.c | |||
@@ -28,7 +28,6 @@ | |||
28 | #include <linux/delay.h> | 28 | #include <linux/delay.h> |
29 | #include <linux/phy/omap_control_phy.h> | 29 | #include <linux/phy/omap_control_phy.h> |
30 | #include <linux/of_platform.h> | 30 | #include <linux/of_platform.h> |
31 | #include <linux/spinlock.h> | ||
32 | 31 | ||
33 | #define PLL_STATUS 0x00000004 | 32 | #define PLL_STATUS 0x00000004 |
34 | #define PLL_GO 0x00000008 | 33 | #define PLL_GO 0x00000008 |
@@ -83,10 +82,6 @@ struct ti_pipe3 { | |||
83 | struct clk *refclk; | 82 | struct clk *refclk; |
84 | struct clk *div_clk; | 83 | struct clk *div_clk; |
85 | struct pipe3_dpll_map *dpll_map; | 84 | struct pipe3_dpll_map *dpll_map; |
86 | bool enabled; | ||
87 | spinlock_t lock; /* serialize clock enable/disable */ | ||
88 | /* the below flag is needed specifically for SATA */ | ||
89 | bool refclk_enabled; | ||
90 | }; | 85 | }; |
91 | 86 | ||
92 | static struct pipe3_dpll_map dpll_map_usb[] = { | 87 | static struct pipe3_dpll_map dpll_map_usb[] = { |
@@ -137,6 +132,9 @@ static struct pipe3_dpll_params *ti_pipe3_get_dpll_params(struct ti_pipe3 *phy) | |||
137 | return NULL; | 132 | return NULL; |
138 | } | 133 | } |
139 | 134 | ||
135 | static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy); | ||
136 | static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy); | ||
137 | |||
140 | static int ti_pipe3_power_off(struct phy *x) | 138 | static int ti_pipe3_power_off(struct phy *x) |
141 | { | 139 | { |
142 | struct ti_pipe3 *phy = phy_get_drvdata(x); | 140 | struct ti_pipe3 *phy = phy_get_drvdata(x); |
@@ -217,6 +215,7 @@ static int ti_pipe3_init(struct phy *x) | |||
217 | u32 val; | 215 | u32 val; |
218 | int ret = 0; | 216 | int ret = 0; |
219 | 217 | ||
218 | ti_pipe3_enable_clocks(phy); | ||
220 | /* | 219 | /* |
221 | * Set pcie_pcs register to 0x96 for proper functioning of phy | 220 | * Set pcie_pcs register to 0x96 for proper functioning of phy |
222 | * as recommended in AM572x TRM SPRUHZ6, section 18.5.2.2, table | 221 | * as recommended in AM572x TRM SPRUHZ6, section 18.5.2.2, table |
@@ -250,33 +249,35 @@ static int ti_pipe3_exit(struct phy *x) | |||
250 | u32 val; | 249 | u32 val; |
251 | unsigned long timeout; | 250 | unsigned long timeout; |
252 | 251 | ||
253 | /* SATA DPLL can't be powered down due to Errata i783 and PCIe | 252 | /* SATA DPLL can't be powered down due to Errata i783 */ |
254 | * does not have internal DPLL | 253 | if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata")) |
255 | */ | ||
256 | if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata") || | ||
257 | of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-pcie")) | ||
258 | return 0; | 254 | return 0; |
259 | 255 | ||
260 | /* Put DPLL in IDLE mode */ | 256 | /* PCIe doesn't have internal DPLL */ |
261 | val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2); | 257 | if (!of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-pcie")) { |
262 | val |= PLL_IDLE; | 258 | /* Put DPLL in IDLE mode */ |
263 | ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val); | 259 | val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2); |
264 | 260 | val |= PLL_IDLE; | |
265 | /* wait for LDO and Oscillator to power down */ | 261 | ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val); |
266 | timeout = jiffies + msecs_to_jiffies(PLL_IDLE_TIME); | ||
267 | do { | ||
268 | cpu_relax(); | ||
269 | val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS); | ||
270 | if ((val & PLL_TICOPWDN) && (val & PLL_LDOPWDN)) | ||
271 | break; | ||
272 | } while (!time_after(jiffies, timeout)); | ||
273 | 262 | ||
274 | if (!(val & PLL_TICOPWDN) || !(val & PLL_LDOPWDN)) { | 263 | /* wait for LDO and Oscillator to power down */ |
275 | dev_err(phy->dev, "Failed to power down: PLL_STATUS 0x%x\n", | 264 | timeout = jiffies + msecs_to_jiffies(PLL_IDLE_TIME); |
276 | val); | 265 | do { |
277 | return -EBUSY; | 266 | cpu_relax(); |
267 | val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS); | ||
268 | if ((val & PLL_TICOPWDN) && (val & PLL_LDOPWDN)) | ||
269 | break; | ||
270 | } while (!time_after(jiffies, timeout)); | ||
271 | |||
272 | if (!(val & PLL_TICOPWDN) || !(val & PLL_LDOPWDN)) { | ||
273 | dev_err(phy->dev, "Failed to power down: PLL_STATUS 0x%x\n", | ||
274 | val); | ||
275 | return -EBUSY; | ||
276 | } | ||
278 | } | 277 | } |
279 | 278 | ||
279 | ti_pipe3_disable_clocks(phy); | ||
280 | |||
280 | return 0; | 281 | return 0; |
281 | } | 282 | } |
282 | static struct phy_ops ops = { | 283 | static struct phy_ops ops = { |
@@ -306,7 +307,6 @@ static int ti_pipe3_probe(struct platform_device *pdev) | |||
306 | return -ENOMEM; | 307 | return -ENOMEM; |
307 | 308 | ||
308 | phy->dev = &pdev->dev; | 309 | phy->dev = &pdev->dev; |
309 | spin_lock_init(&phy->lock); | ||
310 | 310 | ||
311 | if (!of_device_is_compatible(node, "ti,phy-pipe3-pcie")) { | 311 | if (!of_device_is_compatible(node, "ti,phy-pipe3-pcie")) { |
312 | match = of_match_device(ti_pipe3_id_table, &pdev->dev); | 312 | match = of_match_device(ti_pipe3_id_table, &pdev->dev); |
@@ -402,6 +402,10 @@ static int ti_pipe3_probe(struct platform_device *pdev) | |||
402 | 402 | ||
403 | platform_set_drvdata(pdev, phy); | 403 | platform_set_drvdata(pdev, phy); |
404 | pm_runtime_enable(phy->dev); | 404 | pm_runtime_enable(phy->dev); |
405 | /* Prevent auto-disable of refclk for SATA PHY due to Errata i783 */ | ||
406 | if (of_device_is_compatible(node, "ti,phy-pipe3-sata")) | ||
407 | if (!IS_ERR(phy->refclk)) | ||
408 | clk_prepare_enable(phy->refclk); | ||
405 | 409 | ||
406 | generic_phy = devm_phy_create(phy->dev, NULL, &ops); | 410 | generic_phy = devm_phy_create(phy->dev, NULL, &ops); |
407 | if (IS_ERR(generic_phy)) | 411 | if (IS_ERR(generic_phy)) |
@@ -413,63 +417,33 @@ static int ti_pipe3_probe(struct platform_device *pdev) | |||
413 | if (IS_ERR(phy_provider)) | 417 | if (IS_ERR(phy_provider)) |
414 | return PTR_ERR(phy_provider); | 418 | return PTR_ERR(phy_provider); |
415 | 419 | ||
416 | pm_runtime_get(&pdev->dev); | ||
417 | |||
418 | return 0; | 420 | return 0; |
419 | } | 421 | } |
420 | 422 | ||
421 | static int ti_pipe3_remove(struct platform_device *pdev) | 423 | static int ti_pipe3_remove(struct platform_device *pdev) |
422 | { | 424 | { |
423 | if (!pm_runtime_suspended(&pdev->dev)) | ||
424 | pm_runtime_put(&pdev->dev); | ||
425 | pm_runtime_disable(&pdev->dev); | 425 | pm_runtime_disable(&pdev->dev); |
426 | 426 | ||
427 | return 0; | 427 | return 0; |
428 | } | 428 | } |
429 | 429 | ||
430 | #ifdef CONFIG_PM | 430 | static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy) |
431 | static int ti_pipe3_enable_refclk(struct ti_pipe3 *phy) | ||
432 | { | 431 | { |
433 | if (!IS_ERR(phy->refclk) && !phy->refclk_enabled) { | 432 | int ret = 0; |
434 | int ret; | ||
435 | 433 | ||
434 | if (!IS_ERR(phy->refclk)) { | ||
436 | ret = clk_prepare_enable(phy->refclk); | 435 | ret = clk_prepare_enable(phy->refclk); |
437 | if (ret) { | 436 | if (ret) { |
438 | dev_err(phy->dev, "Failed to enable refclk %d\n", ret); | 437 | dev_err(phy->dev, "Failed to enable refclk %d\n", ret); |
439 | return ret; | 438 | return ret; |
440 | } | 439 | } |
441 | phy->refclk_enabled = true; | ||
442 | } | 440 | } |
443 | 441 | ||
444 | return 0; | ||
445 | } | ||
446 | |||
447 | static void ti_pipe3_disable_refclk(struct ti_pipe3 *phy) | ||
448 | { | ||
449 | if (!IS_ERR(phy->refclk)) | ||
450 | clk_disable_unprepare(phy->refclk); | ||
451 | |||
452 | phy->refclk_enabled = false; | ||
453 | } | ||
454 | |||
455 | static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy) | ||
456 | { | ||
457 | int ret = 0; | ||
458 | unsigned long flags; | ||
459 | |||
460 | spin_lock_irqsave(&phy->lock, flags); | ||
461 | if (phy->enabled) | ||
462 | goto err1; | ||
463 | |||
464 | ret = ti_pipe3_enable_refclk(phy); | ||
465 | if (ret) | ||
466 | goto err1; | ||
467 | |||
468 | if (!IS_ERR(phy->wkupclk)) { | 442 | if (!IS_ERR(phy->wkupclk)) { |
469 | ret = clk_prepare_enable(phy->wkupclk); | 443 | ret = clk_prepare_enable(phy->wkupclk); |
470 | if (ret) { | 444 | if (ret) { |
471 | dev_err(phy->dev, "Failed to enable wkupclk %d\n", ret); | 445 | dev_err(phy->dev, "Failed to enable wkupclk %d\n", ret); |
472 | goto err2; | 446 | goto disable_refclk; |
473 | } | 447 | } |
474 | } | 448 | } |
475 | 449 | ||
@@ -477,96 +451,33 @@ static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy) | |||
477 | ret = clk_prepare_enable(phy->div_clk); | 451 | ret = clk_prepare_enable(phy->div_clk); |
478 | if (ret) { | 452 | if (ret) { |
479 | dev_err(phy->dev, "Failed to enable div_clk %d\n", ret); | 453 | dev_err(phy->dev, "Failed to enable div_clk %d\n", ret); |
480 | goto err3; | 454 | goto disable_wkupclk; |
481 | } | 455 | } |
482 | } | 456 | } |
483 | 457 | ||
484 | phy->enabled = true; | ||
485 | spin_unlock_irqrestore(&phy->lock, flags); | ||
486 | return 0; | 458 | return 0; |
487 | 459 | ||
488 | err3: | 460 | disable_wkupclk: |
489 | if (!IS_ERR(phy->wkupclk)) | 461 | if (!IS_ERR(phy->wkupclk)) |
490 | clk_disable_unprepare(phy->wkupclk); | 462 | clk_disable_unprepare(phy->wkupclk); |
491 | 463 | ||
492 | err2: | 464 | disable_refclk: |
493 | if (!IS_ERR(phy->refclk)) | 465 | if (!IS_ERR(phy->refclk)) |
494 | clk_disable_unprepare(phy->refclk); | 466 | clk_disable_unprepare(phy->refclk); |
495 | 467 | ||
496 | ti_pipe3_disable_refclk(phy); | ||
497 | err1: | ||
498 | spin_unlock_irqrestore(&phy->lock, flags); | ||
499 | return ret; | 468 | return ret; |
500 | } | 469 | } |
501 | 470 | ||
502 | static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy) | 471 | static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy) |
503 | { | 472 | { |
504 | unsigned long flags; | ||
505 | |||
506 | spin_lock_irqsave(&phy->lock, flags); | ||
507 | if (!phy->enabled) { | ||
508 | spin_unlock_irqrestore(&phy->lock, flags); | ||
509 | return; | ||
510 | } | ||
511 | |||
512 | if (!IS_ERR(phy->wkupclk)) | 473 | if (!IS_ERR(phy->wkupclk)) |
513 | clk_disable_unprepare(phy->wkupclk); | 474 | clk_disable_unprepare(phy->wkupclk); |
514 | /* Don't disable refclk for SATA PHY due to Errata i783 */ | 475 | if (!IS_ERR(phy->refclk)) |
515 | if (!of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata")) | 476 | clk_disable_unprepare(phy->refclk); |
516 | ti_pipe3_disable_refclk(phy); | ||
517 | if (!IS_ERR(phy->div_clk)) | 477 | if (!IS_ERR(phy->div_clk)) |
518 | clk_disable_unprepare(phy->div_clk); | 478 | clk_disable_unprepare(phy->div_clk); |
519 | phy->enabled = false; | ||
520 | spin_unlock_irqrestore(&phy->lock, flags); | ||
521 | } | ||
522 | |||
523 | static int ti_pipe3_runtime_suspend(struct device *dev) | ||
524 | { | ||
525 | struct ti_pipe3 *phy = dev_get_drvdata(dev); | ||
526 | |||
527 | ti_pipe3_disable_clocks(phy); | ||
528 | return 0; | ||
529 | } | 479 | } |
530 | 480 | ||
531 | static int ti_pipe3_runtime_resume(struct device *dev) | ||
532 | { | ||
533 | struct ti_pipe3 *phy = dev_get_drvdata(dev); | ||
534 | int ret = 0; | ||
535 | |||
536 | ret = ti_pipe3_enable_clocks(phy); | ||
537 | return ret; | ||
538 | } | ||
539 | |||
540 | static int ti_pipe3_suspend(struct device *dev) | ||
541 | { | ||
542 | struct ti_pipe3 *phy = dev_get_drvdata(dev); | ||
543 | |||
544 | ti_pipe3_disable_clocks(phy); | ||
545 | return 0; | ||
546 | } | ||
547 | |||
548 | static int ti_pipe3_resume(struct device *dev) | ||
549 | { | ||
550 | struct ti_pipe3 *phy = dev_get_drvdata(dev); | ||
551 | int ret; | ||
552 | |||
553 | ret = ti_pipe3_enable_clocks(phy); | ||
554 | if (ret) | ||
555 | return ret; | ||
556 | |||
557 | pm_runtime_disable(dev); | ||
558 | pm_runtime_set_active(dev); | ||
559 | pm_runtime_enable(dev); | ||
560 | return 0; | ||
561 | } | ||
562 | #endif | ||
563 | |||
564 | static const struct dev_pm_ops ti_pipe3_pm_ops = { | ||
565 | SET_RUNTIME_PM_OPS(ti_pipe3_runtime_suspend, | ||
566 | ti_pipe3_runtime_resume, NULL) | ||
567 | SET_SYSTEM_SLEEP_PM_OPS(ti_pipe3_suspend, ti_pipe3_resume) | ||
568 | }; | ||
569 | |||
570 | static const struct of_device_id ti_pipe3_id_table[] = { | 481 | static const struct of_device_id ti_pipe3_id_table[] = { |
571 | { | 482 | { |
572 | .compatible = "ti,phy-usb3", | 483 | .compatible = "ti,phy-usb3", |
@@ -592,7 +503,6 @@ static struct platform_driver ti_pipe3_driver = { | |||
592 | .remove = ti_pipe3_remove, | 503 | .remove = ti_pipe3_remove, |
593 | .driver = { | 504 | .driver = { |
594 | .name = "ti-pipe3", | 505 | .name = "ti-pipe3", |
595 | .pm = &ti_pipe3_pm_ops, | ||
596 | .of_match_table = ti_pipe3_id_table, | 506 | .of_match_table = ti_pipe3_id_table, |
597 | }, | 507 | }, |
598 | }; | 508 | }; |
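Note: the ti_pipe3 hunks above rename the numbered error labels after the resource they undo and drop the driver-private lock/enabled bookkeeping from the clock paths. A minimal sketch of that unwind style follows; the function, device and clock names are placeholders rather than the driver's own code, and the second clock is treated as optional the same way the driver guards wkupclk with IS_ERR().

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Sketch only: enable a mandatory and an optional clock, unwinding with
 * labels named after what they disable instead of err1/err2. */
static int example_enable_clocks(struct device *dev, struct clk *refclk,
				 struct clk *wkupclk)
{
	int ret;

	ret = clk_prepare_enable(refclk);
	if (ret) {
		dev_err(dev, "Failed to enable refclk %d\n", ret);
		return ret;
	}

	if (!IS_ERR(wkupclk)) {		/* optional clock, may be absent */
		ret = clk_prepare_enable(wkupclk);
		if (ret) {
			dev_err(dev, "Failed to enable wkupclk %d\n", ret);
			goto disable_refclk;
		}
	}

	return 0;

disable_refclk:
	clk_disable_unprepare(refclk);
	return ret;
}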
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c index efcf2a2b3975..6177315ab74e 100644 --- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c +++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c | |||
@@ -473,6 +473,8 @@ static void bcm2835_gpio_irq_disable(struct irq_data *data) | |||
473 | 473 | ||
474 | spin_lock_irqsave(&pc->irq_lock[bank], flags); | 474 | spin_lock_irqsave(&pc->irq_lock[bank], flags); |
475 | bcm2835_gpio_irq_config(pc, gpio, false); | 475 | bcm2835_gpio_irq_config(pc, gpio, false); |
476 | /* Clear events that were latched prior to clearing event sources */ | ||
477 | bcm2835_gpio_set_bit(pc, GPEDS0, gpio); | ||
476 | clear_bit(offset, &pc->enabled_irq_map[bank]); | 478 | clear_bit(offset, &pc->enabled_irq_map[bank]); |
477 | spin_unlock_irqrestore(&pc->irq_lock[bank], flags); | 479 | spin_unlock_irqrestore(&pc->irq_lock[bank], flags); |
478 | } | 480 | } |
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c index 5fd4437cee15..88a7fac11bd4 100644 --- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c +++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c | |||
@@ -403,14 +403,13 @@ static int imx1_pinconf_set(struct pinctrl_dev *pctldev, | |||
403 | unsigned num_configs) | 403 | unsigned num_configs) |
404 | { | 404 | { |
405 | struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev); | 405 | struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev); |
406 | const struct imx1_pinctrl_soc_info *info = ipctl->info; | ||
407 | int i; | 406 | int i; |
408 | 407 | ||
409 | for (i = 0; i != num_configs; ++i) { | 408 | for (i = 0; i != num_configs; ++i) { |
410 | imx1_write_bit(ipctl, pin_id, configs[i] & 0x01, MX1_PUEN); | 409 | imx1_write_bit(ipctl, pin_id, configs[i] & 0x01, MX1_PUEN); |
411 | 410 | ||
412 | dev_dbg(ipctl->dev, "pinconf set pullup pin %s\n", | 411 | dev_dbg(ipctl->dev, "pinconf set pullup pin %s\n", |
413 | info->pins[pin_id].name); | 412 | pin_desc_get(pctldev, pin_id)->name); |
414 | } | 413 | } |
415 | 414 | ||
416 | return 0; | 415 | return 0; |
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c index 557d0f2a3031..97681fac082e 100644 --- a/drivers/pinctrl/nomadik/pinctrl-abx500.c +++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c | |||
@@ -787,7 +787,6 @@ static const struct pinmux_ops abx500_pinmux_ops = { | |||
787 | .set_mux = abx500_pmx_set, | 787 | .set_mux = abx500_pmx_set, |
788 | .gpio_request_enable = abx500_gpio_request_enable, | 788 | .gpio_request_enable = abx500_gpio_request_enable, |
789 | .gpio_disable_free = abx500_gpio_disable_free, | 789 | .gpio_disable_free = abx500_gpio_disable_free, |
790 | .strict = true, | ||
791 | }; | 790 | }; |
792 | 791 | ||
793 | static int abx500_get_groups_cnt(struct pinctrl_dev *pctldev) | 792 | static int abx500_get_groups_cnt(struct pinctrl_dev *pctldev) |
diff --git a/drivers/pinctrl/pinctrl-lpc18xx.c b/drivers/pinctrl/pinctrl-lpc18xx.c index ef0b697639a7..347c763a6a78 100644 --- a/drivers/pinctrl/pinctrl-lpc18xx.c +++ b/drivers/pinctrl/pinctrl-lpc18xx.c | |||
@@ -823,7 +823,7 @@ static int lpc18xx_pconf_set_i2c0(struct pinctrl_dev *pctldev, | |||
823 | break; | 823 | break; |
824 | 824 | ||
825 | case PIN_CONFIG_INPUT_SCHMITT_ENABLE: | 825 | case PIN_CONFIG_INPUT_SCHMITT_ENABLE: |
826 | if (param) | 826 | if (param_val) |
827 | *reg &= ~(LPC18XX_SCU_I2C0_ZIF << shift); | 827 | *reg &= ~(LPC18XX_SCU_I2C0_ZIF << shift); |
828 | else | 828 | else |
829 | *reg |= (LPC18XX_SCU_I2C0_ZIF << shift); | 829 | *reg |= (LPC18XX_SCU_I2C0_ZIF << shift); |
@@ -876,7 +876,7 @@ static int lpc18xx_pconf_set_pin(struct pinctrl_dev *pctldev, | |||
876 | break; | 876 | break; |
877 | 877 | ||
878 | case PIN_CONFIG_INPUT_SCHMITT_ENABLE: | 878 | case PIN_CONFIG_INPUT_SCHMITT_ENABLE: |
879 | if (param) | 879 | if (param_val) |
880 | *reg &= ~LPC18XX_SCU_PIN_ZIF; | 880 | *reg &= ~LPC18XX_SCU_PIN_ZIF; |
881 | else | 881 | else |
882 | *reg |= LPC18XX_SCU_PIN_ZIF; | 882 | *reg |= LPC18XX_SCU_PIN_ZIF; |
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c index b2de09d3b1a0..0b8d480171a3 100644 --- a/drivers/pinctrl/pinctrl-single.c +++ b/drivers/pinctrl/pinctrl-single.c | |||
@@ -1760,7 +1760,8 @@ static int pcs_irq_init_chained_handler(struct pcs_device *pcs, | |||
1760 | int res; | 1760 | int res; |
1761 | 1761 | ||
1762 | res = request_irq(pcs_soc->irq, pcs_irq_handler, | 1762 | res = request_irq(pcs_soc->irq, pcs_irq_handler, |
1763 | IRQF_SHARED | IRQF_NO_SUSPEND, | 1763 | IRQF_SHARED | IRQF_NO_SUSPEND | |
1764 | IRQF_NO_THREAD, | ||
1764 | name, pcs_soc); | 1765 | name, pcs_soc); |
1765 | if (res) { | 1766 | if (res) { |
1766 | pcs_soc->irq = -1; | 1767 | pcs_soc->irq = -1; |
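Note: the pinctrl-single hunk adds IRQF_NO_THREAD so the chained demux handler cannot be force-threaded. A hedged sketch of the resulting request_irq() call shape is below; the handler and cookie names are invented for illustration and are not taken from the driver.

#include <linux/interrupt.h>

/* Sketch only: a shared, wakeup-capable demux interrupt that must stay in
 * hard-IRQ context, hence IRQF_NO_THREAD. */
static irqreturn_t example_demux_handler(int irq, void *data)
{
	/* walk the wrapped interrupt sources and dispatch them here */
	return IRQ_HANDLED;
}

static int example_setup_irq(unsigned int irq, void *cookie)
{
	return request_irq(irq, example_demux_handler,
			   IRQF_SHARED | IRQF_NO_SUSPEND | IRQF_NO_THREAD,
			   "example-demux", cookie);
}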
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c index 3dd5a3b2ac62..c760bf43d116 100644 --- a/drivers/pinctrl/samsung/pinctrl-samsung.c +++ b/drivers/pinctrl/samsung/pinctrl-samsung.c | |||
@@ -33,11 +33,6 @@ | |||
33 | #include "../core.h" | 33 | #include "../core.h" |
34 | #include "pinctrl-samsung.h" | 34 | #include "pinctrl-samsung.h" |
35 | 35 | ||
36 | #define GROUP_SUFFIX "-grp" | ||
37 | #define GSUFFIX_LEN sizeof(GROUP_SUFFIX) | ||
38 | #define FUNCTION_SUFFIX "-mux" | ||
39 | #define FSUFFIX_LEN sizeof(FUNCTION_SUFFIX) | ||
40 | |||
41 | /* list of all possible config options supported */ | 36 | /* list of all possible config options supported */ |
42 | static struct pin_config { | 37 | static struct pin_config { |
43 | const char *property; | 38 | const char *property; |
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h index c7508d5f6886..0874cfee6889 100644 --- a/drivers/pinctrl/sh-pfc/sh_pfc.h +++ b/drivers/pinctrl/sh-pfc/sh_pfc.h | |||
@@ -224,7 +224,7 @@ struct sh_pfc_soc_info { | |||
224 | 224 | ||
225 | /* PINMUX_GPIO_GP_ALL - Expand to a list of sh_pfc_pin entries */ | 225 | /* PINMUX_GPIO_GP_ALL - Expand to a list of sh_pfc_pin entries */ |
226 | #define _GP_GPIO(bank, _pin, _name, sfx) \ | 226 | #define _GP_GPIO(bank, _pin, _name, sfx) \ |
227 | [(bank * 32) + _pin] = { \ | 227 | { \ |
228 | .pin = (bank * 32) + _pin, \ | 228 | .pin = (bank * 32) + _pin, \ |
229 | .name = __stringify(_name), \ | 229 | .name = __stringify(_name), \ |
230 | .enum_id = _name##_DATA, \ | 230 | .enum_id = _name##_DATA, \ |
diff --git a/drivers/regulator/88pm800.c b/drivers/regulator/88pm800.c index 832932bdc977..7fd4f511d78f 100644 --- a/drivers/regulator/88pm800.c +++ b/drivers/regulator/88pm800.c | |||
@@ -130,7 +130,7 @@ struct pm800_regulators { | |||
130 | .owner = THIS_MODULE, \ | 130 | .owner = THIS_MODULE, \ |
131 | .n_voltages = ARRAY_SIZE(ldo_volt_table), \ | 131 | .n_voltages = ARRAY_SIZE(ldo_volt_table), \ |
132 | .vsel_reg = PM800_##vreg##_VOUT, \ | 132 | .vsel_reg = PM800_##vreg##_VOUT, \ |
133 | .vsel_mask = 0x1f, \ | 133 | .vsel_mask = 0xf, \ |
134 | .enable_reg = PM800_##ereg, \ | 134 | .enable_reg = PM800_##ereg, \ |
135 | .enable_mask = 1 << (ebit), \ | 135 | .enable_mask = 1 << (ebit), \ |
136 | .volt_table = ldo_volt_table, \ | 136 | .volt_table = ldo_volt_table, \ |
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index c9f72019bd68..78387a6cbae5 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c | |||
@@ -109,6 +109,7 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev, | |||
109 | static struct regulator *create_regulator(struct regulator_dev *rdev, | 109 | static struct regulator *create_regulator(struct regulator_dev *rdev, |
110 | struct device *dev, | 110 | struct device *dev, |
111 | const char *supply_name); | 111 | const char *supply_name); |
112 | static void _regulator_put(struct regulator *regulator); | ||
112 | 113 | ||
113 | static const char *rdev_get_name(struct regulator_dev *rdev) | 114 | static const char *rdev_get_name(struct regulator_dev *rdev) |
114 | { | 115 | { |
@@ -1105,6 +1106,9 @@ static int set_supply(struct regulator_dev *rdev, | |||
1105 | 1106 | ||
1106 | rdev_info(rdev, "supplied by %s\n", rdev_get_name(supply_rdev)); | 1107 | rdev_info(rdev, "supplied by %s\n", rdev_get_name(supply_rdev)); |
1107 | 1108 | ||
1109 | if (!try_module_get(supply_rdev->owner)) | ||
1110 | return -ENODEV; | ||
1111 | |||
1108 | rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY"); | 1112 | rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY"); |
1109 | if (rdev->supply == NULL) { | 1113 | if (rdev->supply == NULL) { |
1110 | err = -ENOMEM; | 1114 | err = -ENOMEM; |
@@ -1381,9 +1385,13 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) | |||
1381 | } | 1385 | } |
1382 | 1386 | ||
1383 | if (!r) { | 1387 | if (!r) { |
1384 | dev_err(dev, "Failed to resolve %s-supply for %s\n", | 1388 | if (have_full_constraints()) { |
1385 | rdev->supply_name, rdev->desc->name); | 1389 | r = dummy_regulator_rdev; |
1386 | return -EPROBE_DEFER; | 1390 | } else { |
1391 | dev_err(dev, "Failed to resolve %s-supply for %s\n", | ||
1392 | rdev->supply_name, rdev->desc->name); | ||
1393 | return -EPROBE_DEFER; | ||
1394 | } | ||
1387 | } | 1395 | } |
1388 | 1396 | ||
1389 | /* Recursively resolve the supply of the supply */ | 1397 | /* Recursively resolve the supply of the supply */ |
@@ -1398,8 +1406,11 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) | |||
1398 | /* Cascade always-on state to supply */ | 1406 | /* Cascade always-on state to supply */ |
1399 | if (_regulator_is_enabled(rdev)) { | 1407 | if (_regulator_is_enabled(rdev)) { |
1400 | ret = regulator_enable(rdev->supply); | 1408 | ret = regulator_enable(rdev->supply); |
1401 | if (ret < 0) | 1409 | if (ret < 0) { |
1410 | if (rdev->supply) | ||
1411 | _regulator_put(rdev->supply); | ||
1402 | return ret; | 1412 | return ret; |
1413 | } | ||
1403 | } | 1414 | } |
1404 | 1415 | ||
1405 | return 0; | 1416 | return 0; |
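Note: two things happen in the regulator core hunks above: set_supply() now pins the supplying regulator's module before handing out a reference, and an unresolvable supply falls back to the dummy regulator when full constraints are declared. The fragment below sketches only the module-pinning half; the helper names are illustrative, and the release path must balance the get.

#include <linux/module.h>

/* Sketch only: take a reference on the module that owns a shared object
 * before using it, and drop it again when the reference is released. */
static int example_pin_owner(struct module *owner)
{
	if (!try_module_get(owner))
		return -ENODEV;	/* owner is unloading, refuse the reference */
	return 0;
}

static void example_unpin_owner(struct module *owner)
{
	module_put(owner);	/* balances try_module_get() above */
}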
diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c index 6f2bdad8b4d8..e94ddcf97722 100644 --- a/drivers/regulator/max8973-regulator.c +++ b/drivers/regulator/max8973-regulator.c | |||
@@ -450,7 +450,7 @@ static struct max8973_regulator_platform_data *max8973_parse_dt( | |||
450 | pdata->control_flags |= MAX8973_CONTROL_FREQ_SHIFT_9PER_ENABLE; | 450 | pdata->control_flags |= MAX8973_CONTROL_FREQ_SHIFT_9PER_ENABLE; |
451 | 451 | ||
452 | if (of_property_read_bool(np, "maxim,enable-bias-control")) | 452 | if (of_property_read_bool(np, "maxim,enable-bias-control")) |
453 | pdata->control_flags |= MAX8973_BIAS_ENABLE; | 453 | pdata->control_flags |= MAX8973_CONTROL_BIAS_ENABLE; |
454 | 454 | ||
455 | return pdata; | 455 | return pdata; |
456 | } | 456 | } |
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c index 326ffb553371..72fc3c32db49 100644 --- a/drivers/regulator/s2mps11.c +++ b/drivers/regulator/s2mps11.c | |||
@@ -34,6 +34,8 @@ | |||
34 | #include <linux/mfd/samsung/s2mps14.h> | 34 | #include <linux/mfd/samsung/s2mps14.h> |
35 | #include <linux/mfd/samsung/s2mpu02.h> | 35 | #include <linux/mfd/samsung/s2mpu02.h> |
36 | 36 | ||
37 | /* The highest number of possible regulators for supported devices. */ | ||
38 | #define S2MPS_REGULATOR_MAX S2MPS13_REGULATOR_MAX | ||
37 | struct s2mps11_info { | 39 | struct s2mps11_info { |
38 | unsigned int rdev_num; | 40 | unsigned int rdev_num; |
39 | int ramp_delay2; | 41 | int ramp_delay2; |
@@ -49,7 +51,7 @@ struct s2mps11_info { | |||
49 | * One bit for each S2MPS13/S2MPS14/S2MPU02 regulator whether | 51 | * One bit for each S2MPS13/S2MPS14/S2MPU02 regulator whether |
50 | * the suspend mode was enabled. | 52 | * the suspend mode was enabled. |
51 | */ | 53 | */ |
52 | unsigned long long s2mps14_suspend_state:50; | 54 | DECLARE_BITMAP(suspend_state, S2MPS_REGULATOR_MAX); |
53 | 55 | ||
54 | /* Array of size rdev_num with GPIO-s for external sleep control */ | 56 | /* Array of size rdev_num with GPIO-s for external sleep control */ |
55 | int *ext_control_gpio; | 57 | int *ext_control_gpio; |
@@ -500,7 +502,7 @@ static int s2mps14_regulator_enable(struct regulator_dev *rdev) | |||
500 | switch (s2mps11->dev_type) { | 502 | switch (s2mps11->dev_type) { |
501 | case S2MPS13X: | 503 | case S2MPS13X: |
502 | case S2MPS14X: | 504 | case S2MPS14X: |
503 | if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev))) | 505 | if (test_bit(rdev_get_id(rdev), s2mps11->suspend_state)) |
504 | val = S2MPS14_ENABLE_SUSPEND; | 506 | val = S2MPS14_ENABLE_SUSPEND; |
505 | else if (gpio_is_valid(s2mps11->ext_control_gpio[rdev_get_id(rdev)])) | 507 | else if (gpio_is_valid(s2mps11->ext_control_gpio[rdev_get_id(rdev)])) |
506 | val = S2MPS14_ENABLE_EXT_CONTROL; | 508 | val = S2MPS14_ENABLE_EXT_CONTROL; |
@@ -508,7 +510,7 @@ static int s2mps14_regulator_enable(struct regulator_dev *rdev) | |||
508 | val = rdev->desc->enable_mask; | 510 | val = rdev->desc->enable_mask; |
509 | break; | 511 | break; |
510 | case S2MPU02: | 512 | case S2MPU02: |
511 | if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev))) | 513 | if (test_bit(rdev_get_id(rdev), s2mps11->suspend_state)) |
512 | val = S2MPU02_ENABLE_SUSPEND; | 514 | val = S2MPU02_ENABLE_SUSPEND; |
513 | else | 515 | else |
514 | val = rdev->desc->enable_mask; | 516 | val = rdev->desc->enable_mask; |
@@ -562,7 +564,7 @@ static int s2mps14_regulator_set_suspend_disable(struct regulator_dev *rdev) | |||
562 | if (ret < 0) | 564 | if (ret < 0) |
563 | return ret; | 565 | return ret; |
564 | 566 | ||
565 | s2mps11->s2mps14_suspend_state |= (1 << rdev_get_id(rdev)); | 567 | set_bit(rdev_get_id(rdev), s2mps11->suspend_state); |
566 | /* | 568 | /* |
567 | * Don't enable suspend mode if regulator is already disabled because | 569 | * Don't enable suspend mode if regulator is already disabled because |
568 | * this would effectively for a short time turn on the regulator after | 570 | * this would effectively for a short time turn on the regulator after |
@@ -960,18 +962,22 @@ static int s2mps11_pmic_probe(struct platform_device *pdev) | |||
960 | case S2MPS11X: | 962 | case S2MPS11X: |
961 | s2mps11->rdev_num = ARRAY_SIZE(s2mps11_regulators); | 963 | s2mps11->rdev_num = ARRAY_SIZE(s2mps11_regulators); |
962 | regulators = s2mps11_regulators; | 964 | regulators = s2mps11_regulators; |
965 | BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num); | ||
963 | break; | 966 | break; |
964 | case S2MPS13X: | 967 | case S2MPS13X: |
965 | s2mps11->rdev_num = ARRAY_SIZE(s2mps13_regulators); | 968 | s2mps11->rdev_num = ARRAY_SIZE(s2mps13_regulators); |
966 | regulators = s2mps13_regulators; | 969 | regulators = s2mps13_regulators; |
970 | BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num); | ||
967 | break; | 971 | break; |
968 | case S2MPS14X: | 972 | case S2MPS14X: |
969 | s2mps11->rdev_num = ARRAY_SIZE(s2mps14_regulators); | 973 | s2mps11->rdev_num = ARRAY_SIZE(s2mps14_regulators); |
970 | regulators = s2mps14_regulators; | 974 | regulators = s2mps14_regulators; |
975 | BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num); | ||
971 | break; | 976 | break; |
972 | case S2MPU02: | 977 | case S2MPU02: |
973 | s2mps11->rdev_num = ARRAY_SIZE(s2mpu02_regulators); | 978 | s2mps11->rdev_num = ARRAY_SIZE(s2mpu02_regulators); |
974 | regulators = s2mpu02_regulators; | 979 | regulators = s2mpu02_regulators; |
980 | BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num); | ||
975 | break; | 981 | break; |
976 | default: | 982 | default: |
977 | dev_err(&pdev->dev, "Invalid device type: %u\n", | 983 | dev_err(&pdev->dev, "Invalid device type: %u\n", |
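Note: the s2mps11 hunks replace a 50-bit bitfield and open-coded (1 << id) tests with a bitmap sized for the largest supported device, which also avoids shifting past the width of the underlying type. A small sketch of the same conversion, using an arbitrary example size rather than S2MPS_REGULATOR_MAX, looks like this:

#include <linux/bitmap.h>
#include <linux/bitops.h>

#define EXAMPLE_MAX	64	/* arbitrary illustration, not a chip limit */

struct example_state {
	DECLARE_BITMAP(suspend_state, EXAMPLE_MAX);	/* one bit per regulator */
};

static void example_mark_suspended(struct example_state *st, int id)
{
	set_bit(id, st->suspend_state);		/* was: state |= 1 << id */
}

static bool example_is_suspended(struct example_state *st, int id)
{
	return test_bit(id, st->suspend_state);	/* was: state & (1 << id) */
}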
diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile index 95bccfd3f169..e5225ad9c5b1 100644 --- a/drivers/s390/Makefile +++ b/drivers/s390/Makefile | |||
@@ -2,7 +2,7 @@ | |||
2 | # Makefile for the S/390 specific device drivers | 2 | # Makefile for the S/390 specific device drivers |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y += cio/ block/ char/ crypto/ net/ scsi/ kvm/ | 5 | obj-y += cio/ block/ char/ crypto/ net/ scsi/ virtio/ |
6 | 6 | ||
7 | drivers-y += drivers/s390/built-in.o | 7 | drivers-y += drivers/s390/built-in.o |
8 | 8 | ||
diff --git a/drivers/s390/kvm/Makefile b/drivers/s390/virtio/Makefile index 241891a57caf..241891a57caf 100644 --- a/drivers/s390/kvm/Makefile +++ b/drivers/s390/virtio/Makefile | |||
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/virtio/kvm_virtio.c index 53fb975c404b..53fb975c404b 100644 --- a/drivers/s390/kvm/kvm_virtio.c +++ b/drivers/s390/virtio/kvm_virtio.c | |||
diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c index f8d8fdb26b72..f8d8fdb26b72 100644 --- a/drivers/s390/kvm/virtio_ccw.c +++ b/drivers/s390/virtio/virtio_ccw.c | |||
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 285f77544c36..7dbbb29d24c6 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c | |||
@@ -949,7 +949,7 @@ static int virtscsi_probe(struct virtio_device *vdev) | |||
949 | { | 949 | { |
950 | struct Scsi_Host *shost; | 950 | struct Scsi_Host *shost; |
951 | struct virtio_scsi *vscsi; | 951 | struct virtio_scsi *vscsi; |
952 | int err, host_prot; | 952 | int err; |
953 | u32 sg_elems, num_targets; | 953 | u32 sg_elems, num_targets; |
954 | u32 cmd_per_lun; | 954 | u32 cmd_per_lun; |
955 | u32 num_queues; | 955 | u32 num_queues; |
@@ -1009,6 +1009,8 @@ static int virtscsi_probe(struct virtio_device *vdev) | |||
1009 | 1009 | ||
1010 | #ifdef CONFIG_BLK_DEV_INTEGRITY | 1010 | #ifdef CONFIG_BLK_DEV_INTEGRITY |
1011 | if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) { | 1011 | if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) { |
1012 | int host_prot; | ||
1013 | |||
1012 | host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | | 1014 | host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | |
1013 | SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION | | 1015 | SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION | |
1014 | SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION; | 1016 | SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION; |
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 0cae1694014d..b0f30fb68914 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig | |||
@@ -612,7 +612,7 @@ config SPI_XTENSA_XTFPGA | |||
612 | 612 | ||
613 | config SPI_ZYNQMP_GQSPI | 613 | config SPI_ZYNQMP_GQSPI |
614 | tristate "Xilinx ZynqMP GQSPI controller" | 614 | tristate "Xilinx ZynqMP GQSPI controller" |
615 | depends on SPI_MASTER | 615 | depends on SPI_MASTER && HAS_DMA |
616 | help | 616 | help |
617 | Enables Xilinx GQSPI controller driver for Zynq UltraScale+ MPSoC. | 617 | Enables Xilinx GQSPI controller driver for Zynq UltraScale+ MPSoC. |
618 | 618 | ||
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c index 788e2b176a4f..acce90ac7371 100644 --- a/drivers/spi/spi-img-spfi.c +++ b/drivers/spi/spi-img-spfi.c | |||
@@ -40,6 +40,7 @@ | |||
40 | #define SPFI_CONTROL_SOFT_RESET BIT(11) | 40 | #define SPFI_CONTROL_SOFT_RESET BIT(11) |
41 | #define SPFI_CONTROL_SEND_DMA BIT(10) | 41 | #define SPFI_CONTROL_SEND_DMA BIT(10) |
42 | #define SPFI_CONTROL_GET_DMA BIT(9) | 42 | #define SPFI_CONTROL_GET_DMA BIT(9) |
43 | #define SPFI_CONTROL_SE BIT(8) | ||
43 | #define SPFI_CONTROL_TMODE_SHIFT 5 | 44 | #define SPFI_CONTROL_TMODE_SHIFT 5 |
44 | #define SPFI_CONTROL_TMODE_MASK 0x7 | 45 | #define SPFI_CONTROL_TMODE_MASK 0x7 |
45 | #define SPFI_CONTROL_TMODE_SINGLE 0 | 46 | #define SPFI_CONTROL_TMODE_SINGLE 0 |
@@ -491,6 +492,7 @@ static void img_spfi_config(struct spi_master *master, struct spi_device *spi, | |||
491 | else if (xfer->tx_nbits == SPI_NBITS_QUAD && | 492 | else if (xfer->tx_nbits == SPI_NBITS_QUAD && |
492 | xfer->rx_nbits == SPI_NBITS_QUAD) | 493 | xfer->rx_nbits == SPI_NBITS_QUAD) |
493 | val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT; | 494 | val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT; |
495 | val |= SPFI_CONTROL_SE; | ||
494 | spfi_writel(spfi, val, SPFI_CONTROL); | 496 | spfi_writel(spfi, val, SPFI_CONTROL); |
495 | } | 497 | } |
496 | 498 | ||
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index eb7d3a6fb14c..f9deb84e4e55 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c | |||
@@ -201,8 +201,9 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi, | |||
201 | { | 201 | { |
202 | struct spi_imx_data *spi_imx = spi_master_get_devdata(master); | 202 | struct spi_imx_data *spi_imx = spi_master_get_devdata(master); |
203 | 203 | ||
204 | if (spi_imx->dma_is_inited && (transfer->len > spi_imx->rx_wml) | 204 | if (spi_imx->dma_is_inited |
205 | && (transfer->len > spi_imx->tx_wml)) | 205 | && transfer->len > spi_imx->rx_wml * sizeof(u32) |
206 | && transfer->len > spi_imx->tx_wml * sizeof(u32)) | ||
206 | return true; | 207 | return true; |
207 | return false; | 208 | return false; |
208 | } | 209 | } |
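Note: the spi-imx hunk scales the DMA watermark levels, which the driver keeps in 32-bit FIFO words, into bytes before comparing them with the transfer length. A sketch of the corrected can_dma-style check is below; the structure is a stand-in carrying only the fields the comparison needs, not the real spi_imx_data.

#include <linux/spi/spi.h>
#include <linux/types.h>

struct example_spi_data {
	bool dma_is_inited;
	unsigned int rx_wml;	/* RX watermark, in 32-bit FIFO words */
	unsigned int tx_wml;	/* TX watermark, in 32-bit FIFO words */
};

/* Sketch only: DMA is worthwhile when the transfer exceeds both
 * watermarks expressed in bytes. */
static bool example_can_dma(struct spi_master *master, struct spi_device *spi,
			    struct spi_transfer *transfer)
{
	struct example_spi_data *d = spi_master_get_devdata(master);

	return d->dma_is_inited &&
	       transfer->len > d->rx_wml * sizeof(u32) &&
	       transfer->len > d->tx_wml * sizeof(u32);
}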
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c index 87b20a511a6b..f23f36ebaf3d 100644 --- a/drivers/spi/spi-zynqmp-gqspi.c +++ b/drivers/spi/spi-zynqmp-gqspi.c | |||
@@ -214,6 +214,7 @@ static void zynqmp_gqspi_selectslave(struct zynqmp_qspi *instanceptr, | |||
214 | case GQSPI_SELECT_FLASH_CS_BOTH: | 214 | case GQSPI_SELECT_FLASH_CS_BOTH: |
215 | instanceptr->genfifocs = GQSPI_GENFIFO_CS_LOWER | | 215 | instanceptr->genfifocs = GQSPI_GENFIFO_CS_LOWER | |
216 | GQSPI_GENFIFO_CS_UPPER; | 216 | GQSPI_GENFIFO_CS_UPPER; |
217 | break; | ||
217 | case GQSPI_SELECT_FLASH_CS_UPPER: | 218 | case GQSPI_SELECT_FLASH_CS_UPPER: |
218 | instanceptr->genfifocs = GQSPI_GENFIFO_CS_UPPER; | 219 | instanceptr->genfifocs = GQSPI_GENFIFO_CS_UPPER; |
219 | break; | 220 | break; |
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index dd616ff0ffc5..c7de64171c45 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c | |||
@@ -693,6 +693,7 @@ static struct class *spidev_class; | |||
693 | #ifdef CONFIG_OF | 693 | #ifdef CONFIG_OF |
694 | static const struct of_device_id spidev_dt_ids[] = { | 694 | static const struct of_device_id spidev_dt_ids[] = { |
695 | { .compatible = "rohm,dh2228fv" }, | 695 | { .compatible = "rohm,dh2228fv" }, |
696 | { .compatible = "lineartechnology,ltc2488" }, | ||
696 | {}, | 697 | {}, |
697 | }; | 698 | }; |
698 | MODULE_DEVICE_TABLE(of, spidev_dt_ids); | 699 | MODULE_DEVICE_TABLE(of, spidev_dt_ids); |
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index de67b2c88bfc..20932cc9c8f7 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c | |||
@@ -1108,19 +1108,29 @@ static void eraser(unsigned char c, struct tty_struct *tty) | |||
1108 | * Locking: ctrl_lock | 1108 | * Locking: ctrl_lock |
1109 | */ | 1109 | */ |
1110 | 1110 | ||
1111 | static void isig(int sig, struct tty_struct *tty) | 1111 | static void __isig(int sig, struct tty_struct *tty) |
1112 | { | 1112 | { |
1113 | struct n_tty_data *ldata = tty->disc_data; | ||
1114 | struct pid *tty_pgrp = tty_get_pgrp(tty); | 1113 | struct pid *tty_pgrp = tty_get_pgrp(tty); |
1115 | if (tty_pgrp) { | 1114 | if (tty_pgrp) { |
1116 | kill_pgrp(tty_pgrp, sig, 1); | 1115 | kill_pgrp(tty_pgrp, sig, 1); |
1117 | put_pid(tty_pgrp); | 1116 | put_pid(tty_pgrp); |
1118 | } | 1117 | } |
1118 | } | ||
1119 | 1119 | ||
1120 | if (!L_NOFLSH(tty)) { | 1120 | static void isig(int sig, struct tty_struct *tty) |
1121 | { | ||
1122 | struct n_tty_data *ldata = tty->disc_data; | ||
1123 | |||
1124 | if (L_NOFLSH(tty)) { | ||
1125 | /* signal only */ | ||
1126 | __isig(sig, tty); | ||
1127 | |||
1128 | } else { /* signal and flush */ | ||
1121 | up_read(&tty->termios_rwsem); | 1129 | up_read(&tty->termios_rwsem); |
1122 | down_write(&tty->termios_rwsem); | 1130 | down_write(&tty->termios_rwsem); |
1123 | 1131 | ||
1132 | __isig(sig, tty); | ||
1133 | |||
1124 | /* clear echo buffer */ | 1134 | /* clear echo buffer */ |
1125 | mutex_lock(&ldata->output_lock); | 1135 | mutex_lock(&ldata->output_lock); |
1126 | ldata->echo_head = ldata->echo_tail = 0; | 1136 | ldata->echo_head = ldata->echo_tail = 0; |
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index 10a5f0a503e3..4c8662ea7bb0 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig | |||
@@ -1185,7 +1185,7 @@ config SERIAL_SC16IS7XX_CORE | |||
1185 | config SERIAL_SC16IS7XX | 1185 | config SERIAL_SC16IS7XX |
1186 | tristate "SC16IS7xx serial support" | 1186 | tristate "SC16IS7xx serial support" |
1187 | select SERIAL_CORE | 1187 | select SERIAL_CORE |
1188 | depends on I2C || SPI_MASTER | 1188 | depends on (SPI_MASTER && !I2C) || I2C |
1189 | help | 1189 | help |
1190 | This selects support for SC16IS7xx serial ports. | 1190 | This selects support for SC16IS7xx serial ports. |
1191 | Supported ICs are SC16IS740, SC16IS741, SC16IS750, SC16IS752, | 1191 | Supported ICs are SC16IS740, SC16IS741, SC16IS750, SC16IS752, |
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index 50cf5b10ceed..fd27e986b1dd 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c | |||
@@ -2310,8 +2310,8 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap, | |||
2310 | void __iomem *base; | 2310 | void __iomem *base; |
2311 | 2311 | ||
2312 | base = devm_ioremap_resource(dev, mmiobase); | 2312 | base = devm_ioremap_resource(dev, mmiobase); |
2313 | if (!base) | 2313 | if (IS_ERR(base)) |
2314 | return -ENOMEM; | 2314 | return PTR_ERR(base); |
2315 | 2315 | ||
2316 | index = pl011_probe_dt_alias(index, dev); | 2316 | index = pl011_probe_dt_alias(index, dev); |
2317 | 2317 | ||
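Note: the amba-pl011 hunk fixes the error check: devm_ioremap_resource() reports failure with an ERR_PTR(), never NULL, so the result must go through IS_ERR()/PTR_ERR(). A generic sketch of that convention (names invented, not the pl011 code) is:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

/* Sketch only: map the first MMIO resource and propagate the encoded
 * error code on failure instead of testing for NULL. */
static int example_map_mmio(struct platform_device *pdev, void __iomem **base)
{
	struct resource *res;
	void __iomem *regs;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);	/* e.g. -EBUSY or -ENOMEM, not NULL */

	*base = regs;
	return 0;
}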
diff --git a/drivers/tty/serial/etraxfs-uart.c b/drivers/tty/serial/etraxfs-uart.c index 7444ca3e616b..72d89a8ff27e 100644 --- a/drivers/tty/serial/etraxfs-uart.c +++ b/drivers/tty/serial/etraxfs-uart.c | |||
@@ -943,7 +943,7 @@ static int etraxfs_uart_remove(struct platform_device *pdev) | |||
943 | 943 | ||
944 | port = platform_get_drvdata(pdev); | 944 | port = platform_get_drvdata(pdev); |
945 | uart_remove_one_port(&etraxfs_uart_driver, port); | 945 | uart_remove_one_port(&etraxfs_uart_driver, port); |
946 | etraxfs_uart_ports[pdev->id] = NULL; | 946 | etraxfs_uart_ports[port->line] = NULL; |
947 | 947 | ||
948 | return 0; | 948 | return 0; |
949 | } | 949 | } |
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 83b02d494723..e705149ba477 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c | |||
@@ -1139,11 +1139,6 @@ static int imx_startup(struct uart_port *port) | |||
1139 | 1139 | ||
1140 | writel(temp & ~UCR4_DREN, sport->port.membase + UCR4); | 1140 | writel(temp & ~UCR4_DREN, sport->port.membase + UCR4); |
1141 | 1141 | ||
1142 | /* Can we enable the DMA support? */ | ||
1143 | if (is_imx6q_uart(sport) && !uart_console(port) && | ||
1144 | !sport->dma_is_inited) | ||
1145 | imx_uart_dma_init(sport); | ||
1146 | |||
1147 | spin_lock_irqsave(&sport->port.lock, flags); | 1142 | spin_lock_irqsave(&sport->port.lock, flags); |
1148 | /* Reset fifo's and state machines */ | 1143 | /* Reset fifo's and state machines */ |
1149 | imx_reset(sport); | 1144 | imx_reset(sport); |
@@ -1154,9 +1149,6 @@ static int imx_startup(struct uart_port *port) | |||
1154 | writel(USR1_RTSD, sport->port.membase + USR1); | 1149 | writel(USR1_RTSD, sport->port.membase + USR1); |
1155 | writel(USR2_ORE, sport->port.membase + USR2); | 1150 | writel(USR2_ORE, sport->port.membase + USR2); |
1156 | 1151 | ||
1157 | if (sport->dma_is_inited && !sport->dma_is_enabled) | ||
1158 | imx_enable_dma(sport); | ||
1159 | |||
1160 | temp = readl(sport->port.membase + UCR1); | 1152 | temp = readl(sport->port.membase + UCR1); |
1161 | temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN; | 1153 | temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN; |
1162 | 1154 | ||
@@ -1327,6 +1319,11 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios, | |||
1327 | } else { | 1319 | } else { |
1328 | ucr2 |= UCR2_CTSC; | 1320 | ucr2 |= UCR2_CTSC; |
1329 | } | 1321 | } |
1322 | |||
1323 | /* Can we enable the DMA support? */ | ||
1324 | if (is_imx6q_uart(sport) && !uart_console(port) | ||
1325 | && !sport->dma_is_inited) | ||
1326 | imx_uart_dma_init(sport); | ||
1330 | } else { | 1327 | } else { |
1331 | termios->c_cflag &= ~CRTSCTS; | 1328 | termios->c_cflag &= ~CRTSCTS; |
1332 | } | 1329 | } |
@@ -1443,6 +1440,8 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios, | |||
1443 | if (UART_ENABLE_MS(&sport->port, termios->c_cflag)) | 1440 | if (UART_ENABLE_MS(&sport->port, termios->c_cflag)) |
1444 | imx_enable_ms(&sport->port); | 1441 | imx_enable_ms(&sport->port); |
1445 | 1442 | ||
1443 | if (sport->dma_is_inited && !sport->dma_is_enabled) | ||
1444 | imx_enable_dma(sport); | ||
1446 | spin_unlock_irqrestore(&sport->port.lock, flags); | 1445 | spin_unlock_irqrestore(&sport->port.lock, flags); |
1447 | } | 1446 | } |
1448 | 1447 | ||
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c index bbeb33561737..0ecc6a854ec1 100644 --- a/drivers/tty/serial/sc16is7xx.c +++ b/drivers/tty/serial/sc16is7xx.c | |||
@@ -354,6 +354,26 @@ static void sc16is7xx_port_write(struct uart_port *port, u8 reg, u8 val) | |||
354 | (reg << SC16IS7XX_REG_SHIFT) | port->line, val); | 354 | (reg << SC16IS7XX_REG_SHIFT) | port->line, val); |
355 | } | 355 | } |
356 | 356 | ||
357 | static void sc16is7xx_fifo_read(struct uart_port *port, unsigned int rxlen) | ||
358 | { | ||
359 | struct sc16is7xx_port *s = dev_get_drvdata(port->dev); | ||
360 | u8 addr = (SC16IS7XX_RHR_REG << SC16IS7XX_REG_SHIFT) | port->line; | ||
361 | |||
362 | regcache_cache_bypass(s->regmap, true); | ||
363 | regmap_raw_read(s->regmap, addr, s->buf, rxlen); | ||
364 | regcache_cache_bypass(s->regmap, false); | ||
365 | } | ||
366 | |||
367 | static void sc16is7xx_fifo_write(struct uart_port *port, u8 to_send) | ||
368 | { | ||
369 | struct sc16is7xx_port *s = dev_get_drvdata(port->dev); | ||
370 | u8 addr = (SC16IS7XX_THR_REG << SC16IS7XX_REG_SHIFT) | port->line; | ||
371 | |||
372 | regcache_cache_bypass(s->regmap, true); | ||
373 | regmap_raw_write(s->regmap, addr, s->buf, to_send); | ||
374 | regcache_cache_bypass(s->regmap, false); | ||
375 | } | ||
376 | |||
357 | static void sc16is7xx_port_update(struct uart_port *port, u8 reg, | 377 | static void sc16is7xx_port_update(struct uart_port *port, u8 reg, |
358 | u8 mask, u8 val) | 378 | u8 mask, u8 val) |
359 | { | 379 | { |
@@ -508,10 +528,7 @@ static void sc16is7xx_handle_rx(struct uart_port *port, unsigned int rxlen, | |||
508 | s->buf[0] = sc16is7xx_port_read(port, SC16IS7XX_RHR_REG); | 528 | s->buf[0] = sc16is7xx_port_read(port, SC16IS7XX_RHR_REG); |
509 | bytes_read = 1; | 529 | bytes_read = 1; |
510 | } else { | 530 | } else { |
511 | regcache_cache_bypass(s->regmap, true); | 531 | sc16is7xx_fifo_read(port, rxlen); |
512 | regmap_raw_read(s->regmap, SC16IS7XX_RHR_REG, | ||
513 | s->buf, rxlen); | ||
514 | regcache_cache_bypass(s->regmap, false); | ||
515 | bytes_read = rxlen; | 532 | bytes_read = rxlen; |
516 | } | 533 | } |
517 | 534 | ||
@@ -591,9 +608,8 @@ static void sc16is7xx_handle_tx(struct uart_port *port) | |||
591 | s->buf[i] = xmit->buf[xmit->tail]; | 608 | s->buf[i] = xmit->buf[xmit->tail]; |
592 | xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); | 609 | xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); |
593 | } | 610 | } |
594 | regcache_cache_bypass(s->regmap, true); | 611 | |
595 | regmap_raw_write(s->regmap, SC16IS7XX_THR_REG, s->buf, to_send); | 612 | sc16is7xx_fifo_write(port, to_send); |
596 | regcache_cache_bypass(s->regmap, false); | ||
597 | } | 613 | } |
598 | 614 | ||
599 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) | 615 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) |
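Note: the sc16is7xx hunks factor the raw FIFO bursts into helpers that also fold the UART channel into the register address. The sketch below shows only the cache-bypass pattern those helpers share; map, addr, buf and len are caller-supplied, and nothing here is the driver's own code.

#include <linux/regmap.h>

/* Sketch only: burst reads from a hardware FIFO must bypass the regmap
 * cache, and the bypass window is kept as narrow as possible. */
static int example_fifo_read(struct regmap *map, unsigned int addr,
			     void *buf, size_t len)
{
	int ret;

	regcache_cache_bypass(map, true);	/* talk to the device directly */
	ret = regmap_raw_read(map, addr, buf, len);
	regcache_cache_bypass(map, false);	/* restore normal caching */

	return ret;
}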
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 7ae1592f7ec9..f36852067f20 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c | |||
@@ -1418,7 +1418,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp) | |||
1418 | mutex_lock(&port->mutex); | 1418 | mutex_lock(&port->mutex); |
1419 | uart_shutdown(tty, state); | 1419 | uart_shutdown(tty, state); |
1420 | tty_port_tty_set(port, NULL); | 1420 | tty_port_tty_set(port, NULL); |
1421 | tty->closing = 0; | 1421 | |
1422 | spin_lock_irqsave(&port->lock, flags); | 1422 | spin_lock_irqsave(&port->lock, flags); |
1423 | 1423 | ||
1424 | if (port->blocked_open) { | 1424 | if (port->blocked_open) { |
@@ -1444,6 +1444,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp) | |||
1444 | mutex_unlock(&port->mutex); | 1444 | mutex_unlock(&port->mutex); |
1445 | 1445 | ||
1446 | tty_ldisc_flush(tty); | 1446 | tty_ldisc_flush(tty); |
1447 | tty->closing = 0; | ||
1447 | } | 1448 | } |
1448 | 1449 | ||
1449 | static void uart_wait_until_sent(struct tty_struct *tty, int timeout) | 1450 | static void uart_wait_until_sent(struct tty_struct *tty, int timeout) |
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c index ea27804d87af..381a2b13682c 100644 --- a/drivers/tty/vt/selection.c +++ b/drivers/tty/vt/selection.c | |||
@@ -356,6 +356,7 @@ int paste_selection(struct tty_struct *tty) | |||
356 | schedule(); | 356 | schedule(); |
357 | continue; | 357 | continue; |
358 | } | 358 | } |
359 | __set_current_state(TASK_RUNNING); | ||
359 | count = sel_buffer_lth - pasted; | 360 | count = sel_buffer_lth - pasted; |
360 | count = tty_ldisc_receive_buf(ld, sel_buffer + pasted, NULL, | 361 | count = tty_ldisc_receive_buf(ld, sel_buffer + pasted, NULL, |
361 | count); | 362 | count); |
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 8fe52989b380..4462d167900c 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c | |||
@@ -742,6 +742,8 @@ static void visual_init(struct vc_data *vc, int num, int init) | |||
742 | __module_get(vc->vc_sw->owner); | 742 | __module_get(vc->vc_sw->owner); |
743 | vc->vc_num = num; | 743 | vc->vc_num = num; |
744 | vc->vc_display_fg = &master_display_fg; | 744 | vc->vc_display_fg = &master_display_fg; |
745 | if (vc->vc_uni_pagedir_loc) | ||
746 | con_free_unimap(vc); | ||
745 | vc->vc_uni_pagedir_loc = &vc->vc_uni_pagedir; | 747 | vc->vc_uni_pagedir_loc = &vc->vc_uni_pagedir; |
746 | vc->vc_uni_pagedir = NULL; | 748 | vc->vc_uni_pagedir = NULL; |
747 | vc->vc_hi_font_mask = 0; | 749 | vc->vc_hi_font_mask = 0; |
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 519a77ba214c..b30e7423549b 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c | |||
@@ -1944,6 +1944,7 @@ static void __exit acm_exit(void) | |||
1944 | usb_deregister(&acm_driver); | 1944 | usb_deregister(&acm_driver); |
1945 | tty_unregister_driver(acm_tty_driver); | 1945 | tty_unregister_driver(acm_tty_driver); |
1946 | put_tty_driver(acm_tty_driver); | 1946 | put_tty_driver(acm_tty_driver); |
1947 | idr_destroy(&acm_minors); | ||
1947 | } | 1948 | } |
1948 | 1949 | ||
1949 | module_init(acm_init); | 1950 | module_init(acm_init); |
diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c index 0e6f968e93fe..01c0c0477a9e 100644 --- a/drivers/usb/common/ulpi.c +++ b/drivers/usb/common/ulpi.c | |||
@@ -242,7 +242,7 @@ static int __init ulpi_init(void) | |||
242 | { | 242 | { |
243 | return bus_register(&ulpi_bus); | 243 | return bus_register(&ulpi_bus); |
244 | } | 244 | } |
245 | module_init(ulpi_init); | 245 | subsys_initcall(ulpi_init); |
246 | 246 | ||
247 | static void __exit ulpi_exit(void) | 247 | static void __exit ulpi_exit(void) |
248 | { | 248 | { |
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index be5b2074f906..cbcd0920fb51 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c | |||
@@ -1022,9 +1022,12 @@ static int register_root_hub(struct usb_hcd *hcd) | |||
1022 | dev_name(&usb_dev->dev), retval); | 1022 | dev_name(&usb_dev->dev), retval); |
1023 | return (retval < 0) ? retval : -EMSGSIZE; | 1023 | return (retval < 0) ? retval : -EMSGSIZE; |
1024 | } | 1024 | } |
1025 | if (usb_dev->speed == USB_SPEED_SUPER) { | 1025 | |
1026 | if (le16_to_cpu(usb_dev->descriptor.bcdUSB) >= 0x0201) { | ||
1026 | retval = usb_get_bos_descriptor(usb_dev); | 1027 | retval = usb_get_bos_descriptor(usb_dev); |
1027 | if (retval < 0) { | 1028 | if (!retval) { |
1029 | usb_dev->lpm_capable = usb_device_supports_lpm(usb_dev); | ||
1030 | } else if (usb_dev->speed == USB_SPEED_SUPER) { | ||
1028 | mutex_unlock(&usb_bus_list_lock); | 1031 | mutex_unlock(&usb_bus_list_lock); |
1029 | dev_dbg(parent_dev, "can't read %s bos descriptor %d\n", | 1032 | dev_dbg(parent_dev, "can't read %s bos descriptor %d\n", |
1030 | dev_name(&usb_dev->dev), retval); | 1033 | dev_name(&usb_dev->dev), retval); |
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 43cb2f2e3b43..73dfa194160b 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c | |||
@@ -122,7 +122,7 @@ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev) | |||
122 | return usb_get_intfdata(hdev->actconfig->interface[0]); | 122 | return usb_get_intfdata(hdev->actconfig->interface[0]); |
123 | } | 123 | } |
124 | 124 | ||
125 | static int usb_device_supports_lpm(struct usb_device *udev) | 125 | int usb_device_supports_lpm(struct usb_device *udev) |
126 | { | 126 | { |
127 | /* USB 2.1 (and greater) devices indicate LPM support through | 127 | /* USB 2.1 (and greater) devices indicate LPM support through |
128 | * their USB 2.0 Extended Capabilities BOS descriptor. | 128 | * their USB 2.0 Extended Capabilities BOS descriptor. |
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h index 7eb1e26798e5..457255a3306a 100644 --- a/drivers/usb/core/usb.h +++ b/drivers/usb/core/usb.h | |||
@@ -65,6 +65,7 @@ extern int usb_hub_init(void); | |||
65 | extern void usb_hub_cleanup(void); | 65 | extern void usb_hub_cleanup(void); |
66 | extern int usb_major_init(void); | 66 | extern int usb_major_init(void); |
67 | extern void usb_major_cleanup(void); | 67 | extern void usb_major_cleanup(void); |
68 | extern int usb_device_supports_lpm(struct usb_device *udev); | ||
68 | 69 | ||
69 | #ifdef CONFIG_PM | 70 | #ifdef CONFIG_PM |
70 | 71 | ||
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c index 2ef3c8d6a9db..69e769c35cf5 100644 --- a/drivers/usb/dwc3/ep0.c +++ b/drivers/usb/dwc3/ep0.c | |||
@@ -727,6 +727,10 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl) | |||
727 | dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY"); | 727 | dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY"); |
728 | ret = dwc3_ep0_set_isoch_delay(dwc, ctrl); | 728 | ret = dwc3_ep0_set_isoch_delay(dwc, ctrl); |
729 | break; | 729 | break; |
730 | case USB_REQ_SET_INTERFACE: | ||
731 | dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_INTERFACE"); | ||
732 | dwc->start_config_issued = false; | ||
733 | /* Fall through */ | ||
730 | default: | 734 | default: |
731 | dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver"); | 735 | dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver"); |
732 | ret = dwc3_ep0_delegate_req(dwc, ctrl); | 736 | ret = dwc3_ep0_delegate_req(dwc, ctrl); |
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c index d32160d6463f..5da37c957b53 100644 --- a/drivers/usb/gadget/udc/mv_udc_core.c +++ b/drivers/usb/gadget/udc/mv_udc_core.c | |||
@@ -2167,7 +2167,7 @@ static int mv_udc_probe(struct platform_device *pdev) | |||
2167 | return -ENODEV; | 2167 | return -ENODEV; |
2168 | } | 2168 | } |
2169 | 2169 | ||
2170 | udc->phy_regs = ioremap(r->start, resource_size(r)); | 2170 | udc->phy_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r)); |
2171 | if (udc->phy_regs == NULL) { | 2171 | if (udc->phy_regs == NULL) { |
2172 | dev_err(&pdev->dev, "failed to map phy I/O memory\n"); | 2172 | dev_err(&pdev->dev, "failed to map phy I/O memory\n"); |
2173 | return -EBUSY; | 2173 | return -EBUSY; |
diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c index d69c35558f68..362ee8af5fce 100644 --- a/drivers/usb/gadget/udc/udc-core.c +++ b/drivers/usb/gadget/udc/udc-core.c | |||
@@ -60,13 +60,15 @@ static DEFINE_MUTEX(udc_lock); | |||
60 | int usb_gadget_map_request(struct usb_gadget *gadget, | 60 | int usb_gadget_map_request(struct usb_gadget *gadget, |
61 | struct usb_request *req, int is_in) | 61 | struct usb_request *req, int is_in) |
62 | { | 62 | { |
63 | struct device *dev = gadget->dev.parent; | ||
64 | |||
63 | if (req->length == 0) | 65 | if (req->length == 0) |
64 | return 0; | 66 | return 0; |
65 | 67 | ||
66 | if (req->num_sgs) { | 68 | if (req->num_sgs) { |
67 | int mapped; | 69 | int mapped; |
68 | 70 | ||
69 | mapped = dma_map_sg(&gadget->dev, req->sg, req->num_sgs, | 71 | mapped = dma_map_sg(dev, req->sg, req->num_sgs, |
70 | is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); | 72 | is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); |
71 | if (mapped == 0) { | 73 | if (mapped == 0) { |
72 | dev_err(&gadget->dev, "failed to map SGs\n"); | 74 | dev_err(&gadget->dev, "failed to map SGs\n"); |
@@ -75,11 +77,11 @@ int usb_gadget_map_request(struct usb_gadget *gadget, | |||
75 | 77 | ||
76 | req->num_mapped_sgs = mapped; | 78 | req->num_mapped_sgs = mapped; |
77 | } else { | 79 | } else { |
78 | req->dma = dma_map_single(&gadget->dev, req->buf, req->length, | 80 | req->dma = dma_map_single(dev, req->buf, req->length, |
79 | is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); | 81 | is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); |
80 | 82 | ||
81 | if (dma_mapping_error(&gadget->dev, req->dma)) { | 83 | if (dma_mapping_error(dev, req->dma)) { |
82 | dev_err(&gadget->dev, "failed to map buffer\n"); | 84 | dev_err(dev, "failed to map buffer\n"); |
83 | return -EFAULT; | 85 | return -EFAULT; |
84 | } | 86 | } |
85 | } | 87 | } |
@@ -95,12 +97,12 @@ void usb_gadget_unmap_request(struct usb_gadget *gadget, | |||
95 | return; | 97 | return; |
96 | 98 | ||
97 | if (req->num_mapped_sgs) { | 99 | if (req->num_mapped_sgs) { |
98 | dma_unmap_sg(&gadget->dev, req->sg, req->num_mapped_sgs, | 100 | dma_unmap_sg(gadget->dev.parent, req->sg, req->num_mapped_sgs, |
99 | is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); | 101 | is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); |
100 | 102 | ||
101 | req->num_mapped_sgs = 0; | 103 | req->num_mapped_sgs = 0; |
102 | } else { | 104 | } else { |
103 | dma_unmap_single(&gadget->dev, req->dma, req->length, | 105 | dma_unmap_single(gadget->dev.parent, req->dma, req->length, |
104 | is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); | 106 | is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); |
105 | } | 107 | } |
106 | } | 108 | } |
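Note: the udc-core hunks switch every DMA mapping call from the logical gadget device to gadget->dev.parent, the controller that actually owns the DMA ops. A reduced sketch of the single-buffer case follows; error reporting and the scatter-gather path are omitted for brevity.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/usb/gadget.h>

/* Sketch only: map a request buffer against the controller device,
 * not against the logical gadget device. */
static int example_map_request(struct usb_gadget *gadget,
			       struct usb_request *req, int is_in)
{
	struct device *dev = gadget->dev.parent;	/* has the DMA ops */

	req->dma = dma_map_single(dev, req->buf, req->length,
				  is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, req->dma))
		return -EFAULT;

	return 0;
}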
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c index f7d561ed3c23..d029bbe9eb36 100644 --- a/drivers/usb/host/ohci-q.c +++ b/drivers/usb/host/ohci-q.c | |||
@@ -981,10 +981,6 @@ rescan_all: | |||
981 | int completed, modified; | 981 | int completed, modified; |
982 | __hc32 *prev; | 982 | __hc32 *prev; |
983 | 983 | ||
984 | /* Is this ED already invisible to the hardware? */ | ||
985 | if (ed->state == ED_IDLE) | ||
986 | goto ed_idle; | ||
987 | |||
988 | /* only take off EDs that the HC isn't using, accounting for | 984 | /* only take off EDs that the HC isn't using, accounting for |
989 | * frame counter wraps and EDs with partially retired TDs | 985 | * frame counter wraps and EDs with partially retired TDs |
990 | */ | 986 | */ |
@@ -1012,12 +1008,10 @@ skip_ed: | |||
1012 | } | 1008 | } |
1013 | 1009 | ||
1014 | /* ED's now officially unlinked, hc doesn't see */ | 1010 | /* ED's now officially unlinked, hc doesn't see */ |
1015 | ed->state = ED_IDLE; | ||
1016 | ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H); | 1011 | ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H); |
1017 | ed->hwNextED = 0; | 1012 | ed->hwNextED = 0; |
1018 | wmb(); | 1013 | wmb(); |
1019 | ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE); | 1014 | ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE); |
1020 | ed_idle: | ||
1021 | 1015 | ||
1022 | /* reentrancy: if we drop the schedule lock, someone might | 1016 | /* reentrancy: if we drop the schedule lock, someone might |
1023 | * have modified this list. normally it's just prepending | 1017 | * have modified this list. normally it's just prepending |
@@ -1088,6 +1082,7 @@ rescan_this: | |||
1088 | if (list_empty(&ed->td_list)) { | 1082 | if (list_empty(&ed->td_list)) { |
1089 | *last = ed->ed_next; | 1083 | *last = ed->ed_next; |
1090 | ed->ed_next = NULL; | 1084 | ed->ed_next = NULL; |
1085 | ed->state = ED_IDLE; | ||
1091 | list_del(&ed->in_use_list); | 1086 | list_del(&ed->in_use_list); |
1092 | } else if (ohci->rh_state == OHCI_RH_RUNNING) { | 1087 | } else if (ohci->rh_state == OHCI_RH_RUNNING) { |
1093 | *last = ed->ed_next; | 1088 | *last = ed->ed_next; |
diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c index e9a6eec39142..cfcfadfc94fc 100644 --- a/drivers/usb/host/ohci-tmio.c +++ b/drivers/usb/host/ohci-tmio.c | |||
@@ -58,7 +58,7 @@ | |||
58 | #define CCR_PM_CKRNEN 0x0002 | 58 | #define CCR_PM_CKRNEN 0x0002 |
59 | #define CCR_PM_USBPW1 0x0004 | 59 | #define CCR_PM_USBPW1 0x0004 |
60 | #define CCR_PM_USBPW2 0x0008 | 60 | #define CCR_PM_USBPW2 0x0008 |
61 | #define CCR_PM_USBPW3 0x0008 | 61 | #define CCR_PM_USBPW3 0x0010 |
62 | #define CCR_PM_PMEE 0x0100 | 62 | #define CCR_PM_PMEE 0x0100 |
63 | #define CCR_PM_PMES 0x8000 | 63 | #define CCR_PM_PMES 0x8000 |
64 | 64 | ||
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index e75c565feb53..78241b5550df 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c | |||
@@ -484,10 +484,13 @@ static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci, | |||
484 | u32 pls = status_reg & PORT_PLS_MASK; | 484 | u32 pls = status_reg & PORT_PLS_MASK; |
485 | 485 | ||
486 | /* resume state is a xHCI internal state. | 486 | /* resume state is a xHCI internal state. |
487 | * Do not report it to usb core. | 487 | * Do not report it to usb core, instead, pretend to be U3, |
488 | * thus usb core knows it's not ready for transfer | ||
488 | */ | 489 | */ |
489 | if (pls == XDEV_RESUME) | 490 | if (pls == XDEV_RESUME) { |
491 | *status |= USB_SS_PORT_LS_U3; | ||
490 | return; | 492 | return; |
493 | } | ||
491 | 494 | ||
492 | /* When the CAS bit is set then warm reset | 495 | /* When the CAS bit is set then warm reset |
493 | * should be performed on port | 496 | * should be performed on port |
@@ -588,7 +591,14 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd, | |||
588 | status |= USB_PORT_STAT_C_RESET << 16; | 591 | status |= USB_PORT_STAT_C_RESET << 16; |
589 | /* USB3.0 only */ | 592 | /* USB3.0 only */ |
590 | if (hcd->speed == HCD_USB3) { | 593 | if (hcd->speed == HCD_USB3) { |
591 | if ((raw_port_status & PORT_PLC)) | 594 | /* Port link change with port in resume state should not be |
595 | * reported to usbcore, as this is an internal state to be | ||
596 | * handled by the xhci driver. Reporting PLC to usbcore may | ||
597 | * cause usbcore to clear PLC first, so the port change event | ||
598 | * irq won't be generated. | ||
599 | */ | ||
600 | if ((raw_port_status & PORT_PLC) && | ||
601 | (raw_port_status & PORT_PLS_MASK) != XDEV_RESUME) | ||
592 | status |= USB_PORT_STAT_C_LINK_STATE << 16; | 602 | status |= USB_PORT_STAT_C_LINK_STATE << 16; |
593 | if ((raw_port_status & PORT_WRC)) | 603 | if ((raw_port_status & PORT_WRC)) |
594 | status |= USB_PORT_STAT_C_BH_RESET << 16; | 604 | status |= USB_PORT_STAT_C_BH_RESET << 16; |
@@ -1120,10 +1130,10 @@ int xhci_bus_suspend(struct usb_hcd *hcd) | |||
1120 | spin_lock_irqsave(&xhci->lock, flags); | 1130 | spin_lock_irqsave(&xhci->lock, flags); |
1121 | 1131 | ||
1122 | if (hcd->self.root_hub->do_remote_wakeup) { | 1132 | if (hcd->self.root_hub->do_remote_wakeup) { |
1123 | if (bus_state->resuming_ports) { | 1133 | if (bus_state->resuming_ports || /* USB2 */ |
1134 | bus_state->port_remote_wakeup) { /* USB3 */ | ||
1124 | spin_unlock_irqrestore(&xhci->lock, flags); | 1135 | spin_unlock_irqrestore(&xhci->lock, flags); |
1125 | xhci_dbg(xhci, "suspend failed because " | 1136 | xhci_dbg(xhci, "suspend failed because a port is resuming\n"); |
1126 | "a port is resuming\n"); | ||
1127 | return -EBUSY; | 1137 | return -EBUSY; |
1128 | } | 1138 | } |
1129 | } | 1139 | } |
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index f8336408ef07..3e442f77a2b9 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c | |||
@@ -1427,10 +1427,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, | |||
1427 | /* Attempt to use the ring cache */ | 1427 | /* Attempt to use the ring cache */ |
1428 | if (virt_dev->num_rings_cached == 0) | 1428 | if (virt_dev->num_rings_cached == 0) |
1429 | return -ENOMEM; | 1429 | return -ENOMEM; |
1430 | virt_dev->num_rings_cached--; | ||
1430 | virt_dev->eps[ep_index].new_ring = | 1431 | virt_dev->eps[ep_index].new_ring = |
1431 | virt_dev->ring_cache[virt_dev->num_rings_cached]; | 1432 | virt_dev->ring_cache[virt_dev->num_rings_cached]; |
1432 | virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL; | 1433 | virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL; |
1433 | virt_dev->num_rings_cached--; | ||
1434 | xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring, | 1434 | xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring, |
1435 | 1, type); | 1435 | 1, type); |
1436 | } | 1436 | } |
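Note: the xhci-mem hunk moves the num_rings_cached decrement ahead of the array access, so the code pops the last valid cache slot instead of reading one element past it. A generic sketch of that pop ordering, with simplified stand-in types, is:

#include <linux/stddef.h>

/* Sketch only: num_cached counts valid entries, so the last valid index
 * is num_cached - 1; decrement before indexing. */
struct example_cache {
	void *entries[8];
	int num_cached;
};

static void *example_pop(struct example_cache *c)
{
	void *e;

	if (c->num_cached == 0)
		return NULL;

	c->num_cached--;			/* decrement first... */
	e = c->entries[c->num_cached];		/* ...then index the last slot */
	c->entries[c->num_cached] = NULL;
	return e;
}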
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 4a4cb1d91ac8..5590eac2b22d 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c | |||
@@ -23,10 +23,15 @@ | |||
23 | #include <linux/pci.h> | 23 | #include <linux/pci.h> |
24 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
25 | #include <linux/module.h> | 25 | #include <linux/module.h> |
26 | #include <linux/acpi.h> | ||
26 | 27 | ||
27 | #include "xhci.h" | 28 | #include "xhci.h" |
28 | #include "xhci-trace.h" | 29 | #include "xhci-trace.h" |
29 | 30 | ||
31 | #define PORT2_SSIC_CONFIG_REG2 0x883c | ||
32 | #define PROG_DONE (1 << 30) | ||
33 | #define SSIC_PORT_UNUSED (1 << 31) | ||
34 | |||
30 | /* Device for a quirk */ | 35 | /* Device for a quirk */ |
31 | #define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73 | 36 | #define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73 |
32 | #define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000 | 37 | #define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000 |
@@ -176,20 +181,63 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) | |||
176 | } | 181 | } |
177 | 182 | ||
178 | /* | 183 | /* |
184 | * In some Intel xHCI controllers, in order to get D3 working, | ||
185 | * through a vendor specific SSIC CONFIG register at offset 0x883c, | ||
186 | * SSIC PORT need to be marked as "unused" before putting xHCI | ||
187 | * into D3. After D3 exit, the SSIC port need to be marked as "used". | ||
188 | * Without this change, xHCI might not enter D3 state. | ||
179 | * Make sure PME works on some Intel xHCI controllers by writing 1 to clear | 189 | * Make sure PME works on some Intel xHCI controllers by writing 1 to clear |
180 | * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4 | 190 | * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4 |
181 | */ | 191 | */ |
182 | static void xhci_pme_quirk(struct xhci_hcd *xhci) | 192 | static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend) |
183 | { | 193 | { |
194 | struct xhci_hcd *xhci = hcd_to_xhci(hcd); | ||
195 | struct pci_dev *pdev = to_pci_dev(hcd->self.controller); | ||
184 | u32 val; | 196 | u32 val; |
185 | void __iomem *reg; | 197 | void __iomem *reg; |
186 | 198 | ||
199 | if (pdev->vendor == PCI_VENDOR_ID_INTEL && | ||
200 | pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) { | ||
201 | |||
202 | reg = (void __iomem *) xhci->cap_regs + PORT2_SSIC_CONFIG_REG2; | ||
203 | |||
204 | /* Notify SSIC that SSIC profile programming is not done */ | ||
205 | val = readl(reg) & ~PROG_DONE; | ||
206 | writel(val, reg); | ||
207 | |||
208 | /* Mark SSIC port as unused(suspend) or used(resume) */ | ||
209 | val = readl(reg); | ||
210 | if (suspend) | ||
211 | val |= SSIC_PORT_UNUSED; | ||
212 | else | ||
213 | val &= ~SSIC_PORT_UNUSED; | ||
214 | writel(val, reg); | ||
215 | |||
216 | /* Notify SSIC that SSIC profile programming is done */ | ||
217 | val = readl(reg) | PROG_DONE; | ||
218 | writel(val, reg); | ||
219 | readl(reg); | ||
220 | } | ||
221 | |||
187 | reg = (void __iomem *) xhci->cap_regs + 0x80a4; | 222 | reg = (void __iomem *) xhci->cap_regs + 0x80a4; |
188 | val = readl(reg); | 223 | val = readl(reg); |
189 | writel(val | BIT(28), reg); | 224 | writel(val | BIT(28), reg); |
190 | readl(reg); | 225 | readl(reg); |
191 | } | 226 | } |
192 | 227 | ||
228 | #ifdef CONFIG_ACPI | ||
229 | static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) | ||
230 | { | ||
231 | static const u8 intel_dsm_uuid[] = { | ||
232 | 0xb7, 0x0c, 0x34, 0xac, 0x01, 0xe9, 0xbf, 0x45, | ||
233 | 0xb7, 0xe6, 0x2b, 0x34, 0xec, 0x93, 0x1e, 0x23, | ||
234 | }; | ||
235 | acpi_evaluate_dsm(ACPI_HANDLE(&dev->dev), intel_dsm_uuid, 3, 1, NULL); | ||
236 | } | ||
237 | #else | ||
238 | static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { } | ||
239 | #endif /* CONFIG_ACPI */ | ||
240 | |||
193 | /* called during probe() after chip reset completes */ | 241 | /* called during probe() after chip reset completes */ |
194 | static int xhci_pci_setup(struct usb_hcd *hcd) | 242 | static int xhci_pci_setup(struct usb_hcd *hcd) |
195 | { | 243 | { |
@@ -263,6 +311,9 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
263 | HCC_MAX_PSA(xhci->hcc_params) >= 4) | 311 | HCC_MAX_PSA(xhci->hcc_params) >= 4) |
264 | xhci->shared_hcd->can_do_streams = 1; | 312 | xhci->shared_hcd->can_do_streams = 1; |
265 | 313 | ||
314 | if (xhci->quirks & XHCI_PME_STUCK_QUIRK) | ||
315 | xhci_pme_acpi_rtd3_enable(dev); | ||
316 | |||
266 | /* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */ | 317 | /* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */ |
267 | pm_runtime_put_noidle(&dev->dev); | 318 | pm_runtime_put_noidle(&dev->dev); |
268 | 319 | ||
@@ -307,7 +358,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup) | |||
307 | pdev->no_d3cold = true; | 358 | pdev->no_d3cold = true; |
308 | 359 | ||
309 | if (xhci->quirks & XHCI_PME_STUCK_QUIRK) | 360 | if (xhci->quirks & XHCI_PME_STUCK_QUIRK) |
310 | xhci_pme_quirk(xhci); | 361 | xhci_pme_quirk(hcd, true); |
311 | 362 | ||
312 | return xhci_suspend(xhci, do_wakeup); | 363 | return xhci_suspend(xhci, do_wakeup); |
313 | } | 364 | } |
@@ -340,7 +391,7 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated) | |||
340 | usb_enable_intel_xhci_ports(pdev); | 391 | usb_enable_intel_xhci_ports(pdev); |
341 | 392 | ||
342 | if (xhci->quirks & XHCI_PME_STUCK_QUIRK) | 393 | if (xhci->quirks & XHCI_PME_STUCK_QUIRK) |
343 | xhci_pme_quirk(xhci); | 394 | xhci_pme_quirk(hcd, false); |
344 | 395 | ||
345 | retval = xhci_resume(xhci, hibernated); | 396 | retval = xhci_resume(xhci, hibernated); |
346 | return retval; | 397 | return retval; |
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 94416ff70810..6a8fc52aed58 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
@@ -1546,6 +1546,9 @@ static void handle_port_status(struct xhci_hcd *xhci, | |||
1546 | usb_hcd_resume_root_hub(hcd); | 1546 | usb_hcd_resume_root_hub(hcd); |
1547 | } | 1547 | } |
1548 | 1548 | ||
1549 | if (hcd->speed == HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE) | ||
1550 | bus_state->port_remote_wakeup &= ~(1 << faked_port_index); | ||
1551 | |||
1549 | if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) { | 1552 | if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) { |
1550 | xhci_dbg(xhci, "port resume event for port %d\n", port_id); | 1553 | xhci_dbg(xhci, "port resume event for port %d\n", port_id); |
1551 | 1554 | ||
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 7da0d6043d33..526ebc0c7e72 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
@@ -3453,6 +3453,9 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev) | |||
3453 | return -EINVAL; | 3453 | return -EINVAL; |
3454 | } | 3454 | } |
3455 | 3455 | ||
3456 | if (virt_dev->tt_info) | ||
3457 | old_active_eps = virt_dev->tt_info->active_eps; | ||
3458 | |||
3456 | if (virt_dev->udev != udev) { | 3459 | if (virt_dev->udev != udev) { |
3457 | /* If the virt_dev and the udev does not match, this virt_dev | 3460 | /* If the virt_dev and the udev does not match, this virt_dev |
3458 | * may belong to another udev. | 3461 | * may belong to another udev. |
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 31e46cc55807..ed2ebf647c38 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h | |||
@@ -285,6 +285,7 @@ struct xhci_op_regs { | |||
285 | #define XDEV_U0 (0x0 << 5) | 285 | #define XDEV_U0 (0x0 << 5) |
286 | #define XDEV_U2 (0x2 << 5) | 286 | #define XDEV_U2 (0x2 << 5) |
287 | #define XDEV_U3 (0x3 << 5) | 287 | #define XDEV_U3 (0x3 << 5) |
288 | #define XDEV_INACTIVE (0x6 << 5) | ||
288 | #define XDEV_RESUME (0xf << 5) | 289 | #define XDEV_RESUME (0xf << 5) |
289 | /* true: port has power (see HCC_PPC) */ | 290 | /* true: port has power (see HCC_PPC) */ |
290 | #define PORT_POWER (1 << 9) | 291 | #define PORT_POWER (1 << 9) |
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index caf188800c67..6b2479123de7 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h | |||
@@ -2065,6 +2065,18 @@ UNUSUAL_DEV( 0x1908, 0x3335, 0x0200, 0x0200, | |||
2065 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | 2065 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
2066 | US_FL_NO_READ_DISC_INFO ), | 2066 | US_FL_NO_READ_DISC_INFO ), |
2067 | 2067 | ||
2068 | /* Reported by Oliver Neukum <oneukum@suse.com> | ||
2069 | * This device morphs spontaneously into another device if the access | ||
2070 | * pattern of Windows isn't followed. Thus writable media would be dirty | ||
2071 | * if the initial instance is used. So the device is limited to its | ||
2072 | * virtual CD. | ||
2073 | * And yes, the concept that BCD goes up to 9 is not heeded */ | ||
2074 | UNUSUAL_DEV( 0x19d2, 0x1225, 0x0000, 0xffff, | ||
2075 | "ZTE,Incorporated", | ||
2076 | "ZTE WCDMA Technologies MSM", | ||
2077 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | ||
2078 | US_FL_SINGLE_LUN ), | ||
2079 | |||
2068 | /* Reported by Sven Geggus <sven-usbst@geggus.net> | 2080 | /* Reported by Sven Geggus <sven-usbst@geggus.net> |
2069 | * This encrypted pen drive returns bogus data for the initial READ(10). | 2081 | * This encrypted pen drive returns bogus data for the initial READ(10). |
2070 | */ | 2082 | */ |
@@ -2074,6 +2086,17 @@ UNUSUAL_DEV( 0x1b1c, 0x1ab5, 0x0200, 0x0200, | |||
2074 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | 2086 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
2075 | US_FL_INITIAL_READ10 ), | 2087 | US_FL_INITIAL_READ10 ), |
2076 | 2088 | ||
2089 | /* Reported by Hans de Goede <hdegoede@redhat.com> | ||
2090 | * These are mini projectors using USB for both power and video data transport | ||
2091 | * The usb-storage interface is a virtual windows driver CD, which the gm12u320 | ||
2092 | * driver automatically converts into framebuffer & kms dri device nodes. | ||
2093 | */ | ||
2094 | UNUSUAL_DEV( 0x1de1, 0xc102, 0x0000, 0xffff, | ||
2095 | "Grain-media Technology Corp.", | ||
2096 | "USB3.0 Device GM12U320", | ||
2097 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | ||
2098 | US_FL_IGNORE_DEVICE ), | ||
2099 | |||
2077 | /* Patch by Richard Schütz <r.schtz@t-online.de> | 2100 | /* Patch by Richard Schütz <r.schtz@t-online.de> |
2078 | * This external hard drive enclosure uses a JMicron chip which | 2101 | * This external hard drive enclosure uses a JMicron chip which |
2079 | * needs the US_FL_IGNORE_RESIDUE flag to work properly. */ | 2102 | * needs the US_FL_IGNORE_RESIDUE flag to work properly. */ |
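
Both new entries above follow the standard unusual_devs.h shape. Reading the macro arguments in order: USB vendor and product ID, the bcdDevice range the quirk applies to, vendor and product strings for logging, the SCSI subclass and transport overrides (USB_SC_DEVICE / USB_PR_DEVICE mean "keep whatever the device descriptor declares"), an optional init hook, and the US_FL_* quirk flags. A made-up entry, purely to label the fields (the IDs, strings and flag choice below are invented):

    UNUSUAL_DEV(  0x1234, 0xabcd, 0x0000, 0x9999,   /* idVendor, idProduct, bcdDevice min..max */
                    "Example Vendor",               /* vendor string */
                    "Example Flaky Stick",          /* product string */
                    USB_SC_DEVICE, USB_PR_DEVICE,   /* keep the descriptor's subclass/protocol */
                    NULL,                           /* no init function */
                    US_FL_IGNORE_RESIDUE | US_FL_SINGLE_LUN ),
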
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c index 9e8e004bb1c3..a9fe859f43c8 100644 --- a/drivers/vhost/vhost.c +++ b/drivers/vhost/vhost.c | |||
@@ -22,14 +22,20 @@ | |||
22 | #include <linux/file.h> | 22 | #include <linux/file.h> |
23 | #include <linux/highmem.h> | 23 | #include <linux/highmem.h> |
24 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
25 | #include <linux/vmalloc.h> | ||
25 | #include <linux/kthread.h> | 26 | #include <linux/kthread.h> |
26 | #include <linux/cgroup.h> | 27 | #include <linux/cgroup.h> |
27 | #include <linux/module.h> | 28 | #include <linux/module.h> |
29 | #include <linux/sort.h> | ||
28 | 30 | ||
29 | #include "vhost.h" | 31 | #include "vhost.h" |
30 | 32 | ||
33 | static ushort max_mem_regions = 64; | ||
34 | module_param(max_mem_regions, ushort, 0444); | ||
35 | MODULE_PARM_DESC(max_mem_regions, | ||
36 | "Maximum number of memory regions in memory map. (default: 64)"); | ||
37 | |||
31 | enum { | 38 | enum { |
32 | VHOST_MEMORY_MAX_NREGIONS = 64, | ||
33 | VHOST_MEMORY_F_LOG = 0x1, | 39 | VHOST_MEMORY_F_LOG = 0x1, |
34 | }; | 40 | }; |
35 | 41 | ||
@@ -543,7 +549,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev, bool locked) | |||
543 | fput(dev->log_file); | 549 | fput(dev->log_file); |
544 | dev->log_file = NULL; | 550 | dev->log_file = NULL; |
545 | /* No one will access memory at this point */ | 551 | /* No one will access memory at this point */ |
546 | kfree(dev->memory); | 552 | kvfree(dev->memory); |
547 | dev->memory = NULL; | 553 | dev->memory = NULL; |
548 | WARN_ON(!list_empty(&dev->work_list)); | 554 | WARN_ON(!list_empty(&dev->work_list)); |
549 | if (dev->worker) { | 555 | if (dev->worker) { |
@@ -663,6 +669,28 @@ int vhost_vq_access_ok(struct vhost_virtqueue *vq) | |||
663 | } | 669 | } |
664 | EXPORT_SYMBOL_GPL(vhost_vq_access_ok); | 670 | EXPORT_SYMBOL_GPL(vhost_vq_access_ok); |
665 | 671 | ||
672 | static int vhost_memory_reg_sort_cmp(const void *p1, const void *p2) | ||
673 | { | ||
674 | const struct vhost_memory_region *r1 = p1, *r2 = p2; | ||
675 | if (r1->guest_phys_addr < r2->guest_phys_addr) | ||
676 | return 1; | ||
677 | if (r1->guest_phys_addr > r2->guest_phys_addr) | ||
678 | return -1; | ||
679 | return 0; | ||
680 | } | ||
681 | |||
682 | static void *vhost_kvzalloc(unsigned long size) | ||
683 | { | ||
684 | void *n = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); | ||
685 | |||
686 | if (!n) { | ||
687 | n = vzalloc(size); | ||
688 | if (!n) | ||
689 | return ERR_PTR(-ENOMEM); | ||
690 | } | ||
691 | return n; | ||
692 | } | ||
693 | |||
666 | static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) | 694 | static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) |
667 | { | 695 | { |
668 | struct vhost_memory mem, *newmem, *oldmem; | 696 | struct vhost_memory mem, *newmem, *oldmem; |
@@ -673,21 +701,23 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) | |||
673 | return -EFAULT; | 701 | return -EFAULT; |
674 | if (mem.padding) | 702 | if (mem.padding) |
675 | return -EOPNOTSUPP; | 703 | return -EOPNOTSUPP; |
676 | if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS) | 704 | if (mem.nregions > max_mem_regions) |
677 | return -E2BIG; | 705 | return -E2BIG; |
678 | newmem = kmalloc(size + mem.nregions * sizeof *m->regions, GFP_KERNEL); | 706 | newmem = vhost_kvzalloc(size + mem.nregions * sizeof(*m->regions)); |
679 | if (!newmem) | 707 | if (!newmem) |
680 | return -ENOMEM; | 708 | return -ENOMEM; |
681 | 709 | ||
682 | memcpy(newmem, &mem, size); | 710 | memcpy(newmem, &mem, size); |
683 | if (copy_from_user(newmem->regions, m->regions, | 711 | if (copy_from_user(newmem->regions, m->regions, |
684 | mem.nregions * sizeof *m->regions)) { | 712 | mem.nregions * sizeof *m->regions)) { |
685 | kfree(newmem); | 713 | kvfree(newmem); |
686 | return -EFAULT; | 714 | return -EFAULT; |
687 | } | 715 | } |
716 | sort(newmem->regions, newmem->nregions, sizeof(*newmem->regions), | ||
717 | vhost_memory_reg_sort_cmp, NULL); | ||
688 | 718 | ||
689 | if (!memory_access_ok(d, newmem, 0)) { | 719 | if (!memory_access_ok(d, newmem, 0)) { |
690 | kfree(newmem); | 720 | kvfree(newmem); |
691 | return -EFAULT; | 721 | return -EFAULT; |
692 | } | 722 | } |
693 | oldmem = d->memory; | 723 | oldmem = d->memory; |
@@ -699,7 +729,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) | |||
699 | d->vqs[i]->memory = newmem; | 729 | d->vqs[i]->memory = newmem; |
700 | mutex_unlock(&d->vqs[i]->mutex); | 730 | mutex_unlock(&d->vqs[i]->mutex); |
701 | } | 731 | } |
702 | kfree(oldmem); | 732 | kvfree(oldmem); |
703 | return 0; | 733 | return 0; |
704 | } | 734 | } |
705 | 735 | ||
@@ -992,17 +1022,22 @@ EXPORT_SYMBOL_GPL(vhost_dev_ioctl); | |||
992 | static const struct vhost_memory_region *find_region(struct vhost_memory *mem, | 1022 | static const struct vhost_memory_region *find_region(struct vhost_memory *mem, |
993 | __u64 addr, __u32 len) | 1023 | __u64 addr, __u32 len) |
994 | { | 1024 | { |
995 | struct vhost_memory_region *reg; | 1025 | const struct vhost_memory_region *reg; |
996 | int i; | 1026 | int start = 0, end = mem->nregions; |
997 | 1027 | ||
998 | /* linear search is not brilliant, but we really have on the order of 6 | 1028 | while (start < end) { |
999 | * regions in practice */ | 1029 | int slot = start + (end - start) / 2; |
1000 | for (i = 0; i < mem->nregions; ++i) { | 1030 | reg = mem->regions + slot; |
1001 | reg = mem->regions + i; | 1031 | if (addr >= reg->guest_phys_addr) |
1002 | if (reg->guest_phys_addr <= addr && | 1032 | end = slot; |
1003 | reg->guest_phys_addr + reg->memory_size - 1 >= addr) | 1033 | else |
1004 | return reg; | 1034 | start = slot + 1; |
1005 | } | 1035 | } |
1036 | |||
1037 | reg = mem->regions + start; | ||
1038 | if (addr >= reg->guest_phys_addr && | ||
1039 | reg->guest_phys_addr + reg->memory_size > addr) | ||
1040 | return reg; | ||
1006 | return NULL; | 1041 | return NULL; |
1007 | } | 1042 | } |
1008 | 1043 | ||
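
Taken together, the vhost hunks above replace the old linear lookup: vhost_set_memory() now sorts the regions by guest_phys_addr in descending order, and find_region() narrows in on a candidate with a binary search before a single range check. The same idea as a standalone user-space sketch (the types and the extra start < n guard are mine, not vhost's):

    #include <stdint.h>
    #include <stddef.h>

    struct region {
            uint64_t gpa;   /* guest-physical start address */
            uint64_t size;
    };

    /* Regions must be sorted by gpa in descending order, as vhost now keeps them. */
    static const struct region *find_region(const struct region *r, size_t n,
                                            uint64_t addr)
    {
            size_t start = 0, end = n;

            while (start < end) {
                    size_t slot = start + (end - start) / 2;

                    if (addr >= r[slot].gpa)
                            end = slot;     /* answer is at slot or an earlier index */
                    else
                            start = slot + 1;
            }
            if (start < n && addr >= r[start].gpa &&
                addr - r[start].gpa < r[start].size)
                    return &r[start];
            return NULL;
    }

The sort comparator returning 1 when r1 < r2 is what produces the descending order this search relies on.
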
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index f0520bcf2094..518c6294bf6c 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c | |||
@@ -702,6 +702,7 @@ void wbc_account_io(struct writeback_control *wbc, struct page *page, | |||
702 | else | 702 | else |
703 | wbc->wb_tcand_bytes -= min(bytes, wbc->wb_tcand_bytes); | 703 | wbc->wb_tcand_bytes -= min(bytes, wbc->wb_tcand_bytes); |
704 | } | 704 | } |
705 | EXPORT_SYMBOL_GPL(wbc_account_io); | ||
705 | 706 | ||
706 | /** | 707 | /** |
707 | * inode_congested - test whether an inode is congested | 708 | * inode_congested - test whether an inode is congested |
diff --git a/fs/namespace.c b/fs/namespace.c index c7cb8a526c05..2b8aa15fd6df 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
@@ -1361,6 +1361,36 @@ enum umount_tree_flags { | |||
1361 | UMOUNT_PROPAGATE = 2, | 1361 | UMOUNT_PROPAGATE = 2, |
1362 | UMOUNT_CONNECTED = 4, | 1362 | UMOUNT_CONNECTED = 4, |
1363 | }; | 1363 | }; |
1364 | |||
1365 | static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how) | ||
1366 | { | ||
1367 | /* Leaving mounts connected is only valid for lazy umounts */ | ||
1368 | if (how & UMOUNT_SYNC) | ||
1369 | return true; | ||
1370 | |||
1371 | /* A mount without a parent has nothing to be connected to */ | ||
1372 | if (!mnt_has_parent(mnt)) | ||
1373 | return true; | ||
1374 | |||
1375 | /* Because the reference counting rules change when mounts are | ||
1376 | * unmounted and connected, umounted mounts may not be | ||
1377 | * connected to mounted mounts. | ||
1378 | */ | ||
1379 | if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT)) | ||
1380 | return true; | ||
1381 | |||
1382 | /* Has it been requested that the mount remain connected? */ | ||
1383 | if (how & UMOUNT_CONNECTED) | ||
1384 | return false; | ||
1385 | |||
1386 | /* Is the mount locked such that it needs to remain connected? */ | ||
1387 | if (IS_MNT_LOCKED(mnt)) | ||
1388 | return false; | ||
1389 | |||
1390 | /* By default disconnect the mount */ | ||
1391 | return true; | ||
1392 | } | ||
1393 | |||
1364 | /* | 1394 | /* |
1365 | * mount_lock must be held | 1395 | * mount_lock must be held |
1366 | * namespace_sem must be held for write | 1396 | * namespace_sem must be held for write |
@@ -1398,10 +1428,7 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how) | |||
1398 | if (how & UMOUNT_SYNC) | 1428 | if (how & UMOUNT_SYNC) |
1399 | p->mnt.mnt_flags |= MNT_SYNC_UMOUNT; | 1429 | p->mnt.mnt_flags |= MNT_SYNC_UMOUNT; |
1400 | 1430 | ||
1401 | disconnect = !(((how & UMOUNT_CONNECTED) && | 1431 | disconnect = disconnect_mount(p, how); |
1402 | mnt_has_parent(p) && | ||
1403 | (p->mnt_parent->mnt.mnt_flags & MNT_UMOUNT)) || | ||
1404 | IS_MNT_LOCKED_AND_LAZY(p)); | ||
1405 | 1432 | ||
1406 | pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt, | 1433 | pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt, |
1407 | disconnect ? &unmounted : NULL); | 1434 | disconnect ? &unmounted : NULL); |
@@ -1538,11 +1565,8 @@ void __detach_mounts(struct dentry *dentry) | |||
1538 | while (!hlist_empty(&mp->m_list)) { | 1565 | while (!hlist_empty(&mp->m_list)) { |
1539 | mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list); | 1566 | mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list); |
1540 | if (mnt->mnt.mnt_flags & MNT_UMOUNT) { | 1567 | if (mnt->mnt.mnt_flags & MNT_UMOUNT) { |
1541 | struct mount *p, *tmp; | 1568 | hlist_add_head(&mnt->mnt_umount.s_list, &unmounted); |
1542 | list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) { | 1569 | umount_mnt(mnt); |
1543 | hlist_add_head(&p->mnt_umount.s_list, &unmounted); | ||
1544 | umount_mnt(p); | ||
1545 | } | ||
1546 | } | 1570 | } |
1547 | else umount_tree(mnt, UMOUNT_CONNECTED); | 1571 | else umount_tree(mnt, UMOUNT_CONNECTED); |
1548 | } | 1572 | } |
diff --git a/fs/notify/mark.c b/fs/notify/mark.c index 3e594ce41010..92e48c70f0f0 100644 --- a/fs/notify/mark.c +++ b/fs/notify/mark.c | |||
@@ -152,15 +152,31 @@ void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark, | |||
152 | BUG(); | 152 | BUG(); |
153 | 153 | ||
154 | list_del_init(&mark->g_list); | 154 | list_del_init(&mark->g_list); |
155 | |||
155 | spin_unlock(&mark->lock); | 156 | spin_unlock(&mark->lock); |
156 | 157 | ||
157 | if (inode && (mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED)) | 158 | if (inode && (mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED)) |
158 | iput(inode); | 159 | iput(inode); |
160 | /* release lock temporarily */ | ||
161 | mutex_unlock(&group->mark_mutex); | ||
159 | 162 | ||
160 | spin_lock(&destroy_lock); | 163 | spin_lock(&destroy_lock); |
161 | list_add(&mark->g_list, &destroy_list); | 164 | list_add(&mark->g_list, &destroy_list); |
162 | spin_unlock(&destroy_lock); | 165 | spin_unlock(&destroy_lock); |
163 | wake_up(&destroy_waitq); | 166 | wake_up(&destroy_waitq); |
167 | /* | ||
168 | * We don't necessarily have a ref on mark from caller so the above destroy | ||
169 | * may have actually freed it, unless this group provides a 'freeing_mark' | ||
170 | * function which must be holding a reference. | ||
171 | */ | ||
172 | |||
173 | /* | ||
174 | * Some groups like to know that marks are being freed. This is a | ||
175 | * callback to the group function to let it know that this mark | ||
176 | * is being freed. | ||
177 | */ | ||
178 | if (group->ops->freeing_mark) | ||
179 | group->ops->freeing_mark(mark, group); | ||
164 | 180 | ||
165 | /* | 181 | /* |
166 | * __fsnotify_update_child_dentry_flags(inode); | 182 | * __fsnotify_update_child_dentry_flags(inode); |
@@ -175,6 +191,8 @@ void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark, | |||
175 | */ | 191 | */ |
176 | 192 | ||
177 | atomic_dec(&group->num_marks); | 193 | atomic_dec(&group->num_marks); |
194 | |||
195 | mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING); | ||
178 | } | 196 | } |
179 | 197 | ||
180 | void fsnotify_destroy_mark(struct fsnotify_mark *mark, | 198 | void fsnotify_destroy_mark(struct fsnotify_mark *mark, |
@@ -187,10 +205,7 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark, | |||
187 | 205 | ||
188 | /* | 206 | /* |
189 | * Destroy all marks in the given list. The marks must be already detached from | 207 | * Destroy all marks in the given list. The marks must be already detached from |
190 | * the original inode / vfsmount. Note that we can race with | 208 | * the original inode / vfsmount. |
191 | * fsnotify_clear_marks_by_group_flags(). However we hold a reference to each | ||
192 | * mark so they won't get freed from under us and nobody else touches our | ||
193 | * free_list list_head. | ||
194 | */ | 209 | */ |
195 | void fsnotify_destroy_marks(struct list_head *to_free) | 210 | void fsnotify_destroy_marks(struct list_head *to_free) |
196 | { | 211 | { |
@@ -391,7 +406,7 @@ struct fsnotify_mark *fsnotify_find_mark(struct hlist_head *head, | |||
391 | } | 406 | } |
392 | 407 | ||
393 | /* | 408 | /* |
394 | * Clear any marks in a group in which mark->flags & flags is true. | 409 | * clear any marks in a group in which mark->flags & flags is true |
395 | */ | 410 | */ |
396 | void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, | 411 | void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, |
397 | unsigned int flags) | 412 | unsigned int flags) |
@@ -445,7 +460,6 @@ static int fsnotify_mark_destroy(void *ignored) | |||
445 | { | 460 | { |
446 | struct fsnotify_mark *mark, *next; | 461 | struct fsnotify_mark *mark, *next; |
447 | struct list_head private_destroy_list; | 462 | struct list_head private_destroy_list; |
448 | struct fsnotify_group *group; | ||
449 | 463 | ||
450 | for (;;) { | 464 | for (;;) { |
451 | spin_lock(&destroy_lock); | 465 | spin_lock(&destroy_lock); |
@@ -457,14 +471,6 @@ static int fsnotify_mark_destroy(void *ignored) | |||
457 | 471 | ||
458 | list_for_each_entry_safe(mark, next, &private_destroy_list, g_list) { | 472 | list_for_each_entry_safe(mark, next, &private_destroy_list, g_list) { |
459 | list_del_init(&mark->g_list); | 473 | list_del_init(&mark->g_list); |
460 | group = mark->group; | ||
461 | /* | ||
462 | * Some groups like to know that marks are being freed. | ||
463 | * This is a callback to the group function to let it | ||
464 | * know that this mark is being freed. | ||
465 | */ | ||
466 | if (group && group->ops->freeing_mark) | ||
467 | group->ops->freeing_mark(mark, group); | ||
468 | fsnotify_put_mark(mark); | 474 | fsnotify_put_mark(mark); |
469 | } | 475 | } |
470 | 476 | ||
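
The fsnotify change above moves the freeing_mark callback from the destroy worker into fsnotify_destroy_mark_locked(), which therefore has to drop group->mark_mutex around the call-out and re-take it (with mutex_lock_nested(..., SINGLE_DEPTH_NESTING), matching how its callers acquire the lock) before returning, since the caller still expects to hold it. A stripped-down user-space analogue of that "release, call out, re-acquire" shape, with hypothetical names and pthreads standing in for the kernel mutex:

    #include <pthread.h>

    struct group {
            pthread_mutex_t mutex;
            void (*freeing_mark)(void *mark, struct group *grp);  /* optional callback */
    };

    /* Caller holds grp->mutex on entry and gets it back on exit, but the
     * callback runs with the lock dropped, as in the hunk above. */
    static void destroy_mark_locked(struct group *grp, void *mark)
    {
            /* ...detach 'mark' from the group's lists while still locked... */

            pthread_mutex_unlock(&grp->mutex);

            if (grp->freeing_mark)
                    grp->freeing_mark(mark, grp);   /* may sleep, may take other locks */

            pthread_mutex_lock(&grp->mutex);
    }
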
diff --git a/fs/pnode.h b/fs/pnode.h index 7114ce6e6b9e..0fcdbe7ca648 100644 --- a/fs/pnode.h +++ b/fs/pnode.h | |||
@@ -20,8 +20,6 @@ | |||
20 | #define SET_MNT_MARK(m) ((m)->mnt.mnt_flags |= MNT_MARKED) | 20 | #define SET_MNT_MARK(m) ((m)->mnt.mnt_flags |= MNT_MARKED) |
21 | #define CLEAR_MNT_MARK(m) ((m)->mnt.mnt_flags &= ~MNT_MARKED) | 21 | #define CLEAR_MNT_MARK(m) ((m)->mnt.mnt_flags &= ~MNT_MARKED) |
22 | #define IS_MNT_LOCKED(m) ((m)->mnt.mnt_flags & MNT_LOCKED) | 22 | #define IS_MNT_LOCKED(m) ((m)->mnt.mnt_flags & MNT_LOCKED) |
23 | #define IS_MNT_LOCKED_AND_LAZY(m) \ | ||
24 | (((m)->mnt.mnt_flags & (MNT_LOCKED|MNT_SYNC_UMOUNT)) == MNT_LOCKED) | ||
25 | 23 | ||
26 | #define CL_EXPIRE 0x01 | 24 | #define CL_EXPIRE 0x01 |
27 | #define CL_SLAVE 0x02 | 25 | #define CL_SLAVE 0x02 |
diff --git a/fs/udf/inode.c b/fs/udf/inode.c index 6afac3d561ac..8d0b3ade0ff0 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c | |||
@@ -1652,17 +1652,9 @@ static int udf_update_inode(struct inode *inode, int do_sync) | |||
1652 | iinfo->i_ext.i_data, inode->i_sb->s_blocksize - | 1652 | iinfo->i_ext.i_data, inode->i_sb->s_blocksize - |
1653 | sizeof(struct unallocSpaceEntry)); | 1653 | sizeof(struct unallocSpaceEntry)); |
1654 | use->descTag.tagIdent = cpu_to_le16(TAG_IDENT_USE); | 1654 | use->descTag.tagIdent = cpu_to_le16(TAG_IDENT_USE); |
1655 | use->descTag.tagLocation = | 1655 | crclen = sizeof(struct unallocSpaceEntry); |
1656 | cpu_to_le32(iinfo->i_location.logicalBlockNum); | ||
1657 | crclen = sizeof(struct unallocSpaceEntry) + | ||
1658 | iinfo->i_lenAlloc - sizeof(struct tag); | ||
1659 | use->descTag.descCRCLength = cpu_to_le16(crclen); | ||
1660 | use->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)use + | ||
1661 | sizeof(struct tag), | ||
1662 | crclen)); | ||
1663 | use->descTag.tagChecksum = udf_tag_checksum(&use->descTag); | ||
1664 | 1656 | ||
1665 | goto out; | 1657 | goto finish; |
1666 | } | 1658 | } |
1667 | 1659 | ||
1668 | if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET)) | 1660 | if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET)) |
@@ -1782,6 +1774,8 @@ static int udf_update_inode(struct inode *inode, int do_sync) | |||
1782 | efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE); | 1774 | efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE); |
1783 | crclen = sizeof(struct extendedFileEntry); | 1775 | crclen = sizeof(struct extendedFileEntry); |
1784 | } | 1776 | } |
1777 | |||
1778 | finish: | ||
1785 | if (iinfo->i_strat4096) { | 1779 | if (iinfo->i_strat4096) { |
1786 | fe->icbTag.strategyType = cpu_to_le16(4096); | 1780 | fe->icbTag.strategyType = cpu_to_le16(4096); |
1787 | fe->icbTag.strategyParameter = cpu_to_le16(1); | 1781 | fe->icbTag.strategyParameter = cpu_to_le16(1); |
@@ -1791,7 +1785,9 @@ static int udf_update_inode(struct inode *inode, int do_sync) | |||
1791 | fe->icbTag.numEntries = cpu_to_le16(1); | 1785 | fe->icbTag.numEntries = cpu_to_le16(1); |
1792 | } | 1786 | } |
1793 | 1787 | ||
1794 | if (S_ISDIR(inode->i_mode)) | 1788 | if (iinfo->i_use) |
1789 | fe->icbTag.fileType = ICBTAG_FILE_TYPE_USE; | ||
1790 | else if (S_ISDIR(inode->i_mode)) | ||
1795 | fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY; | 1791 | fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY; |
1796 | else if (S_ISREG(inode->i_mode)) | 1792 | else if (S_ISREG(inode->i_mode)) |
1797 | fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR; | 1793 | fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR; |
@@ -1828,7 +1824,6 @@ static int udf_update_inode(struct inode *inode, int do_sync) | |||
1828 | crclen)); | 1824 | crclen)); |
1829 | fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag); | 1825 | fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag); |
1830 | 1826 | ||
1831 | out: | ||
1832 | set_buffer_uptodate(bh); | 1827 | set_buffer_uptodate(bh); |
1833 | unlock_buffer(bh); | 1828 | unlock_buffer(bh); |
1834 | 1829 | ||
diff --git a/include/linux/ata.h b/include/linux/ata.h index fed36418dd1c..6c78956aa470 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h | |||
@@ -45,6 +45,7 @@ enum { | |||
45 | ATA_SECT_SIZE = 512, | 45 | ATA_SECT_SIZE = 512, |
46 | ATA_MAX_SECTORS_128 = 128, | 46 | ATA_MAX_SECTORS_128 = 128, |
47 | ATA_MAX_SECTORS = 256, | 47 | ATA_MAX_SECTORS = 256, |
48 | ATA_MAX_SECTORS_1024 = 1024, | ||
48 | ATA_MAX_SECTORS_LBA48 = 65535,/* TODO: 65536? */ | 49 | ATA_MAX_SECTORS_LBA48 = 65535,/* TODO: 65536? */ |
49 | ATA_MAX_SECTORS_TAPE = 65535, | 50 | ATA_MAX_SECTORS_TAPE = 65535, |
50 | 51 | ||
diff --git a/include/linux/cper.h b/include/linux/cper.h index 76abba4b238e..dcacb1a72e26 100644 --- a/include/linux/cper.h +++ b/include/linux/cper.h | |||
@@ -340,7 +340,27 @@ struct cper_ia_proc_ctx { | |||
340 | __u64 mm_reg_addr; | 340 | __u64 mm_reg_addr; |
341 | }; | 341 | }; |
342 | 342 | ||
343 | /* Memory Error Section */ | 343 | /* Old Memory Error Section UEFI 2.1, 2.2 */ |
344 | struct cper_sec_mem_err_old { | ||
345 | __u64 validation_bits; | ||
346 | __u64 error_status; | ||
347 | __u64 physical_addr; | ||
348 | __u64 physical_addr_mask; | ||
349 | __u16 node; | ||
350 | __u16 card; | ||
351 | __u16 module; | ||
352 | __u16 bank; | ||
353 | __u16 device; | ||
354 | __u16 row; | ||
355 | __u16 column; | ||
356 | __u16 bit_pos; | ||
357 | __u64 requestor_id; | ||
358 | __u64 responder_id; | ||
359 | __u64 target_id; | ||
360 | __u8 error_type; | ||
361 | }; | ||
362 | |||
363 | /* Memory Error Section UEFI >= 2.3 */ | ||
344 | struct cper_sec_mem_err { | 364 | struct cper_sec_mem_err { |
345 | __u64 validation_bits; | 365 | __u64 validation_bits; |
346 | __u64 error_status; | 366 | __u64 error_status; |
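
With both layouts in the header, firmware that still emits the shorter UEFI 2.1/2.2 record can be handled alongside UEFI 2.3+ firmware. The two structs share their leading fields, so one plausible way for a consumer to pick a layout is by the reported section length; the dispatch below is a hypothetical sketch, not code from this patch:

    /* Hypothetical: 'len' is the error-data length from the CPER section
     * descriptor; the structs are the ones defined above. */
    static void handle_mem_err_section(const void *payload, unsigned long len)
    {
            if (len >= sizeof(struct cper_sec_mem_err)) {
                    const struct cper_sec_mem_err *m = payload;
                    /* full UEFI >= 2.3 record; individual fields are only
                     * meaningful when their validation_bits are set */
                    (void)m;
            } else if (len >= sizeof(struct cper_sec_mem_err_old)) {
                    const struct cper_sec_mem_err_old *m = payload;
                    (void)m;
            }
            /* anything shorter is malformed and should be skipped */
    }
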
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 1da602982cf9..6cd8c0ee4b6f 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h | |||
@@ -116,6 +116,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops); | |||
116 | * SAVE_REGS. If another ops with this flag set is already registered | 116 | * SAVE_REGS. If another ops with this flag set is already registered |
117 | * for any of the functions that this ops will be registered for, then | 117 | * for any of the functions that this ops will be registered for, then |
118 | * this ops will fail to register or set_filter_ip. | 118 | * this ops will fail to register or set_filter_ip. |
119 | * PID - Is affected by set_ftrace_pid (allows filtering on those pids) | ||
119 | */ | 120 | */ |
120 | enum { | 121 | enum { |
121 | FTRACE_OPS_FL_ENABLED = 1 << 0, | 122 | FTRACE_OPS_FL_ENABLED = 1 << 0, |
@@ -132,6 +133,7 @@ enum { | |||
132 | FTRACE_OPS_FL_MODIFYING = 1 << 11, | 133 | FTRACE_OPS_FL_MODIFYING = 1 << 11, |
133 | FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12, | 134 | FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12, |
134 | FTRACE_OPS_FL_IPMODIFY = 1 << 13, | 135 | FTRACE_OPS_FL_IPMODIFY = 1 << 13, |
136 | FTRACE_OPS_FL_PID = 1 << 14, | ||
135 | }; | 137 | }; |
136 | 138 | ||
137 | #ifdef CONFIG_DYNAMIC_FTRACE | 139 | #ifdef CONFIG_DYNAMIC_FTRACE |
@@ -159,6 +161,7 @@ struct ftrace_ops { | |||
159 | struct ftrace_ops *next; | 161 | struct ftrace_ops *next; |
160 | unsigned long flags; | 162 | unsigned long flags; |
161 | void *private; | 163 | void *private; |
164 | ftrace_func_t saved_func; | ||
162 | int __percpu *disabled; | 165 | int __percpu *disabled; |
163 | #ifdef CONFIG_DYNAMIC_FTRACE | 166 | #ifdef CONFIG_DYNAMIC_FTRACE |
164 | int nr_trampolines; | 167 | int nr_trampolines; |
diff --git a/include/linux/libata.h b/include/linux/libata.h index 36ce37bcc963..c9cfbcdb8d14 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
@@ -431,6 +431,8 @@ enum { | |||
431 | ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */ | 431 | ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */ |
432 | ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */ | 432 | ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */ |
433 | ATA_HORKAGE_NO_NCQ_LOG = (1 << 23), /* don't use NCQ for log read */ | 433 | ATA_HORKAGE_NO_NCQ_LOG = (1 << 23), /* don't use NCQ for log read */ |
434 | ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */ | ||
435 | ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */ | ||
434 | 436 | ||
435 | /* DMA mask for user DMA control: User visible values; DO NOT | 437 | /* DMA mask for user DMA control: User visible values; DO NOT |
436 | renumber */ | 438 | renumber */ |
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index f25e2bdd188c..272f42952f34 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h | |||
@@ -178,17 +178,17 @@ typedef enum { | |||
178 | /* Chip may not exist, so silence any errors in scan */ | 178 | /* Chip may not exist, so silence any errors in scan */ |
179 | #define NAND_SCAN_SILENT_NODEV 0x00040000 | 179 | #define NAND_SCAN_SILENT_NODEV 0x00040000 |
180 | /* | 180 | /* |
181 | * This option could be defined by controller drivers to protect against | ||
182 | * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers | ||
183 | */ | ||
184 | #define NAND_USE_BOUNCE_BUFFER 0x00080000 | ||
185 | /* | ||
186 | * Autodetect nand buswidth with readid/onfi. | 181 | * Autodetect nand buswidth with readid/onfi. |
187 | * This suppose the driver will configure the hardware in 8 bits mode | 182 | * This suppose the driver will configure the hardware in 8 bits mode |
188 | * when calling nand_scan_ident, and update its configuration | 183 | * when calling nand_scan_ident, and update its configuration |
189 | * before calling nand_scan_tail. | 184 | * before calling nand_scan_tail. |
190 | */ | 185 | */ |
191 | #define NAND_BUSWIDTH_AUTO 0x00080000 | 186 | #define NAND_BUSWIDTH_AUTO 0x00080000 |
187 | /* | ||
188 | * This option could be defined by controller drivers to protect against | ||
189 | * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers | ||
190 | */ | ||
191 | #define NAND_USE_BOUNCE_BUFFER 0x00100000 | ||
192 | 192 | ||
193 | /* Options set by nand scan */ | 193 | /* Options set by nand scan */ |
194 | /* Nand scan has allocated controller struct */ | 194 | /* Nand scan has allocated controller struct */ |
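
The swap above is a bit-collision fix: NAND_USE_BOUNCE_BUFFER had been given 0x00080000, the value NAND_BUSWIDTH_AUTO already used, so the bounce-buffer option moves to the free 0x00100000 bit. A cheap way to catch that class of mistake at build time is a preprocessor check next to the definitions; the guard below is a hypothetical addition, not part of this patch:

    /* Hypothetical compile-time guard against two option flags sharing a bit. */
    #if (NAND_BUSWIDTH_AUTO & NAND_USE_BOUNCE_BUFFER) || \
        (NAND_SCAN_SILENT_NODEV & NAND_USE_BOUNCE_BUFFER)
    #error "NAND option bits overlap"
    #endif
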
diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h index 75f70f6ac137..e1571efa3f2b 100644 --- a/include/linux/platform_data/mmc-esdhc-imx.h +++ b/include/linux/platform_data/mmc-esdhc-imx.h | |||
@@ -43,7 +43,6 @@ struct esdhc_platform_data { | |||
43 | enum wp_types wp_type; | 43 | enum wp_types wp_type; |
44 | enum cd_types cd_type; | 44 | enum cd_types cd_type; |
45 | int max_bus_width; | 45 | int max_bus_width; |
46 | unsigned int f_max; | ||
47 | bool support_vsel; | 46 | bool support_vsel; |
48 | unsigned int delay_line; | 47 | unsigned int delay_line; |
49 | }; | 48 | }; |
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index a741678f24a2..883fe1e7c5a1 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h | |||
@@ -4868,6 +4868,23 @@ bool cfg80211_reg_can_beacon(struct wiphy *wiphy, | |||
4868 | struct cfg80211_chan_def *chandef, | 4868 | struct cfg80211_chan_def *chandef, |
4869 | enum nl80211_iftype iftype); | 4869 | enum nl80211_iftype iftype); |
4870 | 4870 | ||
4871 | /** | ||
4872 | * cfg80211_reg_can_beacon_relax - check if beaconing is allowed with relaxation | ||
4873 | * @wiphy: the wiphy | ||
4874 | * @chandef: the channel definition | ||
4875 | * @iftype: interface type | ||
4876 | * | ||
4877 | * Return: %true if there is no secondary channel or the secondary channel(s) | ||
4878 | * can be used for beaconing (i.e. is not a radar channel etc.). This version | ||
4879 | * also checks if IR-relaxation conditions apply, to allow beaconing under | ||
4880 | * more permissive conditions. | ||
4881 | * | ||
4882 | * Requires the RTNL to be held. | ||
4883 | */ | ||
4884 | bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy, | ||
4885 | struct cfg80211_chan_def *chandef, | ||
4886 | enum nl80211_iftype iftype); | ||
4887 | |||
4871 | /* | 4888 | /* |
4872 | * cfg80211_ch_switch_notify - update wdev channel and notify userspace | 4889 | * cfg80211_ch_switch_notify - update wdev channel and notify userspace |
4873 | * @dev: the device which switched channels | 4890 | * @dev: the device which switched channels |
diff --git a/include/net/ip.h b/include/net/ip.h index 0750a186ea63..d5fe9f2ab699 100644 --- a/include/net/ip.h +++ b/include/net/ip.h | |||
@@ -161,6 +161,7 @@ static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk) | |||
161 | } | 161 | } |
162 | 162 | ||
163 | /* datagram.c */ | 163 | /* datagram.c */ |
164 | int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); | ||
164 | int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); | 165 | int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); |
165 | 166 | ||
166 | void ip4_datagram_release_cb(struct sock *sk); | 167 | void ip4_datagram_release_cb(struct sock *sk); |
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index b6fce900a833..d708a53b8fb1 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h | |||
@@ -614,6 +614,8 @@ struct drm_amdgpu_info_device { | |||
614 | uint32_t vram_type; | 614 | uint32_t vram_type; |
615 | /** video memory bit width*/ | 615 | /** video memory bit width*/ |
616 | uint32_t vram_bit_width; | 616 | uint32_t vram_bit_width; |
617 | /* vce harvesting instance */ | ||
618 | uint32_t vce_harvest_config; | ||
617 | }; | 619 | }; |
618 | 620 | ||
619 | struct drm_amdgpu_info_hw_ip { | 621 | struct drm_amdgpu_info_hw_ip { |
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h index 6e1a2ed116cb..db809b722985 100644 --- a/include/uapi/drm/i915_drm.h +++ b/include/uapi/drm/i915_drm.h | |||
@@ -1070,6 +1070,14 @@ struct drm_i915_reg_read { | |||
1070 | __u64 offset; | 1070 | __u64 offset; |
1071 | __u64 val; /* Return value */ | 1071 | __u64 val; /* Return value */ |
1072 | }; | 1072 | }; |
1073 | /* Known registers: | ||
1074 | * | ||
1075 | * Render engine timestamp - 0x2358 + 64bit - gen7+ | ||
1076 | * - Note this register returns an invalid value if using the default | ||
1077 | * single instruction 8byte read, in order to workaround that use | ||
1078 | * offset (0x2538 | 1) instead. | ||
1079 | * | ||
1080 | */ | ||
1073 | 1081 | ||
1074 | struct drm_i915_reset_stats { | 1082 | struct drm_i915_reset_stats { |
1075 | __u32 ctx_id; | 1083 | __u32 ctx_id; |
diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h index 7bbee79ca293..ec32293a00db 100644 --- a/include/uapi/linux/virtio_net.h +++ b/include/uapi/linux/virtio_net.h | |||
@@ -34,6 +34,7 @@ | |||
34 | /* The feature bitmap for virtio net */ | 34 | /* The feature bitmap for virtio net */ |
35 | #define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */ | 35 | #define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */ |
36 | #define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */ | 36 | #define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */ |
37 | #define VIRTIO_NET_F_CTRL_GUEST_OFFLOADS 2 /* Dynamic offload configuration. */ | ||
37 | #define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */ | 38 | #define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */ |
38 | #define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */ | 39 | #define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */ |
39 | #define VIRTIO_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */ | 40 | #define VIRTIO_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */ |
@@ -226,4 +227,19 @@ struct virtio_net_ctrl_mq { | |||
226 | #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN 1 | 227 | #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN 1 |
227 | #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX 0x8000 | 228 | #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX 0x8000 |
228 | 229 | ||
230 | /* | ||
231 | * Control network offloads | ||
232 | * | ||
233 | * Reconfigures the network offloads that Guest can handle. | ||
234 | * | ||
235 | * Available with the VIRTIO_NET_F_CTRL_GUEST_OFFLOADS feature bit. | ||
236 | * | ||
237 | * Command data format matches the feature bit mask exactly. | ||
238 | * | ||
239 | * See VIRTIO_NET_F_GUEST_* for the list of offloads | ||
240 | * that can be enabled/disabled. | ||
241 | */ | ||
242 | #define VIRTIO_NET_CTRL_GUEST_OFFLOADS 5 | ||
243 | #define VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET 0 | ||
244 | |||
229 | #endif /* _LINUX_VIRTIO_NET_H */ | 245 | #endif /* _LINUX_VIRTIO_NET_H */ |
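
Per the new comment, the payload of VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET is simply a 64-bit mask laid out like the feature bits, so toggling guest offloads at runtime comes down to building that mask and pushing it through the control virtqueue. A loose sketch of the payload side only; send_ctrl_command(), struct example_vnet and its vdev member are placeholders, not real APIs:

    /* Sketch: keep guest checksum offload, drop everything else (TSO/UFO).
     * send_ctrl_command() stands in for a driver's control-VQ helper. */
    static int example_set_guest_offloads(struct example_vnet *vi)
    {
            u64 offloads = 1ULL << VIRTIO_NET_F_GUEST_CSUM;
            __virtio64 data = cpu_to_virtio64(vi->vdev, offloads);

            return send_ctrl_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
                                     VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
                                     &data, sizeof(data));
    }
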
diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h index 75301468359f..90007a1abcab 100644 --- a/include/uapi/linux/virtio_pci.h +++ b/include/uapi/linux/virtio_pci.h | |||
@@ -157,6 +157,12 @@ struct virtio_pci_common_cfg { | |||
157 | __le32 queue_used_hi; /* read-write */ | 157 | __le32 queue_used_hi; /* read-write */ |
158 | }; | 158 | }; |
159 | 159 | ||
160 | /* Fields in VIRTIO_PCI_CAP_PCI_CFG: */ | ||
161 | struct virtio_pci_cfg_cap { | ||
162 | struct virtio_pci_cap cap; | ||
163 | __u8 pci_cfg_data[4]; /* Data for BAR access. */ | ||
164 | }; | ||
165 | |||
160 | /* Macro versions of offsets for the Old Timers! */ | 166 | /* Macro versions of offsets for the Old Timers! */ |
161 | #define VIRTIO_PCI_CAP_VNDR 0 | 167 | #define VIRTIO_PCI_CAP_VNDR 0 |
162 | #define VIRTIO_PCI_CAP_NEXT 1 | 168 | #define VIRTIO_PCI_CAP_NEXT 1 |
diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h index 915980ac68df..c07295969b7e 100644 --- a/include/uapi/linux/virtio_ring.h +++ b/include/uapi/linux/virtio_ring.h | |||
@@ -31,6 +31,9 @@ | |||
31 | * SUCH DAMAGE. | 31 | * SUCH DAMAGE. |
32 | * | 32 | * |
33 | * Copyright Rusty Russell IBM Corporation 2007. */ | 33 | * Copyright Rusty Russell IBM Corporation 2007. */ |
34 | #ifndef __KERNEL__ | ||
35 | #include <stdint.h> | ||
36 | #endif | ||
34 | #include <linux/types.h> | 37 | #include <linux/types.h> |
35 | #include <linux/virtio_types.h> | 38 | #include <linux/virtio_types.h> |
36 | 39 | ||
@@ -143,7 +146,7 @@ static inline void vring_init(struct vring *vr, unsigned int num, void *p, | |||
143 | vr->num = num; | 146 | vr->num = num; |
144 | vr->desc = p; | 147 | vr->desc = p; |
145 | vr->avail = p + num*sizeof(struct vring_desc); | 148 | vr->avail = p + num*sizeof(struct vring_desc); |
146 | vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + sizeof(__virtio16) | 149 | vr->used = (void *)(((uintptr_t)&vr->avail->ring[num] + sizeof(__virtio16) |
147 | + align-1) & ~(align - 1)); | 150 | + align-1) & ~(align - 1)); |
148 | } | 151 | } |
149 | 152 | ||
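
The #ifndef __KERNEL__ include of <stdint.h>, together with switching vring_init()'s pointer arithmetic to uintptr_t, keeps this header usable from user space (vhost test tools and the like). A minimal user-space sketch of laying out a ring with the header's own vring_size()/vring_init() helpers, assuming a Linux system with the uapi headers installed:

    #include <stdlib.h>
    #include <string.h>
    #include <linux/virtio_ring.h>

    #define RING_NUM   256
    #define RING_ALIGN 4096

    int main(void)
    {
            struct vring vr;
            void *mem;

            if (posix_memalign(&mem, RING_ALIGN, vring_size(RING_NUM, RING_ALIGN)))
                    return 1;
            memset(mem, 0, vring_size(RING_NUM, RING_ALIGN));

            /* vr.desc, vr.avail and vr.used now point into 'mem' */
            vring_init(&vr, RING_NUM, mem, RING_ALIGN);

            free(mem);
            return 0;
    }
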
diff --git a/kernel/resource.c b/kernel/resource.c index 90552aab5f2d..fed052a1bc9f 100644 --- a/kernel/resource.c +++ b/kernel/resource.c | |||
@@ -504,13 +504,13 @@ int region_is_ram(resource_size_t start, unsigned long size) | |||
504 | { | 504 | { |
505 | struct resource *p; | 505 | struct resource *p; |
506 | resource_size_t end = start + size - 1; | 506 | resource_size_t end = start + size - 1; |
507 | int flags = IORESOURCE_MEM | IORESOURCE_BUSY; | 507 | unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY; |
508 | const char *name = "System RAM"; | 508 | const char *name = "System RAM"; |
509 | int ret = -1; | 509 | int ret = -1; |
510 | 510 | ||
511 | read_lock(&resource_lock); | 511 | read_lock(&resource_lock); |
512 | for (p = iomem_resource.child; p ; p = p->sibling) { | 512 | for (p = iomem_resource.child; p ; p = p->sibling) { |
513 | if (end < p->start) | 513 | if (p->end < start) |
514 | continue; | 514 | continue; |
515 | 515 | ||
516 | if (p->start <= start && end <= p->end) { | 516 | if (p->start <= start && end <= p->end) { |
@@ -521,7 +521,7 @@ int region_is_ram(resource_size_t start, unsigned long size) | |||
521 | ret = 1; | 521 | ret = 1; |
522 | break; | 522 | break; |
523 | } | 523 | } |
524 | if (p->end < start) | 524 | if (end < p->start) |
525 | break; /* not found */ | 525 | break; /* not found */ |
526 | } | 526 | } |
527 | read_unlock(&resource_lock); | 527 | read_unlock(&resource_lock); |
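
The two swapped comparisons are the entire fix: while walking iomem_resource's children, which are kept sorted by address, an entry that ends before the queried range should merely be skipped, and the walk can stop as soon as an entry starts beyond the range; the old code had those two tests the wrong way round. The same scan over a sorted interval list, as a standalone sketch with its own simplified return convention:

    #include <stdint.h>
    #include <stddef.h>

    struct res { uint64_t start, end; };   /* inclusive ranges, sorted by start */

    /* 1: [start, start+size-1] lies entirely inside one entry,
     * 0: it touches none of them, -1: it only partially overlaps one. */
    static int region_check(const struct res *r, size_t n,
                            uint64_t start, uint64_t size)
    {
            uint64_t end = start + size - 1;
            size_t i;

            for (i = 0; i < n; i++) {
                    if (r[i].end < start)
                            continue;       /* entirely before us: skip */
                    if (r[i].start <= start && end <= r[i].end)
                            return 1;       /* fully contained */
                    if (end < r[i].start)
                            break;          /* sorted: nothing later can match */
                    return -1;              /* partial overlap */
            }
            return 0;
    }
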
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 02bece4a99ea..eb11011b5292 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -98,6 +98,13 @@ struct ftrace_pid { | |||
98 | struct pid *pid; | 98 | struct pid *pid; |
99 | }; | 99 | }; |
100 | 100 | ||
101 | static bool ftrace_pids_enabled(void) | ||
102 | { | ||
103 | return !list_empty(&ftrace_pids); | ||
104 | } | ||
105 | |||
106 | static void ftrace_update_trampoline(struct ftrace_ops *ops); | ||
107 | |||
101 | /* | 108 | /* |
102 | * ftrace_disabled is set when an anomaly is discovered. | 109 | * ftrace_disabled is set when an anomaly is discovered. |
103 | * ftrace_disabled is much stronger than ftrace_enabled. | 110 | * ftrace_disabled is much stronger than ftrace_enabled. |
@@ -109,7 +116,6 @@ static DEFINE_MUTEX(ftrace_lock); | |||
109 | static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end; | 116 | static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end; |
110 | static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end; | 117 | static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end; |
111 | ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; | 118 | ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; |
112 | ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub; | ||
113 | static struct ftrace_ops global_ops; | 119 | static struct ftrace_ops global_ops; |
114 | static struct ftrace_ops control_ops; | 120 | static struct ftrace_ops control_ops; |
115 | 121 | ||
@@ -183,14 +189,7 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip, | |||
183 | if (!test_tsk_trace_trace(current)) | 189 | if (!test_tsk_trace_trace(current)) |
184 | return; | 190 | return; |
185 | 191 | ||
186 | ftrace_pid_function(ip, parent_ip, op, regs); | 192 | op->saved_func(ip, parent_ip, op, regs); |
187 | } | ||
188 | |||
189 | static void set_ftrace_pid_function(ftrace_func_t func) | ||
190 | { | ||
191 | /* do not set ftrace_pid_function to itself! */ | ||
192 | if (func != ftrace_pid_func) | ||
193 | ftrace_pid_function = func; | ||
194 | } | 193 | } |
195 | 194 | ||
196 | /** | 195 | /** |
@@ -202,7 +201,6 @@ static void set_ftrace_pid_function(ftrace_func_t func) | |||
202 | void clear_ftrace_function(void) | 201 | void clear_ftrace_function(void) |
203 | { | 202 | { |
204 | ftrace_trace_function = ftrace_stub; | 203 | ftrace_trace_function = ftrace_stub; |
205 | ftrace_pid_function = ftrace_stub; | ||
206 | } | 204 | } |
207 | 205 | ||
208 | static void control_ops_disable_all(struct ftrace_ops *ops) | 206 | static void control_ops_disable_all(struct ftrace_ops *ops) |
@@ -436,6 +434,12 @@ static int __register_ftrace_function(struct ftrace_ops *ops) | |||
436 | } else | 434 | } else |
437 | add_ftrace_ops(&ftrace_ops_list, ops); | 435 | add_ftrace_ops(&ftrace_ops_list, ops); |
438 | 436 | ||
437 | /* Always save the function, and reset at unregistering */ | ||
438 | ops->saved_func = ops->func; | ||
439 | |||
440 | if (ops->flags & FTRACE_OPS_FL_PID && ftrace_pids_enabled()) | ||
441 | ops->func = ftrace_pid_func; | ||
442 | |||
439 | ftrace_update_trampoline(ops); | 443 | ftrace_update_trampoline(ops); |
440 | 444 | ||
441 | if (ftrace_enabled) | 445 | if (ftrace_enabled) |
@@ -463,15 +467,28 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) | |||
463 | if (ftrace_enabled) | 467 | if (ftrace_enabled) |
464 | update_ftrace_function(); | 468 | update_ftrace_function(); |
465 | 469 | ||
470 | ops->func = ops->saved_func; | ||
471 | |||
466 | return 0; | 472 | return 0; |
467 | } | 473 | } |
468 | 474 | ||
469 | static void ftrace_update_pid_func(void) | 475 | static void ftrace_update_pid_func(void) |
470 | { | 476 | { |
477 | bool enabled = ftrace_pids_enabled(); | ||
478 | struct ftrace_ops *op; | ||
479 | |||
471 | /* Only do something if we are tracing something */ | 480 | /* Only do something if we are tracing something */ |
472 | if (ftrace_trace_function == ftrace_stub) | 481 | if (ftrace_trace_function == ftrace_stub) |
473 | return; | 482 | return; |
474 | 483 | ||
484 | do_for_each_ftrace_op(op, ftrace_ops_list) { | ||
485 | if (op->flags & FTRACE_OPS_FL_PID) { | ||
486 | op->func = enabled ? ftrace_pid_func : | ||
487 | op->saved_func; | ||
488 | ftrace_update_trampoline(op); | ||
489 | } | ||
490 | } while_for_each_ftrace_op(op); | ||
491 | |||
475 | update_ftrace_function(); | 492 | update_ftrace_function(); |
476 | } | 493 | } |
477 | 494 | ||
@@ -1133,7 +1150,8 @@ static struct ftrace_ops global_ops = { | |||
1133 | .local_hash.filter_hash = EMPTY_HASH, | 1150 | .local_hash.filter_hash = EMPTY_HASH, |
1134 | INIT_OPS_HASH(global_ops) | 1151 | INIT_OPS_HASH(global_ops) |
1135 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | | 1152 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | |
1136 | FTRACE_OPS_FL_INITIALIZED, | 1153 | FTRACE_OPS_FL_INITIALIZED | |
1154 | FTRACE_OPS_FL_PID, | ||
1137 | }; | 1155 | }; |
1138 | 1156 | ||
1139 | /* | 1157 | /* |
@@ -5023,7 +5041,9 @@ static void ftrace_update_trampoline(struct ftrace_ops *ops) | |||
5023 | 5041 | ||
5024 | static struct ftrace_ops global_ops = { | 5042 | static struct ftrace_ops global_ops = { |
5025 | .func = ftrace_stub, | 5043 | .func = ftrace_stub, |
5026 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, | 5044 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | |
5045 | FTRACE_OPS_FL_INITIALIZED | | ||
5046 | FTRACE_OPS_FL_PID, | ||
5027 | }; | 5047 | }; |
5028 | 5048 | ||
5029 | static int __init ftrace_nodyn_init(void) | 5049 | static int __init ftrace_nodyn_init(void) |
@@ -5080,11 +5100,6 @@ void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func) | |||
5080 | if (WARN_ON(tr->ops->func != ftrace_stub)) | 5100 | if (WARN_ON(tr->ops->func != ftrace_stub)) |
5081 | printk("ftrace ops had %pS for function\n", | 5101 | printk("ftrace ops had %pS for function\n", |
5082 | tr->ops->func); | 5102 | tr->ops->func); |
5083 | /* Only the top level instance does pid tracing */ | ||
5084 | if (!list_empty(&ftrace_pids)) { | ||
5085 | set_ftrace_pid_function(func); | ||
5086 | func = ftrace_pid_func; | ||
5087 | } | ||
5088 | } | 5103 | } |
5089 | tr->ops->func = func; | 5104 | tr->ops->func = func; |
5090 | tr->ops->private = tr; | 5105 | tr->ops->private = tr; |
@@ -5371,7 +5386,7 @@ static void *fpid_start(struct seq_file *m, loff_t *pos) | |||
5371 | { | 5386 | { |
5372 | mutex_lock(&ftrace_lock); | 5387 | mutex_lock(&ftrace_lock); |
5373 | 5388 | ||
5374 | if (list_empty(&ftrace_pids) && (!*pos)) | 5389 | if (!ftrace_pids_enabled() && (!*pos)) |
5375 | return (void *) 1; | 5390 | return (void *) 1; |
5376 | 5391 | ||
5377 | return seq_list_start(&ftrace_pids, *pos); | 5392 | return seq_list_start(&ftrace_pids, *pos); |
@@ -5610,6 +5625,7 @@ static struct ftrace_ops graph_ops = { | |||
5610 | .func = ftrace_stub, | 5625 | .func = ftrace_stub, |
5611 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | | 5626 | .flags = FTRACE_OPS_FL_RECURSION_SAFE | |
5612 | FTRACE_OPS_FL_INITIALIZED | | 5627 | FTRACE_OPS_FL_INITIALIZED | |
5628 | FTRACE_OPS_FL_PID | | ||
5613 | FTRACE_OPS_FL_STUB, | 5629 | FTRACE_OPS_FL_STUB, |
5614 | #ifdef FTRACE_GRAPH_TRAMP_ADDR | 5630 | #ifdef FTRACE_GRAPH_TRAMP_ADDR |
5615 | .trampoline = FTRACE_GRAPH_TRAMP_ADDR, | 5631 | .trampoline = FTRACE_GRAPH_TRAMP_ADDR, |
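
The heart of the ftrace change is a saved-pointer swap: __register_ftrace_function() records the real handler in ops->saved_func, and whenever pid filtering flips, ftrace_update_pid_func() walks the ops list and points func at either ftrace_pid_func (which checks the current task and then calls saved_func) or back at saved_func for every ops carrying FTRACE_OPS_FL_PID. The pattern reduced to standalone C, with made-up names and a trivial stand-in filter:

    #include <stdbool.h>

    struct ops {
            void (*func)(int event, struct ops *op);        /* what gets invoked */
            void (*saved_func)(int event, struct ops *op);  /* the real handler */
            bool filtered;                                  /* opts in to filtering */
    };

    static bool filtering_enabled;
    static bool event_allowed(int event) { return event % 2 == 0; } /* stand-in filter */

    static void filter_wrapper(int event, struct ops *op)
    {
            if (!event_allowed(event))
                    return;
            op->saved_func(event, op);      /* forward to the real handler */
    }

    static void register_ops(struct ops *op, void (*handler)(int, struct ops *))
    {
            op->saved_func = handler;       /* always remember the real handler */
            op->func = (op->filtered && filtering_enabled) ? filter_wrapper : handler;
    }

    static void update_filtering(struct ops **ops_list, int n, bool enabled)
    {
            filtering_enabled = enabled;
            for (int i = 0; i < n; i++)
                    if (ops_list[i]->filtered)
                            ops_list[i]->func = enabled ? filter_wrapper
                                                        : ops_list[i]->saved_func;
    }

Unregistering simply restores func from saved_func, which is what the hunk in __unregister_ftrace_function() does.
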
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index 9dd49ca67dbc..6e70ddb158b4 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c | |||
@@ -704,6 +704,7 @@ static void p9_virtio_remove(struct virtio_device *vdev) | |||
704 | 704 | ||
705 | mutex_unlock(&virtio_9p_lock); | 705 | mutex_unlock(&virtio_9p_lock); |
706 | 706 | ||
707 | vdev->config->reset(vdev); | ||
707 | vdev->config->del_vqs(vdev); | 708 | vdev->config->del_vqs(vdev); |
708 | 709 | ||
709 | sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr); | 710 | sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr); |
diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c index 1997538a5d23..3b78e8473a01 100644 --- a/net/ax25/ax25_subr.c +++ b/net/ax25/ax25_subr.c | |||
@@ -264,6 +264,7 @@ void ax25_disconnect(ax25_cb *ax25, int reason) | |||
264 | { | 264 | { |
265 | ax25_clear_queues(ax25); | 265 | ax25_clear_queues(ax25); |
266 | 266 | ||
267 | ax25_stop_heartbeat(ax25); | ||
267 | ax25_stop_t1timer(ax25); | 268 | ax25_stop_t1timer(ax25); |
268 | ax25_stop_t2timer(ax25); | 269 | ax25_stop_t2timer(ax25); |
269 | ax25_stop_t3timer(ax25); | 270 | ax25_stop_t3timer(ax25); |
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c index c11cf2611db0..1198a3dbad95 100644 --- a/net/bridge/br_mdb.c +++ b/net/bridge/br_mdb.c | |||
@@ -351,7 +351,6 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port, | |||
351 | if (state == MDB_TEMPORARY) | 351 | if (state == MDB_TEMPORARY) |
352 | mod_timer(&p->timer, now + br->multicast_membership_interval); | 352 | mod_timer(&p->timer, now + br->multicast_membership_interval); |
353 | 353 | ||
354 | br_mdb_notify(br->dev, port, group, RTM_NEWMDB); | ||
355 | return 0; | 354 | return 0; |
356 | } | 355 | } |
357 | 356 | ||
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 742a6c27d7a2..79db489cdade 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c | |||
@@ -39,6 +39,16 @@ static void br_multicast_start_querier(struct net_bridge *br, | |||
39 | struct bridge_mcast_own_query *query); | 39 | struct bridge_mcast_own_query *query); |
40 | static void br_multicast_add_router(struct net_bridge *br, | 40 | static void br_multicast_add_router(struct net_bridge *br, |
41 | struct net_bridge_port *port); | 41 | struct net_bridge_port *port); |
42 | static void br_ip4_multicast_leave_group(struct net_bridge *br, | ||
43 | struct net_bridge_port *port, | ||
44 | __be32 group, | ||
45 | __u16 vid); | ||
46 | #if IS_ENABLED(CONFIG_IPV6) | ||
47 | static void br_ip6_multicast_leave_group(struct net_bridge *br, | ||
48 | struct net_bridge_port *port, | ||
49 | const struct in6_addr *group, | ||
50 | __u16 vid); | ||
51 | #endif | ||
42 | unsigned int br_mdb_rehash_seq; | 52 | unsigned int br_mdb_rehash_seq; |
43 | 53 | ||
44 | static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b) | 54 | static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b) |
@@ -1010,9 +1020,15 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br, | |||
1010 | continue; | 1020 | continue; |
1011 | } | 1021 | } |
1012 | 1022 | ||
1013 | err = br_ip4_multicast_add_group(br, port, group, vid); | 1023 | if ((type == IGMPV3_CHANGE_TO_INCLUDE || |
1014 | if (err) | 1024 | type == IGMPV3_MODE_IS_INCLUDE) && |
1015 | break; | 1025 | ntohs(grec->grec_nsrcs) == 0) { |
1026 | br_ip4_multicast_leave_group(br, port, group, vid); | ||
1027 | } else { | ||
1028 | err = br_ip4_multicast_add_group(br, port, group, vid); | ||
1029 | if (err) | ||
1030 | break; | ||
1031 | } | ||
1016 | } | 1032 | } |
1017 | 1033 | ||
1018 | return err; | 1034 | return err; |
@@ -1071,10 +1087,17 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br, | |||
1071 | continue; | 1087 | continue; |
1072 | } | 1088 | } |
1073 | 1089 | ||
1074 | err = br_ip6_multicast_add_group(br, port, &grec->grec_mca, | 1090 | if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE || |
1075 | vid); | 1091 | grec->grec_type == MLD2_MODE_IS_INCLUDE) && |
1076 | if (err) | 1092 | ntohs(*nsrcs) == 0) { |
1077 | break; | 1093 | br_ip6_multicast_leave_group(br, port, &grec->grec_mca, |
1094 | vid); | ||
1095 | } else { | ||
1096 | err = br_ip6_multicast_add_group(br, port, | ||
1097 | &grec->grec_mca, vid); | ||
1098 | if (!err) | ||
1099 | break; | ||
1100 | } | ||
1078 | } | 1101 | } |
1079 | 1102 | ||
1080 | return err; | 1103 | return err; |
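
Both the IGMPv3 and MLDv2 hunks encode the same rule: a CHANGE_TO_INCLUDE or MODE_IS_INCLUDE group record with a source count of zero means the host's filter for that group is now empty, i.e. it is leaving, so the bridge should age the port out instead of blindly re-adding the group. The decision itself, pulled out into a tiny standalone helper (the record-type values follow RFC 3376; the helper is illustrative, not bridge code):

    #include <arpa/inet.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* IGMPv3 group record types (RFC 3376); MLDv2 uses the same numbering */
    enum { MODE_IS_INCLUDE = 1, MODE_IS_EXCLUDE = 2,
           CHANGE_TO_INCLUDE = 3, CHANGE_TO_EXCLUDE = 4 };

    /* nsrcs_be is the record's source count, still in network byte order,
     * matching the ntohs() calls in the hunks above. */
    static bool record_means_leave(uint8_t type, uint16_t nsrcs_be)
    {
            return (type == CHANGE_TO_INCLUDE || type == MODE_IS_INCLUDE) &&
                   ntohs(nsrcs_be) == 0;
    }
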
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index 3cc71b9f5517..cc858919108e 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c | |||
@@ -121,12 +121,13 @@ static void caif_flow_ctrl(struct sock *sk, int mode) | |||
121 | * Copied from sock.c:sock_queue_rcv_skb(), but changed so packets are | 121 | * Copied from sock.c:sock_queue_rcv_skb(), but changed so packets are |
122 | * not dropped, but CAIF is sending flow off instead. | 122 | * not dropped, but CAIF is sending flow off instead. |
123 | */ | 123 | */ |
124 | static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | 124 | static void caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) |
125 | { | 125 | { |
126 | int err; | 126 | int err; |
127 | unsigned long flags; | 127 | unsigned long flags; |
128 | struct sk_buff_head *list = &sk->sk_receive_queue; | 128 | struct sk_buff_head *list = &sk->sk_receive_queue; |
129 | struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); | 129 | struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); |
130 | bool queued = false; | ||
130 | 131 | ||
131 | if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= | 132 | if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= |
132 | (unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) { | 133 | (unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) { |
@@ -139,7 +140,8 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | |||
139 | 140 | ||
140 | err = sk_filter(sk, skb); | 141 | err = sk_filter(sk, skb); |
141 | if (err) | 142 | if (err) |
142 | return err; | 143 | goto out; |
144 | |||
143 | if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) { | 145 | if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) { |
144 | set_rx_flow_off(cf_sk); | 146 | set_rx_flow_off(cf_sk); |
145 | net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n"); | 147 | net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n"); |
@@ -147,21 +149,16 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) | |||
147 | } | 149 | } |
148 | skb->dev = NULL; | 150 | skb->dev = NULL; |
149 | skb_set_owner_r(skb, sk); | 151 | skb_set_owner_r(skb, sk); |
150 | /* Cache the SKB length before we tack it onto the receive | ||
151 | * queue. Once it is added it no longer belongs to us and | ||
152 | * may be freed by other threads of control pulling packets | ||
153 | * from the queue. | ||
154 | */ | ||
155 | spin_lock_irqsave(&list->lock, flags); | 152 | spin_lock_irqsave(&list->lock, flags); |
156 | if (!sock_flag(sk, SOCK_DEAD)) | 153 | queued = !sock_flag(sk, SOCK_DEAD); |
154 | if (queued) | ||
157 | __skb_queue_tail(list, skb); | 155 | __skb_queue_tail(list, skb); |
158 | spin_unlock_irqrestore(&list->lock, flags); | 156 | spin_unlock_irqrestore(&list->lock, flags); |
159 | 157 | out: | |
160 | if (!sock_flag(sk, SOCK_DEAD)) | 158 | if (queued) |
161 | sk->sk_data_ready(sk); | 159 | sk->sk_data_ready(sk); |
162 | else | 160 | else |
163 | kfree_skb(skb); | 161 | kfree_skb(skb); |
164 | return 0; | ||
165 | } | 162 | } |
166 | 163 | ||
167 | /* Packet Receive Callback function called from CAIF Stack */ | 164 | /* Packet Receive Callback function called from CAIF Stack */ |
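
The caif rework closes a small race: the old code tested SOCK_DEAD once under the queue lock (to decide whether to queue the skb) and again after dropping the lock (to decide between sk_data_ready() and kfree_skb()), and the socket could die in between, leaving a queued skb to be freed out from under the queue. The new code snapshots the decision in 'queued' while the lock is held and acts only on that snapshot. The shape of that fix, as a self-contained user-space sketch with pthreads and placeholder helpers:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdlib.h>

    struct queue {
            pthread_mutex_t lock;
            bool dead;                      /* set when the consumer goes away */
    };

    static void enqueue(struct queue *q, void *item) { (void)q; (void)item; }
    static void wake_reader(struct queue *q) { (void)q; }

    static void deliver(struct queue *q, void *item)
    {
            bool queued;

            pthread_mutex_lock(&q->lock);
            queued = !q->dead;              /* decide exactly once, under the lock */
            if (queued)
                    enqueue(q, item);
            pthread_mutex_unlock(&q->lock);

            if (queued)
                    wake_reader(q);         /* act on the snapshot, not on q->dead */
            else
                    free(item);             /* nobody will ever read it */
    }
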
diff --git a/net/core/datagram.c b/net/core/datagram.c index b80fb91bb3f7..4967262b2707 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c | |||
@@ -131,6 +131,35 @@ out_noerr: | |||
131 | goto out; | 131 | goto out; |
132 | } | 132 | } |
133 | 133 | ||
134 | static int skb_set_peeked(struct sk_buff *skb) | ||
135 | { | ||
136 | struct sk_buff *nskb; | ||
137 | |||
138 | if (skb->peeked) | ||
139 | return 0; | ||
140 | |||
141 | /* We have to unshare an skb before modifying it. */ | ||
142 | if (!skb_shared(skb)) | ||
143 | goto done; | ||
144 | |||
145 | nskb = skb_clone(skb, GFP_ATOMIC); | ||
146 | if (!nskb) | ||
147 | return -ENOMEM; | ||
148 | |||
149 | skb->prev->next = nskb; | ||
150 | skb->next->prev = nskb; | ||
151 | nskb->prev = skb->prev; | ||
152 | nskb->next = skb->next; | ||
153 | |||
154 | consume_skb(skb); | ||
155 | skb = nskb; | ||
156 | |||
157 | done: | ||
158 | skb->peeked = 1; | ||
159 | |||
160 | return 0; | ||
161 | } | ||
162 | |||
134 | /** | 163 | /** |
135 | * __skb_recv_datagram - Receive a datagram skbuff | 164 | * __skb_recv_datagram - Receive a datagram skbuff |
136 | * @sk: socket | 165 | * @sk: socket |
@@ -165,7 +194,9 @@ out_noerr: | |||
165 | struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags, | 194 | struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags, |
166 | int *peeked, int *off, int *err) | 195 | int *peeked, int *off, int *err) |
167 | { | 196 | { |
197 | struct sk_buff_head *queue = &sk->sk_receive_queue; | ||
168 | struct sk_buff *skb, *last; | 198 | struct sk_buff *skb, *last; |
199 | unsigned long cpu_flags; | ||
169 | long timeo; | 200 | long timeo; |
170 | /* | 201 | /* |
171 | * Caller is allowed not to check sk->sk_err before skb_recv_datagram() | 202 | * Caller is allowed not to check sk->sk_err before skb_recv_datagram() |
@@ -184,8 +215,6 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags, | |||
184 | * Look at current nfs client by the way... | 215 | * Look at current nfs client by the way... |
185 | * However, this function was correct in any case. 8) | 216 | * However, this function was correct in any case. 8) |
186 | */ | 217 | */ |
187 | unsigned long cpu_flags; | ||
188 | struct sk_buff_head *queue = &sk->sk_receive_queue; | ||
189 | int _off = *off; | 218 | int _off = *off; |
190 | 219 | ||
191 | last = (struct sk_buff *)queue; | 220 | last = (struct sk_buff *)queue; |
@@ -199,7 +228,11 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags, | |||
199 | _off -= skb->len; | 228 | _off -= skb->len; |
200 | continue; | 229 | continue; |
201 | } | 230 | } |
202 | skb->peeked = 1; | 231 | |
232 | error = skb_set_peeked(skb); | ||
233 | if (error) | ||
234 | goto unlock_err; | ||
235 | |||
203 | atomic_inc(&skb->users); | 236 | atomic_inc(&skb->users); |
204 | } else | 237 | } else |
205 | __skb_unlink(skb, queue); | 238 | __skb_unlink(skb, queue); |
@@ -223,6 +256,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags, | |||
223 | 256 | ||
224 | return NULL; | 257 | return NULL; |
225 | 258 | ||
259 | unlock_err: | ||
260 | spin_unlock_irqrestore(&queue->lock, cpu_flags); | ||
226 | no_packet: | 261 | no_packet: |
227 | *err = error; | 262 | *err = error; |
228 | return NULL; | 263 | return NULL; |
@@ -622,7 +657,8 @@ __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len) | |||
622 | !skb->csum_complete_sw) | 657 | !skb->csum_complete_sw) |
623 | netdev_rx_csum_fault(skb->dev); | 658 | netdev_rx_csum_fault(skb->dev); |
624 | } | 659 | } |
625 | skb->csum_valid = !sum; | 660 | if (!skb_shared(skb)) |
661 | skb->csum_valid = !sum; | ||
626 | return sum; | 662 | return sum; |
627 | } | 663 | } |
628 | EXPORT_SYMBOL(__skb_checksum_complete_head); | 664 | EXPORT_SYMBOL(__skb_checksum_complete_head); |
@@ -642,11 +678,13 @@ __sum16 __skb_checksum_complete(struct sk_buff *skb) | |||
642 | netdev_rx_csum_fault(skb->dev); | 678 | netdev_rx_csum_fault(skb->dev); |
643 | } | 679 | } |
644 | 680 | ||
645 | /* Save full packet checksum */ | 681 | if (!skb_shared(skb)) { |
646 | skb->csum = csum; | 682 | /* Save full packet checksum */ |
647 | skb->ip_summed = CHECKSUM_COMPLETE; | 683 | skb->csum = csum; |
648 | skb->csum_complete_sw = 1; | 684 | skb->ip_summed = CHECKSUM_COMPLETE; |
649 | skb->csum_valid = !sum; | 685 | skb->csum_complete_sw = 1; |
686 | skb->csum_valid = !sum; | ||
687 | } | ||
650 | 688 | ||
651 | return sum; | 689 | return sum; |
652 | } | 690 | } |
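The skb_set_peeked() helper added above clones a shared skb before setting the peeked flag and splices the clone into the queue by rewiring the neighbours' prev/next pointers. Below is a minimal userspace sketch of that unshare-then-replace pattern on a toy doubly linked list; the struct node type, the set_peeked() name and the refcount handling are illustrative stand-ins, not kernel APIs.

#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *prev, *next;
	int refcnt;	/* stand-in for skb_shared() */
	int peeked;
};

/* Mark a node as peeked; if it is shared, work on a private copy and
 * splice the copy into the list in place of the original. */
static struct node *set_peeked(struct node *n)
{
	struct node *copy;

	if (n->peeked)
		return n;

	if (n->refcnt == 1) {		/* not shared: modify in place */
		n->peeked = 1;
		return n;
	}

	copy = malloc(sizeof(*copy));
	if (!copy)
		return NULL;
	*copy = *n;
	copy->refcnt = 1;

	/* Rewire the neighbours so the copy takes the original's slot. */
	n->prev->next = copy;
	n->next->prev = copy;
	copy->prev = n->prev;
	copy->next = n->next;

	n->refcnt--;		/* drop this path's reference to the original */
	copy->peeked = 1;
	return copy;
}

int main(void)
{
	struct node head = { &head, &head, 1, 0 };	/* list sentinel */
	struct node a = { &head, &head, 2, 0 };		/* shared element */
	struct node *cur;

	head.next = &a;
	head.prev = &a;

	cur = set_peeked(&a);
	printf("peeked=%d replaced=%s\n", cur->peeked, cur == &a ? "no" : "yes");
	if (cur != &a)
		free(cur);
	return 0;
}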
diff --git a/net/core/dst.c b/net/core/dst.c index e956ce6d1378..002144bea935 100644 --- a/net/core/dst.c +++ b/net/core/dst.c | |||
@@ -284,7 +284,9 @@ void dst_release(struct dst_entry *dst) | |||
284 | int newrefcnt; | 284 | int newrefcnt; |
285 | 285 | ||
286 | newrefcnt = atomic_dec_return(&dst->__refcnt); | 286 | newrefcnt = atomic_dec_return(&dst->__refcnt); |
287 | WARN_ON(newrefcnt < 0); | 287 | if (unlikely(newrefcnt < 0)) |
288 | net_warn_ratelimited("%s: dst:%p refcnt:%d\n", | ||
289 | __func__, dst, newrefcnt); | ||
288 | if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt) | 290 | if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt) |
289 | call_rcu(&dst->rcu_head, dst_destroy_rcu); | 291 | call_rcu(&dst->rcu_head, dst_destroy_rcu); |
290 | } | 292 | } |
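The dst_release() hunk replaces an unconditional WARN_ON() with a rate-limited message when the refcount goes negative, so a misbehaving caller cannot flood the log. A minimal userspace sketch of interval-based rate limiting follows; warn_ratelimited() and the five-second window are invented for illustration and do not reproduce the kernel's net_warn_ratelimited() internals.

#include <stdio.h>
#include <time.h>

/* Print at most one warning per 'interval' seconds; hits inside the
 * window are counted and reported with the next printed warning. */
static void warn_ratelimited(const char *msg, int interval)
{
	static time_t last;
	static unsigned int suppressed;
	time_t now = time(NULL);

	if (last && now - last < interval) {
		suppressed++;
		return;
	}
	if (suppressed)
		fprintf(stderr, "(%u warnings suppressed)\n", suppressed);
	fprintf(stderr, "warning: %s\n", msg);
	last = now;
	suppressed = 0;
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		warn_ratelimited("refcount went negative", 5);
	return 0;
}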
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 9e433d58d265..dc004b1e1f85 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -1804,10 +1804,13 @@ static int do_setlink(const struct sk_buff *skb, | |||
1804 | goto errout; | 1804 | goto errout; |
1805 | 1805 | ||
1806 | nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) { | 1806 | nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) { |
1807 | if (nla_type(attr) != IFLA_VF_PORT) | 1807 | if (nla_type(attr) != IFLA_VF_PORT || |
1808 | continue; | 1808 | nla_len(attr) < NLA_HDRLEN) { |
1809 | err = nla_parse_nested(port, IFLA_PORT_MAX, | 1809 | err = -EINVAL; |
1810 | attr, ifla_port_policy); | 1810 | goto errout; |
1811 | } | ||
1812 | err = nla_parse_nested(port, IFLA_PORT_MAX, attr, | ||
1813 | ifla_port_policy); | ||
1811 | if (err < 0) | 1814 | if (err < 0) |
1812 | goto errout; | 1815 | goto errout; |
1813 | if (!port[IFLA_PORT_VF]) { | 1816 | if (!port[IFLA_PORT_VF]) { |
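The rtnetlink hunk tightens IFLA_VF_PORTS parsing: a nested attribute that is mistyped or shorter than its own header is now rejected with -EINVAL instead of being skipped. The general rule for TLV-style input is to validate the declared length against both the header size and the remaining buffer before touching the payload; a hedged sketch follows, using an invented struct tlv rather than the real nlattr layout.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Illustrative TLV header, not the netlink nlattr layout. */
struct tlv {
	uint16_t type;
	uint16_t len;	/* total length including this header */
};

/* Return the payload only if the attribute is sane: the declared length
 * must cover at least the header and must not run past the buffer. */
static const void *tlv_payload(const struct tlv *attr, size_t remaining,
			       size_t *payload_len)
{
	if (remaining < sizeof(*attr))
		return NULL;
	if (attr->len < sizeof(*attr) || attr->len > remaining)
		return NULL;	/* reject malformed input, don't skip it */
	*payload_len = attr->len - sizeof(*attr);
	return (const char *)attr + sizeof(*attr);
}

int main(void)
{
	struct tlv bogus = { .type = 1, .len = 3 };	/* shorter than its own header */
	size_t plen;

	printf("%s\n", tlv_payload(&bogus, sizeof(bogus), &plen) ?
	       "accepted" : "rejected");
	return 0;
}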
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c index 90c0e8386116..574fad9cca05 100644 --- a/net/ipv4/datagram.c +++ b/net/ipv4/datagram.c | |||
@@ -20,7 +20,7 @@ | |||
20 | #include <net/route.h> | 20 | #include <net/route.h> |
21 | #include <net/tcp_states.h> | 21 | #include <net/tcp_states.h> |
22 | 22 | ||
23 | int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | 23 | int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) |
24 | { | 24 | { |
25 | struct inet_sock *inet = inet_sk(sk); | 25 | struct inet_sock *inet = inet_sk(sk); |
26 | struct sockaddr_in *usin = (struct sockaddr_in *) uaddr; | 26 | struct sockaddr_in *usin = (struct sockaddr_in *) uaddr; |
@@ -39,8 +39,6 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
39 | 39 | ||
40 | sk_dst_reset(sk); | 40 | sk_dst_reset(sk); |
41 | 41 | ||
42 | lock_sock(sk); | ||
43 | |||
44 | oif = sk->sk_bound_dev_if; | 42 | oif = sk->sk_bound_dev_if; |
45 | saddr = inet->inet_saddr; | 43 | saddr = inet->inet_saddr; |
46 | if (ipv4_is_multicast(usin->sin_addr.s_addr)) { | 44 | if (ipv4_is_multicast(usin->sin_addr.s_addr)) { |
@@ -82,9 +80,19 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
82 | sk_dst_set(sk, &rt->dst); | 80 | sk_dst_set(sk, &rt->dst); |
83 | err = 0; | 81 | err = 0; |
84 | out: | 82 | out: |
85 | release_sock(sk); | ||
86 | return err; | 83 | return err; |
87 | } | 84 | } |
85 | EXPORT_SYMBOL(__ip4_datagram_connect); | ||
86 | |||
87 | int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | ||
88 | { | ||
89 | int res; | ||
90 | |||
91 | lock_sock(sk); | ||
92 | res = __ip4_datagram_connect(sk, uaddr, addr_len); | ||
93 | release_sock(sk); | ||
94 | return res; | ||
95 | } | ||
88 | EXPORT_SYMBOL(ip4_datagram_connect); | 96 | EXPORT_SYMBOL(ip4_datagram_connect); |
89 | 97 | ||
90 | /* Because UDP xmit path can manipulate sk_dst_cache without holding | 98 | /* Because UDP xmit path can manipulate sk_dst_cache without holding |
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 5f9b063bbe8a..0cb9165421d4 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c | |||
@@ -624,22 +624,21 @@ EXPORT_SYMBOL_GPL(inet_hashinfo_init); | |||
624 | 624 | ||
625 | int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo) | 625 | int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo) |
626 | { | 626 | { |
627 | unsigned int locksz = sizeof(spinlock_t); | ||
627 | unsigned int i, nblocks = 1; | 628 | unsigned int i, nblocks = 1; |
628 | 629 | ||
629 | if (sizeof(spinlock_t) != 0) { | 630 | if (locksz != 0) { |
630 | /* allocate 2 cache lines or at least one spinlock per cpu */ | 631 | /* allocate 2 cache lines or at least one spinlock per cpu */ |
631 | nblocks = max_t(unsigned int, | 632 | nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U); |
632 | 2 * L1_CACHE_BYTES / sizeof(spinlock_t), | ||
633 | 1); | ||
634 | nblocks = roundup_pow_of_two(nblocks * num_possible_cpus()); | 633 | nblocks = roundup_pow_of_two(nblocks * num_possible_cpus()); |
635 | 634 | ||
636 | /* no more locks than number of hash buckets */ | 635 | /* no more locks than number of hash buckets */ |
637 | nblocks = min(nblocks, hashinfo->ehash_mask + 1); | 636 | nblocks = min(nblocks, hashinfo->ehash_mask + 1); |
638 | 637 | ||
639 | hashinfo->ehash_locks = kmalloc_array(nblocks, sizeof(spinlock_t), | 638 | hashinfo->ehash_locks = kmalloc_array(nblocks, locksz, |
640 | GFP_KERNEL | __GFP_NOWARN); | 639 | GFP_KERNEL | __GFP_NOWARN); |
641 | if (!hashinfo->ehash_locks) | 640 | if (!hashinfo->ehash_locks) |
642 | hashinfo->ehash_locks = vmalloc(nblocks * sizeof(spinlock_t)); | 641 | hashinfo->ehash_locks = vmalloc(nblocks * locksz); |
643 | 642 | ||
644 | if (!hashinfo->ehash_locks) | 643 | if (!hashinfo->ehash_locks) |
645 | return -ENOMEM; | 644 | return -ENOMEM; |
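The inet_ehash_locks_alloc() hunk hoists sizeof(spinlock_t) into locksz and keeps the sizing policy its comment describes: roughly two cache lines worth of spinlocks, scaled by the number of possible CPUs, rounded up to a power of two and capped at the number of hash buckets. The userspace sketch below reproduces just that arithmetic; locksz, the cache-line size, the CPU count and the bucket count are assumed sample values, not values read from a real system.

#include <stdio.h>

/* Round up to the next power of two (v > 0). */
static unsigned int roundup_pow_of_two(unsigned int v)
{
	v--;
	v |= v >> 1; v |= v >> 2; v |= v >> 4;
	v |= v >> 8; v |= v >> 16;
	return v + 1;
}

int main(void)
{
	/* Assumed inputs, stand-ins for the kernel's constants. */
	unsigned int locksz = 4;            /* sizeof(spinlock_t) without lock debugging */
	unsigned int l1_cache_bytes = 64;   /* L1_CACHE_BYTES */
	unsigned int ncpus = 12;            /* num_possible_cpus() */
	unsigned int nbuckets = 65536;      /* ehash_mask + 1 */
	unsigned int nblocks;

	/* two cache lines of spinlocks, at least one */
	nblocks = 2 * l1_cache_bytes / locksz;
	if (nblocks < 1)
		nblocks = 1;
	/* scale by possible CPUs, round up to a power of two */
	nblocks = roundup_pow_of_two(nblocks * ncpus);
	/* never more locks than hash buckets */
	if (nblocks > nbuckets)
		nblocks = nbuckets;

	printf("allocating %u locks of %u bytes each\n", nblocks, locksz);
	return 0;
}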
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index a50dc6d408d1..31f71b15cfba 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c | |||
@@ -351,7 +351,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) | |||
351 | ihl = ip_hdrlen(skb); | 351 | ihl = ip_hdrlen(skb); |
352 | 352 | ||
353 | /* Determine the position of this fragment. */ | 353 | /* Determine the position of this fragment. */ |
354 | end = offset + skb->len - ihl; | 354 | end = offset + skb->len - skb_network_offset(skb) - ihl; |
355 | err = -EINVAL; | 355 | err = -EINVAL; |
356 | 356 | ||
357 | /* Is this the final fragment? */ | 357 | /* Is this the final fragment? */ |
@@ -381,7 +381,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) | |||
381 | goto err; | 381 | goto err; |
382 | 382 | ||
383 | err = -ENOMEM; | 383 | err = -ENOMEM; |
384 | if (!pskb_pull(skb, ihl)) | 384 | if (!pskb_pull(skb, skb_network_offset(skb) + ihl)) |
385 | goto err; | 385 | goto err; |
386 | 386 | ||
387 | err = pskb_trim_rcsum(skb, end - offset); | 387 | err = pskb_trim_rcsum(skb, end - offset); |
@@ -641,6 +641,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, | |||
641 | iph->frag_off = 0; | 641 | iph->frag_off = 0; |
642 | } | 642 | } |
643 | 643 | ||
644 | ip_send_check(iph); | ||
645 | |||
644 | IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS); | 646 | IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS); |
645 | qp->q.fragments = NULL; | 647 | qp->q.fragments = NULL; |
646 | qp->q.fragments_tail = NULL; | 648 | qp->q.fragments_tail = NULL; |
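The reassembly hunk adds an ip_send_check() call so the rebuilt IPv4 header carries a valid checksum after tot_len and frag_off are rewritten. The header checksum itself is the standard 16-bit one's-complement of the one's-complement sum of the header words (RFC 1071); the sketch below computes it in userspace over an illustrative header with the checksum field zeroed.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* 16-bit one's-complement checksum over an IPv4 header (RFC 1071).
 * 'words' is the header as 16-bit words with the checksum field
 * zeroed; 'n' is the number of words (ihl * 2). */
static uint16_t ip_hdr_csum(const uint16_t *words, size_t n)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i < n; i++)
		sum += words[i];
	while (sum >> 16)			/* fold the carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	/* Illustrative 20-byte header; field values are made up. */
	uint16_t hdr[10] = {
		0x4500, 0x0073, 0x0000, 0x4000, 0x4011,
		0x0000,				/* checksum field zeroed */
		0xc0a8, 0x0001, 0xc0a8, 0x00c7,
	};

	printf("checksum: 0x%04x\n", ip_hdr_csum(hdr, 10));
	return 0;
}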
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 684f095d196e..728f5b3d3c64 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -1917,14 +1917,13 @@ void tcp_enter_loss(struct sock *sk) | |||
1917 | const struct inet_connection_sock *icsk = inet_csk(sk); | 1917 | const struct inet_connection_sock *icsk = inet_csk(sk); |
1918 | struct tcp_sock *tp = tcp_sk(sk); | 1918 | struct tcp_sock *tp = tcp_sk(sk); |
1919 | struct sk_buff *skb; | 1919 | struct sk_buff *skb; |
1920 | bool new_recovery = false; | 1920 | bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery; |
1921 | bool is_reneg; /* is receiver reneging on SACKs? */ | 1921 | bool is_reneg; /* is receiver reneging on SACKs? */ |
1922 | 1922 | ||
1923 | /* Reduce ssthresh if it has not yet been made inside this window. */ | 1923 | /* Reduce ssthresh if it has not yet been made inside this window. */ |
1924 | if (icsk->icsk_ca_state <= TCP_CA_Disorder || | 1924 | if (icsk->icsk_ca_state <= TCP_CA_Disorder || |
1925 | !after(tp->high_seq, tp->snd_una) || | 1925 | !after(tp->high_seq, tp->snd_una) || |
1926 | (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) { | 1926 | (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) { |
1927 | new_recovery = true; | ||
1928 | tp->prior_ssthresh = tcp_current_ssthresh(sk); | 1927 | tp->prior_ssthresh = tcp_current_ssthresh(sk); |
1929 | tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); | 1928 | tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); |
1930 | tcp_ca_event(sk, CA_EVENT_LOSS); | 1929 | tcp_ca_event(sk, CA_EVENT_LOSS); |
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 62d908e64eeb..b10a88986a98 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c | |||
@@ -40,7 +40,7 @@ static bool ipv6_mapped_addr_any(const struct in6_addr *a) | |||
40 | return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0); | 40 | return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0); |
41 | } | 41 | } |
42 | 42 | ||
43 | int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | 43 | static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) |
44 | { | 44 | { |
45 | struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; | 45 | struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; |
46 | struct inet_sock *inet = inet_sk(sk); | 46 | struct inet_sock *inet = inet_sk(sk); |
@@ -56,7 +56,7 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
56 | if (usin->sin6_family == AF_INET) { | 56 | if (usin->sin6_family == AF_INET) { |
57 | if (__ipv6_only_sock(sk)) | 57 | if (__ipv6_only_sock(sk)) |
58 | return -EAFNOSUPPORT; | 58 | return -EAFNOSUPPORT; |
59 | err = ip4_datagram_connect(sk, uaddr, addr_len); | 59 | err = __ip4_datagram_connect(sk, uaddr, addr_len); |
60 | goto ipv4_connected; | 60 | goto ipv4_connected; |
61 | } | 61 | } |
62 | 62 | ||
@@ -98,9 +98,9 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | |||
98 | sin.sin_addr.s_addr = daddr->s6_addr32[3]; | 98 | sin.sin_addr.s_addr = daddr->s6_addr32[3]; |
99 | sin.sin_port = usin->sin6_port; | 99 | sin.sin_port = usin->sin6_port; |
100 | 100 | ||
101 | err = ip4_datagram_connect(sk, | 101 | err = __ip4_datagram_connect(sk, |
102 | (struct sockaddr *) &sin, | 102 | (struct sockaddr *) &sin, |
103 | sizeof(sin)); | 103 | sizeof(sin)); |
104 | 104 | ||
105 | ipv4_connected: | 105 | ipv4_connected: |
106 | if (err) | 106 | if (err) |
@@ -204,6 +204,16 @@ out: | |||
204 | fl6_sock_release(flowlabel); | 204 | fl6_sock_release(flowlabel); |
205 | return err; | 205 | return err; |
206 | } | 206 | } |
207 | |||
208 | int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) | ||
209 | { | ||
210 | int res; | ||
211 | |||
212 | lock_sock(sk); | ||
213 | res = __ip6_datagram_connect(sk, uaddr, addr_len); | ||
214 | release_sock(sk); | ||
215 | return res; | ||
216 | } | ||
207 | EXPORT_SYMBOL_GPL(ip6_datagram_connect); | 217 | EXPORT_SYMBOL_GPL(ip6_datagram_connect); |
208 | 218 | ||
209 | int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *uaddr, | 219 | int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *uaddr, |
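The two datagram hunks above split connect() handling into an unlocked __ip4_datagram_connect()/__ip6_datagram_connect() core plus thin public wrappers that take and release the socket lock, so the IPv6 path can call the IPv4 core while already holding the lock. The pthread sketch below shows the same locked-wrapper/unlocked-core shape in userspace; do_connect(), do_connect_mapped() and obj_lock are invented names, not socket APIs.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;
static int obj_state;

/* Core operation: caller must already hold obj_lock. */
static int __do_connect(int target)
{
	obj_state = target;		/* protected update */
	return 0;
}

/* Public entry point: takes the lock, then calls the core. */
static int do_connect(int target)
{
	int ret;

	pthread_mutex_lock(&obj_lock);
	ret = __do_connect(target);
	pthread_mutex_unlock(&obj_lock);
	return ret;
}

/* A second public entry point reuses the unlocked core under its own
 * lock section, the way __ip6_datagram_connect() reuses
 * __ip4_datagram_connect() for v4-mapped destinations. */
static int do_connect_mapped(int target)
{
	int ret;

	pthread_mutex_lock(&obj_lock);
	ret = __do_connect(target + 1000);	/* extra locked work would go here */
	pthread_mutex_unlock(&obj_lock);
	return ret;
}

int main(void)
{
	do_connect(1);
	do_connect_mapped(2);
	printf("state=%d\n", obj_state);
	return 0;
}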
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index e893cd18612f..08b62047c67f 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c | |||
@@ -292,8 +292,6 @@ static struct packet_offload ipv6_packet_offload __read_mostly = { | |||
292 | static const struct net_offload sit_offload = { | 292 | static const struct net_offload sit_offload = { |
293 | .callbacks = { | 293 | .callbacks = { |
294 | .gso_segment = ipv6_gso_segment, | 294 | .gso_segment = ipv6_gso_segment, |
295 | .gro_receive = ipv6_gro_receive, | ||
296 | .gro_complete = ipv6_gro_complete, | ||
297 | }, | 295 | }, |
298 | }; | 296 | }; |
299 | 297 | ||
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c index 29236e832e44..c09c0131bfa2 100644 --- a/net/mac80211/debugfs_netdev.c +++ b/net/mac80211/debugfs_netdev.c | |||
@@ -723,6 +723,7 @@ void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata) | |||
723 | 723 | ||
724 | debugfs_remove_recursive(sdata->vif.debugfs_dir); | 724 | debugfs_remove_recursive(sdata->vif.debugfs_dir); |
725 | sdata->vif.debugfs_dir = NULL; | 725 | sdata->vif.debugfs_dir = NULL; |
726 | sdata->debugfs.subdir_stations = NULL; | ||
726 | } | 727 | } |
727 | 728 | ||
728 | void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata) | 729 | void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata) |
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index ed1edac14372..553ac6dd4867 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
@@ -1863,10 +1863,6 @@ void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata) | |||
1863 | ieee80211_teardown_sdata(sdata); | 1863 | ieee80211_teardown_sdata(sdata); |
1864 | } | 1864 | } |
1865 | 1865 | ||
1866 | /* | ||
1867 | * Remove all interfaces, may only be called at hardware unregistration | ||
1868 | * time because it doesn't do RCU-safe list removals. | ||
1869 | */ | ||
1870 | void ieee80211_remove_interfaces(struct ieee80211_local *local) | 1866 | void ieee80211_remove_interfaces(struct ieee80211_local *local) |
1871 | { | 1867 | { |
1872 | struct ieee80211_sub_if_data *sdata, *tmp; | 1868 | struct ieee80211_sub_if_data *sdata, *tmp; |
@@ -1875,14 +1871,21 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local) | |||
1875 | 1871 | ||
1876 | ASSERT_RTNL(); | 1872 | ASSERT_RTNL(); |
1877 | 1873 | ||
1878 | /* | 1874 | /* Before destroying the interfaces, make sure they're all stopped so |
1879 | * Close all AP_VLAN interfaces first, as otherwise they | 1875 | * that the hardware is stopped. Otherwise, the driver might still be |
1880 | * might be closed while the AP interface they belong to | 1876 | * iterating the interfaces during the shutdown, e.g. from a worker |
1881 | * is closed, causing unregister_netdevice_many() to crash. | 1877 | * or from RX processing or similar, and if it does so (using atomic |
1878 | * iteration) while we're manipulating the list, the iteration will | ||
1879 | * crash. | ||
1880 | * | ||
1881 | * After this, the hardware should be stopped and the driver should | ||
1882 | * have stopped all of its activities, so that we can do RCU-unaware | ||
1883 | * manipulations of the interface list below. | ||
1882 | */ | 1884 | */ |
1883 | list_for_each_entry(sdata, &local->interfaces, list) | 1885 | cfg80211_shutdown_all_interfaces(local->hw.wiphy); |
1884 | if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) | 1886 | |
1885 | dev_close(sdata->dev); | 1887 | WARN(local->open_count, "%s: open count remains %d\n", |
1888 | wiphy_name(local->hw.wiphy), local->open_count); | ||
1886 | 1889 | ||
1887 | mutex_lock(&local->iflist_mtx); | 1890 | mutex_lock(&local->iflist_mtx); |
1888 | list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) { | 1891 | list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) { |
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c index 5438d13e2f00..3b59099413fb 100644 --- a/net/mac80211/mesh_plink.c +++ b/net/mac80211/mesh_plink.c | |||
@@ -306,7 +306,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata, | |||
306 | if (action == WLAN_SP_MESH_PEERING_CONFIRM) { | 306 | if (action == WLAN_SP_MESH_PEERING_CONFIRM) { |
307 | /* AID */ | 307 | /* AID */ |
308 | pos = skb_put(skb, 2); | 308 | pos = skb_put(skb, 2); |
309 | put_unaligned_le16(plid, pos + 2); | 309 | put_unaligned_le16(plid, pos); |
310 | } | 310 | } |
311 | if (ieee80211_add_srates_ie(sdata, skb, true, band) || | 311 | if (ieee80211_add_srates_ie(sdata, skb, true, band) || |
312 | ieee80211_add_ext_srates_ie(sdata, skb, true, band) || | 312 | ieee80211_add_ext_srates_ie(sdata, skb, true, band) || |
@@ -1122,6 +1122,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, | |||
1122 | WLAN_SP_MESH_PEERING_CONFIRM) { | 1122 | WLAN_SP_MESH_PEERING_CONFIRM) { |
1123 | baseaddr += 4; | 1123 | baseaddr += 4; |
1124 | baselen += 4; | 1124 | baselen += 4; |
1125 | |||
1126 | if (baselen > len) | ||
1127 | return; | ||
1125 | } | 1128 | } |
1126 | ieee802_11_parse_elems(baseaddr, len - baselen, true, &elems); | 1129 | ieee802_11_parse_elems(baseaddr, len - baselen, true, &elems); |
1127 | mesh_process_plink_frame(sdata, mgmt, &elems); | 1130 | mesh_process_plink_frame(sdata, mgmt, &elems); |
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c index 06b60980c62c..b676b9fa707b 100644 --- a/net/mac80211/pm.c +++ b/net/mac80211/pm.c | |||
@@ -76,6 +76,22 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) | |||
76 | if (sdata->vif.type != NL80211_IFTYPE_STATION) | 76 | if (sdata->vif.type != NL80211_IFTYPE_STATION) |
77 | continue; | 77 | continue; |
78 | ieee80211_mgd_quiesce(sdata); | 78 | ieee80211_mgd_quiesce(sdata); |
79 | /* If suspended during TX in progress, and wowlan | ||
80 | * is enabled (connection will be active) there | ||
81 | * can be a race where the driver is put out | ||
82 | * of power-save due to TX and during suspend | ||
83 | * dynamic_ps_timer is cancelled and TX packet | ||
84 | * is flushed, leaving the driver in ACTIVE even | ||
85 | * after resuming until dynamic_ps_timer puts | ||
86 | * driver back in DOZE. | ||
87 | */ | ||
88 | if (sdata->u.mgd.associated && | ||
89 | sdata->u.mgd.powersave && | ||
90 | !(local->hw.conf.flags & IEEE80211_CONF_PS)) { | ||
91 | local->hw.conf.flags |= IEEE80211_CONF_PS; | ||
92 | ieee80211_hw_config(local, | ||
93 | IEEE80211_CONF_CHANGE_PS); | ||
94 | } | ||
79 | } | 95 | } |
80 | 96 | ||
81 | err = drv_suspend(local, wowlan); | 97 | err = drv_suspend(local, wowlan); |
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c index ad31b2dab4f5..8db6e2994bbc 100644 --- a/net/mac80211/tdls.c +++ b/net/mac80211/tdls.c | |||
@@ -60,6 +60,7 @@ ieee80211_tdls_add_subband(struct ieee80211_sub_if_data *sdata, | |||
60 | struct ieee80211_channel *ch; | 60 | struct ieee80211_channel *ch; |
61 | struct cfg80211_chan_def chandef; | 61 | struct cfg80211_chan_def chandef; |
62 | int i, subband_start; | 62 | int i, subband_start; |
63 | struct wiphy *wiphy = sdata->local->hw.wiphy; | ||
63 | 64 | ||
64 | for (i = start; i <= end; i += spacing) { | 65 | for (i = start; i <= end; i += spacing) { |
65 | if (!ch_cnt) | 66 | if (!ch_cnt) |
@@ -70,9 +71,8 @@ ieee80211_tdls_add_subband(struct ieee80211_sub_if_data *sdata, | |||
70 | /* we will be active on the channel */ | 71 | /* we will be active on the channel */ |
71 | cfg80211_chandef_create(&chandef, ch, | 72 | cfg80211_chandef_create(&chandef, ch, |
72 | NL80211_CHAN_NO_HT); | 73 | NL80211_CHAN_NO_HT); |
73 | if (cfg80211_reg_can_beacon(sdata->local->hw.wiphy, | 74 | if (cfg80211_reg_can_beacon_relax(wiphy, &chandef, |
74 | &chandef, | 75 | sdata->wdev.iftype)) { |
75 | sdata->wdev.iftype)) { | ||
76 | ch_cnt++; | 76 | ch_cnt++; |
77 | /* | 77 | /* |
78 | * check if the next channel is also part of | 78 | * check if the next channel is also part of |
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 8410bb3bf5e8..b8233505bf9f 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c | |||
@@ -1117,7 +1117,9 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx, | |||
1117 | queued = true; | 1117 | queued = true; |
1118 | info->control.vif = &tx->sdata->vif; | 1118 | info->control.vif = &tx->sdata->vif; |
1119 | info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; | 1119 | info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; |
1120 | info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS; | 1120 | info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS | |
1121 | IEEE80211_TX_CTL_NO_PS_BUFFER | | ||
1122 | IEEE80211_TX_STATUS_EOSP; | ||
1121 | __skb_queue_tail(&tid_tx->pending, skb); | 1123 | __skb_queue_tail(&tid_tx->pending, skb); |
1122 | if (skb_queue_len(&tid_tx->pending) > STA_MAX_TX_BUFFER) | 1124 | if (skb_queue_len(&tid_tx->pending) > STA_MAX_TX_BUFFER) |
1123 | purge_skb = __skb_dequeue(&tid_tx->pending); | 1125 | purge_skb = __skb_dequeue(&tid_tx->pending); |
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 9a0ae7172f92..d8e2e3918ce2 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -357,25 +357,52 @@ err1: | |||
357 | return NULL; | 357 | return NULL; |
358 | } | 358 | } |
359 | 359 | ||
360 | |||
361 | static void | ||
362 | __netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec, | ||
363 | unsigned int order) | ||
364 | { | ||
365 | struct netlink_sock *nlk = nlk_sk(sk); | ||
366 | struct sk_buff_head *queue; | ||
367 | struct netlink_ring *ring; | ||
368 | |||
369 | queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; | ||
370 | ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring; | ||
371 | |||
372 | spin_lock_bh(&queue->lock); | ||
373 | |||
374 | ring->frame_max = req->nm_frame_nr - 1; | ||
375 | ring->head = 0; | ||
376 | ring->frame_size = req->nm_frame_size; | ||
377 | ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE; | ||
378 | |||
379 | swap(ring->pg_vec_len, req->nm_block_nr); | ||
380 | swap(ring->pg_vec_order, order); | ||
381 | swap(ring->pg_vec, pg_vec); | ||
382 | |||
383 | __skb_queue_purge(queue); | ||
384 | spin_unlock_bh(&queue->lock); | ||
385 | |||
386 | WARN_ON(atomic_read(&nlk->mapped)); | ||
387 | |||
388 | if (pg_vec) | ||
389 | free_pg_vec(pg_vec, order, req->nm_block_nr); | ||
390 | } | ||
391 | |||
360 | static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, | 392 | static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, |
361 | bool closing, bool tx_ring) | 393 | bool tx_ring) |
362 | { | 394 | { |
363 | struct netlink_sock *nlk = nlk_sk(sk); | 395 | struct netlink_sock *nlk = nlk_sk(sk); |
364 | struct netlink_ring *ring; | 396 | struct netlink_ring *ring; |
365 | struct sk_buff_head *queue; | ||
366 | void **pg_vec = NULL; | 397 | void **pg_vec = NULL; |
367 | unsigned int order = 0; | 398 | unsigned int order = 0; |
368 | int err; | ||
369 | 399 | ||
370 | ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring; | 400 | ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring; |
371 | queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue; | ||
372 | 401 | ||
373 | if (!closing) { | 402 | if (atomic_read(&nlk->mapped)) |
374 | if (atomic_read(&nlk->mapped)) | 403 | return -EBUSY; |
375 | return -EBUSY; | 404 | if (atomic_read(&ring->pending)) |
376 | if (atomic_read(&ring->pending)) | 405 | return -EBUSY; |
377 | return -EBUSY; | ||
378 | } | ||
379 | 406 | ||
380 | if (req->nm_block_nr) { | 407 | if (req->nm_block_nr) { |
381 | if (ring->pg_vec != NULL) | 408 | if (ring->pg_vec != NULL) |
@@ -407,31 +434,19 @@ static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, | |||
407 | return -EINVAL; | 434 | return -EINVAL; |
408 | } | 435 | } |
409 | 436 | ||
410 | err = -EBUSY; | ||
411 | mutex_lock(&nlk->pg_vec_lock); | 437 | mutex_lock(&nlk->pg_vec_lock); |
412 | if (closing || atomic_read(&nlk->mapped) == 0) { | 438 | if (atomic_read(&nlk->mapped) == 0) { |
413 | err = 0; | 439 | __netlink_set_ring(sk, req, tx_ring, pg_vec, order); |
414 | spin_lock_bh(&queue->lock); | 440 | mutex_unlock(&nlk->pg_vec_lock); |
415 | 441 | return 0; | |
416 | ring->frame_max = req->nm_frame_nr - 1; | ||
417 | ring->head = 0; | ||
418 | ring->frame_size = req->nm_frame_size; | ||
419 | ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE; | ||
420 | |||
421 | swap(ring->pg_vec_len, req->nm_block_nr); | ||
422 | swap(ring->pg_vec_order, order); | ||
423 | swap(ring->pg_vec, pg_vec); | ||
424 | |||
425 | __skb_queue_purge(queue); | ||
426 | spin_unlock_bh(&queue->lock); | ||
427 | |||
428 | WARN_ON(atomic_read(&nlk->mapped)); | ||
429 | } | 442 | } |
443 | |||
430 | mutex_unlock(&nlk->pg_vec_lock); | 444 | mutex_unlock(&nlk->pg_vec_lock); |
431 | 445 | ||
432 | if (pg_vec) | 446 | if (pg_vec) |
433 | free_pg_vec(pg_vec, order, req->nm_block_nr); | 447 | free_pg_vec(pg_vec, order, req->nm_block_nr); |
434 | return err; | 448 | |
449 | return -EBUSY; | ||
435 | } | 450 | } |
436 | 451 | ||
437 | static void netlink_mm_open(struct vm_area_struct *vma) | 452 | static void netlink_mm_open(struct vm_area_struct *vma) |
@@ -900,10 +915,10 @@ static void netlink_sock_destruct(struct sock *sk) | |||
900 | 915 | ||
901 | memset(&req, 0, sizeof(req)); | 916 | memset(&req, 0, sizeof(req)); |
902 | if (nlk->rx_ring.pg_vec) | 917 | if (nlk->rx_ring.pg_vec) |
903 | netlink_set_ring(sk, &req, true, false); | 918 | __netlink_set_ring(sk, &req, false, NULL, 0); |
904 | memset(&req, 0, sizeof(req)); | 919 | memset(&req, 0, sizeof(req)); |
905 | if (nlk->tx_ring.pg_vec) | 920 | if (nlk->tx_ring.pg_vec) |
906 | netlink_set_ring(sk, &req, true, true); | 921 | __netlink_set_ring(sk, &req, true, NULL, 0); |
907 | } | 922 | } |
908 | #endif /* CONFIG_NETLINK_MMAP */ | 923 | #endif /* CONFIG_NETLINK_MMAP */ |
909 | 924 | ||
@@ -2223,7 +2238,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname, | |||
2223 | return -EINVAL; | 2238 | return -EINVAL; |
2224 | if (copy_from_user(&req, optval, sizeof(req))) | 2239 | if (copy_from_user(&req, optval, sizeof(req))) |
2225 | return -EFAULT; | 2240 | return -EFAULT; |
2226 | err = netlink_set_ring(sk, &req, false, | 2241 | err = netlink_set_ring(sk, &req, |
2227 | optname == NETLINK_TX_RING); | 2242 | optname == NETLINK_TX_RING); |
2228 | break; | 2243 | break; |
2229 | } | 2244 | } |
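__netlink_set_ring() publishes the new ring by swap()ing the page-vector fields while holding the queue spinlock and frees the old vector only after the lock is dropped, which keeps the critical section short. The userspace sketch below mirrors that swap-under-lock, free-outside-lock shape; ring_replace() and its globals are invented for illustration.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;
static char *ring_buf;
static size_t ring_len;

/* Install a new buffer: swap the pointer under the lock, free the old
 * buffer after unlocking, as __netlink_set_ring() does with pg_vec. */
static void ring_replace(char *new_buf, size_t new_len)
{
	pthread_mutex_lock(&ring_lock);
	char *old = ring_buf;		/* swap under the lock */
	ring_buf = new_buf;
	ring_len = new_len;
	pthread_mutex_unlock(&ring_lock);

	free(old);			/* heavier work happens outside the lock */
}

int main(void)
{
	ring_replace(strdup("first"), 5);
	ring_replace(strdup("second"), 6);
	printf("%s (%zu bytes)\n", ring_buf, ring_len);
	free(ring_buf);
	return 0;
}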
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c index 4613df8c8290..65523948fb95 100644 --- a/net/openvswitch/flow_table.c +++ b/net/openvswitch/flow_table.c | |||
@@ -752,7 +752,7 @@ int ovs_flow_init(void) | |||
752 | BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long)); | 752 | BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long)); |
753 | 753 | ||
754 | flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow) | 754 | flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow) |
755 | + (num_possible_nodes() | 755 | + (nr_node_ids |
756 | * sizeof(struct flow_stats *)), | 756 | * sizeof(struct flow_stats *)), |
757 | 0, 0, NULL); | 757 | 0, 0, NULL); |
758 | if (flow_cache == NULL) | 758 | if (flow_cache == NULL) |
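The flow_table hunk sizes the per-node stats pointer array by nr_node_ids (one more than the largest possible node id) instead of num_possible_nodes() (a count), because NUMA node ids can be sparse and the array is indexed by id. The toy program below shows why a count can be too small when ids are sparse; the example id set {0, 2} is made up.

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Assume the possible node ids are sparse, e.g. {0, 2}. */
	const int possible_ids[] = { 0, 2 };
	const int num_possible = sizeof(possible_ids) / sizeof(possible_ids[0]);
	int max_id = 0, i;

	for (i = 0; i < num_possible; i++)
		if (possible_ids[i] > max_id)
			max_id = possible_ids[i];

	/* Indexing stats[node_id] needs max_id + 1 slots (the analogue of
	 * nr_node_ids); the count, 2 here, would be one slot short. */
	int *stats = calloc(max_id + 1, sizeof(*stats));
	if (!stats)
		return 1;

	stats[2]++;		/* safe: the array has max_id + 1 == 3 slots */
	printf("count=%d, slots needed=%d\n", num_possible, max_id + 1);
	free(stats);
	return 0;
}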
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index 1d56903fd4c7..1df78289e248 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c | |||
@@ -339,6 +339,9 @@ static void tcf_bpf_cleanup(struct tc_action *act, int bind) | |||
339 | bpf_prog_put(prog->filter); | 339 | bpf_prog_put(prog->filter); |
340 | else | 340 | else |
341 | bpf_prog_destroy(prog->filter); | 341 | bpf_prog_destroy(prog->filter); |
342 | |||
343 | kfree(prog->bpf_ops); | ||
344 | kfree(prog->bpf_name); | ||
342 | } | 345 | } |
343 | 346 | ||
344 | static struct tc_action_ops act_bpf_ops __read_mostly = { | 347 | static struct tc_action_ops act_bpf_ops __read_mostly = { |
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index c79ecfd36e0f..e5168f8b9640 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c | |||
@@ -378,7 +378,7 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb, | |||
378 | goto errout; | 378 | goto errout; |
379 | 379 | ||
380 | if (oldprog) { | 380 | if (oldprog) { |
381 | list_replace_rcu(&prog->link, &oldprog->link); | 381 | list_replace_rcu(&oldprog->link, &prog->link); |
382 | tcf_unbind_filter(tp, &oldprog->res); | 382 | tcf_unbind_filter(tp, &oldprog->res); |
383 | call_rcu(&oldprog->rcu, __cls_bpf_delete_prog); | 383 | call_rcu(&oldprog->rcu, __cls_bpf_delete_prog); |
384 | } else { | 384 | } else { |
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c index 76bc3a20ffdb..bb2a0f529c1f 100644 --- a/net/sched/cls_flow.c +++ b/net/sched/cls_flow.c | |||
@@ -425,6 +425,8 @@ static int flow_change(struct net *net, struct sk_buff *in_skb, | |||
425 | if (!fnew) | 425 | if (!fnew) |
426 | goto err2; | 426 | goto err2; |
427 | 427 | ||
428 | tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE); | ||
429 | |||
428 | fold = (struct flow_filter *)*arg; | 430 | fold = (struct flow_filter *)*arg; |
429 | if (fold) { | 431 | if (fold) { |
430 | err = -EINVAL; | 432 | err = -EINVAL; |
@@ -486,7 +488,6 @@ static int flow_change(struct net *net, struct sk_buff *in_skb, | |||
486 | fnew->mask = ~0U; | 488 | fnew->mask = ~0U; |
487 | fnew->tp = tp; | 489 | fnew->tp = tp; |
488 | get_random_bytes(&fnew->hashrnd, 4); | 490 | get_random_bytes(&fnew->hashrnd, 4); |
489 | tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE); | ||
490 | } | 491 | } |
491 | 492 | ||
492 | fnew->perturb_timer.function = flow_perturbation; | 493 | fnew->perturb_timer.function = flow_perturbation; |
@@ -526,7 +527,7 @@ static int flow_change(struct net *net, struct sk_buff *in_skb, | |||
526 | if (*arg == 0) | 527 | if (*arg == 0) |
527 | list_add_tail_rcu(&fnew->list, &head->filters); | 528 | list_add_tail_rcu(&fnew->list, &head->filters); |
528 | else | 529 | else |
529 | list_replace_rcu(&fnew->list, &fold->list); | 530 | list_replace_rcu(&fold->list, &fnew->list); |
530 | 531 | ||
531 | *arg = (unsigned long)fnew; | 532 | *arg = (unsigned long)fnew; |
532 | 533 | ||
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index 9d37ccd95062..2f3d03f99487 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c | |||
@@ -499,7 +499,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, | |||
499 | *arg = (unsigned long) fnew; | 499 | *arg = (unsigned long) fnew; |
500 | 500 | ||
501 | if (fold) { | 501 | if (fold) { |
502 | list_replace_rcu(&fnew->list, &fold->list); | 502 | list_replace_rcu(&fold->list, &fnew->list); |
503 | tcf_unbind_filter(tp, &fold->res); | 503 | tcf_unbind_filter(tp, &fold->res); |
504 | call_rcu(&fold->rcu, fl_destroy_filter); | 504 | call_rcu(&fold->rcu, fl_destroy_filter); |
505 | } else { | 505 | } else { |
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index d75993f89fac..21ca33c9f036 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c | |||
@@ -155,14 +155,23 @@ static unsigned int fq_codel_drop(struct Qdisc *sch) | |||
155 | skb = dequeue_head(flow); | 155 | skb = dequeue_head(flow); |
156 | len = qdisc_pkt_len(skb); | 156 | len = qdisc_pkt_len(skb); |
157 | q->backlogs[idx] -= len; | 157 | q->backlogs[idx] -= len; |
158 | kfree_skb(skb); | ||
159 | sch->q.qlen--; | 158 | sch->q.qlen--; |
160 | qdisc_qstats_drop(sch); | 159 | qdisc_qstats_drop(sch); |
161 | qdisc_qstats_backlog_dec(sch, skb); | 160 | qdisc_qstats_backlog_dec(sch, skb); |
161 | kfree_skb(skb); | ||
162 | flow->dropped++; | 162 | flow->dropped++; |
163 | return idx; | 163 | return idx; |
164 | } | 164 | } |
165 | 165 | ||
166 | static unsigned int fq_codel_qdisc_drop(struct Qdisc *sch) | ||
167 | { | ||
168 | unsigned int prev_backlog; | ||
169 | |||
170 | prev_backlog = sch->qstats.backlog; | ||
171 | fq_codel_drop(sch); | ||
172 | return prev_backlog - sch->qstats.backlog; | ||
173 | } | ||
174 | |||
166 | static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) | 175 | static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) |
167 | { | 176 | { |
168 | struct fq_codel_sched_data *q = qdisc_priv(sch); | 177 | struct fq_codel_sched_data *q = qdisc_priv(sch); |
@@ -604,7 +613,7 @@ static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = { | |||
604 | .enqueue = fq_codel_enqueue, | 613 | .enqueue = fq_codel_enqueue, |
605 | .dequeue = fq_codel_dequeue, | 614 | .dequeue = fq_codel_dequeue, |
606 | .peek = qdisc_peek_dequeued, | 615 | .peek = qdisc_peek_dequeued, |
607 | .drop = fq_codel_drop, | 616 | .drop = fq_codel_qdisc_drop, |
608 | .init = fq_codel_init, | 617 | .init = fq_codel_init, |
609 | .reset = fq_codel_reset, | 618 | .reset = fq_codel_reset, |
610 | .destroy = fq_codel_destroy, | 619 | .destroy = fq_codel_destroy, |
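fq_codel_drop() returns the index of the flow it dropped from, while the generic .drop hook is expected to report how much backlog was freed, so the new fq_codel_qdisc_drop() wrapper samples sch->qstats.backlog before and after. The sketch below shows that measure-the-delta wrapper shape on a toy queue; struct toy_queue and both function names are invented.

#include <stdio.h>

struct toy_queue {
	unsigned int backlog;	/* bytes currently queued */
	unsigned int pkts[4];	/* per-slot packet lengths */
};

/* Internal helper: drops one packet and returns the slot index. */
static unsigned int toy_drop(struct toy_queue *q)
{
	unsigned int idx = 0;	/* pretend slot 0 holds the fattest flow */

	q->backlog -= q->pkts[idx];
	q->pkts[idx] = 0;
	return idx;
}

/* Public drop hook: callers want bytes freed, so measure the backlog
 * around the internal helper, as fq_codel_qdisc_drop() does. */
static unsigned int toy_qdisc_drop(struct toy_queue *q)
{
	unsigned int prev_backlog = q->backlog;

	toy_drop(q);
	return prev_backlog - q->backlog;
}

int main(void)
{
	struct toy_queue q = { .backlog = 3000, .pkts = { 1500, 1000, 500, 0 } };

	printf("freed %u bytes\n", toy_qdisc_drop(&q));
	return 0;
}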
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c index 7d1492663360..52f75a5473e1 100644 --- a/net/sched/sch_sfq.c +++ b/net/sched/sch_sfq.c | |||
@@ -306,10 +306,10 @@ drop: | |||
306 | len = qdisc_pkt_len(skb); | 306 | len = qdisc_pkt_len(skb); |
307 | slot->backlog -= len; | 307 | slot->backlog -= len; |
308 | sfq_dec(q, x); | 308 | sfq_dec(q, x); |
309 | kfree_skb(skb); | ||
310 | sch->q.qlen--; | 309 | sch->q.qlen--; |
311 | qdisc_qstats_drop(sch); | 310 | qdisc_qstats_drop(sch); |
312 | qdisc_qstats_backlog_dec(sch, skb); | 311 | qdisc_qstats_backlog_dec(sch, skb); |
312 | kfree_skb(skb); | ||
313 | return len; | 313 | return len; |
314 | } | 314 | } |
315 | 315 | ||
diff --git a/net/wireless/chan.c b/net/wireless/chan.c index 915b328b9ac5..59cabc9bce69 100644 --- a/net/wireless/chan.c +++ b/net/wireless/chan.c | |||
@@ -797,23 +797,18 @@ static bool cfg80211_ir_permissive_chan(struct wiphy *wiphy, | |||
797 | return false; | 797 | return false; |
798 | } | 798 | } |
799 | 799 | ||
800 | bool cfg80211_reg_can_beacon(struct wiphy *wiphy, | 800 | static bool _cfg80211_reg_can_beacon(struct wiphy *wiphy, |
801 | struct cfg80211_chan_def *chandef, | 801 | struct cfg80211_chan_def *chandef, |
802 | enum nl80211_iftype iftype) | 802 | enum nl80211_iftype iftype, |
803 | bool check_no_ir) | ||
803 | { | 804 | { |
804 | bool res; | 805 | bool res; |
805 | u32 prohibited_flags = IEEE80211_CHAN_DISABLED | | 806 | u32 prohibited_flags = IEEE80211_CHAN_DISABLED | |
806 | IEEE80211_CHAN_RADAR; | 807 | IEEE80211_CHAN_RADAR; |
807 | 808 | ||
808 | trace_cfg80211_reg_can_beacon(wiphy, chandef, iftype); | 809 | trace_cfg80211_reg_can_beacon(wiphy, chandef, iftype, check_no_ir); |
809 | 810 | ||
810 | /* | 811 | if (check_no_ir) |
811 | * Under certain conditions suggested by some regulatory bodies a | ||
812 | * GO/STA can IR on channels marked with IEEE80211_NO_IR. Set this flag | ||
813 | * only if such relaxations are not enabled and the conditions are not | ||
814 | * met. | ||
815 | */ | ||
816 | if (!cfg80211_ir_permissive_chan(wiphy, iftype, chandef->chan)) | ||
817 | prohibited_flags |= IEEE80211_CHAN_NO_IR; | 812 | prohibited_flags |= IEEE80211_CHAN_NO_IR; |
818 | 813 | ||
819 | if (cfg80211_chandef_dfs_required(wiphy, chandef, iftype) > 0 && | 814 | if (cfg80211_chandef_dfs_required(wiphy, chandef, iftype) > 0 && |
@@ -827,8 +822,36 @@ bool cfg80211_reg_can_beacon(struct wiphy *wiphy, | |||
827 | trace_cfg80211_return_bool(res); | 822 | trace_cfg80211_return_bool(res); |
828 | return res; | 823 | return res; |
829 | } | 824 | } |
825 | |||
826 | bool cfg80211_reg_can_beacon(struct wiphy *wiphy, | ||
827 | struct cfg80211_chan_def *chandef, | ||
828 | enum nl80211_iftype iftype) | ||
829 | { | ||
830 | return _cfg80211_reg_can_beacon(wiphy, chandef, iftype, true); | ||
831 | } | ||
830 | EXPORT_SYMBOL(cfg80211_reg_can_beacon); | 832 | EXPORT_SYMBOL(cfg80211_reg_can_beacon); |
831 | 833 | ||
834 | bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy, | ||
835 | struct cfg80211_chan_def *chandef, | ||
836 | enum nl80211_iftype iftype) | ||
837 | { | ||
838 | bool check_no_ir; | ||
839 | |||
840 | ASSERT_RTNL(); | ||
841 | |||
842 | /* | ||
843 | * Under certain conditions suggested by some regulatory bodies a | ||
844 | * GO/STA can IR on channels marked with IEEE80211_NO_IR. Set this flag | ||
845 | * only if such relaxations are not enabled and the conditions are not | ||
846 | * met. | ||
847 | */ | ||
848 | check_no_ir = !cfg80211_ir_permissive_chan(wiphy, iftype, | ||
849 | chandef->chan); | ||
850 | |||
851 | return _cfg80211_reg_can_beacon(wiphy, chandef, iftype, check_no_ir); | ||
852 | } | ||
853 | EXPORT_SYMBOL(cfg80211_reg_can_beacon_relax); | ||
854 | |||
832 | int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev, | 855 | int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev, |
833 | struct cfg80211_chan_def *chandef) | 856 | struct cfg80211_chan_def *chandef) |
834 | { | 857 | { |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index c264effd00a6..76b41578a838 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -2003,7 +2003,8 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev, | |||
2003 | switch (iftype) { | 2003 | switch (iftype) { |
2004 | case NL80211_IFTYPE_AP: | 2004 | case NL80211_IFTYPE_AP: |
2005 | case NL80211_IFTYPE_P2P_GO: | 2005 | case NL80211_IFTYPE_P2P_GO: |
2006 | if (!cfg80211_reg_can_beacon(&rdev->wiphy, &chandef, iftype)) { | 2006 | if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &chandef, |
2007 | iftype)) { | ||
2007 | result = -EINVAL; | 2008 | result = -EINVAL; |
2008 | break; | 2009 | break; |
2009 | } | 2010 | } |
@@ -3403,8 +3404,8 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) | |||
3403 | } else if (!nl80211_get_ap_channel(rdev, ¶ms)) | 3404 | } else if (!nl80211_get_ap_channel(rdev, ¶ms)) |
3404 | return -EINVAL; | 3405 | return -EINVAL; |
3405 | 3406 | ||
3406 | if (!cfg80211_reg_can_beacon(&rdev->wiphy, ¶ms.chandef, | 3407 | if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, ¶ms.chandef, |
3407 | wdev->iftype)) | 3408 | wdev->iftype)) |
3408 | return -EINVAL; | 3409 | return -EINVAL; |
3409 | 3410 | ||
3410 | if (info->attrs[NL80211_ATTR_ACL_POLICY]) { | 3411 | if (info->attrs[NL80211_ATTR_ACL_POLICY]) { |
@@ -6492,8 +6493,8 @@ skip_beacons: | |||
6492 | if (err) | 6493 | if (err) |
6493 | return err; | 6494 | return err; |
6494 | 6495 | ||
6495 | if (!cfg80211_reg_can_beacon(&rdev->wiphy, ¶ms.chandef, | 6496 | if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, ¶ms.chandef, |
6496 | wdev->iftype)) | 6497 | wdev->iftype)) |
6497 | return -EINVAL; | 6498 | return -EINVAL; |
6498 | 6499 | ||
6499 | err = cfg80211_chandef_dfs_required(wdev->wiphy, | 6500 | err = cfg80211_chandef_dfs_required(wdev->wiphy, |
@@ -10170,7 +10171,8 @@ static int nl80211_tdls_channel_switch(struct sk_buff *skb, | |||
10170 | return -EINVAL; | 10171 | return -EINVAL; |
10171 | 10172 | ||
10172 | /* we will be active on the TDLS link */ | 10173 | /* we will be active on the TDLS link */ |
10173 | if (!cfg80211_reg_can_beacon(&rdev->wiphy, &chandef, wdev->iftype)) | 10174 | if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &chandef, |
10175 | wdev->iftype)) | ||
10174 | return -EINVAL; | 10176 | return -EINVAL; |
10175 | 10177 | ||
10176 | /* don't allow switching to DFS channels */ | 10178 | /* don't allow switching to DFS channels */ |
diff --git a/net/wireless/reg.c b/net/wireless/reg.c index d359e0610198..aa2d75482017 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c | |||
@@ -544,15 +544,15 @@ static int call_crda(const char *alpha2) | |||
544 | reg_regdb_query(alpha2); | 544 | reg_regdb_query(alpha2); |
545 | 545 | ||
546 | if (reg_crda_timeouts > REG_MAX_CRDA_TIMEOUTS) { | 546 | if (reg_crda_timeouts > REG_MAX_CRDA_TIMEOUTS) { |
547 | pr_info("Exceeded CRDA call max attempts. Not calling CRDA\n"); | 547 | pr_debug("Exceeded CRDA call max attempts. Not calling CRDA\n"); |
548 | return -EINVAL; | 548 | return -EINVAL; |
549 | } | 549 | } |
550 | 550 | ||
551 | if (!is_world_regdom((char *) alpha2)) | 551 | if (!is_world_regdom((char *) alpha2)) |
552 | pr_info("Calling CRDA for country: %c%c\n", | 552 | pr_debug("Calling CRDA for country: %c%c\n", |
553 | alpha2[0], alpha2[1]); | 553 | alpha2[0], alpha2[1]); |
554 | else | 554 | else |
555 | pr_info("Calling CRDA to update world regulatory domain\n"); | 555 | pr_debug("Calling CRDA to update world regulatory domain\n"); |
556 | 556 | ||
557 | return kobject_uevent_env(®_pdev->dev.kobj, KOBJ_CHANGE, env); | 557 | return kobject_uevent_env(®_pdev->dev.kobj, KOBJ_CHANGE, env); |
558 | } | 558 | } |
@@ -1589,7 +1589,7 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev) | |||
1589 | case NL80211_IFTYPE_AP: | 1589 | case NL80211_IFTYPE_AP: |
1590 | case NL80211_IFTYPE_P2P_GO: | 1590 | case NL80211_IFTYPE_P2P_GO: |
1591 | case NL80211_IFTYPE_ADHOC: | 1591 | case NL80211_IFTYPE_ADHOC: |
1592 | return cfg80211_reg_can_beacon(wiphy, &chandef, iftype); | 1592 | return cfg80211_reg_can_beacon_relax(wiphy, &chandef, iftype); |
1593 | case NL80211_IFTYPE_STATION: | 1593 | case NL80211_IFTYPE_STATION: |
1594 | case NL80211_IFTYPE_P2P_CLIENT: | 1594 | case NL80211_IFTYPE_P2P_CLIENT: |
1595 | return cfg80211_chandef_usable(wiphy, &chandef, | 1595 | return cfg80211_chandef_usable(wiphy, &chandef, |
diff --git a/net/wireless/trace.h b/net/wireless/trace.h index af3617c9879e..a808279a432a 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h | |||
@@ -2358,20 +2358,23 @@ TRACE_EVENT(cfg80211_cqm_rssi_notify, | |||
2358 | 2358 | ||
2359 | TRACE_EVENT(cfg80211_reg_can_beacon, | 2359 | TRACE_EVENT(cfg80211_reg_can_beacon, |
2360 | TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef, | 2360 | TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef, |
2361 | enum nl80211_iftype iftype), | 2361 | enum nl80211_iftype iftype, bool check_no_ir), |
2362 | TP_ARGS(wiphy, chandef, iftype), | 2362 | TP_ARGS(wiphy, chandef, iftype, check_no_ir), |
2363 | TP_STRUCT__entry( | 2363 | TP_STRUCT__entry( |
2364 | WIPHY_ENTRY | 2364 | WIPHY_ENTRY |
2365 | CHAN_DEF_ENTRY | 2365 | CHAN_DEF_ENTRY |
2366 | __field(enum nl80211_iftype, iftype) | 2366 | __field(enum nl80211_iftype, iftype) |
2367 | __field(bool, check_no_ir) | ||
2367 | ), | 2368 | ), |
2368 | TP_fast_assign( | 2369 | TP_fast_assign( |
2369 | WIPHY_ASSIGN; | 2370 | WIPHY_ASSIGN; |
2370 | CHAN_DEF_ASSIGN(chandef); | 2371 | CHAN_DEF_ASSIGN(chandef); |
2371 | __entry->iftype = iftype; | 2372 | __entry->iftype = iftype; |
2373 | __entry->check_no_ir = check_no_ir; | ||
2372 | ), | 2374 | ), |
2373 | TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", iftype=%d", | 2375 | TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", iftype=%d check_no_ir=%s", |
2374 | WIPHY_PR_ARG, CHAN_DEF_PR_ARG, __entry->iftype) | 2376 | WIPHY_PR_ARG, CHAN_DEF_PR_ARG, __entry->iftype, |
2377 | BOOL_TO_STR(__entry->check_no_ir)) | ||
2375 | ); | 2378 | ); |
2376 | 2379 | ||
2377 | TRACE_EVENT(cfg80211_chandef_dfs_required, | 2380 | TRACE_EVENT(cfg80211_chandef_dfs_required, |
diff --git a/samples/trace_events/trace-events-sample.h b/samples/trace_events/trace-events-sample.h index 8965d1bb8811..125d6402f64f 100644 --- a/samples/trace_events/trace-events-sample.h +++ b/samples/trace_events/trace-events-sample.h | |||
@@ -168,7 +168,10 @@ | |||
168 | * | 168 | * |
169 | * For __dynamic_array(int, foo, bar) use __get_dynamic_array(foo) | 169 | * For __dynamic_array(int, foo, bar) use __get_dynamic_array(foo) |
170 | * Use __get_dynamic_array_len(foo) to get the length of the array | 170 | * Use __get_dynamic_array_len(foo) to get the length of the array |
171 | * saved. | 171 | * saved. Note, __get_dynamic_array_len() returns the total allocated |
172 | * length of the dynamic array; __print_array() expects the second | ||
173 | * parameter to be the number of elements. To get that, the array length | ||
174 | * needs to be divided by the element size. | ||
172 | * | 175 | * |
173 | * For __string(foo, bar) use __get_str(foo) | 176 | * For __string(foo, bar) use __get_str(foo) |
174 | * | 177 | * |
@@ -288,7 +291,7 @@ TRACE_EVENT(foo_bar, | |||
288 | * This prints out the array that is defined by __array in a nice format. | 291 | * This prints out the array that is defined by __array in a nice format. |
289 | */ | 292 | */ |
290 | __print_array(__get_dynamic_array(list), | 293 | __print_array(__get_dynamic_array(list), |
291 | __get_dynamic_array_len(list), | 294 | __get_dynamic_array_len(list) / sizeof(int), |
292 | sizeof(int)), | 295 | sizeof(int)), |
293 | __get_str(str), __get_bitmask(cpus)) | 296 | __get_str(str), __get_bitmask(cpus)) |
294 | ); | 297 | ); |
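The sample-header comment and the __print_array() call above hinge on one distinction: __get_dynamic_array_len() reports a length in bytes, while __print_array() wants an element count, hence the division by sizeof(int). The short userspace sketch below restates that bytes-versus-elements conversion; the array contents are made up.

#include <stdio.h>

int main(void)
{
	int list[] = { 7, 42, 0, -1 };
	size_t len_bytes = sizeof(list);		/* what a byte length reports */
	size_t n_elems = len_bytes / sizeof(list[0]);	/* what an element count needs */

	printf("%zu bytes -> %zu elements of %zu bytes each\n",
	       len_bytes, n_elems, sizeof(list[0]));
	return 0;
}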
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index d126c03361ae..75888dd38a7f 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c | |||
@@ -85,7 +85,7 @@ static DECLARE_RWSEM(snd_pcm_link_rwsem); | |||
85 | void snd_pcm_stream_lock(struct snd_pcm_substream *substream) | 85 | void snd_pcm_stream_lock(struct snd_pcm_substream *substream) |
86 | { | 86 | { |
87 | if (substream->pcm->nonatomic) { | 87 | if (substream->pcm->nonatomic) { |
88 | down_read(&snd_pcm_link_rwsem); | 88 | down_read_nested(&snd_pcm_link_rwsem, SINGLE_DEPTH_NESTING); |
89 | mutex_lock(&substream->self_group.mutex); | 89 | mutex_lock(&substream->self_group.mutex); |
90 | } else { | 90 | } else { |
91 | read_lock(&snd_pcm_link_rwlock); | 91 | read_lock(&snd_pcm_link_rwlock); |
diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c index 442500e06b7c..5676b849379d 100644 --- a/sound/hda/hdac_i915.c +++ b/sound/hda/hdac_i915.c | |||
@@ -56,8 +56,11 @@ int snd_hdac_display_power(struct hdac_bus *bus, bool enable) | |||
56 | enable ? "enable" : "disable"); | 56 | enable ? "enable" : "disable"); |
57 | 57 | ||
58 | if (enable) { | 58 | if (enable) { |
59 | if (!bus->i915_power_refcount++) | 59 | if (!bus->i915_power_refcount++) { |
60 | acomp->ops->get_power(acomp->dev); | 60 | acomp->ops->get_power(acomp->dev); |
61 | snd_hdac_set_codec_wakeup(bus, true); | ||
62 | snd_hdac_set_codec_wakeup(bus, false); | ||
63 | } | ||
61 | } else { | 64 | } else { |
62 | WARN_ON(!bus->i915_power_refcount); | 65 | WARN_ON(!bus->i915_power_refcount); |
63 | if (!--bus->i915_power_refcount) | 66 | if (!--bus->i915_power_refcount) |
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 745535d1840a..735bdcb04ce8 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
@@ -979,14 +979,16 @@ static int azx_runtime_resume(struct device *dev) | |||
979 | if (!azx_has_pm_runtime(chip)) | 979 | if (!azx_has_pm_runtime(chip)) |
980 | return 0; | 980 | return 0; |
981 | 981 | ||
982 | if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL | 982 | if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) { |
983 | && hda->need_i915_power) { | 983 | bus = azx_bus(chip); |
984 | bus = azx_bus(chip); | 984 | if (hda->need_i915_power) { |
985 | snd_hdac_display_power(bus, true); | 985 | snd_hdac_display_power(bus, true); |
986 | haswell_set_bclk(hda); | 986 | haswell_set_bclk(hda); |
987 | /* toggle codec wakeup bit for STATESTS read */ | 987 | } else { |
988 | snd_hdac_set_codec_wakeup(bus, true); | 988 | /* toggle codec wakeup bit for STATESTS read */ |
989 | snd_hdac_set_codec_wakeup(bus, false); | 989 | snd_hdac_set_codec_wakeup(bus, true); |
990 | snd_hdac_set_codec_wakeup(bus, false); | ||
991 | } | ||
990 | } | 992 | } |
991 | 993 | ||
992 | /* Read STATESTS before controller reset */ | 994 | /* Read STATESTS before controller reset */ |
@@ -2182,6 +2184,8 @@ static const struct pci_device_id azx_ids[] = { | |||
2182 | /* ATI HDMI */ | 2184 | /* ATI HDMI */ |
2183 | { PCI_DEVICE(0x1002, 0x1308), | 2185 | { PCI_DEVICE(0x1002, 0x1308), |
2184 | .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, | 2186 | .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, |
2187 | { PCI_DEVICE(0x1002, 0x157a), | ||
2188 | .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, | ||
2185 | { PCI_DEVICE(0x1002, 0x793b), | 2189 | { PCI_DEVICE(0x1002, 0x793b), |
2186 | .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI }, | 2190 | .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI }, |
2187 | { PCI_DEVICE(0x1002, 0x7919), | 2191 | { PCI_DEVICE(0x1002, 0x7919), |
@@ -2236,8 +2240,14 @@ static const struct pci_device_id azx_ids[] = { | |||
2236 | .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, | 2240 | .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, |
2237 | { PCI_DEVICE(0x1002, 0xaab0), | 2241 | { PCI_DEVICE(0x1002, 0xaab0), |
2238 | .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, | 2242 | .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, |
2243 | { PCI_DEVICE(0x1002, 0xaac0), | ||
2244 | .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, | ||
2239 | { PCI_DEVICE(0x1002, 0xaac8), | 2245 | { PCI_DEVICE(0x1002, 0xaac8), |
2240 | .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, | 2246 | .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, |
2247 | { PCI_DEVICE(0x1002, 0xaad8), | ||
2248 | .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, | ||
2249 | { PCI_DEVICE(0x1002, 0xaae8), | ||
2250 | .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, | ||
2241 | /* VIA VT8251/VT8237A */ | 2251 | /* VIA VT8251/VT8237A */ |
2242 | { PCI_DEVICE(0x1106, 0x3288), | 2252 | { PCI_DEVICE(0x1106, 0x3288), |
2243 | .driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA }, | 2253 | .driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA }, |
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 95158914cc6c..a97db5fc8a15 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c | |||
@@ -3512,6 +3512,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = { | |||
3512 | { .id = 0x10de0070, .name = "GPU 70 HDMI/DP", .patch = patch_nvhdmi }, | 3512 | { .id = 0x10de0070, .name = "GPU 70 HDMI/DP", .patch = patch_nvhdmi }, |
3513 | { .id = 0x10de0071, .name = "GPU 71 HDMI/DP", .patch = patch_nvhdmi }, | 3513 | { .id = 0x10de0071, .name = "GPU 71 HDMI/DP", .patch = patch_nvhdmi }, |
3514 | { .id = 0x10de0072, .name = "GPU 72 HDMI/DP", .patch = patch_nvhdmi }, | 3514 | { .id = 0x10de0072, .name = "GPU 72 HDMI/DP", .patch = patch_nvhdmi }, |
3515 | { .id = 0x10de007d, .name = "GPU 7d HDMI/DP", .patch = patch_nvhdmi }, | ||
3515 | { .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch }, | 3516 | { .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch }, |
3516 | { .id = 0x11069f80, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi }, | 3517 | { .id = 0x11069f80, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi }, |
3517 | { .id = 0x11069f81, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi }, | 3518 | { .id = 0x11069f81, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi }, |
@@ -3576,6 +3577,7 @@ MODULE_ALIAS("snd-hda-codec-id:10de0067"); | |||
3576 | MODULE_ALIAS("snd-hda-codec-id:10de0070"); | 3577 | MODULE_ALIAS("snd-hda-codec-id:10de0070"); |
3577 | MODULE_ALIAS("snd-hda-codec-id:10de0071"); | 3578 | MODULE_ALIAS("snd-hda-codec-id:10de0071"); |
3578 | MODULE_ALIAS("snd-hda-codec-id:10de0072"); | 3579 | MODULE_ALIAS("snd-hda-codec-id:10de0072"); |
3580 | MODULE_ALIAS("snd-hda-codec-id:10de007d"); | ||
3579 | MODULE_ALIAS("snd-hda-codec-id:10de8001"); | 3581 | MODULE_ALIAS("snd-hda-codec-id:10de8001"); |
3580 | MODULE_ALIAS("snd-hda-codec-id:11069f80"); | 3582 | MODULE_ALIAS("snd-hda-codec-id:11069f80"); |
3581 | MODULE_ALIAS("snd-hda-codec-id:11069f81"); | 3583 | MODULE_ALIAS("snd-hda-codec-id:11069f81"); |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index d35cf506a7db..742fc626f9e1 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -5061,7 +5061,7 @@ static const struct hda_fixup alc269_fixups[] = { | |||
5061 | { 0x14, 0x90170110 }, | 5061 | { 0x14, 0x90170110 }, |
5062 | { 0x17, 0x40000008 }, | 5062 | { 0x17, 0x40000008 }, |
5063 | { 0x18, 0x411111f0 }, | 5063 | { 0x18, 0x411111f0 }, |
5064 | { 0x19, 0x411111f0 }, | 5064 | { 0x19, 0x01a1913c }, |
5065 | { 0x1a, 0x411111f0 }, | 5065 | { 0x1a, 0x411111f0 }, |
5066 | { 0x1b, 0x411111f0 }, | 5066 | { 0x1b, 0x411111f0 }, |
5067 | { 0x1d, 0x40f89b2d }, | 5067 | { 0x1d, 0x40f89b2d }, |
@@ -5430,8 +5430,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = { | |||
5430 | {0x15, 0x0221401f}, \ | 5430 | {0x15, 0x0221401f}, \ |
5431 | {0x1a, 0x411111f0}, \ | 5431 | {0x1a, 0x411111f0}, \ |
5432 | {0x1b, 0x411111f0}, \ | 5432 | {0x1b, 0x411111f0}, \ |
5433 | {0x1d, 0x40700001}, \ | 5433 | {0x1d, 0x40700001} |
5434 | {0x1e, 0x411111f0} | ||
5435 | 5434 | ||
5436 | #define ALC298_STANDARD_PINS \ | 5435 | #define ALC298_STANDARD_PINS \ |
5437 | {0x18, 0x411111f0}, \ | 5436 | {0x18, 0x411111f0}, \ |
@@ -5463,6 +5462,17 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { | |||
5463 | {0x1d, 0x40700001}, | 5462 | {0x1d, 0x40700001}, |
5464 | {0x21, 0x02211030}), | 5463 | {0x21, 0x02211030}), |
5465 | SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, | 5464 | SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, |
5465 | {0x12, 0x40000000}, | ||
5466 | {0x14, 0x90170130}, | ||
5467 | {0x17, 0x411111f0}, | ||
5468 | {0x18, 0x411111f0}, | ||
5469 | {0x19, 0x411111f0}, | ||
5470 | {0x1a, 0x411111f0}, | ||
5471 | {0x1b, 0x01014020}, | ||
5472 | {0x1d, 0x4054c029}, | ||
5473 | {0x1e, 0x411111f0}, | ||
5474 | {0x21, 0x0221103f}), | ||
5475 | SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, | ||
5466 | {0x12, 0x90a60160}, | 5476 | {0x12, 0x90a60160}, |
5467 | {0x14, 0x90170120}, | 5477 | {0x14, 0x90170120}, |
5468 | {0x17, 0x90170140}, | 5478 | {0x17, 0x90170140}, |
@@ -5690,35 +5700,48 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { | |||
5690 | {0x13, 0x411111f0}, | 5700 | {0x13, 0x411111f0}, |
5691 | {0x16, 0x01014020}, | 5701 | {0x16, 0x01014020}, |
5692 | {0x18, 0x411111f0}, | 5702 | {0x18, 0x411111f0}, |
5693 | {0x19, 0x01a19030}), | 5703 | {0x19, 0x01a19030}, |
5704 | {0x1e, 0x411111f0}), | ||
5694 | SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, | 5705 | SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, |
5695 | ALC292_STANDARD_PINS, | 5706 | ALC292_STANDARD_PINS, |
5696 | {0x12, 0x90a60140}, | 5707 | {0x12, 0x90a60140}, |
5697 | {0x13, 0x411111f0}, | 5708 | {0x13, 0x411111f0}, |
5698 | {0x16, 0x01014020}, | 5709 | {0x16, 0x01014020}, |
5699 | {0x18, 0x02a19031}, | 5710 | {0x18, 0x02a19031}, |
5700 | {0x19, 0x01a1903e}), | 5711 | {0x19, 0x01a1903e}, |
5712 | {0x1e, 0x411111f0}), | ||
5701 | SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL3_MIC_NO_PRESENCE, | 5713 | SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL3_MIC_NO_PRESENCE, |
5702 | ALC292_STANDARD_PINS, | 5714 | ALC292_STANDARD_PINS, |
5703 | {0x12, 0x90a60140}, | 5715 | {0x12, 0x90a60140}, |
5704 | {0x13, 0x411111f0}, | 5716 | {0x13, 0x411111f0}, |
5705 | {0x16, 0x411111f0}, | 5717 | {0x16, 0x411111f0}, |
5706 | {0x18, 0x411111f0}, | 5718 | {0x18, 0x411111f0}, |
5707 | {0x19, 0x411111f0}), | 5719 | {0x19, 0x411111f0}, |
5720 | {0x1e, 0x411111f0}), | ||
5708 | SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE, | 5721 | SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE, |
5709 | ALC292_STANDARD_PINS, | 5722 | ALC292_STANDARD_PINS, |
5710 | {0x12, 0x40000000}, | 5723 | {0x12, 0x40000000}, |
5711 | {0x13, 0x90a60140}, | 5724 | {0x13, 0x90a60140}, |
5712 | {0x16, 0x21014020}, | 5725 | {0x16, 0x21014020}, |
5713 | {0x18, 0x411111f0}, | 5726 | {0x18, 0x411111f0}, |
5714 | {0x19, 0x21a19030}), | 5727 | {0x19, 0x21a19030}, |
5728 | {0x1e, 0x411111f0}), | ||
5715 | SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE, | 5729 | SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE, |
5716 | ALC292_STANDARD_PINS, | 5730 | ALC292_STANDARD_PINS, |
5717 | {0x12, 0x40000000}, | 5731 | {0x12, 0x40000000}, |
5718 | {0x13, 0x90a60140}, | 5732 | {0x13, 0x90a60140}, |
5719 | {0x16, 0x411111f0}, | 5733 | {0x16, 0x411111f0}, |
5720 | {0x18, 0x411111f0}, | 5734 | {0x18, 0x411111f0}, |
5721 | {0x19, 0x411111f0}), | 5735 | {0x19, 0x411111f0}, |
5736 | {0x1e, 0x411111f0}), | ||
5737 | SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE, | ||
5738 | ALC292_STANDARD_PINS, | ||
5739 | {0x12, 0x40000000}, | ||
5740 | {0x13, 0x90a60140}, | ||
5741 | {0x16, 0x21014020}, | ||
5742 | {0x18, 0x411111f0}, | ||
5743 | {0x19, 0x21a19030}, | ||
5744 | {0x1e, 0x411111ff}), | ||
5722 | SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, | 5745 | SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, |
5723 | ALC298_STANDARD_PINS, | 5746 | ALC298_STANDARD_PINS, |
5724 | {0x12, 0x90a60130}, | 5747 | {0x12, 0x90a60130}, |
diff --git a/sound/sparc/amd7930.c b/sound/sparc/amd7930.c index 1b1a89e80d13..784ceb85b2d9 100644 --- a/sound/sparc/amd7930.c +++ b/sound/sparc/amd7930.c | |||
@@ -956,6 +956,7 @@ static int snd_amd7930_create(struct snd_card *card, | |||
956 | if (!amd->regs) { | 956 | if (!amd->regs) { |
957 | snd_printk(KERN_ERR | 957 | snd_printk(KERN_ERR |
958 | "amd7930-%d: Unable to map chip registers.\n", dev); | 958 | "amd7930-%d: Unable to map chip registers.\n", dev); |
959 | kfree(amd); | ||
959 | return -EIO; | 960 | return -EIO; |
960 | } | 961 | } |
961 | 962 | ||