diff options
author | Takashi Iwai <tiwai@suse.de> | 2017-11-01 12:43:20 -0400 |
---|---|---|
committer | Takashi Iwai <tiwai@suse.de> | 2017-11-01 12:43:20 -0400 |
commit | a53a0ab8ff725672fcb47bb9a5ef75fce45679d0 (patch) | |
tree | 75cdea78f27fdd569d72cea0b7837bbcc8d871f7 | |
parent | 1f20f9ff57ca23b9f5502fca85ce3977e8496cb1 (diff) | |
parent | b817d936248b9bcee8282e97fb1dda1b03c903fe (diff) |
Merge tag 'asoc-fix-v4.14-rc7' of https://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-linus
ASoC: Fixes for v4.14
A bunch of fixes here, mostly device specific ones (the biggest one
being the revert of the hotword support for rt5514), with a couple of
core fixes for potential issues with corrupted or otherwise invalid
topology files.
707 files changed, 7146 insertions, 4365 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935 b/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935 index 33e96f740639..147d4e8a1403 100644 --- a/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935 +++ b/Documentation/ABI/testing/sysfs-bus-iio-proximity-as3935 | |||
@@ -14,3 +14,11 @@ Description: | |||
14 | Show or set the gain boost of the amp, from 0-31 range. | 14 | Show or set the gain boost of the amp, from 0-31 range. |
15 | 18 = indoors (default) | 15 | 18 = indoors (default) |
16 | 14 = outdoors | 16 | 14 = outdoors |
17 | |||
18 | What /sys/bus/iio/devices/iio:deviceX/noise_level_tripped | ||
19 | Date: May 2017 | ||
20 | KernelVersion: 4.13 | ||
21 | Contact: Matt Ranostay <matt.ranostay@konsulko.com> | ||
22 | Description: | ||
23 | When 1 the noise level is over the trip level and not reporting | ||
24 | valid data | ||
diff --git a/Documentation/ABI/testing/sysfs-devices-power b/Documentation/ABI/testing/sysfs-devices-power index 676fdf5f2a99..5cbb6f038615 100644 --- a/Documentation/ABI/testing/sysfs-devices-power +++ b/Documentation/ABI/testing/sysfs-devices-power | |||
@@ -211,7 +211,9 @@ Description: | |||
211 | device, after it has been suspended at run time, from a resume | 211 | device, after it has been suspended at run time, from a resume |
212 | request to the moment the device will be ready to process I/O, | 212 | request to the moment the device will be ready to process I/O, |
213 | in microseconds. If it is equal to 0, however, this means that | 213 | in microseconds. If it is equal to 0, however, this means that |
214 | the PM QoS resume latency may be arbitrary. | 214 | the PM QoS resume latency may be arbitrary and the special value |
215 | "n/a" means that user space cannot accept any resume latency at | ||
216 | all for the given device. | ||
215 | 217 | ||
216 | Not all drivers support this attribute. If it isn't supported, | 218 | Not all drivers support this attribute. If it isn't supported, |
217 | it is not present. | 219 | it is not present. |
diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-swap b/Documentation/ABI/testing/sysfs-kernel-mm-swap index 587db52084c7..94672016c268 100644 --- a/Documentation/ABI/testing/sysfs-kernel-mm-swap +++ b/Documentation/ABI/testing/sysfs-kernel-mm-swap | |||
@@ -14,13 +14,3 @@ Description: Enable/disable VMA based swap readahead. | |||
14 | still used for tmpfs etc. other users. If set to | 14 | still used for tmpfs etc. other users. If set to |
15 | false, the global swap readahead algorithm will be | 15 | false, the global swap readahead algorithm will be |
16 | used for all swappable pages. | 16 | used for all swappable pages. |
17 | |||
18 | What: /sys/kernel/mm/swap/vma_ra_max_order | ||
19 | Date: August 2017 | ||
20 | Contact: Linux memory management mailing list <linux-mm@kvack.org> | ||
21 | Description: The max readahead size in order for VMA based swap readahead | ||
22 | |||
23 | VMA based swap readahead algorithm will readahead at | ||
24 | most 1 << max_order pages for each readahead. The | ||
25 | real readahead size for each readahead will be scaled | ||
26 | according to the estimation algorithm. | ||
diff --git a/Documentation/core-api/kernel-api.rst b/Documentation/core-api/kernel-api.rst index 8282099e0cbf..5da10184d908 100644 --- a/Documentation/core-api/kernel-api.rst +++ b/Documentation/core-api/kernel-api.rst | |||
@@ -352,44 +352,30 @@ Read-Copy Update (RCU) | |||
352 | ---------------------- | 352 | ---------------------- |
353 | 353 | ||
354 | .. kernel-doc:: include/linux/rcupdate.h | 354 | .. kernel-doc:: include/linux/rcupdate.h |
355 | :external: | ||
356 | 355 | ||
357 | .. kernel-doc:: include/linux/rcupdate_wait.h | 356 | .. kernel-doc:: include/linux/rcupdate_wait.h |
358 | :external: | ||
359 | 357 | ||
360 | .. kernel-doc:: include/linux/rcutree.h | 358 | .. kernel-doc:: include/linux/rcutree.h |
361 | :external: | ||
362 | 359 | ||
363 | .. kernel-doc:: kernel/rcu/tree.c | 360 | .. kernel-doc:: kernel/rcu/tree.c |
364 | :external: | ||
365 | 361 | ||
366 | .. kernel-doc:: kernel/rcu/tree_plugin.h | 362 | .. kernel-doc:: kernel/rcu/tree_plugin.h |
367 | :external: | ||
368 | 363 | ||
369 | .. kernel-doc:: kernel/rcu/tree_exp.h | 364 | .. kernel-doc:: kernel/rcu/tree_exp.h |
370 | :external: | ||
371 | 365 | ||
372 | .. kernel-doc:: kernel/rcu/update.c | 366 | .. kernel-doc:: kernel/rcu/update.c |
373 | :external: | ||
374 | 367 | ||
375 | .. kernel-doc:: include/linux/srcu.h | 368 | .. kernel-doc:: include/linux/srcu.h |
376 | :external: | ||
377 | 369 | ||
378 | .. kernel-doc:: kernel/rcu/srcutree.c | 370 | .. kernel-doc:: kernel/rcu/srcutree.c |
379 | :external: | ||
380 | 371 | ||
381 | .. kernel-doc:: include/linux/rculist_bl.h | 372 | .. kernel-doc:: include/linux/rculist_bl.h |
382 | :external: | ||
383 | 373 | ||
384 | .. kernel-doc:: include/linux/rculist.h | 374 | .. kernel-doc:: include/linux/rculist.h |
385 | :external: | ||
386 | 375 | ||
387 | .. kernel-doc:: include/linux/rculist_nulls.h | 376 | .. kernel-doc:: include/linux/rculist_nulls.h |
388 | :external: | ||
389 | 377 | ||
390 | .. kernel-doc:: include/linux/rcu_sync.h | 378 | .. kernel-doc:: include/linux/rcu_sync.h |
391 | :external: | ||
392 | 379 | ||
393 | .. kernel-doc:: kernel/rcu/sync.c | 380 | .. kernel-doc:: kernel/rcu/sync.c |
394 | :external: | ||
395 | 381 | ||
diff --git a/Documentation/devicetree/bindings/iio/proximity/as3935.txt b/Documentation/devicetree/bindings/iio/proximity/as3935.txt index 38d74314b7ab..b6c1afa6f02d 100644 --- a/Documentation/devicetree/bindings/iio/proximity/as3935.txt +++ b/Documentation/devicetree/bindings/iio/proximity/as3935.txt | |||
@@ -16,6 +16,10 @@ Optional properties: | |||
16 | - ams,tuning-capacitor-pf: Calibration tuning capacitor stepping | 16 | - ams,tuning-capacitor-pf: Calibration tuning capacitor stepping |
17 | value 0 - 120pF. This will require using the calibration data from | 17 | value 0 - 120pF. This will require using the calibration data from |
18 | the manufacturer. | 18 | the manufacturer. |
19 | - ams,nflwdth: Set the noise and watchdog threshold register on | ||
20 | startup. This will need to set according to the noise from the | ||
21 | MCU board, and possibly the local environment. Refer to the | ||
22 | datasheet for the threshold settings. | ||
19 | 23 | ||
20 | Example: | 24 | Example: |
21 | 25 | ||
@@ -27,4 +31,5 @@ as3935@0 { | |||
27 | interrupt-parent = <&gpio1>; | 31 | interrupt-parent = <&gpio1>; |
28 | interrupts = <16 1>; | 32 | interrupts = <16 1>; |
29 | ams,tuning-capacitor-pf = <80>; | 33 | ams,tuning-capacitor-pf = <80>; |
34 | ams,nflwdth = <0x44>; | ||
30 | }; | 35 | }; |
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt index 4c29cdab0ea5..5eb108e180fa 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt +++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt | |||
@@ -99,7 +99,7 @@ Examples: | |||
99 | compatible = "arm,gic-v3-its"; | 99 | compatible = "arm,gic-v3-its"; |
100 | msi-controller; | 100 | msi-controller; |
101 | #msi-cells = <1>; | 101 | #msi-cells = <1>; |
102 | reg = <0x0 0x2c200000 0 0x200000>; | 102 | reg = <0x0 0x2c200000 0 0x20000>; |
103 | }; | 103 | }; |
104 | }; | 104 | }; |
105 | 105 | ||
@@ -124,14 +124,14 @@ Examples: | |||
124 | compatible = "arm,gic-v3-its"; | 124 | compatible = "arm,gic-v3-its"; |
125 | msi-controller; | 125 | msi-controller; |
126 | #msi-cells = <1>; | 126 | #msi-cells = <1>; |
127 | reg = <0x0 0x2c200000 0 0x200000>; | 127 | reg = <0x0 0x2c200000 0 0x20000>; |
128 | }; | 128 | }; |
129 | 129 | ||
130 | gic-its@2c400000 { | 130 | gic-its@2c400000 { |
131 | compatible = "arm,gic-v3-its"; | 131 | compatible = "arm,gic-v3-its"; |
132 | msi-controller; | 132 | msi-controller; |
133 | #msi-cells = <1>; | 133 | #msi-cells = <1>; |
134 | reg = <0x0 0x2c400000 0 0x200000>; | 134 | reg = <0x0 0x2c400000 0 0x20000>; |
135 | }; | 135 | }; |
136 | 136 | ||
137 | ppi-partitions { | 137 | ppi-partitions { |
diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt index 329e740adea7..f6f80380dff2 100644 --- a/Documentation/kbuild/makefiles.txt +++ b/Documentation/kbuild/makefiles.txt | |||
@@ -1108,14 +1108,6 @@ When kbuild executes, the following steps are followed (roughly): | |||
1108 | ld | 1108 | ld |
1109 | Link target. Often, LDFLAGS_$@ is used to set specific options to ld. | 1109 | Link target. Often, LDFLAGS_$@ is used to set specific options to ld. |
1110 | 1110 | ||
1111 | objcopy | ||
1112 | Copy binary. Uses OBJCOPYFLAGS usually specified in | ||
1113 | arch/$(ARCH)/Makefile. | ||
1114 | OBJCOPYFLAGS_$@ may be used to set additional options. | ||
1115 | |||
1116 | gzip | ||
1117 | Compress target. Use maximum compression to compress target. | ||
1118 | |||
1119 | Example: | 1111 | Example: |
1120 | #arch/x86/boot/Makefile | 1112 | #arch/x86/boot/Makefile |
1121 | LDFLAGS_bootsect := -Ttext 0x0 -s --oformat binary | 1113 | LDFLAGS_bootsect := -Ttext 0x0 -s --oformat binary |
@@ -1139,6 +1131,19 @@ When kbuild executes, the following steps are followed (roughly): | |||
1139 | resulting in the target file being recompiled for no | 1131 | resulting in the target file being recompiled for no |
1140 | obvious reason. | 1132 | obvious reason. |
1141 | 1133 | ||
1134 | objcopy | ||
1135 | Copy binary. Uses OBJCOPYFLAGS usually specified in | ||
1136 | arch/$(ARCH)/Makefile. | ||
1137 | OBJCOPYFLAGS_$@ may be used to set additional options. | ||
1138 | |||
1139 | gzip | ||
1140 | Compress target. Use maximum compression to compress target. | ||
1141 | |||
1142 | Example: | ||
1143 | #arch/x86/boot/compressed/Makefile | ||
1144 | $(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y) FORCE | ||
1145 | $(call if_changed,gzip) | ||
1146 | |||
1142 | dtc | 1147 | dtc |
1143 | Create flattened device tree blob object suitable for linking | 1148 | Create flattened device tree blob object suitable for linking |
1144 | into vmlinux. Device tree blobs linked into vmlinux are placed | 1149 | into vmlinux. Device tree blobs linked into vmlinux are placed |
@@ -1219,7 +1224,7 @@ When kbuild executes, the following steps are followed (roughly): | |||
1219 | that may be shared between individual architectures. | 1224 | that may be shared between individual architectures. |
1220 | The recommended approach how to use a generic header file is | 1225 | The recommended approach how to use a generic header file is |
1221 | to list the file in the Kbuild file. | 1226 | to list the file in the Kbuild file. |
1222 | See "7.3 generic-y" for further info on syntax etc. | 1227 | See "7.2 generic-y" for further info on syntax etc. |
1223 | 1228 | ||
1224 | --- 6.11 Post-link pass | 1229 | --- 6.11 Post-link pass |
1225 | 1230 | ||
@@ -1254,13 +1259,13 @@ A Kbuild file may be defined under arch/<arch>/include/uapi/asm/ and | |||
1254 | arch/<arch>/include/asm/ to list asm files coming from asm-generic. | 1259 | arch/<arch>/include/asm/ to list asm files coming from asm-generic. |
1255 | See subsequent chapter for the syntax of the Kbuild file. | 1260 | See subsequent chapter for the syntax of the Kbuild file. |
1256 | 1261 | ||
1257 | --- 7.1 no-export-headers | 1262 | --- 7.1 no-export-headers |
1258 | 1263 | ||
1259 | no-export-headers is essentially used by include/uapi/linux/Kbuild to | 1264 | no-export-headers is essentially used by include/uapi/linux/Kbuild to |
1260 | avoid exporting specific headers (e.g. kvm.h) on architectures that do | 1265 | avoid exporting specific headers (e.g. kvm.h) on architectures that do |
1261 | not support it. It should be avoided as much as possible. | 1266 | not support it. It should be avoided as much as possible. |
1262 | 1267 | ||
1263 | --- 7.2 generic-y | 1268 | --- 7.2 generic-y |
1264 | 1269 | ||
1265 | If an architecture uses a verbatim copy of a header from | 1270 | If an architecture uses a verbatim copy of a header from |
1266 | include/asm-generic then this is listed in the file | 1271 | include/asm-generic then this is listed in the file |
@@ -1287,7 +1292,7 @@ See subsequent chapter for the syntax of the Kbuild file. | |||
1287 | Example: termios.h | 1292 | Example: termios.h |
1288 | #include <asm-generic/termios.h> | 1293 | #include <asm-generic/termios.h> |
1289 | 1294 | ||
1290 | --- 7.3 generated-y | 1295 | --- 7.3 generated-y |
1291 | 1296 | ||
1292 | If an architecture generates other header files alongside generic-y | 1297 | If an architecture generates other header files alongside generic-y |
1293 | wrappers, generated-y specifies them. | 1298 | wrappers, generated-y specifies them. |
@@ -1299,7 +1304,7 @@ See subsequent chapter for the syntax of the Kbuild file. | |||
1299 | #arch/x86/include/asm/Kbuild | 1304 | #arch/x86/include/asm/Kbuild |
1300 | generated-y += syscalls_32.h | 1305 | generated-y += syscalls_32.h |
1301 | 1306 | ||
1302 | --- 7.5 mandatory-y | 1307 | --- 7.4 mandatory-y |
1303 | 1308 | ||
1304 | mandatory-y is essentially used by include/uapi/asm-generic/Kbuild.asm | 1309 | mandatory-y is essentially used by include/uapi/asm-generic/Kbuild.asm |
1305 | to define the minimum set of headers that must be exported in | 1310 | to define the minimum set of headers that must be exported in |
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt index 57f52cdce32e..9ba04c0bab8d 100644 --- a/Documentation/networking/bonding.txt +++ b/Documentation/networking/bonding.txt | |||
@@ -2387,7 +2387,7 @@ broadcast: Like active-backup, there is not much advantage to this | |||
2387 | and packet type ID), so in a "gatewayed" configuration, all | 2387 | and packet type ID), so in a "gatewayed" configuration, all |
2388 | outgoing traffic will generally use the same device. Incoming | 2388 | outgoing traffic will generally use the same device. Incoming |
2389 | traffic may also end up on a single device, but that is | 2389 | traffic may also end up on a single device, but that is |
2390 | dependent upon the balancing policy of the peer's 8023.ad | 2390 | dependent upon the balancing policy of the peer's 802.3ad |
2391 | implementation. In a "local" configuration, traffic will be | 2391 | implementation. In a "local" configuration, traffic will be |
2392 | distributed across the devices in the bond. | 2392 | distributed across the devices in the bond. |
2393 | 2393 | ||
diff --git a/Documentation/process/index.rst b/Documentation/process/index.rst index 82fc399fcd33..61e43cc3ed17 100644 --- a/Documentation/process/index.rst +++ b/Documentation/process/index.rst | |||
@@ -25,6 +25,7 @@ Below are the essential guides that every developer should read. | |||
25 | submitting-patches | 25 | submitting-patches |
26 | coding-style | 26 | coding-style |
27 | email-clients | 27 | email-clients |
28 | kernel-enforcement-statement | ||
28 | 29 | ||
29 | Other guides to the community that are of interest to most developers are: | 30 | Other guides to the community that are of interest to most developers are: |
30 | 31 | ||
diff --git a/Documentation/process/kernel-enforcement-statement.rst b/Documentation/process/kernel-enforcement-statement.rst new file mode 100644 index 000000000000..1e23d4227337 --- /dev/null +++ b/Documentation/process/kernel-enforcement-statement.rst | |||
@@ -0,0 +1,147 @@ | |||
1 | Linux Kernel Enforcement Statement | ||
2 | ---------------------------------- | ||
3 | |||
4 | As developers of the Linux kernel, we have a keen interest in how our software | ||
5 | is used and how the license for our software is enforced. Compliance with the | ||
6 | reciprocal sharing obligations of GPL-2.0 is critical to the long-term | ||
7 | sustainability of our software and community. | ||
8 | |||
9 | Although there is a right to enforce the separate copyright interests in the | ||
10 | contributions made to our community, we share an interest in ensuring that | ||
11 | individual enforcement actions are conducted in a manner that benefits our | ||
12 | community and do not have an unintended negative impact on the health and | ||
13 | growth of our software ecosystem. In order to deter unhelpful enforcement | ||
14 | actions, we agree that it is in the best interests of our development | ||
15 | community to undertake the following commitment to users of the Linux kernel | ||
16 | on behalf of ourselves and any successors to our copyright interests: | ||
17 | |||
18 | Notwithstanding the termination provisions of the GPL-2.0, we agree that | ||
19 | it is in the best interests of our development community to adopt the | ||
20 | following provisions of GPL-3.0 as additional permissions under our | ||
21 | license with respect to any non-defensive assertion of rights under the | ||
22 | license. | ||
23 | |||
24 | However, if you cease all violation of this License, then your license | ||
25 | from a particular copyright holder is reinstated (a) provisionally, | ||
26 | unless and until the copyright holder explicitly and finally | ||
27 | terminates your license, and (b) permanently, if the copyright holder | ||
28 | fails to notify you of the violation by some reasonable means prior to | ||
29 | 60 days after the cessation. | ||
30 | |||
31 | Moreover, your license from a particular copyright holder is | ||
32 | reinstated permanently if the copyright holder notifies you of the | ||
33 | violation by some reasonable means, this is the first time you have | ||
34 | received notice of violation of this License (for any work) from that | ||
35 | copyright holder, and you cure the violation prior to 30 days after | ||
36 | your receipt of the notice. | ||
37 | |||
38 | Our intent in providing these assurances is to encourage more use of the | ||
39 | software. We want companies and individuals to use, modify and distribute | ||
40 | this software. We want to work with users in an open and transparent way to | ||
41 | eliminate any uncertainty about our expectations regarding compliance or | ||
42 | enforcement that might limit adoption of our software. We view legal action | ||
43 | as a last resort, to be initiated only when other community efforts have | ||
44 | failed to resolve the problem. | ||
45 | |||
46 | Finally, once a non-compliance issue is resolved, we hope the user will feel | ||
47 | welcome to join us in our efforts on this project. Working together, we will | ||
48 | be stronger. | ||
49 | |||
50 | Except where noted below, we speak only for ourselves, and not for any company | ||
51 | we might work for today, have in the past, or will in the future. | ||
52 | |||
53 | - Bjorn Andersson (Linaro) | ||
54 | - Andrea Arcangeli (Red Hat) | ||
55 | - Neil Armstrong | ||
56 | - Jens Axboe | ||
57 | - Pablo Neira Ayuso | ||
58 | - Khalid Aziz | ||
59 | - Ralf Baechle | ||
60 | - Felipe Balbi | ||
61 | - Arnd Bergmann | ||
62 | - Ard Biesheuvel | ||
63 | - Paolo Bonzini (Red Hat) | ||
64 | - Christian Borntraeger | ||
65 | - Mark Brown (Linaro) | ||
66 | - Paul Burton | ||
67 | - Javier Martinez Canillas | ||
68 | - Rob Clark | ||
69 | - Jonathan Corbet | ||
70 | - Vivien Didelot (Savoir-faire Linux) | ||
71 | - Hans de Goede (Red Hat) | ||
72 | - Mel Gorman (SUSE) | ||
73 | - Sven Eckelmann | ||
74 | - Alex Elder (Linaro) | ||
75 | - Fabio Estevam | ||
76 | - Larry Finger | ||
77 | - Bhumika Goyal | ||
78 | - Andy Gross | ||
79 | - Juergen Gross | ||
80 | - Shawn Guo | ||
81 | - Ulf Hansson | ||
82 | - Tejun Heo | ||
83 | - Rob Herring | ||
84 | - Masami Hiramatsu | ||
85 | - Michal Hocko | ||
86 | - Simon Horman | ||
87 | - Johan Hovold (Hovold Consulting AB) | ||
88 | - Christophe JAILLET | ||
89 | - Olof Johansson | ||
90 | - Lee Jones (Linaro) | ||
91 | - Heiner Kallweit | ||
92 | - Srinivas Kandagatla | ||
93 | - Jan Kara | ||
94 | - Shuah Khan (Samsung) | ||
95 | - David Kershner | ||
96 | - Jaegeuk Kim | ||
97 | - Namhyung Kim | ||
98 | - Colin Ian King | ||
99 | - Jeff Kirsher | ||
100 | - Greg Kroah-Hartman (Linux Foundation) | ||
101 | - Christian König | ||
102 | - Vinod Koul | ||
103 | - Krzysztof Kozlowski | ||
104 | - Viresh Kumar | ||
105 | - Aneesh Kumar K.V | ||
106 | - Julia Lawall | ||
107 | - Doug Ledford (Red Hat) | ||
108 | - Chuck Lever (Oracle) | ||
109 | - Daniel Lezcano | ||
110 | - Shaohua Li | ||
111 | - Xin Long (Red Hat) | ||
112 | - Tony Luck | ||
113 | - Mike Marshall | ||
114 | - Chris Mason | ||
115 | - Paul E. McKenney | ||
116 | - David S. Miller | ||
117 | - Ingo Molnar | ||
118 | - Kuninori Morimoto | ||
119 | - Borislav Petkov | ||
120 | - Jiri Pirko | ||
121 | - Josh Poimboeuf | ||
122 | - Sebastian Reichel (Collabora) | ||
123 | - Guenter Roeck | ||
124 | - Joerg Roedel | ||
125 | - Leon Romanovsky | ||
126 | - Steven Rostedt (VMware) | ||
127 | - Ivan Safonov | ||
128 | - Ivan Safonov | ||
129 | - Anna Schumaker | ||
130 | - Jes Sorensen | ||
131 | - K.Y. Srinivasan | ||
132 | - Heiko Stuebner | ||
133 | - Jiri Kosina (SUSE) | ||
134 | - Dmitry Torokhov | ||
135 | - Linus Torvalds | ||
136 | - Thierry Reding | ||
137 | - Rik van Riel | ||
138 | - Geert Uytterhoeven (Glider bvba) | ||
139 | - Daniel Vetter | ||
140 | - Linus Walleij | ||
141 | - Richard Weinberger | ||
142 | - Dan Williams | ||
143 | - Rafael J. Wysocki | ||
144 | - Arvind Yadav | ||
145 | - Masahiro Yamada | ||
146 | - Wei Yongjun | ||
147 | - Lv Zheng | ||
diff --git a/MAINTAINERS b/MAINTAINERS index 2d3d750b19c0..af0cb69f6a3e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -5346,9 +5346,7 @@ M: "J. Bruce Fields" <bfields@fieldses.org> | |||
5346 | L: linux-fsdevel@vger.kernel.org | 5346 | L: linux-fsdevel@vger.kernel.org |
5347 | S: Maintained | 5347 | S: Maintained |
5348 | F: include/linux/fcntl.h | 5348 | F: include/linux/fcntl.h |
5349 | F: include/linux/fs.h | ||
5350 | F: include/uapi/linux/fcntl.h | 5349 | F: include/uapi/linux/fcntl.h |
5351 | F: include/uapi/linux/fs.h | ||
5352 | F: fs/fcntl.c | 5350 | F: fs/fcntl.c |
5353 | F: fs/locks.c | 5351 | F: fs/locks.c |
5354 | 5352 | ||
@@ -5357,6 +5355,8 @@ M: Alexander Viro <viro@zeniv.linux.org.uk> | |||
5357 | L: linux-fsdevel@vger.kernel.org | 5355 | L: linux-fsdevel@vger.kernel.org |
5358 | S: Maintained | 5356 | S: Maintained |
5359 | F: fs/* | 5357 | F: fs/* |
5358 | F: include/linux/fs.h | ||
5359 | F: include/uapi/linux/fs.h | ||
5360 | 5360 | ||
5361 | FINTEK F75375S HARDWARE MONITOR AND FAN CONTROLLER DRIVER | 5361 | FINTEK F75375S HARDWARE MONITOR AND FAN CONTROLLER DRIVER |
5362 | M: Riku Voipio <riku.voipio@iki.fi> | 5362 | M: Riku Voipio <riku.voipio@iki.fi> |
@@ -7571,7 +7571,7 @@ F: arch/mips/include/asm/kvm* | |||
7571 | F: arch/mips/kvm/ | 7571 | F: arch/mips/kvm/ |
7572 | 7572 | ||
7573 | KERNEL VIRTUAL MACHINE FOR POWERPC (KVM/powerpc) | 7573 | KERNEL VIRTUAL MACHINE FOR POWERPC (KVM/powerpc) |
7574 | M: Alexander Graf <agraf@suse.com> | 7574 | M: Paul Mackerras <paulus@ozlabs.org> |
7575 | L: kvm-ppc@vger.kernel.org | 7575 | L: kvm-ppc@vger.kernel.org |
7576 | W: http://www.linux-kvm.org/ | 7576 | W: http://www.linux-kvm.org/ |
7577 | T: git git://github.com/agraf/linux-2.6.git | 7577 | T: git git://github.com/agraf/linux-2.6.git |
@@ -9213,7 +9213,6 @@ F: include/linux/isicom.h | |||
9213 | MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER | 9213 | MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER |
9214 | M: Bin Liu <b-liu@ti.com> | 9214 | M: Bin Liu <b-liu@ti.com> |
9215 | L: linux-usb@vger.kernel.org | 9215 | L: linux-usb@vger.kernel.org |
9216 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git | ||
9217 | S: Maintained | 9216 | S: Maintained |
9218 | F: drivers/usb/musb/ | 9217 | F: drivers/usb/musb/ |
9219 | 9218 | ||
@@ -10180,7 +10179,6 @@ F: Documentation/parport*.txt | |||
10180 | 10179 | ||
10181 | PARAVIRT_OPS INTERFACE | 10180 | PARAVIRT_OPS INTERFACE |
10182 | M: Juergen Gross <jgross@suse.com> | 10181 | M: Juergen Gross <jgross@suse.com> |
10183 | M: Chris Wright <chrisw@sous-sol.org> | ||
10184 | M: Alok Kataria <akataria@vmware.com> | 10182 | M: Alok Kataria <akataria@vmware.com> |
10185 | M: Rusty Russell <rusty@rustcorp.com.au> | 10183 | M: Rusty Russell <rusty@rustcorp.com.au> |
10186 | L: virtualization@lists.linux-foundation.org | 10184 | L: virtualization@lists.linux-foundation.org |
@@ -10560,6 +10558,8 @@ M: Peter Zijlstra <peterz@infradead.org> | |||
10560 | M: Ingo Molnar <mingo@redhat.com> | 10558 | M: Ingo Molnar <mingo@redhat.com> |
10561 | M: Arnaldo Carvalho de Melo <acme@kernel.org> | 10559 | M: Arnaldo Carvalho de Melo <acme@kernel.org> |
10562 | R: Alexander Shishkin <alexander.shishkin@linux.intel.com> | 10560 | R: Alexander Shishkin <alexander.shishkin@linux.intel.com> |
10561 | R: Jiri Olsa <jolsa@redhat.com> | ||
10562 | R: Namhyung Kim <namhyung@kernel.org> | ||
10563 | L: linux-kernel@vger.kernel.org | 10563 | L: linux-kernel@vger.kernel.org |
10564 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core | 10564 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core |
10565 | S: Supported | 10565 | S: Supported |
@@ -1,7 +1,7 @@ | |||
1 | VERSION = 4 | 1 | VERSION = 4 |
2 | PATCHLEVEL = 14 | 2 | PATCHLEVEL = 14 |
3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
4 | EXTRAVERSION = -rc4 | 4 | EXTRAVERSION = -rc7 |
5 | NAME = Fearless Coyote | 5 | NAME = Fearless Coyote |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
@@ -130,8 +130,8 @@ endif | |||
130 | ifneq ($(KBUILD_OUTPUT),) | 130 | ifneq ($(KBUILD_OUTPUT),) |
131 | # check that the output directory actually exists | 131 | # check that the output directory actually exists |
132 | saved-output := $(KBUILD_OUTPUT) | 132 | saved-output := $(KBUILD_OUTPUT) |
133 | $(shell [ -d $(KBUILD_OUTPUT) ] || mkdir -p $(KBUILD_OUTPUT)) | 133 | KBUILD_OUTPUT := $(shell mkdir -p $(KBUILD_OUTPUT) && cd $(KBUILD_OUTPUT) \ |
134 | KBUILD_OUTPUT := $(realpath $(KBUILD_OUTPUT)) | 134 | && /bin/pwd) |
135 | $(if $(KBUILD_OUTPUT),, \ | 135 | $(if $(KBUILD_OUTPUT),, \ |
136 | $(error failed to create output directory "$(saved-output)")) | 136 | $(error failed to create output directory "$(saved-output)")) |
137 | 137 | ||
@@ -697,11 +697,11 @@ KBUILD_CFLAGS += $(stackp-flag) | |||
697 | 697 | ||
698 | ifeq ($(cc-name),clang) | 698 | ifeq ($(cc-name),clang) |
699 | ifneq ($(CROSS_COMPILE),) | 699 | ifneq ($(CROSS_COMPILE),) |
700 | CLANG_TARGET := -target $(notdir $(CROSS_COMPILE:%-=%)) | 700 | CLANG_TARGET := --target=$(notdir $(CROSS_COMPILE:%-=%)) |
701 | GCC_TOOLCHAIN := $(realpath $(dir $(shell which $(LD)))/..) | 701 | GCC_TOOLCHAIN := $(realpath $(dir $(shell which $(LD)))/..) |
702 | endif | 702 | endif |
703 | ifneq ($(GCC_TOOLCHAIN),) | 703 | ifneq ($(GCC_TOOLCHAIN),) |
704 | CLANG_GCC_TC := -gcc-toolchain $(GCC_TOOLCHAIN) | 704 | CLANG_GCC_TC := --gcc-toolchain=$(GCC_TOOLCHAIN) |
705 | endif | 705 | endif |
706 | KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) | 706 | KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) |
707 | KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) | 707 | KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC) |
@@ -933,7 +933,11 @@ ifdef CONFIG_STACK_VALIDATION | |||
933 | ifeq ($(has_libelf),1) | 933 | ifeq ($(has_libelf),1) |
934 | objtool_target := tools/objtool FORCE | 934 | objtool_target := tools/objtool FORCE |
935 | else | 935 | else |
936 | $(warning "Cannot use CONFIG_STACK_VALIDATION, please install libelf-dev, libelf-devel or elfutils-libelf-devel") | 936 | ifdef CONFIG_ORC_UNWINDER |
937 | $(error "Cannot generate ORC metadata for CONFIG_ORC_UNWINDER=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel") | ||
938 | else | ||
939 | $(warning "Cannot use CONFIG_STACK_VALIDATION=y, please install libelf-dev, libelf-devel or elfutils-libelf-devel") | ||
940 | endif | ||
937 | SKIP_STACK_VALIDATION := 1 | 941 | SKIP_STACK_VALIDATION := 1 |
938 | export SKIP_STACK_VALIDATION | 942 | export SKIP_STACK_VALIDATION |
939 | endif | 943 | endif |
@@ -1395,7 +1399,7 @@ help: | |||
1395 | @echo ' Build, install, and boot kernel before' | 1399 | @echo ' Build, install, and boot kernel before' |
1396 | @echo ' running kselftest on it' | 1400 | @echo ' running kselftest on it' |
1397 | @echo ' kselftest-clean - Remove all generated kselftest files' | 1401 | @echo ' kselftest-clean - Remove all generated kselftest files' |
1398 | @echo ' kselftest-merge - Merge all the config dependencies of kselftest to existed' | 1402 | @echo ' kselftest-merge - Merge all the config dependencies of kselftest to existing' |
1399 | @echo ' .config.' | 1403 | @echo ' .config.' |
1400 | @echo '' | 1404 | @echo '' |
1401 | @echo 'Userspace tools targets:' | 1405 | @echo 'Userspace tools targets:' |
diff --git a/arch/Kconfig b/arch/Kconfig index 1aafb4efbb51..d789a89cb32c 100644 --- a/arch/Kconfig +++ b/arch/Kconfig | |||
@@ -937,9 +937,6 @@ config STRICT_MODULE_RWX | |||
937 | and non-text memory will be made non-executable. This provides | 937 | and non-text memory will be made non-executable. This provides |
938 | protection against certain security exploits (e.g. writing to text) | 938 | protection against certain security exploits (e.g. writing to text) |
939 | 939 | ||
940 | config ARCH_WANT_RELAX_ORDER | ||
941 | bool | ||
942 | |||
943 | config ARCH_HAS_REFCOUNT | 940 | config ARCH_HAS_REFCOUNT |
944 | bool | 941 | bool |
945 | help | 942 | help |
diff --git a/arch/alpha/kernel/sys_alcor.c b/arch/alpha/kernel/sys_alcor.c index 118dc6af1805..7ad074fd5ab5 100644 --- a/arch/alpha/kernel/sys_alcor.c +++ b/arch/alpha/kernel/sys_alcor.c | |||
@@ -181,10 +181,10 @@ alcor_init_irq(void) | |||
181 | * comes in on. This makes interrupt processing much easier. | 181 | * comes in on. This makes interrupt processing much easier. |
182 | */ | 182 | */ |
183 | 183 | ||
184 | static int __init | 184 | static int |
185 | alcor_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 185 | alcor_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
186 | { | 186 | { |
187 | static char irq_tab[7][5] __initdata = { | 187 | static char irq_tab[7][5] = { |
188 | /*INT INTA INTB INTC INTD */ | 188 | /*INT INTA INTB INTC INTD */ |
189 | /* note: IDSEL 17 is XLT only */ | 189 | /* note: IDSEL 17 is XLT only */ |
190 | {16+13, 16+13, 16+13, 16+13, 16+13}, /* IdSel 17, TULIP */ | 190 | {16+13, 16+13, 16+13, 16+13, 16+13}, /* IdSel 17, TULIP */ |
diff --git a/arch/alpha/kernel/sys_cabriolet.c b/arch/alpha/kernel/sys_cabriolet.c index 4c50f8f40cbb..c0fa1fe5ce77 100644 --- a/arch/alpha/kernel/sys_cabriolet.c +++ b/arch/alpha/kernel/sys_cabriolet.c | |||
@@ -173,10 +173,10 @@ pc164_init_irq(void) | |||
173 | * because it is the Saturn IO (SIO) PCI/ISA Bridge Chip. | 173 | * because it is the Saturn IO (SIO) PCI/ISA Bridge Chip. |
174 | */ | 174 | */ |
175 | 175 | ||
176 | static inline int __init | 176 | static inline int |
177 | eb66p_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 177 | eb66p_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
178 | { | 178 | { |
179 | static char irq_tab[5][5] __initdata = { | 179 | static char irq_tab[5][5] = { |
180 | /*INT INTA INTB INTC INTD */ | 180 | /*INT INTA INTB INTC INTD */ |
181 | {16+0, 16+0, 16+5, 16+9, 16+13}, /* IdSel 6, slot 0, J25 */ | 181 | {16+0, 16+0, 16+5, 16+9, 16+13}, /* IdSel 6, slot 0, J25 */ |
182 | {16+1, 16+1, 16+6, 16+10, 16+14}, /* IdSel 7, slot 1, J26 */ | 182 | {16+1, 16+1, 16+6, 16+10, 16+14}, /* IdSel 7, slot 1, J26 */ |
@@ -203,10 +203,10 @@ eb66p_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | |||
203 | * because it is the Saturn IO (SIO) PCI/ISA Bridge Chip. | 203 | * because it is the Saturn IO (SIO) PCI/ISA Bridge Chip. |
204 | */ | 204 | */ |
205 | 205 | ||
206 | static inline int __init | 206 | static inline int |
207 | cabriolet_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 207 | cabriolet_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
208 | { | 208 | { |
209 | static char irq_tab[5][5] __initdata = { | 209 | static char irq_tab[5][5] = { |
210 | /*INT INTA INTB INTC INTD */ | 210 | /*INT INTA INTB INTC INTD */ |
211 | { 16+2, 16+2, 16+7, 16+11, 16+15}, /* IdSel 5, slot 2, J21 */ | 211 | { 16+2, 16+2, 16+7, 16+11, 16+15}, /* IdSel 5, slot 2, J21 */ |
212 | { 16+0, 16+0, 16+5, 16+9, 16+13}, /* IdSel 6, slot 0, J19 */ | 212 | { 16+0, 16+0, 16+5, 16+9, 16+13}, /* IdSel 6, slot 0, J19 */ |
@@ -287,10 +287,10 @@ cia_cab_init_pci(void) | |||
287 | * | 287 | * |
288 | */ | 288 | */ |
289 | 289 | ||
290 | static inline int __init | 290 | static inline int |
291 | alphapc164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 291 | alphapc164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
292 | { | 292 | { |
293 | static char irq_tab[7][5] __initdata = { | 293 | static char irq_tab[7][5] = { |
294 | /*INT INTA INTB INTC INTD */ | 294 | /*INT INTA INTB INTC INTD */ |
295 | { 16+2, 16+2, 16+9, 16+13, 16+17}, /* IdSel 5, slot 2, J20 */ | 295 | { 16+2, 16+2, 16+9, 16+13, 16+17}, /* IdSel 5, slot 2, J20 */ |
296 | { 16+0, 16+0, 16+7, 16+11, 16+15}, /* IdSel 6, slot 0, J29 */ | 296 | { 16+0, 16+0, 16+7, 16+11, 16+15}, /* IdSel 6, slot 0, J29 */ |
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c index 6c35159bc00e..9e1e40ea1d14 100644 --- a/arch/alpha/kernel/sys_dp264.c +++ b/arch/alpha/kernel/sys_dp264.c | |||
@@ -356,7 +356,7 @@ clipper_init_irq(void) | |||
356 | * 10 64 bit PCI option slot 3 (not bus 0) | 356 | * 10 64 bit PCI option slot 3 (not bus 0) |
357 | */ | 357 | */ |
358 | 358 | ||
359 | static int __init | 359 | static int |
360 | isa_irq_fixup(const struct pci_dev *dev, int irq) | 360 | isa_irq_fixup(const struct pci_dev *dev, int irq) |
361 | { | 361 | { |
362 | u8 irq8; | 362 | u8 irq8; |
@@ -372,10 +372,10 @@ isa_irq_fixup(const struct pci_dev *dev, int irq) | |||
372 | return irq8 & 0xf; | 372 | return irq8 & 0xf; |
373 | } | 373 | } |
374 | 374 | ||
375 | static int __init | 375 | static int |
376 | dp264_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 376 | dp264_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
377 | { | 377 | { |
378 | static char irq_tab[6][5] __initdata = { | 378 | static char irq_tab[6][5] = { |
379 | /*INT INTA INTB INTC INTD */ | 379 | /*INT INTA INTB INTC INTD */ |
380 | { -1, -1, -1, -1, -1}, /* IdSel 5 ISA Bridge */ | 380 | { -1, -1, -1, -1, -1}, /* IdSel 5 ISA Bridge */ |
381 | { 16+ 3, 16+ 3, 16+ 2, 16+ 2, 16+ 2}, /* IdSel 6 SCSI builtin*/ | 381 | { 16+ 3, 16+ 3, 16+ 2, 16+ 2, 16+ 2}, /* IdSel 6 SCSI builtin*/ |
@@ -394,10 +394,10 @@ dp264_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | |||
394 | return isa_irq_fixup(dev, irq); | 394 | return isa_irq_fixup(dev, irq); |
395 | } | 395 | } |
396 | 396 | ||
397 | static int __init | 397 | static int |
398 | monet_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 398 | monet_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
399 | { | 399 | { |
400 | static char irq_tab[13][5] __initdata = { | 400 | static char irq_tab[13][5] = { |
401 | /*INT INTA INTB INTC INTD */ | 401 | /*INT INTA INTB INTC INTD */ |
402 | { 45, 45, 45, 45, 45}, /* IdSel 3 21143 PCI1 */ | 402 | { 45, 45, 45, 45, 45}, /* IdSel 3 21143 PCI1 */ |
403 | { -1, -1, -1, -1, -1}, /* IdSel 4 unused */ | 403 | { -1, -1, -1, -1, -1}, /* IdSel 4 unused */ |
@@ -423,7 +423,7 @@ monet_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | |||
423 | return isa_irq_fixup(dev, COMMON_TABLE_LOOKUP); | 423 | return isa_irq_fixup(dev, COMMON_TABLE_LOOKUP); |
424 | } | 424 | } |
425 | 425 | ||
426 | static u8 __init | 426 | static u8 |
427 | monet_swizzle(struct pci_dev *dev, u8 *pinp) | 427 | monet_swizzle(struct pci_dev *dev, u8 *pinp) |
428 | { | 428 | { |
429 | struct pci_controller *hose = dev->sysdata; | 429 | struct pci_controller *hose = dev->sysdata; |
@@ -456,10 +456,10 @@ monet_swizzle(struct pci_dev *dev, u8 *pinp) | |||
456 | return slot; | 456 | return slot; |
457 | } | 457 | } |
458 | 458 | ||
459 | static int __init | 459 | static int |
460 | webbrick_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 460 | webbrick_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
461 | { | 461 | { |
462 | static char irq_tab[13][5] __initdata = { | 462 | static char irq_tab[13][5] = { |
463 | /*INT INTA INTB INTC INTD */ | 463 | /*INT INTA INTB INTC INTD */ |
464 | { -1, -1, -1, -1, -1}, /* IdSel 7 ISA Bridge */ | 464 | { -1, -1, -1, -1, -1}, /* IdSel 7 ISA Bridge */ |
465 | { -1, -1, -1, -1, -1}, /* IdSel 8 unused */ | 465 | { -1, -1, -1, -1, -1}, /* IdSel 8 unused */ |
@@ -478,10 +478,10 @@ webbrick_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | |||
478 | return isa_irq_fixup(dev, COMMON_TABLE_LOOKUP); | 478 | return isa_irq_fixup(dev, COMMON_TABLE_LOOKUP); |
479 | } | 479 | } |
480 | 480 | ||
481 | static int __init | 481 | static int |
482 | clipper_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 482 | clipper_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
483 | { | 483 | { |
484 | static char irq_tab[7][5] __initdata = { | 484 | static char irq_tab[7][5] = { |
485 | /*INT INTA INTB INTC INTD */ | 485 | /*INT INTA INTB INTC INTD */ |
486 | { 16+ 8, 16+ 8, 16+ 9, 16+10, 16+11}, /* IdSel 1 slot 1 */ | 486 | { 16+ 8, 16+ 8, 16+ 9, 16+10, 16+11}, /* IdSel 1 slot 1 */ |
487 | { 16+12, 16+12, 16+13, 16+14, 16+15}, /* IdSel 2 slot 2 */ | 487 | { 16+12, 16+12, 16+13, 16+14, 16+15}, /* IdSel 2 slot 2 */ |
diff --git a/arch/alpha/kernel/sys_eb64p.c b/arch/alpha/kernel/sys_eb64p.c index ad40a425e841..372661c56537 100644 --- a/arch/alpha/kernel/sys_eb64p.c +++ b/arch/alpha/kernel/sys_eb64p.c | |||
@@ -167,10 +167,10 @@ eb64p_init_irq(void) | |||
167 | * comes in on. This makes interrupt processing much easier. | 167 | * comes in on. This makes interrupt processing much easier. |
168 | */ | 168 | */ |
169 | 169 | ||
170 | static int __init | 170 | static int |
171 | eb64p_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 171 | eb64p_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
172 | { | 172 | { |
173 | static char irq_tab[5][5] __initdata = { | 173 | static char irq_tab[5][5] = { |
174 | /*INT INTA INTB INTC INTD */ | 174 | /*INT INTA INTB INTC INTD */ |
175 | {16+7, 16+7, 16+7, 16+7, 16+7}, /* IdSel 5, slot ?, ?? */ | 175 | {16+7, 16+7, 16+7, 16+7, 16+7}, /* IdSel 5, slot ?, ?? */ |
176 | {16+0, 16+0, 16+2, 16+4, 16+9}, /* IdSel 6, slot ?, ?? */ | 176 | {16+0, 16+0, 16+2, 16+4, 16+9}, /* IdSel 6, slot ?, ?? */ |
diff --git a/arch/alpha/kernel/sys_eiger.c b/arch/alpha/kernel/sys_eiger.c index 15f42083bdb3..2731738b5872 100644 --- a/arch/alpha/kernel/sys_eiger.c +++ b/arch/alpha/kernel/sys_eiger.c | |||
@@ -141,7 +141,7 @@ eiger_init_irq(void) | |||
141 | } | 141 | } |
142 | } | 142 | } |
143 | 143 | ||
144 | static int __init | 144 | static int |
145 | eiger_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 145 | eiger_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
146 | { | 146 | { |
147 | u8 irq_orig; | 147 | u8 irq_orig; |
@@ -158,7 +158,7 @@ eiger_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | |||
158 | return irq_orig - 0x80; | 158 | return irq_orig - 0x80; |
159 | } | 159 | } |
160 | 160 | ||
161 | static u8 __init | 161 | static u8 |
162 | eiger_swizzle(struct pci_dev *dev, u8 *pinp) | 162 | eiger_swizzle(struct pci_dev *dev, u8 *pinp) |
163 | { | 163 | { |
164 | struct pci_controller *hose = dev->sysdata; | 164 | struct pci_controller *hose = dev->sysdata; |
diff --git a/arch/alpha/kernel/sys_miata.c b/arch/alpha/kernel/sys_miata.c index d5b9776a608d..731d693fa1f9 100644 --- a/arch/alpha/kernel/sys_miata.c +++ b/arch/alpha/kernel/sys_miata.c | |||
@@ -149,10 +149,10 @@ miata_init_irq(void) | |||
149 | * comes in on. This makes interrupt processing much easier. | 149 | * comes in on. This makes interrupt processing much easier. |
150 | */ | 150 | */ |
151 | 151 | ||
152 | static int __init | 152 | static int |
153 | miata_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 153 | miata_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
154 | { | 154 | { |
155 | static char irq_tab[18][5] __initdata = { | 155 | static char irq_tab[18][5] = { |
156 | /*INT INTA INTB INTC INTD */ | 156 | /*INT INTA INTB INTC INTD */ |
157 | {16+ 8, 16+ 8, 16+ 8, 16+ 8, 16+ 8}, /* IdSel 14, DC21142 */ | 157 | {16+ 8, 16+ 8, 16+ 8, 16+ 8, 16+ 8}, /* IdSel 14, DC21142 */ |
158 | { -1, -1, -1, -1, -1}, /* IdSel 15, EIDE */ | 158 | { -1, -1, -1, -1, -1}, /* IdSel 15, EIDE */ |
@@ -196,7 +196,7 @@ miata_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | |||
196 | return COMMON_TABLE_LOOKUP; | 196 | return COMMON_TABLE_LOOKUP; |
197 | } | 197 | } |
198 | 198 | ||
199 | static u8 __init | 199 | static u8 |
200 | miata_swizzle(struct pci_dev *dev, u8 *pinp) | 200 | miata_swizzle(struct pci_dev *dev, u8 *pinp) |
201 | { | 201 | { |
202 | int slot, pin = *pinp; | 202 | int slot, pin = *pinp; |
diff --git a/arch/alpha/kernel/sys_mikasa.c b/arch/alpha/kernel/sys_mikasa.c index 5e82dc1ad6f2..350ec9c8335b 100644 --- a/arch/alpha/kernel/sys_mikasa.c +++ b/arch/alpha/kernel/sys_mikasa.c | |||
@@ -145,10 +145,10 @@ mikasa_init_irq(void) | |||
145 | * comes in on. This makes interrupt processing much easier. | 145 | * comes in on. This makes interrupt processing much easier. |
146 | */ | 146 | */ |
147 | 147 | ||
148 | static int __init | 148 | static int |
149 | mikasa_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 149 | mikasa_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
150 | { | 150 | { |
151 | static char irq_tab[8][5] __initdata = { | 151 | static char irq_tab[8][5] = { |
152 | /*INT INTA INTB INTC INTD */ | 152 | /*INT INTA INTB INTC INTD */ |
153 | {16+12, 16+12, 16+12, 16+12, 16+12}, /* IdSel 17, SCSI */ | 153 | {16+12, 16+12, 16+12, 16+12, 16+12}, /* IdSel 17, SCSI */ |
154 | { -1, -1, -1, -1, -1}, /* IdSel 18, PCEB */ | 154 | { -1, -1, -1, -1, -1}, /* IdSel 18, PCEB */ |
diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c index 8ae04a121186..d019e4ce07bd 100644 --- a/arch/alpha/kernel/sys_nautilus.c +++ b/arch/alpha/kernel/sys_nautilus.c | |||
@@ -62,7 +62,7 @@ nautilus_init_irq(void) | |||
62 | common_init_isa_dma(); | 62 | common_init_isa_dma(); |
63 | } | 63 | } |
64 | 64 | ||
65 | static int __init | 65 | static int |
66 | nautilus_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 66 | nautilus_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
67 | { | 67 | { |
68 | /* Preserve the IRQ set up by the console. */ | 68 | /* Preserve the IRQ set up by the console. */ |
diff --git a/arch/alpha/kernel/sys_noritake.c b/arch/alpha/kernel/sys_noritake.c index 063e594fd969..2301678d9f9d 100644 --- a/arch/alpha/kernel/sys_noritake.c +++ b/arch/alpha/kernel/sys_noritake.c | |||
@@ -193,10 +193,10 @@ noritake_init_irq(void) | |||
193 | * comes in on. This makes interrupt processing much easier. | 193 | * comes in on. This makes interrupt processing much easier. |
194 | */ | 194 | */ |
195 | 195 | ||
196 | static int __init | 196 | static int |
197 | noritake_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 197 | noritake_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
198 | { | 198 | { |
199 | static char irq_tab[15][5] __initdata = { | 199 | static char irq_tab[15][5] = { |
200 | /*INT INTA INTB INTC INTD */ | 200 | /*INT INTA INTB INTC INTD */ |
201 | /* note: IDSELs 16, 17, and 25 are CORELLE only */ | 201 | /* note: IDSELs 16, 17, and 25 are CORELLE only */ |
202 | { 16+1, 16+1, 16+1, 16+1, 16+1}, /* IdSel 16, QLOGIC */ | 202 | { 16+1, 16+1, 16+1, 16+1, 16+1}, /* IdSel 16, QLOGIC */ |
@@ -221,7 +221,7 @@ noritake_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | |||
221 | return COMMON_TABLE_LOOKUP; | 221 | return COMMON_TABLE_LOOKUP; |
222 | } | 222 | } |
223 | 223 | ||
224 | static u8 __init | 224 | static u8 |
225 | noritake_swizzle(struct pci_dev *dev, u8 *pinp) | 225 | noritake_swizzle(struct pci_dev *dev, u8 *pinp) |
226 | { | 226 | { |
227 | int slot, pin = *pinp; | 227 | int slot, pin = *pinp; |
diff --git a/arch/alpha/kernel/sys_rawhide.c b/arch/alpha/kernel/sys_rawhide.c index dfd510ae5d8c..546822d07dc7 100644 --- a/arch/alpha/kernel/sys_rawhide.c +++ b/arch/alpha/kernel/sys_rawhide.c | |||
@@ -221,10 +221,10 @@ rawhide_init_irq(void) | |||
221 | * | 221 | * |
222 | */ | 222 | */ |
223 | 223 | ||
224 | static int __init | 224 | static int |
225 | rawhide_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 225 | rawhide_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
226 | { | 226 | { |
227 | static char irq_tab[5][5] __initdata = { | 227 | static char irq_tab[5][5] = { |
228 | /*INT INTA INTB INTC INTD */ | 228 | /*INT INTA INTB INTC INTD */ |
229 | { 16+16, 16+16, 16+16, 16+16, 16+16}, /* IdSel 1 SCSI PCI 1 */ | 229 | { 16+16, 16+16, 16+16, 16+16, 16+16}, /* IdSel 1 SCSI PCI 1 */ |
230 | { 16+ 0, 16+ 0, 16+ 1, 16+ 2, 16+ 3}, /* IdSel 2 slot 2 */ | 230 | { 16+ 0, 16+ 0, 16+ 1, 16+ 2, 16+ 3}, /* IdSel 2 slot 2 */ |
diff --git a/arch/alpha/kernel/sys_ruffian.c b/arch/alpha/kernel/sys_ruffian.c index a3f485257170..3b35e1913492 100644 --- a/arch/alpha/kernel/sys_ruffian.c +++ b/arch/alpha/kernel/sys_ruffian.c | |||
@@ -117,10 +117,10 @@ ruffian_kill_arch (int mode) | |||
117 | * | 117 | * |
118 | */ | 118 | */ |
119 | 119 | ||
120 | static int __init | 120 | static int |
121 | ruffian_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 121 | ruffian_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
122 | { | 122 | { |
123 | static char irq_tab[11][5] __initdata = { | 123 | static char irq_tab[11][5] = { |
124 | /*INT INTA INTB INTC INTD */ | 124 | /*INT INTA INTB INTC INTD */ |
125 | {-1, -1, -1, -1, -1}, /* IdSel 13, 21052 */ | 125 | {-1, -1, -1, -1, -1}, /* IdSel 13, 21052 */ |
126 | {-1, -1, -1, -1, -1}, /* IdSel 14, SIO */ | 126 | {-1, -1, -1, -1, -1}, /* IdSel 14, SIO */ |
@@ -139,7 +139,7 @@ ruffian_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | |||
139 | return COMMON_TABLE_LOOKUP; | 139 | return COMMON_TABLE_LOOKUP; |
140 | } | 140 | } |
141 | 141 | ||
142 | static u8 __init | 142 | static u8 |
143 | ruffian_swizzle(struct pci_dev *dev, u8 *pinp) | 143 | ruffian_swizzle(struct pci_dev *dev, u8 *pinp) |
144 | { | 144 | { |
145 | int slot, pin = *pinp; | 145 | int slot, pin = *pinp; |
diff --git a/arch/alpha/kernel/sys_rx164.c b/arch/alpha/kernel/sys_rx164.c index 08ee737d4fba..e178007107ef 100644 --- a/arch/alpha/kernel/sys_rx164.c +++ b/arch/alpha/kernel/sys_rx164.c | |||
@@ -142,7 +142,7 @@ rx164_init_irq(void) | |||
142 | * | 142 | * |
143 | */ | 143 | */ |
144 | 144 | ||
145 | static int __init | 145 | static int |
146 | rx164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 146 | rx164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
147 | { | 147 | { |
148 | #if 0 | 148 | #if 0 |
@@ -156,7 +156,7 @@ rx164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | |||
156 | { 16+1, 16+1, 16+6, 16+11, 16+16}, /* IdSel 10, slot 4 */ | 156 | { 16+1, 16+1, 16+6, 16+11, 16+16}, /* IdSel 10, slot 4 */ |
157 | }; | 157 | }; |
158 | #else | 158 | #else |
159 | static char irq_tab[6][5] __initdata = { | 159 | static char irq_tab[6][5] = { |
160 | /*INT INTA INTB INTC INTD */ | 160 | /*INT INTA INTB INTC INTD */ |
161 | { 16+0, 16+0, 16+6, 16+11, 16+16}, /* IdSel 5, slot 0 */ | 161 | { 16+0, 16+0, 16+6, 16+11, 16+16}, /* IdSel 5, slot 0 */ |
162 | { 16+1, 16+1, 16+7, 16+12, 16+17}, /* IdSel 6, slot 1 */ | 162 | { 16+1, 16+1, 16+7, 16+12, 16+17}, /* IdSel 6, slot 1 */ |
diff --git a/arch/alpha/kernel/sys_sable.c b/arch/alpha/kernel/sys_sable.c index 8a0aa6d67b53..86d259c2612d 100644 --- a/arch/alpha/kernel/sys_sable.c +++ b/arch/alpha/kernel/sys_sable.c | |||
@@ -192,10 +192,10 @@ sable_init_irq(void) | |||
192 | * with the values in the irq swizzling tables above. | 192 | * with the values in the irq swizzling tables above. |
193 | */ | 193 | */ |
194 | 194 | ||
195 | static int __init | 195 | static int |
196 | sable_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 196 | sable_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
197 | { | 197 | { |
198 | static char irq_tab[9][5] __initdata = { | 198 | static char irq_tab[9][5] = { |
199 | /*INT INTA INTB INTC INTD */ | 199 | /*INT INTA INTB INTC INTD */ |
200 | { 32+0, 32+0, 32+0, 32+0, 32+0}, /* IdSel 0, TULIP */ | 200 | { 32+0, 32+0, 32+0, 32+0, 32+0}, /* IdSel 0, TULIP */ |
201 | { 32+1, 32+1, 32+1, 32+1, 32+1}, /* IdSel 1, SCSI */ | 201 | { 32+1, 32+1, 32+1, 32+1, 32+1}, /* IdSel 1, SCSI */ |
@@ -374,10 +374,10 @@ lynx_init_irq(void) | |||
374 | * with the values in the irq swizzling tables above. | 374 | * with the values in the irq swizzling tables above. |
375 | */ | 375 | */ |
376 | 376 | ||
377 | static int __init | 377 | static int |
378 | lynx_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 378 | lynx_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
379 | { | 379 | { |
380 | static char irq_tab[19][5] __initdata = { | 380 | static char irq_tab[19][5] = { |
381 | /*INT INTA INTB INTC INTD */ | 381 | /*INT INTA INTB INTC INTD */ |
382 | { -1, -1, -1, -1, -1}, /* IdSel 13, PCEB */ | 382 | { -1, -1, -1, -1, -1}, /* IdSel 13, PCEB */ |
383 | { -1, -1, -1, -1, -1}, /* IdSel 14, PPB */ | 383 | { -1, -1, -1, -1, -1}, /* IdSel 14, PPB */ |
@@ -404,7 +404,7 @@ lynx_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | |||
404 | return COMMON_TABLE_LOOKUP; | 404 | return COMMON_TABLE_LOOKUP; |
405 | } | 405 | } |
406 | 406 | ||
407 | static u8 __init | 407 | static u8 |
408 | lynx_swizzle(struct pci_dev *dev, u8 *pinp) | 408 | lynx_swizzle(struct pci_dev *dev, u8 *pinp) |
409 | { | 409 | { |
410 | int slot, pin = *pinp; | 410 | int slot, pin = *pinp; |
diff --git a/arch/alpha/kernel/sys_sio.c b/arch/alpha/kernel/sys_sio.c index febd24eba7a6..9fd2895639d5 100644 --- a/arch/alpha/kernel/sys_sio.c +++ b/arch/alpha/kernel/sys_sio.c | |||
@@ -144,7 +144,7 @@ sio_fixup_irq_levels(unsigned int level_bits) | |||
144 | outb((level_bits >> 8) & 0xff, 0x4d1); | 144 | outb((level_bits >> 8) & 0xff, 0x4d1); |
145 | } | 145 | } |
146 | 146 | ||
147 | static inline int __init | 147 | static inline int |
148 | noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 148 | noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
149 | { | 149 | { |
150 | /* | 150 | /* |
@@ -165,7 +165,7 @@ noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | |||
165 | * that they use the default INTA line, if they are interrupt | 165 | * that they use the default INTA line, if they are interrupt |
166 | * driven at all). | 166 | * driven at all). |
167 | */ | 167 | */ |
168 | static char irq_tab[][5] __initdata = { | 168 | static char irq_tab[][5] = { |
169 | /*INT A B C D */ | 169 | /*INT A B C D */ |
170 | { 3, 3, 3, 3, 3}, /* idsel 6 (53c810) */ | 170 | { 3, 3, 3, 3, 3}, /* idsel 6 (53c810) */ |
171 | {-1, -1, -1, -1, -1}, /* idsel 7 (SIO: PCI/ISA bridge) */ | 171 | {-1, -1, -1, -1, -1}, /* idsel 7 (SIO: PCI/ISA bridge) */ |
@@ -183,10 +183,10 @@ noname_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | |||
183 | return irq >= 0 ? tmp : -1; | 183 | return irq >= 0 ? tmp : -1; |
184 | } | 184 | } |
185 | 185 | ||
186 | static inline int __init | 186 | static inline int |
187 | p2k_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 187 | p2k_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
188 | { | 188 | { |
189 | static char irq_tab[][5] __initdata = { | 189 | static char irq_tab[][5] = { |
190 | /*INT A B C D */ | 190 | /*INT A B C D */ |
191 | { 0, 0, -1, -1, -1}, /* idsel 6 (53c810) */ | 191 | { 0, 0, -1, -1, -1}, /* idsel 6 (53c810) */ |
192 | {-1, -1, -1, -1, -1}, /* idsel 7 (SIO: PCI/ISA bridge) */ | 192 | {-1, -1, -1, -1, -1}, /* idsel 7 (SIO: PCI/ISA bridge) */ |
diff --git a/arch/alpha/kernel/sys_sx164.c b/arch/alpha/kernel/sys_sx164.c index d063b360efed..23eee54d714a 100644 --- a/arch/alpha/kernel/sys_sx164.c +++ b/arch/alpha/kernel/sys_sx164.c | |||
@@ -94,10 +94,10 @@ sx164_init_irq(void) | |||
94 | * 9 32 bit PCI option slot 3 | 94 | * 9 32 bit PCI option slot 3 |
95 | */ | 95 | */ |
96 | 96 | ||
97 | static int __init | 97 | static int |
98 | sx164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 98 | sx164_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
99 | { | 99 | { |
100 | static char irq_tab[5][5] __initdata = { | 100 | static char irq_tab[5][5] = { |
101 | /*INT INTA INTB INTC INTD */ | 101 | /*INT INTA INTB INTC INTD */ |
102 | { 16+ 9, 16+ 9, 16+13, 16+17, 16+21}, /* IdSel 5 slot 2 J17 */ | 102 | { 16+ 9, 16+ 9, 16+13, 16+17, 16+21}, /* IdSel 5 slot 2 J17 */ |
103 | { 16+11, 16+11, 16+15, 16+19, 16+23}, /* IdSel 6 slot 0 J19 */ | 103 | { 16+11, 16+11, 16+15, 16+19, 16+23}, /* IdSel 6 slot 0 J19 */ |
diff --git a/arch/alpha/kernel/sys_takara.c b/arch/alpha/kernel/sys_takara.c index dd0f1eae3c68..9101f2bb6176 100644 --- a/arch/alpha/kernel/sys_takara.c +++ b/arch/alpha/kernel/sys_takara.c | |||
@@ -155,10 +155,10 @@ takara_init_irq(void) | |||
155 | * assign it whatever the hell IRQ we like and it doesn't matter. | 155 | * assign it whatever the hell IRQ we like and it doesn't matter. |
156 | */ | 156 | */ |
157 | 157 | ||
158 | static int __init | 158 | static int |
159 | takara_map_irq_srm(const struct pci_dev *dev, u8 slot, u8 pin) | 159 | takara_map_irq_srm(const struct pci_dev *dev, u8 slot, u8 pin) |
160 | { | 160 | { |
161 | static char irq_tab[15][5] __initdata = { | 161 | static char irq_tab[15][5] = { |
162 | { 16+3, 16+3, 16+3, 16+3, 16+3}, /* slot 6 == device 3 */ | 162 | { 16+3, 16+3, 16+3, 16+3, 16+3}, /* slot 6 == device 3 */ |
163 | { 16+2, 16+2, 16+2, 16+2, 16+2}, /* slot 7 == device 2 */ | 163 | { 16+2, 16+2, 16+2, 16+2, 16+2}, /* slot 7 == device 2 */ |
164 | { 16+1, 16+1, 16+1, 16+1, 16+1}, /* slot 8 == device 1 */ | 164 | { 16+1, 16+1, 16+1, 16+1, 16+1}, /* slot 8 == device 1 */ |
@@ -210,7 +210,7 @@ takara_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | |||
210 | return COMMON_TABLE_LOOKUP; | 210 | return COMMON_TABLE_LOOKUP; |
211 | } | 211 | } |
212 | 212 | ||
213 | static u8 __init | 213 | static u8 |
214 | takara_swizzle(struct pci_dev *dev, u8 *pinp) | 214 | takara_swizzle(struct pci_dev *dev, u8 *pinp) |
215 | { | 215 | { |
216 | int slot = PCI_SLOT(dev->devfn); | 216 | int slot = PCI_SLOT(dev->devfn); |
diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c index ee1874887776..c3f8b79fe214 100644 --- a/arch/alpha/kernel/sys_wildfire.c +++ b/arch/alpha/kernel/sys_wildfire.c | |||
@@ -288,10 +288,10 @@ wildfire_device_interrupt(unsigned long vector) | |||
288 | * 7 64 bit PCI 1 option slot 7 | 288 | * 7 64 bit PCI 1 option slot 7 |
289 | */ | 289 | */ |
290 | 290 | ||
291 | static int __init | 291 | static int |
292 | wildfire_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) | 292 | wildfire_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) |
293 | { | 293 | { |
294 | static char irq_tab[8][5] __initdata = { | 294 | static char irq_tab[8][5] = { |
295 | /*INT INTA INTB INTC INTD */ | 295 | /*INT INTA INTB INTC INTD */ |
296 | { -1, -1, -1, -1, -1}, /* IdSel 0 ISA Bridge */ | 296 | { -1, -1, -1, -1, -1}, /* IdSel 0 ISA Bridge */ |
297 | { 36, 36, 36+1, 36+2, 36+3}, /* IdSel 1 SCSI builtin */ | 297 | { 36, 36, 36+1, 36+2, 36+3}, /* IdSel 1 SCSI builtin */ |
diff --git a/arch/arc/boot/dts/hsdk.dts b/arch/arc/boot/dts/hsdk.dts index 8adde1b492f1..8f627c200d60 100644 --- a/arch/arc/boot/dts/hsdk.dts +++ b/arch/arc/boot/dts/hsdk.dts | |||
@@ -137,14 +137,15 @@ | |||
137 | /* | 137 | /* |
138 | * DW sdio controller has external ciu clock divider | 138 | * DW sdio controller has external ciu clock divider |
139 | * controlled via register in SDIO IP. Due to its | 139 | * controlled via register in SDIO IP. Due to its |
140 | * unexpected default value (it should devide by 1 | 140 | * unexpected default value (it should divide by 1 |
141 | * but it devides by 8) SDIO IP uses wrong clock and | 141 | * but it divides by 8) SDIO IP uses wrong clock and |
142 | * works unstable (see STAR 9001204800) | 142 | * works unstable (see STAR 9001204800) |
143 | * We switched to the minimum possible value of the | ||
144 | * divisor (div-by-2) in HSDK platform code. | ||
143 | * So add temporary fix and change clock frequency | 145 | * So add temporary fix and change clock frequency |
144 | * from 100000000 to 12500000 Hz until we fix dw sdio | 146 | * to 50000000 Hz until we fix dw sdio driver itself. |
145 | * driver itself. | ||
146 | */ | 147 | */ |
147 | clock-frequency = <12500000>; | 148 | clock-frequency = <50000000>; |
148 | #clock-cells = <0>; | 149 | #clock-cells = <0>; |
149 | }; | 150 | }; |
150 | 151 | ||
diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig index 15f0f6b5fec1..7b8f8faf8a24 100644 --- a/arch/arc/configs/hsdk_defconfig +++ b/arch/arc/configs/hsdk_defconfig | |||
@@ -63,7 +63,6 @@ CONFIG_MMC_SDHCI=y | |||
63 | CONFIG_MMC_SDHCI_PLTFM=y | 63 | CONFIG_MMC_SDHCI_PLTFM=y |
64 | CONFIG_MMC_DW=y | 64 | CONFIG_MMC_DW=y |
65 | # CONFIG_IOMMU_SUPPORT is not set | 65 | # CONFIG_IOMMU_SUPPORT is not set |
66 | CONFIG_RESET_HSDK=y | ||
67 | CONFIG_EXT3_FS=y | 66 | CONFIG_EXT3_FS=y |
68 | CONFIG_VFAT_FS=y | 67 | CONFIG_VFAT_FS=y |
69 | CONFIG_TMPFS=y | 68 | CONFIG_TMPFS=y |
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c index f46267153ec2..6df9d94a9537 100644 --- a/arch/arc/kernel/smp.c +++ b/arch/arc/kernel/smp.c | |||
@@ -23,6 +23,8 @@ | |||
23 | #include <linux/cpumask.h> | 23 | #include <linux/cpumask.h> |
24 | #include <linux/reboot.h> | 24 | #include <linux/reboot.h> |
25 | #include <linux/irqdomain.h> | 25 | #include <linux/irqdomain.h> |
26 | #include <linux/export.h> | ||
27 | |||
26 | #include <asm/processor.h> | 28 | #include <asm/processor.h> |
27 | #include <asm/setup.h> | 29 | #include <asm/setup.h> |
28 | #include <asm/mach_desc.h> | 30 | #include <asm/mach_desc.h> |
@@ -30,6 +32,9 @@ | |||
30 | #ifndef CONFIG_ARC_HAS_LLSC | 32 | #ifndef CONFIG_ARC_HAS_LLSC |
31 | arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED; | 33 | arch_spinlock_t smp_atomic_ops_lock = __ARCH_SPIN_LOCK_UNLOCKED; |
32 | arch_spinlock_t smp_bitops_lock = __ARCH_SPIN_LOCK_UNLOCKED; | 34 | arch_spinlock_t smp_bitops_lock = __ARCH_SPIN_LOCK_UNLOCKED; |
35 | |||
36 | EXPORT_SYMBOL_GPL(smp_atomic_ops_lock); | ||
37 | EXPORT_SYMBOL_GPL(smp_bitops_lock); | ||
33 | #endif | 38 | #endif |
34 | 39 | ||
35 | struct plat_smp_ops __weak plat_smp_ops; | 40 | struct plat_smp_ops __weak plat_smp_ops; |
diff --git a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig index bd08de4be75e..19ab3cf98f0f 100644 --- a/arch/arc/plat-hsdk/Kconfig +++ b/arch/arc/plat-hsdk/Kconfig | |||
@@ -8,3 +8,4 @@ | |||
8 | menuconfig ARC_SOC_HSDK | 8 | menuconfig ARC_SOC_HSDK |
9 | bool "ARC HS Development Kit SOC" | 9 | bool "ARC HS Development Kit SOC" |
10 | select CLK_HSDK | 10 | select CLK_HSDK |
11 | select RESET_HSDK | ||
diff --git a/arch/arc/plat-hsdk/platform.c b/arch/arc/plat-hsdk/platform.c index 744e62e58788..fd0ae5e38639 100644 --- a/arch/arc/plat-hsdk/platform.c +++ b/arch/arc/plat-hsdk/platform.c | |||
@@ -74,6 +74,10 @@ static void __init hsdk_set_cpu_freq_1ghz(void) | |||
74 | pr_err("Failed to setup CPU frequency to 1GHz!"); | 74 | pr_err("Failed to setup CPU frequency to 1GHz!"); |
75 | } | 75 | } |
76 | 76 | ||
77 | #define SDIO_BASE (ARC_PERIPHERAL_BASE + 0xA000) | ||
78 | #define SDIO_UHS_REG_EXT (SDIO_BASE + 0x108) | ||
79 | #define SDIO_UHS_REG_EXT_DIV_2 (2 << 30) | ||
80 | |||
77 | static void __init hsdk_init_early(void) | 81 | static void __init hsdk_init_early(void) |
78 | { | 82 | { |
79 | /* | 83 | /* |
@@ -90,6 +94,12 @@ static void __init hsdk_init_early(void) | |||
90 | writel(1, (void __iomem *) CREG_PAE_UPDATE); | 94 | writel(1, (void __iomem *) CREG_PAE_UPDATE); |
91 | 95 | ||
92 | /* | 96 | /* |
97 | * Switch SDIO external ciu clock divider from default div-by-8 to | ||
98 | * minimum possible div-by-2. | ||
99 | */ | ||
100 | iowrite32(SDIO_UHS_REG_EXT_DIV_2, (void __iomem *) SDIO_UHS_REG_EXT); | ||
101 | |||
102 | /* | ||
93 | * Setup CPU frequency to 1GHz. | 103 | * Setup CPU frequency to 1GHz. |
94 | * TODO: remove it after smart hsdk pll driver will be introduced. | 104 | * TODO: remove it after smart hsdk pll driver will be introduced. |
95 | */ | 105 | */ |
diff --git a/arch/arm/Makefile b/arch/arm/Makefile index 47d3a1ab08d2..817e5cfef83a 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile | |||
@@ -131,7 +131,7 @@ endif | |||
131 | KBUILD_CFLAGS +=$(CFLAGS_ABI) $(CFLAGS_ISA) $(arch-y) $(tune-y) $(call cc-option,-mshort-load-bytes,$(call cc-option,-malignment-traps,)) -msoft-float -Uarm | 131 | KBUILD_CFLAGS +=$(CFLAGS_ABI) $(CFLAGS_ISA) $(arch-y) $(tune-y) $(call cc-option,-mshort-load-bytes,$(call cc-option,-malignment-traps,)) -msoft-float -Uarm |
132 | KBUILD_AFLAGS +=$(CFLAGS_ABI) $(AFLAGS_ISA) $(arch-y) $(tune-y) -include asm/unified.h -msoft-float | 132 | KBUILD_AFLAGS +=$(CFLAGS_ABI) $(AFLAGS_ISA) $(arch-y) $(tune-y) -include asm/unified.h -msoft-float |
133 | 133 | ||
134 | CHECKFLAGS += -D__arm__ | 134 | CHECKFLAGS += -D__arm__ -m32 |
135 | 135 | ||
136 | #Default value | 136 | #Default value |
137 | head-y := arch/arm/kernel/head$(MMUEXT).o | 137 | head-y := arch/arm/kernel/head$(MMUEXT).o |
diff --git a/arch/arm/boot/compressed/debug.S b/arch/arm/boot/compressed/debug.S index 5392ee63338f..8f6e37177de1 100644 --- a/arch/arm/boot/compressed/debug.S +++ b/arch/arm/boot/compressed/debug.S | |||
@@ -23,7 +23,11 @@ ENTRY(putc) | |||
23 | strb r0, [r1] | 23 | strb r0, [r1] |
24 | mov r0, #0x03 @ SYS_WRITEC | 24 | mov r0, #0x03 @ SYS_WRITEC |
25 | ARM( svc #0x123456 ) | 25 | ARM( svc #0x123456 ) |
26 | #ifdef CONFIG_CPU_V7M | ||
27 | THUMB( bkpt #0xab ) | ||
28 | #else | ||
26 | THUMB( svc #0xab ) | 29 | THUMB( svc #0xab ) |
30 | #endif | ||
27 | mov pc, lr | 31 | mov pc, lr |
28 | .align 2 | 32 | .align 2 |
29 | 1: .word _GLOBAL_OFFSET_TABLE_ - . | 33 | 1: .word _GLOBAL_OFFSET_TABLE_ - . |
diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi index 7ff0811e61db..4960722aab32 100644 --- a/arch/arm/boot/dts/armada-38x.dtsi +++ b/arch/arm/boot/dts/armada-38x.dtsi | |||
@@ -178,7 +178,7 @@ | |||
178 | }; | 178 | }; |
179 | 179 | ||
180 | i2c0: i2c@11000 { | 180 | i2c0: i2c@11000 { |
181 | compatible = "marvell,mv64xxx-i2c"; | 181 | compatible = "marvell,mv78230-a0-i2c", "marvell,mv64xxx-i2c"; |
182 | reg = <0x11000 0x20>; | 182 | reg = <0x11000 0x20>; |
183 | #address-cells = <1>; | 183 | #address-cells = <1>; |
184 | #size-cells = <0>; | 184 | #size-cells = <0>; |
@@ -189,7 +189,7 @@ | |||
189 | }; | 189 | }; |
190 | 190 | ||
191 | i2c1: i2c@11100 { | 191 | i2c1: i2c@11100 { |
192 | compatible = "marvell,mv64xxx-i2c"; | 192 | compatible = "marvell,mv78230-a0-i2c", "marvell,mv64xxx-i2c"; |
193 | reg = <0x11100 0x20>; | 193 | reg = <0x11100 0x20>; |
194 | #address-cells = <1>; | 194 | #address-cells = <1>; |
195 | #size-cells = <0>; | 195 | #size-cells = <0>; |
diff --git a/arch/arm/boot/dts/at91-sama5d27_som1.dtsi b/arch/arm/boot/dts/at91-sama5d27_som1.dtsi index 63a5af898165..cf0087b4c9e1 100644 --- a/arch/arm/boot/dts/at91-sama5d27_som1.dtsi +++ b/arch/arm/boot/dts/at91-sama5d27_som1.dtsi | |||
@@ -67,8 +67,8 @@ | |||
67 | pinctrl-0 = <&pinctrl_macb0_default>; | 67 | pinctrl-0 = <&pinctrl_macb0_default>; |
68 | phy-mode = "rmii"; | 68 | phy-mode = "rmii"; |
69 | 69 | ||
70 | ethernet-phy@1 { | 70 | ethernet-phy@0 { |
71 | reg = <0x1>; | 71 | reg = <0x0>; |
72 | interrupt-parent = <&pioA>; | 72 | interrupt-parent = <&pioA>; |
73 | interrupts = <PIN_PD31 IRQ_TYPE_LEVEL_LOW>; | 73 | interrupts = <PIN_PD31 IRQ_TYPE_LEVEL_LOW>; |
74 | pinctrl-names = "default"; | 74 | pinctrl-names = "default"; |
diff --git a/arch/arm/boot/dts/at91-sama5d2_xplained.dts b/arch/arm/boot/dts/at91-sama5d2_xplained.dts index c7e9ccf2bc87..cbc26001247b 100644 --- a/arch/arm/boot/dts/at91-sama5d2_xplained.dts +++ b/arch/arm/boot/dts/at91-sama5d2_xplained.dts | |||
@@ -309,7 +309,7 @@ | |||
309 | vddana-supply = <&vdd_3v3_lp_reg>; | 309 | vddana-supply = <&vdd_3v3_lp_reg>; |
310 | vref-supply = <&vdd_3v3_lp_reg>; | 310 | vref-supply = <&vdd_3v3_lp_reg>; |
311 | pinctrl-names = "default"; | 311 | pinctrl-names = "default"; |
312 | pinctrl-0 = <&pinctrl_adc_default>; | 312 | pinctrl-0 = <&pinctrl_adc_default &pinctrl_adtrg_default>; |
313 | status = "okay"; | 313 | status = "okay"; |
314 | }; | 314 | }; |
315 | 315 | ||
@@ -340,6 +340,20 @@ | |||
340 | bias-disable; | 340 | bias-disable; |
341 | }; | 341 | }; |
342 | 342 | ||
343 | /* | ||
344 | * The ADTRG pin can work on any edge type. | ||
345 | * In here it's being pulled up, so need to | ||
346 | * connect it to ground to get an edge e.g. | ||
347 | * Trigger can be configured on falling, rise | ||
348 | * or any edge, and the pull-up can be changed | ||
349 | * to pull-down or left floating according to | ||
350 | * needs. | ||
351 | */ | ||
352 | pinctrl_adtrg_default: adtrg_default { | ||
353 | pinmux = <PIN_PD31__ADTRG>; | ||
354 | bias-pull-up; | ||
355 | }; | ||
356 | |||
343 | pinctrl_charger_chglev: charger_chglev { | 357 | pinctrl_charger_chglev: charger_chglev { |
344 | pinmux = <PIN_PA12__GPIO>; | 358 | pinmux = <PIN_PA12__GPIO>; |
345 | bias-disable; | 359 | bias-disable; |
diff --git a/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts b/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts index 82651c3eb682..b8565fc33eea 100644 --- a/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts +++ b/arch/arm/boot/dts/bcm2835-rpi-zero-w.dts | |||
@@ -18,12 +18,9 @@ | |||
18 | compatible = "raspberrypi,model-zero-w", "brcm,bcm2835"; | 18 | compatible = "raspberrypi,model-zero-w", "brcm,bcm2835"; |
19 | model = "Raspberry Pi Zero W"; | 19 | model = "Raspberry Pi Zero W"; |
20 | 20 | ||
21 | /* Needed by firmware to properly init UARTs */ | 21 | chosen { |
22 | aliases { | 22 | /* 8250 auxiliary UART instead of pl011 */ |
23 | uart0 = "/soc/serial@7e201000"; | 23 | stdout-path = "serial1:115200n8"; |
24 | uart1 = "/soc/serial@7e215040"; | ||
25 | serial0 = "/soc/serial@7e201000"; | ||
26 | serial1 = "/soc/serial@7e215040"; | ||
27 | }; | 24 | }; |
28 | 25 | ||
29 | leds { | 26 | leds { |
diff --git a/arch/arm/boot/dts/bcm2837-rpi-3-b.dts b/arch/arm/boot/dts/bcm2837-rpi-3-b.dts index 20725ca487f3..c71a0d73d2a2 100644 --- a/arch/arm/boot/dts/bcm2837-rpi-3-b.dts +++ b/arch/arm/boot/dts/bcm2837-rpi-3-b.dts | |||
@@ -8,6 +8,11 @@ | |||
8 | compatible = "raspberrypi,3-model-b", "brcm,bcm2837"; | 8 | compatible = "raspberrypi,3-model-b", "brcm,bcm2837"; |
9 | model = "Raspberry Pi 3 Model B"; | 9 | model = "Raspberry Pi 3 Model B"; |
10 | 10 | ||
11 | chosen { | ||
12 | /* 8250 auxiliary UART instead of pl011 */ | ||
13 | stdout-path = "serial1:115200n8"; | ||
14 | }; | ||
15 | |||
11 | memory { | 16 | memory { |
12 | reg = <0 0x40000000>; | 17 | reg = <0 0x40000000>; |
13 | }; | 18 | }; |
diff --git a/arch/arm/boot/dts/bcm283x.dtsi b/arch/arm/boot/dts/bcm283x.dtsi index 431dcfc900c0..013431e3d7c3 100644 --- a/arch/arm/boot/dts/bcm283x.dtsi +++ b/arch/arm/boot/dts/bcm283x.dtsi | |||
@@ -20,8 +20,13 @@ | |||
20 | #address-cells = <1>; | 20 | #address-cells = <1>; |
21 | #size-cells = <1>; | 21 | #size-cells = <1>; |
22 | 22 | ||
23 | aliases { | ||
24 | serial0 = &uart0; | ||
25 | serial1 = &uart1; | ||
26 | }; | ||
27 | |||
23 | chosen { | 28 | chosen { |
24 | bootargs = "earlyprintk console=ttyAMA0"; | 29 | stdout-path = "serial0:115200n8"; |
25 | }; | 30 | }; |
26 | 31 | ||
27 | thermal-zones { | 32 | thermal-zones { |
diff --git a/arch/arm/boot/dts/gemini.dtsi b/arch/arm/boot/dts/gemini.dtsi index c68e8d430234..f0d178c77153 100644 --- a/arch/arm/boot/dts/gemini.dtsi +++ b/arch/arm/boot/dts/gemini.dtsi | |||
@@ -145,11 +145,12 @@ | |||
145 | }; | 145 | }; |
146 | 146 | ||
147 | watchdog@41000000 { | 147 | watchdog@41000000 { |
148 | compatible = "cortina,gemini-watchdog"; | 148 | compatible = "cortina,gemini-watchdog", "faraday,ftwdt010"; |
149 | reg = <0x41000000 0x1000>; | 149 | reg = <0x41000000 0x1000>; |
150 | interrupts = <3 IRQ_TYPE_LEVEL_HIGH>; | 150 | interrupts = <3 IRQ_TYPE_LEVEL_HIGH>; |
151 | resets = <&syscon GEMINI_RESET_WDOG>; | 151 | resets = <&syscon GEMINI_RESET_WDOG>; |
152 | clocks = <&syscon GEMINI_CLK_APB>; | 152 | clocks = <&syscon GEMINI_CLK_APB>; |
153 | clock-names = "PCLK"; | ||
153 | }; | 154 | }; |
154 | 155 | ||
155 | uart0: serial@42000000 { | 156 | uart0: serial@42000000 { |
diff --git a/arch/arm/boot/dts/imx7d.dtsi b/arch/arm/boot/dts/imx7d.dtsi index f46814a7ea44..4d308d17f040 100644 --- a/arch/arm/boot/dts/imx7d.dtsi +++ b/arch/arm/boot/dts/imx7d.dtsi | |||
@@ -144,10 +144,10 @@ | |||
144 | interrupt-names = "msi"; | 144 | interrupt-names = "msi"; |
145 | #interrupt-cells = <1>; | 145 | #interrupt-cells = <1>; |
146 | interrupt-map-mask = <0 0 0 0x7>; | 146 | interrupt-map-mask = <0 0 0 0x7>; |
147 | interrupt-map = <0 0 0 1 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>, | 147 | interrupt-map = <0 0 0 1 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>, |
148 | <0 0 0 2 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>, | 148 | <0 0 0 2 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>, |
149 | <0 0 0 3 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>, | 149 | <0 0 0 3 &intc GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>, |
150 | <0 0 0 4 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>; | 150 | <0 0 0 4 &intc GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>; |
151 | clocks = <&clks IMX7D_PCIE_CTRL_ROOT_CLK>, | 151 | clocks = <&clks IMX7D_PCIE_CTRL_ROOT_CLK>, |
152 | <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>, | 152 | <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>, |
153 | <&clks IMX7D_PCIE_PHY_ROOT_CLK>; | 153 | <&clks IMX7D_PCIE_PHY_ROOT_CLK>; |
diff --git a/arch/arm/boot/dts/moxart.dtsi b/arch/arm/boot/dts/moxart.dtsi index 1f4c795d3f72..da7b3237bfe9 100644 --- a/arch/arm/boot/dts/moxart.dtsi +++ b/arch/arm/boot/dts/moxart.dtsi | |||
@@ -87,9 +87,10 @@ | |||
87 | }; | 87 | }; |
88 | 88 | ||
89 | watchdog: watchdog@98500000 { | 89 | watchdog: watchdog@98500000 { |
90 | compatible = "moxa,moxart-watchdog"; | 90 | compatible = "moxa,moxart-watchdog", "faraday,ftwdt010"; |
91 | reg = <0x98500000 0x10>; | 91 | reg = <0x98500000 0x10>; |
92 | clocks = <&clk_apb>; | 92 | clocks = <&clk_apb>; |
93 | clock-names = "PCLK"; | ||
93 | }; | 94 | }; |
94 | 95 | ||
95 | sdhci: sdhci@98e00000 { | 96 | sdhci: sdhci@98e00000 { |
diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi index 38d2216c7ead..b1a26b42d190 100644 --- a/arch/arm/boot/dts/sama5d2.dtsi +++ b/arch/arm/boot/dts/sama5d2.dtsi | |||
@@ -1430,6 +1430,7 @@ | |||
1430 | atmel,min-sample-rate-hz = <200000>; | 1430 | atmel,min-sample-rate-hz = <200000>; |
1431 | atmel,max-sample-rate-hz = <20000000>; | 1431 | atmel,max-sample-rate-hz = <20000000>; |
1432 | atmel,startup-time-ms = <4>; | 1432 | atmel,startup-time-ms = <4>; |
1433 | atmel,trigger-edge-type = <IRQ_TYPE_EDGE_RISING>; | ||
1433 | status = "disabled"; | 1434 | status = "disabled"; |
1434 | }; | 1435 | }; |
1435 | 1436 | ||
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi index b147cb0dc14b..eef072a21acc 100644 --- a/arch/arm/boot/dts/sun6i-a31.dtsi +++ b/arch/arm/boot/dts/sun6i-a31.dtsi | |||
@@ -311,8 +311,8 @@ | |||
311 | #size-cells = <0>; | 311 | #size-cells = <0>; |
312 | reg = <0>; | 312 | reg = <0>; |
313 | 313 | ||
314 | tcon1_in_drc1: endpoint@0 { | 314 | tcon1_in_drc1: endpoint@1 { |
315 | reg = <0>; | 315 | reg = <1>; |
316 | remote-endpoint = <&drc1_out_tcon1>; | 316 | remote-endpoint = <&drc1_out_tcon1>; |
317 | }; | 317 | }; |
318 | }; | 318 | }; |
@@ -1012,8 +1012,8 @@ | |||
1012 | #size-cells = <0>; | 1012 | #size-cells = <0>; |
1013 | reg = <1>; | 1013 | reg = <1>; |
1014 | 1014 | ||
1015 | be1_out_drc1: endpoint@0 { | 1015 | be1_out_drc1: endpoint@1 { |
1016 | reg = <0>; | 1016 | reg = <1>; |
1017 | remote-endpoint = <&drc1_in_be1>; | 1017 | remote-endpoint = <&drc1_in_be1>; |
1018 | }; | 1018 | }; |
1019 | }; | 1019 | }; |
@@ -1042,8 +1042,8 @@ | |||
1042 | #size-cells = <0>; | 1042 | #size-cells = <0>; |
1043 | reg = <0>; | 1043 | reg = <0>; |
1044 | 1044 | ||
1045 | drc1_in_be1: endpoint@0 { | 1045 | drc1_in_be1: endpoint@1 { |
1046 | reg = <0>; | 1046 | reg = <1>; |
1047 | remote-endpoint = <&be1_out_drc1>; | 1047 | remote-endpoint = <&be1_out_drc1>; |
1048 | }; | 1048 | }; |
1049 | }; | 1049 | }; |
@@ -1053,8 +1053,8 @@ | |||
1053 | #size-cells = <0>; | 1053 | #size-cells = <0>; |
1054 | reg = <1>; | 1054 | reg = <1>; |
1055 | 1055 | ||
1056 | drc1_out_tcon1: endpoint@0 { | 1056 | drc1_out_tcon1: endpoint@1 { |
1057 | reg = <0>; | 1057 | reg = <1>; |
1058 | remote-endpoint = <&tcon1_in_drc1>; | 1058 | remote-endpoint = <&tcon1_in_drc1>; |
1059 | }; | 1059 | }; |
1060 | }; | 1060 | }; |
diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S index ea9646cc2a0e..0a498cb3fad8 100644 --- a/arch/arm/kernel/debug.S +++ b/arch/arm/kernel/debug.S | |||
@@ -115,7 +115,11 @@ ENTRY(printascii) | |||
115 | mov r1, r0 | 115 | mov r1, r0 |
116 | mov r0, #0x04 @ SYS_WRITE0 | 116 | mov r0, #0x04 @ SYS_WRITE0 |
117 | ARM( svc #0x123456 ) | 117 | ARM( svc #0x123456 ) |
118 | #ifdef CONFIG_CPU_V7M | ||
119 | THUMB( bkpt #0xab ) | ||
120 | #else | ||
118 | THUMB( svc #0xab ) | 121 | THUMB( svc #0xab ) |
122 | #endif | ||
119 | ret lr | 123 | ret lr |
120 | ENDPROC(printascii) | 124 | ENDPROC(printascii) |
121 | 125 | ||
@@ -124,7 +128,11 @@ ENTRY(printch) | |||
124 | strb r0, [r1] | 128 | strb r0, [r1] |
125 | mov r0, #0x03 @ SYS_WRITEC | 129 | mov r0, #0x03 @ SYS_WRITEC |
126 | ARM( svc #0x123456 ) | 130 | ARM( svc #0x123456 ) |
131 | #ifdef CONFIG_CPU_V7M | ||
132 | THUMB( bkpt #0xab ) | ||
133 | #else | ||
127 | THUMB( svc #0xab ) | 134 | THUMB( svc #0xab ) |
135 | #endif | ||
128 | ret lr | 136 | ret lr |
129 | ENDPROC(printch) | 137 | ENDPROC(printch) |
130 | 138 | ||
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c index 71a34e8c345a..57058ac46f49 100644 --- a/arch/arm/mach-ux500/cpu-db8500.c +++ b/arch/arm/mach-ux500/cpu-db8500.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <asm/mach/arch.h> | 32 | #include <asm/mach/arch.h> |
33 | 33 | ||
34 | #include "db8500-regs.h" | 34 | #include "db8500-regs.h" |
35 | #include "pm_domains.h" | ||
35 | 36 | ||
36 | static int __init ux500_l2x0_unlock(void) | 37 | static int __init ux500_l2x0_unlock(void) |
37 | { | 38 | { |
@@ -157,6 +158,9 @@ static const struct of_device_id u8500_local_bus_nodes[] = { | |||
157 | 158 | ||
158 | static void __init u8500_init_machine(void) | 159 | static void __init u8500_init_machine(void) |
159 | { | 160 | { |
161 | /* Initialize ux500 power domains */ | ||
162 | ux500_pm_domains_init(); | ||
163 | |||
160 | /* automatically probe child nodes of dbx5x0 devices */ | 164 | /* automatically probe child nodes of dbx5x0 devices */ |
161 | if (of_machine_is_compatible("st-ericsson,u8540")) | 165 | if (of_machine_is_compatible("st-ericsson,u8540")) |
162 | of_platform_populate(NULL, u8500_local_bus_nodes, | 166 | of_platform_populate(NULL, u8500_local_bus_nodes, |
diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c index a970e7fcba9e..f6c33a0c1c61 100644 --- a/arch/arm/mach-ux500/pm.c +++ b/arch/arm/mach-ux500/pm.c | |||
@@ -19,7 +19,6 @@ | |||
19 | #include <linux/of_address.h> | 19 | #include <linux/of_address.h> |
20 | 20 | ||
21 | #include "db8500-regs.h" | 21 | #include "db8500-regs.h" |
22 | #include "pm_domains.h" | ||
23 | 22 | ||
24 | /* ARM WFI Standby signal register */ | 23 | /* ARM WFI Standby signal register */ |
25 | #define PRCM_ARM_WFI_STANDBY (prcmu_base + 0x130) | 24 | #define PRCM_ARM_WFI_STANDBY (prcmu_base + 0x130) |
@@ -203,7 +202,4 @@ void __init ux500_pm_init(u32 phy_base, u32 size) | |||
203 | 202 | ||
204 | /* Set up ux500 suspend callbacks. */ | 203 | /* Set up ux500 suspend callbacks. */ |
205 | suspend_set_ops(UX500_SUSPEND_OPS); | 204 | suspend_set_ops(UX500_SUSPEND_OPS); |
206 | |||
207 | /* Initialize ux500 power domains */ | ||
208 | ux500_pm_domains_init(); | ||
209 | } | 205 | } |
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 3b8e728cc944..91537d90f5f5 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c | |||
@@ -344,6 +344,11 @@ void __init arm_mm_memblock_reserve(void) | |||
344 | * reserved here. | 344 | * reserved here. |
345 | */ | 345 | */ |
346 | #endif | 346 | #endif |
347 | /* | ||
348 | * In any case, always ensure address 0 is never used as many things | ||
349 | * get very confused if 0 is returned as a legitimate address. | ||
350 | */ | ||
351 | memblock_reserve(0, 1); | ||
347 | } | 352 | } |
348 | 353 | ||
349 | void __init adjust_lowmem_bounds(void) | 354 | void __init adjust_lowmem_bounds(void) |
diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c index e71eefa2e427..0641ba54ab62 100644 --- a/arch/arm/xen/p2m.c +++ b/arch/arm/xen/p2m.c | |||
@@ -1,7 +1,7 @@ | |||
1 | #include <linux/bootmem.h> | 1 | #include <linux/bootmem.h> |
2 | #include <linux/gfp.h> | 2 | #include <linux/gfp.h> |
3 | #include <linux/export.h> | 3 | #include <linux/export.h> |
4 | #include <linux/rwlock.h> | 4 | #include <linux/spinlock.h> |
5 | #include <linux/slab.h> | 5 | #include <linux/slab.h> |
6 | #include <linux/types.h> | 6 | #include <linux/types.h> |
7 | #include <linux/dma-mapping.h> | 7 | #include <linux/dma-mapping.h> |
diff --git a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts index caf8b6fbe5e3..d06e34b5d192 100644 --- a/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts +++ b/arch/arm64/boot/dts/allwinner/sun50i-a64-pine64.dts | |||
@@ -61,13 +61,6 @@ | |||
61 | chosen { | 61 | chosen { |
62 | stdout-path = "serial0:115200n8"; | 62 | stdout-path = "serial0:115200n8"; |
63 | }; | 63 | }; |
64 | |||
65 | reg_vcc3v3: vcc3v3 { | ||
66 | compatible = "regulator-fixed"; | ||
67 | regulator-name = "vcc3v3"; | ||
68 | regulator-min-microvolt = <3300000>; | ||
69 | regulator-max-microvolt = <3300000>; | ||
70 | }; | ||
71 | }; | 64 | }; |
72 | 65 | ||
73 | &ehci0 { | 66 | &ehci0 { |
@@ -91,7 +84,7 @@ | |||
91 | &mmc0 { | 84 | &mmc0 { |
92 | pinctrl-names = "default"; | 85 | pinctrl-names = "default"; |
93 | pinctrl-0 = <&mmc0_pins>; | 86 | pinctrl-0 = <&mmc0_pins>; |
94 | vmmc-supply = <®_vcc3v3>; | 87 | vmmc-supply = <®_dcdc1>; |
95 | cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>; | 88 | cd-gpios = <&pio 5 6 GPIO_ACTIVE_HIGH>; |
96 | cd-inverted; | 89 | cd-inverted; |
97 | disable-wp; | 90 | disable-wp; |
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi index 8263a8a504a8..f2aa2a81de4d 100644 --- a/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-cp110-master.dtsi | |||
@@ -336,7 +336,7 @@ | |||
336 | /* non-prefetchable memory */ | 336 | /* non-prefetchable memory */ |
337 | 0x82000000 0 0xf6000000 0 0xf6000000 0 0xf00000>; | 337 | 0x82000000 0 0xf6000000 0 0xf6000000 0 0xf00000>; |
338 | interrupt-map-mask = <0 0 0 0>; | 338 | interrupt-map-mask = <0 0 0 0>; |
339 | interrupt-map = <0 0 0 0 &cpm_icu 0 ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>; | 339 | interrupt-map = <0 0 0 0 &cpm_icu ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>; |
340 | interrupts = <ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>; | 340 | interrupts = <ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>; |
341 | num-lanes = <1>; | 341 | num-lanes = <1>; |
342 | clocks = <&cpm_clk 1 13>; | 342 | clocks = <&cpm_clk 1 13>; |
@@ -362,7 +362,7 @@ | |||
362 | /* non-prefetchable memory */ | 362 | /* non-prefetchable memory */ |
363 | 0x82000000 0 0xf7000000 0 0xf7000000 0 0xf00000>; | 363 | 0x82000000 0 0xf7000000 0 0xf7000000 0 0xf00000>; |
364 | interrupt-map-mask = <0 0 0 0>; | 364 | interrupt-map-mask = <0 0 0 0>; |
365 | interrupt-map = <0 0 0 0 &cpm_icu 0 ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>; | 365 | interrupt-map = <0 0 0 0 &cpm_icu ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>; |
366 | interrupts = <ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>; | 366 | interrupts = <ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>; |
367 | 367 | ||
368 | num-lanes = <1>; | 368 | num-lanes = <1>; |
@@ -389,7 +389,7 @@ | |||
389 | /* non-prefetchable memory */ | 389 | /* non-prefetchable memory */ |
390 | 0x82000000 0 0xf8000000 0 0xf8000000 0 0xf00000>; | 390 | 0x82000000 0 0xf8000000 0 0xf8000000 0 0xf00000>; |
391 | interrupt-map-mask = <0 0 0 0>; | 391 | interrupt-map-mask = <0 0 0 0>; |
392 | interrupt-map = <0 0 0 0 &cpm_icu 0 ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>; | 392 | interrupt-map = <0 0 0 0 &cpm_icu ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>; |
393 | interrupts = <ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>; | 393 | interrupts = <ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>; |
394 | 394 | ||
395 | num-lanes = <1>; | 395 | num-lanes = <1>; |
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi index b71ee6c83668..4fe70323abb3 100644 --- a/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi +++ b/arch/arm64/boot/dts/marvell/armada-cp110-slave.dtsi | |||
@@ -335,7 +335,7 @@ | |||
335 | /* non-prefetchable memory */ | 335 | /* non-prefetchable memory */ |
336 | 0x82000000 0 0xfa000000 0 0xfa000000 0 0xf00000>; | 336 | 0x82000000 0 0xfa000000 0 0xfa000000 0 0xf00000>; |
337 | interrupt-map-mask = <0 0 0 0>; | 337 | interrupt-map-mask = <0 0 0 0>; |
338 | interrupt-map = <0 0 0 0 &cps_icu 0 ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>; | 338 | interrupt-map = <0 0 0 0 &cps_icu ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>; |
339 | interrupts = <ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>; | 339 | interrupts = <ICU_GRP_NSR 22 IRQ_TYPE_LEVEL_HIGH>; |
340 | num-lanes = <1>; | 340 | num-lanes = <1>; |
341 | clocks = <&cps_clk 1 13>; | 341 | clocks = <&cps_clk 1 13>; |
@@ -361,7 +361,7 @@ | |||
361 | /* non-prefetchable memory */ | 361 | /* non-prefetchable memory */ |
362 | 0x82000000 0 0xfb000000 0 0xfb000000 0 0xf00000>; | 362 | 0x82000000 0 0xfb000000 0 0xfb000000 0 0xf00000>; |
363 | interrupt-map-mask = <0 0 0 0>; | 363 | interrupt-map-mask = <0 0 0 0>; |
364 | interrupt-map = <0 0 0 0 &cps_icu 0 ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>; | 364 | interrupt-map = <0 0 0 0 &cps_icu ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>; |
365 | interrupts = <ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>; | 365 | interrupts = <ICU_GRP_NSR 24 IRQ_TYPE_LEVEL_HIGH>; |
366 | 366 | ||
367 | num-lanes = <1>; | 367 | num-lanes = <1>; |
@@ -388,7 +388,7 @@ | |||
388 | /* non-prefetchable memory */ | 388 | /* non-prefetchable memory */ |
389 | 0x82000000 0 0xfc000000 0 0xfc000000 0 0xf00000>; | 389 | 0x82000000 0 0xfc000000 0 0xfc000000 0 0xf00000>; |
390 | interrupt-map-mask = <0 0 0 0>; | 390 | interrupt-map-mask = <0 0 0 0>; |
391 | interrupt-map = <0 0 0 0 &cps_icu 0 ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>; | 391 | interrupt-map = <0 0 0 0 &cps_icu ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>; |
392 | interrupts = <ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>; | 392 | interrupts = <ICU_GRP_NSR 23 IRQ_TYPE_LEVEL_HIGH>; |
393 | 393 | ||
394 | num-lanes = <1>; | 394 | num-lanes = <1>; |
diff --git a/arch/arm64/boot/dts/renesas/salvator-common.dtsi b/arch/arm64/boot/dts/renesas/salvator-common.dtsi index 4786c67b5e65..d9d885006a8e 100644 --- a/arch/arm64/boot/dts/renesas/salvator-common.dtsi +++ b/arch/arm64/boot/dts/renesas/salvator-common.dtsi | |||
@@ -62,6 +62,7 @@ | |||
62 | brightness-levels = <256 128 64 16 8 4 0>; | 62 | brightness-levels = <256 128 64 16 8 4 0>; |
63 | default-brightness-level = <6>; | 63 | default-brightness-level = <6>; |
64 | 64 | ||
65 | power-supply = <®_12v>; | ||
65 | enable-gpios = <&gpio6 7 GPIO_ACTIVE_HIGH>; | 66 | enable-gpios = <&gpio6 7 GPIO_ACTIVE_HIGH>; |
66 | }; | 67 | }; |
67 | 68 | ||
@@ -83,6 +84,15 @@ | |||
83 | regulator-always-on; | 84 | regulator-always-on; |
84 | }; | 85 | }; |
85 | 86 | ||
87 | reg_12v: regulator2 { | ||
88 | compatible = "regulator-fixed"; | ||
89 | regulator-name = "fixed-12V"; | ||
90 | regulator-min-microvolt = <12000000>; | ||
91 | regulator-max-microvolt = <12000000>; | ||
92 | regulator-boot-on; | ||
93 | regulator-always-on; | ||
94 | }; | ||
95 | |||
86 | rsnd_ak4613: sound { | 96 | rsnd_ak4613: sound { |
87 | compatible = "simple-audio-card"; | 97 | compatible = "simple-audio-card"; |
88 | 98 | ||
diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi index 6d615cb6e64d..41d61840fb99 100644 --- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi | |||
@@ -582,7 +582,7 @@ | |||
582 | vop_mmu: iommu@ff373f00 { | 582 | vop_mmu: iommu@ff373f00 { |
583 | compatible = "rockchip,iommu"; | 583 | compatible = "rockchip,iommu"; |
584 | reg = <0x0 0xff373f00 0x0 0x100>; | 584 | reg = <0x0 0xff373f00 0x0 0x100>; |
585 | interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH 0>; | 585 | interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>; |
586 | interrupt-names = "vop_mmu"; | 586 | interrupt-names = "vop_mmu"; |
587 | #iommu-cells = <0>; | 587 | #iommu-cells = <0>; |
588 | status = "disabled"; | 588 | status = "disabled"; |
diff --git a/arch/arm64/boot/dts/rockchip/rk3368.dtsi b/arch/arm64/boot/dts/rockchip/rk3368.dtsi index 19fbaa5e7bdd..1070c8264c13 100644 --- a/arch/arm64/boot/dts/rockchip/rk3368.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3368.dtsi | |||
@@ -740,7 +740,7 @@ | |||
740 | iep_mmu: iommu@ff900800 { | 740 | iep_mmu: iommu@ff900800 { |
741 | compatible = "rockchip,iommu"; | 741 | compatible = "rockchip,iommu"; |
742 | reg = <0x0 0xff900800 0x0 0x100>; | 742 | reg = <0x0 0xff900800 0x0 0x100>; |
743 | interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH 0>; | 743 | interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>; |
744 | interrupt-names = "iep_mmu"; | 744 | interrupt-names = "iep_mmu"; |
745 | #iommu-cells = <0>; | 745 | #iommu-cells = <0>; |
746 | status = "disabled"; | 746 | status = "disabled"; |
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts index 7fd4bfcaa38e..fef82274a39d 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-firefly.dts | |||
@@ -371,10 +371,10 @@ | |||
371 | regulator-always-on; | 371 | regulator-always-on; |
372 | regulator-boot-on; | 372 | regulator-boot-on; |
373 | regulator-min-microvolt = <1800000>; | 373 | regulator-min-microvolt = <1800000>; |
374 | regulator-max-microvolt = <3300000>; | 374 | regulator-max-microvolt = <3000000>; |
375 | regulator-state-mem { | 375 | regulator-state-mem { |
376 | regulator-on-in-suspend; | 376 | regulator-on-in-suspend; |
377 | regulator-suspend-microvolt = <3300000>; | 377 | regulator-suspend-microvolt = <3000000>; |
378 | }; | 378 | }; |
379 | }; | 379 | }; |
380 | 380 | ||
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi index 53ff3d191a1d..910628d18add 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi | |||
@@ -325,12 +325,12 @@ | |||
325 | vcc_sd: LDO_REG4 { | 325 | vcc_sd: LDO_REG4 { |
326 | regulator-name = "vcc_sd"; | 326 | regulator-name = "vcc_sd"; |
327 | regulator-min-microvolt = <1800000>; | 327 | regulator-min-microvolt = <1800000>; |
328 | regulator-max-microvolt = <3300000>; | 328 | regulator-max-microvolt = <3000000>; |
329 | regulator-always-on; | 329 | regulator-always-on; |
330 | regulator-boot-on; | 330 | regulator-boot-on; |
331 | regulator-state-mem { | 331 | regulator-state-mem { |
332 | regulator-on-in-suspend; | 332 | regulator-on-in-suspend; |
333 | regulator-suspend-microvolt = <3300000>; | 333 | regulator-suspend-microvolt = <3000000>; |
334 | }; | 334 | }; |
335 | }; | 335 | }; |
336 | 336 | ||
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi index 6c30bb02210d..0f873c897d0d 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-sapphire.dtsi | |||
@@ -315,10 +315,10 @@ | |||
315 | regulator-always-on; | 315 | regulator-always-on; |
316 | regulator-boot-on; | 316 | regulator-boot-on; |
317 | regulator-min-microvolt = <1800000>; | 317 | regulator-min-microvolt = <1800000>; |
318 | regulator-max-microvolt = <3300000>; | 318 | regulator-max-microvolt = <3000000>; |
319 | regulator-state-mem { | 319 | regulator-state-mem { |
320 | regulator-on-in-suspend; | 320 | regulator-on-in-suspend; |
321 | regulator-suspend-microvolt = <3300000>; | 321 | regulator-suspend-microvolt = <3000000>; |
322 | }; | 322 | }; |
323 | }; | 323 | }; |
324 | 324 | ||
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h index 903f3bf48419..7e25c5cc353a 100644 --- a/arch/mips/include/asm/cmpxchg.h +++ b/arch/mips/include/asm/cmpxchg.h | |||
@@ -155,14 +155,16 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old, | |||
155 | return __cmpxchg_small(ptr, old, new, size); | 155 | return __cmpxchg_small(ptr, old, new, size); |
156 | 156 | ||
157 | case 4: | 157 | case 4: |
158 | return __cmpxchg_asm("ll", "sc", (volatile u32 *)ptr, old, new); | 158 | return __cmpxchg_asm("ll", "sc", (volatile u32 *)ptr, |
159 | (u32)old, new); | ||
159 | 160 | ||
160 | case 8: | 161 | case 8: |
161 | /* lld/scd are only available for MIPS64 */ | 162 | /* lld/scd are only available for MIPS64 */ |
162 | if (!IS_ENABLED(CONFIG_64BIT)) | 163 | if (!IS_ENABLED(CONFIG_64BIT)) |
163 | return __cmpxchg_called_with_bad_pointer(); | 164 | return __cmpxchg_called_with_bad_pointer(); |
164 | 165 | ||
165 | return __cmpxchg_asm("lld", "scd", (volatile u64 *)ptr, old, new); | 166 | return __cmpxchg_asm("lld", "scd", (volatile u64 *)ptr, |
167 | (u64)old, new); | ||
166 | 168 | ||
167 | default: | 169 | default: |
168 | return __cmpxchg_called_with_bad_pointer(); | 170 | return __cmpxchg_called_with_bad_pointer(); |
diff --git a/arch/mips/loongson32/common/platform.c b/arch/mips/loongson32/common/platform.c index 100f23dfa438..ac584c5823d0 100644 --- a/arch/mips/loongson32/common/platform.c +++ b/arch/mips/loongson32/common/platform.c | |||
@@ -183,18 +183,20 @@ int ls1x_eth_mux_init(struct platform_device *pdev, void *priv) | |||
183 | } | 183 | } |
184 | 184 | ||
185 | static struct plat_stmmacenet_data ls1x_eth0_pdata = { | 185 | static struct plat_stmmacenet_data ls1x_eth0_pdata = { |
186 | .bus_id = 0, | 186 | .bus_id = 0, |
187 | .phy_addr = -1, | 187 | .phy_addr = -1, |
188 | #if defined(CONFIG_LOONGSON1_LS1B) | 188 | #if defined(CONFIG_LOONGSON1_LS1B) |
189 | .interface = PHY_INTERFACE_MODE_MII, | 189 | .interface = PHY_INTERFACE_MODE_MII, |
190 | #elif defined(CONFIG_LOONGSON1_LS1C) | 190 | #elif defined(CONFIG_LOONGSON1_LS1C) |
191 | .interface = PHY_INTERFACE_MODE_RMII, | 191 | .interface = PHY_INTERFACE_MODE_RMII, |
192 | #endif | 192 | #endif |
193 | .mdio_bus_data = &ls1x_mdio_bus_data, | 193 | .mdio_bus_data = &ls1x_mdio_bus_data, |
194 | .dma_cfg = &ls1x_eth_dma_cfg, | 194 | .dma_cfg = &ls1x_eth_dma_cfg, |
195 | .has_gmac = 1, | 195 | .has_gmac = 1, |
196 | .tx_coe = 1, | 196 | .tx_coe = 1, |
197 | .init = ls1x_eth_mux_init, | 197 | .rx_queues_to_use = 1, |
198 | .tx_queues_to_use = 1, | ||
199 | .init = ls1x_eth_mux_init, | ||
198 | }; | 200 | }; |
199 | 201 | ||
200 | static struct resource ls1x_eth0_resources[] = { | 202 | static struct resource ls1x_eth0_resources[] = { |
@@ -222,14 +224,16 @@ struct platform_device ls1x_eth0_pdev = { | |||
222 | 224 | ||
223 | #ifdef CONFIG_LOONGSON1_LS1B | 225 | #ifdef CONFIG_LOONGSON1_LS1B |
224 | static struct plat_stmmacenet_data ls1x_eth1_pdata = { | 226 | static struct plat_stmmacenet_data ls1x_eth1_pdata = { |
225 | .bus_id = 1, | 227 | .bus_id = 1, |
226 | .phy_addr = -1, | 228 | .phy_addr = -1, |
227 | .interface = PHY_INTERFACE_MODE_MII, | 229 | .interface = PHY_INTERFACE_MODE_MII, |
228 | .mdio_bus_data = &ls1x_mdio_bus_data, | 230 | .mdio_bus_data = &ls1x_mdio_bus_data, |
229 | .dma_cfg = &ls1x_eth_dma_cfg, | 231 | .dma_cfg = &ls1x_eth_dma_cfg, |
230 | .has_gmac = 1, | 232 | .has_gmac = 1, |
231 | .tx_coe = 1, | 233 | .tx_coe = 1, |
232 | .init = ls1x_eth_mux_init, | 234 | .rx_queues_to_use = 1, |
235 | .tx_queues_to_use = 1, | ||
236 | .init = ls1x_eth_mux_init, | ||
233 | }; | 237 | }; |
234 | 238 | ||
235 | static struct resource ls1x_eth1_resources[] = { | 239 | static struct resource ls1x_eth1_resources[] = { |
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index 192542dbd972..16d9ef5a78c5 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c | |||
@@ -2558,7 +2558,6 @@ dcopuop: | |||
2558 | break; | 2558 | break; |
2559 | default: | 2559 | default: |
2560 | /* Reserved R6 ops */ | 2560 | /* Reserved R6 ops */ |
2561 | pr_err("Reserved MIPS R6 CMP.condn.S operation\n"); | ||
2562 | return SIGILL; | 2561 | return SIGILL; |
2563 | } | 2562 | } |
2564 | } | 2563 | } |
@@ -2719,7 +2718,6 @@ dcopuop: | |||
2719 | break; | 2718 | break; |
2720 | default: | 2719 | default: |
2721 | /* Reserved R6 ops */ | 2720 | /* Reserved R6 ops */ |
2722 | pr_err("Reserved MIPS R6 CMP.condn.D operation\n"); | ||
2723 | return SIGILL; | 2721 | return SIGILL; |
2724 | } | 2722 | } |
2725 | } | 2723 | } |
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c index 7646891c4e9b..01b7a87ea678 100644 --- a/arch/mips/net/ebpf_jit.c +++ b/arch/mips/net/ebpf_jit.c | |||
@@ -667,7 +667,7 @@ static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, | |||
667 | { | 667 | { |
668 | int src, dst, r, td, ts, mem_off, b_off; | 668 | int src, dst, r, td, ts, mem_off, b_off; |
669 | bool need_swap, did_move, cmp_eq; | 669 | bool need_swap, did_move, cmp_eq; |
670 | unsigned int target; | 670 | unsigned int target = 0; |
671 | u64 t64; | 671 | u64 t64; |
672 | s64 t64s; | 672 | s64 t64s; |
673 | int bpf_op = BPF_OP(insn->code); | 673 | int bpf_op = BPF_OP(insn->code); |
diff --git a/arch/mips/tools/generic-board-config.sh b/arch/mips/tools/generic-board-config.sh index 5c4f93687039..654d652d7fa1 100755 --- a/arch/mips/tools/generic-board-config.sh +++ b/arch/mips/tools/generic-board-config.sh | |||
@@ -30,8 +30,6 @@ cfg="$4" | |||
30 | boards_origin="$5" | 30 | boards_origin="$5" |
31 | shift 5 | 31 | shift 5 |
32 | 32 | ||
33 | cd "${srctree}" | ||
34 | |||
35 | # Only print Skipping... lines if the user explicitly specified BOARDS=. In the | 33 | # Only print Skipping... lines if the user explicitly specified BOARDS=. In the |
36 | # general case it only serves to obscure the useful output about what actually | 34 | # general case it only serves to obscure the useful output about what actually |
37 | # was included. | 35 | # was included. |
@@ -48,7 +46,7 @@ environment*) | |||
48 | esac | 46 | esac |
49 | 47 | ||
50 | for board in $@; do | 48 | for board in $@; do |
51 | board_cfg="arch/mips/configs/generic/board-${board}.config" | 49 | board_cfg="${srctree}/arch/mips/configs/generic/board-${board}.config" |
52 | if [ ! -f "${board_cfg}" ]; then | 50 | if [ ! -f "${board_cfg}" ]; then |
53 | echo "WARNING: Board config '${board_cfg}' not found" | 51 | echo "WARNING: Board config '${board_cfg}' not found" |
54 | continue | 52 | continue |
@@ -84,7 +82,7 @@ for board in $@; do | |||
84 | done || continue | 82 | done || continue |
85 | 83 | ||
86 | # Merge this board config fragment into our final config file | 84 | # Merge this board config fragment into our final config file |
87 | ./scripts/kconfig/merge_config.sh \ | 85 | ${srctree}/scripts/kconfig/merge_config.sh \ |
88 | -m -O ${objtree} ${cfg} ${board_cfg} \ | 86 | -m -O ${objtree} ${cfg} ${board_cfg} \ |
89 | | grep -Ev '^(#|Using)' | 87 | | grep -Ev '^(#|Using)' |
90 | done | 88 | done |
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c index c6d6272a934f..7baa2265d439 100644 --- a/arch/parisc/kernel/parisc_ksyms.c +++ b/arch/parisc/kernel/parisc_ksyms.c | |||
@@ -35,12 +35,12 @@ EXPORT_SYMBOL(memset); | |||
35 | EXPORT_SYMBOL(__xchg8); | 35 | EXPORT_SYMBOL(__xchg8); |
36 | EXPORT_SYMBOL(__xchg32); | 36 | EXPORT_SYMBOL(__xchg32); |
37 | EXPORT_SYMBOL(__cmpxchg_u32); | 37 | EXPORT_SYMBOL(__cmpxchg_u32); |
38 | EXPORT_SYMBOL(__cmpxchg_u64); | ||
38 | #ifdef CONFIG_SMP | 39 | #ifdef CONFIG_SMP |
39 | EXPORT_SYMBOL(__atomic_hash); | 40 | EXPORT_SYMBOL(__atomic_hash); |
40 | #endif | 41 | #endif |
41 | #ifdef CONFIG_64BIT | 42 | #ifdef CONFIG_64BIT |
42 | EXPORT_SYMBOL(__xchg64); | 43 | EXPORT_SYMBOL(__xchg64); |
43 | EXPORT_SYMBOL(__cmpxchg_u64); | ||
44 | #endif | 44 | #endif |
45 | 45 | ||
46 | #include <linux/uaccess.h> | 46 | #include <linux/uaccess.h> |
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index 23de307c3052..41e60a9c7db2 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S | |||
@@ -742,7 +742,7 @@ lws_compare_and_swap_2: | |||
742 | 10: ldd 0(%r25), %r25 | 742 | 10: ldd 0(%r25), %r25 |
743 | 11: ldd 0(%r24), %r24 | 743 | 11: ldd 0(%r24), %r24 |
744 | #else | 744 | #else |
745 | /* Load new value into r22/r23 - high/low */ | 745 | /* Load old value into r22/r23 - high/low */ |
746 | 10: ldw 0(%r25), %r22 | 746 | 10: ldw 0(%r25), %r22 |
747 | 11: ldw 4(%r25), %r23 | 747 | 11: ldw 4(%r25), %r23 |
748 | /* Load new value into fr4 for atomic store later */ | 748 | /* Load new value into fr4 for atomic store later */ |
@@ -834,11 +834,11 @@ cas2_action: | |||
834 | copy %r0, %r28 | 834 | copy %r0, %r28 |
835 | #else | 835 | #else |
836 | /* Compare first word */ | 836 | /* Compare first word */ |
837 | 19: ldw,ma 0(%r26), %r29 | 837 | 19: ldw 0(%r26), %r29 |
838 | sub,= %r29, %r22, %r0 | 838 | sub,= %r29, %r22, %r0 |
839 | b,n cas2_end | 839 | b,n cas2_end |
840 | /* Compare second word */ | 840 | /* Compare second word */ |
841 | 20: ldw,ma 4(%r26), %r29 | 841 | 20: ldw 4(%r26), %r29 |
842 | sub,= %r29, %r23, %r0 | 842 | sub,= %r29, %r23, %r0 |
843 | b,n cas2_end | 843 | b,n cas2_end |
844 | /* Perform the store */ | 844 | /* Perform the store */ |
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c index 2d956aa0a38a..8c0105a49839 100644 --- a/arch/parisc/kernel/time.c +++ b/arch/parisc/kernel/time.c | |||
@@ -253,7 +253,10 @@ static int __init init_cr16_clocksource(void) | |||
253 | cpu0_loc = per_cpu(cpu_data, 0).cpu_loc; | 253 | cpu0_loc = per_cpu(cpu_data, 0).cpu_loc; |
254 | 254 | ||
255 | for_each_online_cpu(cpu) { | 255 | for_each_online_cpu(cpu) { |
256 | if (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc) | 256 | if (cpu == 0) |
257 | continue; | ||
258 | if ((cpu0_loc != 0) && | ||
259 | (cpu0_loc == per_cpu(cpu_data, cpu).cpu_loc)) | ||
257 | continue; | 260 | continue; |
258 | 261 | ||
259 | clocksource_cr16.name = "cr16_unstable"; | 262 | clocksource_cr16.name = "cr16_unstable"; |
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 48da0f5d2f7f..b82586c53560 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S | |||
@@ -734,7 +734,29 @@ EXC_REAL(program_check, 0x700, 0x100) | |||
734 | EXC_VIRT(program_check, 0x4700, 0x100, 0x700) | 734 | EXC_VIRT(program_check, 0x4700, 0x100, 0x700) |
735 | TRAMP_KVM(PACA_EXGEN, 0x700) | 735 | TRAMP_KVM(PACA_EXGEN, 0x700) |
736 | EXC_COMMON_BEGIN(program_check_common) | 736 | EXC_COMMON_BEGIN(program_check_common) |
737 | EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN) | 737 | /* |
738 | * It's possible to receive a TM Bad Thing type program check with | ||
739 | * userspace register values (in particular r1), but with SRR1 reporting | ||
740 | * that we came from the kernel. Normally that would confuse the bad | ||
741 | * stack logic, and we would report a bad kernel stack pointer. Instead | ||
742 | * we switch to the emergency stack if we're taking a TM Bad Thing from | ||
743 | * the kernel. | ||
744 | */ | ||
745 | li r10,MSR_PR /* Build a mask of MSR_PR .. */ | ||
746 | oris r10,r10,0x200000@h /* .. and SRR1_PROGTM */ | ||
747 | and r10,r10,r12 /* Mask SRR1 with that. */ | ||
748 | srdi r10,r10,8 /* Shift it so we can compare */ | ||
749 | cmpldi r10,(0x200000 >> 8) /* .. with an immediate. */ | ||
750 | bne 1f /* If != go to normal path. */ | ||
751 | |||
752 | /* SRR1 had PR=0 and SRR1_PROGTM=1, so use the emergency stack */ | ||
753 | andi. r10,r12,MSR_PR; /* Set CR0 correctly for label */ | ||
754 | /* 3 in EXCEPTION_PROLOG_COMMON */ | ||
755 | mr r10,r1 /* Save r1 */ | ||
756 | ld r1,PACAEMERGSP(r13) /* Use emergency stack */ | ||
757 | subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */ | ||
758 | b 3f /* Jump into the macro !! */ | ||
759 | 1: EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN) | ||
738 | bl save_nvgprs | 760 | bl save_nvgprs |
739 | RECONCILE_IRQ_STATE(r10, r11) | 761 | RECONCILE_IRQ_STATE(r10, r11) |
740 | addi r3,r1,STACK_FRAME_OVERHEAD | 762 | addi r3,r1,STACK_FRAME_OVERHEAD |
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index c83c115858c1..b2c002993d78 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c | |||
@@ -452,9 +452,20 @@ static long restore_tm_sigcontexts(struct task_struct *tsk, | |||
452 | if (MSR_TM_RESV(msr)) | 452 | if (MSR_TM_RESV(msr)) |
453 | return -EINVAL; | 453 | return -EINVAL; |
454 | 454 | ||
455 | /* pull in MSR TM from user context */ | 455 | /* pull in MSR TS bits from user context */ |
456 | regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK); | 456 | regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK); |
457 | 457 | ||
458 | /* | ||
459 | * Ensure that TM is enabled in regs->msr before we leave the signal | ||
460 | * handler. It could be the case that (a) user disabled the TM bit | ||
461 | * through the manipulation of the MSR bits in uc_mcontext or (b) the | ||
462 | * TM bit was disabled because a sufficient number of context switches | ||
463 | * happened whilst in the signal handler and load_tm overflowed, | ||
464 | * disabling the TM bit. In either case we can end up with an illegal | ||
465 | * TM state leading to a TM Bad Thing when we return to userspace. | ||
466 | */ | ||
467 | regs->msr |= MSR_TM; | ||
468 | |||
458 | /* pull in MSR LE from user context */ | 469 | /* pull in MSR LE from user context */ |
459 | regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE); | 470 | regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE); |
460 | 471 | ||
diff --git a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S index c98e90b4ea7b..b4e2b7165f79 100644 --- a/arch/powerpc/kernel/trace/ftrace_64_mprofile.S +++ b/arch/powerpc/kernel/trace/ftrace_64_mprofile.S | |||
@@ -181,34 +181,25 @@ _GLOBAL(ftrace_stub) | |||
181 | * - we have no stack frame and can not allocate one | 181 | * - we have no stack frame and can not allocate one |
182 | * - LR points back to the original caller (in A) | 182 | * - LR points back to the original caller (in A) |
183 | * - CTR holds the new NIP in C | 183 | * - CTR holds the new NIP in C |
184 | * - r0 & r12 are free | 184 | * - r0, r11 & r12 are free |
185 | * | ||
186 | * r0 can't be used as the base register for a DS-form load or store, so | ||
187 | * we temporarily shuffle r1 (stack pointer) into r0 and then put it back. | ||
188 | */ | 185 | */ |
189 | livepatch_handler: | 186 | livepatch_handler: |
190 | CURRENT_THREAD_INFO(r12, r1) | 187 | CURRENT_THREAD_INFO(r12, r1) |
191 | 188 | ||
192 | /* Save stack pointer into r0 */ | ||
193 | mr r0, r1 | ||
194 | |||
195 | /* Allocate 3 x 8 bytes */ | 189 | /* Allocate 3 x 8 bytes */ |
196 | ld r1, TI_livepatch_sp(r12) | 190 | ld r11, TI_livepatch_sp(r12) |
197 | addi r1, r1, 24 | 191 | addi r11, r11, 24 |
198 | std r1, TI_livepatch_sp(r12) | 192 | std r11, TI_livepatch_sp(r12) |
199 | 193 | ||
200 | /* Save toc & real LR on livepatch stack */ | 194 | /* Save toc & real LR on livepatch stack */ |
201 | std r2, -24(r1) | 195 | std r2, -24(r11) |
202 | mflr r12 | 196 | mflr r12 |
203 | std r12, -16(r1) | 197 | std r12, -16(r11) |
204 | 198 | ||
205 | /* Store stack end marker */ | 199 | /* Store stack end marker */ |
206 | lis r12, STACK_END_MAGIC@h | 200 | lis r12, STACK_END_MAGIC@h |
207 | ori r12, r12, STACK_END_MAGIC@l | 201 | ori r12, r12, STACK_END_MAGIC@l |
208 | std r12, -8(r1) | 202 | std r12, -8(r11) |
209 | |||
210 | /* Restore real stack pointer */ | ||
211 | mr r1, r0 | ||
212 | 203 | ||
213 | /* Put ctr in r12 for global entry and branch there */ | 204 | /* Put ctr in r12 for global entry and branch there */ |
214 | mfctr r12 | 205 | mfctr r12 |
@@ -216,36 +207,30 @@ livepatch_handler: | |||
216 | 207 | ||
217 | /* | 208 | /* |
218 | * Now we are returning from the patched function to the original | 209 | * Now we are returning from the patched function to the original |
219 | * caller A. We are free to use r0 and r12, and we can use r2 until we | 210 | * caller A. We are free to use r11, r12 and we can use r2 until we |
220 | * restore it. | 211 | * restore it. |
221 | */ | 212 | */ |
222 | 213 | ||
223 | CURRENT_THREAD_INFO(r12, r1) | 214 | CURRENT_THREAD_INFO(r12, r1) |
224 | 215 | ||
225 | /* Save stack pointer into r0 */ | 216 | ld r11, TI_livepatch_sp(r12) |
226 | mr r0, r1 | ||
227 | |||
228 | ld r1, TI_livepatch_sp(r12) | ||
229 | 217 | ||
230 | /* Check stack marker hasn't been trashed */ | 218 | /* Check stack marker hasn't been trashed */ |
231 | lis r2, STACK_END_MAGIC@h | 219 | lis r2, STACK_END_MAGIC@h |
232 | ori r2, r2, STACK_END_MAGIC@l | 220 | ori r2, r2, STACK_END_MAGIC@l |
233 | ld r12, -8(r1) | 221 | ld r12, -8(r11) |
234 | 1: tdne r12, r2 | 222 | 1: tdne r12, r2 |
235 | EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0 | 223 | EMIT_BUG_ENTRY 1b, __FILE__, __LINE__ - 1, 0 |
236 | 224 | ||
237 | /* Restore LR & toc from livepatch stack */ | 225 | /* Restore LR & toc from livepatch stack */ |
238 | ld r12, -16(r1) | 226 | ld r12, -16(r11) |
239 | mtlr r12 | 227 | mtlr r12 |
240 | ld r2, -24(r1) | 228 | ld r2, -24(r11) |
241 | 229 | ||
242 | /* Pop livepatch stack frame */ | 230 | /* Pop livepatch stack frame */ |
243 | CURRENT_THREAD_INFO(r12, r0) | 231 | CURRENT_THREAD_INFO(r12, r1) |
244 | subi r1, r1, 24 | 232 | subi r11, r11, 24 |
245 | std r1, TI_livepatch_sp(r12) | 233 | std r11, TI_livepatch_sp(r12) |
246 | |||
247 | /* Restore real stack pointer */ | ||
248 | mr r1, r0 | ||
249 | 234 | ||
250 | /* Return to original caller of live patched function */ | 235 | /* Return to original caller of live patched function */ |
251 | blr | 236 | blr |
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c index 8f2da8bba737..4dffa611376d 100644 --- a/arch/powerpc/kvm/book3s_64_vio.c +++ b/arch/powerpc/kvm/book3s_64_vio.c | |||
@@ -478,28 +478,30 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, | |||
478 | return ret; | 478 | return ret; |
479 | 479 | ||
480 | dir = iommu_tce_direction(tce); | 480 | dir = iommu_tce_direction(tce); |
481 | |||
482 | idx = srcu_read_lock(&vcpu->kvm->srcu); | ||
483 | |||
481 | if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm, | 484 | if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm, |
482 | tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL)) | 485 | tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL)) { |
483 | return H_PARAMETER; | 486 | ret = H_PARAMETER; |
487 | goto unlock_exit; | ||
488 | } | ||
484 | 489 | ||
485 | entry = ioba >> stt->page_shift; | 490 | entry = ioba >> stt->page_shift; |
486 | 491 | ||
487 | list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { | 492 | list_for_each_entry_lockless(stit, &stt->iommu_tables, next) { |
488 | if (dir == DMA_NONE) { | 493 | if (dir == DMA_NONE) |
489 | ret = kvmppc_tce_iommu_unmap(vcpu->kvm, | 494 | ret = kvmppc_tce_iommu_unmap(vcpu->kvm, |
490 | stit->tbl, entry); | 495 | stit->tbl, entry); |
491 | } else { | 496 | else |
492 | idx = srcu_read_lock(&vcpu->kvm->srcu); | ||
493 | ret = kvmppc_tce_iommu_map(vcpu->kvm, stit->tbl, | 497 | ret = kvmppc_tce_iommu_map(vcpu->kvm, stit->tbl, |
494 | entry, ua, dir); | 498 | entry, ua, dir); |
495 | srcu_read_unlock(&vcpu->kvm->srcu, idx); | ||
496 | } | ||
497 | 499 | ||
498 | if (ret == H_SUCCESS) | 500 | if (ret == H_SUCCESS) |
499 | continue; | 501 | continue; |
500 | 502 | ||
501 | if (ret == H_TOO_HARD) | 503 | if (ret == H_TOO_HARD) |
502 | return ret; | 504 | goto unlock_exit; |
503 | 505 | ||
504 | WARN_ON_ONCE(1); | 506 | WARN_ON_ONCE(1); |
505 | kvmppc_clear_tce(stit->tbl, entry); | 507 | kvmppc_clear_tce(stit->tbl, entry); |
@@ -507,7 +509,10 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, | |||
507 | 509 | ||
508 | kvmppc_tce_put(stt, entry, tce); | 510 | kvmppc_tce_put(stt, entry, tce); |
509 | 511 | ||
510 | return H_SUCCESS; | 512 | unlock_exit: |
513 | srcu_read_unlock(&vcpu->kvm->srcu, idx); | ||
514 | |||
515 | return ret; | ||
511 | } | 516 | } |
512 | EXPORT_SYMBOL_GPL(kvmppc_h_put_tce); | 517 | EXPORT_SYMBOL_GPL(kvmppc_h_put_tce); |
513 | 518 | ||
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index ec69fa45d5a2..42639fba89e8 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S | |||
@@ -989,13 +989,14 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300) | |||
989 | beq no_xive | 989 | beq no_xive |
990 | ld r11, VCPU_XIVE_SAVED_STATE(r4) | 990 | ld r11, VCPU_XIVE_SAVED_STATE(r4) |
991 | li r9, TM_QW1_OS | 991 | li r9, TM_QW1_OS |
992 | stdcix r11,r9,r10 | ||
993 | eieio | 992 | eieio |
993 | stdcix r11,r9,r10 | ||
994 | lwz r11, VCPU_XIVE_CAM_WORD(r4) | 994 | lwz r11, VCPU_XIVE_CAM_WORD(r4) |
995 | li r9, TM_QW1_OS + TM_WORD2 | 995 | li r9, TM_QW1_OS + TM_WORD2 |
996 | stwcix r11,r9,r10 | 996 | stwcix r11,r9,r10 |
997 | li r9, 1 | 997 | li r9, 1 |
998 | stw r9, VCPU_XIVE_PUSHED(r4) | 998 | stw r9, VCPU_XIVE_PUSHED(r4) |
999 | eieio | ||
999 | no_xive: | 1000 | no_xive: |
1000 | #endif /* CONFIG_KVM_XICS */ | 1001 | #endif /* CONFIG_KVM_XICS */ |
1001 | 1002 | ||
@@ -1310,6 +1311,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR) | |||
1310 | bne 3f | 1311 | bne 3f |
1311 | BEGIN_FTR_SECTION | 1312 | BEGIN_FTR_SECTION |
1312 | PPC_MSGSYNC | 1313 | PPC_MSGSYNC |
1314 | lwsync | ||
1313 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) | 1315 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) |
1314 | lbz r0, HSTATE_HOST_IPI(r13) | 1316 | lbz r0, HSTATE_HOST_IPI(r13) |
1315 | cmpwi r0, 0 | 1317 | cmpwi r0, 0 |
@@ -1400,8 +1402,8 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ | |||
1400 | cmpldi cr0, r10, 0 | 1402 | cmpldi cr0, r10, 0 |
1401 | beq 1f | 1403 | beq 1f |
1402 | /* First load to pull the context, we ignore the value */ | 1404 | /* First load to pull the context, we ignore the value */ |
1403 | lwzx r11, r7, r10 | ||
1404 | eieio | 1405 | eieio |
1406 | lwzx r11, r7, r10 | ||
1405 | /* Second load to recover the context state (Words 0 and 1) */ | 1407 | /* Second load to recover the context state (Words 0 and 1) */ |
1406 | ldx r11, r6, r10 | 1408 | ldx r11, r6, r10 |
1407 | b 3f | 1409 | b 3f |
@@ -1409,8 +1411,8 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ | |||
1409 | cmpldi cr0, r10, 0 | 1411 | cmpldi cr0, r10, 0 |
1410 | beq 1f | 1412 | beq 1f |
1411 | /* First load to pull the context, we ignore the value */ | 1413 | /* First load to pull the context, we ignore the value */ |
1412 | lwzcix r11, r7, r10 | ||
1413 | eieio | 1414 | eieio |
1415 | lwzcix r11, r7, r10 | ||
1414 | /* Second load to recover the context state (Words 0 and 1) */ | 1416 | /* Second load to recover the context state (Words 0 and 1) */ |
1415 | ldcix r11, r6, r10 | 1417 | ldcix r11, r6, r10 |
1416 | 3: std r11, VCPU_XIVE_SAVED_STATE(r9) | 1418 | 3: std r11, VCPU_XIVE_SAVED_STATE(r9) |
@@ -1420,6 +1422,7 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ | |||
1420 | stw r10, VCPU_XIVE_PUSHED(r9) | 1422 | stw r10, VCPU_XIVE_PUSHED(r9) |
1421 | stb r10, (VCPU_XIVE_SAVED_STATE+3)(r9) | 1423 | stb r10, (VCPU_XIVE_SAVED_STATE+3)(r9) |
1422 | stb r0, (VCPU_XIVE_SAVED_STATE+4)(r9) | 1424 | stb r0, (VCPU_XIVE_SAVED_STATE+4)(r9) |
1425 | eieio | ||
1423 | 1: | 1426 | 1: |
1424 | #endif /* CONFIG_KVM_XICS */ | 1427 | #endif /* CONFIG_KVM_XICS */ |
1425 | /* Save more register state */ | 1428 | /* Save more register state */ |
@@ -2788,6 +2791,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) | |||
2788 | PPC_MSGCLR(6) | 2791 | PPC_MSGCLR(6) |
2789 | /* see if it's a host IPI */ | 2792 | /* see if it's a host IPI */ |
2790 | li r3, 1 | 2793 | li r3, 1 |
2794 | BEGIN_FTR_SECTION | ||
2795 | PPC_MSGSYNC | ||
2796 | lwsync | ||
2797 | END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) | ||
2791 | lbz r0, HSTATE_HOST_IPI(r13) | 2798 | lbz r0, HSTATE_HOST_IPI(r13) |
2792 | cmpwi r0, 0 | 2799 | cmpwi r0, 0 |
2793 | bnelr | 2800 | bnelr |
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 3480faaf1ef8..ee279c7f4802 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c | |||
@@ -644,8 +644,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) | |||
644 | break; | 644 | break; |
645 | #endif | 645 | #endif |
646 | case KVM_CAP_PPC_HTM: | 646 | case KVM_CAP_PPC_HTM: |
647 | r = cpu_has_feature(CPU_FTR_TM_COMP) && | 647 | r = cpu_has_feature(CPU_FTR_TM_COMP) && hv_enabled; |
648 | is_kvmppc_hv_enabled(kvm); | ||
649 | break; | 648 | break; |
650 | default: | 649 | default: |
651 | r = 0; | 650 | r = 0; |
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index 5e8418c28bd8..f208f560aecd 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c | |||
@@ -1684,11 +1684,13 @@ int analyse_instr(struct instruction_op *op, const struct pt_regs *regs, | |||
1684 | * Logical instructions | 1684 | * Logical instructions |
1685 | */ | 1685 | */ |
1686 | case 26: /* cntlzw */ | 1686 | case 26: /* cntlzw */ |
1687 | op->val = __builtin_clz((unsigned int) regs->gpr[rd]); | 1687 | val = (unsigned int) regs->gpr[rd]; |
1688 | op->val = ( val ? __builtin_clz(val) : 32 ); | ||
1688 | goto logical_done; | 1689 | goto logical_done; |
1689 | #ifdef __powerpc64__ | 1690 | #ifdef __powerpc64__ |
1690 | case 58: /* cntlzd */ | 1691 | case 58: /* cntlzd */ |
1691 | op->val = __builtin_clzl(regs->gpr[rd]); | 1692 | val = regs->gpr[rd]; |
1693 | op->val = ( val ? __builtin_clzl(val) : 64 ); | ||
1692 | goto logical_done; | 1694 | goto logical_done; |
1693 | #endif | 1695 | #endif |
1694 | case 28: /* and */ | 1696 | case 28: /* and */ |
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index b95c584ce19d..a51df9ef529d 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c | |||
@@ -1438,7 +1438,6 @@ out: | |||
1438 | 1438 | ||
1439 | int arch_update_cpu_topology(void) | 1439 | int arch_update_cpu_topology(void) |
1440 | { | 1440 | { |
1441 | lockdep_assert_cpus_held(); | ||
1442 | return numa_update_cpu_topology(true); | 1441 | return numa_update_cpu_topology(true); |
1443 | } | 1442 | } |
1444 | 1443 | ||
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c index 9ccac86f3463..88126245881b 100644 --- a/arch/powerpc/perf/imc-pmu.c +++ b/arch/powerpc/perf/imc-pmu.c | |||
@@ -399,6 +399,20 @@ static void nest_imc_counters_release(struct perf_event *event) | |||
399 | 399 | ||
400 | /* Take the mutex lock for this node and then decrement the reference count */ | 400 | /* Take the mutex lock for this node and then decrement the reference count */ |
401 | mutex_lock(&ref->lock); | 401 | mutex_lock(&ref->lock); |
402 | if (ref->refc == 0) { | ||
403 | /* | ||
404 | * The scenario where this is true is, when perf session is | ||
405 | * started, followed by offlining of all cpus in a given node. | ||
406 | * | ||
407 | * In the cpuhotplug offline path, ppc_nest_imc_cpu_offline() | ||
408 | * function set the ref->count to zero, if the cpu which is | ||
409 | * about to offline is the last cpu in a given node and make | ||
410 | * an OPAL call to disable the engine in that node. | ||
411 | * | ||
412 | */ | ||
413 | mutex_unlock(&ref->lock); | ||
414 | return; | ||
415 | } | ||
402 | ref->refc--; | 416 | ref->refc--; |
403 | if (ref->refc == 0) { | 417 | if (ref->refc == 0) { |
404 | rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST, | 418 | rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST, |
@@ -523,8 +537,8 @@ static int core_imc_mem_init(int cpu, int size) | |||
523 | 537 | ||
524 | /* We need only vbase for core counters */ | 538 | /* We need only vbase for core counters */ |
525 | mem_info->vbase = page_address(alloc_pages_node(phys_id, | 539 | mem_info->vbase = page_address(alloc_pages_node(phys_id, |
526 | GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE, | 540 | GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE | |
527 | get_order(size))); | 541 | __GFP_NOWARN, get_order(size))); |
528 | if (!mem_info->vbase) | 542 | if (!mem_info->vbase) |
529 | return -ENOMEM; | 543 | return -ENOMEM; |
530 | 544 | ||
@@ -646,6 +660,20 @@ static void core_imc_counters_release(struct perf_event *event) | |||
646 | return; | 660 | return; |
647 | 661 | ||
648 | mutex_lock(&ref->lock); | 662 | mutex_lock(&ref->lock); |
663 | if (ref->refc == 0) { | ||
664 | /* | ||
665 | * The scenario where this is true is, when perf session is | ||
666 | * started, followed by offlining of all cpus in a given core. | ||
667 | * | ||
668 | * In the cpuhotplug offline path, ppc_core_imc_cpu_offline() | ||
669 | * function set the ref->count to zero, if the cpu which is | ||
670 | * about to offline is the last cpu in a given core and make | ||
671 | * an OPAL call to disable the engine in that core. | ||
672 | * | ||
673 | */ | ||
674 | mutex_unlock(&ref->lock); | ||
675 | return; | ||
676 | } | ||
649 | ref->refc--; | 677 | ref->refc--; |
650 | if (ref->refc == 0) { | 678 | if (ref->refc == 0) { |
651 | rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE, | 679 | rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE, |
@@ -763,8 +791,8 @@ static int thread_imc_mem_alloc(int cpu_id, int size) | |||
763 | * free the memory in cpu offline path. | 791 | * free the memory in cpu offline path. |
764 | */ | 792 | */ |
765 | local_mem = page_address(alloc_pages_node(phys_id, | 793 | local_mem = page_address(alloc_pages_node(phys_id, |
766 | GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE, | 794 | GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE | |
767 | get_order(size))); | 795 | __GFP_NOWARN, get_order(size))); |
768 | if (!local_mem) | 796 | if (!local_mem) |
769 | return -ENOMEM; | 797 | return -ENOMEM; |
770 | 798 | ||
@@ -1148,7 +1176,8 @@ static void imc_common_cpuhp_mem_free(struct imc_pmu *pmu_ptr) | |||
1148 | } | 1176 | } |
1149 | 1177 | ||
1150 | /* Only free the attr_groups which are dynamically allocated */ | 1178 | /* Only free the attr_groups which are dynamically allocated */ |
1151 | kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs); | 1179 | if (pmu_ptr->attr_groups[IMC_EVENT_ATTR]) |
1180 | kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs); | ||
1152 | kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]); | 1181 | kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]); |
1153 | kfree(pmu_ptr); | 1182 | kfree(pmu_ptr); |
1154 | return; | 1183 | return; |
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig index afa46a7406ea..04e042edbab7 100644 --- a/arch/s390/configs/zfcpdump_defconfig +++ b/arch/s390/configs/zfcpdump_defconfig | |||
@@ -27,6 +27,7 @@ CONFIG_NET=y | |||
27 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | 27 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" |
28 | CONFIG_DEVTMPFS=y | 28 | CONFIG_DEVTMPFS=y |
29 | # CONFIG_FIRMWARE_IN_KERNEL is not set | 29 | # CONFIG_FIRMWARE_IN_KERNEL is not set |
30 | CONFIG_BLK_DEV_RAM=y | ||
30 | # CONFIG_BLK_DEV_XPRAM is not set | 31 | # CONFIG_BLK_DEV_XPRAM is not set |
31 | # CONFIG_DCSSBLK is not set | 32 | # CONFIG_DCSSBLK is not set |
32 | # CONFIG_DASD is not set | 33 | # CONFIG_DASD is not set |
@@ -59,6 +60,7 @@ CONFIG_CONFIGFS_FS=y | |||
59 | # CONFIG_NETWORK_FILESYSTEMS is not set | 60 | # CONFIG_NETWORK_FILESYSTEMS is not set |
60 | CONFIG_PRINTK_TIME=y | 61 | CONFIG_PRINTK_TIME=y |
61 | CONFIG_DEBUG_INFO=y | 62 | CONFIG_DEBUG_INFO=y |
63 | CONFIG_DEBUG_FS=y | ||
62 | CONFIG_DEBUG_KERNEL=y | 64 | CONFIG_DEBUG_KERNEL=y |
63 | CONFIG_PANIC_ON_OOPS=y | 65 | CONFIG_PANIC_ON_OOPS=y |
64 | # CONFIG_SCHED_DEBUG is not set | 66 | # CONFIG_SCHED_DEBUG is not set |
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 21900e1cee9c..d185aa3965bf 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S | |||
@@ -521,12 +521,15 @@ ENTRY(pgm_check_handler) | |||
521 | tmhh %r8,0x0001 # test problem state bit | 521 | tmhh %r8,0x0001 # test problem state bit |
522 | jnz 2f # -> fault in user space | 522 | jnz 2f # -> fault in user space |
523 | #if IS_ENABLED(CONFIG_KVM) | 523 | #if IS_ENABLED(CONFIG_KVM) |
524 | # cleanup critical section for sie64a | 524 | # cleanup critical section for program checks in sie64a |
525 | lgr %r14,%r9 | 525 | lgr %r14,%r9 |
526 | slg %r14,BASED(.Lsie_critical_start) | 526 | slg %r14,BASED(.Lsie_critical_start) |
527 | clg %r14,BASED(.Lsie_critical_length) | 527 | clg %r14,BASED(.Lsie_critical_length) |
528 | jhe 0f | 528 | jhe 0f |
529 | brasl %r14,.Lcleanup_sie | 529 | lg %r14,__SF_EMPTY(%r15) # get control block pointer |
530 | ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE | ||
531 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce | ||
532 | larl %r9,sie_exit # skip forward to sie_exit | ||
530 | #endif | 533 | #endif |
531 | 0: tmhh %r8,0x4000 # PER bit set in old PSW ? | 534 | 0: tmhh %r8,0x4000 # PER bit set in old PSW ? |
532 | jnz 1f # -> enabled, can't be a double fault | 535 | jnz 1f # -> enabled, can't be a double fault |
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 1cee6753d47a..495ff6959dec 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -293,7 +293,10 @@ static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk) | |||
293 | lc->lpp = LPP_MAGIC; | 293 | lc->lpp = LPP_MAGIC; |
294 | lc->current_pid = tsk->pid; | 294 | lc->current_pid = tsk->pid; |
295 | lc->user_timer = tsk->thread.user_timer; | 295 | lc->user_timer = tsk->thread.user_timer; |
296 | lc->guest_timer = tsk->thread.guest_timer; | ||
296 | lc->system_timer = tsk->thread.system_timer; | 297 | lc->system_timer = tsk->thread.system_timer; |
298 | lc->hardirq_timer = tsk->thread.hardirq_timer; | ||
299 | lc->softirq_timer = tsk->thread.softirq_timer; | ||
297 | lc->steal_timer = 0; | 300 | lc->steal_timer = 0; |
298 | } | 301 | } |
299 | 302 | ||
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 0be3828752e5..4e83f950713e 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig | |||
@@ -44,7 +44,6 @@ config SPARC | |||
44 | select ARCH_HAS_SG_CHAIN | 44 | select ARCH_HAS_SG_CHAIN |
45 | select CPU_NO_EFFICIENT_FFS | 45 | select CPU_NO_EFFICIENT_FFS |
46 | select LOCKDEP_SMALL if LOCKDEP | 46 | select LOCKDEP_SMALL if LOCKDEP |
47 | select ARCH_WANT_RELAX_ORDER | ||
48 | 47 | ||
49 | config SPARC32 | 48 | config SPARC32 |
50 | def_bool !64BIT | 49 | def_bool !64BIT |
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index 8a13d468635a..50e0d2bc4528 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S | |||
@@ -176,7 +176,7 @@ | |||
176 | /* | 176 | /* |
177 | * This is a sneaky trick to help the unwinder find pt_regs on the stack. The | 177 | * This is a sneaky trick to help the unwinder find pt_regs on the stack. The |
178 | * frame pointer is replaced with an encoded pointer to pt_regs. The encoding | 178 | * frame pointer is replaced with an encoded pointer to pt_regs. The encoding |
179 | * is just setting the LSB, which makes it an invalid stack address and is also | 179 | * is just clearing the MSB, which makes it an invalid stack address and is also |
180 | * a signal to the unwinder that it's a pt_regs pointer in disguise. | 180 | * a signal to the unwinder that it's a pt_regs pointer in disguise. |
181 | * | 181 | * |
182 | * NOTE: This macro must be used *after* SAVE_ALL because it corrupts the | 182 | * NOTE: This macro must be used *after* SAVE_ALL because it corrupts the |
@@ -185,7 +185,7 @@ | |||
185 | .macro ENCODE_FRAME_POINTER | 185 | .macro ENCODE_FRAME_POINTER |
186 | #ifdef CONFIG_FRAME_POINTER | 186 | #ifdef CONFIG_FRAME_POINTER |
187 | mov %esp, %ebp | 187 | mov %esp, %ebp |
188 | orl $0x1, %ebp | 188 | andl $0x7fffffff, %ebp |
189 | #endif | 189 | #endif |
190 | .endm | 190 | .endm |
191 | 191 | ||
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 49167258d587..f6cdb7a1455e 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S | |||
@@ -808,7 +808,7 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt | |||
808 | 808 | ||
809 | .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 | 809 | .macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1 |
810 | ENTRY(\sym) | 810 | ENTRY(\sym) |
811 | UNWIND_HINT_IRET_REGS offset=8 | 811 | UNWIND_HINT_IRET_REGS offset=\has_error_code*8 |
812 | 812 | ||
813 | /* Sanity check */ | 813 | /* Sanity check */ |
814 | .if \shift_ist != -1 && \paranoid == 0 | 814 | .if \shift_ist != -1 && \paranoid == 0 |
diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c index 16076eb34699..141e07b06216 100644 --- a/arch/x86/events/intel/bts.c +++ b/arch/x86/events/intel/bts.c | |||
@@ -546,9 +546,6 @@ static int bts_event_init(struct perf_event *event) | |||
546 | if (event->attr.type != bts_pmu.type) | 546 | if (event->attr.type != bts_pmu.type) |
547 | return -ENOENT; | 547 | return -ENOENT; |
548 | 548 | ||
549 | if (x86_add_exclusive(x86_lbr_exclusive_bts)) | ||
550 | return -EBUSY; | ||
551 | |||
552 | /* | 549 | /* |
553 | * BTS leaks kernel addresses even when CPL0 tracing is | 550 | * BTS leaks kernel addresses even when CPL0 tracing is |
554 | * disabled, so disallow intel_bts driver for unprivileged | 551 | * disabled, so disallow intel_bts driver for unprivileged |
@@ -562,6 +559,9 @@ static int bts_event_init(struct perf_event *event) | |||
562 | !capable(CAP_SYS_ADMIN)) | 559 | !capable(CAP_SYS_ADMIN)) |
563 | return -EACCES; | 560 | return -EACCES; |
564 | 561 | ||
562 | if (x86_add_exclusive(x86_lbr_exclusive_bts)) | ||
563 | return -EBUSY; | ||
564 | |||
565 | ret = x86_reserve_hardware(); | 565 | ret = x86_reserve_hardware(); |
566 | if (ret) { | 566 | if (ret) { |
567 | x86_del_exclusive(x86_lbr_exclusive_bts); | 567 | x86_del_exclusive(x86_lbr_exclusive_bts); |
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index 1c5390f1cf09..d45e06346f14 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c | |||
@@ -822,7 +822,7 @@ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid) | |||
822 | pmus[i].type = type; | 822 | pmus[i].type = type; |
823 | pmus[i].boxes = kzalloc(size, GFP_KERNEL); | 823 | pmus[i].boxes = kzalloc(size, GFP_KERNEL); |
824 | if (!pmus[i].boxes) | 824 | if (!pmus[i].boxes) |
825 | return -ENOMEM; | 825 | goto err; |
826 | } | 826 | } |
827 | 827 | ||
828 | type->pmus = pmus; | 828 | type->pmus = pmus; |
@@ -836,7 +836,7 @@ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid) | |||
836 | attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) + | 836 | attr_group = kzalloc(sizeof(struct attribute *) * (i + 1) + |
837 | sizeof(*attr_group), GFP_KERNEL); | 837 | sizeof(*attr_group), GFP_KERNEL); |
838 | if (!attr_group) | 838 | if (!attr_group) |
839 | return -ENOMEM; | 839 | goto err; |
840 | 840 | ||
841 | attrs = (struct attribute **)(attr_group + 1); | 841 | attrs = (struct attribute **)(attr_group + 1); |
842 | attr_group->name = "events"; | 842 | attr_group->name = "events"; |
@@ -849,7 +849,15 @@ static int __init uncore_type_init(struct intel_uncore_type *type, bool setid) | |||
849 | } | 849 | } |
850 | 850 | ||
851 | type->pmu_group = &uncore_pmu_attr_group; | 851 | type->pmu_group = &uncore_pmu_attr_group; |
852 | |||
852 | return 0; | 853 | return 0; |
854 | |||
855 | err: | ||
856 | for (i = 0; i < type->num_boxes; i++) | ||
857 | kfree(pmus[i].boxes); | ||
858 | kfree(pmus); | ||
859 | |||
860 | return -ENOMEM; | ||
853 | } | 861 | } |
854 | 862 | ||
855 | static int __init | 863 | static int __init |
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c index 1a8eb550c40f..a5db63f728a2 100644 --- a/arch/x86/hyperv/hv_init.c +++ b/arch/x86/hyperv/hv_init.c | |||
@@ -85,6 +85,8 @@ EXPORT_SYMBOL_GPL(hyperv_cs); | |||
85 | u32 *hv_vp_index; | 85 | u32 *hv_vp_index; |
86 | EXPORT_SYMBOL_GPL(hv_vp_index); | 86 | EXPORT_SYMBOL_GPL(hv_vp_index); |
87 | 87 | ||
88 | u32 hv_max_vp_index; | ||
89 | |||
88 | static int hv_cpu_init(unsigned int cpu) | 90 | static int hv_cpu_init(unsigned int cpu) |
89 | { | 91 | { |
90 | u64 msr_vp_index; | 92 | u64 msr_vp_index; |
@@ -93,6 +95,9 @@ static int hv_cpu_init(unsigned int cpu) | |||
93 | 95 | ||
94 | hv_vp_index[smp_processor_id()] = msr_vp_index; | 96 | hv_vp_index[smp_processor_id()] = msr_vp_index; |
95 | 97 | ||
98 | if (msr_vp_index > hv_max_vp_index) | ||
99 | hv_max_vp_index = msr_vp_index; | ||
100 | |||
96 | return 0; | 101 | return 0; |
97 | } | 102 | } |
98 | 103 | ||
diff --git a/arch/x86/hyperv/mmu.c b/arch/x86/hyperv/mmu.c index 39e7f6e50919..9cc9e1c1e2db 100644 --- a/arch/x86/hyperv/mmu.c +++ b/arch/x86/hyperv/mmu.c | |||
@@ -36,9 +36,9 @@ struct hv_flush_pcpu_ex { | |||
36 | /* Each gva in gva_list encodes up to 4096 pages to flush */ | 36 | /* Each gva in gva_list encodes up to 4096 pages to flush */ |
37 | #define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE) | 37 | #define HV_TLB_FLUSH_UNIT (4096 * PAGE_SIZE) |
38 | 38 | ||
39 | static struct hv_flush_pcpu __percpu *pcpu_flush; | 39 | static struct hv_flush_pcpu __percpu **pcpu_flush; |
40 | 40 | ||
41 | static struct hv_flush_pcpu_ex __percpu *pcpu_flush_ex; | 41 | static struct hv_flush_pcpu_ex __percpu **pcpu_flush_ex; |
42 | 42 | ||
43 | /* | 43 | /* |
44 | * Fills in gva_list starting from offset. Returns the number of items added. | 44 | * Fills in gva_list starting from offset. Returns the number of items added. |
@@ -76,6 +76,18 @@ static inline int cpumask_to_vp_set(struct hv_flush_pcpu_ex *flush, | |||
76 | { | 76 | { |
77 | int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1; | 77 | int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1; |
78 | 78 | ||
79 | /* valid_bank_mask can represent up to 64 banks */ | ||
80 | if (hv_max_vp_index / 64 >= 64) | ||
81 | return 0; | ||
82 | |||
83 | /* | ||
84 | * Clear all banks up to the maximum possible bank as hv_flush_pcpu_ex | ||
85 | * structs are not cleared between calls, we risk flushing unneeded | ||
86 | * vCPUs otherwise. | ||
87 | */ | ||
88 | for (vcpu_bank = 0; vcpu_bank <= hv_max_vp_index / 64; vcpu_bank++) | ||
89 | flush->hv_vp_set.bank_contents[vcpu_bank] = 0; | ||
90 | |||
79 | /* | 91 | /* |
80 | * Some banks may end up being empty but this is acceptable. | 92 | * Some banks may end up being empty but this is acceptable. |
81 | */ | 93 | */ |
@@ -83,11 +95,6 @@ static inline int cpumask_to_vp_set(struct hv_flush_pcpu_ex *flush, | |||
83 | vcpu = hv_cpu_number_to_vp_number(cpu); | 95 | vcpu = hv_cpu_number_to_vp_number(cpu); |
84 | vcpu_bank = vcpu / 64; | 96 | vcpu_bank = vcpu / 64; |
85 | vcpu_offset = vcpu % 64; | 97 | vcpu_offset = vcpu % 64; |
86 | |||
87 | /* valid_bank_mask can represent up to 64 banks */ | ||
88 | if (vcpu_bank >= 64) | ||
89 | return 0; | ||
90 | |||
91 | __set_bit(vcpu_offset, (unsigned long *) | 98 | __set_bit(vcpu_offset, (unsigned long *) |
92 | &flush->hv_vp_set.bank_contents[vcpu_bank]); | 99 | &flush->hv_vp_set.bank_contents[vcpu_bank]); |
93 | if (vcpu_bank >= nr_bank) | 100 | if (vcpu_bank >= nr_bank) |
@@ -102,6 +109,7 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus, | |||
102 | const struct flush_tlb_info *info) | 109 | const struct flush_tlb_info *info) |
103 | { | 110 | { |
104 | int cpu, vcpu, gva_n, max_gvas; | 111 | int cpu, vcpu, gva_n, max_gvas; |
112 | struct hv_flush_pcpu **flush_pcpu; | ||
105 | struct hv_flush_pcpu *flush; | 113 | struct hv_flush_pcpu *flush; |
106 | u64 status = U64_MAX; | 114 | u64 status = U64_MAX; |
107 | unsigned long flags; | 115 | unsigned long flags; |
@@ -116,7 +124,17 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus, | |||
116 | 124 | ||
117 | local_irq_save(flags); | 125 | local_irq_save(flags); |
118 | 126 | ||
119 | flush = this_cpu_ptr(pcpu_flush); | 127 | flush_pcpu = this_cpu_ptr(pcpu_flush); |
128 | |||
129 | if (unlikely(!*flush_pcpu)) | ||
130 | *flush_pcpu = page_address(alloc_page(GFP_ATOMIC)); | ||
131 | |||
132 | flush = *flush_pcpu; | ||
133 | |||
134 | if (unlikely(!flush)) { | ||
135 | local_irq_restore(flags); | ||
136 | goto do_native; | ||
137 | } | ||
120 | 138 | ||
121 | if (info->mm) { | 139 | if (info->mm) { |
122 | flush->address_space = virt_to_phys(info->mm->pgd); | 140 | flush->address_space = virt_to_phys(info->mm->pgd); |
@@ -173,6 +191,7 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus, | |||
173 | const struct flush_tlb_info *info) | 191 | const struct flush_tlb_info *info) |
174 | { | 192 | { |
175 | int nr_bank = 0, max_gvas, gva_n; | 193 | int nr_bank = 0, max_gvas, gva_n; |
194 | struct hv_flush_pcpu_ex **flush_pcpu; | ||
176 | struct hv_flush_pcpu_ex *flush; | 195 | struct hv_flush_pcpu_ex *flush; |
177 | u64 status = U64_MAX; | 196 | u64 status = U64_MAX; |
178 | unsigned long flags; | 197 | unsigned long flags; |
@@ -187,7 +206,17 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus, | |||
187 | 206 | ||
188 | local_irq_save(flags); | 207 | local_irq_save(flags); |
189 | 208 | ||
190 | flush = this_cpu_ptr(pcpu_flush_ex); | 209 | flush_pcpu = this_cpu_ptr(pcpu_flush_ex); |
210 | |||
211 | if (unlikely(!*flush_pcpu)) | ||
212 | *flush_pcpu = page_address(alloc_page(GFP_ATOMIC)); | ||
213 | |||
214 | flush = *flush_pcpu; | ||
215 | |||
216 | if (unlikely(!flush)) { | ||
217 | local_irq_restore(flags); | ||
218 | goto do_native; | ||
219 | } | ||
191 | 220 | ||
192 | if (info->mm) { | 221 | if (info->mm) { |
193 | flush->address_space = virt_to_phys(info->mm->pgd); | 222 | flush->address_space = virt_to_phys(info->mm->pgd); |
@@ -222,18 +251,18 @@ static void hyperv_flush_tlb_others_ex(const struct cpumask *cpus, | |||
222 | flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY; | 251 | flush->flags |= HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY; |
223 | status = hv_do_rep_hypercall( | 252 | status = hv_do_rep_hypercall( |
224 | HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX, | 253 | HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX, |
225 | 0, nr_bank + 2, flush, NULL); | 254 | 0, nr_bank, flush, NULL); |
226 | } else if (info->end && | 255 | } else if (info->end && |
227 | ((info->end - info->start)/HV_TLB_FLUSH_UNIT) > max_gvas) { | 256 | ((info->end - info->start)/HV_TLB_FLUSH_UNIT) > max_gvas) { |
228 | status = hv_do_rep_hypercall( | 257 | status = hv_do_rep_hypercall( |
229 | HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX, | 258 | HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX, |
230 | 0, nr_bank + 2, flush, NULL); | 259 | 0, nr_bank, flush, NULL); |
231 | } else { | 260 | } else { |
232 | gva_n = fill_gva_list(flush->gva_list, nr_bank, | 261 | gva_n = fill_gva_list(flush->gva_list, nr_bank, |
233 | info->start, info->end); | 262 | info->start, info->end); |
234 | status = hv_do_rep_hypercall( | 263 | status = hv_do_rep_hypercall( |
235 | HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX, | 264 | HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX, |
236 | gva_n, nr_bank + 2, flush, NULL); | 265 | gva_n, nr_bank, flush, NULL); |
237 | } | 266 | } |
238 | 267 | ||
239 | local_irq_restore(flags); | 268 | local_irq_restore(flags); |
@@ -266,7 +295,7 @@ void hyper_alloc_mmu(void) | |||
266 | return; | 295 | return; |
267 | 296 | ||
268 | if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED)) | 297 | if (!(ms_hyperv.hints & HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED)) |
269 | pcpu_flush = __alloc_percpu(PAGE_SIZE, PAGE_SIZE); | 298 | pcpu_flush = alloc_percpu(struct hv_flush_pcpu *); |
270 | else | 299 | else |
271 | pcpu_flush_ex = __alloc_percpu(PAGE_SIZE, PAGE_SIZE); | 300 | pcpu_flush_ex = alloc_percpu(struct hv_flush_pcpu_ex *); |
272 | } | 301 | } |
diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h index e7636bac7372..6c98821fef5e 100644 --- a/arch/x86/include/asm/alternative-asm.h +++ b/arch/x86/include/asm/alternative-asm.h | |||
@@ -62,8 +62,10 @@ | |||
62 | #define new_len2 145f-144f | 62 | #define new_len2 145f-144f |
63 | 63 | ||
64 | /* | 64 | /* |
65 | * max without conditionals. Idea adapted from: | 65 | * gas compatible max based on the idea from: |
66 | * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax | 66 | * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax |
67 | * | ||
68 | * The additional "-" is needed because gas uses a "true" value of -1. | ||
67 | */ | 69 | */ |
68 | #define alt_max_short(a, b) ((a) ^ (((a) ^ (b)) & -(-((a) < (b))))) | 70 | #define alt_max_short(a, b) ((a) ^ (((a) ^ (b)) & -(-((a) < (b))))) |
69 | 71 | ||
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index c096624137ae..ccbe24e697c4 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h | |||
@@ -103,12 +103,12 @@ static inline int alternatives_text_reserved(void *start, void *end) | |||
103 | alt_end_marker ":\n" | 103 | alt_end_marker ":\n" |
104 | 104 | ||
105 | /* | 105 | /* |
106 | * max without conditionals. Idea adapted from: | 106 | * gas compatible max based on the idea from: |
107 | * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax | 107 | * http://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax |
108 | * | 108 | * |
109 | * The additional "-" is needed because gas works with s32s. | 109 | * The additional "-" is needed because gas uses a "true" value of -1. |
110 | */ | 110 | */ |
111 | #define alt_max_short(a, b) "((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") - (" b ")))))" | 111 | #define alt_max_short(a, b) "((" a ") ^ (((" a ") ^ (" b ")) & -(-((" a ") < (" b ")))))" |
112 | 112 | ||
113 | /* | 113 | /* |
114 | * Pad the second replacement alternative with additional NOPs if it is | 114 | * Pad the second replacement alternative with additional NOPs if it is |
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index 181264989db5..8edac1de2e35 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h | |||
@@ -187,7 +187,6 @@ struct mca_msr_regs { | |||
187 | 187 | ||
188 | extern struct mce_vendor_flags mce_flags; | 188 | extern struct mce_vendor_flags mce_flags; |
189 | 189 | ||
190 | extern struct mca_config mca_cfg; | ||
191 | extern struct mca_msr_regs msr_ops; | 190 | extern struct mca_msr_regs msr_ops; |
192 | 191 | ||
193 | enum mce_notifier_prios { | 192 | enum mce_notifier_prios { |
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index c120b5db178a..3c856a15b98e 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h | |||
@@ -126,13 +126,7 @@ static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next) | |||
126 | DEBUG_LOCKS_WARN_ON(preemptible()); | 126 | DEBUG_LOCKS_WARN_ON(preemptible()); |
127 | } | 127 | } |
128 | 128 | ||
129 | static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) | 129 | void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk); |
130 | { | ||
131 | int cpu = smp_processor_id(); | ||
132 | |||
133 | if (cpumask_test_cpu(cpu, mm_cpumask(mm))) | ||
134 | cpumask_clear_cpu(cpu, mm_cpumask(mm)); | ||
135 | } | ||
136 | 130 | ||
137 | static inline int init_new_context(struct task_struct *tsk, | 131 | static inline int init_new_context(struct task_struct *tsk, |
138 | struct mm_struct *mm) | 132 | struct mm_struct *mm) |
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index 738503e1f80c..530f448fddaf 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h | |||
@@ -289,6 +289,7 @@ static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size, | |||
289 | * to this information. | 289 | * to this information. |
290 | */ | 290 | */ |
291 | extern u32 *hv_vp_index; | 291 | extern u32 *hv_vp_index; |
292 | extern u32 hv_max_vp_index; | ||
292 | 293 | ||
293 | /** | 294 | /** |
294 | * hv_cpu_number_to_vp_number() - Map CPU to VP. | 295 | * hv_cpu_number_to_vp_number() - Map CPU to VP. |
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 4893abf7f74f..c4aed0de565e 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h | |||
@@ -82,6 +82,22 @@ static inline u64 inc_mm_tlb_gen(struct mm_struct *mm) | |||
82 | #define __flush_tlb_single(addr) __native_flush_tlb_single(addr) | 82 | #define __flush_tlb_single(addr) __native_flush_tlb_single(addr) |
83 | #endif | 83 | #endif |
84 | 84 | ||
85 | static inline bool tlb_defer_switch_to_init_mm(void) | ||
86 | { | ||
87 | /* | ||
88 | * If we have PCID, then switching to init_mm is reasonably | ||
89 | * fast. If we don't have PCID, then switching to init_mm is | ||
90 | * quite slow, so we try to defer it in the hopes that we can | ||
91 | * avoid it entirely. The latter approach runs the risk of | ||
92 | * receiving otherwise unnecessary IPIs. | ||
93 | * | ||
94 | * This choice is just a heuristic. The tlb code can handle this | ||
95 | * function returning true or false regardless of whether we have | ||
96 | * PCID. | ||
97 | */ | ||
98 | return !static_cpu_has(X86_FEATURE_PCID); | ||
99 | } | ||
100 | |||
85 | /* | 101 | /* |
86 | * 6 because 6 should be plenty and struct tlb_state will fit in | 102 | * 6 because 6 should be plenty and struct tlb_state will fit in |
87 | * two cache lines. | 103 | * two cache lines. |
@@ -105,6 +121,23 @@ struct tlb_state { | |||
105 | u16 next_asid; | 121 | u16 next_asid; |
106 | 122 | ||
107 | /* | 123 | /* |
124 | * We can be in one of several states: | ||
125 | * | ||
126 | * - Actively using an mm. Our CPU's bit will be set in | ||
127 | * mm_cpumask(loaded_mm) and is_lazy == false; | ||
128 | * | ||
129 | * - Not using a real mm. loaded_mm == &init_mm. Our CPU's bit | ||
130 | * will not be set in mm_cpumask(&init_mm) and is_lazy == false. | ||
131 | * | ||
132 | * - Lazily using a real mm. loaded_mm != &init_mm, our bit | ||
133 | * is set in mm_cpumask(loaded_mm), but is_lazy == true. | ||
134 | * We're heuristically guessing that the CR3 load we | ||
135 | * skipped more than makes up for the overhead added by | ||
136 | * lazy mode. | ||
137 | */ | ||
138 | bool is_lazy; | ||
139 | |||
140 | /* | ||
108 | * Access to this CR4 shadow and to H/W CR4 is protected by | 141 | * Access to this CR4 shadow and to H/W CR4 is protected by |
109 | * disabling interrupts when modifying either one. | 142 | * disabling interrupts when modifying either one. |
110 | */ | 143 | */ |
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c index 458da8509b75..6db28f17ff28 100644 --- a/arch/x86/kernel/amd_nb.c +++ b/arch/x86/kernel/amd_nb.c | |||
@@ -27,6 +27,8 @@ static const struct pci_device_id amd_root_ids[] = { | |||
27 | {} | 27 | {} |
28 | }; | 28 | }; |
29 | 29 | ||
30 | #define PCI_DEVICE_ID_AMD_CNB17H_F4 0x1704 | ||
31 | |||
30 | const struct pci_device_id amd_nb_misc_ids[] = { | 32 | const struct pci_device_id amd_nb_misc_ids[] = { |
31 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) }, | 33 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) }, |
32 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, | 34 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, |
@@ -37,6 +39,7 @@ const struct pci_device_id amd_nb_misc_ids[] = { | |||
37 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) }, | 39 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) }, |
38 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) }, | 40 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) }, |
39 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) }, | 41 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) }, |
42 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) }, | ||
40 | {} | 43 | {} |
41 | }; | 44 | }; |
42 | EXPORT_SYMBOL_GPL(amd_nb_misc_ids); | 45 | EXPORT_SYMBOL_GPL(amd_nb_misc_ids); |
@@ -48,6 +51,7 @@ static const struct pci_device_id amd_nb_link_ids[] = { | |||
48 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) }, | 51 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) }, |
49 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) }, | 52 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) }, |
50 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) }, | 53 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) }, |
54 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) }, | ||
51 | {} | 55 | {} |
52 | }; | 56 | }; |
53 | 57 | ||
@@ -402,11 +406,48 @@ void amd_flush_garts(void) | |||
402 | } | 406 | } |
403 | EXPORT_SYMBOL_GPL(amd_flush_garts); | 407 | EXPORT_SYMBOL_GPL(amd_flush_garts); |
404 | 408 | ||
409 | static void __fix_erratum_688(void *info) | ||
410 | { | ||
411 | #define MSR_AMD64_IC_CFG 0xC0011021 | ||
412 | |||
413 | msr_set_bit(MSR_AMD64_IC_CFG, 3); | ||
414 | msr_set_bit(MSR_AMD64_IC_CFG, 14); | ||
415 | } | ||
416 | |||
417 | /* Apply erratum 688 fix so machines without a BIOS fix work. */ | ||
418 | static __init void fix_erratum_688(void) | ||
419 | { | ||
420 | struct pci_dev *F4; | ||
421 | u32 val; | ||
422 | |||
423 | if (boot_cpu_data.x86 != 0x14) | ||
424 | return; | ||
425 | |||
426 | if (!amd_northbridges.num) | ||
427 | return; | ||
428 | |||
429 | F4 = node_to_amd_nb(0)->link; | ||
430 | if (!F4) | ||
431 | return; | ||
432 | |||
433 | if (pci_read_config_dword(F4, 0x164, &val)) | ||
434 | return; | ||
435 | |||
436 | if (val & BIT(2)) | ||
437 | return; | ||
438 | |||
439 | on_each_cpu(__fix_erratum_688, NULL, 0); | ||
440 | |||
441 | pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n"); | ||
442 | } | ||
443 | |||
405 | static __init int init_amd_nbs(void) | 444 | static __init int init_amd_nbs(void) |
406 | { | 445 | { |
407 | amd_cache_northbridges(); | 446 | amd_cache_northbridges(); |
408 | amd_cache_gart(); | 447 | amd_cache_gart(); |
409 | 448 | ||
449 | fix_erratum_688(); | ||
450 | |||
410 | return 0; | 451 | return 0; |
411 | } | 452 | } |
412 | 453 | ||
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index d705c769f77d..ff891772c9f8 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c | |||
@@ -573,11 +573,21 @@ static u32 bdx_deadline_rev(void) | |||
573 | return ~0U; | 573 | return ~0U; |
574 | } | 574 | } |
575 | 575 | ||
576 | static u32 skx_deadline_rev(void) | ||
577 | { | ||
578 | switch (boot_cpu_data.x86_mask) { | ||
579 | case 0x03: return 0x01000136; | ||
580 | case 0x04: return 0x02000014; | ||
581 | } | ||
582 | |||
583 | return ~0U; | ||
584 | } | ||
585 | |||
576 | static const struct x86_cpu_id deadline_match[] = { | 586 | static const struct x86_cpu_id deadline_match[] = { |
577 | DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_HASWELL_X, hsx_deadline_rev), | 587 | DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_HASWELL_X, hsx_deadline_rev), |
578 | DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_X, 0x0b000020), | 588 | DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_BROADWELL_X, 0x0b000020), |
579 | DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_BROADWELL_XEON_D, bdx_deadline_rev), | 589 | DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_BROADWELL_XEON_D, bdx_deadline_rev), |
580 | DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_SKYLAKE_X, 0x02000014), | 590 | DEADLINE_MODEL_MATCH_FUNC( INTEL_FAM6_SKYLAKE_X, skx_deadline_rev), |
581 | 591 | ||
582 | DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_CORE, 0x22), | 592 | DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_CORE, 0x22), |
583 | DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_ULT, 0x20), | 593 | DEADLINE_MODEL_MATCH_REV ( INTEL_FAM6_HASWELL_ULT, 0x20), |
@@ -600,7 +610,8 @@ static void apic_check_deadline_errata(void) | |||
600 | const struct x86_cpu_id *m; | 610 | const struct x86_cpu_id *m; |
601 | u32 rev; | 611 | u32 rev; |
602 | 612 | ||
603 | if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER)) | 613 | if (!boot_cpu_has(X86_FEATURE_TSC_DEADLINE_TIMER) || |
614 | boot_cpu_has(X86_FEATURE_HYPERVISOR)) | ||
604 | return; | 615 | return; |
605 | 616 | ||
606 | m = x86_match_cpu(deadline_match); | 617 | m = x86_match_cpu(deadline_match); |
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index 24f749324c0f..9990a71e311f 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c | |||
@@ -831,7 +831,6 @@ static int __cache_amd_cpumap_setup(unsigned int cpu, int index, | |||
831 | } else if (boot_cpu_has(X86_FEATURE_TOPOEXT)) { | 831 | } else if (boot_cpu_has(X86_FEATURE_TOPOEXT)) { |
832 | unsigned int apicid, nshared, first, last; | 832 | unsigned int apicid, nshared, first, last; |
833 | 833 | ||
834 | this_leaf = this_cpu_ci->info_list + index; | ||
835 | nshared = base->eax.split.num_threads_sharing + 1; | 834 | nshared = base->eax.split.num_threads_sharing + 1; |
836 | apicid = cpu_data(cpu).apicid; | 835 | apicid = cpu_data(cpu).apicid; |
837 | first = apicid - (apicid % nshared); | 836 | first = apicid - (apicid % nshared); |
diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h index 098530a93bb7..debb974fd17d 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-internal.h +++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h | |||
@@ -1,3 +1,6 @@ | |||
1 | #ifndef __X86_MCE_INTERNAL_H__ | ||
2 | #define __X86_MCE_INTERNAL_H__ | ||
3 | |||
1 | #include <linux/device.h> | 4 | #include <linux/device.h> |
2 | #include <asm/mce.h> | 5 | #include <asm/mce.h> |
3 | 6 | ||
@@ -108,3 +111,7 @@ static inline void mce_work_trigger(void) { } | |||
108 | static inline void mce_register_injector_chain(struct notifier_block *nb) { } | 111 | static inline void mce_register_injector_chain(struct notifier_block *nb) { } |
109 | static inline void mce_unregister_injector_chain(struct notifier_block *nb) { } | 112 | static inline void mce_unregister_injector_chain(struct notifier_block *nb) { } |
110 | #endif | 113 | #endif |
114 | |||
115 | extern struct mca_config mca_cfg; | ||
116 | |||
117 | #endif /* __X86_MCE_INTERNAL_H__ */ | ||
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 40e28ed77fbf..486f640b02ef 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c | |||
@@ -28,6 +28,8 @@ | |||
28 | #include <asm/msr.h> | 28 | #include <asm/msr.h> |
29 | #include <asm/trace/irq_vectors.h> | 29 | #include <asm/trace/irq_vectors.h> |
30 | 30 | ||
31 | #include "mce-internal.h" | ||
32 | |||
31 | #define NR_BLOCKS 5 | 33 | #define NR_BLOCKS 5 |
32 | #define THRESHOLD_MAX 0xFFF | 34 | #define THRESHOLD_MAX 0xFFF |
33 | #define INT_TYPE_APIC 0x00020000 | 35 | #define INT_TYPE_APIC 0x00020000 |
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c index 86e8f0b2537b..c4fa4a85d4cb 100644 --- a/arch/x86/kernel/cpu/microcode/core.c +++ b/arch/x86/kernel/cpu/microcode/core.c | |||
@@ -122,9 +122,6 @@ static bool __init check_loader_disabled_bsp(void) | |||
122 | bool *res = &dis_ucode_ldr; | 122 | bool *res = &dis_ucode_ldr; |
123 | #endif | 123 | #endif |
124 | 124 | ||
125 | if (!have_cpuid_p()) | ||
126 | return *res; | ||
127 | |||
128 | /* | 125 | /* |
129 | * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not | 126 | * CPUID(1).ECX[31]: reserved for hypervisor use. This is still not |
130 | * completely accurate as xen pv guests don't see that CPUID bit set but | 127 | * completely accurate as xen pv guests don't see that CPUID bit set but |
@@ -166,24 +163,36 @@ bool get_builtin_firmware(struct cpio_data *cd, const char *name) | |||
166 | void __init load_ucode_bsp(void) | 163 | void __init load_ucode_bsp(void) |
167 | { | 164 | { |
168 | unsigned int cpuid_1_eax; | 165 | unsigned int cpuid_1_eax; |
166 | bool intel = true; | ||
169 | 167 | ||
170 | if (check_loader_disabled_bsp()) | 168 | if (!have_cpuid_p()) |
171 | return; | 169 | return; |
172 | 170 | ||
173 | cpuid_1_eax = native_cpuid_eax(1); | 171 | cpuid_1_eax = native_cpuid_eax(1); |
174 | 172 | ||
175 | switch (x86_cpuid_vendor()) { | 173 | switch (x86_cpuid_vendor()) { |
176 | case X86_VENDOR_INTEL: | 174 | case X86_VENDOR_INTEL: |
177 | if (x86_family(cpuid_1_eax) >= 6) | 175 | if (x86_family(cpuid_1_eax) < 6) |
178 | load_ucode_intel_bsp(); | 176 | return; |
179 | break; | 177 | break; |
178 | |||
180 | case X86_VENDOR_AMD: | 179 | case X86_VENDOR_AMD: |
181 | if (x86_family(cpuid_1_eax) >= 0x10) | 180 | if (x86_family(cpuid_1_eax) < 0x10) |
182 | load_ucode_amd_bsp(cpuid_1_eax); | 181 | return; |
182 | intel = false; | ||
183 | break; | 183 | break; |
184 | |||
184 | default: | 185 | default: |
185 | break; | 186 | return; |
186 | } | 187 | } |
188 | |||
189 | if (check_loader_disabled_bsp()) | ||
190 | return; | ||
191 | |||
192 | if (intel) | ||
193 | load_ucode_intel_bsp(); | ||
194 | else | ||
195 | load_ucode_amd_bsp(cpuid_1_eax); | ||
187 | } | 196 | } |
188 | 197 | ||
189 | static bool check_loader_disabled_ap(void) | 198 | static bool check_loader_disabled_ap(void) |
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index 8f7a9bbad514..7dbcb7adf797 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/mm.h> | 34 | #include <linux/mm.h> |
35 | 35 | ||
36 | #include <asm/microcode_intel.h> | 36 | #include <asm/microcode_intel.h> |
37 | #include <asm/intel-family.h> | ||
37 | #include <asm/processor.h> | 38 | #include <asm/processor.h> |
38 | #include <asm/tlbflush.h> | 39 | #include <asm/tlbflush.h> |
39 | #include <asm/setup.h> | 40 | #include <asm/setup.h> |
@@ -918,6 +919,18 @@ static int get_ucode_fw(void *to, const void *from, size_t n) | |||
918 | return 0; | 919 | return 0; |
919 | } | 920 | } |
920 | 921 | ||
922 | static bool is_blacklisted(unsigned int cpu) | ||
923 | { | ||
924 | struct cpuinfo_x86 *c = &cpu_data(cpu); | ||
925 | |||
926 | if (c->x86 == 6 && c->x86_model == INTEL_FAM6_BROADWELL_X) { | ||
927 | pr_err_once("late loading on model 79 is disabled.\n"); | ||
928 | return true; | ||
929 | } | ||
930 | |||
931 | return false; | ||
932 | } | ||
933 | |||
921 | static enum ucode_state request_microcode_fw(int cpu, struct device *device, | 934 | static enum ucode_state request_microcode_fw(int cpu, struct device *device, |
922 | bool refresh_fw) | 935 | bool refresh_fw) |
923 | { | 936 | { |
@@ -926,6 +939,9 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device, | |||
926 | const struct firmware *firmware; | 939 | const struct firmware *firmware; |
927 | enum ucode_state ret; | 940 | enum ucode_state ret; |
928 | 941 | ||
942 | if (is_blacklisted(cpu)) | ||
943 | return UCODE_NFOUND; | ||
944 | |||
929 | sprintf(name, "intel-ucode/%02x-%02x-%02x", | 945 | sprintf(name, "intel-ucode/%02x-%02x-%02x", |
930 | c->x86, c->x86_model, c->x86_mask); | 946 | c->x86, c->x86_model, c->x86_mask); |
931 | 947 | ||
@@ -950,6 +966,9 @@ static int get_ucode_user(void *to, const void *from, size_t n) | |||
950 | static enum ucode_state | 966 | static enum ucode_state |
951 | request_microcode_user(int cpu, const void __user *buf, size_t size) | 967 | request_microcode_user(int cpu, const void __user *buf, size_t size) |
952 | { | 968 | { |
969 | if (is_blacklisted(cpu)) | ||
970 | return UCODE_NFOUND; | ||
971 | |||
953 | return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user); | 972 | return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user); |
954 | } | 973 | } |
955 | 974 | ||
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c index cf2ce063f65a..2902ca4d5993 100644 --- a/arch/x86/kernel/head32.c +++ b/arch/x86/kernel/head32.c | |||
@@ -30,10 +30,11 @@ static void __init i386_default_early_setup(void) | |||
30 | 30 | ||
31 | asmlinkage __visible void __init i386_start_kernel(void) | 31 | asmlinkage __visible void __init i386_start_kernel(void) |
32 | { | 32 | { |
33 | cr4_init_shadow(); | 33 | /* Make sure IDT is set up before any exception happens */ |
34 | |||
35 | idt_setup_early_handler(); | 34 | idt_setup_early_handler(); |
36 | 35 | ||
36 | cr4_init_shadow(); | ||
37 | |||
37 | sanitize_boot_params(&boot_params); | 38 | sanitize_boot_params(&boot_params); |
38 | 39 | ||
39 | x86_early_init_platform_quirks(); | 40 | x86_early_init_platform_quirks(); |
diff --git a/arch/x86/kernel/kprobes/common.h b/arch/x86/kernel/kprobes/common.h index db2182d63ed0..3fc0f9a794cb 100644 --- a/arch/x86/kernel/kprobes/common.h +++ b/arch/x86/kernel/kprobes/common.h | |||
@@ -3,6 +3,15 @@ | |||
3 | 3 | ||
4 | /* Kprobes and Optprobes common header */ | 4 | /* Kprobes and Optprobes common header */ |
5 | 5 | ||
6 | #include <asm/asm.h> | ||
7 | |||
8 | #ifdef CONFIG_FRAME_POINTER | ||
9 | # define SAVE_RBP_STRING " push %" _ASM_BP "\n" \ | ||
10 | " mov %" _ASM_SP ", %" _ASM_BP "\n" | ||
11 | #else | ||
12 | # define SAVE_RBP_STRING " push %" _ASM_BP "\n" | ||
13 | #endif | ||
14 | |||
6 | #ifdef CONFIG_X86_64 | 15 | #ifdef CONFIG_X86_64 |
7 | #define SAVE_REGS_STRING \ | 16 | #define SAVE_REGS_STRING \ |
8 | /* Skip cs, ip, orig_ax. */ \ | 17 | /* Skip cs, ip, orig_ax. */ \ |
@@ -17,7 +26,7 @@ | |||
17 | " pushq %r10\n" \ | 26 | " pushq %r10\n" \ |
18 | " pushq %r11\n" \ | 27 | " pushq %r11\n" \ |
19 | " pushq %rbx\n" \ | 28 | " pushq %rbx\n" \ |
20 | " pushq %rbp\n" \ | 29 | SAVE_RBP_STRING \ |
21 | " pushq %r12\n" \ | 30 | " pushq %r12\n" \ |
22 | " pushq %r13\n" \ | 31 | " pushq %r13\n" \ |
23 | " pushq %r14\n" \ | 32 | " pushq %r14\n" \ |
@@ -48,7 +57,7 @@ | |||
48 | " pushl %es\n" \ | 57 | " pushl %es\n" \ |
49 | " pushl %ds\n" \ | 58 | " pushl %ds\n" \ |
50 | " pushl %eax\n" \ | 59 | " pushl %eax\n" \ |
51 | " pushl %ebp\n" \ | 60 | SAVE_RBP_STRING \ |
52 | " pushl %edi\n" \ | 61 | " pushl %edi\n" \ |
53 | " pushl %esi\n" \ | 62 | " pushl %esi\n" \ |
54 | " pushl %edx\n" \ | 63 | " pushl %edx\n" \ |
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index f0153714ddac..0742491cbb73 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c | |||
@@ -1080,8 +1080,6 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) | |||
1080 | * raw stack chunk with redzones: | 1080 | * raw stack chunk with redzones: |
1081 | */ | 1081 | */ |
1082 | __memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, MIN_STACK_SIZE(addr)); | 1082 | __memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr, MIN_STACK_SIZE(addr)); |
1083 | regs->flags &= ~X86_EFLAGS_IF; | ||
1084 | trace_hardirqs_off(); | ||
1085 | regs->ip = (unsigned long)(jp->entry); | 1083 | regs->ip = (unsigned long)(jp->entry); |
1086 | 1084 | ||
1087 | /* | 1085 | /* |
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 54180fa6f66f..add33f600531 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c | |||
@@ -105,6 +105,10 @@ void __noreturn machine_real_restart(unsigned int type) | |||
105 | load_cr3(initial_page_table); | 105 | load_cr3(initial_page_table); |
106 | #else | 106 | #else |
107 | write_cr3(real_mode_header->trampoline_pgd); | 107 | write_cr3(real_mode_header->trampoline_pgd); |
108 | |||
109 | /* Exiting long mode will fail if CR4.PCIDE is set. */ | ||
110 | if (static_cpu_has(X86_FEATURE_PCID)) | ||
111 | cr4_clear_bits(X86_CR4_PCIDE); | ||
108 | #endif | 112 | #endif |
109 | 113 | ||
110 | /* Jump to the identity-mapped low memory code */ | 114 | /* Jump to the identity-mapped low memory code */ |
diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c index d145a0b1f529..3dc26f95d46e 100644 --- a/arch/x86/kernel/unwind_frame.c +++ b/arch/x86/kernel/unwind_frame.c | |||
@@ -44,7 +44,8 @@ static void unwind_dump(struct unwind_state *state) | |||
44 | state->stack_info.type, state->stack_info.next_sp, | 44 | state->stack_info.type, state->stack_info.next_sp, |
45 | state->stack_mask, state->graph_idx); | 45 | state->stack_mask, state->graph_idx); |
46 | 46 | ||
47 | for (sp = state->orig_sp; sp; sp = PTR_ALIGN(stack_info.next_sp, sizeof(long))) { | 47 | for (sp = PTR_ALIGN(state->orig_sp, sizeof(long)); sp; |
48 | sp = PTR_ALIGN(stack_info.next_sp, sizeof(long))) { | ||
48 | if (get_stack_info(sp, state->task, &stack_info, &visit_mask)) | 49 | if (get_stack_info(sp, state->task, &stack_info, &visit_mask)) |
49 | break; | 50 | break; |
50 | 51 | ||
@@ -174,6 +175,7 @@ static bool is_last_task_frame(struct unwind_state *state) | |||
174 | * This determines if the frame pointer actually contains an encoded pointer to | 175 | * This determines if the frame pointer actually contains an encoded pointer to |
175 | * pt_regs on the stack. See ENCODE_FRAME_POINTER. | 176 | * pt_regs on the stack. See ENCODE_FRAME_POINTER. |
176 | */ | 177 | */ |
178 | #ifdef CONFIG_X86_64 | ||
177 | static struct pt_regs *decode_frame_pointer(unsigned long *bp) | 179 | static struct pt_regs *decode_frame_pointer(unsigned long *bp) |
178 | { | 180 | { |
179 | unsigned long regs = (unsigned long)bp; | 181 | unsigned long regs = (unsigned long)bp; |
@@ -183,6 +185,23 @@ static struct pt_regs *decode_frame_pointer(unsigned long *bp) | |||
183 | 185 | ||
184 | return (struct pt_regs *)(regs & ~0x1); | 186 | return (struct pt_regs *)(regs & ~0x1); |
185 | } | 187 | } |
188 | #else | ||
189 | static struct pt_regs *decode_frame_pointer(unsigned long *bp) | ||
190 | { | ||
191 | unsigned long regs = (unsigned long)bp; | ||
192 | |||
193 | if (regs & 0x80000000) | ||
194 | return NULL; | ||
195 | |||
196 | return (struct pt_regs *)(regs | 0x80000000); | ||
197 | } | ||
198 | #endif | ||
199 | |||
200 | #ifdef CONFIG_X86_32 | ||
201 | #define KERNEL_REGS_SIZE (sizeof(struct pt_regs) - 2*sizeof(long)) | ||
202 | #else | ||
203 | #define KERNEL_REGS_SIZE (sizeof(struct pt_regs)) | ||
204 | #endif | ||
186 | 205 | ||
187 | static bool update_stack_state(struct unwind_state *state, | 206 | static bool update_stack_state(struct unwind_state *state, |
188 | unsigned long *next_bp) | 207 | unsigned long *next_bp) |
@@ -202,7 +221,7 @@ static bool update_stack_state(struct unwind_state *state, | |||
202 | regs = decode_frame_pointer(next_bp); | 221 | regs = decode_frame_pointer(next_bp); |
203 | if (regs) { | 222 | if (regs) { |
204 | frame = (unsigned long *)regs; | 223 | frame = (unsigned long *)regs; |
205 | len = regs_size(regs); | 224 | len = KERNEL_REGS_SIZE; |
206 | state->got_irq = true; | 225 | state->got_irq = true; |
207 | } else { | 226 | } else { |
208 | frame = next_bp; | 227 | frame = next_bp; |
@@ -226,6 +245,14 @@ static bool update_stack_state(struct unwind_state *state, | |||
226 | frame < prev_frame_end) | 245 | frame < prev_frame_end) |
227 | return false; | 246 | return false; |
228 | 247 | ||
248 | /* | ||
249 | * On 32-bit with user mode regs, make sure the last two regs are safe | ||
250 | * to access: | ||
251 | */ | ||
252 | if (IS_ENABLED(CONFIG_X86_32) && regs && user_mode(regs) && | ||
253 | !on_stack(info, frame, len + 2*sizeof(long))) | ||
254 | return false; | ||
255 | |||
229 | /* Move state to the next frame: */ | 256 | /* Move state to the next frame: */ |
230 | if (regs) { | 257 | if (regs) { |
231 | state->regs = regs; | 258 | state->regs = regs; |
@@ -328,6 +355,13 @@ bad_address: | |||
328 | state->regs->sp < (unsigned long)task_pt_regs(state->task)) | 355 | state->regs->sp < (unsigned long)task_pt_regs(state->task)) |
329 | goto the_end; | 356 | goto the_end; |
330 | 357 | ||
358 | /* | ||
359 | * There are some known frame pointer issues on 32-bit. Disable | ||
360 | * unwinder warnings on 32-bit until it gets objtool support. | ||
361 | */ | ||
362 | if (IS_ENABLED(CONFIG_X86_32)) | ||
363 | goto the_end; | ||
364 | |||
331 | if (state->regs) { | 365 | if (state->regs) { |
332 | printk_deferred_once(KERN_WARNING | 366 | printk_deferred_once(KERN_WARNING |
333 | "WARNING: kernel stack regs at %p in %s:%d has bad 'bp' value %p\n", | 367 | "WARNING: kernel stack regs at %p in %s:%d has bad 'bp' value %p\n", |
diff --git a/arch/x86/kernel/unwind_orc.c b/arch/x86/kernel/unwind_orc.c index 570b70d3f604..b95007e7c1b3 100644 --- a/arch/x86/kernel/unwind_orc.c +++ b/arch/x86/kernel/unwind_orc.c | |||
@@ -86,8 +86,8 @@ static struct orc_entry *orc_find(unsigned long ip) | |||
86 | idx = (ip - LOOKUP_START_IP) / LOOKUP_BLOCK_SIZE; | 86 | idx = (ip - LOOKUP_START_IP) / LOOKUP_BLOCK_SIZE; |
87 | 87 | ||
88 | if (unlikely((idx >= lookup_num_blocks-1))) { | 88 | if (unlikely((idx >= lookup_num_blocks-1))) { |
89 | orc_warn("WARNING: bad lookup idx: idx=%u num=%u ip=%lx\n", | 89 | orc_warn("WARNING: bad lookup idx: idx=%u num=%u ip=%pB\n", |
90 | idx, lookup_num_blocks, ip); | 90 | idx, lookup_num_blocks, (void *)ip); |
91 | return NULL; | 91 | return NULL; |
92 | } | 92 | } |
93 | 93 | ||
@@ -96,8 +96,8 @@ static struct orc_entry *orc_find(unsigned long ip) | |||
96 | 96 | ||
97 | if (unlikely((__start_orc_unwind + start >= __stop_orc_unwind) || | 97 | if (unlikely((__start_orc_unwind + start >= __stop_orc_unwind) || |
98 | (__start_orc_unwind + stop > __stop_orc_unwind))) { | 98 | (__start_orc_unwind + stop > __stop_orc_unwind))) { |
99 | orc_warn("WARNING: bad lookup value: idx=%u num=%u start=%u stop=%u ip=%lx\n", | 99 | orc_warn("WARNING: bad lookup value: idx=%u num=%u start=%u stop=%u ip=%pB\n", |
100 | idx, lookup_num_blocks, start, stop, ip); | 100 | idx, lookup_num_blocks, start, stop, (void *)ip); |
101 | return NULL; | 101 | return NULL; |
102 | } | 102 | } |
103 | 103 | ||
@@ -373,7 +373,7 @@ bool unwind_next_frame(struct unwind_state *state) | |||
373 | 373 | ||
374 | case ORC_REG_R10: | 374 | case ORC_REG_R10: |
375 | if (!state->regs || !state->full_regs) { | 375 | if (!state->regs || !state->full_regs) { |
376 | orc_warn("missing regs for base reg R10 at ip %p\n", | 376 | orc_warn("missing regs for base reg R10 at ip %pB\n", |
377 | (void *)state->ip); | 377 | (void *)state->ip); |
378 | goto done; | 378 | goto done; |
379 | } | 379 | } |
@@ -382,7 +382,7 @@ bool unwind_next_frame(struct unwind_state *state) | |||
382 | 382 | ||
383 | case ORC_REG_R13: | 383 | case ORC_REG_R13: |
384 | if (!state->regs || !state->full_regs) { | 384 | if (!state->regs || !state->full_regs) { |
385 | orc_warn("missing regs for base reg R13 at ip %p\n", | 385 | orc_warn("missing regs for base reg R13 at ip %pB\n", |
386 | (void *)state->ip); | 386 | (void *)state->ip); |
387 | goto done; | 387 | goto done; |
388 | } | 388 | } |
@@ -391,7 +391,7 @@ bool unwind_next_frame(struct unwind_state *state) | |||
391 | 391 | ||
392 | case ORC_REG_DI: | 392 | case ORC_REG_DI: |
393 | if (!state->regs || !state->full_regs) { | 393 | if (!state->regs || !state->full_regs) { |
394 | orc_warn("missing regs for base reg DI at ip %p\n", | 394 | orc_warn("missing regs for base reg DI at ip %pB\n", |
395 | (void *)state->ip); | 395 | (void *)state->ip); |
396 | goto done; | 396 | goto done; |
397 | } | 397 | } |
@@ -400,7 +400,7 @@ bool unwind_next_frame(struct unwind_state *state) | |||
400 | 400 | ||
401 | case ORC_REG_DX: | 401 | case ORC_REG_DX: |
402 | if (!state->regs || !state->full_regs) { | 402 | if (!state->regs || !state->full_regs) { |
403 | orc_warn("missing regs for base reg DX at ip %p\n", | 403 | orc_warn("missing regs for base reg DX at ip %pB\n", |
404 | (void *)state->ip); | 404 | (void *)state->ip); |
405 | goto done; | 405 | goto done; |
406 | } | 406 | } |
@@ -408,7 +408,7 @@ bool unwind_next_frame(struct unwind_state *state) | |||
408 | break; | 408 | break; |
409 | 409 | ||
410 | default: | 410 | default: |
411 | orc_warn("unknown SP base reg %d for ip %p\n", | 411 | orc_warn("unknown SP base reg %d for ip %pB\n", |
412 | orc->sp_reg, (void *)state->ip); | 412 | orc->sp_reg, (void *)state->ip); |
413 | goto done; | 413 | goto done; |
414 | } | 414 | } |
@@ -436,7 +436,7 @@ bool unwind_next_frame(struct unwind_state *state) | |||
436 | 436 | ||
437 | case ORC_TYPE_REGS: | 437 | case ORC_TYPE_REGS: |
438 | if (!deref_stack_regs(state, sp, &state->ip, &state->sp, true)) { | 438 | if (!deref_stack_regs(state, sp, &state->ip, &state->sp, true)) { |
439 | orc_warn("can't dereference registers at %p for ip %p\n", | 439 | orc_warn("can't dereference registers at %p for ip %pB\n", |
440 | (void *)sp, (void *)orig_ip); | 440 | (void *)sp, (void *)orig_ip); |
441 | goto done; | 441 | goto done; |
442 | } | 442 | } |
@@ -448,7 +448,7 @@ bool unwind_next_frame(struct unwind_state *state) | |||
448 | 448 | ||
449 | case ORC_TYPE_REGS_IRET: | 449 | case ORC_TYPE_REGS_IRET: |
450 | if (!deref_stack_regs(state, sp, &state->ip, &state->sp, false)) { | 450 | if (!deref_stack_regs(state, sp, &state->ip, &state->sp, false)) { |
451 | orc_warn("can't dereference iret registers at %p for ip %p\n", | 451 | orc_warn("can't dereference iret registers at %p for ip %pB\n", |
452 | (void *)sp, (void *)orig_ip); | 452 | (void *)sp, (void *)orig_ip); |
453 | goto done; | 453 | goto done; |
454 | } | 454 | } |
@@ -465,7 +465,8 @@ bool unwind_next_frame(struct unwind_state *state) | |||
465 | break; | 465 | break; |
466 | 466 | ||
467 | default: | 467 | default: |
468 | orc_warn("unknown .orc_unwind entry type %d\n", orc->type); | 468 | orc_warn("unknown .orc_unwind entry type %d for ip %pB\n", |
469 | orc->type, (void *)orig_ip); | ||
469 | break; | 470 | break; |
470 | } | 471 | } |
471 | 472 | ||
@@ -487,7 +488,7 @@ bool unwind_next_frame(struct unwind_state *state) | |||
487 | break; | 488 | break; |
488 | 489 | ||
489 | default: | 490 | default: |
490 | orc_warn("unknown BP base reg %d for ip %p\n", | 491 | orc_warn("unknown BP base reg %d for ip %pB\n", |
491 | orc->bp_reg, (void *)orig_ip); | 492 | orc->bp_reg, (void *)orig_ip); |
492 | goto done; | 493 | goto done; |
493 | } | 494 | } |
@@ -496,7 +497,7 @@ bool unwind_next_frame(struct unwind_state *state) | |||
496 | if (state->stack_info.type == prev_type && | 497 | if (state->stack_info.type == prev_type && |
497 | on_stack(&state->stack_info, (void *)state->sp, sizeof(long)) && | 498 | on_stack(&state->stack_info, (void *)state->sp, sizeof(long)) && |
498 | state->sp <= prev_sp) { | 499 | state->sp <= prev_sp) { |
499 | orc_warn("stack going in the wrong direction? ip=%p\n", | 500 | orc_warn("stack going in the wrong direction? ip=%pB\n", |
500 | (void *)orig_ip); | 501 | (void *)orig_ip); |
501 | goto done; | 502 | goto done; |
502 | } | 503 | } |
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 106d4a029a8a..7a69cf053711 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -3974,19 +3974,19 @@ static inline bool is_last_gpte(struct kvm_mmu *mmu, | |||
3974 | unsigned level, unsigned gpte) | 3974 | unsigned level, unsigned gpte) |
3975 | { | 3975 | { |
3976 | /* | 3976 | /* |
3977 | * PT_PAGE_TABLE_LEVEL always terminates. The RHS has bit 7 set | ||
3978 | * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means | ||
3979 | * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then. | ||
3980 | */ | ||
3981 | gpte |= level - PT_PAGE_TABLE_LEVEL - 1; | ||
3982 | |||
3983 | /* | ||
3984 | * The RHS has bit 7 set iff level < mmu->last_nonleaf_level. | 3977 | * The RHS has bit 7 set iff level < mmu->last_nonleaf_level. |
3985 | * If it is clear, there are no large pages at this level, so clear | 3978 | * If it is clear, there are no large pages at this level, so clear |
3986 | * PT_PAGE_SIZE_MASK in gpte if that is the case. | 3979 | * PT_PAGE_SIZE_MASK in gpte if that is the case. |
3987 | */ | 3980 | */ |
3988 | gpte &= level - mmu->last_nonleaf_level; | 3981 | gpte &= level - mmu->last_nonleaf_level; |
3989 | 3982 | ||
3983 | /* | ||
3984 | * PT_PAGE_TABLE_LEVEL always terminates. The RHS has bit 7 set | ||
3985 | * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means | ||
3986 | * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then. | ||
3987 | */ | ||
3988 | gpte |= level - PT_PAGE_TABLE_LEVEL - 1; | ||
3989 | |||
3990 | return gpte & PT_PAGE_SIZE_MASK; | 3990 | return gpte & PT_PAGE_SIZE_MASK; |
3991 | } | 3991 | } |
3992 | 3992 | ||
@@ -4555,6 +4555,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly, | |||
4555 | 4555 | ||
4556 | update_permission_bitmask(vcpu, context, true); | 4556 | update_permission_bitmask(vcpu, context, true); |
4557 | update_pkru_bitmask(vcpu, context, true); | 4557 | update_pkru_bitmask(vcpu, context, true); |
4558 | update_last_nonleaf_level(vcpu, context); | ||
4558 | reset_rsvds_bits_mask_ept(vcpu, context, execonly); | 4559 | reset_rsvds_bits_mask_ept(vcpu, context, execonly); |
4559 | reset_ept_shadow_zero_bits_mask(vcpu, context, execonly); | 4560 | reset_ept_shadow_zero_bits_mask(vcpu, context, execonly); |
4560 | } | 4561 | } |
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 86b68dc5a649..f18d1f8d332b 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h | |||
@@ -334,10 +334,11 @@ retry_walk: | |||
334 | --walker->level; | 334 | --walker->level; |
335 | 335 | ||
336 | index = PT_INDEX(addr, walker->level); | 336 | index = PT_INDEX(addr, walker->level); |
337 | |||
338 | table_gfn = gpte_to_gfn(pte); | 337 | table_gfn = gpte_to_gfn(pte); |
339 | offset = index * sizeof(pt_element_t); | 338 | offset = index * sizeof(pt_element_t); |
340 | pte_gpa = gfn_to_gpa(table_gfn) + offset; | 339 | pte_gpa = gfn_to_gpa(table_gfn) + offset; |
340 | |||
341 | BUG_ON(walker->level < 1); | ||
341 | walker->table_gfn[walker->level - 1] = table_gfn; | 342 | walker->table_gfn[walker->level - 1] = table_gfn; |
342 | walker->pte_gpa[walker->level - 1] = pte_gpa; | 343 | walker->pte_gpa[walker->level - 1] = pte_gpa; |
343 | 344 | ||
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index a2b804e10c95..95a01609d7ee 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -11297,7 +11297,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu, | |||
11297 | 11297 | ||
11298 | /* Same as above - no reason to call set_cr4_guest_host_mask(). */ | 11298 | /* Same as above - no reason to call set_cr4_guest_host_mask(). */ |
11299 | vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); | 11299 | vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK); |
11300 | kvm_set_cr4(vcpu, vmcs12->host_cr4); | 11300 | vmx_set_cr4(vcpu, vmcs12->host_cr4); |
11301 | 11301 | ||
11302 | nested_ept_uninit_mmu_context(vcpu); | 11302 | nested_ept_uninit_mmu_context(vcpu); |
11303 | 11303 | ||
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 72bf8c01c6e3..e1f095884386 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile | |||
@@ -1,5 +1,12 @@ | |||
1 | # Kernel does not boot with instrumentation of tlb.c. | 1 | # Kernel does not boot with instrumentation of tlb.c and mem_encrypt.c |
2 | KCOV_INSTRUMENT_tlb.o := n | 2 | KCOV_INSTRUMENT_tlb.o := n |
3 | KCOV_INSTRUMENT_mem_encrypt.o := n | ||
4 | |||
5 | KASAN_SANITIZE_mem_encrypt.o := n | ||
6 | |||
7 | ifdef CONFIG_FUNCTION_TRACER | ||
8 | CFLAGS_REMOVE_mem_encrypt.o = -pg | ||
9 | endif | ||
3 | 10 | ||
4 | obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ | 11 | obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ |
5 | pat.o pgtable.o physaddr.o setup_nx.o tlb.o | 12 | pat.o pgtable.o physaddr.o setup_nx.o tlb.o |
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 49d9778376d7..0f3d0cea4d00 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c | |||
@@ -30,6 +30,7 @@ | |||
30 | 30 | ||
31 | atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1); | 31 | atomic64_t last_mm_ctx_id = ATOMIC64_INIT(1); |
32 | 32 | ||
33 | |||
33 | static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen, | 34 | static void choose_new_asid(struct mm_struct *next, u64 next_tlb_gen, |
34 | u16 *new_asid, bool *need_flush) | 35 | u16 *new_asid, bool *need_flush) |
35 | { | 36 | { |
@@ -80,7 +81,7 @@ void leave_mm(int cpu) | |||
80 | return; | 81 | return; |
81 | 82 | ||
82 | /* Warn if we're not lazy. */ | 83 | /* Warn if we're not lazy. */ |
83 | WARN_ON(cpumask_test_cpu(smp_processor_id(), mm_cpumask(loaded_mm))); | 84 | WARN_ON(!this_cpu_read(cpu_tlbstate.is_lazy)); |
84 | 85 | ||
85 | switch_mm(NULL, &init_mm, NULL); | 86 | switch_mm(NULL, &init_mm, NULL); |
86 | } | 87 | } |
@@ -142,45 +143,24 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, | |||
142 | __flush_tlb_all(); | 143 | __flush_tlb_all(); |
143 | } | 144 | } |
144 | #endif | 145 | #endif |
146 | this_cpu_write(cpu_tlbstate.is_lazy, false); | ||
145 | 147 | ||
146 | if (real_prev == next) { | 148 | if (real_prev == next) { |
147 | VM_BUG_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) != | 149 | VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[prev_asid].ctx_id) != |
148 | next->context.ctx_id); | 150 | next->context.ctx_id); |
149 | |||
150 | if (cpumask_test_cpu(cpu, mm_cpumask(next))) { | ||
151 | /* | ||
152 | * There's nothing to do: we weren't lazy, and we | ||
153 | * aren't changing our mm. We don't need to flush | ||
154 | * anything, nor do we need to update CR3, CR4, or | ||
155 | * LDTR. | ||
156 | */ | ||
157 | return; | ||
158 | } | ||
159 | |||
160 | /* Resume remote flushes and then read tlb_gen. */ | ||
161 | cpumask_set_cpu(cpu, mm_cpumask(next)); | ||
162 | next_tlb_gen = atomic64_read(&next->context.tlb_gen); | ||
163 | |||
164 | if (this_cpu_read(cpu_tlbstate.ctxs[prev_asid].tlb_gen) < | ||
165 | next_tlb_gen) { | ||
166 | /* | ||
167 | * Ideally, we'd have a flush_tlb() variant that | ||
168 | * takes the known CR3 value as input. This would | ||
169 | * be faster on Xen PV and on hypothetical CPUs | ||
170 | * on which INVPCID is fast. | ||
171 | */ | ||
172 | this_cpu_write(cpu_tlbstate.ctxs[prev_asid].tlb_gen, | ||
173 | next_tlb_gen); | ||
174 | write_cr3(build_cr3(next, prev_asid)); | ||
175 | trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, | ||
176 | TLB_FLUSH_ALL); | ||
177 | } | ||
178 | 151 | ||
179 | /* | 152 | /* |
180 | * We just exited lazy mode, which means that CR4 and/or LDTR | 153 | * We don't currently support having a real mm loaded without |
181 | * may be stale. (Changes to the required CR4 and LDTR states | 154 | * our cpu set in mm_cpumask(). We have all the bookkeeping |
182 | * are not reflected in tlb_gen.) | 155 | * in place to figure out whether we would need to flush |
156 | * if our cpu were cleared in mm_cpumask(), but we don't | ||
157 | * currently use it. | ||
183 | */ | 158 | */ |
159 | if (WARN_ON_ONCE(real_prev != &init_mm && | ||
160 | !cpumask_test_cpu(cpu, mm_cpumask(next)))) | ||
161 | cpumask_set_cpu(cpu, mm_cpumask(next)); | ||
162 | |||
163 | return; | ||
184 | } else { | 164 | } else { |
185 | u16 new_asid; | 165 | u16 new_asid; |
186 | bool need_flush; | 166 | bool need_flush; |
@@ -199,10 +179,9 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, | |||
199 | } | 179 | } |
200 | 180 | ||
201 | /* Stop remote flushes for the previous mm */ | 181 | /* Stop remote flushes for the previous mm */ |
202 | if (cpumask_test_cpu(cpu, mm_cpumask(real_prev))) | 182 | VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(real_prev)) && |
203 | cpumask_clear_cpu(cpu, mm_cpumask(real_prev)); | 183 | real_prev != &init_mm); |
204 | 184 | cpumask_clear_cpu(cpu, mm_cpumask(real_prev)); | |
205 | VM_WARN_ON_ONCE(cpumask_test_cpu(cpu, mm_cpumask(next))); | ||
206 | 185 | ||
207 | /* | 186 | /* |
208 | * Start remote flushes and then read tlb_gen. | 187 | * Start remote flushes and then read tlb_gen. |
@@ -233,6 +212,40 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, | |||
233 | } | 212 | } |
234 | 213 | ||
235 | /* | 214 | /* |
215 | * Please ignore the name of this function. It should be called | ||
216 | * switch_to_kernel_thread(). | ||
217 | * | ||
218 | * enter_lazy_tlb() is a hint from the scheduler that we are entering a | ||
219 | * kernel thread or other context without an mm. Acceptable implementations | ||
220 | * include doing nothing whatsoever, switching to init_mm, or various clever | ||
221 | * lazy tricks to try to minimize TLB flushes. | ||
222 | * | ||
223 | * The scheduler reserves the right to call enter_lazy_tlb() several times | ||
224 | * in a row. It will notify us that we're going back to a real mm by | ||
225 | * calling switch_mm_irqs_off(). | ||
226 | */ | ||
227 | void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) | ||
228 | { | ||
229 | if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm) | ||
230 | return; | ||
231 | |||
232 | if (tlb_defer_switch_to_init_mm()) { | ||
233 | /* | ||
234 | * There's a significant optimization that may be possible | ||
235 | * here. We have accurate enough TLB flush tracking that we | ||
236 | * don't need to maintain coherence of TLB per se when we're | ||
237 | * lazy. We do, however, need to maintain coherence of | ||
238 | * paging-structure caches. We could, in principle, leave our | ||
239 | * old mm loaded and only switch to init_mm when | ||
240 | * tlb_remove_page() happens. | ||
241 | */ | ||
242 | this_cpu_write(cpu_tlbstate.is_lazy, true); | ||
243 | } else { | ||
244 | switch_mm(NULL, &init_mm, NULL); | ||
245 | } | ||
246 | } | ||
247 | |||
248 | /* | ||
236 | * Call this when reinitializing a CPU. It fixes the following potential | 249 | * Call this when reinitializing a CPU. It fixes the following potential |
237 | * problems: | 250 | * problems: |
238 | * | 251 | * |
@@ -303,16 +316,20 @@ static void flush_tlb_func_common(const struct flush_tlb_info *f, | |||
303 | /* This code cannot presently handle being reentered. */ | 316 | /* This code cannot presently handle being reentered. */ |
304 | VM_WARN_ON(!irqs_disabled()); | 317 | VM_WARN_ON(!irqs_disabled()); |
305 | 318 | ||
319 | if (unlikely(loaded_mm == &init_mm)) | ||
320 | return; | ||
321 | |||
306 | VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) != | 322 | VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) != |
307 | loaded_mm->context.ctx_id); | 323 | loaded_mm->context.ctx_id); |
308 | 324 | ||
309 | if (!cpumask_test_cpu(smp_processor_id(), mm_cpumask(loaded_mm))) { | 325 | if (this_cpu_read(cpu_tlbstate.is_lazy)) { |
310 | /* | 326 | /* |
311 | * We're in lazy mode -- don't flush. We can get here on | 327 | * We're in lazy mode. We need to at least flush our |
312 | * remote flushes due to races and on local flushes if a | 328 | * paging-structure cache to avoid speculatively reading |
313 | * kernel thread coincidentally flushes the mm it's lazily | 329 | * garbage into our TLB. Since switching to init_mm is barely |
314 | * still using. | 330 | * slower than a minimal flush, just switch to init_mm. |
315 | */ | 331 | */ |
332 | switch_mm_irqs_off(NULL, &init_mm, NULL); | ||
316 | return; | 333 | return; |
317 | } | 334 | } |
318 | 335 | ||
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 0e7ef69e8531..d669e9d89001 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -93,11 +93,11 @@ int xen_cpuhp_setup(int (*cpu_up_prepare_cb)(unsigned int), | |||
93 | int rc; | 93 | int rc; |
94 | 94 | ||
95 | rc = cpuhp_setup_state_nocalls(CPUHP_XEN_PREPARE, | 95 | rc = cpuhp_setup_state_nocalls(CPUHP_XEN_PREPARE, |
96 | "x86/xen/hvm_guest:prepare", | 96 | "x86/xen/guest:prepare", |
97 | cpu_up_prepare_cb, cpu_dead_cb); | 97 | cpu_up_prepare_cb, cpu_dead_cb); |
98 | if (rc >= 0) { | 98 | if (rc >= 0) { |
99 | rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, | 99 | rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, |
100 | "x86/xen/hvm_guest:online", | 100 | "x86/xen/guest:online", |
101 | xen_cpu_up_online, NULL); | 101 | xen_cpu_up_online, NULL); |
102 | if (rc < 0) | 102 | if (rc < 0) |
103 | cpuhp_remove_state_nocalls(CPUHP_XEN_PREPARE); | 103 | cpuhp_remove_state_nocalls(CPUHP_XEN_PREPARE); |
diff --git a/block/bio.c b/block/bio.c index b38e962fa83e..101c2a9b5481 100644 --- a/block/bio.c +++ b/block/bio.c | |||
@@ -1239,8 +1239,8 @@ struct bio *bio_copy_user_iov(struct request_queue *q, | |||
1239 | */ | 1239 | */ |
1240 | bmd->is_our_pages = map_data ? 0 : 1; | 1240 | bmd->is_our_pages = map_data ? 0 : 1; |
1241 | memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs); | 1241 | memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs); |
1242 | iov_iter_init(&bmd->iter, iter->type, bmd->iov, | 1242 | bmd->iter = *iter; |
1243 | iter->nr_segs, iter->count); | 1243 | bmd->iter.iov = bmd->iov; |
1244 | 1244 | ||
1245 | ret = -ENOMEM; | 1245 | ret = -ENOMEM; |
1246 | bio = bio_kmalloc(gfp_mask, nr_pages); | 1246 | bio = bio_kmalloc(gfp_mask, nr_pages); |
@@ -1331,6 +1331,7 @@ struct bio *bio_map_user_iov(struct request_queue *q, | |||
1331 | int ret, offset; | 1331 | int ret, offset; |
1332 | struct iov_iter i; | 1332 | struct iov_iter i; |
1333 | struct iovec iov; | 1333 | struct iovec iov; |
1334 | struct bio_vec *bvec; | ||
1334 | 1335 | ||
1335 | iov_for_each(iov, i, *iter) { | 1336 | iov_for_each(iov, i, *iter) { |
1336 | unsigned long uaddr = (unsigned long) iov.iov_base; | 1337 | unsigned long uaddr = (unsigned long) iov.iov_base; |
@@ -1375,7 +1376,12 @@ struct bio *bio_map_user_iov(struct request_queue *q, | |||
1375 | ret = get_user_pages_fast(uaddr, local_nr_pages, | 1376 | ret = get_user_pages_fast(uaddr, local_nr_pages, |
1376 | (iter->type & WRITE) != WRITE, | 1377 | (iter->type & WRITE) != WRITE, |
1377 | &pages[cur_page]); | 1378 | &pages[cur_page]); |
1378 | if (ret < local_nr_pages) { | 1379 | if (unlikely(ret < local_nr_pages)) { |
1380 | for (j = cur_page; j < page_limit; j++) { | ||
1381 | if (!pages[j]) | ||
1382 | break; | ||
1383 | put_page(pages[j]); | ||
1384 | } | ||
1379 | ret = -EFAULT; | 1385 | ret = -EFAULT; |
1380 | goto out_unmap; | 1386 | goto out_unmap; |
1381 | } | 1387 | } |
@@ -1383,6 +1389,7 @@ struct bio *bio_map_user_iov(struct request_queue *q, | |||
1383 | offset = offset_in_page(uaddr); | 1389 | offset = offset_in_page(uaddr); |
1384 | for (j = cur_page; j < page_limit; j++) { | 1390 | for (j = cur_page; j < page_limit; j++) { |
1385 | unsigned int bytes = PAGE_SIZE - offset; | 1391 | unsigned int bytes = PAGE_SIZE - offset; |
1392 | unsigned short prev_bi_vcnt = bio->bi_vcnt; | ||
1386 | 1393 | ||
1387 | if (len <= 0) | 1394 | if (len <= 0) |
1388 | break; | 1395 | break; |
@@ -1397,6 +1404,13 @@ struct bio *bio_map_user_iov(struct request_queue *q, | |||
1397 | bytes) | 1404 | bytes) |
1398 | break; | 1405 | break; |
1399 | 1406 | ||
1407 | /* | ||
1408 | * check if vector was merged with previous | ||
1409 | * drop page reference if needed | ||
1410 | */ | ||
1411 | if (bio->bi_vcnt == prev_bi_vcnt) | ||
1412 | put_page(pages[j]); | ||
1413 | |||
1400 | len -= bytes; | 1414 | len -= bytes; |
1401 | offset = 0; | 1415 | offset = 0; |
1402 | } | 1416 | } |
@@ -1423,10 +1437,8 @@ struct bio *bio_map_user_iov(struct request_queue *q, | |||
1423 | return bio; | 1437 | return bio; |
1424 | 1438 | ||
1425 | out_unmap: | 1439 | out_unmap: |
1426 | for (j = 0; j < nr_pages; j++) { | 1440 | bio_for_each_segment_all(bvec, bio, j) { |
1427 | if (!pages[j]) | 1441 | put_page(bvec->bv_page); |
1428 | break; | ||
1429 | put_page(pages[j]); | ||
1430 | } | 1442 | } |
1431 | out: | 1443 | out: |
1432 | kfree(pages); | 1444 | kfree(pages); |
diff --git a/crypto/asymmetric_keys/asymmetric_type.c b/crypto/asymmetric_keys/asymmetric_type.c index e4b0ed386bc8..39aecad286fe 100644 --- a/crypto/asymmetric_keys/asymmetric_type.c +++ b/crypto/asymmetric_keys/asymmetric_type.c | |||
@@ -57,6 +57,8 @@ struct key *find_asymmetric_key(struct key *keyring, | |||
57 | char *req, *p; | 57 | char *req, *p; |
58 | int len; | 58 | int len; |
59 | 59 | ||
60 | BUG_ON(!id_0 && !id_1); | ||
61 | |||
60 | if (id_0) { | 62 | if (id_0) { |
61 | lookup = id_0->data; | 63 | lookup = id_0->data; |
62 | len = id_0->len; | 64 | len = id_0->len; |
@@ -105,7 +107,7 @@ struct key *find_asymmetric_key(struct key *keyring, | |||
105 | if (id_0 && id_1) { | 107 | if (id_0 && id_1) { |
106 | const struct asymmetric_key_ids *kids = asymmetric_key_ids(key); | 108 | const struct asymmetric_key_ids *kids = asymmetric_key_ids(key); |
107 | 109 | ||
108 | if (!kids->id[0]) { | 110 | if (!kids->id[1]) { |
109 | pr_debug("First ID matches, but second is missing\n"); | 111 | pr_debug("First ID matches, but second is missing\n"); |
110 | goto reject; | 112 | goto reject; |
111 | } | 113 | } |
diff --git a/crypto/asymmetric_keys/pkcs7_parser.c b/crypto/asymmetric_keys/pkcs7_parser.c index af4cd8649117..d140d8bb2c96 100644 --- a/crypto/asymmetric_keys/pkcs7_parser.c +++ b/crypto/asymmetric_keys/pkcs7_parser.c | |||
@@ -88,6 +88,9 @@ static int pkcs7_check_authattrs(struct pkcs7_message *msg) | |||
88 | bool want = false; | 88 | bool want = false; |
89 | 89 | ||
90 | sinfo = msg->signed_infos; | 90 | sinfo = msg->signed_infos; |
91 | if (!sinfo) | ||
92 | goto inconsistent; | ||
93 | |||
91 | if (sinfo->authattrs) { | 94 | if (sinfo->authattrs) { |
92 | want = true; | 95 | want = true; |
93 | msg->have_authattrs = true; | 96 | msg->have_authattrs = true; |
diff --git a/crypto/shash.c b/crypto/shash.c index 5e31c8d776df..325a14da5827 100644 --- a/crypto/shash.c +++ b/crypto/shash.c | |||
@@ -41,7 +41,7 @@ static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key, | |||
41 | int err; | 41 | int err; |
42 | 42 | ||
43 | absize = keylen + (alignmask & ~(crypto_tfm_ctx_alignment() - 1)); | 43 | absize = keylen + (alignmask & ~(crypto_tfm_ctx_alignment() - 1)); |
44 | buffer = kmalloc(absize, GFP_KERNEL); | 44 | buffer = kmalloc(absize, GFP_ATOMIC); |
45 | if (!buffer) | 45 | if (!buffer) |
46 | return -ENOMEM; | 46 | return -ENOMEM; |
47 | 47 | ||
@@ -275,12 +275,14 @@ static int shash_async_finup(struct ahash_request *req) | |||
275 | 275 | ||
276 | int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc) | 276 | int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc) |
277 | { | 277 | { |
278 | struct scatterlist *sg = req->src; | ||
279 | unsigned int offset = sg->offset; | ||
280 | unsigned int nbytes = req->nbytes; | 278 | unsigned int nbytes = req->nbytes; |
279 | struct scatterlist *sg; | ||
280 | unsigned int offset; | ||
281 | int err; | 281 | int err; |
282 | 282 | ||
283 | if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) { | 283 | if (nbytes && |
284 | (sg = req->src, offset = sg->offset, | ||
285 | nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) { | ||
284 | void *data; | 286 | void *data; |
285 | 287 | ||
286 | data = kmap_atomic(sg_page(sg)); | 288 | data = kmap_atomic(sg_page(sg)); |
diff --git a/crypto/skcipher.c b/crypto/skcipher.c index 4faa0fd53b0c..d5692e35fab1 100644 --- a/crypto/skcipher.c +++ b/crypto/skcipher.c | |||
@@ -426,14 +426,9 @@ static int skcipher_copy_iv(struct skcipher_walk *walk) | |||
426 | 426 | ||
427 | static int skcipher_walk_first(struct skcipher_walk *walk) | 427 | static int skcipher_walk_first(struct skcipher_walk *walk) |
428 | { | 428 | { |
429 | walk->nbytes = 0; | ||
430 | |||
431 | if (WARN_ON_ONCE(in_irq())) | 429 | if (WARN_ON_ONCE(in_irq())) |
432 | return -EDEADLK; | 430 | return -EDEADLK; |
433 | 431 | ||
434 | if (unlikely(!walk->total)) | ||
435 | return 0; | ||
436 | |||
437 | walk->buffer = NULL; | 432 | walk->buffer = NULL; |
438 | if (unlikely(((unsigned long)walk->iv & walk->alignmask))) { | 433 | if (unlikely(((unsigned long)walk->iv & walk->alignmask))) { |
439 | int err = skcipher_copy_iv(walk); | 434 | int err = skcipher_copy_iv(walk); |
@@ -452,10 +447,15 @@ static int skcipher_walk_skcipher(struct skcipher_walk *walk, | |||
452 | { | 447 | { |
453 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); | 448 | struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); |
454 | 449 | ||
450 | walk->total = req->cryptlen; | ||
451 | walk->nbytes = 0; | ||
452 | |||
453 | if (unlikely(!walk->total)) | ||
454 | return 0; | ||
455 | |||
455 | scatterwalk_start(&walk->in, req->src); | 456 | scatterwalk_start(&walk->in, req->src); |
456 | scatterwalk_start(&walk->out, req->dst); | 457 | scatterwalk_start(&walk->out, req->dst); |
457 | 458 | ||
458 | walk->total = req->cryptlen; | ||
459 | walk->iv = req->iv; | 459 | walk->iv = req->iv; |
460 | walk->oiv = req->iv; | 460 | walk->oiv = req->iv; |
461 | 461 | ||
@@ -509,6 +509,11 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk, | |||
509 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); | 509 | struct crypto_aead *tfm = crypto_aead_reqtfm(req); |
510 | int err; | 510 | int err; |
511 | 511 | ||
512 | walk->nbytes = 0; | ||
513 | |||
514 | if (unlikely(!walk->total)) | ||
515 | return 0; | ||
516 | |||
512 | walk->flags &= ~SKCIPHER_WALK_PHYS; | 517 | walk->flags &= ~SKCIPHER_WALK_PHYS; |
513 | 518 | ||
514 | scatterwalk_start(&walk->in, req->src); | 519 | scatterwalk_start(&walk->in, req->src); |
diff --git a/crypto/xts.c b/crypto/xts.c index d86c11a8c882..e31828ed0046 100644 --- a/crypto/xts.c +++ b/crypto/xts.c | |||
@@ -554,8 +554,10 @@ static int create(struct crypto_template *tmpl, struct rtattr **tb) | |||
554 | ctx->name[len - 1] = 0; | 554 | ctx->name[len - 1] = 0; |
555 | 555 | ||
556 | if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, | 556 | if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME, |
557 | "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) | 557 | "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) { |
558 | return -ENAMETOOLONG; | 558 | err = -ENAMETOOLONG; |
559 | goto err_drop_spawn; | ||
560 | } | ||
559 | } else | 561 | } else |
560 | goto err_drop_spawn; | 562 | goto err_drop_spawn; |
561 | 563 | ||
diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index 3fb8ff513461..e26ea209b63e 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c | |||
@@ -571,10 +571,9 @@ static int acpi_data_get_property_array(const struct acpi_device_data *data, | |||
571 | * } | 571 | * } |
572 | * } | 572 | * } |
573 | * | 573 | * |
574 | * Calling this function with index %2 return %-ENOENT and with index %3 | 574 | * Calling this function with index %2 or index %3 returns %-ENOENT. If the |
575 | * returns the last entry. If the property does not contain any more values | 575 | * property does not contain any more values %-ENOENT is returned. The NULL |
576 | * %-ENODATA is returned. The NULL entry must be single integer and | 576 | * entry must be single integer and preferably contain value %0. |
577 | * preferably contain value %0. | ||
578 | * | 577 | * |
579 | * Return: %0 on success, negative error code on failure. | 578 | * Return: %0 on success, negative error code on failure. |
580 | */ | 579 | */ |
@@ -590,11 +589,11 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, | |||
590 | 589 | ||
591 | data = acpi_device_data_of_node(fwnode); | 590 | data = acpi_device_data_of_node(fwnode); |
592 | if (!data) | 591 | if (!data) |
593 | return -EINVAL; | 592 | return -ENOENT; |
594 | 593 | ||
595 | ret = acpi_data_get_property(data, propname, ACPI_TYPE_ANY, &obj); | 594 | ret = acpi_data_get_property(data, propname, ACPI_TYPE_ANY, &obj); |
596 | if (ret) | 595 | if (ret) |
597 | return ret; | 596 | return ret == -EINVAL ? -ENOENT : -EINVAL; |
598 | 597 | ||
599 | /* | 598 | /* |
600 | * The simplest case is when the value is a single reference. Just | 599 | * The simplest case is when the value is a single reference. Just |
@@ -606,7 +605,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, | |||
606 | 605 | ||
607 | ret = acpi_bus_get_device(obj->reference.handle, &device); | 606 | ret = acpi_bus_get_device(obj->reference.handle, &device); |
608 | if (ret) | 607 | if (ret) |
609 | return ret; | 608 | return ret == -ENODEV ? -EINVAL : ret; |
610 | 609 | ||
611 | args->adev = device; | 610 | args->adev = device; |
612 | args->nargs = 0; | 611 | args->nargs = 0; |
@@ -622,8 +621,10 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, | |||
622 | * The index argument is then used to determine which reference | 621 | * The index argument is then used to determine which reference |
623 | * the caller wants (along with the arguments). | 622 | * the caller wants (along with the arguments). |
624 | */ | 623 | */ |
625 | if (obj->type != ACPI_TYPE_PACKAGE || index >= obj->package.count) | 624 | if (obj->type != ACPI_TYPE_PACKAGE) |
626 | return -EPROTO; | 625 | return -EINVAL; |
626 | if (index >= obj->package.count) | ||
627 | return -ENOENT; | ||
627 | 628 | ||
628 | element = obj->package.elements; | 629 | element = obj->package.elements; |
629 | end = element + obj->package.count; | 630 | end = element + obj->package.count; |
@@ -635,7 +636,7 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, | |||
635 | ret = acpi_bus_get_device(element->reference.handle, | 636 | ret = acpi_bus_get_device(element->reference.handle, |
636 | &device); | 637 | &device); |
637 | if (ret) | 638 | if (ret) |
638 | return -ENODEV; | 639 | return -EINVAL; |
639 | 640 | ||
640 | nargs = 0; | 641 | nargs = 0; |
641 | element++; | 642 | element++; |
@@ -649,11 +650,11 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, | |||
649 | else if (type == ACPI_TYPE_LOCAL_REFERENCE) | 650 | else if (type == ACPI_TYPE_LOCAL_REFERENCE) |
650 | break; | 651 | break; |
651 | else | 652 | else |
652 | return -EPROTO; | 653 | return -EINVAL; |
653 | } | 654 | } |
654 | 655 | ||
655 | if (nargs > MAX_ACPI_REFERENCE_ARGS) | 656 | if (nargs > MAX_ACPI_REFERENCE_ARGS) |
656 | return -EPROTO; | 657 | return -EINVAL; |
657 | 658 | ||
658 | if (idx == index) { | 659 | if (idx == index) { |
659 | args->adev = device; | 660 | args->adev = device; |
@@ -670,13 +671,13 @@ int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode, | |||
670 | return -ENOENT; | 671 | return -ENOENT; |
671 | element++; | 672 | element++; |
672 | } else { | 673 | } else { |
673 | return -EPROTO; | 674 | return -EINVAL; |
674 | } | 675 | } |
675 | 676 | ||
676 | idx++; | 677 | idx++; |
677 | } | 678 | } |
678 | 679 | ||
679 | return -ENODATA; | 680 | return -ENOENT; |
680 | } | 681 | } |
681 | EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference); | 682 | EXPORT_SYMBOL_GPL(__acpi_node_get_property_reference); |
682 | 683 | ||
diff --git a/drivers/android/binder.c b/drivers/android/binder.c index ab34239a76ee..fddf76ef5bd6 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c | |||
@@ -2582,6 +2582,48 @@ static bool binder_proc_transaction(struct binder_transaction *t, | |||
2582 | return true; | 2582 | return true; |
2583 | } | 2583 | } |
2584 | 2584 | ||
2585 | /** | ||
2586 | * binder_get_node_refs_for_txn() - Get required refs on node for txn | ||
2587 | * @node: struct binder_node for which to get refs | ||
2588 | * @procp: returns @node->proc if valid | ||
2589 | * @error: set to BR_DEAD_REPLY if @node->proc is not valid | ||
2590 | * | ||
2591 | * User-space normally keeps the node alive when creating a transaction | ||
2592 | * since it has a reference to the target. The local strong ref keeps it | ||
2593 | * alive if the sending process dies before the target process processes | ||
2594 | * the transaction. If the source process is malicious or has a reference | ||
2595 | * counting bug, relying on the local strong ref can fail. | ||
2596 | * | ||
2597 | * Since user-space can cause the local strong ref to go away, we also take | ||
2598 | * a tmpref on the node to ensure it survives while we are constructing | ||
2599 | * the transaction. We also need a tmpref on the proc while we are | ||
2600 | * constructing the transaction, so we take that here as well. | ||
2601 | * | ||
2602 | * Return: The target_node with refs taken, or NULL if @node->proc is NULL. | ||
2603 | * Also sets @procp if valid. If @node->proc is NULL, indicating that the | ||
2604 | * target proc has died, @error is set to BR_DEAD_REPLY. | ||
2605 | */ | ||
2606 | static struct binder_node *binder_get_node_refs_for_txn( | ||
2607 | struct binder_node *node, | ||
2608 | struct binder_proc **procp, | ||
2609 | uint32_t *error) | ||
2610 | { | ||
2611 | struct binder_node *target_node = NULL; | ||
2612 | |||
2613 | binder_node_inner_lock(node); | ||
2614 | if (node->proc) { | ||
2615 | target_node = node; | ||
2616 | binder_inc_node_nilocked(node, 1, 0, NULL); | ||
2617 | binder_inc_node_tmpref_ilocked(node); | ||
2618 | node->proc->tmp_ref++; | ||
2619 | *procp = node->proc; | ||
2620 | } else | ||
2621 | *error = BR_DEAD_REPLY; | ||
2622 | binder_node_inner_unlock(node); | ||
2623 | |||
2624 | return target_node; | ||
2625 | } | ||
2626 | |||
2585 | static void binder_transaction(struct binder_proc *proc, | 2627 | static void binder_transaction(struct binder_proc *proc, |
2586 | struct binder_thread *thread, | 2628 | struct binder_thread *thread, |
2587 | struct binder_transaction_data *tr, int reply, | 2629 | struct binder_transaction_data *tr, int reply, |
@@ -2685,43 +2727,35 @@ static void binder_transaction(struct binder_proc *proc, | |||
2685 | ref = binder_get_ref_olocked(proc, tr->target.handle, | 2727 | ref = binder_get_ref_olocked(proc, tr->target.handle, |
2686 | true); | 2728 | true); |
2687 | if (ref) { | 2729 | if (ref) { |
2688 | binder_inc_node(ref->node, 1, 0, NULL); | 2730 | target_node = binder_get_node_refs_for_txn( |
2689 | target_node = ref->node; | 2731 | ref->node, &target_proc, |
2690 | } | 2732 | &return_error); |
2691 | binder_proc_unlock(proc); | 2733 | } else { |
2692 | if (target_node == NULL) { | ||
2693 | binder_user_error("%d:%d got transaction to invalid handle\n", | 2734 | binder_user_error("%d:%d got transaction to invalid handle\n", |
2694 | proc->pid, thread->pid); | 2735 | proc->pid, thread->pid); |
2695 | return_error = BR_FAILED_REPLY; | 2736 | return_error = BR_FAILED_REPLY; |
2696 | return_error_param = -EINVAL; | ||
2697 | return_error_line = __LINE__; | ||
2698 | goto err_invalid_target_handle; | ||
2699 | } | 2737 | } |
2738 | binder_proc_unlock(proc); | ||
2700 | } else { | 2739 | } else { |
2701 | mutex_lock(&context->context_mgr_node_lock); | 2740 | mutex_lock(&context->context_mgr_node_lock); |
2702 | target_node = context->binder_context_mgr_node; | 2741 | target_node = context->binder_context_mgr_node; |
2703 | if (target_node == NULL) { | 2742 | if (target_node) |
2743 | target_node = binder_get_node_refs_for_txn( | ||
2744 | target_node, &target_proc, | ||
2745 | &return_error); | ||
2746 | else | ||
2704 | return_error = BR_DEAD_REPLY; | 2747 | return_error = BR_DEAD_REPLY; |
2705 | mutex_unlock(&context->context_mgr_node_lock); | ||
2706 | return_error_line = __LINE__; | ||
2707 | goto err_no_context_mgr_node; | ||
2708 | } | ||
2709 | binder_inc_node(target_node, 1, 0, NULL); | ||
2710 | mutex_unlock(&context->context_mgr_node_lock); | 2748 | mutex_unlock(&context->context_mgr_node_lock); |
2711 | } | 2749 | } |
2712 | e->to_node = target_node->debug_id; | 2750 | if (!target_node) { |
2713 | binder_node_lock(target_node); | 2751 | /* |
2714 | target_proc = target_node->proc; | 2752 | * return_error is set above |
2715 | if (target_proc == NULL) { | 2753 | */ |
2716 | binder_node_unlock(target_node); | 2754 | return_error_param = -EINVAL; |
2717 | return_error = BR_DEAD_REPLY; | ||
2718 | return_error_line = __LINE__; | 2755 | return_error_line = __LINE__; |
2719 | goto err_dead_binder; | 2756 | goto err_dead_binder; |
2720 | } | 2757 | } |
2721 | binder_inner_proc_lock(target_proc); | 2758 | e->to_node = target_node->debug_id; |
2722 | target_proc->tmp_ref++; | ||
2723 | binder_inner_proc_unlock(target_proc); | ||
2724 | binder_node_unlock(target_node); | ||
2725 | if (security_binder_transaction(proc->tsk, | 2759 | if (security_binder_transaction(proc->tsk, |
2726 | target_proc->tsk) < 0) { | 2760 | target_proc->tsk) < 0) { |
2727 | return_error = BR_FAILED_REPLY; | 2761 | return_error = BR_FAILED_REPLY; |
@@ -3071,6 +3105,8 @@ static void binder_transaction(struct binder_proc *proc, | |||
3071 | if (target_thread) | 3105 | if (target_thread) |
3072 | binder_thread_dec_tmpref(target_thread); | 3106 | binder_thread_dec_tmpref(target_thread); |
3073 | binder_proc_dec_tmpref(target_proc); | 3107 | binder_proc_dec_tmpref(target_proc); |
3108 | if (target_node) | ||
3109 | binder_dec_node_tmpref(target_node); | ||
3074 | /* | 3110 | /* |
3075 | * write barrier to synchronize with initialization | 3111 | * write barrier to synchronize with initialization |
3076 | * of log entry | 3112 | * of log entry |
@@ -3090,6 +3126,8 @@ err_bad_parent: | |||
3090 | err_copy_data_failed: | 3126 | err_copy_data_failed: |
3091 | trace_binder_transaction_failed_buffer_release(t->buffer); | 3127 | trace_binder_transaction_failed_buffer_release(t->buffer); |
3092 | binder_transaction_buffer_release(target_proc, t->buffer, offp); | 3128 | binder_transaction_buffer_release(target_proc, t->buffer, offp); |
3129 | if (target_node) | ||
3130 | binder_dec_node_tmpref(target_node); | ||
3093 | target_node = NULL; | 3131 | target_node = NULL; |
3094 | t->buffer->transaction = NULL; | 3132 | t->buffer->transaction = NULL; |
3095 | binder_alloc_free_buf(&target_proc->alloc, t->buffer); | 3133 | binder_alloc_free_buf(&target_proc->alloc, t->buffer); |
@@ -3104,13 +3142,14 @@ err_bad_call_stack: | |||
3104 | err_empty_call_stack: | 3142 | err_empty_call_stack: |
3105 | err_dead_binder: | 3143 | err_dead_binder: |
3106 | err_invalid_target_handle: | 3144 | err_invalid_target_handle: |
3107 | err_no_context_mgr_node: | ||
3108 | if (target_thread) | 3145 | if (target_thread) |
3109 | binder_thread_dec_tmpref(target_thread); | 3146 | binder_thread_dec_tmpref(target_thread); |
3110 | if (target_proc) | 3147 | if (target_proc) |
3111 | binder_proc_dec_tmpref(target_proc); | 3148 | binder_proc_dec_tmpref(target_proc); |
3112 | if (target_node) | 3149 | if (target_node) { |
3113 | binder_dec_node(target_node, 1, 0); | 3150 | binder_dec_node(target_node, 1, 0); |
3151 | binder_dec_node_tmpref(target_node); | ||
3152 | } | ||
3114 | 3153 | ||
3115 | binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, | 3154 | binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, |
3116 | "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n", | 3155 | "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n", |
@@ -3623,12 +3662,6 @@ static void binder_stat_br(struct binder_proc *proc, | |||
3623 | } | 3662 | } |
3624 | } | 3663 | } |
3625 | 3664 | ||
3626 | static int binder_has_thread_work(struct binder_thread *thread) | ||
3627 | { | ||
3628 | return !binder_worklist_empty(thread->proc, &thread->todo) || | ||
3629 | thread->looper_need_return; | ||
3630 | } | ||
3631 | |||
3632 | static int binder_put_node_cmd(struct binder_proc *proc, | 3665 | static int binder_put_node_cmd(struct binder_proc *proc, |
3633 | struct binder_thread *thread, | 3666 | struct binder_thread *thread, |
3634 | void __user **ptrp, | 3667 | void __user **ptrp, |
@@ -4258,12 +4291,9 @@ static unsigned int binder_poll(struct file *filp, | |||
4258 | 4291 | ||
4259 | binder_inner_proc_unlock(thread->proc); | 4292 | binder_inner_proc_unlock(thread->proc); |
4260 | 4293 | ||
4261 | if (binder_has_work(thread, wait_for_proc_work)) | ||
4262 | return POLLIN; | ||
4263 | |||
4264 | poll_wait(filp, &thread->wait, wait); | 4294 | poll_wait(filp, &thread->wait, wait); |
4265 | 4295 | ||
4266 | if (binder_has_thread_work(thread)) | 4296 | if (binder_has_work(thread, wait_for_proc_work)) |
4267 | return POLLIN; | 4297 | return POLLIN; |
4268 | 4298 | ||
4269 | return 0; | 4299 | return 0; |
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index 064f5e31ec55..c2819a3d58a6 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c | |||
@@ -215,17 +215,12 @@ static int binder_update_page_range(struct binder_alloc *alloc, int allocate, | |||
215 | } | 215 | } |
216 | } | 216 | } |
217 | 217 | ||
218 | if (!vma && need_mm) | 218 | if (!vma && need_mm && mmget_not_zero(alloc->vma_vm_mm)) |
219 | mm = get_task_mm(alloc->tsk); | 219 | mm = alloc->vma_vm_mm; |
220 | 220 | ||
221 | if (mm) { | 221 | if (mm) { |
222 | down_write(&mm->mmap_sem); | 222 | down_write(&mm->mmap_sem); |
223 | vma = alloc->vma; | 223 | vma = alloc->vma; |
224 | if (vma && mm != alloc->vma_vm_mm) { | ||
225 | pr_err("%d: vma mm and task mm mismatch\n", | ||
226 | alloc->pid); | ||
227 | vma = NULL; | ||
228 | } | ||
229 | } | 224 | } |
230 | 225 | ||
231 | if (!vma && need_mm) { | 226 | if (!vma && need_mm) { |
@@ -565,7 +560,7 @@ static void binder_delete_free_buffer(struct binder_alloc *alloc, | |||
565 | binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, | 560 | binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, |
566 | "%d: merge free, buffer %pK do not share page with %pK or %pK\n", | 561 | "%d: merge free, buffer %pK do not share page with %pK or %pK\n", |
567 | alloc->pid, buffer->data, | 562 | alloc->pid, buffer->data, |
568 | prev->data, next->data); | 563 | prev->data, next ? next->data : NULL); |
569 | binder_update_page_range(alloc, 0, buffer_start_page(buffer), | 564 | binder_update_page_range(alloc, 0, buffer_start_page(buffer), |
570 | buffer_start_page(buffer) + PAGE_SIZE, | 565 | buffer_start_page(buffer) + PAGE_SIZE, |
571 | NULL); | 566 | NULL); |
@@ -720,6 +715,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc, | |||
720 | barrier(); | 715 | barrier(); |
721 | alloc->vma = vma; | 716 | alloc->vma = vma; |
722 | alloc->vma_vm_mm = vma->vm_mm; | 717 | alloc->vma_vm_mm = vma->vm_mm; |
718 | mmgrab(alloc->vma_vm_mm); | ||
723 | 719 | ||
724 | return 0; | 720 | return 0; |
725 | 721 | ||
@@ -795,6 +791,8 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc) | |||
795 | vfree(alloc->buffer); | 791 | vfree(alloc->buffer); |
796 | } | 792 | } |
797 | mutex_unlock(&alloc->mutex); | 793 | mutex_unlock(&alloc->mutex); |
794 | if (alloc->vma_vm_mm) | ||
795 | mmdrop(alloc->vma_vm_mm); | ||
798 | 796 | ||
799 | binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE, | 797 | binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE, |
800 | "%s: %d buffers %d, pages %d\n", | 798 | "%s: %d buffers %d, pages %d\n", |
@@ -889,7 +887,6 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc) | |||
889 | void binder_alloc_vma_close(struct binder_alloc *alloc) | 887 | void binder_alloc_vma_close(struct binder_alloc *alloc) |
890 | { | 888 | { |
891 | WRITE_ONCE(alloc->vma, NULL); | 889 | WRITE_ONCE(alloc->vma, NULL); |
892 | WRITE_ONCE(alloc->vma_vm_mm, NULL); | ||
893 | } | 890 | } |
894 | 891 | ||
895 | /** | 892 | /** |
@@ -926,9 +923,9 @@ enum lru_status binder_alloc_free_page(struct list_head *item, | |||
926 | page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; | 923 | page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; |
927 | vma = alloc->vma; | 924 | vma = alloc->vma; |
928 | if (vma) { | 925 | if (vma) { |
929 | mm = get_task_mm(alloc->tsk); | 926 | if (!mmget_not_zero(alloc->vma_vm_mm)) |
930 | if (!mm) | 927 | goto err_mmget; |
931 | goto err_get_task_mm_failed; | 928 | mm = alloc->vma_vm_mm; |
932 | if (!down_write_trylock(&mm->mmap_sem)) | 929 | if (!down_write_trylock(&mm->mmap_sem)) |
933 | goto err_down_write_mmap_sem_failed; | 930 | goto err_down_write_mmap_sem_failed; |
934 | } | 931 | } |
@@ -963,7 +960,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item, | |||
963 | 960 | ||
964 | err_down_write_mmap_sem_failed: | 961 | err_down_write_mmap_sem_failed: |
965 | mmput_async(mm); | 962 | mmput_async(mm); |
966 | err_get_task_mm_failed: | 963 | err_mmget: |
967 | err_page_already_freed: | 964 | err_page_already_freed: |
968 | mutex_unlock(&alloc->mutex); | 965 | mutex_unlock(&alloc->mutex); |
969 | err_get_alloc_mutex_failed: | 966 | err_get_alloc_mutex_failed: |
@@ -1002,7 +999,6 @@ struct shrinker binder_shrinker = { | |||
1002 | */ | 999 | */ |
1003 | void binder_alloc_init(struct binder_alloc *alloc) | 1000 | void binder_alloc_init(struct binder_alloc *alloc) |
1004 | { | 1001 | { |
1005 | alloc->tsk = current->group_leader; | ||
1006 | alloc->pid = current->group_leader->pid; | 1002 | alloc->pid = current->group_leader->pid; |
1007 | mutex_init(&alloc->mutex); | 1003 | mutex_init(&alloc->mutex); |
1008 | INIT_LIST_HEAD(&alloc->buffers); | 1004 | INIT_LIST_HEAD(&alloc->buffers); |
diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h index a3a3602c689c..2dd33b6df104 100644 --- a/drivers/android/binder_alloc.h +++ b/drivers/android/binder_alloc.h | |||
@@ -100,7 +100,6 @@ struct binder_lru_page { | |||
100 | */ | 100 | */ |
101 | struct binder_alloc { | 101 | struct binder_alloc { |
102 | struct mutex mutex; | 102 | struct mutex mutex; |
103 | struct task_struct *tsk; | ||
104 | struct vm_area_struct *vma; | 103 | struct vm_area_struct *vma; |
105 | struct mm_struct *vma_vm_mm; | 104 | struct mm_struct *vma_vm_mm; |
106 | void *buffer; | 105 | void *buffer; |
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 321cd7b4d817..227bac5f1191 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c | |||
@@ -377,7 +377,8 @@ int register_cpu(struct cpu *cpu, int num) | |||
377 | 377 | ||
378 | per_cpu(cpu_sys_devices, num) = &cpu->dev; | 378 | per_cpu(cpu_sys_devices, num) = &cpu->dev; |
379 | register_cpu_under_node(num, cpu_to_node(num)); | 379 | register_cpu_under_node(num, cpu_to_node(num)); |
380 | dev_pm_qos_expose_latency_limit(&cpu->dev, 0); | 380 | dev_pm_qos_expose_latency_limit(&cpu->dev, |
381 | PM_QOS_RESUME_LATENCY_NO_CONSTRAINT); | ||
381 | 382 | ||
382 | return 0; | 383 | return 0; |
383 | } | 384 | } |
diff --git a/drivers/base/node.c b/drivers/base/node.c index 3855902f2c5b..aae2402f3791 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c | |||
@@ -27,13 +27,21 @@ static struct bus_type node_subsys = { | |||
27 | 27 | ||
28 | static ssize_t node_read_cpumap(struct device *dev, bool list, char *buf) | 28 | static ssize_t node_read_cpumap(struct device *dev, bool list, char *buf) |
29 | { | 29 | { |
30 | ssize_t n; | ||
31 | cpumask_var_t mask; | ||
30 | struct node *node_dev = to_node(dev); | 32 | struct node *node_dev = to_node(dev); |
31 | const struct cpumask *mask = cpumask_of_node(node_dev->dev.id); | ||
32 | 33 | ||
33 | /* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */ | 34 | /* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */ |
34 | BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1)); | 35 | BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1)); |
35 | 36 | ||
36 | return cpumap_print_to_pagebuf(list, buf, mask); | 37 | if (!alloc_cpumask_var(&mask, GFP_KERNEL)) |
38 | return 0; | ||
39 | |||
40 | cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask); | ||
41 | n = cpumap_print_to_pagebuf(list, buf, mask); | ||
42 | free_cpumask_var(mask); | ||
43 | |||
44 | return n; | ||
37 | } | 45 | } |
38 | 46 | ||
39 | static inline ssize_t node_read_cpumask(struct device *dev, | 47 | static inline ssize_t node_read_cpumask(struct device *dev, |
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c index 281f949c5ffe..51751cc8c9e6 100644 --- a/drivers/base/power/domain_governor.c +++ b/drivers/base/power/domain_governor.c | |||
@@ -14,23 +14,20 @@ | |||
14 | static int dev_update_qos_constraint(struct device *dev, void *data) | 14 | static int dev_update_qos_constraint(struct device *dev, void *data) |
15 | { | 15 | { |
16 | s64 *constraint_ns_p = data; | 16 | s64 *constraint_ns_p = data; |
17 | s32 constraint_ns = -1; | 17 | s64 constraint_ns = -1; |
18 | 18 | ||
19 | if (dev->power.subsys_data && dev->power.subsys_data->domain_data) | 19 | if (dev->power.subsys_data && dev->power.subsys_data->domain_data) |
20 | constraint_ns = dev_gpd_data(dev)->td.effective_constraint_ns; | 20 | constraint_ns = dev_gpd_data(dev)->td.effective_constraint_ns; |
21 | 21 | ||
22 | if (constraint_ns < 0) { | 22 | if (constraint_ns < 0) |
23 | constraint_ns = dev_pm_qos_read_value(dev); | 23 | constraint_ns = dev_pm_qos_read_value(dev); |
24 | constraint_ns *= NSEC_PER_USEC; | 24 | |
25 | } | 25 | if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT) |
26 | if (constraint_ns == 0) | ||
27 | return 0; | 26 | return 0; |
28 | 27 | ||
29 | /* | 28 | constraint_ns *= NSEC_PER_USEC; |
30 | * constraint_ns cannot be negative here, because the device has been | 29 | |
31 | * suspended. | 30 | if (constraint_ns < *constraint_ns_p || *constraint_ns_p < 0) |
32 | */ | ||
33 | if (constraint_ns < *constraint_ns_p || *constraint_ns_p == 0) | ||
34 | *constraint_ns_p = constraint_ns; | 31 | *constraint_ns_p = constraint_ns; |
35 | 32 | ||
36 | return 0; | 33 | return 0; |
@@ -63,10 +60,14 @@ static bool default_suspend_ok(struct device *dev) | |||
63 | 60 | ||
64 | spin_unlock_irqrestore(&dev->power.lock, flags); | 61 | spin_unlock_irqrestore(&dev->power.lock, flags); |
65 | 62 | ||
66 | if (constraint_ns < 0) | 63 | if (constraint_ns == 0) |
67 | return false; | 64 | return false; |
68 | 65 | ||
69 | constraint_ns *= NSEC_PER_USEC; | 66 | if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT) |
67 | constraint_ns = -1; | ||
68 | else | ||
69 | constraint_ns *= NSEC_PER_USEC; | ||
70 | |||
70 | /* | 71 | /* |
71 | * We can walk the children without any additional locking, because | 72 | * We can walk the children without any additional locking, because |
72 | * they all have been suspended at this point and their | 73 | * they all have been suspended at this point and their |
@@ -76,14 +77,19 @@ static bool default_suspend_ok(struct device *dev) | |||
76 | device_for_each_child(dev, &constraint_ns, | 77 | device_for_each_child(dev, &constraint_ns, |
77 | dev_update_qos_constraint); | 78 | dev_update_qos_constraint); |
78 | 79 | ||
79 | if (constraint_ns > 0) { | 80 | if (constraint_ns < 0) { |
80 | constraint_ns -= td->suspend_latency_ns + | 81 | /* The children have no constraints. */ |
81 | td->resume_latency_ns; | 82 | td->effective_constraint_ns = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; |
82 | if (constraint_ns == 0) | 83 | td->cached_suspend_ok = true; |
83 | return false; | 84 | } else { |
85 | constraint_ns -= td->suspend_latency_ns + td->resume_latency_ns; | ||
86 | if (constraint_ns > 0) { | ||
87 | td->effective_constraint_ns = constraint_ns; | ||
88 | td->cached_suspend_ok = true; | ||
89 | } else { | ||
90 | td->effective_constraint_ns = 0; | ||
91 | } | ||
84 | } | 92 | } |
85 | td->effective_constraint_ns = constraint_ns; | ||
86 | td->cached_suspend_ok = constraint_ns >= 0; | ||
87 | 93 | ||
88 | /* | 94 | /* |
89 | * The children have been suspended already, so we don't need to take | 95 | * The children have been suspended already, so we don't need to take |
@@ -145,13 +151,14 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd, | |||
145 | td = &to_gpd_data(pdd)->td; | 151 | td = &to_gpd_data(pdd)->td; |
146 | constraint_ns = td->effective_constraint_ns; | 152 | constraint_ns = td->effective_constraint_ns; |
147 | /* default_suspend_ok() need not be called before us. */ | 153 | /* default_suspend_ok() need not be called before us. */ |
148 | if (constraint_ns < 0) { | 154 | if (constraint_ns < 0) |
149 | constraint_ns = dev_pm_qos_read_value(pdd->dev); | 155 | constraint_ns = dev_pm_qos_read_value(pdd->dev); |
150 | constraint_ns *= NSEC_PER_USEC; | 156 | |
151 | } | 157 | if (constraint_ns == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT) |
152 | if (constraint_ns == 0) | ||
153 | continue; | 158 | continue; |
154 | 159 | ||
160 | constraint_ns *= NSEC_PER_USEC; | ||
161 | |||
155 | /* | 162 | /* |
156 | * constraint_ns cannot be negative here, because the device has | 163 | * constraint_ns cannot be negative here, because the device has |
157 | * been suspended. | 164 | * been suspended. |
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index 277d43a83f53..7d29286d9313 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c | |||
@@ -189,7 +189,7 @@ static int dev_pm_qos_constraints_allocate(struct device *dev) | |||
189 | plist_head_init(&c->list); | 189 | plist_head_init(&c->list); |
190 | c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE; | 190 | c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE; |
191 | c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE; | 191 | c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE; |
192 | c->no_constraint_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE; | 192 | c->no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; |
193 | c->type = PM_QOS_MIN; | 193 | c->type = PM_QOS_MIN; |
194 | c->notifiers = n; | 194 | c->notifiers = n; |
195 | 195 | ||
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 7bcf80fa9ada..13e015905543 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c | |||
@@ -253,7 +253,7 @@ static int rpm_check_suspend_allowed(struct device *dev) | |||
253 | || (dev->power.request_pending | 253 | || (dev->power.request_pending |
254 | && dev->power.request == RPM_REQ_RESUME)) | 254 | && dev->power.request == RPM_REQ_RESUME)) |
255 | retval = -EAGAIN; | 255 | retval = -EAGAIN; |
256 | else if (__dev_pm_qos_read_value(dev) < 0) | 256 | else if (__dev_pm_qos_read_value(dev) == 0) |
257 | retval = -EPERM; | 257 | retval = -EPERM; |
258 | else if (dev->power.runtime_status == RPM_SUSPENDED) | 258 | else if (dev->power.runtime_status == RPM_SUSPENDED) |
259 | retval = 1; | 259 | retval = 1; |
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index 156ab57bca77..632077f05c57 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c | |||
@@ -218,7 +218,14 @@ static ssize_t pm_qos_resume_latency_show(struct device *dev, | |||
218 | struct device_attribute *attr, | 218 | struct device_attribute *attr, |
219 | char *buf) | 219 | char *buf) |
220 | { | 220 | { |
221 | return sprintf(buf, "%d\n", dev_pm_qos_requested_resume_latency(dev)); | 221 | s32 value = dev_pm_qos_requested_resume_latency(dev); |
222 | |||
223 | if (value == 0) | ||
224 | return sprintf(buf, "n/a\n"); | ||
225 | else if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT) | ||
226 | value = 0; | ||
227 | |||
228 | return sprintf(buf, "%d\n", value); | ||
222 | } | 229 | } |
223 | 230 | ||
224 | static ssize_t pm_qos_resume_latency_store(struct device *dev, | 231 | static ssize_t pm_qos_resume_latency_store(struct device *dev, |
@@ -228,11 +235,21 @@ static ssize_t pm_qos_resume_latency_store(struct device *dev, | |||
228 | s32 value; | 235 | s32 value; |
229 | int ret; | 236 | int ret; |
230 | 237 | ||
231 | if (kstrtos32(buf, 0, &value)) | 238 | if (!kstrtos32(buf, 0, &value)) { |
232 | return -EINVAL; | 239 | /* |
240 | * Prevent users from writing negative or "no constraint" values | ||
241 | * directly. | ||
242 | */ | ||
243 | if (value < 0 || value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT) | ||
244 | return -EINVAL; | ||
233 | 245 | ||
234 | if (value < 0) | 246 | if (value == 0) |
247 | value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; | ||
248 | } else if (!strcmp(buf, "n/a") || !strcmp(buf, "n/a\n")) { | ||
249 | value = 0; | ||
250 | } else { | ||
235 | return -EINVAL; | 251 | return -EINVAL; |
252 | } | ||
236 | 253 | ||
237 | ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req, | 254 | ret = dev_pm_qos_update_request(dev->power.qos->resume_latency_req, |
238 | value); | 255 | value); |
diff --git a/drivers/base/property.c b/drivers/base/property.c index d0b65bbe7e15..7ed99c1b2a8b 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/phy.h> | 21 | #include <linux/phy.h> |
22 | 22 | ||
23 | struct property_set { | 23 | struct property_set { |
24 | struct device *dev; | ||
24 | struct fwnode_handle fwnode; | 25 | struct fwnode_handle fwnode; |
25 | const struct property_entry *properties; | 26 | const struct property_entry *properties; |
26 | }; | 27 | }; |
@@ -682,6 +683,10 @@ EXPORT_SYMBOL_GPL(fwnode_property_match_string); | |||
682 | * Caller is responsible to call fwnode_handle_put() on the returned | 683 | * Caller is responsible to call fwnode_handle_put() on the returned |
683 | * args->fwnode pointer. | 684 | * args->fwnode pointer. |
684 | * | 685 | * |
686 | * Returns: %0 on success | ||
687 | * %-ENOENT when the index is out of bounds, the index has an empty | ||
688 | * reference or the property was not found | ||
689 | * %-EINVAL on parse error | ||
685 | */ | 690 | */ |
686 | int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode, | 691 | int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode, |
687 | const char *prop, const char *nargs_prop, | 692 | const char *prop, const char *nargs_prop, |
@@ -891,6 +896,7 @@ static struct property_set *pset_copy_set(const struct property_set *pset) | |||
891 | void device_remove_properties(struct device *dev) | 896 | void device_remove_properties(struct device *dev) |
892 | { | 897 | { |
893 | struct fwnode_handle *fwnode; | 898 | struct fwnode_handle *fwnode; |
899 | struct property_set *pset; | ||
894 | 900 | ||
895 | fwnode = dev_fwnode(dev); | 901 | fwnode = dev_fwnode(dev); |
896 | if (!fwnode) | 902 | if (!fwnode) |
@@ -900,16 +906,16 @@ void device_remove_properties(struct device *dev) | |||
900 | * the pset. If there is no real firmware node (ACPI/DT) primary | 906 | * the pset. If there is no real firmware node (ACPI/DT) primary |
901 | * will hold the pset. | 907 | * will hold the pset. |
902 | */ | 908 | */ |
903 | if (is_pset_node(fwnode)) { | 909 | pset = to_pset_node(fwnode); |
910 | if (pset) { | ||
904 | set_primary_fwnode(dev, NULL); | 911 | set_primary_fwnode(dev, NULL); |
905 | pset_free_set(to_pset_node(fwnode)); | ||
906 | } else { | 912 | } else { |
907 | fwnode = fwnode->secondary; | 913 | pset = to_pset_node(fwnode->secondary); |
908 | if (!IS_ERR(fwnode) && is_pset_node(fwnode)) { | 914 | if (pset && dev == pset->dev) |
909 | set_secondary_fwnode(dev, NULL); | 915 | set_secondary_fwnode(dev, NULL); |
910 | pset_free_set(to_pset_node(fwnode)); | ||
911 | } | ||
912 | } | 916 | } |
917 | if (pset && dev == pset->dev) | ||
918 | pset_free_set(pset); | ||
913 | } | 919 | } |
914 | EXPORT_SYMBOL_GPL(device_remove_properties); | 920 | EXPORT_SYMBOL_GPL(device_remove_properties); |
915 | 921 | ||
@@ -938,6 +944,7 @@ int device_add_properties(struct device *dev, | |||
938 | 944 | ||
939 | p->fwnode.ops = &pset_fwnode_ops; | 945 | p->fwnode.ops = &pset_fwnode_ops; |
940 | set_secondary_fwnode(dev, &p->fwnode); | 946 | set_secondary_fwnode(dev, &p->fwnode); |
947 | p->dev = dev; | ||
941 | return 0; | 948 | return 0; |
942 | } | 949 | } |
943 | EXPORT_SYMBOL_GPL(device_add_properties); | 950 | EXPORT_SYMBOL_GPL(device_add_properties); |
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 883dfebd3014..9adfb5445f8d 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c | |||
@@ -243,7 +243,6 @@ static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize, | |||
243 | struct nbd_config *config = nbd->config; | 243 | struct nbd_config *config = nbd->config; |
244 | config->blksize = blocksize; | 244 | config->blksize = blocksize; |
245 | config->bytesize = blocksize * nr_blocks; | 245 | config->bytesize = blocksize * nr_blocks; |
246 | nbd_size_update(nbd); | ||
247 | } | 246 | } |
248 | 247 | ||
249 | static void nbd_complete_rq(struct request *req) | 248 | static void nbd_complete_rq(struct request *req) |
@@ -387,6 +386,15 @@ static int sock_xmit(struct nbd_device *nbd, int index, int send, | |||
387 | return result; | 386 | return result; |
388 | } | 387 | } |
389 | 388 | ||
389 | /* | ||
390 | * Different settings for sk->sk_sndtimeo can result in different return values | ||
391 | * if there is a signal pending when we enter sendmsg, because reasons? | ||
392 | */ | ||
393 | static inline int was_interrupted(int result) | ||
394 | { | ||
395 | return result == -ERESTARTSYS || result == -EINTR; | ||
396 | } | ||
397 | |||
390 | /* always call with the tx_lock held */ | 398 | /* always call with the tx_lock held */ |
391 | static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) | 399 | static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) |
392 | { | 400 | { |
@@ -459,7 +467,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index) | |||
459 | result = sock_xmit(nbd, index, 1, &from, | 467 | result = sock_xmit(nbd, index, 1, &from, |
460 | (type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent); | 468 | (type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent); |
461 | if (result <= 0) { | 469 | if (result <= 0) { |
462 | if (result == -ERESTARTSYS) { | 470 | if (was_interrupted(result)) { |
463 | /* If we havne't sent anything we can just return BUSY, | 471 | /* If we havne't sent anything we can just return BUSY, |
464 | * however if we have sent something we need to make | 472 | * however if we have sent something we need to make |
465 | * sure we only allow this req to be sent until we are | 473 | * sure we only allow this req to be sent until we are |
@@ -503,7 +511,7 @@ send_pages: | |||
503 | } | 511 | } |
504 | result = sock_xmit(nbd, index, 1, &from, flags, &sent); | 512 | result = sock_xmit(nbd, index, 1, &from, flags, &sent); |
505 | if (result <= 0) { | 513 | if (result <= 0) { |
506 | if (result == -ERESTARTSYS) { | 514 | if (was_interrupted(result)) { |
507 | /* We've already sent the header, we | 515 | /* We've already sent the header, we |
508 | * have no choice but to set pending and | 516 | * have no choice but to set pending and |
509 | * return BUSY. | 517 | * return BUSY. |
@@ -1094,6 +1102,7 @@ static int nbd_start_device(struct nbd_device *nbd) | |||
1094 | args->index = i; | 1102 | args->index = i; |
1095 | queue_work(recv_workqueue, &args->work); | 1103 | queue_work(recv_workqueue, &args->work); |
1096 | } | 1104 | } |
1105 | nbd_size_update(nbd); | ||
1097 | return error; | 1106 | return error; |
1098 | } | 1107 | } |
1099 | 1108 | ||
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c index 7cedb4295e9d..64d0fc17c174 100644 --- a/drivers/block/skd_main.c +++ b/drivers/block/skd_main.c | |||
@@ -2604,7 +2604,7 @@ static void *skd_alloc_dma(struct skd_device *skdev, struct kmem_cache *s, | |||
2604 | return NULL; | 2604 | return NULL; |
2605 | *dma_handle = dma_map_single(dev, buf, s->size, dir); | 2605 | *dma_handle = dma_map_single(dev, buf, s->size, dir); |
2606 | if (dma_mapping_error(dev, *dma_handle)) { | 2606 | if (dma_mapping_error(dev, *dma_handle)) { |
2607 | kfree(buf); | 2607 | kmem_cache_free(s, buf); |
2608 | buf = NULL; | 2608 | buf = NULL; |
2609 | } | 2609 | } |
2610 | return buf; | 2610 | return buf; |
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c index c7f396903184..70db4d5638a6 100644 --- a/drivers/bus/mvebu-mbus.c +++ b/drivers/bus/mvebu-mbus.c | |||
@@ -720,7 +720,7 @@ mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus) | |||
720 | if (mbus->hw_io_coherency) | 720 | if (mbus->hw_io_coherency) |
721 | w->mbus_attr |= ATTR_HW_COHERENCY; | 721 | w->mbus_attr |= ATTR_HW_COHERENCY; |
722 | w->base = base & DDR_BASE_CS_LOW_MASK; | 722 | w->base = base & DDR_BASE_CS_LOW_MASK; |
723 | w->size = (size | ~DDR_SIZE_MASK) + 1; | 723 | w->size = (u64)(size | ~DDR_SIZE_MASK) + 1; |
724 | } | 724 | } |
725 | } | 725 | } |
726 | mvebu_mbus_dram_info.num_cs = cs; | 726 | mvebu_mbus_dram_info.num_cs = cs; |
diff --git a/drivers/clocksource/cs5535-clockevt.c b/drivers/clocksource/cs5535-clockevt.c index a1df588343f2..1de8cac99a0e 100644 --- a/drivers/clocksource/cs5535-clockevt.c +++ b/drivers/clocksource/cs5535-clockevt.c | |||
@@ -117,7 +117,8 @@ static irqreturn_t mfgpt_tick(int irq, void *dev_id) | |||
117 | /* Turn off the clock (and clear the event) */ | 117 | /* Turn off the clock (and clear the event) */ |
118 | disable_timer(cs5535_event_clock); | 118 | disable_timer(cs5535_event_clock); |
119 | 119 | ||
120 | if (clockevent_state_shutdown(&cs5535_clockevent)) | 120 | if (clockevent_state_detached(&cs5535_clockevent) || |
121 | clockevent_state_shutdown(&cs5535_clockevent)) | ||
121 | return IRQ_HANDLED; | 122 | return IRQ_HANDLED; |
122 | 123 | ||
123 | /* Clear the counter */ | 124 | /* Clear the counter */ |
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c index 48eaf2879228..aa390404e85f 100644 --- a/drivers/cpuidle/governors/menu.c +++ b/drivers/cpuidle/governors/menu.c | |||
@@ -298,8 +298,8 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) | |||
298 | data->needs_update = 0; | 298 | data->needs_update = 0; |
299 | } | 299 | } |
300 | 300 | ||
301 | /* resume_latency is 0 means no restriction */ | 301 | if (resume_latency < latency_req && |
302 | if (resume_latency && resume_latency < latency_req) | 302 | resume_latency != PM_QOS_RESUME_LATENCY_NO_CONSTRAINT) |
303 | latency_req = resume_latency; | 303 | latency_req = resume_latency; |
304 | 304 | ||
305 | /* Special case when user has set very strict latency requirement */ | 305 | /* Special case when user has set very strict latency requirement */ |
diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c index d9fbbf01062b..0f9754e07719 100644 --- a/drivers/crypto/axis/artpec6_crypto.c +++ b/drivers/crypto/axis/artpec6_crypto.c | |||
@@ -349,8 +349,6 @@ struct artpec6_crypto_aead_req_ctx { | |||
349 | /* The crypto framework makes it hard to avoid this global. */ | 349 | /* The crypto framework makes it hard to avoid this global. */ |
350 | static struct device *artpec6_crypto_dev; | 350 | static struct device *artpec6_crypto_dev; |
351 | 351 | ||
352 | static struct dentry *dbgfs_root; | ||
353 | |||
354 | #ifdef CONFIG_FAULT_INJECTION | 352 | #ifdef CONFIG_FAULT_INJECTION |
355 | static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read); | 353 | static DECLARE_FAULT_ATTR(artpec6_crypto_fail_status_read); |
356 | static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full); | 354 | static DECLARE_FAULT_ATTR(artpec6_crypto_fail_dma_array_full); |
@@ -2984,6 +2982,8 @@ struct dbgfs_u32 { | |||
2984 | char *desc; | 2982 | char *desc; |
2985 | }; | 2983 | }; |
2986 | 2984 | ||
2985 | static struct dentry *dbgfs_root; | ||
2986 | |||
2987 | static void artpec6_crypto_init_debugfs(void) | 2987 | static void artpec6_crypto_init_debugfs(void) |
2988 | { | 2988 | { |
2989 | dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL); | 2989 | dbgfs_root = debugfs_create_dir("artpec6_crypto", NULL); |
diff --git a/drivers/crypto/stm32/stm32-hash.c b/drivers/crypto/stm32/stm32-hash.c index b585ce54a802..4835dd4a9e50 100644 --- a/drivers/crypto/stm32/stm32-hash.c +++ b/drivers/crypto/stm32/stm32-hash.c | |||
@@ -553,9 +553,9 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev) | |||
553 | { | 553 | { |
554 | struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); | 554 | struct stm32_hash_request_ctx *rctx = ahash_request_ctx(hdev->req); |
555 | struct scatterlist sg[1], *tsg; | 555 | struct scatterlist sg[1], *tsg; |
556 | int err = 0, len = 0, reg, ncp; | 556 | int err = 0, len = 0, reg, ncp = 0; |
557 | unsigned int i; | 557 | unsigned int i; |
558 | const u32 *buffer = (const u32 *)rctx->buffer; | 558 | u32 *buffer = (void *)rctx->buffer; |
559 | 559 | ||
560 | rctx->sg = hdev->req->src; | 560 | rctx->sg = hdev->req->src; |
561 | rctx->total = hdev->req->nbytes; | 561 | rctx->total = hdev->req->nbytes; |
@@ -620,10 +620,13 @@ static int stm32_hash_dma_send(struct stm32_hash_dev *hdev) | |||
620 | reg |= HASH_CR_DMAA; | 620 | reg |= HASH_CR_DMAA; |
621 | stm32_hash_write(hdev, HASH_CR, reg); | 621 | stm32_hash_write(hdev, HASH_CR, reg); |
622 | 622 | ||
623 | for (i = 0; i < DIV_ROUND_UP(ncp, sizeof(u32)); i++) | 623 | if (ncp) { |
624 | stm32_hash_write(hdev, HASH_DIN, buffer[i]); | 624 | memset(buffer + ncp, 0, |
625 | 625 | DIV_ROUND_UP(ncp, sizeof(u32)) - ncp); | |
626 | stm32_hash_set_nblw(hdev, ncp); | 626 | writesl(hdev->io_base + HASH_DIN, buffer, |
627 | DIV_ROUND_UP(ncp, sizeof(u32))); | ||
628 | } | ||
629 | stm32_hash_set_nblw(hdev, DIV_ROUND_UP(ncp, sizeof(u32))); | ||
627 | reg = stm32_hash_read(hdev, HASH_STR); | 630 | reg = stm32_hash_read(hdev, HASH_STR); |
628 | reg |= HASH_STR_DCAL; | 631 | reg |= HASH_STR_DCAL; |
629 | stm32_hash_write(hdev, HASH_STR, reg); | 632 | stm32_hash_write(hdev, HASH_STR, reg); |
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index 66fb40d0ebdb..03830634e141 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c | |||
@@ -383,7 +383,7 @@ err_put_fd: | |||
383 | return err; | 383 | return err; |
384 | } | 384 | } |
385 | 385 | ||
386 | static void sync_fill_fence_info(struct dma_fence *fence, | 386 | static int sync_fill_fence_info(struct dma_fence *fence, |
387 | struct sync_fence_info *info) | 387 | struct sync_fence_info *info) |
388 | { | 388 | { |
389 | strlcpy(info->obj_name, fence->ops->get_timeline_name(fence), | 389 | strlcpy(info->obj_name, fence->ops->get_timeline_name(fence), |
@@ -399,6 +399,8 @@ static void sync_fill_fence_info(struct dma_fence *fence, | |||
399 | test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ? | 399 | test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ? |
400 | ktime_to_ns(fence->timestamp) : | 400 | ktime_to_ns(fence->timestamp) : |
401 | ktime_set(0, 0); | 401 | ktime_set(0, 0); |
402 | |||
403 | return info->status; | ||
402 | } | 404 | } |
403 | 405 | ||
404 | static long sync_file_ioctl_fence_info(struct sync_file *sync_file, | 406 | static long sync_file_ioctl_fence_info(struct sync_file *sync_file, |
@@ -424,8 +426,12 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file, | |||
424 | * sync_fence_info and return the actual number of fences on | 426 | * sync_fence_info and return the actual number of fences on |
425 | * info->num_fences. | 427 | * info->num_fences. |
426 | */ | 428 | */ |
427 | if (!info.num_fences) | 429 | if (!info.num_fences) { |
430 | info.status = dma_fence_is_signaled(sync_file->fence); | ||
428 | goto no_fences; | 431 | goto no_fences; |
432 | } else { | ||
433 | info.status = 1; | ||
434 | } | ||
429 | 435 | ||
430 | if (info.num_fences < num_fences) | 436 | if (info.num_fences < num_fences) |
431 | return -EINVAL; | 437 | return -EINVAL; |
@@ -435,8 +441,10 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file, | |||
435 | if (!fence_info) | 441 | if (!fence_info) |
436 | return -ENOMEM; | 442 | return -ENOMEM; |
437 | 443 | ||
438 | for (i = 0; i < num_fences; i++) | 444 | for (i = 0; i < num_fences; i++) { |
439 | sync_fill_fence_info(fences[i], &fence_info[i]); | 445 | int status = sync_fill_fence_info(fences[i], &fence_info[i]); |
446 | info.status = info.status <= 0 ? info.status : status; | ||
447 | } | ||
440 | 448 | ||
441 | if (copy_to_user(u64_to_user_ptr(info.sync_fence_info), fence_info, | 449 | if (copy_to_user(u64_to_user_ptr(info.sync_fence_info), fence_info, |
442 | size)) { | 450 | size)) { |
@@ -446,7 +454,6 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file, | |||
446 | 454 | ||
447 | no_fences: | 455 | no_fences: |
448 | sync_file_get_name(sync_file, info.name, sizeof(info.name)); | 456 | sync_file_get_name(sync_file, info.name, sizeof(info.name)); |
449 | info.status = dma_fence_is_signaled(sync_file->fence); | ||
450 | info.num_fences = num_fences; | 457 | info.num_fences = num_fences; |
451 | 458 | ||
452 | if (copy_to_user((void __user *)arg, &info, sizeof(info))) | 459 | if (copy_to_user((void __user *)arg, &info, sizeof(info))) |
diff --git a/drivers/dma/altera-msgdma.c b/drivers/dma/altera-msgdma.c index 32905d5606ac..55f9c62ee54b 100644 --- a/drivers/dma/altera-msgdma.c +++ b/drivers/dma/altera-msgdma.c | |||
@@ -212,11 +212,12 @@ struct msgdma_device { | |||
212 | static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev) | 212 | static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev) |
213 | { | 213 | { |
214 | struct msgdma_sw_desc *desc; | 214 | struct msgdma_sw_desc *desc; |
215 | unsigned long flags; | ||
215 | 216 | ||
216 | spin_lock_bh(&mdev->lock); | 217 | spin_lock_irqsave(&mdev->lock, flags); |
217 | desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node); | 218 | desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node); |
218 | list_del(&desc->node); | 219 | list_del(&desc->node); |
219 | spin_unlock_bh(&mdev->lock); | 220 | spin_unlock_irqrestore(&mdev->lock, flags); |
220 | 221 | ||
221 | INIT_LIST_HEAD(&desc->tx_list); | 222 | INIT_LIST_HEAD(&desc->tx_list); |
222 | 223 | ||
@@ -306,13 +307,14 @@ static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx) | |||
306 | struct msgdma_device *mdev = to_mdev(tx->chan); | 307 | struct msgdma_device *mdev = to_mdev(tx->chan); |
307 | struct msgdma_sw_desc *new; | 308 | struct msgdma_sw_desc *new; |
308 | dma_cookie_t cookie; | 309 | dma_cookie_t cookie; |
310 | unsigned long flags; | ||
309 | 311 | ||
310 | new = tx_to_desc(tx); | 312 | new = tx_to_desc(tx); |
311 | spin_lock_bh(&mdev->lock); | 313 | spin_lock_irqsave(&mdev->lock, flags); |
312 | cookie = dma_cookie_assign(tx); | 314 | cookie = dma_cookie_assign(tx); |
313 | 315 | ||
314 | list_add_tail(&new->node, &mdev->pending_list); | 316 | list_add_tail(&new->node, &mdev->pending_list); |
315 | spin_unlock_bh(&mdev->lock); | 317 | spin_unlock_irqrestore(&mdev->lock, flags); |
316 | 318 | ||
317 | return cookie; | 319 | return cookie; |
318 | } | 320 | } |
@@ -336,17 +338,18 @@ msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst, | |||
336 | struct msgdma_extended_desc *desc; | 338 | struct msgdma_extended_desc *desc; |
337 | size_t copy; | 339 | size_t copy; |
338 | u32 desc_cnt; | 340 | u32 desc_cnt; |
341 | unsigned long irqflags; | ||
339 | 342 | ||
340 | desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN); | 343 | desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN); |
341 | 344 | ||
342 | spin_lock_bh(&mdev->lock); | 345 | spin_lock_irqsave(&mdev->lock, irqflags); |
343 | if (desc_cnt > mdev->desc_free_cnt) { | 346 | if (desc_cnt > mdev->desc_free_cnt) { |
344 | spin_unlock_bh(&mdev->lock); | 347 | spin_unlock_irqrestore(&mdev->lock, irqflags); |
345 | dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); | 348 | dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); |
346 | return NULL; | 349 | return NULL; |
347 | } | 350 | } |
348 | mdev->desc_free_cnt -= desc_cnt; | 351 | mdev->desc_free_cnt -= desc_cnt; |
349 | spin_unlock_bh(&mdev->lock); | 352 | spin_unlock_irqrestore(&mdev->lock, irqflags); |
350 | 353 | ||
351 | do { | 354 | do { |
352 | /* Allocate and populate the descriptor */ | 355 | /* Allocate and populate the descriptor */ |
@@ -397,18 +400,19 @@ msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, | |||
397 | u32 desc_cnt = 0, i; | 400 | u32 desc_cnt = 0, i; |
398 | struct scatterlist *sg; | 401 | struct scatterlist *sg; |
399 | u32 stride; | 402 | u32 stride; |
403 | unsigned long irqflags; | ||
400 | 404 | ||
401 | for_each_sg(sgl, sg, sg_len, i) | 405 | for_each_sg(sgl, sg, sg_len, i) |
402 | desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN); | 406 | desc_cnt += DIV_ROUND_UP(sg_dma_len(sg), MSGDMA_MAX_TRANS_LEN); |
403 | 407 | ||
404 | spin_lock_bh(&mdev->lock); | 408 | spin_lock_irqsave(&mdev->lock, irqflags); |
405 | if (desc_cnt > mdev->desc_free_cnt) { | 409 | if (desc_cnt > mdev->desc_free_cnt) { |
406 | spin_unlock_bh(&mdev->lock); | 410 | spin_unlock_irqrestore(&mdev->lock, irqflags); |
407 | dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); | 411 | dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); |
408 | return NULL; | 412 | return NULL; |
409 | } | 413 | } |
410 | mdev->desc_free_cnt -= desc_cnt; | 414 | mdev->desc_free_cnt -= desc_cnt; |
411 | spin_unlock_bh(&mdev->lock); | 415 | spin_unlock_irqrestore(&mdev->lock, irqflags); |
412 | 416 | ||
413 | avail = sg_dma_len(sgl); | 417 | avail = sg_dma_len(sgl); |
414 | 418 | ||
@@ -566,10 +570,11 @@ static void msgdma_start_transfer(struct msgdma_device *mdev) | |||
566 | static void msgdma_issue_pending(struct dma_chan *chan) | 570 | static void msgdma_issue_pending(struct dma_chan *chan) |
567 | { | 571 | { |
568 | struct msgdma_device *mdev = to_mdev(chan); | 572 | struct msgdma_device *mdev = to_mdev(chan); |
573 | unsigned long flags; | ||
569 | 574 | ||
570 | spin_lock_bh(&mdev->lock); | 575 | spin_lock_irqsave(&mdev->lock, flags); |
571 | msgdma_start_transfer(mdev); | 576 | msgdma_start_transfer(mdev); |
572 | spin_unlock_bh(&mdev->lock); | 577 | spin_unlock_irqrestore(&mdev->lock, flags); |
573 | } | 578 | } |
574 | 579 | ||
575 | /** | 580 | /** |
@@ -634,10 +639,11 @@ static void msgdma_free_descriptors(struct msgdma_device *mdev) | |||
634 | static void msgdma_free_chan_resources(struct dma_chan *dchan) | 639 | static void msgdma_free_chan_resources(struct dma_chan *dchan) |
635 | { | 640 | { |
636 | struct msgdma_device *mdev = to_mdev(dchan); | 641 | struct msgdma_device *mdev = to_mdev(dchan); |
642 | unsigned long flags; | ||
637 | 643 | ||
638 | spin_lock_bh(&mdev->lock); | 644 | spin_lock_irqsave(&mdev->lock, flags); |
639 | msgdma_free_descriptors(mdev); | 645 | msgdma_free_descriptors(mdev); |
640 | spin_unlock_bh(&mdev->lock); | 646 | spin_unlock_irqrestore(&mdev->lock, flags); |
641 | kfree(mdev->sw_desq); | 647 | kfree(mdev->sw_desq); |
642 | } | 648 | } |
643 | 649 | ||
@@ -682,8 +688,9 @@ static void msgdma_tasklet(unsigned long data) | |||
682 | u32 count; | 688 | u32 count; |
683 | u32 __maybe_unused size; | 689 | u32 __maybe_unused size; |
684 | u32 __maybe_unused status; | 690 | u32 __maybe_unused status; |
691 | unsigned long flags; | ||
685 | 692 | ||
686 | spin_lock(&mdev->lock); | 693 | spin_lock_irqsave(&mdev->lock, flags); |
687 | 694 | ||
688 | /* Read number of responses that are available */ | 695 | /* Read number of responses that are available */ |
689 | count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL); | 696 | count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL); |
@@ -698,13 +705,13 @@ static void msgdma_tasklet(unsigned long data) | |||
698 | * bits. So we need to just drop these values. | 705 | * bits. So we need to just drop these values. |
699 | */ | 706 | */ |
700 | size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED); | 707 | size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED); |
701 | status = ioread32(mdev->resp - MSGDMA_RESP_STATUS); | 708 | status = ioread32(mdev->resp + MSGDMA_RESP_STATUS); |
702 | 709 | ||
703 | msgdma_complete_descriptor(mdev); | 710 | msgdma_complete_descriptor(mdev); |
704 | msgdma_chan_desc_cleanup(mdev); | 711 | msgdma_chan_desc_cleanup(mdev); |
705 | } | 712 | } |
706 | 713 | ||
707 | spin_unlock(&mdev->lock); | 714 | spin_unlock_irqrestore(&mdev->lock, flags); |
708 | } | 715 | } |
709 | 716 | ||
710 | /** | 717 | /** |
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 3879f80a4815..a7ea20e7b8e9 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c | |||
@@ -1143,11 +1143,24 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy( | |||
1143 | struct edma_desc *edesc; | 1143 | struct edma_desc *edesc; |
1144 | struct device *dev = chan->device->dev; | 1144 | struct device *dev = chan->device->dev; |
1145 | struct edma_chan *echan = to_edma_chan(chan); | 1145 | struct edma_chan *echan = to_edma_chan(chan); |
1146 | unsigned int width, pset_len; | 1146 | unsigned int width, pset_len, array_size; |
1147 | 1147 | ||
1148 | if (unlikely(!echan || !len)) | 1148 | if (unlikely(!echan || !len)) |
1149 | return NULL; | 1149 | return NULL; |
1150 | 1150 | ||
1151 | /* Align the array size (acnt block) with the transfer properties */ | ||
1152 | switch (__ffs((src | dest | len))) { | ||
1153 | case 0: | ||
1154 | array_size = SZ_32K - 1; | ||
1155 | break; | ||
1156 | case 1: | ||
1157 | array_size = SZ_32K - 2; | ||
1158 | break; | ||
1159 | default: | ||
1160 | array_size = SZ_32K - 4; | ||
1161 | break; | ||
1162 | } | ||
1163 | |||
1151 | if (len < SZ_64K) { | 1164 | if (len < SZ_64K) { |
1152 | /* | 1165 | /* |
1153 | * Transfer size less than 64K can be handled with one paRAM | 1166 | * Transfer size less than 64K can be handled with one paRAM |
@@ -1169,7 +1182,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy( | |||
1169 | * When the full_length is multibple of 32767 one slot can be | 1182 | * When the full_length is multibple of 32767 one slot can be |
1170 | * used to complete the transfer. | 1183 | * used to complete the transfer. |
1171 | */ | 1184 | */ |
1172 | width = SZ_32K - 1; | 1185 | width = array_size; |
1173 | pset_len = rounddown(len, width); | 1186 | pset_len = rounddown(len, width); |
1174 | /* One slot is enough for lengths multiple of (SZ_32K -1) */ | 1187 | /* One slot is enough for lengths multiple of (SZ_32K -1) */ |
1175 | if (unlikely(pset_len == len)) | 1188 | if (unlikely(pset_len == len)) |
@@ -1217,7 +1230,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy( | |||
1217 | } | 1230 | } |
1218 | dest += pset_len; | 1231 | dest += pset_len; |
1219 | src += pset_len; | 1232 | src += pset_len; |
1220 | pset_len = width = len % (SZ_32K - 1); | 1233 | pset_len = width = len % array_size; |
1221 | 1234 | ||
1222 | ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1, | 1235 | ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1, |
1223 | width, pset_len, DMA_MEM_TO_MEM); | 1236 | width, pset_len, DMA_MEM_TO_MEM); |
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c index 2f65a8fde21d..f1d04b70ee67 100644 --- a/drivers/dma/ti-dma-crossbar.c +++ b/drivers/dma/ti-dma-crossbar.c | |||
@@ -262,13 +262,14 @@ static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec, | |||
262 | mutex_lock(&xbar->mutex); | 262 | mutex_lock(&xbar->mutex); |
263 | map->xbar_out = find_first_zero_bit(xbar->dma_inuse, | 263 | map->xbar_out = find_first_zero_bit(xbar->dma_inuse, |
264 | xbar->dma_requests); | 264 | xbar->dma_requests); |
265 | mutex_unlock(&xbar->mutex); | ||
266 | if (map->xbar_out == xbar->dma_requests) { | 265 | if (map->xbar_out == xbar->dma_requests) { |
266 | mutex_unlock(&xbar->mutex); | ||
267 | dev_err(&pdev->dev, "Run out of free DMA requests\n"); | 267 | dev_err(&pdev->dev, "Run out of free DMA requests\n"); |
268 | kfree(map); | 268 | kfree(map); |
269 | return ERR_PTR(-ENOMEM); | 269 | return ERR_PTR(-ENOMEM); |
270 | } | 270 | } |
271 | set_bit(map->xbar_out, xbar->dma_inuse); | 271 | set_bit(map->xbar_out, xbar->dma_inuse); |
272 | mutex_unlock(&xbar->mutex); | ||
272 | 273 | ||
273 | map->xbar_in = (u16)dma_spec->args[0]; | 274 | map->xbar_in = (u16)dma_spec->args[0]; |
274 | 275 | ||
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c index 1cb2d1c070c3..a94601d5939e 100644 --- a/drivers/firmware/efi/libstub/arm-stub.c +++ b/drivers/firmware/efi/libstub/arm-stub.c | |||
@@ -238,7 +238,8 @@ unsigned long efi_entry(void *handle, efi_system_table_t *sys_table, | |||
238 | 238 | ||
239 | efi_random_get_seed(sys_table); | 239 | efi_random_get_seed(sys_table); |
240 | 240 | ||
241 | if (!nokaslr()) { | 241 | /* hibernation expects the runtime regions to stay in the same place */ |
242 | if (!IS_ENABLED(CONFIG_HIBERNATION) && !nokaslr()) { | ||
242 | /* | 243 | /* |
243 | * Randomize the base of the UEFI runtime services region. | 244 | * Randomize the base of the UEFI runtime services region. |
244 | * Preserve the 2 MB alignment of the region by taking a | 245 | * Preserve the 2 MB alignment of the region by taking a |
diff --git a/drivers/firmware/efi/test/efi_test.c b/drivers/firmware/efi/test/efi_test.c index 08129b7b80ab..41c48a1e8baa 100644 --- a/drivers/firmware/efi/test/efi_test.c +++ b/drivers/firmware/efi/test/efi_test.c | |||
@@ -593,6 +593,9 @@ static long efi_runtime_query_capsulecaps(unsigned long arg) | |||
593 | if (copy_from_user(&qcaps, qcaps_user, sizeof(qcaps))) | 593 | if (copy_from_user(&qcaps, qcaps_user, sizeof(qcaps))) |
594 | return -EFAULT; | 594 | return -EFAULT; |
595 | 595 | ||
596 | if (qcaps.capsule_count == ULONG_MAX) | ||
597 | return -EINVAL; | ||
598 | |||
596 | capsules = kcalloc(qcaps.capsule_count + 1, | 599 | capsules = kcalloc(qcaps.capsule_count + 1, |
597 | sizeof(efi_capsule_header_t), GFP_KERNEL); | 600 | sizeof(efi_capsule_header_t), GFP_KERNEL); |
598 | if (!capsules) | 601 | if (!capsules) |
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index 3388d54ba114..3f80f167ed56 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig | |||
@@ -453,7 +453,8 @@ config GPIO_TS4800 | |||
453 | config GPIO_THUNDERX | 453 | config GPIO_THUNDERX |
454 | tristate "Cavium ThunderX/OCTEON-TX GPIO" | 454 | tristate "Cavium ThunderX/OCTEON-TX GPIO" |
455 | depends on ARCH_THUNDER || (64BIT && COMPILE_TEST) | 455 | depends on ARCH_THUNDER || (64BIT && COMPILE_TEST) |
456 | depends on PCI_MSI && IRQ_DOMAIN_HIERARCHY | 456 | depends on PCI_MSI |
457 | select IRQ_DOMAIN_HIERARCHY | ||
457 | select IRQ_FASTEOI_HIERARCHY_HANDLERS | 458 | select IRQ_FASTEOI_HIERARCHY_HANDLERS |
458 | help | 459 | help |
459 | Say yes here to support the on-chip GPIO lines on the ThunderX | 460 | Say yes here to support the on-chip GPIO lines on the ThunderX |
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index dbf869fb63ce..3233b72b6828 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c | |||
@@ -518,7 +518,13 @@ static int omap_gpio_irq_type(struct irq_data *d, unsigned type) | |||
518 | if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) | 518 | if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) |
519 | irq_set_handler_locked(d, handle_level_irq); | 519 | irq_set_handler_locked(d, handle_level_irq); |
520 | else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) | 520 | else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) |
521 | irq_set_handler_locked(d, handle_edge_irq); | 521 | /* |
522 | * Edge IRQs are already cleared/acked in irq_handler and | ||
523 | * not need to be masked, as result handle_edge_irq() | ||
524 | * logic is excessed here and may cause lose of interrupts. | ||
525 | * So just use handle_simple_irq. | ||
526 | */ | ||
527 | irq_set_handler_locked(d, handle_simple_irq); | ||
522 | 528 | ||
523 | return 0; | 529 | return 0; |
524 | 530 | ||
@@ -678,7 +684,7 @@ static void omap_gpio_free(struct gpio_chip *chip, unsigned offset) | |||
678 | static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank) | 684 | static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank) |
679 | { | 685 | { |
680 | void __iomem *isr_reg = NULL; | 686 | void __iomem *isr_reg = NULL; |
681 | u32 isr; | 687 | u32 enabled, isr, level_mask; |
682 | unsigned int bit; | 688 | unsigned int bit; |
683 | struct gpio_bank *bank = gpiobank; | 689 | struct gpio_bank *bank = gpiobank; |
684 | unsigned long wa_lock_flags; | 690 | unsigned long wa_lock_flags; |
@@ -691,23 +697,21 @@ static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank) | |||
691 | pm_runtime_get_sync(bank->chip.parent); | 697 | pm_runtime_get_sync(bank->chip.parent); |
692 | 698 | ||
693 | while (1) { | 699 | while (1) { |
694 | u32 isr_saved, level_mask = 0; | ||
695 | u32 enabled; | ||
696 | |||
697 | raw_spin_lock_irqsave(&bank->lock, lock_flags); | 700 | raw_spin_lock_irqsave(&bank->lock, lock_flags); |
698 | 701 | ||
699 | enabled = omap_get_gpio_irqbank_mask(bank); | 702 | enabled = omap_get_gpio_irqbank_mask(bank); |
700 | isr_saved = isr = readl_relaxed(isr_reg) & enabled; | 703 | isr = readl_relaxed(isr_reg) & enabled; |
701 | 704 | ||
702 | if (bank->level_mask) | 705 | if (bank->level_mask) |
703 | level_mask = bank->level_mask & enabled; | 706 | level_mask = bank->level_mask & enabled; |
707 | else | ||
708 | level_mask = 0; | ||
704 | 709 | ||
705 | /* clear edge sensitive interrupts before handler(s) are | 710 | /* clear edge sensitive interrupts before handler(s) are |
706 | called so that we don't miss any interrupt occurred while | 711 | called so that we don't miss any interrupt occurred while |
707 | executing them */ | 712 | executing them */ |
708 | omap_disable_gpio_irqbank(bank, isr_saved & ~level_mask); | 713 | if (isr & ~level_mask) |
709 | omap_clear_gpio_irqbank(bank, isr_saved & ~level_mask); | 714 | omap_clear_gpio_irqbank(bank, isr & ~level_mask); |
710 | omap_enable_gpio_irqbank(bank, isr_saved & ~level_mask); | ||
711 | 715 | ||
712 | raw_spin_unlock_irqrestore(&bank->lock, lock_flags); | 716 | raw_spin_unlock_irqrestore(&bank->lock, lock_flags); |
713 | 717 | ||
@@ -1010,7 +1014,7 @@ static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value) | |||
1010 | 1014 | ||
1011 | /*---------------------------------------------------------------------*/ | 1015 | /*---------------------------------------------------------------------*/ |
1012 | 1016 | ||
1013 | static void __init omap_gpio_show_rev(struct gpio_bank *bank) | 1017 | static void omap_gpio_show_rev(struct gpio_bank *bank) |
1014 | { | 1018 | { |
1015 | static bool called; | 1019 | static bool called; |
1016 | u32 rev; | 1020 | u32 rev; |
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index 4d2113530735..eb4528c87c0b 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c | |||
@@ -203,7 +203,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares, | |||
203 | 203 | ||
204 | if (pin <= 255) { | 204 | if (pin <= 255) { |
205 | char ev_name[5]; | 205 | char ev_name[5]; |
206 | sprintf(ev_name, "_%c%02X", | 206 | sprintf(ev_name, "_%c%02hhX", |
207 | agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L', | 207 | agpio->triggering == ACPI_EDGE_SENSITIVE ? 'E' : 'L', |
208 | pin); | 208 | pin); |
209 | if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle))) | 209 | if (ACPI_SUCCESS(acpi_get_handle(handle, ev_name, &evt_handle))) |
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 7ef6c28a34d9..bc746131987f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | |||
@@ -834,7 +834,7 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem) | |||
834 | placement.busy_placement = &placements; | 834 | placement.busy_placement = &placements; |
835 | placements.fpfn = 0; | 835 | placements.fpfn = 0; |
836 | placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT; | 836 | placements.lpfn = adev->mc.gart_size >> PAGE_SHIFT; |
837 | placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT; | 837 | placements.flags = bo->mem.placement | TTM_PL_FLAG_TT; |
838 | 838 | ||
839 | r = ttm_bo_mem_space(bo, &placement, &tmp, true, false); | 839 | r = ttm_bo_mem_space(bo, &placement, &tmp, true, false); |
840 | if (unlikely(r)) | 840 | if (unlikely(r)) |
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 31db356476f8..430a6b4dfac9 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | |||
@@ -225,11 +225,7 @@ static int uvd_v6_0_suspend(void *handle) | |||
225 | if (r) | 225 | if (r) |
226 | return r; | 226 | return r; |
227 | 227 | ||
228 | /* Skip this for APU for now */ | 228 | return amdgpu_uvd_suspend(adev); |
229 | if (!(adev->flags & AMD_IS_APU)) | ||
230 | r = amdgpu_uvd_suspend(adev); | ||
231 | |||
232 | return r; | ||
233 | } | 229 | } |
234 | 230 | ||
235 | static int uvd_v6_0_resume(void *handle) | 231 | static int uvd_v6_0_resume(void *handle) |
@@ -237,12 +233,10 @@ static int uvd_v6_0_resume(void *handle) | |||
237 | int r; | 233 | int r; |
238 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; | 234 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
239 | 235 | ||
240 | /* Skip this for APU for now */ | 236 | r = amdgpu_uvd_resume(adev); |
241 | if (!(adev->flags & AMD_IS_APU)) { | 237 | if (r) |
242 | r = amdgpu_uvd_resume(adev); | 238 | return r; |
243 | if (r) | 239 | |
244 | return r; | ||
245 | } | ||
246 | return uvd_v6_0_hw_init(adev); | 240 | return uvd_v6_0_hw_init(adev); |
247 | } | 241 | } |
248 | 242 | ||
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index c2743233ba10..b526f49be65d 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | |||
@@ -830,7 +830,7 @@ uint32_t smu7_get_xclk(struct pp_hwmgr *hwmgr) | |||
830 | { | 830 | { |
831 | uint32_t reference_clock, tmp; | 831 | uint32_t reference_clock, tmp; |
832 | struct cgs_display_info info = {0}; | 832 | struct cgs_display_info info = {0}; |
833 | struct cgs_mode_info mode_info; | 833 | struct cgs_mode_info mode_info = {0}; |
834 | 834 | ||
835 | info.mode_info = &mode_info; | 835 | info.mode_info = &mode_info; |
836 | 836 | ||
@@ -3948,10 +3948,9 @@ static int smu7_program_display_gap(struct pp_hwmgr *hwmgr) | |||
3948 | uint32_t ref_clock; | 3948 | uint32_t ref_clock; |
3949 | uint32_t refresh_rate = 0; | 3949 | uint32_t refresh_rate = 0; |
3950 | struct cgs_display_info info = {0}; | 3950 | struct cgs_display_info info = {0}; |
3951 | struct cgs_mode_info mode_info; | 3951 | struct cgs_mode_info mode_info = {0}; |
3952 | 3952 | ||
3953 | info.mode_info = &mode_info; | 3953 | info.mode_info = &mode_info; |
3954 | |||
3955 | cgs_get_active_displays_info(hwmgr->device, &info); | 3954 | cgs_get_active_displays_info(hwmgr->device, &info); |
3956 | num_active_displays = info.display_count; | 3955 | num_active_displays = info.display_count; |
3957 | 3956 | ||
@@ -3967,6 +3966,7 @@ static int smu7_program_display_gap(struct pp_hwmgr *hwmgr) | |||
3967 | frame_time_in_us = 1000000 / refresh_rate; | 3966 | frame_time_in_us = 1000000 / refresh_rate; |
3968 | 3967 | ||
3969 | pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us; | 3968 | pre_vbi_time_in_us = frame_time_in_us - 200 - mode_info.vblank_time_us; |
3969 | |||
3970 | data->frame_time_x2 = frame_time_in_us * 2 / 100; | 3970 | data->frame_time_x2 = frame_time_in_us * 2 / 100; |
3971 | 3971 | ||
3972 | display_gap2 = pre_vbi_time_in_us * (ref_clock / 100); | 3972 | display_gap2 = pre_vbi_time_in_us * (ref_clock / 100); |
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 97c94f9683fa..38cea6fb25a8 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c | |||
@@ -205,32 +205,17 @@ void amd_sched_entity_fini(struct amd_gpu_scheduler *sched, | |||
205 | struct amd_sched_entity *entity) | 205 | struct amd_sched_entity *entity) |
206 | { | 206 | { |
207 | struct amd_sched_rq *rq = entity->rq; | 207 | struct amd_sched_rq *rq = entity->rq; |
208 | int r; | ||
209 | 208 | ||
210 | if (!amd_sched_entity_is_initialized(sched, entity)) | 209 | if (!amd_sched_entity_is_initialized(sched, entity)) |
211 | return; | 210 | return; |
211 | |||
212 | /** | 212 | /** |
213 | * The client will not queue more IBs during this fini, consume existing | 213 | * The client will not queue more IBs during this fini, consume existing |
214 | * queued IBs or discard them on SIGKILL | 214 | * queued IBs |
215 | */ | 215 | */ |
216 | if ((current->flags & PF_SIGNALED) && current->exit_code == SIGKILL) | 216 | wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity)); |
217 | r = -ERESTARTSYS; | ||
218 | else | ||
219 | r = wait_event_killable(sched->job_scheduled, | ||
220 | amd_sched_entity_is_idle(entity)); | ||
221 | amd_sched_rq_remove_entity(rq, entity); | ||
222 | if (r) { | ||
223 | struct amd_sched_job *job; | ||
224 | 217 | ||
225 | /* Park the kernel for a moment to make sure it isn't processing | 218 | amd_sched_rq_remove_entity(rq, entity); |
226 | * our enity. | ||
227 | */ | ||
228 | kthread_park(sched->thread); | ||
229 | kthread_unpark(sched->thread); | ||
230 | while (kfifo_out(&entity->job_queue, &job, sizeof(job))) | ||
231 | sched->ops->free_job(job); | ||
232 | |||
233 | } | ||
234 | kfifo_free(&entity->job_queue); | 219 | kfifo_free(&entity->job_queue); |
235 | } | 220 | } |
236 | 221 | ||
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 4e53aae9a1fb..0028591f3f95 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c | |||
@@ -2960,6 +2960,7 @@ out: | |||
2960 | drm_modeset_backoff(&ctx); | 2960 | drm_modeset_backoff(&ctx); |
2961 | } | 2961 | } |
2962 | 2962 | ||
2963 | drm_atomic_state_put(state); | ||
2963 | drm_modeset_drop_locks(&ctx); | 2964 | drm_modeset_drop_locks(&ctx); |
2964 | drm_modeset_acquire_fini(&ctx); | 2965 | drm_modeset_acquire_fini(&ctx); |
2965 | 2966 | ||
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index e651a58c18cf..82b72425a42f 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c | |||
@@ -168,11 +168,13 @@ static struct drm_driver exynos_drm_driver = { | |||
168 | static int exynos_drm_suspend(struct device *dev) | 168 | static int exynos_drm_suspend(struct device *dev) |
169 | { | 169 | { |
170 | struct drm_device *drm_dev = dev_get_drvdata(dev); | 170 | struct drm_device *drm_dev = dev_get_drvdata(dev); |
171 | struct exynos_drm_private *private = drm_dev->dev_private; | 171 | struct exynos_drm_private *private; |
172 | 172 | ||
173 | if (pm_runtime_suspended(dev) || !drm_dev) | 173 | if (pm_runtime_suspended(dev) || !drm_dev) |
174 | return 0; | 174 | return 0; |
175 | 175 | ||
176 | private = drm_dev->dev_private; | ||
177 | |||
176 | drm_kms_helper_poll_disable(drm_dev); | 178 | drm_kms_helper_poll_disable(drm_dev); |
177 | exynos_drm_fbdev_suspend(drm_dev); | 179 | exynos_drm_fbdev_suspend(drm_dev); |
178 | private->suspend_state = drm_atomic_helper_suspend(drm_dev); | 180 | private->suspend_state = drm_atomic_helper_suspend(drm_dev); |
@@ -188,11 +190,12 @@ static int exynos_drm_suspend(struct device *dev) | |||
188 | static int exynos_drm_resume(struct device *dev) | 190 | static int exynos_drm_resume(struct device *dev) |
189 | { | 191 | { |
190 | struct drm_device *drm_dev = dev_get_drvdata(dev); | 192 | struct drm_device *drm_dev = dev_get_drvdata(dev); |
191 | struct exynos_drm_private *private = drm_dev->dev_private; | 193 | struct exynos_drm_private *private; |
192 | 194 | ||
193 | if (pm_runtime_suspended(dev) || !drm_dev) | 195 | if (pm_runtime_suspended(dev) || !drm_dev) |
194 | return 0; | 196 | return 0; |
195 | 197 | ||
198 | private = drm_dev->dev_private; | ||
196 | drm_atomic_helper_resume(drm_dev, private->suspend_state); | 199 | drm_atomic_helper_resume(drm_dev, private->suspend_state); |
197 | exynos_drm_fbdev_resume(drm_dev); | 200 | exynos_drm_fbdev_resume(drm_dev); |
198 | drm_kms_helper_poll_enable(drm_dev); | 201 | drm_kms_helper_poll_enable(drm_dev); |
@@ -427,6 +430,7 @@ static void exynos_drm_unbind(struct device *dev) | |||
427 | 430 | ||
428 | kfree(drm->dev_private); | 431 | kfree(drm->dev_private); |
429 | drm->dev_private = NULL; | 432 | drm->dev_private = NULL; |
433 | dev_set_drvdata(dev, NULL); | ||
430 | 434 | ||
431 | drm_dev_unref(drm); | 435 | drm_dev_unref(drm); |
432 | } | 436 | } |
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 21c36e256884..d4726a3358a4 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c | |||
@@ -2723,6 +2723,9 @@ static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) | |||
2723 | uint32_t per_ctx_start[CACHELINE_DWORDS] = {0}; | 2723 | uint32_t per_ctx_start[CACHELINE_DWORDS] = {0}; |
2724 | unsigned char *bb_start_sva; | 2724 | unsigned char *bb_start_sva; |
2725 | 2725 | ||
2726 | if (!wa_ctx->per_ctx.valid) | ||
2727 | return 0; | ||
2728 | |||
2726 | per_ctx_start[0] = 0x18800001; | 2729 | per_ctx_start[0] = 0x18800001; |
2727 | per_ctx_start[1] = wa_ctx->per_ctx.guest_gma; | 2730 | per_ctx_start[1] = wa_ctx->per_ctx.guest_gma; |
2728 | 2731 | ||
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index 91b4300f3b39..e5320b4eb698 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c | |||
@@ -701,8 +701,7 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id, | |||
701 | CACHELINE_BYTES; | 701 | CACHELINE_BYTES; |
702 | workload->wa_ctx.per_ctx.guest_gma = | 702 | workload->wa_ctx.per_ctx.guest_gma = |
703 | per_ctx & PER_CTX_ADDR_MASK; | 703 | per_ctx & PER_CTX_ADDR_MASK; |
704 | 704 | workload->wa_ctx.per_ctx.valid = per_ctx & 1; | |
705 | WARN_ON(workload->wa_ctx.indirect_ctx.size && !(per_ctx & 0x1)); | ||
706 | } | 705 | } |
707 | 706 | ||
708 | if (emulate_schedule_in) | 707 | if (emulate_schedule_in) |
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 2294466dd415..a5bed2e71b92 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c | |||
@@ -1429,18 +1429,7 @@ static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset, | |||
1429 | return 0; | 1429 | return 0; |
1430 | } | 1430 | } |
1431 | 1431 | ||
1432 | static int ring_timestamp_mmio_read(struct intel_vgpu *vgpu, | 1432 | static int mmio_read_from_hw(struct intel_vgpu *vgpu, |
1433 | unsigned int offset, void *p_data, unsigned int bytes) | ||
1434 | { | ||
1435 | struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; | ||
1436 | |||
1437 | mmio_hw_access_pre(dev_priv); | ||
1438 | vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset)); | ||
1439 | mmio_hw_access_post(dev_priv); | ||
1440 | return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); | ||
1441 | } | ||
1442 | |||
1443 | static int instdone_mmio_read(struct intel_vgpu *vgpu, | ||
1444 | unsigned int offset, void *p_data, unsigned int bytes) | 1433 | unsigned int offset, void *p_data, unsigned int bytes) |
1445 | { | 1434 | { |
1446 | struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; | 1435 | struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; |
@@ -1589,6 +1578,8 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu, | |||
1589 | MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \ | 1578 | MMIO_F(prefix(BLT_RING_BASE), s, f, am, rm, d, r, w); \ |
1590 | MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \ | 1579 | MMIO_F(prefix(GEN6_BSD_RING_BASE), s, f, am, rm, d, r, w); \ |
1591 | MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \ | 1580 | MMIO_F(prefix(VEBOX_RING_BASE), s, f, am, rm, d, r, w); \ |
1581 | if (HAS_BSD2(dev_priv)) \ | ||
1582 | MMIO_F(prefix(GEN8_BSD2_RING_BASE), s, f, am, rm, d, r, w); \ | ||
1592 | } while (0) | 1583 | } while (0) |
1593 | 1584 | ||
1594 | #define MMIO_RING_D(prefix, d) \ | 1585 | #define MMIO_RING_D(prefix, d) \ |
@@ -1635,10 +1626,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) | |||
1635 | #undef RING_REG | 1626 | #undef RING_REG |
1636 | 1627 | ||
1637 | #define RING_REG(base) (base + 0x6c) | 1628 | #define RING_REG(base) (base + 0x6c) |
1638 | MMIO_RING_DFH(RING_REG, D_ALL, 0, instdone_mmio_read, NULL); | 1629 | MMIO_RING_DFH(RING_REG, D_ALL, 0, mmio_read_from_hw, NULL); |
1639 | MMIO_DH(RING_REG(GEN8_BSD2_RING_BASE), D_ALL, instdone_mmio_read, NULL); | ||
1640 | #undef RING_REG | 1630 | #undef RING_REG |
1641 | MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, instdone_mmio_read, NULL); | 1631 | MMIO_DH(GEN7_SC_INSTDONE, D_BDW_PLUS, mmio_read_from_hw, NULL); |
1642 | 1632 | ||
1643 | MMIO_GM_RDR(0x2148, D_ALL, NULL, NULL); | 1633 | MMIO_GM_RDR(0x2148, D_ALL, NULL, NULL); |
1644 | MMIO_GM_RDR(CCID, D_ALL, NULL, NULL); | 1634 | MMIO_GM_RDR(CCID, D_ALL, NULL, NULL); |
@@ -1648,7 +1638,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) | |||
1648 | MMIO_RING_DFH(RING_TAIL, D_ALL, F_CMD_ACCESS, NULL, NULL); | 1638 | MMIO_RING_DFH(RING_TAIL, D_ALL, F_CMD_ACCESS, NULL, NULL); |
1649 | MMIO_RING_DFH(RING_HEAD, D_ALL, F_CMD_ACCESS, NULL, NULL); | 1639 | MMIO_RING_DFH(RING_HEAD, D_ALL, F_CMD_ACCESS, NULL, NULL); |
1650 | MMIO_RING_DFH(RING_CTL, D_ALL, F_CMD_ACCESS, NULL, NULL); | 1640 | MMIO_RING_DFH(RING_CTL, D_ALL, F_CMD_ACCESS, NULL, NULL); |
1651 | MMIO_RING_DFH(RING_ACTHD, D_ALL, F_CMD_ACCESS, NULL, NULL); | 1641 | MMIO_RING_DFH(RING_ACTHD, D_ALL, F_CMD_ACCESS, mmio_read_from_hw, NULL); |
1652 | MMIO_RING_GM_RDR(RING_START, D_ALL, NULL, NULL); | 1642 | MMIO_RING_GM_RDR(RING_START, D_ALL, NULL, NULL); |
1653 | 1643 | ||
1654 | /* RING MODE */ | 1644 | /* RING MODE */ |
@@ -1662,9 +1652,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) | |||
1662 | MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK | F_CMD_ACCESS, | 1652 | MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK | F_CMD_ACCESS, |
1663 | NULL, NULL); | 1653 | NULL, NULL); |
1664 | MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS, | 1654 | MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS, |
1665 | ring_timestamp_mmio_read, NULL); | 1655 | mmio_read_from_hw, NULL); |
1666 | MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS, | 1656 | MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS, |
1667 | ring_timestamp_mmio_read, NULL); | 1657 | mmio_read_from_hw, NULL); |
1668 | 1658 | ||
1669 | MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); | 1659 | MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); |
1670 | MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK | F_CMD_ACCESS, | 1660 | MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK | F_CMD_ACCESS, |
@@ -2411,9 +2401,6 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) | |||
2411 | struct drm_i915_private *dev_priv = gvt->dev_priv; | 2401 | struct drm_i915_private *dev_priv = gvt->dev_priv; |
2412 | int ret; | 2402 | int ret; |
2413 | 2403 | ||
2414 | MMIO_DFH(RING_IMR(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, NULL, | ||
2415 | intel_vgpu_reg_imr_handler); | ||
2416 | |||
2417 | MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); | 2404 | MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); |
2418 | MMIO_DH(GEN8_GT_IER(0), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler); | 2405 | MMIO_DH(GEN8_GT_IER(0), D_BDW_PLUS, NULL, intel_vgpu_reg_ier_handler); |
2419 | MMIO_DH(GEN8_GT_IIR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); | 2406 | MMIO_DH(GEN8_GT_IIR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_iir_handler); |
@@ -2476,68 +2463,34 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) | |||
2476 | MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL, | 2463 | MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL, |
2477 | intel_vgpu_reg_master_irq_handler); | 2464 | intel_vgpu_reg_master_irq_handler); |
2478 | 2465 | ||
2479 | MMIO_DFH(RING_HWSTAM(GEN8_BSD2_RING_BASE), D_BDW_PLUS, | 2466 | MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, F_CMD_ACCESS, |
2480 | F_CMD_ACCESS, NULL, NULL); | 2467 | mmio_read_from_hw, NULL); |
2481 | MMIO_DFH(0x1c134, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | ||
2482 | |||
2483 | MMIO_DFH(RING_TAIL(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, | ||
2484 | NULL, NULL); | ||
2485 | MMIO_DFH(RING_HEAD(GEN8_BSD2_RING_BASE), D_BDW_PLUS, | ||
2486 | F_CMD_ACCESS, NULL, NULL); | ||
2487 | MMIO_GM_RDR(RING_START(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL); | ||
2488 | MMIO_DFH(RING_CTL(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, | ||
2489 | NULL, NULL); | ||
2490 | MMIO_DFH(RING_ACTHD(GEN8_BSD2_RING_BASE), D_BDW_PLUS, | ||
2491 | F_CMD_ACCESS, NULL, NULL); | ||
2492 | MMIO_DFH(RING_ACTHD_UDW(GEN8_BSD2_RING_BASE), D_BDW_PLUS, | ||
2493 | F_CMD_ACCESS, NULL, NULL); | ||
2494 | MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, | ||
2495 | ring_mode_mmio_write); | ||
2496 | MMIO_DFH(RING_MI_MODE(GEN8_BSD2_RING_BASE), D_BDW_PLUS, | ||
2497 | F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); | ||
2498 | MMIO_DFH(RING_INSTPM(GEN8_BSD2_RING_BASE), D_BDW_PLUS, | ||
2499 | F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); | ||
2500 | MMIO_DFH(RING_TIMESTAMP(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, | ||
2501 | ring_timestamp_mmio_read, NULL); | ||
2502 | |||
2503 | MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | ||
2504 | 2468 | ||
2505 | #define RING_REG(base) (base + 0xd0) | 2469 | #define RING_REG(base) (base + 0xd0) |
2506 | MMIO_RING_F(RING_REG, 4, F_RO, 0, | 2470 | MMIO_RING_F(RING_REG, 4, F_RO, 0, |
2507 | ~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL, | 2471 | ~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL, |
2508 | ring_reset_ctl_write); | 2472 | ring_reset_ctl_write); |
2509 | MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO, 0, | ||
2510 | ~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL, | ||
2511 | ring_reset_ctl_write); | ||
2512 | #undef RING_REG | 2473 | #undef RING_REG |
2513 | 2474 | ||
2514 | #define RING_REG(base) (base + 0x230) | 2475 | #define RING_REG(base) (base + 0x230) |
2515 | MMIO_RING_DFH(RING_REG, D_BDW_PLUS, 0, NULL, elsp_mmio_write); | 2476 | MMIO_RING_DFH(RING_REG, D_BDW_PLUS, 0, NULL, elsp_mmio_write); |
2516 | MMIO_DH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, elsp_mmio_write); | ||
2517 | #undef RING_REG | 2477 | #undef RING_REG |
2518 | 2478 | ||
2519 | #define RING_REG(base) (base + 0x234) | 2479 | #define RING_REG(base) (base + 0x234) |
2520 | MMIO_RING_F(RING_REG, 8, F_RO | F_CMD_ACCESS, 0, ~0, D_BDW_PLUS, | 2480 | MMIO_RING_F(RING_REG, 8, F_RO | F_CMD_ACCESS, 0, ~0, D_BDW_PLUS, |
2521 | NULL, NULL); | 2481 | NULL, NULL); |
2522 | MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO | F_CMD_ACCESS, 0, | ||
2523 | ~0LL, D_BDW_PLUS, NULL, NULL); | ||
2524 | #undef RING_REG | 2482 | #undef RING_REG |
2525 | 2483 | ||
2526 | #define RING_REG(base) (base + 0x244) | 2484 | #define RING_REG(base) (base + 0x244) |
2527 | MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); | 2485 | MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); |
2528 | MMIO_DFH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, | ||
2529 | NULL, NULL); | ||
2530 | #undef RING_REG | 2486 | #undef RING_REG |
2531 | 2487 | ||
2532 | #define RING_REG(base) (base + 0x370) | 2488 | #define RING_REG(base) (base + 0x370) |
2533 | MMIO_RING_F(RING_REG, 48, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL); | 2489 | MMIO_RING_F(RING_REG, 48, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL); |
2534 | MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 48, F_RO, 0, ~0, D_BDW_PLUS, | ||
2535 | NULL, NULL); | ||
2536 | #undef RING_REG | 2490 | #undef RING_REG |
2537 | 2491 | ||
2538 | #define RING_REG(base) (base + 0x3a0) | 2492 | #define RING_REG(base) (base + 0x3a0) |
2539 | MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_MODE_MASK, NULL, NULL); | 2493 | MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_MODE_MASK, NULL, NULL); |
2540 | MMIO_DFH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK, NULL, NULL); | ||
2541 | #undef RING_REG | 2494 | #undef RING_REG |
2542 | 2495 | ||
2543 | MMIO_D(PIPEMISC(PIPE_A), D_BDW_PLUS); | 2496 | MMIO_D(PIPEMISC(PIPE_A), D_BDW_PLUS); |
@@ -2557,11 +2510,9 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) | |||
2557 | 2510 | ||
2558 | #define RING_REG(base) (base + 0x270) | 2511 | #define RING_REG(base) (base + 0x270) |
2559 | MMIO_RING_F(RING_REG, 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL); | 2512 | MMIO_RING_F(RING_REG, 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL); |
2560 | MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL); | ||
2561 | #undef RING_REG | 2513 | #undef RING_REG |
2562 | 2514 | ||
2563 | MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL); | 2515 | MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL); |
2564 | MMIO_GM_RDR(RING_HWS_PGA(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL); | ||
2565 | 2516 | ||
2566 | MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); | 2517 | MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); |
2567 | 2518 | ||
@@ -2849,7 +2800,6 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) | |||
2849 | MMIO_D(0x65f08, D_SKL | D_KBL); | 2800 | MMIO_D(0x65f08, D_SKL | D_KBL); |
2850 | MMIO_D(0x320f0, D_SKL | D_KBL); | 2801 | MMIO_D(0x320f0, D_SKL | D_KBL); |
2851 | 2802 | ||
2852 | MMIO_DFH(_REG_VCS2_EXCC, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); | ||
2853 | MMIO_D(0x70034, D_SKL_PLUS); | 2803 | MMIO_D(0x70034, D_SKL_PLUS); |
2854 | MMIO_D(0x71034, D_SKL_PLUS); | 2804 | MMIO_D(0x71034, D_SKL_PLUS); |
2855 | MMIO_D(0x72034, D_SKL_PLUS); | 2805 | MMIO_D(0x72034, D_SKL_PLUS); |
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h index fbd023a16f18..7d01c77a0f7a 100644 --- a/drivers/gpu/drm/i915/gvt/reg.h +++ b/drivers/gpu/drm/i915/gvt/reg.h | |||
@@ -54,9 +54,6 @@ | |||
54 | 54 | ||
55 | #define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B) | 55 | #define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B) |
56 | 56 | ||
57 | #define _REG_VECS_EXCC 0x1A028 | ||
58 | #define _REG_VCS2_EXCC 0x1c028 | ||
59 | |||
60 | #define _REG_701C0(pipe, plane) (0x701c0 + pipe * 0x1000 + (plane - 1) * 0x100) | 57 | #define _REG_701C0(pipe, plane) (0x701c0 + pipe * 0x1000 + (plane - 1) * 0x100) |
61 | #define _REG_701C4(pipe, plane) (0x701c4 + pipe * 0x1000 + (plane - 1) * 0x100) | 58 | #define _REG_701C4(pipe, plane) (0x701c4 + pipe * 0x1000 + (plane - 1) * 0x100) |
62 | 59 | ||
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c index 436377da41ba..03532dfc0cd5 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.c +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c | |||
@@ -308,20 +308,8 @@ static int tbs_sched_init_vgpu(struct intel_vgpu *vgpu) | |||
308 | 308 | ||
309 | static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu) | 309 | static void tbs_sched_clean_vgpu(struct intel_vgpu *vgpu) |
310 | { | 310 | { |
311 | struct intel_gvt_workload_scheduler *scheduler = &vgpu->gvt->scheduler; | ||
312 | int ring_id; | ||
313 | |||
314 | kfree(vgpu->sched_data); | 311 | kfree(vgpu->sched_data); |
315 | vgpu->sched_data = NULL; | 312 | vgpu->sched_data = NULL; |
316 | |||
317 | spin_lock_bh(&scheduler->mmio_context_lock); | ||
318 | for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) { | ||
319 | if (scheduler->engine_owner[ring_id] == vgpu) { | ||
320 | intel_gvt_switch_mmio(vgpu, NULL, ring_id); | ||
321 | scheduler->engine_owner[ring_id] = NULL; | ||
322 | } | ||
323 | } | ||
324 | spin_unlock_bh(&scheduler->mmio_context_lock); | ||
325 | } | 313 | } |
326 | 314 | ||
327 | static void tbs_sched_start_schedule(struct intel_vgpu *vgpu) | 315 | static void tbs_sched_start_schedule(struct intel_vgpu *vgpu) |
@@ -388,6 +376,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) | |||
388 | { | 376 | { |
389 | struct intel_gvt_workload_scheduler *scheduler = | 377 | struct intel_gvt_workload_scheduler *scheduler = |
390 | &vgpu->gvt->scheduler; | 378 | &vgpu->gvt->scheduler; |
379 | int ring_id; | ||
391 | 380 | ||
392 | gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id); | 381 | gvt_dbg_core("vgpu%d: stop schedule\n", vgpu->id); |
393 | 382 | ||
@@ -401,4 +390,13 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) | |||
401 | scheduler->need_reschedule = true; | 390 | scheduler->need_reschedule = true; |
402 | scheduler->current_vgpu = NULL; | 391 | scheduler->current_vgpu = NULL; |
403 | } | 392 | } |
393 | |||
394 | spin_lock_bh(&scheduler->mmio_context_lock); | ||
395 | for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) { | ||
396 | if (scheduler->engine_owner[ring_id] == vgpu) { | ||
397 | intel_gvt_switch_mmio(vgpu, NULL, ring_id); | ||
398 | scheduler->engine_owner[ring_id] = NULL; | ||
399 | } | ||
400 | } | ||
401 | spin_unlock_bh(&scheduler->mmio_context_lock); | ||
404 | } | 402 | } |
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h index 0d431a968a32..93a49eb0209e 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.h +++ b/drivers/gpu/drm/i915/gvt/scheduler.h | |||
@@ -68,6 +68,7 @@ struct shadow_indirect_ctx { | |||
68 | struct shadow_per_ctx { | 68 | struct shadow_per_ctx { |
69 | unsigned long guest_gma; | 69 | unsigned long guest_gma; |
70 | unsigned long shadow_gma; | 70 | unsigned long shadow_gma; |
71 | unsigned valid; | ||
71 | }; | 72 | }; |
72 | 73 | ||
73 | struct intel_shadow_wa_ctx { | 74 | struct intel_shadow_wa_ctx { |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 19404c96eeb1..32e857dc507c 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -2657,6 +2657,9 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj, | |||
2657 | if (READ_ONCE(obj->mm.pages)) | 2657 | if (READ_ONCE(obj->mm.pages)) |
2658 | return -ENODEV; | 2658 | return -ENODEV; |
2659 | 2659 | ||
2660 | if (obj->mm.madv != I915_MADV_WILLNEED) | ||
2661 | return -EFAULT; | ||
2662 | |||
2660 | /* Before the pages are instantiated the object is treated as being | 2663 | /* Before the pages are instantiated the object is treated as being |
2661 | * in the CPU domain. The pages will be clflushed as required before | 2664 | * in the CPU domain. The pages will be clflushed as required before |
2662 | * use, and we can freely write into the pages directly. If userspace | 2665 | * use, and we can freely write into the pages directly. If userspace |
@@ -3013,10 +3016,15 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv) | |||
3013 | 3016 | ||
3014 | static void nop_submit_request(struct drm_i915_gem_request *request) | 3017 | static void nop_submit_request(struct drm_i915_gem_request *request) |
3015 | { | 3018 | { |
3019 | unsigned long flags; | ||
3020 | |||
3016 | GEM_BUG_ON(!i915_terminally_wedged(&request->i915->gpu_error)); | 3021 | GEM_BUG_ON(!i915_terminally_wedged(&request->i915->gpu_error)); |
3017 | dma_fence_set_error(&request->fence, -EIO); | 3022 | dma_fence_set_error(&request->fence, -EIO); |
3018 | i915_gem_request_submit(request); | 3023 | |
3024 | spin_lock_irqsave(&request->engine->timeline->lock, flags); | ||
3025 | __i915_gem_request_submit(request); | ||
3019 | intel_engine_init_global_seqno(request->engine, request->global_seqno); | 3026 | intel_engine_init_global_seqno(request->engine, request->global_seqno); |
3027 | spin_unlock_irqrestore(&request->engine->timeline->lock, flags); | ||
3020 | } | 3028 | } |
3021 | 3029 | ||
3022 | static void engine_set_wedged(struct intel_engine_cs *engine) | 3030 | static void engine_set_wedged(struct intel_engine_cs *engine) |
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 4df039ef2ce3..e161d383b526 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c | |||
@@ -33,21 +33,20 @@ | |||
33 | #include "intel_drv.h" | 33 | #include "intel_drv.h" |
34 | #include "i915_trace.h" | 34 | #include "i915_trace.h" |
35 | 35 | ||
36 | static bool ggtt_is_idle(struct drm_i915_private *dev_priv) | 36 | static bool ggtt_is_idle(struct drm_i915_private *i915) |
37 | { | 37 | { |
38 | struct i915_ggtt *ggtt = &dev_priv->ggtt; | 38 | struct intel_engine_cs *engine; |
39 | struct intel_engine_cs *engine; | 39 | enum intel_engine_id id; |
40 | enum intel_engine_id id; | ||
41 | 40 | ||
42 | for_each_engine(engine, dev_priv, id) { | 41 | if (i915->gt.active_requests) |
43 | struct intel_timeline *tl; | 42 | return false; |
44 | 43 | ||
45 | tl = &ggtt->base.timeline.engine[engine->id]; | 44 | for_each_engine(engine, i915, id) { |
46 | if (i915_gem_active_isset(&tl->last_request)) | 45 | if (engine->last_retired_context != i915->kernel_context) |
47 | return false; | 46 | return false; |
48 | } | 47 | } |
49 | 48 | ||
50 | return true; | 49 | return true; |
51 | } | 50 | } |
52 | 51 | ||
53 | static int ggtt_flush(struct drm_i915_private *i915) | 52 | static int ggtt_flush(struct drm_i915_private *i915) |
@@ -157,7 +156,8 @@ i915_gem_evict_something(struct i915_address_space *vm, | |||
157 | min_size, alignment, cache_level, | 156 | min_size, alignment, cache_level, |
158 | start, end, mode); | 157 | start, end, mode); |
159 | 158 | ||
160 | /* Retire before we search the active list. Although we have | 159 | /* |
160 | * Retire before we search the active list. Although we have | ||
161 | * reasonable accuracy in our retirement lists, we may have | 161 | * reasonable accuracy in our retirement lists, we may have |
162 | * a stray pin (preventing eviction) that can only be resolved by | 162 | * a stray pin (preventing eviction) that can only be resolved by |
163 | * retiring. | 163 | * retiring. |
@@ -182,7 +182,8 @@ search_again: | |||
182 | BUG_ON(ret); | 182 | BUG_ON(ret); |
183 | } | 183 | } |
184 | 184 | ||
185 | /* Can we unpin some objects such as idle hw contents, | 185 | /* |
186 | * Can we unpin some objects such as idle hw contents, | ||
186 | * or pending flips? But since only the GGTT has global entries | 187 | * or pending flips? But since only the GGTT has global entries |
187 | * such as scanouts, rinbuffers and contexts, we can skip the | 188 | * such as scanouts, rinbuffers and contexts, we can skip the |
188 | * purge when inspecting per-process local address spaces. | 189 | * purge when inspecting per-process local address spaces. |
@@ -190,19 +191,33 @@ search_again: | |||
190 | if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK) | 191 | if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK) |
191 | return -ENOSPC; | 192 | return -ENOSPC; |
192 | 193 | ||
193 | if (ggtt_is_idle(dev_priv)) { | 194 | /* |
194 | /* If we still have pending pageflip completions, drop | 195 | * Not everything in the GGTT is tracked via VMA using |
195 | * back to userspace to give our workqueues time to | 196 | * i915_vma_move_to_active(), otherwise we could evict as required |
196 | * acquire our locks and unpin the old scanouts. | 197 | * with minimal stalling. Instead we are forced to idle the GPU and |
197 | */ | 198 | * explicitly retire outstanding requests which will then remove |
198 | return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC; | 199 | * the pinning for active objects such as contexts and ring, |
199 | } | 200 | * enabling us to evict them on the next iteration. |
201 | * | ||
202 | * To ensure that all user contexts are evictable, we perform | ||
203 | * a switch to the perma-pinned kernel context. This all also gives | ||
204 | * us a termination condition, when the last retired context is | ||
205 | * the kernel's there is no more we can evict. | ||
206 | */ | ||
207 | if (!ggtt_is_idle(dev_priv)) { | ||
208 | ret = ggtt_flush(dev_priv); | ||
209 | if (ret) | ||
210 | return ret; | ||
200 | 211 | ||
201 | ret = ggtt_flush(dev_priv); | 212 | goto search_again; |
202 | if (ret) | 213 | } |
203 | return ret; | ||
204 | 214 | ||
205 | goto search_again; | 215 | /* |
216 | * If we still have pending pageflip completions, drop | ||
217 | * back to userspace to give our workqueues time to | ||
218 | * acquire our locks and unpin the old scanouts. | ||
219 | */ | ||
220 | return intel_has_pending_fb_unpin(dev_priv) ? -EAGAIN : -ENOSPC; | ||
206 | 221 | ||
207 | found: | 222 | found: |
208 | /* drm_mm doesn't allow any other other operations while | 223 | /* drm_mm doesn't allow any other other operations while |
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 94185d610673..370b9d248fed 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c | |||
@@ -2537,6 +2537,10 @@ static const struct file_operations fops = { | |||
2537 | .poll = i915_perf_poll, | 2537 | .poll = i915_perf_poll, |
2538 | .read = i915_perf_read, | 2538 | .read = i915_perf_read, |
2539 | .unlocked_ioctl = i915_perf_ioctl, | 2539 | .unlocked_ioctl = i915_perf_ioctl, |
2540 | /* Our ioctl have no arguments, so it's safe to use the same function | ||
2541 | * to handle 32bits compatibility. | ||
2542 | */ | ||
2543 | .compat_ioctl = i915_perf_ioctl, | ||
2540 | }; | 2544 | }; |
2541 | 2545 | ||
2542 | 2546 | ||
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index ed7cd9ee2c2a..c9bcc6c45012 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -6998,6 +6998,7 @@ enum { | |||
6998 | */ | 6998 | */ |
6999 | #define L3_GENERAL_PRIO_CREDITS(x) (((x) >> 1) << 19) | 6999 | #define L3_GENERAL_PRIO_CREDITS(x) (((x) >> 1) << 19) |
7000 | #define L3_HIGH_PRIO_CREDITS(x) (((x) >> 1) << 14) | 7000 | #define L3_HIGH_PRIO_CREDITS(x) (((x) >> 1) << 14) |
7001 | #define L3_PRIO_CREDITS_MASK ((0x1f << 19) | (0x1f << 14)) | ||
7001 | 7002 | ||
7002 | #define GEN7_L3CNTLREG1 _MMIO(0xB01C) | 7003 | #define GEN7_L3CNTLREG1 _MMIO(0xB01C) |
7003 | #define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C | 7004 | #define GEN7_WA_FOR_GEN7_L3_CONTROL 0x3C47FF8C |
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 00c6aee0a9a1..5d4cd3d00564 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
@@ -1240,7 +1240,7 @@ static void parse_ddi_ports(struct drm_i915_private *dev_priv, | |||
1240 | { | 1240 | { |
1241 | enum port port; | 1241 | enum port port; |
1242 | 1242 | ||
1243 | if (!HAS_DDI(dev_priv)) | 1243 | if (!HAS_DDI(dev_priv) && !IS_CHERRYVIEW(dev_priv)) |
1244 | return; | 1244 | return; |
1245 | 1245 | ||
1246 | if (!dev_priv->vbt.child_dev_num) | 1246 | if (!dev_priv->vbt.child_dev_num) |
diff --git a/drivers/gpu/drm/i915/intel_color.c b/drivers/gpu/drm/i915/intel_color.c index ff9ecd211abb..b8315bca852b 100644 --- a/drivers/gpu/drm/i915/intel_color.c +++ b/drivers/gpu/drm/i915/intel_color.c | |||
@@ -74,7 +74,7 @@ | |||
74 | #define I9XX_CSC_COEFF_1_0 \ | 74 | #define I9XX_CSC_COEFF_1_0 \ |
75 | ((7 << 12) | I9XX_CSC_COEFF_FP(CTM_COEFF_1_0, 8)) | 75 | ((7 << 12) | I9XX_CSC_COEFF_FP(CTM_COEFF_1_0, 8)) |
76 | 76 | ||
77 | static bool crtc_state_is_legacy(struct drm_crtc_state *state) | 77 | static bool crtc_state_is_legacy_gamma(struct drm_crtc_state *state) |
78 | { | 78 | { |
79 | return !state->degamma_lut && | 79 | return !state->degamma_lut && |
80 | !state->ctm && | 80 | !state->ctm && |
@@ -288,7 +288,7 @@ static void cherryview_load_csc_matrix(struct drm_crtc_state *state) | |||
288 | } | 288 | } |
289 | 289 | ||
290 | mode = (state->ctm ? CGM_PIPE_MODE_CSC : 0); | 290 | mode = (state->ctm ? CGM_PIPE_MODE_CSC : 0); |
291 | if (!crtc_state_is_legacy(state)) { | 291 | if (!crtc_state_is_legacy_gamma(state)) { |
292 | mode |= (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) | | 292 | mode |= (state->degamma_lut ? CGM_PIPE_MODE_DEGAMMA : 0) | |
293 | (state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0); | 293 | (state->gamma_lut ? CGM_PIPE_MODE_GAMMA : 0); |
294 | } | 294 | } |
@@ -469,7 +469,7 @@ static void broadwell_load_luts(struct drm_crtc_state *state) | |||
469 | struct intel_crtc_state *intel_state = to_intel_crtc_state(state); | 469 | struct intel_crtc_state *intel_state = to_intel_crtc_state(state); |
470 | enum pipe pipe = to_intel_crtc(state->crtc)->pipe; | 470 | enum pipe pipe = to_intel_crtc(state->crtc)->pipe; |
471 | 471 | ||
472 | if (crtc_state_is_legacy(state)) { | 472 | if (crtc_state_is_legacy_gamma(state)) { |
473 | haswell_load_luts(state); | 473 | haswell_load_luts(state); |
474 | return; | 474 | return; |
475 | } | 475 | } |
@@ -529,7 +529,7 @@ static void glk_load_luts(struct drm_crtc_state *state) | |||
529 | 529 | ||
530 | glk_load_degamma_lut(state); | 530 | glk_load_degamma_lut(state); |
531 | 531 | ||
532 | if (crtc_state_is_legacy(state)) { | 532 | if (crtc_state_is_legacy_gamma(state)) { |
533 | haswell_load_luts(state); | 533 | haswell_load_luts(state); |
534 | return; | 534 | return; |
535 | } | 535 | } |
@@ -551,7 +551,7 @@ static void cherryview_load_luts(struct drm_crtc_state *state) | |||
551 | uint32_t i, lut_size; | 551 | uint32_t i, lut_size; |
552 | uint32_t word0, word1; | 552 | uint32_t word0, word1; |
553 | 553 | ||
554 | if (crtc_state_is_legacy(state)) { | 554 | if (crtc_state_is_legacy_gamma(state)) { |
555 | /* Turn off degamma/gamma on CGM block. */ | 555 | /* Turn off degamma/gamma on CGM block. */ |
556 | I915_WRITE(CGM_PIPE_MODE(pipe), | 556 | I915_WRITE(CGM_PIPE_MODE(pipe), |
557 | (state->ctm ? CGM_PIPE_MODE_CSC : 0)); | 557 | (state->ctm ? CGM_PIPE_MODE_CSC : 0)); |
@@ -632,12 +632,10 @@ int intel_color_check(struct drm_crtc *crtc, | |||
632 | return 0; | 632 | return 0; |
633 | 633 | ||
634 | /* | 634 | /* |
635 | * We also allow no degamma lut and a gamma lut at the legacy | 635 | * We also allow no degamma lut/ctm and a gamma lut at the legacy |
636 | * size (256 entries). | 636 | * size (256 entries). |
637 | */ | 637 | */ |
638 | if (!crtc_state->degamma_lut && | 638 | if (crtc_state_is_legacy_gamma(crtc_state)) |
639 | crtc_state->gamma_lut && | ||
640 | crtc_state->gamma_lut->length == LEGACY_LUT_LENGTH) | ||
641 | return 0; | 639 | return 0; |
642 | 640 | ||
643 | return -EINVAL; | 641 | return -EINVAL; |
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 476681d5940c..5e5fe03b638c 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c | |||
@@ -664,8 +664,8 @@ intel_ddi_get_buf_trans_fdi(struct drm_i915_private *dev_priv, | |||
664 | int *n_entries) | 664 | int *n_entries) |
665 | { | 665 | { |
666 | if (IS_BROADWELL(dev_priv)) { | 666 | if (IS_BROADWELL(dev_priv)) { |
667 | *n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi); | 667 | *n_entries = ARRAY_SIZE(bdw_ddi_translations_fdi); |
668 | return hsw_ddi_translations_fdi; | 668 | return bdw_ddi_translations_fdi; |
669 | } else if (IS_HASWELL(dev_priv)) { | 669 | } else if (IS_HASWELL(dev_priv)) { |
670 | *n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi); | 670 | *n_entries = ARRAY_SIZE(hsw_ddi_translations_fdi); |
671 | return hsw_ddi_translations_fdi; | 671 | return hsw_ddi_translations_fdi; |
@@ -2102,8 +2102,7 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder, | |||
2102 | * register writes. | 2102 | * register writes. |
2103 | */ | 2103 | */ |
2104 | val = I915_READ(DPCLKA_CFGCR0); | 2104 | val = I915_READ(DPCLKA_CFGCR0); |
2105 | val &= ~(DPCLKA_CFGCR0_DDI_CLK_OFF(port) | | 2105 | val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port); |
2106 | DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port)); | ||
2107 | I915_WRITE(DPCLKA_CFGCR0, val); | 2106 | I915_WRITE(DPCLKA_CFGCR0, val); |
2108 | } else if (IS_GEN9_BC(dev_priv)) { | 2107 | } else if (IS_GEN9_BC(dev_priv)) { |
2109 | /* DDI -> PLL mapping */ | 2108 | /* DDI -> PLL mapping */ |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 64f7b51ed97c..5c7828c52d12 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -10245,13 +10245,10 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, | |||
10245 | { | 10245 | { |
10246 | struct drm_i915_private *dev_priv = to_i915(dev); | 10246 | struct drm_i915_private *dev_priv = to_i915(dev); |
10247 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 10247 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
10248 | enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; | 10248 | enum transcoder cpu_transcoder; |
10249 | struct drm_display_mode *mode; | 10249 | struct drm_display_mode *mode; |
10250 | struct intel_crtc_state *pipe_config; | 10250 | struct intel_crtc_state *pipe_config; |
10251 | int htot = I915_READ(HTOTAL(cpu_transcoder)); | 10251 | u32 htot, hsync, vtot, vsync; |
10252 | int hsync = I915_READ(HSYNC(cpu_transcoder)); | ||
10253 | int vtot = I915_READ(VTOTAL(cpu_transcoder)); | ||
10254 | int vsync = I915_READ(VSYNC(cpu_transcoder)); | ||
10255 | enum pipe pipe = intel_crtc->pipe; | 10252 | enum pipe pipe = intel_crtc->pipe; |
10256 | 10253 | ||
10257 | mode = kzalloc(sizeof(*mode), GFP_KERNEL); | 10254 | mode = kzalloc(sizeof(*mode), GFP_KERNEL); |
@@ -10279,6 +10276,13 @@ struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, | |||
10279 | i9xx_crtc_clock_get(intel_crtc, pipe_config); | 10276 | i9xx_crtc_clock_get(intel_crtc, pipe_config); |
10280 | 10277 | ||
10281 | mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier; | 10278 | mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier; |
10279 | |||
10280 | cpu_transcoder = pipe_config->cpu_transcoder; | ||
10281 | htot = I915_READ(HTOTAL(cpu_transcoder)); | ||
10282 | hsync = I915_READ(HSYNC(cpu_transcoder)); | ||
10283 | vtot = I915_READ(VTOTAL(cpu_transcoder)); | ||
10284 | vsync = I915_READ(VSYNC(cpu_transcoder)); | ||
10285 | |||
10282 | mode->hdisplay = (htot & 0xffff) + 1; | 10286 | mode->hdisplay = (htot & 0xffff) + 1; |
10283 | mode->htotal = ((htot & 0xffff0000) >> 16) + 1; | 10287 | mode->htotal = ((htot & 0xffff0000) >> 16) + 1; |
10284 | mode->hsync_start = (hsync & 0xffff) + 1; | 10288 | mode->hsync_start = (hsync & 0xffff) + 1; |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 64134947c0aa..203198659ab2 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -2307,8 +2307,8 @@ static void edp_panel_off(struct intel_dp *intel_dp) | |||
2307 | I915_WRITE(pp_ctrl_reg, pp); | 2307 | I915_WRITE(pp_ctrl_reg, pp); |
2308 | POSTING_READ(pp_ctrl_reg); | 2308 | POSTING_READ(pp_ctrl_reg); |
2309 | 2309 | ||
2310 | intel_dp->panel_power_off_time = ktime_get_boottime(); | ||
2311 | wait_panel_off(intel_dp); | 2310 | wait_panel_off(intel_dp); |
2311 | intel_dp->panel_power_off_time = ktime_get_boottime(); | ||
2312 | 2312 | ||
2313 | /* We got a reference when we enabled the VDD. */ | 2313 | /* We got a reference when we enabled the VDD. */ |
2314 | intel_display_power_put(dev_priv, intel_dp->aux_power_domain); | 2314 | intel_display_power_put(dev_priv, intel_dp->aux_power_domain); |
@@ -5273,7 +5273,7 @@ intel_dp_init_panel_power_sequencer(struct drm_device *dev, | |||
5273 | * seems sufficient to avoid this problem. | 5273 | * seems sufficient to avoid this problem. |
5274 | */ | 5274 | */ |
5275 | if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) { | 5275 | if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) { |
5276 | vbt.t11_t12 = max_t(u16, vbt.t11_t12, 900 * 10); | 5276 | vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10); |
5277 | DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n", | 5277 | DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n", |
5278 | vbt.t11_t12); | 5278 | vbt.t11_t12); |
5279 | } | 5279 | } |
diff --git a/drivers/gpu/drm/i915/intel_dpll_mgr.c b/drivers/gpu/drm/i915/intel_dpll_mgr.c index a2a3d93d67bd..df808a94c511 100644 --- a/drivers/gpu/drm/i915/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/intel_dpll_mgr.c | |||
@@ -1996,7 +1996,7 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv, | |||
1996 | 1996 | ||
1997 | /* 3. Configure DPLL_CFGCR0 */ | 1997 | /* 3. Configure DPLL_CFGCR0 */ |
1998 | /* Avoid touch CFGCR1 if HDMI mode is not enabled */ | 1998 | /* Avoid touch CFGCR1 if HDMI mode is not enabled */ |
1999 | if (pll->state.hw_state.cfgcr0 & DPLL_CTRL1_HDMI_MODE(pll->id)) { | 1999 | if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) { |
2000 | val = pll->state.hw_state.cfgcr1; | 2000 | val = pll->state.hw_state.cfgcr1; |
2001 | I915_WRITE(CNL_DPLL_CFGCR1(pll->id), val); | 2001 | I915_WRITE(CNL_DPLL_CFGCR1(pll->id), val); |
2002 | /* 4. Reab back to ensure writes completed */ | 2002 | /* 4. Reab back to ensure writes completed */ |
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index 9ab596941372..3c2d9cf22ed5 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c | |||
@@ -1048,9 +1048,12 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine) | |||
1048 | } | 1048 | } |
1049 | 1049 | ||
1050 | /* WaProgramL3SqcReg1DefaultForPerf:bxt */ | 1050 | /* WaProgramL3SqcReg1DefaultForPerf:bxt */ |
1051 | if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) | 1051 | if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) { |
1052 | I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) | | 1052 | u32 val = I915_READ(GEN8_L3SQCREG1); |
1053 | L3_HIGH_PRIO_CREDITS(2)); | 1053 | val &= ~L3_PRIO_CREDITS_MASK; |
1054 | val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2); | ||
1055 | I915_WRITE(GEN8_L3SQCREG1, val); | ||
1056 | } | ||
1054 | 1057 | ||
1055 | /* WaToEnableHwFixForPushConstHWBug:bxt */ | 1058 | /* WaToEnableHwFixForPushConstHWBug:bxt */ |
1056 | if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER)) | 1059 | if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER)) |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index ed662937ec3c..0a09f8ff6aff 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
@@ -8245,14 +8245,17 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv, | |||
8245 | int high_prio_credits) | 8245 | int high_prio_credits) |
8246 | { | 8246 | { |
8247 | u32 misccpctl; | 8247 | u32 misccpctl; |
8248 | u32 val; | ||
8248 | 8249 | ||
8249 | /* WaTempDisableDOPClkGating:bdw */ | 8250 | /* WaTempDisableDOPClkGating:bdw */ |
8250 | misccpctl = I915_READ(GEN7_MISCCPCTL); | 8251 | misccpctl = I915_READ(GEN7_MISCCPCTL); |
8251 | I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); | 8252 | I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); |
8252 | 8253 | ||
8253 | I915_WRITE(GEN8_L3SQCREG1, | 8254 | val = I915_READ(GEN8_L3SQCREG1); |
8254 | L3_GENERAL_PRIO_CREDITS(general_prio_credits) | | 8255 | val &= ~L3_PRIO_CREDITS_MASK; |
8255 | L3_HIGH_PRIO_CREDITS(high_prio_credits)); | 8256 | val |= L3_GENERAL_PRIO_CREDITS(general_prio_credits); |
8257 | val |= L3_HIGH_PRIO_CREDITS(high_prio_credits); | ||
8258 | I915_WRITE(GEN8_L3SQCREG1, val); | ||
8256 | 8259 | ||
8257 | /* | 8260 | /* |
8258 | * Wait at least 100 clocks before re-enabling clock gating. | 8261 | * Wait at least 100 clocks before re-enabling clock gating. |
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index b3a087cb0860..49577eba8e7e 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c | |||
@@ -368,7 +368,7 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv, | |||
368 | { | 368 | { |
369 | enum i915_power_well_id id = power_well->id; | 369 | enum i915_power_well_id id = power_well->id; |
370 | bool wait_fuses = power_well->hsw.has_fuses; | 370 | bool wait_fuses = power_well->hsw.has_fuses; |
371 | enum skl_power_gate pg; | 371 | enum skl_power_gate uninitialized_var(pg); |
372 | u32 val; | 372 | u32 val; |
373 | 373 | ||
374 | if (wait_fuses) { | 374 | if (wait_fuses) { |
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index dbb31a014419..deaf869374ea 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c | |||
@@ -248,7 +248,7 @@ disable_clks: | |||
248 | clk_disable_unprepare(ahb_clk); | 248 | clk_disable_unprepare(ahb_clk); |
249 | disable_gdsc: | 249 | disable_gdsc: |
250 | regulator_disable(gdsc_reg); | 250 | regulator_disable(gdsc_reg); |
251 | pm_runtime_put_autosuspend(dev); | 251 | pm_runtime_put_sync(dev); |
252 | put_clk: | 252 | put_clk: |
253 | clk_put(ahb_clk); | 253 | clk_put(ahb_clk); |
254 | put_gdsc: | 254 | put_gdsc: |
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c index c2bdad88447e..824067d2d427 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c | |||
@@ -83,6 +83,8 @@ const struct mdp5_cfg_hw msm8x74v1_config = { | |||
83 | .caps = MDP_LM_CAP_WB }, | 83 | .caps = MDP_LM_CAP_WB }, |
84 | }, | 84 | }, |
85 | .nb_stages = 5, | 85 | .nb_stages = 5, |
86 | .max_width = 2048, | ||
87 | .max_height = 0xFFFF, | ||
86 | }, | 88 | }, |
87 | .dspp = { | 89 | .dspp = { |
88 | .count = 3, | 90 | .count = 3, |
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c index 6fcb58ab718c..440977677001 100644 --- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c +++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c | |||
@@ -804,8 +804,6 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc, | |||
804 | 804 | ||
805 | spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); | 805 | spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags); |
806 | 806 | ||
807 | pm_runtime_put_autosuspend(&pdev->dev); | ||
808 | |||
809 | set_cursor: | 807 | set_cursor: |
810 | ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable); | 808 | ret = mdp5_ctl_set_cursor(ctl, pipeline, 0, cursor_enable); |
811 | if (ret) { | 809 | if (ret) { |
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index f15821a0d900..ea5bb0e1632c 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c | |||
@@ -610,17 +610,6 @@ int msm_gem_sync_object(struct drm_gem_object *obj, | |||
610 | struct dma_fence *fence; | 610 | struct dma_fence *fence; |
611 | int i, ret; | 611 | int i, ret; |
612 | 612 | ||
613 | if (!exclusive) { | ||
614 | /* NOTE: _reserve_shared() must happen before _add_shared_fence(), | ||
615 | * which makes this a slightly strange place to call it. OTOH this | ||
616 | * is a convenient can-fail point to hook it in. (And similar to | ||
617 | * how etnaviv and nouveau handle this.) | ||
618 | */ | ||
619 | ret = reservation_object_reserve_shared(msm_obj->resv); | ||
620 | if (ret) | ||
621 | return ret; | ||
622 | } | ||
623 | |||
624 | fobj = reservation_object_get_list(msm_obj->resv); | 613 | fobj = reservation_object_get_list(msm_obj->resv); |
625 | if (!fobj || (fobj->shared_count == 0)) { | 614 | if (!fobj || (fobj->shared_count == 0)) { |
626 | fence = reservation_object_get_excl(msm_obj->resv); | 615 | fence = reservation_object_get_excl(msm_obj->resv); |
@@ -1045,10 +1034,10 @@ static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size, | |||
1045 | } | 1034 | } |
1046 | 1035 | ||
1047 | vaddr = msm_gem_get_vaddr(obj); | 1036 | vaddr = msm_gem_get_vaddr(obj); |
1048 | if (!vaddr) { | 1037 | if (IS_ERR(vaddr)) { |
1049 | msm_gem_put_iova(obj, aspace); | 1038 | msm_gem_put_iova(obj, aspace); |
1050 | drm_gem_object_unreference(obj); | 1039 | drm_gem_object_unreference(obj); |
1051 | return ERR_PTR(-ENOMEM); | 1040 | return ERR_CAST(vaddr); |
1052 | } | 1041 | } |
1053 | 1042 | ||
1054 | if (bo) | 1043 | if (bo) |
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 5d0a75d4b249..93535cac0676 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c | |||
@@ -221,7 +221,7 @@ fail: | |||
221 | return ret; | 221 | return ret; |
222 | } | 222 | } |
223 | 223 | ||
224 | static int submit_fence_sync(struct msm_gem_submit *submit) | 224 | static int submit_fence_sync(struct msm_gem_submit *submit, bool no_implicit) |
225 | { | 225 | { |
226 | int i, ret = 0; | 226 | int i, ret = 0; |
227 | 227 | ||
@@ -229,6 +229,20 @@ static int submit_fence_sync(struct msm_gem_submit *submit) | |||
229 | struct msm_gem_object *msm_obj = submit->bos[i].obj; | 229 | struct msm_gem_object *msm_obj = submit->bos[i].obj; |
230 | bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE; | 230 | bool write = submit->bos[i].flags & MSM_SUBMIT_BO_WRITE; |
231 | 231 | ||
232 | if (!write) { | ||
233 | /* NOTE: _reserve_shared() must happen before | ||
234 | * _add_shared_fence(), which makes this a slightly | ||
235 | * strange place to call it. OTOH this is a | ||
236 | * convenient can-fail point to hook it in. | ||
237 | */ | ||
238 | ret = reservation_object_reserve_shared(msm_obj->resv); | ||
239 | if (ret) | ||
240 | return ret; | ||
241 | } | ||
242 | |||
243 | if (no_implicit) | ||
244 | continue; | ||
245 | |||
232 | ret = msm_gem_sync_object(&msm_obj->base, submit->gpu->fctx, write); | 246 | ret = msm_gem_sync_object(&msm_obj->base, submit->gpu->fctx, write); |
233 | if (ret) | 247 | if (ret) |
234 | break; | 248 | break; |
@@ -451,11 +465,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, | |||
451 | if (ret) | 465 | if (ret) |
452 | goto out; | 466 | goto out; |
453 | 467 | ||
454 | if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) { | 468 | ret = submit_fence_sync(submit, !!(args->flags & MSM_SUBMIT_NO_IMPLICIT)); |
455 | ret = submit_fence_sync(submit); | 469 | if (ret) |
456 | if (ret) | 470 | goto out; |
457 | goto out; | ||
458 | } | ||
459 | 471 | ||
460 | ret = submit_pin_objects(submit); | 472 | ret = submit_pin_objects(submit); |
461 | if (ret) | 473 | if (ret) |
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index ffbff27600e0..6a887032c66a 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c | |||
@@ -718,7 +718,8 @@ void msm_gpu_cleanup(struct msm_gpu *gpu) | |||
718 | msm_gem_put_iova(gpu->rb->bo, gpu->aspace); | 718 | msm_gem_put_iova(gpu->rb->bo, gpu->aspace); |
719 | msm_ringbuffer_destroy(gpu->rb); | 719 | msm_ringbuffer_destroy(gpu->rb); |
720 | } | 720 | } |
721 | if (gpu->aspace) { | 721 | |
722 | if (!IS_ERR_OR_NULL(gpu->aspace)) { | ||
722 | gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu, | 723 | gpu->aspace->mmu->funcs->detach(gpu->aspace->mmu, |
723 | NULL, 0); | 724 | NULL, 0); |
724 | msm_gem_address_space_put(gpu->aspace); | 725 | msm_gem_address_space_put(gpu->aspace); |
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c index 0366b8092f97..ec56794ad039 100644 --- a/drivers/gpu/drm/msm/msm_rd.c +++ b/drivers/gpu/drm/msm/msm_rd.c | |||
@@ -111,10 +111,14 @@ static void rd_write(struct msm_rd_state *rd, const void *buf, int sz) | |||
111 | 111 | ||
112 | wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0); | 112 | wait_event(rd->fifo_event, circ_space(&rd->fifo) > 0); |
113 | 113 | ||
114 | /* Note that smp_load_acquire() is not strictly required | ||
115 | * as CIRC_SPACE_TO_END() does not access the tail more | ||
116 | * than once. | ||
117 | */ | ||
114 | n = min(sz, circ_space_to_end(&rd->fifo)); | 118 | n = min(sz, circ_space_to_end(&rd->fifo)); |
115 | memcpy(fptr, ptr, n); | 119 | memcpy(fptr, ptr, n); |
116 | 120 | ||
117 | fifo->head = (fifo->head + n) & (BUF_SZ - 1); | 121 | smp_store_release(&fifo->head, (fifo->head + n) & (BUF_SZ - 1)); |
118 | sz -= n; | 122 | sz -= n; |
119 | ptr += n; | 123 | ptr += n; |
120 | 124 | ||
@@ -145,13 +149,17 @@ static ssize_t rd_read(struct file *file, char __user *buf, | |||
145 | if (ret) | 149 | if (ret) |
146 | goto out; | 150 | goto out; |
147 | 151 | ||
152 | /* Note that smp_load_acquire() is not strictly required | ||
153 | * as CIRC_CNT_TO_END() does not access the head more than | ||
154 | * once. | ||
155 | */ | ||
148 | n = min_t(int, sz, circ_count_to_end(&rd->fifo)); | 156 | n = min_t(int, sz, circ_count_to_end(&rd->fifo)); |
149 | if (copy_to_user(buf, fptr, n)) { | 157 | if (copy_to_user(buf, fptr, n)) { |
150 | ret = -EFAULT; | 158 | ret = -EFAULT; |
151 | goto out; | 159 | goto out; |
152 | } | 160 | } |
153 | 161 | ||
154 | fifo->tail = (fifo->tail + n) & (BUF_SZ - 1); | 162 | smp_store_release(&fifo->tail, (fifo->tail + n) & (BUF_SZ - 1)); |
155 | *ppos += n; | 163 | *ppos += n; |
156 | 164 | ||
157 | wake_up_all(&rd->fifo_event); | 165 | wake_up_all(&rd->fifo_event); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index f7707849bb53..2b12d82aac15 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c | |||
@@ -223,7 +223,7 @@ void | |||
223 | nouveau_fbcon_accel_save_disable(struct drm_device *dev) | 223 | nouveau_fbcon_accel_save_disable(struct drm_device *dev) |
224 | { | 224 | { |
225 | struct nouveau_drm *drm = nouveau_drm(dev); | 225 | struct nouveau_drm *drm = nouveau_drm(dev); |
226 | if (drm->fbcon) { | 226 | if (drm->fbcon && drm->fbcon->helper.fbdev) { |
227 | drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags; | 227 | drm->fbcon->saved_flags = drm->fbcon->helper.fbdev->flags; |
228 | drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED; | 228 | drm->fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED; |
229 | } | 229 | } |
@@ -233,7 +233,7 @@ void | |||
233 | nouveau_fbcon_accel_restore(struct drm_device *dev) | 233 | nouveau_fbcon_accel_restore(struct drm_device *dev) |
234 | { | 234 | { |
235 | struct nouveau_drm *drm = nouveau_drm(dev); | 235 | struct nouveau_drm *drm = nouveau_drm(dev); |
236 | if (drm->fbcon) { | 236 | if (drm->fbcon && drm->fbcon->helper.fbdev) { |
237 | drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags; | 237 | drm->fbcon->helper.fbdev->flags = drm->fbcon->saved_flags; |
238 | } | 238 | } |
239 | } | 239 | } |
@@ -245,7 +245,8 @@ nouveau_fbcon_accel_fini(struct drm_device *dev) | |||
245 | struct nouveau_fbdev *fbcon = drm->fbcon; | 245 | struct nouveau_fbdev *fbcon = drm->fbcon; |
246 | if (fbcon && drm->channel) { | 246 | if (fbcon && drm->channel) { |
247 | console_lock(); | 247 | console_lock(); |
248 | fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED; | 248 | if (fbcon->helper.fbdev) |
249 | fbcon->helper.fbdev->flags |= FBINFO_HWACCEL_DISABLED; | ||
249 | console_unlock(); | 250 | console_unlock(); |
250 | nouveau_channel_idle(drm->channel); | 251 | nouveau_channel_idle(drm->channel); |
251 | nvif_object_fini(&fbcon->twod); | 252 | nvif_object_fini(&fbcon->twod); |
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 2dbf62a2ac41..e4751f92b342 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c | |||
@@ -3265,11 +3265,14 @@ nv50_mstm = { | |||
3265 | void | 3265 | void |
3266 | nv50_mstm_service(struct nv50_mstm *mstm) | 3266 | nv50_mstm_service(struct nv50_mstm *mstm) |
3267 | { | 3267 | { |
3268 | struct drm_dp_aux *aux = mstm->mgr.aux; | 3268 | struct drm_dp_aux *aux = mstm ? mstm->mgr.aux : NULL; |
3269 | bool handled = true; | 3269 | bool handled = true; |
3270 | int ret; | 3270 | int ret; |
3271 | u8 esi[8] = {}; | 3271 | u8 esi[8] = {}; |
3272 | 3272 | ||
3273 | if (!aux) | ||
3274 | return; | ||
3275 | |||
3273 | while (handled) { | 3276 | while (handled) { |
3274 | ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8); | 3277 | ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 8); |
3275 | if (ret != 8) { | 3278 | if (ret != 8) { |
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c index 8e2e24a74774..44e116f7880d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/bsp/g84.c | |||
@@ -39,5 +39,5 @@ int | |||
39 | g84_bsp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine) | 39 | g84_bsp_new(struct nvkm_device *device, int index, struct nvkm_engine **pengine) |
40 | { | 40 | { |
41 | return nvkm_xtensa_new_(&g84_bsp, device, index, | 41 | return nvkm_xtensa_new_(&g84_bsp, device, index, |
42 | true, 0x103000, pengine); | 42 | device->chipset != 0x92, 0x103000, pengine); |
43 | } | 43 | } |
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c index d06ad2c372bf..455da298227f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/base.c | |||
@@ -241,6 +241,8 @@ nvkm_vm_unmap_pgt(struct nvkm_vm *vm, int big, u32 fpde, u32 lpde) | |||
241 | mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem); | 241 | mmu->func->map_pgt(vpgd->obj, pde, vpgt->mem); |
242 | } | 242 | } |
243 | 243 | ||
244 | mmu->func->flush(vm); | ||
245 | |||
244 | nvkm_memory_del(&pgt); | 246 | nvkm_memory_del(&pgt); |
245 | } | 247 | } |
246 | } | 248 | } |
diff --git a/drivers/gpu/ipu-v3/ipu-common.c b/drivers/gpu/ipu-v3/ipu-common.c index 6a573d21d3cc..658fa2d3e40c 100644 --- a/drivers/gpu/ipu-v3/ipu-common.c +++ b/drivers/gpu/ipu-v3/ipu-common.c | |||
@@ -405,6 +405,14 @@ int ipu_idmac_lock_enable(struct ipuv3_channel *channel, int num_bursts) | |||
405 | return -EINVAL; | 405 | return -EINVAL; |
406 | } | 406 | } |
407 | 407 | ||
408 | /* | ||
409 | * IPUv3EX / i.MX51 has a different register layout, and on IPUv3M / | ||
410 | * i.MX53 channel arbitration locking doesn't seem to work properly. | ||
411 | * Allow enabling the lock feature on IPUv3H / i.MX6 only. | ||
412 | */ | ||
413 | if (bursts && ipu->ipu_type != IPUV3H) | ||
414 | return -EINVAL; | ||
415 | |||
408 | for (i = 0; i < ARRAY_SIZE(idmac_lock_en_info); i++) { | 416 | for (i = 0; i < ARRAY_SIZE(idmac_lock_en_info); i++) { |
409 | if (channel->num == idmac_lock_en_info[i].chnum) | 417 | if (channel->num == idmac_lock_en_info[i].chnum) |
410 | break; | 418 | break; |
diff --git a/drivers/gpu/ipu-v3/ipu-pre.c b/drivers/gpu/ipu-v3/ipu-pre.c index c35f74c83065..c860a7997cb5 100644 --- a/drivers/gpu/ipu-v3/ipu-pre.c +++ b/drivers/gpu/ipu-v3/ipu-pre.c | |||
@@ -73,6 +73,14 @@ | |||
73 | #define IPU_PRE_STORE_ENG_CTRL_WR_NUM_BYTES(v) ((v & 0x7) << 1) | 73 | #define IPU_PRE_STORE_ENG_CTRL_WR_NUM_BYTES(v) ((v & 0x7) << 1) |
74 | #define IPU_PRE_STORE_ENG_CTRL_OUTPUT_ACTIVE_BPP(v) ((v & 0x3) << 4) | 74 | #define IPU_PRE_STORE_ENG_CTRL_OUTPUT_ACTIVE_BPP(v) ((v & 0x3) << 4) |
75 | 75 | ||
76 | #define IPU_PRE_STORE_ENG_STATUS 0x120 | ||
77 | #define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_X_MASK 0xffff | ||
78 | #define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_X_SHIFT 0 | ||
79 | #define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_MASK 0x3fff | ||
80 | #define IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_SHIFT 16 | ||
81 | #define IPU_PRE_STORE_ENG_STATUS_STORE_FIFO_FULL (1 << 30) | ||
82 | #define IPU_PRE_STORE_ENG_STATUS_STORE_FIELD (1 << 31) | ||
83 | |||
76 | #define IPU_PRE_STORE_ENG_SIZE 0x130 | 84 | #define IPU_PRE_STORE_ENG_SIZE 0x130 |
77 | #define IPU_PRE_STORE_ENG_SIZE_INPUT_WIDTH(v) ((v & 0xffff) << 0) | 85 | #define IPU_PRE_STORE_ENG_SIZE_INPUT_WIDTH(v) ((v & 0xffff) << 0) |
78 | #define IPU_PRE_STORE_ENG_SIZE_INPUT_HEIGHT(v) ((v & 0xffff) << 16) | 86 | #define IPU_PRE_STORE_ENG_SIZE_INPUT_HEIGHT(v) ((v & 0xffff) << 16) |
@@ -93,6 +101,7 @@ struct ipu_pre { | |||
93 | dma_addr_t buffer_paddr; | 101 | dma_addr_t buffer_paddr; |
94 | void *buffer_virt; | 102 | void *buffer_virt; |
95 | bool in_use; | 103 | bool in_use; |
104 | unsigned int safe_window_end; | ||
96 | }; | 105 | }; |
97 | 106 | ||
98 | static DEFINE_MUTEX(ipu_pre_list_mutex); | 107 | static DEFINE_MUTEX(ipu_pre_list_mutex); |
@@ -160,6 +169,9 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width, | |||
160 | u32 active_bpp = info->cpp[0] >> 1; | 169 | u32 active_bpp = info->cpp[0] >> 1; |
161 | u32 val; | 170 | u32 val; |
162 | 171 | ||
172 | /* calculate safe window for ctrl register updates */ | ||
173 | pre->safe_window_end = height - 2; | ||
174 | |||
163 | writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF); | 175 | writel(bufaddr, pre->regs + IPU_PRE_CUR_BUF); |
164 | writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); | 176 | writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); |
165 | 177 | ||
@@ -199,7 +211,24 @@ void ipu_pre_configure(struct ipu_pre *pre, unsigned int width, | |||
199 | 211 | ||
200 | void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr) | 212 | void ipu_pre_update(struct ipu_pre *pre, unsigned int bufaddr) |
201 | { | 213 | { |
214 | unsigned long timeout = jiffies + msecs_to_jiffies(5); | ||
215 | unsigned short current_yblock; | ||
216 | u32 val; | ||
217 | |||
202 | writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); | 218 | writel(bufaddr, pre->regs + IPU_PRE_NEXT_BUF); |
219 | |||
220 | do { | ||
221 | if (time_after(jiffies, timeout)) { | ||
222 | dev_warn(pre->dev, "timeout waiting for PRE safe window\n"); | ||
223 | return; | ||
224 | } | ||
225 | |||
226 | val = readl(pre->regs + IPU_PRE_STORE_ENG_STATUS); | ||
227 | current_yblock = | ||
228 | (val >> IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_SHIFT) & | ||
229 | IPU_PRE_STORE_ENG_STATUS_STORE_BLOCK_Y_MASK; | ||
230 | } while (current_yblock == 0 || current_yblock >= pre->safe_window_end); | ||
231 | |||
203 | writel(IPU_PRE_CTRL_SDW_UPDATE, pre->regs + IPU_PRE_CTRL_SET); | 232 | writel(IPU_PRE_CTRL_SDW_UPDATE, pre->regs + IPU_PRE_CTRL_SET); |
204 | } | 233 | } |
205 | 234 | ||
diff --git a/drivers/gpu/ipu-v3/ipu-prg.c b/drivers/gpu/ipu-v3/ipu-prg.c index ecc9ea44dc50..0013ca9f72c8 100644 --- a/drivers/gpu/ipu-v3/ipu-prg.c +++ b/drivers/gpu/ipu-v3/ipu-prg.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <drm/drm_fourcc.h> | 14 | #include <drm/drm_fourcc.h> |
15 | #include <linux/clk.h> | 15 | #include <linux/clk.h> |
16 | #include <linux/err.h> | 16 | #include <linux/err.h> |
17 | #include <linux/iopoll.h> | ||
17 | #include <linux/mfd/syscon.h> | 18 | #include <linux/mfd/syscon.h> |
18 | #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> | 19 | #include <linux/mfd/syscon/imx6q-iomuxc-gpr.h> |
19 | #include <linux/module.h> | 20 | #include <linux/module.h> |
@@ -329,6 +330,12 @@ int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan, | |||
329 | val = IPU_PRG_REG_UPDATE_REG_UPDATE; | 330 | val = IPU_PRG_REG_UPDATE_REG_UPDATE; |
330 | writel(val, prg->regs + IPU_PRG_REG_UPDATE); | 331 | writel(val, prg->regs + IPU_PRG_REG_UPDATE); |
331 | 332 | ||
333 | /* wait for both double buffers to be filled */ | ||
334 | readl_poll_timeout(prg->regs + IPU_PRG_STATUS, val, | ||
335 | (val & IPU_PRG_STATUS_BUFFER0_READY(prg_chan)) && | ||
336 | (val & IPU_PRG_STATUS_BUFFER1_READY(prg_chan)), | ||
337 | 5, 1000); | ||
338 | |||
332 | clk_disable_unprepare(prg->clk_ipg); | 339 | clk_disable_unprepare(prg->clk_ipg); |
333 | 340 | ||
334 | chan->enabled = true; | 341 | chan->enabled = true; |
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 0a3117cc29e7..374301fcbc86 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig | |||
@@ -281,6 +281,7 @@ config HID_ELECOM | |||
281 | Support for ELECOM devices: | 281 | Support for ELECOM devices: |
282 | - BM084 Bluetooth Mouse | 282 | - BM084 Bluetooth Mouse |
283 | - DEFT Trackball (Wired and wireless) | 283 | - DEFT Trackball (Wired and wireless) |
284 | - HUGE Trackball (Wired and wireless) | ||
284 | 285 | ||
285 | config HID_ELO | 286 | config HID_ELO |
286 | tristate "ELO USB 4000/4500 touchscreen" | 287 | tristate "ELO USB 4000/4500 touchscreen" |
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 9bc91160819b..330ca983828b 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c | |||
@@ -2032,6 +2032,8 @@ static const struct hid_device_id hid_have_special_driver[] = { | |||
2032 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, | 2032 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, |
2033 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) }, | 2033 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) }, |
2034 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) }, | 2034 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) }, |
2035 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) }, | ||
2036 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRELESS) }, | ||
2035 | #endif | 2037 | #endif |
2036 | #if IS_ENABLED(CONFIG_HID_ELO) | 2038 | #if IS_ENABLED(CONFIG_HID_ELO) |
2037 | { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) }, | 2039 | { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) }, |
diff --git a/drivers/hid/hid-elecom.c b/drivers/hid/hid-elecom.c index e2c7465df69f..54aeea57d209 100644 --- a/drivers/hid/hid-elecom.c +++ b/drivers/hid/hid-elecom.c | |||
@@ -3,6 +3,7 @@ | |||
3 | * Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com> | 3 | * Copyright (c) 2010 Richard Nauber <Richard.Nauber@gmail.com> |
4 | * Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com> | 4 | * Copyright (c) 2016 Yuxuan Shui <yshuiv7@gmail.com> |
5 | * Copyright (c) 2017 Diego Elio Pettenò <flameeyes@flameeyes.eu> | 5 | * Copyright (c) 2017 Diego Elio Pettenò <flameeyes@flameeyes.eu> |
6 | * Copyright (c) 2017 Alex Manoussakis <amanou@gnu.org> | ||
6 | */ | 7 | */ |
7 | 8 | ||
8 | /* | 9 | /* |
@@ -32,9 +33,11 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc, | |||
32 | break; | 33 | break; |
33 | case USB_DEVICE_ID_ELECOM_DEFT_WIRED: | 34 | case USB_DEVICE_ID_ELECOM_DEFT_WIRED: |
34 | case USB_DEVICE_ID_ELECOM_DEFT_WIRELESS: | 35 | case USB_DEVICE_ID_ELECOM_DEFT_WIRELESS: |
35 | /* The DEFT trackball has eight buttons, but its descriptor only | 36 | case USB_DEVICE_ID_ELECOM_HUGE_WIRED: |
36 | * reports five, disabling the three Fn buttons on the top of | 37 | case USB_DEVICE_ID_ELECOM_HUGE_WIRELESS: |
37 | * the mouse. | 38 | /* The DEFT/HUGE trackball has eight buttons, but its descriptor |
39 | * only reports five, disabling the three Fn buttons on the top | ||
40 | * of the mouse. | ||
38 | * | 41 | * |
39 | * Apply the following diff to the descriptor: | 42 | * Apply the following diff to the descriptor: |
40 | * | 43 | * |
@@ -62,7 +65,7 @@ static __u8 *elecom_report_fixup(struct hid_device *hdev, __u8 *rdesc, | |||
62 | * End Collection, End Collection, | 65 | * End Collection, End Collection, |
63 | */ | 66 | */ |
64 | if (*rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) { | 67 | if (*rsize == 213 && rdesc[13] == 5 && rdesc[21] == 5) { |
65 | hid_info(hdev, "Fixing up Elecom DEFT Fn buttons\n"); | 68 | hid_info(hdev, "Fixing up Elecom DEFT/HUGE Fn buttons\n"); |
66 | rdesc[13] = 8; /* Button/Variable Report Count */ | 69 | rdesc[13] = 8; /* Button/Variable Report Count */ |
67 | rdesc[21] = 8; /* Button/Variable Usage Maximum */ | 70 | rdesc[21] = 8; /* Button/Variable Usage Maximum */ |
68 | rdesc[29] = 0; /* Button/Constant Report Count */ | 71 | rdesc[29] = 0; /* Button/Constant Report Count */ |
@@ -76,6 +79,8 @@ static const struct hid_device_id elecom_devices[] = { | |||
76 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, | 79 | { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, |
77 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) }, | 80 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) }, |
78 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) }, | 81 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) }, |
82 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRED) }, | ||
83 | { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_HUGE_WIRELESS) }, | ||
79 | { } | 84 | { } |
80 | }; | 85 | }; |
81 | MODULE_DEVICE_TABLE(hid, elecom_devices); | 86 | MODULE_DEVICE_TABLE(hid, elecom_devices); |
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index a98919199858..be2e005c3c51 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h | |||
@@ -368,6 +368,8 @@ | |||
368 | #define USB_DEVICE_ID_ELECOM_BM084 0x0061 | 368 | #define USB_DEVICE_ID_ELECOM_BM084 0x0061 |
369 | #define USB_DEVICE_ID_ELECOM_DEFT_WIRED 0x00fe | 369 | #define USB_DEVICE_ID_ELECOM_DEFT_WIRED 0x00fe |
370 | #define USB_DEVICE_ID_ELECOM_DEFT_WIRELESS 0x00ff | 370 | #define USB_DEVICE_ID_ELECOM_DEFT_WIRELESS 0x00ff |
371 | #define USB_DEVICE_ID_ELECOM_HUGE_WIRED 0x010c | ||
372 | #define USB_DEVICE_ID_ELECOM_HUGE_WIRELESS 0x010d | ||
371 | 373 | ||
372 | #define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34 | 374 | #define USB_VENDOR_ID_DREAM_CHEEKY 0x1d34 |
373 | #define USB_DEVICE_ID_DREAM_CHEEKY_WN 0x0004 | 375 | #define USB_DEVICE_ID_DREAM_CHEEKY_WN 0x0004 |
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c index 089bad8a9a21..045b5da9b992 100644 --- a/drivers/hid/usbhid/hid-core.c +++ b/drivers/hid/usbhid/hid-core.c | |||
@@ -975,6 +975,8 @@ static int usbhid_parse(struct hid_device *hid) | |||
975 | unsigned int rsize = 0; | 975 | unsigned int rsize = 0; |
976 | char *rdesc; | 976 | char *rdesc; |
977 | int ret, n; | 977 | int ret, n; |
978 | int num_descriptors; | ||
979 | size_t offset = offsetof(struct hid_descriptor, desc); | ||
978 | 980 | ||
979 | quirks = usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor), | 981 | quirks = usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor), |
980 | le16_to_cpu(dev->descriptor.idProduct)); | 982 | le16_to_cpu(dev->descriptor.idProduct)); |
@@ -997,10 +999,18 @@ static int usbhid_parse(struct hid_device *hid) | |||
997 | return -ENODEV; | 999 | return -ENODEV; |
998 | } | 1000 | } |
999 | 1001 | ||
1002 | if (hdesc->bLength < sizeof(struct hid_descriptor)) { | ||
1003 | dbg_hid("hid descriptor is too short\n"); | ||
1004 | return -EINVAL; | ||
1005 | } | ||
1006 | |||
1000 | hid->version = le16_to_cpu(hdesc->bcdHID); | 1007 | hid->version = le16_to_cpu(hdesc->bcdHID); |
1001 | hid->country = hdesc->bCountryCode; | 1008 | hid->country = hdesc->bCountryCode; |
1002 | 1009 | ||
1003 | for (n = 0; n < hdesc->bNumDescriptors; n++) | 1010 | num_descriptors = min_t(int, hdesc->bNumDescriptors, |
1011 | (hdesc->bLength - offset) / sizeof(struct hid_class_descriptor)); | ||
1012 | |||
1013 | for (n = 0; n < num_descriptors; n++) | ||
1004 | if (hdesc->desc[n].bDescriptorType == HID_DT_REPORT) | 1014 | if (hdesc->desc[n].bDescriptorType == HID_DT_REPORT) |
1005 | rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength); | 1015 | rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength); |
1006 | 1016 | ||
diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index efd5db743319..894b67ac2cae 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c | |||
@@ -640,6 +640,7 @@ void vmbus_close(struct vmbus_channel *channel) | |||
640 | */ | 640 | */ |
641 | return; | 641 | return; |
642 | } | 642 | } |
643 | mutex_lock(&vmbus_connection.channel_mutex); | ||
643 | /* | 644 | /* |
644 | * Close all the sub-channels first and then close the | 645 | * Close all the sub-channels first and then close the |
645 | * primary channel. | 646 | * primary channel. |
@@ -648,16 +649,15 @@ void vmbus_close(struct vmbus_channel *channel) | |||
648 | cur_channel = list_entry(cur, struct vmbus_channel, sc_list); | 649 | cur_channel = list_entry(cur, struct vmbus_channel, sc_list); |
649 | vmbus_close_internal(cur_channel); | 650 | vmbus_close_internal(cur_channel); |
650 | if (cur_channel->rescind) { | 651 | if (cur_channel->rescind) { |
651 | mutex_lock(&vmbus_connection.channel_mutex); | 652 | hv_process_channel_removal( |
652 | hv_process_channel_removal(cur_channel, | ||
653 | cur_channel->offermsg.child_relid); | 653 | cur_channel->offermsg.child_relid); |
654 | mutex_unlock(&vmbus_connection.channel_mutex); | ||
655 | } | 654 | } |
656 | } | 655 | } |
657 | /* | 656 | /* |
658 | * Now close the primary. | 657 | * Now close the primary. |
659 | */ | 658 | */ |
660 | vmbus_close_internal(channel); | 659 | vmbus_close_internal(channel); |
660 | mutex_unlock(&vmbus_connection.channel_mutex); | ||
661 | } | 661 | } |
662 | EXPORT_SYMBOL_GPL(vmbus_close); | 662 | EXPORT_SYMBOL_GPL(vmbus_close); |
663 | 663 | ||
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index bcbb031f7263..379b0df123be 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c | |||
@@ -159,7 +159,7 @@ static void vmbus_rescind_cleanup(struct vmbus_channel *channel) | |||
159 | 159 | ||
160 | 160 | ||
161 | spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); | 161 | spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags); |
162 | 162 | channel->rescind = true; | |
163 | list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list, | 163 | list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list, |
164 | msglistentry) { | 164 | msglistentry) { |
165 | 165 | ||
@@ -381,14 +381,21 @@ static void vmbus_release_relid(u32 relid) | |||
381 | true); | 381 | true); |
382 | } | 382 | } |
383 | 383 | ||
384 | void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid) | 384 | void hv_process_channel_removal(u32 relid) |
385 | { | 385 | { |
386 | unsigned long flags; | 386 | unsigned long flags; |
387 | struct vmbus_channel *primary_channel; | 387 | struct vmbus_channel *primary_channel, *channel; |
388 | 388 | ||
389 | BUG_ON(!channel->rescind); | ||
390 | BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex)); | 389 | BUG_ON(!mutex_is_locked(&vmbus_connection.channel_mutex)); |
391 | 390 | ||
391 | /* | ||
392 | * Make sure channel is valid as we may have raced. | ||
393 | */ | ||
394 | channel = relid2channel(relid); | ||
395 | if (!channel) | ||
396 | return; | ||
397 | |||
398 | BUG_ON(!channel->rescind); | ||
392 | if (channel->target_cpu != get_cpu()) { | 399 | if (channel->target_cpu != get_cpu()) { |
393 | put_cpu(); | 400 | put_cpu(); |
394 | smp_call_function_single(channel->target_cpu, | 401 | smp_call_function_single(channel->target_cpu, |
@@ -515,6 +522,7 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) | |||
515 | if (!fnew) { | 522 | if (!fnew) { |
516 | if (channel->sc_creation_callback != NULL) | 523 | if (channel->sc_creation_callback != NULL) |
517 | channel->sc_creation_callback(newchannel); | 524 | channel->sc_creation_callback(newchannel); |
525 | newchannel->probe_done = true; | ||
518 | return; | 526 | return; |
519 | } | 527 | } |
520 | 528 | ||
@@ -834,7 +842,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) | |||
834 | { | 842 | { |
835 | struct vmbus_channel_rescind_offer *rescind; | 843 | struct vmbus_channel_rescind_offer *rescind; |
836 | struct vmbus_channel *channel; | 844 | struct vmbus_channel *channel; |
837 | unsigned long flags; | ||
838 | struct device *dev; | 845 | struct device *dev; |
839 | 846 | ||
840 | rescind = (struct vmbus_channel_rescind_offer *)hdr; | 847 | rescind = (struct vmbus_channel_rescind_offer *)hdr; |
@@ -873,16 +880,6 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) | |||
873 | return; | 880 | return; |
874 | } | 881 | } |
875 | 882 | ||
876 | spin_lock_irqsave(&channel->lock, flags); | ||
877 | channel->rescind = true; | ||
878 | spin_unlock_irqrestore(&channel->lock, flags); | ||
879 | |||
880 | /* | ||
881 | * Now that we have posted the rescind state, perform | ||
882 | * rescind related cleanup. | ||
883 | */ | ||
884 | vmbus_rescind_cleanup(channel); | ||
885 | |||
886 | /* | 883 | /* |
887 | * Now wait for offer handling to complete. | 884 | * Now wait for offer handling to complete. |
888 | */ | 885 | */ |
@@ -901,6 +898,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) | |||
901 | if (channel->device_obj) { | 898 | if (channel->device_obj) { |
902 | if (channel->chn_rescind_callback) { | 899 | if (channel->chn_rescind_callback) { |
903 | channel->chn_rescind_callback(channel); | 900 | channel->chn_rescind_callback(channel); |
901 | vmbus_rescind_cleanup(channel); | ||
904 | return; | 902 | return; |
905 | } | 903 | } |
906 | /* | 904 | /* |
@@ -909,6 +907,7 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) | |||
909 | */ | 907 | */ |
910 | dev = get_device(&channel->device_obj->device); | 908 | dev = get_device(&channel->device_obj->device); |
911 | if (dev) { | 909 | if (dev) { |
910 | vmbus_rescind_cleanup(channel); | ||
912 | vmbus_device_unregister(channel->device_obj); | 911 | vmbus_device_unregister(channel->device_obj); |
913 | put_device(dev); | 912 | put_device(dev); |
914 | } | 913 | } |
@@ -921,16 +920,16 @@ static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr) | |||
921 | * 1. Close all sub-channels first | 920 | * 1. Close all sub-channels first |
922 | * 2. Then close the primary channel. | 921 | * 2. Then close the primary channel. |
923 | */ | 922 | */ |
923 | mutex_lock(&vmbus_connection.channel_mutex); | ||
924 | vmbus_rescind_cleanup(channel); | ||
924 | if (channel->state == CHANNEL_OPEN_STATE) { | 925 | if (channel->state == CHANNEL_OPEN_STATE) { |
925 | /* | 926 | /* |
926 | * The channel is currently not open; | 927 | * The channel is currently not open; |
927 | * it is safe for us to cleanup the channel. | 928 | * it is safe for us to cleanup the channel. |
928 | */ | 929 | */ |
929 | mutex_lock(&vmbus_connection.channel_mutex); | 930 | hv_process_channel_removal(rescind->child_relid); |
930 | hv_process_channel_removal(channel, | ||
931 | channel->offermsg.child_relid); | ||
932 | mutex_unlock(&vmbus_connection.channel_mutex); | ||
933 | } | 931 | } |
932 | mutex_unlock(&vmbus_connection.channel_mutex); | ||
934 | } | 933 | } |
935 | } | 934 | } |
936 | 935 | ||
@@ -938,7 +937,10 @@ void vmbus_hvsock_device_unregister(struct vmbus_channel *channel) | |||
938 | { | 937 | { |
939 | BUG_ON(!is_hvsock_channel(channel)); | 938 | BUG_ON(!is_hvsock_channel(channel)); |
940 | 939 | ||
941 | channel->rescind = true; | 940 | /* We always get a rescind msg when a connection is closed. */ |
941 | while (!READ_ONCE(channel->probe_done) || !READ_ONCE(channel->rescind)) | ||
942 | msleep(1); | ||
943 | |||
942 | vmbus_device_unregister(channel->device_obj); | 944 | vmbus_device_unregister(channel->device_obj); |
943 | } | 945 | } |
944 | EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister); | 946 | EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister); |
diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index a9d49f6f6501..937801ac2fe0 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c | |||
@@ -768,8 +768,7 @@ static void vmbus_device_release(struct device *device) | |||
768 | struct vmbus_channel *channel = hv_dev->channel; | 768 | struct vmbus_channel *channel = hv_dev->channel; |
769 | 769 | ||
770 | mutex_lock(&vmbus_connection.channel_mutex); | 770 | mutex_lock(&vmbus_connection.channel_mutex); |
771 | hv_process_channel_removal(channel, | 771 | hv_process_channel_removal(channel->offermsg.child_relid); |
772 | channel->offermsg.child_relid); | ||
773 | mutex_unlock(&vmbus_connection.channel_mutex); | 772 | mutex_unlock(&vmbus_connection.channel_mutex); |
774 | kfree(hv_dev); | 773 | kfree(hv_dev); |
775 | 774 | ||
diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c index 97a62f5b9ea4..a973eb6a2890 100644 --- a/drivers/hwmon/da9052-hwmon.c +++ b/drivers/hwmon/da9052-hwmon.c | |||
@@ -477,6 +477,11 @@ static int da9052_hwmon_probe(struct platform_device *pdev) | |||
477 | /* disable touchscreen features */ | 477 | /* disable touchscreen features */ |
478 | da9052_reg_write(hwmon->da9052, DA9052_TSI_CONT_A_REG, 0x00); | 478 | da9052_reg_write(hwmon->da9052, DA9052_TSI_CONT_A_REG, 0x00); |
479 | 479 | ||
480 | /* Sample every 1ms */ | ||
481 | da9052_reg_update(hwmon->da9052, DA9052_ADC_CONT_REG, | ||
482 | DA9052_ADCCONT_ADCMODE, | ||
483 | DA9052_ADCCONT_ADCMODE); | ||
484 | |||
480 | err = da9052_request_irq(hwmon->da9052, DA9052_IRQ_TSIREADY, | 485 | err = da9052_request_irq(hwmon->da9052, DA9052_IRQ_TSIREADY, |
481 | "tsiready-irq", da9052_tsi_datardy_irq, | 486 | "tsiready-irq", da9052_tsi_datardy_irq, |
482 | hwmon); | 487 | hwmon); |
diff --git a/drivers/hwmon/tmp102.c b/drivers/hwmon/tmp102.c index 5eafbaada795..dfc40c740d07 100644 --- a/drivers/hwmon/tmp102.c +++ b/drivers/hwmon/tmp102.c | |||
@@ -268,14 +268,11 @@ static int tmp102_probe(struct i2c_client *client, | |||
268 | return err; | 268 | return err; |
269 | } | 269 | } |
270 | 270 | ||
271 | tmp102->ready_time = jiffies; | 271 | /* |
272 | if (tmp102->config_orig & TMP102_CONF_SD) { | 272 | * Mark that we are not ready with data until the first |
273 | /* | 273 | * conversion is complete |
274 | * Mark that we are not ready with data until the first | 274 | */ |
275 | * conversion is complete | 275 | tmp102->ready_time = jiffies + msecs_to_jiffies(CONVERSION_TIME_MS); |
276 | */ | ||
277 | tmp102->ready_time += msecs_to_jiffies(CONVERSION_TIME_MS); | ||
278 | } | ||
279 | 276 | ||
280 | hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, | 277 | hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, |
281 | tmp102, | 278 | tmp102, |
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index 54a47b40546f..f96830ffd9f1 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c | |||
@@ -1021,7 +1021,7 @@ static int i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx, | |||
1021 | } | 1021 | } |
1022 | 1022 | ||
1023 | dev_dbg(&pdev->dev, "using scl-gpio %d and sda-gpio %d for recovery\n", | 1023 | dev_dbg(&pdev->dev, "using scl-gpio %d and sda-gpio %d for recovery\n", |
1024 | rinfo->sda_gpio, rinfo->scl_gpio); | 1024 | rinfo->scl_gpio, rinfo->sda_gpio); |
1025 | 1025 | ||
1026 | rinfo->prepare_recovery = i2c_imx_prepare_recovery; | 1026 | rinfo->prepare_recovery = i2c_imx_prepare_recovery; |
1027 | rinfo->unprepare_recovery = i2c_imx_unprepare_recovery; | 1027 | rinfo->unprepare_recovery = i2c_imx_unprepare_recovery; |
@@ -1100,7 +1100,7 @@ static int i2c_imx_probe(struct platform_device *pdev) | |||
1100 | } | 1100 | } |
1101 | 1101 | ||
1102 | /* Request IRQ */ | 1102 | /* Request IRQ */ |
1103 | ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, 0, | 1103 | ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, IRQF_SHARED, |
1104 | pdev->name, i2c_imx); | 1104 | pdev->name, i2c_imx); |
1105 | if (ret) { | 1105 | if (ret) { |
1106 | dev_err(&pdev->dev, "can't claim irq %d\n", irq); | 1106 | dev_err(&pdev->dev, "can't claim irq %d\n", irq); |
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c index 22ffcb73c185..b51adffa4841 100644 --- a/drivers/i2c/busses/i2c-ismt.c +++ b/drivers/i2c/busses/i2c-ismt.c | |||
@@ -340,12 +340,15 @@ static int ismt_process_desc(const struct ismt_desc *desc, | |||
340 | data->word = dma_buffer[0] | (dma_buffer[1] << 8); | 340 | data->word = dma_buffer[0] | (dma_buffer[1] << 8); |
341 | break; | 341 | break; |
342 | case I2C_SMBUS_BLOCK_DATA: | 342 | case I2C_SMBUS_BLOCK_DATA: |
343 | case I2C_SMBUS_I2C_BLOCK_DATA: | ||
344 | if (desc->rxbytes != dma_buffer[0] + 1) | 343 | if (desc->rxbytes != dma_buffer[0] + 1) |
345 | return -EMSGSIZE; | 344 | return -EMSGSIZE; |
346 | 345 | ||
347 | memcpy(data->block, dma_buffer, desc->rxbytes); | 346 | memcpy(data->block, dma_buffer, desc->rxbytes); |
348 | break; | 347 | break; |
348 | case I2C_SMBUS_I2C_BLOCK_DATA: | ||
349 | memcpy(&data->block[1], dma_buffer, desc->rxbytes); | ||
350 | data->block[0] = desc->rxbytes; | ||
351 | break; | ||
349 | } | 352 | } |
350 | return 0; | 353 | return 0; |
351 | } | 354 | } |
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index 1ebb5e947e0b..23c2ea2baedc 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c | |||
@@ -360,6 +360,7 @@ static int omap_i2c_init(struct omap_i2c_dev *omap) | |||
360 | unsigned long fclk_rate = 12000000; | 360 | unsigned long fclk_rate = 12000000; |
361 | unsigned long internal_clk = 0; | 361 | unsigned long internal_clk = 0; |
362 | struct clk *fclk; | 362 | struct clk *fclk; |
363 | int error; | ||
363 | 364 | ||
364 | if (omap->rev >= OMAP_I2C_REV_ON_3430_3530) { | 365 | if (omap->rev >= OMAP_I2C_REV_ON_3430_3530) { |
365 | /* | 366 | /* |
@@ -378,6 +379,13 @@ static int omap_i2c_init(struct omap_i2c_dev *omap) | |||
378 | * do this bit unconditionally. | 379 | * do this bit unconditionally. |
379 | */ | 380 | */ |
380 | fclk = clk_get(omap->dev, "fck"); | 381 | fclk = clk_get(omap->dev, "fck"); |
382 | if (IS_ERR(fclk)) { | ||
383 | error = PTR_ERR(fclk); | ||
384 | dev_err(omap->dev, "could not get fck: %i\n", error); | ||
385 | |||
386 | return error; | ||
387 | } | ||
388 | |||
381 | fclk_rate = clk_get_rate(fclk); | 389 | fclk_rate = clk_get_rate(fclk); |
382 | clk_put(fclk); | 390 | clk_put(fclk); |
383 | 391 | ||
@@ -410,6 +418,12 @@ static int omap_i2c_init(struct omap_i2c_dev *omap) | |||
410 | else | 418 | else |
411 | internal_clk = 4000; | 419 | internal_clk = 4000; |
412 | fclk = clk_get(omap->dev, "fck"); | 420 | fclk = clk_get(omap->dev, "fck"); |
421 | if (IS_ERR(fclk)) { | ||
422 | error = PTR_ERR(fclk); | ||
423 | dev_err(omap->dev, "could not get fck: %i\n", error); | ||
424 | |||
425 | return error; | ||
426 | } | ||
413 | fclk_rate = clk_get_rate(fclk) / 1000; | 427 | fclk_rate = clk_get_rate(fclk) / 1000; |
414 | clk_put(fclk); | 428 | clk_put(fclk); |
415 | 429 | ||
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index 0ecdb47a23ab..174579d32e5f 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c | |||
@@ -85,6 +85,9 @@ | |||
85 | /* SB800 constants */ | 85 | /* SB800 constants */ |
86 | #define SB800_PIIX4_SMB_IDX 0xcd6 | 86 | #define SB800_PIIX4_SMB_IDX 0xcd6 |
87 | 87 | ||
88 | #define KERNCZ_IMC_IDX 0x3e | ||
89 | #define KERNCZ_IMC_DATA 0x3f | ||
90 | |||
88 | /* | 91 | /* |
89 | * SB800 port is selected by bits 2:1 of the smb_en register (0x2c) | 92 | * SB800 port is selected by bits 2:1 of the smb_en register (0x2c) |
90 | * or the smb_sel register (0x2e), depending on bit 0 of register 0x2f. | 93 | * or the smb_sel register (0x2e), depending on bit 0 of register 0x2f. |
@@ -94,6 +97,12 @@ | |||
94 | #define SB800_PIIX4_PORT_IDX_ALT 0x2e | 97 | #define SB800_PIIX4_PORT_IDX_ALT 0x2e |
95 | #define SB800_PIIX4_PORT_IDX_SEL 0x2f | 98 | #define SB800_PIIX4_PORT_IDX_SEL 0x2f |
96 | #define SB800_PIIX4_PORT_IDX_MASK 0x06 | 99 | #define SB800_PIIX4_PORT_IDX_MASK 0x06 |
100 | #define SB800_PIIX4_PORT_IDX_SHIFT 1 | ||
101 | |||
102 | /* On kerncz, SmBus0Sel is at bit 20:19 of PMx00 DecodeEn */ | ||
103 | #define SB800_PIIX4_PORT_IDX_KERNCZ 0x02 | ||
104 | #define SB800_PIIX4_PORT_IDX_MASK_KERNCZ 0x18 | ||
105 | #define SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ 3 | ||
97 | 106 | ||
98 | /* insmod parameters */ | 107 | /* insmod parameters */ |
99 | 108 | ||
@@ -149,6 +158,8 @@ static const struct dmi_system_id piix4_dmi_ibm[] = { | |||
149 | */ | 158 | */ |
150 | static DEFINE_MUTEX(piix4_mutex_sb800); | 159 | static DEFINE_MUTEX(piix4_mutex_sb800); |
151 | static u8 piix4_port_sel_sb800; | 160 | static u8 piix4_port_sel_sb800; |
161 | static u8 piix4_port_mask_sb800; | ||
162 | static u8 piix4_port_shift_sb800; | ||
152 | static const char *piix4_main_port_names_sb800[PIIX4_MAX_ADAPTERS] = { | 163 | static const char *piix4_main_port_names_sb800[PIIX4_MAX_ADAPTERS] = { |
153 | " port 0", " port 2", " port 3", " port 4" | 164 | " port 0", " port 2", " port 3", " port 4" |
154 | }; | 165 | }; |
@@ -159,6 +170,7 @@ struct i2c_piix4_adapdata { | |||
159 | 170 | ||
160 | /* SB800 */ | 171 | /* SB800 */ |
161 | bool sb800_main; | 172 | bool sb800_main; |
173 | bool notify_imc; | ||
162 | u8 port; /* Port number, shifted */ | 174 | u8 port; /* Port number, shifted */ |
163 | }; | 175 | }; |
164 | 176 | ||
@@ -347,7 +359,19 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev, | |||
347 | 359 | ||
348 | /* Find which register is used for port selection */ | 360 | /* Find which register is used for port selection */ |
349 | if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD) { | 361 | if (PIIX4_dev->vendor == PCI_VENDOR_ID_AMD) { |
350 | piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT; | 362 | switch (PIIX4_dev->device) { |
363 | case PCI_DEVICE_ID_AMD_KERNCZ_SMBUS: | ||
364 | piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_KERNCZ; | ||
365 | piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK_KERNCZ; | ||
366 | piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT_KERNCZ; | ||
367 | break; | ||
368 | case PCI_DEVICE_ID_AMD_HUDSON2_SMBUS: | ||
369 | default: | ||
370 | piix4_port_sel_sb800 = SB800_PIIX4_PORT_IDX_ALT; | ||
371 | piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK; | ||
372 | piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT; | ||
373 | break; | ||
374 | } | ||
351 | } else { | 375 | } else { |
352 | mutex_lock(&piix4_mutex_sb800); | 376 | mutex_lock(&piix4_mutex_sb800); |
353 | outb_p(SB800_PIIX4_PORT_IDX_SEL, SB800_PIIX4_SMB_IDX); | 377 | outb_p(SB800_PIIX4_PORT_IDX_SEL, SB800_PIIX4_SMB_IDX); |
@@ -355,6 +379,8 @@ static int piix4_setup_sb800(struct pci_dev *PIIX4_dev, | |||
355 | piix4_port_sel_sb800 = (port_sel & 0x01) ? | 379 | piix4_port_sel_sb800 = (port_sel & 0x01) ? |
356 | SB800_PIIX4_PORT_IDX_ALT : | 380 | SB800_PIIX4_PORT_IDX_ALT : |
357 | SB800_PIIX4_PORT_IDX; | 381 | SB800_PIIX4_PORT_IDX; |
382 | piix4_port_mask_sb800 = SB800_PIIX4_PORT_IDX_MASK; | ||
383 | piix4_port_shift_sb800 = SB800_PIIX4_PORT_IDX_SHIFT; | ||
358 | mutex_unlock(&piix4_mutex_sb800); | 384 | mutex_unlock(&piix4_mutex_sb800); |
359 | } | 385 | } |
360 | 386 | ||
@@ -572,6 +598,67 @@ static s32 piix4_access(struct i2c_adapter * adap, u16 addr, | |||
572 | return 0; | 598 | return 0; |
573 | } | 599 | } |
574 | 600 | ||
601 | static uint8_t piix4_imc_read(uint8_t idx) | ||
602 | { | ||
603 | outb_p(idx, KERNCZ_IMC_IDX); | ||
604 | return inb_p(KERNCZ_IMC_DATA); | ||
605 | } | ||
606 | |||
607 | static void piix4_imc_write(uint8_t idx, uint8_t value) | ||
608 | { | ||
609 | outb_p(idx, KERNCZ_IMC_IDX); | ||
610 | outb_p(value, KERNCZ_IMC_DATA); | ||
611 | } | ||
612 | |||
613 | static int piix4_imc_sleep(void) | ||
614 | { | ||
615 | int timeout = MAX_TIMEOUT; | ||
616 | |||
617 | if (!request_muxed_region(KERNCZ_IMC_IDX, 2, "smbus_kerncz_imc")) | ||
618 | return -EBUSY; | ||
619 | |||
620 | /* clear response register */ | ||
621 | piix4_imc_write(0x82, 0x00); | ||
622 | /* request ownership flag */ | ||
623 | piix4_imc_write(0x83, 0xB4); | ||
624 | /* kick off IMC Mailbox command 96 */ | ||
625 | piix4_imc_write(0x80, 0x96); | ||
626 | |||
627 | while (timeout--) { | ||
628 | if (piix4_imc_read(0x82) == 0xfa) { | ||
629 | release_region(KERNCZ_IMC_IDX, 2); | ||
630 | return 0; | ||
631 | } | ||
632 | usleep_range(1000, 2000); | ||
633 | } | ||
634 | |||
635 | release_region(KERNCZ_IMC_IDX, 2); | ||
636 | return -ETIMEDOUT; | ||
637 | } | ||
638 | |||
639 | static void piix4_imc_wakeup(void) | ||
640 | { | ||
641 | int timeout = MAX_TIMEOUT; | ||
642 | |||
643 | if (!request_muxed_region(KERNCZ_IMC_IDX, 2, "smbus_kerncz_imc")) | ||
644 | return; | ||
645 | |||
646 | /* clear response register */ | ||
647 | piix4_imc_write(0x82, 0x00); | ||
648 | /* release ownership flag */ | ||
649 | piix4_imc_write(0x83, 0xB5); | ||
650 | /* kick off IMC Mailbox command 96 */ | ||
651 | piix4_imc_write(0x80, 0x96); | ||
652 | |||
653 | while (timeout--) { | ||
654 | if (piix4_imc_read(0x82) == 0xfa) | ||
655 | break; | ||
656 | usleep_range(1000, 2000); | ||
657 | } | ||
658 | |||
659 | release_region(KERNCZ_IMC_IDX, 2); | ||
660 | } | ||
661 | |||
575 | /* | 662 | /* |
576 | * Handles access to multiple SMBus ports on the SB800. | 663 | * Handles access to multiple SMBus ports on the SB800. |
577 | * The port is selected by bits 2:1 of the smb_en register (0x2c). | 664 | * The port is selected by bits 2:1 of the smb_en register (0x2c). |
@@ -612,12 +699,47 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr, | |||
612 | return -EBUSY; | 699 | return -EBUSY; |
613 | } | 700 | } |
614 | 701 | ||
702 | /* | ||
703 | * Notify the IMC (Integrated Micro Controller) if required. | ||
704 | * Among other responsibilities, the IMC is in charge of monitoring | ||
705 | * the System fans and temperature sensors, and act accordingly. | ||
706 | * All this is done through SMBus and can/will collide | ||
707 | * with our transactions if they are long (BLOCK_DATA). | ||
708 | * Therefore we need to request the ownership flag during those | ||
709 | * transactions. | ||
710 | */ | ||
711 | if ((size == I2C_SMBUS_BLOCK_DATA) && adapdata->notify_imc) { | ||
712 | int ret; | ||
713 | |||
714 | ret = piix4_imc_sleep(); | ||
715 | switch (ret) { | ||
716 | case -EBUSY: | ||
717 | dev_warn(&adap->dev, | ||
718 | "IMC base address index region 0x%x already in use.\n", | ||
719 | KERNCZ_IMC_IDX); | ||
720 | break; | ||
721 | case -ETIMEDOUT: | ||
722 | dev_warn(&adap->dev, | ||
723 | "Failed to communicate with the IMC.\n"); | ||
724 | break; | ||
725 | default: | ||
726 | break; | ||
727 | } | ||
728 | |||
729 | /* If IMC communication fails do not retry */ | ||
730 | if (ret) { | ||
731 | dev_warn(&adap->dev, | ||
732 | "Continuing without IMC notification.\n"); | ||
733 | adapdata->notify_imc = false; | ||
734 | } | ||
735 | } | ||
736 | |||
615 | outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX); | 737 | outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX); |
616 | smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1); | 738 | smba_en_lo = inb_p(SB800_PIIX4_SMB_IDX + 1); |
617 | 739 | ||
618 | port = adapdata->port; | 740 | port = adapdata->port; |
619 | if ((smba_en_lo & SB800_PIIX4_PORT_IDX_MASK) != port) | 741 | if ((smba_en_lo & piix4_port_mask_sb800) != port) |
620 | outb_p((smba_en_lo & ~SB800_PIIX4_PORT_IDX_MASK) | port, | 742 | outb_p((smba_en_lo & ~piix4_port_mask_sb800) | port, |
621 | SB800_PIIX4_SMB_IDX + 1); | 743 | SB800_PIIX4_SMB_IDX + 1); |
622 | 744 | ||
623 | retval = piix4_access(adap, addr, flags, read_write, | 745 | retval = piix4_access(adap, addr, flags, read_write, |
@@ -628,6 +750,9 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr, | |||
628 | /* Release the semaphore */ | 750 | /* Release the semaphore */ |
629 | outb_p(smbslvcnt | 0x20, SMBSLVCNT); | 751 | outb_p(smbslvcnt | 0x20, SMBSLVCNT); |
630 | 752 | ||
753 | if ((size == I2C_SMBUS_BLOCK_DATA) && adapdata->notify_imc) | ||
754 | piix4_imc_wakeup(); | ||
755 | |||
631 | mutex_unlock(&piix4_mutex_sb800); | 756 | mutex_unlock(&piix4_mutex_sb800); |
632 | 757 | ||
633 | return retval; | 758 | return retval; |
@@ -679,7 +804,7 @@ static struct i2c_adapter *piix4_main_adapters[PIIX4_MAX_ADAPTERS]; | |||
679 | static struct i2c_adapter *piix4_aux_adapter; | 804 | static struct i2c_adapter *piix4_aux_adapter; |
680 | 805 | ||
681 | static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba, | 806 | static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba, |
682 | bool sb800_main, u8 port, | 807 | bool sb800_main, u8 port, bool notify_imc, |
683 | const char *name, struct i2c_adapter **padap) | 808 | const char *name, struct i2c_adapter **padap) |
684 | { | 809 | { |
685 | struct i2c_adapter *adap; | 810 | struct i2c_adapter *adap; |
@@ -706,7 +831,8 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba, | |||
706 | 831 | ||
707 | adapdata->smba = smba; | 832 | adapdata->smba = smba; |
708 | adapdata->sb800_main = sb800_main; | 833 | adapdata->sb800_main = sb800_main; |
709 | adapdata->port = port << 1; | 834 | adapdata->port = port << piix4_port_shift_sb800; |
835 | adapdata->notify_imc = notify_imc; | ||
710 | 836 | ||
711 | /* set up the sysfs linkage to our parent device */ | 837 | /* set up the sysfs linkage to our parent device */ |
712 | adap->dev.parent = &dev->dev; | 838 | adap->dev.parent = &dev->dev; |
@@ -728,14 +854,15 @@ static int piix4_add_adapter(struct pci_dev *dev, unsigned short smba, | |||
728 | return 0; | 854 | return 0; |
729 | } | 855 | } |
730 | 856 | ||
731 | static int piix4_add_adapters_sb800(struct pci_dev *dev, unsigned short smba) | 857 | static int piix4_add_adapters_sb800(struct pci_dev *dev, unsigned short smba, |
858 | bool notify_imc) | ||
732 | { | 859 | { |
733 | struct i2c_piix4_adapdata *adapdata; | 860 | struct i2c_piix4_adapdata *adapdata; |
734 | int port; | 861 | int port; |
735 | int retval; | 862 | int retval; |
736 | 863 | ||
737 | for (port = 0; port < PIIX4_MAX_ADAPTERS; port++) { | 864 | for (port = 0; port < PIIX4_MAX_ADAPTERS; port++) { |
738 | retval = piix4_add_adapter(dev, smba, true, port, | 865 | retval = piix4_add_adapter(dev, smba, true, port, notify_imc, |
739 | piix4_main_port_names_sb800[port], | 866 | piix4_main_port_names_sb800[port], |
740 | &piix4_main_adapters[port]); | 867 | &piix4_main_adapters[port]); |
741 | if (retval < 0) | 868 | if (retval < 0) |
@@ -769,6 +896,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
769 | dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS && | 896 | dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS && |
770 | dev->revision >= 0x40) || | 897 | dev->revision >= 0x40) || |
771 | dev->vendor == PCI_VENDOR_ID_AMD) { | 898 | dev->vendor == PCI_VENDOR_ID_AMD) { |
899 | bool notify_imc = false; | ||
772 | is_sb800 = true; | 900 | is_sb800 = true; |
773 | 901 | ||
774 | if (!request_region(SB800_PIIX4_SMB_IDX, 2, "smba_idx")) { | 902 | if (!request_region(SB800_PIIX4_SMB_IDX, 2, "smba_idx")) { |
@@ -778,6 +906,20 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
778 | return -EBUSY; | 906 | return -EBUSY; |
779 | } | 907 | } |
780 | 908 | ||
909 | if (dev->vendor == PCI_VENDOR_ID_AMD && | ||
910 | dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS) { | ||
911 | u8 imc; | ||
912 | |||
913 | /* | ||
914 | * Detect if IMC is active or not, this method is | ||
915 | * described on coreboot's AMD IMC notes | ||
916 | */ | ||
917 | pci_bus_read_config_byte(dev->bus, PCI_DEVFN(0x14, 3), | ||
918 | 0x40, &imc); | ||
919 | if (imc & 0x80) | ||
920 | notify_imc = true; | ||
921 | } | ||
922 | |||
781 | /* base address location etc changed in SB800 */ | 923 | /* base address location etc changed in SB800 */ |
782 | retval = piix4_setup_sb800(dev, id, 0); | 924 | retval = piix4_setup_sb800(dev, id, 0); |
783 | if (retval < 0) { | 925 | if (retval < 0) { |
@@ -789,7 +931,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
789 | * Try to register multiplexed main SMBus adapter, | 931 | * Try to register multiplexed main SMBus adapter, |
790 | * give up if we can't | 932 | * give up if we can't |
791 | */ | 933 | */ |
792 | retval = piix4_add_adapters_sb800(dev, retval); | 934 | retval = piix4_add_adapters_sb800(dev, retval, notify_imc); |
793 | if (retval < 0) { | 935 | if (retval < 0) { |
794 | release_region(SB800_PIIX4_SMB_IDX, 2); | 936 | release_region(SB800_PIIX4_SMB_IDX, 2); |
795 | return retval; | 937 | return retval; |
@@ -800,7 +942,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
800 | return retval; | 942 | return retval; |
801 | 943 | ||
802 | /* Try to register main SMBus adapter, give up if we can't */ | 944 | /* Try to register main SMBus adapter, give up if we can't */ |
803 | retval = piix4_add_adapter(dev, retval, false, 0, "", | 945 | retval = piix4_add_adapter(dev, retval, false, 0, false, "", |
804 | &piix4_main_adapters[0]); | 946 | &piix4_main_adapters[0]); |
805 | if (retval < 0) | 947 | if (retval < 0) |
806 | return retval; | 948 | return retval; |
@@ -827,7 +969,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) | |||
827 | if (retval > 0) { | 969 | if (retval > 0) { |
828 | /* Try to add the aux adapter if it exists, | 970 | /* Try to add the aux adapter if it exists, |
829 | * piix4_add_adapter will clean up if this fails */ | 971 | * piix4_add_adapter will clean up if this fails */ |
830 | piix4_add_adapter(dev, retval, false, 0, | 972 | piix4_add_adapter(dev, retval, false, 0, false, |
831 | is_sb800 ? piix4_aux_port_name_sb800 : "", | 973 | is_sb800 ? piix4_aux_port_name_sb800 : "", |
832 | &piix4_aux_adapter); | 974 | &piix4_aux_adapter); |
833 | } | 975 | } |
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig index 57625653fcb6..1d13bf03c758 100644 --- a/drivers/iio/adc/Kconfig +++ b/drivers/iio/adc/Kconfig | |||
@@ -243,6 +243,8 @@ config DA9150_GPADC | |||
243 | config DLN2_ADC | 243 | config DLN2_ADC |
244 | tristate "Diolan DLN-2 ADC driver support" | 244 | tristate "Diolan DLN-2 ADC driver support" |
245 | depends on MFD_DLN2 | 245 | depends on MFD_DLN2 |
246 | select IIO_BUFFER | ||
247 | select IIO_TRIGGERED_BUFFER | ||
246 | help | 248 | help |
247 | Say yes here to build support for Diolan DLN-2 ADC. | 249 | Say yes here to build support for Diolan DLN-2 ADC. |
248 | 250 | ||
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c index bc5b38e3a147..a70ef7fec95f 100644 --- a/drivers/iio/adc/at91-sama5d2_adc.c +++ b/drivers/iio/adc/at91-sama5d2_adc.c | |||
@@ -225,6 +225,7 @@ struct at91_adc_trigger { | |||
225 | char *name; | 225 | char *name; |
226 | unsigned int trgmod_value; | 226 | unsigned int trgmod_value; |
227 | unsigned int edge_type; | 227 | unsigned int edge_type; |
228 | bool hw_trig; | ||
228 | }; | 229 | }; |
229 | 230 | ||
230 | struct at91_adc_state { | 231 | struct at91_adc_state { |
@@ -254,16 +255,25 @@ static const struct at91_adc_trigger at91_adc_trigger_list[] = { | |||
254 | .name = "external_rising", | 255 | .name = "external_rising", |
255 | .trgmod_value = AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_RISE, | 256 | .trgmod_value = AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_RISE, |
256 | .edge_type = IRQ_TYPE_EDGE_RISING, | 257 | .edge_type = IRQ_TYPE_EDGE_RISING, |
258 | .hw_trig = true, | ||
257 | }, | 259 | }, |
258 | { | 260 | { |
259 | .name = "external_falling", | 261 | .name = "external_falling", |
260 | .trgmod_value = AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_FALL, | 262 | .trgmod_value = AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_FALL, |
261 | .edge_type = IRQ_TYPE_EDGE_FALLING, | 263 | .edge_type = IRQ_TYPE_EDGE_FALLING, |
264 | .hw_trig = true, | ||
262 | }, | 265 | }, |
263 | { | 266 | { |
264 | .name = "external_any", | 267 | .name = "external_any", |
265 | .trgmod_value = AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_ANY, | 268 | .trgmod_value = AT91_SAMA5D2_TRGR_TRGMOD_EXT_TRIG_ANY, |
266 | .edge_type = IRQ_TYPE_EDGE_BOTH, | 269 | .edge_type = IRQ_TYPE_EDGE_BOTH, |
270 | .hw_trig = true, | ||
271 | }, | ||
272 | { | ||
273 | .name = "software", | ||
274 | .trgmod_value = AT91_SAMA5D2_TRGR_TRGMOD_NO_TRIGGER, | ||
275 | .edge_type = IRQ_TYPE_NONE, | ||
276 | .hw_trig = false, | ||
267 | }, | 277 | }, |
268 | }; | 278 | }; |
269 | 279 | ||
@@ -597,7 +607,7 @@ static int at91_adc_probe(struct platform_device *pdev) | |||
597 | struct at91_adc_state *st; | 607 | struct at91_adc_state *st; |
598 | struct resource *res; | 608 | struct resource *res; |
599 | int ret, i; | 609 | int ret, i; |
600 | u32 edge_type; | 610 | u32 edge_type = IRQ_TYPE_NONE; |
601 | 611 | ||
602 | indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*st)); | 612 | indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*st)); |
603 | if (!indio_dev) | 613 | if (!indio_dev) |
@@ -641,14 +651,14 @@ static int at91_adc_probe(struct platform_device *pdev) | |||
641 | ret = of_property_read_u32(pdev->dev.of_node, | 651 | ret = of_property_read_u32(pdev->dev.of_node, |
642 | "atmel,trigger-edge-type", &edge_type); | 652 | "atmel,trigger-edge-type", &edge_type); |
643 | if (ret) { | 653 | if (ret) { |
644 | dev_err(&pdev->dev, | 654 | dev_dbg(&pdev->dev, |
645 | "invalid or missing value for atmel,trigger-edge-type\n"); | 655 | "atmel,trigger-edge-type not specified, only software trigger available\n"); |
646 | return ret; | ||
647 | } | 656 | } |
648 | 657 | ||
649 | st->selected_trig = NULL; | 658 | st->selected_trig = NULL; |
650 | 659 | ||
651 | for (i = 0; i < AT91_SAMA5D2_HW_TRIG_CNT; i++) | 660 | /* find the right trigger, or no trigger at all */ |
661 | for (i = 0; i < AT91_SAMA5D2_HW_TRIG_CNT + 1; i++) | ||
652 | if (at91_adc_trigger_list[i].edge_type == edge_type) { | 662 | if (at91_adc_trigger_list[i].edge_type == edge_type) { |
653 | st->selected_trig = &at91_adc_trigger_list[i]; | 663 | st->selected_trig = &at91_adc_trigger_list[i]; |
654 | break; | 664 | break; |
@@ -717,24 +727,27 @@ static int at91_adc_probe(struct platform_device *pdev) | |||
717 | 727 | ||
718 | platform_set_drvdata(pdev, indio_dev); | 728 | platform_set_drvdata(pdev, indio_dev); |
719 | 729 | ||
720 | ret = at91_adc_buffer_init(indio_dev); | 730 | if (st->selected_trig->hw_trig) { |
721 | if (ret < 0) { | 731 | ret = at91_adc_buffer_init(indio_dev); |
722 | dev_err(&pdev->dev, "couldn't initialize the buffer.\n"); | 732 | if (ret < 0) { |
723 | goto per_clk_disable_unprepare; | 733 | dev_err(&pdev->dev, "couldn't initialize the buffer.\n"); |
724 | } | 734 | goto per_clk_disable_unprepare; |
735 | } | ||
725 | 736 | ||
726 | ret = at91_adc_trigger_init(indio_dev); | 737 | ret = at91_adc_trigger_init(indio_dev); |
727 | if (ret < 0) { | 738 | if (ret < 0) { |
728 | dev_err(&pdev->dev, "couldn't setup the triggers.\n"); | 739 | dev_err(&pdev->dev, "couldn't setup the triggers.\n"); |
729 | goto per_clk_disable_unprepare; | 740 | goto per_clk_disable_unprepare; |
741 | } | ||
730 | } | 742 | } |
731 | 743 | ||
732 | ret = iio_device_register(indio_dev); | 744 | ret = iio_device_register(indio_dev); |
733 | if (ret < 0) | 745 | if (ret < 0) |
734 | goto per_clk_disable_unprepare; | 746 | goto per_clk_disable_unprepare; |
735 | 747 | ||
736 | dev_info(&pdev->dev, "setting up trigger as %s\n", | 748 | if (st->selected_trig->hw_trig) |
737 | st->selected_trig->name); | 749 | dev_info(&pdev->dev, "setting up trigger as %s\n", |
750 | st->selected_trig->name); | ||
738 | 751 | ||
739 | dev_info(&pdev->dev, "version: %x\n", | 752 | dev_info(&pdev->dev, "version: %x\n", |
740 | readl_relaxed(st->base + AT91_SAMA5D2_VERSION)); | 753 | readl_relaxed(st->base + AT91_SAMA5D2_VERSION)); |
diff --git a/drivers/iio/dummy/iio_simple_dummy_events.c b/drivers/iio/dummy/iio_simple_dummy_events.c index ed63ffd849f8..7ec2a0bb0807 100644 --- a/drivers/iio/dummy/iio_simple_dummy_events.c +++ b/drivers/iio/dummy/iio_simple_dummy_events.c | |||
@@ -72,6 +72,7 @@ int iio_simple_dummy_write_event_config(struct iio_dev *indio_dev, | |||
72 | st->event_en = state; | 72 | st->event_en = state; |
73 | else | 73 | else |
74 | return -EINVAL; | 74 | return -EINVAL; |
75 | break; | ||
75 | default: | 76 | default: |
76 | return -EINVAL; | 77 | return -EINVAL; |
77 | } | 78 | } |
diff --git a/drivers/iio/pressure/zpa2326.c b/drivers/iio/pressure/zpa2326.c index ebfb1de7377f..91431454eb85 100644 --- a/drivers/iio/pressure/zpa2326.c +++ b/drivers/iio/pressure/zpa2326.c | |||
@@ -865,7 +865,6 @@ complete: | |||
865 | static int zpa2326_wait_oneshot_completion(const struct iio_dev *indio_dev, | 865 | static int zpa2326_wait_oneshot_completion(const struct iio_dev *indio_dev, |
866 | struct zpa2326_private *private) | 866 | struct zpa2326_private *private) |
867 | { | 867 | { |
868 | int ret; | ||
869 | unsigned int val; | 868 | unsigned int val; |
870 | long timeout; | 869 | long timeout; |
871 | 870 | ||
@@ -887,14 +886,11 @@ static int zpa2326_wait_oneshot_completion(const struct iio_dev *indio_dev, | |||
887 | /* Timed out. */ | 886 | /* Timed out. */ |
888 | zpa2326_warn(indio_dev, "no one shot interrupt occurred (%ld)", | 887 | zpa2326_warn(indio_dev, "no one shot interrupt occurred (%ld)", |
889 | timeout); | 888 | timeout); |
890 | ret = -ETIME; | 889 | return -ETIME; |
891 | } else if (timeout < 0) { | ||
892 | zpa2326_warn(indio_dev, | ||
893 | "wait for one shot interrupt cancelled"); | ||
894 | ret = -ERESTARTSYS; | ||
895 | } | 890 | } |
896 | 891 | ||
897 | return ret; | 892 | zpa2326_warn(indio_dev, "wait for one shot interrupt cancelled"); |
893 | return -ERESTARTSYS; | ||
898 | } | 894 | } |
899 | 895 | ||
900 | static int zpa2326_init_managed_irq(struct device *parent, | 896 | static int zpa2326_init_managed_irq(struct device *parent, |
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c index 0eeff29b61be..4a48b7ba3a1c 100644 --- a/drivers/iio/proximity/as3935.c +++ b/drivers/iio/proximity/as3935.c | |||
@@ -39,8 +39,12 @@ | |||
39 | #define AS3935_AFE_GAIN_MAX 0x1F | 39 | #define AS3935_AFE_GAIN_MAX 0x1F |
40 | #define AS3935_AFE_PWR_BIT BIT(0) | 40 | #define AS3935_AFE_PWR_BIT BIT(0) |
41 | 41 | ||
42 | #define AS3935_NFLWDTH 0x01 | ||
43 | #define AS3935_NFLWDTH_MASK 0x7f | ||
44 | |||
42 | #define AS3935_INT 0x03 | 45 | #define AS3935_INT 0x03 |
43 | #define AS3935_INT_MASK 0x0f | 46 | #define AS3935_INT_MASK 0x0f |
47 | #define AS3935_DISTURB_INT BIT(2) | ||
44 | #define AS3935_EVENT_INT BIT(3) | 48 | #define AS3935_EVENT_INT BIT(3) |
45 | #define AS3935_NOISE_INT BIT(0) | 49 | #define AS3935_NOISE_INT BIT(0) |
46 | 50 | ||
@@ -48,6 +52,7 @@ | |||
48 | #define AS3935_DATA_MASK 0x3F | 52 | #define AS3935_DATA_MASK 0x3F |
49 | 53 | ||
50 | #define AS3935_TUNE_CAP 0x08 | 54 | #define AS3935_TUNE_CAP 0x08 |
55 | #define AS3935_DEFAULTS 0x3C | ||
51 | #define AS3935_CALIBRATE 0x3D | 56 | #define AS3935_CALIBRATE 0x3D |
52 | 57 | ||
53 | #define AS3935_READ_DATA BIT(14) | 58 | #define AS3935_READ_DATA BIT(14) |
@@ -62,7 +67,9 @@ struct as3935_state { | |||
62 | struct mutex lock; | 67 | struct mutex lock; |
63 | struct delayed_work work; | 68 | struct delayed_work work; |
64 | 69 | ||
70 | unsigned long noise_tripped; | ||
65 | u32 tune_cap; | 71 | u32 tune_cap; |
72 | u32 nflwdth_reg; | ||
66 | u8 buffer[16]; /* 8-bit data + 56-bit padding + 64-bit timestamp */ | 73 | u8 buffer[16]; /* 8-bit data + 56-bit padding + 64-bit timestamp */ |
67 | u8 buf[2] ____cacheline_aligned; | 74 | u8 buf[2] ____cacheline_aligned; |
68 | }; | 75 | }; |
@@ -145,12 +152,29 @@ static ssize_t as3935_sensor_sensitivity_store(struct device *dev, | |||
145 | return len; | 152 | return len; |
146 | } | 153 | } |
147 | 154 | ||
155 | static ssize_t as3935_noise_level_tripped_show(struct device *dev, | ||
156 | struct device_attribute *attr, | ||
157 | char *buf) | ||
158 | { | ||
159 | struct as3935_state *st = iio_priv(dev_to_iio_dev(dev)); | ||
160 | int ret; | ||
161 | |||
162 | mutex_lock(&st->lock); | ||
163 | ret = sprintf(buf, "%d\n", !time_after(jiffies, st->noise_tripped + HZ)); | ||
164 | mutex_unlock(&st->lock); | ||
165 | |||
166 | return ret; | ||
167 | } | ||
168 | |||
148 | static IIO_DEVICE_ATTR(sensor_sensitivity, S_IRUGO | S_IWUSR, | 169 | static IIO_DEVICE_ATTR(sensor_sensitivity, S_IRUGO | S_IWUSR, |
149 | as3935_sensor_sensitivity_show, as3935_sensor_sensitivity_store, 0); | 170 | as3935_sensor_sensitivity_show, as3935_sensor_sensitivity_store, 0); |
150 | 171 | ||
172 | static IIO_DEVICE_ATTR(noise_level_tripped, S_IRUGO, | ||
173 | as3935_noise_level_tripped_show, NULL, 0); | ||
151 | 174 | ||
152 | static struct attribute *as3935_attributes[] = { | 175 | static struct attribute *as3935_attributes[] = { |
153 | &iio_dev_attr_sensor_sensitivity.dev_attr.attr, | 176 | &iio_dev_attr_sensor_sensitivity.dev_attr.attr, |
177 | &iio_dev_attr_noise_level_tripped.dev_attr.attr, | ||
154 | NULL, | 178 | NULL, |
155 | }; | 179 | }; |
156 | 180 | ||
@@ -246,7 +270,11 @@ static void as3935_event_work(struct work_struct *work) | |||
246 | case AS3935_EVENT_INT: | 270 | case AS3935_EVENT_INT: |
247 | iio_trigger_poll_chained(st->trig); | 271 | iio_trigger_poll_chained(st->trig); |
248 | break; | 272 | break; |
273 | case AS3935_DISTURB_INT: | ||
249 | case AS3935_NOISE_INT: | 274 | case AS3935_NOISE_INT: |
275 | mutex_lock(&st->lock); | ||
276 | st->noise_tripped = jiffies; | ||
277 | mutex_unlock(&st->lock); | ||
250 | dev_warn(&st->spi->dev, "noise level is too high\n"); | 278 | dev_warn(&st->spi->dev, "noise level is too high\n"); |
251 | break; | 279 | break; |
252 | } | 280 | } |
@@ -269,15 +297,14 @@ static irqreturn_t as3935_interrupt_handler(int irq, void *private) | |||
269 | 297 | ||
270 | static void calibrate_as3935(struct as3935_state *st) | 298 | static void calibrate_as3935(struct as3935_state *st) |
271 | { | 299 | { |
272 | /* mask disturber interrupt bit */ | 300 | as3935_write(st, AS3935_DEFAULTS, 0x96); |
273 | as3935_write(st, AS3935_INT, BIT(5)); | ||
274 | |||
275 | as3935_write(st, AS3935_CALIBRATE, 0x96); | 301 | as3935_write(st, AS3935_CALIBRATE, 0x96); |
276 | as3935_write(st, AS3935_TUNE_CAP, | 302 | as3935_write(st, AS3935_TUNE_CAP, |
277 | BIT(5) | (st->tune_cap / TUNE_CAP_DIV)); | 303 | BIT(5) | (st->tune_cap / TUNE_CAP_DIV)); |
278 | 304 | ||
279 | mdelay(2); | 305 | mdelay(2); |
280 | as3935_write(st, AS3935_TUNE_CAP, (st->tune_cap / TUNE_CAP_DIV)); | 306 | as3935_write(st, AS3935_TUNE_CAP, (st->tune_cap / TUNE_CAP_DIV)); |
307 | as3935_write(st, AS3935_NFLWDTH, st->nflwdth_reg); | ||
281 | } | 308 | } |
282 | 309 | ||
283 | #ifdef CONFIG_PM_SLEEP | 310 | #ifdef CONFIG_PM_SLEEP |
@@ -370,6 +397,15 @@ static int as3935_probe(struct spi_device *spi) | |||
370 | return -EINVAL; | 397 | return -EINVAL; |
371 | } | 398 | } |
372 | 399 | ||
400 | ret = of_property_read_u32(np, | ||
401 | "ams,nflwdth", &st->nflwdth_reg); | ||
402 | if (!ret && st->nflwdth_reg > AS3935_NFLWDTH_MASK) { | ||
403 | dev_err(&spi->dev, | ||
404 | "invalid nflwdth setting of %d\n", | ||
405 | st->nflwdth_reg); | ||
406 | return -EINVAL; | ||
407 | } | ||
408 | |||
373 | indio_dev->dev.parent = &spi->dev; | 409 | indio_dev->dev.parent = &spi->dev; |
374 | indio_dev->name = spi_get_device_id(spi)->name; | 410 | indio_dev->name = spi_get_device_id(spi)->name; |
375 | indio_dev->channels = as3935_channels; | 411 | indio_dev->channels = as3935_channels; |
@@ -384,6 +420,7 @@ static int as3935_probe(struct spi_device *spi) | |||
384 | return -ENOMEM; | 420 | return -ENOMEM; |
385 | 421 | ||
386 | st->trig = trig; | 422 | st->trig = trig; |
423 | st->noise_tripped = jiffies - HZ; | ||
387 | trig->dev.parent = indio_dev->dev.parent; | 424 | trig->dev.parent = indio_dev->dev.parent; |
388 | iio_trigger_set_drvdata(trig, indio_dev); | 425 | iio_trigger_set_drvdata(trig, indio_dev); |
389 | trig->ops = &iio_interrupt_trigger_ops; | 426 | trig->ops = &iio_interrupt_trigger_ops; |
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c index b12e58787c3d..1fb72c356e36 100644 --- a/drivers/infiniband/core/netlink.c +++ b/drivers/infiniband/core/netlink.c | |||
@@ -175,13 +175,24 @@ static int rdma_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
175 | !netlink_capable(skb, CAP_NET_ADMIN)) | 175 | !netlink_capable(skb, CAP_NET_ADMIN)) |
176 | return -EPERM; | 176 | return -EPERM; |
177 | 177 | ||
178 | /* | ||
179 | * LS responses overload the 0x100 (NLM_F_ROOT) flag. Don't | ||
180 | * mistakenly call the .dump() function. | ||
181 | */ | ||
182 | if (index == RDMA_NL_LS) { | ||
183 | if (cb_table[op].doit) | ||
184 | return cb_table[op].doit(skb, nlh, extack); | ||
185 | return -EINVAL; | ||
186 | } | ||
178 | /* FIXME: Convert IWCM to properly handle doit callbacks */ | 187 | /* FIXME: Convert IWCM to properly handle doit callbacks */ |
179 | if ((nlh->nlmsg_flags & NLM_F_DUMP) || index == RDMA_NL_RDMA_CM || | 188 | if ((nlh->nlmsg_flags & NLM_F_DUMP) || index == RDMA_NL_RDMA_CM || |
180 | index == RDMA_NL_IWCM) { | 189 | index == RDMA_NL_IWCM) { |
181 | struct netlink_dump_control c = { | 190 | struct netlink_dump_control c = { |
182 | .dump = cb_table[op].dump, | 191 | .dump = cb_table[op].dump, |
183 | }; | 192 | }; |
184 | return netlink_dump_start(nls, skb, nlh, &c); | 193 | if (c.dump) |
194 | return netlink_dump_start(nls, skb, nlh, &c); | ||
195 | return -EINVAL; | ||
185 | } | 196 | } |
186 | 197 | ||
187 | if (cb_table[op].doit) | 198 | if (cb_table[op].doit) |
diff --git a/drivers/input/input.c b/drivers/input/input.c index d268fdc23c64..762bfb9487dc 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c | |||
@@ -933,58 +933,52 @@ int input_set_keycode(struct input_dev *dev, | |||
933 | } | 933 | } |
934 | EXPORT_SYMBOL(input_set_keycode); | 934 | EXPORT_SYMBOL(input_set_keycode); |
935 | 935 | ||
936 | bool input_match_device_id(const struct input_dev *dev, | ||
937 | const struct input_device_id *id) | ||
938 | { | ||
939 | if (id->flags & INPUT_DEVICE_ID_MATCH_BUS) | ||
940 | if (id->bustype != dev->id.bustype) | ||
941 | return false; | ||
942 | |||
943 | if (id->flags & INPUT_DEVICE_ID_MATCH_VENDOR) | ||
944 | if (id->vendor != dev->id.vendor) | ||
945 | return false; | ||
946 | |||
947 | if (id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT) | ||
948 | if (id->product != dev->id.product) | ||
949 | return false; | ||
950 | |||
951 | if (id->flags & INPUT_DEVICE_ID_MATCH_VERSION) | ||
952 | if (id->version != dev->id.version) | ||
953 | return false; | ||
954 | |||
955 | if (!bitmap_subset(id->evbit, dev->evbit, EV_MAX) || | ||
956 | !bitmap_subset(id->keybit, dev->keybit, KEY_MAX) || | ||
957 | !bitmap_subset(id->relbit, dev->relbit, REL_MAX) || | ||
958 | !bitmap_subset(id->absbit, dev->absbit, ABS_MAX) || | ||
959 | !bitmap_subset(id->mscbit, dev->mscbit, MSC_MAX) || | ||
960 | !bitmap_subset(id->ledbit, dev->ledbit, LED_MAX) || | ||
961 | !bitmap_subset(id->sndbit, dev->sndbit, SND_MAX) || | ||
962 | !bitmap_subset(id->ffbit, dev->ffbit, FF_MAX) || | ||
963 | !bitmap_subset(id->swbit, dev->swbit, SW_MAX) || | ||
964 | !bitmap_subset(id->propbit, dev->propbit, INPUT_PROP_MAX)) { | ||
965 | return false; | ||
966 | } | ||
967 | |||
968 | return true; | ||
969 | } | ||
970 | EXPORT_SYMBOL(input_match_device_id); | ||
971 | |||
936 | static const struct input_device_id *input_match_device(struct input_handler *handler, | 972 | static const struct input_device_id *input_match_device(struct input_handler *handler, |
937 | struct input_dev *dev) | 973 | struct input_dev *dev) |
938 | { | 974 | { |
939 | const struct input_device_id *id; | 975 | const struct input_device_id *id; |
940 | 976 | ||
941 | for (id = handler->id_table; id->flags || id->driver_info; id++) { | 977 | for (id = handler->id_table; id->flags || id->driver_info; id++) { |
942 | 978 | if (input_match_device_id(dev, id) && | |
943 | if (id->flags & INPUT_DEVICE_ID_MATCH_BUS) | 979 | (!handler->match || handler->match(handler, dev))) { |
944 | if (id->bustype != dev->id.bustype) | ||
945 | continue; | ||
946 | |||
947 | if (id->flags & INPUT_DEVICE_ID_MATCH_VENDOR) | ||
948 | if (id->vendor != dev->id.vendor) | ||
949 | continue; | ||
950 | |||
951 | if (id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT) | ||
952 | if (id->product != dev->id.product) | ||
953 | continue; | ||
954 | |||
955 | if (id->flags & INPUT_DEVICE_ID_MATCH_VERSION) | ||
956 | if (id->version != dev->id.version) | ||
957 | continue; | ||
958 | |||
959 | if (!bitmap_subset(id->evbit, dev->evbit, EV_MAX)) | ||
960 | continue; | ||
961 | |||
962 | if (!bitmap_subset(id->keybit, dev->keybit, KEY_MAX)) | ||
963 | continue; | ||
964 | |||
965 | if (!bitmap_subset(id->relbit, dev->relbit, REL_MAX)) | ||
966 | continue; | ||
967 | |||
968 | if (!bitmap_subset(id->absbit, dev->absbit, ABS_MAX)) | ||
969 | continue; | ||
970 | |||
971 | if (!bitmap_subset(id->mscbit, dev->mscbit, MSC_MAX)) | ||
972 | continue; | ||
973 | |||
974 | if (!bitmap_subset(id->ledbit, dev->ledbit, LED_MAX)) | ||
975 | continue; | ||
976 | |||
977 | if (!bitmap_subset(id->sndbit, dev->sndbit, SND_MAX)) | ||
978 | continue; | ||
979 | |||
980 | if (!bitmap_subset(id->ffbit, dev->ffbit, FF_MAX)) | ||
981 | continue; | ||
982 | |||
983 | if (!bitmap_subset(id->swbit, dev->swbit, SW_MAX)) | ||
984 | continue; | ||
985 | |||
986 | if (!handler->match || handler->match(handler, dev)) | ||
987 | return id; | 980 | return id; |
981 | } | ||
988 | } | 982 | } |
989 | 983 | ||
990 | return NULL; | 984 | return NULL; |
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c index 29d677c714d2..7b29a8944039 100644 --- a/drivers/input/joydev.c +++ b/drivers/input/joydev.c | |||
@@ -747,6 +747,68 @@ static void joydev_cleanup(struct joydev *joydev) | |||
747 | input_close_device(handle); | 747 | input_close_device(handle); |
748 | } | 748 | } |
749 | 749 | ||
750 | /* | ||
751 | * These codes are copied from from hid-ids.h, unfortunately there is no common | ||
752 | * usb_ids/bt_ids.h header. | ||
753 | */ | ||
754 | #define USB_VENDOR_ID_SONY 0x054c | ||
755 | #define USB_DEVICE_ID_SONY_PS3_CONTROLLER 0x0268 | ||
756 | #define USB_DEVICE_ID_SONY_PS4_CONTROLLER 0x05c4 | ||
757 | #define USB_DEVICE_ID_SONY_PS4_CONTROLLER_2 0x09cc | ||
758 | #define USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE 0x0ba0 | ||
759 | |||
760 | #define USB_VENDOR_ID_THQ 0x20d6 | ||
761 | #define USB_DEVICE_ID_THQ_PS3_UDRAW 0xcb17 | ||
762 | |||
763 | #define ACCEL_DEV(vnd, prd) \ | ||
764 | { \ | ||
765 | .flags = INPUT_DEVICE_ID_MATCH_VENDOR | \ | ||
766 | INPUT_DEVICE_ID_MATCH_PRODUCT | \ | ||
767 | INPUT_DEVICE_ID_MATCH_PROPBIT, \ | ||
768 | .vendor = (vnd), \ | ||
769 | .product = (prd), \ | ||
770 | .propbit = { BIT_MASK(INPUT_PROP_ACCELEROMETER) }, \ | ||
771 | } | ||
772 | |||
773 | static const struct input_device_id joydev_blacklist[] = { | ||
774 | /* Avoid touchpads and touchscreens */ | ||
775 | { | ||
776 | .flags = INPUT_DEVICE_ID_MATCH_EVBIT | | ||
777 | INPUT_DEVICE_ID_MATCH_KEYBIT, | ||
778 | .evbit = { BIT_MASK(EV_KEY) }, | ||
779 | .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) }, | ||
780 | }, | ||
781 | /* Avoid tablets, digitisers and similar devices */ | ||
782 | { | ||
783 | .flags = INPUT_DEVICE_ID_MATCH_EVBIT | | ||
784 | INPUT_DEVICE_ID_MATCH_KEYBIT, | ||
785 | .evbit = { BIT_MASK(EV_KEY) }, | ||
786 | .keybit = { [BIT_WORD(BTN_DIGI)] = BIT_MASK(BTN_DIGI) }, | ||
787 | }, | ||
788 | /* Disable accelerometers on composite devices */ | ||
789 | ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER), | ||
790 | ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER), | ||
791 | ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2), | ||
792 | ACCEL_DEV(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE), | ||
793 | ACCEL_DEV(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW), | ||
794 | { /* sentinel */ } | ||
795 | }; | ||
796 | |||
797 | static bool joydev_dev_is_blacklisted(struct input_dev *dev) | ||
798 | { | ||
799 | const struct input_device_id *id; | ||
800 | |||
801 | for (id = joydev_blacklist; id->flags; id++) { | ||
802 | if (input_match_device_id(dev, id)) { | ||
803 | dev_dbg(&dev->dev, | ||
804 | "joydev: blacklisting '%s'\n", dev->name); | ||
805 | return true; | ||
806 | } | ||
807 | } | ||
808 | |||
809 | return false; | ||
810 | } | ||
811 | |||
750 | static bool joydev_dev_is_absolute_mouse(struct input_dev *dev) | 812 | static bool joydev_dev_is_absolute_mouse(struct input_dev *dev) |
751 | { | 813 | { |
752 | DECLARE_BITMAP(jd_scratch, KEY_CNT); | 814 | DECLARE_BITMAP(jd_scratch, KEY_CNT); |
@@ -807,12 +869,8 @@ static bool joydev_dev_is_absolute_mouse(struct input_dev *dev) | |||
807 | 869 | ||
808 | static bool joydev_match(struct input_handler *handler, struct input_dev *dev) | 870 | static bool joydev_match(struct input_handler *handler, struct input_dev *dev) |
809 | { | 871 | { |
810 | /* Avoid touchpads and touchscreens */ | 872 | /* Disable blacklisted devices */ |
811 | if (test_bit(EV_KEY, dev->evbit) && test_bit(BTN_TOUCH, dev->keybit)) | 873 | if (joydev_dev_is_blacklisted(dev)) |
812 | return false; | ||
813 | |||
814 | /* Avoid tablets, digitisers and similar devices */ | ||
815 | if (test_bit(EV_KEY, dev->evbit) && test_bit(BTN_DIGI, dev->keybit)) | ||
816 | return false; | 874 | return false; |
817 | 875 | ||
818 | /* Avoid absolute mice */ | 876 | /* Avoid absolute mice */ |
diff --git a/drivers/input/keyboard/tca8418_keypad.c b/drivers/input/keyboard/tca8418_keypad.c index e37e335e406f..6da607d3b811 100644 --- a/drivers/input/keyboard/tca8418_keypad.c +++ b/drivers/input/keyboard/tca8418_keypad.c | |||
@@ -234,14 +234,7 @@ static irqreturn_t tca8418_irq_handler(int irq, void *dev_id) | |||
234 | static int tca8418_configure(struct tca8418_keypad *keypad_data, | 234 | static int tca8418_configure(struct tca8418_keypad *keypad_data, |
235 | u32 rows, u32 cols) | 235 | u32 rows, u32 cols) |
236 | { | 236 | { |
237 | int reg, error; | 237 | int reg, error = 0; |
238 | |||
239 | /* Write config register, if this fails assume device not present */ | ||
240 | error = tca8418_write_byte(keypad_data, REG_CFG, | ||
241 | CFG_INT_CFG | CFG_OVR_FLOW_IEN | CFG_KE_IEN); | ||
242 | if (error < 0) | ||
243 | return -ENODEV; | ||
244 | |||
245 | 238 | ||
246 | /* Assemble a mask for row and column registers */ | 239 | /* Assemble a mask for row and column registers */ |
247 | reg = ~(~0 << rows); | 240 | reg = ~(~0 << rows); |
@@ -257,6 +250,12 @@ static int tca8418_configure(struct tca8418_keypad *keypad_data, | |||
257 | error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS2, reg >> 8); | 250 | error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS2, reg >> 8); |
258 | error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS3, reg >> 16); | 251 | error |= tca8418_write_byte(keypad_data, REG_DEBOUNCE_DIS3, reg >> 16); |
259 | 252 | ||
253 | if (error) | ||
254 | return error; | ||
255 | |||
256 | error = tca8418_write_byte(keypad_data, REG_CFG, | ||
257 | CFG_INT_CFG | CFG_OVR_FLOW_IEN | CFG_KE_IEN); | ||
258 | |||
260 | return error; | 259 | return error; |
261 | } | 260 | } |
262 | 261 | ||
@@ -268,6 +267,7 @@ static int tca8418_keypad_probe(struct i2c_client *client, | |||
268 | struct input_dev *input; | 267 | struct input_dev *input; |
269 | u32 rows = 0, cols = 0; | 268 | u32 rows = 0, cols = 0; |
270 | int error, row_shift, max_keys; | 269 | int error, row_shift, max_keys; |
270 | u8 reg; | ||
271 | 271 | ||
272 | /* Check i2c driver capabilities */ | 272 | /* Check i2c driver capabilities */ |
273 | if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) { | 273 | if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE)) { |
@@ -301,10 +301,10 @@ static int tca8418_keypad_probe(struct i2c_client *client, | |||
301 | keypad_data->client = client; | 301 | keypad_data->client = client; |
302 | keypad_data->row_shift = row_shift; | 302 | keypad_data->row_shift = row_shift; |
303 | 303 | ||
304 | /* Initialize the chip or fail if chip isn't present */ | 304 | /* Read key lock register, if this fails assume device not present */ |
305 | error = tca8418_configure(keypad_data, rows, cols); | 305 | error = tca8418_read_byte(keypad_data, REG_KEY_LCK_EC, ®); |
306 | if (error < 0) | 306 | if (error) |
307 | return error; | 307 | return -ENODEV; |
308 | 308 | ||
309 | /* Configure input device */ | 309 | /* Configure input device */ |
310 | input = devm_input_allocate_device(dev); | 310 | input = devm_input_allocate_device(dev); |
@@ -340,6 +340,11 @@ static int tca8418_keypad_probe(struct i2c_client *client, | |||
340 | return error; | 340 | return error; |
341 | } | 341 | } |
342 | 342 | ||
343 | /* Initialize the chip */ | ||
344 | error = tca8418_configure(keypad_data, rows, cols); | ||
345 | if (error < 0) | ||
346 | return error; | ||
347 | |||
343 | error = input_register_device(input); | 348 | error = input_register_device(input); |
344 | if (error) { | 349 | if (error) { |
345 | dev_err(dev, "Unable to register input device, error: %d\n", | 350 | dev_err(dev, "Unable to register input device, error: %d\n", |
diff --git a/drivers/input/misc/axp20x-pek.c b/drivers/input/misc/axp20x-pek.c index 6cee5adc3b5c..debeeaeb8812 100644 --- a/drivers/input/misc/axp20x-pek.c +++ b/drivers/input/misc/axp20x-pek.c | |||
@@ -403,6 +403,7 @@ static const struct platform_device_id axp_pek_id_match[] = { | |||
403 | }, | 403 | }, |
404 | { /* sentinel */ } | 404 | { /* sentinel */ } |
405 | }; | 405 | }; |
406 | MODULE_DEVICE_TABLE(platform, axp_pek_id_match); | ||
406 | 407 | ||
407 | static struct platform_driver axp20x_pek_driver = { | 408 | static struct platform_driver axp20x_pek_driver = { |
408 | .probe = axp20x_pek_probe, | 409 | .probe = axp20x_pek_probe, |
@@ -417,4 +418,3 @@ module_platform_driver(axp20x_pek_driver); | |||
417 | MODULE_DESCRIPTION("axp20x Power Button"); | 418 | MODULE_DESCRIPTION("axp20x Power Button"); |
418 | MODULE_AUTHOR("Carlo Caione <carlo@caione.org>"); | 419 | MODULE_AUTHOR("Carlo Caione <carlo@caione.org>"); |
419 | MODULE_LICENSE("GPL"); | 420 | MODULE_LICENSE("GPL"); |
420 | MODULE_ALIAS("platform:axp20x-pek"); | ||
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c index 6bf82ea8c918..ae473123583b 100644 --- a/drivers/input/misc/ims-pcu.c +++ b/drivers/input/misc/ims-pcu.c | |||
@@ -1635,13 +1635,25 @@ ims_pcu_get_cdc_union_desc(struct usb_interface *intf) | |||
1635 | return NULL; | 1635 | return NULL; |
1636 | } | 1636 | } |
1637 | 1637 | ||
1638 | while (buflen > 0) { | 1638 | while (buflen >= sizeof(*union_desc)) { |
1639 | union_desc = (struct usb_cdc_union_desc *)buf; | 1639 | union_desc = (struct usb_cdc_union_desc *)buf; |
1640 | 1640 | ||
1641 | if (union_desc->bLength > buflen) { | ||
1642 | dev_err(&intf->dev, "Too large descriptor\n"); | ||
1643 | return NULL; | ||
1644 | } | ||
1645 | |||
1641 | if (union_desc->bDescriptorType == USB_DT_CS_INTERFACE && | 1646 | if (union_desc->bDescriptorType == USB_DT_CS_INTERFACE && |
1642 | union_desc->bDescriptorSubType == USB_CDC_UNION_TYPE) { | 1647 | union_desc->bDescriptorSubType == USB_CDC_UNION_TYPE) { |
1643 | dev_dbg(&intf->dev, "Found union header\n"); | 1648 | dev_dbg(&intf->dev, "Found union header\n"); |
1644 | return union_desc; | 1649 | |
1650 | if (union_desc->bLength >= sizeof(*union_desc)) | ||
1651 | return union_desc; | ||
1652 | |||
1653 | dev_err(&intf->dev, | ||
1654 | "Union descriptor to short (%d vs %zd\n)", | ||
1655 | union_desc->bLength, sizeof(*union_desc)); | ||
1656 | return NULL; | ||
1645 | } | 1657 | } |
1646 | 1658 | ||
1647 | buflen -= union_desc->bLength; | 1659 | buflen -= union_desc->bLength; |
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c index 0e761d079dc4..6d6b092e2da9 100644 --- a/drivers/input/mouse/elan_i2c_core.c +++ b/drivers/input/mouse/elan_i2c_core.c | |||
@@ -1258,6 +1258,7 @@ static const struct acpi_device_id elan_acpi_id[] = { | |||
1258 | { "ELAN0605", 0 }, | 1258 | { "ELAN0605", 0 }, |
1259 | { "ELAN0609", 0 }, | 1259 | { "ELAN0609", 0 }, |
1260 | { "ELAN060B", 0 }, | 1260 | { "ELAN060B", 0 }, |
1261 | { "ELAN0611", 0 }, | ||
1261 | { "ELAN1000", 0 }, | 1262 | { "ELAN1000", 0 }, |
1262 | { } | 1263 | { } |
1263 | }; | 1264 | }; |
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 5af0b7d200bc..ee5466a374bf 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c | |||
@@ -1709,8 +1709,7 @@ static int synaptics_create_intertouch(struct psmouse *psmouse, | |||
1709 | .sensor_pdata = { | 1709 | .sensor_pdata = { |
1710 | .sensor_type = rmi_sensor_touchpad, | 1710 | .sensor_type = rmi_sensor_touchpad, |
1711 | .axis_align.flip_y = true, | 1711 | .axis_align.flip_y = true, |
1712 | /* to prevent cursors jumps: */ | 1712 | .kernel_tracking = false, |
1713 | .kernel_tracking = true, | ||
1714 | .topbuttonpad = topbuttonpad, | 1713 | .topbuttonpad = topbuttonpad, |
1715 | }, | 1714 | }, |
1716 | .f30_data = { | 1715 | .f30_data = { |
diff --git a/drivers/input/rmi4/rmi_f30.c b/drivers/input/rmi4/rmi_f30.c index 34dfee555b20..82e0f0d43d55 100644 --- a/drivers/input/rmi4/rmi_f30.c +++ b/drivers/input/rmi4/rmi_f30.c | |||
@@ -232,9 +232,10 @@ static int rmi_f30_map_gpios(struct rmi_function *fn, | |||
232 | unsigned int trackstick_button = BTN_LEFT; | 232 | unsigned int trackstick_button = BTN_LEFT; |
233 | bool button_mapped = false; | 233 | bool button_mapped = false; |
234 | int i; | 234 | int i; |
235 | int button_count = min_t(u8, f30->gpioled_count, TRACKSTICK_RANGE_END); | ||
235 | 236 | ||
236 | f30->gpioled_key_map = devm_kcalloc(&fn->dev, | 237 | f30->gpioled_key_map = devm_kcalloc(&fn->dev, |
237 | f30->gpioled_count, | 238 | button_count, |
238 | sizeof(f30->gpioled_key_map[0]), | 239 | sizeof(f30->gpioled_key_map[0]), |
239 | GFP_KERNEL); | 240 | GFP_KERNEL); |
240 | if (!f30->gpioled_key_map) { | 241 | if (!f30->gpioled_key_map) { |
@@ -242,7 +243,7 @@ static int rmi_f30_map_gpios(struct rmi_function *fn, | |||
242 | return -ENOMEM; | 243 | return -ENOMEM; |
243 | } | 244 | } |
244 | 245 | ||
245 | for (i = 0; i < f30->gpioled_count; i++) { | 246 | for (i = 0; i < button_count; i++) { |
246 | if (!rmi_f30_is_valid_button(i, f30->ctrl)) | 247 | if (!rmi_f30_is_valid_button(i, f30->ctrl)) |
247 | continue; | 248 | continue; |
248 | 249 | ||
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c index b796e891e2ee..4b8b9d7aa75e 100644 --- a/drivers/input/tablet/gtco.c +++ b/drivers/input/tablet/gtco.c | |||
@@ -230,13 +230,17 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report, | |||
230 | 230 | ||
231 | /* Walk this report and pull out the info we need */ | 231 | /* Walk this report and pull out the info we need */ |
232 | while (i < length) { | 232 | while (i < length) { |
233 | prefix = report[i]; | 233 | prefix = report[i++]; |
234 | |||
235 | /* Skip over prefix */ | ||
236 | i++; | ||
237 | 234 | ||
238 | /* Determine data size and save the data in the proper variable */ | 235 | /* Determine data size and save the data in the proper variable */ |
239 | size = PREF_SIZE(prefix); | 236 | size = (1U << PREF_SIZE(prefix)) >> 1; |
237 | if (i + size > length) { | ||
238 | dev_err(ddev, | ||
239 | "Not enough data (need %d, have %d)\n", | ||
240 | i + size, length); | ||
241 | break; | ||
242 | } | ||
243 | |||
240 | switch (size) { | 244 | switch (size) { |
241 | case 1: | 245 | case 1: |
242 | data = report[i]; | 246 | data = report[i]; |
@@ -244,8 +248,7 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report, | |||
244 | case 2: | 248 | case 2: |
245 | data16 = get_unaligned_le16(&report[i]); | 249 | data16 = get_unaligned_le16(&report[i]); |
246 | break; | 250 | break; |
247 | case 3: | 251 | case 4: |
248 | size = 4; | ||
249 | data32 = get_unaligned_le32(&report[i]); | 252 | data32 = get_unaligned_le32(&report[i]); |
250 | break; | 253 | break; |
251 | } | 254 | } |
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c index 32d2762448aa..b3bbad7d2282 100644 --- a/drivers/input/touchscreen/goodix.c +++ b/drivers/input/touchscreen/goodix.c | |||
@@ -72,6 +72,9 @@ struct goodix_ts_data { | |||
72 | #define GOODIX_REG_CONFIG_DATA 0x8047 | 72 | #define GOODIX_REG_CONFIG_DATA 0x8047 |
73 | #define GOODIX_REG_ID 0x8140 | 73 | #define GOODIX_REG_ID 0x8140 |
74 | 74 | ||
75 | #define GOODIX_BUFFER_STATUS_READY BIT(7) | ||
76 | #define GOODIX_BUFFER_STATUS_TIMEOUT 20 | ||
77 | |||
75 | #define RESOLUTION_LOC 1 | 78 | #define RESOLUTION_LOC 1 |
76 | #define MAX_CONTACTS_LOC 5 | 79 | #define MAX_CONTACTS_LOC 5 |
77 | #define TRIGGER_LOC 6 | 80 | #define TRIGGER_LOC 6 |
@@ -195,35 +198,53 @@ static int goodix_get_cfg_len(u16 id) | |||
195 | 198 | ||
196 | static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data) | 199 | static int goodix_ts_read_input_report(struct goodix_ts_data *ts, u8 *data) |
197 | { | 200 | { |
201 | unsigned long max_timeout; | ||
198 | int touch_num; | 202 | int touch_num; |
199 | int error; | 203 | int error; |
200 | 204 | ||
201 | error = goodix_i2c_read(ts->client, GOODIX_READ_COOR_ADDR, data, | 205 | /* |
202 | GOODIX_CONTACT_SIZE + 1); | 206 | * The 'buffer status' bit, which indicates that the data is valid, is |
203 | if (error) { | 207 | * not set as soon as the interrupt is raised, but slightly after. |
204 | dev_err(&ts->client->dev, "I2C transfer error: %d\n", error); | 208 | * This takes around 10 ms to happen, so we poll for 20 ms. |
205 | return error; | 209 | */ |
206 | } | 210 | max_timeout = jiffies + msecs_to_jiffies(GOODIX_BUFFER_STATUS_TIMEOUT); |
211 | do { | ||
212 | error = goodix_i2c_read(ts->client, GOODIX_READ_COOR_ADDR, | ||
213 | data, GOODIX_CONTACT_SIZE + 1); | ||
214 | if (error) { | ||
215 | dev_err(&ts->client->dev, "I2C transfer error: %d\n", | ||
216 | error); | ||
217 | return error; | ||
218 | } | ||
207 | 219 | ||
208 | if (!(data[0] & 0x80)) | 220 | if (data[0] & GOODIX_BUFFER_STATUS_READY) { |
209 | return -EAGAIN; | 221 | touch_num = data[0] & 0x0f; |
222 | if (touch_num > ts->max_touch_num) | ||
223 | return -EPROTO; | ||
224 | |||
225 | if (touch_num > 1) { | ||
226 | data += 1 + GOODIX_CONTACT_SIZE; | ||
227 | error = goodix_i2c_read(ts->client, | ||
228 | GOODIX_READ_COOR_ADDR + | ||
229 | 1 + GOODIX_CONTACT_SIZE, | ||
230 | data, | ||
231 | GOODIX_CONTACT_SIZE * | ||
232 | (touch_num - 1)); | ||
233 | if (error) | ||
234 | return error; | ||
235 | } | ||
236 | |||
237 | return touch_num; | ||
238 | } | ||
210 | 239 | ||
211 | touch_num = data[0] & 0x0f; | 240 | usleep_range(1000, 2000); /* Poll every 1 - 2 ms */ |
212 | if (touch_num > ts->max_touch_num) | 241 | } while (time_before(jiffies, max_timeout)); |
213 | return -EPROTO; | ||
214 | |||
215 | if (touch_num > 1) { | ||
216 | data += 1 + GOODIX_CONTACT_SIZE; | ||
217 | error = goodix_i2c_read(ts->client, | ||
218 | GOODIX_READ_COOR_ADDR + | ||
219 | 1 + GOODIX_CONTACT_SIZE, | ||
220 | data, | ||
221 | GOODIX_CONTACT_SIZE * (touch_num - 1)); | ||
222 | if (error) | ||
223 | return error; | ||
224 | } | ||
225 | 242 | ||
226 | return touch_num; | 243 | /* |
244 | * The Goodix panel will send spurious interrupts after a | ||
245 | * 'finger up' event, which will always cause a timeout. | ||
246 | */ | ||
247 | return 0; | ||
227 | } | 248 | } |
228 | 249 | ||
229 | static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data) | 250 | static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data) |
diff --git a/drivers/input/touchscreen/stmfts.c b/drivers/input/touchscreen/stmfts.c index 157fdb4bb2e8..8c6c6178ec12 100644 --- a/drivers/input/touchscreen/stmfts.c +++ b/drivers/input/touchscreen/stmfts.c | |||
@@ -663,12 +663,10 @@ static int stmfts_probe(struct i2c_client *client, | |||
663 | sdata->input->open = stmfts_input_open; | 663 | sdata->input->open = stmfts_input_open; |
664 | sdata->input->close = stmfts_input_close; | 664 | sdata->input->close = stmfts_input_close; |
665 | 665 | ||
666 | input_set_capability(sdata->input, EV_ABS, ABS_MT_POSITION_X); | ||
667 | input_set_capability(sdata->input, EV_ABS, ABS_MT_POSITION_Y); | ||
666 | touchscreen_parse_properties(sdata->input, true, &sdata->prop); | 668 | touchscreen_parse_properties(sdata->input, true, &sdata->prop); |
667 | 669 | ||
668 | input_set_abs_params(sdata->input, ABS_MT_POSITION_X, 0, | ||
669 | sdata->prop.max_x, 0, 0); | ||
670 | input_set_abs_params(sdata->input, ABS_MT_POSITION_Y, 0, | ||
671 | sdata->prop.max_y, 0, 0); | ||
672 | input_set_abs_params(sdata->input, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0); | 670 | input_set_abs_params(sdata->input, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0); |
673 | input_set_abs_params(sdata->input, ABS_MT_TOUCH_MINOR, 0, 255, 0, 0); | 671 | input_set_abs_params(sdata->input, ABS_MT_TOUCH_MINOR, 0, 255, 0, 0); |
674 | input_set_abs_params(sdata->input, ABS_MT_ORIENTATION, 0, 255, 0, 0); | 672 | input_set_abs_params(sdata->input, ABS_MT_ORIENTATION, 0, 255, 0, 0); |
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c index 7953381d939a..f1043ae71dcc 100644 --- a/drivers/input/touchscreen/ti_am335x_tsc.c +++ b/drivers/input/touchscreen/ti_am335x_tsc.c | |||
@@ -161,7 +161,7 @@ static void titsc_step_config(struct titsc *ts_dev) | |||
161 | break; | 161 | break; |
162 | case 5: | 162 | case 5: |
163 | config |= ts_dev->bit_xp | STEPCONFIG_INP_AN4 | | 163 | config |= ts_dev->bit_xp | STEPCONFIG_INP_AN4 | |
164 | ts_dev->bit_xn | ts_dev->bit_yp; | 164 | STEPCONFIG_XNP | STEPCONFIG_YPN; |
165 | break; | 165 | break; |
166 | case 8: | 166 | case 8: |
167 | config |= ts_dev->bit_yp | STEPCONFIG_INP(ts_dev->inp_xp); | 167 | config |= ts_dev->bit_yp | STEPCONFIG_INP(ts_dev->inp_xp); |
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 51f8215877f5..8e8874d23717 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c | |||
@@ -2773,14 +2773,16 @@ int __init amd_iommu_init_api(void) | |||
2773 | 2773 | ||
2774 | int __init amd_iommu_init_dma_ops(void) | 2774 | int __init amd_iommu_init_dma_ops(void) |
2775 | { | 2775 | { |
2776 | swiotlb = iommu_pass_through ? 1 : 0; | 2776 | swiotlb = (iommu_pass_through || sme_me_mask) ? 1 : 0; |
2777 | iommu_detected = 1; | 2777 | iommu_detected = 1; |
2778 | 2778 | ||
2779 | /* | 2779 | /* |
2780 | * In case we don't initialize SWIOTLB (actually the common case | 2780 | * In case we don't initialize SWIOTLB (actually the common case |
2781 | * when AMD IOMMU is enabled), make sure there are global | 2781 | * when AMD IOMMU is enabled and SME is not active), make sure there |
2782 | * dma_ops set as a fall-back for devices not handled by this | 2782 | * are global dma_ops set as a fall-back for devices not handled by |
2783 | * driver (for example non-PCI devices). | 2783 | * this driver (for example non-PCI devices). When SME is active, |
2784 | * make sure that swiotlb variable remains set so the global dma_ops | ||
2785 | * continue to be SWIOTLB. | ||
2784 | */ | 2786 | */ |
2785 | if (!swiotlb) | 2787 | if (!swiotlb) |
2786 | dma_ops = &nommu_dma_ops; | 2788 | dma_ops = &nommu_dma_ops; |
@@ -3046,6 +3048,7 @@ static size_t amd_iommu_unmap(struct iommu_domain *dom, unsigned long iova, | |||
3046 | mutex_unlock(&domain->api_lock); | 3048 | mutex_unlock(&domain->api_lock); |
3047 | 3049 | ||
3048 | domain_flush_tlb_pde(domain); | 3050 | domain_flush_tlb_pde(domain); |
3051 | domain_flush_complete(domain); | ||
3049 | 3052 | ||
3050 | return unmap_size; | 3053 | return unmap_size; |
3051 | } | 3054 | } |
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index f596fcc32898..25c2c75f5332 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c | |||
@@ -709,7 +709,7 @@ static const struct dev_pm_ops sysmmu_pm_ops = { | |||
709 | pm_runtime_force_resume) | 709 | pm_runtime_force_resume) |
710 | }; | 710 | }; |
711 | 711 | ||
712 | static const struct of_device_id sysmmu_of_match[] __initconst = { | 712 | static const struct of_device_id sysmmu_of_match[] = { |
713 | { .compatible = "samsung,exynos-sysmmu", }, | 713 | { .compatible = "samsung,exynos-sysmmu", }, |
714 | { }, | 714 | { }, |
715 | }; | 715 | }; |
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index e8d89343d613..e88395605e32 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c | |||
@@ -107,6 +107,10 @@ struct its_node { | |||
107 | 107 | ||
108 | #define ITS_ITT_ALIGN SZ_256 | 108 | #define ITS_ITT_ALIGN SZ_256 |
109 | 109 | ||
110 | /* The maximum number of VPEID bits supported by VLPI commands */ | ||
111 | #define ITS_MAX_VPEID_BITS (16) | ||
112 | #define ITS_MAX_VPEID (1 << (ITS_MAX_VPEID_BITS)) | ||
113 | |||
110 | /* Convert page order to size in bytes */ | 114 | /* Convert page order to size in bytes */ |
111 | #define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o)) | 115 | #define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o)) |
112 | 116 | ||
@@ -308,7 +312,7 @@ static void its_encode_size(struct its_cmd_block *cmd, u8 size) | |||
308 | 312 | ||
309 | static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr) | 313 | static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr) |
310 | { | 314 | { |
311 | its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 50, 8); | 315 | its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8); |
312 | } | 316 | } |
313 | 317 | ||
314 | static void its_encode_valid(struct its_cmd_block *cmd, int valid) | 318 | static void its_encode_valid(struct its_cmd_block *cmd, int valid) |
@@ -318,7 +322,7 @@ static void its_encode_valid(struct its_cmd_block *cmd, int valid) | |||
318 | 322 | ||
319 | static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr) | 323 | static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr) |
320 | { | 324 | { |
321 | its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 50, 16); | 325 | its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16); |
322 | } | 326 | } |
323 | 327 | ||
324 | static void its_encode_collection(struct its_cmd_block *cmd, u16 col) | 328 | static void its_encode_collection(struct its_cmd_block *cmd, u16 col) |
@@ -358,7 +362,7 @@ static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list) | |||
358 | 362 | ||
359 | static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa) | 363 | static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa) |
360 | { | 364 | { |
361 | its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 50, 16); | 365 | its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16); |
362 | } | 366 | } |
363 | 367 | ||
364 | static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size) | 368 | static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size) |
@@ -1478,9 +1482,9 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser, | |||
1478 | u64 val = its_read_baser(its, baser); | 1482 | u64 val = its_read_baser(its, baser); |
1479 | u64 esz = GITS_BASER_ENTRY_SIZE(val); | 1483 | u64 esz = GITS_BASER_ENTRY_SIZE(val); |
1480 | u64 type = GITS_BASER_TYPE(val); | 1484 | u64 type = GITS_BASER_TYPE(val); |
1485 | u64 baser_phys, tmp; | ||
1481 | u32 alloc_pages; | 1486 | u32 alloc_pages; |
1482 | void *base; | 1487 | void *base; |
1483 | u64 tmp; | ||
1484 | 1488 | ||
1485 | retry_alloc_baser: | 1489 | retry_alloc_baser: |
1486 | alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz); | 1490 | alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz); |
@@ -1496,8 +1500,24 @@ retry_alloc_baser: | |||
1496 | if (!base) | 1500 | if (!base) |
1497 | return -ENOMEM; | 1501 | return -ENOMEM; |
1498 | 1502 | ||
1503 | baser_phys = virt_to_phys(base); | ||
1504 | |||
1505 | /* Check if the physical address of the memory is above 48bits */ | ||
1506 | if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) { | ||
1507 | |||
1508 | /* 52bit PA is supported only when PageSize=64K */ | ||
1509 | if (psz != SZ_64K) { | ||
1510 | pr_err("ITS: no 52bit PA support when psz=%d\n", psz); | ||
1511 | free_pages((unsigned long)base, order); | ||
1512 | return -ENXIO; | ||
1513 | } | ||
1514 | |||
1515 | /* Convert 52bit PA to 48bit field */ | ||
1516 | baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys); | ||
1517 | } | ||
1518 | |||
1499 | retry_baser: | 1519 | retry_baser: |
1500 | val = (virt_to_phys(base) | | 1520 | val = (baser_phys | |
1501 | (type << GITS_BASER_TYPE_SHIFT) | | 1521 | (type << GITS_BASER_TYPE_SHIFT) | |
1502 | ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) | | 1522 | ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) | |
1503 | ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) | | 1523 | ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) | |
@@ -1582,13 +1602,12 @@ retry_baser: | |||
1582 | 1602 | ||
1583 | static bool its_parse_indirect_baser(struct its_node *its, | 1603 | static bool its_parse_indirect_baser(struct its_node *its, |
1584 | struct its_baser *baser, | 1604 | struct its_baser *baser, |
1585 | u32 psz, u32 *order) | 1605 | u32 psz, u32 *order, u32 ids) |
1586 | { | 1606 | { |
1587 | u64 tmp = its_read_baser(its, baser); | 1607 | u64 tmp = its_read_baser(its, baser); |
1588 | u64 type = GITS_BASER_TYPE(tmp); | 1608 | u64 type = GITS_BASER_TYPE(tmp); |
1589 | u64 esz = GITS_BASER_ENTRY_SIZE(tmp); | 1609 | u64 esz = GITS_BASER_ENTRY_SIZE(tmp); |
1590 | u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb; | 1610 | u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb; |
1591 | u32 ids = its->device_ids; | ||
1592 | u32 new_order = *order; | 1611 | u32 new_order = *order; |
1593 | bool indirect = false; | 1612 | bool indirect = false; |
1594 | 1613 | ||
@@ -1680,9 +1699,13 @@ static int its_alloc_tables(struct its_node *its) | |||
1680 | continue; | 1699 | continue; |
1681 | 1700 | ||
1682 | case GITS_BASER_TYPE_DEVICE: | 1701 | case GITS_BASER_TYPE_DEVICE: |
1702 | indirect = its_parse_indirect_baser(its, baser, | ||
1703 | psz, &order, | ||
1704 | its->device_ids); | ||
1683 | case GITS_BASER_TYPE_VCPU: | 1705 | case GITS_BASER_TYPE_VCPU: |
1684 | indirect = its_parse_indirect_baser(its, baser, | 1706 | indirect = its_parse_indirect_baser(its, baser, |
1685 | psz, &order); | 1707 | psz, &order, |
1708 | ITS_MAX_VPEID_BITS); | ||
1686 | break; | 1709 | break; |
1687 | } | 1710 | } |
1688 | 1711 | ||
@@ -2551,7 +2574,7 @@ static struct irq_chip its_vpe_irq_chip = { | |||
2551 | 2574 | ||
2552 | static int its_vpe_id_alloc(void) | 2575 | static int its_vpe_id_alloc(void) |
2553 | { | 2576 | { |
2554 | return ida_simple_get(&its_vpeid_ida, 0, 1 << 16, GFP_KERNEL); | 2577 | return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL); |
2555 | } | 2578 | } |
2556 | 2579 | ||
2557 | static void its_vpe_id_free(u16 id) | 2580 | static void its_vpe_id_free(u16 id) |
@@ -2851,7 +2874,7 @@ static int its_init_vpe_domain(void) | |||
2851 | return -ENOMEM; | 2874 | return -ENOMEM; |
2852 | } | 2875 | } |
2853 | 2876 | ||
2854 | BUG_ON(entries != vpe_proxy.dev->nr_ites); | 2877 | BUG_ON(entries > vpe_proxy.dev->nr_ites); |
2855 | 2878 | ||
2856 | raw_spin_lock_init(&vpe_proxy.lock); | 2879 | raw_spin_lock_init(&vpe_proxy.lock); |
2857 | vpe_proxy.next_victim = 0; | 2880 | vpe_proxy.next_victim = 0; |
diff --git a/drivers/irqchip/irq-tango.c b/drivers/irqchip/irq-tango.c index bdbb5c0ff7fe..0c085303a583 100644 --- a/drivers/irqchip/irq-tango.c +++ b/drivers/irqchip/irq-tango.c | |||
@@ -141,7 +141,7 @@ static void __init tangox_irq_init_chip(struct irq_chip_generic *gc, | |||
141 | for (i = 0; i < 2; i++) { | 141 | for (i = 0; i < 2; i++) { |
142 | ct[i].chip.irq_ack = irq_gc_ack_set_bit; | 142 | ct[i].chip.irq_ack = irq_gc_ack_set_bit; |
143 | ct[i].chip.irq_mask = irq_gc_mask_disable_reg; | 143 | ct[i].chip.irq_mask = irq_gc_mask_disable_reg; |
144 | ct[i].chip.irq_mask_ack = irq_gc_mask_disable_reg_and_ack; | 144 | ct[i].chip.irq_mask_ack = irq_gc_mask_disable_and_ack_set; |
145 | ct[i].chip.irq_unmask = irq_gc_unmask_enable_reg; | 145 | ct[i].chip.irq_unmask = irq_gc_unmask_enable_reg; |
146 | ct[i].chip.irq_set_type = tangox_irq_set_type; | 146 | ct[i].chip.irq_set_type = tangox_irq_set_type; |
147 | ct[i].chip.name = gc->domain->name; | 147 | ct[i].chip.name = gc->domain->name; |
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c index eed6c397d840..f8a808d45034 100644 --- a/drivers/media/cec/cec-adap.c +++ b/drivers/media/cec/cec-adap.c | |||
@@ -1797,12 +1797,19 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg, | |||
1797 | */ | 1797 | */ |
1798 | switch (msg->msg[1]) { | 1798 | switch (msg->msg[1]) { |
1799 | case CEC_MSG_GET_CEC_VERSION: | 1799 | case CEC_MSG_GET_CEC_VERSION: |
1800 | case CEC_MSG_GIVE_DEVICE_VENDOR_ID: | ||
1801 | case CEC_MSG_ABORT: | 1800 | case CEC_MSG_ABORT: |
1802 | case CEC_MSG_GIVE_DEVICE_POWER_STATUS: | 1801 | case CEC_MSG_GIVE_DEVICE_POWER_STATUS: |
1803 | case CEC_MSG_GIVE_PHYSICAL_ADDR: | ||
1804 | case CEC_MSG_GIVE_OSD_NAME: | 1802 | case CEC_MSG_GIVE_OSD_NAME: |
1803 | /* | ||
1804 | * These messages reply with a directed message, so ignore if | ||
1805 | * the initiator is Unregistered. | ||
1806 | */ | ||
1807 | if (!adap->passthrough && from_unregistered) | ||
1808 | return 0; | ||
1809 | /* Fall through */ | ||
1810 | case CEC_MSG_GIVE_DEVICE_VENDOR_ID: | ||
1805 | case CEC_MSG_GIVE_FEATURES: | 1811 | case CEC_MSG_GIVE_FEATURES: |
1812 | case CEC_MSG_GIVE_PHYSICAL_ADDR: | ||
1806 | /* | 1813 | /* |
1807 | * Skip processing these messages if the passthrough mode | 1814 | * Skip processing these messages if the passthrough mode |
1808 | * is on. | 1815 | * is on. |
@@ -1810,7 +1817,7 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg, | |||
1810 | if (adap->passthrough) | 1817 | if (adap->passthrough) |
1811 | goto skip_processing; | 1818 | goto skip_processing; |
1812 | /* Ignore if addressing is wrong */ | 1819 | /* Ignore if addressing is wrong */ |
1813 | if (is_broadcast || from_unregistered) | 1820 | if (is_broadcast) |
1814 | return 0; | 1821 | return 0; |
1815 | break; | 1822 | break; |
1816 | 1823 | ||
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c index 2fcba1616168..9139d01ba7ed 100644 --- a/drivers/media/dvb-core/dvb_frontend.c +++ b/drivers/media/dvb-core/dvb_frontend.c | |||
@@ -141,22 +141,39 @@ struct dvb_frontend_private { | |||
141 | static void dvb_frontend_invoke_release(struct dvb_frontend *fe, | 141 | static void dvb_frontend_invoke_release(struct dvb_frontend *fe, |
142 | void (*release)(struct dvb_frontend *fe)); | 142 | void (*release)(struct dvb_frontend *fe)); |
143 | 143 | ||
144 | static void dvb_frontend_free(struct kref *ref) | 144 | static void __dvb_frontend_free(struct dvb_frontend *fe) |
145 | { | 145 | { |
146 | struct dvb_frontend *fe = | ||
147 | container_of(ref, struct dvb_frontend, refcount); | ||
148 | struct dvb_frontend_private *fepriv = fe->frontend_priv; | 146 | struct dvb_frontend_private *fepriv = fe->frontend_priv; |
149 | 147 | ||
148 | if (!fepriv) | ||
149 | return; | ||
150 | |||
150 | dvb_free_device(fepriv->dvbdev); | 151 | dvb_free_device(fepriv->dvbdev); |
151 | 152 | ||
152 | dvb_frontend_invoke_release(fe, fe->ops.release); | 153 | dvb_frontend_invoke_release(fe, fe->ops.release); |
153 | 154 | ||
154 | kfree(fepriv); | 155 | kfree(fepriv); |
156 | fe->frontend_priv = NULL; | ||
157 | } | ||
158 | |||
159 | static void dvb_frontend_free(struct kref *ref) | ||
160 | { | ||
161 | struct dvb_frontend *fe = | ||
162 | container_of(ref, struct dvb_frontend, refcount); | ||
163 | |||
164 | __dvb_frontend_free(fe); | ||
155 | } | 165 | } |
156 | 166 | ||
157 | static void dvb_frontend_put(struct dvb_frontend *fe) | 167 | static void dvb_frontend_put(struct dvb_frontend *fe) |
158 | { | 168 | { |
159 | kref_put(&fe->refcount, dvb_frontend_free); | 169 | /* |
170 | * Check if the frontend was registered, as otherwise | ||
171 | * kref was not initialized yet. | ||
172 | */ | ||
173 | if (fe->frontend_priv) | ||
174 | kref_put(&fe->refcount, dvb_frontend_free); | ||
175 | else | ||
176 | __dvb_frontend_free(fe); | ||
160 | } | 177 | } |
161 | 178 | ||
162 | static void dvb_frontend_get(struct dvb_frontend *fe) | 179 | static void dvb_frontend_get(struct dvb_frontend *fe) |
diff --git a/drivers/media/dvb-frontends/dib3000mc.c b/drivers/media/dvb-frontends/dib3000mc.c index 224283fe100a..4d086a7248e9 100644 --- a/drivers/media/dvb-frontends/dib3000mc.c +++ b/drivers/media/dvb-frontends/dib3000mc.c | |||
@@ -55,29 +55,57 @@ struct dib3000mc_state { | |||
55 | 55 | ||
56 | static u16 dib3000mc_read_word(struct dib3000mc_state *state, u16 reg) | 56 | static u16 dib3000mc_read_word(struct dib3000mc_state *state, u16 reg) |
57 | { | 57 | { |
58 | u8 wb[2] = { (reg >> 8) | 0x80, reg & 0xff }; | ||
59 | u8 rb[2]; | ||
60 | struct i2c_msg msg[2] = { | 58 | struct i2c_msg msg[2] = { |
61 | { .addr = state->i2c_addr >> 1, .flags = 0, .buf = wb, .len = 2 }, | 59 | { .addr = state->i2c_addr >> 1, .flags = 0, .len = 2 }, |
62 | { .addr = state->i2c_addr >> 1, .flags = I2C_M_RD, .buf = rb, .len = 2 }, | 60 | { .addr = state->i2c_addr >> 1, .flags = I2C_M_RD, .len = 2 }, |
63 | }; | 61 | }; |
62 | u16 word; | ||
63 | u8 *b; | ||
64 | |||
65 | b = kmalloc(4, GFP_KERNEL); | ||
66 | if (!b) | ||
67 | return 0; | ||
68 | |||
69 | b[0] = (reg >> 8) | 0x80; | ||
70 | b[1] = reg; | ||
71 | b[2] = 0; | ||
72 | b[3] = 0; | ||
73 | |||
74 | msg[0].buf = b; | ||
75 | msg[1].buf = b + 2; | ||
64 | 76 | ||
65 | if (i2c_transfer(state->i2c_adap, msg, 2) != 2) | 77 | if (i2c_transfer(state->i2c_adap, msg, 2) != 2) |
66 | dprintk("i2c read error on %d\n",reg); | 78 | dprintk("i2c read error on %d\n",reg); |
67 | 79 | ||
68 | return (rb[0] << 8) | rb[1]; | 80 | word = (b[2] << 8) | b[3]; |
81 | kfree(b); | ||
82 | |||
83 | return word; | ||
69 | } | 84 | } |
70 | 85 | ||
71 | static int dib3000mc_write_word(struct dib3000mc_state *state, u16 reg, u16 val) | 86 | static int dib3000mc_write_word(struct dib3000mc_state *state, u16 reg, u16 val) |
72 | { | 87 | { |
73 | u8 b[4] = { | ||
74 | (reg >> 8) & 0xff, reg & 0xff, | ||
75 | (val >> 8) & 0xff, val & 0xff, | ||
76 | }; | ||
77 | struct i2c_msg msg = { | 88 | struct i2c_msg msg = { |
78 | .addr = state->i2c_addr >> 1, .flags = 0, .buf = b, .len = 4 | 89 | .addr = state->i2c_addr >> 1, .flags = 0, .len = 4 |
79 | }; | 90 | }; |
80 | return i2c_transfer(state->i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0; | 91 | int rc; |
92 | u8 *b; | ||
93 | |||
94 | b = kmalloc(4, GFP_KERNEL); | ||
95 | if (!b) | ||
96 | return -ENOMEM; | ||
97 | |||
98 | b[0] = reg >> 8; | ||
99 | b[1] = reg; | ||
100 | b[2] = val >> 8; | ||
101 | b[3] = val; | ||
102 | |||
103 | msg.buf = b; | ||
104 | |||
105 | rc = i2c_transfer(state->i2c_adap, &msg, 1) != 1 ? -EREMOTEIO : 0; | ||
106 | kfree(b); | ||
107 | |||
108 | return rc; | ||
81 | } | 109 | } |
82 | 110 | ||
83 | static int dib3000mc_identify(struct dib3000mc_state *state) | 111 | static int dib3000mc_identify(struct dib3000mc_state *state) |
diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c index 7bec3e028bee..5553b89b804e 100644 --- a/drivers/media/dvb-frontends/dvb-pll.c +++ b/drivers/media/dvb-frontends/dvb-pll.c | |||
@@ -753,13 +753,19 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr, | |||
753 | struct i2c_adapter *i2c, | 753 | struct i2c_adapter *i2c, |
754 | unsigned int pll_desc_id) | 754 | unsigned int pll_desc_id) |
755 | { | 755 | { |
756 | u8 b1 [] = { 0 }; | 756 | u8 *b1; |
757 | struct i2c_msg msg = { .addr = pll_addr, .flags = I2C_M_RD, | 757 | struct i2c_msg msg = { .addr = pll_addr, .flags = I2C_M_RD, .len = 1 }; |
758 | .buf = b1, .len = 1 }; | ||
759 | struct dvb_pll_priv *priv = NULL; | 758 | struct dvb_pll_priv *priv = NULL; |
760 | int ret; | 759 | int ret; |
761 | const struct dvb_pll_desc *desc; | 760 | const struct dvb_pll_desc *desc; |
762 | 761 | ||
762 | b1 = kmalloc(1, GFP_KERNEL); | ||
763 | if (!b1) | ||
764 | return NULL; | ||
765 | |||
766 | b1[0] = 0; | ||
767 | msg.buf = b1; | ||
768 | |||
763 | if ((id[dvb_pll_devcount] > DVB_PLL_UNDEFINED) && | 769 | if ((id[dvb_pll_devcount] > DVB_PLL_UNDEFINED) && |
764 | (id[dvb_pll_devcount] < ARRAY_SIZE(pll_list))) | 770 | (id[dvb_pll_devcount] < ARRAY_SIZE(pll_list))) |
765 | pll_desc_id = id[dvb_pll_devcount]; | 771 | pll_desc_id = id[dvb_pll_devcount]; |
@@ -773,15 +779,19 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr, | |||
773 | fe->ops.i2c_gate_ctrl(fe, 1); | 779 | fe->ops.i2c_gate_ctrl(fe, 1); |
774 | 780 | ||
775 | ret = i2c_transfer (i2c, &msg, 1); | 781 | ret = i2c_transfer (i2c, &msg, 1); |
776 | if (ret != 1) | 782 | if (ret != 1) { |
783 | kfree(b1); | ||
777 | return NULL; | 784 | return NULL; |
785 | } | ||
778 | if (fe->ops.i2c_gate_ctrl) | 786 | if (fe->ops.i2c_gate_ctrl) |
779 | fe->ops.i2c_gate_ctrl(fe, 0); | 787 | fe->ops.i2c_gate_ctrl(fe, 0); |
780 | } | 788 | } |
781 | 789 | ||
782 | priv = kzalloc(sizeof(struct dvb_pll_priv), GFP_KERNEL); | 790 | priv = kzalloc(sizeof(struct dvb_pll_priv), GFP_KERNEL); |
783 | if (priv == NULL) | 791 | if (!priv) { |
792 | kfree(b1); | ||
784 | return NULL; | 793 | return NULL; |
794 | } | ||
785 | 795 | ||
786 | priv->pll_i2c_address = pll_addr; | 796 | priv->pll_i2c_address = pll_addr; |
787 | priv->i2c = i2c; | 797 | priv->i2c = i2c; |
@@ -811,6 +821,8 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr, | |||
811 | "insmod option" : "autodetected"); | 821 | "insmod option" : "autodetected"); |
812 | } | 822 | } |
813 | 823 | ||
824 | kfree(b1); | ||
825 | |||
814 | return fe; | 826 | return fe; |
815 | } | 827 | } |
816 | EXPORT_SYMBOL(dvb_pll_attach); | 828 | EXPORT_SYMBOL(dvb_pll_attach); |
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig index 7e7cc49b8674..3c4f7fa7b9d8 100644 --- a/drivers/media/platform/Kconfig +++ b/drivers/media/platform/Kconfig | |||
@@ -112,7 +112,7 @@ config VIDEO_PXA27x | |||
112 | 112 | ||
113 | config VIDEO_QCOM_CAMSS | 113 | config VIDEO_QCOM_CAMSS |
114 | tristate "Qualcomm 8x16 V4L2 Camera Subsystem driver" | 114 | tristate "Qualcomm 8x16 V4L2 Camera Subsystem driver" |
115 | depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API | 115 | depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && HAS_DMA |
116 | depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST | 116 | depends on (ARCH_QCOM && IOMMU_DMA) || COMPILE_TEST |
117 | select VIDEOBUF2_DMA_SG | 117 | select VIDEOBUF2_DMA_SG |
118 | select V4L2_FWNODE | 118 | select V4L2_FWNODE |
diff --git a/drivers/media/platform/qcom/camss-8x16/camss-vfe.c b/drivers/media/platform/qcom/camss-8x16/camss-vfe.c index b21b3c2dc77f..b22d2dfcd3c2 100644 --- a/drivers/media/platform/qcom/camss-8x16/camss-vfe.c +++ b/drivers/media/platform/qcom/camss-8x16/camss-vfe.c | |||
@@ -2660,7 +2660,7 @@ static int vfe_get_selection(struct v4l2_subdev *sd, | |||
2660 | * | 2660 | * |
2661 | * Return -EINVAL or zero on success | 2661 | * Return -EINVAL or zero on success |
2662 | */ | 2662 | */ |
2663 | int vfe_set_selection(struct v4l2_subdev *sd, | 2663 | static int vfe_set_selection(struct v4l2_subdev *sd, |
2664 | struct v4l2_subdev_pad_config *cfg, | 2664 | struct v4l2_subdev_pad_config *cfg, |
2665 | struct v4l2_subdev_selection *sel) | 2665 | struct v4l2_subdev_selection *sel) |
2666 | { | 2666 | { |
diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c index 68933d208063..9b2a401a4891 100644 --- a/drivers/media/platform/qcom/venus/helpers.c +++ b/drivers/media/platform/qcom/venus/helpers.c | |||
@@ -682,6 +682,7 @@ void venus_helper_vb2_stop_streaming(struct vb2_queue *q) | |||
682 | hfi_session_abort(inst); | 682 | hfi_session_abort(inst); |
683 | 683 | ||
684 | load_scale_clocks(core); | 684 | load_scale_clocks(core); |
685 | INIT_LIST_HEAD(&inst->registeredbufs); | ||
685 | } | 686 | } |
686 | 687 | ||
687 | venus_helper_buffers_done(inst, VB2_BUF_STATE_ERROR); | 688 | venus_helper_buffers_done(inst, VB2_BUF_STATE_ERROR); |
diff --git a/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c b/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c index 1edf667d562a..146ae6f25cdb 100644 --- a/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c +++ b/drivers/media/platform/s5p-cec/exynos_hdmi_cecctrl.c | |||
@@ -172,7 +172,8 @@ u32 s5p_cec_get_status(struct s5p_cec_dev *cec) | |||
172 | { | 172 | { |
173 | u32 status = 0; | 173 | u32 status = 0; |
174 | 174 | ||
175 | status = readb(cec->reg + S5P_CEC_STATUS_0); | 175 | status = readb(cec->reg + S5P_CEC_STATUS_0) & 0xf; |
176 | status |= (readb(cec->reg + S5P_CEC_TX_STAT1) & 0xf) << 4; | ||
176 | status |= readb(cec->reg + S5P_CEC_STATUS_1) << 8; | 177 | status |= readb(cec->reg + S5P_CEC_STATUS_1) << 8; |
177 | status |= readb(cec->reg + S5P_CEC_STATUS_2) << 16; | 178 | status |= readb(cec->reg + S5P_CEC_STATUS_2) << 16; |
178 | status |= readb(cec->reg + S5P_CEC_STATUS_3) << 24; | 179 | status |= readb(cec->reg + S5P_CEC_STATUS_3) << 24; |
diff --git a/drivers/media/platform/s5p-cec/s5p_cec.c b/drivers/media/platform/s5p-cec/s5p_cec.c index 58d200e7c838..8837e2678bde 100644 --- a/drivers/media/platform/s5p-cec/s5p_cec.c +++ b/drivers/media/platform/s5p-cec/s5p_cec.c | |||
@@ -92,7 +92,10 @@ static irqreturn_t s5p_cec_irq_handler(int irq, void *priv) | |||
92 | dev_dbg(cec->dev, "irq received\n"); | 92 | dev_dbg(cec->dev, "irq received\n"); |
93 | 93 | ||
94 | if (status & CEC_STATUS_TX_DONE) { | 94 | if (status & CEC_STATUS_TX_DONE) { |
95 | if (status & CEC_STATUS_TX_ERROR) { | 95 | if (status & CEC_STATUS_TX_NACK) { |
96 | dev_dbg(cec->dev, "CEC_STATUS_TX_NACK set\n"); | ||
97 | cec->tx = STATE_NACK; | ||
98 | } else if (status & CEC_STATUS_TX_ERROR) { | ||
96 | dev_dbg(cec->dev, "CEC_STATUS_TX_ERROR set\n"); | 99 | dev_dbg(cec->dev, "CEC_STATUS_TX_ERROR set\n"); |
97 | cec->tx = STATE_ERROR; | 100 | cec->tx = STATE_ERROR; |
98 | } else { | 101 | } else { |
@@ -135,6 +138,12 @@ static irqreturn_t s5p_cec_irq_handler_thread(int irq, void *priv) | |||
135 | cec_transmit_done(cec->adap, CEC_TX_STATUS_OK, 0, 0, 0, 0); | 138 | cec_transmit_done(cec->adap, CEC_TX_STATUS_OK, 0, 0, 0, 0); |
136 | cec->tx = STATE_IDLE; | 139 | cec->tx = STATE_IDLE; |
137 | break; | 140 | break; |
141 | case STATE_NACK: | ||
142 | cec_transmit_done(cec->adap, | ||
143 | CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_NACK, | ||
144 | 0, 1, 0, 0); | ||
145 | cec->tx = STATE_IDLE; | ||
146 | break; | ||
138 | case STATE_ERROR: | 147 | case STATE_ERROR: |
139 | cec_transmit_done(cec->adap, | 148 | cec_transmit_done(cec->adap, |
140 | CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_ERROR, | 149 | CEC_TX_STATUS_MAX_RETRIES | CEC_TX_STATUS_ERROR, |
diff --git a/drivers/media/platform/s5p-cec/s5p_cec.h b/drivers/media/platform/s5p-cec/s5p_cec.h index 8bcd8dc1aeb9..86ded522ef27 100644 --- a/drivers/media/platform/s5p-cec/s5p_cec.h +++ b/drivers/media/platform/s5p-cec/s5p_cec.h | |||
@@ -35,6 +35,7 @@ | |||
35 | #define CEC_STATUS_TX_TRANSFERRING (1 << 1) | 35 | #define CEC_STATUS_TX_TRANSFERRING (1 << 1) |
36 | #define CEC_STATUS_TX_DONE (1 << 2) | 36 | #define CEC_STATUS_TX_DONE (1 << 2) |
37 | #define CEC_STATUS_TX_ERROR (1 << 3) | 37 | #define CEC_STATUS_TX_ERROR (1 << 3) |
38 | #define CEC_STATUS_TX_NACK (1 << 4) | ||
38 | #define CEC_STATUS_TX_BYTES (0xFF << 8) | 39 | #define CEC_STATUS_TX_BYTES (0xFF << 8) |
39 | #define CEC_STATUS_RX_RUNNING (1 << 16) | 40 | #define CEC_STATUS_RX_RUNNING (1 << 16) |
40 | #define CEC_STATUS_RX_RECEIVING (1 << 17) | 41 | #define CEC_STATUS_RX_RECEIVING (1 << 17) |
@@ -55,6 +56,7 @@ enum cec_state { | |||
55 | STATE_IDLE, | 56 | STATE_IDLE, |
56 | STATE_BUSY, | 57 | STATE_BUSY, |
57 | STATE_DONE, | 58 | STATE_DONE, |
59 | STATE_NACK, | ||
58 | STATE_ERROR | 60 | STATE_ERROR |
59 | }; | 61 | }; |
60 | 62 | ||
diff --git a/drivers/media/tuners/mt2060.c b/drivers/media/tuners/mt2060.c index 2e487f9a2cc3..4983eeb39f36 100644 --- a/drivers/media/tuners/mt2060.c +++ b/drivers/media/tuners/mt2060.c | |||
@@ -38,41 +38,74 @@ MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off)."); | |||
38 | static int mt2060_readreg(struct mt2060_priv *priv, u8 reg, u8 *val) | 38 | static int mt2060_readreg(struct mt2060_priv *priv, u8 reg, u8 *val) |
39 | { | 39 | { |
40 | struct i2c_msg msg[2] = { | 40 | struct i2c_msg msg[2] = { |
41 | { .addr = priv->cfg->i2c_address, .flags = 0, .buf = ®, .len = 1 }, | 41 | { .addr = priv->cfg->i2c_address, .flags = 0, .len = 1 }, |
42 | { .addr = priv->cfg->i2c_address, .flags = I2C_M_RD, .buf = val, .len = 1 }, | 42 | { .addr = priv->cfg->i2c_address, .flags = I2C_M_RD, .len = 1 }, |
43 | }; | 43 | }; |
44 | int rc = 0; | ||
45 | u8 *b; | ||
46 | |||
47 | b = kmalloc(2, GFP_KERNEL); | ||
48 | if (!b) | ||
49 | return -ENOMEM; | ||
50 | |||
51 | b[0] = reg; | ||
52 | b[1] = 0; | ||
53 | |||
54 | msg[0].buf = b; | ||
55 | msg[1].buf = b + 1; | ||
44 | 56 | ||
45 | if (i2c_transfer(priv->i2c, msg, 2) != 2) { | 57 | if (i2c_transfer(priv->i2c, msg, 2) != 2) { |
46 | printk(KERN_WARNING "mt2060 I2C read failed\n"); | 58 | printk(KERN_WARNING "mt2060 I2C read failed\n"); |
47 | return -EREMOTEIO; | 59 | rc = -EREMOTEIO; |
48 | } | 60 | } |
49 | return 0; | 61 | *val = b[1]; |
62 | kfree(b); | ||
63 | |||
64 | return rc; | ||
50 | } | 65 | } |
51 | 66 | ||
52 | // Writes a single register | 67 | // Writes a single register |
53 | static int mt2060_writereg(struct mt2060_priv *priv, u8 reg, u8 val) | 68 | static int mt2060_writereg(struct mt2060_priv *priv, u8 reg, u8 val) |
54 | { | 69 | { |
55 | u8 buf[2] = { reg, val }; | ||
56 | struct i2c_msg msg = { | 70 | struct i2c_msg msg = { |
57 | .addr = priv->cfg->i2c_address, .flags = 0, .buf = buf, .len = 2 | 71 | .addr = priv->cfg->i2c_address, .flags = 0, .len = 2 |
58 | }; | 72 | }; |
73 | u8 *buf; | ||
74 | int rc = 0; | ||
75 | |||
76 | buf = kmalloc(2, GFP_KERNEL); | ||
77 | if (!buf) | ||
78 | return -ENOMEM; | ||
79 | |||
80 | buf[0] = reg; | ||
81 | buf[1] = val; | ||
82 | |||
83 | msg.buf = buf; | ||
59 | 84 | ||
60 | if (i2c_transfer(priv->i2c, &msg, 1) != 1) { | 85 | if (i2c_transfer(priv->i2c, &msg, 1) != 1) { |
61 | printk(KERN_WARNING "mt2060 I2C write failed\n"); | 86 | printk(KERN_WARNING "mt2060 I2C write failed\n"); |
62 | return -EREMOTEIO; | 87 | rc = -EREMOTEIO; |
63 | } | 88 | } |
64 | return 0; | 89 | kfree(buf); |
90 | return rc; | ||
65 | } | 91 | } |
66 | 92 | ||
67 | // Writes a set of consecutive registers | 93 | // Writes a set of consecutive registers |
68 | static int mt2060_writeregs(struct mt2060_priv *priv,u8 *buf, u8 len) | 94 | static int mt2060_writeregs(struct mt2060_priv *priv,u8 *buf, u8 len) |
69 | { | 95 | { |
70 | int rem, val_len; | 96 | int rem, val_len; |
71 | u8 xfer_buf[16]; | 97 | u8 *xfer_buf; |
98 | int rc = 0; | ||
72 | struct i2c_msg msg = { | 99 | struct i2c_msg msg = { |
73 | .addr = priv->cfg->i2c_address, .flags = 0, .buf = xfer_buf | 100 | .addr = priv->cfg->i2c_address, .flags = 0 |
74 | }; | 101 | }; |
75 | 102 | ||
103 | xfer_buf = kmalloc(16, GFP_KERNEL); | ||
104 | if (!xfer_buf) | ||
105 | return -ENOMEM; | ||
106 | |||
107 | msg.buf = xfer_buf; | ||
108 | |||
76 | for (rem = len - 1; rem > 0; rem -= priv->i2c_max_regs) { | 109 | for (rem = len - 1; rem > 0; rem -= priv->i2c_max_regs) { |
77 | val_len = min_t(int, rem, priv->i2c_max_regs); | 110 | val_len = min_t(int, rem, priv->i2c_max_regs); |
78 | msg.len = 1 + val_len; | 111 | msg.len = 1 + val_len; |
@@ -81,11 +114,13 @@ static int mt2060_writeregs(struct mt2060_priv *priv,u8 *buf, u8 len) | |||
81 | 114 | ||
82 | if (i2c_transfer(priv->i2c, &msg, 1) != 1) { | 115 | if (i2c_transfer(priv->i2c, &msg, 1) != 1) { |
83 | printk(KERN_WARNING "mt2060 I2C write failed (len=%i)\n", val_len); | 116 | printk(KERN_WARNING "mt2060 I2C write failed (len=%i)\n", val_len); |
84 | return -EREMOTEIO; | 117 | rc = -EREMOTEIO; |
118 | break; | ||
85 | } | 119 | } |
86 | } | 120 | } |
87 | 121 | ||
88 | return 0; | 122 | kfree(xfer_buf); |
123 | return rc; | ||
89 | } | 124 | } |
90 | 125 | ||
91 | // Initialisation sequences | 126 | // Initialisation sequences |
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index c8307e8b4c16..0ccccbaf530d 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h | |||
@@ -127,6 +127,8 @@ | |||
127 | #define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */ | 127 | #define MEI_DEV_ID_BXT_M 0x1A9A /* Broxton M */ |
128 | #define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */ | 128 | #define MEI_DEV_ID_APL_I 0x5A9A /* Apollo Lake I */ |
129 | 129 | ||
130 | #define MEI_DEV_ID_GLK 0x319A /* Gemini Lake */ | ||
131 | |||
130 | #define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */ | 132 | #define MEI_DEV_ID_KBP 0xA2BA /* Kaby Point */ |
131 | #define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */ | 133 | #define MEI_DEV_ID_KBP_2 0xA2BB /* Kaby Point 2 */ |
132 | 134 | ||
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 4ff40d319676..78b3172c8e6e 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c | |||
@@ -93,6 +93,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = { | |||
93 | {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)}, | 93 | {MEI_PCI_DEVICE(MEI_DEV_ID_BXT_M, MEI_ME_PCH8_CFG)}, |
94 | {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)}, | 94 | {MEI_PCI_DEVICE(MEI_DEV_ID_APL_I, MEI_ME_PCH8_CFG)}, |
95 | 95 | ||
96 | {MEI_PCI_DEVICE(MEI_DEV_ID_GLK, MEI_ME_PCH8_CFG)}, | ||
97 | |||
96 | {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)}, | 98 | {MEI_PCI_DEVICE(MEI_DEV_ID_KBP, MEI_ME_PCH8_CFG)}, |
97 | {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)}, | 99 | {MEI_PCI_DEVICE(MEI_DEV_ID_KBP_2, MEI_ME_PCH8_CFG)}, |
98 | 100 | ||
@@ -226,12 +228,15 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
226 | pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME; | 228 | pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME; |
227 | 229 | ||
228 | /* | 230 | /* |
229 | * For not wake-able HW runtime pm framework | 231 | * ME maps runtime suspend/resume to D0i states, |
230 | * can't be used on pci device level. | 232 | * hence we need to go around native PCI runtime service which |
231 | * Use domain runtime pm callbacks instead. | 233 | * eventually brings the device into D3cold/hot state, |
232 | */ | 234 | * but the mei device cannot wake up from D3 unlike from D0i3. |
233 | if (!pci_dev_run_wake(pdev)) | 235 | * To get around the PCI device native runtime pm, |
234 | mei_me_set_pm_domain(dev); | 236 | * ME uses runtime pm domain handlers which take precedence |
237 | * over the driver's pm handlers. | ||
238 | */ | ||
239 | mei_me_set_pm_domain(dev); | ||
235 | 240 | ||
236 | if (mei_pg_is_enabled(dev)) | 241 | if (mei_pg_is_enabled(dev)) |
237 | pm_runtime_put_noidle(&pdev->dev); | 242 | pm_runtime_put_noidle(&pdev->dev); |
@@ -271,8 +276,7 @@ static void mei_me_shutdown(struct pci_dev *pdev) | |||
271 | dev_dbg(&pdev->dev, "shutdown\n"); | 276 | dev_dbg(&pdev->dev, "shutdown\n"); |
272 | mei_stop(dev); | 277 | mei_stop(dev); |
273 | 278 | ||
274 | if (!pci_dev_run_wake(pdev)) | 279 | mei_me_unset_pm_domain(dev); |
275 | mei_me_unset_pm_domain(dev); | ||
276 | 280 | ||
277 | mei_disable_interrupts(dev); | 281 | mei_disable_interrupts(dev); |
278 | free_irq(pdev->irq, dev); | 282 | free_irq(pdev->irq, dev); |
@@ -300,8 +304,7 @@ static void mei_me_remove(struct pci_dev *pdev) | |||
300 | dev_dbg(&pdev->dev, "stop\n"); | 304 | dev_dbg(&pdev->dev, "stop\n"); |
301 | mei_stop(dev); | 305 | mei_stop(dev); |
302 | 306 | ||
303 | if (!pci_dev_run_wake(pdev)) | 307 | mei_me_unset_pm_domain(dev); |
304 | mei_me_unset_pm_domain(dev); | ||
305 | 308 | ||
306 | mei_disable_interrupts(dev); | 309 | mei_disable_interrupts(dev); |
307 | 310 | ||
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c index e38a5f144373..0566f9bfa7de 100644 --- a/drivers/misc/mei/pci-txe.c +++ b/drivers/misc/mei/pci-txe.c | |||
@@ -144,12 +144,14 @@ static int mei_txe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
144 | pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME; | 144 | pdev->dev_flags |= PCI_DEV_FLAGS_NEEDS_RESUME; |
145 | 145 | ||
146 | /* | 146 | /* |
147 | * For not wake-able HW runtime pm framework | 147 | * TXE maps runtime suspend/resume to own power gating states, |
148 | * can't be used on pci device level. | 148 | * hence we need to go around native PCI runtime service which |
149 | * Use domain runtime pm callbacks instead. | 149 | * eventually brings the device into D3cold/hot state. |
150 | */ | 150 | * But the TXE device cannot wake up from D3 unlike from own |
151 | if (!pci_dev_run_wake(pdev)) | 151 | * power gating. To get around PCI device native runtime pm, |
152 | mei_txe_set_pm_domain(dev); | 152 | * TXE uses runtime pm domain handlers which take precedence. |
153 | */ | ||
154 | mei_txe_set_pm_domain(dev); | ||
153 | 155 | ||
154 | pm_runtime_put_noidle(&pdev->dev); | 156 | pm_runtime_put_noidle(&pdev->dev); |
155 | 157 | ||
@@ -186,8 +188,7 @@ static void mei_txe_shutdown(struct pci_dev *pdev) | |||
186 | dev_dbg(&pdev->dev, "shutdown\n"); | 188 | dev_dbg(&pdev->dev, "shutdown\n"); |
187 | mei_stop(dev); | 189 | mei_stop(dev); |
188 | 190 | ||
189 | if (!pci_dev_run_wake(pdev)) | 191 | mei_txe_unset_pm_domain(dev); |
190 | mei_txe_unset_pm_domain(dev); | ||
191 | 192 | ||
192 | mei_disable_interrupts(dev); | 193 | mei_disable_interrupts(dev); |
193 | free_irq(pdev->irq, dev); | 194 | free_irq(pdev->irq, dev); |
@@ -215,8 +216,7 @@ static void mei_txe_remove(struct pci_dev *pdev) | |||
215 | 216 | ||
216 | mei_stop(dev); | 217 | mei_stop(dev); |
217 | 218 | ||
218 | if (!pci_dev_run_wake(pdev)) | 219 | mei_txe_unset_pm_domain(dev); |
219 | mei_txe_unset_pm_domain(dev); | ||
220 | 220 | ||
221 | mei_disable_interrupts(dev); | 221 | mei_disable_interrupts(dev); |
222 | free_irq(pdev->irq, dev); | 222 | free_irq(pdev->irq, dev); |
@@ -318,15 +318,7 @@ static int mei_txe_pm_runtime_suspend(struct device *device) | |||
318 | else | 318 | else |
319 | ret = -EAGAIN; | 319 | ret = -EAGAIN; |
320 | 320 | ||
321 | /* | 321 | /* keep irq on we are staying in D0 */ |
322 | * If everything is okay we're about to enter PCI low | ||
323 | * power state (D3) therefor we need to disable the | ||
324 | * interrupts towards host. | ||
325 | * However if device is not wakeable we do not enter | ||
326 | * D-low state and we need to keep the interrupt kicking | ||
327 | */ | ||
328 | if (!ret && pci_dev_run_wake(pdev)) | ||
329 | mei_disable_interrupts(dev); | ||
330 | 322 | ||
331 | dev_dbg(&pdev->dev, "rpm: txe: runtime suspend ret=%d\n", ret); | 323 | dev_dbg(&pdev->dev, "rpm: txe: runtime suspend ret=%d\n", ret); |
332 | 324 | ||
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index d0ccc6729fd2..67d787fa3306 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c | |||
@@ -448,6 +448,8 @@ static void intel_dsm_init(struct intel_host *intel_host, struct device *dev, | |||
448 | int err; | 448 | int err; |
449 | u32 val; | 449 | u32 val; |
450 | 450 | ||
451 | intel_host->d3_retune = true; | ||
452 | |||
451 | err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns); | 453 | err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns); |
452 | if (err) { | 454 | if (err) { |
453 | pr_debug("%s: DSM not supported, error %d\n", | 455 | pr_debug("%s: DSM not supported, error %d\n", |
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c index 13f0f219d8aa..a13a4896a8bd 100644 --- a/drivers/net/can/flexcan.c +++ b/drivers/net/can/flexcan.c | |||
@@ -182,22 +182,23 @@ | |||
182 | /* FLEXCAN hardware feature flags | 182 | /* FLEXCAN hardware feature flags |
183 | * | 183 | * |
184 | * Below is some version info we got: | 184 | * Below is some version info we got: |
185 | * SOC Version IP-Version Glitch- [TR]WRN_INT Memory err RTR re- | 185 | * SOC Version IP-Version Glitch- [TR]WRN_INT IRQ Err Memory err RTR re- |
186 | * Filter? connected? detection ception in MB | 186 | * Filter? connected? Passive detection ception in MB |
187 | * MX25 FlexCAN2 03.00.00.00 no no no no | 187 | * MX25 FlexCAN2 03.00.00.00 no no ? no no |
188 | * MX28 FlexCAN2 03.00.04.00 yes yes no no | 188 | * MX28 FlexCAN2 03.00.04.00 yes yes no no no |
189 | * MX35 FlexCAN2 03.00.00.00 no no no no | 189 | * MX35 FlexCAN2 03.00.00.00 no no ? no no |
190 | * MX53 FlexCAN2 03.00.00.00 yes no no no | 190 | * MX53 FlexCAN2 03.00.00.00 yes no no no no |
191 | * MX6s FlexCAN3 10.00.12.00 yes yes no yes | 191 | * MX6s FlexCAN3 10.00.12.00 yes yes no no yes |
192 | * VF610 FlexCAN3 ? no yes yes yes? | 192 | * VF610 FlexCAN3 ? no yes ? yes yes? |
193 | * | 193 | * |
194 | * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected. | 194 | * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected. |
195 | */ | 195 | */ |
196 | #define FLEXCAN_QUIRK_BROKEN_ERR_STATE BIT(1) /* [TR]WRN_INT not connected */ | 196 | #define FLEXCAN_QUIRK_BROKEN_WERR_STATE BIT(1) /* [TR]WRN_INT not connected */ |
197 | #define FLEXCAN_QUIRK_DISABLE_RXFG BIT(2) /* Disable RX FIFO Global mask */ | 197 | #define FLEXCAN_QUIRK_DISABLE_RXFG BIT(2) /* Disable RX FIFO Global mask */ |
198 | #define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3) /* Enable EACEN and RRS bit in ctrl2 */ | 198 | #define FLEXCAN_QUIRK_ENABLE_EACEN_RRS BIT(3) /* Enable EACEN and RRS bit in ctrl2 */ |
199 | #define FLEXCAN_QUIRK_DISABLE_MECR BIT(4) /* Disable Memory error detection */ | 199 | #define FLEXCAN_QUIRK_DISABLE_MECR BIT(4) /* Disable Memory error detection */ |
200 | #define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5) /* Use timestamp based offloading */ | 200 | #define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP BIT(5) /* Use timestamp based offloading */ |
201 | #define FLEXCAN_QUIRK_BROKEN_PERR_STATE BIT(6) /* No interrupt for error passive */ | ||
201 | 202 | ||
202 | /* Structure of the message buffer */ | 203 | /* Structure of the message buffer */ |
203 | struct flexcan_mb { | 204 | struct flexcan_mb { |
@@ -281,14 +282,17 @@ struct flexcan_priv { | |||
281 | }; | 282 | }; |
282 | 283 | ||
283 | static const struct flexcan_devtype_data fsl_p1010_devtype_data = { | 284 | static const struct flexcan_devtype_data fsl_p1010_devtype_data = { |
284 | .quirks = FLEXCAN_QUIRK_BROKEN_ERR_STATE, | 285 | .quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE | |
286 | FLEXCAN_QUIRK_BROKEN_PERR_STATE, | ||
285 | }; | 287 | }; |
286 | 288 | ||
287 | static const struct flexcan_devtype_data fsl_imx28_devtype_data; | 289 | static const struct flexcan_devtype_data fsl_imx28_devtype_data = { |
290 | .quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE, | ||
291 | }; | ||
288 | 292 | ||
289 | static const struct flexcan_devtype_data fsl_imx6q_devtype_data = { | 293 | static const struct flexcan_devtype_data fsl_imx6q_devtype_data = { |
290 | .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | | 294 | .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | |
291 | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP, | 295 | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_BROKEN_PERR_STATE, |
292 | }; | 296 | }; |
293 | 297 | ||
294 | static const struct flexcan_devtype_data fsl_vf610_devtype_data = { | 298 | static const struct flexcan_devtype_data fsl_vf610_devtype_data = { |
@@ -335,6 +339,22 @@ static inline void flexcan_write(u32 val, void __iomem *addr) | |||
335 | } | 339 | } |
336 | #endif | 340 | #endif |
337 | 341 | ||
342 | static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv) | ||
343 | { | ||
344 | struct flexcan_regs __iomem *regs = priv->regs; | ||
345 | u32 reg_ctrl = (priv->reg_ctrl_default | FLEXCAN_CTRL_ERR_MSK); | ||
346 | |||
347 | flexcan_write(reg_ctrl, ®s->ctrl); | ||
348 | } | ||
349 | |||
350 | static inline void flexcan_error_irq_disable(const struct flexcan_priv *priv) | ||
351 | { | ||
352 | struct flexcan_regs __iomem *regs = priv->regs; | ||
353 | u32 reg_ctrl = (priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_MSK); | ||
354 | |||
355 | flexcan_write(reg_ctrl, ®s->ctrl); | ||
356 | } | ||
357 | |||
338 | static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv) | 358 | static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv) |
339 | { | 359 | { |
340 | if (!priv->reg_xceiver) | 360 | if (!priv->reg_xceiver) |
@@ -713,6 +733,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) | |||
713 | struct flexcan_regs __iomem *regs = priv->regs; | 733 | struct flexcan_regs __iomem *regs = priv->regs; |
714 | irqreturn_t handled = IRQ_NONE; | 734 | irqreturn_t handled = IRQ_NONE; |
715 | u32 reg_iflag1, reg_esr; | 735 | u32 reg_iflag1, reg_esr; |
736 | enum can_state last_state = priv->can.state; | ||
716 | 737 | ||
717 | reg_iflag1 = flexcan_read(®s->iflag1); | 738 | reg_iflag1 = flexcan_read(®s->iflag1); |
718 | 739 | ||
@@ -765,8 +786,10 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) | |||
765 | flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, ®s->esr); | 786 | flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, ®s->esr); |
766 | } | 787 | } |
767 | 788 | ||
768 | /* state change interrupt */ | 789 | /* state change interrupt or broken error state quirk fix is enabled */ |
769 | if (reg_esr & FLEXCAN_ESR_ERR_STATE) | 790 | if ((reg_esr & FLEXCAN_ESR_ERR_STATE) || |
791 | (priv->devtype_data->quirks & (FLEXCAN_QUIRK_BROKEN_WERR_STATE | | ||
792 | FLEXCAN_QUIRK_BROKEN_PERR_STATE))) | ||
770 | flexcan_irq_state(dev, reg_esr); | 793 | flexcan_irq_state(dev, reg_esr); |
771 | 794 | ||
772 | /* bus error IRQ - handle if bus error reporting is activated */ | 795 | /* bus error IRQ - handle if bus error reporting is activated */ |
@@ -774,6 +797,44 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id) | |||
774 | (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) | 797 | (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) |
775 | flexcan_irq_bus_err(dev, reg_esr); | 798 | flexcan_irq_bus_err(dev, reg_esr); |
776 | 799 | ||
800 | /* availability of error interrupt among state transitions in case | ||
801 | * bus error reporting is de-activated and | ||
802 | * FLEXCAN_QUIRK_BROKEN_PERR_STATE is enabled: | ||
803 | * +--------------------------------------------------------------+ | ||
804 | * | +----------------------------------------------+ [stopped / | | ||
805 | * | | | sleeping] -+ | ||
806 | * +-+-> active <-> warning <-> passive -> bus off -+ | ||
807 | * ___________^^^^^^^^^^^^_______________________________ | ||
808 | * disabled(1) enabled disabled | ||
809 | * | ||
810 | * (1): enabled if FLEXCAN_QUIRK_BROKEN_WERR_STATE is enabled | ||
811 | */ | ||
812 | if ((last_state != priv->can.state) && | ||
813 | (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_PERR_STATE) && | ||
814 | !(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) { | ||
815 | switch (priv->can.state) { | ||
816 | case CAN_STATE_ERROR_ACTIVE: | ||
817 | if (priv->devtype_data->quirks & | ||
818 | FLEXCAN_QUIRK_BROKEN_WERR_STATE) | ||
819 | flexcan_error_irq_enable(priv); | ||
820 | else | ||
821 | flexcan_error_irq_disable(priv); | ||
822 | break; | ||
823 | |||
824 | case CAN_STATE_ERROR_WARNING: | ||
825 | flexcan_error_irq_enable(priv); | ||
826 | break; | ||
827 | |||
828 | case CAN_STATE_ERROR_PASSIVE: | ||
829 | case CAN_STATE_BUS_OFF: | ||
830 | flexcan_error_irq_disable(priv); | ||
831 | break; | ||
832 | |||
833 | default: | ||
834 | break; | ||
835 | } | ||
836 | } | ||
837 | |||
777 | return handled; | 838 | return handled; |
778 | } | 839 | } |
779 | 840 | ||
@@ -887,7 +948,7 @@ static int flexcan_chip_start(struct net_device *dev) | |||
887 | * on most Flexcan cores, too. Otherwise we don't get | 948 | * on most Flexcan cores, too. Otherwise we don't get |
888 | * any error warning or passive interrupts. | 949 | * any error warning or passive interrupts. |
889 | */ | 950 | */ |
890 | if (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_ERR_STATE || | 951 | if (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_WERR_STATE || |
891 | priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) | 952 | priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) |
892 | reg_ctrl |= FLEXCAN_CTRL_ERR_MSK; | 953 | reg_ctrl |= FLEXCAN_CTRL_ERR_MSK; |
893 | else | 954 | else |
diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c index 68ef0a4cd821..b0c80859f746 100644 --- a/drivers/net/can/sun4i_can.c +++ b/drivers/net/can/sun4i_can.c | |||
@@ -342,7 +342,7 @@ static int sun4i_can_start(struct net_device *dev) | |||
342 | 342 | ||
343 | /* enter the selected mode */ | 343 | /* enter the selected mode */ |
344 | mod_reg_val = readl(priv->base + SUN4I_REG_MSEL_ADDR); | 344 | mod_reg_val = readl(priv->base + SUN4I_REG_MSEL_ADDR); |
345 | if (priv->can.ctrlmode & CAN_CTRLMODE_PRESUME_ACK) | 345 | if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) |
346 | mod_reg_val |= SUN4I_MSEL_LOOPBACK_MODE; | 346 | mod_reg_val |= SUN4I_MSEL_LOOPBACK_MODE; |
347 | else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) | 347 | else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) |
348 | mod_reg_val |= SUN4I_MSEL_LISTEN_ONLY_MODE; | 348 | mod_reg_val |= SUN4I_MSEL_LISTEN_ONLY_MODE; |
@@ -811,7 +811,6 @@ static int sun4ican_probe(struct platform_device *pdev) | |||
811 | priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING | | 811 | priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING | |
812 | CAN_CTRLMODE_LISTENONLY | | 812 | CAN_CTRLMODE_LISTENONLY | |
813 | CAN_CTRLMODE_LOOPBACK | | 813 | CAN_CTRLMODE_LOOPBACK | |
814 | CAN_CTRLMODE_PRESUME_ACK | | ||
815 | CAN_CTRLMODE_3_SAMPLES; | 814 | CAN_CTRLMODE_3_SAMPLES; |
816 | priv->base = addr; | 815 | priv->base = addr; |
817 | priv->clk = clk; | 816 | priv->clk = clk; |
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c index be928ce62d32..9fdb0f0bfa06 100644 --- a/drivers/net/can/usb/esd_usb2.c +++ b/drivers/net/can/usb/esd_usb2.c | |||
@@ -333,7 +333,7 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv, | |||
333 | } | 333 | } |
334 | 334 | ||
335 | cf->can_id = id & ESD_IDMASK; | 335 | cf->can_id = id & ESD_IDMASK; |
336 | cf->can_dlc = get_can_dlc(msg->msg.rx.dlc); | 336 | cf->can_dlc = get_can_dlc(msg->msg.rx.dlc & ~ESD_RTR); |
337 | 337 | ||
338 | if (id & ESD_EXTID) | 338 | if (id & ESD_EXTID) |
339 | cf->can_id |= CAN_EFF_FLAG; | 339 | cf->can_id |= CAN_EFF_FLAG; |
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index afcc1312dbaf..68ac3e88a8ce 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c | |||
@@ -375,6 +375,8 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) | |||
375 | 375 | ||
376 | gs_free_tx_context(txc); | 376 | gs_free_tx_context(txc); |
377 | 377 | ||
378 | atomic_dec(&dev->active_tx_urbs); | ||
379 | |||
378 | netif_wake_queue(netdev); | 380 | netif_wake_queue(netdev); |
379 | } | 381 | } |
380 | 382 | ||
@@ -463,14 +465,6 @@ static void gs_usb_xmit_callback(struct urb *urb) | |||
463 | urb->transfer_buffer_length, | 465 | urb->transfer_buffer_length, |
464 | urb->transfer_buffer, | 466 | urb->transfer_buffer, |
465 | urb->transfer_dma); | 467 | urb->transfer_dma); |
466 | |||
467 | atomic_dec(&dev->active_tx_urbs); | ||
468 | |||
469 | if (!netif_device_present(netdev)) | ||
470 | return; | ||
471 | |||
472 | if (netif_queue_stopped(netdev)) | ||
473 | netif_wake_queue(netdev); | ||
474 | } | 468 | } |
475 | 469 | ||
476 | static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, | 470 | static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, |
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index 18cc529fb807..9b18d96ef526 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c | |||
@@ -137,6 +137,7 @@ static inline bool kvaser_is_usbcan(const struct usb_device_id *id) | |||
137 | #define CMD_RESET_ERROR_COUNTER 49 | 137 | #define CMD_RESET_ERROR_COUNTER 49 |
138 | #define CMD_TX_ACKNOWLEDGE 50 | 138 | #define CMD_TX_ACKNOWLEDGE 50 |
139 | #define CMD_CAN_ERROR_EVENT 51 | 139 | #define CMD_CAN_ERROR_EVENT 51 |
140 | #define CMD_FLUSH_QUEUE_REPLY 68 | ||
140 | 141 | ||
141 | #define CMD_LEAF_USB_THROTTLE 77 | 142 | #define CMD_LEAF_USB_THROTTLE 77 |
142 | #define CMD_LEAF_LOG_MESSAGE 106 | 143 | #define CMD_LEAF_LOG_MESSAGE 106 |
@@ -1301,6 +1302,11 @@ static void kvaser_usb_handle_message(const struct kvaser_usb *dev, | |||
1301 | goto warn; | 1302 | goto warn; |
1302 | break; | 1303 | break; |
1303 | 1304 | ||
1305 | case CMD_FLUSH_QUEUE_REPLY: | ||
1306 | if (dev->family != KVASER_LEAF) | ||
1307 | goto warn; | ||
1308 | break; | ||
1309 | |||
1304 | default: | 1310 | default: |
1305 | warn: dev_warn(dev->udev->dev.parent, | 1311 | warn: dev_warn(dev->udev->dev.parent, |
1306 | "Unhandled message (%d)\n", msg->id); | 1312 | "Unhandled message (%d)\n", msg->id); |
@@ -1609,7 +1615,8 @@ static int kvaser_usb_close(struct net_device *netdev) | |||
1609 | if (err) | 1615 | if (err) |
1610 | netdev_warn(netdev, "Cannot flush queue, error %d\n", err); | 1616 | netdev_warn(netdev, "Cannot flush queue, error %d\n", err); |
1611 | 1617 | ||
1612 | if (kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel)) | 1618 | err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, priv->channel); |
1619 | if (err) | ||
1613 | netdev_warn(netdev, "Cannot reset card, error %d\n", err); | 1620 | netdev_warn(netdev, "Cannot reset card, error %d\n", err); |
1614 | 1621 | ||
1615 | err = kvaser_usb_stop_chip(priv); | 1622 | err = kvaser_usb_stop_chip(priv); |
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c index dce7fa57eb55..f123ed57630d 100644 --- a/drivers/net/dsa/mv88e6060.c +++ b/drivers/net/dsa/mv88e6060.c | |||
@@ -214,8 +214,14 @@ static int mv88e6060_setup(struct dsa_switch *ds) | |||
214 | 214 | ||
215 | static int mv88e6060_set_addr(struct dsa_switch *ds, u8 *addr) | 215 | static int mv88e6060_set_addr(struct dsa_switch *ds, u8 *addr) |
216 | { | 216 | { |
217 | /* Use the same MAC Address as FD Pause frames for all ports */ | 217 | u16 val = addr[0] << 8 | addr[1]; |
218 | REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 9) | addr[1]); | 218 | |
219 | /* The multicast bit is always transmitted as a zero, so the switch uses | ||
220 | * bit 8 for "DiffAddr", where 0 means all ports transmit the same SA. | ||
221 | */ | ||
222 | val &= 0xfeff; | ||
223 | |||
224 | REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, val); | ||
219 | REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]); | 225 | REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]); |
220 | REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]); | 226 | REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]); |
221 | 227 | ||
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index b1212debc2e1..967020fb26ee 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c | |||
@@ -742,8 +742,8 @@ static void ena_get_channels(struct net_device *netdev, | |||
742 | { | 742 | { |
743 | struct ena_adapter *adapter = netdev_priv(netdev); | 743 | struct ena_adapter *adapter = netdev_priv(netdev); |
744 | 744 | ||
745 | channels->max_rx = ENA_MAX_NUM_IO_QUEUES; | 745 | channels->max_rx = adapter->num_queues; |
746 | channels->max_tx = ENA_MAX_NUM_IO_QUEUES; | 746 | channels->max_tx = adapter->num_queues; |
747 | channels->max_other = 0; | 747 | channels->max_other = 0; |
748 | channels->max_combined = 0; | 748 | channels->max_combined = 0; |
749 | channels->rx_count = adapter->num_queues; | 749 | channels->rx_count = adapter->num_queues; |
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index f7dc22f65d9f..c6bd5e24005d 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c | |||
@@ -966,7 +966,7 @@ static inline void ena_rx_checksum(struct ena_ring *rx_ring, | |||
966 | u64_stats_update_begin(&rx_ring->syncp); | 966 | u64_stats_update_begin(&rx_ring->syncp); |
967 | rx_ring->rx_stats.bad_csum++; | 967 | rx_ring->rx_stats.bad_csum++; |
968 | u64_stats_update_end(&rx_ring->syncp); | 968 | u64_stats_update_end(&rx_ring->syncp); |
969 | netif_err(rx_ring->adapter, rx_err, rx_ring->netdev, | 969 | netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, |
970 | "RX IPv4 header checksum error\n"); | 970 | "RX IPv4 header checksum error\n"); |
971 | return; | 971 | return; |
972 | } | 972 | } |
@@ -979,7 +979,7 @@ static inline void ena_rx_checksum(struct ena_ring *rx_ring, | |||
979 | u64_stats_update_begin(&rx_ring->syncp); | 979 | u64_stats_update_begin(&rx_ring->syncp); |
980 | rx_ring->rx_stats.bad_csum++; | 980 | rx_ring->rx_stats.bad_csum++; |
981 | u64_stats_update_end(&rx_ring->syncp); | 981 | u64_stats_update_end(&rx_ring->syncp); |
982 | netif_err(rx_ring->adapter, rx_err, rx_ring->netdev, | 982 | netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev, |
983 | "RX L4 checksum error\n"); | 983 | "RX L4 checksum error\n"); |
984 | skb->ip_summed = CHECKSUM_NONE; | 984 | skb->ip_summed = CHECKSUM_NONE; |
985 | return; | 985 | return; |
@@ -3064,7 +3064,8 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev) | |||
3064 | if (ena_dev->mem_bar) | 3064 | if (ena_dev->mem_bar) |
3065 | devm_iounmap(&pdev->dev, ena_dev->mem_bar); | 3065 | devm_iounmap(&pdev->dev, ena_dev->mem_bar); |
3066 | 3066 | ||
3067 | devm_iounmap(&pdev->dev, ena_dev->reg_bar); | 3067 | if (ena_dev->reg_bar) |
3068 | devm_iounmap(&pdev->dev, ena_dev->reg_bar); | ||
3068 | 3069 | ||
3069 | release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK; | 3070 | release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK; |
3070 | pci_release_selected_regions(pdev, release_bars); | 3071 | pci_release_selected_regions(pdev, release_bars); |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h index 0fdaaa643073..57e796870595 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h | |||
@@ -22,8 +22,12 @@ | |||
22 | 22 | ||
23 | #define AQ_CFG_FORCE_LEGACY_INT 0U | 23 | #define AQ_CFG_FORCE_LEGACY_INT 0U |
24 | 24 | ||
25 | #define AQ_CFG_IS_INTERRUPT_MODERATION_DEF 1U | 25 | #define AQ_CFG_INTERRUPT_MODERATION_OFF 0 |
26 | #define AQ_CFG_INTERRUPT_MODERATION_RATE_DEF 0xFFFFU | 26 | #define AQ_CFG_INTERRUPT_MODERATION_ON 1 |
27 | #define AQ_CFG_INTERRUPT_MODERATION_AUTO 0xFFFFU | ||
28 | |||
29 | #define AQ_CFG_INTERRUPT_MODERATION_USEC_MAX (0x1FF * 2) | ||
30 | |||
27 | #define AQ_CFG_IRQ_MASK 0x1FFU | 31 | #define AQ_CFG_IRQ_MASK 0x1FFU |
28 | 32 | ||
29 | #define AQ_CFG_VECS_MAX 8U | 33 | #define AQ_CFG_VECS_MAX 8U |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index a761e91471df..d5e99b468870 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c | |||
@@ -56,10 +56,6 @@ aq_ethtool_set_link_ksettings(struct net_device *ndev, | |||
56 | return aq_nic_set_link_ksettings(aq_nic, cmd); | 56 | return aq_nic_set_link_ksettings(aq_nic, cmd); |
57 | } | 57 | } |
58 | 58 | ||
59 | /* there "5U" is number of queue[#] stats lines (InPackets+...+InErrors) */ | ||
60 | static const unsigned int aq_ethtool_stat_queue_lines = 5U; | ||
61 | static const unsigned int aq_ethtool_stat_queue_chars = | ||
62 | 5U * ETH_GSTRING_LEN; | ||
63 | static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = { | 59 | static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = { |
64 | "InPackets", | 60 | "InPackets", |
65 | "InUCast", | 61 | "InUCast", |
@@ -83,56 +79,26 @@ static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = { | |||
83 | "InOctetsDma", | 79 | "InOctetsDma", |
84 | "OutOctetsDma", | 80 | "OutOctetsDma", |
85 | "InDroppedDma", | 81 | "InDroppedDma", |
86 | "Queue[0] InPackets", | 82 | }; |
87 | "Queue[0] OutPackets", | 83 | |
88 | "Queue[0] InJumboPackets", | 84 | static const char aq_ethtool_queue_stat_names[][ETH_GSTRING_LEN] = { |
89 | "Queue[0] InLroPackets", | 85 | "Queue[%d] InPackets", |
90 | "Queue[0] InErrors", | 86 | "Queue[%d] OutPackets", |
91 | "Queue[1] InPackets", | 87 | "Queue[%d] Restarts", |
92 | "Queue[1] OutPackets", | 88 | "Queue[%d] InJumboPackets", |
93 | "Queue[1] InJumboPackets", | 89 | "Queue[%d] InLroPackets", |
94 | "Queue[1] InLroPackets", | 90 | "Queue[%d] InErrors", |
95 | "Queue[1] InErrors", | ||
96 | "Queue[2] InPackets", | ||
97 | "Queue[2] OutPackets", | ||
98 | "Queue[2] InJumboPackets", | ||
99 | "Queue[2] InLroPackets", | ||
100 | "Queue[2] InErrors", | ||
101 | "Queue[3] InPackets", | ||
102 | "Queue[3] OutPackets", | ||
103 | "Queue[3] InJumboPackets", | ||
104 | "Queue[3] InLroPackets", | ||
105 | "Queue[3] InErrors", | ||
106 | "Queue[4] InPackets", | ||
107 | "Queue[4] OutPackets", | ||
108 | "Queue[4] InJumboPackets", | ||
109 | "Queue[4] InLroPackets", | ||
110 | "Queue[4] InErrors", | ||
111 | "Queue[5] InPackets", | ||
112 | "Queue[5] OutPackets", | ||
113 | "Queue[5] InJumboPackets", | ||
114 | "Queue[5] InLroPackets", | ||
115 | "Queue[5] InErrors", | ||
116 | "Queue[6] InPackets", | ||
117 | "Queue[6] OutPackets", | ||
118 | "Queue[6] InJumboPackets", | ||
119 | "Queue[6] InLroPackets", | ||
120 | "Queue[6] InErrors", | ||
121 | "Queue[7] InPackets", | ||
122 | "Queue[7] OutPackets", | ||
123 | "Queue[7] InJumboPackets", | ||
124 | "Queue[7] InLroPackets", | ||
125 | "Queue[7] InErrors", | ||
126 | }; | 91 | }; |
127 | 92 | ||
128 | static void aq_ethtool_stats(struct net_device *ndev, | 93 | static void aq_ethtool_stats(struct net_device *ndev, |
129 | struct ethtool_stats *stats, u64 *data) | 94 | struct ethtool_stats *stats, u64 *data) |
130 | { | 95 | { |
131 | struct aq_nic_s *aq_nic = netdev_priv(ndev); | 96 | struct aq_nic_s *aq_nic = netdev_priv(ndev); |
97 | struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); | ||
132 | 98 | ||
133 | /* ASSERT: Need add lines to aq_ethtool_stat_names if AQ_CFG_VECS_MAX > 8 */ | 99 | memset(data, 0, (ARRAY_SIZE(aq_ethtool_stat_names) + |
134 | BUILD_BUG_ON(AQ_CFG_VECS_MAX > 8); | 100 | ARRAY_SIZE(aq_ethtool_queue_stat_names) * |
135 | memset(data, 0, ARRAY_SIZE(aq_ethtool_stat_names) * sizeof(u64)); | 101 | cfg->vecs) * sizeof(u64)); |
136 | aq_nic_get_stats(aq_nic, data); | 102 | aq_nic_get_stats(aq_nic, data); |
137 | } | 103 | } |
138 | 104 | ||
@@ -154,8 +120,8 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev, | |||
154 | 120 | ||
155 | strlcpy(drvinfo->bus_info, pdev ? pci_name(pdev) : "", | 121 | strlcpy(drvinfo->bus_info, pdev ? pci_name(pdev) : "", |
156 | sizeof(drvinfo->bus_info)); | 122 | sizeof(drvinfo->bus_info)); |
157 | drvinfo->n_stats = ARRAY_SIZE(aq_ethtool_stat_names) - | 123 | drvinfo->n_stats = ARRAY_SIZE(aq_ethtool_stat_names) + |
158 | (AQ_CFG_VECS_MAX - cfg->vecs) * aq_ethtool_stat_queue_lines; | 124 | cfg->vecs * ARRAY_SIZE(aq_ethtool_queue_stat_names); |
159 | drvinfo->testinfo_len = 0; | 125 | drvinfo->testinfo_len = 0; |
160 | drvinfo->regdump_len = regs_count; | 126 | drvinfo->regdump_len = regs_count; |
161 | drvinfo->eedump_len = 0; | 127 | drvinfo->eedump_len = 0; |
@@ -164,14 +130,25 @@ static void aq_ethtool_get_drvinfo(struct net_device *ndev, | |||
164 | static void aq_ethtool_get_strings(struct net_device *ndev, | 130 | static void aq_ethtool_get_strings(struct net_device *ndev, |
165 | u32 stringset, u8 *data) | 131 | u32 stringset, u8 *data) |
166 | { | 132 | { |
133 | int i, si; | ||
167 | struct aq_nic_s *aq_nic = netdev_priv(ndev); | 134 | struct aq_nic_s *aq_nic = netdev_priv(ndev); |
168 | struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); | 135 | struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); |
169 | 136 | u8 *p = data; | |
170 | if (stringset == ETH_SS_STATS) | 137 | |
171 | memcpy(data, *aq_ethtool_stat_names, | 138 | if (stringset == ETH_SS_STATS) { |
172 | sizeof(aq_ethtool_stat_names) - | 139 | memcpy(p, *aq_ethtool_stat_names, |
173 | (AQ_CFG_VECS_MAX - cfg->vecs) * | 140 | sizeof(aq_ethtool_stat_names)); |
174 | aq_ethtool_stat_queue_chars); | 141 | p = p + sizeof(aq_ethtool_stat_names); |
142 | for (i = 0; i < cfg->vecs; i++) { | ||
143 | for (si = 0; | ||
144 | si < ARRAY_SIZE(aq_ethtool_queue_stat_names); | ||
145 | si++) { | ||
146 | snprintf(p, ETH_GSTRING_LEN, | ||
147 | aq_ethtool_queue_stat_names[si], i); | ||
148 | p += ETH_GSTRING_LEN; | ||
149 | } | ||
150 | } | ||
151 | } | ||
175 | } | 152 | } |
176 | 153 | ||
177 | static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset) | 154 | static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset) |
@@ -182,9 +159,8 @@ static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset) | |||
182 | 159 | ||
183 | switch (stringset) { | 160 | switch (stringset) { |
184 | case ETH_SS_STATS: | 161 | case ETH_SS_STATS: |
185 | ret = ARRAY_SIZE(aq_ethtool_stat_names) - | 162 | ret = ARRAY_SIZE(aq_ethtool_stat_names) + |
186 | (AQ_CFG_VECS_MAX - cfg->vecs) * | 163 | cfg->vecs * ARRAY_SIZE(aq_ethtool_queue_stat_names); |
187 | aq_ethtool_stat_queue_lines; | ||
188 | break; | 164 | break; |
189 | default: | 165 | default: |
190 | ret = -EOPNOTSUPP; | 166 | ret = -EOPNOTSUPP; |
@@ -245,6 +221,69 @@ static int aq_ethtool_get_rxnfc(struct net_device *ndev, | |||
245 | return err; | 221 | return err; |
246 | } | 222 | } |
247 | 223 | ||
224 | int aq_ethtool_get_coalesce(struct net_device *ndev, | ||
225 | struct ethtool_coalesce *coal) | ||
226 | { | ||
227 | struct aq_nic_s *aq_nic = netdev_priv(ndev); | ||
228 | struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); | ||
229 | |||
230 | if (cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON || | ||
231 | cfg->itr == AQ_CFG_INTERRUPT_MODERATION_AUTO) { | ||
232 | coal->rx_coalesce_usecs = cfg->rx_itr; | ||
233 | coal->tx_coalesce_usecs = cfg->tx_itr; | ||
234 | coal->rx_max_coalesced_frames = 0; | ||
235 | coal->tx_max_coalesced_frames = 0; | ||
236 | } else { | ||
237 | coal->rx_coalesce_usecs = 0; | ||
238 | coal->tx_coalesce_usecs = 0; | ||
239 | coal->rx_max_coalesced_frames = 1; | ||
240 | coal->tx_max_coalesced_frames = 1; | ||
241 | } | ||
242 | return 0; | ||
243 | } | ||
244 | |||
245 | int aq_ethtool_set_coalesce(struct net_device *ndev, | ||
246 | struct ethtool_coalesce *coal) | ||
247 | { | ||
248 | struct aq_nic_s *aq_nic = netdev_priv(ndev); | ||
249 | struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic); | ||
250 | |||
251 | /* This is not yet supported | ||
252 | */ | ||
253 | if (coal->use_adaptive_rx_coalesce || coal->use_adaptive_tx_coalesce) | ||
254 | return -EOPNOTSUPP; | ||
255 | |||
256 | /* Atlantic only supports timing based coalescing | ||
257 | */ | ||
258 | if (coal->rx_max_coalesced_frames > 1 || | ||
259 | coal->rx_coalesce_usecs_irq || | ||
260 | coal->rx_max_coalesced_frames_irq) | ||
261 | return -EOPNOTSUPP; | ||
262 | |||
263 | if (coal->tx_max_coalesced_frames > 1 || | ||
264 | coal->tx_coalesce_usecs_irq || | ||
265 | coal->tx_max_coalesced_frames_irq) | ||
266 | return -EOPNOTSUPP; | ||
267 | |||
268 | /* We do not support frame counting. Check this | ||
269 | */ | ||
270 | if (!(coal->rx_max_coalesced_frames == !coal->rx_coalesce_usecs)) | ||
271 | return -EOPNOTSUPP; | ||
272 | if (!(coal->tx_max_coalesced_frames == !coal->tx_coalesce_usecs)) | ||
273 | return -EOPNOTSUPP; | ||
274 | |||
275 | if (coal->rx_coalesce_usecs > AQ_CFG_INTERRUPT_MODERATION_USEC_MAX || | ||
276 | coal->tx_coalesce_usecs > AQ_CFG_INTERRUPT_MODERATION_USEC_MAX) | ||
277 | return -EINVAL; | ||
278 | |||
279 | cfg->itr = AQ_CFG_INTERRUPT_MODERATION_ON; | ||
280 | |||
281 | cfg->rx_itr = coal->rx_coalesce_usecs; | ||
282 | cfg->tx_itr = coal->tx_coalesce_usecs; | ||
283 | |||
284 | return aq_nic_update_interrupt_moderation_settings(aq_nic); | ||
285 | } | ||
286 | |||
248 | const struct ethtool_ops aq_ethtool_ops = { | 287 | const struct ethtool_ops aq_ethtool_ops = { |
249 | .get_link = aq_ethtool_get_link, | 288 | .get_link = aq_ethtool_get_link, |
250 | .get_regs_len = aq_ethtool_get_regs_len, | 289 | .get_regs_len = aq_ethtool_get_regs_len, |
@@ -259,4 +298,6 @@ const struct ethtool_ops aq_ethtool_ops = { | |||
259 | .get_ethtool_stats = aq_ethtool_stats, | 298 | .get_ethtool_stats = aq_ethtool_stats, |
260 | .get_link_ksettings = aq_ethtool_get_link_ksettings, | 299 | .get_link_ksettings = aq_ethtool_get_link_ksettings, |
261 | .set_link_ksettings = aq_ethtool_set_link_ksettings, | 300 | .set_link_ksettings = aq_ethtool_set_link_ksettings, |
301 | .get_coalesce = aq_ethtool_get_coalesce, | ||
302 | .set_coalesce = aq_ethtool_set_coalesce, | ||
262 | }; | 303 | }; |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h index bf9b3f020e10..0207927dc8a6 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h | |||
@@ -151,8 +151,7 @@ struct aq_hw_ops { | |||
151 | [ETH_ALEN], | 151 | [ETH_ALEN], |
152 | u32 count); | 152 | u32 count); |
153 | 153 | ||
154 | int (*hw_interrupt_moderation_set)(struct aq_hw_s *self, | 154 | int (*hw_interrupt_moderation_set)(struct aq_hw_s *self); |
155 | bool itr_enabled); | ||
156 | 155 | ||
157 | int (*hw_rss_set)(struct aq_hw_s *self, | 156 | int (*hw_rss_set)(struct aq_hw_s *self, |
158 | struct aq_rss_parameters *rss_params); | 157 | struct aq_rss_parameters *rss_params); |
@@ -163,6 +162,8 @@ struct aq_hw_ops { | |||
163 | int (*hw_get_regs)(struct aq_hw_s *self, | 162 | int (*hw_get_regs)(struct aq_hw_s *self, |
164 | struct aq_hw_caps_s *aq_hw_caps, u32 *regs_buff); | 163 | struct aq_hw_caps_s *aq_hw_caps, u32 *regs_buff); |
165 | 164 | ||
165 | int (*hw_update_stats)(struct aq_hw_s *self); | ||
166 | |||
166 | int (*hw_get_hw_stats)(struct aq_hw_s *self, u64 *data, | 167 | int (*hw_get_hw_stats)(struct aq_hw_s *self, u64 *data, |
167 | unsigned int *p_count); | 168 | unsigned int *p_count); |
168 | 169 | ||
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 0a5bb4114eb4..483e97691eea 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include "aq_pci_func.h" | 16 | #include "aq_pci_func.h" |
17 | #include "aq_nic_internal.h" | 17 | #include "aq_nic_internal.h" |
18 | 18 | ||
19 | #include <linux/moduleparam.h> | ||
19 | #include <linux/netdevice.h> | 20 | #include <linux/netdevice.h> |
20 | #include <linux/etherdevice.h> | 21 | #include <linux/etherdevice.h> |
21 | #include <linux/timer.h> | 22 | #include <linux/timer.h> |
@@ -24,6 +25,18 @@ | |||
24 | #include <linux/tcp.h> | 25 | #include <linux/tcp.h> |
25 | #include <net/ip.h> | 26 | #include <net/ip.h> |
26 | 27 | ||
28 | static unsigned int aq_itr = AQ_CFG_INTERRUPT_MODERATION_AUTO; | ||
29 | module_param_named(aq_itr, aq_itr, uint, 0644); | ||
30 | MODULE_PARM_DESC(aq_itr, "Interrupt throttling mode"); | ||
31 | |||
32 | static unsigned int aq_itr_tx; | ||
33 | module_param_named(aq_itr_tx, aq_itr_tx, uint, 0644); | ||
34 | MODULE_PARM_DESC(aq_itr_tx, "TX interrupt throttle rate"); | ||
35 | |||
36 | static unsigned int aq_itr_rx; | ||
37 | module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644); | ||
38 | MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate"); | ||
39 | |||
27 | static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues) | 40 | static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues) |
28 | { | 41 | { |
29 | struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg; | 42 | struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg; |
@@ -61,9 +74,9 @@ static void aq_nic_cfg_init_defaults(struct aq_nic_s *self) | |||
61 | 74 | ||
62 | cfg->is_polling = AQ_CFG_IS_POLLING_DEF; | 75 | cfg->is_polling = AQ_CFG_IS_POLLING_DEF; |
63 | 76 | ||
64 | cfg->is_interrupt_moderation = AQ_CFG_IS_INTERRUPT_MODERATION_DEF; | 77 | cfg->itr = aq_itr; |
65 | cfg->itr = cfg->is_interrupt_moderation ? | 78 | cfg->tx_itr = aq_itr_tx; |
66 | AQ_CFG_INTERRUPT_MODERATION_RATE_DEF : 0U; | 79 | cfg->rx_itr = aq_itr_rx; |
67 | 80 | ||
68 | cfg->is_rss = AQ_CFG_IS_RSS_DEF; | 81 | cfg->is_rss = AQ_CFG_IS_RSS_DEF; |
69 | cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF; | 82 | cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF; |
@@ -126,10 +139,12 @@ static int aq_nic_update_link_status(struct aq_nic_s *self) | |||
126 | if (err) | 139 | if (err) |
127 | return err; | 140 | return err; |
128 | 141 | ||
129 | if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) | 142 | if (self->link_status.mbps != self->aq_hw->aq_link_status.mbps) { |
130 | pr_info("%s: link change old %d new %d\n", | 143 | pr_info("%s: link change old %d new %d\n", |
131 | AQ_CFG_DRV_NAME, self->link_status.mbps, | 144 | AQ_CFG_DRV_NAME, self->link_status.mbps, |
132 | self->aq_hw->aq_link_status.mbps); | 145 | self->aq_hw->aq_link_status.mbps); |
146 | aq_nic_update_interrupt_moderation_settings(self); | ||
147 | } | ||
133 | 148 | ||
134 | self->link_status = self->aq_hw->aq_link_status; | 149 | self->link_status = self->aq_hw->aq_link_status; |
135 | if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) { | 150 | if (!netif_carrier_ok(self->ndev) && self->link_status.mbps) { |
@@ -164,8 +179,8 @@ static void aq_nic_service_timer_cb(unsigned long param) | |||
164 | if (err) | 179 | if (err) |
165 | goto err_exit; | 180 | goto err_exit; |
166 | 181 | ||
167 | self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw, | 182 | if (self->aq_hw_ops.hw_update_stats) |
168 | self->aq_nic_cfg.is_interrupt_moderation); | 183 | self->aq_hw_ops.hw_update_stats(self->aq_hw); |
169 | 184 | ||
170 | memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s)); | 185 | memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s)); |
171 | memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s)); | 186 | memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s)); |
@@ -334,6 +349,7 @@ struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev) | |||
334 | } | 349 | } |
335 | if (netif_running(ndev)) | 350 | if (netif_running(ndev)) |
336 | netif_tx_disable(ndev); | 351 | netif_tx_disable(ndev); |
352 | netif_carrier_off(self->ndev); | ||
337 | 353 | ||
338 | for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs; | 354 | for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs; |
339 | self->aq_vecs++) { | 355 | self->aq_vecs++) { |
@@ -421,9 +437,8 @@ int aq_nic_start(struct aq_nic_s *self) | |||
421 | if (err < 0) | 437 | if (err < 0) |
422 | goto err_exit; | 438 | goto err_exit; |
423 | 439 | ||
424 | err = self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw, | 440 | err = aq_nic_update_interrupt_moderation_settings(self); |
425 | self->aq_nic_cfg.is_interrupt_moderation); | 441 | if (err) |
426 | if (err < 0) | ||
427 | goto err_exit; | 442 | goto err_exit; |
428 | setup_timer(&self->service_timer, &aq_nic_service_timer_cb, | 443 | setup_timer(&self->service_timer, &aq_nic_service_timer_cb, |
429 | (unsigned long)self); | 444 | (unsigned long)self); |
@@ -645,6 +660,11 @@ err_exit: | |||
645 | return err; | 660 | return err; |
646 | } | 661 | } |
647 | 662 | ||
663 | int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self) | ||
664 | { | ||
665 | return self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw); | ||
666 | } | ||
667 | |||
648 | int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags) | 668 | int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags) |
649 | { | 669 | { |
650 | int err = 0; | 670 | int err = 0; |
@@ -899,6 +919,7 @@ int aq_nic_stop(struct aq_nic_s *self) | |||
899 | unsigned int i = 0U; | 919 | unsigned int i = 0U; |
900 | 920 | ||
901 | netif_tx_disable(self->ndev); | 921 | netif_tx_disable(self->ndev); |
922 | netif_carrier_off(self->ndev); | ||
902 | 923 | ||
903 | del_timer_sync(&self->service_timer); | 924 | del_timer_sync(&self->service_timer); |
904 | 925 | ||
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h index 0ddd556ff901..4309983acdd6 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h | |||
@@ -40,6 +40,8 @@ struct aq_nic_cfg_s { | |||
40 | u32 vecs; /* vecs==allocated irqs */ | 40 | u32 vecs; /* vecs==allocated irqs */ |
41 | u32 irq_type; | 41 | u32 irq_type; |
42 | u32 itr; | 42 | u32 itr; |
43 | u16 rx_itr; | ||
44 | u16 tx_itr; | ||
43 | u32 num_rss_queues; | 45 | u32 num_rss_queues; |
44 | u32 mtu; | 46 | u32 mtu; |
45 | u32 ucp_0x364; | 47 | u32 ucp_0x364; |
@@ -49,7 +51,6 @@ struct aq_nic_cfg_s { | |||
49 | u16 is_mc_list_enabled; | 51 | u16 is_mc_list_enabled; |
50 | u16 mc_list_count; | 52 | u16 mc_list_count; |
51 | bool is_autoneg; | 53 | bool is_autoneg; |
52 | bool is_interrupt_moderation; | ||
53 | bool is_polling; | 54 | bool is_polling; |
54 | bool is_rss; | 55 | bool is_rss; |
55 | bool is_lro; | 56 | bool is_lro; |
@@ -104,5 +105,6 @@ int aq_nic_set_link_ksettings(struct aq_nic_s *self, | |||
104 | struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self); | 105 | struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self); |
105 | u32 aq_nic_get_fw_version(struct aq_nic_s *self); | 106 | u32 aq_nic_get_fw_version(struct aq_nic_s *self); |
106 | int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg); | 107 | int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg); |
108 | int aq_nic_update_interrupt_moderation_settings(struct aq_nic_s *self); | ||
107 | 109 | ||
108 | #endif /* AQ_NIC_H */ | 110 | #endif /* AQ_NIC_H */ |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index 4c6c882c6a1c..cadaa646c89f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | |||
@@ -85,6 +85,7 @@ int aq_pci_func_init(struct aq_pci_func_s *self) | |||
85 | int err = 0; | 85 | int err = 0; |
86 | unsigned int bar = 0U; | 86 | unsigned int bar = 0U; |
87 | unsigned int port = 0U; | 87 | unsigned int port = 0U; |
88 | unsigned int numvecs = 0U; | ||
88 | 89 | ||
89 | err = pci_enable_device(self->pdev); | 90 | err = pci_enable_device(self->pdev); |
90 | if (err < 0) | 91 | if (err < 0) |
@@ -142,10 +143,12 @@ int aq_pci_func_init(struct aq_pci_func_s *self) | |||
142 | } | 143 | } |
143 | } | 144 | } |
144 | 145 | ||
145 | /*enable interrupts */ | 146 | numvecs = min((u8)AQ_CFG_VECS_DEF, self->aq_hw_caps.msix_irqs); |
147 | numvecs = min(numvecs, num_online_cpus()); | ||
148 | |||
149 | /* enable interrupts */ | ||
146 | #if !AQ_CFG_FORCE_LEGACY_INT | 150 | #if !AQ_CFG_FORCE_LEGACY_INT |
147 | err = pci_alloc_irq_vectors(self->pdev, self->aq_hw_caps.msix_irqs, | 151 | err = pci_alloc_irq_vectors(self->pdev, numvecs, numvecs, PCI_IRQ_MSIX); |
148 | self->aq_hw_caps.msix_irqs, PCI_IRQ_MSIX); | ||
149 | 152 | ||
150 | if (err < 0) { | 153 | if (err < 0) { |
151 | err = pci_alloc_irq_vectors(self->pdev, 1, 1, | 154 | err = pci_alloc_irq_vectors(self->pdev, 1, 1, |
@@ -153,7 +156,7 @@ int aq_pci_func_init(struct aq_pci_func_s *self) | |||
153 | if (err < 0) | 156 | if (err < 0) |
154 | goto err_exit; | 157 | goto err_exit; |
155 | } | 158 | } |
156 | #endif | 159 | #endif /* AQ_CFG_FORCE_LEGACY_INT */ |
157 | 160 | ||
158 | /* net device init */ | 161 | /* net device init */ |
159 | for (port = 0; port < self->ports; ++port) { | 162 | for (port = 0; port < self->ports; ++port) { |
@@ -265,6 +268,9 @@ void aq_pci_func_free(struct aq_pci_func_s *self) | |||
265 | aq_nic_ndev_free(self->port[port]); | 268 | aq_nic_ndev_free(self->port[port]); |
266 | } | 269 | } |
267 | 270 | ||
271 | if (self->mmio) | ||
272 | iounmap(self->mmio); | ||
273 | |||
268 | kfree(self); | 274 | kfree(self); |
269 | 275 | ||
270 | err_exit:; | 276 | err_exit:; |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c index 305ff8ffac2c..5fecc9a099ef 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c | |||
@@ -373,8 +373,11 @@ int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count) | |||
373 | memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s)); | 373 | memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s)); |
374 | aq_vec_add_stats(self, &stats_rx, &stats_tx); | 374 | aq_vec_add_stats(self, &stats_rx, &stats_tx); |
375 | 375 | ||
376 | /* This data should mimic aq_ethtool_queue_stat_names structure | ||
377 | */ | ||
376 | data[count] += stats_rx.packets; | 378 | data[count] += stats_rx.packets; |
377 | data[++count] += stats_tx.packets; | 379 | data[++count] += stats_tx.packets; |
380 | data[++count] += stats_tx.queue_restarts; | ||
378 | data[++count] += stats_rx.jumbo_packets; | 381 | data[++count] += stats_rx.jumbo_packets; |
379 | data[++count] += stats_rx.lro_packets; | 382 | data[++count] += stats_rx.lro_packets; |
380 | data[++count] += stats_rx.errors; | 383 | data[++count] += stats_rx.errors; |
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c index c5a02df7a48b..07b3c49a16a4 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | |||
@@ -765,24 +765,23 @@ err_exit: | |||
765 | return err; | 765 | return err; |
766 | } | 766 | } |
767 | 767 | ||
768 | static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self, | 768 | static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self) |
769 | bool itr_enabled) | ||
770 | { | 769 | { |
771 | unsigned int i = 0U; | 770 | unsigned int i = 0U; |
771 | u32 itr_rx; | ||
772 | 772 | ||
773 | if (itr_enabled && self->aq_nic_cfg->itr) { | 773 | if (self->aq_nic_cfg->itr) { |
774 | if (self->aq_nic_cfg->itr != 0xFFFFU) { | 774 | if (self->aq_nic_cfg->itr != AQ_CFG_INTERRUPT_MODERATION_AUTO) { |
775 | u32 itr_ = (self->aq_nic_cfg->itr >> 1); | 775 | u32 itr_ = (self->aq_nic_cfg->itr >> 1); |
776 | 776 | ||
777 | itr_ = min(AQ_CFG_IRQ_MASK, itr_); | 777 | itr_ = min(AQ_CFG_IRQ_MASK, itr_); |
778 | 778 | ||
779 | PHAL_ATLANTIC_A0->itr_rx = 0x80000000U | | 779 | itr_rx = 0x80000000U | (itr_ << 0x10); |
780 | (itr_ << 0x10); | ||
781 | } else { | 780 | } else { |
782 | u32 n = 0xFFFFU & aq_hw_read_reg(self, 0x00002A00U); | 781 | u32 n = 0xFFFFU & aq_hw_read_reg(self, 0x00002A00U); |
783 | 782 | ||
784 | if (n < self->aq_link_status.mbps) { | 783 | if (n < self->aq_link_status.mbps) { |
785 | PHAL_ATLANTIC_A0->itr_rx = 0U; | 784 | itr_rx = 0U; |
786 | } else { | 785 | } else { |
787 | static unsigned int hw_timers_tbl_[] = { | 786 | static unsigned int hw_timers_tbl_[] = { |
788 | 0x01CU, /* 10Gbit */ | 787 | 0x01CU, /* 10Gbit */ |
@@ -797,8 +796,7 @@ static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self, | |||
797 | hw_atl_utils_mbps_2_speed_index( | 796 | hw_atl_utils_mbps_2_speed_index( |
798 | self->aq_link_status.mbps); | 797 | self->aq_link_status.mbps); |
799 | 798 | ||
800 | PHAL_ATLANTIC_A0->itr_rx = | 799 | itr_rx = 0x80000000U | |
801 | 0x80000000U | | ||
802 | (hw_timers_tbl_[speed_index] << 0x10U); | 800 | (hw_timers_tbl_[speed_index] << 0x10U); |
803 | } | 801 | } |
804 | 802 | ||
@@ -806,11 +804,11 @@ static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self, | |||
806 | aq_hw_write_reg(self, 0x00002A00U, 0x8D000000U); | 804 | aq_hw_write_reg(self, 0x00002A00U, 0x8D000000U); |
807 | } | 805 | } |
808 | } else { | 806 | } else { |
809 | PHAL_ATLANTIC_A0->itr_rx = 0U; | 807 | itr_rx = 0U; |
810 | } | 808 | } |
811 | 809 | ||
812 | for (i = HW_ATL_A0_RINGS_MAX; i--;) | 810 | for (i = HW_ATL_A0_RINGS_MAX; i--;) |
813 | reg_irq_thr_set(self, PHAL_ATLANTIC_A0->itr_rx, i); | 811 | reg_irq_thr_set(self, itr_rx, i); |
814 | 812 | ||
815 | return aq_hw_err_from_flags(self); | 813 | return aq_hw_err_from_flags(self); |
816 | } | 814 | } |
@@ -885,6 +883,7 @@ static struct aq_hw_ops hw_atl_ops_ = { | |||
885 | .hw_rss_set = hw_atl_a0_hw_rss_set, | 883 | .hw_rss_set = hw_atl_a0_hw_rss_set, |
886 | .hw_rss_hash_set = hw_atl_a0_hw_rss_hash_set, | 884 | .hw_rss_hash_set = hw_atl_a0_hw_rss_hash_set, |
887 | .hw_get_regs = hw_atl_utils_hw_get_regs, | 885 | .hw_get_regs = hw_atl_utils_hw_get_regs, |
886 | .hw_update_stats = hw_atl_utils_update_stats, | ||
888 | .hw_get_hw_stats = hw_atl_utils_get_hw_stats, | 887 | .hw_get_hw_stats = hw_atl_utils_get_hw_stats, |
889 | .hw_get_fw_version = hw_atl_utils_get_fw_version, | 888 | .hw_get_fw_version = hw_atl_utils_get_fw_version, |
890 | }; | 889 | }; |
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index 21784cc39dab..ec68c20efcbd 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | |||
@@ -788,39 +788,45 @@ err_exit: | |||
788 | return err; | 788 | return err; |
789 | } | 789 | } |
790 | 790 | ||
791 | static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self, | 791 | static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self) |
792 | bool itr_enabled) | ||
793 | { | 792 | { |
794 | unsigned int i = 0U; | 793 | unsigned int i = 0U; |
794 | u32 itr_tx = 2U; | ||
795 | u32 itr_rx = 2U; | ||
795 | 796 | ||
796 | if (itr_enabled && self->aq_nic_cfg->itr) { | 797 | switch (self->aq_nic_cfg->itr) { |
798 | case AQ_CFG_INTERRUPT_MODERATION_ON: | ||
799 | case AQ_CFG_INTERRUPT_MODERATION_AUTO: | ||
797 | tdm_tx_desc_wr_wb_irq_en_set(self, 0U); | 800 | tdm_tx_desc_wr_wb_irq_en_set(self, 0U); |
798 | tdm_tdm_intr_moder_en_set(self, 1U); | 801 | tdm_tdm_intr_moder_en_set(self, 1U); |
799 | rdm_rx_desc_wr_wb_irq_en_set(self, 0U); | 802 | rdm_rx_desc_wr_wb_irq_en_set(self, 0U); |
800 | rdm_rdm_intr_moder_en_set(self, 1U); | 803 | rdm_rdm_intr_moder_en_set(self, 1U); |
801 | 804 | ||
802 | PHAL_ATLANTIC_B0->itr_tx = 2U; | 805 | if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) { |
803 | PHAL_ATLANTIC_B0->itr_rx = 2U; | 806 | /* HW timers are in 2us units */ |
807 | int tx_max_timer = self->aq_nic_cfg->tx_itr / 2; | ||
808 | int tx_min_timer = tx_max_timer / 2; | ||
804 | 809 | ||
805 | if (self->aq_nic_cfg->itr != 0xFFFFU) { | 810 | int rx_max_timer = self->aq_nic_cfg->rx_itr / 2; |
806 | unsigned int max_timer = self->aq_nic_cfg->itr / 2U; | 811 | int rx_min_timer = rx_max_timer / 2; |
807 | unsigned int min_timer = self->aq_nic_cfg->itr / 32U; | ||
808 | 812 | ||
809 | max_timer = min(0x1FFU, max_timer); | 813 | tx_max_timer = min(HW_ATL_INTR_MODER_MAX, tx_max_timer); |
810 | min_timer = min(0xFFU, min_timer); | 814 | tx_min_timer = min(HW_ATL_INTR_MODER_MIN, tx_min_timer); |
815 | rx_max_timer = min(HW_ATL_INTR_MODER_MAX, rx_max_timer); | ||
816 | rx_min_timer = min(HW_ATL_INTR_MODER_MIN, rx_min_timer); | ||
811 | 817 | ||
812 | PHAL_ATLANTIC_B0->itr_tx |= min_timer << 0x8U; | 818 | itr_tx |= tx_min_timer << 0x8U; |
813 | PHAL_ATLANTIC_B0->itr_tx |= max_timer << 0x10U; | 819 | itr_tx |= tx_max_timer << 0x10U; |
814 | PHAL_ATLANTIC_B0->itr_rx |= min_timer << 0x8U; | 820 | itr_rx |= rx_min_timer << 0x8U; |
815 | PHAL_ATLANTIC_B0->itr_rx |= max_timer << 0x10U; | 821 | itr_rx |= rx_max_timer << 0x10U; |
816 | } else { | 822 | } else { |
817 | static unsigned int hw_atl_b0_timers_table_tx_[][2] = { | 823 | static unsigned int hw_atl_b0_timers_table_tx_[][2] = { |
818 | {0xffU, 0xffU}, /* 10Gbit */ | 824 | {0xfU, 0xffU}, /* 10Gbit */ |
819 | {0xffU, 0x1ffU}, /* 5Gbit */ | 825 | {0xfU, 0x1ffU}, /* 5Gbit */ |
820 | {0xffU, 0x1ffU}, /* 5Gbit 5GS */ | 826 | {0xfU, 0x1ffU}, /* 5Gbit 5GS */ |
821 | {0xffU, 0x1ffU}, /* 2.5Gbit */ | 827 | {0xfU, 0x1ffU}, /* 2.5Gbit */ |
822 | {0xffU, 0x1ffU}, /* 1Gbit */ | 828 | {0xfU, 0x1ffU}, /* 1Gbit */ |
823 | {0xffU, 0x1ffU}, /* 100Mbit */ | 829 | {0xfU, 0x1ffU}, /* 100Mbit */ |
824 | }; | 830 | }; |
825 | 831 | ||
826 | static unsigned int hw_atl_b0_timers_table_rx_[][2] = { | 832 | static unsigned int hw_atl_b0_timers_table_rx_[][2] = { |
@@ -836,34 +842,36 @@ static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self, | |||
836 | hw_atl_utils_mbps_2_speed_index( | 842 | hw_atl_utils_mbps_2_speed_index( |
837 | self->aq_link_status.mbps); | 843 | self->aq_link_status.mbps); |
838 | 844 | ||
839 | PHAL_ATLANTIC_B0->itr_tx |= | 845 | /* Update user visible ITR settings */ |
840 | hw_atl_b0_timers_table_tx_[speed_index] | 846 | self->aq_nic_cfg->tx_itr = hw_atl_b0_timers_table_tx_ |
841 | [0] << 0x8U; /* set min timer value */ | 847 | [speed_index][1] * 2; |
842 | PHAL_ATLANTIC_B0->itr_tx |= | 848 | self->aq_nic_cfg->rx_itr = hw_atl_b0_timers_table_rx_ |
843 | hw_atl_b0_timers_table_tx_[speed_index] | 849 | [speed_index][1] * 2; |
844 | [1] << 0x10U; /* set max timer value */ | 850 | |
845 | 851 | itr_tx |= hw_atl_b0_timers_table_tx_ | |
846 | PHAL_ATLANTIC_B0->itr_rx |= | 852 | [speed_index][0] << 0x8U; |
847 | hw_atl_b0_timers_table_rx_[speed_index] | 853 | itr_tx |= hw_atl_b0_timers_table_tx_ |
848 | [0] << 0x8U; /* set min timer value */ | 854 | [speed_index][1] << 0x10U; |
849 | PHAL_ATLANTIC_B0->itr_rx |= | 855 | |
850 | hw_atl_b0_timers_table_rx_[speed_index] | 856 | itr_rx |= hw_atl_b0_timers_table_rx_ |
851 | [1] << 0x10U; /* set max timer value */ | 857 | [speed_index][0] << 0x8U; |
858 | itr_rx |= hw_atl_b0_timers_table_rx_ | ||
859 | [speed_index][1] << 0x10U; | ||
852 | } | 860 | } |
853 | } else { | 861 | break; |
862 | case AQ_CFG_INTERRUPT_MODERATION_OFF: | ||
854 | tdm_tx_desc_wr_wb_irq_en_set(self, 1U); | 863 | tdm_tx_desc_wr_wb_irq_en_set(self, 1U); |
855 | tdm_tdm_intr_moder_en_set(self, 0U); | 864 | tdm_tdm_intr_moder_en_set(self, 0U); |
856 | rdm_rx_desc_wr_wb_irq_en_set(self, 1U); | 865 | rdm_rx_desc_wr_wb_irq_en_set(self, 1U); |
857 | rdm_rdm_intr_moder_en_set(self, 0U); | 866 | rdm_rdm_intr_moder_en_set(self, 0U); |
858 | PHAL_ATLANTIC_B0->itr_tx = 0U; | 867 | itr_tx = 0U; |
859 | PHAL_ATLANTIC_B0->itr_rx = 0U; | 868 | itr_rx = 0U; |
869 | break; | ||
860 | } | 870 | } |
861 | 871 | ||
862 | for (i = HW_ATL_B0_RINGS_MAX; i--;) { | 872 | for (i = HW_ATL_B0_RINGS_MAX; i--;) { |
863 | reg_tx_intr_moder_ctrl_set(self, | 873 | reg_tx_intr_moder_ctrl_set(self, itr_tx, i); |
864 | PHAL_ATLANTIC_B0->itr_tx, i); | 874 | reg_rx_intr_moder_ctrl_set(self, itr_rx, i); |
865 | reg_rx_intr_moder_ctrl_set(self, | ||
866 | PHAL_ATLANTIC_B0->itr_rx, i); | ||
867 | } | 875 | } |
868 | 876 | ||
869 | return aq_hw_err_from_flags(self); | 877 | return aq_hw_err_from_flags(self); |
@@ -939,6 +947,7 @@ static struct aq_hw_ops hw_atl_ops_ = { | |||
939 | .hw_rss_set = hw_atl_b0_hw_rss_set, | 947 | .hw_rss_set = hw_atl_b0_hw_rss_set, |
940 | .hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set, | 948 | .hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set, |
941 | .hw_get_regs = hw_atl_utils_hw_get_regs, | 949 | .hw_get_regs = hw_atl_utils_hw_get_regs, |
950 | .hw_update_stats = hw_atl_utils_update_stats, | ||
942 | .hw_get_hw_stats = hw_atl_utils_get_hw_stats, | 951 | .hw_get_hw_stats = hw_atl_utils_get_hw_stats, |
943 | .hw_get_fw_version = hw_atl_utils_get_fw_version, | 952 | .hw_get_fw_version = hw_atl_utils_get_fw_version, |
944 | }; | 953 | }; |
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h index fcf89e25a773..9aa2c6edfca2 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h | |||
@@ -139,6 +139,9 @@ | |||
139 | 139 | ||
140 | #define HW_ATL_B0_FW_VER_EXPECTED 0x01050006U | 140 | #define HW_ATL_B0_FW_VER_EXPECTED 0x01050006U |
141 | 141 | ||
142 | #define HW_ATL_INTR_MODER_MAX 0x1FF | ||
143 | #define HW_ATL_INTR_MODER_MIN 0xFF | ||
144 | |||
142 | /* Hardware tx descriptor */ | 145 | /* Hardware tx descriptor */ |
143 | struct __packed hw_atl_txd_s { | 146 | struct __packed hw_atl_txd_s { |
144 | u64 buf_addr; | 147 | u64 buf_addr; |
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index bf734b32e44b..1fe016fc4bc7 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c | |||
@@ -255,6 +255,15 @@ err_exit: | |||
255 | return err; | 255 | return err; |
256 | } | 256 | } |
257 | 257 | ||
258 | int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self, | ||
259 | struct hw_aq_atl_utils_mbox_header *pmbox) | ||
260 | { | ||
261 | return hw_atl_utils_fw_downld_dwords(self, | ||
262 | PHAL_ATLANTIC->mbox_addr, | ||
263 | (u32 *)(void *)pmbox, | ||
264 | sizeof(*pmbox) / sizeof(u32)); | ||
265 | } | ||
266 | |||
258 | void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, | 267 | void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, |
259 | struct hw_aq_atl_utils_mbox *pmbox) | 268 | struct hw_aq_atl_utils_mbox *pmbox) |
260 | { | 269 | { |
@@ -267,9 +276,6 @@ void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, | |||
267 | if (err < 0) | 276 | if (err < 0) |
268 | goto err_exit; | 277 | goto err_exit; |
269 | 278 | ||
270 | if (pmbox != &PHAL_ATLANTIC->mbox) | ||
271 | memcpy(pmbox, &PHAL_ATLANTIC->mbox, sizeof(*pmbox)); | ||
272 | |||
273 | if (IS_CHIP_FEATURE(REVISION_A0)) { | 279 | if (IS_CHIP_FEATURE(REVISION_A0)) { |
274 | unsigned int mtu = self->aq_nic_cfg ? | 280 | unsigned int mtu = self->aq_nic_cfg ? |
275 | self->aq_nic_cfg->mtu : 1514U; | 281 | self->aq_nic_cfg->mtu : 1514U; |
@@ -299,17 +305,17 @@ void hw_atl_utils_mpi_set(struct aq_hw_s *self, | |||
299 | { | 305 | { |
300 | int err = 0; | 306 | int err = 0; |
301 | u32 transaction_id = 0; | 307 | u32 transaction_id = 0; |
308 | struct hw_aq_atl_utils_mbox_header mbox; | ||
302 | 309 | ||
303 | if (state == MPI_RESET) { | 310 | if (state == MPI_RESET) { |
304 | hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox); | 311 | hw_atl_utils_mpi_read_mbox(self, &mbox); |
305 | 312 | ||
306 | transaction_id = PHAL_ATLANTIC->mbox.transaction_id; | 313 | transaction_id = mbox.transaction_id; |
307 | 314 | ||
308 | AQ_HW_WAIT_FOR(transaction_id != | 315 | AQ_HW_WAIT_FOR(transaction_id != |
309 | (hw_atl_utils_mpi_read_stats | 316 | (hw_atl_utils_mpi_read_mbox(self, &mbox), |
310 | (self, &PHAL_ATLANTIC->mbox), | 317 | mbox.transaction_id), |
311 | PHAL_ATLANTIC->mbox.transaction_id), | 318 | 1000U, 100U); |
312 | 1000U, 100U); | ||
313 | if (err < 0) | 319 | if (err < 0) |
314 | goto err_exit; | 320 | goto err_exit; |
315 | } | 321 | } |
@@ -492,16 +498,51 @@ int hw_atl_utils_hw_set_power(struct aq_hw_s *self, | |||
492 | return 0; | 498 | return 0; |
493 | } | 499 | } |
494 | 500 | ||
501 | int hw_atl_utils_update_stats(struct aq_hw_s *self) | ||
502 | { | ||
503 | struct hw_atl_s *hw_self = PHAL_ATLANTIC; | ||
504 | struct hw_aq_atl_utils_mbox mbox; | ||
505 | |||
506 | if (!self->aq_link_status.mbps) | ||
507 | return 0; | ||
508 | |||
509 | hw_atl_utils_mpi_read_stats(self, &mbox); | ||
510 | |||
511 | #define AQ_SDELTA(_N_) (hw_self->curr_stats._N_ += \ | ||
512 | mbox.stats._N_ - hw_self->last_stats._N_) | ||
513 | |||
514 | AQ_SDELTA(uprc); | ||
515 | AQ_SDELTA(mprc); | ||
516 | AQ_SDELTA(bprc); | ||
517 | AQ_SDELTA(erpt); | ||
518 | |||
519 | AQ_SDELTA(uptc); | ||
520 | AQ_SDELTA(mptc); | ||
521 | AQ_SDELTA(bptc); | ||
522 | AQ_SDELTA(erpr); | ||
523 | |||
524 | AQ_SDELTA(ubrc); | ||
525 | AQ_SDELTA(ubtc); | ||
526 | AQ_SDELTA(mbrc); | ||
527 | AQ_SDELTA(mbtc); | ||
528 | AQ_SDELTA(bbrc); | ||
529 | AQ_SDELTA(bbtc); | ||
530 | AQ_SDELTA(dpc); | ||
531 | |||
532 | #undef AQ_SDELTA | ||
533 | |||
534 | memcpy(&hw_self->last_stats, &mbox.stats, sizeof(mbox.stats)); | ||
535 | |||
536 | return 0; | ||
537 | } | ||
538 | |||
495 | int hw_atl_utils_get_hw_stats(struct aq_hw_s *self, | 539 | int hw_atl_utils_get_hw_stats(struct aq_hw_s *self, |
496 | u64 *data, unsigned int *p_count) | 540 | u64 *data, unsigned int *p_count) |
497 | { | 541 | { |
498 | struct hw_atl_stats_s *stats = NULL; | 542 | struct hw_atl_s *hw_self = PHAL_ATLANTIC; |
543 | struct hw_atl_stats_s *stats = &hw_self->curr_stats; | ||
499 | int i = 0; | 544 | int i = 0; |
500 | 545 | ||
501 | hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox); | ||
502 | |||
503 | stats = &PHAL_ATLANTIC->mbox.stats; | ||
504 | |||
505 | data[i] = stats->uprc + stats->mprc + stats->bprc; | 546 | data[i] = stats->uprc + stats->mprc + stats->bprc; |
506 | data[++i] = stats->uprc; | 547 | data[++i] = stats->uprc; |
507 | data[++i] = stats->mprc; | 548 | data[++i] = stats->mprc; |
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h index e0360a6b2202..c99cc690e425 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h | |||
@@ -115,19 +115,22 @@ struct __packed hw_aq_atl_utils_fw_rpc { | |||
115 | }; | 115 | }; |
116 | }; | 116 | }; |
117 | 117 | ||
118 | struct __packed hw_aq_atl_utils_mbox { | 118 | struct __packed hw_aq_atl_utils_mbox_header { |
119 | u32 version; | 119 | u32 version; |
120 | u32 transaction_id; | 120 | u32 transaction_id; |
121 | int error; | 121 | u32 error; |
122 | }; | ||
123 | |||
124 | struct __packed hw_aq_atl_utils_mbox { | ||
125 | struct hw_aq_atl_utils_mbox_header header; | ||
122 | struct hw_atl_stats_s stats; | 126 | struct hw_atl_stats_s stats; |
123 | }; | 127 | }; |
124 | 128 | ||
125 | struct __packed hw_atl_s { | 129 | struct __packed hw_atl_s { |
126 | struct aq_hw_s base; | 130 | struct aq_hw_s base; |
127 | struct hw_aq_atl_utils_mbox mbox; | 131 | struct hw_atl_stats_s last_stats; |
132 | struct hw_atl_stats_s curr_stats; | ||
128 | u64 speed; | 133 | u64 speed; |
129 | u32 itr_tx; | ||
130 | u32 itr_rx; | ||
131 | unsigned int chip_features; | 134 | unsigned int chip_features; |
132 | u32 fw_ver_actual; | 135 | u32 fw_ver_actual; |
133 | atomic_t dpc; | 136 | atomic_t dpc; |
@@ -170,6 +173,9 @@ enum hal_atl_utils_fw_state_e { | |||
170 | 173 | ||
171 | void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p); | 174 | void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p); |
172 | 175 | ||
176 | int hw_atl_utils_mpi_read_mbox(struct aq_hw_s *self, | ||
177 | struct hw_aq_atl_utils_mbox_header *pmbox); | ||
178 | |||
173 | void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, | 179 | void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self, |
174 | struct hw_aq_atl_utils_mbox *pmbox); | 180 | struct hw_aq_atl_utils_mbox *pmbox); |
175 | 181 | ||
@@ -199,6 +205,8 @@ int hw_atl_utils_hw_deinit(struct aq_hw_s *self); | |||
199 | 205 | ||
200 | int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version); | 206 | int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version); |
201 | 207 | ||
208 | int hw_atl_utils_update_stats(struct aq_hw_s *self); | ||
209 | |||
202 | int hw_atl_utils_get_hw_stats(struct aq_hw_s *self, | 210 | int hw_atl_utils_get_hw_stats(struct aq_hw_s *self, |
203 | u64 *data, | 211 | u64 *data, |
204 | unsigned int *p_count); | 212 | unsigned int *p_count); |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index aacec8bc19d5..dc5de275352a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c | |||
@@ -214,6 +214,8 @@ static const u16 bnxt_async_events_arr[] = { | |||
214 | ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE, | 214 | ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE, |
215 | }; | 215 | }; |
216 | 216 | ||
217 | static struct workqueue_struct *bnxt_pf_wq; | ||
218 | |||
217 | static bool bnxt_vf_pciid(enum board_idx idx) | 219 | static bool bnxt_vf_pciid(enum board_idx idx) |
218 | { | 220 | { |
219 | return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF); | 221 | return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF); |
@@ -1024,12 +1026,28 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi, | |||
1024 | return 0; | 1026 | return 0; |
1025 | } | 1027 | } |
1026 | 1028 | ||
1029 | static void bnxt_queue_sp_work(struct bnxt *bp) | ||
1030 | { | ||
1031 | if (BNXT_PF(bp)) | ||
1032 | queue_work(bnxt_pf_wq, &bp->sp_task); | ||
1033 | else | ||
1034 | schedule_work(&bp->sp_task); | ||
1035 | } | ||
1036 | |||
1037 | static void bnxt_cancel_sp_work(struct bnxt *bp) | ||
1038 | { | ||
1039 | if (BNXT_PF(bp)) | ||
1040 | flush_workqueue(bnxt_pf_wq); | ||
1041 | else | ||
1042 | cancel_work_sync(&bp->sp_task); | ||
1043 | } | ||
1044 | |||
1027 | static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) | 1045 | static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) |
1028 | { | 1046 | { |
1029 | if (!rxr->bnapi->in_reset) { | 1047 | if (!rxr->bnapi->in_reset) { |
1030 | rxr->bnapi->in_reset = true; | 1048 | rxr->bnapi->in_reset = true; |
1031 | set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); | 1049 | set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); |
1032 | schedule_work(&bp->sp_task); | 1050 | bnxt_queue_sp_work(bp); |
1033 | } | 1051 | } |
1034 | rxr->rx_next_cons = 0xffff; | 1052 | rxr->rx_next_cons = 0xffff; |
1035 | } | 1053 | } |
@@ -1717,7 +1735,7 @@ static int bnxt_async_event_process(struct bnxt *bp, | |||
1717 | default: | 1735 | default: |
1718 | goto async_event_process_exit; | 1736 | goto async_event_process_exit; |
1719 | } | 1737 | } |
1720 | schedule_work(&bp->sp_task); | 1738 | bnxt_queue_sp_work(bp); |
1721 | async_event_process_exit: | 1739 | async_event_process_exit: |
1722 | bnxt_ulp_async_events(bp, cmpl); | 1740 | bnxt_ulp_async_events(bp, cmpl); |
1723 | return 0; | 1741 | return 0; |
@@ -1751,7 +1769,7 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp) | |||
1751 | 1769 | ||
1752 | set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap); | 1770 | set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap); |
1753 | set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event); | 1771 | set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event); |
1754 | schedule_work(&bp->sp_task); | 1772 | bnxt_queue_sp_work(bp); |
1755 | break; | 1773 | break; |
1756 | 1774 | ||
1757 | case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: | 1775 | case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: |
@@ -3448,6 +3466,12 @@ int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) | |||
3448 | return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false); | 3466 | return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false); |
3449 | } | 3467 | } |
3450 | 3468 | ||
3469 | int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len, | ||
3470 | int timeout) | ||
3471 | { | ||
3472 | return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true); | ||
3473 | } | ||
3474 | |||
3451 | int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) | 3475 | int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) |
3452 | { | 3476 | { |
3453 | int rc; | 3477 | int rc; |
@@ -6327,7 +6351,9 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) | |||
6327 | } | 6351 | } |
6328 | 6352 | ||
6329 | if (link_re_init) { | 6353 | if (link_re_init) { |
6354 | mutex_lock(&bp->link_lock); | ||
6330 | rc = bnxt_update_phy_setting(bp); | 6355 | rc = bnxt_update_phy_setting(bp); |
6356 | mutex_unlock(&bp->link_lock); | ||
6331 | if (rc) | 6357 | if (rc) |
6332 | netdev_warn(bp->dev, "failed to update phy settings\n"); | 6358 | netdev_warn(bp->dev, "failed to update phy settings\n"); |
6333 | } | 6359 | } |
@@ -6647,7 +6673,7 @@ static void bnxt_set_rx_mode(struct net_device *dev) | |||
6647 | vnic->rx_mask = mask; | 6673 | vnic->rx_mask = mask; |
6648 | 6674 | ||
6649 | set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event); | 6675 | set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event); |
6650 | schedule_work(&bp->sp_task); | 6676 | bnxt_queue_sp_work(bp); |
6651 | } | 6677 | } |
6652 | } | 6678 | } |
6653 | 6679 | ||
@@ -6920,7 +6946,7 @@ static void bnxt_tx_timeout(struct net_device *dev) | |||
6920 | 6946 | ||
6921 | netdev_err(bp->dev, "TX timeout detected, starting reset task!\n"); | 6947 | netdev_err(bp->dev, "TX timeout detected, starting reset task!\n"); |
6922 | set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); | 6948 | set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event); |
6923 | schedule_work(&bp->sp_task); | 6949 | bnxt_queue_sp_work(bp); |
6924 | } | 6950 | } |
6925 | 6951 | ||
6926 | #ifdef CONFIG_NET_POLL_CONTROLLER | 6952 | #ifdef CONFIG_NET_POLL_CONTROLLER |
@@ -6952,7 +6978,7 @@ static void bnxt_timer(unsigned long data) | |||
6952 | if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) && | 6978 | if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) && |
6953 | bp->stats_coal_ticks) { | 6979 | bp->stats_coal_ticks) { |
6954 | set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event); | 6980 | set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event); |
6955 | schedule_work(&bp->sp_task); | 6981 | bnxt_queue_sp_work(bp); |
6956 | } | 6982 | } |
6957 | bnxt_restart_timer: | 6983 | bnxt_restart_timer: |
6958 | mod_timer(&bp->timer, jiffies + bp->current_interval); | 6984 | mod_timer(&bp->timer, jiffies + bp->current_interval); |
@@ -7025,30 +7051,28 @@ static void bnxt_sp_task(struct work_struct *work) | |||
7025 | if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) | 7051 | if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) |
7026 | bnxt_hwrm_port_qstats(bp); | 7052 | bnxt_hwrm_port_qstats(bp); |
7027 | 7053 | ||
7028 | /* These functions below will clear BNXT_STATE_IN_SP_TASK. They | ||
7029 | * must be the last functions to be called before exiting. | ||
7030 | */ | ||
7031 | if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { | 7054 | if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) { |
7032 | int rc = 0; | 7055 | int rc; |
7033 | 7056 | ||
7057 | mutex_lock(&bp->link_lock); | ||
7034 | if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, | 7058 | if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, |
7035 | &bp->sp_event)) | 7059 | &bp->sp_event)) |
7036 | bnxt_hwrm_phy_qcaps(bp); | 7060 | bnxt_hwrm_phy_qcaps(bp); |
7037 | 7061 | ||
7038 | bnxt_rtnl_lock_sp(bp); | 7062 | rc = bnxt_update_link(bp, true); |
7039 | if (test_bit(BNXT_STATE_OPEN, &bp->state)) | 7063 | mutex_unlock(&bp->link_lock); |
7040 | rc = bnxt_update_link(bp, true); | ||
7041 | bnxt_rtnl_unlock_sp(bp); | ||
7042 | if (rc) | 7064 | if (rc) |
7043 | netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", | 7065 | netdev_err(bp->dev, "SP task can't update link (rc: %x)\n", |
7044 | rc); | 7066 | rc); |
7045 | } | 7067 | } |
7046 | if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) { | 7068 | if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) { |
7047 | bnxt_rtnl_lock_sp(bp); | 7069 | mutex_lock(&bp->link_lock); |
7048 | if (test_bit(BNXT_STATE_OPEN, &bp->state)) | 7070 | bnxt_get_port_module_status(bp); |
7049 | bnxt_get_port_module_status(bp); | 7071 | mutex_unlock(&bp->link_lock); |
7050 | bnxt_rtnl_unlock_sp(bp); | ||
7051 | } | 7072 | } |
7073 | /* These functions below will clear BNXT_STATE_IN_SP_TASK. They | ||
7074 | * must be the last functions to be called before exiting. | ||
7075 | */ | ||
7052 | if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) | 7076 | if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) |
7053 | bnxt_reset(bp, false); | 7077 | bnxt_reset(bp, false); |
7054 | 7078 | ||
@@ -7433,7 +7457,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, | |||
7433 | spin_unlock_bh(&bp->ntp_fltr_lock); | 7457 | spin_unlock_bh(&bp->ntp_fltr_lock); |
7434 | 7458 | ||
7435 | set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event); | 7459 | set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event); |
7436 | schedule_work(&bp->sp_task); | 7460 | bnxt_queue_sp_work(bp); |
7437 | 7461 | ||
7438 | return new_fltr->sw_id; | 7462 | return new_fltr->sw_id; |
7439 | 7463 | ||
@@ -7516,7 +7540,7 @@ static void bnxt_udp_tunnel_add(struct net_device *dev, | |||
7516 | if (bp->vxlan_port_cnt == 1) { | 7540 | if (bp->vxlan_port_cnt == 1) { |
7517 | bp->vxlan_port = ti->port; | 7541 | bp->vxlan_port = ti->port; |
7518 | set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event); | 7542 | set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event); |
7519 | schedule_work(&bp->sp_task); | 7543 | bnxt_queue_sp_work(bp); |
7520 | } | 7544 | } |
7521 | break; | 7545 | break; |
7522 | case UDP_TUNNEL_TYPE_GENEVE: | 7546 | case UDP_TUNNEL_TYPE_GENEVE: |
@@ -7533,7 +7557,7 @@ static void bnxt_udp_tunnel_add(struct net_device *dev, | |||
7533 | return; | 7557 | return; |
7534 | } | 7558 | } |
7535 | 7559 | ||
7536 | schedule_work(&bp->sp_task); | 7560 | bnxt_queue_sp_work(bp); |
7537 | } | 7561 | } |
7538 | 7562 | ||
7539 | static void bnxt_udp_tunnel_del(struct net_device *dev, | 7563 | static void bnxt_udp_tunnel_del(struct net_device *dev, |
@@ -7572,7 +7596,7 @@ static void bnxt_udp_tunnel_del(struct net_device *dev, | |||
7572 | return; | 7596 | return; |
7573 | } | 7597 | } |
7574 | 7598 | ||
7575 | schedule_work(&bp->sp_task); | 7599 | bnxt_queue_sp_work(bp); |
7576 | } | 7600 | } |
7577 | 7601 | ||
7578 | static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, | 7602 | static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, |
@@ -7720,7 +7744,7 @@ static void bnxt_remove_one(struct pci_dev *pdev) | |||
7720 | pci_disable_pcie_error_reporting(pdev); | 7744 | pci_disable_pcie_error_reporting(pdev); |
7721 | unregister_netdev(dev); | 7745 | unregister_netdev(dev); |
7722 | bnxt_shutdown_tc(bp); | 7746 | bnxt_shutdown_tc(bp); |
7723 | cancel_work_sync(&bp->sp_task); | 7747 | bnxt_cancel_sp_work(bp); |
7724 | bp->sp_event = 0; | 7748 | bp->sp_event = 0; |
7725 | 7749 | ||
7726 | bnxt_clear_int_mode(bp); | 7750 | bnxt_clear_int_mode(bp); |
@@ -7748,6 +7772,7 @@ static int bnxt_probe_phy(struct bnxt *bp) | |||
7748 | rc); | 7772 | rc); |
7749 | return rc; | 7773 | return rc; |
7750 | } | 7774 | } |
7775 | mutex_init(&bp->link_lock); | ||
7751 | 7776 | ||
7752 | rc = bnxt_update_link(bp, false); | 7777 | rc = bnxt_update_link(bp, false); |
7753 | if (rc) { | 7778 | if (rc) { |
@@ -7946,7 +7971,7 @@ static void bnxt_parse_log_pcie_link(struct bnxt *bp) | |||
7946 | enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN; | 7971 | enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN; |
7947 | enum pci_bus_speed speed = PCI_SPEED_UNKNOWN; | 7972 | enum pci_bus_speed speed = PCI_SPEED_UNKNOWN; |
7948 | 7973 | ||
7949 | if (pcie_get_minimum_link(bp->pdev, &speed, &width) || | 7974 | if (pcie_get_minimum_link(pci_physfn(bp->pdev), &speed, &width) || |
7950 | speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) | 7975 | speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN) |
7951 | netdev_info(bp->dev, "Failed to determine PCIe Link Info\n"); | 7976 | netdev_info(bp->dev, "Failed to determine PCIe Link Info\n"); |
7952 | else | 7977 | else |
@@ -8138,8 +8163,17 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
8138 | else | 8163 | else |
8139 | device_set_wakeup_capable(&pdev->dev, false); | 8164 | device_set_wakeup_capable(&pdev->dev, false); |
8140 | 8165 | ||
8141 | if (BNXT_PF(bp)) | 8166 | if (BNXT_PF(bp)) { |
8167 | if (!bnxt_pf_wq) { | ||
8168 | bnxt_pf_wq = | ||
8169 | create_singlethread_workqueue("bnxt_pf_wq"); | ||
8170 | if (!bnxt_pf_wq) { | ||
8171 | dev_err(&pdev->dev, "Unable to create workqueue.\n"); | ||
8172 | goto init_err_pci_clean; | ||
8173 | } | ||
8174 | } | ||
8142 | bnxt_init_tc(bp); | 8175 | bnxt_init_tc(bp); |
8176 | } | ||
8143 | 8177 | ||
8144 | rc = register_netdev(dev); | 8178 | rc = register_netdev(dev); |
8145 | if (rc) | 8179 | if (rc) |
@@ -8375,4 +8409,17 @@ static struct pci_driver bnxt_pci_driver = { | |||
8375 | #endif | 8409 | #endif |
8376 | }; | 8410 | }; |
8377 | 8411 | ||
8378 | module_pci_driver(bnxt_pci_driver); | 8412 | static int __init bnxt_init(void) |
8413 | { | ||
8414 | return pci_register_driver(&bnxt_pci_driver); | ||
8415 | } | ||
8416 | |||
8417 | static void __exit bnxt_exit(void) | ||
8418 | { | ||
8419 | pci_unregister_driver(&bnxt_pci_driver); | ||
8420 | if (bnxt_pf_wq) | ||
8421 | destroy_workqueue(bnxt_pf_wq); | ||
8422 | } | ||
8423 | |||
8424 | module_init(bnxt_init); | ||
8425 | module_exit(bnxt_exit); | ||
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 7b888d4b2b55..c911e69ff25f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h | |||
@@ -1290,6 +1290,10 @@ struct bnxt { | |||
1290 | unsigned long *ntp_fltr_bmap; | 1290 | unsigned long *ntp_fltr_bmap; |
1291 | int ntp_fltr_count; | 1291 | int ntp_fltr_count; |
1292 | 1292 | ||
1293 | /* To protect link related settings during link changes and | ||
1294 | * ethtool settings changes. | ||
1295 | */ | ||
1296 | struct mutex link_lock; | ||
1293 | struct bnxt_link_info link_info; | 1297 | struct bnxt_link_info link_info; |
1294 | struct ethtool_eee eee; | 1298 | struct ethtool_eee eee; |
1295 | u32 lpi_tmr_lo; | 1299 | u32 lpi_tmr_lo; |
@@ -1358,6 +1362,7 @@ void bnxt_set_ring_params(struct bnxt *); | |||
1358 | int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode); | 1362 | int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode); |
1359 | void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16); | 1363 | void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16); |
1360 | int _hwrm_send_message(struct bnxt *, void *, u32, int); | 1364 | int _hwrm_send_message(struct bnxt *, void *, u32, int); |
1365 | int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 len, int timeout); | ||
1361 | int hwrm_send_message(struct bnxt *, void *, u32, int); | 1366 | int hwrm_send_message(struct bnxt *, void *, u32, int); |
1362 | int hwrm_send_message_silent(struct bnxt *, void *, u32, int); | 1367 | int hwrm_send_message_silent(struct bnxt *, void *, u32, int); |
1363 | int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap, | 1368 | int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap, |
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c index aa1f3a2c7a78..fed37cd9ae1d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c | |||
@@ -50,7 +50,9 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets) | |||
50 | 50 | ||
51 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_QCFG, -1, -1); | 51 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_QCFG, -1, -1); |
52 | req.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); | 52 | req.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); |
53 | rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); | 53 | |
54 | mutex_lock(&bp->hwrm_cmd_lock); | ||
55 | rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); | ||
54 | if (!rc) { | 56 | if (!rc) { |
55 | u8 *pri2cos = &resp->pri0_cos_queue_id; | 57 | u8 *pri2cos = &resp->pri0_cos_queue_id; |
56 | int i, j; | 58 | int i, j; |
@@ -66,6 +68,7 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets) | |||
66 | } | 68 | } |
67 | } | 69 | } |
68 | } | 70 | } |
71 | mutex_unlock(&bp->hwrm_cmd_lock); | ||
69 | return rc; | 72 | return rc; |
70 | } | 73 | } |
71 | 74 | ||
@@ -119,9 +122,13 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets) | |||
119 | int rc, i; | 122 | int rc, i; |
120 | 123 | ||
121 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_QCFG, -1, -1); | 124 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_QCFG, -1, -1); |
122 | rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); | 125 | |
123 | if (rc) | 126 | mutex_lock(&bp->hwrm_cmd_lock); |
127 | rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); | ||
128 | if (rc) { | ||
129 | mutex_unlock(&bp->hwrm_cmd_lock); | ||
124 | return rc; | 130 | return rc; |
131 | } | ||
125 | 132 | ||
126 | data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id); | 133 | data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id); |
127 | for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) { | 134 | for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) { |
@@ -143,6 +150,7 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets) | |||
143 | } | 150 | } |
144 | } | 151 | } |
145 | } | 152 | } |
153 | mutex_unlock(&bp->hwrm_cmd_lock); | ||
146 | return 0; | 154 | return 0; |
147 | } | 155 | } |
148 | 156 | ||
@@ -240,12 +248,17 @@ static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc) | |||
240 | int rc; | 248 | int rc; |
241 | 249 | ||
242 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1); | 250 | bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1); |
243 | rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); | 251 | |
244 | if (rc) | 252 | mutex_lock(&bp->hwrm_cmd_lock); |
253 | rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); | ||
254 | if (rc) { | ||
255 | mutex_unlock(&bp->hwrm_cmd_lock); | ||
245 | return rc; | 256 | return rc; |
257 | } | ||
246 | 258 | ||
247 | pri_mask = le32_to_cpu(resp->flags); | 259 | pri_mask = le32_to_cpu(resp->flags); |
248 | pfc->pfc_en = pri_mask; | 260 | pfc->pfc_en = pri_mask; |
261 | mutex_unlock(&bp->hwrm_cmd_lock); | ||
249 | return 0; | 262 | return 0; |
250 | } | 263 | } |
251 | 264 | ||
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 8eff05a3e0e4..3cbe771b3352 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c | |||
@@ -1052,6 +1052,7 @@ static int bnxt_get_link_ksettings(struct net_device *dev, | |||
1052 | u32 ethtool_speed; | 1052 | u32 ethtool_speed; |
1053 | 1053 | ||
1054 | ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported); | 1054 | ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported); |
1055 | mutex_lock(&bp->link_lock); | ||
1055 | bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings); | 1056 | bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings); |
1056 | 1057 | ||
1057 | ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising); | 1058 | ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising); |
@@ -1099,6 +1100,7 @@ static int bnxt_get_link_ksettings(struct net_device *dev, | |||
1099 | base->port = PORT_FIBRE; | 1100 | base->port = PORT_FIBRE; |
1100 | } | 1101 | } |
1101 | base->phy_address = link_info->phy_addr; | 1102 | base->phy_address = link_info->phy_addr; |
1103 | mutex_unlock(&bp->link_lock); | ||
1102 | 1104 | ||
1103 | return 0; | 1105 | return 0; |
1104 | } | 1106 | } |
@@ -1190,6 +1192,7 @@ static int bnxt_set_link_ksettings(struct net_device *dev, | |||
1190 | if (!BNXT_SINGLE_PF(bp)) | 1192 | if (!BNXT_SINGLE_PF(bp)) |
1191 | return -EOPNOTSUPP; | 1193 | return -EOPNOTSUPP; |
1192 | 1194 | ||
1195 | mutex_lock(&bp->link_lock); | ||
1193 | if (base->autoneg == AUTONEG_ENABLE) { | 1196 | if (base->autoneg == AUTONEG_ENABLE) { |
1194 | BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings, | 1197 | BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings, |
1195 | advertising); | 1198 | advertising); |
@@ -1234,6 +1237,7 @@ static int bnxt_set_link_ksettings(struct net_device *dev, | |||
1234 | rc = bnxt_hwrm_set_link_setting(bp, set_pause, false); | 1237 | rc = bnxt_hwrm_set_link_setting(bp, set_pause, false); |
1235 | 1238 | ||
1236 | set_setting_exit: | 1239 | set_setting_exit: |
1240 | mutex_unlock(&bp->link_lock); | ||
1237 | return rc; | 1241 | return rc; |
1238 | } | 1242 | } |
1239 | 1243 | ||
@@ -1805,7 +1809,8 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal, | |||
1805 | req.dir_ordinal = cpu_to_le16(ordinal); | 1809 | req.dir_ordinal = cpu_to_le16(ordinal); |
1806 | req.dir_ext = cpu_to_le16(ext); | 1810 | req.dir_ext = cpu_to_le16(ext); |
1807 | req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ; | 1811 | req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ; |
1808 | rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); | 1812 | mutex_lock(&bp->hwrm_cmd_lock); |
1813 | rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); | ||
1809 | if (rc == 0) { | 1814 | if (rc == 0) { |
1810 | if (index) | 1815 | if (index) |
1811 | *index = le16_to_cpu(output->dir_idx); | 1816 | *index = le16_to_cpu(output->dir_idx); |
@@ -1814,6 +1819,7 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal, | |||
1814 | if (data_length) | 1819 | if (data_length) |
1815 | *data_length = le32_to_cpu(output->dir_data_length); | 1820 | *data_length = le32_to_cpu(output->dir_data_length); |
1816 | } | 1821 | } |
1822 | mutex_unlock(&bp->hwrm_cmd_lock); | ||
1817 | return rc; | 1823 | return rc; |
1818 | } | 1824 | } |
1819 | 1825 | ||
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index d37925a8a65b..5ee18660bc33 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c | |||
@@ -502,6 +502,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) | |||
502 | int rc = 0, vfs_supported; | 502 | int rc = 0, vfs_supported; |
503 | int min_rx_rings, min_tx_rings, min_rss_ctxs; | 503 | int min_rx_rings, min_tx_rings, min_rss_ctxs; |
504 | int tx_ok = 0, rx_ok = 0, rss_ok = 0; | 504 | int tx_ok = 0, rx_ok = 0, rss_ok = 0; |
505 | int avail_cp, avail_stat; | ||
505 | 506 | ||
506 | /* Check if we can enable requested num of vf's. At a mininum | 507 | /* Check if we can enable requested num of vf's. At a mininum |
507 | * we require 1 RX 1 TX rings for each VF. In this minimum conf | 508 | * we require 1 RX 1 TX rings for each VF. In this minimum conf |
@@ -509,6 +510,10 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) | |||
509 | */ | 510 | */ |
510 | vfs_supported = *num_vfs; | 511 | vfs_supported = *num_vfs; |
511 | 512 | ||
513 | avail_cp = bp->pf.max_cp_rings - bp->cp_nr_rings; | ||
514 | avail_stat = bp->pf.max_stat_ctxs - bp->num_stat_ctxs; | ||
515 | avail_cp = min_t(int, avail_cp, avail_stat); | ||
516 | |||
512 | while (vfs_supported) { | 517 | while (vfs_supported) { |
513 | min_rx_rings = vfs_supported; | 518 | min_rx_rings = vfs_supported; |
514 | min_tx_rings = vfs_supported; | 519 | min_tx_rings = vfs_supported; |
@@ -523,10 +528,12 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) | |||
523 | min_rx_rings) | 528 | min_rx_rings) |
524 | rx_ok = 1; | 529 | rx_ok = 1; |
525 | } | 530 | } |
526 | if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings) | 531 | if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings || |
532 | avail_cp < min_rx_rings) | ||
527 | rx_ok = 0; | 533 | rx_ok = 0; |
528 | 534 | ||
529 | if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings) | 535 | if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings && |
536 | avail_cp >= min_tx_rings) | ||
530 | tx_ok = 1; | 537 | tx_ok = 1; |
531 | 538 | ||
532 | if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs) | 539 | if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs) |
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index e7f54948173f..5b19826a7e16 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c | |||
@@ -1847,7 +1847,7 @@ static int liquidio_ptp_settime(struct ptp_clock_info *ptp, | |||
1847 | struct lio *lio = container_of(ptp, struct lio, ptp_info); | 1847 | struct lio *lio = container_of(ptp, struct lio, ptp_info); |
1848 | struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; | 1848 | struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; |
1849 | 1849 | ||
1850 | ns = timespec_to_ns(ts); | 1850 | ns = timespec64_to_ns(ts); |
1851 | 1851 | ||
1852 | spin_lock_irqsave(&lio->ptp_lock, flags); | 1852 | spin_lock_irqsave(&lio->ptp_lock, flags); |
1853 | lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI); | 1853 | lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI); |
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 49b80da51ba7..805ab45e9b5a 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c | |||
@@ -565,8 +565,10 @@ static inline bool nicvf_xdp_rx(struct nicvf *nic, struct bpf_prog *prog, | |||
565 | return true; | 565 | return true; |
566 | default: | 566 | default: |
567 | bpf_warn_invalid_xdp_action(action); | 567 | bpf_warn_invalid_xdp_action(action); |
568 | /* fall through */ | ||
568 | case XDP_ABORTED: | 569 | case XDP_ABORTED: |
569 | trace_xdp_exception(nic->netdev, prog, action); | 570 | trace_xdp_exception(nic->netdev, prog, action); |
571 | /* fall through */ | ||
570 | case XDP_DROP: | 572 | case XDP_DROP: |
571 | /* Check if it's a recycled page, if not | 573 | /* Check if it's a recycled page, if not |
572 | * unmap the DMA mapping. | 574 | * unmap the DMA mapping. |
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index cb8182f4fdfa..c66abd476023 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c | |||
@@ -1093,11 +1093,12 @@ static int build_hdr_data(u8 hdr_field, struct sk_buff *skb, | |||
1093 | * places them in a descriptor array, scrq_arr | 1093 | * places them in a descriptor array, scrq_arr |
1094 | */ | 1094 | */ |
1095 | 1095 | ||
1096 | static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len, | 1096 | static int create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len, |
1097 | union sub_crq *scrq_arr) | 1097 | union sub_crq *scrq_arr) |
1098 | { | 1098 | { |
1099 | union sub_crq hdr_desc; | 1099 | union sub_crq hdr_desc; |
1100 | int tmp_len = len; | 1100 | int tmp_len = len; |
1101 | int num_descs = 0; | ||
1101 | u8 *data, *cur; | 1102 | u8 *data, *cur; |
1102 | int tmp; | 1103 | int tmp; |
1103 | 1104 | ||
@@ -1126,7 +1127,10 @@ static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len, | |||
1126 | tmp_len -= tmp; | 1127 | tmp_len -= tmp; |
1127 | *scrq_arr = hdr_desc; | 1128 | *scrq_arr = hdr_desc; |
1128 | scrq_arr++; | 1129 | scrq_arr++; |
1130 | num_descs++; | ||
1129 | } | 1131 | } |
1132 | |||
1133 | return num_descs; | ||
1130 | } | 1134 | } |
1131 | 1135 | ||
1132 | /** | 1136 | /** |
@@ -1144,16 +1148,12 @@ static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff, | |||
1144 | int *num_entries, u8 hdr_field) | 1148 | int *num_entries, u8 hdr_field) |
1145 | { | 1149 | { |
1146 | int hdr_len[3] = {0, 0, 0}; | 1150 | int hdr_len[3] = {0, 0, 0}; |
1147 | int tot_len, len; | 1151 | int tot_len; |
1148 | u8 *hdr_data = txbuff->hdr_data; | 1152 | u8 *hdr_data = txbuff->hdr_data; |
1149 | 1153 | ||
1150 | tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len, | 1154 | tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len, |
1151 | txbuff->hdr_data); | 1155 | txbuff->hdr_data); |
1152 | len = tot_len; | 1156 | *num_entries += create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len, |
1153 | len -= 24; | ||
1154 | if (len > 0) | ||
1155 | num_entries += len % 29 ? len / 29 + 1 : len / 29; | ||
1156 | create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len, | ||
1157 | txbuff->indir_arr + 1); | 1157 | txbuff->indir_arr + 1); |
1158 | } | 1158 | } |
1159 | 1159 | ||
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index ec8aa4562cc9..3b3983a1ffbb 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c | |||
@@ -1824,11 +1824,12 @@ static void e1000_get_ethtool_stats(struct net_device *netdev, | |||
1824 | { | 1824 | { |
1825 | struct e1000_adapter *adapter = netdev_priv(netdev); | 1825 | struct e1000_adapter *adapter = netdev_priv(netdev); |
1826 | int i; | 1826 | int i; |
1827 | char *p = NULL; | ||
1828 | const struct e1000_stats *stat = e1000_gstrings_stats; | 1827 | const struct e1000_stats *stat = e1000_gstrings_stats; |
1829 | 1828 | ||
1830 | e1000_update_stats(adapter); | 1829 | e1000_update_stats(adapter); |
1831 | for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { | 1830 | for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++, stat++) { |
1831 | char *p; | ||
1832 | |||
1832 | switch (stat->type) { | 1833 | switch (stat->type) { |
1833 | case NETDEV_STATS: | 1834 | case NETDEV_STATS: |
1834 | p = (char *)netdev + stat->stat_offset; | 1835 | p = (char *)netdev + stat->stat_offset; |
@@ -1839,15 +1840,13 @@ static void e1000_get_ethtool_stats(struct net_device *netdev, | |||
1839 | default: | 1840 | default: |
1840 | WARN_ONCE(1, "Invalid E1000 stat type: %u index %d\n", | 1841 | WARN_ONCE(1, "Invalid E1000 stat type: %u index %d\n", |
1841 | stat->type, i); | 1842 | stat->type, i); |
1842 | break; | 1843 | continue; |
1843 | } | 1844 | } |
1844 | 1845 | ||
1845 | if (stat->sizeof_stat == sizeof(u64)) | 1846 | if (stat->sizeof_stat == sizeof(u64)) |
1846 | data[i] = *(u64 *)p; | 1847 | data[i] = *(u64 *)p; |
1847 | else | 1848 | else |
1848 | data[i] = *(u32 *)p; | 1849 | data[i] = *(u32 *)p; |
1849 | |||
1850 | stat++; | ||
1851 | } | 1850 | } |
1852 | /* BUG_ON(i != E1000_STATS_LEN); */ | 1851 | /* BUG_ON(i != E1000_STATS_LEN); */ |
1853 | } | 1852 | } |
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 98375e1e1185..1982f7917a8d 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c | |||
@@ -520,8 +520,6 @@ void e1000_down(struct e1000_adapter *adapter) | |||
520 | struct net_device *netdev = adapter->netdev; | 520 | struct net_device *netdev = adapter->netdev; |
521 | u32 rctl, tctl; | 521 | u32 rctl, tctl; |
522 | 522 | ||
523 | netif_carrier_off(netdev); | ||
524 | |||
525 | /* disable receives in the hardware */ | 523 | /* disable receives in the hardware */ |
526 | rctl = er32(RCTL); | 524 | rctl = er32(RCTL); |
527 | ew32(RCTL, rctl & ~E1000_RCTL_EN); | 525 | ew32(RCTL, rctl & ~E1000_RCTL_EN); |
@@ -537,6 +535,15 @@ void e1000_down(struct e1000_adapter *adapter) | |||
537 | E1000_WRITE_FLUSH(); | 535 | E1000_WRITE_FLUSH(); |
538 | msleep(10); | 536 | msleep(10); |
539 | 537 | ||
538 | /* Set the carrier off after transmits have been disabled in the | ||
539 | * hardware, to avoid race conditions with e1000_watchdog() (which | ||
540 | * may be running concurrently to us, checking for the carrier | ||
541 | * bit to decide whether it should enable transmits again). Such | ||
542 | * a race condition would result into transmission being disabled | ||
543 | * in the hardware until the next IFF_DOWN+IFF_UP cycle. | ||
544 | */ | ||
545 | netif_carrier_off(netdev); | ||
546 | |||
540 | napi_disable(&adapter->napi); | 547 | napi_disable(&adapter->napi); |
541 | 548 | ||
542 | e1000_irq_disable(adapter); | 549 | e1000_irq_disable(adapter); |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 57505b1df98d..d591b3e6bd7c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c | |||
@@ -298,7 +298,7 @@ static i40e_status i40e_read_nvm_word_aq(struct i40e_hw *hw, u16 offset, | |||
298 | } | 298 | } |
299 | 299 | ||
300 | /** | 300 | /** |
301 | * __i40e_read_nvm_word - Reads nvm word, assumes called does the locking | 301 | * __i40e_read_nvm_word - Reads nvm word, assumes caller does the locking |
302 | * @hw: pointer to the HW structure | 302 | * @hw: pointer to the HW structure |
303 | * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) | 303 | * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF) |
304 | * @data: word read from the Shadow RAM | 304 | * @data: word read from the Shadow RAM |
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 1519dfb851d0..120c68f78951 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c | |||
@@ -1038,6 +1038,32 @@ reset_latency: | |||
1038 | } | 1038 | } |
1039 | 1039 | ||
1040 | /** | 1040 | /** |
1041 | * i40e_reuse_rx_page - page flip buffer and store it back on the ring | ||
1042 | * @rx_ring: rx descriptor ring to store buffers on | ||
1043 | * @old_buff: donor buffer to have page reused | ||
1044 | * | ||
1045 | * Synchronizes page for reuse by the adapter | ||
1046 | **/ | ||
1047 | static void i40e_reuse_rx_page(struct i40e_ring *rx_ring, | ||
1048 | struct i40e_rx_buffer *old_buff) | ||
1049 | { | ||
1050 | struct i40e_rx_buffer *new_buff; | ||
1051 | u16 nta = rx_ring->next_to_alloc; | ||
1052 | |||
1053 | new_buff = &rx_ring->rx_bi[nta]; | ||
1054 | |||
1055 | /* update, and store next to alloc */ | ||
1056 | nta++; | ||
1057 | rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; | ||
1058 | |||
1059 | /* transfer page from old buffer to new buffer */ | ||
1060 | new_buff->dma = old_buff->dma; | ||
1061 | new_buff->page = old_buff->page; | ||
1062 | new_buff->page_offset = old_buff->page_offset; | ||
1063 | new_buff->pagecnt_bias = old_buff->pagecnt_bias; | ||
1064 | } | ||
1065 | |||
1066 | /** | ||
1041 | * i40e_rx_is_programming_status - check for programming status descriptor | 1067 | * i40e_rx_is_programming_status - check for programming status descriptor |
1042 | * @qw: qword representing status_error_len in CPU ordering | 1068 | * @qw: qword representing status_error_len in CPU ordering |
1043 | * | 1069 | * |
@@ -1071,15 +1097,24 @@ static void i40e_clean_programming_status(struct i40e_ring *rx_ring, | |||
1071 | union i40e_rx_desc *rx_desc, | 1097 | union i40e_rx_desc *rx_desc, |
1072 | u64 qw) | 1098 | u64 qw) |
1073 | { | 1099 | { |
1074 | u32 ntc = rx_ring->next_to_clean + 1; | 1100 | struct i40e_rx_buffer *rx_buffer; |
1101 | u32 ntc = rx_ring->next_to_clean; | ||
1075 | u8 id; | 1102 | u8 id; |
1076 | 1103 | ||
1077 | /* fetch, update, and store next to clean */ | 1104 | /* fetch, update, and store next to clean */ |
1105 | rx_buffer = &rx_ring->rx_bi[ntc++]; | ||
1078 | ntc = (ntc < rx_ring->count) ? ntc : 0; | 1106 | ntc = (ntc < rx_ring->count) ? ntc : 0; |
1079 | rx_ring->next_to_clean = ntc; | 1107 | rx_ring->next_to_clean = ntc; |
1080 | 1108 | ||
1081 | prefetch(I40E_RX_DESC(rx_ring, ntc)); | 1109 | prefetch(I40E_RX_DESC(rx_ring, ntc)); |
1082 | 1110 | ||
1111 | /* place unused page back on the ring */ | ||
1112 | i40e_reuse_rx_page(rx_ring, rx_buffer); | ||
1113 | rx_ring->rx_stats.page_reuse_count++; | ||
1114 | |||
1115 | /* clear contents of buffer_info */ | ||
1116 | rx_buffer->page = NULL; | ||
1117 | |||
1083 | id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >> | 1118 | id = (qw & I40E_RX_PROG_STATUS_DESC_QW1_PROGID_MASK) >> |
1084 | I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT; | 1119 | I40E_RX_PROG_STATUS_DESC_QW1_PROGID_SHIFT; |
1085 | 1120 | ||
@@ -1639,32 +1674,6 @@ static bool i40e_cleanup_headers(struct i40e_ring *rx_ring, struct sk_buff *skb, | |||
1639 | } | 1674 | } |
1640 | 1675 | ||
1641 | /** | 1676 | /** |
1642 | * i40e_reuse_rx_page - page flip buffer and store it back on the ring | ||
1643 | * @rx_ring: rx descriptor ring to store buffers on | ||
1644 | * @old_buff: donor buffer to have page reused | ||
1645 | * | ||
1646 | * Synchronizes page for reuse by the adapter | ||
1647 | **/ | ||
1648 | static void i40e_reuse_rx_page(struct i40e_ring *rx_ring, | ||
1649 | struct i40e_rx_buffer *old_buff) | ||
1650 | { | ||
1651 | struct i40e_rx_buffer *new_buff; | ||
1652 | u16 nta = rx_ring->next_to_alloc; | ||
1653 | |||
1654 | new_buff = &rx_ring->rx_bi[nta]; | ||
1655 | |||
1656 | /* update, and store next to alloc */ | ||
1657 | nta++; | ||
1658 | rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; | ||
1659 | |||
1660 | /* transfer page from old buffer to new buffer */ | ||
1661 | new_buff->dma = old_buff->dma; | ||
1662 | new_buff->page = old_buff->page; | ||
1663 | new_buff->page_offset = old_buff->page_offset; | ||
1664 | new_buff->pagecnt_bias = old_buff->pagecnt_bias; | ||
1665 | } | ||
1666 | |||
1667 | /** | ||
1668 | * i40e_page_is_reusable - check if any reuse is possible | 1677 | * i40e_page_is_reusable - check if any reuse is possible |
1669 | * @page: page struct to check | 1678 | * @page: page struct to check |
1670 | * | 1679 | * |
@@ -2093,6 +2102,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) | |||
2093 | 2102 | ||
2094 | if (unlikely(i40e_rx_is_programming_status(qword))) { | 2103 | if (unlikely(i40e_rx_is_programming_status(qword))) { |
2095 | i40e_clean_programming_status(rx_ring, rx_desc, qword); | 2104 | i40e_clean_programming_status(rx_ring, rx_desc, qword); |
2105 | cleaned_count++; | ||
2096 | continue; | 2106 | continue; |
2097 | } | 2107 | } |
2098 | size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> | 2108 | size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> |
@@ -2260,7 +2270,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, | |||
2260 | goto enable_int; | 2270 | goto enable_int; |
2261 | } | 2271 | } |
2262 | 2272 | ||
2263 | if (ITR_IS_DYNAMIC(tx_itr_setting)) { | 2273 | if (ITR_IS_DYNAMIC(rx_itr_setting)) { |
2264 | rx = i40e_set_new_dynamic_itr(&q_vector->rx); | 2274 | rx = i40e_set_new_dynamic_itr(&q_vector->rx); |
2265 | rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr); | 2275 | rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr); |
2266 | } | 2276 | } |
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index fd4a46b03cc8..ea69af267d63 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c | |||
@@ -5326,7 +5326,7 @@ dma_error: | |||
5326 | DMA_TO_DEVICE); | 5326 | DMA_TO_DEVICE); |
5327 | dma_unmap_len_set(tx_buffer, len, 0); | 5327 | dma_unmap_len_set(tx_buffer, len, 0); |
5328 | 5328 | ||
5329 | if (i--) | 5329 | if (i-- == 0) |
5330 | i += tx_ring->count; | 5330 | i += tx_ring->count; |
5331 | tx_buffer = &tx_ring->tx_buffer_info[i]; | 5331 | tx_buffer = &tx_ring->tx_buffer_info[i]; |
5332 | } | 5332 | } |
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c index 523f9d05a810..8a32eb7d47b9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c | |||
@@ -175,31 +175,9 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) | |||
175 | **/ | 175 | **/ |
176 | static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) | 176 | static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) |
177 | { | 177 | { |
178 | #ifndef CONFIG_SPARC | ||
179 | u32 regval; | ||
180 | u32 i; | ||
181 | #endif | ||
182 | s32 ret_val; | 178 | s32 ret_val; |
183 | 179 | ||
184 | ret_val = ixgbe_start_hw_generic(hw); | 180 | ret_val = ixgbe_start_hw_generic(hw); |
185 | |||
186 | #ifndef CONFIG_SPARC | ||
187 | /* Disable relaxed ordering */ | ||
188 | for (i = 0; ((i < hw->mac.max_tx_queues) && | ||
189 | (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { | ||
190 | regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); | ||
191 | regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; | ||
192 | IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(i), regval); | ||
193 | } | ||
194 | |||
195 | for (i = 0; ((i < hw->mac.max_rx_queues) && | ||
196 | (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { | ||
197 | regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); | ||
198 | regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN | | ||
199 | IXGBE_DCA_RXCTRL_HEAD_WRO_EN); | ||
200 | IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); | ||
201 | } | ||
202 | #endif | ||
203 | if (ret_val) | 181 | if (ret_val) |
204 | return ret_val; | 182 | return ret_val; |
205 | 183 | ||
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c index 2c19070d2a0b..6e6ab6f6875e 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | |||
@@ -366,25 +366,6 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) | |||
366 | } | 366 | } |
367 | IXGBE_WRITE_FLUSH(hw); | 367 | IXGBE_WRITE_FLUSH(hw); |
368 | 368 | ||
369 | #ifndef CONFIG_ARCH_WANT_RELAX_ORDER | ||
370 | /* Disable relaxed ordering */ | ||
371 | for (i = 0; i < hw->mac.max_tx_queues; i++) { | ||
372 | u32 regval; | ||
373 | |||
374 | regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); | ||
375 | regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; | ||
376 | IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); | ||
377 | } | ||
378 | |||
379 | for (i = 0; i < hw->mac.max_rx_queues; i++) { | ||
380 | u32 regval; | ||
381 | |||
382 | regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); | ||
383 | regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN | | ||
384 | IXGBE_DCA_RXCTRL_HEAD_WRO_EN); | ||
385 | IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); | ||
386 | } | ||
387 | #endif | ||
388 | return 0; | 369 | return 0; |
389 | } | 370 | } |
390 | 371 | ||
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 72c565712a5f..c3e7a8191128 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | |||
@@ -1048,7 +1048,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev, | |||
1048 | { | 1048 | { |
1049 | struct ixgbe_adapter *adapter = netdev_priv(netdev); | 1049 | struct ixgbe_adapter *adapter = netdev_priv(netdev); |
1050 | struct ixgbe_ring *temp_ring; | 1050 | struct ixgbe_ring *temp_ring; |
1051 | int i, err = 0; | 1051 | int i, j, err = 0; |
1052 | u32 new_rx_count, new_tx_count; | 1052 | u32 new_rx_count, new_tx_count; |
1053 | 1053 | ||
1054 | if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) | 1054 | if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) |
@@ -1085,8 +1085,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev, | |||
1085 | } | 1085 | } |
1086 | 1086 | ||
1087 | /* allocate temporary buffer to store rings in */ | 1087 | /* allocate temporary buffer to store rings in */ |
1088 | i = max_t(int, adapter->num_tx_queues, adapter->num_rx_queues); | 1088 | i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues, |
1089 | i = max_t(int, i, adapter->num_xdp_queues); | 1089 | adapter->num_rx_queues); |
1090 | temp_ring = vmalloc(i * sizeof(struct ixgbe_ring)); | 1090 | temp_ring = vmalloc(i * sizeof(struct ixgbe_ring)); |
1091 | 1091 | ||
1092 | if (!temp_ring) { | 1092 | if (!temp_ring) { |
@@ -1118,8 +1118,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev, | |||
1118 | } | 1118 | } |
1119 | } | 1119 | } |
1120 | 1120 | ||
1121 | for (i = 0; i < adapter->num_xdp_queues; i++) { | 1121 | for (j = 0; j < adapter->num_xdp_queues; j++, i++) { |
1122 | memcpy(&temp_ring[i], adapter->xdp_ring[i], | 1122 | memcpy(&temp_ring[i], adapter->xdp_ring[j], |
1123 | sizeof(struct ixgbe_ring)); | 1123 | sizeof(struct ixgbe_ring)); |
1124 | 1124 | ||
1125 | temp_ring[i].count = new_tx_count; | 1125 | temp_ring[i].count = new_tx_count; |
@@ -1139,10 +1139,10 @@ static int ixgbe_set_ringparam(struct net_device *netdev, | |||
1139 | memcpy(adapter->tx_ring[i], &temp_ring[i], | 1139 | memcpy(adapter->tx_ring[i], &temp_ring[i], |
1140 | sizeof(struct ixgbe_ring)); | 1140 | sizeof(struct ixgbe_ring)); |
1141 | } | 1141 | } |
1142 | for (i = 0; i < adapter->num_xdp_queues; i++) { | 1142 | for (j = 0; j < adapter->num_xdp_queues; j++, i++) { |
1143 | ixgbe_free_tx_resources(adapter->xdp_ring[i]); | 1143 | ixgbe_free_tx_resources(adapter->xdp_ring[j]); |
1144 | 1144 | ||
1145 | memcpy(adapter->xdp_ring[i], &temp_ring[i], | 1145 | memcpy(adapter->xdp_ring[j], &temp_ring[i], |
1146 | sizeof(struct ixgbe_ring)); | 1146 | sizeof(struct ixgbe_ring)); |
1147 | } | 1147 | } |
1148 | 1148 | ||
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index d962368d08d0..6d5f31e94358 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | |||
@@ -4881,7 +4881,7 @@ static void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask) | |||
4881 | IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))) | 4881 | IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))) |
4882 | return; | 4882 | return; |
4883 | 4883 | ||
4884 | vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) && ~mask; | 4884 | vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) & ~mask; |
4885 | IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl); | 4885 | IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl); |
4886 | 4886 | ||
4887 | if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK) | 4887 | if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK) |
@@ -8020,29 +8020,23 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring, | |||
8020 | return 0; | 8020 | return 0; |
8021 | dma_error: | 8021 | dma_error: |
8022 | dev_err(tx_ring->dev, "TX DMA map failed\n"); | 8022 | dev_err(tx_ring->dev, "TX DMA map failed\n"); |
8023 | tx_buffer = &tx_ring->tx_buffer_info[i]; | ||
8024 | 8023 | ||
8025 | /* clear dma mappings for failed tx_buffer_info map */ | 8024 | /* clear dma mappings for failed tx_buffer_info map */ |
8026 | while (tx_buffer != first) { | 8025 | for (;;) { |
8026 | tx_buffer = &tx_ring->tx_buffer_info[i]; | ||
8027 | if (dma_unmap_len(tx_buffer, len)) | 8027 | if (dma_unmap_len(tx_buffer, len)) |
8028 | dma_unmap_page(tx_ring->dev, | 8028 | dma_unmap_page(tx_ring->dev, |
8029 | dma_unmap_addr(tx_buffer, dma), | 8029 | dma_unmap_addr(tx_buffer, dma), |
8030 | dma_unmap_len(tx_buffer, len), | 8030 | dma_unmap_len(tx_buffer, len), |
8031 | DMA_TO_DEVICE); | 8031 | DMA_TO_DEVICE); |
8032 | dma_unmap_len_set(tx_buffer, len, 0); | 8032 | dma_unmap_len_set(tx_buffer, len, 0); |
8033 | 8033 | if (tx_buffer == first) | |
8034 | if (i--) | 8034 | break; |
8035 | if (i == 0) | ||
8035 | i += tx_ring->count; | 8036 | i += tx_ring->count; |
8036 | tx_buffer = &tx_ring->tx_buffer_info[i]; | 8037 | i--; |
8037 | } | 8038 | } |
8038 | 8039 | ||
8039 | if (dma_unmap_len(tx_buffer, len)) | ||
8040 | dma_unmap_single(tx_ring->dev, | ||
8041 | dma_unmap_addr(tx_buffer, dma), | ||
8042 | dma_unmap_len(tx_buffer, len), | ||
8043 | DMA_TO_DEVICE); | ||
8044 | dma_unmap_len_set(tx_buffer, len, 0); | ||
8045 | |||
8046 | dev_kfree_skb_any(first->skb); | 8040 | dev_kfree_skb_any(first->skb); |
8047 | first->skb = NULL; | 8041 | first->skb = NULL; |
8048 | 8042 | ||
@@ -8529,6 +8523,10 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) | |||
8529 | return ixgbe_ptp_set_ts_config(adapter, req); | 8523 | return ixgbe_ptp_set_ts_config(adapter, req); |
8530 | case SIOCGHWTSTAMP: | 8524 | case SIOCGHWTSTAMP: |
8531 | return ixgbe_ptp_get_ts_config(adapter, req); | 8525 | return ixgbe_ptp_get_ts_config(adapter, req); |
8526 | case SIOCGMIIPHY: | ||
8527 | if (!adapter->hw.phy.ops.read_reg) | ||
8528 | return -EOPNOTSUPP; | ||
8529 | /* fall through */ | ||
8532 | default: | 8530 | default: |
8533 | return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd); | 8531 | return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd); |
8534 | } | 8532 | } |
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c index 9c86cb7cb988..a37af5813f33 100644 --- a/drivers/net/ethernet/marvell/mvpp2.c +++ b/drivers/net/ethernet/marvell/mvpp2.c | |||
@@ -1167,6 +1167,11 @@ struct mvpp2_bm_pool { | |||
1167 | u32 port_map; | 1167 | u32 port_map; |
1168 | }; | 1168 | }; |
1169 | 1169 | ||
1170 | #define IS_TSO_HEADER(txq_pcpu, addr) \ | ||
1171 | ((addr) >= (txq_pcpu)->tso_headers_dma && \ | ||
1172 | (addr) < (txq_pcpu)->tso_headers_dma + \ | ||
1173 | (txq_pcpu)->size * TSO_HEADER_SIZE) | ||
1174 | |||
1170 | /* Queue modes */ | 1175 | /* Queue modes */ |
1171 | #define MVPP2_QDIST_SINGLE_MODE 0 | 1176 | #define MVPP2_QDIST_SINGLE_MODE 0 |
1172 | #define MVPP2_QDIST_MULTI_MODE 1 | 1177 | #define MVPP2_QDIST_MULTI_MODE 1 |
@@ -1534,7 +1539,7 @@ static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs, | |||
1534 | int off = MVPP2_PRS_TCAM_DATA_BYTE(offs); | 1539 | int off = MVPP2_PRS_TCAM_DATA_BYTE(offs); |
1535 | u16 tcam_data; | 1540 | u16 tcam_data; |
1536 | 1541 | ||
1537 | tcam_data = (8 << pe->tcam.byte[off + 1]) | pe->tcam.byte[off]; | 1542 | tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off]; |
1538 | if (tcam_data != data) | 1543 | if (tcam_data != data) |
1539 | return false; | 1544 | return false; |
1540 | return true; | 1545 | return true; |
@@ -2609,8 +2614,8 @@ static void mvpp2_prs_mac_init(struct mvpp2 *priv) | |||
2609 | /* place holders only - no ports */ | 2614 | /* place holders only - no ports */ |
2610 | mvpp2_prs_mac_drop_all_set(priv, 0, false); | 2615 | mvpp2_prs_mac_drop_all_set(priv, 0, false); |
2611 | mvpp2_prs_mac_promisc_set(priv, 0, false); | 2616 | mvpp2_prs_mac_promisc_set(priv, 0, false); |
2612 | mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false); | 2617 | mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_ALL, false); |
2613 | mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false); | 2618 | mvpp2_prs_mac_multi_set(priv, 0, MVPP2_PE_MAC_MC_IP6, false); |
2614 | } | 2619 | } |
2615 | 2620 | ||
2616 | /* Set default entries for various types of dsa packets */ | 2621 | /* Set default entries for various types of dsa packets */ |
@@ -3391,7 +3396,7 @@ mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da, | |||
3391 | struct mvpp2_prs_entry *pe; | 3396 | struct mvpp2_prs_entry *pe; |
3392 | int tid; | 3397 | int tid; |
3393 | 3398 | ||
3394 | pe = kzalloc(sizeof(*pe), GFP_KERNEL); | 3399 | pe = kzalloc(sizeof(*pe), GFP_ATOMIC); |
3395 | if (!pe) | 3400 | if (!pe) |
3396 | return NULL; | 3401 | return NULL; |
3397 | mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC); | 3402 | mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC); |
@@ -3453,7 +3458,7 @@ static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port, | |||
3453 | if (tid < 0) | 3458 | if (tid < 0) |
3454 | return tid; | 3459 | return tid; |
3455 | 3460 | ||
3456 | pe = kzalloc(sizeof(*pe), GFP_KERNEL); | 3461 | pe = kzalloc(sizeof(*pe), GFP_ATOMIC); |
3457 | if (!pe) | 3462 | if (!pe) |
3458 | return -ENOMEM; | 3463 | return -ENOMEM; |
3459 | mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC); | 3464 | mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC); |
@@ -5321,8 +5326,9 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port, | |||
5321 | struct mvpp2_txq_pcpu_buf *tx_buf = | 5326 | struct mvpp2_txq_pcpu_buf *tx_buf = |
5322 | txq_pcpu->buffs + txq_pcpu->txq_get_index; | 5327 | txq_pcpu->buffs + txq_pcpu->txq_get_index; |
5323 | 5328 | ||
5324 | dma_unmap_single(port->dev->dev.parent, tx_buf->dma, | 5329 | if (!IS_TSO_HEADER(txq_pcpu, tx_buf->dma)) |
5325 | tx_buf->size, DMA_TO_DEVICE); | 5330 | dma_unmap_single(port->dev->dev.parent, tx_buf->dma, |
5331 | tx_buf->size, DMA_TO_DEVICE); | ||
5326 | if (tx_buf->skb) | 5332 | if (tx_buf->skb) |
5327 | dev_kfree_skb_any(tx_buf->skb); | 5333 | dev_kfree_skb_any(tx_buf->skb); |
5328 | 5334 | ||
@@ -5609,7 +5615,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port, | |||
5609 | 5615 | ||
5610 | txq_pcpu->tso_headers = | 5616 | txq_pcpu->tso_headers = |
5611 | dma_alloc_coherent(port->dev->dev.parent, | 5617 | dma_alloc_coherent(port->dev->dev.parent, |
5612 | MVPP2_AGGR_TXQ_SIZE * TSO_HEADER_SIZE, | 5618 | txq_pcpu->size * TSO_HEADER_SIZE, |
5613 | &txq_pcpu->tso_headers_dma, | 5619 | &txq_pcpu->tso_headers_dma, |
5614 | GFP_KERNEL); | 5620 | GFP_KERNEL); |
5615 | if (!txq_pcpu->tso_headers) | 5621 | if (!txq_pcpu->tso_headers) |
@@ -5623,7 +5629,7 @@ cleanup: | |||
5623 | kfree(txq_pcpu->buffs); | 5629 | kfree(txq_pcpu->buffs); |
5624 | 5630 | ||
5625 | dma_free_coherent(port->dev->dev.parent, | 5631 | dma_free_coherent(port->dev->dev.parent, |
5626 | MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, | 5632 | txq_pcpu->size * TSO_HEADER_SIZE, |
5627 | txq_pcpu->tso_headers, | 5633 | txq_pcpu->tso_headers, |
5628 | txq_pcpu->tso_headers_dma); | 5634 | txq_pcpu->tso_headers_dma); |
5629 | } | 5635 | } |
@@ -5647,7 +5653,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port, | |||
5647 | kfree(txq_pcpu->buffs); | 5653 | kfree(txq_pcpu->buffs); |
5648 | 5654 | ||
5649 | dma_free_coherent(port->dev->dev.parent, | 5655 | dma_free_coherent(port->dev->dev.parent, |
5650 | MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE, | 5656 | txq_pcpu->size * TSO_HEADER_SIZE, |
5651 | txq_pcpu->tso_headers, | 5657 | txq_pcpu->tso_headers, |
5652 | txq_pcpu->tso_headers_dma); | 5658 | txq_pcpu->tso_headers_dma); |
5653 | } | 5659 | } |
@@ -6212,12 +6218,15 @@ static inline void | |||
6212 | tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, | 6218 | tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq, |
6213 | struct mvpp2_tx_desc *desc) | 6219 | struct mvpp2_tx_desc *desc) |
6214 | { | 6220 | { |
6221 | struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu); | ||
6222 | |||
6215 | dma_addr_t buf_dma_addr = | 6223 | dma_addr_t buf_dma_addr = |
6216 | mvpp2_txdesc_dma_addr_get(port, desc); | 6224 | mvpp2_txdesc_dma_addr_get(port, desc); |
6217 | size_t buf_sz = | 6225 | size_t buf_sz = |
6218 | mvpp2_txdesc_size_get(port, desc); | 6226 | mvpp2_txdesc_size_get(port, desc); |
6219 | dma_unmap_single(port->dev->dev.parent, buf_dma_addr, | 6227 | if (!IS_TSO_HEADER(txq_pcpu, buf_dma_addr)) |
6220 | buf_sz, DMA_TO_DEVICE); | 6228 | dma_unmap_single(port->dev->dev.parent, buf_dma_addr, |
6229 | buf_sz, DMA_TO_DEVICE); | ||
6221 | mvpp2_txq_desc_put(txq); | 6230 | mvpp2_txq_desc_put(txq); |
6222 | } | 6231 | } |
6223 | 6232 | ||
@@ -6490,7 +6499,7 @@ out: | |||
6490 | } | 6499 | } |
6491 | 6500 | ||
6492 | /* Finalize TX processing */ | 6501 | /* Finalize TX processing */ |
6493 | if (txq_pcpu->count >= txq->done_pkts_coal) | 6502 | if (!port->has_tx_irqs && txq_pcpu->count >= txq->done_pkts_coal) |
6494 | mvpp2_txq_done(port, txq, txq_pcpu); | 6503 | mvpp2_txq_done(port, txq, txq_pcpu); |
6495 | 6504 | ||
6496 | /* Set the timer in case not all frags were processed */ | 6505 | /* Set the timer in case not all frags were processed */ |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index ff60cf7342ca..fc281712869b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c | |||
@@ -77,35 +77,41 @@ static void add_delayed_event(struct mlx5_priv *priv, | |||
77 | list_add_tail(&delayed_event->list, &priv->waiting_events_list); | 77 | list_add_tail(&delayed_event->list, &priv->waiting_events_list); |
78 | } | 78 | } |
79 | 79 | ||
80 | static void fire_delayed_event_locked(struct mlx5_device_context *dev_ctx, | 80 | static void delayed_event_release(struct mlx5_device_context *dev_ctx, |
81 | struct mlx5_core_dev *dev, | 81 | struct mlx5_priv *priv) |
82 | struct mlx5_priv *priv) | ||
83 | { | 82 | { |
83 | struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv); | ||
84 | struct mlx5_delayed_event *de; | 84 | struct mlx5_delayed_event *de; |
85 | struct mlx5_delayed_event *n; | 85 | struct mlx5_delayed_event *n; |
86 | struct list_head temp; | ||
86 | 87 | ||
87 | /* stop delaying events */ | 88 | INIT_LIST_HEAD(&temp); |
88 | priv->is_accum_events = false; | 89 | |
90 | spin_lock_irq(&priv->ctx_lock); | ||
89 | 91 | ||
90 | /* fire all accumulated events before new event comes */ | 92 | priv->is_accum_events = false; |
91 | list_for_each_entry_safe(de, n, &priv->waiting_events_list, list) { | 93 | list_splice_init(&priv->waiting_events_list, &temp); |
94 | if (!dev_ctx->context) | ||
95 | goto out; | ||
96 | list_for_each_entry_safe(de, n, &priv->waiting_events_list, list) | ||
92 | dev_ctx->intf->event(dev, dev_ctx->context, de->event, de->param); | 97 | dev_ctx->intf->event(dev, dev_ctx->context, de->event, de->param); |
98 | |||
99 | out: | ||
100 | spin_unlock_irq(&priv->ctx_lock); | ||
101 | |||
102 | list_for_each_entry_safe(de, n, &temp, list) { | ||
93 | list_del(&de->list); | 103 | list_del(&de->list); |
94 | kfree(de); | 104 | kfree(de); |
95 | } | 105 | } |
96 | } | 106 | } |
97 | 107 | ||
98 | static void cleanup_delayed_evets(struct mlx5_priv *priv) | 108 | /* accumulating events that can come after mlx5_ib calls to |
109 | * ib_register_device, till adding that interface to the events list. | ||
110 | */ | ||
111 | static void delayed_event_start(struct mlx5_priv *priv) | ||
99 | { | 112 | { |
100 | struct mlx5_delayed_event *de; | ||
101 | struct mlx5_delayed_event *n; | ||
102 | |||
103 | spin_lock_irq(&priv->ctx_lock); | 113 | spin_lock_irq(&priv->ctx_lock); |
104 | priv->is_accum_events = false; | 114 | priv->is_accum_events = true; |
105 | list_for_each_entry_safe(de, n, &priv->waiting_events_list, list) { | ||
106 | list_del(&de->list); | ||
107 | kfree(de); | ||
108 | } | ||
109 | spin_unlock_irq(&priv->ctx_lock); | 115 | spin_unlock_irq(&priv->ctx_lock); |
110 | } | 116 | } |
111 | 117 | ||
@@ -122,11 +128,8 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv) | |||
122 | return; | 128 | return; |
123 | 129 | ||
124 | dev_ctx->intf = intf; | 130 | dev_ctx->intf = intf; |
125 | /* accumulating events that can come after mlx5_ib calls to | ||
126 | * ib_register_device, till adding that interface to the events list. | ||
127 | */ | ||
128 | 131 | ||
129 | priv->is_accum_events = true; | 132 | delayed_event_start(priv); |
130 | 133 | ||
131 | dev_ctx->context = intf->add(dev); | 134 | dev_ctx->context = intf->add(dev); |
132 | set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state); | 135 | set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state); |
@@ -137,8 +140,6 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv) | |||
137 | spin_lock_irq(&priv->ctx_lock); | 140 | spin_lock_irq(&priv->ctx_lock); |
138 | list_add_tail(&dev_ctx->list, &priv->ctx_list); | 141 | list_add_tail(&dev_ctx->list, &priv->ctx_list); |
139 | 142 | ||
140 | fire_delayed_event_locked(dev_ctx, dev, priv); | ||
141 | |||
142 | #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING | 143 | #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING |
143 | if (dev_ctx->intf->pfault) { | 144 | if (dev_ctx->intf->pfault) { |
144 | if (priv->pfault) { | 145 | if (priv->pfault) { |
@@ -150,11 +151,12 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv) | |||
150 | } | 151 | } |
151 | #endif | 152 | #endif |
152 | spin_unlock_irq(&priv->ctx_lock); | 153 | spin_unlock_irq(&priv->ctx_lock); |
153 | } else { | ||
154 | kfree(dev_ctx); | ||
155 | /* delete all accumulated events */ | ||
156 | cleanup_delayed_evets(priv); | ||
157 | } | 154 | } |
155 | |||
156 | delayed_event_release(dev_ctx, priv); | ||
157 | |||
158 | if (!dev_ctx->context) | ||
159 | kfree(dev_ctx); | ||
158 | } | 160 | } |
159 | 161 | ||
160 | static struct mlx5_device_context *mlx5_get_device(struct mlx5_interface *intf, | 162 | static struct mlx5_device_context *mlx5_get_device(struct mlx5_interface *intf, |
@@ -205,17 +207,21 @@ static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv | |||
205 | if (!dev_ctx) | 207 | if (!dev_ctx) |
206 | return; | 208 | return; |
207 | 209 | ||
210 | delayed_event_start(priv); | ||
208 | if (intf->attach) { | 211 | if (intf->attach) { |
209 | if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state)) | 212 | if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state)) |
210 | return; | 213 | goto out; |
211 | intf->attach(dev, dev_ctx->context); | 214 | intf->attach(dev, dev_ctx->context); |
212 | set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state); | 215 | set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state); |
213 | } else { | 216 | } else { |
214 | if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state)) | 217 | if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state)) |
215 | return; | 218 | goto out; |
216 | dev_ctx->context = intf->add(dev); | 219 | dev_ctx->context = intf->add(dev); |
217 | set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state); | 220 | set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state); |
218 | } | 221 | } |
222 | |||
223 | out: | ||
224 | delayed_event_release(dev_ctx, priv); | ||
219 | } | 225 | } |
220 | 226 | ||
221 | void mlx5_attach_device(struct mlx5_core_dev *dev) | 227 | void mlx5_attach_device(struct mlx5_core_dev *dev) |
@@ -414,8 +420,14 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, | |||
414 | if (priv->is_accum_events) | 420 | if (priv->is_accum_events) |
415 | add_delayed_event(priv, dev, event, param); | 421 | add_delayed_event(priv, dev, event, param); |
416 | 422 | ||
423 | /* After mlx5_detach_device, the dev_ctx->intf is still set and dev_ctx is | ||
424 | * still in priv->ctx_list. In this case, only notify the dev_ctx if its | ||
425 | * ADDED or ATTACHED bit are set. | ||
426 | */ | ||
417 | list_for_each_entry(dev_ctx, &priv->ctx_list, list) | 427 | list_for_each_entry(dev_ctx, &priv->ctx_list, list) |
418 | if (dev_ctx->intf->event) | 428 | if (dev_ctx->intf->event && |
429 | (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state) || | ||
430 | test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))) | ||
419 | dev_ctx->intf->event(dev, dev_ctx->context, event, param); | 431 | dev_ctx->intf->event(dev, dev_ctx->context, event, param); |
420 | 432 | ||
421 | spin_unlock_irqrestore(&priv->ctx_lock, flags); | 433 | spin_unlock_irqrestore(&priv->ctx_lock, flags); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index c1d384fca4dc..51c4cc00a186 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | |||
@@ -41,6 +41,11 @@ | |||
41 | #define MLX5E_CEE_STATE_UP 1 | 41 | #define MLX5E_CEE_STATE_UP 1 |
42 | #define MLX5E_CEE_STATE_DOWN 0 | 42 | #define MLX5E_CEE_STATE_DOWN 0 |
43 | 43 | ||
44 | enum { | ||
45 | MLX5E_VENDOR_TC_GROUP_NUM = 7, | ||
46 | MLX5E_LOWEST_PRIO_GROUP = 0, | ||
47 | }; | ||
48 | |||
44 | /* If dcbx mode is non-host set the dcbx mode to host. | 49 | /* If dcbx mode is non-host set the dcbx mode to host. |
45 | */ | 50 | */ |
46 | static int mlx5e_dcbnl_set_dcbx_mode(struct mlx5e_priv *priv, | 51 | static int mlx5e_dcbnl_set_dcbx_mode(struct mlx5e_priv *priv, |
@@ -85,6 +90,9 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev, | |||
85 | { | 90 | { |
86 | struct mlx5e_priv *priv = netdev_priv(netdev); | 91 | struct mlx5e_priv *priv = netdev_priv(netdev); |
87 | struct mlx5_core_dev *mdev = priv->mdev; | 92 | struct mlx5_core_dev *mdev = priv->mdev; |
93 | u8 tc_group[IEEE_8021QAZ_MAX_TCS]; | ||
94 | bool is_tc_group_6_exist = false; | ||
95 | bool is_zero_bw_ets_tc = false; | ||
88 | int err = 0; | 96 | int err = 0; |
89 | int i; | 97 | int i; |
90 | 98 | ||
@@ -96,37 +104,64 @@ static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev, | |||
96 | err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]); | 104 | err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]); |
97 | if (err) | 105 | if (err) |
98 | return err; | 106 | return err; |
99 | } | ||
100 | 107 | ||
101 | for (i = 0; i < ets->ets_cap; i++) { | 108 | err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]); |
109 | if (err) | ||
110 | return err; | ||
111 | |||
102 | err = mlx5_query_port_tc_bw_alloc(mdev, i, &ets->tc_tx_bw[i]); | 112 | err = mlx5_query_port_tc_bw_alloc(mdev, i, &ets->tc_tx_bw[i]); |
103 | if (err) | 113 | if (err) |
104 | return err; | 114 | return err; |
115 | |||
116 | if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC && | ||
117 | tc_group[i] == (MLX5E_LOWEST_PRIO_GROUP + 1)) | ||
118 | is_zero_bw_ets_tc = true; | ||
119 | |||
120 | if (tc_group[i] == (MLX5E_VENDOR_TC_GROUP_NUM - 1)) | ||
121 | is_tc_group_6_exist = true; | ||
122 | } | ||
123 | |||
124 | /* Report 0% ets tc if exits*/ | ||
125 | if (is_zero_bw_ets_tc) { | ||
126 | for (i = 0; i < ets->ets_cap; i++) | ||
127 | if (tc_group[i] == MLX5E_LOWEST_PRIO_GROUP) | ||
128 | ets->tc_tx_bw[i] = 0; | ||
129 | } | ||
130 | |||
131 | /* Update tc_tsa based on fw setting*/ | ||
132 | for (i = 0; i < ets->ets_cap; i++) { | ||
105 | if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC) | 133 | if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC) |
106 | priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS; | 134 | priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS; |
135 | else if (tc_group[i] == MLX5E_VENDOR_TC_GROUP_NUM && | ||
136 | !is_tc_group_6_exist) | ||
137 | priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR; | ||
107 | } | 138 | } |
108 | |||
109 | memcpy(ets->tc_tsa, priv->dcbx.tc_tsa, sizeof(ets->tc_tsa)); | 139 | memcpy(ets->tc_tsa, priv->dcbx.tc_tsa, sizeof(ets->tc_tsa)); |
110 | 140 | ||
111 | return err; | 141 | return err; |
112 | } | 142 | } |
113 | 143 | ||
114 | enum { | ||
115 | MLX5E_VENDOR_TC_GROUP_NUM = 7, | ||
116 | MLX5E_ETS_TC_GROUP_NUM = 0, | ||
117 | }; | ||
118 | |||
119 | static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc) | 144 | static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc) |
120 | { | 145 | { |
121 | bool any_tc_mapped_to_ets = false; | 146 | bool any_tc_mapped_to_ets = false; |
147 | bool ets_zero_bw = false; | ||
122 | int strict_group; | 148 | int strict_group; |
123 | int i; | 149 | int i; |
124 | 150 | ||
125 | for (i = 0; i <= max_tc; i++) | 151 | for (i = 0; i <= max_tc; i++) { |
126 | if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) | 152 | if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) { |
127 | any_tc_mapped_to_ets = true; | 153 | any_tc_mapped_to_ets = true; |
154 | if (!ets->tc_tx_bw[i]) | ||
155 | ets_zero_bw = true; | ||
156 | } | ||
157 | } | ||
128 | 158 | ||
129 | strict_group = any_tc_mapped_to_ets ? 1 : 0; | 159 | /* strict group has higher priority than ets group */ |
160 | strict_group = MLX5E_LOWEST_PRIO_GROUP; | ||
161 | if (any_tc_mapped_to_ets) | ||
162 | strict_group++; | ||
163 | if (ets_zero_bw) | ||
164 | strict_group++; | ||
130 | 165 | ||
131 | for (i = 0; i <= max_tc; i++) { | 166 | for (i = 0; i <= max_tc; i++) { |
132 | switch (ets->tc_tsa[i]) { | 167 | switch (ets->tc_tsa[i]) { |
@@ -137,7 +172,9 @@ static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc) | |||
137 | tc_group[i] = strict_group++; | 172 | tc_group[i] = strict_group++; |
138 | break; | 173 | break; |
139 | case IEEE_8021QAZ_TSA_ETS: | 174 | case IEEE_8021QAZ_TSA_ETS: |
140 | tc_group[i] = MLX5E_ETS_TC_GROUP_NUM; | 175 | tc_group[i] = MLX5E_LOWEST_PRIO_GROUP; |
176 | if (ets->tc_tx_bw[i] && ets_zero_bw) | ||
177 | tc_group[i] = MLX5E_LOWEST_PRIO_GROUP + 1; | ||
141 | break; | 178 | break; |
142 | } | 179 | } |
143 | } | 180 | } |
@@ -146,9 +183,23 @@ static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc) | |||
146 | static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw, | 183 | static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw, |
147 | u8 *tc_group, int max_tc) | 184 | u8 *tc_group, int max_tc) |
148 | { | 185 | { |
186 | int bw_for_ets_zero_bw_tc = 0; | ||
187 | int last_ets_zero_bw_tc = -1; | ||
188 | int num_ets_zero_bw = 0; | ||
149 | int i; | 189 | int i; |
150 | 190 | ||
151 | for (i = 0; i <= max_tc; i++) { | 191 | for (i = 0; i <= max_tc; i++) { |
192 | if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS && | ||
193 | !ets->tc_tx_bw[i]) { | ||
194 | num_ets_zero_bw++; | ||
195 | last_ets_zero_bw_tc = i; | ||
196 | } | ||
197 | } | ||
198 | |||
199 | if (num_ets_zero_bw) | ||
200 | bw_for_ets_zero_bw_tc = MLX5E_MAX_BW_ALLOC / num_ets_zero_bw; | ||
201 | |||
202 | for (i = 0; i <= max_tc; i++) { | ||
152 | switch (ets->tc_tsa[i]) { | 203 | switch (ets->tc_tsa[i]) { |
153 | case IEEE_8021QAZ_TSA_VENDOR: | 204 | case IEEE_8021QAZ_TSA_VENDOR: |
154 | tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC; | 205 | tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC; |
@@ -157,12 +208,26 @@ static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw, | |||
157 | tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC; | 208 | tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC; |
158 | break; | 209 | break; |
159 | case IEEE_8021QAZ_TSA_ETS: | 210 | case IEEE_8021QAZ_TSA_ETS: |
160 | tc_tx_bw[i] = ets->tc_tx_bw[i]; | 211 | tc_tx_bw[i] = ets->tc_tx_bw[i] ? |
212 | ets->tc_tx_bw[i] : | ||
213 | bw_for_ets_zero_bw_tc; | ||
161 | break; | 214 | break; |
162 | } | 215 | } |
163 | } | 216 | } |
217 | |||
218 | /* Make sure the total bw for ets zero bw group is 100% */ | ||
219 | if (last_ets_zero_bw_tc != -1) | ||
220 | tc_tx_bw[last_ets_zero_bw_tc] += | ||
221 | MLX5E_MAX_BW_ALLOC % num_ets_zero_bw; | ||
164 | } | 222 | } |
165 | 223 | ||
224 | /* If there are ETS BW 0, | ||
225 | * Set ETS group # to 1 for all ETS non zero BW tcs. Their sum must be 100%. | ||
226 | * Set group #0 to all the ETS BW 0 tcs and | ||
227 | * equally splits the 100% BW between them | ||
228 | * Report both group #0 and #1 as ETS type. | ||
229 | * All the tcs in group #0 will be reported with 0% BW. | ||
230 | */ | ||
166 | int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets) | 231 | int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets) |
167 | { | 232 | { |
168 | struct mlx5_core_dev *mdev = priv->mdev; | 233 | struct mlx5_core_dev *mdev = priv->mdev; |
@@ -188,7 +253,6 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets) | |||
188 | return err; | 253 | return err; |
189 | 254 | ||
190 | memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa)); | 255 | memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa)); |
191 | |||
192 | return err; | 256 | return err; |
193 | } | 257 | } |
194 | 258 | ||
@@ -209,17 +273,9 @@ static int mlx5e_dbcnl_validate_ets(struct net_device *netdev, | |||
209 | } | 273 | } |
210 | 274 | ||
211 | /* Validate Bandwidth Sum */ | 275 | /* Validate Bandwidth Sum */ |
212 | for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { | 276 | for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) |
213 | if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) { | 277 | if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) |
214 | if (!ets->tc_tx_bw[i]) { | ||
215 | netdev_err(netdev, | ||
216 | "Failed to validate ETS: BW 0 is illegal\n"); | ||
217 | return -EINVAL; | ||
218 | } | ||
219 | |||
220 | bw_sum += ets->tc_tx_bw[i]; | 278 | bw_sum += ets->tc_tx_bw[i]; |
221 | } | ||
222 | } | ||
223 | 279 | ||
224 | if (bw_sum != 0 && bw_sum != 100) { | 280 | if (bw_sum != 0 && bw_sum != 100) { |
225 | netdev_err(netdev, | 281 | netdev_err(netdev, |
@@ -533,8 +589,7 @@ static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev, | |||
533 | static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev, | 589 | static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev, |
534 | int pgid, u8 *bw_pct) | 590 | int pgid, u8 *bw_pct) |
535 | { | 591 | { |
536 | struct mlx5e_priv *priv = netdev_priv(netdev); | 592 | struct ieee_ets ets; |
537 | struct mlx5_core_dev *mdev = priv->mdev; | ||
538 | 593 | ||
539 | if (pgid >= CEE_DCBX_MAX_PGS) { | 594 | if (pgid >= CEE_DCBX_MAX_PGS) { |
540 | netdev_err(netdev, | 595 | netdev_err(netdev, |
@@ -542,8 +597,8 @@ static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev, | |||
542 | return; | 597 | return; |
543 | } | 598 | } |
544 | 599 | ||
545 | if (mlx5_query_port_tc_bw_alloc(mdev, pgid, bw_pct)) | 600 | mlx5e_dcbnl_ieee_getets(netdev, &ets); |
546 | *bw_pct = 0; | 601 | *bw_pct = ets.tc_tx_bw[pgid]; |
547 | } | 602 | } |
548 | 603 | ||
549 | static void mlx5e_dcbnl_setpfccfg(struct net_device *netdev, | 604 | static void mlx5e_dcbnl_setpfccfg(struct net_device *netdev, |
@@ -739,8 +794,6 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv) | |||
739 | ets.prio_tc[i] = i; | 794 | ets.prio_tc[i] = i; |
740 | } | 795 | } |
741 | 796 | ||
742 | memcpy(priv->dcbx.tc_tsa, ets.tc_tsa, sizeof(ets.tc_tsa)); | ||
743 | |||
744 | /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */ | 797 | /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */ |
745 | ets.prio_tc[0] = 1; | 798 | ets.prio_tc[0] = 1; |
746 | ets.prio_tc[1] = 0; | 799 | ets.prio_tc[1] = 0; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 1aa2028ed995..9ba1f72060aa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | |||
@@ -78,9 +78,11 @@ struct mlx5e_tc_flow { | |||
78 | }; | 78 | }; |
79 | 79 | ||
80 | struct mlx5e_tc_flow_parse_attr { | 80 | struct mlx5e_tc_flow_parse_attr { |
81 | struct ip_tunnel_info tun_info; | ||
81 | struct mlx5_flow_spec spec; | 82 | struct mlx5_flow_spec spec; |
82 | int num_mod_hdr_actions; | 83 | int num_mod_hdr_actions; |
83 | void *mod_hdr_actions; | 84 | void *mod_hdr_actions; |
85 | int mirred_ifindex; | ||
84 | }; | 86 | }; |
85 | 87 | ||
86 | enum { | 88 | enum { |
@@ -322,6 +324,12 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv, | |||
322 | static void mlx5e_detach_encap(struct mlx5e_priv *priv, | 324 | static void mlx5e_detach_encap(struct mlx5e_priv *priv, |
323 | struct mlx5e_tc_flow *flow); | 325 | struct mlx5e_tc_flow *flow); |
324 | 326 | ||
327 | static int mlx5e_attach_encap(struct mlx5e_priv *priv, | ||
328 | struct ip_tunnel_info *tun_info, | ||
329 | struct net_device *mirred_dev, | ||
330 | struct net_device **encap_dev, | ||
331 | struct mlx5e_tc_flow *flow); | ||
332 | |||
325 | static struct mlx5_flow_handle * | 333 | static struct mlx5_flow_handle * |
326 | mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, | 334 | mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, |
327 | struct mlx5e_tc_flow_parse_attr *parse_attr, | 335 | struct mlx5e_tc_flow_parse_attr *parse_attr, |
@@ -329,9 +337,27 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, | |||
329 | { | 337 | { |
330 | struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; | 338 | struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; |
331 | struct mlx5_esw_flow_attr *attr = flow->esw_attr; | 339 | struct mlx5_esw_flow_attr *attr = flow->esw_attr; |
332 | struct mlx5_flow_handle *rule; | 340 | struct net_device *out_dev, *encap_dev = NULL; |
341 | struct mlx5_flow_handle *rule = NULL; | ||
342 | struct mlx5e_rep_priv *rpriv; | ||
343 | struct mlx5e_priv *out_priv; | ||
333 | int err; | 344 | int err; |
334 | 345 | ||
346 | if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) { | ||
347 | out_dev = __dev_get_by_index(dev_net(priv->netdev), | ||
348 | attr->parse_attr->mirred_ifindex); | ||
349 | err = mlx5e_attach_encap(priv, &parse_attr->tun_info, | ||
350 | out_dev, &encap_dev, flow); | ||
351 | if (err) { | ||
352 | rule = ERR_PTR(err); | ||
353 | if (err != -EAGAIN) | ||
354 | goto err_attach_encap; | ||
355 | } | ||
356 | out_priv = netdev_priv(encap_dev); | ||
357 | rpriv = out_priv->ppriv; | ||
358 | attr->out_rep = rpriv->rep; | ||
359 | } | ||
360 | |||
335 | err = mlx5_eswitch_add_vlan_action(esw, attr); | 361 | err = mlx5_eswitch_add_vlan_action(esw, attr); |
336 | if (err) { | 362 | if (err) { |
337 | rule = ERR_PTR(err); | 363 | rule = ERR_PTR(err); |
@@ -347,10 +373,14 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, | |||
347 | } | 373 | } |
348 | } | 374 | } |
349 | 375 | ||
350 | rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr); | 376 | /* we get here if (1) there's no error (rule being null) or when |
351 | if (IS_ERR(rule)) | 377 | * (2) there's an encap action and we're on -EAGAIN (no valid neigh) |
352 | goto err_add_rule; | 378 | */ |
353 | 379 | if (rule != ERR_PTR(-EAGAIN)) { | |
380 | rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr); | ||
381 | if (IS_ERR(rule)) | ||
382 | goto err_add_rule; | ||
383 | } | ||
354 | return rule; | 384 | return rule; |
355 | 385 | ||
356 | err_add_rule: | 386 | err_add_rule: |
@@ -361,6 +391,7 @@ err_mod_hdr: | |||
361 | err_add_vlan: | 391 | err_add_vlan: |
362 | if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) | 392 | if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) |
363 | mlx5e_detach_encap(priv, flow); | 393 | mlx5e_detach_encap(priv, flow); |
394 | err_attach_encap: | ||
364 | return rule; | 395 | return rule; |
365 | } | 396 | } |
366 | 397 | ||
@@ -389,6 +420,8 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, | |||
389 | void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv, | 420 | void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv, |
390 | struct mlx5e_encap_entry *e) | 421 | struct mlx5e_encap_entry *e) |
391 | { | 422 | { |
423 | struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; | ||
424 | struct mlx5_esw_flow_attr *esw_attr; | ||
392 | struct mlx5e_tc_flow *flow; | 425 | struct mlx5e_tc_flow *flow; |
393 | int err; | 426 | int err; |
394 | 427 | ||
@@ -404,10 +437,9 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv, | |||
404 | mlx5e_rep_queue_neigh_stats_work(priv); | 437 | mlx5e_rep_queue_neigh_stats_work(priv); |
405 | 438 | ||
406 | list_for_each_entry(flow, &e->flows, encap) { | 439 | list_for_each_entry(flow, &e->flows, encap) { |
407 | flow->esw_attr->encap_id = e->encap_id; | 440 | esw_attr = flow->esw_attr; |
408 | flow->rule = mlx5e_tc_add_fdb_flow(priv, | 441 | esw_attr->encap_id = e->encap_id; |
409 | flow->esw_attr->parse_attr, | 442 | flow->rule = mlx5_eswitch_add_offloaded_rule(esw, &esw_attr->parse_attr->spec, esw_attr); |
410 | flow); | ||
411 | if (IS_ERR(flow->rule)) { | 443 | if (IS_ERR(flow->rule)) { |
412 | err = PTR_ERR(flow->rule); | 444 | err = PTR_ERR(flow->rule); |
413 | mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n", | 445 | mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n", |
@@ -421,15 +453,13 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv, | |||
421 | void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, | 453 | void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, |
422 | struct mlx5e_encap_entry *e) | 454 | struct mlx5e_encap_entry *e) |
423 | { | 455 | { |
456 | struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; | ||
424 | struct mlx5e_tc_flow *flow; | 457 | struct mlx5e_tc_flow *flow; |
425 | struct mlx5_fc *counter; | ||
426 | 458 | ||
427 | list_for_each_entry(flow, &e->flows, encap) { | 459 | list_for_each_entry(flow, &e->flows, encap) { |
428 | if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) { | 460 | if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) { |
429 | flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED; | 461 | flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED; |
430 | counter = mlx5_flow_rule_counter(flow->rule); | 462 | mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr); |
431 | mlx5_del_flow_rules(flow->rule); | ||
432 | mlx5_fc_destroy(priv->mdev, counter); | ||
433 | } | 463 | } |
434 | } | 464 | } |
435 | 465 | ||
@@ -1942,7 +1972,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, | |||
1942 | 1972 | ||
1943 | if (is_tcf_mirred_egress_redirect(a)) { | 1973 | if (is_tcf_mirred_egress_redirect(a)) { |
1944 | int ifindex = tcf_mirred_ifindex(a); | 1974 | int ifindex = tcf_mirred_ifindex(a); |
1945 | struct net_device *out_dev, *encap_dev = NULL; | 1975 | struct net_device *out_dev; |
1946 | struct mlx5e_priv *out_priv; | 1976 | struct mlx5e_priv *out_priv; |
1947 | 1977 | ||
1948 | out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex); | 1978 | out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex); |
@@ -1955,17 +1985,13 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, | |||
1955 | rpriv = out_priv->ppriv; | 1985 | rpriv = out_priv->ppriv; |
1956 | attr->out_rep = rpriv->rep; | 1986 | attr->out_rep = rpriv->rep; |
1957 | } else if (encap) { | 1987 | } else if (encap) { |
1958 | err = mlx5e_attach_encap(priv, info, | 1988 | parse_attr->mirred_ifindex = ifindex; |
1959 | out_dev, &encap_dev, flow); | 1989 | parse_attr->tun_info = *info; |
1960 | if (err && err != -EAGAIN) | 1990 | attr->parse_attr = parse_attr; |
1961 | return err; | ||
1962 | attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP | | 1991 | attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP | |
1963 | MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | | 1992 | MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | |
1964 | MLX5_FLOW_CONTEXT_ACTION_COUNT; | 1993 | MLX5_FLOW_CONTEXT_ACTION_COUNT; |
1965 | out_priv = netdev_priv(encap_dev); | 1994 | /* attr->out_rep is resolved when we handle encap */ |
1966 | rpriv = out_priv->ppriv; | ||
1967 | attr->out_rep = rpriv->rep; | ||
1968 | attr->parse_attr = parse_attr; | ||
1969 | } else { | 1995 | } else { |
1970 | pr_err("devices %s %s not on same switch HW, can't offload forwarding\n", | 1996 | pr_err("devices %s %s not on same switch HW, can't offload forwarding\n", |
1971 | priv->netdev->name, out_dev->name); | 1997 | priv->netdev->name, out_dev->name); |
@@ -2047,7 +2073,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, | |||
2047 | if (flow->flags & MLX5E_TC_FLOW_ESWITCH) { | 2073 | if (flow->flags & MLX5E_TC_FLOW_ESWITCH) { |
2048 | err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow); | 2074 | err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow); |
2049 | if (err < 0) | 2075 | if (err < 0) |
2050 | goto err_handle_encap_flow; | 2076 | goto err_free; |
2051 | flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow); | 2077 | flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow); |
2052 | } else { | 2078 | } else { |
2053 | err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow); | 2079 | err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow); |
@@ -2058,10 +2084,13 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, | |||
2058 | 2084 | ||
2059 | if (IS_ERR(flow->rule)) { | 2085 | if (IS_ERR(flow->rule)) { |
2060 | err = PTR_ERR(flow->rule); | 2086 | err = PTR_ERR(flow->rule); |
2061 | goto err_free; | 2087 | if (err != -EAGAIN) |
2088 | goto err_free; | ||
2062 | } | 2089 | } |
2063 | 2090 | ||
2064 | flow->flags |= MLX5E_TC_FLOW_OFFLOADED; | 2091 | if (err != -EAGAIN) |
2092 | flow->flags |= MLX5E_TC_FLOW_OFFLOADED; | ||
2093 | |||
2065 | err = rhashtable_insert_fast(&tc->ht, &flow->node, | 2094 | err = rhashtable_insert_fast(&tc->ht, &flow->node, |
2066 | tc->ht_params); | 2095 | tc->ht_params); |
2067 | if (err) | 2096 | if (err) |
@@ -2075,16 +2104,6 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, | |||
2075 | err_del_rule: | 2104 | err_del_rule: |
2076 | mlx5e_tc_del_flow(priv, flow); | 2105 | mlx5e_tc_del_flow(priv, flow); |
2077 | 2106 | ||
2078 | err_handle_encap_flow: | ||
2079 | if (err == -EAGAIN) { | ||
2080 | err = rhashtable_insert_fast(&tc->ht, &flow->node, | ||
2081 | tc->ht_params); | ||
2082 | if (err) | ||
2083 | mlx5e_tc_del_flow(priv, flow); | ||
2084 | else | ||
2085 | return 0; | ||
2086 | } | ||
2087 | |||
2088 | err_free: | 2107 | err_free: |
2089 | kvfree(parse_attr); | 2108 | kvfree(parse_attr); |
2090 | kfree(flow); | 2109 | kfree(flow); |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 8aea0a065e56..db86e1506c8b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c | |||
@@ -356,10 +356,11 @@ void mlx5_drain_health_wq(struct mlx5_core_dev *dev) | |||
356 | void mlx5_drain_health_recovery(struct mlx5_core_dev *dev) | 356 | void mlx5_drain_health_recovery(struct mlx5_core_dev *dev) |
357 | { | 357 | { |
358 | struct mlx5_core_health *health = &dev->priv.health; | 358 | struct mlx5_core_health *health = &dev->priv.health; |
359 | unsigned long flags; | ||
359 | 360 | ||
360 | spin_lock(&health->wq_lock); | 361 | spin_lock_irqsave(&health->wq_lock, flags); |
361 | set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags); | 362 | set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags); |
362 | spin_unlock(&health->wq_lock); | 363 | spin_unlock_irqrestore(&health->wq_lock, flags); |
363 | cancel_delayed_work_sync(&dev->priv.health.recover_work); | 364 | cancel_delayed_work_sync(&dev->priv.health.recover_work); |
364 | } | 365 | } |
365 | 366 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 1975d4388d4f..e07061f565d6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c | |||
@@ -677,6 +677,27 @@ int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group) | |||
677 | } | 677 | } |
678 | EXPORT_SYMBOL_GPL(mlx5_set_port_tc_group); | 678 | EXPORT_SYMBOL_GPL(mlx5_set_port_tc_group); |
679 | 679 | ||
680 | int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev, | ||
681 | u8 tc, u8 *tc_group) | ||
682 | { | ||
683 | u32 out[MLX5_ST_SZ_DW(qetc_reg)]; | ||
684 | void *ets_tcn_conf; | ||
685 | int err; | ||
686 | |||
687 | err = mlx5_query_port_qetcr_reg(mdev, out, sizeof(out)); | ||
688 | if (err) | ||
689 | return err; | ||
690 | |||
691 | ets_tcn_conf = MLX5_ADDR_OF(qetc_reg, out, | ||
692 | tc_configuration[tc]); | ||
693 | |||
694 | *tc_group = MLX5_GET(ets_tcn_config_reg, ets_tcn_conf, | ||
695 | group); | ||
696 | |||
697 | return 0; | ||
698 | } | ||
699 | EXPORT_SYMBOL_GPL(mlx5_query_port_tc_group); | ||
700 | |||
680 | int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw) | 701 | int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw) |
681 | { | 702 | { |
682 | u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {0}; | 703 | u32 in[MLX5_ST_SZ_DW(qetc_reg)] = {0}; |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 9d5e7cf288be..f3315bc874ad 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c | |||
@@ -96,6 +96,7 @@ struct mlxsw_core { | |||
96 | const struct mlxsw_bus *bus; | 96 | const struct mlxsw_bus *bus; |
97 | void *bus_priv; | 97 | void *bus_priv; |
98 | const struct mlxsw_bus_info *bus_info; | 98 | const struct mlxsw_bus_info *bus_info; |
99 | struct workqueue_struct *emad_wq; | ||
99 | struct list_head rx_listener_list; | 100 | struct list_head rx_listener_list; |
100 | struct list_head event_listener_list; | 101 | struct list_head event_listener_list; |
101 | struct { | 102 | struct { |
@@ -465,7 +466,7 @@ static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans) | |||
465 | { | 466 | { |
466 | unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS); | 467 | unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS); |
467 | 468 | ||
468 | mlxsw_core_schedule_dw(&trans->timeout_dw, timeout); | 469 | queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw, timeout); |
469 | } | 470 | } |
470 | 471 | ||
471 | static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core, | 472 | static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core, |
@@ -587,12 +588,18 @@ static const struct mlxsw_listener mlxsw_emad_rx_listener = | |||
587 | 588 | ||
588 | static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core) | 589 | static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core) |
589 | { | 590 | { |
591 | struct workqueue_struct *emad_wq; | ||
590 | u64 tid; | 592 | u64 tid; |
591 | int err; | 593 | int err; |
592 | 594 | ||
593 | if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX)) | 595 | if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX)) |
594 | return 0; | 596 | return 0; |
595 | 597 | ||
598 | emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_MEM_RECLAIM, 0); | ||
599 | if (!emad_wq) | ||
600 | return -ENOMEM; | ||
601 | mlxsw_core->emad_wq = emad_wq; | ||
602 | |||
596 | /* Set the upper 32 bits of the transaction ID field to a random | 603 | /* Set the upper 32 bits of the transaction ID field to a random |
597 | * number. This allows us to discard EMADs addressed to other | 604 | * number. This allows us to discard EMADs addressed to other |
598 | * devices. | 605 | * devices. |
@@ -619,6 +626,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core) | |||
619 | err_emad_trap_set: | 626 | err_emad_trap_set: |
620 | mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener, | 627 | mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener, |
621 | mlxsw_core); | 628 | mlxsw_core); |
629 | destroy_workqueue(mlxsw_core->emad_wq); | ||
622 | return err; | 630 | return err; |
623 | } | 631 | } |
624 | 632 | ||
@@ -631,6 +639,7 @@ static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core) | |||
631 | mlxsw_core->emad.use_emad = false; | 639 | mlxsw_core->emad.use_emad = false; |
632 | mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener, | 640 | mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener, |
633 | mlxsw_core); | 641 | mlxsw_core); |
642 | destroy_workqueue(mlxsw_core->emad_wq); | ||
634 | } | 643 | } |
635 | 644 | ||
636 | static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core, | 645 | static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core, |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index cc27c5de5a1d..4afc8486eb9a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h | |||
@@ -6401,6 +6401,36 @@ static inline void mlxsw_reg_mgpc_pack(char *payload, u32 counter_index, | |||
6401 | mlxsw_reg_mgpc_opcode_set(payload, opcode); | 6401 | mlxsw_reg_mgpc_opcode_set(payload, opcode); |
6402 | } | 6402 | } |
6403 | 6403 | ||
6404 | /* TIGCR - Tunneling IPinIP General Configuration Register | ||
6405 | * ------------------------------------------------------- | ||
6406 | * The TIGCR register is used for setting up the IPinIP Tunnel configuration. | ||
6407 | */ | ||
6408 | #define MLXSW_REG_TIGCR_ID 0xA801 | ||
6409 | #define MLXSW_REG_TIGCR_LEN 0x10 | ||
6410 | |||
6411 | MLXSW_REG_DEFINE(tigcr, MLXSW_REG_TIGCR_ID, MLXSW_REG_TIGCR_LEN); | ||
6412 | |||
6413 | /* reg_tigcr_ipip_ttlc | ||
6414 | * For IPinIP Tunnel encapsulation: whether to copy the ttl from the packet | ||
6415 | * header. | ||
6416 | * Access: RW | ||
6417 | */ | ||
6418 | MLXSW_ITEM32(reg, tigcr, ttlc, 0x04, 8, 1); | ||
6419 | |||
6420 | /* reg_tigcr_ipip_ttl_uc | ||
6421 | * The TTL for IPinIP Tunnel encapsulation of unicast packets if | ||
6422 | * reg_tigcr_ipip_ttlc is unset. | ||
6423 | * Access: RW | ||
6424 | */ | ||
6425 | MLXSW_ITEM32(reg, tigcr, ttl_uc, 0x04, 0, 8); | ||
6426 | |||
6427 | static inline void mlxsw_reg_tigcr_pack(char *payload, bool ttlc, u8 ttl_uc) | ||
6428 | { | ||
6429 | MLXSW_REG_ZERO(tigcr, payload); | ||
6430 | mlxsw_reg_tigcr_ttlc_set(payload, ttlc); | ||
6431 | mlxsw_reg_tigcr_ttl_uc_set(payload, ttl_uc); | ||
6432 | } | ||
6433 | |||
6404 | /* SBPR - Shared Buffer Pools Register | 6434 | /* SBPR - Shared Buffer Pools Register |
6405 | * ----------------------------------- | 6435 | * ----------------------------------- |
6406 | * The SBPR configures and retrieves the shared buffer pools and configuration. | 6436 | * The SBPR configures and retrieves the shared buffer pools and configuration. |
@@ -6881,6 +6911,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { | |||
6881 | MLXSW_REG(mcc), | 6911 | MLXSW_REG(mcc), |
6882 | MLXSW_REG(mcda), | 6912 | MLXSW_REG(mcda), |
6883 | MLXSW_REG(mgpc), | 6913 | MLXSW_REG(mgpc), |
6914 | MLXSW_REG(tigcr), | ||
6884 | MLXSW_REG(sbpr), | 6915 | MLXSW_REG(sbpr), |
6885 | MLXSW_REG(sbcm), | 6916 | MLXSW_REG(sbcm), |
6886 | MLXSW_REG(sbpm), | 6917 | MLXSW_REG(sbpm), |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 032089efc1a0..5189022a1c8c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | |||
@@ -3505,20 +3505,6 @@ static int mlxsw_sp_fib_lpm_tree_link(struct mlxsw_sp *mlxsw_sp, | |||
3505 | static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp, | 3505 | static void mlxsw_sp_fib_lpm_tree_unlink(struct mlxsw_sp *mlxsw_sp, |
3506 | struct mlxsw_sp_fib *fib) | 3506 | struct mlxsw_sp_fib *fib) |
3507 | { | 3507 | { |
3508 | struct mlxsw_sp_prefix_usage req_prefix_usage = {{ 0 } }; | ||
3509 | struct mlxsw_sp_lpm_tree *lpm_tree; | ||
3510 | |||
3511 | /* Aggregate prefix lengths across all virtual routers to make | ||
3512 | * sure we only have used prefix lengths in the LPM tree. | ||
3513 | */ | ||
3514 | mlxsw_sp_vrs_prefixes(mlxsw_sp, fib->proto, &req_prefix_usage); | ||
3515 | lpm_tree = mlxsw_sp_lpm_tree_get(mlxsw_sp, &req_prefix_usage, | ||
3516 | fib->proto); | ||
3517 | if (IS_ERR(lpm_tree)) | ||
3518 | goto err_tree_get; | ||
3519 | mlxsw_sp_vrs_lpm_tree_replace(mlxsw_sp, fib, lpm_tree); | ||
3520 | |||
3521 | err_tree_get: | ||
3522 | if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) | 3508 | if (!mlxsw_sp_prefix_usage_none(&fib->prefix_usage)) |
3523 | return; | 3509 | return; |
3524 | mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib); | 3510 | mlxsw_sp_vr_lpm_tree_unbind(mlxsw_sp, fib); |
@@ -5910,11 +5896,20 @@ static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp) | |||
5910 | kfree(mlxsw_sp->router->rifs); | 5896 | kfree(mlxsw_sp->router->rifs); |
5911 | } | 5897 | } |
5912 | 5898 | ||
5899 | static int | ||
5900 | mlxsw_sp_ipip_config_tigcr(struct mlxsw_sp *mlxsw_sp) | ||
5901 | { | ||
5902 | char tigcr_pl[MLXSW_REG_TIGCR_LEN]; | ||
5903 | |||
5904 | mlxsw_reg_tigcr_pack(tigcr_pl, true, 0); | ||
5905 | return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tigcr), tigcr_pl); | ||
5906 | } | ||
5907 | |||
5913 | static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp) | 5908 | static int mlxsw_sp_ipips_init(struct mlxsw_sp *mlxsw_sp) |
5914 | { | 5909 | { |
5915 | mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr; | 5910 | mlxsw_sp->router->ipip_ops_arr = mlxsw_sp_ipip_ops_arr; |
5916 | INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list); | 5911 | INIT_LIST_HEAD(&mlxsw_sp->router->ipip_list); |
5917 | return 0; | 5912 | return mlxsw_sp_ipip_config_tigcr(mlxsw_sp); |
5918 | } | 5913 | } |
5919 | 5914 | ||
5920 | static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp) | 5915 | static void mlxsw_sp_ipips_fini(struct mlxsw_sp *mlxsw_sp) |
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index db9750695dc7..8ea9320014ee 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c | |||
@@ -110,6 +110,8 @@ nfp_fl_output(struct nfp_fl_output *output, const struct tc_action *action, | |||
110 | */ | 110 | */ |
111 | if (!switchdev_port_same_parent_id(in_dev, out_dev)) | 111 | if (!switchdev_port_same_parent_id(in_dev, out_dev)) |
112 | return -EOPNOTSUPP; | 112 | return -EOPNOTSUPP; |
113 | if (!nfp_netdev_is_nfp_repr(out_dev)) | ||
114 | return -EOPNOTSUPP; | ||
113 | 115 | ||
114 | output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev)); | 116 | output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev)); |
115 | if (!output->port) | 117 | if (!output->port) |
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 1c0187f0af51..e118b5f23996 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c | |||
@@ -1180,10 +1180,14 @@ static void *nfp_net_rx_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr) | |||
1180 | { | 1180 | { |
1181 | void *frag; | 1181 | void *frag; |
1182 | 1182 | ||
1183 | if (!dp->xdp_prog) | 1183 | if (!dp->xdp_prog) { |
1184 | frag = netdev_alloc_frag(dp->fl_bufsz); | 1184 | frag = netdev_alloc_frag(dp->fl_bufsz); |
1185 | else | 1185 | } else { |
1186 | frag = page_address(alloc_page(GFP_KERNEL | __GFP_COLD)); | 1186 | struct page *page; |
1187 | |||
1188 | page = alloc_page(GFP_KERNEL | __GFP_COLD); | ||
1189 | frag = page ? page_address(page) : NULL; | ||
1190 | } | ||
1187 | if (!frag) { | 1191 | if (!frag) { |
1188 | nn_dp_warn(dp, "Failed to alloc receive page frag\n"); | 1192 | nn_dp_warn(dp, "Failed to alloc receive page frag\n"); |
1189 | return NULL; | 1193 | return NULL; |
@@ -1203,10 +1207,14 @@ static void *nfp_net_napi_alloc_one(struct nfp_net_dp *dp, dma_addr_t *dma_addr) | |||
1203 | { | 1207 | { |
1204 | void *frag; | 1208 | void *frag; |
1205 | 1209 | ||
1206 | if (!dp->xdp_prog) | 1210 | if (!dp->xdp_prog) { |
1207 | frag = napi_alloc_frag(dp->fl_bufsz); | 1211 | frag = napi_alloc_frag(dp->fl_bufsz); |
1208 | else | 1212 | } else { |
1209 | frag = page_address(alloc_page(GFP_ATOMIC | __GFP_COLD)); | 1213 | struct page *page; |
1214 | |||
1215 | page = alloc_page(GFP_ATOMIC | __GFP_COLD); | ||
1216 | frag = page ? page_address(page) : NULL; | ||
1217 | } | ||
1210 | if (!frag) { | 1218 | if (!frag) { |
1211 | nn_dp_warn(dp, "Failed to alloc receive page frag\n"); | 1219 | nn_dp_warn(dp, "Failed to alloc receive page frag\n"); |
1212 | return NULL; | 1220 | return NULL; |
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 07969f06df10..dc016dfec64d 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | |||
@@ -464,7 +464,7 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data) | |||
464 | 464 | ||
465 | do { | 465 | do { |
466 | start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync); | 466 | start = u64_stats_fetch_begin(&nn->r_vecs[i].rx_sync); |
467 | *data++ = nn->r_vecs[i].rx_pkts; | 467 | data[0] = nn->r_vecs[i].rx_pkts; |
468 | tmp[0] = nn->r_vecs[i].hw_csum_rx_ok; | 468 | tmp[0] = nn->r_vecs[i].hw_csum_rx_ok; |
469 | tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok; | 469 | tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok; |
470 | tmp[2] = nn->r_vecs[i].hw_csum_rx_error; | 470 | tmp[2] = nn->r_vecs[i].hw_csum_rx_error; |
@@ -472,14 +472,16 @@ static u64 *nfp_vnic_get_sw_stats(struct net_device *netdev, u64 *data) | |||
472 | 472 | ||
473 | do { | 473 | do { |
474 | start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync); | 474 | start = u64_stats_fetch_begin(&nn->r_vecs[i].tx_sync); |
475 | *data++ = nn->r_vecs[i].tx_pkts; | 475 | data[1] = nn->r_vecs[i].tx_pkts; |
476 | *data++ = nn->r_vecs[i].tx_busy; | 476 | data[2] = nn->r_vecs[i].tx_busy; |
477 | tmp[3] = nn->r_vecs[i].hw_csum_tx; | 477 | tmp[3] = nn->r_vecs[i].hw_csum_tx; |
478 | tmp[4] = nn->r_vecs[i].hw_csum_tx_inner; | 478 | tmp[4] = nn->r_vecs[i].hw_csum_tx_inner; |
479 | tmp[5] = nn->r_vecs[i].tx_gather; | 479 | tmp[5] = nn->r_vecs[i].tx_gather; |
480 | tmp[6] = nn->r_vecs[i].tx_lso; | 480 | tmp[6] = nn->r_vecs[i].tx_lso; |
481 | } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start)); | 481 | } while (u64_stats_fetch_retry(&nn->r_vecs[i].tx_sync, start)); |
482 | 482 | ||
483 | data += 3; | ||
484 | |||
483 | for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++) | 485 | for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++) |
484 | gathered_stats[j] += tmp[j]; | 486 | gathered_stats[j] += tmp[j]; |
485 | } | 487 | } |
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index e03fcf914690..a3c949ea7d1a 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
@@ -8491,8 +8491,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
8491 | rtl8168_driver_start(tp); | 8491 | rtl8168_driver_start(tp); |
8492 | } | 8492 | } |
8493 | 8493 | ||
8494 | device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL); | ||
8495 | |||
8496 | if (pci_dev_run_wake(pdev)) | 8494 | if (pci_dev_run_wake(pdev)) |
8497 | pm_runtime_put_noidle(&pdev->dev); | 8495 | pm_runtime_put_noidle(&pdev->dev); |
8498 | 8496 | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c index 5efef8001edf..3256e5cbad27 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c | |||
@@ -74,7 +74,7 @@ static int dwc_eth_dwmac_config_dt(struct platform_device *pdev, | |||
74 | plat_dat->axi->axi_wr_osr_lmt--; | 74 | plat_dat->axi->axi_wr_osr_lmt--; |
75 | } | 75 | } |
76 | 76 | ||
77 | if (of_property_read_u32(np, "read,read-requests", | 77 | if (of_property_read_u32(np, "snps,read-requests", |
78 | &plat_dat->axi->axi_rd_osr_lmt)) { | 78 | &plat_dat->axi->axi_rd_osr_lmt)) { |
79 | /** | 79 | /** |
80 | * Since the register has a reset value of 1, if property | 80 | * Since the register has a reset value of 1, if property |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c index e0ef02f9503b..4b286e27c4ca 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | |||
@@ -275,7 +275,7 @@ static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats) | |||
275 | goto exit; | 275 | goto exit; |
276 | i++; | 276 | i++; |
277 | 277 | ||
278 | } while ((ret == 1) || (i < 10)); | 278 | } while ((ret == 1) && (i < 10)); |
279 | 279 | ||
280 | if (i == 10) | 280 | if (i == 10) |
281 | ret = -EBUSY; | 281 | ret = -EBUSY; |
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c index 67af0bdd7f10..7516ca210855 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c | |||
@@ -34,7 +34,7 @@ int dwmac_dma_reset(void __iomem *ioaddr) | |||
34 | 34 | ||
35 | err = readl_poll_timeout(ioaddr + DMA_BUS_MODE, value, | 35 | err = readl_poll_timeout(ioaddr + DMA_BUS_MODE, value, |
36 | !(value & DMA_BUS_MODE_SFT_RESET), | 36 | !(value & DMA_BUS_MODE_SFT_RESET), |
37 | 100000, 10000); | 37 | 10000, 100000); |
38 | if (err) | 38 | if (err) |
39 | return -EBUSY; | 39 | return -EBUSY; |
40 | 40 | ||
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 1763e48c84e2..16bd50929084 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | |||
@@ -473,19 +473,18 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p, | |||
473 | struct dma_desc *np, struct sk_buff *skb) | 473 | struct dma_desc *np, struct sk_buff *skb) |
474 | { | 474 | { |
475 | struct skb_shared_hwtstamps *shhwtstamp = NULL; | 475 | struct skb_shared_hwtstamps *shhwtstamp = NULL; |
476 | struct dma_desc *desc = p; | ||
476 | u64 ns; | 477 | u64 ns; |
477 | 478 | ||
478 | if (!priv->hwts_rx_en) | 479 | if (!priv->hwts_rx_en) |
479 | return; | 480 | return; |
481 | /* For GMAC4, the valid timestamp is from CTX next desc. */ | ||
482 | if (priv->plat->has_gmac4) | ||
483 | desc = np; | ||
480 | 484 | ||
481 | /* Check if timestamp is available */ | 485 | /* Check if timestamp is available */ |
482 | if (priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) { | 486 | if (priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts)) { |
483 | /* For GMAC4, the valid timestamp is from CTX next desc. */ | 487 | ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts); |
484 | if (priv->plat->has_gmac4) | ||
485 | ns = priv->hw->desc->get_timestamp(np, priv->adv_ts); | ||
486 | else | ||
487 | ns = priv->hw->desc->get_timestamp(p, priv->adv_ts); | ||
488 | |||
489 | netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); | 488 | netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); |
490 | shhwtstamp = skb_hwtstamps(skb); | 489 | shhwtstamp = skb_hwtstamps(skb); |
491 | memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); | 490 | memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps)); |
@@ -1800,12 +1799,13 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue) | |||
1800 | { | 1799 | { |
1801 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; | 1800 | struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; |
1802 | unsigned int bytes_compl = 0, pkts_compl = 0; | 1801 | unsigned int bytes_compl = 0, pkts_compl = 0; |
1803 | unsigned int entry = tx_q->dirty_tx; | 1802 | unsigned int entry; |
1804 | 1803 | ||
1805 | netif_tx_lock(priv->dev); | 1804 | netif_tx_lock(priv->dev); |
1806 | 1805 | ||
1807 | priv->xstats.tx_clean++; | 1806 | priv->xstats.tx_clean++; |
1808 | 1807 | ||
1808 | entry = tx_q->dirty_tx; | ||
1809 | while (entry != tx_q->cur_tx) { | 1809 | while (entry != tx_q->cur_tx) { |
1810 | struct sk_buff *skb = tx_q->tx_skbuff[entry]; | 1810 | struct sk_buff *skb = tx_q->tx_skbuff[entry]; |
1811 | struct dma_desc *p; | 1811 | struct dma_desc *p; |
@@ -3333,6 +3333,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) | |||
3333 | * them in stmmac_rx_refill() function so that | 3333 | * them in stmmac_rx_refill() function so that |
3334 | * device can reuse it. | 3334 | * device can reuse it. |
3335 | */ | 3335 | */ |
3336 | dev_kfree_skb_any(rx_q->rx_skbuff[entry]); | ||
3336 | rx_q->rx_skbuff[entry] = NULL; | 3337 | rx_q->rx_skbuff[entry] = NULL; |
3337 | dma_unmap_single(priv->device, | 3338 | dma_unmap_single(priv->device, |
3338 | rx_q->rx_skbuff_dma[entry], | 3339 | rx_q->rx_skbuff_dma[entry], |
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 8a280b48e3a9..6383695004a5 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | |||
@@ -150,6 +150,13 @@ static void stmmac_mtl_setup(struct platform_device *pdev, | |||
150 | plat->rx_queues_to_use = 1; | 150 | plat->rx_queues_to_use = 1; |
151 | plat->tx_queues_to_use = 1; | 151 | plat->tx_queues_to_use = 1; |
152 | 152 | ||
153 | /* First Queue must always be in DCB mode. As MTL_QUEUE_DCB = 1 we need | ||
154 | * to always set this, otherwise Queue will be classified as AVB | ||
155 | * (because MTL_QUEUE_AVB = 0). | ||
156 | */ | ||
157 | plat->rx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB; | ||
158 | plat->tx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB; | ||
159 | |||
153 | rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0); | 160 | rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0); |
154 | if (!rx_node) | 161 | if (!rx_node) |
155 | return; | 162 | return; |
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index f6404074b7b0..ed51018a813e 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c | |||
@@ -113,13 +113,7 @@ static void tunnel_id_to_vni(__be64 tun_id, __u8 *vni) | |||
113 | 113 | ||
114 | static bool eq_tun_id_and_vni(u8 *tun_id, u8 *vni) | 114 | static bool eq_tun_id_and_vni(u8 *tun_id, u8 *vni) |
115 | { | 115 | { |
116 | #ifdef __BIG_ENDIAN | ||
117 | return (vni[0] == tun_id[2]) && | ||
118 | (vni[1] == tun_id[1]) && | ||
119 | (vni[2] == tun_id[0]); | ||
120 | #else | ||
121 | return !memcmp(vni, &tun_id[5], 3); | 116 | return !memcmp(vni, &tun_id[5], 3); |
122 | #endif | ||
123 | } | 117 | } |
124 | 118 | ||
125 | static sa_family_t geneve_get_sk_family(struct geneve_sock *gs) | 119 | static sa_family_t geneve_get_sk_family(struct geneve_sock *gs) |
diff --git a/drivers/net/ipvlan/ipvtap.c b/drivers/net/ipvlan/ipvtap.c index 5dea2063dbc8..0bcc07f346c3 100644 --- a/drivers/net/ipvlan/ipvtap.c +++ b/drivers/net/ipvlan/ipvtap.c | |||
@@ -197,8 +197,8 @@ static int ipvtap_init(void) | |||
197 | { | 197 | { |
198 | int err; | 198 | int err; |
199 | 199 | ||
200 | err = tap_create_cdev(&ipvtap_cdev, &ipvtap_major, "ipvtap"); | 200 | err = tap_create_cdev(&ipvtap_cdev, &ipvtap_major, "ipvtap", |
201 | 201 | THIS_MODULE); | |
202 | if (err) | 202 | if (err) |
203 | goto out1; | 203 | goto out1; |
204 | 204 | ||
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 98e4deaa3a6a..5ab1b8849c30 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c | |||
@@ -742,6 +742,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb, | |||
742 | sg_init_table(sg, ret); | 742 | sg_init_table(sg, ret); |
743 | ret = skb_to_sgvec(skb, sg, 0, skb->len); | 743 | ret = skb_to_sgvec(skb, sg, 0, skb->len); |
744 | if (unlikely(ret < 0)) { | 744 | if (unlikely(ret < 0)) { |
745 | aead_request_free(req); | ||
745 | macsec_txsa_put(tx_sa); | 746 | macsec_txsa_put(tx_sa); |
746 | kfree_skb(skb); | 747 | kfree_skb(skb); |
747 | return ERR_PTR(ret); | 748 | return ERR_PTR(ret); |
@@ -954,6 +955,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb, | |||
954 | sg_init_table(sg, ret); | 955 | sg_init_table(sg, ret); |
955 | ret = skb_to_sgvec(skb, sg, 0, skb->len); | 956 | ret = skb_to_sgvec(skb, sg, 0, skb->len); |
956 | if (unlikely(ret < 0)) { | 957 | if (unlikely(ret < 0)) { |
958 | aead_request_free(req); | ||
957 | kfree_skb(skb); | 959 | kfree_skb(skb); |
958 | return ERR_PTR(ret); | 960 | return ERR_PTR(ret); |
959 | } | 961 | } |
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index c2d0ea2fb019..cba5cb3b849a 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c | |||
@@ -204,8 +204,8 @@ static int macvtap_init(void) | |||
204 | { | 204 | { |
205 | int err; | 205 | int err; |
206 | 206 | ||
207 | err = tap_create_cdev(&macvtap_cdev, &macvtap_major, "macvtap"); | 207 | err = tap_create_cdev(&macvtap_cdev, &macvtap_major, "macvtap", |
208 | 208 | THIS_MODULE); | |
209 | if (err) | 209 | if (err) |
210 | goto out1; | 210 | goto out1; |
211 | 211 | ||
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index c3f77e3b7819..e365866600ba 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c | |||
@@ -1339,7 +1339,17 @@ ppp_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats64) | |||
1339 | 1339 | ||
1340 | static int ppp_dev_init(struct net_device *dev) | 1340 | static int ppp_dev_init(struct net_device *dev) |
1341 | { | 1341 | { |
1342 | struct ppp *ppp; | ||
1343 | |||
1342 | netdev_lockdep_set_classes(dev); | 1344 | netdev_lockdep_set_classes(dev); |
1345 | |||
1346 | ppp = netdev_priv(dev); | ||
1347 | /* Let the netdevice take a reference on the ppp file. This ensures | ||
1348 | * that ppp_destroy_interface() won't run before the device gets | ||
1349 | * unregistered. | ||
1350 | */ | ||
1351 | atomic_inc(&ppp->file.refcnt); | ||
1352 | |||
1343 | return 0; | 1353 | return 0; |
1344 | } | 1354 | } |
1345 | 1355 | ||
@@ -1362,6 +1372,15 @@ static void ppp_dev_uninit(struct net_device *dev) | |||
1362 | wake_up_interruptible(&ppp->file.rwait); | 1372 | wake_up_interruptible(&ppp->file.rwait); |
1363 | } | 1373 | } |
1364 | 1374 | ||
1375 | static void ppp_dev_priv_destructor(struct net_device *dev) | ||
1376 | { | ||
1377 | struct ppp *ppp; | ||
1378 | |||
1379 | ppp = netdev_priv(dev); | ||
1380 | if (atomic_dec_and_test(&ppp->file.refcnt)) | ||
1381 | ppp_destroy_interface(ppp); | ||
1382 | } | ||
1383 | |||
1365 | static const struct net_device_ops ppp_netdev_ops = { | 1384 | static const struct net_device_ops ppp_netdev_ops = { |
1366 | .ndo_init = ppp_dev_init, | 1385 | .ndo_init = ppp_dev_init, |
1367 | .ndo_uninit = ppp_dev_uninit, | 1386 | .ndo_uninit = ppp_dev_uninit, |
@@ -1387,6 +1406,7 @@ static void ppp_setup(struct net_device *dev) | |||
1387 | dev->tx_queue_len = 3; | 1406 | dev->tx_queue_len = 3; |
1388 | dev->type = ARPHRD_PPP; | 1407 | dev->type = ARPHRD_PPP; |
1389 | dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; | 1408 | dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; |
1409 | dev->priv_destructor = ppp_dev_priv_destructor; | ||
1390 | netif_keep_dst(dev); | 1410 | netif_keep_dst(dev); |
1391 | } | 1411 | } |
1392 | 1412 | ||
diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 21b71ae947fd..1b10fcc6a58d 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c | |||
@@ -517,6 +517,10 @@ static int tap_open(struct inode *inode, struct file *file) | |||
517 | &tap_proto, 0); | 517 | &tap_proto, 0); |
518 | if (!q) | 518 | if (!q) |
519 | goto err; | 519 | goto err; |
520 | if (skb_array_init(&q->skb_array, tap->dev->tx_queue_len, GFP_KERNEL)) { | ||
521 | sk_free(&q->sk); | ||
522 | goto err; | ||
523 | } | ||
520 | 524 | ||
521 | RCU_INIT_POINTER(q->sock.wq, &q->wq); | 525 | RCU_INIT_POINTER(q->sock.wq, &q->wq); |
522 | init_waitqueue_head(&q->wq.wait); | 526 | init_waitqueue_head(&q->wq.wait); |
@@ -540,22 +544,18 @@ static int tap_open(struct inode *inode, struct file *file) | |||
540 | if ((tap->dev->features & NETIF_F_HIGHDMA) && (tap->dev->features & NETIF_F_SG)) | 544 | if ((tap->dev->features & NETIF_F_HIGHDMA) && (tap->dev->features & NETIF_F_SG)) |
541 | sock_set_flag(&q->sk, SOCK_ZEROCOPY); | 545 | sock_set_flag(&q->sk, SOCK_ZEROCOPY); |
542 | 546 | ||
543 | err = -ENOMEM; | ||
544 | if (skb_array_init(&q->skb_array, tap->dev->tx_queue_len, GFP_KERNEL)) | ||
545 | goto err_array; | ||
546 | |||
547 | err = tap_set_queue(tap, file, q); | 547 | err = tap_set_queue(tap, file, q); |
548 | if (err) | 548 | if (err) { |
549 | goto err_queue; | 549 | /* tap_sock_destruct() will take care of freeing skb_array */ |
550 | goto err_put; | ||
551 | } | ||
550 | 552 | ||
551 | dev_put(tap->dev); | 553 | dev_put(tap->dev); |
552 | 554 | ||
553 | rtnl_unlock(); | 555 | rtnl_unlock(); |
554 | return err; | 556 | return err; |
555 | 557 | ||
556 | err_queue: | 558 | err_put: |
557 | skb_array_cleanup(&q->skb_array); | ||
558 | err_array: | ||
559 | sock_put(&q->sk); | 559 | sock_put(&q->sk); |
560 | err: | 560 | err: |
561 | if (tap) | 561 | if (tap) |
@@ -1249,8 +1249,8 @@ static int tap_list_add(dev_t major, const char *device_name) | |||
1249 | return 0; | 1249 | return 0; |
1250 | } | 1250 | } |
1251 | 1251 | ||
1252 | int tap_create_cdev(struct cdev *tap_cdev, | 1252 | int tap_create_cdev(struct cdev *tap_cdev, dev_t *tap_major, |
1253 | dev_t *tap_major, const char *device_name) | 1253 | const char *device_name, struct module *module) |
1254 | { | 1254 | { |
1255 | int err; | 1255 | int err; |
1256 | 1256 | ||
@@ -1259,6 +1259,7 @@ int tap_create_cdev(struct cdev *tap_cdev, | |||
1259 | goto out1; | 1259 | goto out1; |
1260 | 1260 | ||
1261 | cdev_init(tap_cdev, &tap_fops); | 1261 | cdev_init(tap_cdev, &tap_fops); |
1262 | tap_cdev->owner = module; | ||
1262 | err = cdev_add(tap_cdev, *tap_major, TAP_NUM_DEVS); | 1263 | err = cdev_add(tap_cdev, *tap_major, TAP_NUM_DEVS); |
1263 | if (err) | 1264 | if (err) |
1264 | goto out2; | 1265 | goto out2; |
diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 5ce580f413b9..5550f56cb895 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c | |||
@@ -1286,6 +1286,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, | |||
1286 | buflen += SKB_DATA_ALIGN(len + pad); | 1286 | buflen += SKB_DATA_ALIGN(len + pad); |
1287 | rcu_read_unlock(); | 1287 | rcu_read_unlock(); |
1288 | 1288 | ||
1289 | alloc_frag->offset = ALIGN((u64)alloc_frag->offset, SMP_CACHE_BYTES); | ||
1289 | if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL))) | 1290 | if (unlikely(!skb_page_frag_refill(buflen, alloc_frag, GFP_KERNEL))) |
1290 | return ERR_PTR(-ENOMEM); | 1291 | return ERR_PTR(-ENOMEM); |
1291 | 1292 | ||
@@ -2027,6 +2028,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) | |||
2027 | 2028 | ||
2028 | if (!dev) | 2029 | if (!dev) |
2029 | return -ENOMEM; | 2030 | return -ENOMEM; |
2031 | err = dev_get_valid_name(net, dev, name); | ||
2032 | if (err < 0) | ||
2033 | goto err_free_dev; | ||
2030 | 2034 | ||
2031 | dev_net_set(dev, net); | 2035 | dev_net_set(dev, net); |
2032 | dev->rtnl_link_ops = &tun_link_ops; | 2036 | dev->rtnl_link_ops = &tun_link_ops; |
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 29c7e2ec0dcb..3e7a3ac3a362 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c | |||
@@ -560,6 +560,8 @@ static const struct driver_info wwan_info = { | |||
560 | #define NVIDIA_VENDOR_ID 0x0955 | 560 | #define NVIDIA_VENDOR_ID 0x0955 |
561 | #define HP_VENDOR_ID 0x03f0 | 561 | #define HP_VENDOR_ID 0x03f0 |
562 | #define MICROSOFT_VENDOR_ID 0x045e | 562 | #define MICROSOFT_VENDOR_ID 0x045e |
563 | #define UBLOX_VENDOR_ID 0x1546 | ||
564 | #define TPLINK_VENDOR_ID 0x2357 | ||
563 | 565 | ||
564 | static const struct usb_device_id products[] = { | 566 | static const struct usb_device_id products[] = { |
565 | /* BLACKLIST !! | 567 | /* BLACKLIST !! |
@@ -812,6 +814,13 @@ static const struct usb_device_id products[] = { | |||
812 | .driver_info = 0, | 814 | .driver_info = 0, |
813 | }, | 815 | }, |
814 | 816 | ||
817 | /* TP-LINK UE300 USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */ | ||
818 | { | ||
819 | USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, 0x0601, USB_CLASS_COMM, | ||
820 | USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), | ||
821 | .driver_info = 0, | ||
822 | }, | ||
823 | |||
815 | /* WHITELIST!!! | 824 | /* WHITELIST!!! |
816 | * | 825 | * |
817 | * CDC Ether uses two interfaces, not necessarily consecutive. | 826 | * CDC Ether uses two interfaces, not necessarily consecutive. |
@@ -863,12 +872,30 @@ static const struct usb_device_id products[] = { | |||
863 | USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), | 872 | USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), |
864 | .driver_info = (kernel_ulong_t)&wwan_info, | 873 | .driver_info = (kernel_ulong_t)&wwan_info, |
865 | }, { | 874 | }, { |
875 | /* Huawei ME906 and ME909 */ | ||
876 | USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x15c1, USB_CLASS_COMM, | ||
877 | USB_CDC_SUBCLASS_ETHERNET, | ||
878 | USB_CDC_PROTO_NONE), | ||
879 | .driver_info = (unsigned long)&wwan_info, | ||
880 | }, { | ||
866 | /* ZTE modules */ | 881 | /* ZTE modules */ |
867 | USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, USB_CLASS_COMM, | 882 | USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, USB_CLASS_COMM, |
868 | USB_CDC_SUBCLASS_ETHERNET, | 883 | USB_CDC_SUBCLASS_ETHERNET, |
869 | USB_CDC_PROTO_NONE), | 884 | USB_CDC_PROTO_NONE), |
870 | .driver_info = (unsigned long)&zte_cdc_info, | 885 | .driver_info = (unsigned long)&zte_cdc_info, |
871 | }, { | 886 | }, { |
887 | /* U-blox TOBY-L2 */ | ||
888 | USB_DEVICE_AND_INTERFACE_INFO(UBLOX_VENDOR_ID, 0x1143, USB_CLASS_COMM, | ||
889 | USB_CDC_SUBCLASS_ETHERNET, | ||
890 | USB_CDC_PROTO_NONE), | ||
891 | .driver_info = (unsigned long)&wwan_info, | ||
892 | }, { | ||
893 | /* U-blox SARA-U2 */ | ||
894 | USB_DEVICE_AND_INTERFACE_INFO(UBLOX_VENDOR_ID, 0x1104, USB_CLASS_COMM, | ||
895 | USB_CDC_SUBCLASS_ETHERNET, | ||
896 | USB_CDC_PROTO_NONE), | ||
897 | .driver_info = (unsigned long)&wwan_info, | ||
898 | }, { | ||
872 | USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, | 899 | USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, |
873 | USB_CDC_PROTO_NONE), | 900 | USB_CDC_PROTO_NONE), |
874 | .driver_info = (unsigned long) &cdc_info, | 901 | .driver_info = (unsigned long) &cdc_info, |
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 941ece08ba78..d51d9abf7986 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c | |||
@@ -615,6 +615,7 @@ enum rtl8152_flags { | |||
615 | #define VENDOR_ID_LENOVO 0x17ef | 615 | #define VENDOR_ID_LENOVO 0x17ef |
616 | #define VENDOR_ID_LINKSYS 0x13b1 | 616 | #define VENDOR_ID_LINKSYS 0x13b1 |
617 | #define VENDOR_ID_NVIDIA 0x0955 | 617 | #define VENDOR_ID_NVIDIA 0x0955 |
618 | #define VENDOR_ID_TPLINK 0x2357 | ||
618 | 619 | ||
619 | #define MCU_TYPE_PLA 0x0100 | 620 | #define MCU_TYPE_PLA 0x0100 |
620 | #define MCU_TYPE_USB 0x0000 | 621 | #define MCU_TYPE_USB 0x0000 |
@@ -5319,6 +5320,7 @@ static const struct usb_device_id rtl8152_table[] = { | |||
5319 | {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)}, | 5320 | {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7214)}, |
5320 | {REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)}, | 5321 | {REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)}, |
5321 | {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)}, | 5322 | {REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)}, |
5323 | {REALTEK_USB_DEVICE(VENDOR_ID_TPLINK, 0x0601)}, | ||
5322 | {} | 5324 | {} |
5323 | }; | 5325 | }; |
5324 | 5326 | ||
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c index c9c711dcd0e6..a89b5685e68b 100644 --- a/drivers/net/wimax/i2400m/fw.c +++ b/drivers/net/wimax/i2400m/fw.c | |||
@@ -652,7 +652,7 @@ static int i2400m_download_chunk(struct i2400m *i2400m, const void *chunk, | |||
652 | struct device *dev = i2400m_dev(i2400m); | 652 | struct device *dev = i2400m_dev(i2400m); |
653 | struct { | 653 | struct { |
654 | struct i2400m_bootrom_header cmd; | 654 | struct i2400m_bootrom_header cmd; |
655 | u8 cmd_payload[chunk_len]; | 655 | u8 cmd_payload[]; |
656 | } __packed *buf; | 656 | } __packed *buf; |
657 | struct i2400m_bootrom_header ack; | 657 | struct i2400m_bootrom_header ack; |
658 | 658 | ||
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c index 4eb1e1ce9ace..ef72baf6dd96 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c | |||
@@ -429,7 +429,8 @@ void brcmf_fweh_process_event(struct brcmf_pub *drvr, | |||
429 | if (code != BRCMF_E_IF && !fweh->evt_handler[code]) | 429 | if (code != BRCMF_E_IF && !fweh->evt_handler[code]) |
430 | return; | 430 | return; |
431 | 431 | ||
432 | if (datalen > BRCMF_DCMD_MAXLEN) | 432 | if (datalen > BRCMF_DCMD_MAXLEN || |
433 | datalen + sizeof(*event_packet) > packet_len) | ||
433 | return; | 434 | return; |
434 | 435 | ||
435 | if (in_interrupt()) | 436 | if (in_interrupt()) |
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c index b3aab2fe96eb..ef685465f80a 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c | |||
@@ -14764,8 +14764,8 @@ static void wlc_phy_ipa_restore_tx_digi_filts_nphy(struct brcms_phy *pi) | |||
14764 | } | 14764 | } |
14765 | 14765 | ||
14766 | static void | 14766 | static void |
14767 | wlc_phy_set_rfseq_nphy(struct brcms_phy *pi, u8 cmd, u8 *events, u8 *dlys, | 14767 | wlc_phy_set_rfseq_nphy(struct brcms_phy *pi, u8 cmd, const u8 *events, |
14768 | u8 len) | 14768 | const u8 *dlys, u8 len) |
14769 | { | 14769 | { |
14770 | u32 t1_offset, t2_offset; | 14770 | u32 t1_offset, t2_offset; |
14771 | u8 ctr; | 14771 | u8 ctr; |
@@ -15240,16 +15240,16 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev5(struct brcms_phy *pi) | |||
15240 | static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi) | 15240 | static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi) |
15241 | { | 15241 | { |
15242 | u16 currband; | 15242 | u16 currband; |
15243 | s8 lna1G_gain_db_rev7[] = { 9, 14, 19, 24 }; | 15243 | static const s8 lna1G_gain_db_rev7[] = { 9, 14, 19, 24 }; |
15244 | s8 *lna1_gain_db = NULL; | 15244 | const s8 *lna1_gain_db = NULL; |
15245 | s8 *lna1_gain_db_2 = NULL; | 15245 | const s8 *lna1_gain_db_2 = NULL; |
15246 | s8 *lna2_gain_db = NULL; | 15246 | const s8 *lna2_gain_db = NULL; |
15247 | s8 tiaA_gain_db_rev7[] = { -9, -6, -3, 0, 3, 3, 3, 3, 3, 3 }; | 15247 | static const s8 tiaA_gain_db_rev7[] = { -9, -6, -3, 0, 3, 3, 3, 3, 3, 3 }; |
15248 | s8 *tia_gain_db; | 15248 | const s8 *tia_gain_db; |
15249 | s8 tiaA_gainbits_rev7[] = { 0, 1, 2, 3, 4, 4, 4, 4, 4, 4 }; | 15249 | static const s8 tiaA_gainbits_rev7[] = { 0, 1, 2, 3, 4, 4, 4, 4, 4, 4 }; |
15250 | s8 *tia_gainbits; | 15250 | const s8 *tia_gainbits; |
15251 | u16 rfseqA_init_gain_rev7[] = { 0x624f, 0x624f }; | 15251 | static const u16 rfseqA_init_gain_rev7[] = { 0x624f, 0x624f }; |
15252 | u16 *rfseq_init_gain; | 15252 | const u16 *rfseq_init_gain; |
15253 | u16 init_gaincode; | 15253 | u16 init_gaincode; |
15254 | u16 clip1hi_gaincode; | 15254 | u16 clip1hi_gaincode; |
15255 | u16 clip1md_gaincode = 0; | 15255 | u16 clip1md_gaincode = 0; |
@@ -15310,10 +15310,9 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi) | |||
15310 | 15310 | ||
15311 | if ((freq <= 5080) || (freq == 5825)) { | 15311 | if ((freq <= 5080) || (freq == 5825)) { |
15312 | 15312 | ||
15313 | s8 lna1A_gain_db_rev7[] = { 11, 16, 20, 24 }; | 15313 | static const s8 lna1A_gain_db_rev7[] = { 11, 16, 20, 24 }; |
15314 | s8 lna1A_gain_db_2_rev7[] = { | 15314 | static const s8 lna1A_gain_db_2_rev7[] = { 11, 17, 22, 25}; |
15315 | 11, 17, 22, 25}; | 15315 | static const s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 }; |
15316 | s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 }; | ||
15317 | 15316 | ||
15318 | crsminu_th = 0x3e; | 15317 | crsminu_th = 0x3e; |
15319 | lna1_gain_db = lna1A_gain_db_rev7; | 15318 | lna1_gain_db = lna1A_gain_db_rev7; |
@@ -15321,10 +15320,9 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi) | |||
15321 | lna2_gain_db = lna2A_gain_db_rev7; | 15320 | lna2_gain_db = lna2A_gain_db_rev7; |
15322 | } else if ((freq >= 5500) && (freq <= 5700)) { | 15321 | } else if ((freq >= 5500) && (freq <= 5700)) { |
15323 | 15322 | ||
15324 | s8 lna1A_gain_db_rev7[] = { 11, 17, 21, 25 }; | 15323 | static const s8 lna1A_gain_db_rev7[] = { 11, 17, 21, 25 }; |
15325 | s8 lna1A_gain_db_2_rev7[] = { | 15324 | static const s8 lna1A_gain_db_2_rev7[] = { 12, 18, 22, 26}; |
15326 | 12, 18, 22, 26}; | 15325 | static const s8 lna2A_gain_db_rev7[] = { 1, 8, 12, 16 }; |
15327 | s8 lna2A_gain_db_rev7[] = { 1, 8, 12, 16 }; | ||
15328 | 15326 | ||
15329 | crsminu_th = 0x45; | 15327 | crsminu_th = 0x45; |
15330 | clip1md_gaincode_B = 0x14; | 15328 | clip1md_gaincode_B = 0x14; |
@@ -15335,10 +15333,9 @@ static void wlc_phy_workarounds_nphy_gainctrl_2057_rev6(struct brcms_phy *pi) | |||
15335 | lna2_gain_db = lna2A_gain_db_rev7; | 15333 | lna2_gain_db = lna2A_gain_db_rev7; |
15336 | } else { | 15334 | } else { |
15337 | 15335 | ||
15338 | s8 lna1A_gain_db_rev7[] = { 12, 18, 22, 26 }; | 15336 | static const s8 lna1A_gain_db_rev7[] = { 12, 18, 22, 26 }; |
15339 | s8 lna1A_gain_db_2_rev7[] = { | 15337 | static const s8 lna1A_gain_db_2_rev7[] = { 12, 18, 22, 26}; |
15340 | 12, 18, 22, 26}; | 15338 | static const s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 }; |
15341 | s8 lna2A_gain_db_rev7[] = { -1, 6, 10, 14 }; | ||
15342 | 15339 | ||
15343 | crsminu_th = 0x41; | 15340 | crsminu_th = 0x41; |
15344 | lna1_gain_db = lna1A_gain_db_rev7; | 15341 | lna1_gain_db = lna1A_gain_db_rev7; |
@@ -15450,65 +15447,65 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi) | |||
15450 | NPHY_RFSEQ_CMD_CLR_HIQ_DIS, | 15447 | NPHY_RFSEQ_CMD_CLR_HIQ_DIS, |
15451 | NPHY_RFSEQ_CMD_SET_HPF_BW | 15448 | NPHY_RFSEQ_CMD_SET_HPF_BW |
15452 | }; | 15449 | }; |
15453 | u8 rfseq_updategainu_dlys[] = { 10, 30, 1 }; | 15450 | static const u8 rfseq_updategainu_dlys[] = { 10, 30, 1 }; |
15454 | s8 lna1G_gain_db[] = { 7, 11, 16, 23 }; | 15451 | static const s8 lna1G_gain_db[] = { 7, 11, 16, 23 }; |
15455 | s8 lna1G_gain_db_rev4[] = { 8, 12, 17, 25 }; | 15452 | static const s8 lna1G_gain_db_rev4[] = { 8, 12, 17, 25 }; |
15456 | s8 lna1G_gain_db_rev5[] = { 9, 13, 18, 26 }; | 15453 | static const s8 lna1G_gain_db_rev5[] = { 9, 13, 18, 26 }; |
15457 | s8 lna1G_gain_db_rev6[] = { 8, 13, 18, 25 }; | 15454 | static const s8 lna1G_gain_db_rev6[] = { 8, 13, 18, 25 }; |
15458 | s8 lna1G_gain_db_rev6_224B0[] = { 10, 14, 19, 27 }; | 15455 | static const s8 lna1G_gain_db_rev6_224B0[] = { 10, 14, 19, 27 }; |
15459 | s8 lna1A_gain_db[] = { 7, 11, 17, 23 }; | 15456 | static const s8 lna1A_gain_db[] = { 7, 11, 17, 23 }; |
15460 | s8 lna1A_gain_db_rev4[] = { 8, 12, 18, 23 }; | 15457 | static const s8 lna1A_gain_db_rev4[] = { 8, 12, 18, 23 }; |
15461 | s8 lna1A_gain_db_rev5[] = { 6, 10, 16, 21 }; | 15458 | static const s8 lna1A_gain_db_rev5[] = { 6, 10, 16, 21 }; |
15462 | s8 lna1A_gain_db_rev6[] = { 6, 10, 16, 21 }; | 15459 | static const s8 lna1A_gain_db_rev6[] = { 6, 10, 16, 21 }; |
15463 | s8 *lna1_gain_db = NULL; | 15460 | const s8 *lna1_gain_db = NULL; |
15464 | s8 lna2G_gain_db[] = { -5, 6, 10, 14 }; | 15461 | static const s8 lna2G_gain_db[] = { -5, 6, 10, 14 }; |
15465 | s8 lna2G_gain_db_rev5[] = { -3, 7, 11, 16 }; | 15462 | static const s8 lna2G_gain_db_rev5[] = { -3, 7, 11, 16 }; |
15466 | s8 lna2G_gain_db_rev6[] = { -5, 6, 10, 14 }; | 15463 | static const s8 lna2G_gain_db_rev6[] = { -5, 6, 10, 14 }; |
15467 | s8 lna2G_gain_db_rev6_224B0[] = { -5, 6, 10, 15 }; | 15464 | static const s8 lna2G_gain_db_rev6_224B0[] = { -5, 6, 10, 15 }; |
15468 | s8 lna2A_gain_db[] = { -6, 2, 6, 10 }; | 15465 | static const s8 lna2A_gain_db[] = { -6, 2, 6, 10 }; |
15469 | s8 lna2A_gain_db_rev4[] = { -5, 2, 6, 10 }; | 15466 | static const s8 lna2A_gain_db_rev4[] = { -5, 2, 6, 10 }; |
15470 | s8 lna2A_gain_db_rev5[] = { -7, 0, 4, 8 }; | 15467 | static const s8 lna2A_gain_db_rev5[] = { -7, 0, 4, 8 }; |
15471 | s8 lna2A_gain_db_rev6[] = { -7, 0, 4, 8 }; | 15468 | static const s8 lna2A_gain_db_rev6[] = { -7, 0, 4, 8 }; |
15472 | s8 *lna2_gain_db = NULL; | 15469 | const s8 *lna2_gain_db = NULL; |
15473 | s8 tiaG_gain_db[] = { | 15470 | static const s8 tiaG_gain_db[] = { |
15474 | 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A }; | 15471 | 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A, 0x0A }; |
15475 | s8 tiaA_gain_db[] = { | 15472 | static const s8 tiaA_gain_db[] = { |
15476 | 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13 }; | 15473 | 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13 }; |
15477 | s8 tiaA_gain_db_rev4[] = { | 15474 | static const s8 tiaA_gain_db_rev4[] = { |
15478 | 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d }; | 15475 | 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d }; |
15479 | s8 tiaA_gain_db_rev5[] = { | 15476 | static const s8 tiaA_gain_db_rev5[] = { |
15480 | 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d }; | 15477 | 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d }; |
15481 | s8 tiaA_gain_db_rev6[] = { | 15478 | static const s8 tiaA_gain_db_rev6[] = { |
15482 | 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d }; | 15479 | 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d, 0x0d }; |
15483 | s8 *tia_gain_db; | 15480 | const s8 *tia_gain_db; |
15484 | s8 tiaG_gainbits[] = { | 15481 | static const s8 tiaG_gainbits[] = { |
15485 | 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 }; | 15482 | 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 }; |
15486 | s8 tiaA_gainbits[] = { | 15483 | static const s8 tiaA_gainbits[] = { |
15487 | 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06 }; | 15484 | 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06, 0x06 }; |
15488 | s8 tiaA_gainbits_rev4[] = { | 15485 | static const s8 tiaA_gainbits_rev4[] = { |
15489 | 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 }; | 15486 | 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 }; |
15490 | s8 tiaA_gainbits_rev5[] = { | 15487 | static const s8 tiaA_gainbits_rev5[] = { |
15491 | 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 }; | 15488 | 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 }; |
15492 | s8 tiaA_gainbits_rev6[] = { | 15489 | static const s8 tiaA_gainbits_rev6[] = { |
15493 | 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 }; | 15490 | 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04 }; |
15494 | s8 *tia_gainbits; | 15491 | const s8 *tia_gainbits; |
15495 | s8 lpf_gain_db[] = { 0x00, 0x06, 0x0c, 0x12, 0x12, 0x12 }; | 15492 | static const s8 lpf_gain_db[] = { 0x00, 0x06, 0x0c, 0x12, 0x12, 0x12 }; |
15496 | s8 lpf_gainbits[] = { 0x00, 0x01, 0x02, 0x03, 0x03, 0x03 }; | 15493 | static const s8 lpf_gainbits[] = { 0x00, 0x01, 0x02, 0x03, 0x03, 0x03 }; |
15497 | u16 rfseqG_init_gain[] = { 0x613f, 0x613f, 0x613f, 0x613f }; | 15494 | static const u16 rfseqG_init_gain[] = { 0x613f, 0x613f, 0x613f, 0x613f }; |
15498 | u16 rfseqG_init_gain_rev4[] = { 0x513f, 0x513f, 0x513f, 0x513f }; | 15495 | static const u16 rfseqG_init_gain_rev4[] = { 0x513f, 0x513f, 0x513f, 0x513f }; |
15499 | u16 rfseqG_init_gain_rev5[] = { 0x413f, 0x413f, 0x413f, 0x413f }; | 15496 | static const u16 rfseqG_init_gain_rev5[] = { 0x413f, 0x413f, 0x413f, 0x413f }; |
15500 | u16 rfseqG_init_gain_rev5_elna[] = { | 15497 | static const u16 rfseqG_init_gain_rev5_elna[] = { |
15501 | 0x013f, 0x013f, 0x013f, 0x013f }; | 15498 | 0x013f, 0x013f, 0x013f, 0x013f }; |
15502 | u16 rfseqG_init_gain_rev6[] = { 0x513f, 0x513f }; | 15499 | static const u16 rfseqG_init_gain_rev6[] = { 0x513f, 0x513f }; |
15503 | u16 rfseqG_init_gain_rev6_224B0[] = { 0x413f, 0x413f }; | 15500 | static const u16 rfseqG_init_gain_rev6_224B0[] = { 0x413f, 0x413f }; |
15504 | u16 rfseqG_init_gain_rev6_elna[] = { 0x113f, 0x113f }; | 15501 | static const u16 rfseqG_init_gain_rev6_elna[] = { 0x113f, 0x113f }; |
15505 | u16 rfseqA_init_gain[] = { 0x516f, 0x516f, 0x516f, 0x516f }; | 15502 | static const u16 rfseqA_init_gain[] = { 0x516f, 0x516f, 0x516f, 0x516f }; |
15506 | u16 rfseqA_init_gain_rev4[] = { 0x614f, 0x614f, 0x614f, 0x614f }; | 15503 | static const u16 rfseqA_init_gain_rev4[] = { 0x614f, 0x614f, 0x614f, 0x614f }; |
15507 | u16 rfseqA_init_gain_rev4_elna[] = { | 15504 | static const u16 rfseqA_init_gain_rev4_elna[] = { |
15508 | 0x314f, 0x314f, 0x314f, 0x314f }; | 15505 | 0x314f, 0x314f, 0x314f, 0x314f }; |
15509 | u16 rfseqA_init_gain_rev5[] = { 0x714f, 0x714f, 0x714f, 0x714f }; | 15506 | static const u16 rfseqA_init_gain_rev5[] = { 0x714f, 0x714f, 0x714f, 0x714f }; |
15510 | u16 rfseqA_init_gain_rev6[] = { 0x714f, 0x714f }; | 15507 | static const u16 rfseqA_init_gain_rev6[] = { 0x714f, 0x714f }; |
15511 | u16 *rfseq_init_gain; | 15508 | const u16 *rfseq_init_gain; |
15512 | u16 initG_gaincode = 0x627e; | 15509 | u16 initG_gaincode = 0x627e; |
15513 | u16 initG_gaincode_rev4 = 0x527e; | 15510 | u16 initG_gaincode_rev4 = 0x527e; |
15514 | u16 initG_gaincode_rev5 = 0x427e; | 15511 | u16 initG_gaincode_rev5 = 0x427e; |
@@ -15538,10 +15535,10 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi) | |||
15538 | u16 clip1mdA_gaincode_rev6 = 0x2084; | 15535 | u16 clip1mdA_gaincode_rev6 = 0x2084; |
15539 | u16 clip1md_gaincode = 0; | 15536 | u16 clip1md_gaincode = 0; |
15540 | u16 clip1loG_gaincode = 0x0074; | 15537 | u16 clip1loG_gaincode = 0x0074; |
15541 | u16 clip1loG_gaincode_rev5[] = { | 15538 | static const u16 clip1loG_gaincode_rev5[] = { |
15542 | 0x0062, 0x0064, 0x006a, 0x106a, 0x106c, 0x1074, 0x107c, 0x207c | 15539 | 0x0062, 0x0064, 0x006a, 0x106a, 0x106c, 0x1074, 0x107c, 0x207c |
15543 | }; | 15540 | }; |
15544 | u16 clip1loG_gaincode_rev6[] = { | 15541 | static const u16 clip1loG_gaincode_rev6[] = { |
15545 | 0x106a, 0x106c, 0x1074, 0x107c, 0x007e, 0x107e, 0x207e, 0x307e | 15542 | 0x106a, 0x106c, 0x1074, 0x107c, 0x007e, 0x107e, 0x207e, 0x307e |
15546 | }; | 15543 | }; |
15547 | u16 clip1loG_gaincode_rev6_224B0 = 0x1074; | 15544 | u16 clip1loG_gaincode_rev6_224B0 = 0x1074; |
@@ -16066,7 +16063,7 @@ static void wlc_phy_workarounds_nphy_gainctrl(struct brcms_phy *pi) | |||
16066 | 16063 | ||
16067 | static void wlc_phy_workarounds_nphy(struct brcms_phy *pi) | 16064 | static void wlc_phy_workarounds_nphy(struct brcms_phy *pi) |
16068 | { | 16065 | { |
16069 | u8 rfseq_rx2tx_events[] = { | 16066 | static const u8 rfseq_rx2tx_events[] = { |
16070 | NPHY_RFSEQ_CMD_NOP, | 16067 | NPHY_RFSEQ_CMD_NOP, |
16071 | NPHY_RFSEQ_CMD_RXG_FBW, | 16068 | NPHY_RFSEQ_CMD_RXG_FBW, |
16072 | NPHY_RFSEQ_CMD_TR_SWITCH, | 16069 | NPHY_RFSEQ_CMD_TR_SWITCH, |
@@ -16076,7 +16073,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi) | |||
16076 | NPHY_RFSEQ_CMD_EXT_PA | 16073 | NPHY_RFSEQ_CMD_EXT_PA |
16077 | }; | 16074 | }; |
16078 | u8 rfseq_rx2tx_dlys[] = { 8, 6, 6, 2, 4, 60, 1 }; | 16075 | u8 rfseq_rx2tx_dlys[] = { 8, 6, 6, 2, 4, 60, 1 }; |
16079 | u8 rfseq_tx2rx_events[] = { | 16076 | static const u8 rfseq_tx2rx_events[] = { |
16080 | NPHY_RFSEQ_CMD_NOP, | 16077 | NPHY_RFSEQ_CMD_NOP, |
16081 | NPHY_RFSEQ_CMD_EXT_PA, | 16078 | NPHY_RFSEQ_CMD_EXT_PA, |
16082 | NPHY_RFSEQ_CMD_TX_GAIN, | 16079 | NPHY_RFSEQ_CMD_TX_GAIN, |
@@ -16085,8 +16082,8 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi) | |||
16085 | NPHY_RFSEQ_CMD_RXG_FBW, | 16082 | NPHY_RFSEQ_CMD_RXG_FBW, |
16086 | NPHY_RFSEQ_CMD_CLR_HIQ_DIS | 16083 | NPHY_RFSEQ_CMD_CLR_HIQ_DIS |
16087 | }; | 16084 | }; |
16088 | u8 rfseq_tx2rx_dlys[] = { 8, 6, 2, 4, 4, 6, 1 }; | 16085 | static const u8 rfseq_tx2rx_dlys[] = { 8, 6, 2, 4, 4, 6, 1 }; |
16089 | u8 rfseq_tx2rx_events_rev3[] = { | 16086 | static const u8 rfseq_tx2rx_events_rev3[] = { |
16090 | NPHY_REV3_RFSEQ_CMD_EXT_PA, | 16087 | NPHY_REV3_RFSEQ_CMD_EXT_PA, |
16091 | NPHY_REV3_RFSEQ_CMD_INT_PA_PU, | 16088 | NPHY_REV3_RFSEQ_CMD_INT_PA_PU, |
16092 | NPHY_REV3_RFSEQ_CMD_TX_GAIN, | 16089 | NPHY_REV3_RFSEQ_CMD_TX_GAIN, |
@@ -16096,7 +16093,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi) | |||
16096 | NPHY_REV3_RFSEQ_CMD_CLR_HIQ_DIS, | 16093 | NPHY_REV3_RFSEQ_CMD_CLR_HIQ_DIS, |
16097 | NPHY_REV3_RFSEQ_CMD_END | 16094 | NPHY_REV3_RFSEQ_CMD_END |
16098 | }; | 16095 | }; |
16099 | u8 rfseq_tx2rx_dlys_rev3[] = { 8, 4, 2, 2, 4, 4, 6, 1 }; | 16096 | static const u8 rfseq_tx2rx_dlys_rev3[] = { 8, 4, 2, 2, 4, 4, 6, 1 }; |
16100 | u8 rfseq_rx2tx_events_rev3[] = { | 16097 | u8 rfseq_rx2tx_events_rev3[] = { |
16101 | NPHY_REV3_RFSEQ_CMD_NOP, | 16098 | NPHY_REV3_RFSEQ_CMD_NOP, |
16102 | NPHY_REV3_RFSEQ_CMD_RXG_FBW, | 16099 | NPHY_REV3_RFSEQ_CMD_RXG_FBW, |
@@ -16110,7 +16107,7 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi) | |||
16110 | }; | 16107 | }; |
16111 | u8 rfseq_rx2tx_dlys_rev3[] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 }; | 16108 | u8 rfseq_rx2tx_dlys_rev3[] = { 8, 6, 6, 4, 4, 18, 42, 1, 1 }; |
16112 | 16109 | ||
16113 | u8 rfseq_rx2tx_events_rev3_ipa[] = { | 16110 | static const u8 rfseq_rx2tx_events_rev3_ipa[] = { |
16114 | NPHY_REV3_RFSEQ_CMD_NOP, | 16111 | NPHY_REV3_RFSEQ_CMD_NOP, |
16115 | NPHY_REV3_RFSEQ_CMD_RXG_FBW, | 16112 | NPHY_REV3_RFSEQ_CMD_RXG_FBW, |
16116 | NPHY_REV3_RFSEQ_CMD_TR_SWITCH, | 16113 | NPHY_REV3_RFSEQ_CMD_TR_SWITCH, |
@@ -16121,15 +16118,15 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi) | |||
16121 | NPHY_REV3_RFSEQ_CMD_INT_PA_PU, | 16118 | NPHY_REV3_RFSEQ_CMD_INT_PA_PU, |
16122 | NPHY_REV3_RFSEQ_CMD_END | 16119 | NPHY_REV3_RFSEQ_CMD_END |
16123 | }; | 16120 | }; |
16124 | u8 rfseq_rx2tx_dlys_rev3_ipa[] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 }; | 16121 | static const u8 rfseq_rx2tx_dlys_rev3_ipa[] = { 8, 6, 6, 4, 4, 16, 43, 1, 1 }; |
16125 | u16 rfseq_rx2tx_dacbufpu_rev7[] = { 0x10f, 0x10f }; | 16122 | static const u16 rfseq_rx2tx_dacbufpu_rev7[] = { 0x10f, 0x10f }; |
16126 | 16123 | ||
16127 | s16 alpha0, alpha1, alpha2; | 16124 | s16 alpha0, alpha1, alpha2; |
16128 | s16 beta0, beta1, beta2; | 16125 | s16 beta0, beta1, beta2; |
16129 | u32 leg_data_weights, ht_data_weights, nss1_data_weights, | 16126 | u32 leg_data_weights, ht_data_weights, nss1_data_weights, |
16130 | stbc_data_weights; | 16127 | stbc_data_weights; |
16131 | u8 chan_freq_range = 0; | 16128 | u8 chan_freq_range = 0; |
16132 | u16 dac_control = 0x0002; | 16129 | static const u16 dac_control = 0x0002; |
16133 | u16 aux_adc_vmid_rev7_core0[] = { 0x8e, 0x96, 0x96, 0x96 }; | 16130 | u16 aux_adc_vmid_rev7_core0[] = { 0x8e, 0x96, 0x96, 0x96 }; |
16134 | u16 aux_adc_vmid_rev7_core1[] = { 0x8f, 0x9f, 0x9f, 0x96 }; | 16131 | u16 aux_adc_vmid_rev7_core1[] = { 0x8f, 0x9f, 0x9f, 0x96 }; |
16135 | u16 aux_adc_vmid_rev4[] = { 0xa2, 0xb4, 0xb4, 0x89 }; | 16132 | u16 aux_adc_vmid_rev4[] = { 0xa2, 0xb4, 0xb4, 0x89 }; |
@@ -16139,8 +16136,8 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi) | |||
16139 | u16 aux_adc_gain_rev4[] = { 0x02, 0x02, 0x02, 0x00 }; | 16136 | u16 aux_adc_gain_rev4[] = { 0x02, 0x02, 0x02, 0x00 }; |
16140 | u16 aux_adc_gain_rev3[] = { 0x02, 0x02, 0x02, 0x00 }; | 16137 | u16 aux_adc_gain_rev3[] = { 0x02, 0x02, 0x02, 0x00 }; |
16141 | u16 *aux_adc_gain; | 16138 | u16 *aux_adc_gain; |
16142 | u16 sk_adc_vmid[] = { 0xb4, 0xb4, 0xb4, 0x24 }; | 16139 | static const u16 sk_adc_vmid[] = { 0xb4, 0xb4, 0xb4, 0x24 }; |
16143 | u16 sk_adc_gain[] = { 0x02, 0x02, 0x02, 0x02 }; | 16140 | static const u16 sk_adc_gain[] = { 0x02, 0x02, 0x02, 0x02 }; |
16144 | s32 min_nvar_val = 0x18d; | 16141 | s32 min_nvar_val = 0x18d; |
16145 | s32 min_nvar_offset_6mbps = 20; | 16142 | s32 min_nvar_offset_6mbps = 20; |
16146 | u8 pdetrange; | 16143 | u8 pdetrange; |
@@ -16151,9 +16148,9 @@ static void wlc_phy_workarounds_nphy(struct brcms_phy *pi) | |||
16151 | u16 rfseq_rx2tx_lpf_h_hpc_rev7 = 0x77; | 16148 | u16 rfseq_rx2tx_lpf_h_hpc_rev7 = 0x77; |
16152 | u16 rfseq_tx2rx_lpf_h_hpc_rev7 = 0x77; | 16149 | u16 rfseq_tx2rx_lpf_h_hpc_rev7 = 0x77; |
16153 | u16 rfseq_pktgn_lpf_h_hpc_rev7 = 0x77; | 16150 | u16 rfseq_pktgn_lpf_h_hpc_rev7 = 0x77; |
16154 | u16 rfseq_htpktgn_lpf_hpc_rev7[] = { 0x77, 0x11, 0x11 }; | 16151 | static const u16 rfseq_htpktgn_lpf_hpc_rev7[] = { 0x77, 0x11, 0x11 }; |
16155 | u16 rfseq_pktgn_lpf_hpc_rev7[] = { 0x11, 0x11 }; | 16152 | static const u16 rfseq_pktgn_lpf_hpc_rev7[] = { 0x11, 0x11 }; |
16156 | u16 rfseq_cckpktgn_lpf_hpc_rev7[] = { 0x11, 0x11 }; | 16153 | static const u16 rfseq_cckpktgn_lpf_hpc_rev7[] = { 0x11, 0x11 }; |
16157 | u16 ipalvlshift_3p3_war_en = 0; | 16154 | u16 ipalvlshift_3p3_war_en = 0; |
16158 | u16 rccal_bcap_val, rccal_scap_val; | 16155 | u16 rccal_bcap_val, rccal_scap_val; |
16159 | u16 rccal_tx20_11b_bcap = 0; | 16156 | u16 rccal_tx20_11b_bcap = 0; |
@@ -24291,13 +24288,13 @@ static void wlc_phy_update_txcal_ladder_nphy(struct brcms_phy *pi, u16 core) | |||
24291 | u16 bbmult; | 24288 | u16 bbmult; |
24292 | u16 tblentry; | 24289 | u16 tblentry; |
24293 | 24290 | ||
24294 | struct nphy_txiqcal_ladder ladder_lo[] = { | 24291 | static const struct nphy_txiqcal_ladder ladder_lo[] = { |
24295 | {3, 0}, {4, 0}, {6, 0}, {9, 0}, {13, 0}, {18, 0}, | 24292 | {3, 0}, {4, 0}, {6, 0}, {9, 0}, {13, 0}, {18, 0}, |
24296 | {25, 0}, {25, 1}, {25, 2}, {25, 3}, {25, 4}, {25, 5}, | 24293 | {25, 0}, {25, 1}, {25, 2}, {25, 3}, {25, 4}, {25, 5}, |
24297 | {25, 6}, {25, 7}, {35, 7}, {50, 7}, {71, 7}, {100, 7} | 24294 | {25, 6}, {25, 7}, {35, 7}, {50, 7}, {71, 7}, {100, 7} |
24298 | }; | 24295 | }; |
24299 | 24296 | ||
24300 | struct nphy_txiqcal_ladder ladder_iq[] = { | 24297 | static const struct nphy_txiqcal_ladder ladder_iq[] = { |
24301 | {3, 0}, {4, 0}, {6, 0}, {9, 0}, {13, 0}, {18, 0}, | 24298 | {3, 0}, {4, 0}, {6, 0}, {9, 0}, {13, 0}, {18, 0}, |
24302 | {25, 0}, {35, 0}, {50, 0}, {71, 0}, {100, 0}, {100, 1}, | 24299 | {25, 0}, {35, 0}, {50, 0}, {71, 0}, {100, 0}, {100, 1}, |
24303 | {100, 2}, {100, 3}, {100, 4}, {100, 5}, {100, 6}, {100, 7} | 24300 | {100, 2}, {100, 3}, {100, 4}, {100, 5}, {100, 6}, {100, 7} |
@@ -25773,67 +25770,67 @@ wlc_phy_cal_txiqlo_nphy(struct brcms_phy *pi, struct nphy_txgains target_gain, | |||
25773 | u16 cal_gain[2]; | 25770 | u16 cal_gain[2]; |
25774 | struct nphy_iqcal_params cal_params[2]; | 25771 | struct nphy_iqcal_params cal_params[2]; |
25775 | u32 tbl_len; | 25772 | u32 tbl_len; |
25776 | void *tbl_ptr; | 25773 | const void *tbl_ptr; |
25777 | bool ladder_updated[2]; | 25774 | bool ladder_updated[2]; |
25778 | u8 mphase_cal_lastphase = 0; | 25775 | u8 mphase_cal_lastphase = 0; |
25779 | int bcmerror = 0; | 25776 | int bcmerror = 0; |
25780 | bool phyhang_avoid_state = false; | 25777 | bool phyhang_avoid_state = false; |
25781 | 25778 | ||
25782 | u16 tbl_tx_iqlo_cal_loft_ladder_20[] = { | 25779 | static const u16 tbl_tx_iqlo_cal_loft_ladder_20[] = { |
25783 | 0x0300, 0x0500, 0x0700, 0x0900, 0x0d00, 0x1100, 0x1900, 0x1901, | 25780 | 0x0300, 0x0500, 0x0700, 0x0900, 0x0d00, 0x1100, 0x1900, 0x1901, |
25784 | 0x1902, | 25781 | 0x1902, |
25785 | 0x1903, 0x1904, 0x1905, 0x1906, 0x1907, 0x2407, 0x3207, 0x4607, | 25782 | 0x1903, 0x1904, 0x1905, 0x1906, 0x1907, 0x2407, 0x3207, 0x4607, |
25786 | 0x6407 | 25783 | 0x6407 |
25787 | }; | 25784 | }; |
25788 | 25785 | ||
25789 | u16 tbl_tx_iqlo_cal_iqimb_ladder_20[] = { | 25786 | static const u16 tbl_tx_iqlo_cal_iqimb_ladder_20[] = { |
25790 | 0x0200, 0x0300, 0x0600, 0x0900, 0x0d00, 0x1100, 0x1900, 0x2400, | 25787 | 0x0200, 0x0300, 0x0600, 0x0900, 0x0d00, 0x1100, 0x1900, 0x2400, |
25791 | 0x3200, | 25788 | 0x3200, |
25792 | 0x4600, 0x6400, 0x6401, 0x6402, 0x6403, 0x6404, 0x6405, 0x6406, | 25789 | 0x4600, 0x6400, 0x6401, 0x6402, 0x6403, 0x6404, 0x6405, 0x6406, |
25793 | 0x6407 | 25790 | 0x6407 |
25794 | }; | 25791 | }; |
25795 | 25792 | ||
25796 | u16 tbl_tx_iqlo_cal_loft_ladder_40[] = { | 25793 | static const u16 tbl_tx_iqlo_cal_loft_ladder_40[] = { |
25797 | 0x0200, 0x0300, 0x0400, 0x0700, 0x0900, 0x0c00, 0x1200, 0x1201, | 25794 | 0x0200, 0x0300, 0x0400, 0x0700, 0x0900, 0x0c00, 0x1200, 0x1201, |
25798 | 0x1202, | 25795 | 0x1202, |
25799 | 0x1203, 0x1204, 0x1205, 0x1206, 0x1207, 0x1907, 0x2307, 0x3207, | 25796 | 0x1203, 0x1204, 0x1205, 0x1206, 0x1207, 0x1907, 0x2307, 0x3207, |
25800 | 0x4707 | 25797 | 0x4707 |
25801 | }; | 25798 | }; |
25802 | 25799 | ||
25803 | u16 tbl_tx_iqlo_cal_iqimb_ladder_40[] = { | 25800 | static const u16 tbl_tx_iqlo_cal_iqimb_ladder_40[] = { |
25804 | 0x0100, 0x0200, 0x0400, 0x0700, 0x0900, 0x0c00, 0x1200, 0x1900, | 25801 | 0x0100, 0x0200, 0x0400, 0x0700, 0x0900, 0x0c00, 0x1200, 0x1900, |
25805 | 0x2300, | 25802 | 0x2300, |
25806 | 0x3200, 0x4700, 0x4701, 0x4702, 0x4703, 0x4704, 0x4705, 0x4706, | 25803 | 0x3200, 0x4700, 0x4701, 0x4702, 0x4703, 0x4704, 0x4705, 0x4706, |
25807 | 0x4707 | 25804 | 0x4707 |
25808 | }; | 25805 | }; |
25809 | 25806 | ||
25810 | u16 tbl_tx_iqlo_cal_startcoefs[] = { | 25807 | static const u16 tbl_tx_iqlo_cal_startcoefs[] = { |
25811 | 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, | 25808 | 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, |
25812 | 0x0000 | 25809 | 0x0000 |
25813 | }; | 25810 | }; |
25814 | 25811 | ||
25815 | u16 tbl_tx_iqlo_cal_cmds_fullcal[] = { | 25812 | static const u16 tbl_tx_iqlo_cal_cmds_fullcal[] = { |
25816 | 0x8123, 0x8264, 0x8086, 0x8245, 0x8056, | 25813 | 0x8123, 0x8264, 0x8086, 0x8245, 0x8056, |
25817 | 0x9123, 0x9264, 0x9086, 0x9245, 0x9056 | 25814 | 0x9123, 0x9264, 0x9086, 0x9245, 0x9056 |
25818 | }; | 25815 | }; |
25819 | 25816 | ||
25820 | u16 tbl_tx_iqlo_cal_cmds_recal[] = { | 25817 | static const u16 tbl_tx_iqlo_cal_cmds_recal[] = { |
25821 | 0x8101, 0x8253, 0x8053, 0x8234, 0x8034, | 25818 | 0x8101, 0x8253, 0x8053, 0x8234, 0x8034, |
25822 | 0x9101, 0x9253, 0x9053, 0x9234, 0x9034 | 25819 | 0x9101, 0x9253, 0x9053, 0x9234, 0x9034 |
25823 | }; | 25820 | }; |
25824 | 25821 | ||
25825 | u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[] = { | 25822 | static const u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[] = { |
25826 | 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, | 25823 | 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, |
25827 | 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, | 25824 | 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, |
25828 | 0x0000 | 25825 | 0x0000 |
25829 | }; | 25826 | }; |
25830 | 25827 | ||
25831 | u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = { | 25828 | static const u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = { |
25832 | 0x8434, 0x8334, 0x8084, 0x8267, 0x8056, 0x8234, | 25829 | 0x8434, 0x8334, 0x8084, 0x8267, 0x8056, 0x8234, |
25833 | 0x9434, 0x9334, 0x9084, 0x9267, 0x9056, 0x9234 | 25830 | 0x9434, 0x9334, 0x9084, 0x9267, 0x9056, 0x9234 |
25834 | }; | 25831 | }; |
25835 | 25832 | ||
25836 | u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[] = { | 25833 | static const u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[] = { |
25837 | 0x8423, 0x8323, 0x8073, 0x8256, 0x8045, 0x8223, | 25834 | 0x8423, 0x8323, 0x8073, 0x8256, 0x8045, 0x8223, |
25838 | 0x9423, 0x9323, 0x9073, 0x9256, 0x9045, 0x9223 | 25835 | 0x9423, 0x9323, 0x9073, 0x9256, 0x9045, 0x9223 |
25839 | }; | 25836 | }; |
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c index 45e2efc70d19..ce741beec1fc 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c | |||
@@ -309,6 +309,7 @@ const struct iwl_cfg iwl3168_2ac_cfg = { | |||
309 | .nvm_calib_ver = IWL3168_TX_POWER_VERSION, | 309 | .nvm_calib_ver = IWL3168_TX_POWER_VERSION, |
310 | .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, | 310 | .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, |
311 | .dccm_len = IWL7265_DCCM_LEN, | 311 | .dccm_len = IWL7265_DCCM_LEN, |
312 | .nvm_type = IWL_NVM_SDP, | ||
312 | }; | 313 | }; |
313 | 314 | ||
314 | const struct iwl_cfg iwl7265_2ac_cfg = { | 315 | const struct iwl_cfg iwl7265_2ac_cfg = { |
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c index 2e6c52664cee..c2a5936ccede 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c | |||
@@ -164,7 +164,7 @@ static const struct iwl_tt_params iwl8000_tt_params = { | |||
164 | .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C, \ | 164 | .default_nvm_file_C_step = DEFAULT_NVM_FILE_FAMILY_8000C, \ |
165 | .thermal_params = &iwl8000_tt_params, \ | 165 | .thermal_params = &iwl8000_tt_params, \ |
166 | .apmg_not_supported = true, \ | 166 | .apmg_not_supported = true, \ |
167 | .ext_nvm = true, \ | 167 | .nvm_type = IWL_NVM_EXT, \ |
168 | .dbgc_supported = true | 168 | .dbgc_supported = true |
169 | 169 | ||
170 | #define IWL_DEVICE_8000 \ | 170 | #define IWL_DEVICE_8000 \ |
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c index 2babe0a1f18b..e8b5ff42f5a8 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c | |||
@@ -148,7 +148,7 @@ static const struct iwl_tt_params iwl9000_tt_params = { | |||
148 | .vht_mu_mimo_supported = true, \ | 148 | .vht_mu_mimo_supported = true, \ |
149 | .mac_addr_from_csr = true, \ | 149 | .mac_addr_from_csr = true, \ |
150 | .rf_id = true, \ | 150 | .rf_id = true, \ |
151 | .ext_nvm = true, \ | 151 | .nvm_type = IWL_NVM_EXT, \ |
152 | .dbgc_supported = true | 152 | .dbgc_supported = true |
153 | 153 | ||
154 | const struct iwl_cfg iwl9160_2ac_cfg = { | 154 | const struct iwl_cfg iwl9160_2ac_cfg = { |
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c index 76ba1f8bc72f..a440140ed8dd 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/a000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/a000.c | |||
@@ -133,7 +133,7 @@ static const struct iwl_ht_params iwl_a000_ht_params = { | |||
133 | .use_tfh = true, \ | 133 | .use_tfh = true, \ |
134 | .rf_id = true, \ | 134 | .rf_id = true, \ |
135 | .gen2 = true, \ | 135 | .gen2 = true, \ |
136 | .ext_nvm = true, \ | 136 | .nvm_type = IWL_NVM_EXT, \ |
137 | .dbgc_supported = true | 137 | .dbgc_supported = true |
138 | 138 | ||
139 | const struct iwl_cfg iwla000_2ac_cfg_hr = { | 139 | const struct iwl_cfg iwla000_2ac_cfg_hr = { |
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h index 00bc7a25dece..3fd07bc80f54 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h | |||
@@ -108,6 +108,7 @@ enum iwl_nvm_access_target { | |||
108 | * @NVM_SECTION_TYPE_REGULATORY: regulatory section | 108 | * @NVM_SECTION_TYPE_REGULATORY: regulatory section |
109 | * @NVM_SECTION_TYPE_CALIBRATION: calibration section | 109 | * @NVM_SECTION_TYPE_CALIBRATION: calibration section |
110 | * @NVM_SECTION_TYPE_PRODUCTION: production section | 110 | * @NVM_SECTION_TYPE_PRODUCTION: production section |
111 | * @NVM_SECTION_TYPE_REGULATORY_SDP: regulatory section used by 3168 series | ||
111 | * @NVM_SECTION_TYPE_MAC_OVERRIDE: MAC override section | 112 | * @NVM_SECTION_TYPE_MAC_OVERRIDE: MAC override section |
112 | * @NVM_SECTION_TYPE_PHY_SKU: PHY SKU section | 113 | * @NVM_SECTION_TYPE_PHY_SKU: PHY SKU section |
113 | * @NVM_MAX_NUM_SECTIONS: number of sections | 114 | * @NVM_MAX_NUM_SECTIONS: number of sections |
@@ -117,6 +118,7 @@ enum iwl_nvm_section_type { | |||
117 | NVM_SECTION_TYPE_REGULATORY = 3, | 118 | NVM_SECTION_TYPE_REGULATORY = 3, |
118 | NVM_SECTION_TYPE_CALIBRATION = 4, | 119 | NVM_SECTION_TYPE_CALIBRATION = 4, |
119 | NVM_SECTION_TYPE_PRODUCTION = 5, | 120 | NVM_SECTION_TYPE_PRODUCTION = 5, |
121 | NVM_SECTION_TYPE_REGULATORY_SDP = 8, | ||
120 | NVM_SECTION_TYPE_MAC_OVERRIDE = 11, | 122 | NVM_SECTION_TYPE_MAC_OVERRIDE = 11, |
121 | NVM_SECTION_TYPE_PHY_SKU = 12, | 123 | NVM_SECTION_TYPE_PHY_SKU = 12, |
122 | NVM_MAX_NUM_SECTIONS = 13, | 124 | NVM_MAX_NUM_SECTIONS = 13, |
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index 6afc7a799892..f5dd7d83cd0a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c | |||
@@ -1086,7 +1086,7 @@ void iwl_fw_error_dump_wk(struct work_struct *work) | |||
1086 | 1086 | ||
1087 | if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) { | 1087 | if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) { |
1088 | /* stop recording */ | 1088 | /* stop recording */ |
1089 | iwl_set_bits_prph(fwrt->trans, MON_BUFF_SAMPLE_CTL, 0x100); | 1089 | iwl_fw_dbg_stop_recording(fwrt); |
1090 | 1090 | ||
1091 | iwl_fw_error_dump(fwrt); | 1091 | iwl_fw_error_dump(fwrt); |
1092 | 1092 | ||
@@ -1104,10 +1104,7 @@ void iwl_fw_error_dump_wk(struct work_struct *work) | |||
1104 | u32 in_sample = iwl_read_prph(fwrt->trans, DBGC_IN_SAMPLE); | 1104 | u32 in_sample = iwl_read_prph(fwrt->trans, DBGC_IN_SAMPLE); |
1105 | u32 out_ctrl = iwl_read_prph(fwrt->trans, DBGC_OUT_CTRL); | 1105 | u32 out_ctrl = iwl_read_prph(fwrt->trans, DBGC_OUT_CTRL); |
1106 | 1106 | ||
1107 | /* stop recording */ | 1107 | iwl_fw_dbg_stop_recording(fwrt); |
1108 | iwl_write_prph(fwrt->trans, DBGC_IN_SAMPLE, 0); | ||
1109 | udelay(100); | ||
1110 | iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, 0); | ||
1111 | /* wait before we collect the data till the DBGC stop */ | 1108 | /* wait before we collect the data till the DBGC stop */ |
1112 | udelay(500); | 1109 | udelay(500); |
1113 | 1110 | ||
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h index 0f810ea89d31..9c889a32fe24 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h | |||
@@ -68,6 +68,8 @@ | |||
68 | #include <linux/workqueue.h> | 68 | #include <linux/workqueue.h> |
69 | #include <net/cfg80211.h> | 69 | #include <net/cfg80211.h> |
70 | #include "runtime.h" | 70 | #include "runtime.h" |
71 | #include "iwl-prph.h" | ||
72 | #include "iwl-io.h" | ||
71 | #include "file.h" | 73 | #include "file.h" |
72 | #include "error-dump.h" | 74 | #include "error-dump.h" |
73 | 75 | ||
@@ -194,8 +196,21 @@ _iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt, | |||
194 | iwl_fw_dbg_get_trigger((fwrt)->fw,\ | 196 | iwl_fw_dbg_get_trigger((fwrt)->fw,\ |
195 | (trig))) | 197 | (trig))) |
196 | 198 | ||
199 | static inline void iwl_fw_dbg_stop_recording(struct iwl_fw_runtime *fwrt) | ||
200 | { | ||
201 | if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) { | ||
202 | iwl_set_bits_prph(fwrt->trans, MON_BUFF_SAMPLE_CTL, 0x100); | ||
203 | } else { | ||
204 | iwl_write_prph(fwrt->trans, DBGC_IN_SAMPLE, 0); | ||
205 | udelay(100); | ||
206 | iwl_write_prph(fwrt->trans, DBGC_OUT_CTRL, 0); | ||
207 | } | ||
208 | } | ||
209 | |||
197 | static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt) | 210 | static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt) |
198 | { | 211 | { |
212 | iwl_fw_dbg_stop_recording(fwrt); | ||
213 | |||
199 | fwrt->dump.conf = FW_DBG_INVALID; | 214 | fwrt->dump.conf = FW_DBG_INVALID; |
200 | } | 215 | } |
201 | 216 | ||
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 3e057b539d5b..71cb1ecde0f7 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h | |||
@@ -108,6 +108,18 @@ enum iwl_led_mode { | |||
108 | IWL_LED_DISABLE, | 108 | IWL_LED_DISABLE, |
109 | }; | 109 | }; |
110 | 110 | ||
111 | /** | ||
112 | * enum iwl_nvm_type - nvm formats | ||
113 | * @IWL_NVM: the regular format | ||
114 | * @IWL_NVM_EXT: extended NVM format | ||
115 | * @IWL_NVM_SDP: NVM format used by 3168 series | ||
116 | */ | ||
117 | enum iwl_nvm_type { | ||
118 | IWL_NVM, | ||
119 | IWL_NVM_EXT, | ||
120 | IWL_NVM_SDP, | ||
121 | }; | ||
122 | |||
111 | /* | 123 | /* |
112 | * This is the threshold value of plcp error rate per 100mSecs. It is | 124 | * This is the threshold value of plcp error rate per 100mSecs. It is |
113 | * used to set and check for the validity of plcp_delta. | 125 | * used to set and check for the validity of plcp_delta. |
@@ -320,7 +332,7 @@ struct iwl_pwr_tx_backoff { | |||
320 | * @integrated: discrete or integrated | 332 | * @integrated: discrete or integrated |
321 | * @gen2: a000 and on transport operation | 333 | * @gen2: a000 and on transport operation |
322 | * @cdb: CDB support | 334 | * @cdb: CDB support |
323 | * @ext_nvm: extended NVM format | 335 | * @nvm_type: see &enum iwl_nvm_type |
324 | * | 336 | * |
325 | * We enable the driver to be backward compatible wrt. hardware features. | 337 | * We enable the driver to be backward compatible wrt. hardware features. |
326 | * API differences in uCode shouldn't be handled here but through TLVs | 338 | * API differences in uCode shouldn't be handled here but through TLVs |
@@ -342,6 +354,7 @@ struct iwl_cfg { | |||
342 | const struct iwl_tt_params *thermal_params; | 354 | const struct iwl_tt_params *thermal_params; |
343 | enum iwl_device_family device_family; | 355 | enum iwl_device_family device_family; |
344 | enum iwl_led_mode led_mode; | 356 | enum iwl_led_mode led_mode; |
357 | enum iwl_nvm_type nvm_type; | ||
345 | u32 max_data_size; | 358 | u32 max_data_size; |
346 | u32 max_inst_size; | 359 | u32 max_inst_size; |
347 | netdev_features_t features; | 360 | netdev_features_t features; |
@@ -369,7 +382,6 @@ struct iwl_cfg { | |||
369 | use_tfh:1, | 382 | use_tfh:1, |
370 | gen2:1, | 383 | gen2:1, |
371 | cdb:1, | 384 | cdb:1, |
372 | ext_nvm:1, | ||
373 | dbgc_supported:1; | 385 | dbgc_supported:1; |
374 | u8 valid_tx_ant; | 386 | u8 valid_tx_ant; |
375 | u8 valid_rx_ant; | 387 | u8 valid_rx_ant; |
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 3014beef4873..c3a5d8ccc95e 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c | |||
@@ -77,7 +77,7 @@ | |||
77 | #include "iwl-csr.h" | 77 | #include "iwl-csr.h" |
78 | 78 | ||
79 | /* NVM offsets (in words) definitions */ | 79 | /* NVM offsets (in words) definitions */ |
80 | enum wkp_nvm_offsets { | 80 | enum nvm_offsets { |
81 | /* NVM HW-Section offset (in words) definitions */ | 81 | /* NVM HW-Section offset (in words) definitions */ |
82 | SUBSYSTEM_ID = 0x0A, | 82 | SUBSYSTEM_ID = 0x0A, |
83 | HW_ADDR = 0x15, | 83 | HW_ADDR = 0x15, |
@@ -92,7 +92,10 @@ enum wkp_nvm_offsets { | |||
92 | 92 | ||
93 | /* NVM calibration section offset (in words) definitions */ | 93 | /* NVM calibration section offset (in words) definitions */ |
94 | NVM_CALIB_SECTION = 0x2B8, | 94 | NVM_CALIB_SECTION = 0x2B8, |
95 | XTAL_CALIB = 0x316 - NVM_CALIB_SECTION | 95 | XTAL_CALIB = 0x316 - NVM_CALIB_SECTION, |
96 | |||
97 | /* NVM REGULATORY -Section offset (in words) definitions */ | ||
98 | NVM_CHANNELS_SDP = 0, | ||
96 | }; | 99 | }; |
97 | 100 | ||
98 | enum ext_nvm_offsets { | 101 | enum ext_nvm_offsets { |
@@ -206,8 +209,36 @@ enum iwl_nvm_channel_flags { | |||
206 | NVM_CHANNEL_DC_HIGH = BIT(12), | 209 | NVM_CHANNEL_DC_HIGH = BIT(12), |
207 | }; | 210 | }; |
208 | 211 | ||
212 | static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level, | ||
213 | int chan, u16 flags) | ||
214 | { | ||
209 | #define CHECK_AND_PRINT_I(x) \ | 215 | #define CHECK_AND_PRINT_I(x) \ |
210 | ((ch_flags & NVM_CHANNEL_##x) ? # x " " : "") | 216 | ((flags & NVM_CHANNEL_##x) ? " " #x : "") |
217 | |||
218 | if (!(flags & NVM_CHANNEL_VALID)) { | ||
219 | IWL_DEBUG_DEV(dev, level, "Ch. %d: 0x%x: No traffic\n", | ||
220 | chan, flags); | ||
221 | return; | ||
222 | } | ||
223 | |||
224 | /* Note: already can print up to 101 characters, 110 is the limit! */ | ||
225 | IWL_DEBUG_DEV(dev, level, | ||
226 | "Ch. %d: 0x%x:%s%s%s%s%s%s%s%s%s%s%s%s\n", | ||
227 | chan, flags, | ||
228 | CHECK_AND_PRINT_I(VALID), | ||
229 | CHECK_AND_PRINT_I(IBSS), | ||
230 | CHECK_AND_PRINT_I(ACTIVE), | ||
231 | CHECK_AND_PRINT_I(RADAR), | ||
232 | CHECK_AND_PRINT_I(INDOOR_ONLY), | ||
233 | CHECK_AND_PRINT_I(GO_CONCURRENT), | ||
234 | CHECK_AND_PRINT_I(UNIFORM), | ||
235 | CHECK_AND_PRINT_I(20MHZ), | ||
236 | CHECK_AND_PRINT_I(40MHZ), | ||
237 | CHECK_AND_PRINT_I(80MHZ), | ||
238 | CHECK_AND_PRINT_I(160MHZ), | ||
239 | CHECK_AND_PRINT_I(DC_HIGH)); | ||
240 | #undef CHECK_AND_PRINT_I | ||
241 | } | ||
211 | 242 | ||
212 | static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz, | 243 | static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz, |
213 | u16 nvm_flags, const struct iwl_cfg *cfg) | 244 | u16 nvm_flags, const struct iwl_cfg *cfg) |
@@ -215,7 +246,7 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, bool is_5ghz, | |||
215 | u32 flags = IEEE80211_CHAN_NO_HT40; | 246 | u32 flags = IEEE80211_CHAN_NO_HT40; |
216 | u32 last_5ghz_ht = LAST_5GHZ_HT; | 247 | u32 last_5ghz_ht = LAST_5GHZ_HT; |
217 | 248 | ||
218 | if (cfg->ext_nvm) | 249 | if (cfg->nvm_type == IWL_NVM_EXT) |
219 | last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000; | 250 | last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000; |
220 | 251 | ||
221 | if (!is_5ghz && (nvm_flags & NVM_CHANNEL_40MHZ)) { | 252 | if (!is_5ghz && (nvm_flags & NVM_CHANNEL_40MHZ)) { |
@@ -268,7 +299,7 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, | |||
268 | int num_of_ch, num_2ghz_channels; | 299 | int num_of_ch, num_2ghz_channels; |
269 | const u8 *nvm_chan; | 300 | const u8 *nvm_chan; |
270 | 301 | ||
271 | if (!cfg->ext_nvm) { | 302 | if (cfg->nvm_type != IWL_NVM_EXT) { |
272 | num_of_ch = IWL_NUM_CHANNELS; | 303 | num_of_ch = IWL_NUM_CHANNELS; |
273 | nvm_chan = &iwl_nvm_channels[0]; | 304 | nvm_chan = &iwl_nvm_channels[0]; |
274 | num_2ghz_channels = NUM_2GHZ_CHANNELS; | 305 | num_2ghz_channels = NUM_2GHZ_CHANNELS; |
@@ -302,12 +333,8 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, | |||
302 | * supported, hence we still want to add them to | 333 | * supported, hence we still want to add them to |
303 | * the list of supported channels to cfg80211. | 334 | * the list of supported channels to cfg80211. |
304 | */ | 335 | */ |
305 | IWL_DEBUG_EEPROM(dev, | 336 | iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM, |
306 | "Ch. %d Flags %x [%sGHz] - No traffic\n", | 337 | nvm_chan[ch_idx], ch_flags); |
307 | nvm_chan[ch_idx], | ||
308 | ch_flags, | ||
309 | (ch_idx >= num_2ghz_channels) ? | ||
310 | "5.2" : "2.4"); | ||
311 | continue; | 338 | continue; |
312 | } | 339 | } |
313 | 340 | ||
@@ -337,27 +364,10 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, | |||
337 | else | 364 | else |
338 | channel->flags = 0; | 365 | channel->flags = 0; |
339 | 366 | ||
340 | IWL_DEBUG_EEPROM(dev, | 367 | iwl_nvm_print_channel_flags(dev, IWL_DL_EEPROM, |
341 | "Ch. %d [%sGHz] flags 0x%x %s%s%s%s%s%s%s%s%s%s%s%s(%ddBm): Ad-Hoc %ssupported\n", | 368 | channel->hw_value, ch_flags); |
342 | channel->hw_value, | 369 | IWL_DEBUG_EEPROM(dev, "Ch. %d: %ddBm\n", |
343 | is_5ghz ? "5.2" : "2.4", | 370 | channel->hw_value, channel->max_power); |
344 | ch_flags, | ||
345 | CHECK_AND_PRINT_I(VALID), | ||
346 | CHECK_AND_PRINT_I(IBSS), | ||
347 | CHECK_AND_PRINT_I(ACTIVE), | ||
348 | CHECK_AND_PRINT_I(RADAR), | ||
349 | CHECK_AND_PRINT_I(INDOOR_ONLY), | ||
350 | CHECK_AND_PRINT_I(GO_CONCURRENT), | ||
351 | CHECK_AND_PRINT_I(UNIFORM), | ||
352 | CHECK_AND_PRINT_I(20MHZ), | ||
353 | CHECK_AND_PRINT_I(40MHZ), | ||
354 | CHECK_AND_PRINT_I(80MHZ), | ||
355 | CHECK_AND_PRINT_I(160MHZ), | ||
356 | CHECK_AND_PRINT_I(DC_HIGH), | ||
357 | channel->max_power, | ||
358 | ((ch_flags & NVM_CHANNEL_IBSS) && | ||
359 | !(ch_flags & NVM_CHANNEL_RADAR)) | ||
360 | ? "" : "not "); | ||
361 | } | 371 | } |
362 | 372 | ||
363 | return n_channels; | 373 | return n_channels; |
@@ -484,7 +494,7 @@ IWL_EXPORT_SYMBOL(iwl_init_sbands); | |||
484 | static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw, | 494 | static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw, |
485 | const __le16 *phy_sku) | 495 | const __le16 *phy_sku) |
486 | { | 496 | { |
487 | if (!cfg->ext_nvm) | 497 | if (cfg->nvm_type != IWL_NVM_EXT) |
488 | return le16_to_cpup(nvm_sw + SKU); | 498 | return le16_to_cpup(nvm_sw + SKU); |
489 | 499 | ||
490 | return le32_to_cpup((__le32 *)(phy_sku + SKU_FAMILY_8000)); | 500 | return le32_to_cpup((__le32 *)(phy_sku + SKU_FAMILY_8000)); |
@@ -492,7 +502,7 @@ static int iwl_get_sku(const struct iwl_cfg *cfg, const __le16 *nvm_sw, | |||
492 | 502 | ||
493 | static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw) | 503 | static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw) |
494 | { | 504 | { |
495 | if (!cfg->ext_nvm) | 505 | if (cfg->nvm_type != IWL_NVM_EXT) |
496 | return le16_to_cpup(nvm_sw + NVM_VERSION); | 506 | return le16_to_cpup(nvm_sw + NVM_VERSION); |
497 | else | 507 | else |
498 | return le32_to_cpup((__le32 *)(nvm_sw + | 508 | return le32_to_cpup((__le32 *)(nvm_sw + |
@@ -502,7 +512,7 @@ static int iwl_get_nvm_version(const struct iwl_cfg *cfg, const __le16 *nvm_sw) | |||
502 | static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw, | 512 | static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw, |
503 | const __le16 *phy_sku) | 513 | const __le16 *phy_sku) |
504 | { | 514 | { |
505 | if (!cfg->ext_nvm) | 515 | if (cfg->nvm_type != IWL_NVM_EXT) |
506 | return le16_to_cpup(nvm_sw + RADIO_CFG); | 516 | return le16_to_cpup(nvm_sw + RADIO_CFG); |
507 | 517 | ||
508 | return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_EXT_NVM)); | 518 | return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_EXT_NVM)); |
@@ -513,7 +523,7 @@ static int iwl_get_n_hw_addrs(const struct iwl_cfg *cfg, const __le16 *nvm_sw) | |||
513 | { | 523 | { |
514 | int n_hw_addr; | 524 | int n_hw_addr; |
515 | 525 | ||
516 | if (!cfg->ext_nvm) | 526 | if (cfg->nvm_type != IWL_NVM_EXT) |
517 | return le16_to_cpup(nvm_sw + N_HW_ADDRS); | 527 | return le16_to_cpup(nvm_sw + N_HW_ADDRS); |
518 | 528 | ||
519 | n_hw_addr = le32_to_cpup((__le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000)); | 529 | n_hw_addr = le32_to_cpup((__le32 *)(nvm_sw + N_HW_ADDRS_FAMILY_8000)); |
@@ -525,7 +535,7 @@ static void iwl_set_radio_cfg(const struct iwl_cfg *cfg, | |||
525 | struct iwl_nvm_data *data, | 535 | struct iwl_nvm_data *data, |
526 | u32 radio_cfg) | 536 | u32 radio_cfg) |
527 | { | 537 | { |
528 | if (!cfg->ext_nvm) { | 538 | if (cfg->nvm_type != IWL_NVM_EXT) { |
529 | data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK(radio_cfg); | 539 | data->radio_cfg_type = NVM_RF_CFG_TYPE_MSK(radio_cfg); |
530 | data->radio_cfg_step = NVM_RF_CFG_STEP_MSK(radio_cfg); | 540 | data->radio_cfg_step = NVM_RF_CFG_STEP_MSK(radio_cfg); |
531 | data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK(radio_cfg); | 541 | data->radio_cfg_dash = NVM_RF_CFG_DASH_MSK(radio_cfg); |
@@ -634,7 +644,7 @@ static int iwl_set_hw_address(struct iwl_trans *trans, | |||
634 | { | 644 | { |
635 | if (cfg->mac_addr_from_csr) { | 645 | if (cfg->mac_addr_from_csr) { |
636 | iwl_set_hw_address_from_csr(trans, data); | 646 | iwl_set_hw_address_from_csr(trans, data); |
637 | } else if (!cfg->ext_nvm) { | 647 | } else if (cfg->nvm_type != IWL_NVM_EXT) { |
638 | const u8 *hw_addr = (const u8 *)(nvm_hw + HW_ADDR); | 648 | const u8 *hw_addr = (const u8 *)(nvm_hw + HW_ADDR); |
639 | 649 | ||
640 | /* The byte order is little endian 16 bit, meaning 214365 */ | 650 | /* The byte order is little endian 16 bit, meaning 214365 */ |
@@ -706,7 +716,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, | |||
706 | u16 lar_config; | 716 | u16 lar_config; |
707 | const __le16 *ch_section; | 717 | const __le16 *ch_section; |
708 | 718 | ||
709 | if (!cfg->ext_nvm) | 719 | if (cfg->nvm_type != IWL_NVM_EXT) |
710 | data = kzalloc(sizeof(*data) + | 720 | data = kzalloc(sizeof(*data) + |
711 | sizeof(struct ieee80211_channel) * | 721 | sizeof(struct ieee80211_channel) * |
712 | IWL_NUM_CHANNELS, | 722 | IWL_NUM_CHANNELS, |
@@ -740,7 +750,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, | |||
740 | 750 | ||
741 | data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw); | 751 | data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw); |
742 | 752 | ||
743 | if (!cfg->ext_nvm) { | 753 | if (cfg->nvm_type != IWL_NVM_EXT) { |
744 | /* Checking for required sections */ | 754 | /* Checking for required sections */ |
745 | if (!nvm_calib) { | 755 | if (!nvm_calib) { |
746 | IWL_ERR(trans, | 756 | IWL_ERR(trans, |
@@ -748,11 +758,15 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, | |||
748 | kfree(data); | 758 | kfree(data); |
749 | return NULL; | 759 | return NULL; |
750 | } | 760 | } |
761 | |||
762 | ch_section = cfg->nvm_type == IWL_NVM_SDP ? | ||
763 | ®ulatory[NVM_CHANNELS_SDP] : | ||
764 | &nvm_sw[NVM_CHANNELS]; | ||
765 | |||
751 | /* in family 8000 Xtal calibration values moved to OTP */ | 766 | /* in family 8000 Xtal calibration values moved to OTP */ |
752 | data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB); | 767 | data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB); |
753 | data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1); | 768 | data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1); |
754 | lar_enabled = true; | 769 | lar_enabled = true; |
755 | ch_section = &nvm_sw[NVM_CHANNELS]; | ||
756 | } else { | 770 | } else { |
757 | u16 lar_offset = data->nvm_version < 0xE39 ? | 771 | u16 lar_offset = data->nvm_version < 0xE39 ? |
758 | NVM_LAR_OFFSET_OLD : | 772 | NVM_LAR_OFFSET_OLD : |
@@ -786,7 +800,7 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u8 *nvm_chan, | |||
786 | u32 flags = NL80211_RRF_NO_HT40; | 800 | u32 flags = NL80211_RRF_NO_HT40; |
787 | u32 last_5ghz_ht = LAST_5GHZ_HT; | 801 | u32 last_5ghz_ht = LAST_5GHZ_HT; |
788 | 802 | ||
789 | if (cfg->ext_nvm) | 803 | if (cfg->nvm_type == IWL_NVM_EXT) |
790 | last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000; | 804 | last_5ghz_ht = LAST_5GHZ_HT_FAMILY_8000; |
791 | 805 | ||
792 | if (ch_idx < NUM_2GHZ_CHANNELS && | 806 | if (ch_idx < NUM_2GHZ_CHANNELS && |
@@ -834,7 +848,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, | |||
834 | int ch_idx; | 848 | int ch_idx; |
835 | u16 ch_flags; | 849 | u16 ch_flags; |
836 | u32 reg_rule_flags, prev_reg_rule_flags = 0; | 850 | u32 reg_rule_flags, prev_reg_rule_flags = 0; |
837 | const u8 *nvm_chan = cfg->ext_nvm ? | 851 | const u8 *nvm_chan = cfg->nvm_type == IWL_NVM_EXT ? |
838 | iwl_ext_nvm_channels : iwl_nvm_channels; | 852 | iwl_ext_nvm_channels : iwl_nvm_channels; |
839 | struct ieee80211_regdomain *regd; | 853 | struct ieee80211_regdomain *regd; |
840 | int size_of_regd; | 854 | int size_of_regd; |
@@ -843,7 +857,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, | |||
843 | int center_freq, prev_center_freq = 0; | 857 | int center_freq, prev_center_freq = 0; |
844 | int valid_rules = 0; | 858 | int valid_rules = 0; |
845 | bool new_rule; | 859 | bool new_rule; |
846 | int max_num_ch = cfg->ext_nvm ? | 860 | int max_num_ch = cfg->nvm_type == IWL_NVM_EXT ? |
847 | IWL_NUM_CHANNELS_EXT : IWL_NUM_CHANNELS; | 861 | IWL_NUM_CHANNELS_EXT : IWL_NUM_CHANNELS; |
848 | 862 | ||
849 | if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES)) | 863 | if (WARN_ON_ONCE(num_of_ch > NL80211_MAX_SUPP_REG_RULES)) |
@@ -873,12 +887,8 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, | |||
873 | new_rule = false; | 887 | new_rule = false; |
874 | 888 | ||
875 | if (!(ch_flags & NVM_CHANNEL_VALID)) { | 889 | if (!(ch_flags & NVM_CHANNEL_VALID)) { |
876 | IWL_DEBUG_DEV(dev, IWL_DL_LAR, | 890 | iwl_nvm_print_channel_flags(dev, IWL_DL_LAR, |
877 | "Ch. %d Flags %x [%sGHz] - No traffic\n", | 891 | nvm_chan[ch_idx], ch_flags); |
878 | nvm_chan[ch_idx], | ||
879 | ch_flags, | ||
880 | (ch_idx >= NUM_2GHZ_CHANNELS) ? | ||
881 | "5.2" : "2.4"); | ||
882 | continue; | 892 | continue; |
883 | } | 893 | } |
884 | 894 | ||
@@ -914,31 +924,8 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, | |||
914 | prev_center_freq = center_freq; | 924 | prev_center_freq = center_freq; |
915 | prev_reg_rule_flags = reg_rule_flags; | 925 | prev_reg_rule_flags = reg_rule_flags; |
916 | 926 | ||
917 | IWL_DEBUG_DEV(dev, IWL_DL_LAR, | 927 | iwl_nvm_print_channel_flags(dev, IWL_DL_LAR, |
918 | "Ch. %d [%sGHz] %s%s%s%s%s%s%s%s%s%s%s%s(0x%02x)\n", | 928 | nvm_chan[ch_idx], ch_flags); |
919 | center_freq, | ||
920 | band == NL80211_BAND_5GHZ ? "5.2" : "2.4", | ||
921 | CHECK_AND_PRINT_I(VALID), | ||
922 | CHECK_AND_PRINT_I(IBSS), | ||
923 | CHECK_AND_PRINT_I(ACTIVE), | ||
924 | CHECK_AND_PRINT_I(RADAR), | ||
925 | CHECK_AND_PRINT_I(INDOOR_ONLY), | ||
926 | CHECK_AND_PRINT_I(GO_CONCURRENT), | ||
927 | CHECK_AND_PRINT_I(UNIFORM), | ||
928 | CHECK_AND_PRINT_I(20MHZ), | ||
929 | CHECK_AND_PRINT_I(40MHZ), | ||
930 | CHECK_AND_PRINT_I(80MHZ), | ||
931 | CHECK_AND_PRINT_I(160MHZ), | ||
932 | CHECK_AND_PRINT_I(DC_HIGH), | ||
933 | ch_flags); | ||
934 | IWL_DEBUG_DEV(dev, IWL_DL_LAR, | ||
935 | "Ch. %d [%sGHz] reg_flags 0x%x: %s\n", | ||
936 | center_freq, | ||
937 | band == NL80211_BAND_5GHZ ? "5.2" : "2.4", | ||
938 | reg_rule_flags, | ||
939 | ((ch_flags & NVM_CHANNEL_ACTIVE) && | ||
940 | !(ch_flags & NVM_CHANNEL_RADAR)) | ||
941 | ? "Ad-Hoc" : ""); | ||
942 | } | 929 | } |
943 | 930 | ||
944 | regd->n_reg_rules = valid_rules; | 931 | regd->n_reg_rules = valid_rules; |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 3bcaa82f59b2..a9ac872226fd 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | |||
@@ -1077,6 +1077,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) | |||
1077 | mvm->vif_count = 0; | 1077 | mvm->vif_count = 0; |
1078 | mvm->rx_ba_sessions = 0; | 1078 | mvm->rx_ba_sessions = 0; |
1079 | mvm->fwrt.dump.conf = FW_DBG_INVALID; | 1079 | mvm->fwrt.dump.conf = FW_DBG_INVALID; |
1080 | mvm->monitor_on = false; | ||
1080 | 1081 | ||
1081 | /* keep statistics ticking */ | 1082 | /* keep statistics ticking */ |
1082 | iwl_mvm_accu_radio_stats(mvm); | 1083 | iwl_mvm_accu_radio_stats(mvm); |
@@ -1437,6 +1438,9 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, | |||
1437 | mvm->p2p_device_vif = vif; | 1438 | mvm->p2p_device_vif = vif; |
1438 | } | 1439 | } |
1439 | 1440 | ||
1441 | if (vif->type == NL80211_IFTYPE_MONITOR) | ||
1442 | mvm->monitor_on = true; | ||
1443 | |||
1440 | iwl_mvm_vif_dbgfs_register(mvm, vif); | 1444 | iwl_mvm_vif_dbgfs_register(mvm, vif); |
1441 | goto out_unlock; | 1445 | goto out_unlock; |
1442 | 1446 | ||
@@ -1526,6 +1530,9 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, | |||
1526 | iwl_mvm_power_update_mac(mvm); | 1530 | iwl_mvm_power_update_mac(mvm); |
1527 | iwl_mvm_mac_ctxt_remove(mvm, vif); | 1531 | iwl_mvm_mac_ctxt_remove(mvm, vif); |
1528 | 1532 | ||
1533 | if (vif->type == NL80211_IFTYPE_MONITOR) | ||
1534 | mvm->monitor_on = false; | ||
1535 | |||
1529 | out_release: | 1536 | out_release: |
1530 | mutex_unlock(&mvm->mutex); | 1537 | mutex_unlock(&mvm->mutex); |
1531 | } | 1538 | } |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 83303bac0e4b..949e63418299 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | |||
@@ -1015,6 +1015,9 @@ struct iwl_mvm { | |||
1015 | bool drop_bcn_ap_mode; | 1015 | bool drop_bcn_ap_mode; |
1016 | 1016 | ||
1017 | struct delayed_work cs_tx_unblock_dwork; | 1017 | struct delayed_work cs_tx_unblock_dwork; |
1018 | |||
1019 | /* does a monitor vif exist (only one can exist hence bool) */ | ||
1020 | bool monitor_on; | ||
1018 | #ifdef CONFIG_ACPI | 1021 | #ifdef CONFIG_ACPI |
1019 | struct iwl_mvm_sar_profile sar_profiles[IWL_MVM_SAR_PROFILE_NUM]; | 1022 | struct iwl_mvm_sar_profile sar_profiles[IWL_MVM_SAR_PROFILE_NUM]; |
1020 | struct iwl_mvm_geo_profile geo_profiles[IWL_NUM_GEO_PROFILES]; | 1023 | struct iwl_mvm_geo_profile geo_profiles[IWL_NUM_GEO_PROFILES]; |
@@ -1159,7 +1162,7 @@ static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm) | |||
1159 | * Enable LAR only if it is supported by the FW (TLV) && | 1162 | * Enable LAR only if it is supported by the FW (TLV) && |
1160 | * enabled in the NVM | 1163 | * enabled in the NVM |
1161 | */ | 1164 | */ |
1162 | if (mvm->cfg->ext_nvm) | 1165 | if (mvm->cfg->nvm_type == IWL_NVM_EXT) |
1163 | return nvm_lar && tlv_lar; | 1166 | return nvm_lar && tlv_lar; |
1164 | else | 1167 | else |
1165 | return tlv_lar; | 1168 | return tlv_lar; |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index 422aa6be9932..fb25b6f29323 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c | |||
@@ -295,18 +295,24 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm) | |||
295 | const __be16 *hw; | 295 | const __be16 *hw; |
296 | const __le16 *sw, *calib, *regulatory, *mac_override, *phy_sku; | 296 | const __le16 *sw, *calib, *regulatory, *mac_override, *phy_sku; |
297 | bool lar_enabled; | 297 | bool lar_enabled; |
298 | int regulatory_type; | ||
298 | 299 | ||
299 | /* Checking for required sections */ | 300 | /* Checking for required sections */ |
300 | if (!mvm->trans->cfg->ext_nvm) { | 301 | if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) { |
301 | if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data || | 302 | if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data || |
302 | !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) { | 303 | !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) { |
303 | IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n"); | 304 | IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n"); |
304 | return NULL; | 305 | return NULL; |
305 | } | 306 | } |
306 | } else { | 307 | } else { |
308 | if (mvm->trans->cfg->nvm_type == IWL_NVM_SDP) | ||
309 | regulatory_type = NVM_SECTION_TYPE_REGULATORY_SDP; | ||
310 | else | ||
311 | regulatory_type = NVM_SECTION_TYPE_REGULATORY; | ||
312 | |||
307 | /* SW and REGULATORY sections are mandatory */ | 313 | /* SW and REGULATORY sections are mandatory */ |
308 | if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data || | 314 | if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data || |
309 | !mvm->nvm_sections[NVM_SECTION_TYPE_REGULATORY].data) { | 315 | !mvm->nvm_sections[regulatory_type].data) { |
310 | IWL_ERR(mvm, | 316 | IWL_ERR(mvm, |
311 | "Can't parse empty family 8000 OTP/NVM sections\n"); | 317 | "Can't parse empty family 8000 OTP/NVM sections\n"); |
312 | return NULL; | 318 | return NULL; |
@@ -330,11 +336,14 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm) | |||
330 | hw = (const __be16 *)sections[mvm->cfg->nvm_hw_section_num].data; | 336 | hw = (const __be16 *)sections[mvm->cfg->nvm_hw_section_num].data; |
331 | sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data; | 337 | sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data; |
332 | calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data; | 338 | calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data; |
333 | regulatory = (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data; | ||
334 | mac_override = | 339 | mac_override = |
335 | (const __le16 *)sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data; | 340 | (const __le16 *)sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data; |
336 | phy_sku = (const __le16 *)sections[NVM_SECTION_TYPE_PHY_SKU].data; | 341 | phy_sku = (const __le16 *)sections[NVM_SECTION_TYPE_PHY_SKU].data; |
337 | 342 | ||
343 | regulatory = mvm->trans->cfg->nvm_type == IWL_NVM_SDP ? | ||
344 | (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY_SDP].data : | ||
345 | (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data; | ||
346 | |||
338 | lar_enabled = !iwlwifi_mod_params.lar_disable && | 347 | lar_enabled = !iwlwifi_mod_params.lar_disable && |
339 | fw_has_capa(&mvm->fw->ucode_capa, | 348 | fw_has_capa(&mvm->fw->ucode_capa, |
340 | IWL_UCODE_TLV_CAPA_LAR_SUPPORT); | 349 | IWL_UCODE_TLV_CAPA_LAR_SUPPORT); |
@@ -394,7 +403,7 @@ int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm) | |||
394 | IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from external NVM\n"); | 403 | IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from external NVM\n"); |
395 | 404 | ||
396 | /* Maximal size depends on NVM version */ | 405 | /* Maximal size depends on NVM version */ |
397 | if (!mvm->trans->cfg->ext_nvm) | 406 | if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) |
398 | max_section_size = IWL_MAX_NVM_SECTION_SIZE; | 407 | max_section_size = IWL_MAX_NVM_SECTION_SIZE; |
399 | else | 408 | else |
400 | max_section_size = IWL_MAX_EXT_NVM_SECTION_SIZE; | 409 | max_section_size = IWL_MAX_EXT_NVM_SECTION_SIZE; |
@@ -465,7 +474,7 @@ int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm) | |||
465 | break; | 474 | break; |
466 | } | 475 | } |
467 | 476 | ||
468 | if (!mvm->trans->cfg->ext_nvm) { | 477 | if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) { |
469 | section_size = | 478 | section_size = |
470 | 2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1)); | 479 | 2 * NVM_WORD1_LEN(le16_to_cpu(file_sec->word1)); |
471 | section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2)); | 480 | section_id = NVM_WORD2_ID(le16_to_cpu(file_sec->word2)); |
@@ -740,7 +749,7 @@ int iwl_mvm_init_mcc(struct iwl_mvm *mvm) | |||
740 | struct ieee80211_regdomain *regd; | 749 | struct ieee80211_regdomain *regd; |
741 | char mcc[3]; | 750 | char mcc[3]; |
742 | 751 | ||
743 | if (mvm->cfg->ext_nvm) { | 752 | if (mvm->cfg->nvm_type == IWL_NVM_EXT) { |
744 | tlv_lar = fw_has_capa(&mvm->fw->ucode_capa, | 753 | tlv_lar = fw_has_capa(&mvm->fw->ucode_capa, |
745 | IWL_UCODE_TLV_CAPA_LAR_SUPPORT); | 754 | IWL_UCODE_TLV_CAPA_LAR_SUPPORT); |
746 | nvm_lar = mvm->nvm_data->lar_enabled; | 755 | nvm_lar = mvm->nvm_data->lar_enabled; |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index 184c749766f2..2d14a58cbdd7 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c | |||
@@ -244,7 +244,9 @@ static u32 iwl_mvm_set_mac80211_rx_flag(struct iwl_mvm *mvm, | |||
244 | return 0; | 244 | return 0; |
245 | 245 | ||
246 | default: | 246 | default: |
247 | IWL_ERR(mvm, "Unhandled alg: 0x%x\n", rx_pkt_status); | 247 | /* Expected in monitor (not having the keys) */ |
248 | if (!mvm->monitor_on) | ||
249 | IWL_ERR(mvm, "Unhandled alg: 0x%x\n", rx_pkt_status); | ||
248 | } | 250 | } |
249 | 251 | ||
250 | return 0; | 252 | return 0; |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 77f77bc5d083..248699c2c4bf 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c | |||
@@ -277,7 +277,9 @@ static int iwl_mvm_rx_crypto(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr, | |||
277 | stats->flag |= RX_FLAG_DECRYPTED; | 277 | stats->flag |= RX_FLAG_DECRYPTED; |
278 | return 0; | 278 | return 0; |
279 | default: | 279 | default: |
280 | IWL_ERR(mvm, "Unhandled alg: 0x%x\n", status); | 280 | /* Expected in monitor (not having the keys) */ |
281 | if (!mvm->monitor_on) | ||
282 | IWL_ERR(mvm, "Unhandled alg: 0x%x\n", status); | ||
281 | } | 283 | } |
282 | 284 | ||
283 | return 0; | 285 | return 0; |
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index 4d907f60bce9..1232f63278eb 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c | |||
@@ -631,7 +631,7 @@ static int iwl_mvm_tzone_get_temp(struct thermal_zone_device *device, | |||
631 | 631 | ||
632 | if (!iwl_mvm_firmware_running(mvm) || | 632 | if (!iwl_mvm_firmware_running(mvm) || |
633 | mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) { | 633 | mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) { |
634 | ret = -EIO; | 634 | ret = -ENODATA; |
635 | goto out; | 635 | goto out; |
636 | } | 636 | } |
637 | 637 | ||
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c index 4f73012978e9..1d431d4bf6d2 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c | |||
@@ -1122,7 +1122,7 @@ static u8 _rtl8821ae_dbi_read(struct rtl_priv *rtlpriv, u16 addr) | |||
1122 | } | 1122 | } |
1123 | if (0 == tmp) { | 1123 | if (0 == tmp) { |
1124 | read_addr = REG_DBI_RDATA + addr % 4; | 1124 | read_addr = REG_DBI_RDATA + addr % 4; |
1125 | ret = rtl_read_byte(rtlpriv, read_addr); | 1125 | ret = rtl_read_word(rtlpriv, read_addr); |
1126 | } | 1126 | } |
1127 | return ret; | 1127 | return ret; |
1128 | } | 1128 | } |
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index ee8ed9da00ad..4491ca5aee90 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c | |||
@@ -486,7 +486,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, | |||
486 | 486 | ||
487 | dev->tx_queue_len = XENVIF_QUEUE_LENGTH; | 487 | dev->tx_queue_len = XENVIF_QUEUE_LENGTH; |
488 | 488 | ||
489 | dev->min_mtu = 0; | 489 | dev->min_mtu = ETH_MIN_MTU; |
490 | dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN; | 490 | dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN; |
491 | 491 | ||
492 | /* | 492 | /* |
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 523387e71a80..8b8689c6d887 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c | |||
@@ -1316,7 +1316,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev) | |||
1316 | netdev->features |= netdev->hw_features; | 1316 | netdev->features |= netdev->hw_features; |
1317 | 1317 | ||
1318 | netdev->ethtool_ops = &xennet_ethtool_ops; | 1318 | netdev->ethtool_ops = &xennet_ethtool_ops; |
1319 | netdev->min_mtu = 0; | 1319 | netdev->min_mtu = ETH_MIN_MTU; |
1320 | netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE; | 1320 | netdev->max_mtu = XEN_NETIF_MAX_TX_SIZE; |
1321 | SET_NETDEV_DEV(netdev, &dev->dev); | 1321 | SET_NETDEV_DEV(netdev, &dev->dev); |
1322 | 1322 | ||
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index af075e998944..be49d0f79381 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c | |||
@@ -2545,10 +2545,10 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl) | |||
2545 | nvme_fc_abort_aen_ops(ctrl); | 2545 | nvme_fc_abort_aen_ops(ctrl); |
2546 | 2546 | ||
2547 | /* wait for all io that had to be aborted */ | 2547 | /* wait for all io that had to be aborted */ |
2548 | spin_lock_irqsave(&ctrl->lock, flags); | 2548 | spin_lock_irq(&ctrl->lock); |
2549 | wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock); | 2549 | wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock); |
2550 | ctrl->flags &= ~FCCTRL_TERMIO; | 2550 | ctrl->flags &= ~FCCTRL_TERMIO; |
2551 | spin_unlock_irqrestore(&ctrl->lock, flags); | 2551 | spin_unlock_irq(&ctrl->lock); |
2552 | 2552 | ||
2553 | nvme_fc_term_aen_ops(ctrl); | 2553 | nvme_fc_term_aen_ops(ctrl); |
2554 | 2554 | ||
@@ -2734,7 +2734,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, | |||
2734 | { | 2734 | { |
2735 | struct nvme_fc_ctrl *ctrl; | 2735 | struct nvme_fc_ctrl *ctrl; |
2736 | unsigned long flags; | 2736 | unsigned long flags; |
2737 | int ret, idx; | 2737 | int ret, idx, retry; |
2738 | 2738 | ||
2739 | if (!(rport->remoteport.port_role & | 2739 | if (!(rport->remoteport.port_role & |
2740 | (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) { | 2740 | (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) { |
@@ -2760,6 +2760,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, | |||
2760 | ctrl->rport = rport; | 2760 | ctrl->rport = rport; |
2761 | ctrl->dev = lport->dev; | 2761 | ctrl->dev = lport->dev; |
2762 | ctrl->cnum = idx; | 2762 | ctrl->cnum = idx; |
2763 | init_waitqueue_head(&ctrl->ioabort_wait); | ||
2763 | 2764 | ||
2764 | get_device(ctrl->dev); | 2765 | get_device(ctrl->dev); |
2765 | kref_init(&ctrl->ref); | 2766 | kref_init(&ctrl->ref); |
@@ -2825,9 +2826,37 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, | |||
2825 | list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list); | 2826 | list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list); |
2826 | spin_unlock_irqrestore(&rport->lock, flags); | 2827 | spin_unlock_irqrestore(&rport->lock, flags); |
2827 | 2828 | ||
2828 | ret = nvme_fc_create_association(ctrl); | 2829 | /* |
2830 | * It's possible that transactions used to create the association | ||
2831 | * may fail. Examples: CreateAssociation LS or CreateIOConnection | ||
2832 | * LS gets dropped/corrupted/fails; or a frame gets dropped or a | ||
2833 | * command times out for one of the actions to init the controller | ||
2834 | * (Connect, Get/Set_Property, Set_Features, etc). Many of these | ||
2835 | * transport errors (frame drop, LS failure) inherently must kill | ||
2836 | * the association. The transport is coded so that any command used | ||
2837 | * to create the association (prior to a LIVE state transition | ||
2838 | * while NEW or RECONNECTING) will fail if it completes in error or | ||
2839 | * times out. | ||
2840 | * | ||
2841 | * As such: as the connect request was mostly likely due to a | ||
2842 | * udev event that discovered the remote port, meaning there is | ||
2843 | * not an admin or script there to restart if the connect | ||
2844 | * request fails, retry the initial connection creation up to | ||
2845 | * three times before giving up and declaring failure. | ||
2846 | */ | ||
2847 | for (retry = 0; retry < 3; retry++) { | ||
2848 | ret = nvme_fc_create_association(ctrl); | ||
2849 | if (!ret) | ||
2850 | break; | ||
2851 | } | ||
2852 | |||
2829 | if (ret) { | 2853 | if (ret) { |
2854 | /* couldn't schedule retry - fail out */ | ||
2855 | dev_err(ctrl->ctrl.device, | ||
2856 | "NVME-FC{%d}: Connect retry failed\n", ctrl->cnum); | ||
2857 | |||
2830 | ctrl->ctrl.opts = NULL; | 2858 | ctrl->ctrl.opts = NULL; |
2859 | |||
2831 | /* initiate nvme ctrl ref counting teardown */ | 2860 | /* initiate nvme ctrl ref counting teardown */ |
2832 | nvme_uninit_ctrl(&ctrl->ctrl); | 2861 | nvme_uninit_ctrl(&ctrl->ctrl); |
2833 | nvme_put_ctrl(&ctrl->ctrl); | 2862 | nvme_put_ctrl(&ctrl->ctrl); |
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 92a03ff5fb4d..87bac27ec64b 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c | |||
@@ -571,6 +571,12 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue) | |||
571 | if (test_and_set_bit(NVME_RDMA_Q_DELETING, &queue->flags)) | 571 | if (test_and_set_bit(NVME_RDMA_Q_DELETING, &queue->flags)) |
572 | return; | 572 | return; |
573 | 573 | ||
574 | if (nvme_rdma_queue_idx(queue) == 0) { | ||
575 | nvme_rdma_free_qe(queue->device->dev, | ||
576 | &queue->ctrl->async_event_sqe, | ||
577 | sizeof(struct nvme_command), DMA_TO_DEVICE); | ||
578 | } | ||
579 | |||
574 | nvme_rdma_destroy_queue_ib(queue); | 580 | nvme_rdma_destroy_queue_ib(queue); |
575 | rdma_destroy_id(queue->cm_id); | 581 | rdma_destroy_id(queue->cm_id); |
576 | } | 582 | } |
@@ -739,8 +745,6 @@ out: | |||
739 | static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl, | 745 | static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl, |
740 | bool remove) | 746 | bool remove) |
741 | { | 747 | { |
742 | nvme_rdma_free_qe(ctrl->queues[0].device->dev, &ctrl->async_event_sqe, | ||
743 | sizeof(struct nvme_command), DMA_TO_DEVICE); | ||
744 | nvme_rdma_stop_queue(&ctrl->queues[0]); | 748 | nvme_rdma_stop_queue(&ctrl->queues[0]); |
745 | if (remove) { | 749 | if (remove) { |
746 | blk_cleanup_queue(ctrl->ctrl.admin_q); | 750 | blk_cleanup_queue(ctrl->ctrl.admin_q); |
@@ -765,8 +769,10 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl, | |||
765 | 769 | ||
766 | if (new) { | 770 | if (new) { |
767 | ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true); | 771 | ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true); |
768 | if (IS_ERR(ctrl->ctrl.admin_tagset)) | 772 | if (IS_ERR(ctrl->ctrl.admin_tagset)) { |
773 | error = PTR_ERR(ctrl->ctrl.admin_tagset); | ||
769 | goto out_free_queue; | 774 | goto out_free_queue; |
775 | } | ||
770 | 776 | ||
771 | ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set); | 777 | ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set); |
772 | if (IS_ERR(ctrl->ctrl.admin_q)) { | 778 | if (IS_ERR(ctrl->ctrl.admin_q)) { |
@@ -846,8 +852,10 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new) | |||
846 | 852 | ||
847 | if (new) { | 853 | if (new) { |
848 | ctrl->ctrl.tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, false); | 854 | ctrl->ctrl.tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, false); |
849 | if (IS_ERR(ctrl->ctrl.tagset)) | 855 | if (IS_ERR(ctrl->ctrl.tagset)) { |
856 | ret = PTR_ERR(ctrl->ctrl.tagset); | ||
850 | goto out_free_io_queues; | 857 | goto out_free_io_queues; |
858 | } | ||
851 | 859 | ||
852 | ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set); | 860 | ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set); |
853 | if (IS_ERR(ctrl->ctrl.connect_q)) { | 861 | if (IS_ERR(ctrl->ctrl.connect_q)) { |
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 1b208beeef50..645ba7eee35d 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c | |||
@@ -387,12 +387,21 @@ struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid) | |||
387 | 387 | ||
388 | static void __nvmet_req_complete(struct nvmet_req *req, u16 status) | 388 | static void __nvmet_req_complete(struct nvmet_req *req, u16 status) |
389 | { | 389 | { |
390 | u32 old_sqhd, new_sqhd; | ||
391 | u16 sqhd; | ||
392 | |||
390 | if (status) | 393 | if (status) |
391 | nvmet_set_status(req, status); | 394 | nvmet_set_status(req, status); |
392 | 395 | ||
393 | if (req->sq->size) | 396 | if (req->sq->size) { |
394 | req->sq->sqhd = (req->sq->sqhd + 1) % req->sq->size; | 397 | do { |
395 | req->rsp->sq_head = cpu_to_le16(req->sq->sqhd); | 398 | old_sqhd = req->sq->sqhd; |
399 | new_sqhd = (old_sqhd + 1) % req->sq->size; | ||
400 | } while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) != | ||
401 | old_sqhd); | ||
402 | } | ||
403 | sqhd = req->sq->sqhd & 0x0000FFFF; | ||
404 | req->rsp->sq_head = cpu_to_le16(sqhd); | ||
396 | req->rsp->sq_id = cpu_to_le16(req->sq->qid); | 405 | req->rsp->sq_id = cpu_to_le16(req->sq->qid); |
397 | req->rsp->command_id = req->cmd->common.command_id; | 406 | req->rsp->command_id = req->cmd->common.command_id; |
398 | 407 | ||
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index 7b8e20adf760..87e429bfcd8a 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h | |||
@@ -74,7 +74,7 @@ struct nvmet_sq { | |||
74 | struct percpu_ref ref; | 74 | struct percpu_ref ref; |
75 | u16 qid; | 75 | u16 qid; |
76 | u16 size; | 76 | u16 size; |
77 | u16 sqhd; | 77 | u32 sqhd; |
78 | struct completion free_done; | 78 | struct completion free_done; |
79 | struct completion confirm_done; | 79 | struct completion confirm_done; |
80 | }; | 80 | }; |
diff --git a/drivers/of/base.c b/drivers/of/base.c index 260d33c0f26c..63897531cd75 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c | |||
@@ -1781,8 +1781,12 @@ bool of_console_check(struct device_node *dn, char *name, int index) | |||
1781 | { | 1781 | { |
1782 | if (!dn || dn != of_stdout || console_set_on_cmdline) | 1782 | if (!dn || dn != of_stdout || console_set_on_cmdline) |
1783 | return false; | 1783 | return false; |
1784 | return !add_preferred_console(name, index, | 1784 | |
1785 | kstrdup(of_stdout_options, GFP_KERNEL)); | 1785 | /* |
1786 | * XXX: cast `options' to char pointer to suppress complication | ||
1787 | * warnings: printk, UART and console drivers expect char pointer. | ||
1788 | */ | ||
1789 | return !add_preferred_console(name, index, (char *)of_stdout_options); | ||
1786 | } | 1790 | } |
1787 | EXPORT_SYMBOL_GPL(of_console_check); | 1791 | EXPORT_SYMBOL_GPL(of_console_check); |
1788 | 1792 | ||
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index d94dd8b77abd..98258583abb0 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c | |||
@@ -44,7 +44,7 @@ static int of_get_phy_id(struct device_node *device, u32 *phy_id) | |||
44 | return -EINVAL; | 44 | return -EINVAL; |
45 | } | 45 | } |
46 | 46 | ||
47 | static void of_mdiobus_register_phy(struct mii_bus *mdio, | 47 | static int of_mdiobus_register_phy(struct mii_bus *mdio, |
48 | struct device_node *child, u32 addr) | 48 | struct device_node *child, u32 addr) |
49 | { | 49 | { |
50 | struct phy_device *phy; | 50 | struct phy_device *phy; |
@@ -60,9 +60,13 @@ static void of_mdiobus_register_phy(struct mii_bus *mdio, | |||
60 | else | 60 | else |
61 | phy = get_phy_device(mdio, addr, is_c45); | 61 | phy = get_phy_device(mdio, addr, is_c45); |
62 | if (IS_ERR(phy)) | 62 | if (IS_ERR(phy)) |
63 | return; | 63 | return PTR_ERR(phy); |
64 | 64 | ||
65 | rc = irq_of_parse_and_map(child, 0); | 65 | rc = of_irq_get(child, 0); |
66 | if (rc == -EPROBE_DEFER) { | ||
67 | phy_device_free(phy); | ||
68 | return rc; | ||
69 | } | ||
66 | if (rc > 0) { | 70 | if (rc > 0) { |
67 | phy->irq = rc; | 71 | phy->irq = rc; |
68 | mdio->irq[addr] = rc; | 72 | mdio->irq[addr] = rc; |
@@ -84,22 +88,23 @@ static void of_mdiobus_register_phy(struct mii_bus *mdio, | |||
84 | if (rc) { | 88 | if (rc) { |
85 | phy_device_free(phy); | 89 | phy_device_free(phy); |
86 | of_node_put(child); | 90 | of_node_put(child); |
87 | return; | 91 | return rc; |
88 | } | 92 | } |
89 | 93 | ||
90 | dev_dbg(&mdio->dev, "registered phy %s at address %i\n", | 94 | dev_dbg(&mdio->dev, "registered phy %s at address %i\n", |
91 | child->name, addr); | 95 | child->name, addr); |
96 | return 0; | ||
92 | } | 97 | } |
93 | 98 | ||
94 | static void of_mdiobus_register_device(struct mii_bus *mdio, | 99 | static int of_mdiobus_register_device(struct mii_bus *mdio, |
95 | struct device_node *child, u32 addr) | 100 | struct device_node *child, u32 addr) |
96 | { | 101 | { |
97 | struct mdio_device *mdiodev; | 102 | struct mdio_device *mdiodev; |
98 | int rc; | 103 | int rc; |
99 | 104 | ||
100 | mdiodev = mdio_device_create(mdio, addr); | 105 | mdiodev = mdio_device_create(mdio, addr); |
101 | if (IS_ERR(mdiodev)) | 106 | if (IS_ERR(mdiodev)) |
102 | return; | 107 | return PTR_ERR(mdiodev); |
103 | 108 | ||
104 | /* Associate the OF node with the device structure so it | 109 | /* Associate the OF node with the device structure so it |
105 | * can be looked up later. | 110 | * can be looked up later. |
@@ -112,11 +117,12 @@ static void of_mdiobus_register_device(struct mii_bus *mdio, | |||
112 | if (rc) { | 117 | if (rc) { |
113 | mdio_device_free(mdiodev); | 118 | mdio_device_free(mdiodev); |
114 | of_node_put(child); | 119 | of_node_put(child); |
115 | return; | 120 | return rc; |
116 | } | 121 | } |
117 | 122 | ||
118 | dev_dbg(&mdio->dev, "registered mdio device %s at address %i\n", | 123 | dev_dbg(&mdio->dev, "registered mdio device %s at address %i\n", |
119 | child->name, addr); | 124 | child->name, addr); |
125 | return 0; | ||
120 | } | 126 | } |
121 | 127 | ||
122 | /* The following is a list of PHY compatible strings which appear in | 128 | /* The following is a list of PHY compatible strings which appear in |
@@ -219,9 +225,11 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) | |||
219 | } | 225 | } |
220 | 226 | ||
221 | if (of_mdiobus_child_is_phy(child)) | 227 | if (of_mdiobus_child_is_phy(child)) |
222 | of_mdiobus_register_phy(mdio, child, addr); | 228 | rc = of_mdiobus_register_phy(mdio, child, addr); |
223 | else | 229 | else |
224 | of_mdiobus_register_device(mdio, child, addr); | 230 | rc = of_mdiobus_register_device(mdio, child, addr); |
231 | if (rc) | ||
232 | goto unregister; | ||
225 | } | 233 | } |
226 | 234 | ||
227 | if (!scanphys) | 235 | if (!scanphys) |
@@ -242,12 +250,19 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) | |||
242 | dev_info(&mdio->dev, "scan phy %s at address %i\n", | 250 | dev_info(&mdio->dev, "scan phy %s at address %i\n", |
243 | child->name, addr); | 251 | child->name, addr); |
244 | 252 | ||
245 | if (of_mdiobus_child_is_phy(child)) | 253 | if (of_mdiobus_child_is_phy(child)) { |
246 | of_mdiobus_register_phy(mdio, child, addr); | 254 | rc = of_mdiobus_register_phy(mdio, child, addr); |
255 | if (rc) | ||
256 | goto unregister; | ||
257 | } | ||
247 | } | 258 | } |
248 | } | 259 | } |
249 | 260 | ||
250 | return 0; | 261 | return 0; |
262 | |||
263 | unregister: | ||
264 | mdiobus_unregister(mdio); | ||
265 | return rc; | ||
251 | } | 266 | } |
252 | EXPORT_SYMBOL(of_mdiobus_register); | 267 | EXPORT_SYMBOL(of_mdiobus_register); |
253 | 268 | ||
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c index d507c3569a88..32771c2ced7b 100644 --- a/drivers/of/of_reserved_mem.c +++ b/drivers/of/of_reserved_mem.c | |||
@@ -25,7 +25,7 @@ | |||
25 | #include <linux/sort.h> | 25 | #include <linux/sort.h> |
26 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
27 | 27 | ||
28 | #define MAX_RESERVED_REGIONS 16 | 28 | #define MAX_RESERVED_REGIONS 32 |
29 | static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS]; | 29 | static struct reserved_mem reserved_mem[MAX_RESERVED_REGIONS]; |
30 | static int reserved_mem_count; | 30 | static int reserved_mem_count; |
31 | 31 | ||
diff --git a/drivers/of/property.c b/drivers/of/property.c index fbb72116e9d4..264c355ba1ff 100644 --- a/drivers/of/property.c +++ b/drivers/of/property.c | |||
@@ -954,7 +954,7 @@ of_fwnode_graph_get_port_parent(struct fwnode_handle *fwnode) | |||
954 | struct device_node *np; | 954 | struct device_node *np; |
955 | 955 | ||
956 | /* Get the parent of the port */ | 956 | /* Get the parent of the port */ |
957 | np = of_get_next_parent(to_of_node(fwnode)); | 957 | np = of_get_parent(to_of_node(fwnode)); |
958 | if (!np) | 958 | if (!np) |
959 | return NULL; | 959 | return NULL; |
960 | 960 | ||
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c index 89f4e3d072d7..26ed0c08f209 100644 --- a/drivers/pci/host/pci-aardvark.c +++ b/drivers/pci/host/pci-aardvark.c | |||
@@ -935,6 +935,8 @@ static int advk_pcie_probe(struct platform_device *pdev) | |||
935 | bridge->sysdata = pcie; | 935 | bridge->sysdata = pcie; |
936 | bridge->busnr = 0; | 936 | bridge->busnr = 0; |
937 | bridge->ops = &advk_pcie_ops; | 937 | bridge->ops = &advk_pcie_ops; |
938 | bridge->map_irq = of_irq_parse_and_map_pci; | ||
939 | bridge->swizzle_irq = pci_common_swizzle; | ||
938 | 940 | ||
939 | ret = pci_scan_root_bus_bridge(bridge); | 941 | ret = pci_scan_root_bus_bridge(bridge); |
940 | if (ret < 0) { | 942 | if (ret < 0) { |
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c index 9c40da54f88a..1987fec1f126 100644 --- a/drivers/pci/host/pci-tegra.c +++ b/drivers/pci/host/pci-tegra.c | |||
@@ -233,6 +233,7 @@ struct tegra_msi { | |||
233 | struct msi_controller chip; | 233 | struct msi_controller chip; |
234 | DECLARE_BITMAP(used, INT_PCI_MSI_NR); | 234 | DECLARE_BITMAP(used, INT_PCI_MSI_NR); |
235 | struct irq_domain *domain; | 235 | struct irq_domain *domain; |
236 | unsigned long pages; | ||
236 | struct mutex lock; | 237 | struct mutex lock; |
237 | u64 phys; | 238 | u64 phys; |
238 | int irq; | 239 | int irq; |
@@ -1529,22 +1530,9 @@ static int tegra_pcie_enable_msi(struct tegra_pcie *pcie) | |||
1529 | goto err; | 1530 | goto err; |
1530 | } | 1531 | } |
1531 | 1532 | ||
1532 | /* | 1533 | /* setup AFI/FPCI range */ |
1533 | * The PCI host bridge on Tegra contains some logic that intercepts | 1534 | msi->pages = __get_free_pages(GFP_KERNEL, 0); |
1534 | * MSI writes, which means that the MSI target address doesn't have | 1535 | msi->phys = virt_to_phys((void *)msi->pages); |
1535 | * to point to actual physical memory. Rather than allocating one 4 | ||
1536 | * KiB page of system memory that's never used, we can simply pick | ||
1537 | * an arbitrary address within an area reserved for system memory | ||
1538 | * in the FPCI address map. | ||
1539 | * | ||
1540 | * However, in order to avoid confusion, we pick an address that | ||
1541 | * doesn't map to physical memory. The FPCI address map reserves a | ||
1542 | * 1012 GiB region for system memory and memory-mapped I/O. Since | ||
1543 | * none of the Tegra SoCs that contain this PCI host bridge can | ||
1544 | * address more than 16 GiB of system memory, the last 4 KiB of | ||
1545 | * these 1012 GiB is a good candidate. | ||
1546 | */ | ||
1547 | msi->phys = 0xfcfffff000; | ||
1548 | 1536 | ||
1549 | afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST); | 1537 | afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST); |
1550 | afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST); | 1538 | afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST); |
@@ -1596,6 +1584,8 @@ static int tegra_pcie_disable_msi(struct tegra_pcie *pcie) | |||
1596 | afi_writel(pcie, 0, AFI_MSI_EN_VEC6); | 1584 | afi_writel(pcie, 0, AFI_MSI_EN_VEC6); |
1597 | afi_writel(pcie, 0, AFI_MSI_EN_VEC7); | 1585 | afi_writel(pcie, 0, AFI_MSI_EN_VEC7); |
1598 | 1586 | ||
1587 | free_pages(msi->pages, 0); | ||
1588 | |||
1599 | if (msi->irq > 0) | 1589 | if (msi->irq > 0) |
1600 | free_irq(msi->irq, pcie); | 1590 | free_irq(msi->irq, pcie); |
1601 | 1591 | ||
diff --git a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c index 73ebad6634a7..89c887ea5557 100644 --- a/drivers/phy/marvell/phy-mvebu-cp110-comphy.c +++ b/drivers/phy/marvell/phy-mvebu-cp110-comphy.c | |||
@@ -111,6 +111,8 @@ | |||
111 | #define MVEBU_COMPHY_CONF6_40B BIT(18) | 111 | #define MVEBU_COMPHY_CONF6_40B BIT(18) |
112 | #define MVEBU_COMPHY_SELECTOR 0x1140 | 112 | #define MVEBU_COMPHY_SELECTOR 0x1140 |
113 | #define MVEBU_COMPHY_SELECTOR_PHY(n) ((n) * 0x4) | 113 | #define MVEBU_COMPHY_SELECTOR_PHY(n) ((n) * 0x4) |
114 | #define MVEBU_COMPHY_PIPE_SELECTOR 0x1144 | ||
115 | #define MVEBU_COMPHY_PIPE_SELECTOR_PIPE(n) ((n) * 0x4) | ||
114 | 116 | ||
115 | #define MVEBU_COMPHY_LANES 6 | 117 | #define MVEBU_COMPHY_LANES 6 |
116 | #define MVEBU_COMPHY_PORTS 3 | 118 | #define MVEBU_COMPHY_PORTS 3 |
@@ -468,13 +470,17 @@ static int mvebu_comphy_power_on(struct phy *phy) | |||
468 | { | 470 | { |
469 | struct mvebu_comphy_lane *lane = phy_get_drvdata(phy); | 471 | struct mvebu_comphy_lane *lane = phy_get_drvdata(phy); |
470 | struct mvebu_comphy_priv *priv = lane->priv; | 472 | struct mvebu_comphy_priv *priv = lane->priv; |
471 | int ret; | 473 | int ret, mux; |
472 | u32 mux, val; | 474 | u32 val; |
473 | 475 | ||
474 | mux = mvebu_comphy_get_mux(lane->id, lane->port, lane->mode); | 476 | mux = mvebu_comphy_get_mux(lane->id, lane->port, lane->mode); |
475 | if (mux < 0) | 477 | if (mux < 0) |
476 | return -ENOTSUPP; | 478 | return -ENOTSUPP; |
477 | 479 | ||
480 | regmap_read(priv->regmap, MVEBU_COMPHY_PIPE_SELECTOR, &val); | ||
481 | val &= ~(0xf << MVEBU_COMPHY_PIPE_SELECTOR_PIPE(lane->id)); | ||
482 | regmap_write(priv->regmap, MVEBU_COMPHY_PIPE_SELECTOR, val); | ||
483 | |||
478 | regmap_read(priv->regmap, MVEBU_COMPHY_SELECTOR, &val); | 484 | regmap_read(priv->regmap, MVEBU_COMPHY_SELECTOR, &val); |
479 | val &= ~(0xf << MVEBU_COMPHY_SELECTOR_PHY(lane->id)); | 485 | val &= ~(0xf << MVEBU_COMPHY_SELECTOR_PHY(lane->id)); |
480 | val |= mux << MVEBU_COMPHY_SELECTOR_PHY(lane->id); | 486 | val |= mux << MVEBU_COMPHY_SELECTOR_PHY(lane->id); |
@@ -526,6 +532,10 @@ static int mvebu_comphy_power_off(struct phy *phy) | |||
526 | val &= ~(0xf << MVEBU_COMPHY_SELECTOR_PHY(lane->id)); | 532 | val &= ~(0xf << MVEBU_COMPHY_SELECTOR_PHY(lane->id)); |
527 | regmap_write(priv->regmap, MVEBU_COMPHY_SELECTOR, val); | 533 | regmap_write(priv->regmap, MVEBU_COMPHY_SELECTOR, val); |
528 | 534 | ||
535 | regmap_read(priv->regmap, MVEBU_COMPHY_PIPE_SELECTOR, &val); | ||
536 | val &= ~(0xf << MVEBU_COMPHY_PIPE_SELECTOR_PIPE(lane->id)); | ||
537 | regmap_write(priv->regmap, MVEBU_COMPHY_PIPE_SELECTOR, val); | ||
538 | |||
529 | return 0; | 539 | return 0; |
530 | } | 540 | } |
531 | 541 | ||
@@ -576,8 +586,8 @@ static int mvebu_comphy_probe(struct platform_device *pdev) | |||
576 | return PTR_ERR(priv->regmap); | 586 | return PTR_ERR(priv->regmap); |
577 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 587 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
578 | priv->base = devm_ioremap_resource(&pdev->dev, res); | 588 | priv->base = devm_ioremap_resource(&pdev->dev, res); |
579 | if (!priv->base) | 589 | if (IS_ERR(priv->base)) |
580 | return -ENOMEM; | 590 | return PTR_ERR(priv->base); |
581 | 591 | ||
582 | for_each_available_child_of_node(pdev->dev.of_node, child) { | 592 | for_each_available_child_of_node(pdev->dev.of_node, child) { |
583 | struct mvebu_comphy_lane *lane; | 593 | struct mvebu_comphy_lane *lane; |
diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c index e3baad78521f..721a2a1c97ef 100644 --- a/drivers/phy/mediatek/phy-mtk-tphy.c +++ b/drivers/phy/mediatek/phy-mtk-tphy.c | |||
@@ -27,6 +27,7 @@ | |||
27 | /* banks shared by multiple phys */ | 27 | /* banks shared by multiple phys */ |
28 | #define SSUSB_SIFSLV_V1_SPLLC 0x000 /* shared by u3 phys */ | 28 | #define SSUSB_SIFSLV_V1_SPLLC 0x000 /* shared by u3 phys */ |
29 | #define SSUSB_SIFSLV_V1_U2FREQ 0x100 /* shared by u2 phys */ | 29 | #define SSUSB_SIFSLV_V1_U2FREQ 0x100 /* shared by u2 phys */ |
30 | #define SSUSB_SIFSLV_V1_CHIP 0x300 /* shared by u3 phys */ | ||
30 | /* u2 phy bank */ | 31 | /* u2 phy bank */ |
31 | #define SSUSB_SIFSLV_V1_U2PHY_COM 0x000 | 32 | #define SSUSB_SIFSLV_V1_U2PHY_COM 0x000 |
32 | /* u3/pcie/sata phy banks */ | 33 | /* u3/pcie/sata phy banks */ |
@@ -762,7 +763,7 @@ static void phy_v1_banks_init(struct mtk_tphy *tphy, | |||
762 | case PHY_TYPE_USB3: | 763 | case PHY_TYPE_USB3: |
763 | case PHY_TYPE_PCIE: | 764 | case PHY_TYPE_PCIE: |
764 | u3_banks->spllc = tphy->sif_base + SSUSB_SIFSLV_V1_SPLLC; | 765 | u3_banks->spllc = tphy->sif_base + SSUSB_SIFSLV_V1_SPLLC; |
765 | u3_banks->chip = NULL; | 766 | u3_banks->chip = tphy->sif_base + SSUSB_SIFSLV_V1_CHIP; |
766 | u3_banks->phyd = instance->port_base + SSUSB_SIFSLV_V1_U3PHYD; | 767 | u3_banks->phyd = instance->port_base + SSUSB_SIFSLV_V1_U3PHYD; |
767 | u3_banks->phya = instance->port_base + SSUSB_SIFSLV_V1_U3PHYA; | 768 | u3_banks->phya = instance->port_base + SSUSB_SIFSLV_V1_U3PHYA; |
768 | break; | 769 | break; |
diff --git a/drivers/phy/rockchip/phy-rockchip-typec.c b/drivers/phy/rockchip/phy-rockchip-typec.c index 4d2c57f21d76..a958c9bced01 100644 --- a/drivers/phy/rockchip/phy-rockchip-typec.c +++ b/drivers/phy/rockchip/phy-rockchip-typec.c | |||
@@ -443,14 +443,34 @@ static inline int property_enable(struct rockchip_typec_phy *tcphy, | |||
443 | return regmap_write(tcphy->grf_regs, reg->offset, val | mask); | 443 | return regmap_write(tcphy->grf_regs, reg->offset, val | mask); |
444 | } | 444 | } |
445 | 445 | ||
446 | static void tcphy_dp_aux_set_flip(struct rockchip_typec_phy *tcphy) | ||
447 | { | ||
448 | u16 tx_ana_ctrl_reg_1; | ||
449 | |||
450 | /* | ||
451 | * Select the polarity of the xcvr: | ||
452 | * 1, Reverses the polarity (If TYPEC, Pulls ups aux_p and pull | ||
453 | * down aux_m) | ||
454 | * 0, Normal polarity (if TYPEC, pulls up aux_m and pulls down | ||
455 | * aux_p) | ||
456 | */ | ||
457 | tx_ana_ctrl_reg_1 = readl(tcphy->base + TX_ANA_CTRL_REG_1); | ||
458 | if (!tcphy->flip) | ||
459 | tx_ana_ctrl_reg_1 |= BIT(12); | ||
460 | else | ||
461 | tx_ana_ctrl_reg_1 &= ~BIT(12); | ||
462 | writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1); | ||
463 | } | ||
464 | |||
446 | static void tcphy_dp_aux_calibration(struct rockchip_typec_phy *tcphy) | 465 | static void tcphy_dp_aux_calibration(struct rockchip_typec_phy *tcphy) |
447 | { | 466 | { |
467 | u16 tx_ana_ctrl_reg_1; | ||
448 | u16 rdata, rdata2, val; | 468 | u16 rdata, rdata2, val; |
449 | 469 | ||
450 | /* disable txda_cal_latch_en for rewrite the calibration values */ | 470 | /* disable txda_cal_latch_en for rewrite the calibration values */ |
451 | rdata = readl(tcphy->base + TX_ANA_CTRL_REG_1); | 471 | tx_ana_ctrl_reg_1 = readl(tcphy->base + TX_ANA_CTRL_REG_1); |
452 | val = rdata & 0xdfff; | 472 | tx_ana_ctrl_reg_1 &= ~BIT(13); |
453 | writel(val, tcphy->base + TX_ANA_CTRL_REG_1); | 473 | writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1); |
454 | 474 | ||
455 | /* | 475 | /* |
456 | * read a resistor calibration code from CMN_TXPUCAL_CTRL[6:0] and | 476 | * read a resistor calibration code from CMN_TXPUCAL_CTRL[6:0] and |
@@ -472,9 +492,8 @@ static void tcphy_dp_aux_calibration(struct rockchip_typec_phy *tcphy) | |||
472 | * Activate this signal for 1 clock cycle to sample new calibration | 492 | * Activate this signal for 1 clock cycle to sample new calibration |
473 | * values. | 493 | * values. |
474 | */ | 494 | */ |
475 | rdata = readl(tcphy->base + TX_ANA_CTRL_REG_1); | 495 | tx_ana_ctrl_reg_1 |= BIT(13); |
476 | val = rdata | 0x2000; | 496 | writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1); |
477 | writel(val, tcphy->base + TX_ANA_CTRL_REG_1); | ||
478 | usleep_range(150, 200); | 497 | usleep_range(150, 200); |
479 | 498 | ||
480 | /* set TX Voltage Level and TX Deemphasis to 0 */ | 499 | /* set TX Voltage Level and TX Deemphasis to 0 */ |
@@ -482,8 +501,10 @@ static void tcphy_dp_aux_calibration(struct rockchip_typec_phy *tcphy) | |||
482 | /* re-enable decap */ | 501 | /* re-enable decap */ |
483 | writel(0x100, tcphy->base + TX_ANA_CTRL_REG_2); | 502 | writel(0x100, tcphy->base + TX_ANA_CTRL_REG_2); |
484 | writel(0x300, tcphy->base + TX_ANA_CTRL_REG_2); | 503 | writel(0x300, tcphy->base + TX_ANA_CTRL_REG_2); |
485 | writel(0x2008, tcphy->base + TX_ANA_CTRL_REG_1); | 504 | tx_ana_ctrl_reg_1 |= BIT(3); |
486 | writel(0x2018, tcphy->base + TX_ANA_CTRL_REG_1); | 505 | writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1); |
506 | tx_ana_ctrl_reg_1 |= BIT(4); | ||
507 | writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1); | ||
487 | 508 | ||
488 | writel(0, tcphy->base + TX_ANA_CTRL_REG_5); | 509 | writel(0, tcphy->base + TX_ANA_CTRL_REG_5); |
489 | 510 | ||
@@ -494,8 +515,10 @@ static void tcphy_dp_aux_calibration(struct rockchip_typec_phy *tcphy) | |||
494 | writel(0x1001, tcphy->base + TX_ANA_CTRL_REG_4); | 515 | writel(0x1001, tcphy->base + TX_ANA_CTRL_REG_4); |
495 | 516 | ||
496 | /* re-enables Bandgap reference for LDO */ | 517 | /* re-enables Bandgap reference for LDO */ |
497 | writel(0x2098, tcphy->base + TX_ANA_CTRL_REG_1); | 518 | tx_ana_ctrl_reg_1 |= BIT(7); |
498 | writel(0x2198, tcphy->base + TX_ANA_CTRL_REG_1); | 519 | writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1); |
520 | tx_ana_ctrl_reg_1 |= BIT(8); | ||
521 | writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1); | ||
499 | 522 | ||
500 | /* | 523 | /* |
501 | * re-enables the transmitter pre-driver, driver data selection MUX, | 524 | * re-enables the transmitter pre-driver, driver data selection MUX, |
@@ -505,27 +528,26 @@ static void tcphy_dp_aux_calibration(struct rockchip_typec_phy *tcphy) | |||
505 | writel(0x303, tcphy->base + TX_ANA_CTRL_REG_2); | 528 | writel(0x303, tcphy->base + TX_ANA_CTRL_REG_2); |
506 | 529 | ||
507 | /* | 530 | /* |
508 | * BIT 12: Controls auxda_polarity, which selects the polarity of the | 531 | * Do some magic undocumented stuff, some of which appears to |
509 | * xcvr: | 532 | * undo the "re-enables Bandgap reference for LDO" above. |
510 | * 1, Reverses the polarity (If TYPEC, Pulls ups aux_p and pull | ||
511 | * down aux_m) | ||
512 | * 0, Normal polarity (if TYPE_C, pulls up aux_m and pulls down | ||
513 | * aux_p) | ||
514 | */ | 533 | */ |
515 | val = 0xa078; | 534 | tx_ana_ctrl_reg_1 |= BIT(15); |
516 | if (!tcphy->flip) | 535 | tx_ana_ctrl_reg_1 &= ~BIT(8); |
517 | val |= BIT(12); | 536 | tx_ana_ctrl_reg_1 &= ~BIT(7); |
518 | writel(val, tcphy->base + TX_ANA_CTRL_REG_1); | 537 | tx_ana_ctrl_reg_1 |= BIT(6); |
538 | tx_ana_ctrl_reg_1 |= BIT(5); | ||
539 | writel(tx_ana_ctrl_reg_1, tcphy->base + TX_ANA_CTRL_REG_1); | ||
519 | 540 | ||
520 | writel(0, tcphy->base + TX_ANA_CTRL_REG_3); | 541 | writel(0, tcphy->base + TX_ANA_CTRL_REG_3); |
521 | writel(0, tcphy->base + TX_ANA_CTRL_REG_4); | 542 | writel(0, tcphy->base + TX_ANA_CTRL_REG_4); |
522 | writel(0, tcphy->base + TX_ANA_CTRL_REG_5); | 543 | writel(0, tcphy->base + TX_ANA_CTRL_REG_5); |
523 | 544 | ||
524 | /* | 545 | /* |
525 | * Controls low_power_swing_en, set the voltage swing of the driver | 546 | * Controls low_power_swing_en, don't set the voltage swing of the |
526 | * to 400mv. The values below are peak to peak (differential) values. | 547 | * driver to 400mv. The values below are peak to peak (differential) |
548 | * values. | ||
527 | */ | 549 | */ |
528 | writel(4, tcphy->base + TXDA_COEFF_CALC_CTRL); | 550 | writel(0, tcphy->base + TXDA_COEFF_CALC_CTRL); |
529 | writel(0, tcphy->base + TXDA_CYA_AUXDA_CYA); | 551 | writel(0, tcphy->base + TXDA_CYA_AUXDA_CYA); |
530 | 552 | ||
531 | /* Controls tx_high_z_tm_en */ | 553 | /* Controls tx_high_z_tm_en */ |
@@ -555,6 +577,7 @@ static int tcphy_phy_init(struct rockchip_typec_phy *tcphy, u8 mode) | |||
555 | reset_control_deassert(tcphy->tcphy_rst); | 577 | reset_control_deassert(tcphy->tcphy_rst); |
556 | 578 | ||
557 | property_enable(tcphy, &cfg->typec_conn_dir, tcphy->flip); | 579 | property_enable(tcphy, &cfg->typec_conn_dir, tcphy->flip); |
580 | tcphy_dp_aux_set_flip(tcphy); | ||
558 | 581 | ||
559 | tcphy_cfg_24m(tcphy); | 582 | tcphy_cfg_24m(tcphy); |
560 | 583 | ||
@@ -685,8 +708,11 @@ static int rockchip_usb3_phy_power_on(struct phy *phy) | |||
685 | if (tcphy->mode == new_mode) | 708 | if (tcphy->mode == new_mode) |
686 | goto unlock_ret; | 709 | goto unlock_ret; |
687 | 710 | ||
688 | if (tcphy->mode == MODE_DISCONNECT) | 711 | if (tcphy->mode == MODE_DISCONNECT) { |
689 | tcphy_phy_init(tcphy, new_mode); | 712 | ret = tcphy_phy_init(tcphy, new_mode); |
713 | if (ret) | ||
714 | goto unlock_ret; | ||
715 | } | ||
690 | 716 | ||
691 | /* wait TCPHY for pipe ready */ | 717 | /* wait TCPHY for pipe ready */ |
692 | for (timeout = 0; timeout < 100; timeout++) { | 718 | for (timeout = 0; timeout < 100; timeout++) { |
@@ -760,10 +786,12 @@ static int rockchip_dp_phy_power_on(struct phy *phy) | |||
760 | */ | 786 | */ |
761 | if (new_mode == MODE_DFP_DP && tcphy->mode != MODE_DISCONNECT) { | 787 | if (new_mode == MODE_DFP_DP && tcphy->mode != MODE_DISCONNECT) { |
762 | tcphy_phy_deinit(tcphy); | 788 | tcphy_phy_deinit(tcphy); |
763 | tcphy_phy_init(tcphy, new_mode); | 789 | ret = tcphy_phy_init(tcphy, new_mode); |
764 | } else if (tcphy->mode == MODE_DISCONNECT) { | 790 | } else if (tcphy->mode == MODE_DISCONNECT) { |
765 | tcphy_phy_init(tcphy, new_mode); | 791 | ret = tcphy_phy_init(tcphy, new_mode); |
766 | } | 792 | } |
793 | if (ret) | ||
794 | goto unlock_ret; | ||
767 | 795 | ||
768 | ret = readx_poll_timeout(readl, tcphy->base + DP_MODE_CTL, | 796 | ret = readx_poll_timeout(readl, tcphy->base + DP_MODE_CTL, |
769 | val, val & DP_MODE_A2, 1000, | 797 | val, val & DP_MODE_A2, 1000, |
diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c index 3cbcb2537657..4307bf0013e1 100644 --- a/drivers/phy/tegra/xusb.c +++ b/drivers/phy/tegra/xusb.c | |||
@@ -454,6 +454,8 @@ tegra_xusb_find_port_node(struct tegra_xusb_padctl *padctl, const char *type, | |||
454 | char *name; | 454 | char *name; |
455 | 455 | ||
456 | name = kasprintf(GFP_KERNEL, "%s-%u", type, index); | 456 | name = kasprintf(GFP_KERNEL, "%s-%u", type, index); |
457 | if (!name) | ||
458 | return ERR_PTR(-ENOMEM); | ||
457 | np = of_find_node_by_name(np, name); | 459 | np = of_find_node_by_name(np, name); |
458 | kfree(name); | 460 | kfree(name); |
459 | } | 461 | } |
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index 1778cf4f81c7..82cd8b08d71f 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig | |||
@@ -100,6 +100,7 @@ config PINCTRL_AMD | |||
100 | tristate "AMD GPIO pin control" | 100 | tristate "AMD GPIO pin control" |
101 | depends on GPIOLIB | 101 | depends on GPIOLIB |
102 | select GPIOLIB_IRQCHIP | 102 | select GPIOLIB_IRQCHIP |
103 | select PINMUX | ||
103 | select PINCONF | 104 | select PINCONF |
104 | select GENERIC_PINCONF | 105 | select GENERIC_PINCONF |
105 | help | 106 | help |
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c index 0944310225db..ff782445dfb7 100644 --- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c +++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c | |||
@@ -373,16 +373,12 @@ static void bcm2835_gpio_irq_handle_bank(struct bcm2835_pinctrl *pc, | |||
373 | unsigned long events; | 373 | unsigned long events; |
374 | unsigned offset; | 374 | unsigned offset; |
375 | unsigned gpio; | 375 | unsigned gpio; |
376 | unsigned int type; | ||
377 | 376 | ||
378 | events = bcm2835_gpio_rd(pc, GPEDS0 + bank * 4); | 377 | events = bcm2835_gpio_rd(pc, GPEDS0 + bank * 4); |
379 | events &= mask; | 378 | events &= mask; |
380 | events &= pc->enabled_irq_map[bank]; | 379 | events &= pc->enabled_irq_map[bank]; |
381 | for_each_set_bit(offset, &events, 32) { | 380 | for_each_set_bit(offset, &events, 32) { |
382 | gpio = (32 * bank) + offset; | 381 | gpio = (32 * bank) + offset; |
383 | /* FIXME: no clue why the code looks up the type here */ | ||
384 | type = pc->irq_type[gpio]; | ||
385 | |||
386 | generic_handle_irq(irq_linear_revmap(pc->gpio_chip.irqdomain, | 382 | generic_handle_irq(irq_linear_revmap(pc->gpio_chip.irqdomain, |
387 | gpio)); | 383 | gpio)); |
388 | } | 384 | } |
diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index 04e929fd0ffe..fadbca907c7c 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c | |||
@@ -1577,6 +1577,7 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) | |||
1577 | struct gpio_chip *chip = &pctrl->chip; | 1577 | struct gpio_chip *chip = &pctrl->chip; |
1578 | bool need_valid_mask = !dmi_check_system(chv_no_valid_mask); | 1578 | bool need_valid_mask = !dmi_check_system(chv_no_valid_mask); |
1579 | int ret, i, offset; | 1579 | int ret, i, offset; |
1580 | int irq_base; | ||
1580 | 1581 | ||
1581 | *chip = chv_gpio_chip; | 1582 | *chip = chv_gpio_chip; |
1582 | 1583 | ||
@@ -1622,7 +1623,18 @@ static int chv_gpio_probe(struct chv_pinctrl *pctrl, int irq) | |||
1622 | /* Clear all interrupts */ | 1623 | /* Clear all interrupts */ |
1623 | chv_writel(0xffff, pctrl->regs + CHV_INTSTAT); | 1624 | chv_writel(0xffff, pctrl->regs + CHV_INTSTAT); |
1624 | 1625 | ||
1625 | ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, 0, | 1626 | if (!need_valid_mask) { |
1627 | irq_base = devm_irq_alloc_descs(pctrl->dev, -1, 0, | ||
1628 | chip->ngpio, NUMA_NO_NODE); | ||
1629 | if (irq_base < 0) { | ||
1630 | dev_err(pctrl->dev, "Failed to allocate IRQ numbers\n"); | ||
1631 | return irq_base; | ||
1632 | } | ||
1633 | } else { | ||
1634 | irq_base = 0; | ||
1635 | } | ||
1636 | |||
1637 | ret = gpiochip_irqchip_add(chip, &chv_gpio_irqchip, irq_base, | ||
1626 | handle_bad_irq, IRQ_TYPE_NONE); | 1638 | handle_bad_irq, IRQ_TYPE_NONE); |
1627 | if (ret) { | 1639 | if (ret) { |
1628 | dev_err(pctrl->dev, "failed to add IRQ chip\n"); | 1640 | dev_err(pctrl->dev, "failed to add IRQ chip\n"); |
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index 3f6b34febbf1..433af328d981 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c | |||
@@ -534,8 +534,16 @@ static irqreturn_t amd_gpio_irq_handler(int irq, void *dev_id) | |||
534 | continue; | 534 | continue; |
535 | irq = irq_find_mapping(gc->irqdomain, irqnr + i); | 535 | irq = irq_find_mapping(gc->irqdomain, irqnr + i); |
536 | generic_handle_irq(irq); | 536 | generic_handle_irq(irq); |
537 | /* Clear interrupt */ | 537 | |
538 | /* Clear interrupt. | ||
539 | * We must read the pin register again, in case the | ||
540 | * value was changed while executing | ||
541 | * generic_handle_irq() above. | ||
542 | */ | ||
543 | raw_spin_lock_irqsave(&gpio_dev->lock, flags); | ||
544 | regval = readl(regs + i); | ||
538 | writel(regval, regs + i); | 545 | writel(regval, regs + i); |
546 | raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); | ||
539 | ret = IRQ_HANDLED; | 547 | ret = IRQ_HANDLED; |
540 | } | 548 | } |
541 | } | 549 | } |
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c index 3e40d4245512..9c950bbf07ba 100644 --- a/drivers/pinctrl/pinctrl-mcp23s08.c +++ b/drivers/pinctrl/pinctrl-mcp23s08.c | |||
@@ -407,10 +407,10 @@ static int mcp23s08_get(struct gpio_chip *chip, unsigned offset) | |||
407 | ret = mcp_read(mcp, MCP_GPIO, &status); | 407 | ret = mcp_read(mcp, MCP_GPIO, &status); |
408 | if (ret < 0) | 408 | if (ret < 0) |
409 | status = 0; | 409 | status = 0; |
410 | else | 410 | else { |
411 | mcp->cached_gpio = status; | ||
411 | status = !!(status & (1 << offset)); | 412 | status = !!(status & (1 << offset)); |
412 | 413 | } | |
413 | mcp->cached_gpio = status; | ||
414 | 414 | ||
415 | mutex_unlock(&mcp->lock); | 415 | mutex_unlock(&mcp->lock); |
416 | return status; | 416 | return status; |
diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c index bb792a52248b..e03fa31446ca 100644 --- a/drivers/platform/x86/intel_pmc_ipc.c +++ b/drivers/platform/x86/intel_pmc_ipc.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/suspend.h> | 33 | #include <linux/suspend.h> |
34 | #include <linux/acpi.h> | 34 | #include <linux/acpi.h> |
35 | #include <linux/io-64-nonatomic-lo-hi.h> | 35 | #include <linux/io-64-nonatomic-lo-hi.h> |
36 | #include <linux/spinlock.h> | ||
36 | 37 | ||
37 | #include <asm/intel_pmc_ipc.h> | 38 | #include <asm/intel_pmc_ipc.h> |
38 | 39 | ||
@@ -131,6 +132,7 @@ static struct intel_pmc_ipc_dev { | |||
131 | /* gcr */ | 132 | /* gcr */ |
132 | void __iomem *gcr_mem_base; | 133 | void __iomem *gcr_mem_base; |
133 | bool has_gcr_regs; | 134 | bool has_gcr_regs; |
135 | spinlock_t gcr_lock; | ||
134 | 136 | ||
135 | /* punit */ | 137 | /* punit */ |
136 | struct platform_device *punit_dev; | 138 | struct platform_device *punit_dev; |
@@ -225,17 +227,17 @@ int intel_pmc_gcr_read(u32 offset, u32 *data) | |||
225 | { | 227 | { |
226 | int ret; | 228 | int ret; |
227 | 229 | ||
228 | mutex_lock(&ipclock); | 230 | spin_lock(&ipcdev.gcr_lock); |
229 | 231 | ||
230 | ret = is_gcr_valid(offset); | 232 | ret = is_gcr_valid(offset); |
231 | if (ret < 0) { | 233 | if (ret < 0) { |
232 | mutex_unlock(&ipclock); | 234 | spin_unlock(&ipcdev.gcr_lock); |
233 | return ret; | 235 | return ret; |
234 | } | 236 | } |
235 | 237 | ||
236 | *data = readl(ipcdev.gcr_mem_base + offset); | 238 | *data = readl(ipcdev.gcr_mem_base + offset); |
237 | 239 | ||
238 | mutex_unlock(&ipclock); | 240 | spin_unlock(&ipcdev.gcr_lock); |
239 | 241 | ||
240 | return 0; | 242 | return 0; |
241 | } | 243 | } |
@@ -255,17 +257,17 @@ int intel_pmc_gcr_write(u32 offset, u32 data) | |||
255 | { | 257 | { |
256 | int ret; | 258 | int ret; |
257 | 259 | ||
258 | mutex_lock(&ipclock); | 260 | spin_lock(&ipcdev.gcr_lock); |
259 | 261 | ||
260 | ret = is_gcr_valid(offset); | 262 | ret = is_gcr_valid(offset); |
261 | if (ret < 0) { | 263 | if (ret < 0) { |
262 | mutex_unlock(&ipclock); | 264 | spin_unlock(&ipcdev.gcr_lock); |
263 | return ret; | 265 | return ret; |
264 | } | 266 | } |
265 | 267 | ||
266 | writel(data, ipcdev.gcr_mem_base + offset); | 268 | writel(data, ipcdev.gcr_mem_base + offset); |
267 | 269 | ||
268 | mutex_unlock(&ipclock); | 270 | spin_unlock(&ipcdev.gcr_lock); |
269 | 271 | ||
270 | return 0; | 272 | return 0; |
271 | } | 273 | } |
@@ -287,7 +289,7 @@ int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val) | |||
287 | u32 new_val; | 289 | u32 new_val; |
288 | int ret = 0; | 290 | int ret = 0; |
289 | 291 | ||
290 | mutex_lock(&ipclock); | 292 | spin_lock(&ipcdev.gcr_lock); |
291 | 293 | ||
292 | ret = is_gcr_valid(offset); | 294 | ret = is_gcr_valid(offset); |
293 | if (ret < 0) | 295 | if (ret < 0) |
@@ -309,7 +311,7 @@ int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val) | |||
309 | } | 311 | } |
310 | 312 | ||
311 | gcr_ipc_unlock: | 313 | gcr_ipc_unlock: |
312 | mutex_unlock(&ipclock); | 314 | spin_unlock(&ipcdev.gcr_lock); |
313 | return ret; | 315 | return ret; |
314 | } | 316 | } |
315 | EXPORT_SYMBOL_GPL(intel_pmc_gcr_update); | 317 | EXPORT_SYMBOL_GPL(intel_pmc_gcr_update); |
@@ -480,52 +482,41 @@ static irqreturn_t ioc(int irq, void *dev_id) | |||
480 | 482 | ||
481 | static int ipc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) | 483 | static int ipc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) |
482 | { | 484 | { |
483 | resource_size_t pci_resource; | 485 | struct intel_pmc_ipc_dev *pmc = &ipcdev; |
484 | int ret; | 486 | int ret; |
485 | int len; | ||
486 | 487 | ||
487 | ipcdev.dev = &pci_dev_get(pdev)->dev; | 488 | /* Only one PMC is supported */ |
488 | ipcdev.irq_mode = IPC_TRIGGER_MODE_IRQ; | 489 | if (pmc->dev) |
490 | return -EBUSY; | ||
489 | 491 | ||
490 | ret = pci_enable_device(pdev); | 492 | pmc->irq_mode = IPC_TRIGGER_MODE_IRQ; |
493 | |||
494 | spin_lock_init(&ipcdev.gcr_lock); | ||
495 | |||
496 | ret = pcim_enable_device(pdev); | ||
491 | if (ret) | 497 | if (ret) |
492 | return ret; | 498 | return ret; |
493 | 499 | ||
494 | ret = pci_request_regions(pdev, "intel_pmc_ipc"); | 500 | ret = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev)); |
495 | if (ret) | 501 | if (ret) |
496 | return ret; | 502 | return ret; |
497 | 503 | ||
498 | pci_resource = pci_resource_start(pdev, 0); | 504 | init_completion(&pmc->cmd_complete); |
499 | len = pci_resource_len(pdev, 0); | ||
500 | if (!pci_resource || !len) { | ||
501 | dev_err(&pdev->dev, "Failed to get resource\n"); | ||
502 | return -ENOMEM; | ||
503 | } | ||
504 | 505 | ||
505 | init_completion(&ipcdev.cmd_complete); | 506 | pmc->ipc_base = pcim_iomap_table(pdev)[0]; |
506 | 507 | ||
507 | if (request_irq(pdev->irq, ioc, 0, "intel_pmc_ipc", &ipcdev)) { | 508 | ret = devm_request_irq(&pdev->dev, pdev->irq, ioc, 0, "intel_pmc_ipc", |
509 | pmc); | ||
510 | if (ret) { | ||
508 | dev_err(&pdev->dev, "Failed to request irq\n"); | 511 | dev_err(&pdev->dev, "Failed to request irq\n"); |
509 | return -EBUSY; | 512 | return ret; |
510 | } | 513 | } |
511 | 514 | ||
512 | ipcdev.ipc_base = ioremap_nocache(pci_resource, len); | 515 | pmc->dev = &pdev->dev; |
513 | if (!ipcdev.ipc_base) { | ||
514 | dev_err(&pdev->dev, "Failed to ioremap ipc base\n"); | ||
515 | free_irq(pdev->irq, &ipcdev); | ||
516 | ret = -ENOMEM; | ||
517 | } | ||
518 | 516 | ||
519 | return ret; | 517 | pci_set_drvdata(pdev, pmc); |
520 | } | ||
521 | 518 | ||
522 | static void ipc_pci_remove(struct pci_dev *pdev) | 519 | return 0; |
523 | { | ||
524 | free_irq(pdev->irq, &ipcdev); | ||
525 | pci_release_regions(pdev); | ||
526 | pci_dev_put(pdev); | ||
527 | iounmap(ipcdev.ipc_base); | ||
528 | ipcdev.dev = NULL; | ||
529 | } | 520 | } |
530 | 521 | ||
531 | static const struct pci_device_id ipc_pci_ids[] = { | 522 | static const struct pci_device_id ipc_pci_ids[] = { |
@@ -540,7 +531,6 @@ static struct pci_driver ipc_pci_driver = { | |||
540 | .name = "intel_pmc_ipc", | 531 | .name = "intel_pmc_ipc", |
541 | .id_table = ipc_pci_ids, | 532 | .id_table = ipc_pci_ids, |
542 | .probe = ipc_pci_probe, | 533 | .probe = ipc_pci_probe, |
543 | .remove = ipc_pci_remove, | ||
544 | }; | 534 | }; |
545 | 535 | ||
546 | static ssize_t intel_pmc_ipc_simple_cmd_store(struct device *dev, | 536 | static ssize_t intel_pmc_ipc_simple_cmd_store(struct device *dev, |
@@ -850,17 +840,12 @@ static int ipc_plat_get_res(struct platform_device *pdev) | |||
850 | return -ENXIO; | 840 | return -ENXIO; |
851 | } | 841 | } |
852 | size = PLAT_RESOURCE_IPC_SIZE + PLAT_RESOURCE_GCR_SIZE; | 842 | size = PLAT_RESOURCE_IPC_SIZE + PLAT_RESOURCE_GCR_SIZE; |
843 | res->end = res->start + size - 1; | ||
844 | |||
845 | addr = devm_ioremap_resource(&pdev->dev, res); | ||
846 | if (IS_ERR(addr)) | ||
847 | return PTR_ERR(addr); | ||
853 | 848 | ||
854 | if (!request_mem_region(res->start, size, pdev->name)) { | ||
855 | dev_err(&pdev->dev, "Failed to request ipc resource\n"); | ||
856 | return -EBUSY; | ||
857 | } | ||
858 | addr = ioremap_nocache(res->start, size); | ||
859 | if (!addr) { | ||
860 | dev_err(&pdev->dev, "I/O memory remapping failed\n"); | ||
861 | release_mem_region(res->start, size); | ||
862 | return -ENOMEM; | ||
863 | } | ||
864 | ipcdev.ipc_base = addr; | 849 | ipcdev.ipc_base = addr; |
865 | 850 | ||
866 | ipcdev.gcr_mem_base = addr + PLAT_RESOURCE_GCR_OFFSET; | 851 | ipcdev.gcr_mem_base = addr + PLAT_RESOURCE_GCR_OFFSET; |
@@ -917,12 +902,12 @@ MODULE_DEVICE_TABLE(acpi, ipc_acpi_ids); | |||
917 | 902 | ||
918 | static int ipc_plat_probe(struct platform_device *pdev) | 903 | static int ipc_plat_probe(struct platform_device *pdev) |
919 | { | 904 | { |
920 | struct resource *res; | ||
921 | int ret; | 905 | int ret; |
922 | 906 | ||
923 | ipcdev.dev = &pdev->dev; | 907 | ipcdev.dev = &pdev->dev; |
924 | ipcdev.irq_mode = IPC_TRIGGER_MODE_IRQ; | 908 | ipcdev.irq_mode = IPC_TRIGGER_MODE_IRQ; |
925 | init_completion(&ipcdev.cmd_complete); | 909 | init_completion(&ipcdev.cmd_complete); |
910 | spin_lock_init(&ipcdev.gcr_lock); | ||
926 | 911 | ||
927 | ipcdev.irq = platform_get_irq(pdev, 0); | 912 | ipcdev.irq = platform_get_irq(pdev, 0); |
928 | if (ipcdev.irq < 0) { | 913 | if (ipcdev.irq < 0) { |
@@ -939,11 +924,11 @@ static int ipc_plat_probe(struct platform_device *pdev) | |||
939 | ret = ipc_create_pmc_devices(); | 924 | ret = ipc_create_pmc_devices(); |
940 | if (ret) { | 925 | if (ret) { |
941 | dev_err(&pdev->dev, "Failed to create pmc devices\n"); | 926 | dev_err(&pdev->dev, "Failed to create pmc devices\n"); |
942 | goto err_device; | 927 | return ret; |
943 | } | 928 | } |
944 | 929 | ||
945 | if (request_irq(ipcdev.irq, ioc, IRQF_NO_SUSPEND, | 930 | if (devm_request_irq(&pdev->dev, ipcdev.irq, ioc, IRQF_NO_SUSPEND, |
946 | "intel_pmc_ipc", &ipcdev)) { | 931 | "intel_pmc_ipc", &ipcdev)) { |
947 | dev_err(&pdev->dev, "Failed to request irq\n"); | 932 | dev_err(&pdev->dev, "Failed to request irq\n"); |
948 | ret = -EBUSY; | 933 | ret = -EBUSY; |
949 | goto err_irq; | 934 | goto err_irq; |
@@ -960,40 +945,22 @@ static int ipc_plat_probe(struct platform_device *pdev) | |||
960 | 945 | ||
961 | return 0; | 946 | return 0; |
962 | err_sys: | 947 | err_sys: |
963 | free_irq(ipcdev.irq, &ipcdev); | 948 | devm_free_irq(&pdev->dev, ipcdev.irq, &ipcdev); |
964 | err_irq: | 949 | err_irq: |
965 | platform_device_unregister(ipcdev.tco_dev); | 950 | platform_device_unregister(ipcdev.tco_dev); |
966 | platform_device_unregister(ipcdev.punit_dev); | 951 | platform_device_unregister(ipcdev.punit_dev); |
967 | platform_device_unregister(ipcdev.telemetry_dev); | 952 | platform_device_unregister(ipcdev.telemetry_dev); |
968 | err_device: | 953 | |
969 | iounmap(ipcdev.ipc_base); | ||
970 | res = platform_get_resource(pdev, IORESOURCE_MEM, | ||
971 | PLAT_RESOURCE_IPC_INDEX); | ||
972 | if (res) { | ||
973 | release_mem_region(res->start, | ||
974 | PLAT_RESOURCE_IPC_SIZE + | ||
975 | PLAT_RESOURCE_GCR_SIZE); | ||
976 | } | ||
977 | return ret; | 954 | return ret; |
978 | } | 955 | } |
979 | 956 | ||
980 | static int ipc_plat_remove(struct platform_device *pdev) | 957 | static int ipc_plat_remove(struct platform_device *pdev) |
981 | { | 958 | { |
982 | struct resource *res; | ||
983 | |||
984 | sysfs_remove_group(&pdev->dev.kobj, &intel_ipc_group); | 959 | sysfs_remove_group(&pdev->dev.kobj, &intel_ipc_group); |
985 | free_irq(ipcdev.irq, &ipcdev); | 960 | devm_free_irq(&pdev->dev, ipcdev.irq, &ipcdev); |
986 | platform_device_unregister(ipcdev.tco_dev); | 961 | platform_device_unregister(ipcdev.tco_dev); |
987 | platform_device_unregister(ipcdev.punit_dev); | 962 | platform_device_unregister(ipcdev.punit_dev); |
988 | platform_device_unregister(ipcdev.telemetry_dev); | 963 | platform_device_unregister(ipcdev.telemetry_dev); |
989 | iounmap(ipcdev.ipc_base); | ||
990 | res = platform_get_resource(pdev, IORESOURCE_MEM, | ||
991 | PLAT_RESOURCE_IPC_INDEX); | ||
992 | if (res) { | ||
993 | release_mem_region(res->start, | ||
994 | PLAT_RESOURCE_IPC_SIZE + | ||
995 | PLAT_RESOURCE_GCR_SIZE); | ||
996 | } | ||
997 | ipcdev.dev = NULL; | 964 | ipcdev.dev = NULL; |
998 | return 0; | 965 | return 0; |
999 | } | 966 | } |
diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c index d0e5d6ee882c..e2c1988cd7c0 100644 --- a/drivers/ras/cec.c +++ b/drivers/ras/cec.c | |||
@@ -523,7 +523,7 @@ int __init parse_cec_param(char *str) | |||
523 | if (*str == '=') | 523 | if (*str == '=') |
524 | str++; | 524 | str++; |
525 | 525 | ||
526 | if (!strncmp(str, "cec_disable", 7)) | 526 | if (!strcmp(str, "cec_disable")) |
527 | ce_arr.disabled = 1; | 527 | ce_arr.disabled = 1; |
528 | else | 528 | else |
529 | return 0; | 529 | return 0; |
diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c index f18b36dd57dd..376a99b7cf5d 100644 --- a/drivers/regulator/axp20x-regulator.c +++ b/drivers/regulator/axp20x-regulator.c | |||
@@ -590,7 +590,7 @@ static bool axp20x_is_polyphase_slave(struct axp20x_dev *axp20x, int id) | |||
590 | case AXP803_DCDC3: | 590 | case AXP803_DCDC3: |
591 | return !!(reg & BIT(6)); | 591 | return !!(reg & BIT(6)); |
592 | case AXP803_DCDC6: | 592 | case AXP803_DCDC6: |
593 | return !!(reg & BIT(7)); | 593 | return !!(reg & BIT(5)); |
594 | } | 594 | } |
595 | break; | 595 | break; |
596 | 596 | ||
diff --git a/drivers/regulator/rn5t618-regulator.c b/drivers/regulator/rn5t618-regulator.c index ef2be56460fe..790a4a73ea2c 100644 --- a/drivers/regulator/rn5t618-regulator.c +++ b/drivers/regulator/rn5t618-regulator.c | |||
@@ -29,7 +29,7 @@ static const struct regulator_ops rn5t618_reg_ops = { | |||
29 | }; | 29 | }; |
30 | 30 | ||
31 | #define REG(rid, ereg, emask, vreg, vmask, min, max, step) \ | 31 | #define REG(rid, ereg, emask, vreg, vmask, min, max, step) \ |
32 | [RN5T618_##rid] = { \ | 32 | { \ |
33 | .name = #rid, \ | 33 | .name = #rid, \ |
34 | .of_match = of_match_ptr(#rid), \ | 34 | .of_match = of_match_ptr(#rid), \ |
35 | .regulators_node = of_match_ptr("regulators"), \ | 35 | .regulators_node = of_match_ptr("regulators"), \ |
diff --git a/drivers/remoteproc/Kconfig b/drivers/remoteproc/Kconfig index df63e44526ac..bf04479456a0 100644 --- a/drivers/remoteproc/Kconfig +++ b/drivers/remoteproc/Kconfig | |||
@@ -109,6 +109,7 @@ config QCOM_Q6V5_PIL | |||
109 | depends on OF && ARCH_QCOM | 109 | depends on OF && ARCH_QCOM |
110 | depends on QCOM_SMEM | 110 | depends on QCOM_SMEM |
111 | depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n) | 111 | depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n) |
112 | depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n | ||
112 | select MFD_SYSCON | 113 | select MFD_SYSCON |
113 | select QCOM_RPROC_COMMON | 114 | select QCOM_RPROC_COMMON |
114 | select QCOM_SCM | 115 | select QCOM_SCM |
@@ -120,6 +121,7 @@ config QCOM_WCNSS_PIL | |||
120 | tristate "Qualcomm WCNSS Peripheral Image Loader" | 121 | tristate "Qualcomm WCNSS Peripheral Image Loader" |
121 | depends on OF && ARCH_QCOM | 122 | depends on OF && ARCH_QCOM |
122 | depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n) | 123 | depends on RPMSG_QCOM_SMD || (COMPILE_TEST && RPMSG_QCOM_SMD=n) |
124 | depends on RPMSG_QCOM_GLINK_SMEM || RPMSG_QCOM_GLINK_SMEM=n | ||
123 | depends on QCOM_SMEM | 125 | depends on QCOM_SMEM |
124 | select QCOM_MDT_LOADER | 126 | select QCOM_MDT_LOADER |
125 | select QCOM_RPROC_COMMON | 127 | select QCOM_RPROC_COMMON |
diff --git a/drivers/remoteproc/imx_rproc.c b/drivers/remoteproc/imx_rproc.c index 612d91403341..633268e9d550 100644 --- a/drivers/remoteproc/imx_rproc.c +++ b/drivers/remoteproc/imx_rproc.c | |||
@@ -264,15 +264,14 @@ static int imx_rproc_addr_init(struct imx_rproc *priv, | |||
264 | if (!(att->flags & ATT_OWN)) | 264 | if (!(att->flags & ATT_OWN)) |
265 | continue; | 265 | continue; |
266 | 266 | ||
267 | if (b > IMX7D_RPROC_MEM_MAX) | 267 | if (b >= IMX7D_RPROC_MEM_MAX) |
268 | break; | 268 | break; |
269 | 269 | ||
270 | priv->mem[b].cpu_addr = devm_ioremap(&pdev->dev, | 270 | priv->mem[b].cpu_addr = devm_ioremap(&pdev->dev, |
271 | att->sa, att->size); | 271 | att->sa, att->size); |
272 | if (IS_ERR(priv->mem[b].cpu_addr)) { | 272 | if (!priv->mem[b].cpu_addr) { |
273 | dev_err(dev, "devm_ioremap_resource failed\n"); | 273 | dev_err(dev, "devm_ioremap_resource failed\n"); |
274 | err = PTR_ERR(priv->mem[b].cpu_addr); | 274 | return -ENOMEM; |
275 | return err; | ||
276 | } | 275 | } |
277 | priv->mem[b].sys_addr = att->sa; | 276 | priv->mem[b].sys_addr = att->sa; |
278 | priv->mem[b].size = att->size; | 277 | priv->mem[b].size = att->size; |
@@ -296,7 +295,7 @@ static int imx_rproc_addr_init(struct imx_rproc *priv, | |||
296 | return err; | 295 | return err; |
297 | } | 296 | } |
298 | 297 | ||
299 | if (b > IMX7D_RPROC_MEM_MAX) | 298 | if (b >= IMX7D_RPROC_MEM_MAX) |
300 | break; | 299 | break; |
301 | 300 | ||
302 | priv->mem[b].cpu_addr = devm_ioremap_resource(&pdev->dev, &res); | 301 | priv->mem[b].cpu_addr = devm_ioremap_resource(&pdev->dev, &res); |
diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c index c60904ff40b8..3907bbc9c6cf 100644 --- a/drivers/reset/reset-socfpga.c +++ b/drivers/reset/reset-socfpga.c | |||
@@ -40,8 +40,9 @@ static int socfpga_reset_assert(struct reset_controller_dev *rcdev, | |||
40 | struct socfpga_reset_data *data = container_of(rcdev, | 40 | struct socfpga_reset_data *data = container_of(rcdev, |
41 | struct socfpga_reset_data, | 41 | struct socfpga_reset_data, |
42 | rcdev); | 42 | rcdev); |
43 | int bank = id / BITS_PER_LONG; | 43 | int reg_width = sizeof(u32); |
44 | int offset = id % BITS_PER_LONG; | 44 | int bank = id / (reg_width * BITS_PER_BYTE); |
45 | int offset = id % (reg_width * BITS_PER_BYTE); | ||
45 | unsigned long flags; | 46 | unsigned long flags; |
46 | u32 reg; | 47 | u32 reg; |
47 | 48 | ||
@@ -61,8 +62,9 @@ static int socfpga_reset_deassert(struct reset_controller_dev *rcdev, | |||
61 | struct socfpga_reset_data, | 62 | struct socfpga_reset_data, |
62 | rcdev); | 63 | rcdev); |
63 | 64 | ||
64 | int bank = id / BITS_PER_LONG; | 65 | int reg_width = sizeof(u32); |
65 | int offset = id % BITS_PER_LONG; | 66 | int bank = id / (reg_width * BITS_PER_BYTE); |
67 | int offset = id % (reg_width * BITS_PER_BYTE); | ||
66 | unsigned long flags; | 68 | unsigned long flags; |
67 | u32 reg; | 69 | u32 reg; |
68 | 70 | ||
@@ -81,8 +83,9 @@ static int socfpga_reset_status(struct reset_controller_dev *rcdev, | |||
81 | { | 83 | { |
82 | struct socfpga_reset_data *data = container_of(rcdev, | 84 | struct socfpga_reset_data *data = container_of(rcdev, |
83 | struct socfpga_reset_data, rcdev); | 85 | struct socfpga_reset_data, rcdev); |
84 | int bank = id / BITS_PER_LONG; | 86 | int reg_width = sizeof(u32); |
85 | int offset = id % BITS_PER_LONG; | 87 | int bank = id / (reg_width * BITS_PER_BYTE); |
88 | int offset = id % (reg_width * BITS_PER_BYTE); | ||
86 | u32 reg; | 89 | u32 reg; |
87 | 90 | ||
88 | reg = readl(data->membase + (bank * BANK_INCREMENT)); | 91 | reg = readl(data->membase + (bank * BANK_INCREMENT)); |
@@ -132,7 +135,7 @@ static int socfpga_reset_probe(struct platform_device *pdev) | |||
132 | spin_lock_init(&data->lock); | 135 | spin_lock_init(&data->lock); |
133 | 136 | ||
134 | data->rcdev.owner = THIS_MODULE; | 137 | data->rcdev.owner = THIS_MODULE; |
135 | data->rcdev.nr_resets = NR_BANKS * BITS_PER_LONG; | 138 | data->rcdev.nr_resets = NR_BANKS * (sizeof(u32) * BITS_PER_BYTE); |
136 | data->rcdev.ops = &socfpga_reset_ops; | 139 | data->rcdev.ops = &socfpga_reset_ops; |
137 | data->rcdev.of_node = pdev->dev.of_node; | 140 | data->rcdev.of_node = pdev->dev.of_node; |
138 | 141 | ||
diff --git a/drivers/rpmsg/qcom_glink_native.c b/drivers/rpmsg/qcom_glink_native.c index 5a5e927ea50f..5dcc9bf1c5bc 100644 --- a/drivers/rpmsg/qcom_glink_native.c +++ b/drivers/rpmsg/qcom_glink_native.c | |||
@@ -635,19 +635,18 @@ qcom_glink_alloc_intent(struct qcom_glink *glink, | |||
635 | unsigned long flags; | 635 | unsigned long flags; |
636 | 636 | ||
637 | intent = kzalloc(sizeof(*intent), GFP_KERNEL); | 637 | intent = kzalloc(sizeof(*intent), GFP_KERNEL); |
638 | |||
639 | if (!intent) | 638 | if (!intent) |
640 | return NULL; | 639 | return NULL; |
641 | 640 | ||
642 | intent->data = kzalloc(size, GFP_KERNEL); | 641 | intent->data = kzalloc(size, GFP_KERNEL); |
643 | if (!intent->data) | 642 | if (!intent->data) |
644 | return NULL; | 643 | goto free_intent; |
645 | 644 | ||
646 | spin_lock_irqsave(&channel->intent_lock, flags); | 645 | spin_lock_irqsave(&channel->intent_lock, flags); |
647 | ret = idr_alloc_cyclic(&channel->liids, intent, 1, -1, GFP_ATOMIC); | 646 | ret = idr_alloc_cyclic(&channel->liids, intent, 1, -1, GFP_ATOMIC); |
648 | if (ret < 0) { | 647 | if (ret < 0) { |
649 | spin_unlock_irqrestore(&channel->intent_lock, flags); | 648 | spin_unlock_irqrestore(&channel->intent_lock, flags); |
650 | return NULL; | 649 | goto free_data; |
651 | } | 650 | } |
652 | spin_unlock_irqrestore(&channel->intent_lock, flags); | 651 | spin_unlock_irqrestore(&channel->intent_lock, flags); |
653 | 652 | ||
@@ -656,6 +655,12 @@ qcom_glink_alloc_intent(struct qcom_glink *glink, | |||
656 | intent->reuse = reuseable; | 655 | intent->reuse = reuseable; |
657 | 656 | ||
658 | return intent; | 657 | return intent; |
658 | |||
659 | free_data: | ||
660 | kfree(intent->data); | ||
661 | free_intent: | ||
662 | kfree(intent); | ||
663 | return NULL; | ||
659 | } | 664 | } |
660 | 665 | ||
661 | static void qcom_glink_handle_rx_done(struct qcom_glink *glink, | 666 | static void qcom_glink_handle_rx_done(struct qcom_glink *glink, |
@@ -1197,7 +1202,7 @@ static int qcom_glink_request_intent(struct qcom_glink *glink, | |||
1197 | 1202 | ||
1198 | ret = qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true); | 1203 | ret = qcom_glink_tx(glink, &cmd, sizeof(cmd), NULL, 0, true); |
1199 | if (ret) | 1204 | if (ret) |
1200 | return ret; | 1205 | goto unlock; |
1201 | 1206 | ||
1202 | ret = wait_for_completion_timeout(&channel->intent_req_comp, 10 * HZ); | 1207 | ret = wait_for_completion_timeout(&channel->intent_req_comp, 10 * HZ); |
1203 | if (!ret) { | 1208 | if (!ret) { |
@@ -1207,6 +1212,7 @@ static int qcom_glink_request_intent(struct qcom_glink *glink, | |||
1207 | ret = channel->intent_req_result ? 0 : -ECANCELED; | 1212 | ret = channel->intent_req_result ? 0 : -ECANCELED; |
1208 | } | 1213 | } |
1209 | 1214 | ||
1215 | unlock: | ||
1210 | mutex_unlock(&channel->intent_req_lock); | 1216 | mutex_unlock(&channel->intent_req_lock); |
1211 | return ret; | 1217 | return ret; |
1212 | } | 1218 | } |
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 82ac331d9125..84752152d41f 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c | |||
@@ -357,6 +357,8 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device) | |||
357 | 357 | ||
358 | adapter->next_port_scan = jiffies; | 358 | adapter->next_port_scan = jiffies; |
359 | 359 | ||
360 | adapter->erp_action.adapter = adapter; | ||
361 | |||
360 | if (zfcp_qdio_setup(adapter)) | 362 | if (zfcp_qdio_setup(adapter)) |
361 | goto failed; | 363 | goto failed; |
362 | 364 | ||
@@ -513,6 +515,9 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn, | |||
513 | port->dev.groups = zfcp_port_attr_groups; | 515 | port->dev.groups = zfcp_port_attr_groups; |
514 | port->dev.release = zfcp_port_release; | 516 | port->dev.release = zfcp_port_release; |
515 | 517 | ||
518 | port->erp_action.adapter = adapter; | ||
519 | port->erp_action.port = port; | ||
520 | |||
516 | if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) { | 521 | if (dev_set_name(&port->dev, "0x%016llx", (unsigned long long)wwpn)) { |
517 | kfree(port); | 522 | kfree(port); |
518 | goto err_out; | 523 | goto err_out; |
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 37408f5f81ce..ec2532ee1822 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c | |||
@@ -193,9 +193,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status, | |||
193 | atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, | 193 | atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, |
194 | &zfcp_sdev->status); | 194 | &zfcp_sdev->status); |
195 | erp_action = &zfcp_sdev->erp_action; | 195 | erp_action = &zfcp_sdev->erp_action; |
196 | memset(erp_action, 0, sizeof(struct zfcp_erp_action)); | 196 | WARN_ON_ONCE(erp_action->port != port); |
197 | erp_action->port = port; | 197 | WARN_ON_ONCE(erp_action->sdev != sdev); |
198 | erp_action->sdev = sdev; | ||
199 | if (!(atomic_read(&zfcp_sdev->status) & | 198 | if (!(atomic_read(&zfcp_sdev->status) & |
200 | ZFCP_STATUS_COMMON_RUNNING)) | 199 | ZFCP_STATUS_COMMON_RUNNING)) |
201 | act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY; | 200 | act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY; |
@@ -208,8 +207,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status, | |||
208 | zfcp_erp_action_dismiss_port(port); | 207 | zfcp_erp_action_dismiss_port(port); |
209 | atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status); | 208 | atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status); |
210 | erp_action = &port->erp_action; | 209 | erp_action = &port->erp_action; |
211 | memset(erp_action, 0, sizeof(struct zfcp_erp_action)); | 210 | WARN_ON_ONCE(erp_action->port != port); |
212 | erp_action->port = port; | 211 | WARN_ON_ONCE(erp_action->sdev != NULL); |
213 | if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING)) | 212 | if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING)) |
214 | act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY; | 213 | act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY; |
215 | break; | 214 | break; |
@@ -219,7 +218,8 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status, | |||
219 | zfcp_erp_action_dismiss_adapter(adapter); | 218 | zfcp_erp_action_dismiss_adapter(adapter); |
220 | atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status); | 219 | atomic_or(ZFCP_STATUS_COMMON_ERP_INUSE, &adapter->status); |
221 | erp_action = &adapter->erp_action; | 220 | erp_action = &adapter->erp_action; |
222 | memset(erp_action, 0, sizeof(struct zfcp_erp_action)); | 221 | WARN_ON_ONCE(erp_action->port != NULL); |
222 | WARN_ON_ONCE(erp_action->sdev != NULL); | ||
223 | if (!(atomic_read(&adapter->status) & | 223 | if (!(atomic_read(&adapter->status) & |
224 | ZFCP_STATUS_COMMON_RUNNING)) | 224 | ZFCP_STATUS_COMMON_RUNNING)) |
225 | act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY; | 225 | act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY; |
@@ -229,7 +229,11 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status, | |||
229 | return NULL; | 229 | return NULL; |
230 | } | 230 | } |
231 | 231 | ||
232 | erp_action->adapter = adapter; | 232 | WARN_ON_ONCE(erp_action->adapter != adapter); |
233 | memset(&erp_action->list, 0, sizeof(erp_action->list)); | ||
234 | memset(&erp_action->timer, 0, sizeof(erp_action->timer)); | ||
235 | erp_action->step = ZFCP_ERP_STEP_UNINITIALIZED; | ||
236 | erp_action->fsf_req_id = 0; | ||
233 | erp_action->action = need; | 237 | erp_action->action = need; |
234 | erp_action->status = act_status; | 238 | erp_action->status = act_status; |
235 | 239 | ||
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index ec3ddd1d31d5..6cf8732627e0 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c | |||
@@ -115,10 +115,15 @@ static int zfcp_scsi_slave_alloc(struct scsi_device *sdev) | |||
115 | struct zfcp_unit *unit; | 115 | struct zfcp_unit *unit; |
116 | int npiv = adapter->connection_features & FSF_FEATURE_NPIV_MODE; | 116 | int npiv = adapter->connection_features & FSF_FEATURE_NPIV_MODE; |
117 | 117 | ||
118 | zfcp_sdev->erp_action.adapter = adapter; | ||
119 | zfcp_sdev->erp_action.sdev = sdev; | ||
120 | |||
118 | port = zfcp_get_port_by_wwpn(adapter, rport->port_name); | 121 | port = zfcp_get_port_by_wwpn(adapter, rport->port_name); |
119 | if (!port) | 122 | if (!port) |
120 | return -ENXIO; | 123 | return -ENXIO; |
121 | 124 | ||
125 | zfcp_sdev->erp_action.port = port; | ||
126 | |||
122 | unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev)); | 127 | unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev)); |
123 | if (unit) | 128 | if (unit) |
124 | put_device(&unit->dev); | 129 | put_device(&unit->dev); |
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c index 97d269f16888..1bc623ad3faf 100644 --- a/drivers/scsi/aacraid/comminit.c +++ b/drivers/scsi/aacraid/comminit.c | |||
@@ -302,9 +302,11 @@ int aac_send_shutdown(struct aac_dev * dev) | |||
302 | return -ENOMEM; | 302 | return -ENOMEM; |
303 | aac_fib_init(fibctx); | 303 | aac_fib_init(fibctx); |
304 | 304 | ||
305 | mutex_lock(&dev->ioctl_mutex); | 305 | if (!dev->adapter_shutdown) { |
306 | dev->adapter_shutdown = 1; | 306 | mutex_lock(&dev->ioctl_mutex); |
307 | mutex_unlock(&dev->ioctl_mutex); | 307 | dev->adapter_shutdown = 1; |
308 | mutex_unlock(&dev->ioctl_mutex); | ||
309 | } | ||
308 | 310 | ||
309 | cmd = (struct aac_close *) fib_data(fibctx); | 311 | cmd = (struct aac_close *) fib_data(fibctx); |
310 | cmd->command = cpu_to_le32(VM_CloseAll); | 312 | cmd->command = cpu_to_le32(VM_CloseAll); |
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 62beb2596466..c9252b138c1f 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c | |||
@@ -1551,8 +1551,9 @@ static void __aac_shutdown(struct aac_dev * aac) | |||
1551 | { | 1551 | { |
1552 | int i; | 1552 | int i; |
1553 | 1553 | ||
1554 | mutex_lock(&aac->ioctl_mutex); | ||
1554 | aac->adapter_shutdown = 1; | 1555 | aac->adapter_shutdown = 1; |
1555 | aac_send_shutdown(aac); | 1556 | mutex_unlock(&aac->ioctl_mutex); |
1556 | 1557 | ||
1557 | if (aac->aif_thread) { | 1558 | if (aac->aif_thread) { |
1558 | int i; | 1559 | int i; |
@@ -1565,7 +1566,11 @@ static void __aac_shutdown(struct aac_dev * aac) | |||
1565 | } | 1566 | } |
1566 | kthread_stop(aac->thread); | 1567 | kthread_stop(aac->thread); |
1567 | } | 1568 | } |
1569 | |||
1570 | aac_send_shutdown(aac); | ||
1571 | |||
1568 | aac_adapter_disable_int(aac); | 1572 | aac_adapter_disable_int(aac); |
1573 | |||
1569 | if (aac_is_src(aac)) { | 1574 | if (aac_is_src(aac)) { |
1570 | if (aac->max_msix > 1) { | 1575 | if (aac->max_msix > 1) { |
1571 | for (i = 0; i < aac->max_msix; i++) { | 1576 | for (i = 0; i < aac->max_msix; i++) { |
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 9abe81021484..4ed3d26ffdde 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c | |||
@@ -4091,7 +4091,7 @@ static int hpsa_set_local_logical_count(struct ctlr_info *h, | |||
4091 | memset(id_ctlr, 0, sizeof(*id_ctlr)); | 4091 | memset(id_ctlr, 0, sizeof(*id_ctlr)); |
4092 | rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr)); | 4092 | rc = hpsa_bmic_id_controller(h, id_ctlr, sizeof(*id_ctlr)); |
4093 | if (!rc) | 4093 | if (!rc) |
4094 | if (id_ctlr->configured_logical_drive_count < 256) | 4094 | if (id_ctlr->configured_logical_drive_count < 255) |
4095 | *nlocals = id_ctlr->configured_logical_drive_count; | 4095 | *nlocals = id_ctlr->configured_logical_drive_count; |
4096 | else | 4096 | else |
4097 | *nlocals = le16_to_cpu( | 4097 | *nlocals = le16_to_cpu( |
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c index 520325867e2b..31d31aad3de1 100644 --- a/drivers/scsi/libfc/fc_rport.c +++ b/drivers/scsi/libfc/fc_rport.c | |||
@@ -383,11 +383,11 @@ static void fc_rport_work(struct work_struct *work) | |||
383 | fc_rport_enter_flogi(rdata); | 383 | fc_rport_enter_flogi(rdata); |
384 | mutex_unlock(&rdata->rp_mutex); | 384 | mutex_unlock(&rdata->rp_mutex); |
385 | } else { | 385 | } else { |
386 | mutex_unlock(&rdata->rp_mutex); | ||
386 | FC_RPORT_DBG(rdata, "work delete\n"); | 387 | FC_RPORT_DBG(rdata, "work delete\n"); |
387 | mutex_lock(&lport->disc.disc_mutex); | 388 | mutex_lock(&lport->disc.disc_mutex); |
388 | list_del_rcu(&rdata->peers); | 389 | list_del_rcu(&rdata->peers); |
389 | mutex_unlock(&lport->disc.disc_mutex); | 390 | mutex_unlock(&lport->disc.disc_mutex); |
390 | mutex_unlock(&rdata->rp_mutex); | ||
391 | kref_put(&rdata->kref, fc_rport_destroy); | 391 | kref_put(&rdata->kref, fc_rport_destroy); |
392 | } | 392 | } |
393 | } else { | 393 | } else { |
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index c62e8d111fd9..f8dc1601efd5 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c | |||
@@ -1728,7 +1728,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc) | |||
1728 | 1728 | ||
1729 | if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) { | 1729 | if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) { |
1730 | reason = FAILURE_SESSION_IN_RECOVERY; | 1730 | reason = FAILURE_SESSION_IN_RECOVERY; |
1731 | sc->result = DID_REQUEUE; | 1731 | sc->result = DID_REQUEUE << 16; |
1732 | goto fault; | 1732 | goto fault; |
1733 | } | 1733 | } |
1734 | 1734 | ||
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 5b2437a5ea44..3bd956d3bc5d 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
@@ -3061,6 +3061,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
3061 | host->max_cmd_len, host->max_channel, host->max_lun, | 3061 | host->max_cmd_len, host->max_channel, host->max_lun, |
3062 | host->transportt, sht->vendor_id); | 3062 | host->transportt, sht->vendor_id); |
3063 | 3063 | ||
3064 | INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn); | ||
3065 | |||
3064 | /* Set up the irqs */ | 3066 | /* Set up the irqs */ |
3065 | ret = qla2x00_request_irqs(ha, rsp); | 3067 | ret = qla2x00_request_irqs(ha, rsp); |
3066 | if (ret) | 3068 | if (ret) |
@@ -3223,7 +3225,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) | |||
3223 | */ | 3225 | */ |
3224 | qla2xxx_wake_dpc(base_vha); | 3226 | qla2xxx_wake_dpc(base_vha); |
3225 | 3227 | ||
3226 | INIT_WORK(&base_vha->iocb_work, qla2x00_iocb_work_fn); | ||
3227 | INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error); | 3228 | INIT_WORK(&ha->board_disable, qla2x00_disable_board_on_pci_error); |
3228 | 3229 | ||
3229 | if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) { | 3230 | if (IS_QLA8031(ha) || IS_MCTP_CAPABLE(ha)) { |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 9cf6a80fe297..ad3ea24f0885 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -1379,8 +1379,6 @@ static int scsi_prep_fn(struct request_queue *q, struct request *req) | |||
1379 | 1379 | ||
1380 | ret = scsi_setup_cmnd(sdev, req); | 1380 | ret = scsi_setup_cmnd(sdev, req); |
1381 | out: | 1381 | out: |
1382 | if (ret != BLKPREP_OK) | ||
1383 | cmd->flags &= ~SCMD_INITIALIZED; | ||
1384 | return scsi_prep_return(q, req, ret); | 1382 | return scsi_prep_return(q, req, ret); |
1385 | } | 1383 | } |
1386 | 1384 | ||
@@ -1900,7 +1898,6 @@ static int scsi_mq_prep_fn(struct request *req) | |||
1900 | struct scsi_device *sdev = req->q->queuedata; | 1898 | struct scsi_device *sdev = req->q->queuedata; |
1901 | struct Scsi_Host *shost = sdev->host; | 1899 | struct Scsi_Host *shost = sdev->host; |
1902 | struct scatterlist *sg; | 1900 | struct scatterlist *sg; |
1903 | int ret; | ||
1904 | 1901 | ||
1905 | scsi_init_command(sdev, cmd); | 1902 | scsi_init_command(sdev, cmd); |
1906 | 1903 | ||
@@ -1934,10 +1931,7 @@ static int scsi_mq_prep_fn(struct request *req) | |||
1934 | 1931 | ||
1935 | blk_mq_start_request(req); | 1932 | blk_mq_start_request(req); |
1936 | 1933 | ||
1937 | ret = scsi_setup_cmnd(sdev, req); | 1934 | return scsi_setup_cmnd(sdev, req); |
1938 | if (ret != BLK_STS_OK) | ||
1939 | cmd->flags &= ~SCMD_INITIALIZED; | ||
1940 | return ret; | ||
1941 | } | 1935 | } |
1942 | 1936 | ||
1943 | static void scsi_mq_done(struct scsi_cmnd *cmd) | 1937 | static void scsi_mq_done(struct scsi_cmnd *cmd) |
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index bf53356f41f0..f796bd61f3f0 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c | |||
@@ -1376,13 +1376,19 @@ static void __scsi_remove_target(struct scsi_target *starget) | |||
1376 | spin_lock_irqsave(shost->host_lock, flags); | 1376 | spin_lock_irqsave(shost->host_lock, flags); |
1377 | restart: | 1377 | restart: |
1378 | list_for_each_entry(sdev, &shost->__devices, siblings) { | 1378 | list_for_each_entry(sdev, &shost->__devices, siblings) { |
1379 | /* | ||
1380 | * We cannot call scsi_device_get() here, as | ||
1381 | * we might've been called from rmmod() causing | ||
1382 | * scsi_device_get() to fail the module_is_live() | ||
1383 | * check. | ||
1384 | */ | ||
1379 | if (sdev->channel != starget->channel || | 1385 | if (sdev->channel != starget->channel || |
1380 | sdev->id != starget->id || | 1386 | sdev->id != starget->id || |
1381 | scsi_device_get(sdev)) | 1387 | !get_device(&sdev->sdev_gendev)) |
1382 | continue; | 1388 | continue; |
1383 | spin_unlock_irqrestore(shost->host_lock, flags); | 1389 | spin_unlock_irqrestore(shost->host_lock, flags); |
1384 | scsi_remove_device(sdev); | 1390 | scsi_remove_device(sdev); |
1385 | scsi_device_put(sdev); | 1391 | put_device(&sdev->sdev_gendev); |
1386 | spin_lock_irqsave(shost->host_lock, flags); | 1392 | spin_lock_irqsave(shost->host_lock, flags); |
1387 | goto restart; | 1393 | goto restart; |
1388 | } | 1394 | } |
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index cbd4495d0ff9..8c46a6d536af 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c | |||
@@ -3320,6 +3320,9 @@ int fc_block_scsi_eh(struct scsi_cmnd *cmnd) | |||
3320 | { | 3320 | { |
3321 | struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); | 3321 | struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); |
3322 | 3322 | ||
3323 | if (WARN_ON_ONCE(!rport)) | ||
3324 | return FAST_IO_FAIL; | ||
3325 | |||
3323 | return fc_block_rport(rport); | 3326 | return fc_block_rport(rport); |
3324 | } | 3327 | } |
3325 | EXPORT_SYMBOL(fc_block_scsi_eh); | 3328 | EXPORT_SYMBOL(fc_block_scsi_eh); |
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 0419c2298eab..aa28874e8fb9 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c | |||
@@ -837,7 +837,7 @@ sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo) | |||
837 | 837 | ||
838 | val = 0; | 838 | val = 0; |
839 | list_for_each_entry(srp, &sfp->rq_list, entry) { | 839 | list_for_each_entry(srp, &sfp->rq_list, entry) { |
840 | if (val > SG_MAX_QUEUE) | 840 | if (val >= SG_MAX_QUEUE) |
841 | break; | 841 | break; |
842 | rinfo[val].req_state = srp->done + 1; | 842 | rinfo[val].req_state = srp->done + 1; |
843 | rinfo[val].problem = | 843 | rinfo[val].problem = |
diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c index 6c7d7a460689..568e1c65aa82 100644 --- a/drivers/spi/spi-armada-3700.c +++ b/drivers/spi/spi-armada-3700.c | |||
@@ -99,11 +99,6 @@ | |||
99 | /* A3700_SPI_IF_TIME_REG */ | 99 | /* A3700_SPI_IF_TIME_REG */ |
100 | #define A3700_SPI_CLK_CAPT_EDGE BIT(7) | 100 | #define A3700_SPI_CLK_CAPT_EDGE BIT(7) |
101 | 101 | ||
102 | /* Flags and macros for struct a3700_spi */ | ||
103 | #define A3700_INSTR_CNT 1 | ||
104 | #define A3700_ADDR_CNT 3 | ||
105 | #define A3700_DUMMY_CNT 1 | ||
106 | |||
107 | struct a3700_spi { | 102 | struct a3700_spi { |
108 | struct spi_master *master; | 103 | struct spi_master *master; |
109 | void __iomem *base; | 104 | void __iomem *base; |
@@ -117,9 +112,6 @@ struct a3700_spi { | |||
117 | u8 byte_len; | 112 | u8 byte_len; |
118 | u32 wait_mask; | 113 | u32 wait_mask; |
119 | struct completion done; | 114 | struct completion done; |
120 | u32 addr_cnt; | ||
121 | u32 instr_cnt; | ||
122 | size_t hdr_cnt; | ||
123 | }; | 115 | }; |
124 | 116 | ||
125 | static u32 spireg_read(struct a3700_spi *a3700_spi, u32 offset) | 117 | static u32 spireg_read(struct a3700_spi *a3700_spi, u32 offset) |
@@ -161,7 +153,7 @@ static void a3700_spi_deactivate_cs(struct a3700_spi *a3700_spi, | |||
161 | } | 153 | } |
162 | 154 | ||
163 | static int a3700_spi_pin_mode_set(struct a3700_spi *a3700_spi, | 155 | static int a3700_spi_pin_mode_set(struct a3700_spi *a3700_spi, |
164 | unsigned int pin_mode) | 156 | unsigned int pin_mode, bool receiving) |
165 | { | 157 | { |
166 | u32 val; | 158 | u32 val; |
167 | 159 | ||
@@ -177,6 +169,9 @@ static int a3700_spi_pin_mode_set(struct a3700_spi *a3700_spi, | |||
177 | break; | 169 | break; |
178 | case SPI_NBITS_QUAD: | 170 | case SPI_NBITS_QUAD: |
179 | val |= A3700_SPI_DATA_PIN1; | 171 | val |= A3700_SPI_DATA_PIN1; |
172 | /* RX during address reception uses 4-pin */ | ||
173 | if (receiving) | ||
174 | val |= A3700_SPI_ADDR_PIN; | ||
180 | break; | 175 | break; |
181 | default: | 176 | default: |
182 | dev_err(&a3700_spi->master->dev, "wrong pin mode %u", pin_mode); | 177 | dev_err(&a3700_spi->master->dev, "wrong pin mode %u", pin_mode); |
@@ -392,7 +387,8 @@ static bool a3700_spi_wait_completion(struct spi_device *spi) | |||
392 | 387 | ||
393 | spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG, 0); | 388 | spireg_write(a3700_spi, A3700_SPI_INT_MASK_REG, 0); |
394 | 389 | ||
395 | return true; | 390 | /* Timeout was reached */ |
391 | return false; | ||
396 | } | 392 | } |
397 | 393 | ||
398 | static bool a3700_spi_transfer_wait(struct spi_device *spi, | 394 | static bool a3700_spi_transfer_wait(struct spi_device *spi, |
@@ -446,59 +442,43 @@ static void a3700_spi_set_cs(struct spi_device *spi, bool enable) | |||
446 | 442 | ||
447 | static void a3700_spi_header_set(struct a3700_spi *a3700_spi) | 443 | static void a3700_spi_header_set(struct a3700_spi *a3700_spi) |
448 | { | 444 | { |
449 | u32 instr_cnt = 0, addr_cnt = 0, dummy_cnt = 0; | 445 | unsigned int addr_cnt; |
450 | u32 val = 0; | 446 | u32 val = 0; |
451 | 447 | ||
452 | /* Clear the header registers */ | 448 | /* Clear the header registers */ |
453 | spireg_write(a3700_spi, A3700_SPI_IF_INST_REG, 0); | 449 | spireg_write(a3700_spi, A3700_SPI_IF_INST_REG, 0); |
454 | spireg_write(a3700_spi, A3700_SPI_IF_ADDR_REG, 0); | 450 | spireg_write(a3700_spi, A3700_SPI_IF_ADDR_REG, 0); |
455 | spireg_write(a3700_spi, A3700_SPI_IF_RMODE_REG, 0); | 451 | spireg_write(a3700_spi, A3700_SPI_IF_RMODE_REG, 0); |
452 | spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, 0); | ||
456 | 453 | ||
457 | /* Set header counters */ | 454 | /* Set header counters */ |
458 | if (a3700_spi->tx_buf) { | 455 | if (a3700_spi->tx_buf) { |
459 | if (a3700_spi->buf_len <= a3700_spi->instr_cnt) { | 456 | /* |
460 | instr_cnt = a3700_spi->buf_len; | 457 | * when tx data is not 4 bytes aligned, there will be unexpected |
461 | } else if (a3700_spi->buf_len <= (a3700_spi->instr_cnt + | 458 | * bytes out of SPI output register, since it always shifts out |
462 | a3700_spi->addr_cnt)) { | 459 | * as whole 4 bytes. This might cause incorrect transaction with |
463 | instr_cnt = a3700_spi->instr_cnt; | 460 | * some devices. To avoid that, use SPI header count feature to |
464 | addr_cnt = a3700_spi->buf_len - instr_cnt; | 461 | * transfer up to 3 bytes of data first, and then make the rest |
465 | } else if (a3700_spi->buf_len <= a3700_spi->hdr_cnt) { | 462 | * of data 4-byte aligned. |
466 | instr_cnt = a3700_spi->instr_cnt; | 463 | */ |
467 | addr_cnt = a3700_spi->addr_cnt; | 464 | addr_cnt = a3700_spi->buf_len % 4; |
468 | /* Need to handle the normal write case with 1 byte | 465 | if (addr_cnt) { |
469 | * data | 466 | val = (addr_cnt & A3700_SPI_ADDR_CNT_MASK) |
470 | */ | 467 | << A3700_SPI_ADDR_CNT_BIT; |
471 | if (!a3700_spi->tx_buf[instr_cnt + addr_cnt]) | 468 | spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, val); |
472 | dummy_cnt = a3700_spi->buf_len - instr_cnt - | 469 | |
473 | addr_cnt; | 470 | /* Update the buffer length to be transferred */ |
471 | a3700_spi->buf_len -= addr_cnt; | ||
472 | |||
473 | /* transfer 1~3 bytes through address count */ | ||
474 | val = 0; | ||
475 | while (addr_cnt--) { | ||
476 | val = (val << 8) | a3700_spi->tx_buf[0]; | ||
477 | a3700_spi->tx_buf++; | ||
478 | } | ||
479 | spireg_write(a3700_spi, A3700_SPI_IF_ADDR_REG, val); | ||
474 | } | 480 | } |
475 | val |= ((instr_cnt & A3700_SPI_INSTR_CNT_MASK) | ||
476 | << A3700_SPI_INSTR_CNT_BIT); | ||
477 | val |= ((addr_cnt & A3700_SPI_ADDR_CNT_MASK) | ||
478 | << A3700_SPI_ADDR_CNT_BIT); | ||
479 | val |= ((dummy_cnt & A3700_SPI_DUMMY_CNT_MASK) | ||
480 | << A3700_SPI_DUMMY_CNT_BIT); | ||
481 | } | 481 | } |
482 | spireg_write(a3700_spi, A3700_SPI_IF_HDR_CNT_REG, val); | ||
483 | |||
484 | /* Update the buffer length to be transferred */ | ||
485 | a3700_spi->buf_len -= (instr_cnt + addr_cnt + dummy_cnt); | ||
486 | |||
487 | /* Set Instruction */ | ||
488 | val = 0; | ||
489 | while (instr_cnt--) { | ||
490 | val = (val << 8) | a3700_spi->tx_buf[0]; | ||
491 | a3700_spi->tx_buf++; | ||
492 | } | ||
493 | spireg_write(a3700_spi, A3700_SPI_IF_INST_REG, val); | ||
494 | |||
495 | /* Set Address */ | ||
496 | val = 0; | ||
497 | while (addr_cnt--) { | ||
498 | val = (val << 8) | a3700_spi->tx_buf[0]; | ||
499 | a3700_spi->tx_buf++; | ||
500 | } | ||
501 | spireg_write(a3700_spi, A3700_SPI_IF_ADDR_REG, val); | ||
502 | } | 482 | } |
503 | 483 | ||
504 | static int a3700_is_wfifo_full(struct a3700_spi *a3700_spi) | 484 | static int a3700_is_wfifo_full(struct a3700_spi *a3700_spi) |
@@ -512,35 +492,12 @@ static int a3700_is_wfifo_full(struct a3700_spi *a3700_spi) | |||
512 | static int a3700_spi_fifo_write(struct a3700_spi *a3700_spi) | 492 | static int a3700_spi_fifo_write(struct a3700_spi *a3700_spi) |
513 | { | 493 | { |
514 | u32 val; | 494 | u32 val; |
515 | int i = 0; | ||
516 | 495 | ||
517 | while (!a3700_is_wfifo_full(a3700_spi) && a3700_spi->buf_len) { | 496 | while (!a3700_is_wfifo_full(a3700_spi) && a3700_spi->buf_len) { |
518 | val = 0; | 497 | val = cpu_to_le32(*(u32 *)a3700_spi->tx_buf); |
519 | if (a3700_spi->buf_len >= 4) { | 498 | spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, val); |
520 | val = cpu_to_le32(*(u32 *)a3700_spi->tx_buf); | 499 | a3700_spi->buf_len -= 4; |
521 | spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, val); | 500 | a3700_spi->tx_buf += 4; |
522 | |||
523 | a3700_spi->buf_len -= 4; | ||
524 | a3700_spi->tx_buf += 4; | ||
525 | } else { | ||
526 | /* | ||
527 | * If the remained buffer length is less than 4-bytes, | ||
528 | * we should pad the write buffer with all ones. So that | ||
529 | * it avoids overwrite the unexpected bytes following | ||
530 | * the last one. | ||
531 | */ | ||
532 | val = GENMASK(31, 0); | ||
533 | while (a3700_spi->buf_len) { | ||
534 | val &= ~(0xff << (8 * i)); | ||
535 | val |= *a3700_spi->tx_buf++ << (8 * i); | ||
536 | i++; | ||
537 | a3700_spi->buf_len--; | ||
538 | |||
539 | spireg_write(a3700_spi, A3700_SPI_DATA_OUT_REG, | ||
540 | val); | ||
541 | } | ||
542 | break; | ||
543 | } | ||
544 | } | 501 | } |
545 | 502 | ||
546 | return 0; | 503 | return 0; |
@@ -645,15 +602,18 @@ static int a3700_spi_transfer_one(struct spi_master *master, | |||
645 | a3700_spi->rx_buf = xfer->rx_buf; | 602 | a3700_spi->rx_buf = xfer->rx_buf; |
646 | a3700_spi->buf_len = xfer->len; | 603 | a3700_spi->buf_len = xfer->len; |
647 | 604 | ||
648 | /* SPI transfer headers */ | ||
649 | a3700_spi_header_set(a3700_spi); | ||
650 | |||
651 | if (xfer->tx_buf) | 605 | if (xfer->tx_buf) |
652 | nbits = xfer->tx_nbits; | 606 | nbits = xfer->tx_nbits; |
653 | else if (xfer->rx_buf) | 607 | else if (xfer->rx_buf) |
654 | nbits = xfer->rx_nbits; | 608 | nbits = xfer->rx_nbits; |
655 | 609 | ||
656 | a3700_spi_pin_mode_set(a3700_spi, nbits); | 610 | a3700_spi_pin_mode_set(a3700_spi, nbits, xfer->rx_buf ? true : false); |
611 | |||
612 | /* Flush the FIFOs */ | ||
613 | a3700_spi_fifo_flush(a3700_spi); | ||
614 | |||
615 | /* Transfer first bytes of data when buffer is not 4-byte aligned */ | ||
616 | a3700_spi_header_set(a3700_spi); | ||
657 | 617 | ||
658 | if (xfer->rx_buf) { | 618 | if (xfer->rx_buf) { |
659 | /* Set read data length */ | 619 | /* Set read data length */ |
@@ -733,16 +693,11 @@ static int a3700_spi_transfer_one(struct spi_master *master, | |||
733 | dev_err(&spi->dev, "wait wfifo empty timed out\n"); | 693 | dev_err(&spi->dev, "wait wfifo empty timed out\n"); |
734 | return -ETIMEDOUT; | 694 | return -ETIMEDOUT; |
735 | } | 695 | } |
736 | } else { | 696 | } |
737 | /* | 697 | |
738 | * If the instruction in SPI_INSTR does not require data | 698 | if (!a3700_spi_transfer_wait(spi, A3700_SPI_XFER_RDY)) { |
739 | * to be written to the SPI device, wait until SPI_RDY | 699 | dev_err(&spi->dev, "wait xfer ready timed out\n"); |
740 | * is 1 for the SPI interface to be in idle. | 700 | return -ETIMEDOUT; |
741 | */ | ||
742 | if (!a3700_spi_transfer_wait(spi, A3700_SPI_XFER_RDY)) { | ||
743 | dev_err(&spi->dev, "wait xfer ready timed out\n"); | ||
744 | return -ETIMEDOUT; | ||
745 | } | ||
746 | } | 701 | } |
747 | 702 | ||
748 | val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); | 703 | val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); |
@@ -834,10 +789,6 @@ static int a3700_spi_probe(struct platform_device *pdev) | |||
834 | memset(spi, 0, sizeof(struct a3700_spi)); | 789 | memset(spi, 0, sizeof(struct a3700_spi)); |
835 | 790 | ||
836 | spi->master = master; | 791 | spi->master = master; |
837 | spi->instr_cnt = A3700_INSTR_CNT; | ||
838 | spi->addr_cnt = A3700_ADDR_CNT; | ||
839 | spi->hdr_cnt = A3700_INSTR_CNT + A3700_ADDR_CNT + | ||
840 | A3700_DUMMY_CNT; | ||
841 | 792 | ||
842 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 793 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
843 | spi->base = devm_ioremap_resource(dev, res); | 794 | spi->base = devm_ioremap_resource(dev, res); |
diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c index 6ef6c44f39f5..a172ab299e80 100644 --- a/drivers/spi/spi-bcm-qspi.c +++ b/drivers/spi/spi-bcm-qspi.c | |||
@@ -1250,7 +1250,7 @@ int bcm_qspi_probe(struct platform_device *pdev, | |||
1250 | goto qspi_probe_err; | 1250 | goto qspi_probe_err; |
1251 | } | 1251 | } |
1252 | } else { | 1252 | } else { |
1253 | goto qspi_probe_err; | 1253 | goto qspi_resource_err; |
1254 | } | 1254 | } |
1255 | 1255 | ||
1256 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bspi"); | 1256 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "bspi"); |
@@ -1272,7 +1272,7 @@ int bcm_qspi_probe(struct platform_device *pdev, | |||
1272 | qspi->base[CHIP_SELECT] = devm_ioremap_resource(dev, res); | 1272 | qspi->base[CHIP_SELECT] = devm_ioremap_resource(dev, res); |
1273 | if (IS_ERR(qspi->base[CHIP_SELECT])) { | 1273 | if (IS_ERR(qspi->base[CHIP_SELECT])) { |
1274 | ret = PTR_ERR(qspi->base[CHIP_SELECT]); | 1274 | ret = PTR_ERR(qspi->base[CHIP_SELECT]); |
1275 | goto qspi_probe_err; | 1275 | goto qspi_resource_err; |
1276 | } | 1276 | } |
1277 | } | 1277 | } |
1278 | 1278 | ||
@@ -1280,7 +1280,7 @@ int bcm_qspi_probe(struct platform_device *pdev, | |||
1280 | GFP_KERNEL); | 1280 | GFP_KERNEL); |
1281 | if (!qspi->dev_ids) { | 1281 | if (!qspi->dev_ids) { |
1282 | ret = -ENOMEM; | 1282 | ret = -ENOMEM; |
1283 | goto qspi_probe_err; | 1283 | goto qspi_resource_err; |
1284 | } | 1284 | } |
1285 | 1285 | ||
1286 | for (val = 0; val < num_irqs; val++) { | 1286 | for (val = 0; val < num_irqs; val++) { |
@@ -1369,8 +1369,9 @@ qspi_reg_err: | |||
1369 | bcm_qspi_hw_uninit(qspi); | 1369 | bcm_qspi_hw_uninit(qspi); |
1370 | clk_disable_unprepare(qspi->clk); | 1370 | clk_disable_unprepare(qspi->clk); |
1371 | qspi_probe_err: | 1371 | qspi_probe_err: |
1372 | spi_master_put(master); | ||
1373 | kfree(qspi->dev_ids); | 1372 | kfree(qspi->dev_ids); |
1373 | qspi_resource_err: | ||
1374 | spi_master_put(master); | ||
1374 | return ret; | 1375 | return ret; |
1375 | } | 1376 | } |
1376 | /* probe function to be called by SoC specific platform driver probe */ | 1377 | /* probe function to be called by SoC specific platform driver probe */ |
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c index 680cdf549506..ba9743fa2326 100644 --- a/drivers/spi/spi-stm32.c +++ b/drivers/spi/spi-stm32.c | |||
@@ -263,8 +263,8 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz) | |||
263 | * no need to check it there. | 263 | * no need to check it there. |
264 | * However, we need to ensure the following calculations. | 264 | * However, we need to ensure the following calculations. |
265 | */ | 265 | */ |
266 | if ((div < SPI_MBR_DIV_MIN) && | 266 | if (div < SPI_MBR_DIV_MIN || |
267 | (div > SPI_MBR_DIV_MAX)) | 267 | div > SPI_MBR_DIV_MAX) |
268 | return -EINVAL; | 268 | return -EINVAL; |
269 | 269 | ||
270 | /* Determine the first power of 2 greater than or equal to div */ | 270 | /* Determine the first power of 2 greater than or equal to div */ |
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 6e65524cbfd9..e8b5a5e21b2e 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c | |||
@@ -45,7 +45,6 @@ | |||
45 | 45 | ||
46 | #define CREATE_TRACE_POINTS | 46 | #define CREATE_TRACE_POINTS |
47 | #include <trace/events/spi.h> | 47 | #include <trace/events/spi.h> |
48 | #define SPI_DYN_FIRST_BUS_NUM 0 | ||
49 | 48 | ||
50 | static DEFINE_IDR(spi_master_idr); | 49 | static DEFINE_IDR(spi_master_idr); |
51 | 50 | ||
@@ -2086,7 +2085,7 @@ int spi_register_controller(struct spi_controller *ctlr) | |||
2086 | struct device *dev = ctlr->dev.parent; | 2085 | struct device *dev = ctlr->dev.parent; |
2087 | struct boardinfo *bi; | 2086 | struct boardinfo *bi; |
2088 | int status = -ENODEV; | 2087 | int status = -ENODEV; |
2089 | int id; | 2088 | int id, first_dynamic; |
2090 | 2089 | ||
2091 | if (!dev) | 2090 | if (!dev) |
2092 | return -ENODEV; | 2091 | return -ENODEV; |
@@ -2116,9 +2115,15 @@ int spi_register_controller(struct spi_controller *ctlr) | |||
2116 | } | 2115 | } |
2117 | } | 2116 | } |
2118 | if (ctlr->bus_num < 0) { | 2117 | if (ctlr->bus_num < 0) { |
2118 | first_dynamic = of_alias_get_highest_id("spi"); | ||
2119 | if (first_dynamic < 0) | ||
2120 | first_dynamic = 0; | ||
2121 | else | ||
2122 | first_dynamic++; | ||
2123 | |||
2119 | mutex_lock(&board_lock); | 2124 | mutex_lock(&board_lock); |
2120 | id = idr_alloc(&spi_master_idr, ctlr, SPI_DYN_FIRST_BUS_NUM, 0, | 2125 | id = idr_alloc(&spi_master_idr, ctlr, first_dynamic, |
2121 | GFP_KERNEL); | 2126 | 0, GFP_KERNEL); |
2122 | mutex_unlock(&board_lock); | 2127 | mutex_unlock(&board_lock); |
2123 | if (WARN(id < 0, "couldn't get idr")) | 2128 | if (WARN(id < 0, "couldn't get idr")) |
2124 | return id; | 2129 | return id; |
diff --git a/drivers/staging/iio/meter/ade7759.c b/drivers/staging/iio/meter/ade7759.c index 1691760339da..02573c517d9d 100644 --- a/drivers/staging/iio/meter/ade7759.c +++ b/drivers/staging/iio/meter/ade7759.c | |||
@@ -172,7 +172,7 @@ static int ade7759_spi_read_reg_40(struct device *dev, | |||
172 | reg_address); | 172 | reg_address); |
173 | goto error_ret; | 173 | goto error_ret; |
174 | } | 174 | } |
175 | *val = ((u64)st->rx[1] << 32) | (st->rx[2] << 24) | | 175 | *val = ((u64)st->rx[1] << 32) | ((u64)st->rx[2] << 24) | |
176 | (st->rx[3] << 16) | (st->rx[4] << 8) | st->rx[5]; | 176 | (st->rx[3] << 16) | (st->rx[4] << 8) | st->rx[5]; |
177 | 177 | ||
178 | error_ret: | 178 | error_ret: |
diff --git a/drivers/staging/media/imx/imx-media-dev.c b/drivers/staging/media/imx/imx-media-dev.c index d96f4512224f..b55e5ebba8b4 100644 --- a/drivers/staging/media/imx/imx-media-dev.c +++ b/drivers/staging/media/imx/imx-media-dev.c | |||
@@ -400,10 +400,10 @@ static int imx_media_create_pad_vdev_lists(struct imx_media_dev *imxmd) | |||
400 | struct media_link, list); | 400 | struct media_link, list); |
401 | ret = imx_media_add_vdev_to_pad(imxmd, vdev, link->source); | 401 | ret = imx_media_add_vdev_to_pad(imxmd, vdev, link->source); |
402 | if (ret) | 402 | if (ret) |
403 | break; | 403 | return ret; |
404 | } | 404 | } |
405 | 405 | ||
406 | return ret; | 406 | return 0; |
407 | } | 407 | } |
408 | 408 | ||
409 | /* async subdev complete notifier */ | 409 | /* async subdev complete notifier */ |
diff --git a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c index 5f3d8f2339e3..4be864dbd41c 100644 --- a/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c +++ b/drivers/staging/vc04_services/bcm2835-audio/bcm2835-vchiq.c | |||
@@ -390,8 +390,7 @@ static int bcm2835_audio_open_connection(struct bcm2835_alsa_stream *alsa_stream | |||
390 | __func__, instance); | 390 | __func__, instance); |
391 | instance->alsa_stream = alsa_stream; | 391 | instance->alsa_stream = alsa_stream; |
392 | alsa_stream->instance = instance; | 392 | alsa_stream->instance = instance; |
393 | ret = 0; // xxx todo -1; | 393 | return 0; |
394 | goto err_free_mem; | ||
395 | } | 394 | } |
396 | 395 | ||
397 | /* Initialize and create a VCHI connection */ | 396 | /* Initialize and create a VCHI connection */ |
@@ -401,16 +400,15 @@ static int bcm2835_audio_open_connection(struct bcm2835_alsa_stream *alsa_stream | |||
401 | LOG_ERR("%s: failed to initialise VCHI instance (ret=%d)\n", | 400 | LOG_ERR("%s: failed to initialise VCHI instance (ret=%d)\n", |
402 | __func__, ret); | 401 | __func__, ret); |
403 | 402 | ||
404 | ret = -EIO; | 403 | return -EIO; |
405 | goto err_free_mem; | ||
406 | } | 404 | } |
407 | ret = vchi_connect(NULL, 0, vchi_instance); | 405 | ret = vchi_connect(NULL, 0, vchi_instance); |
408 | if (ret) { | 406 | if (ret) { |
409 | LOG_ERR("%s: failed to connect VCHI instance (ret=%d)\n", | 407 | LOG_ERR("%s: failed to connect VCHI instance (ret=%d)\n", |
410 | __func__, ret); | 408 | __func__, ret); |
411 | 409 | ||
412 | ret = -EIO; | 410 | kfree(vchi_instance); |
413 | goto err_free_mem; | 411 | return -EIO; |
414 | } | 412 | } |
415 | initted = 1; | 413 | initted = 1; |
416 | } | 414 | } |
@@ -421,19 +419,16 @@ static int bcm2835_audio_open_connection(struct bcm2835_alsa_stream *alsa_stream | |||
421 | if (IS_ERR(instance)) { | 419 | if (IS_ERR(instance)) { |
422 | LOG_ERR("%s: failed to initialize audio service\n", __func__); | 420 | LOG_ERR("%s: failed to initialize audio service\n", __func__); |
423 | 421 | ||
424 | ret = PTR_ERR(instance); | 422 | /* vchi_instance is retained for use the next time. */ |
425 | goto err_free_mem; | 423 | return PTR_ERR(instance); |
426 | } | 424 | } |
427 | 425 | ||
428 | instance->alsa_stream = alsa_stream; | 426 | instance->alsa_stream = alsa_stream; |
429 | alsa_stream->instance = instance; | 427 | alsa_stream->instance = instance; |
430 | 428 | ||
431 | LOG_DBG(" success !\n"); | 429 | LOG_DBG(" success !\n"); |
432 | ret = 0; | ||
433 | err_free_mem: | ||
434 | kfree(vchi_instance); | ||
435 | 430 | ||
436 | return ret; | 431 | return 0; |
437 | } | 432 | } |
438 | 433 | ||
439 | int bcm2835_audio_open(struct bcm2835_alsa_stream *alsa_stream) | 434 | int bcm2835_audio_open(struct bcm2835_alsa_stream *alsa_stream) |
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c index 2fe216b276e2..84a8ac2a779f 100644 --- a/drivers/tty/tty_ldisc.c +++ b/drivers/tty/tty_ldisc.c | |||
@@ -694,10 +694,8 @@ int tty_ldisc_reinit(struct tty_struct *tty, int disc) | |||
694 | tty_set_termios_ldisc(tty, disc); | 694 | tty_set_termios_ldisc(tty, disc); |
695 | retval = tty_ldisc_open(tty, tty->ldisc); | 695 | retval = tty_ldisc_open(tty, tty->ldisc); |
696 | if (retval) { | 696 | if (retval) { |
697 | if (!WARN_ON(disc == N_TTY)) { | 697 | tty_ldisc_put(tty->ldisc); |
698 | tty_ldisc_put(tty->ldisc); | 698 | tty->ldisc = NULL; |
699 | tty->ldisc = NULL; | ||
700 | } | ||
701 | } | 699 | } |
702 | return retval; | 700 | return retval; |
703 | } | 701 | } |
@@ -752,8 +750,9 @@ void tty_ldisc_hangup(struct tty_struct *tty, bool reinit) | |||
752 | 750 | ||
753 | if (tty->ldisc) { | 751 | if (tty->ldisc) { |
754 | if (reinit) { | 752 | if (reinit) { |
755 | if (tty_ldisc_reinit(tty, tty->termios.c_line) < 0) | 753 | if (tty_ldisc_reinit(tty, tty->termios.c_line) < 0 && |
756 | tty_ldisc_reinit(tty, N_TTY); | 754 | tty_ldisc_reinit(tty, N_TTY) < 0) |
755 | WARN_ON(tty_ldisc_reinit(tty, N_NULL) < 0); | ||
757 | } else | 756 | } else |
758 | tty_ldisc_kill(tty); | 757 | tty_ldisc_kill(tty); |
759 | } | 758 | } |
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 5e056064259c..18c923a4c16e 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c | |||
@@ -1832,6 +1832,9 @@ static const struct usb_device_id acm_ids[] = { | |||
1832 | { USB_DEVICE(0xfff0, 0x0100), /* DATECS FP-2000 */ | 1832 | { USB_DEVICE(0xfff0, 0x0100), /* DATECS FP-2000 */ |
1833 | .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */ | 1833 | .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */ |
1834 | }, | 1834 | }, |
1835 | { USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */ | ||
1836 | .driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */ | ||
1837 | }, | ||
1835 | 1838 | ||
1836 | { USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */ | 1839 | { USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */ |
1837 | .driver_info = CLEAR_HALT_CONDITIONS, | 1840 | .driver_info = CLEAR_HALT_CONDITIONS, |
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 68b54bd88d1e..883549ee946c 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c | |||
@@ -960,10 +960,12 @@ int usb_get_bos_descriptor(struct usb_device *dev) | |||
960 | for (i = 0; i < num; i++) { | 960 | for (i = 0; i < num; i++) { |
961 | buffer += length; | 961 | buffer += length; |
962 | cap = (struct usb_dev_cap_header *)buffer; | 962 | cap = (struct usb_dev_cap_header *)buffer; |
963 | length = cap->bLength; | ||
964 | 963 | ||
965 | if (total_len < length) | 964 | if (total_len < sizeof(*cap) || total_len < cap->bLength) { |
965 | dev->bos->desc->bNumDeviceCaps = i; | ||
966 | break; | 966 | break; |
967 | } | ||
968 | length = cap->bLength; | ||
967 | total_len -= length; | 969 | total_len -= length; |
968 | 970 | ||
969 | if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) { | 971 | if (cap->bDescriptorType != USB_DT_DEVICE_CAPABILITY) { |
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 4664e543cf2f..e9326f31db8d 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c | |||
@@ -1576,11 +1576,7 @@ static int proc_do_submiturb(struct usb_dev_state *ps, struct usbdevfs_urb *uurb | |||
1576 | totlen += isopkt[u].length; | 1576 | totlen += isopkt[u].length; |
1577 | } | 1577 | } |
1578 | u *= sizeof(struct usb_iso_packet_descriptor); | 1578 | u *= sizeof(struct usb_iso_packet_descriptor); |
1579 | if (totlen <= uurb->buffer_length) | 1579 | uurb->buffer_length = totlen; |
1580 | uurb->buffer_length = totlen; | ||
1581 | else | ||
1582 | WARN_ONCE(1, "uurb->buffer_length is too short %d vs %d", | ||
1583 | totlen, uurb->buffer_length); | ||
1584 | break; | 1580 | break; |
1585 | 1581 | ||
1586 | default: | 1582 | default: |
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index b5c733613823..e9ce6bb0b22d 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c | |||
@@ -2710,13 +2710,16 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1, | |||
2710 | if (!(portstatus & USB_PORT_STAT_CONNECTION)) | 2710 | if (!(portstatus & USB_PORT_STAT_CONNECTION)) |
2711 | return -ENOTCONN; | 2711 | return -ENOTCONN; |
2712 | 2712 | ||
2713 | /* bomb out completely if the connection bounced. A USB 3.0 | 2713 | /* Retry if connect change is set but status is still connected. |
2714 | * connection may bounce if multiple warm resets were issued, | 2714 | * A USB 3.0 connection may bounce if multiple warm resets were issued, |
2715 | * but the device may have successfully re-connected. Ignore it. | 2715 | * but the device may have successfully re-connected. Ignore it. |
2716 | */ | 2716 | */ |
2717 | if (!hub_is_superspeed(hub->hdev) && | 2717 | if (!hub_is_superspeed(hub->hdev) && |
2718 | (portchange & USB_PORT_STAT_C_CONNECTION)) | 2718 | (portchange & USB_PORT_STAT_C_CONNECTION)) { |
2719 | return -ENOTCONN; | 2719 | usb_clear_port_feature(hub->hdev, port1, |
2720 | USB_PORT_FEAT_C_CONNECTION); | ||
2721 | return -EAGAIN; | ||
2722 | } | ||
2720 | 2723 | ||
2721 | if (!(portstatus & USB_PORT_STAT_ENABLE)) | 2724 | if (!(portstatus & USB_PORT_STAT_ENABLE)) |
2722 | return -EBUSY; | 2725 | return -EBUSY; |
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 82806e311202..a6aaf2f193a4 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c | |||
@@ -221,6 +221,10 @@ static const struct usb_device_id usb_quirk_list[] = { | |||
221 | /* Corsair Strafe RGB */ | 221 | /* Corsair Strafe RGB */ |
222 | { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT }, | 222 | { USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT }, |
223 | 223 | ||
224 | /* MIDI keyboard WORLDE MINI */ | ||
225 | { USB_DEVICE(0x1c75, 0x0204), .driver_info = | ||
226 | USB_QUIRK_CONFIG_INTF_STRINGS }, | ||
227 | |||
224 | /* Acer C120 LED Projector */ | 228 | /* Acer C120 LED Projector */ |
225 | { USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM }, | 229 | { USB_DEVICE(0x1de1, 0xc102), .driver_info = USB_QUIRK_NO_LPM }, |
226 | 230 | ||
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index dd74c99d6ce1..5d061b3d8224 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c | |||
@@ -2026,6 +2026,8 @@ static DEVICE_ATTR_RO(suspended); | |||
2026 | static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver) | 2026 | static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver) |
2027 | { | 2027 | { |
2028 | struct usb_composite_dev *cdev = get_gadget_data(gadget); | 2028 | struct usb_composite_dev *cdev = get_gadget_data(gadget); |
2029 | struct usb_gadget_strings *gstr = cdev->driver->strings[0]; | ||
2030 | struct usb_string *dev_str = gstr->strings; | ||
2029 | 2031 | ||
2030 | /* composite_disconnect() must already have been called | 2032 | /* composite_disconnect() must already have been called |
2031 | * by the underlying peripheral controller driver! | 2033 | * by the underlying peripheral controller driver! |
@@ -2045,6 +2047,9 @@ static void __composite_unbind(struct usb_gadget *gadget, bool unbind_driver) | |||
2045 | 2047 | ||
2046 | composite_dev_cleanup(cdev); | 2048 | composite_dev_cleanup(cdev); |
2047 | 2049 | ||
2050 | if (dev_str[USB_GADGET_MANUFACTURER_IDX].s == cdev->def_manufacturer) | ||
2051 | dev_str[USB_GADGET_MANUFACTURER_IDX].s = ""; | ||
2052 | |||
2048 | kfree(cdev->def_manufacturer); | 2053 | kfree(cdev->def_manufacturer); |
2049 | kfree(cdev); | 2054 | kfree(cdev); |
2050 | set_gadget_data(gadget, NULL); | 2055 | set_gadget_data(gadget, NULL); |
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c index a22a892de7b7..aeb9f3c40521 100644 --- a/drivers/usb/gadget/configfs.c +++ b/drivers/usb/gadget/configfs.c | |||
@@ -1143,11 +1143,12 @@ static struct configfs_attribute *interf_grp_attrs[] = { | |||
1143 | NULL | 1143 | NULL |
1144 | }; | 1144 | }; |
1145 | 1145 | ||
1146 | int usb_os_desc_prepare_interf_dir(struct config_group *parent, | 1146 | struct config_group *usb_os_desc_prepare_interf_dir( |
1147 | int n_interf, | 1147 | struct config_group *parent, |
1148 | struct usb_os_desc **desc, | 1148 | int n_interf, |
1149 | char **names, | 1149 | struct usb_os_desc **desc, |
1150 | struct module *owner) | 1150 | char **names, |
1151 | struct module *owner) | ||
1151 | { | 1152 | { |
1152 | struct config_group *os_desc_group; | 1153 | struct config_group *os_desc_group; |
1153 | struct config_item_type *os_desc_type, *interface_type; | 1154 | struct config_item_type *os_desc_type, *interface_type; |
@@ -1159,7 +1160,7 @@ int usb_os_desc_prepare_interf_dir(struct config_group *parent, | |||
1159 | 1160 | ||
1160 | char *vlabuf = kzalloc(vla_group_size(data_chunk), GFP_KERNEL); | 1161 | char *vlabuf = kzalloc(vla_group_size(data_chunk), GFP_KERNEL); |
1161 | if (!vlabuf) | 1162 | if (!vlabuf) |
1162 | return -ENOMEM; | 1163 | return ERR_PTR(-ENOMEM); |
1163 | 1164 | ||
1164 | os_desc_group = vla_ptr(vlabuf, data_chunk, os_desc_group); | 1165 | os_desc_group = vla_ptr(vlabuf, data_chunk, os_desc_group); |
1165 | os_desc_type = vla_ptr(vlabuf, data_chunk, os_desc_type); | 1166 | os_desc_type = vla_ptr(vlabuf, data_chunk, os_desc_type); |
@@ -1184,7 +1185,7 @@ int usb_os_desc_prepare_interf_dir(struct config_group *parent, | |||
1184 | configfs_add_default_group(&d->group, os_desc_group); | 1185 | configfs_add_default_group(&d->group, os_desc_group); |
1185 | } | 1186 | } |
1186 | 1187 | ||
1187 | return 0; | 1188 | return os_desc_group; |
1188 | } | 1189 | } |
1189 | EXPORT_SYMBOL(usb_os_desc_prepare_interf_dir); | 1190 | EXPORT_SYMBOL(usb_os_desc_prepare_interf_dir); |
1190 | 1191 | ||
diff --git a/drivers/usb/gadget/configfs.h b/drivers/usb/gadget/configfs.h index 36c468c4f5e9..540d5e92ed22 100644 --- a/drivers/usb/gadget/configfs.h +++ b/drivers/usb/gadget/configfs.h | |||
@@ -5,11 +5,12 @@ | |||
5 | 5 | ||
6 | void unregister_gadget_item(struct config_item *item); | 6 | void unregister_gadget_item(struct config_item *item); |
7 | 7 | ||
8 | int usb_os_desc_prepare_interf_dir(struct config_group *parent, | 8 | struct config_group *usb_os_desc_prepare_interf_dir( |
9 | int n_interf, | 9 | struct config_group *parent, |
10 | struct usb_os_desc **desc, | 10 | int n_interf, |
11 | char **names, | 11 | struct usb_os_desc **desc, |
12 | struct module *owner); | 12 | char **names, |
13 | struct module *owner); | ||
13 | 14 | ||
14 | static inline struct usb_os_desc *to_usb_os_desc(struct config_item *item) | 15 | static inline struct usb_os_desc *to_usb_os_desc(struct config_item *item) |
15 | { | 16 | { |
diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c index e1d5853ef1e4..c7c5b3ce1d98 100644 --- a/drivers/usb/gadget/function/f_rndis.c +++ b/drivers/usb/gadget/function/f_rndis.c | |||
@@ -908,6 +908,7 @@ static void rndis_free_inst(struct usb_function_instance *f) | |||
908 | free_netdev(opts->net); | 908 | free_netdev(opts->net); |
909 | } | 909 | } |
910 | 910 | ||
911 | kfree(opts->rndis_interf_group); /* single VLA chunk */ | ||
911 | kfree(opts); | 912 | kfree(opts); |
912 | } | 913 | } |
913 | 914 | ||
@@ -916,6 +917,7 @@ static struct usb_function_instance *rndis_alloc_inst(void) | |||
916 | struct f_rndis_opts *opts; | 917 | struct f_rndis_opts *opts; |
917 | struct usb_os_desc *descs[1]; | 918 | struct usb_os_desc *descs[1]; |
918 | char *names[1]; | 919 | char *names[1]; |
920 | struct config_group *rndis_interf_group; | ||
919 | 921 | ||
920 | opts = kzalloc(sizeof(*opts), GFP_KERNEL); | 922 | opts = kzalloc(sizeof(*opts), GFP_KERNEL); |
921 | if (!opts) | 923 | if (!opts) |
@@ -940,8 +942,14 @@ static struct usb_function_instance *rndis_alloc_inst(void) | |||
940 | names[0] = "rndis"; | 942 | names[0] = "rndis"; |
941 | config_group_init_type_name(&opts->func_inst.group, "", | 943 | config_group_init_type_name(&opts->func_inst.group, "", |
942 | &rndis_func_type); | 944 | &rndis_func_type); |
943 | usb_os_desc_prepare_interf_dir(&opts->func_inst.group, 1, descs, | 945 | rndis_interf_group = |
944 | names, THIS_MODULE); | 946 | usb_os_desc_prepare_interf_dir(&opts->func_inst.group, 1, descs, |
947 | names, THIS_MODULE); | ||
948 | if (IS_ERR(rndis_interf_group)) { | ||
949 | rndis_free_inst(&opts->func_inst); | ||
950 | return ERR_CAST(rndis_interf_group); | ||
951 | } | ||
952 | opts->rndis_interf_group = rndis_interf_group; | ||
945 | 953 | ||
946 | return &opts->func_inst; | 954 | return &opts->func_inst; |
947 | } | 955 | } |
diff --git a/drivers/usb/gadget/function/u_rndis.h b/drivers/usb/gadget/function/u_rndis.h index a35ee3c2545d..efdb7ac381d9 100644 --- a/drivers/usb/gadget/function/u_rndis.h +++ b/drivers/usb/gadget/function/u_rndis.h | |||
@@ -26,6 +26,7 @@ struct f_rndis_opts { | |||
26 | bool bound; | 26 | bool bound; |
27 | bool borrowed_net; | 27 | bool borrowed_net; |
28 | 28 | ||
29 | struct config_group *rndis_interf_group; | ||
29 | struct usb_os_desc rndis_os_desc; | 30 | struct usb_os_desc rndis_os_desc; |
30 | char rndis_ext_compat_id[16]; | 31 | char rndis_ext_compat_id[16]; |
31 | 32 | ||
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c index b17618a55f1b..f04e91ef9e7c 100644 --- a/drivers/usb/gadget/udc/dummy_hcd.c +++ b/drivers/usb/gadget/udc/dummy_hcd.c | |||
@@ -419,6 +419,7 @@ static void set_link_state_by_speed(struct dummy_hcd *dum_hcd) | |||
419 | static void set_link_state(struct dummy_hcd *dum_hcd) | 419 | static void set_link_state(struct dummy_hcd *dum_hcd) |
420 | { | 420 | { |
421 | struct dummy *dum = dum_hcd->dum; | 421 | struct dummy *dum = dum_hcd->dum; |
422 | unsigned int power_bit; | ||
422 | 423 | ||
423 | dum_hcd->active = 0; | 424 | dum_hcd->active = 0; |
424 | if (dum->pullup) | 425 | if (dum->pullup) |
@@ -429,17 +430,19 @@ static void set_link_state(struct dummy_hcd *dum_hcd) | |||
429 | return; | 430 | return; |
430 | 431 | ||
431 | set_link_state_by_speed(dum_hcd); | 432 | set_link_state_by_speed(dum_hcd); |
433 | power_bit = (dummy_hcd_to_hcd(dum_hcd)->speed == HCD_USB3 ? | ||
434 | USB_SS_PORT_STAT_POWER : USB_PORT_STAT_POWER); | ||
432 | 435 | ||
433 | if ((dum_hcd->port_status & USB_PORT_STAT_ENABLE) == 0 || | 436 | if ((dum_hcd->port_status & USB_PORT_STAT_ENABLE) == 0 || |
434 | dum_hcd->active) | 437 | dum_hcd->active) |
435 | dum_hcd->resuming = 0; | 438 | dum_hcd->resuming = 0; |
436 | 439 | ||
437 | /* Currently !connected or in reset */ | 440 | /* Currently !connected or in reset */ |
438 | if ((dum_hcd->port_status & USB_PORT_STAT_CONNECTION) == 0 || | 441 | if ((dum_hcd->port_status & power_bit) == 0 || |
439 | (dum_hcd->port_status & USB_PORT_STAT_RESET) != 0) { | 442 | (dum_hcd->port_status & USB_PORT_STAT_RESET) != 0) { |
440 | unsigned disconnect = USB_PORT_STAT_CONNECTION & | 443 | unsigned int disconnect = power_bit & |
441 | dum_hcd->old_status & (~dum_hcd->port_status); | 444 | dum_hcd->old_status & (~dum_hcd->port_status); |
442 | unsigned reset = USB_PORT_STAT_RESET & | 445 | unsigned int reset = USB_PORT_STAT_RESET & |
443 | (~dum_hcd->old_status) & dum_hcd->port_status; | 446 | (~dum_hcd->old_status) & dum_hcd->port_status; |
444 | 447 | ||
445 | /* Report reset and disconnect events to the driver */ | 448 | /* Report reset and disconnect events to the driver */ |
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index da9158f171cb..a2336deb5e36 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c | |||
@@ -420,14 +420,25 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend) | |||
420 | GFP_NOWAIT); | 420 | GFP_NOWAIT); |
421 | if (!command) { | 421 | if (!command) { |
422 | spin_unlock_irqrestore(&xhci->lock, flags); | 422 | spin_unlock_irqrestore(&xhci->lock, flags); |
423 | xhci_free_command(xhci, cmd); | 423 | ret = -ENOMEM; |
424 | return -ENOMEM; | 424 | goto cmd_cleanup; |
425 | } | ||
426 | |||
427 | ret = xhci_queue_stop_endpoint(xhci, command, slot_id, | ||
428 | i, suspend); | ||
429 | if (ret) { | ||
430 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
431 | xhci_free_command(xhci, command); | ||
432 | goto cmd_cleanup; | ||
425 | } | 433 | } |
426 | xhci_queue_stop_endpoint(xhci, command, slot_id, i, | ||
427 | suspend); | ||
428 | } | 434 | } |
429 | } | 435 | } |
430 | xhci_queue_stop_endpoint(xhci, cmd, slot_id, 0, suspend); | 436 | ret = xhci_queue_stop_endpoint(xhci, cmd, slot_id, 0, suspend); |
437 | if (ret) { | ||
438 | spin_unlock_irqrestore(&xhci->lock, flags); | ||
439 | goto cmd_cleanup; | ||
440 | } | ||
441 | |||
431 | xhci_ring_cmd_db(xhci); | 442 | xhci_ring_cmd_db(xhci); |
432 | spin_unlock_irqrestore(&xhci->lock, flags); | 443 | spin_unlock_irqrestore(&xhci->lock, flags); |
433 | 444 | ||
@@ -439,6 +450,8 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend) | |||
439 | xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n"); | 450 | xhci_warn(xhci, "Timeout while waiting for stop endpoint command\n"); |
440 | ret = -ETIME; | 451 | ret = -ETIME; |
441 | } | 452 | } |
453 | |||
454 | cmd_cleanup: | ||
442 | xhci_free_command(xhci, cmd); | 455 | xhci_free_command(xhci, cmd); |
443 | return ret; | 456 | return ret; |
444 | } | 457 | } |
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index a9443651ce0f..82c746e2d85c 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
@@ -1309,6 +1309,7 @@ static void xhci_complete_del_and_free_cmd(struct xhci_command *cmd, u32 status) | |||
1309 | void xhci_cleanup_command_queue(struct xhci_hcd *xhci) | 1309 | void xhci_cleanup_command_queue(struct xhci_hcd *xhci) |
1310 | { | 1310 | { |
1311 | struct xhci_command *cur_cmd, *tmp_cmd; | 1311 | struct xhci_command *cur_cmd, *tmp_cmd; |
1312 | xhci->current_cmd = NULL; | ||
1312 | list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list) | 1313 | list_for_each_entry_safe(cur_cmd, tmp_cmd, &xhci->cmd_list, cmd_list) |
1313 | xhci_complete_del_and_free_cmd(cur_cmd, COMP_COMMAND_ABORTED); | 1314 | xhci_complete_del_and_free_cmd(cur_cmd, COMP_COMMAND_ABORTED); |
1314 | } | 1315 | } |
@@ -2579,15 +2580,21 @@ static int handle_tx_event(struct xhci_hcd *xhci, | |||
2579 | (struct xhci_generic_trb *) ep_trb); | 2580 | (struct xhci_generic_trb *) ep_trb); |
2580 | 2581 | ||
2581 | /* | 2582 | /* |
2582 | * No-op TRB should not trigger interrupts. | 2583 | * No-op TRB could trigger interrupts in a case where |
2583 | * If ep_trb is a no-op TRB, it means the | 2584 | * a URB was killed and a STALL_ERROR happens right |
2584 | * corresponding TD has been cancelled. Just ignore | 2585 | * after the endpoint ring stopped. Reset the halted |
2585 | * the TD. | 2586 | * endpoint. Otherwise, the endpoint remains stalled |
2587 | * indefinitely. | ||
2586 | */ | 2588 | */ |
2587 | if (trb_is_noop(ep_trb)) { | 2589 | if (trb_is_noop(ep_trb)) { |
2588 | xhci_dbg(xhci, | 2590 | if (trb_comp_code == COMP_STALL_ERROR || |
2589 | "ep_trb is a no-op TRB. Skip it for slot %u ep %u\n", | 2591 | xhci_requires_manual_halt_cleanup(xhci, ep_ctx, |
2590 | slot_id, ep_index); | 2592 | trb_comp_code)) |
2593 | xhci_cleanup_halted_endpoint(xhci, slot_id, | ||
2594 | ep_index, | ||
2595 | ep_ring->stream_id, | ||
2596 | td, ep_trb, | ||
2597 | EP_HARD_RESET); | ||
2591 | goto cleanup; | 2598 | goto cleanup; |
2592 | } | 2599 | } |
2593 | 2600 | ||
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index ee198ea47f49..51535ba2bcd4 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
@@ -4805,7 +4805,8 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) | |||
4805 | */ | 4805 | */ |
4806 | hcd->has_tt = 1; | 4806 | hcd->has_tt = 1; |
4807 | } else { | 4807 | } else { |
4808 | if (xhci->sbrn == 0x31) { | 4808 | /* Some 3.1 hosts return sbrn 0x30, can't rely on sbrn alone */ |
4809 | if (xhci->sbrn == 0x31 || xhci->usb3_rhub.min_rev >= 1) { | ||
4809 | xhci_info(xhci, "Host supports USB 3.1 Enhanced SuperSpeed\n"); | 4810 | xhci_info(xhci, "Host supports USB 3.1 Enhanced SuperSpeed\n"); |
4810 | hcd->speed = HCD_USB31; | 4811 | hcd->speed = HCD_USB31; |
4811 | hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS; | 4812 | hcd->self.root_hub->speed = USB_SPEED_SUPER_PLUS; |
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c index eee82ca55b7b..b3fc602b2e24 100644 --- a/drivers/usb/misc/usbtest.c +++ b/drivers/usb/misc/usbtest.c | |||
@@ -202,12 +202,13 @@ found: | |||
202 | return tmp; | 202 | return tmp; |
203 | } | 203 | } |
204 | 204 | ||
205 | if (in) { | 205 | if (in) |
206 | dev->in_pipe = usb_rcvbulkpipe(udev, | 206 | dev->in_pipe = usb_rcvbulkpipe(udev, |
207 | in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); | 207 | in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); |
208 | if (out) | ||
208 | dev->out_pipe = usb_sndbulkpipe(udev, | 209 | dev->out_pipe = usb_sndbulkpipe(udev, |
209 | out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); | 210 | out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); |
210 | } | 211 | |
211 | if (iso_in) { | 212 | if (iso_in) { |
212 | dev->iso_in = &iso_in->desc; | 213 | dev->iso_in = &iso_in->desc; |
213 | dev->in_iso_pipe = usb_rcvisocpipe(udev, | 214 | dev->in_iso_pipe = usb_rcvisocpipe(udev, |
@@ -1964,6 +1965,9 @@ test_queue(struct usbtest_dev *dev, struct usbtest_param_32 *param, | |||
1964 | int status = 0; | 1965 | int status = 0; |
1965 | struct urb *urbs[param->sglen]; | 1966 | struct urb *urbs[param->sglen]; |
1966 | 1967 | ||
1968 | if (!param->sglen || param->iterations > UINT_MAX / param->sglen) | ||
1969 | return -EINVAL; | ||
1970 | |||
1967 | memset(&context, 0, sizeof(context)); | 1971 | memset(&context, 0, sizeof(context)); |
1968 | context.count = param->iterations * param->sglen; | 1972 | context.count = param->iterations * param->sglen; |
1969 | context.dev = dev; | 1973 | context.dev = dev; |
@@ -2087,6 +2091,8 @@ usbtest_do_ioctl(struct usb_interface *intf, struct usbtest_param_32 *param) | |||
2087 | 2091 | ||
2088 | if (param->iterations <= 0) | 2092 | if (param->iterations <= 0) |
2089 | return -EINVAL; | 2093 | return -EINVAL; |
2094 | if (param->sglen > MAX_SGLEN) | ||
2095 | return -EINVAL; | ||
2090 | /* | 2096 | /* |
2091 | * Just a bunch of test cases that every HCD is expected to handle. | 2097 | * Just a bunch of test cases that every HCD is expected to handle. |
2092 | * | 2098 | * |
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 029692053dd3..ff5a1a8989d5 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c | |||
@@ -906,7 +906,7 @@ b_host: | |||
906 | */ | 906 | */ |
907 | if (int_usb & MUSB_INTR_RESET) { | 907 | if (int_usb & MUSB_INTR_RESET) { |
908 | handled = IRQ_HANDLED; | 908 | handled = IRQ_HANDLED; |
909 | if (devctl & MUSB_DEVCTL_HM) { | 909 | if (is_host_active(musb)) { |
910 | /* | 910 | /* |
911 | * When BABBLE happens what we can depends on which | 911 | * When BABBLE happens what we can depends on which |
912 | * platform MUSB is running, because some platforms | 912 | * platform MUSB is running, because some platforms |
@@ -916,9 +916,7 @@ b_host: | |||
916 | * drop the session. | 916 | * drop the session. |
917 | */ | 917 | */ |
918 | dev_err(musb->controller, "Babble\n"); | 918 | dev_err(musb->controller, "Babble\n"); |
919 | 919 | musb_recover_from_babble(musb); | |
920 | if (is_host_active(musb)) | ||
921 | musb_recover_from_babble(musb); | ||
922 | } else { | 920 | } else { |
923 | musb_dbg(musb, "BUS RESET as %s", | 921 | musb_dbg(musb, "BUS RESET as %s", |
924 | usb_otg_state_string(musb->xceiv->otg->state)); | 922 | usb_otg_state_string(musb->xceiv->otg->state)); |
@@ -1861,22 +1859,22 @@ static void musb_pm_runtime_check_session(struct musb *musb) | |||
1861 | MUSB_DEVCTL_HR; | 1859 | MUSB_DEVCTL_HR; |
1862 | switch (devctl & ~s) { | 1860 | switch (devctl & ~s) { |
1863 | case MUSB_QUIRK_B_INVALID_VBUS_91: | 1861 | case MUSB_QUIRK_B_INVALID_VBUS_91: |
1864 | if (musb->quirk_retries--) { | 1862 | if (musb->quirk_retries && !musb->flush_irq_work) { |
1865 | musb_dbg(musb, | 1863 | musb_dbg(musb, |
1866 | "Poll devctl on invalid vbus, assume no session"); | 1864 | "Poll devctl on invalid vbus, assume no session"); |
1867 | schedule_delayed_work(&musb->irq_work, | 1865 | schedule_delayed_work(&musb->irq_work, |
1868 | msecs_to_jiffies(1000)); | 1866 | msecs_to_jiffies(1000)); |
1869 | 1867 | musb->quirk_retries--; | |
1870 | return; | 1868 | return; |
1871 | } | 1869 | } |
1872 | /* fall through */ | 1870 | /* fall through */ |
1873 | case MUSB_QUIRK_A_DISCONNECT_19: | 1871 | case MUSB_QUIRK_A_DISCONNECT_19: |
1874 | if (musb->quirk_retries--) { | 1872 | if (musb->quirk_retries && !musb->flush_irq_work) { |
1875 | musb_dbg(musb, | 1873 | musb_dbg(musb, |
1876 | "Poll devctl on possible host mode disconnect"); | 1874 | "Poll devctl on possible host mode disconnect"); |
1877 | schedule_delayed_work(&musb->irq_work, | 1875 | schedule_delayed_work(&musb->irq_work, |
1878 | msecs_to_jiffies(1000)); | 1876 | msecs_to_jiffies(1000)); |
1879 | 1877 | musb->quirk_retries--; | |
1880 | return; | 1878 | return; |
1881 | } | 1879 | } |
1882 | if (!musb->session) | 1880 | if (!musb->session) |
@@ -2681,8 +2679,15 @@ static int musb_suspend(struct device *dev) | |||
2681 | 2679 | ||
2682 | musb_platform_disable(musb); | 2680 | musb_platform_disable(musb); |
2683 | musb_disable_interrupts(musb); | 2681 | musb_disable_interrupts(musb); |
2682 | |||
2683 | musb->flush_irq_work = true; | ||
2684 | while (flush_delayed_work(&musb->irq_work)) | ||
2685 | ; | ||
2686 | musb->flush_irq_work = false; | ||
2687 | |||
2684 | if (!(musb->io.quirks & MUSB_PRESERVE_SESSION)) | 2688 | if (!(musb->io.quirks & MUSB_PRESERVE_SESSION)) |
2685 | musb_writeb(musb->mregs, MUSB_DEVCTL, 0); | 2689 | musb_writeb(musb->mregs, MUSB_DEVCTL, 0); |
2690 | |||
2686 | WARN_ON(!list_empty(&musb->pending_list)); | 2691 | WARN_ON(!list_empty(&musb->pending_list)); |
2687 | 2692 | ||
2688 | spin_lock_irqsave(&musb->lock, flags); | 2693 | spin_lock_irqsave(&musb->lock, flags); |
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h index c748f4ac1154..20f4614178d9 100644 --- a/drivers/usb/musb/musb_core.h +++ b/drivers/usb/musb/musb_core.h | |||
@@ -428,6 +428,8 @@ struct musb { | |||
428 | unsigned test_mode:1; | 428 | unsigned test_mode:1; |
429 | unsigned softconnect:1; | 429 | unsigned softconnect:1; |
430 | 430 | ||
431 | unsigned flush_irq_work:1; | ||
432 | |||
431 | u8 address; | 433 | u8 address; |
432 | u8 test_mode_nr; | 434 | u8 test_mode_nr; |
433 | u16 ackpend; /* ep0 */ | 435 | u16 ackpend; /* ep0 */ |
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c index ba255280a624..1ec0a4947b6b 100644 --- a/drivers/usb/musb/musb_cppi41.c +++ b/drivers/usb/musb/musb_cppi41.c | |||
@@ -26,15 +26,28 @@ | |||
26 | 26 | ||
27 | #define MUSB_DMA_NUM_CHANNELS 15 | 27 | #define MUSB_DMA_NUM_CHANNELS 15 |
28 | 28 | ||
29 | #define DA8XX_USB_MODE 0x10 | ||
30 | #define DA8XX_USB_AUTOREQ 0x14 | ||
31 | #define DA8XX_USB_TEARDOWN 0x1c | ||
32 | |||
33 | #define DA8XX_DMA_NUM_CHANNELS 4 | ||
34 | |||
29 | struct cppi41_dma_controller { | 35 | struct cppi41_dma_controller { |
30 | struct dma_controller controller; | 36 | struct dma_controller controller; |
31 | struct cppi41_dma_channel rx_channel[MUSB_DMA_NUM_CHANNELS]; | 37 | struct cppi41_dma_channel *rx_channel; |
32 | struct cppi41_dma_channel tx_channel[MUSB_DMA_NUM_CHANNELS]; | 38 | struct cppi41_dma_channel *tx_channel; |
33 | struct hrtimer early_tx; | 39 | struct hrtimer early_tx; |
34 | struct list_head early_tx_list; | 40 | struct list_head early_tx_list; |
35 | u32 rx_mode; | 41 | u32 rx_mode; |
36 | u32 tx_mode; | 42 | u32 tx_mode; |
37 | u32 auto_req; | 43 | u32 auto_req; |
44 | |||
45 | u32 tdown_reg; | ||
46 | u32 autoreq_reg; | ||
47 | |||
48 | void (*set_dma_mode)(struct cppi41_dma_channel *cppi41_channel, | ||
49 | unsigned int mode); | ||
50 | u8 num_channels; | ||
38 | }; | 51 | }; |
39 | 52 | ||
40 | static void save_rx_toggle(struct cppi41_dma_channel *cppi41_channel) | 53 | static void save_rx_toggle(struct cppi41_dma_channel *cppi41_channel) |
@@ -349,6 +362,32 @@ static void cppi41_set_dma_mode(struct cppi41_dma_channel *cppi41_channel, | |||
349 | } | 362 | } |
350 | } | 363 | } |
351 | 364 | ||
365 | static void da8xx_set_dma_mode(struct cppi41_dma_channel *cppi41_channel, | ||
366 | unsigned int mode) | ||
367 | { | ||
368 | struct cppi41_dma_controller *controller = cppi41_channel->controller; | ||
369 | struct musb *musb = controller->controller.musb; | ||
370 | unsigned int shift; | ||
371 | u32 port; | ||
372 | u32 new_mode; | ||
373 | u32 old_mode; | ||
374 | |||
375 | old_mode = controller->tx_mode; | ||
376 | port = cppi41_channel->port_num; | ||
377 | |||
378 | shift = (port - 1) * 4; | ||
379 | if (!cppi41_channel->is_tx) | ||
380 | shift += 16; | ||
381 | new_mode = old_mode & ~(3 << shift); | ||
382 | new_mode |= mode << shift; | ||
383 | |||
384 | if (new_mode == old_mode) | ||
385 | return; | ||
386 | controller->tx_mode = new_mode; | ||
387 | musb_writel(musb->ctrl_base, DA8XX_USB_MODE, new_mode); | ||
388 | } | ||
389 | |||
390 | |||
352 | static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel, | 391 | static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel, |
353 | unsigned mode) | 392 | unsigned mode) |
354 | { | 393 | { |
@@ -364,8 +403,8 @@ static void cppi41_set_autoreq_mode(struct cppi41_dma_channel *cppi41_channel, | |||
364 | if (new_mode == old_mode) | 403 | if (new_mode == old_mode) |
365 | return; | 404 | return; |
366 | controller->auto_req = new_mode; | 405 | controller->auto_req = new_mode; |
367 | musb_writel(controller->controller.musb->ctrl_base, USB_CTRL_AUTOREQ, | 406 | musb_writel(controller->controller.musb->ctrl_base, |
368 | new_mode); | 407 | controller->autoreq_reg, new_mode); |
369 | } | 408 | } |
370 | 409 | ||
371 | static bool cppi41_configure_channel(struct dma_channel *channel, | 410 | static bool cppi41_configure_channel(struct dma_channel *channel, |
@@ -373,6 +412,7 @@ static bool cppi41_configure_channel(struct dma_channel *channel, | |||
373 | dma_addr_t dma_addr, u32 len) | 412 | dma_addr_t dma_addr, u32 len) |
374 | { | 413 | { |
375 | struct cppi41_dma_channel *cppi41_channel = channel->private_data; | 414 | struct cppi41_dma_channel *cppi41_channel = channel->private_data; |
415 | struct cppi41_dma_controller *controller = cppi41_channel->controller; | ||
376 | struct dma_chan *dc = cppi41_channel->dc; | 416 | struct dma_chan *dc = cppi41_channel->dc; |
377 | struct dma_async_tx_descriptor *dma_desc; | 417 | struct dma_async_tx_descriptor *dma_desc; |
378 | enum dma_transfer_direction direction; | 418 | enum dma_transfer_direction direction; |
@@ -398,7 +438,7 @@ static bool cppi41_configure_channel(struct dma_channel *channel, | |||
398 | musb_writel(musb->ctrl_base, | 438 | musb_writel(musb->ctrl_base, |
399 | RNDIS_REG(cppi41_channel->port_num), len); | 439 | RNDIS_REG(cppi41_channel->port_num), len); |
400 | /* gen rndis */ | 440 | /* gen rndis */ |
401 | cppi41_set_dma_mode(cppi41_channel, | 441 | controller->set_dma_mode(cppi41_channel, |
402 | EP_MODE_DMA_GEN_RNDIS); | 442 | EP_MODE_DMA_GEN_RNDIS); |
403 | 443 | ||
404 | /* auto req */ | 444 | /* auto req */ |
@@ -407,14 +447,15 @@ static bool cppi41_configure_channel(struct dma_channel *channel, | |||
407 | } else { | 447 | } else { |
408 | musb_writel(musb->ctrl_base, | 448 | musb_writel(musb->ctrl_base, |
409 | RNDIS_REG(cppi41_channel->port_num), 0); | 449 | RNDIS_REG(cppi41_channel->port_num), 0); |
410 | cppi41_set_dma_mode(cppi41_channel, | 450 | controller->set_dma_mode(cppi41_channel, |
411 | EP_MODE_DMA_TRANSPARENT); | 451 | EP_MODE_DMA_TRANSPARENT); |
412 | cppi41_set_autoreq_mode(cppi41_channel, | 452 | cppi41_set_autoreq_mode(cppi41_channel, |
413 | EP_MODE_AUTOREQ_NONE); | 453 | EP_MODE_AUTOREQ_NONE); |
414 | } | 454 | } |
415 | } else { | 455 | } else { |
416 | /* fallback mode */ | 456 | /* fallback mode */ |
417 | cppi41_set_dma_mode(cppi41_channel, EP_MODE_DMA_TRANSPARENT); | 457 | controller->set_dma_mode(cppi41_channel, |
458 | EP_MODE_DMA_TRANSPARENT); | ||
418 | cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE); | 459 | cppi41_set_autoreq_mode(cppi41_channel, EP_MODE_AUTOREQ_NONE); |
419 | len = min_t(u32, packet_sz, len); | 460 | len = min_t(u32, packet_sz, len); |
420 | } | 461 | } |
@@ -445,7 +486,7 @@ static struct dma_channel *cppi41_dma_channel_allocate(struct dma_controller *c, | |||
445 | struct cppi41_dma_channel *cppi41_channel = NULL; | 486 | struct cppi41_dma_channel *cppi41_channel = NULL; |
446 | u8 ch_num = hw_ep->epnum - 1; | 487 | u8 ch_num = hw_ep->epnum - 1; |
447 | 488 | ||
448 | if (ch_num >= MUSB_DMA_NUM_CHANNELS) | 489 | if (ch_num >= controller->num_channels) |
449 | return NULL; | 490 | return NULL; |
450 | 491 | ||
451 | if (is_tx) | 492 | if (is_tx) |
@@ -581,12 +622,13 @@ static int cppi41_dma_channel_abort(struct dma_channel *channel) | |||
581 | 622 | ||
582 | do { | 623 | do { |
583 | if (is_tx) | 624 | if (is_tx) |
584 | musb_writel(musb->ctrl_base, USB_TDOWN, tdbit); | 625 | musb_writel(musb->ctrl_base, controller->tdown_reg, |
626 | tdbit); | ||
585 | ret = dmaengine_terminate_all(cppi41_channel->dc); | 627 | ret = dmaengine_terminate_all(cppi41_channel->dc); |
586 | } while (ret == -EAGAIN); | 628 | } while (ret == -EAGAIN); |
587 | 629 | ||
588 | if (is_tx) { | 630 | if (is_tx) { |
589 | musb_writel(musb->ctrl_base, USB_TDOWN, tdbit); | 631 | musb_writel(musb->ctrl_base, controller->tdown_reg, tdbit); |
590 | 632 | ||
591 | csr = musb_readw(epio, MUSB_TXCSR); | 633 | csr = musb_readw(epio, MUSB_TXCSR); |
592 | if (csr & MUSB_TXCSR_TXPKTRDY) { | 634 | if (csr & MUSB_TXCSR_TXPKTRDY) { |
@@ -604,7 +646,7 @@ static void cppi41_release_all_dma_chans(struct cppi41_dma_controller *ctrl) | |||
604 | struct dma_chan *dc; | 646 | struct dma_chan *dc; |
605 | int i; | 647 | int i; |
606 | 648 | ||
607 | for (i = 0; i < MUSB_DMA_NUM_CHANNELS; i++) { | 649 | for (i = 0; i < ctrl->num_channels; i++) { |
608 | dc = ctrl->tx_channel[i].dc; | 650 | dc = ctrl->tx_channel[i].dc; |
609 | if (dc) | 651 | if (dc) |
610 | dma_release_channel(dc); | 652 | dma_release_channel(dc); |
@@ -656,7 +698,7 @@ static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller) | |||
656 | goto err; | 698 | goto err; |
657 | 699 | ||
658 | ret = -EINVAL; | 700 | ret = -EINVAL; |
659 | if (port > MUSB_DMA_NUM_CHANNELS || !port) | 701 | if (port > controller->num_channels || !port) |
660 | goto err; | 702 | goto err; |
661 | if (is_tx) | 703 | if (is_tx) |
662 | cppi41_channel = &controller->tx_channel[port - 1]; | 704 | cppi41_channel = &controller->tx_channel[port - 1]; |
@@ -697,6 +739,8 @@ void cppi41_dma_controller_destroy(struct dma_controller *c) | |||
697 | 739 | ||
698 | hrtimer_cancel(&controller->early_tx); | 740 | hrtimer_cancel(&controller->early_tx); |
699 | cppi41_dma_controller_stop(controller); | 741 | cppi41_dma_controller_stop(controller); |
742 | kfree(controller->rx_channel); | ||
743 | kfree(controller->tx_channel); | ||
700 | kfree(controller); | 744 | kfree(controller); |
701 | } | 745 | } |
702 | EXPORT_SYMBOL_GPL(cppi41_dma_controller_destroy); | 746 | EXPORT_SYMBOL_GPL(cppi41_dma_controller_destroy); |
@@ -705,6 +749,7 @@ struct dma_controller * | |||
705 | cppi41_dma_controller_create(struct musb *musb, void __iomem *base) | 749 | cppi41_dma_controller_create(struct musb *musb, void __iomem *base) |
706 | { | 750 | { |
707 | struct cppi41_dma_controller *controller; | 751 | struct cppi41_dma_controller *controller; |
752 | int channel_size; | ||
708 | int ret = 0; | 753 | int ret = 0; |
709 | 754 | ||
710 | if (!musb->controller->parent->of_node) { | 755 | if (!musb->controller->parent->of_node) { |
@@ -727,12 +772,37 @@ cppi41_dma_controller_create(struct musb *musb, void __iomem *base) | |||
727 | controller->controller.is_compatible = cppi41_is_compatible; | 772 | controller->controller.is_compatible = cppi41_is_compatible; |
728 | controller->controller.musb = musb; | 773 | controller->controller.musb = musb; |
729 | 774 | ||
775 | if (musb->io.quirks & MUSB_DA8XX) { | ||
776 | controller->tdown_reg = DA8XX_USB_TEARDOWN; | ||
777 | controller->autoreq_reg = DA8XX_USB_AUTOREQ; | ||
778 | controller->set_dma_mode = da8xx_set_dma_mode; | ||
779 | controller->num_channels = DA8XX_DMA_NUM_CHANNELS; | ||
780 | } else { | ||
781 | controller->tdown_reg = USB_TDOWN; | ||
782 | controller->autoreq_reg = USB_CTRL_AUTOREQ; | ||
783 | controller->set_dma_mode = cppi41_set_dma_mode; | ||
784 | controller->num_channels = MUSB_DMA_NUM_CHANNELS; | ||
785 | } | ||
786 | |||
787 | channel_size = controller->num_channels * | ||
788 | sizeof(struct cppi41_dma_channel); | ||
789 | controller->rx_channel = kzalloc(channel_size, GFP_KERNEL); | ||
790 | if (!controller->rx_channel) | ||
791 | goto rx_channel_alloc_fail; | ||
792 | controller->tx_channel = kzalloc(channel_size, GFP_KERNEL); | ||
793 | if (!controller->tx_channel) | ||
794 | goto tx_channel_alloc_fail; | ||
795 | |||
730 | ret = cppi41_dma_controller_start(controller); | 796 | ret = cppi41_dma_controller_start(controller); |
731 | if (ret) | 797 | if (ret) |
732 | goto plat_get_fail; | 798 | goto plat_get_fail; |
733 | return &controller->controller; | 799 | return &controller->controller; |
734 | 800 | ||
735 | plat_get_fail: | 801 | plat_get_fail: |
802 | kfree(controller->tx_channel); | ||
803 | tx_channel_alloc_fail: | ||
804 | kfree(controller->rx_channel); | ||
805 | rx_channel_alloc_fail: | ||
736 | kfree(controller); | 806 | kfree(controller); |
737 | kzalloc_fail: | 807 | kzalloc_fail: |
738 | if (ret == -EPROBE_DEFER) | 808 | if (ret == -EPROBE_DEFER) |
diff --git a/drivers/usb/musb/sunxi.c b/drivers/usb/musb/sunxi.c index c9a09b5bb6e5..dc353e24d53c 100644 --- a/drivers/usb/musb/sunxi.c +++ b/drivers/usb/musb/sunxi.c | |||
@@ -297,6 +297,8 @@ static int sunxi_musb_exit(struct musb *musb) | |||
297 | if (test_bit(SUNXI_MUSB_FL_HAS_SRAM, &glue->flags)) | 297 | if (test_bit(SUNXI_MUSB_FL_HAS_SRAM, &glue->flags)) |
298 | sunxi_sram_release(musb->controller->parent); | 298 | sunxi_sram_release(musb->controller->parent); |
299 | 299 | ||
300 | devm_usb_put_phy(glue->dev, glue->xceiv); | ||
301 | |||
300 | return 0; | 302 | return 0; |
301 | } | 303 | } |
302 | 304 | ||
diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c index 5fe4a5704bde..ccc2bf5274b4 100644 --- a/drivers/usb/phy/phy-tegra-usb.c +++ b/drivers/usb/phy/phy-tegra-usb.c | |||
@@ -329,6 +329,14 @@ static void utmi_phy_clk_disable(struct tegra_usb_phy *phy) | |||
329 | unsigned long val; | 329 | unsigned long val; |
330 | void __iomem *base = phy->regs; | 330 | void __iomem *base = phy->regs; |
331 | 331 | ||
332 | /* | ||
333 | * The USB driver may have already initiated the phy clock | ||
334 | * disable so wait to see if the clock turns off and if not | ||
335 | * then proceed with gating the clock. | ||
336 | */ | ||
337 | if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID, 0) == 0) | ||
338 | return; | ||
339 | |||
332 | if (phy->is_legacy_phy) { | 340 | if (phy->is_legacy_phy) { |
333 | val = readl(base + USB_SUSP_CTRL); | 341 | val = readl(base + USB_SUSP_CTRL); |
334 | val |= USB_SUSP_SET; | 342 | val |= USB_SUSP_SET; |
@@ -351,6 +359,15 @@ static void utmi_phy_clk_enable(struct tegra_usb_phy *phy) | |||
351 | unsigned long val; | 359 | unsigned long val; |
352 | void __iomem *base = phy->regs; | 360 | void __iomem *base = phy->regs; |
353 | 361 | ||
362 | /* | ||
363 | * The USB driver may have already initiated the phy clock | ||
364 | * enable so wait to see if the clock turns on and if not | ||
365 | * then proceed with ungating the clock. | ||
366 | */ | ||
367 | if (utmi_wait_register(base + USB_SUSP_CTRL, USB_PHY_CLK_VALID, | ||
368 | USB_PHY_CLK_VALID) == 0) | ||
369 | return; | ||
370 | |||
354 | if (phy->is_legacy_phy) { | 371 | if (phy->is_legacy_phy) { |
355 | val = readl(base + USB_SUSP_CTRL); | 372 | val = readl(base + USB_SUSP_CTRL); |
356 | val |= USB_SUSP_CLR; | 373 | val |= USB_SUSP_CLR; |
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index 68f26904c316..50285b01da92 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c | |||
@@ -857,9 +857,9 @@ static void xfer_work(struct work_struct *work) | |||
857 | fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero); | 857 | fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero); |
858 | 858 | ||
859 | usbhs_pipe_running(pipe, 1); | 859 | usbhs_pipe_running(pipe, 1); |
860 | usbhsf_dma_start(pipe, fifo); | ||
861 | usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans); | 860 | usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->trans); |
862 | dma_async_issue_pending(chan); | 861 | dma_async_issue_pending(chan); |
862 | usbhsf_dma_start(pipe, fifo); | ||
863 | usbhs_pipe_enable(pipe); | 863 | usbhs_pipe_enable(pipe); |
864 | 864 | ||
865 | xfer_work_end: | 865 | xfer_work_end: |
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c index fdf89800ebc3..43a862a90a77 100644 --- a/drivers/usb/serial/console.c +++ b/drivers/usb/serial/console.c | |||
@@ -186,6 +186,7 @@ static int usb_console_setup(struct console *co, char *options) | |||
186 | tty_kref_put(tty); | 186 | tty_kref_put(tty); |
187 | reset_open_count: | 187 | reset_open_count: |
188 | port->port.count = 0; | 188 | port->port.count = 0; |
189 | info->port = NULL; | ||
189 | usb_autopm_put_interface(serial->interface); | 190 | usb_autopm_put_interface(serial->interface); |
190 | error_get_interface: | 191 | error_get_interface: |
191 | usb_serial_put(serial); | 192 | usb_serial_put(serial); |
@@ -265,7 +266,7 @@ static struct console usbcons = { | |||
265 | 266 | ||
266 | void usb_serial_console_disconnect(struct usb_serial *serial) | 267 | void usb_serial_console_disconnect(struct usb_serial *serial) |
267 | { | 268 | { |
268 | if (serial->port[0] == usbcons_info.port) { | 269 | if (serial->port[0] && serial->port[0] == usbcons_info.port) { |
269 | usb_serial_console_exit(); | 270 | usb_serial_console_exit(); |
270 | usb_serial_put(serial); | 271 | usb_serial_put(serial); |
271 | } | 272 | } |
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 2d945c9f975c..412f812522ee 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c | |||
@@ -177,6 +177,7 @@ static const struct usb_device_id id_table[] = { | |||
177 | { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ | 177 | { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ |
178 | { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ | 178 | { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ |
179 | { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */ | 179 | { USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */ |
180 | { USB_DEVICE(0x18EF, 0xE032) }, /* ELV TFD500 Data Logger */ | ||
180 | { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */ | 181 | { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */ |
181 | { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */ | 182 | { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */ |
182 | { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */ | 183 | { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */ |
@@ -352,6 +353,7 @@ static struct usb_serial_driver * const serial_drivers[] = { | |||
352 | #define CP210X_PARTNUM_CP2104 0x04 | 353 | #define CP210X_PARTNUM_CP2104 0x04 |
353 | #define CP210X_PARTNUM_CP2105 0x05 | 354 | #define CP210X_PARTNUM_CP2105 0x05 |
354 | #define CP210X_PARTNUM_CP2108 0x08 | 355 | #define CP210X_PARTNUM_CP2108 0x08 |
356 | #define CP210X_PARTNUM_UNKNOWN 0xFF | ||
355 | 357 | ||
356 | /* CP210X_GET_COMM_STATUS returns these 0x13 bytes */ | 358 | /* CP210X_GET_COMM_STATUS returns these 0x13 bytes */ |
357 | struct cp210x_comm_status { | 359 | struct cp210x_comm_status { |
@@ -1491,8 +1493,11 @@ static int cp210x_attach(struct usb_serial *serial) | |||
1491 | result = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST, | 1493 | result = cp210x_read_vendor_block(serial, REQTYPE_DEVICE_TO_HOST, |
1492 | CP210X_GET_PARTNUM, &priv->partnum, | 1494 | CP210X_GET_PARTNUM, &priv->partnum, |
1493 | sizeof(priv->partnum)); | 1495 | sizeof(priv->partnum)); |
1494 | if (result < 0) | 1496 | if (result < 0) { |
1495 | goto err_free_priv; | 1497 | dev_warn(&serial->interface->dev, |
1498 | "querying part number failed\n"); | ||
1499 | priv->partnum = CP210X_PARTNUM_UNKNOWN; | ||
1500 | } | ||
1496 | 1501 | ||
1497 | usb_set_serial_data(serial, priv); | 1502 | usb_set_serial_data(serial, priv); |
1498 | 1503 | ||
@@ -1505,10 +1510,6 @@ static int cp210x_attach(struct usb_serial *serial) | |||
1505 | } | 1510 | } |
1506 | 1511 | ||
1507 | return 0; | 1512 | return 0; |
1508 | err_free_priv: | ||
1509 | kfree(priv); | ||
1510 | |||
1511 | return result; | ||
1512 | } | 1513 | } |
1513 | 1514 | ||
1514 | static void cp210x_disconnect(struct usb_serial *serial) | 1515 | static void cp210x_disconnect(struct usb_serial *serial) |
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 1cec03799cdf..49d1b2d4606d 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c | |||
@@ -1015,6 +1015,8 @@ static const struct usb_device_id id_table_combined[] = { | |||
1015 | { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) }, | 1015 | { USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) }, |
1016 | { USB_DEVICE(TI_VID, TI_CC3200_LAUNCHPAD_PID), | 1016 | { USB_DEVICE(TI_VID, TI_CC3200_LAUNCHPAD_PID), |
1017 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, | 1017 | .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, |
1018 | { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) }, | ||
1019 | { USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) }, | ||
1018 | { } /* Terminating entry */ | 1020 | { } /* Terminating entry */ |
1019 | }; | 1021 | }; |
1020 | 1022 | ||
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 4fcf1cecb6d7..f9d15bd62785 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h | |||
@@ -610,6 +610,13 @@ | |||
610 | #define ADI_GNICEPLUS_PID 0xF001 | 610 | #define ADI_GNICEPLUS_PID 0xF001 |
611 | 611 | ||
612 | /* | 612 | /* |
613 | * Cypress WICED USB UART | ||
614 | */ | ||
615 | #define CYPRESS_VID 0x04B4 | ||
616 | #define CYPRESS_WICED_BT_USB_PID 0x009B | ||
617 | #define CYPRESS_WICED_WL_USB_PID 0xF900 | ||
618 | |||
619 | /* | ||
613 | * Microchip Technology, Inc. | 620 | * Microchip Technology, Inc. |
614 | * | 621 | * |
615 | * MICROCHIP_VID (0x04D8) and MICROCHIP_USB_BOARD_PID (0x000A) are | 622 | * MICROCHIP_VID (0x04D8) and MICROCHIP_USB_BOARD_PID (0x000A) are |
diff --git a/drivers/usb/serial/metro-usb.c b/drivers/usb/serial/metro-usb.c index cc84da8dbb84..14511d6a7d44 100644 --- a/drivers/usb/serial/metro-usb.c +++ b/drivers/usb/serial/metro-usb.c | |||
@@ -45,6 +45,7 @@ struct metrousb_private { | |||
45 | static const struct usb_device_id id_table[] = { | 45 | static const struct usb_device_id id_table[] = { |
46 | { USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_BI) }, | 46 | { USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_BI) }, |
47 | { USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_UNI) }, | 47 | { USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_UNI) }, |
48 | { USB_DEVICE_INTERFACE_CLASS(0x0c2e, 0x0730, 0xff) }, /* MS7820 */ | ||
48 | { }, /* Terminating entry. */ | 49 | { }, /* Terminating entry. */ |
49 | }; | 50 | }; |
50 | MODULE_DEVICE_TABLE(usb, id_table); | 51 | MODULE_DEVICE_TABLE(usb, id_table); |
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 54bfef13966a..ba672cf4e888 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c | |||
@@ -522,6 +522,7 @@ static void option_instat_callback(struct urb *urb); | |||
522 | 522 | ||
523 | /* TP-LINK Incorporated products */ | 523 | /* TP-LINK Incorporated products */ |
524 | #define TPLINK_VENDOR_ID 0x2357 | 524 | #define TPLINK_VENDOR_ID 0x2357 |
525 | #define TPLINK_PRODUCT_LTE 0x000D | ||
525 | #define TPLINK_PRODUCT_MA180 0x0201 | 526 | #define TPLINK_PRODUCT_MA180 0x0201 |
526 | 527 | ||
527 | /* Changhong products */ | 528 | /* Changhong products */ |
@@ -2011,6 +2012,7 @@ static const struct usb_device_id option_ids[] = { | |||
2011 | { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) }, | 2012 | { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) }, |
2012 | { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) }, | 2013 | { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) }, |
2013 | { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) }, | 2014 | { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) }, |
2015 | { USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, TPLINK_PRODUCT_LTE, 0xff, 0x00, 0x00) }, /* TP-Link LTE Module */ | ||
2014 | { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180), | 2016 | { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180), |
2015 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, | 2017 | .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, |
2016 | { USB_DEVICE(TPLINK_VENDOR_ID, 0x9000), /* TP-Link MA260 */ | 2018 | { USB_DEVICE(TPLINK_VENDOR_ID, 0x9000), /* TP-Link MA260 */ |
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index ebc0beea69d6..eb9928963a53 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c | |||
@@ -174,6 +174,10 @@ static const struct usb_device_id id_table[] = { | |||
174 | {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ | 174 | {DEVICE_SWI(0x413c, 0x81b3)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */ |
175 | {DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */ | 175 | {DEVICE_SWI(0x413c, 0x81b5)}, /* Dell Wireless 5811e QDL */ |
176 | {DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */ | 176 | {DEVICE_SWI(0x413c, 0x81b6)}, /* Dell Wireless 5811e QDL */ |
177 | {DEVICE_SWI(0x413c, 0x81cf)}, /* Dell Wireless 5819 */ | ||
178 | {DEVICE_SWI(0x413c, 0x81d0)}, /* Dell Wireless 5819 */ | ||
179 | {DEVICE_SWI(0x413c, 0x81d1)}, /* Dell Wireless 5818 */ | ||
180 | {DEVICE_SWI(0x413c, 0x81d2)}, /* Dell Wireless 5818 */ | ||
177 | 181 | ||
178 | /* Huawei devices */ | 182 | /* Huawei devices */ |
179 | {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */ | 183 | {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */ |
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 82360594fa8e..57efbd3b053b 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c | |||
@@ -1024,6 +1024,7 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) | |||
1024 | mutex_unlock(&priv->lock); | 1024 | mutex_unlock(&priv->lock); |
1025 | 1025 | ||
1026 | if (use_ptemod) { | 1026 | if (use_ptemod) { |
1027 | map->pages_vm_start = vma->vm_start; | ||
1027 | err = apply_to_page_range(vma->vm_mm, vma->vm_start, | 1028 | err = apply_to_page_range(vma->vm_mm, vma->vm_start, |
1028 | vma->vm_end - vma->vm_start, | 1029 | vma->vm_end - vma->vm_start, |
1029 | find_grant_ptes, map); | 1030 | find_grant_ptes, map); |
@@ -1061,7 +1062,6 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) | |||
1061 | set_grant_ptes_as_special, NULL); | 1062 | set_grant_ptes_as_special, NULL); |
1062 | } | 1063 | } |
1063 | #endif | 1064 | #endif |
1064 | map->pages_vm_start = vma->vm_start; | ||
1065 | } | 1065 | } |
1066 | 1066 | ||
1067 | return 0; | 1067 | return 0; |
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c index e89136ab851e..b437fccd4e62 100644 --- a/drivers/xen/xen-balloon.c +++ b/drivers/xen/xen-balloon.c | |||
@@ -57,7 +57,7 @@ static int register_balloon(struct device *dev); | |||
57 | static void watch_target(struct xenbus_watch *watch, | 57 | static void watch_target(struct xenbus_watch *watch, |
58 | const char *path, const char *token) | 58 | const char *path, const char *token) |
59 | { | 59 | { |
60 | unsigned long long new_target; | 60 | unsigned long long new_target, static_max; |
61 | int err; | 61 | int err; |
62 | static bool watch_fired; | 62 | static bool watch_fired; |
63 | static long target_diff; | 63 | static long target_diff; |
@@ -72,13 +72,20 @@ static void watch_target(struct xenbus_watch *watch, | |||
72 | * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10. | 72 | * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10. |
73 | */ | 73 | */ |
74 | new_target >>= PAGE_SHIFT - 10; | 74 | new_target >>= PAGE_SHIFT - 10; |
75 | if (watch_fired) { | 75 | |
76 | balloon_set_new_target(new_target - target_diff); | 76 | if (!watch_fired) { |
77 | return; | 77 | watch_fired = true; |
78 | err = xenbus_scanf(XBT_NIL, "memory", "static-max", "%llu", | ||
79 | &static_max); | ||
80 | if (err != 1) | ||
81 | static_max = new_target; | ||
82 | else | ||
83 | static_max >>= PAGE_SHIFT - 10; | ||
84 | target_diff = xen_pv_domain() ? 0 | ||
85 | : static_max - balloon_stats.target_pages; | ||
78 | } | 86 | } |
79 | 87 | ||
80 | watch_fired = true; | 88 | balloon_set_new_target(new_target - target_diff); |
81 | target_diff = new_target - balloon_stats.target_pages; | ||
82 | } | 89 | } |
83 | static struct xenbus_watch target_watch = { | 90 | static struct xenbus_watch target_watch = { |
84 | .node = "memory/target", | 91 | .node = "memory/target", |
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c index adaf6f6dd858..e1cbdfdb7c68 100644 --- a/fs/9p/vfs_addr.c +++ b/fs/9p/vfs_addr.c | |||
@@ -310,9 +310,13 @@ static int v9fs_write_end(struct file *filp, struct address_space *mapping, | |||
310 | 310 | ||
311 | p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping); | 311 | p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping); |
312 | 312 | ||
313 | if (unlikely(copied < len && !PageUptodate(page))) { | 313 | if (!PageUptodate(page)) { |
314 | copied = 0; | 314 | if (unlikely(copied < len)) { |
315 | goto out; | 315 | copied = 0; |
316 | goto out; | ||
317 | } else if (len == PAGE_SIZE) { | ||
318 | SetPageUptodate(page); | ||
319 | } | ||
316 | } | 320 | } |
317 | /* | 321 | /* |
318 | * No need to use i_size_read() here, the i_size | 322 | * No need to use i_size_read() here, the i_size |
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c index 2a46762def31..a7c5a9861bef 100644 --- a/fs/binfmt_misc.c +++ b/fs/binfmt_misc.c | |||
@@ -596,7 +596,7 @@ static void bm_evict_inode(struct inode *inode) | |||
596 | { | 596 | { |
597 | Node *e = inode->i_private; | 597 | Node *e = inode->i_private; |
598 | 598 | ||
599 | if (e->flags & MISC_FMT_OPEN_FILE) | 599 | if (e && e->flags & MISC_FMT_OPEN_FILE) |
600 | filp_close(e->interp_file, NULL); | 600 | filp_close(e->interp_file, NULL); |
601 | 601 | ||
602 | clear_inode(inode); | 602 | clear_inode(inode); |
diff --git a/fs/block_dev.c b/fs/block_dev.c index 93d088ffc05c..789f55e851ae 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
@@ -716,10 +716,12 @@ int bdev_write_page(struct block_device *bdev, sector_t sector, | |||
716 | 716 | ||
717 | set_page_writeback(page); | 717 | set_page_writeback(page); |
718 | result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, true); | 718 | result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, true); |
719 | if (result) | 719 | if (result) { |
720 | end_page_writeback(page); | 720 | end_page_writeback(page); |
721 | else | 721 | } else { |
722 | clean_page_buffers(page); | ||
722 | unlock_page(page); | 723 | unlock_page(page); |
724 | } | ||
723 | blk_queue_exit(bdev->bd_queue); | 725 | blk_queue_exit(bdev->bd_queue); |
724 | return result; | 726 | return result; |
725 | } | 727 | } |
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 35a128acfbd1..161694b66038 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c | |||
@@ -1135,7 +1135,7 @@ static int btrfs_fill_super(struct super_block *sb, | |||
1135 | #ifdef CONFIG_BTRFS_FS_POSIX_ACL | 1135 | #ifdef CONFIG_BTRFS_FS_POSIX_ACL |
1136 | sb->s_flags |= MS_POSIXACL; | 1136 | sb->s_flags |= MS_POSIXACL; |
1137 | #endif | 1137 | #endif |
1138 | sb->s_flags |= MS_I_VERSION; | 1138 | sb->s_flags |= SB_I_VERSION; |
1139 | sb->s_iflags |= SB_I_CGROUPWB; | 1139 | sb->s_iflags |= SB_I_CGROUPWB; |
1140 | 1140 | ||
1141 | err = super_setup_bdi(sb); | 1141 | err = super_setup_bdi(sb); |
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 157fe59fbabe..1978a8cb1cb1 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c | |||
@@ -1991,6 +1991,7 @@ static int try_flush_caps(struct inode *inode, u64 *ptid) | |||
1991 | retry: | 1991 | retry: |
1992 | spin_lock(&ci->i_ceph_lock); | 1992 | spin_lock(&ci->i_ceph_lock); |
1993 | if (ci->i_ceph_flags & CEPH_I_NOFLUSH) { | 1993 | if (ci->i_ceph_flags & CEPH_I_NOFLUSH) { |
1994 | spin_unlock(&ci->i_ceph_lock); | ||
1994 | dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode); | 1995 | dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode); |
1995 | goto out; | 1996 | goto out; |
1996 | } | 1997 | } |
@@ -2008,8 +2009,10 @@ retry: | |||
2008 | mutex_lock(&session->s_mutex); | 2009 | mutex_lock(&session->s_mutex); |
2009 | goto retry; | 2010 | goto retry; |
2010 | } | 2011 | } |
2011 | if (cap->session->s_state < CEPH_MDS_SESSION_OPEN) | 2012 | if (cap->session->s_state < CEPH_MDS_SESSION_OPEN) { |
2013 | spin_unlock(&ci->i_ceph_lock); | ||
2012 | goto out; | 2014 | goto out; |
2015 | } | ||
2013 | 2016 | ||
2014 | flushing = __mark_caps_flushing(inode, session, true, | 2017 | flushing = __mark_caps_flushing(inode, session, true, |
2015 | &flush_tid, &oldest_flush_tid); | 2018 | &flush_tid, &oldest_flush_tid); |
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig index f7243617316c..d5b2e12b5d02 100644 --- a/fs/cifs/Kconfig +++ b/fs/cifs/Kconfig | |||
@@ -5,9 +5,14 @@ config CIFS | |||
5 | select CRYPTO | 5 | select CRYPTO |
6 | select CRYPTO_MD4 | 6 | select CRYPTO_MD4 |
7 | select CRYPTO_MD5 | 7 | select CRYPTO_MD5 |
8 | select CRYPTO_SHA256 | ||
9 | select CRYPTO_CMAC | ||
8 | select CRYPTO_HMAC | 10 | select CRYPTO_HMAC |
9 | select CRYPTO_ARC4 | 11 | select CRYPTO_ARC4 |
12 | select CRYPTO_AEAD2 | ||
13 | select CRYPTO_CCM | ||
10 | select CRYPTO_ECB | 14 | select CRYPTO_ECB |
15 | select CRYPTO_AES | ||
11 | select CRYPTO_DES | 16 | select CRYPTO_DES |
12 | help | 17 | help |
13 | This is the client VFS module for the SMB3 family of NAS protocols, | 18 | This is the client VFS module for the SMB3 family of NAS protocols, |
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index de5b2e1fcce5..e185b2853eab 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h | |||
@@ -661,7 +661,9 @@ struct TCP_Server_Info { | |||
661 | #endif | 661 | #endif |
662 | unsigned int max_read; | 662 | unsigned int max_read; |
663 | unsigned int max_write; | 663 | unsigned int max_write; |
664 | __u8 preauth_hash[512]; | 664 | #ifdef CONFIG_CIFS_SMB311 |
665 | __u8 preauth_sha_hash[64]; /* save initital negprot hash */ | ||
666 | #endif /* 3.1.1 */ | ||
665 | struct delayed_work reconnect; /* reconnect workqueue job */ | 667 | struct delayed_work reconnect; /* reconnect workqueue job */ |
666 | struct mutex reconnect_mutex; /* prevent simultaneous reconnects */ | 668 | struct mutex reconnect_mutex; /* prevent simultaneous reconnects */ |
667 | unsigned long echo_interval; | 669 | unsigned long echo_interval; |
@@ -849,7 +851,9 @@ struct cifs_ses { | |||
849 | __u8 smb3signingkey[SMB3_SIGN_KEY_SIZE]; | 851 | __u8 smb3signingkey[SMB3_SIGN_KEY_SIZE]; |
850 | __u8 smb3encryptionkey[SMB3_SIGN_KEY_SIZE]; | 852 | __u8 smb3encryptionkey[SMB3_SIGN_KEY_SIZE]; |
851 | __u8 smb3decryptionkey[SMB3_SIGN_KEY_SIZE]; | 853 | __u8 smb3decryptionkey[SMB3_SIGN_KEY_SIZE]; |
852 | __u8 preauth_hash[512]; | 854 | #ifdef CONFIG_CIFS_SMB311 |
855 | __u8 preauth_sha_hash[64]; | ||
856 | #endif /* 3.1.1 */ | ||
853 | }; | 857 | }; |
854 | 858 | ||
855 | static inline bool | 859 | static inline bool |
diff --git a/fs/cifs/smb2maperror.c b/fs/cifs/smb2maperror.c index 7ca9808a0daa..62c88dfed57b 100644 --- a/fs/cifs/smb2maperror.c +++ b/fs/cifs/smb2maperror.c | |||
@@ -214,7 +214,7 @@ static const struct status_to_posix_error smb2_error_map_table[] = { | |||
214 | {STATUS_DATATYPE_MISALIGNMENT, -EIO, "STATUS_DATATYPE_MISALIGNMENT"}, | 214 | {STATUS_DATATYPE_MISALIGNMENT, -EIO, "STATUS_DATATYPE_MISALIGNMENT"}, |
215 | {STATUS_BREAKPOINT, -EIO, "STATUS_BREAKPOINT"}, | 215 | {STATUS_BREAKPOINT, -EIO, "STATUS_BREAKPOINT"}, |
216 | {STATUS_SINGLE_STEP, -EIO, "STATUS_SINGLE_STEP"}, | 216 | {STATUS_SINGLE_STEP, -EIO, "STATUS_SINGLE_STEP"}, |
217 | {STATUS_BUFFER_OVERFLOW, -EIO, "STATUS_BUFFER_OVERFLOW"}, | 217 | {STATUS_BUFFER_OVERFLOW, -E2BIG, "STATUS_BUFFER_OVERFLOW"}, |
218 | {STATUS_NO_MORE_FILES, -ENODATA, "STATUS_NO_MORE_FILES"}, | 218 | {STATUS_NO_MORE_FILES, -ENODATA, "STATUS_NO_MORE_FILES"}, |
219 | {STATUS_WAKE_SYSTEM_DEBUGGER, -EIO, "STATUS_WAKE_SYSTEM_DEBUGGER"}, | 219 | {STATUS_WAKE_SYSTEM_DEBUGGER, -EIO, "STATUS_WAKE_SYSTEM_DEBUGGER"}, |
220 | {STATUS_HANDLES_CLOSED, -EIO, "STATUS_HANDLES_CLOSED"}, | 220 | {STATUS_HANDLES_CLOSED, -EIO, "STATUS_HANDLES_CLOSED"}, |
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index 0dafdbae1f8c..bdb963d0ba32 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c | |||
@@ -522,6 +522,7 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon, | |||
522 | struct cifs_open_parms oparms; | 522 | struct cifs_open_parms oparms; |
523 | struct cifs_fid fid; | 523 | struct cifs_fid fid; |
524 | struct smb2_file_full_ea_info *smb2_data; | 524 | struct smb2_file_full_ea_info *smb2_data; |
525 | int ea_buf_size = SMB2_MIN_EA_BUF; | ||
525 | 526 | ||
526 | utf16_path = cifs_convert_path_to_utf16(path, cifs_sb); | 527 | utf16_path = cifs_convert_path_to_utf16(path, cifs_sb); |
527 | if (!utf16_path) | 528 | if (!utf16_path) |
@@ -541,14 +542,32 @@ smb2_query_eas(const unsigned int xid, struct cifs_tcon *tcon, | |||
541 | return rc; | 542 | return rc; |
542 | } | 543 | } |
543 | 544 | ||
544 | smb2_data = kzalloc(SMB2_MAX_EA_BUF, GFP_KERNEL); | 545 | while (1) { |
545 | if (smb2_data == NULL) { | 546 | smb2_data = kzalloc(ea_buf_size, GFP_KERNEL); |
546 | SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); | 547 | if (smb2_data == NULL) { |
547 | return -ENOMEM; | 548 | SMB2_close(xid, tcon, fid.persistent_fid, |
549 | fid.volatile_fid); | ||
550 | return -ENOMEM; | ||
551 | } | ||
552 | |||
553 | rc = SMB2_query_eas(xid, tcon, fid.persistent_fid, | ||
554 | fid.volatile_fid, | ||
555 | ea_buf_size, smb2_data); | ||
556 | |||
557 | if (rc != -E2BIG) | ||
558 | break; | ||
559 | |||
560 | kfree(smb2_data); | ||
561 | ea_buf_size <<= 1; | ||
562 | |||
563 | if (ea_buf_size > SMB2_MAX_EA_BUF) { | ||
564 | cifs_dbg(VFS, "EA size is too large\n"); | ||
565 | SMB2_close(xid, tcon, fid.persistent_fid, | ||
566 | fid.volatile_fid); | ||
567 | return -ENOMEM; | ||
568 | } | ||
548 | } | 569 | } |
549 | 570 | ||
550 | rc = SMB2_query_eas(xid, tcon, fid.persistent_fid, fid.volatile_fid, | ||
551 | smb2_data); | ||
552 | SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); | 571 | SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); |
553 | 572 | ||
554 | if (!rc) | 573 | if (!rc) |
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 6f0e6343c15e..5331631386a2 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c | |||
@@ -648,7 +648,7 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon) | |||
648 | { | 648 | { |
649 | int rc = 0; | 649 | int rc = 0; |
650 | struct validate_negotiate_info_req vneg_inbuf; | 650 | struct validate_negotiate_info_req vneg_inbuf; |
651 | struct validate_negotiate_info_rsp *pneg_rsp; | 651 | struct validate_negotiate_info_rsp *pneg_rsp = NULL; |
652 | u32 rsplen; | 652 | u32 rsplen; |
653 | u32 inbuflen; /* max of 4 dialects */ | 653 | u32 inbuflen; /* max of 4 dialects */ |
654 | 654 | ||
@@ -727,8 +727,9 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon) | |||
727 | rsplen); | 727 | rsplen); |
728 | 728 | ||
729 | /* relax check since Mac returns max bufsize allowed on ioctl */ | 729 | /* relax check since Mac returns max bufsize allowed on ioctl */ |
730 | if (rsplen > CIFSMaxBufSize) | 730 | if ((rsplen > CIFSMaxBufSize) |
731 | return -EIO; | 731 | || (rsplen < sizeof(struct validate_negotiate_info_rsp))) |
732 | goto err_rsp_free; | ||
732 | } | 733 | } |
733 | 734 | ||
734 | /* check validate negotiate info response matches what we got earlier */ | 735 | /* check validate negotiate info response matches what we got earlier */ |
@@ -747,10 +748,13 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon) | |||
747 | 748 | ||
748 | /* validate negotiate successful */ | 749 | /* validate negotiate successful */ |
749 | cifs_dbg(FYI, "validate negotiate info successful\n"); | 750 | cifs_dbg(FYI, "validate negotiate info successful\n"); |
751 | kfree(pneg_rsp); | ||
750 | return 0; | 752 | return 0; |
751 | 753 | ||
752 | vneg_out: | 754 | vneg_out: |
753 | cifs_dbg(VFS, "protocol revalidation - security settings mismatch\n"); | 755 | cifs_dbg(VFS, "protocol revalidation - security settings mismatch\n"); |
756 | err_rsp_free: | ||
757 | kfree(pneg_rsp); | ||
754 | return -EIO; | 758 | return -EIO; |
755 | } | 759 | } |
756 | 760 | ||
@@ -1255,7 +1259,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree, | |||
1255 | struct smb2_tree_connect_req *req; | 1259 | struct smb2_tree_connect_req *req; |
1256 | struct smb2_tree_connect_rsp *rsp = NULL; | 1260 | struct smb2_tree_connect_rsp *rsp = NULL; |
1257 | struct kvec iov[2]; | 1261 | struct kvec iov[2]; |
1258 | struct kvec rsp_iov; | 1262 | struct kvec rsp_iov = { NULL, 0 }; |
1259 | int rc = 0; | 1263 | int rc = 0; |
1260 | int resp_buftype; | 1264 | int resp_buftype; |
1261 | int unc_path_len; | 1265 | int unc_path_len; |
@@ -1372,7 +1376,7 @@ tcon_exit: | |||
1372 | return rc; | 1376 | return rc; |
1373 | 1377 | ||
1374 | tcon_error_exit: | 1378 | tcon_error_exit: |
1375 | if (rsp->hdr.sync_hdr.Status == STATUS_BAD_NETWORK_NAME) { | 1379 | if (rsp && rsp->hdr.sync_hdr.Status == STATUS_BAD_NETWORK_NAME) { |
1376 | cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree); | 1380 | cifs_dbg(VFS, "BAD_NETWORK_NAME: %s\n", tree); |
1377 | } | 1381 | } |
1378 | goto tcon_exit; | 1382 | goto tcon_exit; |
@@ -1975,6 +1979,9 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid, | |||
1975 | } else | 1979 | } else |
1976 | iov[0].iov_len = get_rfc1002_length(req) + 4; | 1980 | iov[0].iov_len = get_rfc1002_length(req) + 4; |
1977 | 1981 | ||
1982 | /* validate negotiate request must be signed - see MS-SMB2 3.2.5.5 */ | ||
1983 | if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO) | ||
1984 | req->hdr.sync_hdr.Flags |= SMB2_FLAGS_SIGNED; | ||
1978 | 1985 | ||
1979 | rc = SendReceive2(xid, ses, iov, n_iov, &resp_buftype, flags, &rsp_iov); | 1986 | rc = SendReceive2(xid, ses, iov, n_iov, &resp_buftype, flags, &rsp_iov); |
1980 | cifs_small_buf_release(req); | 1987 | cifs_small_buf_release(req); |
@@ -2191,9 +2198,13 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon, | |||
2191 | req->PersistentFileId = persistent_fid; | 2198 | req->PersistentFileId = persistent_fid; |
2192 | req->VolatileFileId = volatile_fid; | 2199 | req->VolatileFileId = volatile_fid; |
2193 | req->AdditionalInformation = cpu_to_le32(additional_info); | 2200 | req->AdditionalInformation = cpu_to_le32(additional_info); |
2194 | /* 4 for rfc1002 length field and 1 for Buffer */ | 2201 | |
2195 | req->InputBufferOffset = | 2202 | /* |
2196 | cpu_to_le16(sizeof(struct smb2_query_info_req) - 1 - 4); | 2203 | * We do not use the input buffer (do not send extra byte) |
2204 | */ | ||
2205 | req->InputBufferOffset = 0; | ||
2206 | inc_rfc1001_len(req, -1); | ||
2207 | |||
2197 | req->OutputBufferLength = cpu_to_le32(output_len); | 2208 | req->OutputBufferLength = cpu_to_le32(output_len); |
2198 | 2209 | ||
2199 | iov[0].iov_base = (char *)req; | 2210 | iov[0].iov_base = (char *)req; |
@@ -2233,12 +2244,12 @@ qinf_exit: | |||
2233 | } | 2244 | } |
2234 | 2245 | ||
2235 | int SMB2_query_eas(const unsigned int xid, struct cifs_tcon *tcon, | 2246 | int SMB2_query_eas(const unsigned int xid, struct cifs_tcon *tcon, |
2236 | u64 persistent_fid, u64 volatile_fid, | 2247 | u64 persistent_fid, u64 volatile_fid, |
2237 | struct smb2_file_full_ea_info *data) | 2248 | int ea_buf_size, struct smb2_file_full_ea_info *data) |
2238 | { | 2249 | { |
2239 | return query_info(xid, tcon, persistent_fid, volatile_fid, | 2250 | return query_info(xid, tcon, persistent_fid, volatile_fid, |
2240 | FILE_FULL_EA_INFORMATION, SMB2_O_INFO_FILE, 0, | 2251 | FILE_FULL_EA_INFORMATION, SMB2_O_INFO_FILE, 0, |
2241 | SMB2_MAX_EA_BUF, | 2252 | ea_buf_size, |
2242 | sizeof(struct smb2_file_full_ea_info), | 2253 | sizeof(struct smb2_file_full_ea_info), |
2243 | (void **)&data, | 2254 | (void **)&data, |
2244 | NULL); | 2255 | NULL); |
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h index 6c9653a130c8..c2ec934be968 100644 --- a/fs/cifs/smb2pdu.h +++ b/fs/cifs/smb2pdu.h | |||
@@ -832,7 +832,7 @@ struct smb2_flush_rsp { | |||
832 | /* Channel field for read and write: exactly one of following flags can be set*/ | 832 | /* Channel field for read and write: exactly one of following flags can be set*/ |
833 | #define SMB2_CHANNEL_NONE 0x00000000 | 833 | #define SMB2_CHANNEL_NONE 0x00000000 |
834 | #define SMB2_CHANNEL_RDMA_V1 0x00000001 /* SMB3 or later */ | 834 | #define SMB2_CHANNEL_RDMA_V1 0x00000001 /* SMB3 or later */ |
835 | #define SMB2_CHANNEL_RDMA_V1_INVALIDATE 0x00000001 /* SMB3.02 or later */ | 835 | #define SMB2_CHANNEL_RDMA_V1_INVALIDATE 0x00000002 /* SMB3.02 or later */ |
836 | 836 | ||
837 | /* SMB2 read request without RFC1001 length at the beginning */ | 837 | /* SMB2 read request without RFC1001 length at the beginning */ |
838 | struct smb2_read_plain_req { | 838 | struct smb2_read_plain_req { |
@@ -1178,7 +1178,8 @@ struct smb2_file_link_info { /* encoding of request for level 11 */ | |||
1178 | char FileName[0]; /* Name to be assigned to new link */ | 1178 | char FileName[0]; /* Name to be assigned to new link */ |
1179 | } __packed; /* level 11 Set */ | 1179 | } __packed; /* level 11 Set */ |
1180 | 1180 | ||
1181 | #define SMB2_MAX_EA_BUF 2048 | 1181 | #define SMB2_MIN_EA_BUF 2048 |
1182 | #define SMB2_MAX_EA_BUF 65536 | ||
1182 | 1183 | ||
1183 | struct smb2_file_full_ea_info { /* encoding of response for level 15 */ | 1184 | struct smb2_file_full_ea_info { /* encoding of response for level 15 */ |
1184 | __le32 next_entry_offset; | 1185 | __le32 next_entry_offset; |
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h index 003217099ef3..e9ab5227e7a8 100644 --- a/fs/cifs/smb2proto.h +++ b/fs/cifs/smb2proto.h | |||
@@ -134,6 +134,7 @@ extern int SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, | |||
134 | u64 persistent_file_id, u64 volatile_file_id); | 134 | u64 persistent_file_id, u64 volatile_file_id); |
135 | extern int SMB2_query_eas(const unsigned int xid, struct cifs_tcon *tcon, | 135 | extern int SMB2_query_eas(const unsigned int xid, struct cifs_tcon *tcon, |
136 | u64 persistent_file_id, u64 volatile_file_id, | 136 | u64 persistent_file_id, u64 volatile_file_id, |
137 | int ea_buf_size, | ||
137 | struct smb2_file_full_ea_info *data); | 138 | struct smb2_file_full_ea_info *data); |
138 | extern int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon, | 139 | extern int SMB2_query_info(const unsigned int xid, struct cifs_tcon *tcon, |
139 | u64 persistent_file_id, u64 volatile_file_id, | 140 | u64 persistent_file_id, u64 volatile_file_id, |
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c index 67367cf1f8cd..99493946e2f9 100644 --- a/fs/cifs/smb2transport.c +++ b/fs/cifs/smb2transport.c | |||
@@ -390,6 +390,7 @@ generate_smb30signingkey(struct cifs_ses *ses) | |||
390 | return generate_smb3signingkey(ses, &triplet); | 390 | return generate_smb3signingkey(ses, &triplet); |
391 | } | 391 | } |
392 | 392 | ||
393 | #ifdef CONFIG_CIFS_SMB311 | ||
393 | int | 394 | int |
394 | generate_smb311signingkey(struct cifs_ses *ses) | 395 | generate_smb311signingkey(struct cifs_ses *ses) |
395 | 396 | ||
@@ -398,25 +399,26 @@ generate_smb311signingkey(struct cifs_ses *ses) | |||
398 | struct derivation *d; | 399 | struct derivation *d; |
399 | 400 | ||
400 | d = &triplet.signing; | 401 | d = &triplet.signing; |
401 | d->label.iov_base = "SMB2AESCMAC"; | 402 | d->label.iov_base = "SMBSigningKey"; |
402 | d->label.iov_len = 12; | 403 | d->label.iov_len = 14; |
403 | d->context.iov_base = "SmbSign"; | 404 | d->context.iov_base = ses->preauth_sha_hash; |
404 | d->context.iov_len = 8; | 405 | d->context.iov_len = 64; |
405 | 406 | ||
406 | d = &triplet.encryption; | 407 | d = &triplet.encryption; |
407 | d->label.iov_base = "SMB2AESCCM"; | 408 | d->label.iov_base = "SMBC2SCipherKey"; |
408 | d->label.iov_len = 11; | 409 | d->label.iov_len = 16; |
409 | d->context.iov_base = "ServerIn "; | 410 | d->context.iov_base = ses->preauth_sha_hash; |
410 | d->context.iov_len = 10; | 411 | d->context.iov_len = 64; |
411 | 412 | ||
412 | d = &triplet.decryption; | 413 | d = &triplet.decryption; |
413 | d->label.iov_base = "SMB2AESCCM"; | 414 | d->label.iov_base = "SMBS2CCipherKey"; |
414 | d->label.iov_len = 11; | 415 | d->label.iov_len = 16; |
415 | d->context.iov_base = "ServerOut"; | 416 | d->context.iov_base = ses->preauth_sha_hash; |
416 | d->context.iov_len = 10; | 417 | d->context.iov_len = 64; |
417 | 418 | ||
418 | return generate_smb3signingkey(ses, &triplet); | 419 | return generate_smb3signingkey(ses, &triplet); |
419 | } | 420 | } |
421 | #endif /* 311 */ | ||
420 | 422 | ||
421 | int | 423 | int |
422 | smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) | 424 | smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server) |
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c index 018c588c7ac3..8e704d12a1cf 100644 --- a/fs/crypto/keyinfo.c +++ b/fs/crypto/keyinfo.c | |||
@@ -109,6 +109,11 @@ static int validate_user_key(struct fscrypt_info *crypt_info, | |||
109 | goto out; | 109 | goto out; |
110 | } | 110 | } |
111 | ukp = user_key_payload_locked(keyring_key); | 111 | ukp = user_key_payload_locked(keyring_key); |
112 | if (!ukp) { | ||
113 | /* key was revoked before we acquired its semaphore */ | ||
114 | res = -EKEYREVOKED; | ||
115 | goto out; | ||
116 | } | ||
112 | if (ukp->datalen != sizeof(struct fscrypt_key)) { | 117 | if (ukp->datalen != sizeof(struct fscrypt_key)) { |
113 | res = -EINVAL; | 118 | res = -EINVAL; |
114 | goto out; | 119 | goto out; |
diff --git a/fs/direct-io.c b/fs/direct-io.c index 62cf812ed0e5..b53e66d9abd7 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c | |||
@@ -45,6 +45,12 @@ | |||
45 | #define DIO_PAGES 64 | 45 | #define DIO_PAGES 64 |
46 | 46 | ||
47 | /* | 47 | /* |
48 | * Flags for dio_complete() | ||
49 | */ | ||
50 | #define DIO_COMPLETE_ASYNC 0x01 /* This is async IO */ | ||
51 | #define DIO_COMPLETE_INVALIDATE 0x02 /* Can invalidate pages */ | ||
52 | |||
53 | /* | ||
48 | * This code generally works in units of "dio_blocks". A dio_block is | 54 | * This code generally works in units of "dio_blocks". A dio_block is |
49 | * somewhere between the hard sector size and the filesystem block size. it | 55 | * somewhere between the hard sector size and the filesystem block size. it |
50 | * is determined on a per-invocation basis. When talking to the filesystem | 56 | * is determined on a per-invocation basis. When talking to the filesystem |
@@ -225,7 +231,7 @@ static inline struct page *dio_get_page(struct dio *dio, | |||
225 | * filesystems can use it to hold additional state between get_block calls and | 231 | * filesystems can use it to hold additional state between get_block calls and |
226 | * dio_complete. | 232 | * dio_complete. |
227 | */ | 233 | */ |
228 | static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async) | 234 | static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags) |
229 | { | 235 | { |
230 | loff_t offset = dio->iocb->ki_pos; | 236 | loff_t offset = dio->iocb->ki_pos; |
231 | ssize_t transferred = 0; | 237 | ssize_t transferred = 0; |
@@ -259,14 +265,27 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async) | |||
259 | if (ret == 0) | 265 | if (ret == 0) |
260 | ret = transferred; | 266 | ret = transferred; |
261 | 267 | ||
268 | if (dio->end_io) { | ||
269 | // XXX: ki_pos?? | ||
270 | err = dio->end_io(dio->iocb, offset, ret, dio->private); | ||
271 | if (err) | ||
272 | ret = err; | ||
273 | } | ||
274 | |||
262 | /* | 275 | /* |
263 | * Try again to invalidate clean pages which might have been cached by | 276 | * Try again to invalidate clean pages which might have been cached by |
264 | * non-direct readahead, or faulted in by get_user_pages() if the source | 277 | * non-direct readahead, or faulted in by get_user_pages() if the source |
265 | * of the write was an mmap'ed region of the file we're writing. Either | 278 | * of the write was an mmap'ed region of the file we're writing. Either |
266 | * one is a pretty crazy thing to do, so we don't support it 100%. If | 279 | * one is a pretty crazy thing to do, so we don't support it 100%. If |
267 | * this invalidation fails, tough, the write still worked... | 280 | * this invalidation fails, tough, the write still worked... |
281 | * | ||
282 | * And this page cache invalidation has to be after dio->end_io(), as | ||
283 | * some filesystems convert unwritten extents to real allocations in | ||
284 | * end_io() when necessary, otherwise a racing buffer read would cache | ||
285 | * zeros from unwritten extents. | ||
268 | */ | 286 | */ |
269 | if (ret > 0 && dio->op == REQ_OP_WRITE && | 287 | if (flags & DIO_COMPLETE_INVALIDATE && |
288 | ret > 0 && dio->op == REQ_OP_WRITE && | ||
270 | dio->inode->i_mapping->nrpages) { | 289 | dio->inode->i_mapping->nrpages) { |
271 | err = invalidate_inode_pages2_range(dio->inode->i_mapping, | 290 | err = invalidate_inode_pages2_range(dio->inode->i_mapping, |
272 | offset >> PAGE_SHIFT, | 291 | offset >> PAGE_SHIFT, |
@@ -274,18 +293,10 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, bool is_async) | |||
274 | WARN_ON_ONCE(err); | 293 | WARN_ON_ONCE(err); |
275 | } | 294 | } |
276 | 295 | ||
277 | if (dio->end_io) { | ||
278 | |||
279 | // XXX: ki_pos?? | ||
280 | err = dio->end_io(dio->iocb, offset, ret, dio->private); | ||
281 | if (err) | ||
282 | ret = err; | ||
283 | } | ||
284 | |||
285 | if (!(dio->flags & DIO_SKIP_DIO_COUNT)) | 296 | if (!(dio->flags & DIO_SKIP_DIO_COUNT)) |
286 | inode_dio_end(dio->inode); | 297 | inode_dio_end(dio->inode); |
287 | 298 | ||
288 | if (is_async) { | 299 | if (flags & DIO_COMPLETE_ASYNC) { |
289 | /* | 300 | /* |
290 | * generic_write_sync expects ki_pos to have been updated | 301 | * generic_write_sync expects ki_pos to have been updated |
291 | * already, but the submission path only does this for | 302 | * already, but the submission path only does this for |
@@ -306,7 +317,7 @@ static void dio_aio_complete_work(struct work_struct *work) | |||
306 | { | 317 | { |
307 | struct dio *dio = container_of(work, struct dio, complete_work); | 318 | struct dio *dio = container_of(work, struct dio, complete_work); |
308 | 319 | ||
309 | dio_complete(dio, 0, true); | 320 | dio_complete(dio, 0, DIO_COMPLETE_ASYNC | DIO_COMPLETE_INVALIDATE); |
310 | } | 321 | } |
311 | 322 | ||
312 | static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio); | 323 | static blk_status_t dio_bio_complete(struct dio *dio, struct bio *bio); |
@@ -348,7 +359,7 @@ static void dio_bio_end_aio(struct bio *bio) | |||
348 | queue_work(dio->inode->i_sb->s_dio_done_wq, | 359 | queue_work(dio->inode->i_sb->s_dio_done_wq, |
349 | &dio->complete_work); | 360 | &dio->complete_work); |
350 | } else { | 361 | } else { |
351 | dio_complete(dio, 0, true); | 362 | dio_complete(dio, 0, DIO_COMPLETE_ASYNC); |
352 | } | 363 | } |
353 | } | 364 | } |
354 | } | 365 | } |
@@ -866,7 +877,8 @@ out: | |||
866 | */ | 877 | */ |
867 | if (sdio->boundary) { | 878 | if (sdio->boundary) { |
868 | ret = dio_send_cur_page(dio, sdio, map_bh); | 879 | ret = dio_send_cur_page(dio, sdio, map_bh); |
869 | dio_bio_submit(dio, sdio); | 880 | if (sdio->bio) |
881 | dio_bio_submit(dio, sdio); | ||
870 | put_page(sdio->cur_page); | 882 | put_page(sdio->cur_page); |
871 | sdio->cur_page = NULL; | 883 | sdio->cur_page = NULL; |
872 | } | 884 | } |
@@ -1359,7 +1371,7 @@ do_blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, | |||
1359 | dio_await_completion(dio); | 1371 | dio_await_completion(dio); |
1360 | 1372 | ||
1361 | if (drop_refcount(dio) == 0) { | 1373 | if (drop_refcount(dio) == 0) { |
1362 | retval = dio_complete(dio, retval, false); | 1374 | retval = dio_complete(dio, retval, DIO_COMPLETE_INVALIDATE); |
1363 | } else | 1375 | } else |
1364 | BUG_ON(retval != -EIOCBQUEUED); | 1376 | BUG_ON(retval != -EIOCBQUEUED); |
1365 | 1377 | ||
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h index 9c351bf757b2..3fbc0ff79699 100644 --- a/fs/ecryptfs/ecryptfs_kernel.h +++ b/fs/ecryptfs/ecryptfs_kernel.h | |||
@@ -84,11 +84,16 @@ struct ecryptfs_page_crypt_context { | |||
84 | static inline struct ecryptfs_auth_tok * | 84 | static inline struct ecryptfs_auth_tok * |
85 | ecryptfs_get_encrypted_key_payload_data(struct key *key) | 85 | ecryptfs_get_encrypted_key_payload_data(struct key *key) |
86 | { | 86 | { |
87 | if (key->type == &key_type_encrypted) | 87 | struct encrypted_key_payload *payload; |
88 | return (struct ecryptfs_auth_tok *) | 88 | |
89 | (&((struct encrypted_key_payload *)key->payload.data[0])->payload_data); | 89 | if (key->type != &key_type_encrypted) |
90 | else | ||
91 | return NULL; | 90 | return NULL; |
91 | |||
92 | payload = key->payload.data[0]; | ||
93 | if (!payload) | ||
94 | return ERR_PTR(-EKEYREVOKED); | ||
95 | |||
96 | return (struct ecryptfs_auth_tok *)payload->payload_data; | ||
92 | } | 97 | } |
93 | 98 | ||
94 | static inline struct key *ecryptfs_get_encrypted_key(char *sig) | 99 | static inline struct key *ecryptfs_get_encrypted_key(char *sig) |
@@ -114,12 +119,17 @@ static inline struct ecryptfs_auth_tok * | |||
114 | ecryptfs_get_key_payload_data(struct key *key) | 119 | ecryptfs_get_key_payload_data(struct key *key) |
115 | { | 120 | { |
116 | struct ecryptfs_auth_tok *auth_tok; | 121 | struct ecryptfs_auth_tok *auth_tok; |
122 | struct user_key_payload *ukp; | ||
117 | 123 | ||
118 | auth_tok = ecryptfs_get_encrypted_key_payload_data(key); | 124 | auth_tok = ecryptfs_get_encrypted_key_payload_data(key); |
119 | if (!auth_tok) | 125 | if (auth_tok) |
120 | return (struct ecryptfs_auth_tok *)user_key_payload_locked(key)->data; | ||
121 | else | ||
122 | return auth_tok; | 126 | return auth_tok; |
127 | |||
128 | ukp = user_key_payload_locked(key); | ||
129 | if (!ukp) | ||
130 | return ERR_PTR(-EKEYREVOKED); | ||
131 | |||
132 | return (struct ecryptfs_auth_tok *)ukp->data; | ||
123 | } | 133 | } |
124 | 134 | ||
125 | #define ECRYPTFS_MAX_KEYSET_SIZE 1024 | 135 | #define ECRYPTFS_MAX_KEYSET_SIZE 1024 |
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c index 3cf1546dca82..fa218cd64f74 100644 --- a/fs/ecryptfs/keystore.c +++ b/fs/ecryptfs/keystore.c | |||
@@ -459,7 +459,8 @@ out: | |||
459 | * @auth_tok_key: key containing the authentication token | 459 | * @auth_tok_key: key containing the authentication token |
460 | * @auth_tok: authentication token | 460 | * @auth_tok: authentication token |
461 | * | 461 | * |
462 | * Returns zero on valid auth tok; -EINVAL otherwise | 462 | * Returns zero on valid auth tok; -EINVAL if the payload is invalid; or |
463 | * -EKEYREVOKED if the key was revoked before we acquired its semaphore. | ||
463 | */ | 464 | */ |
464 | static int | 465 | static int |
465 | ecryptfs_verify_auth_tok_from_key(struct key *auth_tok_key, | 466 | ecryptfs_verify_auth_tok_from_key(struct key *auth_tok_key, |
@@ -468,6 +469,12 @@ ecryptfs_verify_auth_tok_from_key(struct key *auth_tok_key, | |||
468 | int rc = 0; | 469 | int rc = 0; |
469 | 470 | ||
470 | (*auth_tok) = ecryptfs_get_key_payload_data(auth_tok_key); | 471 | (*auth_tok) = ecryptfs_get_key_payload_data(auth_tok_key); |
472 | if (IS_ERR(*auth_tok)) { | ||
473 | rc = PTR_ERR(*auth_tok); | ||
474 | *auth_tok = NULL; | ||
475 | goto out; | ||
476 | } | ||
477 | |||
471 | if (ecryptfs_verify_version((*auth_tok)->version)) { | 478 | if (ecryptfs_verify_version((*auth_tok)->version)) { |
472 | printk(KERN_ERR "Data structure version mismatch. Userspace " | 479 | printk(KERN_ERR "Data structure version mismatch. Userspace " |
473 | "tools must match eCryptfs kernel module with major " | 480 | "tools must match eCryptfs kernel module with major " |
@@ -1802,6 +1802,7 @@ static int do_execveat_common(int fd, struct filename *filename, | |||
1802 | /* execve succeeded */ | 1802 | /* execve succeeded */ |
1803 | current->fs->in_exec = 0; | 1803 | current->fs->in_exec = 0; |
1804 | current->in_execve = 0; | 1804 | current->in_execve = 0; |
1805 | membarrier_execve(current); | ||
1805 | acct_update_integrals(current); | 1806 | acct_update_integrals(current); |
1806 | task_numa_free(current); | 1807 | task_numa_free(current); |
1807 | free_bprm(bprm); | 1808 | free_bprm(bprm); |
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index b104096fce9e..b0915b734a38 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c | |||
@@ -1677,7 +1677,7 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token, | |||
1677 | sbi->s_mount_flags |= EXT4_MF_FS_ABORTED; | 1677 | sbi->s_mount_flags |= EXT4_MF_FS_ABORTED; |
1678 | return 1; | 1678 | return 1; |
1679 | case Opt_i_version: | 1679 | case Opt_i_version: |
1680 | sb->s_flags |= MS_I_VERSION; | 1680 | sb->s_flags |= SB_I_VERSION; |
1681 | return 1; | 1681 | return 1; |
1682 | case Opt_lazytime: | 1682 | case Opt_lazytime: |
1683 | sb->s_flags |= MS_LAZYTIME; | 1683 | sb->s_flags |= MS_LAZYTIME; |
@@ -2060,7 +2060,7 @@ static int _ext4_show_options(struct seq_file *seq, struct super_block *sb, | |||
2060 | SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time); | 2060 | SEQ_OPTS_PRINT("min_batch_time=%u", sbi->s_min_batch_time); |
2061 | if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME) | 2061 | if (nodefs || sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME) |
2062 | SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time); | 2062 | SEQ_OPTS_PRINT("max_batch_time=%u", sbi->s_max_batch_time); |
2063 | if (sb->s_flags & MS_I_VERSION) | 2063 | if (sb->s_flags & SB_I_VERSION) |
2064 | SEQ_OPTS_PUTS("i_version"); | 2064 | SEQ_OPTS_PUTS("i_version"); |
2065 | if (nodefs || sbi->s_stripe) | 2065 | if (nodefs || sbi->s_stripe) |
2066 | SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe); | 2066 | SEQ_OPTS_PRINT("stripe=%lu", sbi->s_stripe); |
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index 9a7c90386947..4b4a72f392be 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h | |||
@@ -2525,7 +2525,7 @@ void invalidate_blocks(struct f2fs_sb_info *sbi, block_t addr); | |||
2525 | bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr); | 2525 | bool is_checkpointed_data(struct f2fs_sb_info *sbi, block_t blkaddr); |
2526 | void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new); | 2526 | void refresh_sit_entry(struct f2fs_sb_info *sbi, block_t old, block_t new); |
2527 | void stop_discard_thread(struct f2fs_sb_info *sbi); | 2527 | void stop_discard_thread(struct f2fs_sb_info *sbi); |
2528 | void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi); | 2528 | void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi, bool umount); |
2529 | void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc); | 2529 | void clear_prefree_segments(struct f2fs_sb_info *sbi, struct cp_control *cpc); |
2530 | void release_discard_addrs(struct f2fs_sb_info *sbi); | 2530 | void release_discard_addrs(struct f2fs_sb_info *sbi); |
2531 | int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra); | 2531 | int npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra); |
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c index 621b9b3d320b..c695ff462ee6 100644 --- a/fs/f2fs/segment.c +++ b/fs/f2fs/segment.c | |||
@@ -1210,11 +1210,11 @@ void stop_discard_thread(struct f2fs_sb_info *sbi) | |||
1210 | } | 1210 | } |
1211 | 1211 | ||
1212 | /* This comes from f2fs_put_super and f2fs_trim_fs */ | 1212 | /* This comes from f2fs_put_super and f2fs_trim_fs */ |
1213 | void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi) | 1213 | void f2fs_wait_discard_bios(struct f2fs_sb_info *sbi, bool umount) |
1214 | { | 1214 | { |
1215 | __issue_discard_cmd(sbi, false); | 1215 | __issue_discard_cmd(sbi, false); |
1216 | __drop_discard_cmd(sbi); | 1216 | __drop_discard_cmd(sbi); |
1217 | __wait_discard_cmd(sbi, false); | 1217 | __wait_discard_cmd(sbi, !umount); |
1218 | } | 1218 | } |
1219 | 1219 | ||
1220 | static void mark_discard_range_all(struct f2fs_sb_info *sbi) | 1220 | static void mark_discard_range_all(struct f2fs_sb_info *sbi) |
@@ -2244,7 +2244,7 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range) | |||
2244 | } | 2244 | } |
2245 | /* It's time to issue all the filed discards */ | 2245 | /* It's time to issue all the filed discards */ |
2246 | mark_discard_range_all(sbi); | 2246 | mark_discard_range_all(sbi); |
2247 | f2fs_wait_discard_bios(sbi); | 2247 | f2fs_wait_discard_bios(sbi, false); |
2248 | out: | 2248 | out: |
2249 | range->len = F2FS_BLK_TO_BYTES(cpc.trimmed); | 2249 | range->len = F2FS_BLK_TO_BYTES(cpc.trimmed); |
2250 | return err; | 2250 | return err; |
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 89f61eb3d167..933c3d529e65 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c | |||
@@ -801,7 +801,7 @@ static void f2fs_put_super(struct super_block *sb) | |||
801 | } | 801 | } |
802 | 802 | ||
803 | /* be sure to wait for any on-going discard commands */ | 803 | /* be sure to wait for any on-going discard commands */ |
804 | f2fs_wait_discard_bios(sbi); | 804 | f2fs_wait_discard_bios(sbi, true); |
805 | 805 | ||
806 | if (f2fs_discard_en(sbi) && !sbi->discard_blks) { | 806 | if (f2fs_discard_en(sbi) && !sbi->discard_blks) { |
807 | struct cp_control cpc = { | 807 | struct cp_control cpc = { |
diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c index b5ab06fabc60..0438d4cd91ef 100644 --- a/fs/fscache/object-list.c +++ b/fs/fscache/object-list.c | |||
@@ -331,6 +331,13 @@ static void fscache_objlist_config(struct fscache_objlist_data *data) | |||
331 | rcu_read_lock(); | 331 | rcu_read_lock(); |
332 | 332 | ||
333 | confkey = user_key_payload_rcu(key); | 333 | confkey = user_key_payload_rcu(key); |
334 | if (!confkey) { | ||
335 | /* key was revoked */ | ||
336 | rcu_read_unlock(); | ||
337 | key_put(key); | ||
338 | goto no_config; | ||
339 | } | ||
340 | |||
334 | buf = confkey->data; | 341 | buf = confkey->data; |
335 | 342 | ||
336 | for (len = confkey->datalen - 1; len >= 0; len--) { | 343 | for (len = confkey->datalen - 1; len >= 0; len--) { |
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 622081b97426..24967382a7b1 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c | |||
@@ -1308,7 +1308,8 @@ static int parse_dirplusfile(char *buf, size_t nbytes, struct file *file, | |||
1308 | */ | 1308 | */ |
1309 | over = !dir_emit(ctx, dirent->name, dirent->namelen, | 1309 | over = !dir_emit(ctx, dirent->name, dirent->namelen, |
1310 | dirent->ino, dirent->type); | 1310 | dirent->ino, dirent->type); |
1311 | ctx->pos = dirent->off; | 1311 | if (!over) |
1312 | ctx->pos = dirent->off; | ||
1312 | } | 1313 | } |
1313 | 1314 | ||
1314 | buf += reclen; | 1315 | buf += reclen; |
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 65c88379a3a1..94a745acaef8 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c | |||
@@ -1059,7 +1059,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent) | |||
1059 | if (sb->s_flags & MS_MANDLOCK) | 1059 | if (sb->s_flags & MS_MANDLOCK) |
1060 | goto err; | 1060 | goto err; |
1061 | 1061 | ||
1062 | sb->s_flags &= ~(MS_NOSEC | MS_I_VERSION); | 1062 | sb->s_flags &= ~(MS_NOSEC | SB_I_VERSION); |
1063 | 1063 | ||
1064 | if (!parse_fuse_opt(data, &d, is_bdev)) | 1064 | if (!parse_fuse_opt(data, &d, is_bdev)) |
1065 | goto err; | 1065 | goto err; |
diff --git a/fs/iomap.c b/fs/iomap.c index be61cf742b5e..d4801f8dd4fd 100644 --- a/fs/iomap.c +++ b/fs/iomap.c | |||
@@ -714,23 +714,9 @@ static ssize_t iomap_dio_complete(struct iomap_dio *dio) | |||
714 | { | 714 | { |
715 | struct kiocb *iocb = dio->iocb; | 715 | struct kiocb *iocb = dio->iocb; |
716 | struct inode *inode = file_inode(iocb->ki_filp); | 716 | struct inode *inode = file_inode(iocb->ki_filp); |
717 | loff_t offset = iocb->ki_pos; | ||
717 | ssize_t ret; | 718 | ssize_t ret; |
718 | 719 | ||
719 | /* | ||
720 | * Try again to invalidate clean pages which might have been cached by | ||
721 | * non-direct readahead, or faulted in by get_user_pages() if the source | ||
722 | * of the write was an mmap'ed region of the file we're writing. Either | ||
723 | * one is a pretty crazy thing to do, so we don't support it 100%. If | ||
724 | * this invalidation fails, tough, the write still worked... | ||
725 | */ | ||
726 | if (!dio->error && | ||
727 | (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) { | ||
728 | ret = invalidate_inode_pages2_range(inode->i_mapping, | ||
729 | iocb->ki_pos >> PAGE_SHIFT, | ||
730 | (iocb->ki_pos + dio->size - 1) >> PAGE_SHIFT); | ||
731 | WARN_ON_ONCE(ret); | ||
732 | } | ||
733 | |||
734 | if (dio->end_io) { | 720 | if (dio->end_io) { |
735 | ret = dio->end_io(iocb, | 721 | ret = dio->end_io(iocb, |
736 | dio->error ? dio->error : dio->size, | 722 | dio->error ? dio->error : dio->size, |
@@ -742,12 +728,33 @@ static ssize_t iomap_dio_complete(struct iomap_dio *dio) | |||
742 | if (likely(!ret)) { | 728 | if (likely(!ret)) { |
743 | ret = dio->size; | 729 | ret = dio->size; |
744 | /* check for short read */ | 730 | /* check for short read */ |
745 | if (iocb->ki_pos + ret > dio->i_size && | 731 | if (offset + ret > dio->i_size && |
746 | !(dio->flags & IOMAP_DIO_WRITE)) | 732 | !(dio->flags & IOMAP_DIO_WRITE)) |
747 | ret = dio->i_size - iocb->ki_pos; | 733 | ret = dio->i_size - offset; |
748 | iocb->ki_pos += ret; | 734 | iocb->ki_pos += ret; |
749 | } | 735 | } |
750 | 736 | ||
737 | /* | ||
738 | * Try again to invalidate clean pages which might have been cached by | ||
739 | * non-direct readahead, or faulted in by get_user_pages() if the source | ||
740 | * of the write was an mmap'ed region of the file we're writing. Either | ||
741 | * one is a pretty crazy thing to do, so we don't support it 100%. If | ||
742 | * this invalidation fails, tough, the write still worked... | ||
743 | * | ||
744 | * And this page cache invalidation has to be after dio->end_io(), as | ||
745 | * some filesystems convert unwritten extents to real allocations in | ||
746 | * end_io() when necessary, otherwise a racing buffer read would cache | ||
747 | * zeros from unwritten extents. | ||
748 | */ | ||
749 | if (!dio->error && | ||
750 | (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) { | ||
751 | int err; | ||
752 | err = invalidate_inode_pages2_range(inode->i_mapping, | ||
753 | offset >> PAGE_SHIFT, | ||
754 | (offset + dio->size - 1) >> PAGE_SHIFT); | ||
755 | WARN_ON_ONCE(err); | ||
756 | } | ||
757 | |||
751 | inode_dio_end(file_inode(iocb->ki_filp)); | 758 | inode_dio_end(file_inode(iocb->ki_filp)); |
752 | kfree(dio); | 759 | kfree(dio); |
753 | 760 | ||
diff --git a/fs/mpage.c b/fs/mpage.c index 37bb77c1302c..c991faec70b9 100644 --- a/fs/mpage.c +++ b/fs/mpage.c | |||
@@ -468,6 +468,16 @@ static void clean_buffers(struct page *page, unsigned first_unmapped) | |||
468 | try_to_free_buffers(page); | 468 | try_to_free_buffers(page); |
469 | } | 469 | } |
470 | 470 | ||
471 | /* | ||
472 | * For situations where we want to clean all buffers attached to a page. | ||
473 | * We don't need to calculate how many buffers are attached to the page, | ||
474 | * we just need to specify a number larger than the maximum number of buffers. | ||
475 | */ | ||
476 | void clean_page_buffers(struct page *page) | ||
477 | { | ||
478 | clean_buffers(page, ~0U); | ||
479 | } | ||
480 | |||
471 | static int __mpage_writepage(struct page *page, struct writeback_control *wbc, | 481 | static int __mpage_writepage(struct page *page, struct writeback_control *wbc, |
472 | void *data) | 482 | void *data) |
473 | { | 483 | { |
@@ -605,10 +615,8 @@ alloc_new: | |||
605 | if (bio == NULL) { | 615 | if (bio == NULL) { |
606 | if (first_unmapped == blocks_per_page) { | 616 | if (first_unmapped == blocks_per_page) { |
607 | if (!bdev_write_page(bdev, blocks[0] << (blkbits - 9), | 617 | if (!bdev_write_page(bdev, blocks[0] << (blkbits - 9), |
608 | page, wbc)) { | 618 | page, wbc)) |
609 | clean_buffers(page, first_unmapped); | ||
610 | goto out; | 619 | goto out; |
611 | } | ||
612 | } | 620 | } |
613 | bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9), | 621 | bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9), |
614 | BIO_MAX_PAGES, GFP_NOFS|__GFP_HIGH); | 622 | BIO_MAX_PAGES, GFP_NOFS|__GFP_HIGH); |
diff --git a/fs/namespace.c b/fs/namespace.c index 3b601f115b6c..d18deb4c410b 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
@@ -2825,7 +2825,8 @@ long do_mount(const char *dev_name, const char __user *dir_name, | |||
2825 | SB_MANDLOCK | | 2825 | SB_MANDLOCK | |
2826 | SB_DIRSYNC | | 2826 | SB_DIRSYNC | |
2827 | SB_SILENT | | 2827 | SB_SILENT | |
2828 | SB_POSIXACL); | 2828 | SB_POSIXACL | |
2829 | SB_I_VERSION); | ||
2829 | 2830 | ||
2830 | if (flags & MS_REMOUNT) | 2831 | if (flags & MS_REMOUNT) |
2831 | retval = do_remount(&path, flags, sb_flags, mnt_flags, | 2832 | retval = do_remount(&path, flags, sb_flags, mnt_flags, |
diff --git a/fs/nfs/client.c b/fs/nfs/client.c index efebe6cf4378..22880ef6d8dd 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c | |||
@@ -218,7 +218,6 @@ static void nfs_cb_idr_remove_locked(struct nfs_client *clp) | |||
218 | static void pnfs_init_server(struct nfs_server *server) | 218 | static void pnfs_init_server(struct nfs_server *server) |
219 | { | 219 | { |
220 | rpc_init_wait_queue(&server->roc_rpcwaitq, "pNFS ROC"); | 220 | rpc_init_wait_queue(&server->roc_rpcwaitq, "pNFS ROC"); |
221 | rpc_init_wait_queue(&server->uoc_rpcwaitq, "NFS UOC"); | ||
222 | } | 221 | } |
223 | 222 | ||
224 | #else | 223 | #else |
@@ -888,6 +887,7 @@ struct nfs_server *nfs_alloc_server(void) | |||
888 | ida_init(&server->openowner_id); | 887 | ida_init(&server->openowner_id); |
889 | ida_init(&server->lockowner_id); | 888 | ida_init(&server->lockowner_id); |
890 | pnfs_init_server(server); | 889 | pnfs_init_server(server); |
890 | rpc_init_wait_queue(&server->uoc_rpcwaitq, "NFS UOC"); | ||
891 | 891 | ||
892 | return server; | 892 | return server; |
893 | } | 893 | } |
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c index 44c638b7876c..508126eb49f9 100644 --- a/fs/nfs/filelayout/filelayout.c +++ b/fs/nfs/filelayout/filelayout.c | |||
@@ -745,7 +745,8 @@ filelayout_free_lseg(struct pnfs_layout_segment *lseg) | |||
745 | struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg); | 745 | struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg); |
746 | 746 | ||
747 | dprintk("--> %s\n", __func__); | 747 | dprintk("--> %s\n", __func__); |
748 | nfs4_fl_put_deviceid(fl->dsaddr); | 748 | if (fl->dsaddr != NULL) |
749 | nfs4_fl_put_deviceid(fl->dsaddr); | ||
749 | /* This assumes a single RW lseg */ | 750 | /* This assumes a single RW lseg */ |
750 | if (lseg->pls_range.iomode == IOMODE_RW) { | 751 | if (lseg->pls_range.iomode == IOMODE_RW) { |
751 | struct nfs4_filelayout *flo; | 752 | struct nfs4_filelayout *flo; |
diff --git a/fs/nfs/nfs4idmap.c b/fs/nfs/nfs4idmap.c index dd5d27da8c0c..30426c1a1bbd 100644 --- a/fs/nfs/nfs4idmap.c +++ b/fs/nfs/nfs4idmap.c | |||
@@ -274,7 +274,7 @@ static struct key *nfs_idmap_request_key(const char *name, size_t namelen, | |||
274 | ssize_t ret; | 274 | ssize_t ret; |
275 | 275 | ||
276 | ret = nfs_idmap_get_desc(name, namelen, type, strlen(type), &desc); | 276 | ret = nfs_idmap_get_desc(name, namelen, type, strlen(type), &desc); |
277 | if (ret <= 0) | 277 | if (ret < 0) |
278 | return ERR_PTR(ret); | 278 | return ERR_PTR(ret); |
279 | 279 | ||
280 | rkey = request_key(&key_type_id_resolver, desc, ""); | 280 | rkey = request_key(&key_type_id_resolver, desc, ""); |
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 6c61e2b99635..f90090e8c959 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
@@ -8399,8 +8399,7 @@ nfs4_layoutget_handle_exception(struct rpc_task *task, | |||
8399 | lo = NFS_I(inode)->layout; | 8399 | lo = NFS_I(inode)->layout; |
8400 | /* If the open stateid was bad, then recover it. */ | 8400 | /* If the open stateid was bad, then recover it. */ |
8401 | if (!lo || test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) || | 8401 | if (!lo || test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags) || |
8402 | nfs4_stateid_match_other(&lgp->args.stateid, | 8402 | !nfs4_stateid_match_other(&lgp->args.stateid, &lo->plh_stateid)) { |
8403 | &lgp->args.ctx->state->stateid)) { | ||
8404 | spin_unlock(&inode->i_lock); | 8403 | spin_unlock(&inode->i_lock); |
8405 | exception->state = lgp->args.ctx->state; | 8404 | exception->state = lgp->args.ctx->state; |
8406 | exception->stateid = &lgp->args.stateid; | 8405 | exception->stateid = &lgp->args.stateid; |
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 37c8af003275..14ed9791ec9c 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c | |||
@@ -1842,8 +1842,8 @@ static void encode_create_session(struct xdr_stream *xdr, | |||
1842 | * Assumes OPEN is the biggest non-idempotent compound. | 1842 | * Assumes OPEN is the biggest non-idempotent compound. |
1843 | * 2 is the verifier. | 1843 | * 2 is the verifier. |
1844 | */ | 1844 | */ |
1845 | max_resp_sz_cached = (NFS4_dec_open_sz + RPC_REPHDRSIZE + | 1845 | max_resp_sz_cached = (NFS4_dec_open_sz + RPC_REPHDRSIZE + 2) |
1846 | RPC_MAX_AUTH_SIZE + 2) * XDR_UNIT; | 1846 | * XDR_UNIT + RPC_MAX_AUTH_SIZE; |
1847 | 1847 | ||
1848 | encode_op_hdr(xdr, OP_CREATE_SESSION, decode_create_session_maxsz, hdr); | 1848 | encode_op_hdr(xdr, OP_CREATE_SESSION, decode_create_session_maxsz, hdr); |
1849 | p = reserve_space(xdr, 16 + 2*28 + 20 + clnt->cl_nodelen + 12); | 1849 | p = reserve_space(xdr, 16 + 2*28 + 20 + clnt->cl_nodelen + 12); |
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c index 3c69db7d4905..8487486ec496 100644 --- a/fs/nfsd/nfs4proc.c +++ b/fs/nfsd/nfs4proc.c | |||
@@ -927,6 +927,13 @@ nfsd4_secinfo_release(union nfsd4_op_u *u) | |||
927 | exp_put(u->secinfo.si_exp); | 927 | exp_put(u->secinfo.si_exp); |
928 | } | 928 | } |
929 | 929 | ||
930 | static void | ||
931 | nfsd4_secinfo_no_name_release(union nfsd4_op_u *u) | ||
932 | { | ||
933 | if (u->secinfo_no_name.sin_exp) | ||
934 | exp_put(u->secinfo_no_name.sin_exp); | ||
935 | } | ||
936 | |||
930 | static __be32 | 937 | static __be32 |
931 | nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, | 938 | nfsd4_setattr(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, |
932 | union nfsd4_op_u *u) | 939 | union nfsd4_op_u *u) |
@@ -2375,7 +2382,7 @@ static const struct nfsd4_operation nfsd4_ops[] = { | |||
2375 | }, | 2382 | }, |
2376 | [OP_SECINFO_NO_NAME] = { | 2383 | [OP_SECINFO_NO_NAME] = { |
2377 | .op_func = nfsd4_secinfo_no_name, | 2384 | .op_func = nfsd4_secinfo_no_name, |
2378 | .op_release = nfsd4_secinfo_release, | 2385 | .op_release = nfsd4_secinfo_no_name_release, |
2379 | .op_flags = OP_HANDLES_WRONGSEC, | 2386 | .op_flags = OP_HANDLES_WRONGSEC, |
2380 | .op_name = "OP_SECINFO_NO_NAME", | 2387 | .op_name = "OP_SECINFO_NO_NAME", |
2381 | .op_rsize_bop = nfsd4_secinfo_rsize, | 2388 | .op_rsize_bop = nfsd4_secinfo_rsize, |
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index a619addecafc..321511ed8c42 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c | |||
@@ -598,18 +598,30 @@ static bool ovl_verify_inode(struct inode *inode, struct dentry *lowerdentry, | |||
598 | return true; | 598 | return true; |
599 | } | 599 | } |
600 | 600 | ||
601 | struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry) | 601 | struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry, |
602 | struct dentry *index) | ||
602 | { | 603 | { |
603 | struct dentry *lowerdentry = ovl_dentry_lower(dentry); | 604 | struct dentry *lowerdentry = ovl_dentry_lower(dentry); |
604 | struct inode *realinode = upperdentry ? d_inode(upperdentry) : NULL; | 605 | struct inode *realinode = upperdentry ? d_inode(upperdentry) : NULL; |
605 | struct inode *inode; | 606 | struct inode *inode; |
607 | /* Already indexed or could be indexed on copy up? */ | ||
608 | bool indexed = (index || (ovl_indexdir(dentry->d_sb) && !upperdentry)); | ||
609 | |||
610 | if (WARN_ON(upperdentry && indexed && !lowerdentry)) | ||
611 | return ERR_PTR(-EIO); | ||
606 | 612 | ||
607 | if (!realinode) | 613 | if (!realinode) |
608 | realinode = d_inode(lowerdentry); | 614 | realinode = d_inode(lowerdentry); |
609 | 615 | ||
610 | if (!S_ISDIR(realinode->i_mode) && | 616 | /* |
611 | (upperdentry || (lowerdentry && ovl_indexdir(dentry->d_sb)))) { | 617 | * Copy up origin (lower) may exist for non-indexed upper, but we must |
612 | struct inode *key = d_inode(lowerdentry ?: upperdentry); | 618 | * not use lower as hash key in that case. |
619 | * Hash inodes that are or could be indexed by origin inode and | ||
620 | * non-indexed upper inodes that could be hard linked by upper inode. | ||
621 | */ | ||
622 | if (!S_ISDIR(realinode->i_mode) && (upperdentry || indexed)) { | ||
623 | struct inode *key = d_inode(indexed ? lowerdentry : | ||
624 | upperdentry); | ||
613 | unsigned int nlink; | 625 | unsigned int nlink; |
614 | 626 | ||
615 | inode = iget5_locked(dentry->d_sb, (unsigned long) key, | 627 | inode = iget5_locked(dentry->d_sb, (unsigned long) key, |
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c index 654bea1a5ac9..a12dc10bf726 100644 --- a/fs/overlayfs/namei.c +++ b/fs/overlayfs/namei.c | |||
@@ -405,14 +405,13 @@ int ovl_verify_index(struct dentry *index, struct path *lowerstack, | |||
405 | * be treated as stale (i.e. after unlink of the overlay inode). | 405 | * be treated as stale (i.e. after unlink of the overlay inode). |
406 | * We don't know the verification rules for directory and whiteout | 406 | * We don't know the verification rules for directory and whiteout |
407 | * index entries, because they have not been implemented yet, so return | 407 | * index entries, because they have not been implemented yet, so return |
408 | * EROFS if those entries are found to avoid corrupting an index that | 408 | * EINVAL if those entries are found to abort the mount to avoid |
409 | * was created by a newer kernel. | 409 | * corrupting an index that was created by a newer kernel. |
410 | */ | 410 | */ |
411 | err = -EROFS; | 411 | err = -EINVAL; |
412 | if (d_is_dir(index) || ovl_is_whiteout(index)) | 412 | if (d_is_dir(index) || ovl_is_whiteout(index)) |
413 | goto fail; | 413 | goto fail; |
414 | 414 | ||
415 | err = -EINVAL; | ||
416 | if (index->d_name.len < sizeof(struct ovl_fh)*2) | 415 | if (index->d_name.len < sizeof(struct ovl_fh)*2) |
417 | goto fail; | 416 | goto fail; |
418 | 417 | ||
@@ -507,6 +506,10 @@ static struct dentry *ovl_lookup_index(struct dentry *dentry, | |||
507 | index = lookup_one_len_unlocked(name.name, ofs->indexdir, name.len); | 506 | index = lookup_one_len_unlocked(name.name, ofs->indexdir, name.len); |
508 | if (IS_ERR(index)) { | 507 | if (IS_ERR(index)) { |
509 | err = PTR_ERR(index); | 508 | err = PTR_ERR(index); |
509 | if (err == -ENOENT) { | ||
510 | index = NULL; | ||
511 | goto out; | ||
512 | } | ||
510 | pr_warn_ratelimited("overlayfs: failed inode index lookup (ino=%lu, key=%*s, err=%i);\n" | 513 | pr_warn_ratelimited("overlayfs: failed inode index lookup (ino=%lu, key=%*s, err=%i);\n" |
511 | "overlayfs: mount with '-o index=off' to disable inodes index.\n", | 514 | "overlayfs: mount with '-o index=off' to disable inodes index.\n", |
512 | d_inode(origin)->i_ino, name.len, name.name, | 515 | d_inode(origin)->i_ino, name.len, name.name, |
@@ -516,18 +519,9 @@ static struct dentry *ovl_lookup_index(struct dentry *dentry, | |||
516 | 519 | ||
517 | inode = d_inode(index); | 520 | inode = d_inode(index); |
518 | if (d_is_negative(index)) { | 521 | if (d_is_negative(index)) { |
519 | if (upper && d_inode(origin)->i_nlink > 1) { | 522 | goto out_dput; |
520 | pr_warn_ratelimited("overlayfs: hard link with origin but no index (ino=%lu).\n", | ||
521 | d_inode(origin)->i_ino); | ||
522 | goto fail; | ||
523 | } | ||
524 | |||
525 | dput(index); | ||
526 | index = NULL; | ||
527 | } else if (upper && d_inode(upper) != inode) { | 523 | } else if (upper && d_inode(upper) != inode) { |
528 | pr_warn_ratelimited("overlayfs: wrong index found (index=%pd2, ino=%lu, upper ino=%lu).\n", | 524 | goto out_dput; |
529 | index, inode->i_ino, d_inode(upper)->i_ino); | ||
530 | goto fail; | ||
531 | } else if (ovl_dentry_weird(index) || ovl_is_whiteout(index) || | 525 | } else if (ovl_dentry_weird(index) || ovl_is_whiteout(index) || |
532 | ((inode->i_mode ^ d_inode(origin)->i_mode) & S_IFMT)) { | 526 | ((inode->i_mode ^ d_inode(origin)->i_mode) & S_IFMT)) { |
533 | /* | 527 | /* |
@@ -547,6 +541,11 @@ out: | |||
547 | kfree(name.name); | 541 | kfree(name.name); |
548 | return index; | 542 | return index; |
549 | 543 | ||
544 | out_dput: | ||
545 | dput(index); | ||
546 | index = NULL; | ||
547 | goto out; | ||
548 | |||
550 | fail: | 549 | fail: |
551 | dput(index); | 550 | dput(index); |
552 | index = ERR_PTR(-EIO); | 551 | index = ERR_PTR(-EIO); |
@@ -635,6 +634,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, | |||
635 | } | 634 | } |
636 | 635 | ||
637 | if (d.redirect) { | 636 | if (d.redirect) { |
637 | err = -ENOMEM; | ||
638 | upperredirect = kstrdup(d.redirect, GFP_KERNEL); | 638 | upperredirect = kstrdup(d.redirect, GFP_KERNEL); |
639 | if (!upperredirect) | 639 | if (!upperredirect) |
640 | goto out_put_upper; | 640 | goto out_put_upper; |
@@ -709,7 +709,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, | |||
709 | upperdentry = dget(index); | 709 | upperdentry = dget(index); |
710 | 710 | ||
711 | if (upperdentry || ctr) { | 711 | if (upperdentry || ctr) { |
712 | inode = ovl_get_inode(dentry, upperdentry); | 712 | inode = ovl_get_inode(dentry, upperdentry, index); |
713 | err = PTR_ERR(inode); | 713 | err = PTR_ERR(inode); |
714 | if (IS_ERR(inode)) | 714 | if (IS_ERR(inode)) |
715 | goto out_free_oe; | 715 | goto out_free_oe; |
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h index c706a6f99928..d9a0edd4e57e 100644 --- a/fs/overlayfs/overlayfs.h +++ b/fs/overlayfs/overlayfs.h | |||
@@ -286,7 +286,8 @@ int ovl_update_time(struct inode *inode, struct timespec *ts, int flags); | |||
286 | bool ovl_is_private_xattr(const char *name); | 286 | bool ovl_is_private_xattr(const char *name); |
287 | 287 | ||
288 | struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, dev_t rdev); | 288 | struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, dev_t rdev); |
289 | struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry); | 289 | struct inode *ovl_get_inode(struct dentry *dentry, struct dentry *upperdentry, |
290 | struct dentry *index); | ||
290 | static inline void ovl_copyattr(struct inode *from, struct inode *to) | 291 | static inline void ovl_copyattr(struct inode *from, struct inode *to) |
291 | { | 292 | { |
292 | to->i_uid = from->i_uid; | 293 | to->i_uid = from->i_uid; |
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c index 0f85ee9c3268..698b74dd750e 100644 --- a/fs/overlayfs/readdir.c +++ b/fs/overlayfs/readdir.c | |||
@@ -1021,13 +1021,12 @@ int ovl_indexdir_cleanup(struct dentry *dentry, struct vfsmount *mnt, | |||
1021 | break; | 1021 | break; |
1022 | } | 1022 | } |
1023 | err = ovl_verify_index(index, lowerstack, numlower); | 1023 | err = ovl_verify_index(index, lowerstack, numlower); |
1024 | if (err) { | 1024 | /* Cleanup stale and orphan index entries */ |
1025 | if (err == -EROFS) | 1025 | if (err && (err == -ESTALE || err == -ENOENT)) |
1026 | break; | ||
1027 | err = ovl_cleanup(dir, index); | 1026 | err = ovl_cleanup(dir, index); |
1028 | if (err) | 1027 | if (err) |
1029 | break; | 1028 | break; |
1030 | } | 1029 | |
1031 | dput(index); | 1030 | dput(index); |
1032 | index = NULL; | 1031 | index = NULL; |
1033 | } | 1032 | } |
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 092d150643c1..f5738e96a052 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c | |||
@@ -174,6 +174,9 @@ static struct inode *ovl_alloc_inode(struct super_block *sb) | |||
174 | { | 174 | { |
175 | struct ovl_inode *oi = kmem_cache_alloc(ovl_inode_cachep, GFP_KERNEL); | 175 | struct ovl_inode *oi = kmem_cache_alloc(ovl_inode_cachep, GFP_KERNEL); |
176 | 176 | ||
177 | if (!oi) | ||
178 | return NULL; | ||
179 | |||
177 | oi->cache = NULL; | 180 | oi->cache = NULL; |
178 | oi->redirect = NULL; | 181 | oi->redirect = NULL; |
179 | oi->version = 0; | 182 | oi->version = 0; |
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 50b0556a124f..52ad15192e72 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c | |||
@@ -1297,21 +1297,18 @@ static int dquot_add_space(struct dquot *dquot, qsize_t space, | |||
1297 | spin_lock(&dquot->dq_dqb_lock); | 1297 | spin_lock(&dquot->dq_dqb_lock); |
1298 | if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) || | 1298 | if (!sb_has_quota_limits_enabled(sb, dquot->dq_id.type) || |
1299 | test_bit(DQ_FAKE_B, &dquot->dq_flags)) | 1299 | test_bit(DQ_FAKE_B, &dquot->dq_flags)) |
1300 | goto add; | 1300 | goto finish; |
1301 | 1301 | ||
1302 | tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace | 1302 | tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace |
1303 | + space + rsv_space; | 1303 | + space + rsv_space; |
1304 | 1304 | ||
1305 | if (flags & DQUOT_SPACE_NOFAIL) | ||
1306 | goto add; | ||
1307 | |||
1308 | if (dquot->dq_dqb.dqb_bhardlimit && | 1305 | if (dquot->dq_dqb.dqb_bhardlimit && |
1309 | tspace > dquot->dq_dqb.dqb_bhardlimit && | 1306 | tspace > dquot->dq_dqb.dqb_bhardlimit && |
1310 | !ignore_hardlimit(dquot)) { | 1307 | !ignore_hardlimit(dquot)) { |
1311 | if (flags & DQUOT_SPACE_WARN) | 1308 | if (flags & DQUOT_SPACE_WARN) |
1312 | prepare_warning(warn, dquot, QUOTA_NL_BHARDWARN); | 1309 | prepare_warning(warn, dquot, QUOTA_NL_BHARDWARN); |
1313 | ret = -EDQUOT; | 1310 | ret = -EDQUOT; |
1314 | goto out; | 1311 | goto finish; |
1315 | } | 1312 | } |
1316 | 1313 | ||
1317 | if (dquot->dq_dqb.dqb_bsoftlimit && | 1314 | if (dquot->dq_dqb.dqb_bsoftlimit && |
@@ -1322,7 +1319,7 @@ static int dquot_add_space(struct dquot *dquot, qsize_t space, | |||
1322 | if (flags & DQUOT_SPACE_WARN) | 1319 | if (flags & DQUOT_SPACE_WARN) |
1323 | prepare_warning(warn, dquot, QUOTA_NL_BSOFTLONGWARN); | 1320 | prepare_warning(warn, dquot, QUOTA_NL_BSOFTLONGWARN); |
1324 | ret = -EDQUOT; | 1321 | ret = -EDQUOT; |
1325 | goto out; | 1322 | goto finish; |
1326 | } | 1323 | } |
1327 | 1324 | ||
1328 | if (dquot->dq_dqb.dqb_bsoftlimit && | 1325 | if (dquot->dq_dqb.dqb_bsoftlimit && |
@@ -1338,13 +1335,21 @@ static int dquot_add_space(struct dquot *dquot, qsize_t space, | |||
1338 | * be always printed | 1335 | * be always printed |
1339 | */ | 1336 | */ |
1340 | ret = -EDQUOT; | 1337 | ret = -EDQUOT; |
1341 | goto out; | 1338 | goto finish; |
1342 | } | 1339 | } |
1343 | } | 1340 | } |
1344 | add: | 1341 | finish: |
1345 | dquot->dq_dqb.dqb_rsvspace += rsv_space; | 1342 | /* |
1346 | dquot->dq_dqb.dqb_curspace += space; | 1343 | * We have to be careful and go through warning generation & grace time |
1347 | out: | 1344 | * setting even if DQUOT_SPACE_NOFAIL is set. That's why we check it |
1345 | * only here... | ||
1346 | */ | ||
1347 | if (flags & DQUOT_SPACE_NOFAIL) | ||
1348 | ret = 0; | ||
1349 | if (!ret) { | ||
1350 | dquot->dq_dqb.dqb_rsvspace += rsv_space; | ||
1351 | dquot->dq_dqb.dqb_curspace += space; | ||
1352 | } | ||
1348 | spin_unlock(&dquot->dq_dqb_lock); | 1353 | spin_unlock(&dquot->dq_dqb_lock); |
1349 | return ret; | 1354 | return ret; |
1350 | } | 1355 | } |
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c index 744dcaec34cc..f965ce832bc0 100644 --- a/fs/xfs/libxfs/xfs_alloc.c +++ b/fs/xfs/libxfs/xfs_alloc.c | |||
@@ -1584,6 +1584,10 @@ xfs_alloc_ag_vextent_small( | |||
1584 | 1584 | ||
1585 | bp = xfs_btree_get_bufs(args->mp, args->tp, | 1585 | bp = xfs_btree_get_bufs(args->mp, args->tp, |
1586 | args->agno, fbno, 0); | 1586 | args->agno, fbno, 0); |
1587 | if (!bp) { | ||
1588 | error = -EFSCORRUPTED; | ||
1589 | goto error0; | ||
1590 | } | ||
1587 | xfs_trans_binval(args->tp, bp); | 1591 | xfs_trans_binval(args->tp, bp); |
1588 | } | 1592 | } |
1589 | args->len = 1; | 1593 | args->len = 1; |
@@ -2141,6 +2145,10 @@ xfs_alloc_fix_freelist( | |||
2141 | if (error) | 2145 | if (error) |
2142 | goto out_agbp_relse; | 2146 | goto out_agbp_relse; |
2143 | bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0); | 2147 | bp = xfs_btree_get_bufs(mp, tp, args->agno, bno, 0); |
2148 | if (!bp) { | ||
2149 | error = -EFSCORRUPTED; | ||
2150 | goto out_agbp_relse; | ||
2151 | } | ||
2144 | xfs_trans_binval(tp, bp); | 2152 | xfs_trans_binval(tp, bp); |
2145 | } | 2153 | } |
2146 | 2154 | ||
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c index 044a363119be..89263797cf32 100644 --- a/fs/xfs/libxfs/xfs_bmap.c +++ b/fs/xfs/libxfs/xfs_bmap.c | |||
@@ -1477,14 +1477,14 @@ xfs_bmap_isaeof( | |||
1477 | int is_empty; | 1477 | int is_empty; |
1478 | int error; | 1478 | int error; |
1479 | 1479 | ||
1480 | bma->aeof = 0; | 1480 | bma->aeof = false; |
1481 | error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec, | 1481 | error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec, |
1482 | &is_empty); | 1482 | &is_empty); |
1483 | if (error) | 1483 | if (error) |
1484 | return error; | 1484 | return error; |
1485 | 1485 | ||
1486 | if (is_empty) { | 1486 | if (is_empty) { |
1487 | bma->aeof = 1; | 1487 | bma->aeof = true; |
1488 | return 0; | 1488 | return 0; |
1489 | } | 1489 | } |
1490 | 1490 | ||
@@ -3852,6 +3852,17 @@ xfs_trim_extent( | |||
3852 | } | 3852 | } |
3853 | } | 3853 | } |
3854 | 3854 | ||
3855 | /* trim extent to within eof */ | ||
3856 | void | ||
3857 | xfs_trim_extent_eof( | ||
3858 | struct xfs_bmbt_irec *irec, | ||
3859 | struct xfs_inode *ip) | ||
3860 | |||
3861 | { | ||
3862 | xfs_trim_extent(irec, 0, XFS_B_TO_FSB(ip->i_mount, | ||
3863 | i_size_read(VFS_I(ip)))); | ||
3864 | } | ||
3865 | |||
3855 | /* | 3866 | /* |
3856 | * Trim the returned map to the required bounds | 3867 | * Trim the returned map to the required bounds |
3857 | */ | 3868 | */ |
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h index 851982a5dfbc..502e0d8fb4ff 100644 --- a/fs/xfs/libxfs/xfs_bmap.h +++ b/fs/xfs/libxfs/xfs_bmap.h | |||
@@ -208,6 +208,7 @@ void xfs_bmap_trace_exlist(struct xfs_inode *ip, xfs_extnum_t cnt, | |||
208 | 208 | ||
209 | void xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno, | 209 | void xfs_trim_extent(struct xfs_bmbt_irec *irec, xfs_fileoff_t bno, |
210 | xfs_filblks_t len); | 210 | xfs_filblks_t len); |
211 | void xfs_trim_extent_eof(struct xfs_bmbt_irec *, struct xfs_inode *); | ||
211 | int xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd); | 212 | int xfs_bmap_add_attrfork(struct xfs_inode *ip, int size, int rsvd); |
212 | void xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork); | 213 | void xfs_bmap_local_to_extents_empty(struct xfs_inode *ip, int whichfork); |
213 | void xfs_bmap_add_free(struct xfs_mount *mp, struct xfs_defer_ops *dfops, | 214 | void xfs_bmap_add_free(struct xfs_mount *mp, struct xfs_defer_ops *dfops, |
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c index 988bb3f31446..dfd643909f85 100644 --- a/fs/xfs/libxfs/xfs_ialloc.c +++ b/fs/xfs/libxfs/xfs_ialloc.c | |||
@@ -1962,7 +1962,7 @@ xfs_difree_inobt( | |||
1962 | if (!(mp->m_flags & XFS_MOUNT_IKEEP) && | 1962 | if (!(mp->m_flags & XFS_MOUNT_IKEEP) && |
1963 | rec.ir_free == XFS_INOBT_ALL_FREE && | 1963 | rec.ir_free == XFS_INOBT_ALL_FREE && |
1964 | mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) { | 1964 | mp->m_sb.sb_inopblock <= XFS_INODES_PER_CHUNK) { |
1965 | xic->deleted = 1; | 1965 | xic->deleted = true; |
1966 | xic->first_ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino); | 1966 | xic->first_ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino); |
1967 | xic->alloc = xfs_inobt_irec_to_allocmask(&rec); | 1967 | xic->alloc = xfs_inobt_irec_to_allocmask(&rec); |
1968 | 1968 | ||
@@ -1989,7 +1989,7 @@ xfs_difree_inobt( | |||
1989 | 1989 | ||
1990 | xfs_difree_inode_chunk(mp, agno, &rec, dfops); | 1990 | xfs_difree_inode_chunk(mp, agno, &rec, dfops); |
1991 | } else { | 1991 | } else { |
1992 | xic->deleted = 0; | 1992 | xic->deleted = false; |
1993 | 1993 | ||
1994 | error = xfs_inobt_update(cur, &rec); | 1994 | error = xfs_inobt_update(cur, &rec); |
1995 | if (error) { | 1995 | if (error) { |
diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h index 8372e9bcd7b6..71de185735e0 100644 --- a/fs/xfs/libxfs/xfs_log_format.h +++ b/fs/xfs/libxfs/xfs_log_format.h | |||
@@ -270,6 +270,7 @@ typedef struct xfs_inode_log_format { | |||
270 | uint32_t ilf_fields; /* flags for fields logged */ | 270 | uint32_t ilf_fields; /* flags for fields logged */ |
271 | uint16_t ilf_asize; /* size of attr d/ext/root */ | 271 | uint16_t ilf_asize; /* size of attr d/ext/root */ |
272 | uint16_t ilf_dsize; /* size of data/ext/root */ | 272 | uint16_t ilf_dsize; /* size of data/ext/root */ |
273 | uint32_t ilf_pad; /* pad for 64 bit boundary */ | ||
273 | uint64_t ilf_ino; /* inode number */ | 274 | uint64_t ilf_ino; /* inode number */ |
274 | union { | 275 | union { |
275 | uint32_t ilfu_rdev; /* rdev value for dev inode*/ | 276 | uint32_t ilfu_rdev; /* rdev value for dev inode*/ |
@@ -280,29 +281,17 @@ typedef struct xfs_inode_log_format { | |||
280 | int32_t ilf_boffset; /* off of inode in buffer */ | 281 | int32_t ilf_boffset; /* off of inode in buffer */ |
281 | } xfs_inode_log_format_t; | 282 | } xfs_inode_log_format_t; |
282 | 283 | ||
283 | typedef struct xfs_inode_log_format_32 { | 284 | /* |
284 | uint16_t ilf_type; /* inode log item type */ | 285 | * Old 32 bit systems will log in this format without the 64 bit |
285 | uint16_t ilf_size; /* size of this item */ | 286 | * alignment padding. Recovery will detect this and convert it to the |
286 | uint32_t ilf_fields; /* flags for fields logged */ | 287 | * correct format. |
287 | uint16_t ilf_asize; /* size of attr d/ext/root */ | 288 | */ |
288 | uint16_t ilf_dsize; /* size of data/ext/root */ | 289 | struct xfs_inode_log_format_32 { |
289 | uint64_t ilf_ino; /* inode number */ | ||
290 | union { | ||
291 | uint32_t ilfu_rdev; /* rdev value for dev inode*/ | ||
292 | uuid_t ilfu_uuid; /* mount point value */ | ||
293 | } ilf_u; | ||
294 | int64_t ilf_blkno; /* blkno of inode buffer */ | ||
295 | int32_t ilf_len; /* len of inode buffer */ | ||
296 | int32_t ilf_boffset; /* off of inode in buffer */ | ||
297 | } __attribute__((packed)) xfs_inode_log_format_32_t; | ||
298 | |||
299 | typedef struct xfs_inode_log_format_64 { | ||
300 | uint16_t ilf_type; /* inode log item type */ | 290 | uint16_t ilf_type; /* inode log item type */ |
301 | uint16_t ilf_size; /* size of this item */ | 291 | uint16_t ilf_size; /* size of this item */ |
302 | uint32_t ilf_fields; /* flags for fields logged */ | 292 | uint32_t ilf_fields; /* flags for fields logged */ |
303 | uint16_t ilf_asize; /* size of attr d/ext/root */ | 293 | uint16_t ilf_asize; /* size of attr d/ext/root */ |
304 | uint16_t ilf_dsize; /* size of data/ext/root */ | 294 | uint16_t ilf_dsize; /* size of data/ext/root */ |
305 | uint32_t ilf_pad; /* pad for 64 bit boundary */ | ||
306 | uint64_t ilf_ino; /* inode number */ | 295 | uint64_t ilf_ino; /* inode number */ |
307 | union { | 296 | union { |
308 | uint32_t ilfu_rdev; /* rdev value for dev inode*/ | 297 | uint32_t ilfu_rdev; /* rdev value for dev inode*/ |
@@ -311,7 +300,7 @@ typedef struct xfs_inode_log_format_64 { | |||
311 | int64_t ilf_blkno; /* blkno of inode buffer */ | 300 | int64_t ilf_blkno; /* blkno of inode buffer */ |
312 | int32_t ilf_len; /* len of inode buffer */ | 301 | int32_t ilf_len; /* len of inode buffer */ |
313 | int32_t ilf_boffset; /* off of inode in buffer */ | 302 | int32_t ilf_boffset; /* off of inode in buffer */ |
314 | } xfs_inode_log_format_64_t; | 303 | } __attribute__((packed)); |
315 | 304 | ||
316 | 305 | ||
317 | /* | 306 | /* |
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c index 7034e17535de..3354140de07e 100644 --- a/fs/xfs/xfs_acl.c +++ b/fs/xfs/xfs_acl.c | |||
@@ -247,6 +247,8 @@ xfs_set_mode(struct inode *inode, umode_t mode) | |||
247 | int | 247 | int |
248 | xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type) | 248 | xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type) |
249 | { | 249 | { |
250 | umode_t mode; | ||
251 | bool set_mode = false; | ||
250 | int error = 0; | 252 | int error = 0; |
251 | 253 | ||
252 | if (!acl) | 254 | if (!acl) |
@@ -257,16 +259,24 @@ xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type) | |||
257 | return error; | 259 | return error; |
258 | 260 | ||
259 | if (type == ACL_TYPE_ACCESS) { | 261 | if (type == ACL_TYPE_ACCESS) { |
260 | umode_t mode; | ||
261 | |||
262 | error = posix_acl_update_mode(inode, &mode, &acl); | 262 | error = posix_acl_update_mode(inode, &mode, &acl); |
263 | if (error) | 263 | if (error) |
264 | return error; | 264 | return error; |
265 | error = xfs_set_mode(inode, mode); | 265 | set_mode = true; |
266 | if (error) | ||
267 | return error; | ||
268 | } | 266 | } |
269 | 267 | ||
270 | set_acl: | 268 | set_acl: |
271 | return __xfs_set_acl(inode, acl, type); | 269 | error = __xfs_set_acl(inode, acl, type); |
270 | if (error) | ||
271 | return error; | ||
272 | |||
273 | /* | ||
274 | * We set the mode after successfully updating the ACL xattr because the | ||
275 | * xattr update can fail at ENOSPC and we don't want to change the mode | ||
276 | * if the ACL update hasn't been applied. | ||
277 | */ | ||
278 | if (set_mode) | ||
279 | error = xfs_set_mode(inode, mode); | ||
280 | |||
281 | return error; | ||
272 | } | 282 | } |
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index f18e5932aec4..a3eeaba156c5 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c | |||
@@ -446,6 +446,19 @@ xfs_imap_valid( | |||
446 | { | 446 | { |
447 | offset >>= inode->i_blkbits; | 447 | offset >>= inode->i_blkbits; |
448 | 448 | ||
449 | /* | ||
450 | * We have to make sure the cached mapping is within EOF to protect | ||
451 | * against eofblocks trimming on file release leaving us with a stale | ||
452 | * mapping. Otherwise, a page for a subsequent file extending buffered | ||
453 | * write could get picked up by this writeback cycle and written to the | ||
454 | * wrong blocks. | ||
455 | * | ||
456 | * Note that what we really want here is a generic mapping invalidation | ||
457 | * mechanism to protect us from arbitrary extent modifying contexts, not | ||
458 | * just eofblocks. | ||
459 | */ | ||
460 | xfs_trim_extent_eof(imap, XFS_I(inode)); | ||
461 | |||
449 | return offset >= imap->br_startoff && | 462 | return offset >= imap->br_startoff && |
450 | offset < imap->br_startoff + imap->br_blockcount; | 463 | offset < imap->br_startoff + imap->br_blockcount; |
451 | } | 464 | } |
@@ -735,6 +748,14 @@ xfs_vm_invalidatepage( | |||
735 | { | 748 | { |
736 | trace_xfs_invalidatepage(page->mapping->host, page, offset, | 749 | trace_xfs_invalidatepage(page->mapping->host, page, offset, |
737 | length); | 750 | length); |
751 | |||
752 | /* | ||
753 | * If we are invalidating the entire page, clear the dirty state from it | ||
754 | * so that we can check for attempts to release dirty cached pages in | ||
755 | * xfs_vm_releasepage(). | ||
756 | */ | ||
757 | if (offset == 0 && length >= PAGE_SIZE) | ||
758 | cancel_dirty_page(page); | ||
738 | block_invalidatepage(page, offset, length); | 759 | block_invalidatepage(page, offset, length); |
739 | } | 760 | } |
740 | 761 | ||
@@ -1190,25 +1211,27 @@ xfs_vm_releasepage( | |||
1190 | * mm accommodates an old ext3 case where clean pages might not have had | 1211 | * mm accommodates an old ext3 case where clean pages might not have had |
1191 | * the dirty bit cleared. Thus, it can send actual dirty pages to | 1212 | * the dirty bit cleared. Thus, it can send actual dirty pages to |
1192 | * ->releasepage() via shrink_active_list(). Conversely, | 1213 | * ->releasepage() via shrink_active_list(). Conversely, |
1193 | * block_invalidatepage() can send pages that are still marked dirty | 1214 | * block_invalidatepage() can send pages that are still marked dirty but |
1194 | * but otherwise have invalidated buffers. | 1215 | * otherwise have invalidated buffers. |
1195 | * | 1216 | * |
1196 | * We want to release the latter to avoid unnecessary buildup of the | 1217 | * We want to release the latter to avoid unnecessary buildup of the |
1197 | * LRU, skip the former and warn if we've left any lingering | 1218 | * LRU, so xfs_vm_invalidatepage() clears the page dirty flag on pages |
1198 | * delalloc/unwritten buffers on clean pages. Skip pages with delalloc | 1219 | * that are entirely invalidated and need to be released. Hence the |
1199 | * or unwritten buffers and warn if the page is not dirty. Otherwise | 1220 | * only time we should get dirty pages here is through |
1200 | * try to release the buffers. | 1221 | * shrink_active_list() and so we can simply skip those now. |
1222 | * | ||
1223 | * warn if we've left any lingering delalloc/unwritten buffers on clean | ||
1224 | * or invalidated pages we are about to release. | ||
1201 | */ | 1225 | */ |
1226 | if (PageDirty(page)) | ||
1227 | return 0; | ||
1228 | |||
1202 | xfs_count_page_state(page, &delalloc, &unwritten); | 1229 | xfs_count_page_state(page, &delalloc, &unwritten); |
1203 | 1230 | ||
1204 | if (delalloc) { | 1231 | if (WARN_ON_ONCE(delalloc)) |
1205 | WARN_ON_ONCE(!PageDirty(page)); | ||
1206 | return 0; | 1232 | return 0; |
1207 | } | 1233 | if (WARN_ON_ONCE(unwritten)) |
1208 | if (unwritten) { | ||
1209 | WARN_ON_ONCE(!PageDirty(page)); | ||
1210 | return 0; | 1234 | return 0; |
1211 | } | ||
1212 | 1235 | ||
1213 | return try_to_free_buffers(page); | 1236 | return try_to_free_buffers(page); |
1214 | } | 1237 | } |
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c index ebd66b19fbfc..e3a950ed35a8 100644 --- a/fs/xfs/xfs_attr_inactive.c +++ b/fs/xfs/xfs_attr_inactive.c | |||
@@ -302,6 +302,8 @@ xfs_attr3_node_inactive( | |||
302 | &bp, XFS_ATTR_FORK); | 302 | &bp, XFS_ATTR_FORK); |
303 | if (error) | 303 | if (error) |
304 | return error; | 304 | return error; |
305 | node = bp->b_addr; | ||
306 | btree = dp->d_ops->node_tree_p(node); | ||
305 | child_fsb = be32_to_cpu(btree[i + 1].before); | 307 | child_fsb = be32_to_cpu(btree[i + 1].before); |
306 | xfs_trans_brelse(*trans, bp); | 308 | xfs_trans_brelse(*trans, bp); |
307 | } | 309 | } |
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c index e9db7fc95b70..6503cfa44262 100644 --- a/fs/xfs/xfs_bmap_util.c +++ b/fs/xfs/xfs_bmap_util.c | |||
@@ -84,6 +84,7 @@ xfs_zero_extent( | |||
84 | GFP_NOFS, 0); | 84 | GFP_NOFS, 0); |
85 | } | 85 | } |
86 | 86 | ||
87 | #ifdef CONFIG_XFS_RT | ||
87 | int | 88 | int |
88 | xfs_bmap_rtalloc( | 89 | xfs_bmap_rtalloc( |
89 | struct xfs_bmalloca *ap) /* bmap alloc argument struct */ | 90 | struct xfs_bmalloca *ap) /* bmap alloc argument struct */ |
@@ -190,6 +191,7 @@ xfs_bmap_rtalloc( | |||
190 | } | 191 | } |
191 | return 0; | 192 | return 0; |
192 | } | 193 | } |
194 | #endif /* CONFIG_XFS_RT */ | ||
193 | 195 | ||
194 | /* | 196 | /* |
195 | * Check if the endoff is outside the last extent. If so the caller will grow | 197 | * Check if the endoff is outside the last extent. If so the caller will grow |
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h index 0eaa81dc49be..7d330b3c77c3 100644 --- a/fs/xfs/xfs_bmap_util.h +++ b/fs/xfs/xfs_bmap_util.h | |||
@@ -28,7 +28,20 @@ struct xfs_mount; | |||
28 | struct xfs_trans; | 28 | struct xfs_trans; |
29 | struct xfs_bmalloca; | 29 | struct xfs_bmalloca; |
30 | 30 | ||
31 | #ifdef CONFIG_XFS_RT | ||
31 | int xfs_bmap_rtalloc(struct xfs_bmalloca *ap); | 32 | int xfs_bmap_rtalloc(struct xfs_bmalloca *ap); |
33 | #else /* !CONFIG_XFS_RT */ | ||
34 | /* | ||
35 | * Attempts to allocate RT extents when RT is disable indicates corruption and | ||
36 | * should trigger a shutdown. | ||
37 | */ | ||
38 | static inline int | ||
39 | xfs_bmap_rtalloc(struct xfs_bmalloca *ap) | ||
40 | { | ||
41 | return -EFSCORRUPTED; | ||
42 | } | ||
43 | #endif /* CONFIG_XFS_RT */ | ||
44 | |||
32 | int xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff, | 45 | int xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff, |
33 | int whichfork, int *eof); | 46 | int whichfork, int *eof); |
34 | int xfs_bmap_punch_delalloc_range(struct xfs_inode *ip, | 47 | int xfs_bmap_punch_delalloc_range(struct xfs_inode *ip, |
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 309e26c9dddb..6526ef0e2a23 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c | |||
@@ -237,11 +237,13 @@ xfs_file_dax_read( | |||
237 | if (!count) | 237 | if (!count) |
238 | return 0; /* skip atime */ | 238 | return 0; /* skip atime */ |
239 | 239 | ||
240 | if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) { | 240 | if (iocb->ki_flags & IOCB_NOWAIT) { |
241 | if (iocb->ki_flags & IOCB_NOWAIT) | 241 | if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) |
242 | return -EAGAIN; | 242 | return -EAGAIN; |
243 | } else { | ||
243 | xfs_ilock(ip, XFS_IOLOCK_SHARED); | 244 | xfs_ilock(ip, XFS_IOLOCK_SHARED); |
244 | } | 245 | } |
246 | |||
245 | ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops); | 247 | ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops); |
246 | xfs_iunlock(ip, XFS_IOLOCK_SHARED); | 248 | xfs_iunlock(ip, XFS_IOLOCK_SHARED); |
247 | 249 | ||
@@ -259,9 +261,10 @@ xfs_file_buffered_aio_read( | |||
259 | 261 | ||
260 | trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos); | 262 | trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos); |
261 | 263 | ||
262 | if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) { | 264 | if (iocb->ki_flags & IOCB_NOWAIT) { |
263 | if (iocb->ki_flags & IOCB_NOWAIT) | 265 | if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) |
264 | return -EAGAIN; | 266 | return -EAGAIN; |
267 | } else { | ||
265 | xfs_ilock(ip, XFS_IOLOCK_SHARED); | 268 | xfs_ilock(ip, XFS_IOLOCK_SHARED); |
266 | } | 269 | } |
267 | ret = generic_file_read_iter(iocb, to); | 270 | ret = generic_file_read_iter(iocb, to); |
@@ -552,9 +555,10 @@ xfs_file_dio_aio_write( | |||
552 | iolock = XFS_IOLOCK_SHARED; | 555 | iolock = XFS_IOLOCK_SHARED; |
553 | } | 556 | } |
554 | 557 | ||
555 | if (!xfs_ilock_nowait(ip, iolock)) { | 558 | if (iocb->ki_flags & IOCB_NOWAIT) { |
556 | if (iocb->ki_flags & IOCB_NOWAIT) | 559 | if (!xfs_ilock_nowait(ip, iolock)) |
557 | return -EAGAIN; | 560 | return -EAGAIN; |
561 | } else { | ||
558 | xfs_ilock(ip, iolock); | 562 | xfs_ilock(ip, iolock); |
559 | } | 563 | } |
560 | 564 | ||
@@ -606,9 +610,10 @@ xfs_file_dax_write( | |||
606 | size_t count; | 610 | size_t count; |
607 | loff_t pos; | 611 | loff_t pos; |
608 | 612 | ||
609 | if (!xfs_ilock_nowait(ip, iolock)) { | 613 | if (iocb->ki_flags & IOCB_NOWAIT) { |
610 | if (iocb->ki_flags & IOCB_NOWAIT) | 614 | if (!xfs_ilock_nowait(ip, iolock)) |
611 | return -EAGAIN; | 615 | return -EAGAIN; |
616 | } else { | ||
612 | xfs_ilock(ip, iolock); | 617 | xfs_ilock(ip, iolock); |
613 | } | 618 | } |
614 | 619 | ||
@@ -764,7 +769,7 @@ xfs_file_fallocate( | |||
764 | enum xfs_prealloc_flags flags = 0; | 769 | enum xfs_prealloc_flags flags = 0; |
765 | uint iolock = XFS_IOLOCK_EXCL; | 770 | uint iolock = XFS_IOLOCK_EXCL; |
766 | loff_t new_size = 0; | 771 | loff_t new_size = 0; |
767 | bool do_file_insert = 0; | 772 | bool do_file_insert = false; |
768 | 773 | ||
769 | if (!S_ISREG(inode->i_mode)) | 774 | if (!S_ISREG(inode->i_mode)) |
770 | return -EINVAL; | 775 | return -EINVAL; |
@@ -825,7 +830,7 @@ xfs_file_fallocate( | |||
825 | error = -EINVAL; | 830 | error = -EINVAL; |
826 | goto out_unlock; | 831 | goto out_unlock; |
827 | } | 832 | } |
828 | do_file_insert = 1; | 833 | do_file_insert = true; |
829 | } else { | 834 | } else { |
830 | flags |= XFS_PREALLOC_SET; | 835 | flags |= XFS_PREALLOC_SET; |
831 | 836 | ||
diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c index 814ed729881d..43cfc07996a4 100644 --- a/fs/xfs/xfs_fsmap.c +++ b/fs/xfs/xfs_fsmap.c | |||
@@ -367,29 +367,6 @@ xfs_getfsmap_datadev_helper( | |||
367 | return xfs_getfsmap_helper(cur->bc_tp, info, rec, rec_daddr); | 367 | return xfs_getfsmap_helper(cur->bc_tp, info, rec, rec_daddr); |
368 | } | 368 | } |
369 | 369 | ||
370 | /* Transform a rtbitmap "record" into a fsmap */ | ||
371 | STATIC int | ||
372 | xfs_getfsmap_rtdev_rtbitmap_helper( | ||
373 | struct xfs_trans *tp, | ||
374 | struct xfs_rtalloc_rec *rec, | ||
375 | void *priv) | ||
376 | { | ||
377 | struct xfs_mount *mp = tp->t_mountp; | ||
378 | struct xfs_getfsmap_info *info = priv; | ||
379 | struct xfs_rmap_irec irec; | ||
380 | xfs_daddr_t rec_daddr; | ||
381 | |||
382 | rec_daddr = XFS_FSB_TO_BB(mp, rec->ar_startblock); | ||
383 | |||
384 | irec.rm_startblock = rec->ar_startblock; | ||
385 | irec.rm_blockcount = rec->ar_blockcount; | ||
386 | irec.rm_owner = XFS_RMAP_OWN_NULL; /* "free" */ | ||
387 | irec.rm_offset = 0; | ||
388 | irec.rm_flags = 0; | ||
389 | |||
390 | return xfs_getfsmap_helper(tp, info, &irec, rec_daddr); | ||
391 | } | ||
392 | |||
393 | /* Transform a bnobt irec into a fsmap */ | 370 | /* Transform a bnobt irec into a fsmap */ |
394 | STATIC int | 371 | STATIC int |
395 | xfs_getfsmap_datadev_bnobt_helper( | 372 | xfs_getfsmap_datadev_bnobt_helper( |
@@ -475,6 +452,30 @@ xfs_getfsmap_logdev( | |||
475 | return xfs_getfsmap_helper(tp, info, &rmap, 0); | 452 | return xfs_getfsmap_helper(tp, info, &rmap, 0); |
476 | } | 453 | } |
477 | 454 | ||
455 | #ifdef CONFIG_XFS_RT | ||
456 | /* Transform a rtbitmap "record" into a fsmap */ | ||
457 | STATIC int | ||
458 | xfs_getfsmap_rtdev_rtbitmap_helper( | ||
459 | struct xfs_trans *tp, | ||
460 | struct xfs_rtalloc_rec *rec, | ||
461 | void *priv) | ||
462 | { | ||
463 | struct xfs_mount *mp = tp->t_mountp; | ||
464 | struct xfs_getfsmap_info *info = priv; | ||
465 | struct xfs_rmap_irec irec; | ||
466 | xfs_daddr_t rec_daddr; | ||
467 | |||
468 | rec_daddr = XFS_FSB_TO_BB(mp, rec->ar_startblock); | ||
469 | |||
470 | irec.rm_startblock = rec->ar_startblock; | ||
471 | irec.rm_blockcount = rec->ar_blockcount; | ||
472 | irec.rm_owner = XFS_RMAP_OWN_NULL; /* "free" */ | ||
473 | irec.rm_offset = 0; | ||
474 | irec.rm_flags = 0; | ||
475 | |||
476 | return xfs_getfsmap_helper(tp, info, &irec, rec_daddr); | ||
477 | } | ||
478 | |||
478 | /* Execute a getfsmap query against the realtime device. */ | 479 | /* Execute a getfsmap query against the realtime device. */ |
479 | STATIC int | 480 | STATIC int |
480 | __xfs_getfsmap_rtdev( | 481 | __xfs_getfsmap_rtdev( |
@@ -561,6 +562,7 @@ xfs_getfsmap_rtdev_rtbitmap( | |||
561 | return __xfs_getfsmap_rtdev(tp, keys, xfs_getfsmap_rtdev_rtbitmap_query, | 562 | return __xfs_getfsmap_rtdev(tp, keys, xfs_getfsmap_rtdev_rtbitmap_query, |
562 | info); | 563 | info); |
563 | } | 564 | } |
565 | #endif /* CONFIG_XFS_RT */ | ||
564 | 566 | ||
565 | /* Execute a getfsmap query against the regular data device. */ | 567 | /* Execute a getfsmap query against the regular data device. */ |
566 | STATIC int | 568 | STATIC int |
@@ -795,7 +797,15 @@ xfs_getfsmap_check_keys( | |||
795 | return false; | 797 | return false; |
796 | } | 798 | } |
797 | 799 | ||
800 | /* | ||
801 | * There are only two devices if we didn't configure RT devices at build time. | ||
802 | */ | ||
803 | #ifdef CONFIG_XFS_RT | ||
798 | #define XFS_GETFSMAP_DEVS 3 | 804 | #define XFS_GETFSMAP_DEVS 3 |
805 | #else | ||
806 | #define XFS_GETFSMAP_DEVS 2 | ||
807 | #endif /* CONFIG_XFS_RT */ | ||
808 | |||
799 | /* | 809 | /* |
800 | * Get filesystem's extents as described in head, and format for | 810 | * Get filesystem's extents as described in head, and format for |
801 | * output. Calls formatter to fill the user's buffer until all | 811 | * output. Calls formatter to fill the user's buffer until all |
@@ -853,10 +863,12 @@ xfs_getfsmap( | |||
853 | handlers[1].dev = new_encode_dev(mp->m_logdev_targp->bt_dev); | 863 | handlers[1].dev = new_encode_dev(mp->m_logdev_targp->bt_dev); |
854 | handlers[1].fn = xfs_getfsmap_logdev; | 864 | handlers[1].fn = xfs_getfsmap_logdev; |
855 | } | 865 | } |
866 | #ifdef CONFIG_XFS_RT | ||
856 | if (mp->m_rtdev_targp) { | 867 | if (mp->m_rtdev_targp) { |
857 | handlers[2].dev = new_encode_dev(mp->m_rtdev_targp->bt_dev); | 868 | handlers[2].dev = new_encode_dev(mp->m_rtdev_targp->bt_dev); |
858 | handlers[2].fn = xfs_getfsmap_rtdev_rtbitmap; | 869 | handlers[2].fn = xfs_getfsmap_rtdev_rtbitmap; |
859 | } | 870 | } |
871 | #endif /* CONFIG_XFS_RT */ | ||
860 | 872 | ||
861 | xfs_sort(handlers, XFS_GETFSMAP_DEVS, sizeof(struct xfs_getfsmap_dev), | 873 | xfs_sort(handlers, XFS_GETFSMAP_DEVS, sizeof(struct xfs_getfsmap_dev), |
862 | xfs_getfsmap_dev_compare); | 874 | xfs_getfsmap_dev_compare); |
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index a705f34b58fa..9bbc2d7cc8cb 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c | |||
@@ -364,6 +364,9 @@ xfs_inode_to_log_dinode( | |||
364 | to->di_dmstate = from->di_dmstate; | 364 | to->di_dmstate = from->di_dmstate; |
365 | to->di_flags = from->di_flags; | 365 | to->di_flags = from->di_flags; |
366 | 366 | ||
367 | /* log a dummy value to ensure log structure is fully initialised */ | ||
368 | to->di_next_unlinked = NULLAGINO; | ||
369 | |||
367 | if (from->di_version == 3) { | 370 | if (from->di_version == 3) { |
368 | to->di_changecount = inode->i_version; | 371 | to->di_changecount = inode->i_version; |
369 | to->di_crtime.t_sec = from->di_crtime.t_sec; | 372 | to->di_crtime.t_sec = from->di_crtime.t_sec; |
@@ -404,6 +407,11 @@ xfs_inode_item_format_core( | |||
404 | * the second with the on-disk inode structure, and a possible third and/or | 407 | * the second with the on-disk inode structure, and a possible third and/or |
405 | * fourth with the inode data/extents/b-tree root and inode attributes | 408 | * fourth with the inode data/extents/b-tree root and inode attributes |
406 | * data/extents/b-tree root. | 409 | * data/extents/b-tree root. |
410 | * | ||
411 | * Note: Always use the 64 bit inode log format structure so we don't | ||
412 | * leave an uninitialised hole in the format item on 64 bit systems. Log | ||
413 | * recovery on 32 bit systems handles this just fine, so there's no reason | ||
414 | * for not using an initialising the properly padded structure all the time. | ||
407 | */ | 415 | */ |
408 | STATIC void | 416 | STATIC void |
409 | xfs_inode_item_format( | 417 | xfs_inode_item_format( |
@@ -412,8 +420,8 @@ xfs_inode_item_format( | |||
412 | { | 420 | { |
413 | struct xfs_inode_log_item *iip = INODE_ITEM(lip); | 421 | struct xfs_inode_log_item *iip = INODE_ITEM(lip); |
414 | struct xfs_inode *ip = iip->ili_inode; | 422 | struct xfs_inode *ip = iip->ili_inode; |
415 | struct xfs_inode_log_format *ilf; | ||
416 | struct xfs_log_iovec *vecp = NULL; | 423 | struct xfs_log_iovec *vecp = NULL; |
424 | struct xfs_inode_log_format *ilf; | ||
417 | 425 | ||
418 | ASSERT(ip->i_d.di_version > 1); | 426 | ASSERT(ip->i_d.di_version > 1); |
419 | 427 | ||
@@ -425,7 +433,17 @@ xfs_inode_item_format( | |||
425 | ilf->ilf_boffset = ip->i_imap.im_boffset; | 433 | ilf->ilf_boffset = ip->i_imap.im_boffset; |
426 | ilf->ilf_fields = XFS_ILOG_CORE; | 434 | ilf->ilf_fields = XFS_ILOG_CORE; |
427 | ilf->ilf_size = 2; /* format + core */ | 435 | ilf->ilf_size = 2; /* format + core */ |
428 | xlog_finish_iovec(lv, vecp, sizeof(struct xfs_inode_log_format)); | 436 | |
437 | /* | ||
438 | * make sure we don't leak uninitialised data into the log in the case | ||
439 | * when we don't log every field in the inode. | ||
440 | */ | ||
441 | ilf->ilf_dsize = 0; | ||
442 | ilf->ilf_asize = 0; | ||
443 | ilf->ilf_pad = 0; | ||
444 | uuid_copy(&ilf->ilf_u.ilfu_uuid, &uuid_null); | ||
445 | |||
446 | xlog_finish_iovec(lv, vecp, sizeof(*ilf)); | ||
429 | 447 | ||
430 | xfs_inode_item_format_core(ip, lv, &vecp); | 448 | xfs_inode_item_format_core(ip, lv, &vecp); |
431 | xfs_inode_item_format_data_fork(iip, ilf, lv, &vecp); | 449 | xfs_inode_item_format_data_fork(iip, ilf, lv, &vecp); |
@@ -855,44 +873,29 @@ xfs_istale_done( | |||
855 | } | 873 | } |
856 | 874 | ||
857 | /* | 875 | /* |
858 | * convert an xfs_inode_log_format struct from either 32 or 64 bit versions | 876 | * convert an xfs_inode_log_format struct from the old 32 bit version |
859 | * (which can have different field alignments) to the native version | 877 | * (which can have different field alignments) to the native 64 bit version |
860 | */ | 878 | */ |
861 | int | 879 | int |
862 | xfs_inode_item_format_convert( | 880 | xfs_inode_item_format_convert( |
863 | xfs_log_iovec_t *buf, | 881 | struct xfs_log_iovec *buf, |
864 | xfs_inode_log_format_t *in_f) | 882 | struct xfs_inode_log_format *in_f) |
865 | { | 883 | { |
866 | if (buf->i_len == sizeof(xfs_inode_log_format_32_t)) { | 884 | struct xfs_inode_log_format_32 *in_f32 = buf->i_addr; |
867 | xfs_inode_log_format_32_t *in_f32 = buf->i_addr; | 885 | |
868 | 886 | if (buf->i_len != sizeof(*in_f32)) | |
869 | in_f->ilf_type = in_f32->ilf_type; | 887 | return -EFSCORRUPTED; |
870 | in_f->ilf_size = in_f32->ilf_size; | 888 | |
871 | in_f->ilf_fields = in_f32->ilf_fields; | 889 | in_f->ilf_type = in_f32->ilf_type; |
872 | in_f->ilf_asize = in_f32->ilf_asize; | 890 | in_f->ilf_size = in_f32->ilf_size; |
873 | in_f->ilf_dsize = in_f32->ilf_dsize; | 891 | in_f->ilf_fields = in_f32->ilf_fields; |
874 | in_f->ilf_ino = in_f32->ilf_ino; | 892 | in_f->ilf_asize = in_f32->ilf_asize; |
875 | /* copy biggest field of ilf_u */ | 893 | in_f->ilf_dsize = in_f32->ilf_dsize; |
876 | uuid_copy(&in_f->ilf_u.ilfu_uuid, &in_f32->ilf_u.ilfu_uuid); | 894 | in_f->ilf_ino = in_f32->ilf_ino; |
877 | in_f->ilf_blkno = in_f32->ilf_blkno; | 895 | /* copy biggest field of ilf_u */ |
878 | in_f->ilf_len = in_f32->ilf_len; | 896 | uuid_copy(&in_f->ilf_u.ilfu_uuid, &in_f32->ilf_u.ilfu_uuid); |
879 | in_f->ilf_boffset = in_f32->ilf_boffset; | 897 | in_f->ilf_blkno = in_f32->ilf_blkno; |
880 | return 0; | 898 | in_f->ilf_len = in_f32->ilf_len; |
881 | } else if (buf->i_len == sizeof(xfs_inode_log_format_64_t)){ | 899 | in_f->ilf_boffset = in_f32->ilf_boffset; |
882 | xfs_inode_log_format_64_t *in_f64 = buf->i_addr; | 900 | return 0; |
883 | |||
884 | in_f->ilf_type = in_f64->ilf_type; | ||
885 | in_f->ilf_size = in_f64->ilf_size; | ||
886 | in_f->ilf_fields = in_f64->ilf_fields; | ||
887 | in_f->ilf_asize = in_f64->ilf_asize; | ||
888 | in_f->ilf_dsize = in_f64->ilf_dsize; | ||
889 | in_f->ilf_ino = in_f64->ilf_ino; | ||
890 | /* copy biggest field of ilf_u */ | ||
891 | uuid_copy(&in_f->ilf_u.ilfu_uuid, &in_f64->ilf_u.ilfu_uuid); | ||
892 | in_f->ilf_blkno = in_f64->ilf_blkno; | ||
893 | in_f->ilf_len = in_f64->ilf_len; | ||
894 | in_f->ilf_boffset = in_f64->ilf_boffset; | ||
895 | return 0; | ||
896 | } | ||
897 | return -EFSCORRUPTED; | ||
898 | } | 901 | } |
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c index c5107c7bc4bf..dc95a49d62e7 100644 --- a/fs/xfs/xfs_log.c +++ b/fs/xfs/xfs_log.c | |||
@@ -2515,7 +2515,7 @@ next_lv: | |||
2515 | if (lv) | 2515 | if (lv) |
2516 | vecp = lv->lv_iovecp; | 2516 | vecp = lv->lv_iovecp; |
2517 | } | 2517 | } |
2518 | if (record_cnt == 0 && ordered == false) { | 2518 | if (record_cnt == 0 && !ordered) { |
2519 | if (!lv) | 2519 | if (!lv) |
2520 | return 0; | 2520 | return 0; |
2521 | break; | 2521 | break; |
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c index ea7d4b4e50d0..e9727d0a541a 100644 --- a/fs/xfs/xfs_mount.c +++ b/fs/xfs/xfs_mount.c | |||
@@ -704,7 +704,7 @@ xfs_mountfs( | |||
704 | xfs_set_maxicount(mp); | 704 | xfs_set_maxicount(mp); |
705 | 705 | ||
706 | /* enable fail_at_unmount as default */ | 706 | /* enable fail_at_unmount as default */ |
707 | mp->m_fail_unmount = 1; | 707 | mp->m_fail_unmount = true; |
708 | 708 | ||
709 | error = xfs_sysfs_init(&mp->m_kobj, &xfs_mp_ktype, NULL, mp->m_fsname); | 709 | error = xfs_sysfs_init(&mp->m_kobj, &xfs_mp_ktype, NULL, mp->m_fsname); |
710 | if (error) | 710 | if (error) |
diff --git a/fs/xfs/xfs_ondisk.h b/fs/xfs/xfs_ondisk.h index 0c381d71b242..0492436a053f 100644 --- a/fs/xfs/xfs_ondisk.h +++ b/fs/xfs/xfs_ondisk.h | |||
@@ -134,7 +134,7 @@ xfs_check_ondisk_structs(void) | |||
134 | XFS_CHECK_STRUCT_SIZE(struct xfs_icreate_log, 28); | 134 | XFS_CHECK_STRUCT_SIZE(struct xfs_icreate_log, 28); |
135 | XFS_CHECK_STRUCT_SIZE(struct xfs_ictimestamp, 8); | 135 | XFS_CHECK_STRUCT_SIZE(struct xfs_ictimestamp, 8); |
136 | XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format_32, 52); | 136 | XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format_32, 52); |
137 | XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format_64, 56); | 137 | XFS_CHECK_STRUCT_SIZE(struct xfs_inode_log_format, 56); |
138 | XFS_CHECK_STRUCT_SIZE(struct xfs_qoff_logformat, 20); | 138 | XFS_CHECK_STRUCT_SIZE(struct xfs_qoff_logformat, 20); |
139 | XFS_CHECK_STRUCT_SIZE(struct xfs_trans_header, 16); | 139 | XFS_CHECK_STRUCT_SIZE(struct xfs_trans_header, 16); |
140 | } | 140 | } |
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index 584cf2d573ba..f663022353c0 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c | |||
@@ -1637,7 +1637,7 @@ xfs_fs_fill_super( | |||
1637 | 1637 | ||
1638 | /* version 5 superblocks support inode version counters. */ | 1638 | /* version 5 superblocks support inode version counters. */ |
1639 | if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5) | 1639 | if (XFS_SB_VERSION_NUM(&mp->m_sb) == XFS_SB_VERSION_5) |
1640 | sb->s_flags |= MS_I_VERSION; | 1640 | sb->s_flags |= SB_I_VERSION; |
1641 | 1641 | ||
1642 | if (mp->m_flags & XFS_MOUNT_DAX) { | 1642 | if (mp->m_flags & XFS_MOUNT_DAX) { |
1643 | xfs_warn(mp, | 1643 | xfs_warn(mp, |
diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 8390859e79e7..f1af7d63d678 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h | |||
@@ -368,6 +368,11 @@ static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages) | |||
368 | { | 368 | { |
369 | } | 369 | } |
370 | 370 | ||
371 | static inline int bpf_obj_get_user(const char __user *pathname) | ||
372 | { | ||
373 | return -EOPNOTSUPP; | ||
374 | } | ||
375 | |||
371 | static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map, | 376 | static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map, |
372 | u32 key) | 377 | u32 key) |
373 | { | 378 | { |
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index c8dae555eccf..446b24cac67d 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h | |||
@@ -232,6 +232,7 @@ int generic_write_end(struct file *, struct address_space *, | |||
232 | loff_t, unsigned, unsigned, | 232 | loff_t, unsigned, unsigned, |
233 | struct page *, void *); | 233 | struct page *, void *); |
234 | void page_zero_new_buffers(struct page *page, unsigned from, unsigned to); | 234 | void page_zero_new_buffers(struct page *page, unsigned from, unsigned to); |
235 | void clean_page_buffers(struct page *page); | ||
235 | int cont_write_begin(struct file *, struct address_space *, loff_t, | 236 | int cont_write_begin(struct file *, struct address_space *, loff_t, |
236 | unsigned, unsigned, struct page **, void **, | 237 | unsigned, unsigned, struct page **, void **, |
237 | get_block_t *, loff_t *); | 238 | get_block_t *, loff_t *); |
diff --git a/include/linux/filter.h b/include/linux/filter.h index d29e58fde364..818a0b26249e 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h | |||
@@ -728,7 +728,7 @@ void xdp_do_flush_map(void); | |||
728 | void bpf_warn_invalid_xdp_action(u32 act); | 728 | void bpf_warn_invalid_xdp_action(u32 act); |
729 | void bpf_warn_invalid_xdp_redirect(u32 ifindex); | 729 | void bpf_warn_invalid_xdp_redirect(u32 ifindex); |
730 | 730 | ||
731 | struct sock *do_sk_redirect_map(void); | 731 | struct sock *do_sk_redirect_map(struct sk_buff *skb); |
732 | 732 | ||
733 | #ifdef CONFIG_BPF_JIT | 733 | #ifdef CONFIG_BPF_JIT |
734 | extern int bpf_jit_enable; | 734 | extern int bpf_jit_enable; |
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index c458d7b7ad19..6431087816ba 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h | |||
@@ -1403,7 +1403,7 @@ extern bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp, u8 *buf, | |||
1403 | const int *srv_version, int srv_vercnt, | 1403 | const int *srv_version, int srv_vercnt, |
1404 | int *nego_fw_version, int *nego_srv_version); | 1404 | int *nego_fw_version, int *nego_srv_version); |
1405 | 1405 | ||
1406 | void hv_process_channel_removal(struct vmbus_channel *channel, u32 relid); | 1406 | void hv_process_channel_removal(u32 relid); |
1407 | 1407 | ||
1408 | void vmbus_setevent(struct vmbus_channel *channel); | 1408 | void vmbus_setevent(struct vmbus_channel *channel); |
1409 | /* | 1409 | /* |
diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h index 4837157da0dc..9ae41cdd0d4c 100644 --- a/include/linux/if_tap.h +++ b/include/linux/if_tap.h | |||
@@ -73,8 +73,8 @@ void tap_del_queues(struct tap_dev *tap); | |||
73 | int tap_get_minor(dev_t major, struct tap_dev *tap); | 73 | int tap_get_minor(dev_t major, struct tap_dev *tap); |
74 | void tap_free_minor(dev_t major, struct tap_dev *tap); | 74 | void tap_free_minor(dev_t major, struct tap_dev *tap); |
75 | int tap_queue_resize(struct tap_dev *tap); | 75 | int tap_queue_resize(struct tap_dev *tap); |
76 | int tap_create_cdev(struct cdev *tap_cdev, | 76 | int tap_create_cdev(struct cdev *tap_cdev, dev_t *tap_major, |
77 | dev_t *tap_major, const char *device_name); | 77 | const char *device_name, struct module *module); |
78 | void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev); | 78 | void tap_destroy_cdev(dev_t major, struct cdev *tap_cdev); |
79 | 79 | ||
80 | #endif /*_LINUX_IF_TAP_H_*/ | 80 | #endif /*_LINUX_IF_TAP_H_*/ |
diff --git a/include/linux/input.h b/include/linux/input.h index fb5e23c7ed98..7c7516eb7d76 100644 --- a/include/linux/input.h +++ b/include/linux/input.h | |||
@@ -234,6 +234,10 @@ struct input_dev { | |||
234 | #error "SW_MAX and INPUT_DEVICE_ID_SW_MAX do not match" | 234 | #error "SW_MAX and INPUT_DEVICE_ID_SW_MAX do not match" |
235 | #endif | 235 | #endif |
236 | 236 | ||
237 | #if INPUT_PROP_MAX != INPUT_DEVICE_ID_PROP_MAX | ||
238 | #error "INPUT_PROP_MAX and INPUT_DEVICE_ID_PROP_MAX do not match" | ||
239 | #endif | ||
240 | |||
237 | #define INPUT_DEVICE_ID_MATCH_DEVICE \ | 241 | #define INPUT_DEVICE_ID_MATCH_DEVICE \ |
238 | (INPUT_DEVICE_ID_MATCH_BUS | INPUT_DEVICE_ID_MATCH_VENDOR | INPUT_DEVICE_ID_MATCH_PRODUCT) | 242 | (INPUT_DEVICE_ID_MATCH_BUS | INPUT_DEVICE_ID_MATCH_VENDOR | INPUT_DEVICE_ID_MATCH_PRODUCT) |
239 | #define INPUT_DEVICE_ID_MATCH_DEVICE_AND_VERSION \ | 243 | #define INPUT_DEVICE_ID_MATCH_DEVICE_AND_VERSION \ |
@@ -469,6 +473,9 @@ int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke); | |||
469 | int input_set_keycode(struct input_dev *dev, | 473 | int input_set_keycode(struct input_dev *dev, |
470 | const struct input_keymap_entry *ke); | 474 | const struct input_keymap_entry *ke); |
471 | 475 | ||
476 | bool input_match_device_id(const struct input_dev *dev, | ||
477 | const struct input_device_id *id); | ||
478 | |||
472 | void input_enable_softrepeat(struct input_dev *dev, int delay, int period); | 479 | void input_enable_softrepeat(struct input_dev *dev, int delay, int period); |
473 | 480 | ||
474 | extern struct class input_class; | 481 | extern struct class input_class; |
diff --git a/include/linux/irq.h b/include/linux/irq.h index d4728bf6a537..5ad10948ea95 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
@@ -1009,7 +1009,7 @@ void irq_gc_mask_clr_bit(struct irq_data *d); | |||
1009 | void irq_gc_unmask_enable_reg(struct irq_data *d); | 1009 | void irq_gc_unmask_enable_reg(struct irq_data *d); |
1010 | void irq_gc_ack_set_bit(struct irq_data *d); | 1010 | void irq_gc_ack_set_bit(struct irq_data *d); |
1011 | void irq_gc_ack_clr_bit(struct irq_data *d); | 1011 | void irq_gc_ack_clr_bit(struct irq_data *d); |
1012 | void irq_gc_mask_disable_reg_and_ack(struct irq_data *d); | 1012 | void irq_gc_mask_disable_and_ack_set(struct irq_data *d); |
1013 | void irq_gc_eoi(struct irq_data *d); | 1013 | void irq_gc_eoi(struct irq_data *d); |
1014 | int irq_gc_set_wake(struct irq_data *d, unsigned int on); | 1014 | int irq_gc_set_wake(struct irq_data *d, unsigned int on); |
1015 | 1015 | ||
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 1ea576c8126f..14b74f22d43c 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h | |||
@@ -372,6 +372,8 @@ | |||
372 | #define GITS_BASER_ENTRY_SIZE_SHIFT (48) | 372 | #define GITS_BASER_ENTRY_SIZE_SHIFT (48) |
373 | #define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1) | 373 | #define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1) |
374 | #define GITS_BASER_ENTRY_SIZE_MASK GENMASK_ULL(52, 48) | 374 | #define GITS_BASER_ENTRY_SIZE_MASK GENMASK_ULL(52, 48) |
375 | #define GITS_BASER_PHYS_52_to_48(phys) \ | ||
376 | (((phys) & GENMASK_ULL(47, 16)) | (((phys) >> 48) & 0xf) << 12) | ||
375 | #define GITS_BASER_SHAREABILITY_SHIFT (10) | 377 | #define GITS_BASER_SHAREABILITY_SHIFT (10) |
376 | #define GITS_BASER_InnerShareable \ | 378 | #define GITS_BASER_InnerShareable \ |
377 | GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable) | 379 | GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable) |
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 0ad4c3044cf9..91189bb0c818 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
@@ -44,6 +44,12 @@ | |||
44 | 44 | ||
45 | #define STACK_MAGIC 0xdeadbeef | 45 | #define STACK_MAGIC 0xdeadbeef |
46 | 46 | ||
47 | /** | ||
48 | * REPEAT_BYTE - repeat the value @x multiple times as an unsigned long value | ||
49 | * @x: value to repeat | ||
50 | * | ||
51 | * NOTE: @x is not checked for > 0xff; larger values produce odd results. | ||
52 | */ | ||
47 | #define REPEAT_BYTE(x) ((~0ul / 0xff) * (x)) | 53 | #define REPEAT_BYTE(x) ((~0ul / 0xff) * (x)) |
48 | 54 | ||
49 | /* @a is a power of 2 value */ | 55 | /* @a is a power of 2 value */ |
@@ -57,6 +63,10 @@ | |||
57 | #define READ 0 | 63 | #define READ 0 |
58 | #define WRITE 1 | 64 | #define WRITE 1 |
59 | 65 | ||
66 | /** | ||
67 | * ARRAY_SIZE - get the number of elements in array @arr | ||
68 | * @arr: array to be sized | ||
69 | */ | ||
60 | #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) | 70 | #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr)) |
61 | 71 | ||
62 | #define u64_to_user_ptr(x) ( \ | 72 | #define u64_to_user_ptr(x) ( \ |
@@ -76,7 +86,15 @@ | |||
76 | #define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1) | 86 | #define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1) |
77 | #define round_down(x, y) ((x) & ~__round_mask(x, y)) | 87 | #define round_down(x, y) ((x) & ~__round_mask(x, y)) |
78 | 88 | ||
89 | /** | ||
90 | * FIELD_SIZEOF - get the size of a struct's field | ||
91 | * @t: the target struct | ||
92 | * @f: the target struct's field | ||
93 | * Return: the size of @f in the struct definition without having a | ||
94 | * declared instance of @t. | ||
95 | */ | ||
79 | #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) | 96 | #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) |
97 | |||
80 | #define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP | 98 | #define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP |
81 | 99 | ||
82 | #define DIV_ROUND_DOWN_ULL(ll, d) \ | 100 | #define DIV_ROUND_DOWN_ULL(ll, d) \ |
@@ -107,7 +125,7 @@ | |||
107 | /* | 125 | /* |
108 | * Divide positive or negative dividend by positive or negative divisor | 126 | * Divide positive or negative dividend by positive or negative divisor |
109 | * and round to closest integer. Result is undefined for negative | 127 | * and round to closest integer. Result is undefined for negative |
110 | * divisors if he dividend variable type is unsigned and for negative | 128 | * divisors if the dividend variable type is unsigned and for negative |
111 | * dividends if the divisor variable type is unsigned. | 129 | * dividends if the divisor variable type is unsigned. |
112 | */ | 130 | */ |
113 | #define DIV_ROUND_CLOSEST(x, divisor)( \ | 131 | #define DIV_ROUND_CLOSEST(x, divisor)( \ |
@@ -247,13 +265,13 @@ extern int _cond_resched(void); | |||
247 | * @ep_ro: right open interval endpoint | 265 | * @ep_ro: right open interval endpoint |
248 | * | 266 | * |
249 | * Perform a "reciprocal multiplication" in order to "scale" a value into | 267 | * Perform a "reciprocal multiplication" in order to "scale" a value into |
250 | * range [0, ep_ro), where the upper interval endpoint is right-open. | 268 | * range [0, @ep_ro), where the upper interval endpoint is right-open. |
251 | * This is useful, e.g. for accessing a index of an array containing | 269 | * This is useful, e.g. for accessing a index of an array containing |
252 | * ep_ro elements, for example. Think of it as sort of modulus, only that | 270 | * @ep_ro elements, for example. Think of it as sort of modulus, only that |
253 | * the result isn't that of modulo. ;) Note that if initial input is a | 271 | * the result isn't that of modulo. ;) Note that if initial input is a |
254 | * small value, then result will return 0. | 272 | * small value, then result will return 0. |
255 | * | 273 | * |
256 | * Return: a result based on val in interval [0, ep_ro). | 274 | * Return: a result based on @val in interval [0, @ep_ro). |
257 | */ | 275 | */ |
258 | static inline u32 reciprocal_scale(u32 val, u32 ep_ro) | 276 | static inline u32 reciprocal_scale(u32 val, u32 ep_ro) |
259 | { | 277 | { |
@@ -618,8 +636,8 @@ do { \ | |||
618 | * trace_printk - printf formatting in the ftrace buffer | 636 | * trace_printk - printf formatting in the ftrace buffer |
619 | * @fmt: the printf format for printing | 637 | * @fmt: the printf format for printing |
620 | * | 638 | * |
621 | * Note: __trace_printk is an internal function for trace_printk and | 639 | * Note: __trace_printk is an internal function for trace_printk() and |
622 | * the @ip is passed in via the trace_printk macro. | 640 | * the @ip is passed in via the trace_printk() macro. |
623 | * | 641 | * |
624 | * This function allows a kernel developer to debug fast path sections | 642 | * This function allows a kernel developer to debug fast path sections |
625 | * that printk is not appropriate for. By scattering in various | 643 | * that printk is not appropriate for. By scattering in various |
@@ -629,7 +647,7 @@ do { \ | |||
629 | * This is intended as a debugging tool for the developer only. | 647 | * This is intended as a debugging tool for the developer only. |
630 | * Please refrain from leaving trace_printks scattered around in | 648 | * Please refrain from leaving trace_printks scattered around in |
631 | * your code. (Extra memory is used for special buffers that are | 649 | * your code. (Extra memory is used for special buffers that are |
632 | * allocated when trace_printk() is used) | 650 | * allocated when trace_printk() is used.) |
633 | * | 651 | * |
634 | * A little optization trick is done here. If there's only one | 652 | * A little optization trick is done here. If there's only one |
635 | * argument, there's no need to scan the string for printf formats. | 653 | * argument, there's no need to scan the string for printf formats. |
@@ -681,7 +699,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...); | |||
681 | * the @ip is passed in via the trace_puts macro. | 699 | * the @ip is passed in via the trace_puts macro. |
682 | * | 700 | * |
683 | * This is similar to trace_printk() but is made for those really fast | 701 | * This is similar to trace_printk() but is made for those really fast |
684 | * paths that a developer wants the least amount of "Heisenbug" affects, | 702 | * paths that a developer wants the least amount of "Heisenbug" effects, |
685 | * where the processing of the print format is still too much. | 703 | * where the processing of the print format is still too much. |
686 | * | 704 | * |
687 | * This function allows a kernel developer to debug fast path sections | 705 | * This function allows a kernel developer to debug fast path sections |
@@ -692,7 +710,7 @@ int __trace_printk(unsigned long ip, const char *fmt, ...); | |||
692 | * This is intended as a debugging tool for the developer only. | 710 | * This is intended as a debugging tool for the developer only. |
693 | * Please refrain from leaving trace_puts scattered around in | 711 | * Please refrain from leaving trace_puts scattered around in |
694 | * your code. (Extra memory is used for special buffers that are | 712 | * your code. (Extra memory is used for special buffers that are |
695 | * allocated when trace_puts() is used) | 713 | * allocated when trace_puts() is used.) |
696 | * | 714 | * |
697 | * Returns: 0 if nothing was written, positive # if string was. | 715 | * Returns: 0 if nothing was written, positive # if string was. |
698 | * (1 when __trace_bputs is used, strlen(str) when __trace_puts is used) | 716 | * (1 when __trace_bputs is used, strlen(str) when __trace_puts is used) |
@@ -771,6 +789,12 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } | |||
771 | t2 min2 = (y); \ | 789 | t2 min2 = (y); \ |
772 | (void) (&min1 == &min2); \ | 790 | (void) (&min1 == &min2); \ |
773 | min1 < min2 ? min1 : min2; }) | 791 | min1 < min2 ? min1 : min2; }) |
792 | |||
793 | /** | ||
794 | * min - return minimum of two values of the same or compatible types | ||
795 | * @x: first value | ||
796 | * @y: second value | ||
797 | */ | ||
774 | #define min(x, y) \ | 798 | #define min(x, y) \ |
775 | __min(typeof(x), typeof(y), \ | 799 | __min(typeof(x), typeof(y), \ |
776 | __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \ | 800 | __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \ |
@@ -781,12 +805,31 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } | |||
781 | t2 max2 = (y); \ | 805 | t2 max2 = (y); \ |
782 | (void) (&max1 == &max2); \ | 806 | (void) (&max1 == &max2); \ |
783 | max1 > max2 ? max1 : max2; }) | 807 | max1 > max2 ? max1 : max2; }) |
808 | |||
809 | /** | ||
810 | * max - return maximum of two values of the same or compatible types | ||
811 | * @x: first value | ||
812 | * @y: second value | ||
813 | */ | ||
784 | #define max(x, y) \ | 814 | #define max(x, y) \ |
785 | __max(typeof(x), typeof(y), \ | 815 | __max(typeof(x), typeof(y), \ |
786 | __UNIQUE_ID(max1_), __UNIQUE_ID(max2_), \ | 816 | __UNIQUE_ID(max1_), __UNIQUE_ID(max2_), \ |
787 | x, y) | 817 | x, y) |
788 | 818 | ||
819 | /** | ||
820 | * min3 - return minimum of three values | ||
821 | * @x: first value | ||
822 | * @y: second value | ||
823 | * @z: third value | ||
824 | */ | ||
789 | #define min3(x, y, z) min((typeof(x))min(x, y), z) | 825 | #define min3(x, y, z) min((typeof(x))min(x, y), z) |
826 | |||
827 | /** | ||
828 | * max3 - return maximum of three values | ||
829 | * @x: first value | ||
830 | * @y: second value | ||
831 | * @z: third value | ||
832 | */ | ||
790 | #define max3(x, y, z) max((typeof(x))max(x, y), z) | 833 | #define max3(x, y, z) max((typeof(x))max(x, y), z) |
791 | 834 | ||
792 | /** | 835 | /** |
@@ -805,8 +848,8 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } | |||
805 | * @lo: lowest allowable value | 848 | * @lo: lowest allowable value |
806 | * @hi: highest allowable value | 849 | * @hi: highest allowable value |
807 | * | 850 | * |
808 | * This macro does strict typechecking of lo/hi to make sure they are of the | 851 | * This macro does strict typechecking of @lo/@hi to make sure they are of the |
809 | * same type as val. See the unnecessary pointer comparisons. | 852 | * same type as @val. See the unnecessary pointer comparisons. |
810 | */ | 853 | */ |
811 | #define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi) | 854 | #define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi) |
812 | 855 | ||
@@ -816,11 +859,24 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } | |||
816 | * | 859 | * |
817 | * Or not use min/max/clamp at all, of course. | 860 | * Or not use min/max/clamp at all, of course. |
818 | */ | 861 | */ |
862 | |||
863 | /** | ||
864 | * min_t - return minimum of two values, using the specified type | ||
865 | * @type: data type to use | ||
866 | * @x: first value | ||
867 | * @y: second value | ||
868 | */ | ||
819 | #define min_t(type, x, y) \ | 869 | #define min_t(type, x, y) \ |
820 | __min(type, type, \ | 870 | __min(type, type, \ |
821 | __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \ | 871 | __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \ |
822 | x, y) | 872 | x, y) |
823 | 873 | ||
874 | /** | ||
875 | * max_t - return maximum of two values, using the specified type | ||
876 | * @type: data type to use | ||
877 | * @x: first value | ||
878 | * @y: second value | ||
879 | */ | ||
824 | #define max_t(type, x, y) \ | 880 | #define max_t(type, x, y) \ |
825 | __max(type, type, \ | 881 | __max(type, type, \ |
826 | __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \ | 882 | __UNIQUE_ID(min1_), __UNIQUE_ID(min2_), \ |
@@ -834,7 +890,7 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } | |||
834 | * @hi: maximum allowable value | 890 | * @hi: maximum allowable value |
835 | * | 891 | * |
836 | * This macro does no typechecking and uses temporary variables of type | 892 | * This macro does no typechecking and uses temporary variables of type |
837 | * 'type' to make all the comparisons. | 893 | * @type to make all the comparisons. |
838 | */ | 894 | */ |
839 | #define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi) | 895 | #define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi) |
840 | 896 | ||
@@ -845,15 +901,17 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } | |||
845 | * @hi: maximum allowable value | 901 | * @hi: maximum allowable value |
846 | * | 902 | * |
847 | * This macro does no typechecking and uses temporary variables of whatever | 903 | * This macro does no typechecking and uses temporary variables of whatever |
848 | * type the input argument 'val' is. This is useful when val is an unsigned | 904 | * type the input argument @val is. This is useful when @val is an unsigned |
849 | * type and min and max are literals that will otherwise be assigned a signed | 905 | * type and @lo and @hi are literals that will otherwise be assigned a signed |
850 | * integer type. | 906 | * integer type. |
851 | */ | 907 | */ |
852 | #define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi) | 908 | #define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi) |
853 | 909 | ||
854 | 910 | ||
855 | /* | 911 | /** |
856 | * swap - swap value of @a and @b | 912 | * swap - swap values of @a and @b |
913 | * @a: first value | ||
914 | * @b: second value | ||
857 | */ | 915 | */ |
858 | #define swap(a, b) \ | 916 | #define swap(a, b) \ |
859 | do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) | 917 | do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) |
diff --git a/include/linux/key.h b/include/linux/key.h index e315e16b6ff8..8a15cabe928d 100644 --- a/include/linux/key.h +++ b/include/linux/key.h | |||
@@ -138,6 +138,11 @@ struct key_restriction { | |||
138 | struct key_type *keytype; | 138 | struct key_type *keytype; |
139 | }; | 139 | }; |
140 | 140 | ||
141 | enum key_state { | ||
142 | KEY_IS_UNINSTANTIATED, | ||
143 | KEY_IS_POSITIVE, /* Positively instantiated */ | ||
144 | }; | ||
145 | |||
141 | /*****************************************************************************/ | 146 | /*****************************************************************************/ |
142 | /* | 147 | /* |
143 | * authentication token / access credential / keyring | 148 | * authentication token / access credential / keyring |
@@ -169,6 +174,7 @@ struct key { | |||
169 | * - may not match RCU dereferenced payload | 174 | * - may not match RCU dereferenced payload |
170 | * - payload should contain own length | 175 | * - payload should contain own length |
171 | */ | 176 | */ |
177 | short state; /* Key state (+) or rejection error (-) */ | ||
172 | 178 | ||
173 | #ifdef KEY_DEBUGGING | 179 | #ifdef KEY_DEBUGGING |
174 | unsigned magic; | 180 | unsigned magic; |
@@ -176,18 +182,16 @@ struct key { | |||
176 | #endif | 182 | #endif |
177 | 183 | ||
178 | unsigned long flags; /* status flags (change with bitops) */ | 184 | unsigned long flags; /* status flags (change with bitops) */ |
179 | #define KEY_FLAG_INSTANTIATED 0 /* set if key has been instantiated */ | 185 | #define KEY_FLAG_DEAD 0 /* set if key type has been deleted */ |
180 | #define KEY_FLAG_DEAD 1 /* set if key type has been deleted */ | 186 | #define KEY_FLAG_REVOKED 1 /* set if key had been revoked */ |
181 | #define KEY_FLAG_REVOKED 2 /* set if key had been revoked */ | 187 | #define KEY_FLAG_IN_QUOTA 2 /* set if key consumes quota */ |
182 | #define KEY_FLAG_IN_QUOTA 3 /* set if key consumes quota */ | 188 | #define KEY_FLAG_USER_CONSTRUCT 3 /* set if key is being constructed in userspace */ |
183 | #define KEY_FLAG_USER_CONSTRUCT 4 /* set if key is being constructed in userspace */ | 189 | #define KEY_FLAG_ROOT_CAN_CLEAR 4 /* set if key can be cleared by root without permission */ |
184 | #define KEY_FLAG_NEGATIVE 5 /* set if key is negative */ | 190 | #define KEY_FLAG_INVALIDATED 5 /* set if key has been invalidated */ |
185 | #define KEY_FLAG_ROOT_CAN_CLEAR 6 /* set if key can be cleared by root without permission */ | 191 | #define KEY_FLAG_BUILTIN 6 /* set if key is built in to the kernel */ |
186 | #define KEY_FLAG_INVALIDATED 7 /* set if key has been invalidated */ | 192 | #define KEY_FLAG_ROOT_CAN_INVAL 7 /* set if key can be invalidated by root without permission */ |
187 | #define KEY_FLAG_BUILTIN 8 /* set if key is built in to the kernel */ | 193 | #define KEY_FLAG_KEEP 8 /* set if key should not be removed */ |
188 | #define KEY_FLAG_ROOT_CAN_INVAL 9 /* set if key can be invalidated by root without permission */ | 194 | #define KEY_FLAG_UID_KEYRING 9 /* set if key is a user or user session keyring */ |
189 | #define KEY_FLAG_KEEP 10 /* set if key should not be removed */ | ||
190 | #define KEY_FLAG_UID_KEYRING 11 /* set if key is a user or user session keyring */ | ||
191 | 195 | ||
192 | /* the key type and key description string | 196 | /* the key type and key description string |
193 | * - the desc is used to match a key against search criteria | 197 | * - the desc is used to match a key against search criteria |
@@ -213,7 +217,6 @@ struct key { | |||
213 | struct list_head name_link; | 217 | struct list_head name_link; |
214 | struct assoc_array keys; | 218 | struct assoc_array keys; |
215 | }; | 219 | }; |
216 | int reject_error; | ||
217 | }; | 220 | }; |
218 | 221 | ||
219 | /* This is set on a keyring to restrict the addition of a link to a key | 222 | /* This is set on a keyring to restrict the addition of a link to a key |
@@ -353,17 +356,27 @@ extern void key_set_timeout(struct key *, unsigned); | |||
353 | #define KEY_NEED_SETATTR 0x20 /* Require permission to change attributes */ | 356 | #define KEY_NEED_SETATTR 0x20 /* Require permission to change attributes */ |
354 | #define KEY_NEED_ALL 0x3f /* All the above permissions */ | 357 | #define KEY_NEED_ALL 0x3f /* All the above permissions */ |
355 | 358 | ||
359 | static inline short key_read_state(const struct key *key) | ||
360 | { | ||
361 | /* Barrier versus mark_key_instantiated(). */ | ||
362 | return smp_load_acquire(&key->state); | ||
363 | } | ||
364 | |||
356 | /** | 365 | /** |
357 | * key_is_instantiated - Determine if a key has been positively instantiated | 366 | * key_is_positive - Determine if a key has been positively instantiated |
358 | * @key: The key to check. | 367 | * @key: The key to check. |
359 | * | 368 | * |
360 | * Return true if the specified key has been positively instantiated, false | 369 | * Return true if the specified key has been positively instantiated, false |
361 | * otherwise. | 370 | * otherwise. |
362 | */ | 371 | */ |
363 | static inline bool key_is_instantiated(const struct key *key) | 372 | static inline bool key_is_positive(const struct key *key) |
373 | { | ||
374 | return key_read_state(key) == KEY_IS_POSITIVE; | ||
375 | } | ||
376 | |||
377 | static inline bool key_is_negative(const struct key *key) | ||
364 | { | 378 | { |
365 | return test_bit(KEY_FLAG_INSTANTIATED, &key->flags) && | 379 | return key_read_state(key) < 0; |
366 | !test_bit(KEY_FLAG_NEGATIVE, &key->flags); | ||
367 | } | 380 | } |
368 | 381 | ||
369 | #define dereference_key_rcu(KEY) \ | 382 | #define dereference_key_rcu(KEY) \ |
diff --git a/include/linux/mbus.h b/include/linux/mbus.h index 0d3f14fd2621..4773145246ed 100644 --- a/include/linux/mbus.h +++ b/include/linux/mbus.h | |||
@@ -31,8 +31,8 @@ struct mbus_dram_target_info | |||
31 | struct mbus_dram_window { | 31 | struct mbus_dram_window { |
32 | u8 cs_index; | 32 | u8 cs_index; |
33 | u8 mbus_attr; | 33 | u8 mbus_attr; |
34 | u32 base; | 34 | u64 base; |
35 | u32 size; | 35 | u64 size; |
36 | } cs[4]; | 36 | } cs[4]; |
37 | }; | 37 | }; |
38 | 38 | ||
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index c57d4b7de3a8..c59af8ab753a 100644 --- a/include/linux/mlx5/port.h +++ b/include/linux/mlx5/port.h | |||
@@ -157,6 +157,8 @@ int mlx5_set_port_prio_tc(struct mlx5_core_dev *mdev, u8 *prio_tc); | |||
157 | int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev, | 157 | int mlx5_query_port_prio_tc(struct mlx5_core_dev *mdev, |
158 | u8 prio, u8 *tc); | 158 | u8 prio, u8 *tc); |
159 | int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group); | 159 | int mlx5_set_port_tc_group(struct mlx5_core_dev *mdev, u8 *tc_group); |
160 | int mlx5_query_port_tc_group(struct mlx5_core_dev *mdev, | ||
161 | u8 tc, u8 *tc_group); | ||
160 | int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw); | 162 | int mlx5_set_port_tc_bw_alloc(struct mlx5_core_dev *mdev, u8 *tc_bw); |
161 | int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev, | 163 | int mlx5_query_port_tc_bw_alloc(struct mlx5_core_dev *mdev, |
162 | u8 tc, u8 *bw_pct); | 164 | u8 tc, u8 *bw_pct); |
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 46f4ecf5479a..1861ea8dba77 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h | |||
@@ -445,6 +445,9 @@ struct mm_struct { | |||
445 | unsigned long flags; /* Must use atomic bitops to access the bits */ | 445 | unsigned long flags; /* Must use atomic bitops to access the bits */ |
446 | 446 | ||
447 | struct core_state *core_state; /* coredumping support */ | 447 | struct core_state *core_state; /* coredumping support */ |
448 | #ifdef CONFIG_MEMBARRIER | ||
449 | atomic_t membarrier_state; | ||
450 | #endif | ||
448 | #ifdef CONFIG_AIO | 451 | #ifdef CONFIG_AIO |
449 | spinlock_t ioctx_lock; | 452 | spinlock_t ioctx_lock; |
450 | struct kioctx_table __rcu *ioctx_table; | 453 | struct kioctx_table __rcu *ioctx_table; |
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index 694cebb50f72..2657f9f51536 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h | |||
@@ -293,6 +293,7 @@ struct pcmcia_device_id { | |||
293 | #define INPUT_DEVICE_ID_SND_MAX 0x07 | 293 | #define INPUT_DEVICE_ID_SND_MAX 0x07 |
294 | #define INPUT_DEVICE_ID_FF_MAX 0x7f | 294 | #define INPUT_DEVICE_ID_FF_MAX 0x7f |
295 | #define INPUT_DEVICE_ID_SW_MAX 0x0f | 295 | #define INPUT_DEVICE_ID_SW_MAX 0x0f |
296 | #define INPUT_DEVICE_ID_PROP_MAX 0x1f | ||
296 | 297 | ||
297 | #define INPUT_DEVICE_ID_MATCH_BUS 1 | 298 | #define INPUT_DEVICE_ID_MATCH_BUS 1 |
298 | #define INPUT_DEVICE_ID_MATCH_VENDOR 2 | 299 | #define INPUT_DEVICE_ID_MATCH_VENDOR 2 |
@@ -308,6 +309,7 @@ struct pcmcia_device_id { | |||
308 | #define INPUT_DEVICE_ID_MATCH_SNDBIT 0x0400 | 309 | #define INPUT_DEVICE_ID_MATCH_SNDBIT 0x0400 |
309 | #define INPUT_DEVICE_ID_MATCH_FFBIT 0x0800 | 310 | #define INPUT_DEVICE_ID_MATCH_FFBIT 0x0800 |
310 | #define INPUT_DEVICE_ID_MATCH_SWBIT 0x1000 | 311 | #define INPUT_DEVICE_ID_MATCH_SWBIT 0x1000 |
312 | #define INPUT_DEVICE_ID_MATCH_PROPBIT 0x2000 | ||
311 | 313 | ||
312 | struct input_device_id { | 314 | struct input_device_id { |
313 | 315 | ||
@@ -327,6 +329,7 @@ struct input_device_id { | |||
327 | kernel_ulong_t sndbit[INPUT_DEVICE_ID_SND_MAX / BITS_PER_LONG + 1]; | 329 | kernel_ulong_t sndbit[INPUT_DEVICE_ID_SND_MAX / BITS_PER_LONG + 1]; |
328 | kernel_ulong_t ffbit[INPUT_DEVICE_ID_FF_MAX / BITS_PER_LONG + 1]; | 330 | kernel_ulong_t ffbit[INPUT_DEVICE_ID_FF_MAX / BITS_PER_LONG + 1]; |
329 | kernel_ulong_t swbit[INPUT_DEVICE_ID_SW_MAX / BITS_PER_LONG + 1]; | 331 | kernel_ulong_t swbit[INPUT_DEVICE_ID_SW_MAX / BITS_PER_LONG + 1]; |
332 | kernel_ulong_t propbit[INPUT_DEVICE_ID_PROP_MAX / BITS_PER_LONG + 1]; | ||
330 | 333 | ||
331 | kernel_ulong_t driver_info; | 334 | kernel_ulong_t driver_info; |
332 | }; | 335 | }; |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index f535779d9dc1..2eaac7d75af4 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -3694,6 +3694,9 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, | |||
3694 | unsigned char name_assign_type, | 3694 | unsigned char name_assign_type, |
3695 | void (*setup)(struct net_device *), | 3695 | void (*setup)(struct net_device *), |
3696 | unsigned int txqs, unsigned int rxqs); | 3696 | unsigned int txqs, unsigned int rxqs); |
3697 | int dev_get_valid_name(struct net *net, struct net_device *dev, | ||
3698 | const char *name); | ||
3699 | |||
3697 | #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \ | 3700 | #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \ |
3698 | alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1) | 3701 | alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1) |
3699 | 3702 | ||
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h index 2c2a5514b0df..528b24c78308 100644 --- a/include/linux/netfilter_bridge/ebtables.h +++ b/include/linux/netfilter_bridge/ebtables.h | |||
@@ -108,9 +108,10 @@ struct ebt_table { | |||
108 | 108 | ||
109 | #define EBT_ALIGN(s) (((s) + (__alignof__(struct _xt_align)-1)) & \ | 109 | #define EBT_ALIGN(s) (((s) + (__alignof__(struct _xt_align)-1)) & \ |
110 | ~(__alignof__(struct _xt_align)-1)) | 110 | ~(__alignof__(struct _xt_align)-1)) |
111 | extern struct ebt_table *ebt_register_table(struct net *net, | 111 | extern int ebt_register_table(struct net *net, |
112 | const struct ebt_table *table, | 112 | const struct ebt_table *table, |
113 | const struct nf_hook_ops *); | 113 | const struct nf_hook_ops *ops, |
114 | struct ebt_table **res); | ||
114 | extern void ebt_unregister_table(struct net *net, struct ebt_table *table, | 115 | extern void ebt_unregister_table(struct net *net, struct ebt_table *table, |
115 | const struct nf_hook_ops *); | 116 | const struct nf_hook_ops *); |
116 | extern unsigned int ebt_do_table(struct sk_buff *skb, | 117 | extern unsigned int ebt_do_table(struct sk_buff *skb, |
diff --git a/include/linux/of.h b/include/linux/of.h index cfc34117fc92..b240ed69dc96 100644 --- a/include/linux/of.h +++ b/include/linux/of.h | |||
@@ -734,6 +734,16 @@ static inline struct device_node *of_get_cpu_node(int cpu, | |||
734 | return NULL; | 734 | return NULL; |
735 | } | 735 | } |
736 | 736 | ||
737 | static inline int of_n_addr_cells(struct device_node *np) | ||
738 | { | ||
739 | return 0; | ||
740 | |||
741 | } | ||
742 | static inline int of_n_size_cells(struct device_node *np) | ||
743 | { | ||
744 | return 0; | ||
745 | } | ||
746 | |||
737 | static inline int of_property_read_u64(const struct device_node *np, | 747 | static inline int of_property_read_u64(const struct device_node *np, |
738 | const char *propname, u64 *out_value) | 748 | const char *propname, u64 *out_value) |
739 | { | 749 | { |
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h index 032b55909145..6737a8c9e8c6 100644 --- a/include/linux/pm_qos.h +++ b/include/linux/pm_qos.h | |||
@@ -27,16 +27,17 @@ enum pm_qos_flags_status { | |||
27 | PM_QOS_FLAGS_ALL, | 27 | PM_QOS_FLAGS_ALL, |
28 | }; | 28 | }; |
29 | 29 | ||
30 | #define PM_QOS_DEFAULT_VALUE -1 | 30 | #define PM_QOS_DEFAULT_VALUE (-1) |
31 | #define PM_QOS_LATENCY_ANY S32_MAX | ||
31 | 32 | ||
32 | #define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) | 33 | #define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) |
33 | #define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) | 34 | #define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) |
34 | #define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0 | 35 | #define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0 |
35 | #define PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE 0 | 36 | #define PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE 0 |
36 | #define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE 0 | 37 | #define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE 0 |
38 | #define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT PM_QOS_LATENCY_ANY | ||
37 | #define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0 | 39 | #define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0 |
38 | #define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1) | 40 | #define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1) |
39 | #define PM_QOS_LATENCY_ANY ((s32)(~(__u32)0 >> 1)) | ||
40 | 41 | ||
41 | #define PM_QOS_FLAG_NO_POWER_OFF (1 << 0) | 42 | #define PM_QOS_FLAG_NO_POWER_OFF (1 << 0) |
42 | #define PM_QOS_FLAG_REMOTE_WAKEUP (1 << 1) | 43 | #define PM_QOS_FLAG_REMOTE_WAKEUP (1 << 1) |
diff --git a/include/linux/rculist.h b/include/linux/rculist.h index b1fd8bf85fdc..2bea1d5e9930 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h | |||
@@ -276,7 +276,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list, | |||
276 | #define list_entry_rcu(ptr, type, member) \ | 276 | #define list_entry_rcu(ptr, type, member) \ |
277 | container_of(lockless_dereference(ptr), type, member) | 277 | container_of(lockless_dereference(ptr), type, member) |
278 | 278 | ||
279 | /** | 279 | /* |
280 | * Where are list_empty_rcu() and list_first_entry_rcu()? | 280 | * Where are list_empty_rcu() and list_first_entry_rcu()? |
281 | * | 281 | * |
282 | * Implementing those functions following their counterparts list_empty() and | 282 | * Implementing those functions following their counterparts list_empty() and |
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index de50d8a4cf41..1a9f70d44af9 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
@@ -523,7 +523,7 @@ static inline void rcu_preempt_sleep_check(void) { } | |||
523 | * Return the value of the specified RCU-protected pointer, but omit | 523 | * Return the value of the specified RCU-protected pointer, but omit |
524 | * both the smp_read_barrier_depends() and the READ_ONCE(). This | 524 | * both the smp_read_barrier_depends() and the READ_ONCE(). This |
525 | * is useful in cases where update-side locks prevent the value of the | 525 | * is useful in cases where update-side locks prevent the value of the |
526 | * pointer from changing. Please note that this primitive does -not- | 526 | * pointer from changing. Please note that this primitive does *not* |
527 | * prevent the compiler from repeating this reference or combining it | 527 | * prevent the compiler from repeating this reference or combining it |
528 | * with other references, so it should not be used without protection | 528 | * with other references, so it should not be used without protection |
529 | * of appropriate locks. | 529 | * of appropriate locks. |
@@ -568,7 +568,7 @@ static inline void rcu_preempt_sleep_check(void) { } | |||
568 | * is handed off from RCU to some other synchronization mechanism, for | 568 | * is handed off from RCU to some other synchronization mechanism, for |
569 | * example, reference counting or locking. In C11, it would map to | 569 | * example, reference counting or locking. In C11, it would map to |
570 | * kill_dependency(). It could be used as follows: | 570 | * kill_dependency(). It could be used as follows: |
571 | * | 571 | * `` |
572 | * rcu_read_lock(); | 572 | * rcu_read_lock(); |
573 | * p = rcu_dereference(gp); | 573 | * p = rcu_dereference(gp); |
574 | * long_lived = is_long_lived(p); | 574 | * long_lived = is_long_lived(p); |
@@ -579,6 +579,7 @@ static inline void rcu_preempt_sleep_check(void) { } | |||
579 | * p = rcu_pointer_handoff(p); | 579 | * p = rcu_pointer_handoff(p); |
580 | * } | 580 | * } |
581 | * rcu_read_unlock(); | 581 | * rcu_read_unlock(); |
582 | *`` | ||
582 | */ | 583 | */ |
583 | #define rcu_pointer_handoff(p) (p) | 584 | #define rcu_pointer_handoff(p) (p) |
584 | 585 | ||
@@ -778,18 +779,21 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) | |||
778 | 779 | ||
779 | /** | 780 | /** |
780 | * RCU_INIT_POINTER() - initialize an RCU protected pointer | 781 | * RCU_INIT_POINTER() - initialize an RCU protected pointer |
782 | * @p: The pointer to be initialized. | ||
783 | * @v: The value to initialized the pointer to. | ||
781 | * | 784 | * |
782 | * Initialize an RCU-protected pointer in special cases where readers | 785 | * Initialize an RCU-protected pointer in special cases where readers |
783 | * do not need ordering constraints on the CPU or the compiler. These | 786 | * do not need ordering constraints on the CPU or the compiler. These |
784 | * special cases are: | 787 | * special cases are: |
785 | * | 788 | * |
786 | * 1. This use of RCU_INIT_POINTER() is NULLing out the pointer -or- | 789 | * 1. This use of RCU_INIT_POINTER() is NULLing out the pointer *or* |
787 | * 2. The caller has taken whatever steps are required to prevent | 790 | * 2. The caller has taken whatever steps are required to prevent |
788 | * RCU readers from concurrently accessing this pointer -or- | 791 | * RCU readers from concurrently accessing this pointer *or* |
789 | * 3. The referenced data structure has already been exposed to | 792 | * 3. The referenced data structure has already been exposed to |
790 | * readers either at compile time or via rcu_assign_pointer() -and- | 793 | * readers either at compile time or via rcu_assign_pointer() *and* |
791 | * a. You have not made -any- reader-visible changes to | 794 | * |
792 | * this structure since then -or- | 795 | * a. You have not made *any* reader-visible changes to |
796 | * this structure since then *or* | ||
793 | * b. It is OK for readers accessing this structure from its | 797 | * b. It is OK for readers accessing this structure from its |
794 | * new location to see the old state of the structure. (For | 798 | * new location to see the old state of the structure. (For |
795 | * example, the changes were to statistical counters or to | 799 | * example, the changes were to statistical counters or to |
@@ -805,7 +809,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) | |||
805 | * by a single external-to-structure RCU-protected pointer, then you may | 809 | * by a single external-to-structure RCU-protected pointer, then you may |
806 | * use RCU_INIT_POINTER() to initialize the internal RCU-protected | 810 | * use RCU_INIT_POINTER() to initialize the internal RCU-protected |
807 | * pointers, but you must use rcu_assign_pointer() to initialize the | 811 | * pointers, but you must use rcu_assign_pointer() to initialize the |
808 | * external-to-structure pointer -after- you have completely initialized | 812 | * external-to-structure pointer *after* you have completely initialized |
809 | * the reader-accessible portions of the linked structure. | 813 | * the reader-accessible portions of the linked structure. |
810 | * | 814 | * |
811 | * Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no | 815 | * Note that unlike rcu_assign_pointer(), RCU_INIT_POINTER() provides no |
@@ -819,6 +823,8 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) | |||
819 | 823 | ||
820 | /** | 824 | /** |
821 | * RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer | 825 | * RCU_POINTER_INITIALIZER() - statically initialize an RCU protected pointer |
826 | * @p: The pointer to be initialized. | ||
827 | * @v: The value to initialized the pointer to. | ||
822 | * | 828 | * |
823 | * GCC-style initialization for an RCU-protected pointer in a structure field. | 829 | * GCC-style initialization for an RCU-protected pointer in a structure field. |
824 | */ | 830 | */ |
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index ae53e413fb13..ab9bf7b73954 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h | |||
@@ -211,4 +211,20 @@ static inline void memalloc_noreclaim_restore(unsigned int flags) | |||
211 | current->flags = (current->flags & ~PF_MEMALLOC) | flags; | 211 | current->flags = (current->flags & ~PF_MEMALLOC) | flags; |
212 | } | 212 | } |
213 | 213 | ||
214 | #ifdef CONFIG_MEMBARRIER | ||
215 | enum { | ||
216 | MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = (1U << 0), | ||
217 | MEMBARRIER_STATE_SWITCH_MM = (1U << 1), | ||
218 | }; | ||
219 | |||
220 | static inline void membarrier_execve(struct task_struct *t) | ||
221 | { | ||
222 | atomic_set(&t->mm->membarrier_state, 0); | ||
223 | } | ||
224 | #else | ||
225 | static inline void membarrier_execve(struct task_struct *t) | ||
226 | { | ||
227 | } | ||
228 | #endif | ||
229 | |||
214 | #endif /* _LINUX_SCHED_MM_H */ | 230 | #endif /* _LINUX_SCHED_MM_H */ |
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h index d7b6dab956ec..7d065abc7a47 100644 --- a/include/linux/sched/topology.h +++ b/include/linux/sched/topology.h | |||
@@ -71,14 +71,6 @@ struct sched_domain_shared { | |||
71 | atomic_t ref; | 71 | atomic_t ref; |
72 | atomic_t nr_busy_cpus; | 72 | atomic_t nr_busy_cpus; |
73 | int has_idle_cores; | 73 | int has_idle_cores; |
74 | |||
75 | /* | ||
76 | * Some variables from the most recent sd_lb_stats for this domain, | ||
77 | * used by wake_affine(). | ||
78 | */ | ||
79 | unsigned long nr_running; | ||
80 | unsigned long load; | ||
81 | unsigned long capacity; | ||
82 | }; | 74 | }; |
83 | 75 | ||
84 | struct sched_domain { | 76 | struct sched_domain { |
diff --git a/include/linux/sctp.h b/include/linux/sctp.h index 82b171e1aa0b..da803dfc7a39 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h | |||
@@ -231,7 +231,7 @@ struct sctp_datahdr { | |||
231 | __be32 tsn; | 231 | __be32 tsn; |
232 | __be16 stream; | 232 | __be16 stream; |
233 | __be16 ssn; | 233 | __be16 ssn; |
234 | __be32 ppid; | 234 | __u32 ppid; |
235 | __u8 payload[0]; | 235 | __u8 payload[0]; |
236 | }; | 236 | }; |
237 | 237 | ||
@@ -716,28 +716,28 @@ struct sctp_reconf_chunk { | |||
716 | 716 | ||
717 | struct sctp_strreset_outreq { | 717 | struct sctp_strreset_outreq { |
718 | struct sctp_paramhdr param_hdr; | 718 | struct sctp_paramhdr param_hdr; |
719 | __u32 request_seq; | 719 | __be32 request_seq; |
720 | __u32 response_seq; | 720 | __be32 response_seq; |
721 | __u32 send_reset_at_tsn; | 721 | __be32 send_reset_at_tsn; |
722 | __u16 list_of_streams[0]; | 722 | __be16 list_of_streams[0]; |
723 | }; | 723 | }; |
724 | 724 | ||
725 | struct sctp_strreset_inreq { | 725 | struct sctp_strreset_inreq { |
726 | struct sctp_paramhdr param_hdr; | 726 | struct sctp_paramhdr param_hdr; |
727 | __u32 request_seq; | 727 | __be32 request_seq; |
728 | __u16 list_of_streams[0]; | 728 | __be16 list_of_streams[0]; |
729 | }; | 729 | }; |
730 | 730 | ||
731 | struct sctp_strreset_tsnreq { | 731 | struct sctp_strreset_tsnreq { |
732 | struct sctp_paramhdr param_hdr; | 732 | struct sctp_paramhdr param_hdr; |
733 | __u32 request_seq; | 733 | __be32 request_seq; |
734 | }; | 734 | }; |
735 | 735 | ||
736 | struct sctp_strreset_addstrm { | 736 | struct sctp_strreset_addstrm { |
737 | struct sctp_paramhdr param_hdr; | 737 | struct sctp_paramhdr param_hdr; |
738 | __u32 request_seq; | 738 | __be32 request_seq; |
739 | __u16 number_of_streams; | 739 | __be16 number_of_streams; |
740 | __u16 reserved; | 740 | __be16 reserved; |
741 | }; | 741 | }; |
742 | 742 | ||
743 | enum { | 743 | enum { |
@@ -752,16 +752,16 @@ enum { | |||
752 | 752 | ||
753 | struct sctp_strreset_resp { | 753 | struct sctp_strreset_resp { |
754 | struct sctp_paramhdr param_hdr; | 754 | struct sctp_paramhdr param_hdr; |
755 | __u32 response_seq; | 755 | __be32 response_seq; |
756 | __u32 result; | 756 | __be32 result; |
757 | }; | 757 | }; |
758 | 758 | ||
759 | struct sctp_strreset_resptsn { | 759 | struct sctp_strreset_resptsn { |
760 | struct sctp_paramhdr param_hdr; | 760 | struct sctp_paramhdr param_hdr; |
761 | __u32 response_seq; | 761 | __be32 response_seq; |
762 | __u32 result; | 762 | __be32 result; |
763 | __u32 senders_next_tsn; | 763 | __be32 senders_next_tsn; |
764 | __u32 receivers_next_tsn; | 764 | __be32 receivers_next_tsn; |
765 | }; | 765 | }; |
766 | 766 | ||
767 | #endif /* __LINUX_SCTP_H__ */ | 767 | #endif /* __LINUX_SCTP_H__ */ |
diff --git a/include/linux/srcu.h b/include/linux/srcu.h index 39af9bc0f653..62be8966e837 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h | |||
@@ -78,6 +78,7 @@ void synchronize_srcu(struct srcu_struct *sp); | |||
78 | 78 | ||
79 | /** | 79 | /** |
80 | * srcu_read_lock_held - might we be in SRCU read-side critical section? | 80 | * srcu_read_lock_held - might we be in SRCU read-side critical section? |
81 | * @sp: The srcu_struct structure to check | ||
81 | * | 82 | * |
82 | * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU | 83 | * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU |
83 | * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, | 84 | * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, |
diff --git a/include/linux/swait.h b/include/linux/swait.h index 73e97a08d3d0..cf30f5022472 100644 --- a/include/linux/swait.h +++ b/include/linux/swait.h | |||
@@ -9,13 +9,16 @@ | |||
9 | /* | 9 | /* |
10 | * Simple wait queues | 10 | * Simple wait queues |
11 | * | 11 | * |
12 | * While these are very similar to the other/complex wait queues (wait.h) the | 12 | * While these are very similar to regular wait queues (wait.h) the most |
13 | * most important difference is that the simple waitqueue allows for | 13 | * important difference is that the simple waitqueue allows for deterministic |
14 | * deterministic behaviour -- IOW it has strictly bounded IRQ and lock hold | 14 | * behaviour -- IOW it has strictly bounded IRQ and lock hold times. |
15 | * times. | ||
16 | * | 15 | * |
17 | * In order to make this so, we had to drop a fair number of features of the | 16 | * Mainly, this is accomplished by two things. Firstly not allowing swake_up_all |
18 | * other waitqueue code; notably: | 17 | * from IRQ disabled, and dropping the lock upon every wakeup, giving a higher |
18 | * priority task a chance to run. | ||
19 | * | ||
20 | * Secondly, we had to drop a fair number of features of the other waitqueue | ||
21 | * code; notably: | ||
19 | * | 22 | * |
20 | * - mixing INTERRUPTIBLE and UNINTERRUPTIBLE sleeps on the same waitqueue; | 23 | * - mixing INTERRUPTIBLE and UNINTERRUPTIBLE sleeps on the same waitqueue; |
21 | * all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right | 24 | * all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right |
@@ -24,12 +27,14 @@ | |||
24 | * - the exclusive mode; because this requires preserving the list order | 27 | * - the exclusive mode; because this requires preserving the list order |
25 | * and this is hard. | 28 | * and this is hard. |
26 | * | 29 | * |
27 | * - custom wake functions; because you cannot give any guarantees about | 30 | * - custom wake callback functions; because you cannot give any guarantees |
28 | * random code. | 31 | * about random code. This also allows swait to be used in RT, such that |
29 | * | 32 | * raw spinlock can be used for the swait queue head. |
30 | * As a side effect of this; the data structures are slimmer. | ||
31 | * | 33 | * |
32 | * One would recommend using this wait queue where possible. | 34 | * As a side effect of these; the data structures are slimmer albeit more ad-hoc. |
35 | * For all the above, note that simple wait queues should _only_ be used under | ||
36 | * very specific realtime constraints -- it is best to stick with the regular | ||
37 | * wait queues in most cases. | ||
33 | */ | 38 | */ |
34 | 39 | ||
35 | struct task_struct; | 40 | struct task_struct; |
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index 905d769d8ddc..5f7eeab990fe 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h | |||
@@ -42,7 +42,7 @@ enum { | |||
42 | #define THREAD_ALIGN THREAD_SIZE | 42 | #define THREAD_ALIGN THREAD_SIZE |
43 | #endif | 43 | #endif |
44 | 44 | ||
45 | #ifdef CONFIG_DEBUG_STACK_USAGE | 45 | #if IS_ENABLED(CONFIG_DEBUG_STACK_USAGE) || IS_ENABLED(CONFIG_DEBUG_KMEMLEAK) |
46 | # define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | \ | 46 | # define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | \ |
47 | __GFP_ZERO) | 47 | __GFP_ZERO) |
48 | #else | 48 | #else |
diff --git a/include/net/fq_impl.h b/include/net/fq_impl.h index 4e6131cd3f43..ac1a2317941e 100644 --- a/include/net/fq_impl.h +++ b/include/net/fq_impl.h | |||
@@ -146,6 +146,7 @@ static void fq_tin_enqueue(struct fq *fq, | |||
146 | fq_flow_get_default_t get_default_func) | 146 | fq_flow_get_default_t get_default_func) |
147 | { | 147 | { |
148 | struct fq_flow *flow; | 148 | struct fq_flow *flow; |
149 | bool oom; | ||
149 | 150 | ||
150 | lockdep_assert_held(&fq->lock); | 151 | lockdep_assert_held(&fq->lock); |
151 | 152 | ||
@@ -167,8 +168,8 @@ static void fq_tin_enqueue(struct fq *fq, | |||
167 | } | 168 | } |
168 | 169 | ||
169 | __skb_queue_tail(&flow->queue, skb); | 170 | __skb_queue_tail(&flow->queue, skb); |
170 | 171 | oom = (fq->memory_usage > fq->memory_limit); | |
171 | if (fq->backlog > fq->limit || fq->memory_usage > fq->memory_limit) { | 172 | while (fq->backlog > fq->limit || oom) { |
172 | flow = list_first_entry_or_null(&fq->backlogs, | 173 | flow = list_first_entry_or_null(&fq->backlogs, |
173 | struct fq_flow, | 174 | struct fq_flow, |
174 | backlogchain); | 175 | backlogchain); |
@@ -183,8 +184,10 @@ static void fq_tin_enqueue(struct fq *fq, | |||
183 | 184 | ||
184 | flow->tin->overlimit++; | 185 | flow->tin->overlimit++; |
185 | fq->overlimit++; | 186 | fq->overlimit++; |
186 | if (fq->memory_usage > fq->memory_limit) | 187 | if (oom) { |
187 | fq->overmemory++; | 188 | fq->overmemory++; |
189 | oom = (fq->memory_usage > fq->memory_limit); | ||
190 | } | ||
188 | } | 191 | } |
189 | } | 192 | } |
190 | 193 | ||
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index aa95053dfc78..db8162dd8c0b 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h | |||
@@ -96,7 +96,7 @@ struct inet_request_sock { | |||
96 | kmemcheck_bitfield_end(flags); | 96 | kmemcheck_bitfield_end(flags); |
97 | u32 ir_mark; | 97 | u32 ir_mark; |
98 | union { | 98 | union { |
99 | struct ip_options_rcu *opt; | 99 | struct ip_options_rcu __rcu *ireq_opt; |
100 | #if IS_ENABLED(CONFIG_IPV6) | 100 | #if IS_ENABLED(CONFIG_IPV6) |
101 | struct { | 101 | struct { |
102 | struct ipv6_txoptions *ipv6_opt; | 102 | struct ipv6_txoptions *ipv6_opt; |
@@ -132,6 +132,12 @@ static inline int inet_request_bound_dev_if(const struct sock *sk, | |||
132 | return sk->sk_bound_dev_if; | 132 | return sk->sk_bound_dev_if; |
133 | } | 133 | } |
134 | 134 | ||
135 | static inline struct ip_options_rcu *ireq_opt_deref(const struct inet_request_sock *ireq) | ||
136 | { | ||
137 | return rcu_dereference_check(ireq->ireq_opt, | ||
138 | refcount_read(&ireq->req.rsk_refcnt) > 0); | ||
139 | } | ||
140 | |||
135 | struct inet_cork { | 141 | struct inet_cork { |
136 | unsigned int flags; | 142 | unsigned int flags; |
137 | __be32 addr; | 143 | __be32 addr; |
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h index e80edd8879ef..3009547f3c66 100644 --- a/include/net/pkt_cls.h +++ b/include/net/pkt_cls.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #define __NET_PKT_CLS_H | 2 | #define __NET_PKT_CLS_H |
3 | 3 | ||
4 | #include <linux/pkt_cls.h> | 4 | #include <linux/pkt_cls.h> |
5 | #include <linux/workqueue.h> | ||
5 | #include <net/sch_generic.h> | 6 | #include <net/sch_generic.h> |
6 | #include <net/act_api.h> | 7 | #include <net/act_api.h> |
7 | 8 | ||
@@ -17,6 +18,8 @@ struct tcf_walker { | |||
17 | int register_tcf_proto_ops(struct tcf_proto_ops *ops); | 18 | int register_tcf_proto_ops(struct tcf_proto_ops *ops); |
18 | int unregister_tcf_proto_ops(struct tcf_proto_ops *ops); | 19 | int unregister_tcf_proto_ops(struct tcf_proto_ops *ops); |
19 | 20 | ||
21 | bool tcf_queue_work(struct work_struct *work); | ||
22 | |||
20 | #ifdef CONFIG_NET_CLS | 23 | #ifdef CONFIG_NET_CLS |
21 | struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index, | 24 | struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index, |
22 | bool create); | 25 | bool create); |
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 135f5a2dd931..0dec8a23be57 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/dynamic_queue_limits.h> | 10 | #include <linux/dynamic_queue_limits.h> |
11 | #include <linux/list.h> | 11 | #include <linux/list.h> |
12 | #include <linux/refcount.h> | 12 | #include <linux/refcount.h> |
13 | #include <linux/workqueue.h> | ||
13 | #include <net/gen_stats.h> | 14 | #include <net/gen_stats.h> |
14 | #include <net/rtnetlink.h> | 15 | #include <net/rtnetlink.h> |
15 | 16 | ||
@@ -271,6 +272,7 @@ struct tcf_chain { | |||
271 | 272 | ||
272 | struct tcf_block { | 273 | struct tcf_block { |
273 | struct list_head chain_list; | 274 | struct list_head chain_list; |
275 | struct work_struct work; | ||
274 | }; | 276 | }; |
275 | 277 | ||
276 | static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) | 278 | static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) |
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index 2db3d3a9ce1d..88233cf8b8d4 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h | |||
@@ -261,7 +261,7 @@ struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc, | |||
261 | struct sctp_fwdtsn_skip *skiplist); | 261 | struct sctp_fwdtsn_skip *skiplist); |
262 | struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc); | 262 | struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc); |
263 | struct sctp_chunk *sctp_make_strreset_req(const struct sctp_association *asoc, | 263 | struct sctp_chunk *sctp_make_strreset_req(const struct sctp_association *asoc, |
264 | __u16 stream_num, __u16 *stream_list, | 264 | __u16 stream_num, __be16 *stream_list, |
265 | bool out, bool in); | 265 | bool out, bool in); |
266 | struct sctp_chunk *sctp_make_strreset_tsnreq( | 266 | struct sctp_chunk *sctp_make_strreset_tsnreq( |
267 | const struct sctp_association *asoc); | 267 | const struct sctp_association *asoc); |
diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h index b8c86ec1a8f5..231dc42f1da6 100644 --- a/include/net/sctp/ulpevent.h +++ b/include/net/sctp/ulpevent.h | |||
@@ -130,7 +130,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_sender_dry_event( | |||
130 | 130 | ||
131 | struct sctp_ulpevent *sctp_ulpevent_make_stream_reset_event( | 131 | struct sctp_ulpevent *sctp_ulpevent_make_stream_reset_event( |
132 | const struct sctp_association *asoc, __u16 flags, | 132 | const struct sctp_association *asoc, __u16 flags, |
133 | __u16 stream_num, __u16 *stream_list, gfp_t gfp); | 133 | __u16 stream_num, __be16 *stream_list, gfp_t gfp); |
134 | 134 | ||
135 | struct sctp_ulpevent *sctp_ulpevent_make_assoc_reset_event( | 135 | struct sctp_ulpevent *sctp_ulpevent_make_assoc_reset_event( |
136 | const struct sctp_association *asoc, __u16 flags, | 136 | const struct sctp_association *asoc, __u16 flags, |
diff --git a/include/net/strparser.h b/include/net/strparser.h index 7dc131d62ad5..d96b59f45eba 100644 --- a/include/net/strparser.h +++ b/include/net/strparser.h | |||
@@ -74,10 +74,9 @@ struct strparser { | |||
74 | u32 unrecov_intr : 1; | 74 | u32 unrecov_intr : 1; |
75 | 75 | ||
76 | struct sk_buff **skb_nextp; | 76 | struct sk_buff **skb_nextp; |
77 | struct timer_list msg_timer; | ||
78 | struct sk_buff *skb_head; | 77 | struct sk_buff *skb_head; |
79 | unsigned int need_bytes; | 78 | unsigned int need_bytes; |
80 | struct delayed_work delayed_work; | 79 | struct delayed_work msg_timer_work; |
81 | struct work_struct work; | 80 | struct work_struct work; |
82 | struct strp_stats stats; | 81 | struct strp_stats stats; |
83 | struct strp_callbacks cb; | 82 | struct strp_callbacks cb; |
diff --git a/include/net/tcp.h b/include/net/tcp.h index 89974c5286d8..33599d17522d 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h | |||
@@ -840,6 +840,12 @@ struct tcp_skb_cb { | |||
840 | struct inet6_skb_parm h6; | 840 | struct inet6_skb_parm h6; |
841 | #endif | 841 | #endif |
842 | } header; /* For incoming skbs */ | 842 | } header; /* For incoming skbs */ |
843 | struct { | ||
844 | __u32 key; | ||
845 | __u32 flags; | ||
846 | struct bpf_map *map; | ||
847 | void *data_end; | ||
848 | } bpf; | ||
843 | }; | 849 | }; |
844 | }; | 850 | }; |
845 | 851 | ||
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index f90860d1f897..0d7948ce2128 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h | |||
@@ -575,7 +575,7 @@ union bpf_attr { | |||
575 | * @map: pointer to sockmap | 575 | * @map: pointer to sockmap |
576 | * @key: key to lookup sock in map | 576 | * @key: key to lookup sock in map |
577 | * @flags: reserved for future use | 577 | * @flags: reserved for future use |
578 | * Return: SK_REDIRECT | 578 | * Return: SK_PASS |
579 | * | 579 | * |
580 | * int bpf_sock_map_update(skops, map, key, flags) | 580 | * int bpf_sock_map_update(skops, map, key, flags) |
581 | * @skops: pointer to bpf_sock_ops | 581 | * @skops: pointer to bpf_sock_ops |
@@ -786,8 +786,8 @@ struct xdp_md { | |||
786 | }; | 786 | }; |
787 | 787 | ||
788 | enum sk_action { | 788 | enum sk_action { |
789 | SK_ABORTED = 0, | 789 | SK_DROP = 0, |
790 | SK_DROP, | 790 | SK_PASS, |
791 | SK_REDIRECT, | 791 | SK_REDIRECT, |
792 | }; | 792 | }; |
793 | 793 | ||
diff --git a/include/uapi/linux/membarrier.h b/include/uapi/linux/membarrier.h index 6d47b3249d8a..4e01ad7ffe98 100644 --- a/include/uapi/linux/membarrier.h +++ b/include/uapi/linux/membarrier.h | |||
@@ -52,21 +52,30 @@ | |||
52 | * (non-running threads are de facto in such a | 52 | * (non-running threads are de facto in such a |
53 | * state). This only covers threads from the | 53 | * state). This only covers threads from the |
54 | * same processes as the caller thread. This | 54 | * same processes as the caller thread. This |
55 | * command returns 0. The "expedited" commands | 55 | * command returns 0 on success. The |
56 | * complete faster than the non-expedited ones, | 56 | * "expedited" commands complete faster than |
57 | * they never block, but have the downside of | 57 | * the non-expedited ones, they never block, |
58 | * causing extra overhead. | 58 | * but have the downside of causing extra |
59 | * overhead. A process needs to register its | ||
60 | * intent to use the private expedited command | ||
61 | * prior to using it, otherwise this command | ||
62 | * returns -EPERM. | ||
63 | * @MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED: | ||
64 | * Register the process intent to use | ||
65 | * MEMBARRIER_CMD_PRIVATE_EXPEDITED. Always | ||
66 | * returns 0. | ||
59 | * | 67 | * |
60 | * Command to be passed to the membarrier system call. The commands need to | 68 | * Command to be passed to the membarrier system call. The commands need to |
61 | * be a single bit each, except for MEMBARRIER_CMD_QUERY which is assigned to | 69 | * be a single bit each, except for MEMBARRIER_CMD_QUERY which is assigned to |
62 | * the value 0. | 70 | * the value 0. |
63 | */ | 71 | */ |
64 | enum membarrier_cmd { | 72 | enum membarrier_cmd { |
65 | MEMBARRIER_CMD_QUERY = 0, | 73 | MEMBARRIER_CMD_QUERY = 0, |
66 | MEMBARRIER_CMD_SHARED = (1 << 0), | 74 | MEMBARRIER_CMD_SHARED = (1 << 0), |
67 | /* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */ | 75 | /* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */ |
68 | /* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */ | 76 | /* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */ |
69 | MEMBARRIER_CMD_PRIVATE_EXPEDITED = (1 << 3), | 77 | MEMBARRIER_CMD_PRIVATE_EXPEDITED = (1 << 3), |
78 | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED = (1 << 4), | ||
70 | }; | 79 | }; |
71 | 80 | ||
72 | #endif /* _UAPI_LINUX_MEMBARRIER_H */ | 81 | #endif /* _UAPI_LINUX_MEMBARRIER_H */ |
diff --git a/include/uapi/linux/netfilter/xt_bpf.h b/include/uapi/linux/netfilter/xt_bpf.h index b97725af2ac0..da161b56c79e 100644 --- a/include/uapi/linux/netfilter/xt_bpf.h +++ b/include/uapi/linux/netfilter/xt_bpf.h | |||
@@ -23,6 +23,7 @@ enum xt_bpf_modes { | |||
23 | XT_BPF_MODE_FD_PINNED, | 23 | XT_BPF_MODE_FD_PINNED, |
24 | XT_BPF_MODE_FD_ELF, | 24 | XT_BPF_MODE_FD_ELF, |
25 | }; | 25 | }; |
26 | #define XT_BPF_MODE_PATH_PINNED XT_BPF_MODE_FD_PINNED | ||
26 | 27 | ||
27 | struct xt_bpf_info_v1 { | 28 | struct xt_bpf_info_v1 { |
28 | __u16 mode; | 29 | __u16 mode; |
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h index 6217ff8500a1..84fc2914b7fb 100644 --- a/include/uapi/linux/sctp.h +++ b/include/uapi/linux/sctp.h | |||
@@ -376,7 +376,7 @@ struct sctp_remote_error { | |||
376 | __u16 sre_type; | 376 | __u16 sre_type; |
377 | __u16 sre_flags; | 377 | __u16 sre_flags; |
378 | __u32 sre_length; | 378 | __u32 sre_length; |
379 | __u16 sre_error; | 379 | __be16 sre_error; |
380 | sctp_assoc_t sre_assoc_id; | 380 | sctp_assoc_t sre_assoc_id; |
381 | __u8 sre_data[0]; | 381 | __u8 sre_data[0]; |
382 | }; | 382 | }; |
diff --git a/include/uapi/linux/spi/spidev.h b/include/uapi/linux/spi/spidev.h index dd5f21e75805..856de39d0b89 100644 --- a/include/uapi/linux/spi/spidev.h +++ b/include/uapi/linux/spi/spidev.h | |||
@@ -23,6 +23,7 @@ | |||
23 | #define SPIDEV_H | 23 | #define SPIDEV_H |
24 | 24 | ||
25 | #include <linux/types.h> | 25 | #include <linux/types.h> |
26 | #include <linux/ioctl.h> | ||
26 | 27 | ||
27 | /* User space versions of kernel symbols for SPI clocking modes, | 28 | /* User space versions of kernel symbols for SPI clocking modes, |
28 | * matching <linux/spi/spi.h> | 29 | * matching <linux/spi/spi.h> |
diff --git a/init/Kconfig b/init/Kconfig index 78cb2461012e..3c1faaa2af4a 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
@@ -1033,7 +1033,7 @@ endif | |||
1033 | 1033 | ||
1034 | choice | 1034 | choice |
1035 | prompt "Compiler optimization level" | 1035 | prompt "Compiler optimization level" |
1036 | default CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE | 1036 | default CC_OPTIMIZE_FOR_PERFORMANCE |
1037 | 1037 | ||
1038 | config CC_OPTIMIZE_FOR_PERFORMANCE | 1038 | config CC_OPTIMIZE_FOR_PERFORMANCE |
1039 | bool "Optimize for performance" | 1039 | bool "Optimize for performance" |
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c index 98c0f00c3f5e..e2636737b69b 100644 --- a/kernel/bpf/arraymap.c +++ b/kernel/bpf/arraymap.c | |||
@@ -98,7 +98,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr) | |||
98 | array_size += (u64) attr->max_entries * elem_size * num_possible_cpus(); | 98 | array_size += (u64) attr->max_entries * elem_size * num_possible_cpus(); |
99 | 99 | ||
100 | if (array_size >= U32_MAX - PAGE_SIZE || | 100 | if (array_size >= U32_MAX - PAGE_SIZE || |
101 | elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) { | 101 | bpf_array_alloc_percpu(array)) { |
102 | bpf_map_area_free(array); | 102 | bpf_map_area_free(array); |
103 | return ERR_PTR(-ENOMEM); | 103 | return ERR_PTR(-ENOMEM); |
104 | } | 104 | } |
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index e093d9a2c4dd..e745d6a88224 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c | |||
@@ -69,7 +69,7 @@ static LIST_HEAD(dev_map_list); | |||
69 | 69 | ||
70 | static u64 dev_map_bitmap_size(const union bpf_attr *attr) | 70 | static u64 dev_map_bitmap_size(const union bpf_attr *attr) |
71 | { | 71 | { |
72 | return BITS_TO_LONGS(attr->max_entries) * sizeof(unsigned long); | 72 | return BITS_TO_LONGS((u64) attr->max_entries) * sizeof(unsigned long); |
73 | } | 73 | } |
74 | 74 | ||
75 | static struct bpf_map *dev_map_alloc(union bpf_attr *attr) | 75 | static struct bpf_map *dev_map_alloc(union bpf_attr *attr) |
@@ -78,6 +78,9 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr) | |||
78 | int err = -EINVAL; | 78 | int err = -EINVAL; |
79 | u64 cost; | 79 | u64 cost; |
80 | 80 | ||
81 | if (!capable(CAP_NET_ADMIN)) | ||
82 | return ERR_PTR(-EPERM); | ||
83 | |||
81 | /* check sanity of attributes */ | 84 | /* check sanity of attributes */ |
82 | if (attr->max_entries == 0 || attr->key_size != 4 || | 85 | if (attr->max_entries == 0 || attr->key_size != 4 || |
83 | attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE) | 86 | attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE) |
@@ -111,8 +114,9 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr) | |||
111 | err = -ENOMEM; | 114 | err = -ENOMEM; |
112 | 115 | ||
113 | /* A per cpu bitfield with a bit per possible net device */ | 116 | /* A per cpu bitfield with a bit per possible net device */ |
114 | dtab->flush_needed = __alloc_percpu(dev_map_bitmap_size(attr), | 117 | dtab->flush_needed = __alloc_percpu_gfp(dev_map_bitmap_size(attr), |
115 | __alignof__(unsigned long)); | 118 | __alignof__(unsigned long), |
119 | GFP_KERNEL | __GFP_NOWARN); | ||
116 | if (!dtab->flush_needed) | 120 | if (!dtab->flush_needed) |
117 | goto free_dtab; | 121 | goto free_dtab; |
118 | 122 | ||
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 431126f31ea3..6533f08d1238 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c | |||
@@ -317,10 +317,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr) | |||
317 | */ | 317 | */ |
318 | goto free_htab; | 318 | goto free_htab; |
319 | 319 | ||
320 | if (percpu && round_up(htab->map.value_size, 8) > PCPU_MIN_UNIT_SIZE) | ||
321 | /* make sure the size for pcpu_alloc() is reasonable */ | ||
322 | goto free_htab; | ||
323 | |||
324 | htab->elem_size = sizeof(struct htab_elem) + | 320 | htab->elem_size = sizeof(struct htab_elem) + |
325 | round_up(htab->map.key_size, 8); | 321 | round_up(htab->map.key_size, 8); |
326 | if (percpu) | 322 | if (percpu) |
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c index e833ed914358..be1dde967208 100644 --- a/kernel/bpf/inode.c +++ b/kernel/bpf/inode.c | |||
@@ -363,6 +363,7 @@ out: | |||
363 | putname(pname); | 363 | putname(pname); |
364 | return ret; | 364 | return ret; |
365 | } | 365 | } |
366 | EXPORT_SYMBOL_GPL(bpf_obj_get_user); | ||
366 | 367 | ||
367 | static void bpf_evict_inode(struct inode *inode) | 368 | static void bpf_evict_inode(struct inode *inode) |
368 | { | 369 | { |
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index 6424ce0e4969..66f00a2b27f4 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <linux/workqueue.h> | 39 | #include <linux/workqueue.h> |
40 | #include <linux/list.h> | 40 | #include <linux/list.h> |
41 | #include <net/strparser.h> | 41 | #include <net/strparser.h> |
42 | #include <net/tcp.h> | ||
42 | 43 | ||
43 | struct bpf_stab { | 44 | struct bpf_stab { |
44 | struct bpf_map map; | 45 | struct bpf_map map; |
@@ -92,6 +93,14 @@ static inline struct smap_psock *smap_psock_sk(const struct sock *sk) | |||
92 | return rcu_dereference_sk_user_data(sk); | 93 | return rcu_dereference_sk_user_data(sk); |
93 | } | 94 | } |
94 | 95 | ||
96 | /* compute the linear packet data range [data, data_end) for skb when | ||
97 | * sk_skb type programs are in use. | ||
98 | */ | ||
99 | static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb) | ||
100 | { | ||
101 | TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb); | ||
102 | } | ||
103 | |||
95 | static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb) | 104 | static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb) |
96 | { | 105 | { |
97 | struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict); | 106 | struct bpf_prog *prog = READ_ONCE(psock->bpf_verdict); |
@@ -101,12 +110,20 @@ static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb) | |||
101 | return SK_DROP; | 110 | return SK_DROP; |
102 | 111 | ||
103 | skb_orphan(skb); | 112 | skb_orphan(skb); |
113 | /* We need to ensure that BPF metadata for maps is also cleared | ||
114 | * when we orphan the skb so that we don't have the possibility | ||
115 | * to reference a stale map. | ||
116 | */ | ||
117 | TCP_SKB_CB(skb)->bpf.map = NULL; | ||
104 | skb->sk = psock->sock; | 118 | skb->sk = psock->sock; |
105 | bpf_compute_data_end(skb); | 119 | bpf_compute_data_end_sk_skb(skb); |
120 | preempt_disable(); | ||
106 | rc = (*prog->bpf_func)(skb, prog->insnsi); | 121 | rc = (*prog->bpf_func)(skb, prog->insnsi); |
122 | preempt_enable(); | ||
107 | skb->sk = NULL; | 123 | skb->sk = NULL; |
108 | 124 | ||
109 | return rc; | 125 | return rc == SK_PASS ? |
126 | (TCP_SKB_CB(skb)->bpf.map ? SK_REDIRECT : SK_PASS) : SK_DROP; | ||
110 | } | 127 | } |
111 | 128 | ||
112 | static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb) | 129 | static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb) |
@@ -114,17 +131,10 @@ static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb) | |||
114 | struct sock *sk; | 131 | struct sock *sk; |
115 | int rc; | 132 | int rc; |
116 | 133 | ||
117 | /* Because we use per cpu values to feed input from sock redirect | ||
118 | * in BPF program to do_sk_redirect_map() call we need to ensure we | ||
119 | * are not preempted. RCU read lock is not sufficient in this case | ||
120 | * with CONFIG_PREEMPT_RCU enabled so we must be explicit here. | ||
121 | */ | ||
122 | preempt_disable(); | ||
123 | rc = smap_verdict_func(psock, skb); | 134 | rc = smap_verdict_func(psock, skb); |
124 | switch (rc) { | 135 | switch (rc) { |
125 | case SK_REDIRECT: | 136 | case SK_REDIRECT: |
126 | sk = do_sk_redirect_map(); | 137 | sk = do_sk_redirect_map(skb); |
127 | preempt_enable(); | ||
128 | if (likely(sk)) { | 138 | if (likely(sk)) { |
129 | struct smap_psock *peer = smap_psock_sk(sk); | 139 | struct smap_psock *peer = smap_psock_sk(sk); |
130 | 140 | ||
@@ -141,8 +151,6 @@ static void smap_do_verdict(struct smap_psock *psock, struct sk_buff *skb) | |||
141 | /* Fall through and free skb otherwise */ | 151 | /* Fall through and free skb otherwise */ |
142 | case SK_DROP: | 152 | case SK_DROP: |
143 | default: | 153 | default: |
144 | if (rc != SK_REDIRECT) | ||
145 | preempt_enable(); | ||
146 | kfree_skb(skb); | 154 | kfree_skb(skb); |
147 | } | 155 | } |
148 | } | 156 | } |
@@ -369,7 +377,7 @@ static int smap_parse_func_strparser(struct strparser *strp, | |||
369 | * any socket yet. | 377 | * any socket yet. |
370 | */ | 378 | */ |
371 | skb->sk = psock->sock; | 379 | skb->sk = psock->sock; |
372 | bpf_compute_data_end(skb); | 380 | bpf_compute_data_end_sk_skb(skb); |
373 | rc = (*prog->bpf_func)(skb, prog->insnsi); | 381 | rc = (*prog->bpf_func)(skb, prog->insnsi); |
374 | skb->sk = NULL; | 382 | skb->sk = NULL; |
375 | rcu_read_unlock(); | 383 | rcu_read_unlock(); |
@@ -487,6 +495,9 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr) | |||
487 | int err = -EINVAL; | 495 | int err = -EINVAL; |
488 | u64 cost; | 496 | u64 cost; |
489 | 497 | ||
498 | if (!capable(CAP_NET_ADMIN)) | ||
499 | return ERR_PTR(-EPERM); | ||
500 | |||
490 | /* check sanity of attributes */ | 501 | /* check sanity of attributes */ |
491 | if (attr->max_entries == 0 || attr->key_size != 4 || | 502 | if (attr->max_entries == 0 || attr->key_size != 4 || |
492 | attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE) | 503 | attr->value_size != 4 || attr->map_flags & ~BPF_F_NUMA_NODE) |
@@ -840,6 +851,12 @@ static int sock_map_update_elem(struct bpf_map *map, | |||
840 | return -EINVAL; | 851 | return -EINVAL; |
841 | } | 852 | } |
842 | 853 | ||
854 | if (skops.sk->sk_type != SOCK_STREAM || | ||
855 | skops.sk->sk_protocol != IPPROTO_TCP) { | ||
856 | fput(socket->file); | ||
857 | return -EOPNOTSUPP; | ||
858 | } | ||
859 | |||
843 | err = sock_map_ctx_update_elem(&skops, map, key, flags); | 860 | err = sock_map_ctx_update_elem(&skops, map, key, flags); |
844 | fput(socket->file); | 861 | fput(socket->file); |
845 | return err; | 862 | return err; |
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index b914fbe1383e..c48ca2a34b5e 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c | |||
@@ -653,6 +653,10 @@ static void mark_reg_read(const struct bpf_verifier_state *state, u32 regno) | |||
653 | { | 653 | { |
654 | struct bpf_verifier_state *parent = state->parent; | 654 | struct bpf_verifier_state *parent = state->parent; |
655 | 655 | ||
656 | if (regno == BPF_REG_FP) | ||
657 | /* We don't need to worry about FP liveness because it's read-only */ | ||
658 | return; | ||
659 | |||
656 | while (parent) { | 660 | while (parent) { |
657 | /* if read wasn't screened by an earlier write ... */ | 661 | /* if read wasn't screened by an earlier write ... */ |
658 | if (state->regs[regno].live & REG_LIVE_WRITTEN) | 662 | if (state->regs[regno].live & REG_LIVE_WRITTEN) |
@@ -1112,7 +1116,12 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn | |||
1112 | /* ctx accesses must be at a fixed offset, so that we can | 1116 | /* ctx accesses must be at a fixed offset, so that we can |
1113 | * determine what type of data were returned. | 1117 | * determine what type of data were returned. |
1114 | */ | 1118 | */ |
1115 | if (!tnum_is_const(reg->var_off)) { | 1119 | if (reg->off) { |
1120 | verbose("dereference of modified ctx ptr R%d off=%d+%d, ctx+const is allowed, ctx+const+const is not\n", | ||
1121 | regno, reg->off, off - reg->off); | ||
1122 | return -EACCES; | ||
1123 | } | ||
1124 | if (!tnum_is_const(reg->var_off) || reg->var_off.value) { | ||
1116 | char tn_buf[48]; | 1125 | char tn_buf[48]; |
1117 | 1126 | ||
1118 | tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); | 1127 | tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off); |
@@ -1120,7 +1129,6 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn | |||
1120 | tn_buf, off, size); | 1129 | tn_buf, off, size); |
1121 | return -EACCES; | 1130 | return -EACCES; |
1122 | } | 1131 | } |
1123 | off += reg->var_off.value; | ||
1124 | err = check_ctx_access(env, insn_idx, off, size, t, ®_type); | 1132 | err = check_ctx_access(env, insn_idx, off, size, t, ®_type); |
1125 | if (!err && t == BPF_READ && value_regno >= 0) { | 1133 | if (!err && t == BPF_READ && value_regno >= 0) { |
1126 | /* ctx access returns either a scalar, or a | 1134 | /* ctx access returns either a scalar, or a |
@@ -2345,6 +2353,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) | |||
2345 | * copy register state to dest reg | 2353 | * copy register state to dest reg |
2346 | */ | 2354 | */ |
2347 | regs[insn->dst_reg] = regs[insn->src_reg]; | 2355 | regs[insn->dst_reg] = regs[insn->src_reg]; |
2356 | regs[insn->dst_reg].live |= REG_LIVE_WRITTEN; | ||
2348 | } else { | 2357 | } else { |
2349 | /* R1 = (u32) R2 */ | 2358 | /* R1 = (u32) R2 */ |
2350 | if (is_pointer_value(env, insn->src_reg)) { | 2359 | if (is_pointer_value(env, insn->src_reg)) { |
@@ -2421,12 +2430,15 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) | |||
2421 | } | 2430 | } |
2422 | 2431 | ||
2423 | static void find_good_pkt_pointers(struct bpf_verifier_state *state, | 2432 | static void find_good_pkt_pointers(struct bpf_verifier_state *state, |
2424 | struct bpf_reg_state *dst_reg) | 2433 | struct bpf_reg_state *dst_reg, |
2434 | bool range_right_open) | ||
2425 | { | 2435 | { |
2426 | struct bpf_reg_state *regs = state->regs, *reg; | 2436 | struct bpf_reg_state *regs = state->regs, *reg; |
2437 | u16 new_range; | ||
2427 | int i; | 2438 | int i; |
2428 | 2439 | ||
2429 | if (dst_reg->off < 0) | 2440 | if (dst_reg->off < 0 || |
2441 | (dst_reg->off == 0 && range_right_open)) | ||
2430 | /* This doesn't give us any range */ | 2442 | /* This doesn't give us any range */ |
2431 | return; | 2443 | return; |
2432 | 2444 | ||
@@ -2437,9 +2449,13 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state, | |||
2437 | */ | 2449 | */ |
2438 | return; | 2450 | return; |
2439 | 2451 | ||
2440 | /* LLVM can generate four kind of checks: | 2452 | new_range = dst_reg->off; |
2453 | if (range_right_open) | ||
2454 | new_range--; | ||
2455 | |||
2456 | /* Examples for register markings: | ||
2441 | * | 2457 | * |
2442 | * Type 1/2: | 2458 | * pkt_data in dst register: |
2443 | * | 2459 | * |
2444 | * r2 = r3; | 2460 | * r2 = r3; |
2445 | * r2 += 8; | 2461 | * r2 += 8; |
@@ -2456,7 +2472,7 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state, | |||
2456 | * r2=pkt(id=n,off=8,r=0) | 2472 | * r2=pkt(id=n,off=8,r=0) |
2457 | * r3=pkt(id=n,off=0,r=0) | 2473 | * r3=pkt(id=n,off=0,r=0) |
2458 | * | 2474 | * |
2459 | * Type 3/4: | 2475 | * pkt_data in src register: |
2460 | * | 2476 | * |
2461 | * r2 = r3; | 2477 | * r2 = r3; |
2462 | * r2 += 8; | 2478 | * r2 += 8; |
@@ -2474,7 +2490,9 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state, | |||
2474 | * r3=pkt(id=n,off=0,r=0) | 2490 | * r3=pkt(id=n,off=0,r=0) |
2475 | * | 2491 | * |
2476 | * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8) | 2492 | * Find register r3 and mark its range as r3=pkt(id=n,off=0,r=8) |
2477 | * so that range of bytes [r3, r3 + 8) is safe to access. | 2493 | * or r3=pkt(id=n,off=0,r=8-1), so that range of bytes [r3, r3 + 8) |
2494 | * and [r3, r3 + 8-1) respectively is safe to access depending on | ||
2495 | * the check. | ||
2478 | */ | 2496 | */ |
2479 | 2497 | ||
2480 | /* If our ids match, then we must have the same max_value. And we | 2498 | /* If our ids match, then we must have the same max_value. And we |
@@ -2485,14 +2503,14 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *state, | |||
2485 | for (i = 0; i < MAX_BPF_REG; i++) | 2503 | for (i = 0; i < MAX_BPF_REG; i++) |
2486 | if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id) | 2504 | if (regs[i].type == PTR_TO_PACKET && regs[i].id == dst_reg->id) |
2487 | /* keep the maximum range already checked */ | 2505 | /* keep the maximum range already checked */ |
2488 | regs[i].range = max_t(u16, regs[i].range, dst_reg->off); | 2506 | regs[i].range = max(regs[i].range, new_range); |
2489 | 2507 | ||
2490 | for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { | 2508 | for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { |
2491 | if (state->stack_slot_type[i] != STACK_SPILL) | 2509 | if (state->stack_slot_type[i] != STACK_SPILL) |
2492 | continue; | 2510 | continue; |
2493 | reg = &state->spilled_regs[i / BPF_REG_SIZE]; | 2511 | reg = &state->spilled_regs[i / BPF_REG_SIZE]; |
2494 | if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id) | 2512 | if (reg->type == PTR_TO_PACKET && reg->id == dst_reg->id) |
2495 | reg->range = max_t(u16, reg->range, dst_reg->off); | 2513 | reg->range = max(reg->range, new_range); |
2496 | } | 2514 | } |
2497 | } | 2515 | } |
2498 | 2516 | ||
@@ -2856,19 +2874,43 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, | |||
2856 | } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT && | 2874 | } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT && |
2857 | dst_reg->type == PTR_TO_PACKET && | 2875 | dst_reg->type == PTR_TO_PACKET && |
2858 | regs[insn->src_reg].type == PTR_TO_PACKET_END) { | 2876 | regs[insn->src_reg].type == PTR_TO_PACKET_END) { |
2859 | find_good_pkt_pointers(this_branch, dst_reg); | 2877 | /* pkt_data' > pkt_end */ |
2878 | find_good_pkt_pointers(this_branch, dst_reg, false); | ||
2879 | } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT && | ||
2880 | dst_reg->type == PTR_TO_PACKET_END && | ||
2881 | regs[insn->src_reg].type == PTR_TO_PACKET) { | ||
2882 | /* pkt_end > pkt_data' */ | ||
2883 | find_good_pkt_pointers(other_branch, ®s[insn->src_reg], true); | ||
2860 | } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLT && | 2884 | } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLT && |
2861 | dst_reg->type == PTR_TO_PACKET && | 2885 | dst_reg->type == PTR_TO_PACKET && |
2862 | regs[insn->src_reg].type == PTR_TO_PACKET_END) { | 2886 | regs[insn->src_reg].type == PTR_TO_PACKET_END) { |
2863 | find_good_pkt_pointers(other_branch, dst_reg); | 2887 | /* pkt_data' < pkt_end */ |
2888 | find_good_pkt_pointers(other_branch, dst_reg, true); | ||
2889 | } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLT && | ||
2890 | dst_reg->type == PTR_TO_PACKET_END && | ||
2891 | regs[insn->src_reg].type == PTR_TO_PACKET) { | ||
2892 | /* pkt_end < pkt_data' */ | ||
2893 | find_good_pkt_pointers(this_branch, ®s[insn->src_reg], false); | ||
2894 | } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE && | ||
2895 | dst_reg->type == PTR_TO_PACKET && | ||
2896 | regs[insn->src_reg].type == PTR_TO_PACKET_END) { | ||
2897 | /* pkt_data' >= pkt_end */ | ||
2898 | find_good_pkt_pointers(this_branch, dst_reg, true); | ||
2864 | } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE && | 2899 | } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGE && |
2865 | dst_reg->type == PTR_TO_PACKET_END && | 2900 | dst_reg->type == PTR_TO_PACKET_END && |
2866 | regs[insn->src_reg].type == PTR_TO_PACKET) { | 2901 | regs[insn->src_reg].type == PTR_TO_PACKET) { |
2867 | find_good_pkt_pointers(other_branch, ®s[insn->src_reg]); | 2902 | /* pkt_end >= pkt_data' */ |
2903 | find_good_pkt_pointers(other_branch, ®s[insn->src_reg], false); | ||
2904 | } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLE && | ||
2905 | dst_reg->type == PTR_TO_PACKET && | ||
2906 | regs[insn->src_reg].type == PTR_TO_PACKET_END) { | ||
2907 | /* pkt_data' <= pkt_end */ | ||
2908 | find_good_pkt_pointers(other_branch, dst_reg, false); | ||
2868 | } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLE && | 2909 | } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JLE && |
2869 | dst_reg->type == PTR_TO_PACKET_END && | 2910 | dst_reg->type == PTR_TO_PACKET_END && |
2870 | regs[insn->src_reg].type == PTR_TO_PACKET) { | 2911 | regs[insn->src_reg].type == PTR_TO_PACKET) { |
2871 | find_good_pkt_pointers(this_branch, ®s[insn->src_reg]); | 2912 | /* pkt_end <= pkt_data' */ |
2913 | find_good_pkt_pointers(this_branch, ®s[insn->src_reg], true); | ||
2872 | } else if (is_pointer_value(env, insn->dst_reg)) { | 2914 | } else if (is_pointer_value(env, insn->dst_reg)) { |
2873 | verbose("R%d pointer comparison prohibited\n", insn->dst_reg); | 2915 | verbose("R%d pointer comparison prohibited\n", insn->dst_reg); |
2874 | return -EACCES; | 2916 | return -EACCES; |
diff --git a/kernel/cpu.c b/kernel/cpu.c index d851df22f5c5..04892a82f6ac 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c | |||
@@ -632,6 +632,11 @@ cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup, | |||
632 | __cpuhp_kick_ap(st); | 632 | __cpuhp_kick_ap(st); |
633 | } | 633 | } |
634 | 634 | ||
635 | /* | ||
636 | * Clean up the leftovers so the next hotplug operation wont use stale | ||
637 | * data. | ||
638 | */ | ||
639 | st->node = st->last = NULL; | ||
635 | return ret; | 640 | return ret; |
636 | } | 641 | } |
637 | 642 | ||
diff --git a/kernel/events/core.c b/kernel/events/core.c index 6bc21e202ae4..9d93db81fa36 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -662,7 +662,7 @@ static inline void update_cgrp_time_from_event(struct perf_event *event) | |||
662 | /* | 662 | /* |
663 | * Do not update time when cgroup is not active | 663 | * Do not update time when cgroup is not active |
664 | */ | 664 | */ |
665 | if (cgrp == event->cgrp) | 665 | if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup)) |
666 | __update_cgrp_time(event->cgrp); | 666 | __update_cgrp_time(event->cgrp); |
667 | } | 667 | } |
668 | 668 | ||
@@ -8955,6 +8955,14 @@ static struct perf_cpu_context __percpu *find_pmu_context(int ctxn) | |||
8955 | 8955 | ||
8956 | static void free_pmu_context(struct pmu *pmu) | 8956 | static void free_pmu_context(struct pmu *pmu) |
8957 | { | 8957 | { |
8958 | /* | ||
8959 | * Static contexts such as perf_sw_context have a global lifetime | ||
8960 | * and may be shared between different PMUs. Avoid freeing them | ||
8961 | * when a single PMU is going away. | ||
8962 | */ | ||
8963 | if (pmu->task_ctx_nr > perf_invalid_context) | ||
8964 | return; | ||
8965 | |||
8958 | mutex_lock(&pmus_lock); | 8966 | mutex_lock(&pmus_lock); |
8959 | free_percpu(pmu->pmu_cpu_context); | 8967 | free_percpu(pmu->pmu_cpu_context); |
8960 | mutex_unlock(&pmus_lock); | 8968 | mutex_unlock(&pmus_lock); |
diff --git a/kernel/exit.c b/kernel/exit.c index f2cd53e92147..f6cad39f35df 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
@@ -1610,6 +1610,9 @@ SYSCALL_DEFINE5(waitid, int, which, pid_t, upid, struct siginfo __user *, | |||
1610 | if (!infop) | 1610 | if (!infop) |
1611 | return err; | 1611 | return err; |
1612 | 1612 | ||
1613 | if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop))) | ||
1614 | return -EFAULT; | ||
1615 | |||
1613 | user_access_begin(); | 1616 | user_access_begin(); |
1614 | unsafe_put_user(signo, &infop->si_signo, Efault); | 1617 | unsafe_put_user(signo, &infop->si_signo, Efault); |
1615 | unsafe_put_user(0, &infop->si_errno, Efault); | 1618 | unsafe_put_user(0, &infop->si_errno, Efault); |
@@ -1735,6 +1738,9 @@ COMPAT_SYSCALL_DEFINE5(waitid, | |||
1735 | if (!infop) | 1738 | if (!infop) |
1736 | return err; | 1739 | return err; |
1737 | 1740 | ||
1741 | if (!access_ok(VERIFY_WRITE, infop, sizeof(*infop))) | ||
1742 | return -EFAULT; | ||
1743 | |||
1738 | user_access_begin(); | 1744 | user_access_begin(); |
1739 | unsafe_put_user(signo, &infop->si_signo, Efault); | 1745 | unsafe_put_user(signo, &infop->si_signo, Efault); |
1740 | unsafe_put_user(0, &infop->si_errno, Efault); | 1746 | unsafe_put_user(0, &infop->si_errno, Efault); |
diff --git a/kernel/fork.c b/kernel/fork.c index e702cb9ffbd8..07cc743698d3 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -215,6 +215,10 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node) | |||
215 | if (!s) | 215 | if (!s) |
216 | continue; | 216 | continue; |
217 | 217 | ||
218 | #ifdef CONFIG_DEBUG_KMEMLEAK | ||
219 | /* Clear stale pointers from reused stack. */ | ||
220 | memset(s->addr, 0, THREAD_SIZE); | ||
221 | #endif | ||
218 | tsk->stack_vm_area = s; | 222 | tsk->stack_vm_area = s; |
219 | return s->addr; | 223 | return s->addr; |
220 | } | 224 | } |
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 6fc89fd93824..5a2ef92c2782 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c | |||
@@ -265,8 +265,8 @@ int irq_startup(struct irq_desc *desc, bool resend, bool force) | |||
265 | irq_setup_affinity(desc); | 265 | irq_setup_affinity(desc); |
266 | break; | 266 | break; |
267 | case IRQ_STARTUP_MANAGED: | 267 | case IRQ_STARTUP_MANAGED: |
268 | irq_do_set_affinity(d, aff, false); | ||
268 | ret = __irq_startup(desc); | 269 | ret = __irq_startup(desc); |
269 | irq_set_affinity_locked(d, aff, false); | ||
270 | break; | 270 | break; |
271 | case IRQ_STARTUP_ABORT: | 271 | case IRQ_STARTUP_ABORT: |
272 | return 0; | 272 | return 0; |
diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c index 638eb9c83d9f..9eb09aef0313 100644 --- a/kernel/irq/cpuhotplug.c +++ b/kernel/irq/cpuhotplug.c | |||
@@ -18,8 +18,34 @@ | |||
18 | static inline bool irq_needs_fixup(struct irq_data *d) | 18 | static inline bool irq_needs_fixup(struct irq_data *d) |
19 | { | 19 | { |
20 | const struct cpumask *m = irq_data_get_effective_affinity_mask(d); | 20 | const struct cpumask *m = irq_data_get_effective_affinity_mask(d); |
21 | unsigned int cpu = smp_processor_id(); | ||
21 | 22 | ||
22 | return cpumask_test_cpu(smp_processor_id(), m); | 23 | #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK |
24 | /* | ||
25 | * The cpumask_empty() check is a workaround for interrupt chips, | ||
26 | * which do not implement effective affinity, but the architecture has | ||
27 | * enabled the config switch. Use the general affinity mask instead. | ||
28 | */ | ||
29 | if (cpumask_empty(m)) | ||
30 | m = irq_data_get_affinity_mask(d); | ||
31 | |||
32 | /* | ||
33 | * Sanity check. If the mask is not empty when excluding the outgoing | ||
34 | * CPU then it must contain at least one online CPU. The outgoing CPU | ||
35 | * has been removed from the online mask already. | ||
36 | */ | ||
37 | if (cpumask_any_but(m, cpu) < nr_cpu_ids && | ||
38 | cpumask_any_and(m, cpu_online_mask) >= nr_cpu_ids) { | ||
39 | /* | ||
40 | * If this happens then there was a missed IRQ fixup at some | ||
41 | * point. Warn about it and enforce fixup. | ||
42 | */ | ||
43 | pr_warn("Eff. affinity %*pbl of IRQ %u contains only offline CPUs after offlining CPU %u\n", | ||
44 | cpumask_pr_args(m), d->irq, cpu); | ||
45 | return true; | ||
46 | } | ||
47 | #endif | ||
48 | return cpumask_test_cpu(cpu, m); | ||
23 | } | 49 | } |
24 | 50 | ||
25 | static bool migrate_one_irq(struct irq_desc *desc) | 51 | static bool migrate_one_irq(struct irq_desc *desc) |
diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c index 5270a54b9fa4..c26c5bb6b491 100644 --- a/kernel/irq/generic-chip.c +++ b/kernel/irq/generic-chip.c | |||
@@ -135,17 +135,26 @@ void irq_gc_ack_clr_bit(struct irq_data *d) | |||
135 | } | 135 | } |
136 | 136 | ||
137 | /** | 137 | /** |
138 | * irq_gc_mask_disable_reg_and_ack - Mask and ack pending interrupt | 138 | * irq_gc_mask_disable_and_ack_set - Mask and ack pending interrupt |
139 | * @d: irq_data | 139 | * @d: irq_data |
140 | * | ||
141 | * This generic implementation of the irq_mask_ack method is for chips | ||
142 | * with separate enable/disable registers instead of a single mask | ||
143 | * register and where a pending interrupt is acknowledged by setting a | ||
144 | * bit. | ||
145 | * | ||
146 | * Note: This is the only permutation currently used. Similar generic | ||
147 | * functions should be added here if other permutations are required. | ||
140 | */ | 148 | */ |
141 | void irq_gc_mask_disable_reg_and_ack(struct irq_data *d) | 149 | void irq_gc_mask_disable_and_ack_set(struct irq_data *d) |
142 | { | 150 | { |
143 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); | 151 | struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); |
144 | struct irq_chip_type *ct = irq_data_get_chip_type(d); | 152 | struct irq_chip_type *ct = irq_data_get_chip_type(d); |
145 | u32 mask = d->mask; | 153 | u32 mask = d->mask; |
146 | 154 | ||
147 | irq_gc_lock(gc); | 155 | irq_gc_lock(gc); |
148 | irq_reg_writel(gc, mask, ct->regs.mask); | 156 | irq_reg_writel(gc, mask, ct->regs.disable); |
157 | *ct->mask_cache &= ~mask; | ||
149 | irq_reg_writel(gc, mask, ct->regs.ack); | 158 | irq_reg_writel(gc, mask, ct->regs.ack); |
150 | irq_gc_unlock(gc); | 159 | irq_gc_unlock(gc); |
151 | } | 160 | } |
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index d00132b5c325..4bff6a10ae8e 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
@@ -168,6 +168,19 @@ void irq_set_thread_affinity(struct irq_desc *desc) | |||
168 | set_bit(IRQTF_AFFINITY, &action->thread_flags); | 168 | set_bit(IRQTF_AFFINITY, &action->thread_flags); |
169 | } | 169 | } |
170 | 170 | ||
171 | static void irq_validate_effective_affinity(struct irq_data *data) | ||
172 | { | ||
173 | #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK | ||
174 | const struct cpumask *m = irq_data_get_effective_affinity_mask(data); | ||
175 | struct irq_chip *chip = irq_data_get_irq_chip(data); | ||
176 | |||
177 | if (!cpumask_empty(m)) | ||
178 | return; | ||
179 | pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n", | ||
180 | chip->name, data->irq); | ||
181 | #endif | ||
182 | } | ||
183 | |||
171 | int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, | 184 | int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, |
172 | bool force) | 185 | bool force) |
173 | { | 186 | { |
@@ -175,12 +188,16 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, | |||
175 | struct irq_chip *chip = irq_data_get_irq_chip(data); | 188 | struct irq_chip *chip = irq_data_get_irq_chip(data); |
176 | int ret; | 189 | int ret; |
177 | 190 | ||
191 | if (!chip || !chip->irq_set_affinity) | ||
192 | return -EINVAL; | ||
193 | |||
178 | ret = chip->irq_set_affinity(data, mask, force); | 194 | ret = chip->irq_set_affinity(data, mask, force); |
179 | switch (ret) { | 195 | switch (ret) { |
180 | case IRQ_SET_MASK_OK: | 196 | case IRQ_SET_MASK_OK: |
181 | case IRQ_SET_MASK_OK_DONE: | 197 | case IRQ_SET_MASK_OK_DONE: |
182 | cpumask_copy(desc->irq_common_data.affinity, mask); | 198 | cpumask_copy(desc->irq_common_data.affinity, mask); |
183 | case IRQ_SET_MASK_OK_NOCOPY: | 199 | case IRQ_SET_MASK_OK_NOCOPY: |
200 | irq_validate_effective_affinity(data); | ||
184 | irq_set_thread_affinity(desc); | 201 | irq_set_thread_affinity(desc); |
185 | ret = 0; | 202 | ret = 0; |
186 | } | 203 | } |
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index b9628e43c78f..bf8c8fd72589 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c | |||
@@ -830,6 +830,41 @@ int klp_register_patch(struct klp_patch *patch) | |||
830 | } | 830 | } |
831 | EXPORT_SYMBOL_GPL(klp_register_patch); | 831 | EXPORT_SYMBOL_GPL(klp_register_patch); |
832 | 832 | ||
833 | /* | ||
834 | * Remove parts of patches that touch a given kernel module. The list of | ||
835 | * patches processed might be limited. When limit is NULL, all patches | ||
836 | * will be handled. | ||
837 | */ | ||
838 | static void klp_cleanup_module_patches_limited(struct module *mod, | ||
839 | struct klp_patch *limit) | ||
840 | { | ||
841 | struct klp_patch *patch; | ||
842 | struct klp_object *obj; | ||
843 | |||
844 | list_for_each_entry(patch, &klp_patches, list) { | ||
845 | if (patch == limit) | ||
846 | break; | ||
847 | |||
848 | klp_for_each_object(patch, obj) { | ||
849 | if (!klp_is_module(obj) || strcmp(obj->name, mod->name)) | ||
850 | continue; | ||
851 | |||
852 | /* | ||
853 | * Only unpatch the module if the patch is enabled or | ||
854 | * is in transition. | ||
855 | */ | ||
856 | if (patch->enabled || patch == klp_transition_patch) { | ||
857 | pr_notice("reverting patch '%s' on unloading module '%s'\n", | ||
858 | patch->mod->name, obj->mod->name); | ||
859 | klp_unpatch_object(obj); | ||
860 | } | ||
861 | |||
862 | klp_free_object_loaded(obj); | ||
863 | break; | ||
864 | } | ||
865 | } | ||
866 | } | ||
867 | |||
833 | int klp_module_coming(struct module *mod) | 868 | int klp_module_coming(struct module *mod) |
834 | { | 869 | { |
835 | int ret; | 870 | int ret; |
@@ -894,7 +929,7 @@ err: | |||
894 | pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n", | 929 | pr_warn("patch '%s' failed for module '%s', refusing to load module '%s'\n", |
895 | patch->mod->name, obj->mod->name, obj->mod->name); | 930 | patch->mod->name, obj->mod->name, obj->mod->name); |
896 | mod->klp_alive = false; | 931 | mod->klp_alive = false; |
897 | klp_free_object_loaded(obj); | 932 | klp_cleanup_module_patches_limited(mod, patch); |
898 | mutex_unlock(&klp_mutex); | 933 | mutex_unlock(&klp_mutex); |
899 | 934 | ||
900 | return ret; | 935 | return ret; |
@@ -902,9 +937,6 @@ err: | |||
902 | 937 | ||
903 | void klp_module_going(struct module *mod) | 938 | void klp_module_going(struct module *mod) |
904 | { | 939 | { |
905 | struct klp_patch *patch; | ||
906 | struct klp_object *obj; | ||
907 | |||
908 | if (WARN_ON(mod->state != MODULE_STATE_GOING && | 940 | if (WARN_ON(mod->state != MODULE_STATE_GOING && |
909 | mod->state != MODULE_STATE_COMING)) | 941 | mod->state != MODULE_STATE_COMING)) |
910 | return; | 942 | return; |
@@ -917,25 +949,7 @@ void klp_module_going(struct module *mod) | |||
917 | */ | 949 | */ |
918 | mod->klp_alive = false; | 950 | mod->klp_alive = false; |
919 | 951 | ||
920 | list_for_each_entry(patch, &klp_patches, list) { | 952 | klp_cleanup_module_patches_limited(mod, NULL); |
921 | klp_for_each_object(patch, obj) { | ||
922 | if (!klp_is_module(obj) || strcmp(obj->name, mod->name)) | ||
923 | continue; | ||
924 | |||
925 | /* | ||
926 | * Only unpatch the module if the patch is enabled or | ||
927 | * is in transition. | ||
928 | */ | ||
929 | if (patch->enabled || patch == klp_transition_patch) { | ||
930 | pr_notice("reverting patch '%s' on unloading module '%s'\n", | ||
931 | patch->mod->name, obj->mod->name); | ||
932 | klp_unpatch_object(obj); | ||
933 | } | ||
934 | |||
935 | klp_free_object_loaded(obj); | ||
936 | break; | ||
937 | } | ||
938 | } | ||
939 | 953 | ||
940 | mutex_unlock(&klp_mutex); | 954 | mutex_unlock(&klp_mutex); |
941 | } | 955 | } |
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 44c8d0d17170..e36e652d996f 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c | |||
@@ -1873,10 +1873,10 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev, | |||
1873 | struct held_lock *next, int distance, struct stack_trace *trace, | 1873 | struct held_lock *next, int distance, struct stack_trace *trace, |
1874 | int (*save)(struct stack_trace *trace)) | 1874 | int (*save)(struct stack_trace *trace)) |
1875 | { | 1875 | { |
1876 | struct lock_list *uninitialized_var(target_entry); | ||
1876 | struct lock_list *entry; | 1877 | struct lock_list *entry; |
1877 | int ret; | ||
1878 | struct lock_list this; | 1878 | struct lock_list this; |
1879 | struct lock_list *uninitialized_var(target_entry); | 1879 | int ret; |
1880 | 1880 | ||
1881 | /* | 1881 | /* |
1882 | * Prove that the new <prev> -> <next> dependency would not | 1882 | * Prove that the new <prev> -> <next> dependency would not |
@@ -1890,8 +1890,17 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev, | |||
1890 | this.class = hlock_class(next); | 1890 | this.class = hlock_class(next); |
1891 | this.parent = NULL; | 1891 | this.parent = NULL; |
1892 | ret = check_noncircular(&this, hlock_class(prev), &target_entry); | 1892 | ret = check_noncircular(&this, hlock_class(prev), &target_entry); |
1893 | if (unlikely(!ret)) | 1893 | if (unlikely(!ret)) { |
1894 | if (!trace->entries) { | ||
1895 | /* | ||
1896 | * If @save fails here, the printing might trigger | ||
1897 | * a WARN but because of the !nr_entries it should | ||
1898 | * not do bad things. | ||
1899 | */ | ||
1900 | save(trace); | ||
1901 | } | ||
1894 | return print_circular_bug(&this, target_entry, next, prev, trace); | 1902 | return print_circular_bug(&this, target_entry, next, prev, trace); |
1903 | } | ||
1895 | else if (unlikely(ret < 0)) | 1904 | else if (unlikely(ret < 0)) |
1896 | return print_bfs_bug(ret); | 1905 | return print_bfs_bug(ret); |
1897 | 1906 | ||
@@ -1938,7 +1947,7 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev, | |||
1938 | return print_bfs_bug(ret); | 1947 | return print_bfs_bug(ret); |
1939 | 1948 | ||
1940 | 1949 | ||
1941 | if (save && !save(trace)) | 1950 | if (!trace->entries && !save(trace)) |
1942 | return 0; | 1951 | return 0; |
1943 | 1952 | ||
1944 | /* | 1953 | /* |
@@ -1958,20 +1967,6 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev, | |||
1958 | if (!ret) | 1967 | if (!ret) |
1959 | return 0; | 1968 | return 0; |
1960 | 1969 | ||
1961 | /* | ||
1962 | * Debugging printouts: | ||
1963 | */ | ||
1964 | if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) { | ||
1965 | graph_unlock(); | ||
1966 | printk("\n new dependency: "); | ||
1967 | print_lock_name(hlock_class(prev)); | ||
1968 | printk(KERN_CONT " => "); | ||
1969 | print_lock_name(hlock_class(next)); | ||
1970 | printk(KERN_CONT "\n"); | ||
1971 | dump_stack(); | ||
1972 | if (!graph_lock()) | ||
1973 | return 0; | ||
1974 | } | ||
1975 | return 2; | 1970 | return 2; |
1976 | } | 1971 | } |
1977 | 1972 | ||
@@ -1986,8 +1981,12 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next) | |||
1986 | { | 1981 | { |
1987 | int depth = curr->lockdep_depth; | 1982 | int depth = curr->lockdep_depth; |
1988 | struct held_lock *hlock; | 1983 | struct held_lock *hlock; |
1989 | struct stack_trace trace; | 1984 | struct stack_trace trace = { |
1990 | int (*save)(struct stack_trace *trace) = save_trace; | 1985 | .nr_entries = 0, |
1986 | .max_entries = 0, | ||
1987 | .entries = NULL, | ||
1988 | .skip = 0, | ||
1989 | }; | ||
1991 | 1990 | ||
1992 | /* | 1991 | /* |
1993 | * Debugging checks. | 1992 | * Debugging checks. |
@@ -2018,18 +2017,11 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next) | |||
2018 | */ | 2017 | */ |
2019 | if (hlock->read != 2 && hlock->check) { | 2018 | if (hlock->read != 2 && hlock->check) { |
2020 | int ret = check_prev_add(curr, hlock, next, | 2019 | int ret = check_prev_add(curr, hlock, next, |
2021 | distance, &trace, save); | 2020 | distance, &trace, save_trace); |
2022 | if (!ret) | 2021 | if (!ret) |
2023 | return 0; | 2022 | return 0; |
2024 | 2023 | ||
2025 | /* | 2024 | /* |
2026 | * Stop saving stack_trace if save_trace() was | ||
2027 | * called at least once: | ||
2028 | */ | ||
2029 | if (save && ret == 2) | ||
2030 | save = NULL; | ||
2031 | |||
2032 | /* | ||
2033 | * Stop after the first non-trylock entry, | 2025 | * Stop after the first non-trylock entry, |
2034 | * as non-trylock entries have added their | 2026 | * as non-trylock entries have added their |
2035 | * own direct dependencies already, so this | 2027 | * own direct dependencies already, so this |
diff --git a/kernel/rcu/srcutree.c b/kernel/rcu/srcutree.c index 729a8706751d..6d5880089ff6 100644 --- a/kernel/rcu/srcutree.c +++ b/kernel/rcu/srcutree.c | |||
@@ -854,7 +854,7 @@ void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp, | |||
854 | /** | 854 | /** |
855 | * call_srcu() - Queue a callback for invocation after an SRCU grace period | 855 | * call_srcu() - Queue a callback for invocation after an SRCU grace period |
856 | * @sp: srcu_struct in queue the callback | 856 | * @sp: srcu_struct in queue the callback |
857 | * @head: structure to be used for queueing the SRCU callback. | 857 | * @rhp: structure to be used for queueing the SRCU callback. |
858 | * @func: function to be invoked after the SRCU grace period | 858 | * @func: function to be invoked after the SRCU grace period |
859 | * | 859 | * |
860 | * The callback function will be invoked some time after a full SRCU | 860 | * The callback function will be invoked some time after a full SRCU |
diff --git a/kernel/rcu/sync.c b/kernel/rcu/sync.c index 50d1861f7759..3f943efcf61c 100644 --- a/kernel/rcu/sync.c +++ b/kernel/rcu/sync.c | |||
@@ -85,6 +85,9 @@ void rcu_sync_init(struct rcu_sync *rsp, enum rcu_sync_type type) | |||
85 | } | 85 | } |
86 | 86 | ||
87 | /** | 87 | /** |
88 | * rcu_sync_enter_start - Force readers onto slow path for multiple updates | ||
89 | * @rsp: Pointer to rcu_sync structure to use for synchronization | ||
90 | * | ||
88 | * Must be called after rcu_sync_init() and before first use. | 91 | * Must be called after rcu_sync_init() and before first use. |
89 | * | 92 | * |
90 | * Ensures rcu_sync_is_idle() returns false and rcu_sync_{enter,exit}() | 93 | * Ensures rcu_sync_is_idle() returns false and rcu_sync_{enter,exit}() |
@@ -142,7 +145,7 @@ void rcu_sync_enter(struct rcu_sync *rsp) | |||
142 | 145 | ||
143 | /** | 146 | /** |
144 | * rcu_sync_func() - Callback function managing reader access to fastpath | 147 | * rcu_sync_func() - Callback function managing reader access to fastpath |
145 | * @rsp: Pointer to rcu_sync structure to use for synchronization | 148 | * @rhp: Pointer to rcu_head in rcu_sync structure to use for synchronization |
146 | * | 149 | * |
147 | * This function is passed to one of the call_rcu() functions by | 150 | * This function is passed to one of the call_rcu() functions by |
148 | * rcu_sync_exit(), so that it is invoked after a grace period following the | 151 | * rcu_sync_exit(), so that it is invoked after a grace period following the |
@@ -158,9 +161,9 @@ void rcu_sync_enter(struct rcu_sync *rsp) | |||
158 | * rcu_sync_exit(). Otherwise, set all state back to idle so that readers | 161 | * rcu_sync_exit(). Otherwise, set all state back to idle so that readers |
159 | * can again use their fastpaths. | 162 | * can again use their fastpaths. |
160 | */ | 163 | */ |
161 | static void rcu_sync_func(struct rcu_head *rcu) | 164 | static void rcu_sync_func(struct rcu_head *rhp) |
162 | { | 165 | { |
163 | struct rcu_sync *rsp = container_of(rcu, struct rcu_sync, cb_head); | 166 | struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head); |
164 | unsigned long flags; | 167 | unsigned long flags; |
165 | 168 | ||
166 | BUG_ON(rsp->gp_state != GP_PASSED); | 169 | BUG_ON(rsp->gp_state != GP_PASSED); |
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index b0ad62b0e7b8..3e3650e94ae6 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c | |||
@@ -3097,9 +3097,10 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func, | |||
3097 | * read-side critical sections have completed. call_rcu_sched() assumes | 3097 | * read-side critical sections have completed. call_rcu_sched() assumes |
3098 | * that the read-side critical sections end on enabling of preemption | 3098 | * that the read-side critical sections end on enabling of preemption |
3099 | * or on voluntary preemption. | 3099 | * or on voluntary preemption. |
3100 | * RCU read-side critical sections are delimited by : | 3100 | * RCU read-side critical sections are delimited by: |
3101 | * - rcu_read_lock_sched() and rcu_read_unlock_sched(), OR | 3101 | * |
3102 | * - anything that disables preemption. | 3102 | * - rcu_read_lock_sched() and rcu_read_unlock_sched(), OR |
3103 | * - anything that disables preemption. | ||
3103 | * | 3104 | * |
3104 | * These may be nested. | 3105 | * These may be nested. |
3105 | * | 3106 | * |
@@ -3124,11 +3125,12 @@ EXPORT_SYMBOL_GPL(call_rcu_sched); | |||
3124 | * handler. This means that read-side critical sections in process | 3125 | * handler. This means that read-side critical sections in process |
3125 | * context must not be interrupted by softirqs. This interface is to be | 3126 | * context must not be interrupted by softirqs. This interface is to be |
3126 | * used when most of the read-side critical sections are in softirq context. | 3127 | * used when most of the read-side critical sections are in softirq context. |
3127 | * RCU read-side critical sections are delimited by : | 3128 | * RCU read-side critical sections are delimited by: |
3128 | * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context. | 3129 | * |
3129 | * OR | 3130 | * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context, OR |
3130 | * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context. | 3131 | * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context. |
3131 | * These may be nested. | 3132 | * |
3133 | * These may be nested. | ||
3132 | * | 3134 | * |
3133 | * See the description of call_rcu() for more detailed information on | 3135 | * See the description of call_rcu() for more detailed information on |
3134 | * memory ordering guarantees. | 3136 | * memory ordering guarantees. |
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 70ba32e08a23..d3f3094856fe 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
@@ -5356,91 +5356,62 @@ static int wake_wide(struct task_struct *p) | |||
5356 | return 1; | 5356 | return 1; |
5357 | } | 5357 | } |
5358 | 5358 | ||
5359 | struct llc_stats { | 5359 | /* |
5360 | unsigned long nr_running; | 5360 | * The purpose of wake_affine() is to quickly determine on which CPU we can run |
5361 | unsigned long load; | 5361 | * soonest. For the purpose of speed we only consider the waking and previous |
5362 | unsigned long capacity; | 5362 | * CPU. |
5363 | int has_capacity; | 5363 | * |
5364 | }; | 5364 | * wake_affine_idle() - only considers 'now', it check if the waking CPU is (or |
5365 | * will be) idle. | ||
5366 | * | ||
5367 | * wake_affine_weight() - considers the weight to reflect the average | ||
5368 | * scheduling latency of the CPUs. This seems to work | ||
5369 | * for the overloaded case. | ||
5370 | */ | ||
5365 | 5371 | ||
5366 | static bool get_llc_stats(struct llc_stats *stats, int cpu) | 5372 | static bool |
5373 | wake_affine_idle(struct sched_domain *sd, struct task_struct *p, | ||
5374 | int this_cpu, int prev_cpu, int sync) | ||
5367 | { | 5375 | { |
5368 | struct sched_domain_shared *sds = rcu_dereference(per_cpu(sd_llc_shared, cpu)); | 5376 | if (idle_cpu(this_cpu)) |
5369 | 5377 | return true; | |
5370 | if (!sds) | ||
5371 | return false; | ||
5372 | 5378 | ||
5373 | stats->nr_running = READ_ONCE(sds->nr_running); | 5379 | if (sync && cpu_rq(this_cpu)->nr_running == 1) |
5374 | stats->load = READ_ONCE(sds->load); | 5380 | return true; |
5375 | stats->capacity = READ_ONCE(sds->capacity); | ||
5376 | stats->has_capacity = stats->nr_running < per_cpu(sd_llc_size, cpu); | ||
5377 | 5381 | ||
5378 | return true; | 5382 | return false; |
5379 | } | 5383 | } |
5380 | 5384 | ||
5381 | /* | ||
5382 | * Can a task be moved from prev_cpu to this_cpu without causing a load | ||
5383 | * imbalance that would trigger the load balancer? | ||
5384 | * | ||
5385 | * Since we're running on 'stale' values, we might in fact create an imbalance | ||
5386 | * but recomputing these values is expensive, as that'd mean iteration 2 cache | ||
5387 | * domains worth of CPUs. | ||
5388 | */ | ||
5389 | static bool | 5385 | static bool |
5390 | wake_affine_llc(struct sched_domain *sd, struct task_struct *p, | 5386 | wake_affine_weight(struct sched_domain *sd, struct task_struct *p, |
5391 | int this_cpu, int prev_cpu, int sync) | 5387 | int this_cpu, int prev_cpu, int sync) |
5392 | { | 5388 | { |
5393 | struct llc_stats prev_stats, this_stats; | ||
5394 | s64 this_eff_load, prev_eff_load; | 5389 | s64 this_eff_load, prev_eff_load; |
5395 | unsigned long task_load; | 5390 | unsigned long task_load; |
5396 | 5391 | ||
5397 | if (!get_llc_stats(&prev_stats, prev_cpu) || | 5392 | this_eff_load = target_load(this_cpu, sd->wake_idx); |
5398 | !get_llc_stats(&this_stats, this_cpu)) | 5393 | prev_eff_load = source_load(prev_cpu, sd->wake_idx); |
5399 | return false; | ||
5400 | 5394 | ||
5401 | /* | ||
5402 | * If sync wakeup then subtract the (maximum possible) | ||
5403 | * effect of the currently running task from the load | ||
5404 | * of the current LLC. | ||
5405 | */ | ||
5406 | if (sync) { | 5395 | if (sync) { |
5407 | unsigned long current_load = task_h_load(current); | 5396 | unsigned long current_load = task_h_load(current); |
5408 | 5397 | ||
5409 | /* in this case load hits 0 and this LLC is considered 'idle' */ | 5398 | if (current_load > this_eff_load) |
5410 | if (current_load > this_stats.load) | ||
5411 | return true; | 5399 | return true; |
5412 | 5400 | ||
5413 | this_stats.load -= current_load; | 5401 | this_eff_load -= current_load; |
5414 | } | 5402 | } |
5415 | 5403 | ||
5416 | /* | ||
5417 | * The has_capacity stuff is not SMT aware, but by trying to balance | ||
5418 | * the nr_running on both ends we try and fill the domain at equal | ||
5419 | * rates, thereby first consuming cores before siblings. | ||
5420 | */ | ||
5421 | |||
5422 | /* if the old cache has capacity, stay there */ | ||
5423 | if (prev_stats.has_capacity && prev_stats.nr_running < this_stats.nr_running+1) | ||
5424 | return false; | ||
5425 | |||
5426 | /* if this cache has capacity, come here */ | ||
5427 | if (this_stats.has_capacity && this_stats.nr_running+1 < prev_stats.nr_running) | ||
5428 | return true; | ||
5429 | |||
5430 | /* | ||
5431 | * Check to see if we can move the load without causing too much | ||
5432 | * imbalance. | ||
5433 | */ | ||
5434 | task_load = task_h_load(p); | 5404 | task_load = task_h_load(p); |
5435 | 5405 | ||
5436 | this_eff_load = 100; | 5406 | this_eff_load += task_load; |
5437 | this_eff_load *= prev_stats.capacity; | 5407 | if (sched_feat(WA_BIAS)) |
5438 | 5408 | this_eff_load *= 100; | |
5439 | prev_eff_load = 100 + (sd->imbalance_pct - 100) / 2; | 5409 | this_eff_load *= capacity_of(prev_cpu); |
5440 | prev_eff_load *= this_stats.capacity; | ||
5441 | 5410 | ||
5442 | this_eff_load *= this_stats.load + task_load; | 5411 | prev_eff_load -= task_load; |
5443 | prev_eff_load *= prev_stats.load - task_load; | 5412 | if (sched_feat(WA_BIAS)) |
5413 | prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2; | ||
5414 | prev_eff_load *= capacity_of(this_cpu); | ||
5444 | 5415 | ||
5445 | return this_eff_load <= prev_eff_load; | 5416 | return this_eff_load <= prev_eff_load; |
5446 | } | 5417 | } |
@@ -5449,22 +5420,13 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p, | |||
5449 | int prev_cpu, int sync) | 5420 | int prev_cpu, int sync) |
5450 | { | 5421 | { |
5451 | int this_cpu = smp_processor_id(); | 5422 | int this_cpu = smp_processor_id(); |
5452 | bool affine; | 5423 | bool affine = false; |
5453 | 5424 | ||
5454 | /* | 5425 | if (sched_feat(WA_IDLE) && !affine) |
5455 | * Default to no affine wakeups; wake_affine() should not effect a task | 5426 | affine = wake_affine_idle(sd, p, this_cpu, prev_cpu, sync); |
5456 | * placement the load-balancer feels inclined to undo. The conservative | ||
5457 | * option is therefore to not move tasks when they wake up. | ||
5458 | */ | ||
5459 | affine = false; | ||
5460 | 5427 | ||
5461 | /* | 5428 | if (sched_feat(WA_WEIGHT) && !affine) |
5462 | * If the wakeup is across cache domains, try to evaluate if movement | 5429 | affine = wake_affine_weight(sd, p, this_cpu, prev_cpu, sync); |
5463 | * makes sense, otherwise rely on select_idle_siblings() to do | ||
5464 | * placement inside the cache domain. | ||
5465 | */ | ||
5466 | if (!cpus_share_cache(prev_cpu, this_cpu)) | ||
5467 | affine = wake_affine_llc(sd, p, this_cpu, prev_cpu, sync); | ||
5468 | 5430 | ||
5469 | schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts); | 5431 | schedstat_inc(p->se.statistics.nr_wakeups_affine_attempts); |
5470 | if (affine) { | 5432 | if (affine) { |
@@ -7600,7 +7562,6 @@ static inline enum fbq_type fbq_classify_rq(struct rq *rq) | |||
7600 | */ | 7562 | */ |
7601 | static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds) | 7563 | static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sds) |
7602 | { | 7564 | { |
7603 | struct sched_domain_shared *shared = env->sd->shared; | ||
7604 | struct sched_domain *child = env->sd->child; | 7565 | struct sched_domain *child = env->sd->child; |
7605 | struct sched_group *sg = env->sd->groups; | 7566 | struct sched_group *sg = env->sd->groups; |
7606 | struct sg_lb_stats *local = &sds->local_stat; | 7567 | struct sg_lb_stats *local = &sds->local_stat; |
@@ -7672,22 +7633,6 @@ next_group: | |||
7672 | if (env->dst_rq->rd->overload != overload) | 7633 | if (env->dst_rq->rd->overload != overload) |
7673 | env->dst_rq->rd->overload = overload; | 7634 | env->dst_rq->rd->overload = overload; |
7674 | } | 7635 | } |
7675 | |||
7676 | if (!shared) | ||
7677 | return; | ||
7678 | |||
7679 | /* | ||
7680 | * Since these are sums over groups they can contain some CPUs | ||
7681 | * multiple times for the NUMA domains. | ||
7682 | * | ||
7683 | * Currently only wake_affine_llc() and find_busiest_group() | ||
7684 | * uses these numbers, only the last is affected by this problem. | ||
7685 | * | ||
7686 | * XXX fix that. | ||
7687 | */ | ||
7688 | WRITE_ONCE(shared->nr_running, sds->total_running); | ||
7689 | WRITE_ONCE(shared->load, sds->total_load); | ||
7690 | WRITE_ONCE(shared->capacity, sds->total_capacity); | ||
7691 | } | 7636 | } |
7692 | 7637 | ||
7693 | /** | 7638 | /** |
@@ -8098,6 +8043,13 @@ static int should_we_balance(struct lb_env *env) | |||
8098 | int cpu, balance_cpu = -1; | 8043 | int cpu, balance_cpu = -1; |
8099 | 8044 | ||
8100 | /* | 8045 | /* |
8046 | * Ensure the balancing environment is consistent; can happen | ||
8047 | * when the softirq triggers 'during' hotplug. | ||
8048 | */ | ||
8049 | if (!cpumask_test_cpu(env->dst_cpu, env->cpus)) | ||
8050 | return 0; | ||
8051 | |||
8052 | /* | ||
8101 | * In the newly idle case, we will allow all the cpu's | 8053 | * In the newly idle case, we will allow all the cpu's |
8102 | * to do the newly idle load balance. | 8054 | * to do the newly idle load balance. |
8103 | */ | 8055 | */ |
diff --git a/kernel/sched/features.h b/kernel/sched/features.h index d3fb15555291..319ed0e8a347 100644 --- a/kernel/sched/features.h +++ b/kernel/sched/features.h | |||
@@ -81,3 +81,6 @@ SCHED_FEAT(RT_RUNTIME_SHARE, true) | |||
81 | SCHED_FEAT(LB_MIN, false) | 81 | SCHED_FEAT(LB_MIN, false) |
82 | SCHED_FEAT(ATTACH_AGE_LOAD, true) | 82 | SCHED_FEAT(ATTACH_AGE_LOAD, true) |
83 | 83 | ||
84 | SCHED_FEAT(WA_IDLE, true) | ||
85 | SCHED_FEAT(WA_WEIGHT, true) | ||
86 | SCHED_FEAT(WA_BIAS, true) | ||
diff --git a/kernel/sched/membarrier.c b/kernel/sched/membarrier.c index a92fddc22747..dd7908743dab 100644 --- a/kernel/sched/membarrier.c +++ b/kernel/sched/membarrier.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/membarrier.h> | 18 | #include <linux/membarrier.h> |
19 | #include <linux/tick.h> | 19 | #include <linux/tick.h> |
20 | #include <linux/cpumask.h> | 20 | #include <linux/cpumask.h> |
21 | #include <linux/atomic.h> | ||
21 | 22 | ||
22 | #include "sched.h" /* for cpu_rq(). */ | 23 | #include "sched.h" /* for cpu_rq(). */ |
23 | 24 | ||
@@ -26,21 +27,26 @@ | |||
26 | * except MEMBARRIER_CMD_QUERY. | 27 | * except MEMBARRIER_CMD_QUERY. |
27 | */ | 28 | */ |
28 | #define MEMBARRIER_CMD_BITMASK \ | 29 | #define MEMBARRIER_CMD_BITMASK \ |
29 | (MEMBARRIER_CMD_SHARED | MEMBARRIER_CMD_PRIVATE_EXPEDITED) | 30 | (MEMBARRIER_CMD_SHARED | MEMBARRIER_CMD_PRIVATE_EXPEDITED \ |
31 | | MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED) | ||
30 | 32 | ||
31 | static void ipi_mb(void *info) | 33 | static void ipi_mb(void *info) |
32 | { | 34 | { |
33 | smp_mb(); /* IPIs should be serializing but paranoid. */ | 35 | smp_mb(); /* IPIs should be serializing but paranoid. */ |
34 | } | 36 | } |
35 | 37 | ||
36 | static void membarrier_private_expedited(void) | 38 | static int membarrier_private_expedited(void) |
37 | { | 39 | { |
38 | int cpu; | 40 | int cpu; |
39 | bool fallback = false; | 41 | bool fallback = false; |
40 | cpumask_var_t tmpmask; | 42 | cpumask_var_t tmpmask; |
41 | 43 | ||
44 | if (!(atomic_read(¤t->mm->membarrier_state) | ||
45 | & MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY)) | ||
46 | return -EPERM; | ||
47 | |||
42 | if (num_online_cpus() == 1) | 48 | if (num_online_cpus() == 1) |
43 | return; | 49 | return 0; |
44 | 50 | ||
45 | /* | 51 | /* |
46 | * Matches memory barriers around rq->curr modification in | 52 | * Matches memory barriers around rq->curr modification in |
@@ -94,6 +100,24 @@ static void membarrier_private_expedited(void) | |||
94 | * rq->curr modification in scheduler. | 100 | * rq->curr modification in scheduler. |
95 | */ | 101 | */ |
96 | smp_mb(); /* exit from system call is not a mb */ | 102 | smp_mb(); /* exit from system call is not a mb */ |
103 | return 0; | ||
104 | } | ||
105 | |||
106 | static void membarrier_register_private_expedited(void) | ||
107 | { | ||
108 | struct task_struct *p = current; | ||
109 | struct mm_struct *mm = p->mm; | ||
110 | |||
111 | /* | ||
112 | * We need to consider threads belonging to different thread | ||
113 | * groups, which use the same mm. (CLONE_VM but not | ||
114 | * CLONE_THREAD). | ||
115 | */ | ||
116 | if (atomic_read(&mm->membarrier_state) | ||
117 | & MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY) | ||
118 | return; | ||
119 | atomic_or(MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY, | ||
120 | &mm->membarrier_state); | ||
97 | } | 121 | } |
98 | 122 | ||
99 | /** | 123 | /** |
@@ -144,7 +168,9 @@ SYSCALL_DEFINE2(membarrier, int, cmd, int, flags) | |||
144 | synchronize_sched(); | 168 | synchronize_sched(); |
145 | return 0; | 169 | return 0; |
146 | case MEMBARRIER_CMD_PRIVATE_EXPEDITED: | 170 | case MEMBARRIER_CMD_PRIVATE_EXPEDITED: |
147 | membarrier_private_expedited(); | 171 | return membarrier_private_expedited(); |
172 | case MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED: | ||
173 | membarrier_register_private_expedited(); | ||
148 | return 0; | 174 | return 0; |
149 | default: | 175 | default: |
150 | return -EINVAL; | 176 | return -EINVAL; |
diff --git a/kernel/seccomp.c b/kernel/seccomp.c index bb3a38005b9c..0ae832e13b97 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c | |||
@@ -473,7 +473,7 @@ static long seccomp_attach_filter(unsigned int flags, | |||
473 | return 0; | 473 | return 0; |
474 | } | 474 | } |
475 | 475 | ||
476 | void __get_seccomp_filter(struct seccomp_filter *filter) | 476 | static void __get_seccomp_filter(struct seccomp_filter *filter) |
477 | { | 477 | { |
478 | /* Reference count is bounded by the number of total processes. */ | 478 | /* Reference count is bounded by the number of total processes. */ |
479 | refcount_inc(&filter->usage); | 479 | refcount_inc(&filter->usage); |
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 64d0edf428f8..a2dccfe1acec 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -68,6 +68,7 @@ enum { | |||
68 | * attach_mutex to avoid changing binding state while | 68 | * attach_mutex to avoid changing binding state while |
69 | * worker_attach_to_pool() is in progress. | 69 | * worker_attach_to_pool() is in progress. |
70 | */ | 70 | */ |
71 | POOL_MANAGER_ACTIVE = 1 << 0, /* being managed */ | ||
71 | POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */ | 72 | POOL_DISASSOCIATED = 1 << 2, /* cpu can't serve workers */ |
72 | 73 | ||
73 | /* worker flags */ | 74 | /* worker flags */ |
@@ -165,7 +166,6 @@ struct worker_pool { | |||
165 | /* L: hash of busy workers */ | 166 | /* L: hash of busy workers */ |
166 | 167 | ||
167 | /* see manage_workers() for details on the two manager mutexes */ | 168 | /* see manage_workers() for details on the two manager mutexes */ |
168 | struct mutex manager_arb; /* manager arbitration */ | ||
169 | struct worker *manager; /* L: purely informational */ | 169 | struct worker *manager; /* L: purely informational */ |
170 | struct mutex attach_mutex; /* attach/detach exclusion */ | 170 | struct mutex attach_mutex; /* attach/detach exclusion */ |
171 | struct list_head workers; /* A: attached workers */ | 171 | struct list_head workers; /* A: attached workers */ |
@@ -299,6 +299,7 @@ static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf; | |||
299 | 299 | ||
300 | static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */ | 300 | static DEFINE_MUTEX(wq_pool_mutex); /* protects pools and workqueues list */ |
301 | static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */ | 301 | static DEFINE_SPINLOCK(wq_mayday_lock); /* protects wq->maydays list */ |
302 | static DECLARE_WAIT_QUEUE_HEAD(wq_manager_wait); /* wait for manager to go away */ | ||
302 | 303 | ||
303 | static LIST_HEAD(workqueues); /* PR: list of all workqueues */ | 304 | static LIST_HEAD(workqueues); /* PR: list of all workqueues */ |
304 | static bool workqueue_freezing; /* PL: have wqs started freezing? */ | 305 | static bool workqueue_freezing; /* PL: have wqs started freezing? */ |
@@ -801,7 +802,7 @@ static bool need_to_create_worker(struct worker_pool *pool) | |||
801 | /* Do we have too many workers and should some go away? */ | 802 | /* Do we have too many workers and should some go away? */ |
802 | static bool too_many_workers(struct worker_pool *pool) | 803 | static bool too_many_workers(struct worker_pool *pool) |
803 | { | 804 | { |
804 | bool managing = mutex_is_locked(&pool->manager_arb); | 805 | bool managing = pool->flags & POOL_MANAGER_ACTIVE; |
805 | int nr_idle = pool->nr_idle + managing; /* manager is considered idle */ | 806 | int nr_idle = pool->nr_idle + managing; /* manager is considered idle */ |
806 | int nr_busy = pool->nr_workers - nr_idle; | 807 | int nr_busy = pool->nr_workers - nr_idle; |
807 | 808 | ||
@@ -1980,24 +1981,17 @@ static bool manage_workers(struct worker *worker) | |||
1980 | { | 1981 | { |
1981 | struct worker_pool *pool = worker->pool; | 1982 | struct worker_pool *pool = worker->pool; |
1982 | 1983 | ||
1983 | /* | 1984 | if (pool->flags & POOL_MANAGER_ACTIVE) |
1984 | * Anyone who successfully grabs manager_arb wins the arbitration | ||
1985 | * and becomes the manager. mutex_trylock() on pool->manager_arb | ||
1986 | * failure while holding pool->lock reliably indicates that someone | ||
1987 | * else is managing the pool and the worker which failed trylock | ||
1988 | * can proceed to executing work items. This means that anyone | ||
1989 | * grabbing manager_arb is responsible for actually performing | ||
1990 | * manager duties. If manager_arb is grabbed and released without | ||
1991 | * actual management, the pool may stall indefinitely. | ||
1992 | */ | ||
1993 | if (!mutex_trylock(&pool->manager_arb)) | ||
1994 | return false; | 1985 | return false; |
1986 | |||
1987 | pool->flags |= POOL_MANAGER_ACTIVE; | ||
1995 | pool->manager = worker; | 1988 | pool->manager = worker; |
1996 | 1989 | ||
1997 | maybe_create_worker(pool); | 1990 | maybe_create_worker(pool); |
1998 | 1991 | ||
1999 | pool->manager = NULL; | 1992 | pool->manager = NULL; |
2000 | mutex_unlock(&pool->manager_arb); | 1993 | pool->flags &= ~POOL_MANAGER_ACTIVE; |
1994 | wake_up(&wq_manager_wait); | ||
2001 | return true; | 1995 | return true; |
2002 | } | 1996 | } |
2003 | 1997 | ||
@@ -3248,7 +3242,6 @@ static int init_worker_pool(struct worker_pool *pool) | |||
3248 | setup_timer(&pool->mayday_timer, pool_mayday_timeout, | 3242 | setup_timer(&pool->mayday_timer, pool_mayday_timeout, |
3249 | (unsigned long)pool); | 3243 | (unsigned long)pool); |
3250 | 3244 | ||
3251 | mutex_init(&pool->manager_arb); | ||
3252 | mutex_init(&pool->attach_mutex); | 3245 | mutex_init(&pool->attach_mutex); |
3253 | INIT_LIST_HEAD(&pool->workers); | 3246 | INIT_LIST_HEAD(&pool->workers); |
3254 | 3247 | ||
@@ -3318,13 +3311,15 @@ static void put_unbound_pool(struct worker_pool *pool) | |||
3318 | hash_del(&pool->hash_node); | 3311 | hash_del(&pool->hash_node); |
3319 | 3312 | ||
3320 | /* | 3313 | /* |
3321 | * Become the manager and destroy all workers. Grabbing | 3314 | * Become the manager and destroy all workers. This prevents |
3322 | * manager_arb prevents @pool's workers from blocking on | 3315 | * @pool's workers from blocking on attach_mutex. We're the last |
3323 | * attach_mutex. | 3316 | * manager and @pool gets freed with the flag set. |
3324 | */ | 3317 | */ |
3325 | mutex_lock(&pool->manager_arb); | ||
3326 | |||
3327 | spin_lock_irq(&pool->lock); | 3318 | spin_lock_irq(&pool->lock); |
3319 | wait_event_lock_irq(wq_manager_wait, | ||
3320 | !(pool->flags & POOL_MANAGER_ACTIVE), pool->lock); | ||
3321 | pool->flags |= POOL_MANAGER_ACTIVE; | ||
3322 | |||
3328 | while ((worker = first_idle_worker(pool))) | 3323 | while ((worker = first_idle_worker(pool))) |
3329 | destroy_worker(worker); | 3324 | destroy_worker(worker); |
3330 | WARN_ON(pool->nr_workers || pool->nr_idle); | 3325 | WARN_ON(pool->nr_workers || pool->nr_idle); |
@@ -3338,8 +3333,6 @@ static void put_unbound_pool(struct worker_pool *pool) | |||
3338 | if (pool->detach_completion) | 3333 | if (pool->detach_completion) |
3339 | wait_for_completion(pool->detach_completion); | 3334 | wait_for_completion(pool->detach_completion); |
3340 | 3335 | ||
3341 | mutex_unlock(&pool->manager_arb); | ||
3342 | |||
3343 | /* shut down the timers */ | 3336 | /* shut down the timers */ |
3344 | del_timer_sync(&pool->idle_timer); | 3337 | del_timer_sync(&pool->idle_timer); |
3345 | del_timer_sync(&pool->mayday_timer); | 3338 | del_timer_sync(&pool->mayday_timer); |
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 2689b7c50c52..dfdad67d8f6c 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
@@ -1092,8 +1092,8 @@ config PROVE_LOCKING | |||
1092 | select DEBUG_MUTEXES | 1092 | select DEBUG_MUTEXES |
1093 | select DEBUG_RT_MUTEXES if RT_MUTEXES | 1093 | select DEBUG_RT_MUTEXES if RT_MUTEXES |
1094 | select DEBUG_LOCK_ALLOC | 1094 | select DEBUG_LOCK_ALLOC |
1095 | select LOCKDEP_CROSSRELEASE | 1095 | select LOCKDEP_CROSSRELEASE if BROKEN |
1096 | select LOCKDEP_COMPLETIONS | 1096 | select LOCKDEP_COMPLETIONS if BROKEN |
1097 | select TRACE_IRQFLAGS | 1097 | select TRACE_IRQFLAGS |
1098 | default n | 1098 | default n |
1099 | help | 1099 | help |
@@ -1590,6 +1590,54 @@ config LATENCYTOP | |||
1590 | 1590 | ||
1591 | source kernel/trace/Kconfig | 1591 | source kernel/trace/Kconfig |
1592 | 1592 | ||
1593 | config PROVIDE_OHCI1394_DMA_INIT | ||
1594 | bool "Remote debugging over FireWire early on boot" | ||
1595 | depends on PCI && X86 | ||
1596 | help | ||
1597 | If you want to debug problems which hang or crash the kernel early | ||
1598 | on boot and the crashing machine has a FireWire port, you can use | ||
1599 | this feature to remotely access the memory of the crashed machine | ||
1600 | over FireWire. This employs remote DMA as part of the OHCI1394 | ||
1601 | specification which is now the standard for FireWire controllers. | ||
1602 | |||
1603 | With remote DMA, you can monitor the printk buffer remotely using | ||
1604 | firescope and access all memory below 4GB using fireproxy from gdb. | ||
1605 | Even controlling a kernel debugger is possible using remote DMA. | ||
1606 | |||
1607 | Usage: | ||
1608 | |||
1609 | If ohci1394_dma=early is used as boot parameter, it will initialize | ||
1610 | all OHCI1394 controllers which are found in the PCI config space. | ||
1611 | |||
1612 | As all changes to the FireWire bus such as enabling and disabling | ||
1613 | devices cause a bus reset and thereby disable remote DMA for all | ||
1614 | devices, be sure to have the cable plugged and FireWire enabled on | ||
1615 | the debugging host before booting the debug target for debugging. | ||
1616 | |||
1617 | This code (~1k) is freed after boot. By then, the firewire stack | ||
1618 | in charge of the OHCI-1394 controllers should be used instead. | ||
1619 | |||
1620 | See Documentation/debugging-via-ohci1394.txt for more information. | ||
1621 | |||
1622 | config DMA_API_DEBUG | ||
1623 | bool "Enable debugging of DMA-API usage" | ||
1624 | depends on HAVE_DMA_API_DEBUG | ||
1625 | help | ||
1626 | Enable this option to debug the use of the DMA API by device drivers. | ||
1627 | With this option you will be able to detect common bugs in device | ||
1628 | drivers like double-freeing of DMA mappings or freeing mappings that | ||
1629 | were never allocated. | ||
1630 | |||
1631 | This also attempts to catch cases where a page owned by DMA is | ||
1632 | accessed by the cpu in a way that could cause data corruption. For | ||
1633 | example, this enables cow_user_page() to check that the source page is | ||
1634 | not undergoing DMA. | ||
1635 | |||
1636 | This option causes a performance degradation. Use only if you want to | ||
1637 | debug device drivers and dma interactions. | ||
1638 | |||
1639 | If unsure, say N. | ||
1640 | |||
1593 | menu "Runtime Testing" | 1641 | menu "Runtime Testing" |
1594 | 1642 | ||
1595 | config LKDTM | 1643 | config LKDTM |
@@ -1749,56 +1797,6 @@ config TEST_PARMAN | |||
1749 | 1797 | ||
1750 | If unsure, say N. | 1798 | If unsure, say N. |
1751 | 1799 | ||
1752 | endmenu # runtime tests | ||
1753 | |||
1754 | config PROVIDE_OHCI1394_DMA_INIT | ||
1755 | bool "Remote debugging over FireWire early on boot" | ||
1756 | depends on PCI && X86 | ||
1757 | help | ||
1758 | If you want to debug problems which hang or crash the kernel early | ||
1759 | on boot and the crashing machine has a FireWire port, you can use | ||
1760 | this feature to remotely access the memory of the crashed machine | ||
1761 | over FireWire. This employs remote DMA as part of the OHCI1394 | ||
1762 | specification which is now the standard for FireWire controllers. | ||
1763 | |||
1764 | With remote DMA, you can monitor the printk buffer remotely using | ||
1765 | firescope and access all memory below 4GB using fireproxy from gdb. | ||
1766 | Even controlling a kernel debugger is possible using remote DMA. | ||
1767 | |||
1768 | Usage: | ||
1769 | |||
1770 | If ohci1394_dma=early is used as boot parameter, it will initialize | ||
1771 | all OHCI1394 controllers which are found in the PCI config space. | ||
1772 | |||
1773 | As all changes to the FireWire bus such as enabling and disabling | ||
1774 | devices cause a bus reset and thereby disable remote DMA for all | ||
1775 | devices, be sure to have the cable plugged and FireWire enabled on | ||
1776 | the debugging host before booting the debug target for debugging. | ||
1777 | |||
1778 | This code (~1k) is freed after boot. By then, the firewire stack | ||
1779 | in charge of the OHCI-1394 controllers should be used instead. | ||
1780 | |||
1781 | See Documentation/debugging-via-ohci1394.txt for more information. | ||
1782 | |||
1783 | config DMA_API_DEBUG | ||
1784 | bool "Enable debugging of DMA-API usage" | ||
1785 | depends on HAVE_DMA_API_DEBUG | ||
1786 | help | ||
1787 | Enable this option to debug the use of the DMA API by device drivers. | ||
1788 | With this option you will be able to detect common bugs in device | ||
1789 | drivers like double-freeing of DMA mappings or freeing mappings that | ||
1790 | were never allocated. | ||
1791 | |||
1792 | This also attempts to catch cases where a page owned by DMA is | ||
1793 | accessed by the cpu in a way that could cause data corruption. For | ||
1794 | example, this enables cow_user_page() to check that the source page is | ||
1795 | not undergoing DMA. | ||
1796 | |||
1797 | This option causes a performance degradation. Use only if you want to | ||
1798 | debug device drivers and dma interactions. | ||
1799 | |||
1800 | If unsure, say N. | ||
1801 | |||
1802 | config TEST_LKM | 1800 | config TEST_LKM |
1803 | tristate "Test module loading with 'hello world' module" | 1801 | tristate "Test module loading with 'hello world' module" |
1804 | default n | 1802 | default n |
@@ -1873,18 +1871,6 @@ config TEST_UDELAY | |||
1873 | 1871 | ||
1874 | If unsure, say N. | 1872 | If unsure, say N. |
1875 | 1873 | ||
1876 | config MEMTEST | ||
1877 | bool "Memtest" | ||
1878 | depends on HAVE_MEMBLOCK | ||
1879 | ---help--- | ||
1880 | This option adds a kernel parameter 'memtest', which allows memtest | ||
1881 | to be set. | ||
1882 | memtest=0, mean disabled; -- default | ||
1883 | memtest=1, mean do 1 test pattern; | ||
1884 | ... | ||
1885 | memtest=17, mean do 17 test patterns. | ||
1886 | If you are unsure how to answer this question, answer N. | ||
1887 | |||
1888 | config TEST_STATIC_KEYS | 1874 | config TEST_STATIC_KEYS |
1889 | tristate "Test static keys" | 1875 | tristate "Test static keys" |
1890 | default n | 1876 | default n |
@@ -1894,16 +1880,6 @@ config TEST_STATIC_KEYS | |||
1894 | 1880 | ||
1895 | If unsure, say N. | 1881 | If unsure, say N. |
1896 | 1882 | ||
1897 | config BUG_ON_DATA_CORRUPTION | ||
1898 | bool "Trigger a BUG when data corruption is detected" | ||
1899 | select DEBUG_LIST | ||
1900 | help | ||
1901 | Select this option if the kernel should BUG when it encounters | ||
1902 | data corruption in kernel memory structures when they get checked | ||
1903 | for validity. | ||
1904 | |||
1905 | If unsure, say N. | ||
1906 | |||
1907 | config TEST_KMOD | 1883 | config TEST_KMOD |
1908 | tristate "kmod stress tester" | 1884 | tristate "kmod stress tester" |
1909 | default n | 1885 | default n |
@@ -1941,6 +1917,29 @@ config TEST_DEBUG_VIRTUAL | |||
1941 | 1917 | ||
1942 | If unsure, say N. | 1918 | If unsure, say N. |
1943 | 1919 | ||
1920 | endmenu # runtime tests | ||
1921 | |||
1922 | config MEMTEST | ||
1923 | bool "Memtest" | ||
1924 | depends on HAVE_MEMBLOCK | ||
1925 | ---help--- | ||
1926 | This option adds a kernel parameter 'memtest', which allows memtest | ||
1927 | to be set. | ||
1928 | memtest=0, mean disabled; -- default | ||
1929 | memtest=1, mean do 1 test pattern; | ||
1930 | ... | ||
1931 | memtest=17, mean do 17 test patterns. | ||
1932 | If you are unsure how to answer this question, answer N. | ||
1933 | |||
1934 | config BUG_ON_DATA_CORRUPTION | ||
1935 | bool "Trigger a BUG when data corruption is detected" | ||
1936 | select DEBUG_LIST | ||
1937 | help | ||
1938 | Select this option if the kernel should BUG when it encounters | ||
1939 | data corruption in kernel memory structures when they get checked | ||
1940 | for validity. | ||
1941 | |||
1942 | If unsure, say N. | ||
1944 | 1943 | ||
1945 | source "samples/Kconfig" | 1944 | source "samples/Kconfig" |
1946 | 1945 | ||
diff --git a/lib/assoc_array.c b/lib/assoc_array.c index 155c55d8db5f..4e53be8bc590 100644 --- a/lib/assoc_array.c +++ b/lib/assoc_array.c | |||
@@ -598,21 +598,31 @@ static bool assoc_array_insert_into_terminal_node(struct assoc_array_edit *edit, | |||
598 | if ((edit->segment_cache[ASSOC_ARRAY_FAN_OUT] ^ base_seg) == 0) | 598 | if ((edit->segment_cache[ASSOC_ARRAY_FAN_OUT] ^ base_seg) == 0) |
599 | goto all_leaves_cluster_together; | 599 | goto all_leaves_cluster_together; |
600 | 600 | ||
601 | /* Otherwise we can just insert a new node ahead of the old | 601 | /* Otherwise all the old leaves cluster in the same slot, but |
602 | * one. | 602 | * the new leaf wants to go into a different slot - so we |
603 | * create a new node (n0) to hold the new leaf and a pointer to | ||
604 | * a new node (n1) holding all the old leaves. | ||
605 | * | ||
606 | * This can be done by falling through to the node splitting | ||
607 | * path. | ||
603 | */ | 608 | */ |
604 | goto present_leaves_cluster_but_not_new_leaf; | 609 | pr_devel("present leaves cluster but not new leaf\n"); |
605 | } | 610 | } |
606 | 611 | ||
607 | split_node: | 612 | split_node: |
608 | pr_devel("split node\n"); | 613 | pr_devel("split node\n"); |
609 | 614 | ||
610 | /* We need to split the current node; we know that the node doesn't | 615 | /* We need to split the current node. The node must contain anything |
611 | * simply contain a full set of leaves that cluster together (it | 616 | * from a single leaf (in the one leaf case, this leaf will cluster |
612 | * contains meta pointers and/or non-clustering leaves). | 617 | * with the new leaf) and the rest meta-pointers, to all leaves, some |
618 | * of which may cluster. | ||
619 | * | ||
620 | * It won't contain the case in which all the current leaves plus the | ||
621 | * new leaves want to cluster in the same slot. | ||
613 | * | 622 | * |
614 | * We need to expel at least two leaves out of a set consisting of the | 623 | * We need to expel at least two leaves out of a set consisting of the |
615 | * leaves in the node and the new leaf. | 624 | * leaves in the node and the new leaf. The current meta pointers can |
625 | * just be copied as they shouldn't cluster with any of the leaves. | ||
616 | * | 626 | * |
617 | * We need a new node (n0) to replace the current one and a new node to | 627 | * We need a new node (n0) to replace the current one and a new node to |
618 | * take the expelled nodes (n1). | 628 | * take the expelled nodes (n1). |
@@ -717,33 +727,6 @@ found_slot_for_multiple_occupancy: | |||
717 | pr_devel("<--%s() = ok [split node]\n", __func__); | 727 | pr_devel("<--%s() = ok [split node]\n", __func__); |
718 | return true; | 728 | return true; |
719 | 729 | ||
720 | present_leaves_cluster_but_not_new_leaf: | ||
721 | /* All the old leaves cluster in the same slot, but the new leaf wants | ||
722 | * to go into a different slot, so we create a new node to hold the new | ||
723 | * leaf and a pointer to a new node holding all the old leaves. | ||
724 | */ | ||
725 | pr_devel("present leaves cluster but not new leaf\n"); | ||
726 | |||
727 | new_n0->back_pointer = node->back_pointer; | ||
728 | new_n0->parent_slot = node->parent_slot; | ||
729 | new_n0->nr_leaves_on_branch = node->nr_leaves_on_branch; | ||
730 | new_n1->back_pointer = assoc_array_node_to_ptr(new_n0); | ||
731 | new_n1->parent_slot = edit->segment_cache[0]; | ||
732 | new_n1->nr_leaves_on_branch = node->nr_leaves_on_branch; | ||
733 | edit->adjust_count_on = new_n0; | ||
734 | |||
735 | for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) | ||
736 | new_n1->slots[i] = node->slots[i]; | ||
737 | |||
738 | new_n0->slots[edit->segment_cache[0]] = assoc_array_node_to_ptr(new_n0); | ||
739 | edit->leaf_p = &new_n0->slots[edit->segment_cache[ASSOC_ARRAY_FAN_OUT]]; | ||
740 | |||
741 | edit->set[0].ptr = &assoc_array_ptr_to_node(node->back_pointer)->slots[node->parent_slot]; | ||
742 | edit->set[0].to = assoc_array_node_to_ptr(new_n0); | ||
743 | edit->excised_meta[0] = assoc_array_node_to_ptr(node); | ||
744 | pr_devel("<--%s() = ok [insert node before]\n", __func__); | ||
745 | return true; | ||
746 | |||
747 | all_leaves_cluster_together: | 730 | all_leaves_cluster_together: |
748 | /* All the leaves, new and old, want to cluster together in this node | 731 | /* All the leaves, new and old, want to cluster together in this node |
749 | * in the same slot, so we have to replace this node with a shortcut to | 732 | * in the same slot, so we have to replace this node with a shortcut to |
diff --git a/lib/digsig.c b/lib/digsig.c index 03d7c63837ae..6ba6fcd92dd1 100644 --- a/lib/digsig.c +++ b/lib/digsig.c | |||
@@ -87,6 +87,12 @@ static int digsig_verify_rsa(struct key *key, | |||
87 | down_read(&key->sem); | 87 | down_read(&key->sem); |
88 | ukp = user_key_payload_locked(key); | 88 | ukp = user_key_payload_locked(key); |
89 | 89 | ||
90 | if (!ukp) { | ||
91 | /* key was revoked before we acquired its semaphore */ | ||
92 | err = -EKEYREVOKED; | ||
93 | goto err1; | ||
94 | } | ||
95 | |||
90 | if (ukp->datalen < sizeof(*pkh)) | 96 | if (ukp->datalen < sizeof(*pkh)) |
91 | goto err1; | 97 | goto err1; |
92 | 98 | ||
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c index cd0b5c964bd0..2b827b8a1d8c 100644 --- a/lib/locking-selftest.c +++ b/lib/locking-selftest.c | |||
@@ -2031,11 +2031,13 @@ void locking_selftest(void) | |||
2031 | print_testname("mixed read-lock/lock-write ABBA"); | 2031 | print_testname("mixed read-lock/lock-write ABBA"); |
2032 | pr_cont(" |"); | 2032 | pr_cont(" |"); |
2033 | dotest(rlock_ABBA1, FAILURE, LOCKTYPE_RWLOCK); | 2033 | dotest(rlock_ABBA1, FAILURE, LOCKTYPE_RWLOCK); |
2034 | #ifdef CONFIG_PROVE_LOCKING | ||
2034 | /* | 2035 | /* |
2035 | * Lockdep does indeed fail here, but there's nothing we can do about | 2036 | * Lockdep does indeed fail here, but there's nothing we can do about |
2036 | * that now. Don't kill lockdep for it. | 2037 | * that now. Don't kill lockdep for it. |
2037 | */ | 2038 | */ |
2038 | unexpected_testcase_failures--; | 2039 | unexpected_testcase_failures--; |
2040 | #endif | ||
2039 | 2041 | ||
2040 | pr_cont(" |"); | 2042 | pr_cont(" |"); |
2041 | dotest(rwsem_ABBA1, FAILURE, LOCKTYPE_RWSEM); | 2043 | dotest(rwsem_ABBA1, FAILURE, LOCKTYPE_RWSEM); |
diff --git a/lib/ts_fsm.c b/lib/ts_fsm.c index 5696a35184e4..69557c74ef9f 100644 --- a/lib/ts_fsm.c +++ b/lib/ts_fsm.c | |||
@@ -11,7 +11,7 @@ | |||
11 | * ========================================================================== | 11 | * ========================================================================== |
12 | * | 12 | * |
13 | * A finite state machine consists of n states (struct ts_fsm_token) | 13 | * A finite state machine consists of n states (struct ts_fsm_token) |
14 | * representing the pattern as a finite automation. The data is read | 14 | * representing the pattern as a finite automaton. The data is read |
15 | * sequentially on an octet basis. Every state token specifies the number | 15 | * sequentially on an octet basis. Every state token specifies the number |
16 | * of recurrences and the type of value accepted which can be either a | 16 | * of recurrences and the type of value accepted which can be either a |
17 | * specific character or ctype based set of characters. The available | 17 | * specific character or ctype based set of characters. The available |
diff --git a/lib/ts_kmp.c b/lib/ts_kmp.c index 632f783e65f1..ffbe66cbb0ed 100644 --- a/lib/ts_kmp.c +++ b/lib/ts_kmp.c | |||
@@ -27,7 +27,7 @@ | |||
27 | * | 27 | * |
28 | * [1] Cormen, Leiserson, Rivest, Stein | 28 | * [1] Cormen, Leiserson, Rivest, Stein |
29 | * Introdcution to Algorithms, 2nd Edition, MIT Press | 29 | * Introdcution to Algorithms, 2nd Edition, MIT Press |
30 | * [2] See finite automation theory | 30 | * [2] See finite automaton theory |
31 | */ | 31 | */ |
32 | 32 | ||
33 | #include <linux/module.h> | 33 | #include <linux/module.h> |
@@ -460,7 +460,7 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align, | |||
460 | 460 | ||
461 | trace_cma_alloc(pfn, page, count, align); | 461 | trace_cma_alloc(pfn, page, count, align); |
462 | 462 | ||
463 | if (ret) { | 463 | if (ret && !(gfp_mask & __GFP_NOWARN)) { |
464 | pr_info("%s: alloc failed, req-size: %zu pages, ret: %d\n", | 464 | pr_info("%s: alloc failed, req-size: %zu pages, ret: %d\n", |
465 | __func__, count, ret); | 465 | __func__, count, ret); |
466 | cma_debug_show_areas(cma); | 466 | cma_debug_show_areas(cma); |
diff --git a/mm/madvise.c b/mm/madvise.c index 25bade36e9ca..fd70d6aabc3e 100644 --- a/mm/madvise.c +++ b/mm/madvise.c | |||
@@ -757,6 +757,9 @@ madvise_behavior_valid(int behavior) | |||
757 | * MADV_DONTFORK - omit this area from child's address space when forking: | 757 | * MADV_DONTFORK - omit this area from child's address space when forking: |
758 | * typically, to avoid COWing pages pinned by get_user_pages(). | 758 | * typically, to avoid COWing pages pinned by get_user_pages(). |
759 | * MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking. | 759 | * MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking. |
760 | * MADV_WIPEONFORK - present the child process with zero-filled memory in this | ||
761 | * range after a fork. | ||
762 | * MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK | ||
760 | * MADV_HWPOISON - trigger memory error handler as if the given memory range | 763 | * MADV_HWPOISON - trigger memory error handler as if the given memory range |
761 | * were corrupted by unrecoverable hardware memory failure. | 764 | * were corrupted by unrecoverable hardware memory failure. |
762 | * MADV_SOFT_OFFLINE - try to soft-offline the given range of memory. | 765 | * MADV_SOFT_OFFLINE - try to soft-offline the given range of memory. |
@@ -777,7 +780,9 @@ madvise_behavior_valid(int behavior) | |||
777 | * zero - success | 780 | * zero - success |
778 | * -EINVAL - start + len < 0, start is not page-aligned, | 781 | * -EINVAL - start + len < 0, start is not page-aligned, |
779 | * "behavior" is not a valid value, or application | 782 | * "behavior" is not a valid value, or application |
780 | * is attempting to release locked or shared pages. | 783 | * is attempting to release locked or shared pages, |
784 | * or the specified address range includes file, Huge TLB, | ||
785 | * MAP_SHARED or VMPFNMAP range. | ||
781 | * -ENOMEM - addresses in the specified range are not currently | 786 | * -ENOMEM - addresses in the specified range are not currently |
782 | * mapped, or are outside the AS of the process. | 787 | * mapped, or are outside the AS of the process. |
783 | * -EIO - an I/O error occurred while paging in data. | 788 | * -EIO - an I/O error occurred while paging in data. |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index d5f3a62887cf..661f046ad318 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -5828,21 +5828,6 @@ void mem_cgroup_sk_alloc(struct sock *sk) | |||
5828 | if (!mem_cgroup_sockets_enabled) | 5828 | if (!mem_cgroup_sockets_enabled) |
5829 | return; | 5829 | return; |
5830 | 5830 | ||
5831 | /* | ||
5832 | * Socket cloning can throw us here with sk_memcg already | ||
5833 | * filled. It won't however, necessarily happen from | ||
5834 | * process context. So the test for root memcg given | ||
5835 | * the current task's memcg won't help us in this case. | ||
5836 | * | ||
5837 | * Respecting the original socket's memcg is a better | ||
5838 | * decision in this case. | ||
5839 | */ | ||
5840 | if (sk->sk_memcg) { | ||
5841 | BUG_ON(mem_cgroup_is_root(sk->sk_memcg)); | ||
5842 | css_get(&sk->sk_memcg->css); | ||
5843 | return; | ||
5844 | } | ||
5845 | |||
5846 | rcu_read_lock(); | 5831 | rcu_read_lock(); |
5847 | memcg = mem_cgroup_from_task(current); | 5832 | memcg = mem_cgroup_from_task(current); |
5848 | if (memcg == root_mem_cgroup) | 5833 | if (memcg == root_mem_cgroup) |
diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 006ba625c0b8..a2af6d58a68f 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c | |||
@@ -1920,8 +1920,11 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, | |||
1920 | struct page *page; | 1920 | struct page *page; |
1921 | 1921 | ||
1922 | page = __alloc_pages(gfp, order, nid); | 1922 | page = __alloc_pages(gfp, order, nid); |
1923 | if (page && page_to_nid(page) == nid) | 1923 | if (page && page_to_nid(page) == nid) { |
1924 | inc_zone_page_state(page, NUMA_INTERLEAVE_HIT); | 1924 | preempt_disable(); |
1925 | __inc_numa_state(page_zone(page), NUMA_INTERLEAVE_HIT); | ||
1926 | preempt_enable(); | ||
1927 | } | ||
1925 | return page; | 1928 | return page; |
1926 | } | 1929 | } |
1927 | 1930 | ||
diff --git a/mm/migrate.c b/mm/migrate.c index 6954c1435833..e00814ca390e 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
@@ -2146,8 +2146,9 @@ static int migrate_vma_collect_hole(unsigned long start, | |||
2146 | unsigned long addr; | 2146 | unsigned long addr; |
2147 | 2147 | ||
2148 | for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { | 2148 | for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { |
2149 | migrate->src[migrate->npages++] = MIGRATE_PFN_MIGRATE; | 2149 | migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE; |
2150 | migrate->dst[migrate->npages] = 0; | 2150 | migrate->dst[migrate->npages] = 0; |
2151 | migrate->npages++; | ||
2151 | migrate->cpages++; | 2152 | migrate->cpages++; |
2152 | } | 2153 | } |
2153 | 2154 | ||
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c index 6a03946469a9..53afbb919a1c 100644 --- a/mm/page_vma_mapped.c +++ b/mm/page_vma_mapped.c | |||
@@ -6,17 +6,6 @@ | |||
6 | 6 | ||
7 | #include "internal.h" | 7 | #include "internal.h" |
8 | 8 | ||
9 | static inline bool check_pmd(struct page_vma_mapped_walk *pvmw) | ||
10 | { | ||
11 | pmd_t pmde; | ||
12 | /* | ||
13 | * Make sure we don't re-load pmd between present and !trans_huge check. | ||
14 | * We need a consistent view. | ||
15 | */ | ||
16 | pmde = READ_ONCE(*pvmw->pmd); | ||
17 | return pmd_present(pmde) && !pmd_trans_huge(pmde); | ||
18 | } | ||
19 | |||
20 | static inline bool not_found(struct page_vma_mapped_walk *pvmw) | 9 | static inline bool not_found(struct page_vma_mapped_walk *pvmw) |
21 | { | 10 | { |
22 | page_vma_mapped_walk_done(pvmw); | 11 | page_vma_mapped_walk_done(pvmw); |
@@ -116,6 +105,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw) | |||
116 | pgd_t *pgd; | 105 | pgd_t *pgd; |
117 | p4d_t *p4d; | 106 | p4d_t *p4d; |
118 | pud_t *pud; | 107 | pud_t *pud; |
108 | pmd_t pmde; | ||
119 | 109 | ||
120 | /* The only possible pmd mapping has been handled on last iteration */ | 110 | /* The only possible pmd mapping has been handled on last iteration */ |
121 | if (pvmw->pmd && !pvmw->pte) | 111 | if (pvmw->pmd && !pvmw->pte) |
@@ -148,7 +138,13 @@ restart: | |||
148 | if (!pud_present(*pud)) | 138 | if (!pud_present(*pud)) |
149 | return false; | 139 | return false; |
150 | pvmw->pmd = pmd_offset(pud, pvmw->address); | 140 | pvmw->pmd = pmd_offset(pud, pvmw->address); |
151 | if (pmd_trans_huge(*pvmw->pmd) || is_pmd_migration_entry(*pvmw->pmd)) { | 141 | /* |
142 | * Make sure the pmd value isn't cached in a register by the | ||
143 | * compiler and used as a stale value after we've observed a | ||
144 | * subsequent update. | ||
145 | */ | ||
146 | pmde = READ_ONCE(*pvmw->pmd); | ||
147 | if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) { | ||
152 | pvmw->ptl = pmd_lock(mm, pvmw->pmd); | 148 | pvmw->ptl = pmd_lock(mm, pvmw->pmd); |
153 | if (likely(pmd_trans_huge(*pvmw->pmd))) { | 149 | if (likely(pmd_trans_huge(*pvmw->pmd))) { |
154 | if (pvmw->flags & PVMW_MIGRATION) | 150 | if (pvmw->flags & PVMW_MIGRATION) |
@@ -167,17 +163,15 @@ restart: | |||
167 | return not_found(pvmw); | 163 | return not_found(pvmw); |
168 | return true; | 164 | return true; |
169 | } | 165 | } |
170 | } else | 166 | } |
171 | WARN_ONCE(1, "Non present huge pmd without pmd migration enabled!"); | ||
172 | return not_found(pvmw); | 167 | return not_found(pvmw); |
173 | } else { | 168 | } else { |
174 | /* THP pmd was split under us: handle on pte level */ | 169 | /* THP pmd was split under us: handle on pte level */ |
175 | spin_unlock(pvmw->ptl); | 170 | spin_unlock(pvmw->ptl); |
176 | pvmw->ptl = NULL; | 171 | pvmw->ptl = NULL; |
177 | } | 172 | } |
178 | } else { | 173 | } else if (!pmd_present(pmde)) { |
179 | if (!check_pmd(pvmw)) | 174 | return false; |
180 | return false; | ||
181 | } | 175 | } |
182 | if (!map_pte(pvmw)) | 176 | if (!map_pte(pvmw)) |
183 | goto next_pte; | 177 | goto next_pte; |
diff --git a/mm/percpu.c b/mm/percpu.c index aa121cef76de..a0e0c82c1e4c 100644 --- a/mm/percpu.c +++ b/mm/percpu.c | |||
@@ -1329,7 +1329,9 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr) | |||
1329 | * @gfp: allocation flags | 1329 | * @gfp: allocation flags |
1330 | * | 1330 | * |
1331 | * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't | 1331 | * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't |
1332 | * contain %GFP_KERNEL, the allocation is atomic. | 1332 | * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN |
1333 | * then no warning will be triggered on invalid or failed allocation | ||
1334 | * requests. | ||
1333 | * | 1335 | * |
1334 | * RETURNS: | 1336 | * RETURNS: |
1335 | * Percpu pointer to the allocated area on success, NULL on failure. | 1337 | * Percpu pointer to the allocated area on success, NULL on failure. |
@@ -1337,10 +1339,11 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr) | |||
1337 | static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved, | 1339 | static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved, |
1338 | gfp_t gfp) | 1340 | gfp_t gfp) |
1339 | { | 1341 | { |
1342 | bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL; | ||
1343 | bool do_warn = !(gfp & __GFP_NOWARN); | ||
1340 | static int warn_limit = 10; | 1344 | static int warn_limit = 10; |
1341 | struct pcpu_chunk *chunk; | 1345 | struct pcpu_chunk *chunk; |
1342 | const char *err; | 1346 | const char *err; |
1343 | bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL; | ||
1344 | int slot, off, cpu, ret; | 1347 | int slot, off, cpu, ret; |
1345 | unsigned long flags; | 1348 | unsigned long flags; |
1346 | void __percpu *ptr; | 1349 | void __percpu *ptr; |
@@ -1361,7 +1364,7 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved, | |||
1361 | 1364 | ||
1362 | if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE || | 1365 | if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE || |
1363 | !is_power_of_2(align))) { | 1366 | !is_power_of_2(align))) { |
1364 | WARN(true, "illegal size (%zu) or align (%zu) for percpu allocation\n", | 1367 | WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n", |
1365 | size, align); | 1368 | size, align); |
1366 | return NULL; | 1369 | return NULL; |
1367 | } | 1370 | } |
@@ -1482,7 +1485,7 @@ fail_unlock: | |||
1482 | fail: | 1485 | fail: |
1483 | trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align); | 1486 | trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align); |
1484 | 1487 | ||
1485 | if (!is_atomic && warn_limit) { | 1488 | if (!is_atomic && do_warn && warn_limit) { |
1486 | pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n", | 1489 | pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n", |
1487 | size, align, is_atomic, err); | 1490 | size, align, is_atomic, err); |
1488 | dump_stack(); | 1491 | dump_stack(); |
@@ -1507,7 +1510,9 @@ fail: | |||
1507 | * | 1510 | * |
1508 | * Allocate zero-filled percpu area of @size bytes aligned at @align. If | 1511 | * Allocate zero-filled percpu area of @size bytes aligned at @align. If |
1509 | * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can | 1512 | * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can |
1510 | * be called from any context but is a lot more likely to fail. | 1513 | * be called from any context but is a lot more likely to fail. If @gfp |
1514 | * has __GFP_NOWARN then no warning will be triggered on invalid or failed | ||
1515 | * allocation requests. | ||
1511 | * | 1516 | * |
1512 | * RETURNS: | 1517 | * RETURNS: |
1513 | * Percpu pointer to the allocated area on success, NULL on failure. | 1518 | * Percpu pointer to the allocated area on success, NULL on failure. |
diff --git a/mm/swap_state.c b/mm/swap_state.c index ed91091d1e68..05b6803f0cce 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c | |||
@@ -39,10 +39,6 @@ struct address_space *swapper_spaces[MAX_SWAPFILES]; | |||
39 | static unsigned int nr_swapper_spaces[MAX_SWAPFILES]; | 39 | static unsigned int nr_swapper_spaces[MAX_SWAPFILES]; |
40 | bool swap_vma_readahead = true; | 40 | bool swap_vma_readahead = true; |
41 | 41 | ||
42 | #define SWAP_RA_MAX_ORDER_DEFAULT 3 | ||
43 | |||
44 | static int swap_ra_max_order = SWAP_RA_MAX_ORDER_DEFAULT; | ||
45 | |||
46 | #define SWAP_RA_WIN_SHIFT (PAGE_SHIFT / 2) | 42 | #define SWAP_RA_WIN_SHIFT (PAGE_SHIFT / 2) |
47 | #define SWAP_RA_HITS_MASK ((1UL << SWAP_RA_WIN_SHIFT) - 1) | 43 | #define SWAP_RA_HITS_MASK ((1UL << SWAP_RA_WIN_SHIFT) - 1) |
48 | #define SWAP_RA_HITS_MAX SWAP_RA_HITS_MASK | 44 | #define SWAP_RA_HITS_MAX SWAP_RA_HITS_MASK |
@@ -664,6 +660,13 @@ struct page *swap_readahead_detect(struct vm_fault *vmf, | |||
664 | pte_t *tpte; | 660 | pte_t *tpte; |
665 | #endif | 661 | #endif |
666 | 662 | ||
663 | max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster), | ||
664 | SWAP_RA_ORDER_CEILING); | ||
665 | if (max_win == 1) { | ||
666 | swap_ra->win = 1; | ||
667 | return NULL; | ||
668 | } | ||
669 | |||
667 | faddr = vmf->address; | 670 | faddr = vmf->address; |
668 | entry = pte_to_swp_entry(vmf->orig_pte); | 671 | entry = pte_to_swp_entry(vmf->orig_pte); |
669 | if ((unlikely(non_swap_entry(entry)))) | 672 | if ((unlikely(non_swap_entry(entry)))) |
@@ -672,12 +675,6 @@ struct page *swap_readahead_detect(struct vm_fault *vmf, | |||
672 | if (page) | 675 | if (page) |
673 | return page; | 676 | return page; |
674 | 677 | ||
675 | max_win = 1 << READ_ONCE(swap_ra_max_order); | ||
676 | if (max_win == 1) { | ||
677 | swap_ra->win = 1; | ||
678 | return NULL; | ||
679 | } | ||
680 | |||
681 | fpfn = PFN_DOWN(faddr); | 678 | fpfn = PFN_DOWN(faddr); |
682 | swap_ra_info = GET_SWAP_RA_VAL(vma); | 679 | swap_ra_info = GET_SWAP_RA_VAL(vma); |
683 | pfn = PFN_DOWN(SWAP_RA_ADDR(swap_ra_info)); | 680 | pfn = PFN_DOWN(SWAP_RA_ADDR(swap_ra_info)); |
@@ -786,32 +783,8 @@ static struct kobj_attribute vma_ra_enabled_attr = | |||
786 | __ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show, | 783 | __ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show, |
787 | vma_ra_enabled_store); | 784 | vma_ra_enabled_store); |
788 | 785 | ||
789 | static ssize_t vma_ra_max_order_show(struct kobject *kobj, | ||
790 | struct kobj_attribute *attr, char *buf) | ||
791 | { | ||
792 | return sprintf(buf, "%d\n", swap_ra_max_order); | ||
793 | } | ||
794 | static ssize_t vma_ra_max_order_store(struct kobject *kobj, | ||
795 | struct kobj_attribute *attr, | ||
796 | const char *buf, size_t count) | ||
797 | { | ||
798 | int err, v; | ||
799 | |||
800 | err = kstrtoint(buf, 10, &v); | ||
801 | if (err || v > SWAP_RA_ORDER_CEILING || v <= 0) | ||
802 | return -EINVAL; | ||
803 | |||
804 | swap_ra_max_order = v; | ||
805 | |||
806 | return count; | ||
807 | } | ||
808 | static struct kobj_attribute vma_ra_max_order_attr = | ||
809 | __ATTR(vma_ra_max_order, 0644, vma_ra_max_order_show, | ||
810 | vma_ra_max_order_store); | ||
811 | |||
812 | static struct attribute *swap_attrs[] = { | 786 | static struct attribute *swap_attrs[] = { |
813 | &vma_ra_enabled_attr.attr, | 787 | &vma_ra_enabled_attr.attr, |
814 | &vma_ra_max_order_attr.attr, | ||
815 | NULL, | 788 | NULL, |
816 | }; | 789 | }; |
817 | 790 | ||
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 8a43db6284eb..673942094328 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c | |||
@@ -1695,11 +1695,6 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, | |||
1695 | for (i = 0; i < area->nr_pages; i++) { | 1695 | for (i = 0; i < area->nr_pages; i++) { |
1696 | struct page *page; | 1696 | struct page *page; |
1697 | 1697 | ||
1698 | if (fatal_signal_pending(current)) { | ||
1699 | area->nr_pages = i; | ||
1700 | goto fail_no_warn; | ||
1701 | } | ||
1702 | |||
1703 | if (node == NUMA_NO_NODE) | 1698 | if (node == NUMA_NO_NODE) |
1704 | page = alloc_page(alloc_mask|highmem_mask); | 1699 | page = alloc_page(alloc_mask|highmem_mask); |
1705 | else | 1700 | else |
@@ -1723,7 +1718,6 @@ fail: | |||
1723 | warn_alloc(gfp_mask, NULL, | 1718 | warn_alloc(gfp_mask, NULL, |
1724 | "vmalloc: allocation failure, allocated %ld of %ld bytes", | 1719 | "vmalloc: allocation failure, allocated %ld of %ld bytes", |
1725 | (area->nr_pages*PAGE_SIZE), area->size); | 1720 | (area->nr_pages*PAGE_SIZE), area->size); |
1726 | fail_no_warn: | ||
1727 | vfree(area->addr); | 1721 | vfree(area->addr); |
1728 | return NULL; | 1722 | return NULL; |
1729 | } | 1723 | } |
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 3bc890716c89..de2152730809 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c | |||
@@ -573,7 +573,7 @@ static int br_process_vlan_info(struct net_bridge *br, | |||
573 | } | 573 | } |
574 | *vinfo_last = NULL; | 574 | *vinfo_last = NULL; |
575 | 575 | ||
576 | return 0; | 576 | return err; |
577 | } | 577 | } |
578 | 578 | ||
579 | return br_vlan_info(br, p, cmd, vinfo_curr); | 579 | return br_vlan_info(br, p, cmd, vinfo_curr); |
diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c index 2585b100ebbb..276b60262981 100644 --- a/net/bridge/netfilter/ebtable_broute.c +++ b/net/bridge/netfilter/ebtable_broute.c | |||
@@ -65,8 +65,8 @@ static int ebt_broute(struct sk_buff *skb) | |||
65 | 65 | ||
66 | static int __net_init broute_net_init(struct net *net) | 66 | static int __net_init broute_net_init(struct net *net) |
67 | { | 67 | { |
68 | net->xt.broute_table = ebt_register_table(net, &broute_table, NULL); | 68 | return ebt_register_table(net, &broute_table, NULL, |
69 | return PTR_ERR_OR_ZERO(net->xt.broute_table); | 69 | &net->xt.broute_table); |
70 | } | 70 | } |
71 | 71 | ||
72 | static void __net_exit broute_net_exit(struct net *net) | 72 | static void __net_exit broute_net_exit(struct net *net) |
diff --git a/net/bridge/netfilter/ebtable_filter.c b/net/bridge/netfilter/ebtable_filter.c index 45a00dbdbcad..c41da5fac84f 100644 --- a/net/bridge/netfilter/ebtable_filter.c +++ b/net/bridge/netfilter/ebtable_filter.c | |||
@@ -93,8 +93,8 @@ static const struct nf_hook_ops ebt_ops_filter[] = { | |||
93 | 93 | ||
94 | static int __net_init frame_filter_net_init(struct net *net) | 94 | static int __net_init frame_filter_net_init(struct net *net) |
95 | { | 95 | { |
96 | net->xt.frame_filter = ebt_register_table(net, &frame_filter, ebt_ops_filter); | 96 | return ebt_register_table(net, &frame_filter, ebt_ops_filter, |
97 | return PTR_ERR_OR_ZERO(net->xt.frame_filter); | 97 | &net->xt.frame_filter); |
98 | } | 98 | } |
99 | 99 | ||
100 | static void __net_exit frame_filter_net_exit(struct net *net) | 100 | static void __net_exit frame_filter_net_exit(struct net *net) |
diff --git a/net/bridge/netfilter/ebtable_nat.c b/net/bridge/netfilter/ebtable_nat.c index 57cd5bb154e7..08df7406ecb3 100644 --- a/net/bridge/netfilter/ebtable_nat.c +++ b/net/bridge/netfilter/ebtable_nat.c | |||
@@ -93,8 +93,8 @@ static const struct nf_hook_ops ebt_ops_nat[] = { | |||
93 | 93 | ||
94 | static int __net_init frame_nat_net_init(struct net *net) | 94 | static int __net_init frame_nat_net_init(struct net *net) |
95 | { | 95 | { |
96 | net->xt.frame_nat = ebt_register_table(net, &frame_nat, ebt_ops_nat); | 96 | return ebt_register_table(net, &frame_nat, ebt_ops_nat, |
97 | return PTR_ERR_OR_ZERO(net->xt.frame_nat); | 97 | &net->xt.frame_nat); |
98 | } | 98 | } |
99 | 99 | ||
100 | static void __net_exit frame_nat_net_exit(struct net *net) | 100 | static void __net_exit frame_nat_net_exit(struct net *net) |
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 83951f978445..3b3dcf719e07 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c | |||
@@ -1169,9 +1169,8 @@ static void __ebt_unregister_table(struct net *net, struct ebt_table *table) | |||
1169 | kfree(table); | 1169 | kfree(table); |
1170 | } | 1170 | } |
1171 | 1171 | ||
1172 | struct ebt_table * | 1172 | int ebt_register_table(struct net *net, const struct ebt_table *input_table, |
1173 | ebt_register_table(struct net *net, const struct ebt_table *input_table, | 1173 | const struct nf_hook_ops *ops, struct ebt_table **res) |
1174 | const struct nf_hook_ops *ops) | ||
1175 | { | 1174 | { |
1176 | struct ebt_table_info *newinfo; | 1175 | struct ebt_table_info *newinfo; |
1177 | struct ebt_table *t, *table; | 1176 | struct ebt_table *t, *table; |
@@ -1183,7 +1182,7 @@ ebt_register_table(struct net *net, const struct ebt_table *input_table, | |||
1183 | repl->entries == NULL || repl->entries_size == 0 || | 1182 | repl->entries == NULL || repl->entries_size == 0 || |
1184 | repl->counters != NULL || input_table->private != NULL) { | 1183 | repl->counters != NULL || input_table->private != NULL) { |
1185 | BUGPRINT("Bad table data for ebt_register_table!!!\n"); | 1184 | BUGPRINT("Bad table data for ebt_register_table!!!\n"); |
1186 | return ERR_PTR(-EINVAL); | 1185 | return -EINVAL; |
1187 | } | 1186 | } |
1188 | 1187 | ||
1189 | /* Don't add one table to multiple lists. */ | 1188 | /* Don't add one table to multiple lists. */ |
@@ -1252,16 +1251,18 @@ ebt_register_table(struct net *net, const struct ebt_table *input_table, | |||
1252 | list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]); | 1251 | list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]); |
1253 | mutex_unlock(&ebt_mutex); | 1252 | mutex_unlock(&ebt_mutex); |
1254 | 1253 | ||
1254 | WRITE_ONCE(*res, table); | ||
1255 | |||
1255 | if (!ops) | 1256 | if (!ops) |
1256 | return table; | 1257 | return 0; |
1257 | 1258 | ||
1258 | ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks)); | 1259 | ret = nf_register_net_hooks(net, ops, hweight32(table->valid_hooks)); |
1259 | if (ret) { | 1260 | if (ret) { |
1260 | __ebt_unregister_table(net, table); | 1261 | __ebt_unregister_table(net, table); |
1261 | return ERR_PTR(ret); | 1262 | *res = NULL; |
1262 | } | 1263 | } |
1263 | 1264 | ||
1264 | return table; | 1265 | return ret; |
1265 | free_unlock: | 1266 | free_unlock: |
1266 | mutex_unlock(&ebt_mutex); | 1267 | mutex_unlock(&ebt_mutex); |
1267 | free_chainstack: | 1268 | free_chainstack: |
@@ -1276,7 +1277,7 @@ free_newinfo: | |||
1276 | free_table: | 1277 | free_table: |
1277 | kfree(table); | 1278 | kfree(table); |
1278 | out: | 1279 | out: |
1279 | return ERR_PTR(ret); | 1280 | return ret; |
1280 | } | 1281 | } |
1281 | 1282 | ||
1282 | void ebt_unregister_table(struct net *net, struct ebt_table *table, | 1283 | void ebt_unregister_table(struct net *net, struct ebt_table *table, |
diff --git a/net/can/af_can.c b/net/can/af_can.c index 88edac0f3e36..ecd5c703d11e 100644 --- a/net/can/af_can.c +++ b/net/can/af_can.c | |||
@@ -78,7 +78,7 @@ MODULE_PARM_DESC(stats_timer, "enable timer for statistics (default:on)"); | |||
78 | static struct kmem_cache *rcv_cache __read_mostly; | 78 | static struct kmem_cache *rcv_cache __read_mostly; |
79 | 79 | ||
80 | /* table of registered CAN protocols */ | 80 | /* table of registered CAN protocols */ |
81 | static const struct can_proto *proto_tab[CAN_NPROTO] __read_mostly; | 81 | static const struct can_proto __rcu *proto_tab[CAN_NPROTO] __read_mostly; |
82 | static DEFINE_MUTEX(proto_tab_lock); | 82 | static DEFINE_MUTEX(proto_tab_lock); |
83 | 83 | ||
84 | static atomic_t skbcounter = ATOMIC_INIT(0); | 84 | static atomic_t skbcounter = ATOMIC_INIT(0); |
@@ -788,7 +788,7 @@ int can_proto_register(const struct can_proto *cp) | |||
788 | 788 | ||
789 | mutex_lock(&proto_tab_lock); | 789 | mutex_lock(&proto_tab_lock); |
790 | 790 | ||
791 | if (proto_tab[proto]) { | 791 | if (rcu_access_pointer(proto_tab[proto])) { |
792 | pr_err("can: protocol %d already registered\n", proto); | 792 | pr_err("can: protocol %d already registered\n", proto); |
793 | err = -EBUSY; | 793 | err = -EBUSY; |
794 | } else | 794 | } else |
@@ -812,7 +812,7 @@ void can_proto_unregister(const struct can_proto *cp) | |||
812 | int proto = cp->protocol; | 812 | int proto = cp->protocol; |
813 | 813 | ||
814 | mutex_lock(&proto_tab_lock); | 814 | mutex_lock(&proto_tab_lock); |
815 | BUG_ON(proto_tab[proto] != cp); | 815 | BUG_ON(rcu_access_pointer(proto_tab[proto]) != cp); |
816 | RCU_INIT_POINTER(proto_tab[proto], NULL); | 816 | RCU_INIT_POINTER(proto_tab[proto], NULL); |
817 | mutex_unlock(&proto_tab_lock); | 817 | mutex_unlock(&proto_tab_lock); |
818 | 818 | ||
@@ -875,9 +875,14 @@ static int can_pernet_init(struct net *net) | |||
875 | spin_lock_init(&net->can.can_rcvlists_lock); | 875 | spin_lock_init(&net->can.can_rcvlists_lock); |
876 | net->can.can_rx_alldev_list = | 876 | net->can.can_rx_alldev_list = |
877 | kzalloc(sizeof(struct dev_rcv_lists), GFP_KERNEL); | 877 | kzalloc(sizeof(struct dev_rcv_lists), GFP_KERNEL); |
878 | 878 | if (!net->can.can_rx_alldev_list) | |
879 | goto out; | ||
879 | net->can.can_stats = kzalloc(sizeof(struct s_stats), GFP_KERNEL); | 880 | net->can.can_stats = kzalloc(sizeof(struct s_stats), GFP_KERNEL); |
881 | if (!net->can.can_stats) | ||
882 | goto out_free_alldev_list; | ||
880 | net->can.can_pstats = kzalloc(sizeof(struct s_pstats), GFP_KERNEL); | 883 | net->can.can_pstats = kzalloc(sizeof(struct s_pstats), GFP_KERNEL); |
884 | if (!net->can.can_pstats) | ||
885 | goto out_free_can_stats; | ||
881 | 886 | ||
882 | if (IS_ENABLED(CONFIG_PROC_FS)) { | 887 | if (IS_ENABLED(CONFIG_PROC_FS)) { |
883 | /* the statistics are updated every second (timer triggered) */ | 888 | /* the statistics are updated every second (timer triggered) */ |
@@ -892,6 +897,13 @@ static int can_pernet_init(struct net *net) | |||
892 | } | 897 | } |
893 | 898 | ||
894 | return 0; | 899 | return 0; |
900 | |||
901 | out_free_can_stats: | ||
902 | kfree(net->can.can_stats); | ||
903 | out_free_alldev_list: | ||
904 | kfree(net->can.can_rx_alldev_list); | ||
905 | out: | ||
906 | return -ENOMEM; | ||
895 | } | 907 | } |
896 | 908 | ||
897 | static void can_pernet_exit(struct net *net) | 909 | static void can_pernet_exit(struct net *net) |
diff --git a/net/can/bcm.c b/net/can/bcm.c index 47a8748d953a..13690334efa3 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c | |||
@@ -1493,13 +1493,14 @@ static int bcm_init(struct sock *sk) | |||
1493 | static int bcm_release(struct socket *sock) | 1493 | static int bcm_release(struct socket *sock) |
1494 | { | 1494 | { |
1495 | struct sock *sk = sock->sk; | 1495 | struct sock *sk = sock->sk; |
1496 | struct net *net = sock_net(sk); | 1496 | struct net *net; |
1497 | struct bcm_sock *bo; | 1497 | struct bcm_sock *bo; |
1498 | struct bcm_op *op, *next; | 1498 | struct bcm_op *op, *next; |
1499 | 1499 | ||
1500 | if (sk == NULL) | 1500 | if (!sk) |
1501 | return 0; | 1501 | return 0; |
1502 | 1502 | ||
1503 | net = sock_net(sk); | ||
1503 | bo = bcm_sk(sk); | 1504 | bo = bcm_sk(sk); |
1504 | 1505 | ||
1505 | /* remove bcm_ops, timer, rx_unregister(), etc. */ | 1506 | /* remove bcm_ops, timer, rx_unregister(), etc. */ |
diff --git a/net/core/dev.c b/net/core/dev.c index 588b473194a8..11596a302a26 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -1147,9 +1147,8 @@ static int dev_alloc_name_ns(struct net *net, | |||
1147 | return ret; | 1147 | return ret; |
1148 | } | 1148 | } |
1149 | 1149 | ||
1150 | static int dev_get_valid_name(struct net *net, | 1150 | int dev_get_valid_name(struct net *net, struct net_device *dev, |
1151 | struct net_device *dev, | 1151 | const char *name) |
1152 | const char *name) | ||
1153 | { | 1152 | { |
1154 | BUG_ON(!net); | 1153 | BUG_ON(!net); |
1155 | 1154 | ||
@@ -1165,6 +1164,7 @@ static int dev_get_valid_name(struct net *net, | |||
1165 | 1164 | ||
1166 | return 0; | 1165 | return 0; |
1167 | } | 1166 | } |
1167 | EXPORT_SYMBOL(dev_get_valid_name); | ||
1168 | 1168 | ||
1169 | /** | 1169 | /** |
1170 | * dev_change_name - change name of a device | 1170 | * dev_change_name - change name of a device |
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c index 709a4e6fb447..f9c7a88cd981 100644 --- a/net/core/dev_ioctl.c +++ b/net/core/dev_ioctl.c | |||
@@ -303,7 +303,18 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd) | |||
303 | case SIOCSIFTXQLEN: | 303 | case SIOCSIFTXQLEN: |
304 | if (ifr->ifr_qlen < 0) | 304 | if (ifr->ifr_qlen < 0) |
305 | return -EINVAL; | 305 | return -EINVAL; |
306 | dev->tx_queue_len = ifr->ifr_qlen; | 306 | if (dev->tx_queue_len ^ ifr->ifr_qlen) { |
307 | unsigned int orig_len = dev->tx_queue_len; | ||
308 | |||
309 | dev->tx_queue_len = ifr->ifr_qlen; | ||
310 | err = call_netdevice_notifiers( | ||
311 | NETDEV_CHANGE_TX_QUEUE_LEN, dev); | ||
312 | err = notifier_to_errno(err); | ||
313 | if (err) { | ||
314 | dev->tx_queue_len = orig_len; | ||
315 | return err; | ||
316 | } | ||
317 | } | ||
307 | return 0; | 318 | return 0; |
308 | 319 | ||
309 | case SIOCSIFNAME: | 320 | case SIOCSIFNAME: |
diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 3228411ada0f..9a9a3d77e327 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c | |||
@@ -436,7 +436,7 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32, | |||
436 | EXPORT_SYMBOL(ethtool_convert_link_mode_to_legacy_u32); | 436 | EXPORT_SYMBOL(ethtool_convert_link_mode_to_legacy_u32); |
437 | 437 | ||
438 | /* return false if legacy contained non-0 deprecated fields | 438 | /* return false if legacy contained non-0 deprecated fields |
439 | * transceiver/maxtxpkt/maxrxpkt. rest of ksettings always updated | 439 | * maxtxpkt/maxrxpkt. rest of ksettings always updated |
440 | */ | 440 | */ |
441 | static bool | 441 | static bool |
442 | convert_legacy_settings_to_link_ksettings( | 442 | convert_legacy_settings_to_link_ksettings( |
@@ -451,8 +451,7 @@ convert_legacy_settings_to_link_ksettings( | |||
451 | * deprecated legacy fields, and they should not use | 451 | * deprecated legacy fields, and they should not use |
452 | * %ETHTOOL_GLINKSETTINGS/%ETHTOOL_SLINKSETTINGS | 452 | * %ETHTOOL_GLINKSETTINGS/%ETHTOOL_SLINKSETTINGS |
453 | */ | 453 | */ |
454 | if (legacy_settings->transceiver || | 454 | if (legacy_settings->maxtxpkt || |
455 | legacy_settings->maxtxpkt || | ||
456 | legacy_settings->maxrxpkt) | 455 | legacy_settings->maxrxpkt) |
457 | retval = false; | 456 | retval = false; |
458 | 457 | ||
diff --git a/net/core/filter.c b/net/core/filter.c index 74b8c91fb5f4..6ae94f825f72 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
@@ -1839,31 +1839,32 @@ static const struct bpf_func_proto bpf_redirect_proto = { | |||
1839 | .arg2_type = ARG_ANYTHING, | 1839 | .arg2_type = ARG_ANYTHING, |
1840 | }; | 1840 | }; |
1841 | 1841 | ||
1842 | BPF_CALL_3(bpf_sk_redirect_map, struct bpf_map *, map, u32, key, u64, flags) | 1842 | BPF_CALL_4(bpf_sk_redirect_map, struct sk_buff *, skb, |
1843 | struct bpf_map *, map, u32, key, u64, flags) | ||
1843 | { | 1844 | { |
1844 | struct redirect_info *ri = this_cpu_ptr(&redirect_info); | 1845 | struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); |
1845 | 1846 | ||
1847 | /* If user passes invalid input drop the packet. */ | ||
1846 | if (unlikely(flags)) | 1848 | if (unlikely(flags)) |
1847 | return SK_ABORTED; | 1849 | return SK_DROP; |
1848 | 1850 | ||
1849 | ri->ifindex = key; | 1851 | tcb->bpf.key = key; |
1850 | ri->flags = flags; | 1852 | tcb->bpf.flags = flags; |
1851 | ri->map = map; | 1853 | tcb->bpf.map = map; |
1852 | 1854 | ||
1853 | return SK_REDIRECT; | 1855 | return SK_PASS; |
1854 | } | 1856 | } |
1855 | 1857 | ||
1856 | struct sock *do_sk_redirect_map(void) | 1858 | struct sock *do_sk_redirect_map(struct sk_buff *skb) |
1857 | { | 1859 | { |
1858 | struct redirect_info *ri = this_cpu_ptr(&redirect_info); | 1860 | struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); |
1859 | struct sock *sk = NULL; | 1861 | struct sock *sk = NULL; |
1860 | 1862 | ||
1861 | if (ri->map) { | 1863 | if (tcb->bpf.map) { |
1862 | sk = __sock_map_lookup_elem(ri->map, ri->ifindex); | 1864 | sk = __sock_map_lookup_elem(tcb->bpf.map, tcb->bpf.key); |
1863 | 1865 | ||
1864 | ri->ifindex = 0; | 1866 | tcb->bpf.key = 0; |
1865 | ri->map = NULL; | 1867 | tcb->bpf.map = NULL; |
1866 | /* we do not clear flags for future lookup */ | ||
1867 | } | 1868 | } |
1868 | 1869 | ||
1869 | return sk; | 1870 | return sk; |
@@ -1873,9 +1874,10 @@ static const struct bpf_func_proto bpf_sk_redirect_map_proto = { | |||
1873 | .func = bpf_sk_redirect_map, | 1874 | .func = bpf_sk_redirect_map, |
1874 | .gpl_only = false, | 1875 | .gpl_only = false, |
1875 | .ret_type = RET_INTEGER, | 1876 | .ret_type = RET_INTEGER, |
1876 | .arg1_type = ARG_CONST_MAP_PTR, | 1877 | .arg1_type = ARG_PTR_TO_CTX, |
1877 | .arg2_type = ARG_ANYTHING, | 1878 | .arg2_type = ARG_CONST_MAP_PTR, |
1878 | .arg3_type = ARG_ANYTHING, | 1879 | .arg3_type = ARG_ANYTHING, |
1880 | .arg4_type = ARG_ANYTHING, | ||
1879 | }; | 1881 | }; |
1880 | 1882 | ||
1881 | BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb) | 1883 | BPF_CALL_1(bpf_get_cgroup_classid, const struct sk_buff *, skb) |
@@ -3683,7 +3685,6 @@ static bool sk_skb_is_valid_access(int off, int size, | |||
3683 | { | 3685 | { |
3684 | if (type == BPF_WRITE) { | 3686 | if (type == BPF_WRITE) { |
3685 | switch (off) { | 3687 | switch (off) { |
3686 | case bpf_ctx_range(struct __sk_buff, mark): | ||
3687 | case bpf_ctx_range(struct __sk_buff, tc_index): | 3688 | case bpf_ctx_range(struct __sk_buff, tc_index): |
3688 | case bpf_ctx_range(struct __sk_buff, priority): | 3689 | case bpf_ctx_range(struct __sk_buff, priority): |
3689 | break; | 3690 | break; |
@@ -3693,6 +3694,7 @@ static bool sk_skb_is_valid_access(int off, int size, | |||
3693 | } | 3694 | } |
3694 | 3695 | ||
3695 | switch (off) { | 3696 | switch (off) { |
3697 | case bpf_ctx_range(struct __sk_buff, mark): | ||
3696 | case bpf_ctx_range(struct __sk_buff, tc_classid): | 3698 | case bpf_ctx_range(struct __sk_buff, tc_classid): |
3697 | return false; | 3699 | return false; |
3698 | case bpf_ctx_range(struct __sk_buff, data): | 3700 | case bpf_ctx_range(struct __sk_buff, data): |
@@ -4242,6 +4244,31 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type, | |||
4242 | return insn - insn_buf; | 4244 | return insn - insn_buf; |
4243 | } | 4245 | } |
4244 | 4246 | ||
4247 | static u32 sk_skb_convert_ctx_access(enum bpf_access_type type, | ||
4248 | const struct bpf_insn *si, | ||
4249 | struct bpf_insn *insn_buf, | ||
4250 | struct bpf_prog *prog, u32 *target_size) | ||
4251 | { | ||
4252 | struct bpf_insn *insn = insn_buf; | ||
4253 | int off; | ||
4254 | |||
4255 | switch (si->off) { | ||
4256 | case offsetof(struct __sk_buff, data_end): | ||
4257 | off = si->off; | ||
4258 | off -= offsetof(struct __sk_buff, data_end); | ||
4259 | off += offsetof(struct sk_buff, cb); | ||
4260 | off += offsetof(struct tcp_skb_cb, bpf.data_end); | ||
4261 | *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg, | ||
4262 | si->src_reg, off); | ||
4263 | break; | ||
4264 | default: | ||
4265 | return bpf_convert_ctx_access(type, si, insn_buf, prog, | ||
4266 | target_size); | ||
4267 | } | ||
4268 | |||
4269 | return insn - insn_buf; | ||
4270 | } | ||
4271 | |||
4245 | const struct bpf_verifier_ops sk_filter_prog_ops = { | 4272 | const struct bpf_verifier_ops sk_filter_prog_ops = { |
4246 | .get_func_proto = sk_filter_func_proto, | 4273 | .get_func_proto = sk_filter_func_proto, |
4247 | .is_valid_access = sk_filter_is_valid_access, | 4274 | .is_valid_access = sk_filter_is_valid_access, |
@@ -4300,7 +4327,7 @@ const struct bpf_verifier_ops sock_ops_prog_ops = { | |||
4300 | const struct bpf_verifier_ops sk_skb_prog_ops = { | 4327 | const struct bpf_verifier_ops sk_skb_prog_ops = { |
4301 | .get_func_proto = sk_skb_func_proto, | 4328 | .get_func_proto = sk_skb_func_proto, |
4302 | .is_valid_access = sk_skb_is_valid_access, | 4329 | .is_valid_access = sk_skb_is_valid_access, |
4303 | .convert_ctx_access = bpf_convert_ctx_access, | 4330 | .convert_ctx_access = sk_skb_convert_ctx_access, |
4304 | .gen_prologue = sk_skb_prologue, | 4331 | .gen_prologue = sk_skb_prologue, |
4305 | }; | 4332 | }; |
4306 | 4333 | ||
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index d4bcdcc68e92..5ace48926b19 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c | |||
@@ -1483,7 +1483,10 @@ static const struct nla_policy ifla_policy[IFLA_MAX+1] = { | |||
1483 | [IFLA_LINKINFO] = { .type = NLA_NESTED }, | 1483 | [IFLA_LINKINFO] = { .type = NLA_NESTED }, |
1484 | [IFLA_NET_NS_PID] = { .type = NLA_U32 }, | 1484 | [IFLA_NET_NS_PID] = { .type = NLA_U32 }, |
1485 | [IFLA_NET_NS_FD] = { .type = NLA_U32 }, | 1485 | [IFLA_NET_NS_FD] = { .type = NLA_U32 }, |
1486 | [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 }, | 1486 | /* IFLA_IFALIAS is a string, but policy is set to NLA_BINARY to |
1487 | * allow 0-length string (needed to remove an alias). | ||
1488 | */ | ||
1489 | [IFLA_IFALIAS] = { .type = NLA_BINARY, .len = IFALIASZ - 1 }, | ||
1487 | [IFLA_VFINFO_LIST] = {. type = NLA_NESTED }, | 1490 | [IFLA_VFINFO_LIST] = {. type = NLA_NESTED }, |
1488 | [IFLA_VF_PORTS] = { .type = NLA_NESTED }, | 1491 | [IFLA_VF_PORTS] = { .type = NLA_NESTED }, |
1489 | [IFLA_PORT_SELF] = { .type = NLA_NESTED }, | 1492 | [IFLA_PORT_SELF] = { .type = NLA_NESTED }, |
@@ -2093,7 +2096,7 @@ static int do_setlink(const struct sk_buff *skb, | |||
2093 | dev->tx_queue_len = orig_len; | 2096 | dev->tx_queue_len = orig_len; |
2094 | goto errout; | 2097 | goto errout; |
2095 | } | 2098 | } |
2096 | status |= DO_SETLINK_NOTIFY; | 2099 | status |= DO_SETLINK_MODIFIED; |
2097 | } | 2100 | } |
2098 | } | 2101 | } |
2099 | 2102 | ||
@@ -2248,7 +2251,7 @@ static int do_setlink(const struct sk_buff *skb, | |||
2248 | 2251 | ||
2249 | errout: | 2252 | errout: |
2250 | if (status & DO_SETLINK_MODIFIED) { | 2253 | if (status & DO_SETLINK_MODIFIED) { |
2251 | if (status & DO_SETLINK_NOTIFY) | 2254 | if ((status & DO_SETLINK_NOTIFY) == DO_SETLINK_NOTIFY) |
2252 | netdev_state_change(dev); | 2255 | netdev_state_change(dev); |
2253 | 2256 | ||
2254 | if (err < 0) | 2257 | if (err < 0) |
@@ -4279,13 +4282,17 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi | |||
4279 | 4282 | ||
4280 | switch (event) { | 4283 | switch (event) { |
4281 | case NETDEV_REBOOT: | 4284 | case NETDEV_REBOOT: |
4285 | case NETDEV_CHANGEMTU: | ||
4282 | case NETDEV_CHANGEADDR: | 4286 | case NETDEV_CHANGEADDR: |
4283 | case NETDEV_CHANGENAME: | 4287 | case NETDEV_CHANGENAME: |
4284 | case NETDEV_FEAT_CHANGE: | 4288 | case NETDEV_FEAT_CHANGE: |
4285 | case NETDEV_BONDING_FAILOVER: | 4289 | case NETDEV_BONDING_FAILOVER: |
4290 | case NETDEV_POST_TYPE_CHANGE: | ||
4286 | case NETDEV_NOTIFY_PEERS: | 4291 | case NETDEV_NOTIFY_PEERS: |
4292 | case NETDEV_CHANGEUPPER: | ||
4287 | case NETDEV_RESEND_IGMP: | 4293 | case NETDEV_RESEND_IGMP: |
4288 | case NETDEV_CHANGEINFODATA: | 4294 | case NETDEV_CHANGEINFODATA: |
4295 | case NETDEV_CHANGE_TX_QUEUE_LEN: | ||
4289 | rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event), | 4296 | rtmsg_ifinfo_event(RTM_NEWLINK, dev, 0, rtnl_get_event(event), |
4290 | GFP_KERNEL); | 4297 | GFP_KERNEL); |
4291 | break; | 4298 | break; |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 16982de649b9..24656076906d 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -1124,9 +1124,13 @@ int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, | |||
1124 | 1124 | ||
1125 | err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len); | 1125 | err = __zerocopy_sg_from_iter(sk, skb, &msg->msg_iter, len); |
1126 | if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) { | 1126 | if (err == -EFAULT || (err == -EMSGSIZE && skb->len == orig_len)) { |
1127 | struct sock *save_sk = skb->sk; | ||
1128 | |||
1127 | /* Streams do not free skb on error. Reset to prev state. */ | 1129 | /* Streams do not free skb on error. Reset to prev state. */ |
1128 | msg->msg_iter = orig_iter; | 1130 | msg->msg_iter = orig_iter; |
1131 | skb->sk = sk; | ||
1129 | ___pskb_trim(skb, orig_len); | 1132 | ___pskb_trim(skb, orig_len); |
1133 | skb->sk = save_sk; | ||
1130 | return err; | 1134 | return err; |
1131 | } | 1135 | } |
1132 | 1136 | ||
@@ -1896,7 +1900,7 @@ void *__pskb_pull_tail(struct sk_buff *skb, int delta) | |||
1896 | } | 1900 | } |
1897 | 1901 | ||
1898 | /* If we need update frag list, we are in troubles. | 1902 | /* If we need update frag list, we are in troubles. |
1899 | * Certainly, it possible to add an offset to skb data, | 1903 | * Certainly, it is possible to add an offset to skb data, |
1900 | * but taking into account that pulling is expected to | 1904 | * but taking into account that pulling is expected to |
1901 | * be very rare operation, it is worth to fight against | 1905 | * be very rare operation, it is worth to fight against |
1902 | * further bloating skb head and crucify ourselves here instead. | 1906 | * further bloating skb head and crucify ourselves here instead. |
diff --git a/net/core/sock.c b/net/core/sock.c index 23953b741a41..415f441c63b9 100644 --- a/net/core/sock.c +++ b/net/core/sock.c | |||
@@ -1677,12 +1677,17 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) | |||
1677 | newsk->sk_dst_pending_confirm = 0; | 1677 | newsk->sk_dst_pending_confirm = 0; |
1678 | newsk->sk_wmem_queued = 0; | 1678 | newsk->sk_wmem_queued = 0; |
1679 | newsk->sk_forward_alloc = 0; | 1679 | newsk->sk_forward_alloc = 0; |
1680 | |||
1681 | /* sk->sk_memcg will be populated at accept() time */ | ||
1682 | newsk->sk_memcg = NULL; | ||
1683 | |||
1680 | atomic_set(&newsk->sk_drops, 0); | 1684 | atomic_set(&newsk->sk_drops, 0); |
1681 | newsk->sk_send_head = NULL; | 1685 | newsk->sk_send_head = NULL; |
1682 | newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK; | 1686 | newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK; |
1683 | atomic_set(&newsk->sk_zckey, 0); | 1687 | atomic_set(&newsk->sk_zckey, 0); |
1684 | 1688 | ||
1685 | sock_reset_flag(newsk, SOCK_DONE); | 1689 | sock_reset_flag(newsk, SOCK_DONE); |
1690 | cgroup_sk_alloc(&newsk->sk_cgrp_data); | ||
1686 | 1691 | ||
1687 | rcu_read_lock(); | 1692 | rcu_read_lock(); |
1688 | filter = rcu_dereference(sk->sk_filter); | 1693 | filter = rcu_dereference(sk->sk_filter); |
@@ -1714,9 +1719,6 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority) | |||
1714 | newsk->sk_incoming_cpu = raw_smp_processor_id(); | 1719 | newsk->sk_incoming_cpu = raw_smp_processor_id(); |
1715 | atomic64_set(&newsk->sk_cookie, 0); | 1720 | atomic64_set(&newsk->sk_cookie, 0); |
1716 | 1721 | ||
1717 | mem_cgroup_sk_alloc(newsk); | ||
1718 | cgroup_sk_alloc(&newsk->sk_cgrp_data); | ||
1719 | |||
1720 | /* | 1722 | /* |
1721 | * Before updating sk_refcnt, we must commit prior changes to memory | 1723 | * Before updating sk_refcnt, we must commit prior changes to memory |
1722 | * (Documentation/RCU/rculist_nulls.txt for details) | 1724 | * (Documentation/RCU/rculist_nulls.txt for details) |
diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c index eed1ebf7f29d..b1e0dbea1e8c 100644 --- a/net/core/sock_reuseport.c +++ b/net/core/sock_reuseport.c | |||
@@ -36,9 +36,14 @@ int reuseport_alloc(struct sock *sk) | |||
36 | * soft irq of receive path or setsockopt from process context | 36 | * soft irq of receive path or setsockopt from process context |
37 | */ | 37 | */ |
38 | spin_lock_bh(&reuseport_lock); | 38 | spin_lock_bh(&reuseport_lock); |
39 | WARN_ONCE(rcu_dereference_protected(sk->sk_reuseport_cb, | 39 | |
40 | lockdep_is_held(&reuseport_lock)), | 40 | /* Allocation attempts can occur concurrently via the setsockopt path |
41 | "multiple allocations for the same socket"); | 41 | * and the bind/hash path. Nothing to do when we lose the race. |
42 | */ | ||
43 | if (rcu_dereference_protected(sk->sk_reuseport_cb, | ||
44 | lockdep_is_held(&reuseport_lock))) | ||
45 | goto out; | ||
46 | |||
42 | reuse = __reuseport_alloc(INIT_SOCKS); | 47 | reuse = __reuseport_alloc(INIT_SOCKS); |
43 | if (!reuse) { | 48 | if (!reuse) { |
44 | spin_unlock_bh(&reuseport_lock); | 49 | spin_unlock_bh(&reuseport_lock); |
@@ -49,6 +54,7 @@ int reuseport_alloc(struct sock *sk) | |||
49 | reuse->num_socks = 1; | 54 | reuse->num_socks = 1; |
50 | rcu_assign_pointer(sk->sk_reuseport_cb, reuse); | 55 | rcu_assign_pointer(sk->sk_reuseport_cb, reuse); |
51 | 56 | ||
57 | out: | ||
52 | spin_unlock_bh(&reuseport_lock); | 58 | spin_unlock_bh(&reuseport_lock); |
53 | 59 | ||
54 | return 0; | 60 | return 0; |
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index 001c08696334..e65fcb45c3f6 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c | |||
@@ -414,8 +414,7 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk, | |||
414 | sk_daddr_set(newsk, ireq->ir_rmt_addr); | 414 | sk_daddr_set(newsk, ireq->ir_rmt_addr); |
415 | sk_rcv_saddr_set(newsk, ireq->ir_loc_addr); | 415 | sk_rcv_saddr_set(newsk, ireq->ir_loc_addr); |
416 | newinet->inet_saddr = ireq->ir_loc_addr; | 416 | newinet->inet_saddr = ireq->ir_loc_addr; |
417 | newinet->inet_opt = ireq->opt; | 417 | RCU_INIT_POINTER(newinet->inet_opt, rcu_dereference(ireq->ireq_opt)); |
418 | ireq->opt = NULL; | ||
419 | newinet->mc_index = inet_iif(skb); | 418 | newinet->mc_index = inet_iif(skb); |
420 | newinet->mc_ttl = ip_hdr(skb)->ttl; | 419 | newinet->mc_ttl = ip_hdr(skb)->ttl; |
421 | newinet->inet_id = jiffies; | 420 | newinet->inet_id = jiffies; |
@@ -430,7 +429,10 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk, | |||
430 | if (__inet_inherit_port(sk, newsk) < 0) | 429 | if (__inet_inherit_port(sk, newsk) < 0) |
431 | goto put_and_exit; | 430 | goto put_and_exit; |
432 | *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash)); | 431 | *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash)); |
433 | 432 | if (*own_req) | |
433 | ireq->ireq_opt = NULL; | ||
434 | else | ||
435 | newinet->inet_opt = NULL; | ||
434 | return newsk; | 436 | return newsk; |
435 | 437 | ||
436 | exit_overflow: | 438 | exit_overflow: |
@@ -441,6 +443,7 @@ exit: | |||
441 | __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS); | 443 | __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS); |
442 | return NULL; | 444 | return NULL; |
443 | put_and_exit: | 445 | put_and_exit: |
446 | newinet->inet_opt = NULL; | ||
444 | inet_csk_prepare_forced_close(newsk); | 447 | inet_csk_prepare_forced_close(newsk); |
445 | dccp_done(newsk); | 448 | dccp_done(newsk); |
446 | goto exit; | 449 | goto exit; |
@@ -492,7 +495,7 @@ static int dccp_v4_send_response(const struct sock *sk, struct request_sock *req | |||
492 | ireq->ir_rmt_addr); | 495 | ireq->ir_rmt_addr); |
493 | err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr, | 496 | err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr, |
494 | ireq->ir_rmt_addr, | 497 | ireq->ir_rmt_addr, |
495 | ireq->opt); | 498 | ireq_opt_deref(ireq)); |
496 | err = net_xmit_eval(err); | 499 | err = net_xmit_eval(err); |
497 | } | 500 | } |
498 | 501 | ||
@@ -548,7 +551,7 @@ out: | |||
548 | static void dccp_v4_reqsk_destructor(struct request_sock *req) | 551 | static void dccp_v4_reqsk_destructor(struct request_sock *req) |
549 | { | 552 | { |
550 | dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg); | 553 | dccp_feat_list_purge(&dccp_rsk(req)->dreq_featneg); |
551 | kfree(inet_rsk(req)->opt); | 554 | kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1)); |
552 | } | 555 | } |
553 | 556 | ||
554 | void dccp_syn_ack_timeout(const struct request_sock *req) | 557 | void dccp_syn_ack_timeout(const struct request_sock *req) |
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c index 8737412c7b27..e1d4d898a007 100644 --- a/net/dns_resolver/dns_key.c +++ b/net/dns_resolver/dns_key.c | |||
@@ -224,7 +224,7 @@ static int dns_resolver_match_preparse(struct key_match_data *match_data) | |||
224 | static void dns_resolver_describe(const struct key *key, struct seq_file *m) | 224 | static void dns_resolver_describe(const struct key *key, struct seq_file *m) |
225 | { | 225 | { |
226 | seq_puts(m, key->description); | 226 | seq_puts(m, key->description); |
227 | if (key_is_instantiated(key)) { | 227 | if (key_is_positive(key)) { |
228 | int err = PTR_ERR(key->payload.data[dns_key_error]); | 228 | int err = PTR_ERR(key->payload.data[dns_key_error]); |
229 | 229 | ||
230 | if (err) | 230 | if (err) |
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c index 873af0108e24..045d8a176279 100644 --- a/net/dsa/dsa2.c +++ b/net/dsa/dsa2.c | |||
@@ -496,14 +496,15 @@ static int dsa_cpu_parse(struct dsa_port *port, u32 index, | |||
496 | if (!ethernet) | 496 | if (!ethernet) |
497 | return -EINVAL; | 497 | return -EINVAL; |
498 | ethernet_dev = of_find_net_device_by_node(ethernet); | 498 | ethernet_dev = of_find_net_device_by_node(ethernet); |
499 | if (!ethernet_dev) | ||
500 | return -EPROBE_DEFER; | ||
499 | } else { | 501 | } else { |
500 | ethernet_dev = dsa_dev_to_net_device(ds->cd->netdev[index]); | 502 | ethernet_dev = dsa_dev_to_net_device(ds->cd->netdev[index]); |
503 | if (!ethernet_dev) | ||
504 | return -EPROBE_DEFER; | ||
501 | dev_put(ethernet_dev); | 505 | dev_put(ethernet_dev); |
502 | } | 506 | } |
503 | 507 | ||
504 | if (!ethernet_dev) | ||
505 | return -EPROBE_DEFER; | ||
506 | |||
507 | if (!dst->cpu_dp) { | 508 | if (!dst->cpu_dp) { |
508 | dst->cpu_dp = port; | 509 | dst->cpu_dp = port; |
509 | dst->cpu_dp->netdev = ethernet_dev; | 510 | dst->cpu_dp->netdev = ethernet_dev; |
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig index 91a2557942fa..f48fe6fc7e8c 100644 --- a/net/ipv4/Kconfig +++ b/net/ipv4/Kconfig | |||
@@ -70,11 +70,9 @@ config IP_MULTIPLE_TABLES | |||
70 | address into account. Furthermore, the TOS (Type-Of-Service) field | 70 | address into account. Furthermore, the TOS (Type-Of-Service) field |
71 | of the packet can be used for routing decisions as well. | 71 | of the packet can be used for routing decisions as well. |
72 | 72 | ||
73 | If you are interested in this, please see the preliminary | 73 | If you need more information, see the Linux Advanced |
74 | documentation at <http://www.compendium.com.ar/policy-routing.txt> | 74 | Routing and Traffic Control documentation at |
75 | and <ftp://post.tepkom.ru/pub/vol2/Linux/docs/advanced-routing.tex>. | 75 | <http://lartc.org/howto/lartc.rpdb.html> |
76 | You will need supporting software from | ||
77 | <ftp://ftp.tux.org/pub/net/ip-routing/>. | ||
78 | 76 | ||
79 | If unsure, say N. | 77 | If unsure, say N. |
80 | 78 | ||
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index 2ae8f54cb321..82178cc69c96 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c | |||
@@ -1951,7 +1951,7 @@ int cipso_v4_req_setattr(struct request_sock *req, | |||
1951 | buf = NULL; | 1951 | buf = NULL; |
1952 | 1952 | ||
1953 | req_inet = inet_rsk(req); | 1953 | req_inet = inet_rsk(req); |
1954 | opt = xchg(&req_inet->opt, opt); | 1954 | opt = xchg((__force struct ip_options_rcu **)&req_inet->ireq_opt, opt); |
1955 | if (opt) | 1955 | if (opt) |
1956 | kfree_rcu(opt, rcu); | 1956 | kfree_rcu(opt, rcu); |
1957 | 1957 | ||
@@ -1973,11 +1973,13 @@ req_setattr_failure: | |||
1973 | * values on failure. | 1973 | * values on failure. |
1974 | * | 1974 | * |
1975 | */ | 1975 | */ |
1976 | static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr) | 1976 | static int cipso_v4_delopt(struct ip_options_rcu __rcu **opt_ptr) |
1977 | { | 1977 | { |
1978 | struct ip_options_rcu *opt = rcu_dereference_protected(*opt_ptr, 1); | ||
1978 | int hdr_delta = 0; | 1979 | int hdr_delta = 0; |
1979 | struct ip_options_rcu *opt = *opt_ptr; | ||
1980 | 1980 | ||
1981 | if (!opt || opt->opt.cipso == 0) | ||
1982 | return 0; | ||
1981 | if (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) { | 1983 | if (opt->opt.srr || opt->opt.rr || opt->opt.ts || opt->opt.router_alert) { |
1982 | u8 cipso_len; | 1984 | u8 cipso_len; |
1983 | u8 cipso_off; | 1985 | u8 cipso_off; |
@@ -2039,14 +2041,10 @@ static int cipso_v4_delopt(struct ip_options_rcu **opt_ptr) | |||
2039 | */ | 2041 | */ |
2040 | void cipso_v4_sock_delattr(struct sock *sk) | 2042 | void cipso_v4_sock_delattr(struct sock *sk) |
2041 | { | 2043 | { |
2042 | int hdr_delta; | ||
2043 | struct ip_options_rcu *opt; | ||
2044 | struct inet_sock *sk_inet; | 2044 | struct inet_sock *sk_inet; |
2045 | int hdr_delta; | ||
2045 | 2046 | ||
2046 | sk_inet = inet_sk(sk); | 2047 | sk_inet = inet_sk(sk); |
2047 | opt = rcu_dereference_protected(sk_inet->inet_opt, 1); | ||
2048 | if (!opt || opt->opt.cipso == 0) | ||
2049 | return; | ||
2050 | 2048 | ||
2051 | hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt); | 2049 | hdr_delta = cipso_v4_delopt(&sk_inet->inet_opt); |
2052 | if (sk_inet->is_icsk && hdr_delta > 0) { | 2050 | if (sk_inet->is_icsk && hdr_delta > 0) { |
@@ -2066,15 +2064,7 @@ void cipso_v4_sock_delattr(struct sock *sk) | |||
2066 | */ | 2064 | */ |
2067 | void cipso_v4_req_delattr(struct request_sock *req) | 2065 | void cipso_v4_req_delattr(struct request_sock *req) |
2068 | { | 2066 | { |
2069 | struct ip_options_rcu *opt; | 2067 | cipso_v4_delopt(&inet_rsk(req)->ireq_opt); |
2070 | struct inet_request_sock *req_inet; | ||
2071 | |||
2072 | req_inet = inet_rsk(req); | ||
2073 | opt = req_inet->opt; | ||
2074 | if (!opt || opt->opt.cipso == 0) | ||
2075 | return; | ||
2076 | |||
2077 | cipso_v4_delopt(&req_inet->opt); | ||
2078 | } | 2068 | } |
2079 | 2069 | ||
2080 | /** | 2070 | /** |
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c index 416bb304a281..1859c473b21a 100644 --- a/net/ipv4/gre_offload.c +++ b/net/ipv4/gre_offload.c | |||
@@ -86,7 +86,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb, | |||
86 | greh = (struct gre_base_hdr *)skb_transport_header(skb); | 86 | greh = (struct gre_base_hdr *)skb_transport_header(skb); |
87 | pcsum = (__sum16 *)(greh + 1); | 87 | pcsum = (__sum16 *)(greh + 1); |
88 | 88 | ||
89 | if (gso_partial) { | 89 | if (gso_partial && skb_is_gso(skb)) { |
90 | unsigned int partial_adj; | 90 | unsigned int partial_adj; |
91 | 91 | ||
92 | /* Adjust checksum to account for the fact that | 92 | /* Adjust checksum to account for the fact that |
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index c039c937ba90..b47a59cb3573 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c | |||
@@ -475,6 +475,7 @@ struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern) | |||
475 | } | 475 | } |
476 | spin_unlock_bh(&queue->fastopenq.lock); | 476 | spin_unlock_bh(&queue->fastopenq.lock); |
477 | } | 477 | } |
478 | mem_cgroup_sk_alloc(newsk); | ||
478 | out: | 479 | out: |
479 | release_sock(sk); | 480 | release_sock(sk); |
480 | if (req) | 481 | if (req) |
@@ -539,9 +540,11 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk, | |||
539 | { | 540 | { |
540 | const struct inet_request_sock *ireq = inet_rsk(req); | 541 | const struct inet_request_sock *ireq = inet_rsk(req); |
541 | struct net *net = read_pnet(&ireq->ireq_net); | 542 | struct net *net = read_pnet(&ireq->ireq_net); |
542 | struct ip_options_rcu *opt = ireq->opt; | 543 | struct ip_options_rcu *opt; |
543 | struct rtable *rt; | 544 | struct rtable *rt; |
544 | 545 | ||
546 | opt = ireq_opt_deref(ireq); | ||
547 | |||
545 | flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark, | 548 | flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark, |
546 | RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, | 549 | RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, |
547 | sk->sk_protocol, inet_sk_flowi_flags(sk), | 550 | sk->sk_protocol, inet_sk_flowi_flags(sk), |
@@ -575,10 +578,9 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk, | |||
575 | struct flowi4 *fl4; | 578 | struct flowi4 *fl4; |
576 | struct rtable *rt; | 579 | struct rtable *rt; |
577 | 580 | ||
581 | opt = rcu_dereference(ireq->ireq_opt); | ||
578 | fl4 = &newinet->cork.fl.u.ip4; | 582 | fl4 = &newinet->cork.fl.u.ip4; |
579 | 583 | ||
580 | rcu_read_lock(); | ||
581 | opt = rcu_dereference(newinet->inet_opt); | ||
582 | flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark, | 584 | flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark, |
583 | RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, | 585 | RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, |
584 | sk->sk_protocol, inet_sk_flowi_flags(sk), | 586 | sk->sk_protocol, inet_sk_flowi_flags(sk), |
@@ -591,13 +593,11 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk, | |||
591 | goto no_route; | 593 | goto no_route; |
592 | if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway) | 594 | if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway) |
593 | goto route_err; | 595 | goto route_err; |
594 | rcu_read_unlock(); | ||
595 | return &rt->dst; | 596 | return &rt->dst; |
596 | 597 | ||
597 | route_err: | 598 | route_err: |
598 | ip_rt_put(rt); | 599 | ip_rt_put(rt); |
599 | no_route: | 600 | no_route: |
600 | rcu_read_unlock(); | ||
601 | __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES); | 601 | __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES); |
602 | return NULL; | 602 | return NULL; |
603 | } | 603 | } |
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 597bb4cfe805..e7d15fb0d94d 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c | |||
@@ -456,10 +456,7 @@ static int inet_reuseport_add_sock(struct sock *sk, | |||
456 | return reuseport_add_sock(sk, sk2); | 456 | return reuseport_add_sock(sk, sk2); |
457 | } | 457 | } |
458 | 458 | ||
459 | /* Initial allocation may have already happened via setsockopt */ | 459 | return reuseport_alloc(sk); |
460 | if (!rcu_access_pointer(sk->sk_reuseport_cb)) | ||
461 | return reuseport_alloc(sk); | ||
462 | return 0; | ||
463 | } | 460 | } |
464 | 461 | ||
465 | int __inet_hash(struct sock *sk, struct sock *osk) | 462 | int __inet_hash(struct sock *sk, struct sock *osk) |
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index fb1ad22b5e29..cdd627355ed1 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c | |||
@@ -128,43 +128,68 @@ static struct rtnl_link_ops ipip_link_ops __read_mostly; | |||
128 | 128 | ||
129 | static int ipip_err(struct sk_buff *skb, u32 info) | 129 | static int ipip_err(struct sk_buff *skb, u32 info) |
130 | { | 130 | { |
131 | 131 | /* All the routers (except for Linux) return only | |
132 | /* All the routers (except for Linux) return only | 132 | * 8 bytes of packet payload. It means, that precise relaying of |
133 | 8 bytes of packet payload. It means, that precise relaying of | 133 | * ICMP in the real Internet is absolutely infeasible. |
134 | ICMP in the real Internet is absolutely infeasible. | 134 | */ |
135 | */ | ||
136 | struct net *net = dev_net(skb->dev); | 135 | struct net *net = dev_net(skb->dev); |
137 | struct ip_tunnel_net *itn = net_generic(net, ipip_net_id); | 136 | struct ip_tunnel_net *itn = net_generic(net, ipip_net_id); |
138 | const struct iphdr *iph = (const struct iphdr *)skb->data; | 137 | const struct iphdr *iph = (const struct iphdr *)skb->data; |
139 | struct ip_tunnel *t; | ||
140 | int err; | ||
141 | const int type = icmp_hdr(skb)->type; | 138 | const int type = icmp_hdr(skb)->type; |
142 | const int code = icmp_hdr(skb)->code; | 139 | const int code = icmp_hdr(skb)->code; |
140 | struct ip_tunnel *t; | ||
141 | int err = 0; | ||
142 | |||
143 | switch (type) { | ||
144 | case ICMP_DEST_UNREACH: | ||
145 | switch (code) { | ||
146 | case ICMP_SR_FAILED: | ||
147 | /* Impossible event. */ | ||
148 | goto out; | ||
149 | default: | ||
150 | /* All others are translated to HOST_UNREACH. | ||
151 | * rfc2003 contains "deep thoughts" about NET_UNREACH, | ||
152 | * I believe they are just ether pollution. --ANK | ||
153 | */ | ||
154 | break; | ||
155 | } | ||
156 | break; | ||
157 | |||
158 | case ICMP_TIME_EXCEEDED: | ||
159 | if (code != ICMP_EXC_TTL) | ||
160 | goto out; | ||
161 | break; | ||
162 | |||
163 | case ICMP_REDIRECT: | ||
164 | break; | ||
165 | |||
166 | default: | ||
167 | goto out; | ||
168 | } | ||
143 | 169 | ||
144 | err = -ENOENT; | ||
145 | t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, | 170 | t = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, |
146 | iph->daddr, iph->saddr, 0); | 171 | iph->daddr, iph->saddr, 0); |
147 | if (!t) | 172 | if (!t) { |
173 | err = -ENOENT; | ||
148 | goto out; | 174 | goto out; |
175 | } | ||
149 | 176 | ||
150 | if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) { | 177 | if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) { |
151 | ipv4_update_pmtu(skb, dev_net(skb->dev), info, | 178 | ipv4_update_pmtu(skb, net, info, t->parms.link, 0, |
152 | t->parms.link, 0, iph->protocol, 0); | 179 | iph->protocol, 0); |
153 | err = 0; | ||
154 | goto out; | 180 | goto out; |
155 | } | 181 | } |
156 | 182 | ||
157 | if (type == ICMP_REDIRECT) { | 183 | if (type == ICMP_REDIRECT) { |
158 | ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0, | 184 | ipv4_redirect(skb, net, t->parms.link, 0, iph->protocol, 0); |
159 | iph->protocol, 0); | ||
160 | err = 0; | ||
161 | goto out; | 185 | goto out; |
162 | } | 186 | } |
163 | 187 | ||
164 | if (t->parms.iph.daddr == 0) | 188 | if (t->parms.iph.daddr == 0) { |
189 | err = -ENOENT; | ||
165 | goto out; | 190 | goto out; |
191 | } | ||
166 | 192 | ||
167 | err = 0; | ||
168 | if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED) | 193 | if (t->parms.iph.ttl == 0 && type == ICMP_TIME_EXCEEDED) |
169 | goto out; | 194 | goto out; |
170 | 195 | ||
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c index 811689e523c3..f75fc6b53115 100644 --- a/net/ipv4/netfilter/ipt_SYNPROXY.c +++ b/net/ipv4/netfilter/ipt_SYNPROXY.c | |||
@@ -330,7 +330,8 @@ static unsigned int ipv4_synproxy_hook(void *priv, | |||
330 | if (synproxy == NULL) | 330 | if (synproxy == NULL) |
331 | return NF_ACCEPT; | 331 | return NF_ACCEPT; |
332 | 332 | ||
333 | if (nf_is_loopback_packet(skb)) | 333 | if (nf_is_loopback_packet(skb) || |
334 | ip_hdr(skb)->protocol != IPPROTO_TCP) | ||
334 | return NF_ACCEPT; | 335 | return NF_ACCEPT; |
335 | 336 | ||
336 | thoff = ip_hdrlen(skb); | 337 | thoff = ip_hdrlen(skb); |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index ac6fde5d45f1..3d9f1c2f81c5 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -2513,7 +2513,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or | |||
2513 | struct rtable *ort = (struct rtable *) dst_orig; | 2513 | struct rtable *ort = (struct rtable *) dst_orig; |
2514 | struct rtable *rt; | 2514 | struct rtable *rt; |
2515 | 2515 | ||
2516 | rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE, 0); | 2516 | rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_DEAD, 0); |
2517 | if (rt) { | 2517 | if (rt) { |
2518 | struct dst_entry *new = &rt->dst; | 2518 | struct dst_entry *new = &rt->dst; |
2519 | 2519 | ||
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index b1bb1b3a1082..77cf32a80952 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c | |||
@@ -355,7 +355,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) | |||
355 | /* We throwed the options of the initial SYN away, so we hope | 355 | /* We throwed the options of the initial SYN away, so we hope |
356 | * the ACK carries the same options again (see RFC1122 4.2.3.8) | 356 | * the ACK carries the same options again (see RFC1122 4.2.3.8) |
357 | */ | 357 | */ |
358 | ireq->opt = tcp_v4_save_options(sock_net(sk), skb); | 358 | RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(sock_net(sk), skb)); |
359 | 359 | ||
360 | if (security_inet_conn_request(sk, skb, req)) { | 360 | if (security_inet_conn_request(sk, skb, req)) { |
361 | reqsk_free(req); | 361 | reqsk_free(req); |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index c5d7656beeee..7eec3383702b 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -6196,7 +6196,7 @@ struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops, | |||
6196 | struct inet_request_sock *ireq = inet_rsk(req); | 6196 | struct inet_request_sock *ireq = inet_rsk(req); |
6197 | 6197 | ||
6198 | kmemcheck_annotate_bitfield(ireq, flags); | 6198 | kmemcheck_annotate_bitfield(ireq, flags); |
6199 | ireq->opt = NULL; | 6199 | ireq->ireq_opt = NULL; |
6200 | #if IS_ENABLED(CONFIG_IPV6) | 6200 | #if IS_ENABLED(CONFIG_IPV6) |
6201 | ireq->pktopts = NULL; | 6201 | ireq->pktopts = NULL; |
6202 | #endif | 6202 | #endif |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 85164d4d3e53..5b027c69cbc5 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -877,7 +877,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst, | |||
877 | 877 | ||
878 | err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr, | 878 | err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr, |
879 | ireq->ir_rmt_addr, | 879 | ireq->ir_rmt_addr, |
880 | ireq->opt); | 880 | ireq_opt_deref(ireq)); |
881 | err = net_xmit_eval(err); | 881 | err = net_xmit_eval(err); |
882 | } | 882 | } |
883 | 883 | ||
@@ -889,7 +889,7 @@ static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst, | |||
889 | */ | 889 | */ |
890 | static void tcp_v4_reqsk_destructor(struct request_sock *req) | 890 | static void tcp_v4_reqsk_destructor(struct request_sock *req) |
891 | { | 891 | { |
892 | kfree(inet_rsk(req)->opt); | 892 | kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1)); |
893 | } | 893 | } |
894 | 894 | ||
895 | #ifdef CONFIG_TCP_MD5SIG | 895 | #ifdef CONFIG_TCP_MD5SIG |
@@ -1265,10 +1265,11 @@ static void tcp_v4_init_req(struct request_sock *req, | |||
1265 | struct sk_buff *skb) | 1265 | struct sk_buff *skb) |
1266 | { | 1266 | { |
1267 | struct inet_request_sock *ireq = inet_rsk(req); | 1267 | struct inet_request_sock *ireq = inet_rsk(req); |
1268 | struct net *net = sock_net(sk_listener); | ||
1268 | 1269 | ||
1269 | sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr); | 1270 | sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr); |
1270 | sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr); | 1271 | sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr); |
1271 | ireq->opt = tcp_v4_save_options(sock_net(sk_listener), skb); | 1272 | RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb)); |
1272 | } | 1273 | } |
1273 | 1274 | ||
1274 | static struct dst_entry *tcp_v4_route_req(const struct sock *sk, | 1275 | static struct dst_entry *tcp_v4_route_req(const struct sock *sk, |
@@ -1355,10 +1356,9 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, | |||
1355 | sk_daddr_set(newsk, ireq->ir_rmt_addr); | 1356 | sk_daddr_set(newsk, ireq->ir_rmt_addr); |
1356 | sk_rcv_saddr_set(newsk, ireq->ir_loc_addr); | 1357 | sk_rcv_saddr_set(newsk, ireq->ir_loc_addr); |
1357 | newsk->sk_bound_dev_if = ireq->ir_iif; | 1358 | newsk->sk_bound_dev_if = ireq->ir_iif; |
1358 | newinet->inet_saddr = ireq->ir_loc_addr; | 1359 | newinet->inet_saddr = ireq->ir_loc_addr; |
1359 | inet_opt = ireq->opt; | 1360 | inet_opt = rcu_dereference(ireq->ireq_opt); |
1360 | rcu_assign_pointer(newinet->inet_opt, inet_opt); | 1361 | RCU_INIT_POINTER(newinet->inet_opt, inet_opt); |
1361 | ireq->opt = NULL; | ||
1362 | newinet->mc_index = inet_iif(skb); | 1362 | newinet->mc_index = inet_iif(skb); |
1363 | newinet->mc_ttl = ip_hdr(skb)->ttl; | 1363 | newinet->mc_ttl = ip_hdr(skb)->ttl; |
1364 | newinet->rcv_tos = ip_hdr(skb)->tos; | 1364 | newinet->rcv_tos = ip_hdr(skb)->tos; |
@@ -1403,9 +1403,12 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, | |||
1403 | if (__inet_inherit_port(sk, newsk) < 0) | 1403 | if (__inet_inherit_port(sk, newsk) < 0) |
1404 | goto put_and_exit; | 1404 | goto put_and_exit; |
1405 | *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash)); | 1405 | *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash)); |
1406 | if (*own_req) | 1406 | if (likely(*own_req)) { |
1407 | tcp_move_syn(newtp, req); | 1407 | tcp_move_syn(newtp, req); |
1408 | 1408 | ireq->ireq_opt = NULL; | |
1409 | } else { | ||
1410 | newinet->inet_opt = NULL; | ||
1411 | } | ||
1409 | return newsk; | 1412 | return newsk; |
1410 | 1413 | ||
1411 | exit_overflow: | 1414 | exit_overflow: |
@@ -1416,6 +1419,7 @@ exit: | |||
1416 | tcp_listendrop(sk); | 1419 | tcp_listendrop(sk); |
1417 | return NULL; | 1420 | return NULL; |
1418 | put_and_exit: | 1421 | put_and_exit: |
1422 | newinet->inet_opt = NULL; | ||
1419 | inet_csk_prepare_forced_close(newsk); | 1423 | inet_csk_prepare_forced_close(newsk); |
1420 | tcp_done(newsk); | 1424 | tcp_done(newsk); |
1421 | goto exit; | 1425 | goto exit; |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 0bc9e46a5369..ae60dd3faed0 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -739,8 +739,10 @@ static void tcp_tsq_handler(struct sock *sk) | |||
739 | struct tcp_sock *tp = tcp_sk(sk); | 739 | struct tcp_sock *tp = tcp_sk(sk); |
740 | 740 | ||
741 | if (tp->lost_out > tp->retrans_out && | 741 | if (tp->lost_out > tp->retrans_out && |
742 | tp->snd_cwnd > tcp_packets_in_flight(tp)) | 742 | tp->snd_cwnd > tcp_packets_in_flight(tp)) { |
743 | tcp_mstamp_refresh(tp); | ||
743 | tcp_xmit_retransmit_queue(sk); | 744 | tcp_xmit_retransmit_queue(sk); |
745 | } | ||
744 | 746 | ||
745 | tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle, | 747 | tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle, |
746 | 0, GFP_ATOMIC); | 748 | 0, GFP_ATOMIC); |
@@ -2237,6 +2239,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, | |||
2237 | 2239 | ||
2238 | sent_pkts = 0; | 2240 | sent_pkts = 0; |
2239 | 2241 | ||
2242 | tcp_mstamp_refresh(tp); | ||
2240 | if (!push_one) { | 2243 | if (!push_one) { |
2241 | /* Do MTU probing. */ | 2244 | /* Do MTU probing. */ |
2242 | result = tcp_mtu_probe(sk); | 2245 | result = tcp_mtu_probe(sk); |
@@ -2248,7 +2251,6 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, | |||
2248 | } | 2251 | } |
2249 | 2252 | ||
2250 | max_segs = tcp_tso_segs(sk, mss_now); | 2253 | max_segs = tcp_tso_segs(sk, mss_now); |
2251 | tcp_mstamp_refresh(tp); | ||
2252 | while ((skb = tcp_send_head(sk))) { | 2254 | while ((skb = tcp_send_head(sk))) { |
2253 | unsigned int limit; | 2255 | unsigned int limit; |
2254 | 2256 | ||
@@ -2841,8 +2843,10 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs) | |||
2841 | nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC); | 2843 | nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC); |
2842 | err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : | 2844 | err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : |
2843 | -ENOBUFS; | 2845 | -ENOBUFS; |
2844 | if (!err) | 2846 | if (!err) { |
2845 | skb->skb_mstamp = tp->tcp_mstamp; | 2847 | skb->skb_mstamp = tp->tcp_mstamp; |
2848 | tcp_rate_skb_sent(sk, skb); | ||
2849 | } | ||
2846 | } else { | 2850 | } else { |
2847 | err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); | 2851 | err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); |
2848 | } | 2852 | } |
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 5676237d2b0f..ebfbccae62fd 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c | |||
@@ -231,10 +231,7 @@ static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot) | |||
231 | } | 231 | } |
232 | } | 232 | } |
233 | 233 | ||
234 | /* Initial allocation may have already happened via setsockopt */ | 234 | return reuseport_alloc(sk); |
235 | if (!rcu_access_pointer(sk->sk_reuseport_cb)) | ||
236 | return reuseport_alloc(sk); | ||
237 | return 0; | ||
238 | } | 235 | } |
239 | 236 | ||
240 | /** | 237 | /** |
@@ -1061,7 +1058,7 @@ back_from_confirm: | |||
1061 | /* ... which is an evident application bug. --ANK */ | 1058 | /* ... which is an evident application bug. --ANK */ |
1062 | release_sock(sk); | 1059 | release_sock(sk); |
1063 | 1060 | ||
1064 | net_dbg_ratelimited("cork app bug 2\n"); | 1061 | net_dbg_ratelimited("socket already corked\n"); |
1065 | err = -EINVAL; | 1062 | err = -EINVAL; |
1066 | goto out; | 1063 | goto out; |
1067 | } | 1064 | } |
@@ -1144,7 +1141,7 @@ int udp_sendpage(struct sock *sk, struct page *page, int offset, | |||
1144 | if (unlikely(!up->pending)) { | 1141 | if (unlikely(!up->pending)) { |
1145 | release_sock(sk); | 1142 | release_sock(sk); |
1146 | 1143 | ||
1147 | net_dbg_ratelimited("udp cork app bug 3\n"); | 1144 | net_dbg_ratelimited("cork failed\n"); |
1148 | return -EINVAL; | 1145 | return -EINVAL; |
1149 | } | 1146 | } |
1150 | 1147 | ||
@@ -2240,20 +2237,16 @@ int udp_v4_early_demux(struct sk_buff *skb) | |||
2240 | iph = ip_hdr(skb); | 2237 | iph = ip_hdr(skb); |
2241 | uh = udp_hdr(skb); | 2238 | uh = udp_hdr(skb); |
2242 | 2239 | ||
2243 | if (skb->pkt_type == PACKET_BROADCAST || | 2240 | if (skb->pkt_type == PACKET_MULTICAST) { |
2244 | skb->pkt_type == PACKET_MULTICAST) { | ||
2245 | in_dev = __in_dev_get_rcu(skb->dev); | 2241 | in_dev = __in_dev_get_rcu(skb->dev); |
2246 | 2242 | ||
2247 | if (!in_dev) | 2243 | if (!in_dev) |
2248 | return 0; | 2244 | return 0; |
2249 | 2245 | ||
2250 | /* we are supposed to accept bcast packets */ | 2246 | ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr, |
2251 | if (skb->pkt_type == PACKET_MULTICAST) { | 2247 | iph->protocol); |
2252 | ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr, | 2248 | if (!ours) |
2253 | iph->protocol); | 2249 | return 0; |
2254 | if (!ours) | ||
2255 | return 0; | ||
2256 | } | ||
2257 | 2250 | ||
2258 | sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr, | 2251 | sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr, |
2259 | uh->source, iph->saddr, | 2252 | uh->source, iph->saddr, |
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 97658bfc1b58..e360d55be555 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c | |||
@@ -120,7 +120,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb, | |||
120 | * will be using a length value equal to only one MSS sized | 120 | * will be using a length value equal to only one MSS sized |
121 | * segment instead of the entire frame. | 121 | * segment instead of the entire frame. |
122 | */ | 122 | */ |
123 | if (gso_partial) { | 123 | if (gso_partial && skb_is_gso(skb)) { |
124 | uh->len = htons(skb_shinfo(skb)->gso_size + | 124 | uh->len = htons(skb_shinfo(skb)->gso_size + |
125 | SKB_GSO_CB(skb)->data_offset + | 125 | SKB_GSO_CB(skb)->data_offset + |
126 | skb->head - (unsigned char *)uh); | 126 | skb->head - (unsigned char *)uh); |
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 96861c702c06..4a96ebbf8eda 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c | |||
@@ -3820,8 +3820,8 @@ static void addrconf_dad_begin(struct inet6_ifaddr *ifp) | |||
3820 | goto out; | 3820 | goto out; |
3821 | 3821 | ||
3822 | if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) || | 3822 | if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) || |
3823 | dev_net(dev)->ipv6.devconf_all->accept_dad < 1 || | 3823 | (dev_net(dev)->ipv6.devconf_all->accept_dad < 1 && |
3824 | idev->cnf.accept_dad < 1 || | 3824 | idev->cnf.accept_dad < 1) || |
3825 | !(ifp->flags&IFA_F_TENTATIVE) || | 3825 | !(ifp->flags&IFA_F_TENTATIVE) || |
3826 | ifp->flags & IFA_F_NODAD) { | 3826 | ifp->flags & IFA_F_NODAD) { |
3827 | bump_id = ifp->flags & IFA_F_TENTATIVE; | 3827 | bump_id = ifp->flags & IFA_F_TENTATIVE; |
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c index 8081bafe441b..15535ee327c5 100644 --- a/net/ipv6/ip6_flowlabel.c +++ b/net/ipv6/ip6_flowlabel.c | |||
@@ -315,6 +315,7 @@ struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space, | |||
315 | } | 315 | } |
316 | opt_space->dst1opt = fopt->dst1opt; | 316 | opt_space->dst1opt = fopt->dst1opt; |
317 | opt_space->opt_flen = fopt->opt_flen; | 317 | opt_space->opt_flen = fopt->opt_flen; |
318 | opt_space->tot_len = fopt->tot_len; | ||
318 | return opt_space; | 319 | return opt_space; |
319 | } | 320 | } |
320 | EXPORT_SYMBOL_GPL(fl6_merge_options); | 321 | EXPORT_SYMBOL_GPL(fl6_merge_options); |
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 1602b491b281..59c121b932ac 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c | |||
@@ -408,13 +408,16 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
408 | case ICMPV6_DEST_UNREACH: | 408 | case ICMPV6_DEST_UNREACH: |
409 | net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n", | 409 | net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n", |
410 | t->parms.name); | 410 | t->parms.name); |
411 | break; | 411 | if (code != ICMPV6_PORT_UNREACH) |
412 | break; | ||
413 | return; | ||
412 | case ICMPV6_TIME_EXCEED: | 414 | case ICMPV6_TIME_EXCEED: |
413 | if (code == ICMPV6_EXC_HOPLIMIT) { | 415 | if (code == ICMPV6_EXC_HOPLIMIT) { |
414 | net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n", | 416 | net_dbg_ratelimited("%s: Too small hop limit or routing loop in tunnel!\n", |
415 | t->parms.name); | 417 | t->parms.name); |
418 | break; | ||
416 | } | 419 | } |
417 | break; | 420 | return; |
418 | case ICMPV6_PARAMPROB: | 421 | case ICMPV6_PARAMPROB: |
419 | teli = 0; | 422 | teli = 0; |
420 | if (code == ICMPV6_HDR_FIELD) | 423 | if (code == ICMPV6_HDR_FIELD) |
@@ -430,7 +433,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
430 | net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n", | 433 | net_dbg_ratelimited("%s: Recipient unable to parse tunneled packet!\n", |
431 | t->parms.name); | 434 | t->parms.name); |
432 | } | 435 | } |
433 | break; | 436 | return; |
434 | case ICMPV6_PKT_TOOBIG: | 437 | case ICMPV6_PKT_TOOBIG: |
435 | mtu = be32_to_cpu(info) - offset - t->tun_hlen; | 438 | mtu = be32_to_cpu(info) - offset - t->tun_hlen; |
436 | if (t->dev->type == ARPHRD_ETHER) | 439 | if (t->dev->type == ARPHRD_ETHER) |
@@ -438,7 +441,7 @@ static void ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | |||
438 | if (mtu < IPV6_MIN_MTU) | 441 | if (mtu < IPV6_MIN_MTU) |
439 | mtu = IPV6_MIN_MTU; | 442 | mtu = IPV6_MIN_MTU; |
440 | t->dev->mtu = mtu; | 443 | t->dev->mtu = mtu; |
441 | break; | 444 | return; |
442 | } | 445 | } |
443 | 446 | ||
444 | if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO)) | 447 | if (time_before(jiffies, t->err_time + IP6TUNNEL_ERR_TIMEO)) |
@@ -500,8 +503,8 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb, | |||
500 | __u32 *pmtu, __be16 proto) | 503 | __u32 *pmtu, __be16 proto) |
501 | { | 504 | { |
502 | struct ip6_tnl *tunnel = netdev_priv(dev); | 505 | struct ip6_tnl *tunnel = netdev_priv(dev); |
503 | __be16 protocol = (dev->type == ARPHRD_ETHER) ? | 506 | struct dst_entry *dst = skb_dst(skb); |
504 | htons(ETH_P_TEB) : proto; | 507 | __be16 protocol; |
505 | 508 | ||
506 | if (dev->type == ARPHRD_ETHER) | 509 | if (dev->type == ARPHRD_ETHER) |
507 | IPCB(skb)->flags = 0; | 510 | IPCB(skb)->flags = 0; |
@@ -515,9 +518,14 @@ static netdev_tx_t __gre6_xmit(struct sk_buff *skb, | |||
515 | tunnel->o_seqno++; | 518 | tunnel->o_seqno++; |
516 | 519 | ||
517 | /* Push GRE header. */ | 520 | /* Push GRE header. */ |
521 | protocol = (dev->type == ARPHRD_ETHER) ? htons(ETH_P_TEB) : proto; | ||
518 | gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags, | 522 | gre_build_header(skb, tunnel->tun_hlen, tunnel->parms.o_flags, |
519 | protocol, tunnel->parms.o_key, htonl(tunnel->o_seqno)); | 523 | protocol, tunnel->parms.o_key, htonl(tunnel->o_seqno)); |
520 | 524 | ||
525 | /* TooBig packet may have updated dst->dev's mtu */ | ||
526 | if (dst && dst_mtu(dst) > dst->dev->mtu) | ||
527 | dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu); | ||
528 | |||
521 | return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu, | 529 | return ip6_tnl_xmit(skb, dev, dsfield, fl6, encap_limit, pmtu, |
522 | NEXTHDR_GRE); | 530 | NEXTHDR_GRE); |
523 | } | 531 | } |
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c index cdb3728faca7..4a87f9428ca5 100644 --- a/net/ipv6/ip6_offload.c +++ b/net/ipv6/ip6_offload.c | |||
@@ -105,7 +105,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, | |||
105 | 105 | ||
106 | for (skb = segs; skb; skb = skb->next) { | 106 | for (skb = segs; skb; skb = skb->next) { |
107 | ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff); | 107 | ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff); |
108 | if (gso_partial) | 108 | if (gso_partial && skb_is_gso(skb)) |
109 | payload_len = skb_shinfo(skb)->gso_size + | 109 | payload_len = skb_shinfo(skb)->gso_size + |
110 | SKB_GSO_CB(skb)->data_offset + | 110 | SKB_GSO_CB(skb)->data_offset + |
111 | skb->head - (unsigned char *)(ipv6h + 1); | 111 | skb->head - (unsigned char *)(ipv6h + 1); |
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 43ca864327c7..5110a418cc4d 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c | |||
@@ -1161,11 +1161,11 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork, | |||
1161 | if (WARN_ON(v6_cork->opt)) | 1161 | if (WARN_ON(v6_cork->opt)) |
1162 | return -EINVAL; | 1162 | return -EINVAL; |
1163 | 1163 | ||
1164 | v6_cork->opt = kzalloc(opt->tot_len, sk->sk_allocation); | 1164 | v6_cork->opt = kzalloc(sizeof(*opt), sk->sk_allocation); |
1165 | if (unlikely(!v6_cork->opt)) | 1165 | if (unlikely(!v6_cork->opt)) |
1166 | return -ENOBUFS; | 1166 | return -ENOBUFS; |
1167 | 1167 | ||
1168 | v6_cork->opt->tot_len = opt->tot_len; | 1168 | v6_cork->opt->tot_len = sizeof(*opt); |
1169 | v6_cork->opt->opt_flen = opt->opt_flen; | 1169 | v6_cork->opt->opt_flen = opt->opt_flen; |
1170 | v6_cork->opt->opt_nflen = opt->opt_nflen; | 1170 | v6_cork->opt->opt_nflen = opt->opt_nflen; |
1171 | 1171 | ||
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c index a5cd43d75393..437af8c95277 100644 --- a/net/ipv6/netfilter/ip6t_SYNPROXY.c +++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c | |||
@@ -353,7 +353,7 @@ static unsigned int ipv6_synproxy_hook(void *priv, | |||
353 | nexthdr = ipv6_hdr(skb)->nexthdr; | 353 | nexthdr = ipv6_hdr(skb)->nexthdr; |
354 | thoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr, | 354 | thoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr, |
355 | &frag_off); | 355 | &frag_off); |
356 | if (thoff < 0) | 356 | if (thoff < 0 || nexthdr != IPPROTO_TCP) |
357 | return NF_ACCEPT; | 357 | return NF_ACCEPT; |
358 | 358 | ||
359 | th = skb_header_pointer(skb, thoff, sizeof(_th), &_th); | 359 | th = skb_header_pointer(skb, thoff, sizeof(_th), &_th); |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 26cc9f483b6d..a96d5b385d8f 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -1325,7 +1325,7 @@ struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_ori | |||
1325 | struct dst_entry *new = NULL; | 1325 | struct dst_entry *new = NULL; |
1326 | 1326 | ||
1327 | rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1, | 1327 | rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1, |
1328 | DST_OBSOLETE_NONE, 0); | 1328 | DST_OBSOLETE_DEAD, 0); |
1329 | if (rt) { | 1329 | if (rt) { |
1330 | rt6_info_init(rt); | 1330 | rt6_info_init(rt); |
1331 | 1331 | ||
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index bc6e8bfc5be4..f50452b919d5 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c | |||
@@ -988,6 +988,9 @@ static int pppol2tp_session_ioctl(struct l2tp_session *session, | |||
988 | session->name, cmd, arg); | 988 | session->name, cmd, arg); |
989 | 989 | ||
990 | sk = ps->sock; | 990 | sk = ps->sock; |
991 | if (!sk) | ||
992 | return -EBADR; | ||
993 | |||
991 | sock_hold(sk); | 994 | sock_hold(sk); |
992 | 995 | ||
993 | switch (cmd) { | 996 | switch (cmd) { |
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index a354f1939e49..fb15d3b97cb2 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c | |||
@@ -2727,12 +2727,6 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy, | |||
2727 | if (!ieee80211_sdata_running(sdata)) | 2727 | if (!ieee80211_sdata_running(sdata)) |
2728 | return -ENETDOWN; | 2728 | return -ENETDOWN; |
2729 | 2729 | ||
2730 | if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) { | ||
2731 | ret = drv_set_bitrate_mask(local, sdata, mask); | ||
2732 | if (ret) | ||
2733 | return ret; | ||
2734 | } | ||
2735 | |||
2736 | /* | 2730 | /* |
2737 | * If active validate the setting and reject it if it doesn't leave | 2731 | * If active validate the setting and reject it if it doesn't leave |
2738 | * at least one basic rate usable, since we really have to be able | 2732 | * at least one basic rate usable, since we really have to be able |
@@ -2748,6 +2742,12 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy, | |||
2748 | return -EINVAL; | 2742 | return -EINVAL; |
2749 | } | 2743 | } |
2750 | 2744 | ||
2745 | if (ieee80211_hw_check(&local->hw, HAS_RATE_CONTROL)) { | ||
2746 | ret = drv_set_bitrate_mask(local, sdata, mask); | ||
2747 | if (ret) | ||
2748 | return ret; | ||
2749 | } | ||
2750 | |||
2751 | for (i = 0; i < NUM_NL80211_BANDS; i++) { | 2751 | for (i = 0; i < NUM_NL80211_BANDS; i++) { |
2752 | struct ieee80211_supported_band *sband = wiphy->bands[i]; | 2752 | struct ieee80211_supported_band *sband = wiphy->bands[i]; |
2753 | int j; | 2753 | int j; |
diff --git a/net/mac80211/key.c b/net/mac80211/key.c index a98fc2b5e0dc..938049395f90 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c | |||
@@ -4,7 +4,7 @@ | |||
4 | * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> | 4 | * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz> |
5 | * Copyright 2007-2008 Johannes Berg <johannes@sipsolutions.net> | 5 | * Copyright 2007-2008 Johannes Berg <johannes@sipsolutions.net> |
6 | * Copyright 2013-2014 Intel Mobile Communications GmbH | 6 | * Copyright 2013-2014 Intel Mobile Communications GmbH |
7 | * Copyright 2015 Intel Deutschland GmbH | 7 | * Copyright 2015-2017 Intel Deutschland GmbH |
8 | * | 8 | * |
9 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
10 | * it under the terms of the GNU General Public License version 2 as | 10 | * it under the terms of the GNU General Public License version 2 as |
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | #include <linux/export.h> | 20 | #include <linux/export.h> |
21 | #include <net/mac80211.h> | 21 | #include <net/mac80211.h> |
22 | #include <crypto/algapi.h> | ||
22 | #include <asm/unaligned.h> | 23 | #include <asm/unaligned.h> |
23 | #include "ieee80211_i.h" | 24 | #include "ieee80211_i.h" |
24 | #include "driver-ops.h" | 25 | #include "driver-ops.h" |
@@ -609,6 +610,39 @@ void ieee80211_key_free_unused(struct ieee80211_key *key) | |||
609 | ieee80211_key_free_common(key); | 610 | ieee80211_key_free_common(key); |
610 | } | 611 | } |
611 | 612 | ||
613 | static bool ieee80211_key_identical(struct ieee80211_sub_if_data *sdata, | ||
614 | struct ieee80211_key *old, | ||
615 | struct ieee80211_key *new) | ||
616 | { | ||
617 | u8 tkip_old[WLAN_KEY_LEN_TKIP], tkip_new[WLAN_KEY_LEN_TKIP]; | ||
618 | u8 *tk_old, *tk_new; | ||
619 | |||
620 | if (!old || new->conf.keylen != old->conf.keylen) | ||
621 | return false; | ||
622 | |||
623 | tk_old = old->conf.key; | ||
624 | tk_new = new->conf.key; | ||
625 | |||
626 | /* | ||
627 | * In station mode, don't compare the TX MIC key, as it's never used | ||
628 | * and offloaded rekeying may not care to send it to the host. This | ||
629 | * is the case in iwlwifi, for example. | ||
630 | */ | ||
631 | if (sdata->vif.type == NL80211_IFTYPE_STATION && | ||
632 | new->conf.cipher == WLAN_CIPHER_SUITE_TKIP && | ||
633 | new->conf.keylen == WLAN_KEY_LEN_TKIP && | ||
634 | !(new->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) { | ||
635 | memcpy(tkip_old, tk_old, WLAN_KEY_LEN_TKIP); | ||
636 | memcpy(tkip_new, tk_new, WLAN_KEY_LEN_TKIP); | ||
637 | memset(tkip_old + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8); | ||
638 | memset(tkip_new + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY, 0, 8); | ||
639 | tk_old = tkip_old; | ||
640 | tk_new = tkip_new; | ||
641 | } | ||
642 | |||
643 | return !crypto_memneq(tk_old, tk_new, new->conf.keylen); | ||
644 | } | ||
645 | |||
612 | int ieee80211_key_link(struct ieee80211_key *key, | 646 | int ieee80211_key_link(struct ieee80211_key *key, |
613 | struct ieee80211_sub_if_data *sdata, | 647 | struct ieee80211_sub_if_data *sdata, |
614 | struct sta_info *sta) | 648 | struct sta_info *sta) |
@@ -620,9 +654,6 @@ int ieee80211_key_link(struct ieee80211_key *key, | |||
620 | 654 | ||
621 | pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE; | 655 | pairwise = key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE; |
622 | idx = key->conf.keyidx; | 656 | idx = key->conf.keyidx; |
623 | key->local = sdata->local; | ||
624 | key->sdata = sdata; | ||
625 | key->sta = sta; | ||
626 | 657 | ||
627 | mutex_lock(&sdata->local->key_mtx); | 658 | mutex_lock(&sdata->local->key_mtx); |
628 | 659 | ||
@@ -633,6 +664,20 @@ int ieee80211_key_link(struct ieee80211_key *key, | |||
633 | else | 664 | else |
634 | old_key = key_mtx_dereference(sdata->local, sdata->keys[idx]); | 665 | old_key = key_mtx_dereference(sdata->local, sdata->keys[idx]); |
635 | 666 | ||
667 | /* | ||
668 | * Silently accept key re-installation without really installing the | ||
669 | * new version of the key to avoid nonce reuse or replay issues. | ||
670 | */ | ||
671 | if (ieee80211_key_identical(sdata, old_key, key)) { | ||
672 | ieee80211_key_free_unused(key); | ||
673 | ret = 0; | ||
674 | goto out; | ||
675 | } | ||
676 | |||
677 | key->local = sdata->local; | ||
678 | key->sdata = sdata; | ||
679 | key->sta = sta; | ||
680 | |||
636 | increment_tailroom_need_count(sdata); | 681 | increment_tailroom_need_count(sdata); |
637 | 682 | ||
638 | ieee80211_key_replace(sdata, sta, pairwise, old_key, key); | 683 | ieee80211_key_replace(sdata, sta, pairwise, old_key, key); |
@@ -648,6 +693,7 @@ int ieee80211_key_link(struct ieee80211_key *key, | |||
648 | ret = 0; | 693 | ret = 0; |
649 | } | 694 | } |
650 | 695 | ||
696 | out: | ||
651 | mutex_unlock(&sdata->local->key_mtx); | 697 | mutex_unlock(&sdata->local->key_mtx); |
652 | 698 | ||
653 | return ret; | 699 | return ret; |
diff --git a/net/ncsi/internal.h b/net/ncsi/internal.h index af3d636534ef..d30f7bd741d0 100644 --- a/net/ncsi/internal.h +++ b/net/ncsi/internal.h | |||
@@ -286,6 +286,7 @@ struct ncsi_dev_priv { | |||
286 | struct work_struct work; /* For channel management */ | 286 | struct work_struct work; /* For channel management */ |
287 | struct packet_type ptype; /* NCSI packet Rx handler */ | 287 | struct packet_type ptype; /* NCSI packet Rx handler */ |
288 | struct list_head node; /* Form NCSI device list */ | 288 | struct list_head node; /* Form NCSI device list */ |
289 | #define NCSI_MAX_VLAN_VIDS 15 | ||
289 | struct list_head vlan_vids; /* List of active VLAN IDs */ | 290 | struct list_head vlan_vids; /* List of active VLAN IDs */ |
290 | }; | 291 | }; |
291 | 292 | ||
diff --git a/net/ncsi/ncsi-aen.c b/net/ncsi/ncsi-aen.c index 6898e7229285..f135938bf781 100644 --- a/net/ncsi/ncsi-aen.c +++ b/net/ncsi/ncsi-aen.c | |||
@@ -187,7 +187,7 @@ static struct ncsi_aen_handler { | |||
187 | } ncsi_aen_handlers[] = { | 187 | } ncsi_aen_handlers[] = { |
188 | { NCSI_PKT_AEN_LSC, 12, ncsi_aen_handler_lsc }, | 188 | { NCSI_PKT_AEN_LSC, 12, ncsi_aen_handler_lsc }, |
189 | { NCSI_PKT_AEN_CR, 4, ncsi_aen_handler_cr }, | 189 | { NCSI_PKT_AEN_CR, 4, ncsi_aen_handler_cr }, |
190 | { NCSI_PKT_AEN_HNCDSC, 4, ncsi_aen_handler_hncdsc } | 190 | { NCSI_PKT_AEN_HNCDSC, 8, ncsi_aen_handler_hncdsc } |
191 | }; | 191 | }; |
192 | 192 | ||
193 | int ncsi_aen_handler(struct ncsi_dev_priv *ndp, struct sk_buff *skb) | 193 | int ncsi_aen_handler(struct ncsi_dev_priv *ndp, struct sk_buff *skb) |
diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c index 3fd3c39e6278..28c42b22b748 100644 --- a/net/ncsi/ncsi-manage.c +++ b/net/ncsi/ncsi-manage.c | |||
@@ -189,6 +189,7 @@ static void ncsi_channel_monitor(unsigned long data) | |||
189 | struct ncsi_channel *nc = (struct ncsi_channel *)data; | 189 | struct ncsi_channel *nc = (struct ncsi_channel *)data; |
190 | struct ncsi_package *np = nc->package; | 190 | struct ncsi_package *np = nc->package; |
191 | struct ncsi_dev_priv *ndp = np->ndp; | 191 | struct ncsi_dev_priv *ndp = np->ndp; |
192 | struct ncsi_channel_mode *ncm; | ||
192 | struct ncsi_cmd_arg nca; | 193 | struct ncsi_cmd_arg nca; |
193 | bool enabled, chained; | 194 | bool enabled, chained; |
194 | unsigned int monitor_state; | 195 | unsigned int monitor_state; |
@@ -202,11 +203,15 @@ static void ncsi_channel_monitor(unsigned long data) | |||
202 | monitor_state = nc->monitor.state; | 203 | monitor_state = nc->monitor.state; |
203 | spin_unlock_irqrestore(&nc->lock, flags); | 204 | spin_unlock_irqrestore(&nc->lock, flags); |
204 | 205 | ||
205 | if (!enabled || chained) | 206 | if (!enabled || chained) { |
207 | ncsi_stop_channel_monitor(nc); | ||
206 | return; | 208 | return; |
209 | } | ||
207 | if (state != NCSI_CHANNEL_INACTIVE && | 210 | if (state != NCSI_CHANNEL_INACTIVE && |
208 | state != NCSI_CHANNEL_ACTIVE) | 211 | state != NCSI_CHANNEL_ACTIVE) { |
212 | ncsi_stop_channel_monitor(nc); | ||
209 | return; | 213 | return; |
214 | } | ||
210 | 215 | ||
211 | switch (monitor_state) { | 216 | switch (monitor_state) { |
212 | case NCSI_CHANNEL_MONITOR_START: | 217 | case NCSI_CHANNEL_MONITOR_START: |
@@ -217,28 +222,28 @@ static void ncsi_channel_monitor(unsigned long data) | |||
217 | nca.type = NCSI_PKT_CMD_GLS; | 222 | nca.type = NCSI_PKT_CMD_GLS; |
218 | nca.req_flags = 0; | 223 | nca.req_flags = 0; |
219 | ret = ncsi_xmit_cmd(&nca); | 224 | ret = ncsi_xmit_cmd(&nca); |
220 | if (ret) { | 225 | if (ret) |
221 | netdev_err(ndp->ndev.dev, "Error %d sending GLS\n", | 226 | netdev_err(ndp->ndev.dev, "Error %d sending GLS\n", |
222 | ret); | 227 | ret); |
223 | return; | ||
224 | } | ||
225 | |||
226 | break; | 228 | break; |
227 | case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX: | 229 | case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX: |
228 | break; | 230 | break; |
229 | default: | 231 | default: |
230 | if (!(ndp->flags & NCSI_DEV_HWA) && | 232 | if (!(ndp->flags & NCSI_DEV_HWA)) { |
231 | state == NCSI_CHANNEL_ACTIVE) { | ||
232 | ncsi_report_link(ndp, true); | 233 | ncsi_report_link(ndp, true); |
233 | ndp->flags |= NCSI_DEV_RESHUFFLE; | 234 | ndp->flags |= NCSI_DEV_RESHUFFLE; |
234 | } | 235 | } |
235 | 236 | ||
237 | ncsi_stop_channel_monitor(nc); | ||
238 | |||
239 | ncm = &nc->modes[NCSI_MODE_LINK]; | ||
236 | spin_lock_irqsave(&nc->lock, flags); | 240 | spin_lock_irqsave(&nc->lock, flags); |
237 | nc->state = NCSI_CHANNEL_INVISIBLE; | 241 | nc->state = NCSI_CHANNEL_INVISIBLE; |
242 | ncm->data[2] &= ~0x1; | ||
238 | spin_unlock_irqrestore(&nc->lock, flags); | 243 | spin_unlock_irqrestore(&nc->lock, flags); |
239 | 244 | ||
240 | spin_lock_irqsave(&ndp->lock, flags); | 245 | spin_lock_irqsave(&ndp->lock, flags); |
241 | nc->state = NCSI_CHANNEL_INACTIVE; | 246 | nc->state = NCSI_CHANNEL_ACTIVE; |
242 | list_add_tail_rcu(&nc->link, &ndp->channel_queue); | 247 | list_add_tail_rcu(&nc->link, &ndp->channel_queue); |
243 | spin_unlock_irqrestore(&ndp->lock, flags); | 248 | spin_unlock_irqrestore(&ndp->lock, flags); |
244 | ncsi_process_next_channel(ndp); | 249 | ncsi_process_next_channel(ndp); |
@@ -732,6 +737,10 @@ static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc, | |||
732 | if (index < 0) { | 737 | if (index < 0) { |
733 | netdev_err(ndp->ndev.dev, | 738 | netdev_err(ndp->ndev.dev, |
734 | "Failed to add new VLAN tag, error %d\n", index); | 739 | "Failed to add new VLAN tag, error %d\n", index); |
740 | if (index == -ENOSPC) | ||
741 | netdev_err(ndp->ndev.dev, | ||
742 | "Channel %u already has all VLAN filters set\n", | ||
743 | nc->id); | ||
735 | return -1; | 744 | return -1; |
736 | } | 745 | } |
737 | 746 | ||
@@ -998,12 +1007,15 @@ static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp) | |||
998 | struct ncsi_package *np; | 1007 | struct ncsi_package *np; |
999 | struct ncsi_channel *nc; | 1008 | struct ncsi_channel *nc; |
1000 | unsigned int cap; | 1009 | unsigned int cap; |
1010 | bool has_channel = false; | ||
1001 | 1011 | ||
1002 | /* The hardware arbitration is disabled if any one channel | 1012 | /* The hardware arbitration is disabled if any one channel |
1003 | * doesn't support explicitly. | 1013 | * doesn't support explicitly. |
1004 | */ | 1014 | */ |
1005 | NCSI_FOR_EACH_PACKAGE(ndp, np) { | 1015 | NCSI_FOR_EACH_PACKAGE(ndp, np) { |
1006 | NCSI_FOR_EACH_CHANNEL(np, nc) { | 1016 | NCSI_FOR_EACH_CHANNEL(np, nc) { |
1017 | has_channel = true; | ||
1018 | |||
1007 | cap = nc->caps[NCSI_CAP_GENERIC].cap; | 1019 | cap = nc->caps[NCSI_CAP_GENERIC].cap; |
1008 | if (!(cap & NCSI_CAP_GENERIC_HWA) || | 1020 | if (!(cap & NCSI_CAP_GENERIC_HWA) || |
1009 | (cap & NCSI_CAP_GENERIC_HWA_MASK) != | 1021 | (cap & NCSI_CAP_GENERIC_HWA_MASK) != |
@@ -1014,8 +1026,13 @@ static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp) | |||
1014 | } | 1026 | } |
1015 | } | 1027 | } |
1016 | 1028 | ||
1017 | ndp->flags |= NCSI_DEV_HWA; | 1029 | if (has_channel) { |
1018 | return true; | 1030 | ndp->flags |= NCSI_DEV_HWA; |
1031 | return true; | ||
1032 | } | ||
1033 | |||
1034 | ndp->flags &= ~NCSI_DEV_HWA; | ||
1035 | return false; | ||
1019 | } | 1036 | } |
1020 | 1037 | ||
1021 | static int ncsi_enable_hwa(struct ncsi_dev_priv *ndp) | 1038 | static int ncsi_enable_hwa(struct ncsi_dev_priv *ndp) |
@@ -1403,7 +1420,6 @@ static int ncsi_kick_channels(struct ncsi_dev_priv *ndp) | |||
1403 | 1420 | ||
1404 | int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) | 1421 | int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) |
1405 | { | 1422 | { |
1406 | struct ncsi_channel_filter *ncf; | ||
1407 | struct ncsi_dev_priv *ndp; | 1423 | struct ncsi_dev_priv *ndp; |
1408 | unsigned int n_vids = 0; | 1424 | unsigned int n_vids = 0; |
1409 | struct vlan_vid *vlan; | 1425 | struct vlan_vid *vlan; |
@@ -1420,7 +1436,6 @@ int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) | |||
1420 | } | 1436 | } |
1421 | 1437 | ||
1422 | ndp = TO_NCSI_DEV_PRIV(nd); | 1438 | ndp = TO_NCSI_DEV_PRIV(nd); |
1423 | ncf = ndp->hot_channel->filters[NCSI_FILTER_VLAN]; | ||
1424 | 1439 | ||
1425 | /* Add the VLAN id to our internal list */ | 1440 | /* Add the VLAN id to our internal list */ |
1426 | list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) { | 1441 | list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) { |
@@ -1431,12 +1446,11 @@ int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) | |||
1431 | return 0; | 1446 | return 0; |
1432 | } | 1447 | } |
1433 | } | 1448 | } |
1434 | 1449 | if (n_vids >= NCSI_MAX_VLAN_VIDS) { | |
1435 | if (n_vids >= ncf->total) { | 1450 | netdev_warn(dev, |
1436 | netdev_info(dev, | 1451 | "tried to add vlan id %u but NCSI max already registered (%u)\n", |
1437 | "NCSI Channel supports up to %u VLAN tags but %u are already set\n", | 1452 | vid, NCSI_MAX_VLAN_VIDS); |
1438 | ncf->total, n_vids); | 1453 | return -ENOSPC; |
1439 | return -EINVAL; | ||
1440 | } | 1454 | } |
1441 | 1455 | ||
1442 | vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); | 1456 | vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); |
diff --git a/net/ncsi/ncsi-rsp.c b/net/ncsi/ncsi-rsp.c index 265b9a892d41..927dad4759d1 100644 --- a/net/ncsi/ncsi-rsp.c +++ b/net/ncsi/ncsi-rsp.c | |||
@@ -959,7 +959,7 @@ static struct ncsi_rsp_handler { | |||
959 | { NCSI_PKT_RSP_EGMF, 4, ncsi_rsp_handler_egmf }, | 959 | { NCSI_PKT_RSP_EGMF, 4, ncsi_rsp_handler_egmf }, |
960 | { NCSI_PKT_RSP_DGMF, 4, ncsi_rsp_handler_dgmf }, | 960 | { NCSI_PKT_RSP_DGMF, 4, ncsi_rsp_handler_dgmf }, |
961 | { NCSI_PKT_RSP_SNFC, 4, ncsi_rsp_handler_snfc }, | 961 | { NCSI_PKT_RSP_SNFC, 4, ncsi_rsp_handler_snfc }, |
962 | { NCSI_PKT_RSP_GVI, 36, ncsi_rsp_handler_gvi }, | 962 | { NCSI_PKT_RSP_GVI, 40, ncsi_rsp_handler_gvi }, |
963 | { NCSI_PKT_RSP_GC, 32, ncsi_rsp_handler_gc }, | 963 | { NCSI_PKT_RSP_GC, 32, ncsi_rsp_handler_gc }, |
964 | { NCSI_PKT_RSP_GP, -1, ncsi_rsp_handler_gp }, | 964 | { NCSI_PKT_RSP_GP, -1, ncsi_rsp_handler_gp }, |
965 | { NCSI_PKT_RSP_GCPS, 172, ncsi_rsp_handler_gcps }, | 965 | { NCSI_PKT_RSP_GCPS, 172, ncsi_rsp_handler_gcps }, |
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index e495b5e484b1..cf84f7b37cd9 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c | |||
@@ -1191,14 +1191,17 @@ static int ip_set_swap(struct net *net, struct sock *ctnl, struct sk_buff *skb, | |||
1191 | from->family == to->family)) | 1191 | from->family == to->family)) |
1192 | return -IPSET_ERR_TYPE_MISMATCH; | 1192 | return -IPSET_ERR_TYPE_MISMATCH; |
1193 | 1193 | ||
1194 | if (from->ref_netlink || to->ref_netlink) | 1194 | write_lock_bh(&ip_set_ref_lock); |
1195 | |||
1196 | if (from->ref_netlink || to->ref_netlink) { | ||
1197 | write_unlock_bh(&ip_set_ref_lock); | ||
1195 | return -EBUSY; | 1198 | return -EBUSY; |
1199 | } | ||
1196 | 1200 | ||
1197 | strncpy(from_name, from->name, IPSET_MAXNAMELEN); | 1201 | strncpy(from_name, from->name, IPSET_MAXNAMELEN); |
1198 | strncpy(from->name, to->name, IPSET_MAXNAMELEN); | 1202 | strncpy(from->name, to->name, IPSET_MAXNAMELEN); |
1199 | strncpy(to->name, from_name, IPSET_MAXNAMELEN); | 1203 | strncpy(to->name, from_name, IPSET_MAXNAMELEN); |
1200 | 1204 | ||
1201 | write_lock_bh(&ip_set_ref_lock); | ||
1202 | swap(from->ref, to->ref); | 1205 | swap(from->ref, to->ref); |
1203 | ip_set(inst, from_id) = to; | 1206 | ip_set(inst, from_id) = to; |
1204 | ip_set(inst, to_id) = from; | 1207 | ip_set(inst, to_id) = from; |
@@ -2072,25 +2075,28 @@ static struct pernet_operations ip_set_net_ops = { | |||
2072 | static int __init | 2075 | static int __init |
2073 | ip_set_init(void) | 2076 | ip_set_init(void) |
2074 | { | 2077 | { |
2075 | int ret = nfnetlink_subsys_register(&ip_set_netlink_subsys); | 2078 | int ret = register_pernet_subsys(&ip_set_net_ops); |
2079 | |||
2080 | if (ret) { | ||
2081 | pr_err("ip_set: cannot register pernet_subsys.\n"); | ||
2082 | return ret; | ||
2083 | } | ||
2076 | 2084 | ||
2085 | ret = nfnetlink_subsys_register(&ip_set_netlink_subsys); | ||
2077 | if (ret != 0) { | 2086 | if (ret != 0) { |
2078 | pr_err("ip_set: cannot register with nfnetlink.\n"); | 2087 | pr_err("ip_set: cannot register with nfnetlink.\n"); |
2088 | unregister_pernet_subsys(&ip_set_net_ops); | ||
2079 | return ret; | 2089 | return ret; |
2080 | } | 2090 | } |
2091 | |||
2081 | ret = nf_register_sockopt(&so_set); | 2092 | ret = nf_register_sockopt(&so_set); |
2082 | if (ret != 0) { | 2093 | if (ret != 0) { |
2083 | pr_err("SO_SET registry failed: %d\n", ret); | 2094 | pr_err("SO_SET registry failed: %d\n", ret); |
2084 | nfnetlink_subsys_unregister(&ip_set_netlink_subsys); | 2095 | nfnetlink_subsys_unregister(&ip_set_netlink_subsys); |
2096 | unregister_pernet_subsys(&ip_set_net_ops); | ||
2085 | return ret; | 2097 | return ret; |
2086 | } | 2098 | } |
2087 | ret = register_pernet_subsys(&ip_set_net_ops); | 2099 | |
2088 | if (ret) { | ||
2089 | pr_err("ip_set: cannot register pernet_subsys.\n"); | ||
2090 | nf_unregister_sockopt(&so_set); | ||
2091 | nfnetlink_subsys_unregister(&ip_set_netlink_subsys); | ||
2092 | return ret; | ||
2093 | } | ||
2094 | pr_info("ip_set: protocol %u\n", IPSET_PROTOCOL); | 2100 | pr_info("ip_set: protocol %u\n", IPSET_PROTOCOL); |
2095 | return 0; | 2101 | return 0; |
2096 | } | 2102 | } |
@@ -2098,9 +2104,10 @@ ip_set_init(void) | |||
2098 | static void __exit | 2104 | static void __exit |
2099 | ip_set_fini(void) | 2105 | ip_set_fini(void) |
2100 | { | 2106 | { |
2101 | unregister_pernet_subsys(&ip_set_net_ops); | ||
2102 | nf_unregister_sockopt(&so_set); | 2107 | nf_unregister_sockopt(&so_set); |
2103 | nfnetlink_subsys_unregister(&ip_set_netlink_subsys); | 2108 | nfnetlink_subsys_unregister(&ip_set_netlink_subsys); |
2109 | |||
2110 | unregister_pernet_subsys(&ip_set_net_ops); | ||
2104 | pr_debug("these are the famous last words\n"); | 2111 | pr_debug("these are the famous last words\n"); |
2105 | } | 2112 | } |
2106 | 2113 | ||
diff --git a/net/netfilter/ipset/ip_set_hash_ip.c b/net/netfilter/ipset/ip_set_hash_ip.c index 20bfbd315f61..613eb212cb48 100644 --- a/net/netfilter/ipset/ip_set_hash_ip.c +++ b/net/netfilter/ipset/ip_set_hash_ip.c | |||
@@ -123,13 +123,12 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
123 | return ret; | 123 | return ret; |
124 | 124 | ||
125 | ip &= ip_set_hostmask(h->netmask); | 125 | ip &= ip_set_hostmask(h->netmask); |
126 | e.ip = htonl(ip); | ||
127 | if (e.ip == 0) | ||
128 | return -IPSET_ERR_HASH_ELEM; | ||
126 | 129 | ||
127 | if (adt == IPSET_TEST) { | 130 | if (adt == IPSET_TEST) |
128 | e.ip = htonl(ip); | ||
129 | if (e.ip == 0) | ||
130 | return -IPSET_ERR_HASH_ELEM; | ||
131 | return adtfn(set, &e, &ext, &ext, flags); | 131 | return adtfn(set, &e, &ext, &ext, flags); |
132 | } | ||
133 | 132 | ||
134 | ip_to = ip; | 133 | ip_to = ip; |
135 | if (tb[IPSET_ATTR_IP_TO]) { | 134 | if (tb[IPSET_ATTR_IP_TO]) { |
@@ -148,17 +147,20 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
148 | 147 | ||
149 | hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1); | 148 | hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1); |
150 | 149 | ||
151 | if (retried) | 150 | if (retried) { |
152 | ip = ntohl(h->next.ip); | 151 | ip = ntohl(h->next.ip); |
153 | for (; !before(ip_to, ip); ip += hosts) { | ||
154 | e.ip = htonl(ip); | 152 | e.ip = htonl(ip); |
155 | if (e.ip == 0) | 153 | } |
156 | return -IPSET_ERR_HASH_ELEM; | 154 | for (; ip <= ip_to;) { |
157 | ret = adtfn(set, &e, &ext, &ext, flags); | 155 | ret = adtfn(set, &e, &ext, &ext, flags); |
158 | |||
159 | if (ret && !ip_set_eexist(ret, flags)) | 156 | if (ret && !ip_set_eexist(ret, flags)) |
160 | return ret; | 157 | return ret; |
161 | 158 | ||
159 | ip += hosts; | ||
160 | e.ip = htonl(ip); | ||
161 | if (e.ip == 0) | ||
162 | return 0; | ||
163 | |||
162 | ret = 0; | 164 | ret = 0; |
163 | } | 165 | } |
164 | return ret; | 166 | return ret; |
diff --git a/net/netfilter/ipset/ip_set_hash_ipmark.c b/net/netfilter/ipset/ip_set_hash_ipmark.c index b64cf14e8352..f3ba8348cf9d 100644 --- a/net/netfilter/ipset/ip_set_hash_ipmark.c +++ b/net/netfilter/ipset/ip_set_hash_ipmark.c | |||
@@ -149,7 +149,7 @@ hash_ipmark4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
149 | 149 | ||
150 | if (retried) | 150 | if (retried) |
151 | ip = ntohl(h->next.ip); | 151 | ip = ntohl(h->next.ip); |
152 | for (; !before(ip_to, ip); ip++) { | 152 | for (; ip <= ip_to; ip++) { |
153 | e.ip = htonl(ip); | 153 | e.ip = htonl(ip); |
154 | ret = adtfn(set, &e, &ext, &ext, flags); | 154 | ret = adtfn(set, &e, &ext, &ext, flags); |
155 | 155 | ||
diff --git a/net/netfilter/ipset/ip_set_hash_ipport.c b/net/netfilter/ipset/ip_set_hash_ipport.c index f438740e6c6a..ddb8039ec1d2 100644 --- a/net/netfilter/ipset/ip_set_hash_ipport.c +++ b/net/netfilter/ipset/ip_set_hash_ipport.c | |||
@@ -178,7 +178,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
178 | 178 | ||
179 | if (retried) | 179 | if (retried) |
180 | ip = ntohl(h->next.ip); | 180 | ip = ntohl(h->next.ip); |
181 | for (; !before(ip_to, ip); ip++) { | 181 | for (; ip <= ip_to; ip++) { |
182 | p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port) | 182 | p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port) |
183 | : port; | 183 | : port; |
184 | for (; p <= port_to; p++) { | 184 | for (; p <= port_to; p++) { |
diff --git a/net/netfilter/ipset/ip_set_hash_ipportip.c b/net/netfilter/ipset/ip_set_hash_ipportip.c index 6215fb898c50..a7f4d7a85420 100644 --- a/net/netfilter/ipset/ip_set_hash_ipportip.c +++ b/net/netfilter/ipset/ip_set_hash_ipportip.c | |||
@@ -185,7 +185,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
185 | 185 | ||
186 | if (retried) | 186 | if (retried) |
187 | ip = ntohl(h->next.ip); | 187 | ip = ntohl(h->next.ip); |
188 | for (; !before(ip_to, ip); ip++) { | 188 | for (; ip <= ip_to; ip++) { |
189 | p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port) | 189 | p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port) |
190 | : port; | 190 | : port; |
191 | for (; p <= port_to; p++) { | 191 | for (; p <= port_to; p++) { |
diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c index 5ab1b99a53c2..a2f19b9906e9 100644 --- a/net/netfilter/ipset/ip_set_hash_ipportnet.c +++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c | |||
@@ -271,7 +271,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
271 | 271 | ||
272 | if (retried) | 272 | if (retried) |
273 | ip = ntohl(h->next.ip); | 273 | ip = ntohl(h->next.ip); |
274 | for (; !before(ip_to, ip); ip++) { | 274 | for (; ip <= ip_to; ip++) { |
275 | e.ip = htonl(ip); | 275 | e.ip = htonl(ip); |
276 | p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port) | 276 | p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port) |
277 | : port; | 277 | : port; |
@@ -281,7 +281,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
281 | ip == ntohl(h->next.ip) && | 281 | ip == ntohl(h->next.ip) && |
282 | p == ntohs(h->next.port) | 282 | p == ntohs(h->next.port) |
283 | ? ntohl(h->next.ip2) : ip2_from; | 283 | ? ntohl(h->next.ip2) : ip2_from; |
284 | while (!after(ip2, ip2_to)) { | 284 | while (ip2 <= ip2_to) { |
285 | e.ip2 = htonl(ip2); | 285 | e.ip2 = htonl(ip2); |
286 | ip2_last = ip_set_range_to_cidr(ip2, ip2_to, | 286 | ip2_last = ip_set_range_to_cidr(ip2, ip2_to, |
287 | &cidr); | 287 | &cidr); |
diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c index 5d9e895452e7..1c67a1761e45 100644 --- a/net/netfilter/ipset/ip_set_hash_net.c +++ b/net/netfilter/ipset/ip_set_hash_net.c | |||
@@ -193,7 +193,7 @@ hash_net4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
193 | } | 193 | } |
194 | if (retried) | 194 | if (retried) |
195 | ip = ntohl(h->next.ip); | 195 | ip = ntohl(h->next.ip); |
196 | while (!after(ip, ip_to)) { | 196 | while (ip <= ip_to) { |
197 | e.ip = htonl(ip); | 197 | e.ip = htonl(ip); |
198 | last = ip_set_range_to_cidr(ip, ip_to, &e.cidr); | 198 | last = ip_set_range_to_cidr(ip, ip_to, &e.cidr); |
199 | ret = adtfn(set, &e, &ext, &ext, flags); | 199 | ret = adtfn(set, &e, &ext, &ext, flags); |
diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c index 44cf11939c91..d417074f1c1a 100644 --- a/net/netfilter/ipset/ip_set_hash_netiface.c +++ b/net/netfilter/ipset/ip_set_hash_netiface.c | |||
@@ -255,7 +255,7 @@ hash_netiface4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
255 | 255 | ||
256 | if (retried) | 256 | if (retried) |
257 | ip = ntohl(h->next.ip); | 257 | ip = ntohl(h->next.ip); |
258 | while (!after(ip, ip_to)) { | 258 | while (ip <= ip_to) { |
259 | e.ip = htonl(ip); | 259 | e.ip = htonl(ip); |
260 | last = ip_set_range_to_cidr(ip, ip_to, &e.cidr); | 260 | last = ip_set_range_to_cidr(ip, ip_to, &e.cidr); |
261 | ret = adtfn(set, &e, &ext, &ext, flags); | 261 | ret = adtfn(set, &e, &ext, &ext, flags); |
diff --git a/net/netfilter/ipset/ip_set_hash_netnet.c b/net/netfilter/ipset/ip_set_hash_netnet.c index db614e13b193..7f9ae2e9645b 100644 --- a/net/netfilter/ipset/ip_set_hash_netnet.c +++ b/net/netfilter/ipset/ip_set_hash_netnet.c | |||
@@ -250,13 +250,13 @@ hash_netnet4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
250 | if (retried) | 250 | if (retried) |
251 | ip = ntohl(h->next.ip[0]); | 251 | ip = ntohl(h->next.ip[0]); |
252 | 252 | ||
253 | while (!after(ip, ip_to)) { | 253 | while (ip <= ip_to) { |
254 | e.ip[0] = htonl(ip); | 254 | e.ip[0] = htonl(ip); |
255 | last = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]); | 255 | last = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]); |
256 | ip2 = (retried && | 256 | ip2 = (retried && |
257 | ip == ntohl(h->next.ip[0])) ? ntohl(h->next.ip[1]) | 257 | ip == ntohl(h->next.ip[0])) ? ntohl(h->next.ip[1]) |
258 | : ip2_from; | 258 | : ip2_from; |
259 | while (!after(ip2, ip2_to)) { | 259 | while (ip2 <= ip2_to) { |
260 | e.ip[1] = htonl(ip2); | 260 | e.ip[1] = htonl(ip2); |
261 | last2 = ip_set_range_to_cidr(ip2, ip2_to, &e.cidr[1]); | 261 | last2 = ip_set_range_to_cidr(ip2, ip2_to, &e.cidr[1]); |
262 | ret = adtfn(set, &e, &ext, &ext, flags); | 262 | ret = adtfn(set, &e, &ext, &ext, flags); |
diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c index 54b64b6cd0cd..e6ef382febe4 100644 --- a/net/netfilter/ipset/ip_set_hash_netport.c +++ b/net/netfilter/ipset/ip_set_hash_netport.c | |||
@@ -241,7 +241,7 @@ hash_netport4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
241 | 241 | ||
242 | if (retried) | 242 | if (retried) |
243 | ip = ntohl(h->next.ip); | 243 | ip = ntohl(h->next.ip); |
244 | while (!after(ip, ip_to)) { | 244 | while (ip <= ip_to) { |
245 | e.ip = htonl(ip); | 245 | e.ip = htonl(ip); |
246 | last = ip_set_range_to_cidr(ip, ip_to, &cidr); | 246 | last = ip_set_range_to_cidr(ip, ip_to, &cidr); |
247 | e.cidr = cidr - 1; | 247 | e.cidr = cidr - 1; |
diff --git a/net/netfilter/ipset/ip_set_hash_netportnet.c b/net/netfilter/ipset/ip_set_hash_netportnet.c index aff846960ac4..8602f2595a1a 100644 --- a/net/netfilter/ipset/ip_set_hash_netportnet.c +++ b/net/netfilter/ipset/ip_set_hash_netportnet.c | |||
@@ -291,7 +291,7 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
291 | if (retried) | 291 | if (retried) |
292 | ip = ntohl(h->next.ip[0]); | 292 | ip = ntohl(h->next.ip[0]); |
293 | 293 | ||
294 | while (!after(ip, ip_to)) { | 294 | while (ip <= ip_to) { |
295 | e.ip[0] = htonl(ip); | 295 | e.ip[0] = htonl(ip); |
296 | ip_last = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]); | 296 | ip_last = ip_set_range_to_cidr(ip, ip_to, &e.cidr[0]); |
297 | p = retried && ip == ntohl(h->next.ip[0]) ? ntohs(h->next.port) | 297 | p = retried && ip == ntohl(h->next.ip[0]) ? ntohs(h->next.port) |
@@ -301,7 +301,7 @@ hash_netportnet4_uadt(struct ip_set *set, struct nlattr *tb[], | |||
301 | ip2 = (retried && ip == ntohl(h->next.ip[0]) && | 301 | ip2 = (retried && ip == ntohl(h->next.ip[0]) && |
302 | p == ntohs(h->next.port)) ? ntohl(h->next.ip[1]) | 302 | p == ntohs(h->next.port)) ? ntohl(h->next.ip[1]) |
303 | : ip2_from; | 303 | : ip2_from; |
304 | while (!after(ip2, ip2_to)) { | 304 | while (ip2 <= ip2_to) { |
305 | e.ip[1] = htonl(ip2); | 305 | e.ip[1] = htonl(ip2); |
306 | ip2_last = ip_set_range_to_cidr(ip2, ip2_to, | 306 | ip2_last = ip_set_range_to_cidr(ip2, ip2_to, |
307 | &e.cidr[1]); | 307 | &e.cidr[1]); |
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index 90d396814798..4527921b1c3a 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c | |||
@@ -921,6 +921,7 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af, | |||
921 | { | 921 | { |
922 | struct sk_buff *new_skb = NULL; | 922 | struct sk_buff *new_skb = NULL; |
923 | struct iphdr *old_iph = NULL; | 923 | struct iphdr *old_iph = NULL; |
924 | __u8 old_dsfield; | ||
924 | #ifdef CONFIG_IP_VS_IPV6 | 925 | #ifdef CONFIG_IP_VS_IPV6 |
925 | struct ipv6hdr *old_ipv6h = NULL; | 926 | struct ipv6hdr *old_ipv6h = NULL; |
926 | #endif | 927 | #endif |
@@ -945,7 +946,7 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af, | |||
945 | *payload_len = | 946 | *payload_len = |
946 | ntohs(old_ipv6h->payload_len) + | 947 | ntohs(old_ipv6h->payload_len) + |
947 | sizeof(*old_ipv6h); | 948 | sizeof(*old_ipv6h); |
948 | *dsfield = ipv6_get_dsfield(old_ipv6h); | 949 | old_dsfield = ipv6_get_dsfield(old_ipv6h); |
949 | *ttl = old_ipv6h->hop_limit; | 950 | *ttl = old_ipv6h->hop_limit; |
950 | if (df) | 951 | if (df) |
951 | *df = 0; | 952 | *df = 0; |
@@ -960,12 +961,15 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af, | |||
960 | 961 | ||
961 | /* fix old IP header checksum */ | 962 | /* fix old IP header checksum */ |
962 | ip_send_check(old_iph); | 963 | ip_send_check(old_iph); |
963 | *dsfield = ipv4_get_dsfield(old_iph); | 964 | old_dsfield = ipv4_get_dsfield(old_iph); |
964 | *ttl = old_iph->ttl; | 965 | *ttl = old_iph->ttl; |
965 | if (payload_len) | 966 | if (payload_len) |
966 | *payload_len = ntohs(old_iph->tot_len); | 967 | *payload_len = ntohs(old_iph->tot_len); |
967 | } | 968 | } |
968 | 969 | ||
970 | /* Implement full-functionality option for ECN encapsulation */ | ||
971 | *dsfield = INET_ECN_encapsulate(old_dsfield, old_dsfield); | ||
972 | |||
969 | return skb; | 973 | return skb; |
970 | error: | 974 | error: |
971 | kfree_skb(skb); | 975 | kfree_skb(skb); |
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 929927171426..64e1ee091225 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
@@ -1048,7 +1048,7 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net, | |||
1048 | if (nla_put_string(skb, NFTA_CHAIN_TYPE, basechain->type->name)) | 1048 | if (nla_put_string(skb, NFTA_CHAIN_TYPE, basechain->type->name)) |
1049 | goto nla_put_failure; | 1049 | goto nla_put_failure; |
1050 | 1050 | ||
1051 | if (nft_dump_stats(skb, nft_base_chain(chain)->stats)) | 1051 | if (basechain->stats && nft_dump_stats(skb, basechain->stats)) |
1052 | goto nla_put_failure; | 1052 | goto nla_put_failure; |
1053 | } | 1053 | } |
1054 | 1054 | ||
@@ -1487,8 +1487,8 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, | |||
1487 | 1487 | ||
1488 | chain2 = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME], | 1488 | chain2 = nf_tables_chain_lookup(table, nla[NFTA_CHAIN_NAME], |
1489 | genmask); | 1489 | genmask); |
1490 | if (IS_ERR(chain2)) | 1490 | if (!IS_ERR(chain2)) |
1491 | return PTR_ERR(chain2); | 1491 | return -EEXIST; |
1492 | } | 1492 | } |
1493 | 1493 | ||
1494 | if (nla[NFTA_CHAIN_COUNTERS]) { | 1494 | if (nla[NFTA_CHAIN_COUNTERS]) { |
@@ -2741,8 +2741,10 @@ cont: | |||
2741 | list_for_each_entry(i, &ctx->table->sets, list) { | 2741 | list_for_each_entry(i, &ctx->table->sets, list) { |
2742 | if (!nft_is_active_next(ctx->net, i)) | 2742 | if (!nft_is_active_next(ctx->net, i)) |
2743 | continue; | 2743 | continue; |
2744 | if (!strcmp(set->name, i->name)) | 2744 | if (!strcmp(set->name, i->name)) { |
2745 | kfree(set->name); | ||
2745 | return -ENFILE; | 2746 | return -ENFILE; |
2747 | } | ||
2746 | } | 2748 | } |
2747 | return 0; | 2749 | return 0; |
2748 | } | 2750 | } |
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index c83a3b5e1c6c..d8571f414208 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c | |||
@@ -892,7 +892,7 @@ void *xt_copy_counters_from_user(const void __user *user, unsigned int len, | |||
892 | if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0) | 892 | if (copy_from_user(&compat_tmp, user, sizeof(compat_tmp)) != 0) |
893 | return ERR_PTR(-EFAULT); | 893 | return ERR_PTR(-EFAULT); |
894 | 894 | ||
895 | strlcpy(info->name, compat_tmp.name, sizeof(info->name)); | 895 | memcpy(info->name, compat_tmp.name, sizeof(info->name) - 1); |
896 | info->num_counters = compat_tmp.num_counters; | 896 | info->num_counters = compat_tmp.num_counters; |
897 | user += sizeof(compat_tmp); | 897 | user += sizeof(compat_tmp); |
898 | } else | 898 | } else |
@@ -905,9 +905,9 @@ void *xt_copy_counters_from_user(const void __user *user, unsigned int len, | |||
905 | if (copy_from_user(info, user, sizeof(*info)) != 0) | 905 | if (copy_from_user(info, user, sizeof(*info)) != 0) |
906 | return ERR_PTR(-EFAULT); | 906 | return ERR_PTR(-EFAULT); |
907 | 907 | ||
908 | info->name[sizeof(info->name) - 1] = '\0'; | ||
909 | user += sizeof(*info); | 908 | user += sizeof(*info); |
910 | } | 909 | } |
910 | info->name[sizeof(info->name) - 1] = '\0'; | ||
911 | 911 | ||
912 | size = sizeof(struct xt_counters); | 912 | size = sizeof(struct xt_counters); |
913 | size *= info->num_counters; | 913 | size *= info->num_counters; |
diff --git a/net/netfilter/xt_bpf.c b/net/netfilter/xt_bpf.c index 38986a95216c..29123934887b 100644 --- a/net/netfilter/xt_bpf.c +++ b/net/netfilter/xt_bpf.c | |||
@@ -8,6 +8,7 @@ | |||
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/syscalls.h> | ||
11 | #include <linux/skbuff.h> | 12 | #include <linux/skbuff.h> |
12 | #include <linux/filter.h> | 13 | #include <linux/filter.h> |
13 | #include <linux/bpf.h> | 14 | #include <linux/bpf.h> |
@@ -49,6 +50,22 @@ static int __bpf_mt_check_fd(int fd, struct bpf_prog **ret) | |||
49 | return 0; | 50 | return 0; |
50 | } | 51 | } |
51 | 52 | ||
53 | static int __bpf_mt_check_path(const char *path, struct bpf_prog **ret) | ||
54 | { | ||
55 | mm_segment_t oldfs = get_fs(); | ||
56 | int retval, fd; | ||
57 | |||
58 | set_fs(KERNEL_DS); | ||
59 | fd = bpf_obj_get_user(path); | ||
60 | set_fs(oldfs); | ||
61 | if (fd < 0) | ||
62 | return fd; | ||
63 | |||
64 | retval = __bpf_mt_check_fd(fd, ret); | ||
65 | sys_close(fd); | ||
66 | return retval; | ||
67 | } | ||
68 | |||
52 | static int bpf_mt_check(const struct xt_mtchk_param *par) | 69 | static int bpf_mt_check(const struct xt_mtchk_param *par) |
53 | { | 70 | { |
54 | struct xt_bpf_info *info = par->matchinfo; | 71 | struct xt_bpf_info *info = par->matchinfo; |
@@ -66,9 +83,10 @@ static int bpf_mt_check_v1(const struct xt_mtchk_param *par) | |||
66 | return __bpf_mt_check_bytecode(info->bpf_program, | 83 | return __bpf_mt_check_bytecode(info->bpf_program, |
67 | info->bpf_program_num_elem, | 84 | info->bpf_program_num_elem, |
68 | &info->filter); | 85 | &info->filter); |
69 | else if (info->mode == XT_BPF_MODE_FD_PINNED || | 86 | else if (info->mode == XT_BPF_MODE_FD_ELF) |
70 | info->mode == XT_BPF_MODE_FD_ELF) | ||
71 | return __bpf_mt_check_fd(info->fd, &info->filter); | 87 | return __bpf_mt_check_fd(info->fd, &info->filter); |
88 | else if (info->mode == XT_BPF_MODE_PATH_PINNED) | ||
89 | return __bpf_mt_check_path(info->path, &info->filter); | ||
72 | else | 90 | else |
73 | return -EINVAL; | 91 | return -EINVAL; |
74 | } | 92 | } |
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c index e75ef39669c5..575d2153e3b8 100644 --- a/net/netfilter/xt_socket.c +++ b/net/netfilter/xt_socket.c | |||
@@ -76,7 +76,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par, | |||
76 | transparent = nf_sk_is_transparent(sk); | 76 | transparent = nf_sk_is_transparent(sk); |
77 | 77 | ||
78 | if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard && | 78 | if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard && |
79 | transparent) | 79 | transparent && sk_fullsock(sk)) |
80 | pskb->mark = sk->sk_mark; | 80 | pskb->mark = sk->sk_mark; |
81 | 81 | ||
82 | if (sk != skb->sk) | 82 | if (sk != skb->sk) |
@@ -133,7 +133,7 @@ socket_mt6_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par) | |||
133 | transparent = nf_sk_is_transparent(sk); | 133 | transparent = nf_sk_is_transparent(sk); |
134 | 134 | ||
135 | if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard && | 135 | if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard && |
136 | transparent) | 136 | transparent && sk_fullsock(sk)) |
137 | pskb->mark = sk->sk_mark; | 137 | pskb->mark = sk->sk_mark; |
138 | 138 | ||
139 | if (sk != skb->sk) | 139 | if (sk != skb->sk) |
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 94c11cf0459d..b93148e8e9fb 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -2266,16 +2266,17 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb, | |||
2266 | cb->min_dump_alloc = control->min_dump_alloc; | 2266 | cb->min_dump_alloc = control->min_dump_alloc; |
2267 | cb->skb = skb; | 2267 | cb->skb = skb; |
2268 | 2268 | ||
2269 | if (cb->start) { | ||
2270 | ret = cb->start(cb); | ||
2271 | if (ret) | ||
2272 | goto error_unlock; | ||
2273 | } | ||
2274 | |||
2269 | nlk->cb_running = true; | 2275 | nlk->cb_running = true; |
2270 | 2276 | ||
2271 | mutex_unlock(nlk->cb_mutex); | 2277 | mutex_unlock(nlk->cb_mutex); |
2272 | 2278 | ||
2273 | ret = 0; | 2279 | ret = netlink_dump(sk); |
2274 | if (cb->start) | ||
2275 | ret = cb->start(cb); | ||
2276 | |||
2277 | if (!ret) | ||
2278 | ret = netlink_dump(sk); | ||
2279 | 2280 | ||
2280 | sock_put(sk); | 2281 | sock_put(sk); |
2281 | 2282 | ||
@@ -2306,6 +2307,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err, | |||
2306 | size_t tlvlen = 0; | 2307 | size_t tlvlen = 0; |
2307 | struct netlink_sock *nlk = nlk_sk(NETLINK_CB(in_skb).sk); | 2308 | struct netlink_sock *nlk = nlk_sk(NETLINK_CB(in_skb).sk); |
2308 | unsigned int flags = 0; | 2309 | unsigned int flags = 0; |
2310 | bool nlk_has_extack = nlk->flags & NETLINK_F_EXT_ACK; | ||
2309 | 2311 | ||
2310 | /* Error messages get the original request appened, unless the user | 2312 | /* Error messages get the original request appened, unless the user |
2311 | * requests to cap the error message, and get extra error data if | 2313 | * requests to cap the error message, and get extra error data if |
@@ -2316,7 +2318,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err, | |||
2316 | payload += nlmsg_len(nlh); | 2318 | payload += nlmsg_len(nlh); |
2317 | else | 2319 | else |
2318 | flags |= NLM_F_CAPPED; | 2320 | flags |= NLM_F_CAPPED; |
2319 | if (nlk->flags & NETLINK_F_EXT_ACK && extack) { | 2321 | if (nlk_has_extack && extack) { |
2320 | if (extack->_msg) | 2322 | if (extack->_msg) |
2321 | tlvlen += nla_total_size(strlen(extack->_msg) + 1); | 2323 | tlvlen += nla_total_size(strlen(extack->_msg) + 1); |
2322 | if (extack->bad_attr) | 2324 | if (extack->bad_attr) |
@@ -2325,8 +2327,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err, | |||
2325 | } else { | 2327 | } else { |
2326 | flags |= NLM_F_CAPPED; | 2328 | flags |= NLM_F_CAPPED; |
2327 | 2329 | ||
2328 | if (nlk->flags & NETLINK_F_EXT_ACK && | 2330 | if (nlk_has_extack && extack && extack->cookie_len) |
2329 | extack && extack->cookie_len) | ||
2330 | tlvlen += nla_total_size(extack->cookie_len); | 2331 | tlvlen += nla_total_size(extack->cookie_len); |
2331 | } | 2332 | } |
2332 | 2333 | ||
@@ -2354,7 +2355,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err, | |||
2354 | errmsg->error = err; | 2355 | errmsg->error = err; |
2355 | memcpy(&errmsg->msg, nlh, payload > sizeof(*errmsg) ? nlh->nlmsg_len : sizeof(*nlh)); | 2356 | memcpy(&errmsg->msg, nlh, payload > sizeof(*errmsg) ? nlh->nlmsg_len : sizeof(*nlh)); |
2356 | 2357 | ||
2357 | if (nlk->flags & NETLINK_F_EXT_ACK && extack) { | 2358 | if (nlk_has_extack && extack) { |
2358 | if (err) { | 2359 | if (err) { |
2359 | if (extack->_msg) | 2360 | if (extack->_msg) |
2360 | WARN_ON(nla_put_string(skb, NLMSGERR_ATTR_MSG, | 2361 | WARN_ON(nla_put_string(skb, NLMSGERR_ATTR_MSG, |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index bec01a3daf5b..2986941164b1 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -1769,7 +1769,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags) | |||
1769 | 1769 | ||
1770 | out: | 1770 | out: |
1771 | if (err && rollover) { | 1771 | if (err && rollover) { |
1772 | kfree(rollover); | 1772 | kfree_rcu(rollover, rcu); |
1773 | po->rollover = NULL; | 1773 | po->rollover = NULL; |
1774 | } | 1774 | } |
1775 | mutex_unlock(&fanout_mutex); | 1775 | mutex_unlock(&fanout_mutex); |
@@ -1796,8 +1796,10 @@ static struct packet_fanout *fanout_release(struct sock *sk) | |||
1796 | else | 1796 | else |
1797 | f = NULL; | 1797 | f = NULL; |
1798 | 1798 | ||
1799 | if (po->rollover) | 1799 | if (po->rollover) { |
1800 | kfree_rcu(po->rollover, rcu); | 1800 | kfree_rcu(po->rollover, rcu); |
1801 | po->rollover = NULL; | ||
1802 | } | ||
1801 | } | 1803 | } |
1802 | mutex_unlock(&fanout_mutex); | 1804 | mutex_unlock(&fanout_mutex); |
1803 | 1805 | ||
@@ -3851,6 +3853,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, | |||
3851 | void *data = &val; | 3853 | void *data = &val; |
3852 | union tpacket_stats_u st; | 3854 | union tpacket_stats_u st; |
3853 | struct tpacket_rollover_stats rstats; | 3855 | struct tpacket_rollover_stats rstats; |
3856 | struct packet_rollover *rollover; | ||
3854 | 3857 | ||
3855 | if (level != SOL_PACKET) | 3858 | if (level != SOL_PACKET) |
3856 | return -ENOPROTOOPT; | 3859 | return -ENOPROTOOPT; |
@@ -3929,13 +3932,18 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, | |||
3929 | 0); | 3932 | 0); |
3930 | break; | 3933 | break; |
3931 | case PACKET_ROLLOVER_STATS: | 3934 | case PACKET_ROLLOVER_STATS: |
3932 | if (!po->rollover) | 3935 | rcu_read_lock(); |
3936 | rollover = rcu_dereference(po->rollover); | ||
3937 | if (rollover) { | ||
3938 | rstats.tp_all = atomic_long_read(&rollover->num); | ||
3939 | rstats.tp_huge = atomic_long_read(&rollover->num_huge); | ||
3940 | rstats.tp_failed = atomic_long_read(&rollover->num_failed); | ||
3941 | data = &rstats; | ||
3942 | lv = sizeof(rstats); | ||
3943 | } | ||
3944 | rcu_read_unlock(); | ||
3945 | if (!rollover) | ||
3933 | return -EINVAL; | 3946 | return -EINVAL; |
3934 | rstats.tp_all = atomic_long_read(&po->rollover->num); | ||
3935 | rstats.tp_huge = atomic_long_read(&po->rollover->num_huge); | ||
3936 | rstats.tp_failed = atomic_long_read(&po->rollover->num_failed); | ||
3937 | data = &rstats; | ||
3938 | lv = sizeof(rstats); | ||
3939 | break; | 3947 | break; |
3940 | case PACKET_TX_HAS_OFF: | 3948 | case PACKET_TX_HAS_OFF: |
3941 | val = po->tp_tx_has_off; | 3949 | val = po->tp_tx_has_off; |
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c index 6ab39dbcca01..8557a1cae041 100644 --- a/net/rds/ib_send.c +++ b/net/rds/ib_send.c | |||
@@ -661,13 +661,15 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm, | |||
661 | } | 661 | } |
662 | } | 662 | } |
663 | 663 | ||
664 | rds_ib_set_wr_signal_state(ic, send, 0); | 664 | rds_ib_set_wr_signal_state(ic, send, false); |
665 | 665 | ||
666 | /* | 666 | /* |
667 | * Always signal the last one if we're stopping due to flow control. | 667 | * Always signal the last one if we're stopping due to flow control. |
668 | */ | 668 | */ |
669 | if (ic->i_flowctl && flow_controlled && i == (work_alloc-1)) | 669 | if (ic->i_flowctl && flow_controlled && i == (work_alloc - 1)) { |
670 | send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED; | 670 | rds_ib_set_wr_signal_state(ic, send, true); |
671 | send->s_wr.send_flags |= IB_SEND_SOLICITED; | ||
672 | } | ||
671 | 673 | ||
672 | if (send->s_wr.send_flags & IB_SEND_SIGNALED) | 674 | if (send->s_wr.send_flags & IB_SEND_SIGNALED) |
673 | nr_sig++; | 675 | nr_sig++; |
@@ -705,11 +707,8 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm, | |||
705 | if (scat == &rm->data.op_sg[rm->data.op_count]) { | 707 | if (scat == &rm->data.op_sg[rm->data.op_count]) { |
706 | prev->s_op = ic->i_data_op; | 708 | prev->s_op = ic->i_data_op; |
707 | prev->s_wr.send_flags |= IB_SEND_SOLICITED; | 709 | prev->s_wr.send_flags |= IB_SEND_SOLICITED; |
708 | if (!(prev->s_wr.send_flags & IB_SEND_SIGNALED)) { | 710 | if (!(prev->s_wr.send_flags & IB_SEND_SIGNALED)) |
709 | ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs; | 711 | nr_sig += rds_ib_set_wr_signal_state(ic, prev, true); |
710 | prev->s_wr.send_flags |= IB_SEND_SIGNALED; | ||
711 | nr_sig++; | ||
712 | } | ||
713 | ic->i_data_op = NULL; | 712 | ic->i_data_op = NULL; |
714 | } | 713 | } |
715 | 714 | ||
@@ -792,6 +791,7 @@ int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op) | |||
792 | send->s_atomic_wr.compare_add_mask = op->op_m_fadd.nocarry_mask; | 791 | send->s_atomic_wr.compare_add_mask = op->op_m_fadd.nocarry_mask; |
793 | send->s_atomic_wr.swap_mask = 0; | 792 | send->s_atomic_wr.swap_mask = 0; |
794 | } | 793 | } |
794 | send->s_wr.send_flags = 0; | ||
795 | nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify); | 795 | nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify); |
796 | send->s_atomic_wr.wr.num_sge = 1; | 796 | send->s_atomic_wr.wr.num_sge = 1; |
797 | send->s_atomic_wr.wr.next = NULL; | 797 | send->s_atomic_wr.wr.next = NULL; |
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c index fb17552fd292..4b0a8288c98a 100644 --- a/net/rxrpc/af_rxrpc.c +++ b/net/rxrpc/af_rxrpc.c | |||
@@ -308,10 +308,11 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock, | |||
308 | call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, tx_total_len, | 308 | call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, tx_total_len, |
309 | gfp); | 309 | gfp); |
310 | /* The socket has been unlocked. */ | 310 | /* The socket has been unlocked. */ |
311 | if (!IS_ERR(call)) | 311 | if (!IS_ERR(call)) { |
312 | call->notify_rx = notify_rx; | 312 | call->notify_rx = notify_rx; |
313 | mutex_unlock(&call->user_mutex); | ||
314 | } | ||
313 | 315 | ||
314 | mutex_unlock(&call->user_mutex); | ||
315 | _leave(" = %p", call); | 316 | _leave(" = %p", call); |
316 | return call; | 317 | return call; |
317 | } | 318 | } |
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c index ec986ae52808..a9f9a2ccc664 100644 --- a/net/sched/act_sample.c +++ b/net/sched/act_sample.c | |||
@@ -264,6 +264,7 @@ static int __init sample_init_module(void) | |||
264 | 264 | ||
265 | static void __exit sample_cleanup_module(void) | 265 | static void __exit sample_cleanup_module(void) |
266 | { | 266 | { |
267 | rcu_barrier(); | ||
267 | tcf_unregister_action(&act_sample_ops, &sample_net_ops); | 268 | tcf_unregister_action(&act_sample_ops, &sample_net_ops); |
268 | } | 269 | } |
269 | 270 | ||
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 0b2219adf520..231181c602ed 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c | |||
@@ -77,6 +77,8 @@ out: | |||
77 | } | 77 | } |
78 | EXPORT_SYMBOL(register_tcf_proto_ops); | 78 | EXPORT_SYMBOL(register_tcf_proto_ops); |
79 | 79 | ||
80 | static struct workqueue_struct *tc_filter_wq; | ||
81 | |||
80 | int unregister_tcf_proto_ops(struct tcf_proto_ops *ops) | 82 | int unregister_tcf_proto_ops(struct tcf_proto_ops *ops) |
81 | { | 83 | { |
82 | struct tcf_proto_ops *t; | 84 | struct tcf_proto_ops *t; |
@@ -86,6 +88,7 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops) | |||
86 | * tcf_proto_ops's destroy() handler. | 88 | * tcf_proto_ops's destroy() handler. |
87 | */ | 89 | */ |
88 | rcu_barrier(); | 90 | rcu_barrier(); |
91 | flush_workqueue(tc_filter_wq); | ||
89 | 92 | ||
90 | write_lock(&cls_mod_lock); | 93 | write_lock(&cls_mod_lock); |
91 | list_for_each_entry(t, &tcf_proto_base, head) { | 94 | list_for_each_entry(t, &tcf_proto_base, head) { |
@@ -100,6 +103,12 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops) | |||
100 | } | 103 | } |
101 | EXPORT_SYMBOL(unregister_tcf_proto_ops); | 104 | EXPORT_SYMBOL(unregister_tcf_proto_ops); |
102 | 105 | ||
106 | bool tcf_queue_work(struct work_struct *work) | ||
107 | { | ||
108 | return queue_work(tc_filter_wq, work); | ||
109 | } | ||
110 | EXPORT_SYMBOL(tcf_queue_work); | ||
111 | |||
103 | /* Select new prio value from the range, managed by kernel. */ | 112 | /* Select new prio value from the range, managed by kernel. */ |
104 | 113 | ||
105 | static inline u32 tcf_auto_prio(struct tcf_proto *tp) | 114 | static inline u32 tcf_auto_prio(struct tcf_proto *tp) |
@@ -266,23 +275,30 @@ err_chain_create: | |||
266 | } | 275 | } |
267 | EXPORT_SYMBOL(tcf_block_get); | 276 | EXPORT_SYMBOL(tcf_block_get); |
268 | 277 | ||
269 | void tcf_block_put(struct tcf_block *block) | 278 | static void tcf_block_put_final(struct work_struct *work) |
270 | { | 279 | { |
280 | struct tcf_block *block = container_of(work, struct tcf_block, work); | ||
271 | struct tcf_chain *chain, *tmp; | 281 | struct tcf_chain *chain, *tmp; |
272 | 282 | ||
273 | if (!block) | 283 | /* At this point, all the chains should have refcnt == 1. */ |
274 | return; | 284 | rtnl_lock(); |
275 | 285 | list_for_each_entry_safe(chain, tmp, &block->chain_list, list) | |
276 | /* XXX: Standalone actions are not allowed to jump to any chain, and | 286 | tcf_chain_put(chain); |
277 | * bound actions should be all removed after flushing. However, | 287 | rtnl_unlock(); |
278 | * filters are destroyed in RCU callbacks, we have to hold the chains | 288 | kfree(block); |
279 | * first, otherwise we would always race with RCU callbacks on this list | 289 | } |
280 | * without proper locking. | ||
281 | */ | ||
282 | 290 | ||
283 | /* Wait for existing RCU callbacks to cool down. */ | 291 | /* XXX: Standalone actions are not allowed to jump to any chain, and bound |
284 | rcu_barrier(); | 292 | * actions should be all removed after flushing. However, filters are destroyed |
293 | * in RCU callbacks, we have to hold the chains first, otherwise we would | ||
294 | * always race with RCU callbacks on this list without proper locking. | ||
295 | */ | ||
296 | static void tcf_block_put_deferred(struct work_struct *work) | ||
297 | { | ||
298 | struct tcf_block *block = container_of(work, struct tcf_block, work); | ||
299 | struct tcf_chain *chain; | ||
285 | 300 | ||
301 | rtnl_lock(); | ||
286 | /* Hold a refcnt for all chains, except 0, in case they are gone. */ | 302 | /* Hold a refcnt for all chains, except 0, in case they are gone. */ |
287 | list_for_each_entry(chain, &block->chain_list, list) | 303 | list_for_each_entry(chain, &block->chain_list, list) |
288 | if (chain->index) | 304 | if (chain->index) |
@@ -292,13 +308,27 @@ void tcf_block_put(struct tcf_block *block) | |||
292 | list_for_each_entry(chain, &block->chain_list, list) | 308 | list_for_each_entry(chain, &block->chain_list, list) |
293 | tcf_chain_flush(chain); | 309 | tcf_chain_flush(chain); |
294 | 310 | ||
295 | /* Wait for RCU callbacks to release the reference count. */ | 311 | INIT_WORK(&block->work, tcf_block_put_final); |
312 | /* Wait for RCU callbacks to release the reference count and make | ||
313 | * sure their works have been queued before this. | ||
314 | */ | ||
296 | rcu_barrier(); | 315 | rcu_barrier(); |
316 | tcf_queue_work(&block->work); | ||
317 | rtnl_unlock(); | ||
318 | } | ||
297 | 319 | ||
298 | /* At this point, all the chains should have refcnt == 1. */ | 320 | void tcf_block_put(struct tcf_block *block) |
299 | list_for_each_entry_safe(chain, tmp, &block->chain_list, list) | 321 | { |
300 | tcf_chain_put(chain); | 322 | if (!block) |
301 | kfree(block); | 323 | return; |
324 | |||
325 | INIT_WORK(&block->work, tcf_block_put_deferred); | ||
326 | /* Wait for existing RCU callbacks to cool down, make sure their works | ||
327 | * have been queued before this. We can not flush pending works here | ||
328 | * because we are holding the RTNL lock. | ||
329 | */ | ||
330 | rcu_barrier(); | ||
331 | tcf_queue_work(&block->work); | ||
302 | } | 332 | } |
303 | EXPORT_SYMBOL(tcf_block_put); | 333 | EXPORT_SYMBOL(tcf_block_put); |
304 | 334 | ||
@@ -879,6 +909,7 @@ void tcf_exts_destroy(struct tcf_exts *exts) | |||
879 | #ifdef CONFIG_NET_CLS_ACT | 909 | #ifdef CONFIG_NET_CLS_ACT |
880 | LIST_HEAD(actions); | 910 | LIST_HEAD(actions); |
881 | 911 | ||
912 | ASSERT_RTNL(); | ||
882 | tcf_exts_to_list(exts, &actions); | 913 | tcf_exts_to_list(exts, &actions); |
883 | tcf_action_destroy(&actions, TCA_ACT_UNBIND); | 914 | tcf_action_destroy(&actions, TCA_ACT_UNBIND); |
884 | kfree(exts->actions); | 915 | kfree(exts->actions); |
@@ -1030,6 +1061,10 @@ EXPORT_SYMBOL(tcf_exts_get_dev); | |||
1030 | 1061 | ||
1031 | static int __init tc_filter_init(void) | 1062 | static int __init tc_filter_init(void) |
1032 | { | 1063 | { |
1064 | tc_filter_wq = alloc_ordered_workqueue("tc_filter_workqueue", 0); | ||
1065 | if (!tc_filter_wq) | ||
1066 | return -ENOMEM; | ||
1067 | |||
1033 | rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL, 0); | 1068 | rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_ctl_tfilter, NULL, 0); |
1034 | rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL, 0); | 1069 | rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_ctl_tfilter, NULL, 0); |
1035 | rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter, | 1070 | rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_ctl_tfilter, |
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c index d89ebafd2239..f177649a2419 100644 --- a/net/sched/cls_basic.c +++ b/net/sched/cls_basic.c | |||
@@ -34,7 +34,10 @@ struct basic_filter { | |||
34 | struct tcf_result res; | 34 | struct tcf_result res; |
35 | struct tcf_proto *tp; | 35 | struct tcf_proto *tp; |
36 | struct list_head link; | 36 | struct list_head link; |
37 | struct rcu_head rcu; | 37 | union { |
38 | struct work_struct work; | ||
39 | struct rcu_head rcu; | ||
40 | }; | ||
38 | }; | 41 | }; |
39 | 42 | ||
40 | static int basic_classify(struct sk_buff *skb, const struct tcf_proto *tp, | 43 | static int basic_classify(struct sk_buff *skb, const struct tcf_proto *tp, |
@@ -82,15 +85,26 @@ static int basic_init(struct tcf_proto *tp) | |||
82 | return 0; | 85 | return 0; |
83 | } | 86 | } |
84 | 87 | ||
85 | static void basic_delete_filter(struct rcu_head *head) | 88 | static void basic_delete_filter_work(struct work_struct *work) |
86 | { | 89 | { |
87 | struct basic_filter *f = container_of(head, struct basic_filter, rcu); | 90 | struct basic_filter *f = container_of(work, struct basic_filter, work); |
88 | 91 | ||
92 | rtnl_lock(); | ||
89 | tcf_exts_destroy(&f->exts); | 93 | tcf_exts_destroy(&f->exts); |
90 | tcf_em_tree_destroy(&f->ematches); | 94 | tcf_em_tree_destroy(&f->ematches); |
95 | rtnl_unlock(); | ||
96 | |||
91 | kfree(f); | 97 | kfree(f); |
92 | } | 98 | } |
93 | 99 | ||
100 | static void basic_delete_filter(struct rcu_head *head) | ||
101 | { | ||
102 | struct basic_filter *f = container_of(head, struct basic_filter, rcu); | ||
103 | |||
104 | INIT_WORK(&f->work, basic_delete_filter_work); | ||
105 | tcf_queue_work(&f->work); | ||
106 | } | ||
107 | |||
94 | static void basic_destroy(struct tcf_proto *tp) | 108 | static void basic_destroy(struct tcf_proto *tp) |
95 | { | 109 | { |
96 | struct basic_head *head = rtnl_dereference(tp->root); | 110 | struct basic_head *head = rtnl_dereference(tp->root); |
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index 520c5027646a..037a3ae86829 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c | |||
@@ -49,7 +49,10 @@ struct cls_bpf_prog { | |||
49 | struct sock_filter *bpf_ops; | 49 | struct sock_filter *bpf_ops; |
50 | const char *bpf_name; | 50 | const char *bpf_name; |
51 | struct tcf_proto *tp; | 51 | struct tcf_proto *tp; |
52 | struct rcu_head rcu; | 52 | union { |
53 | struct work_struct work; | ||
54 | struct rcu_head rcu; | ||
55 | }; | ||
53 | }; | 56 | }; |
54 | 57 | ||
55 | static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = { | 58 | static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = { |
@@ -257,9 +260,21 @@ static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog) | |||
257 | kfree(prog); | 260 | kfree(prog); |
258 | } | 261 | } |
259 | 262 | ||
263 | static void cls_bpf_delete_prog_work(struct work_struct *work) | ||
264 | { | ||
265 | struct cls_bpf_prog *prog = container_of(work, struct cls_bpf_prog, work); | ||
266 | |||
267 | rtnl_lock(); | ||
268 | __cls_bpf_delete_prog(prog); | ||
269 | rtnl_unlock(); | ||
270 | } | ||
271 | |||
260 | static void cls_bpf_delete_prog_rcu(struct rcu_head *rcu) | 272 | static void cls_bpf_delete_prog_rcu(struct rcu_head *rcu) |
261 | { | 273 | { |
262 | __cls_bpf_delete_prog(container_of(rcu, struct cls_bpf_prog, rcu)); | 274 | struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu); |
275 | |||
276 | INIT_WORK(&prog->work, cls_bpf_delete_prog_work); | ||
277 | tcf_queue_work(&prog->work); | ||
263 | } | 278 | } |
264 | 279 | ||
265 | static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog) | 280 | static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog) |
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c index d48452f87975..a97e069bee89 100644 --- a/net/sched/cls_cgroup.c +++ b/net/sched/cls_cgroup.c | |||
@@ -23,7 +23,10 @@ struct cls_cgroup_head { | |||
23 | struct tcf_exts exts; | 23 | struct tcf_exts exts; |
24 | struct tcf_ematch_tree ematches; | 24 | struct tcf_ematch_tree ematches; |
25 | struct tcf_proto *tp; | 25 | struct tcf_proto *tp; |
26 | struct rcu_head rcu; | 26 | union { |
27 | struct work_struct work; | ||
28 | struct rcu_head rcu; | ||
29 | }; | ||
27 | }; | 30 | }; |
28 | 31 | ||
29 | static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp, | 32 | static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp, |
@@ -57,15 +60,26 @@ static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = { | |||
57 | [TCA_CGROUP_EMATCHES] = { .type = NLA_NESTED }, | 60 | [TCA_CGROUP_EMATCHES] = { .type = NLA_NESTED }, |
58 | }; | 61 | }; |
59 | 62 | ||
63 | static void cls_cgroup_destroy_work(struct work_struct *work) | ||
64 | { | ||
65 | struct cls_cgroup_head *head = container_of(work, | ||
66 | struct cls_cgroup_head, | ||
67 | work); | ||
68 | rtnl_lock(); | ||
69 | tcf_exts_destroy(&head->exts); | ||
70 | tcf_em_tree_destroy(&head->ematches); | ||
71 | kfree(head); | ||
72 | rtnl_unlock(); | ||
73 | } | ||
74 | |||
60 | static void cls_cgroup_destroy_rcu(struct rcu_head *root) | 75 | static void cls_cgroup_destroy_rcu(struct rcu_head *root) |
61 | { | 76 | { |
62 | struct cls_cgroup_head *head = container_of(root, | 77 | struct cls_cgroup_head *head = container_of(root, |
63 | struct cls_cgroup_head, | 78 | struct cls_cgroup_head, |
64 | rcu); | 79 | rcu); |
65 | 80 | ||
66 | tcf_exts_destroy(&head->exts); | 81 | INIT_WORK(&head->work, cls_cgroup_destroy_work); |
67 | tcf_em_tree_destroy(&head->ematches); | 82 | tcf_queue_work(&head->work); |
68 | kfree(head); | ||
69 | } | 83 | } |
70 | 84 | ||
71 | static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb, | 85 | static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb, |
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c index 2a3a60ec5b86..67f3a2af6aab 100644 --- a/net/sched/cls_flow.c +++ b/net/sched/cls_flow.c | |||
@@ -57,7 +57,10 @@ struct flow_filter { | |||
57 | u32 divisor; | 57 | u32 divisor; |
58 | u32 baseclass; | 58 | u32 baseclass; |
59 | u32 hashrnd; | 59 | u32 hashrnd; |
60 | struct rcu_head rcu; | 60 | union { |
61 | struct work_struct work; | ||
62 | struct rcu_head rcu; | ||
63 | }; | ||
61 | }; | 64 | }; |
62 | 65 | ||
63 | static inline u32 addr_fold(void *addr) | 66 | static inline u32 addr_fold(void *addr) |
@@ -369,14 +372,24 @@ static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = { | |||
369 | [TCA_FLOW_PERTURB] = { .type = NLA_U32 }, | 372 | [TCA_FLOW_PERTURB] = { .type = NLA_U32 }, |
370 | }; | 373 | }; |
371 | 374 | ||
372 | static void flow_destroy_filter(struct rcu_head *head) | 375 | static void flow_destroy_filter_work(struct work_struct *work) |
373 | { | 376 | { |
374 | struct flow_filter *f = container_of(head, struct flow_filter, rcu); | 377 | struct flow_filter *f = container_of(work, struct flow_filter, work); |
375 | 378 | ||
379 | rtnl_lock(); | ||
376 | del_timer_sync(&f->perturb_timer); | 380 | del_timer_sync(&f->perturb_timer); |
377 | tcf_exts_destroy(&f->exts); | 381 | tcf_exts_destroy(&f->exts); |
378 | tcf_em_tree_destroy(&f->ematches); | 382 | tcf_em_tree_destroy(&f->ematches); |
379 | kfree(f); | 383 | kfree(f); |
384 | rtnl_unlock(); | ||
385 | } | ||
386 | |||
387 | static void flow_destroy_filter(struct rcu_head *head) | ||
388 | { | ||
389 | struct flow_filter *f = container_of(head, struct flow_filter, rcu); | ||
390 | |||
391 | INIT_WORK(&f->work, flow_destroy_filter_work); | ||
392 | tcf_queue_work(&f->work); | ||
380 | } | 393 | } |
381 | 394 | ||
382 | static int flow_change(struct net *net, struct sk_buff *in_skb, | 395 | static int flow_change(struct net *net, struct sk_buff *in_skb, |
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index d230cb4c8094..5b5722c8b32c 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c | |||
@@ -87,7 +87,10 @@ struct cls_fl_filter { | |||
87 | struct list_head list; | 87 | struct list_head list; |
88 | u32 handle; | 88 | u32 handle; |
89 | u32 flags; | 89 | u32 flags; |
90 | struct rcu_head rcu; | 90 | union { |
91 | struct work_struct work; | ||
92 | struct rcu_head rcu; | ||
93 | }; | ||
91 | struct net_device *hw_dev; | 94 | struct net_device *hw_dev; |
92 | }; | 95 | }; |
93 | 96 | ||
@@ -215,12 +218,22 @@ static int fl_init(struct tcf_proto *tp) | |||
215 | return 0; | 218 | return 0; |
216 | } | 219 | } |
217 | 220 | ||
218 | static void fl_destroy_filter(struct rcu_head *head) | 221 | static void fl_destroy_filter_work(struct work_struct *work) |
219 | { | 222 | { |
220 | struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu); | 223 | struct cls_fl_filter *f = container_of(work, struct cls_fl_filter, work); |
221 | 224 | ||
225 | rtnl_lock(); | ||
222 | tcf_exts_destroy(&f->exts); | 226 | tcf_exts_destroy(&f->exts); |
223 | kfree(f); | 227 | kfree(f); |
228 | rtnl_unlock(); | ||
229 | } | ||
230 | |||
231 | static void fl_destroy_filter(struct rcu_head *head) | ||
232 | { | ||
233 | struct cls_fl_filter *f = container_of(head, struct cls_fl_filter, rcu); | ||
234 | |||
235 | INIT_WORK(&f->work, fl_destroy_filter_work); | ||
236 | tcf_queue_work(&f->work); | ||
224 | } | 237 | } |
225 | 238 | ||
226 | static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f) | 239 | static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f) |
@@ -234,6 +247,7 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f) | |||
234 | tc_cls_common_offload_init(&cls_flower.common, tp); | 247 | tc_cls_common_offload_init(&cls_flower.common, tp); |
235 | cls_flower.command = TC_CLSFLOWER_DESTROY; | 248 | cls_flower.command = TC_CLSFLOWER_DESTROY; |
236 | cls_flower.cookie = (unsigned long) f; | 249 | cls_flower.cookie = (unsigned long) f; |
250 | cls_flower.egress_dev = f->hw_dev != tp->q->dev_queue->dev; | ||
237 | 251 | ||
238 | dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, &cls_flower); | 252 | dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, &cls_flower); |
239 | } | 253 | } |
@@ -289,6 +303,7 @@ static void fl_hw_update_stats(struct tcf_proto *tp, struct cls_fl_filter *f) | |||
289 | cls_flower.command = TC_CLSFLOWER_STATS; | 303 | cls_flower.command = TC_CLSFLOWER_STATS; |
290 | cls_flower.cookie = (unsigned long) f; | 304 | cls_flower.cookie = (unsigned long) f; |
291 | cls_flower.exts = &f->exts; | 305 | cls_flower.exts = &f->exts; |
306 | cls_flower.egress_dev = f->hw_dev != tp->q->dev_queue->dev; | ||
292 | 307 | ||
293 | dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, | 308 | dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER, |
294 | &cls_flower); | 309 | &cls_flower); |
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c index 941245ad07fd..99183b8621ec 100644 --- a/net/sched/cls_fw.c +++ b/net/sched/cls_fw.c | |||
@@ -46,7 +46,10 @@ struct fw_filter { | |||
46 | #endif /* CONFIG_NET_CLS_IND */ | 46 | #endif /* CONFIG_NET_CLS_IND */ |
47 | struct tcf_exts exts; | 47 | struct tcf_exts exts; |
48 | struct tcf_proto *tp; | 48 | struct tcf_proto *tp; |
49 | struct rcu_head rcu; | 49 | union { |
50 | struct work_struct work; | ||
51 | struct rcu_head rcu; | ||
52 | }; | ||
50 | }; | 53 | }; |
51 | 54 | ||
52 | static u32 fw_hash(u32 handle) | 55 | static u32 fw_hash(u32 handle) |
@@ -119,12 +122,22 @@ static int fw_init(struct tcf_proto *tp) | |||
119 | return 0; | 122 | return 0; |
120 | } | 123 | } |
121 | 124 | ||
122 | static void fw_delete_filter(struct rcu_head *head) | 125 | static void fw_delete_filter_work(struct work_struct *work) |
123 | { | 126 | { |
124 | struct fw_filter *f = container_of(head, struct fw_filter, rcu); | 127 | struct fw_filter *f = container_of(work, struct fw_filter, work); |
125 | 128 | ||
129 | rtnl_lock(); | ||
126 | tcf_exts_destroy(&f->exts); | 130 | tcf_exts_destroy(&f->exts); |
127 | kfree(f); | 131 | kfree(f); |
132 | rtnl_unlock(); | ||
133 | } | ||
134 | |||
135 | static void fw_delete_filter(struct rcu_head *head) | ||
136 | { | ||
137 | struct fw_filter *f = container_of(head, struct fw_filter, rcu); | ||
138 | |||
139 | INIT_WORK(&f->work, fw_delete_filter_work); | ||
140 | tcf_queue_work(&f->work); | ||
128 | } | 141 | } |
129 | 142 | ||
130 | static void fw_destroy(struct tcf_proto *tp) | 143 | static void fw_destroy(struct tcf_proto *tp) |
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c index eeac606c95ab..c33f711b9019 100644 --- a/net/sched/cls_matchall.c +++ b/net/sched/cls_matchall.c | |||
@@ -21,7 +21,10 @@ struct cls_mall_head { | |||
21 | struct tcf_result res; | 21 | struct tcf_result res; |
22 | u32 handle; | 22 | u32 handle; |
23 | u32 flags; | 23 | u32 flags; |
24 | struct rcu_head rcu; | 24 | union { |
25 | struct work_struct work; | ||
26 | struct rcu_head rcu; | ||
27 | }; | ||
25 | }; | 28 | }; |
26 | 29 | ||
27 | static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp, | 30 | static int mall_classify(struct sk_buff *skb, const struct tcf_proto *tp, |
@@ -41,13 +44,23 @@ static int mall_init(struct tcf_proto *tp) | |||
41 | return 0; | 44 | return 0; |
42 | } | 45 | } |
43 | 46 | ||
47 | static void mall_destroy_work(struct work_struct *work) | ||
48 | { | ||
49 | struct cls_mall_head *head = container_of(work, struct cls_mall_head, | ||
50 | work); | ||
51 | rtnl_lock(); | ||
52 | tcf_exts_destroy(&head->exts); | ||
53 | kfree(head); | ||
54 | rtnl_unlock(); | ||
55 | } | ||
56 | |||
44 | static void mall_destroy_rcu(struct rcu_head *rcu) | 57 | static void mall_destroy_rcu(struct rcu_head *rcu) |
45 | { | 58 | { |
46 | struct cls_mall_head *head = container_of(rcu, struct cls_mall_head, | 59 | struct cls_mall_head *head = container_of(rcu, struct cls_mall_head, |
47 | rcu); | 60 | rcu); |
48 | 61 | ||
49 | tcf_exts_destroy(&head->exts); | 62 | INIT_WORK(&head->work, mall_destroy_work); |
50 | kfree(head); | 63 | tcf_queue_work(&head->work); |
51 | } | 64 | } |
52 | 65 | ||
53 | static int mall_replace_hw_filter(struct tcf_proto *tp, | 66 | static int mall_replace_hw_filter(struct tcf_proto *tp, |
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c index 9ddde65915d2..4b14ccd8b8f2 100644 --- a/net/sched/cls_route.c +++ b/net/sched/cls_route.c | |||
@@ -57,7 +57,10 @@ struct route4_filter { | |||
57 | u32 handle; | 57 | u32 handle; |
58 | struct route4_bucket *bkt; | 58 | struct route4_bucket *bkt; |
59 | struct tcf_proto *tp; | 59 | struct tcf_proto *tp; |
60 | struct rcu_head rcu; | 60 | union { |
61 | struct work_struct work; | ||
62 | struct rcu_head rcu; | ||
63 | }; | ||
61 | }; | 64 | }; |
62 | 65 | ||
63 | #define ROUTE4_FAILURE ((struct route4_filter *)(-1L)) | 66 | #define ROUTE4_FAILURE ((struct route4_filter *)(-1L)) |
@@ -254,12 +257,22 @@ static int route4_init(struct tcf_proto *tp) | |||
254 | return 0; | 257 | return 0; |
255 | } | 258 | } |
256 | 259 | ||
257 | static void route4_delete_filter(struct rcu_head *head) | 260 | static void route4_delete_filter_work(struct work_struct *work) |
258 | { | 261 | { |
259 | struct route4_filter *f = container_of(head, struct route4_filter, rcu); | 262 | struct route4_filter *f = container_of(work, struct route4_filter, work); |
260 | 263 | ||
264 | rtnl_lock(); | ||
261 | tcf_exts_destroy(&f->exts); | 265 | tcf_exts_destroy(&f->exts); |
262 | kfree(f); | 266 | kfree(f); |
267 | rtnl_unlock(); | ||
268 | } | ||
269 | |||
270 | static void route4_delete_filter(struct rcu_head *head) | ||
271 | { | ||
272 | struct route4_filter *f = container_of(head, struct route4_filter, rcu); | ||
273 | |||
274 | INIT_WORK(&f->work, route4_delete_filter_work); | ||
275 | tcf_queue_work(&f->work); | ||
263 | } | 276 | } |
264 | 277 | ||
265 | static void route4_destroy(struct tcf_proto *tp) | 278 | static void route4_destroy(struct tcf_proto *tp) |
diff --git a/net/sched/cls_rsvp.h b/net/sched/cls_rsvp.h index b1f6ed48bc72..bdbc541787f8 100644 --- a/net/sched/cls_rsvp.h +++ b/net/sched/cls_rsvp.h | |||
@@ -97,7 +97,10 @@ struct rsvp_filter { | |||
97 | 97 | ||
98 | u32 handle; | 98 | u32 handle; |
99 | struct rsvp_session *sess; | 99 | struct rsvp_session *sess; |
100 | struct rcu_head rcu; | 100 | union { |
101 | struct work_struct work; | ||
102 | struct rcu_head rcu; | ||
103 | }; | ||
101 | }; | 104 | }; |
102 | 105 | ||
103 | static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid) | 106 | static inline unsigned int hash_dst(__be32 *dst, u8 protocol, u8 tunnelid) |
@@ -282,12 +285,22 @@ static int rsvp_init(struct tcf_proto *tp) | |||
282 | return -ENOBUFS; | 285 | return -ENOBUFS; |
283 | } | 286 | } |
284 | 287 | ||
285 | static void rsvp_delete_filter_rcu(struct rcu_head *head) | 288 | static void rsvp_delete_filter_work(struct work_struct *work) |
286 | { | 289 | { |
287 | struct rsvp_filter *f = container_of(head, struct rsvp_filter, rcu); | 290 | struct rsvp_filter *f = container_of(work, struct rsvp_filter, work); |
288 | 291 | ||
292 | rtnl_lock(); | ||
289 | tcf_exts_destroy(&f->exts); | 293 | tcf_exts_destroy(&f->exts); |
290 | kfree(f); | 294 | kfree(f); |
295 | rtnl_unlock(); | ||
296 | } | ||
297 | |||
298 | static void rsvp_delete_filter_rcu(struct rcu_head *head) | ||
299 | { | ||
300 | struct rsvp_filter *f = container_of(head, struct rsvp_filter, rcu); | ||
301 | |||
302 | INIT_WORK(&f->work, rsvp_delete_filter_work); | ||
303 | tcf_queue_work(&f->work); | ||
291 | } | 304 | } |
292 | 305 | ||
293 | static void rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f) | 306 | static void rsvp_delete_filter(struct tcf_proto *tp, struct rsvp_filter *f) |
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c index 14a7e08b2fa9..beaa95e09c25 100644 --- a/net/sched/cls_tcindex.c +++ b/net/sched/cls_tcindex.c | |||
@@ -27,14 +27,20 @@ | |||
27 | struct tcindex_filter_result { | 27 | struct tcindex_filter_result { |
28 | struct tcf_exts exts; | 28 | struct tcf_exts exts; |
29 | struct tcf_result res; | 29 | struct tcf_result res; |
30 | struct rcu_head rcu; | 30 | union { |
31 | struct work_struct work; | ||
32 | struct rcu_head rcu; | ||
33 | }; | ||
31 | }; | 34 | }; |
32 | 35 | ||
33 | struct tcindex_filter { | 36 | struct tcindex_filter { |
34 | u16 key; | 37 | u16 key; |
35 | struct tcindex_filter_result result; | 38 | struct tcindex_filter_result result; |
36 | struct tcindex_filter __rcu *next; | 39 | struct tcindex_filter __rcu *next; |
37 | struct rcu_head rcu; | 40 | union { |
41 | struct work_struct work; | ||
42 | struct rcu_head rcu; | ||
43 | }; | ||
38 | }; | 44 | }; |
39 | 45 | ||
40 | 46 | ||
@@ -133,12 +139,34 @@ static int tcindex_init(struct tcf_proto *tp) | |||
133 | return 0; | 139 | return 0; |
134 | } | 140 | } |
135 | 141 | ||
142 | static void tcindex_destroy_rexts_work(struct work_struct *work) | ||
143 | { | ||
144 | struct tcindex_filter_result *r; | ||
145 | |||
146 | r = container_of(work, struct tcindex_filter_result, work); | ||
147 | rtnl_lock(); | ||
148 | tcf_exts_destroy(&r->exts); | ||
149 | rtnl_unlock(); | ||
150 | } | ||
151 | |||
136 | static void tcindex_destroy_rexts(struct rcu_head *head) | 152 | static void tcindex_destroy_rexts(struct rcu_head *head) |
137 | { | 153 | { |
138 | struct tcindex_filter_result *r; | 154 | struct tcindex_filter_result *r; |
139 | 155 | ||
140 | r = container_of(head, struct tcindex_filter_result, rcu); | 156 | r = container_of(head, struct tcindex_filter_result, rcu); |
141 | tcf_exts_destroy(&r->exts); | 157 | INIT_WORK(&r->work, tcindex_destroy_rexts_work); |
158 | tcf_queue_work(&r->work); | ||
159 | } | ||
160 | |||
161 | static void tcindex_destroy_fexts_work(struct work_struct *work) | ||
162 | { | ||
163 | struct tcindex_filter *f = container_of(work, struct tcindex_filter, | ||
164 | work); | ||
165 | |||
166 | rtnl_lock(); | ||
167 | tcf_exts_destroy(&f->result.exts); | ||
168 | kfree(f); | ||
169 | rtnl_unlock(); | ||
142 | } | 170 | } |
143 | 171 | ||
144 | static void tcindex_destroy_fexts(struct rcu_head *head) | 172 | static void tcindex_destroy_fexts(struct rcu_head *head) |
@@ -146,8 +174,8 @@ static void tcindex_destroy_fexts(struct rcu_head *head) | |||
146 | struct tcindex_filter *f = container_of(head, struct tcindex_filter, | 174 | struct tcindex_filter *f = container_of(head, struct tcindex_filter, |
147 | rcu); | 175 | rcu); |
148 | 176 | ||
149 | tcf_exts_destroy(&f->result.exts); | 177 | INIT_WORK(&f->work, tcindex_destroy_fexts_work); |
150 | kfree(f); | 178 | tcf_queue_work(&f->work); |
151 | } | 179 | } |
152 | 180 | ||
153 | static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last) | 181 | static int tcindex_delete(struct tcf_proto *tp, void *arg, bool *last) |
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 10b8d851fc6b..dadd1b344497 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c | |||
@@ -68,7 +68,10 @@ struct tc_u_knode { | |||
68 | u32 __percpu *pcpu_success; | 68 | u32 __percpu *pcpu_success; |
69 | #endif | 69 | #endif |
70 | struct tcf_proto *tp; | 70 | struct tcf_proto *tp; |
71 | struct rcu_head rcu; | 71 | union { |
72 | struct work_struct work; | ||
73 | struct rcu_head rcu; | ||
74 | }; | ||
72 | /* The 'sel' field MUST be the last field in structure to allow for | 75 | /* The 'sel' field MUST be the last field in structure to allow for |
73 | * tc_u32_keys allocated at end of structure. | 76 | * tc_u32_keys allocated at end of structure. |
74 | */ | 77 | */ |
@@ -418,11 +421,21 @@ static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n, | |||
418 | * this the u32_delete_key_rcu variant does not free the percpu | 421 | * this the u32_delete_key_rcu variant does not free the percpu |
419 | * statistics. | 422 | * statistics. |
420 | */ | 423 | */ |
424 | static void u32_delete_key_work(struct work_struct *work) | ||
425 | { | ||
426 | struct tc_u_knode *key = container_of(work, struct tc_u_knode, work); | ||
427 | |||
428 | rtnl_lock(); | ||
429 | u32_destroy_key(key->tp, key, false); | ||
430 | rtnl_unlock(); | ||
431 | } | ||
432 | |||
421 | static void u32_delete_key_rcu(struct rcu_head *rcu) | 433 | static void u32_delete_key_rcu(struct rcu_head *rcu) |
422 | { | 434 | { |
423 | struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu); | 435 | struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu); |
424 | 436 | ||
425 | u32_destroy_key(key->tp, key, false); | 437 | INIT_WORK(&key->work, u32_delete_key_work); |
438 | tcf_queue_work(&key->work); | ||
426 | } | 439 | } |
427 | 440 | ||
428 | /* u32_delete_key_freepf_rcu is the rcu callback variant | 441 | /* u32_delete_key_freepf_rcu is the rcu callback variant |
@@ -432,11 +445,21 @@ static void u32_delete_key_rcu(struct rcu_head *rcu) | |||
432 | * for the variant that should be used with keys return from | 445 | * for the variant that should be used with keys return from |
433 | * u32_init_knode() | 446 | * u32_init_knode() |
434 | */ | 447 | */ |
448 | static void u32_delete_key_freepf_work(struct work_struct *work) | ||
449 | { | ||
450 | struct tc_u_knode *key = container_of(work, struct tc_u_knode, work); | ||
451 | |||
452 | rtnl_lock(); | ||
453 | u32_destroy_key(key->tp, key, true); | ||
454 | rtnl_unlock(); | ||
455 | } | ||
456 | |||
435 | static void u32_delete_key_freepf_rcu(struct rcu_head *rcu) | 457 | static void u32_delete_key_freepf_rcu(struct rcu_head *rcu) |
436 | { | 458 | { |
437 | struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu); | 459 | struct tc_u_knode *key = container_of(rcu, struct tc_u_knode, rcu); |
438 | 460 | ||
439 | u32_destroy_key(key->tp, key, true); | 461 | INIT_WORK(&key->work, u32_delete_key_freepf_work); |
462 | tcf_queue_work(&key->work); | ||
440 | } | 463 | } |
441 | 464 | ||
442 | static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key) | 465 | static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key) |
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index c6deb74e3d2f..22bc6fc48311 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
@@ -301,6 +301,8 @@ struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle) | |||
301 | { | 301 | { |
302 | struct Qdisc *q; | 302 | struct Qdisc *q; |
303 | 303 | ||
304 | if (!handle) | ||
305 | return NULL; | ||
304 | q = qdisc_match_from_root(dev->qdisc, handle); | 306 | q = qdisc_match_from_root(dev->qdisc, handle); |
305 | if (q) | 307 | if (q) |
306 | goto out; | 308 | goto out; |
diff --git a/net/sctp/input.c b/net/sctp/input.c index 92a07141fd07..621b5ca3fd1c 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c | |||
@@ -421,7 +421,7 @@ void sctp_icmp_redirect(struct sock *sk, struct sctp_transport *t, | |||
421 | { | 421 | { |
422 | struct dst_entry *dst; | 422 | struct dst_entry *dst; |
423 | 423 | ||
424 | if (!t) | 424 | if (sock_owned_by_user(sk) || !t) |
425 | return; | 425 | return; |
426 | dst = sctp_transport_dst_check(t); | 426 | dst = sctp_transport_dst_check(t); |
427 | if (dst) | 427 | if (dst) |
@@ -794,7 +794,7 @@ hit: | |||
794 | struct sctp_hash_cmp_arg { | 794 | struct sctp_hash_cmp_arg { |
795 | const union sctp_addr *paddr; | 795 | const union sctp_addr *paddr; |
796 | const struct net *net; | 796 | const struct net *net; |
797 | u16 lport; | 797 | __be16 lport; |
798 | }; | 798 | }; |
799 | 799 | ||
800 | static inline int sctp_hash_cmp(struct rhashtable_compare_arg *arg, | 800 | static inline int sctp_hash_cmp(struct rhashtable_compare_arg *arg, |
@@ -820,37 +820,37 @@ out: | |||
820 | return err; | 820 | return err; |
821 | } | 821 | } |
822 | 822 | ||
823 | static inline u32 sctp_hash_obj(const void *data, u32 len, u32 seed) | 823 | static inline __u32 sctp_hash_obj(const void *data, u32 len, u32 seed) |
824 | { | 824 | { |
825 | const struct sctp_transport *t = data; | 825 | const struct sctp_transport *t = data; |
826 | const union sctp_addr *paddr = &t->ipaddr; | 826 | const union sctp_addr *paddr = &t->ipaddr; |
827 | const struct net *net = sock_net(t->asoc->base.sk); | 827 | const struct net *net = sock_net(t->asoc->base.sk); |
828 | u16 lport = htons(t->asoc->base.bind_addr.port); | 828 | __be16 lport = htons(t->asoc->base.bind_addr.port); |
829 | u32 addr; | 829 | __u32 addr; |
830 | 830 | ||
831 | if (paddr->sa.sa_family == AF_INET6) | 831 | if (paddr->sa.sa_family == AF_INET6) |
832 | addr = jhash(&paddr->v6.sin6_addr, 16, seed); | 832 | addr = jhash(&paddr->v6.sin6_addr, 16, seed); |
833 | else | 833 | else |
834 | addr = paddr->v4.sin_addr.s_addr; | 834 | addr = (__force __u32)paddr->v4.sin_addr.s_addr; |
835 | 835 | ||
836 | return jhash_3words(addr, ((__u32)paddr->v4.sin_port) << 16 | | 836 | return jhash_3words(addr, ((__force __u32)paddr->v4.sin_port) << 16 | |
837 | (__force __u32)lport, net_hash_mix(net), seed); | 837 | (__force __u32)lport, net_hash_mix(net), seed); |
838 | } | 838 | } |
839 | 839 | ||
840 | static inline u32 sctp_hash_key(const void *data, u32 len, u32 seed) | 840 | static inline __u32 sctp_hash_key(const void *data, u32 len, u32 seed) |
841 | { | 841 | { |
842 | const struct sctp_hash_cmp_arg *x = data; | 842 | const struct sctp_hash_cmp_arg *x = data; |
843 | const union sctp_addr *paddr = x->paddr; | 843 | const union sctp_addr *paddr = x->paddr; |
844 | const struct net *net = x->net; | 844 | const struct net *net = x->net; |
845 | u16 lport = x->lport; | 845 | __be16 lport = x->lport; |
846 | u32 addr; | 846 | __u32 addr; |
847 | 847 | ||
848 | if (paddr->sa.sa_family == AF_INET6) | 848 | if (paddr->sa.sa_family == AF_INET6) |
849 | addr = jhash(&paddr->v6.sin6_addr, 16, seed); | 849 | addr = jhash(&paddr->v6.sin6_addr, 16, seed); |
850 | else | 850 | else |
851 | addr = paddr->v4.sin_addr.s_addr; | 851 | addr = (__force __u32)paddr->v4.sin_addr.s_addr; |
852 | 852 | ||
853 | return jhash_3words(addr, ((__u32)paddr->v4.sin_port) << 16 | | 853 | return jhash_3words(addr, ((__force __u32)paddr->v4.sin_port) << 16 | |
854 | (__force __u32)lport, net_hash_mix(net), seed); | 854 | (__force __u32)lport, net_hash_mix(net), seed); |
855 | } | 855 | } |
856 | 856 | ||
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 51c488769590..a6dfa86c0201 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c | |||
@@ -738,7 +738,7 @@ static int sctp_v6_skb_iif(const struct sk_buff *skb) | |||
738 | /* Was this packet marked by Explicit Congestion Notification? */ | 738 | /* Was this packet marked by Explicit Congestion Notification? */ |
739 | static int sctp_v6_is_ce(const struct sk_buff *skb) | 739 | static int sctp_v6_is_ce(const struct sk_buff *skb) |
740 | { | 740 | { |
741 | return *((__u32 *)(ipv6_hdr(skb))) & htonl(1 << 20); | 741 | return *((__u32 *)(ipv6_hdr(skb))) & (__force __u32)htonl(1 << 20); |
742 | } | 742 | } |
743 | 743 | ||
744 | /* Dump the v6 addr to the seq file. */ | 744 | /* Dump the v6 addr to the seq file. */ |
@@ -882,8 +882,10 @@ static int sctp_inet6_bind_verify(struct sctp_sock *opt, union sctp_addr *addr) | |||
882 | net = sock_net(&opt->inet.sk); | 882 | net = sock_net(&opt->inet.sk); |
883 | rcu_read_lock(); | 883 | rcu_read_lock(); |
884 | dev = dev_get_by_index_rcu(net, addr->v6.sin6_scope_id); | 884 | dev = dev_get_by_index_rcu(net, addr->v6.sin6_scope_id); |
885 | if (!dev || | 885 | if (!dev || !(opt->inet.freebind || |
886 | !ipv6_chk_addr(net, &addr->v6.sin6_addr, dev, 0)) { | 886 | net->ipv6.sysctl.ip_nonlocal_bind || |
887 | ipv6_chk_addr(net, &addr->v6.sin6_addr, | ||
888 | dev, 0))) { | ||
887 | rcu_read_unlock(); | 889 | rcu_read_unlock(); |
888 | return 0; | 890 | return 0; |
889 | } | 891 | } |
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index ca8f196b6c6c..514465b03829 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
@@ -2854,7 +2854,7 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *asoc, | |||
2854 | addr_param_len = af->to_addr_param(addr, &addr_param); | 2854 | addr_param_len = af->to_addr_param(addr, &addr_param); |
2855 | param.param_hdr.type = flags; | 2855 | param.param_hdr.type = flags; |
2856 | param.param_hdr.length = htons(paramlen + addr_param_len); | 2856 | param.param_hdr.length = htons(paramlen + addr_param_len); |
2857 | param.crr_id = i; | 2857 | param.crr_id = htonl(i); |
2858 | 2858 | ||
2859 | sctp_addto_chunk(retval, paramlen, ¶m); | 2859 | sctp_addto_chunk(retval, paramlen, ¶m); |
2860 | sctp_addto_chunk(retval, addr_param_len, &addr_param); | 2860 | sctp_addto_chunk(retval, addr_param_len, &addr_param); |
@@ -2867,7 +2867,7 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *asoc, | |||
2867 | addr_param_len = af->to_addr_param(addr, &addr_param); | 2867 | addr_param_len = af->to_addr_param(addr, &addr_param); |
2868 | param.param_hdr.type = SCTP_PARAM_DEL_IP; | 2868 | param.param_hdr.type = SCTP_PARAM_DEL_IP; |
2869 | param.param_hdr.length = htons(paramlen + addr_param_len); | 2869 | param.param_hdr.length = htons(paramlen + addr_param_len); |
2870 | param.crr_id = i; | 2870 | param.crr_id = htonl(i); |
2871 | 2871 | ||
2872 | sctp_addto_chunk(retval, paramlen, ¶m); | 2872 | sctp_addto_chunk(retval, paramlen, ¶m); |
2873 | sctp_addto_chunk(retval, addr_param_len, &addr_param); | 2873 | sctp_addto_chunk(retval, addr_param_len, &addr_param); |
@@ -3591,7 +3591,7 @@ static struct sctp_chunk *sctp_make_reconf(const struct sctp_association *asoc, | |||
3591 | */ | 3591 | */ |
3592 | struct sctp_chunk *sctp_make_strreset_req( | 3592 | struct sctp_chunk *sctp_make_strreset_req( |
3593 | const struct sctp_association *asoc, | 3593 | const struct sctp_association *asoc, |
3594 | __u16 stream_num, __u16 *stream_list, | 3594 | __u16 stream_num, __be16 *stream_list, |
3595 | bool out, bool in) | 3595 | bool out, bool in) |
3596 | { | 3596 | { |
3597 | struct sctp_strreset_outreq outreq; | 3597 | struct sctp_strreset_outreq outreq; |
@@ -3788,7 +3788,8 @@ bool sctp_verify_reconf(const struct sctp_association *asoc, | |||
3788 | { | 3788 | { |
3789 | struct sctp_reconf_chunk *hdr; | 3789 | struct sctp_reconf_chunk *hdr; |
3790 | union sctp_params param; | 3790 | union sctp_params param; |
3791 | __u16 last = 0, cnt = 0; | 3791 | __be16 last = 0; |
3792 | __u16 cnt = 0; | ||
3792 | 3793 | ||
3793 | hdr = (struct sctp_reconf_chunk *)chunk->chunk_hdr; | 3794 | hdr = (struct sctp_reconf_chunk *)chunk->chunk_hdr; |
3794 | sctp_walk_params(param, hdr, params) { | 3795 | sctp_walk_params(param, hdr, params) { |
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index e6a2974e020e..e2d9a4b49c9c 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c | |||
@@ -1607,12 +1607,12 @@ static int sctp_cmd_interpreter(enum sctp_event event_type, | |||
1607 | break; | 1607 | break; |
1608 | 1608 | ||
1609 | case SCTP_CMD_INIT_FAILED: | 1609 | case SCTP_CMD_INIT_FAILED: |
1610 | sctp_cmd_init_failed(commands, asoc, cmd->obj.err); | 1610 | sctp_cmd_init_failed(commands, asoc, cmd->obj.u32); |
1611 | break; | 1611 | break; |
1612 | 1612 | ||
1613 | case SCTP_CMD_ASSOC_FAILED: | 1613 | case SCTP_CMD_ASSOC_FAILED: |
1614 | sctp_cmd_assoc_failed(commands, asoc, event_type, | 1614 | sctp_cmd_assoc_failed(commands, asoc, event_type, |
1615 | subtype, chunk, cmd->obj.err); | 1615 | subtype, chunk, cmd->obj.u32); |
1616 | break; | 1616 | break; |
1617 | 1617 | ||
1618 | case SCTP_CMD_INIT_COUNTER_INC: | 1618 | case SCTP_CMD_INIT_COUNTER_INC: |
@@ -1680,8 +1680,8 @@ static int sctp_cmd_interpreter(enum sctp_event event_type, | |||
1680 | case SCTP_CMD_PROCESS_CTSN: | 1680 | case SCTP_CMD_PROCESS_CTSN: |
1681 | /* Dummy up a SACK for processing. */ | 1681 | /* Dummy up a SACK for processing. */ |
1682 | sackh.cum_tsn_ack = cmd->obj.be32; | 1682 | sackh.cum_tsn_ack = cmd->obj.be32; |
1683 | sackh.a_rwnd = asoc->peer.rwnd + | 1683 | sackh.a_rwnd = htonl(asoc->peer.rwnd + |
1684 | asoc->outqueue.outstanding_bytes; | 1684 | asoc->outqueue.outstanding_bytes); |
1685 | sackh.num_gap_ack_blocks = 0; | 1685 | sackh.num_gap_ack_blocks = 0; |
1686 | sackh.num_dup_tsns = 0; | 1686 | sackh.num_dup_tsns = 0; |
1687 | chunk->subh.sack_hdr = &sackh; | 1687 | chunk->subh.sack_hdr = &sackh; |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index d4730ada7f32..6f45d1713452 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -170,6 +170,36 @@ static inline void sctp_set_owner_w(struct sctp_chunk *chunk) | |||
170 | sk_mem_charge(sk, chunk->skb->truesize); | 170 | sk_mem_charge(sk, chunk->skb->truesize); |
171 | } | 171 | } |
172 | 172 | ||
173 | static void sctp_clear_owner_w(struct sctp_chunk *chunk) | ||
174 | { | ||
175 | skb_orphan(chunk->skb); | ||
176 | } | ||
177 | |||
178 | static void sctp_for_each_tx_datachunk(struct sctp_association *asoc, | ||
179 | void (*cb)(struct sctp_chunk *)) | ||
180 | |||
181 | { | ||
182 | struct sctp_outq *q = &asoc->outqueue; | ||
183 | struct sctp_transport *t; | ||
184 | struct sctp_chunk *chunk; | ||
185 | |||
186 | list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) | ||
187 | list_for_each_entry(chunk, &t->transmitted, transmitted_list) | ||
188 | cb(chunk); | ||
189 | |||
190 | list_for_each_entry(chunk, &q->retransmit, list) | ||
191 | cb(chunk); | ||
192 | |||
193 | list_for_each_entry(chunk, &q->sacked, list) | ||
194 | cb(chunk); | ||
195 | |||
196 | list_for_each_entry(chunk, &q->abandoned, list) | ||
197 | cb(chunk); | ||
198 | |||
199 | list_for_each_entry(chunk, &q->out_chunk_list, list) | ||
200 | cb(chunk); | ||
201 | } | ||
202 | |||
173 | /* Verify that this is a valid address. */ | 203 | /* Verify that this is a valid address. */ |
174 | static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr, | 204 | static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr, |
175 | int len) | 205 | int len) |
@@ -4906,6 +4936,10 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp) | |||
4906 | struct socket *sock; | 4936 | struct socket *sock; |
4907 | int err = 0; | 4937 | int err = 0; |
4908 | 4938 | ||
4939 | /* Do not peel off from one netns to another one. */ | ||
4940 | if (!net_eq(current->nsproxy->net_ns, sock_net(sk))) | ||
4941 | return -EINVAL; | ||
4942 | |||
4909 | if (!asoc) | 4943 | if (!asoc) |
4910 | return -EINVAL; | 4944 | return -EINVAL; |
4911 | 4945 | ||
@@ -8208,7 +8242,9 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, | |||
8208 | * paths won't try to lock it and then oldsk. | 8242 | * paths won't try to lock it and then oldsk. |
8209 | */ | 8243 | */ |
8210 | lock_sock_nested(newsk, SINGLE_DEPTH_NESTING); | 8244 | lock_sock_nested(newsk, SINGLE_DEPTH_NESTING); |
8245 | sctp_for_each_tx_datachunk(assoc, sctp_clear_owner_w); | ||
8211 | sctp_assoc_migrate(assoc, newsk); | 8246 | sctp_assoc_migrate(assoc, newsk); |
8247 | sctp_for_each_tx_datachunk(assoc, sctp_set_owner_w); | ||
8212 | 8248 | ||
8213 | /* If the association on the newsk is already closed before accept() | 8249 | /* If the association on the newsk is already closed before accept() |
8214 | * is called, set RCV_SHUTDOWN flag. | 8250 | * is called, set RCV_SHUTDOWN flag. |
diff --git a/net/sctp/stream.c b/net/sctp/stream.c index 63ea15503714..fa8371ff05c4 100644 --- a/net/sctp/stream.c +++ b/net/sctp/stream.c | |||
@@ -118,6 +118,7 @@ int sctp_send_reset_streams(struct sctp_association *asoc, | |||
118 | __u16 i, str_nums, *str_list; | 118 | __u16 i, str_nums, *str_list; |
119 | struct sctp_chunk *chunk; | 119 | struct sctp_chunk *chunk; |
120 | int retval = -EINVAL; | 120 | int retval = -EINVAL; |
121 | __be16 *nstr_list; | ||
121 | bool out, in; | 122 | bool out, in; |
122 | 123 | ||
123 | if (!asoc->peer.reconf_capable || | 124 | if (!asoc->peer.reconf_capable || |
@@ -148,13 +149,18 @@ int sctp_send_reset_streams(struct sctp_association *asoc, | |||
148 | if (str_list[i] >= stream->incnt) | 149 | if (str_list[i] >= stream->incnt) |
149 | goto out; | 150 | goto out; |
150 | 151 | ||
152 | nstr_list = kcalloc(str_nums, sizeof(__be16), GFP_KERNEL); | ||
153 | if (!nstr_list) { | ||
154 | retval = -ENOMEM; | ||
155 | goto out; | ||
156 | } | ||
157 | |||
151 | for (i = 0; i < str_nums; i++) | 158 | for (i = 0; i < str_nums; i++) |
152 | str_list[i] = htons(str_list[i]); | 159 | nstr_list[i] = htons(str_list[i]); |
153 | 160 | ||
154 | chunk = sctp_make_strreset_req(asoc, str_nums, str_list, out, in); | 161 | chunk = sctp_make_strreset_req(asoc, str_nums, nstr_list, out, in); |
155 | 162 | ||
156 | for (i = 0; i < str_nums; i++) | 163 | kfree(nstr_list); |
157 | str_list[i] = ntohs(str_list[i]); | ||
158 | 164 | ||
159 | if (!chunk) { | 165 | if (!chunk) { |
160 | retval = -ENOMEM; | 166 | retval = -ENOMEM; |
@@ -305,7 +311,7 @@ out: | |||
305 | } | 311 | } |
306 | 312 | ||
307 | static struct sctp_paramhdr *sctp_chunk_lookup_strreset_param( | 313 | static struct sctp_paramhdr *sctp_chunk_lookup_strreset_param( |
308 | struct sctp_association *asoc, __u32 resp_seq, | 314 | struct sctp_association *asoc, __be32 resp_seq, |
309 | __be16 type) | 315 | __be16 type) |
310 | { | 316 | { |
311 | struct sctp_chunk *chunk = asoc->strreset_chunk; | 317 | struct sctp_chunk *chunk = asoc->strreset_chunk; |
@@ -345,8 +351,9 @@ struct sctp_chunk *sctp_process_strreset_outreq( | |||
345 | { | 351 | { |
346 | struct sctp_strreset_outreq *outreq = param.v; | 352 | struct sctp_strreset_outreq *outreq = param.v; |
347 | struct sctp_stream *stream = &asoc->stream; | 353 | struct sctp_stream *stream = &asoc->stream; |
348 | __u16 i, nums, flags = 0, *str_p = NULL; | ||
349 | __u32 result = SCTP_STRRESET_DENIED; | 354 | __u32 result = SCTP_STRRESET_DENIED; |
355 | __u16 i, nums, flags = 0; | ||
356 | __be16 *str_p = NULL; | ||
350 | __u32 request_seq; | 357 | __u32 request_seq; |
351 | 358 | ||
352 | request_seq = ntohl(outreq->request_seq); | 359 | request_seq = ntohl(outreq->request_seq); |
@@ -439,8 +446,9 @@ struct sctp_chunk *sctp_process_strreset_inreq( | |||
439 | struct sctp_stream *stream = &asoc->stream; | 446 | struct sctp_stream *stream = &asoc->stream; |
440 | __u32 result = SCTP_STRRESET_DENIED; | 447 | __u32 result = SCTP_STRRESET_DENIED; |
441 | struct sctp_chunk *chunk = NULL; | 448 | struct sctp_chunk *chunk = NULL; |
442 | __u16 i, nums, *str_p; | ||
443 | __u32 request_seq; | 449 | __u32 request_seq; |
450 | __u16 i, nums; | ||
451 | __be16 *str_p; | ||
444 | 452 | ||
445 | request_seq = ntohl(inreq->request_seq); | 453 | request_seq = ntohl(inreq->request_seq); |
446 | if (TSN_lt(asoc->strreset_inseq, request_seq) || | 454 | if (TSN_lt(asoc->strreset_inseq, request_seq) || |
@@ -769,7 +777,7 @@ struct sctp_chunk *sctp_process_strreset_resp( | |||
769 | 777 | ||
770 | if (req->type == SCTP_PARAM_RESET_OUT_REQUEST) { | 778 | if (req->type == SCTP_PARAM_RESET_OUT_REQUEST) { |
771 | struct sctp_strreset_outreq *outreq; | 779 | struct sctp_strreset_outreq *outreq; |
772 | __u16 *str_p; | 780 | __be16 *str_p; |
773 | 781 | ||
774 | outreq = (struct sctp_strreset_outreq *)req; | 782 | outreq = (struct sctp_strreset_outreq *)req; |
775 | str_p = outreq->list_of_streams; | 783 | str_p = outreq->list_of_streams; |
@@ -794,7 +802,7 @@ struct sctp_chunk *sctp_process_strreset_resp( | |||
794 | nums, str_p, GFP_ATOMIC); | 802 | nums, str_p, GFP_ATOMIC); |
795 | } else if (req->type == SCTP_PARAM_RESET_IN_REQUEST) { | 803 | } else if (req->type == SCTP_PARAM_RESET_IN_REQUEST) { |
796 | struct sctp_strreset_inreq *inreq; | 804 | struct sctp_strreset_inreq *inreq; |
797 | __u16 *str_p; | 805 | __be16 *str_p; |
798 | 806 | ||
799 | /* if the result is performed, it's impossible for inreq */ | 807 | /* if the result is performed, it's impossible for inreq */ |
800 | if (result == SCTP_STRRESET_PERFORMED) | 808 | if (result == SCTP_STRRESET_PERFORMED) |
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c index 67abc0194f30..5447228bf1a0 100644 --- a/net/sctp/ulpevent.c +++ b/net/sctp/ulpevent.c | |||
@@ -847,7 +847,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_sender_dry_event( | |||
847 | 847 | ||
848 | struct sctp_ulpevent *sctp_ulpevent_make_stream_reset_event( | 848 | struct sctp_ulpevent *sctp_ulpevent_make_stream_reset_event( |
849 | const struct sctp_association *asoc, __u16 flags, __u16 stream_num, | 849 | const struct sctp_association *asoc, __u16 flags, __u16 stream_num, |
850 | __u16 *stream_list, gfp_t gfp) | 850 | __be16 *stream_list, gfp_t gfp) |
851 | { | 851 | { |
852 | struct sctp_stream_reset_event *sreset; | 852 | struct sctp_stream_reset_event *sreset; |
853 | struct sctp_ulpevent *event; | 853 | struct sctp_ulpevent *event; |
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c index d4ea46a5f233..c5fda15ba319 100644 --- a/net/strparser/strparser.c +++ b/net/strparser/strparser.c | |||
@@ -49,7 +49,7 @@ static void strp_abort_strp(struct strparser *strp, int err) | |||
49 | { | 49 | { |
50 | /* Unrecoverable error in receive */ | 50 | /* Unrecoverable error in receive */ |
51 | 51 | ||
52 | del_timer(&strp->msg_timer); | 52 | cancel_delayed_work(&strp->msg_timer_work); |
53 | 53 | ||
54 | if (strp->stopped) | 54 | if (strp->stopped) |
55 | return; | 55 | return; |
@@ -68,7 +68,7 @@ static void strp_abort_strp(struct strparser *strp, int err) | |||
68 | static void strp_start_timer(struct strparser *strp, long timeo) | 68 | static void strp_start_timer(struct strparser *strp, long timeo) |
69 | { | 69 | { |
70 | if (timeo) | 70 | if (timeo) |
71 | mod_timer(&strp->msg_timer, timeo); | 71 | mod_delayed_work(strp_wq, &strp->msg_timer_work, timeo); |
72 | } | 72 | } |
73 | 73 | ||
74 | /* Lower lock held */ | 74 | /* Lower lock held */ |
@@ -319,7 +319,7 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb, | |||
319 | eaten += (cand_len - extra); | 319 | eaten += (cand_len - extra); |
320 | 320 | ||
321 | /* Hurray, we have a new message! */ | 321 | /* Hurray, we have a new message! */ |
322 | del_timer(&strp->msg_timer); | 322 | cancel_delayed_work(&strp->msg_timer_work); |
323 | strp->skb_head = NULL; | 323 | strp->skb_head = NULL; |
324 | STRP_STATS_INCR(strp->stats.msgs); | 324 | STRP_STATS_INCR(strp->stats.msgs); |
325 | 325 | ||
@@ -450,9 +450,10 @@ static void strp_work(struct work_struct *w) | |||
450 | do_strp_work(container_of(w, struct strparser, work)); | 450 | do_strp_work(container_of(w, struct strparser, work)); |
451 | } | 451 | } |
452 | 452 | ||
453 | static void strp_msg_timeout(unsigned long arg) | 453 | static void strp_msg_timeout(struct work_struct *w) |
454 | { | 454 | { |
455 | struct strparser *strp = (struct strparser *)arg; | 455 | struct strparser *strp = container_of(w, struct strparser, |
456 | msg_timer_work.work); | ||
456 | 457 | ||
457 | /* Message assembly timed out */ | 458 | /* Message assembly timed out */ |
458 | STRP_STATS_INCR(strp->stats.msg_timeouts); | 459 | STRP_STATS_INCR(strp->stats.msg_timeouts); |
@@ -505,9 +506,7 @@ int strp_init(struct strparser *strp, struct sock *sk, | |||
505 | strp->cb.read_sock_done = cb->read_sock_done ? : default_read_sock_done; | 506 | strp->cb.read_sock_done = cb->read_sock_done ? : default_read_sock_done; |
506 | strp->cb.abort_parser = cb->abort_parser ? : strp_abort_strp; | 507 | strp->cb.abort_parser = cb->abort_parser ? : strp_abort_strp; |
507 | 508 | ||
508 | setup_timer(&strp->msg_timer, strp_msg_timeout, | 509 | INIT_DELAYED_WORK(&strp->msg_timer_work, strp_msg_timeout); |
509 | (unsigned long)strp); | ||
510 | |||
511 | INIT_WORK(&strp->work, strp_work); | 510 | INIT_WORK(&strp->work, strp_work); |
512 | 511 | ||
513 | return 0; | 512 | return 0; |
@@ -532,7 +531,7 @@ void strp_done(struct strparser *strp) | |||
532 | { | 531 | { |
533 | WARN_ON(!strp->stopped); | 532 | WARN_ON(!strp->stopped); |
534 | 533 | ||
535 | del_timer_sync(&strp->msg_timer); | 534 | cancel_delayed_work_sync(&strp->msg_timer_work); |
536 | cancel_work_sync(&strp->work); | 535 | cancel_work_sync(&strp->work); |
537 | 536 | ||
538 | if (strp->skb_head) { | 537 | if (strp->skb_head) { |
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index e741ec2b4d8e..898485e3ece4 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
@@ -1333,7 +1333,7 @@ void xprt_release(struct rpc_task *task) | |||
1333 | rpc_count_iostats(task, task->tk_client->cl_metrics); | 1333 | rpc_count_iostats(task, task->tk_client->cl_metrics); |
1334 | spin_lock(&xprt->recv_lock); | 1334 | spin_lock(&xprt->recv_lock); |
1335 | if (!list_empty(&req->rq_list)) { | 1335 | if (!list_empty(&req->rq_list)) { |
1336 | list_del(&req->rq_list); | 1336 | list_del_init(&req->rq_list); |
1337 | xprt_wait_on_pinned_rqst(req); | 1337 | xprt_wait_on_pinned_rqst(req); |
1338 | } | 1338 | } |
1339 | spin_unlock(&xprt->recv_lock); | 1339 | spin_unlock(&xprt->recv_lock); |
@@ -1445,6 +1445,23 @@ out: | |||
1445 | return xprt; | 1445 | return xprt; |
1446 | } | 1446 | } |
1447 | 1447 | ||
1448 | static void xprt_destroy_cb(struct work_struct *work) | ||
1449 | { | ||
1450 | struct rpc_xprt *xprt = | ||
1451 | container_of(work, struct rpc_xprt, task_cleanup); | ||
1452 | |||
1453 | rpc_xprt_debugfs_unregister(xprt); | ||
1454 | rpc_destroy_wait_queue(&xprt->binding); | ||
1455 | rpc_destroy_wait_queue(&xprt->pending); | ||
1456 | rpc_destroy_wait_queue(&xprt->sending); | ||
1457 | rpc_destroy_wait_queue(&xprt->backlog); | ||
1458 | kfree(xprt->servername); | ||
1459 | /* | ||
1460 | * Tear down transport state and free the rpc_xprt | ||
1461 | */ | ||
1462 | xprt->ops->destroy(xprt); | ||
1463 | } | ||
1464 | |||
1448 | /** | 1465 | /** |
1449 | * xprt_destroy - destroy an RPC transport, killing off all requests. | 1466 | * xprt_destroy - destroy an RPC transport, killing off all requests. |
1450 | * @xprt: transport to destroy | 1467 | * @xprt: transport to destroy |
@@ -1454,22 +1471,19 @@ static void xprt_destroy(struct rpc_xprt *xprt) | |||
1454 | { | 1471 | { |
1455 | dprintk("RPC: destroying transport %p\n", xprt); | 1472 | dprintk("RPC: destroying transport %p\n", xprt); |
1456 | 1473 | ||
1457 | /* Exclude transport connect/disconnect handlers */ | 1474 | /* |
1475 | * Exclude transport connect/disconnect handlers and autoclose | ||
1476 | */ | ||
1458 | wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE); | 1477 | wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE); |
1459 | 1478 | ||
1460 | del_timer_sync(&xprt->timer); | 1479 | del_timer_sync(&xprt->timer); |
1461 | 1480 | ||
1462 | rpc_xprt_debugfs_unregister(xprt); | ||
1463 | rpc_destroy_wait_queue(&xprt->binding); | ||
1464 | rpc_destroy_wait_queue(&xprt->pending); | ||
1465 | rpc_destroy_wait_queue(&xprt->sending); | ||
1466 | rpc_destroy_wait_queue(&xprt->backlog); | ||
1467 | cancel_work_sync(&xprt->task_cleanup); | ||
1468 | kfree(xprt->servername); | ||
1469 | /* | 1481 | /* |
1470 | * Tear down transport state and free the rpc_xprt | 1482 | * Destroy sockets etc from the system workqueue so they can |
1483 | * safely flush receive work running on rpciod. | ||
1471 | */ | 1484 | */ |
1472 | xprt->ops->destroy(xprt); | 1485 | INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb); |
1486 | schedule_work(&xprt->task_cleanup); | ||
1473 | } | 1487 | } |
1474 | 1488 | ||
1475 | static void xprt_destroy_kref(struct kref *kref) | 1489 | static void xprt_destroy_kref(struct kref *kref) |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 9b5de31aa429..c1841f234a71 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -2203,7 +2203,7 @@ static void xs_udp_setup_socket(struct work_struct *work) | |||
2203 | struct sock_xprt *transport = | 2203 | struct sock_xprt *transport = |
2204 | container_of(work, struct sock_xprt, connect_worker.work); | 2204 | container_of(work, struct sock_xprt, connect_worker.work); |
2205 | struct rpc_xprt *xprt = &transport->xprt; | 2205 | struct rpc_xprt *xprt = &transport->xprt; |
2206 | struct socket *sock = transport->sock; | 2206 | struct socket *sock; |
2207 | int status = -EIO; | 2207 | int status = -EIO; |
2208 | 2208 | ||
2209 | sock = xs_create_sock(xprt, transport, | 2209 | sock = xs_create_sock(xprt, transport, |
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index 7d99029df342..a140dd4a84af 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c | |||
@@ -233,7 +233,7 @@ static int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts, | |||
233 | struct sk_buff_head xmitq; | 233 | struct sk_buff_head xmitq; |
234 | int rc = 0; | 234 | int rc = 0; |
235 | 235 | ||
236 | __skb_queue_head_init(&xmitq); | 236 | skb_queue_head_init(&xmitq); |
237 | tipc_bcast_lock(net); | 237 | tipc_bcast_lock(net); |
238 | if (tipc_link_bc_peers(l)) | 238 | if (tipc_link_bc_peers(l)) |
239 | rc = tipc_link_xmit(l, pkts, &xmitq); | 239 | rc = tipc_link_xmit(l, pkts, &xmitq); |
@@ -263,7 +263,7 @@ static int tipc_rcast_xmit(struct net *net, struct sk_buff_head *pkts, | |||
263 | u32 dst, selector; | 263 | u32 dst, selector; |
264 | 264 | ||
265 | selector = msg_link_selector(buf_msg(skb_peek(pkts))); | 265 | selector = msg_link_selector(buf_msg(skb_peek(pkts))); |
266 | __skb_queue_head_init(&_pkts); | 266 | skb_queue_head_init(&_pkts); |
267 | 267 | ||
268 | list_for_each_entry_safe(n, tmp, &dests->list, list) { | 268 | list_for_each_entry_safe(n, tmp, &dests->list, list) { |
269 | dst = n->value; | 269 | dst = n->value; |
diff --git a/net/tipc/msg.c b/net/tipc/msg.c index 121e59a1d0e7..17146c16ee2d 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c | |||
@@ -568,6 +568,14 @@ bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err) | |||
568 | msg_set_destnode(msg, dnode); | 568 | msg_set_destnode(msg, dnode); |
569 | msg_set_destport(msg, dport); | 569 | msg_set_destport(msg, dport); |
570 | *err = TIPC_OK; | 570 | *err = TIPC_OK; |
571 | |||
572 | if (!skb_cloned(skb)) | ||
573 | return true; | ||
574 | |||
575 | /* Unclone buffer in case it was bundled */ | ||
576 | if (pskb_expand_head(skb, BUF_HEADROOM, BUF_TAILROOM, GFP_ATOMIC)) | ||
577 | return false; | ||
578 | |||
571 | return true; | 579 | return true; |
572 | } | 580 | } |
573 | 581 | ||
diff --git a/net/unix/diag.c b/net/unix/diag.c index 4d9679701a6d..384c84e83462 100644 --- a/net/unix/diag.c +++ b/net/unix/diag.c | |||
@@ -257,6 +257,8 @@ static int unix_diag_get_exact(struct sk_buff *in_skb, | |||
257 | err = -ENOENT; | 257 | err = -ENOENT; |
258 | if (sk == NULL) | 258 | if (sk == NULL) |
259 | goto out_nosk; | 259 | goto out_nosk; |
260 | if (!net_eq(sock_net(sk), net)) | ||
261 | goto out; | ||
260 | 262 | ||
261 | err = sock_diag_check_cookie(sk, req->udiag_cookie); | 263 | err = sock_diag_check_cookie(sk, req->udiag_cookie); |
262 | if (err) | 264 | if (err) |
diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c index 14ed5a344cdf..e21991fe883a 100644 --- a/net/vmw_vsock/hyperv_transport.c +++ b/net/vmw_vsock/hyperv_transport.c | |||
@@ -310,11 +310,15 @@ static void hvs_close_connection(struct vmbus_channel *chan) | |||
310 | struct sock *sk = get_per_channel_state(chan); | 310 | struct sock *sk = get_per_channel_state(chan); |
311 | struct vsock_sock *vsk = vsock_sk(sk); | 311 | struct vsock_sock *vsk = vsock_sk(sk); |
312 | 312 | ||
313 | lock_sock(sk); | ||
314 | |||
313 | sk->sk_state = SS_UNCONNECTED; | 315 | sk->sk_state = SS_UNCONNECTED; |
314 | sock_set_flag(sk, SOCK_DONE); | 316 | sock_set_flag(sk, SOCK_DONE); |
315 | vsk->peer_shutdown |= SEND_SHUTDOWN | RCV_SHUTDOWN; | 317 | vsk->peer_shutdown |= SEND_SHUTDOWN | RCV_SHUTDOWN; |
316 | 318 | ||
317 | sk->sk_state_change(sk); | 319 | sk->sk_state_change(sk); |
320 | |||
321 | release_sock(sk); | ||
318 | } | 322 | } |
319 | 323 | ||
320 | static void hvs_open_connection(struct vmbus_channel *chan) | 324 | static void hvs_open_connection(struct vmbus_channel *chan) |
@@ -344,6 +348,8 @@ static void hvs_open_connection(struct vmbus_channel *chan) | |||
344 | if (!sk) | 348 | if (!sk) |
345 | return; | 349 | return; |
346 | 350 | ||
351 | lock_sock(sk); | ||
352 | |||
347 | if ((conn_from_host && sk->sk_state != VSOCK_SS_LISTEN) || | 353 | if ((conn_from_host && sk->sk_state != VSOCK_SS_LISTEN) || |
348 | (!conn_from_host && sk->sk_state != SS_CONNECTING)) | 354 | (!conn_from_host && sk->sk_state != SS_CONNECTING)) |
349 | goto out; | 355 | goto out; |
@@ -395,9 +401,7 @@ static void hvs_open_connection(struct vmbus_channel *chan) | |||
395 | 401 | ||
396 | vsock_insert_connected(vnew); | 402 | vsock_insert_connected(vnew); |
397 | 403 | ||
398 | lock_sock(sk); | ||
399 | vsock_enqueue_accept(sk, new); | 404 | vsock_enqueue_accept(sk, new); |
400 | release_sock(sk); | ||
401 | } else { | 405 | } else { |
402 | sk->sk_state = SS_CONNECTED; | 406 | sk->sk_state = SS_CONNECTED; |
403 | sk->sk_socket->state = SS_CONNECTED; | 407 | sk->sk_socket->state = SS_CONNECTED; |
@@ -410,6 +414,8 @@ static void hvs_open_connection(struct vmbus_channel *chan) | |||
410 | out: | 414 | out: |
411 | /* Release refcnt obtained when we called vsock_find_bound_socket() */ | 415 | /* Release refcnt obtained when we called vsock_find_bound_socket() */ |
412 | sock_put(sk); | 416 | sock_put(sk); |
417 | |||
418 | release_sock(sk); | ||
413 | } | 419 | } |
414 | 420 | ||
415 | static u32 hvs_get_local_cid(void) | 421 | static u32 hvs_get_local_cid(void) |
@@ -476,13 +482,21 @@ out: | |||
476 | 482 | ||
477 | static void hvs_release(struct vsock_sock *vsk) | 483 | static void hvs_release(struct vsock_sock *vsk) |
478 | { | 484 | { |
485 | struct sock *sk = sk_vsock(vsk); | ||
479 | struct hvsock *hvs = vsk->trans; | 486 | struct hvsock *hvs = vsk->trans; |
480 | struct vmbus_channel *chan = hvs->chan; | 487 | struct vmbus_channel *chan; |
481 | 488 | ||
489 | lock_sock(sk); | ||
490 | |||
491 | sk->sk_state = SS_DISCONNECTING; | ||
492 | vsock_remove_sock(vsk); | ||
493 | |||
494 | release_sock(sk); | ||
495 | |||
496 | chan = hvs->chan; | ||
482 | if (chan) | 497 | if (chan) |
483 | hvs_shutdown(vsk, RCV_SHUTDOWN | SEND_SHUTDOWN); | 498 | hvs_shutdown(vsk, RCV_SHUTDOWN | SEND_SHUTDOWN); |
484 | 499 | ||
485 | vsock_remove_sock(vsk); | ||
486 | } | 500 | } |
487 | 501 | ||
488 | static void hvs_destruct(struct vsock_sock *vsk) | 502 | static void hvs_destruct(struct vsock_sock *vsk) |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 690874293cfc..d396cb61a280 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -549,6 +549,14 @@ nl80211_nan_srf_policy[NL80211_NAN_SRF_ATTR_MAX + 1] = { | |||
549 | [NL80211_NAN_SRF_MAC_ADDRS] = { .type = NLA_NESTED }, | 549 | [NL80211_NAN_SRF_MAC_ADDRS] = { .type = NLA_NESTED }, |
550 | }; | 550 | }; |
551 | 551 | ||
552 | /* policy for packet pattern attributes */ | ||
553 | static const struct nla_policy | ||
554 | nl80211_packet_pattern_policy[MAX_NL80211_PKTPAT + 1] = { | ||
555 | [NL80211_PKTPAT_MASK] = { .type = NLA_BINARY, }, | ||
556 | [NL80211_PKTPAT_PATTERN] = { .type = NLA_BINARY, }, | ||
557 | [NL80211_PKTPAT_OFFSET] = { .type = NLA_U32 }, | ||
558 | }; | ||
559 | |||
552 | static int nl80211_prepare_wdev_dump(struct sk_buff *skb, | 560 | static int nl80211_prepare_wdev_dump(struct sk_buff *skb, |
553 | struct netlink_callback *cb, | 561 | struct netlink_callback *cb, |
554 | struct cfg80211_registered_device **rdev, | 562 | struct cfg80211_registered_device **rdev, |
@@ -10532,7 +10540,8 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info) | |||
10532 | u8 *mask_pat; | 10540 | u8 *mask_pat; |
10533 | 10541 | ||
10534 | nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, | 10542 | nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, |
10535 | NULL, info->extack); | 10543 | nl80211_packet_pattern_policy, |
10544 | info->extack); | ||
10536 | err = -EINVAL; | 10545 | err = -EINVAL; |
10537 | if (!pat_tb[NL80211_PKTPAT_MASK] || | 10546 | if (!pat_tb[NL80211_PKTPAT_MASK] || |
10538 | !pat_tb[NL80211_PKTPAT_PATTERN]) | 10547 | !pat_tb[NL80211_PKTPAT_PATTERN]) |
@@ -10781,7 +10790,8 @@ static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev, | |||
10781 | rem) { | 10790 | rem) { |
10782 | u8 *mask_pat; | 10791 | u8 *mask_pat; |
10783 | 10792 | ||
10784 | nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, NULL, NULL); | 10793 | nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat, |
10794 | nl80211_packet_pattern_policy, NULL); | ||
10785 | if (!pat_tb[NL80211_PKTPAT_MASK] || | 10795 | if (!pat_tb[NL80211_PKTPAT_MASK] || |
10786 | !pat_tb[NL80211_PKTPAT_PATTERN]) | 10796 | !pat_tb[NL80211_PKTPAT_PATTERN]) |
10787 | return -EINVAL; | 10797 | return -EINVAL; |
diff --git a/net/wireless/sme.c b/net/wireless/sme.c index 0a49b88070d0..b6533ecbf5b1 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c | |||
@@ -522,11 +522,6 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev, | |||
522 | return -EOPNOTSUPP; | 522 | return -EOPNOTSUPP; |
523 | 523 | ||
524 | if (wdev->current_bss) { | 524 | if (wdev->current_bss) { |
525 | if (!prev_bssid) | ||
526 | return -EALREADY; | ||
527 | if (prev_bssid && | ||
528 | !ether_addr_equal(prev_bssid, wdev->current_bss->pub.bssid)) | ||
529 | return -ENOTCONN; | ||
530 | cfg80211_unhold_bss(wdev->current_bss); | 525 | cfg80211_unhold_bss(wdev->current_bss); |
531 | cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub); | 526 | cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub); |
532 | wdev->current_bss = NULL; | 527 | wdev->current_bss = NULL; |
@@ -1063,11 +1058,35 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev, | |||
1063 | 1058 | ||
1064 | ASSERT_WDEV_LOCK(wdev); | 1059 | ASSERT_WDEV_LOCK(wdev); |
1065 | 1060 | ||
1066 | if (WARN_ON(wdev->connect_keys)) { | 1061 | /* |
1067 | kzfree(wdev->connect_keys); | 1062 | * If we have an ssid_len, we're trying to connect or are |
1068 | wdev->connect_keys = NULL; | 1063 | * already connected, so reject a new SSID unless it's the |
1064 | * same (which is the case for re-association.) | ||
1065 | */ | ||
1066 | if (wdev->ssid_len && | ||
1067 | (wdev->ssid_len != connect->ssid_len || | ||
1068 | memcmp(wdev->ssid, connect->ssid, wdev->ssid_len))) | ||
1069 | return -EALREADY; | ||
1070 | |||
1071 | /* | ||
1072 | * If connected, reject (re-)association unless prev_bssid | ||
1073 | * matches the current BSSID. | ||
1074 | */ | ||
1075 | if (wdev->current_bss) { | ||
1076 | if (!prev_bssid) | ||
1077 | return -EALREADY; | ||
1078 | if (!ether_addr_equal(prev_bssid, wdev->current_bss->pub.bssid)) | ||
1079 | return -ENOTCONN; | ||
1069 | } | 1080 | } |
1070 | 1081 | ||
1082 | /* | ||
1083 | * Reject if we're in the process of connecting with WEP, | ||
1084 | * this case isn't very interesting and trying to handle | ||
1085 | * it would make the code much more complex. | ||
1086 | */ | ||
1087 | if (wdev->connect_keys) | ||
1088 | return -EINPROGRESS; | ||
1089 | |||
1071 | cfg80211_oper_and_ht_capa(&connect->ht_capa_mask, | 1090 | cfg80211_oper_and_ht_capa(&connect->ht_capa_mask, |
1072 | rdev->wiphy.ht_capa_mod_mask); | 1091 | rdev->wiphy.ht_capa_mod_mask); |
1073 | 1092 | ||
@@ -1118,7 +1137,12 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev, | |||
1118 | 1137 | ||
1119 | if (err) { | 1138 | if (err) { |
1120 | wdev->connect_keys = NULL; | 1139 | wdev->connect_keys = NULL; |
1121 | wdev->ssid_len = 0; | 1140 | /* |
1141 | * This could be reassoc getting refused, don't clear | ||
1142 | * ssid_len in that case. | ||
1143 | */ | ||
1144 | if (!wdev->current_bss) | ||
1145 | wdev->ssid_len = 0; | ||
1122 | return err; | 1146 | return err; |
1123 | } | 1147 | } |
1124 | 1148 | ||
@@ -1145,6 +1169,14 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev, | |||
1145 | else if (wdev->ssid_len) | 1169 | else if (wdev->ssid_len) |
1146 | err = rdev_disconnect(rdev, dev, reason); | 1170 | err = rdev_disconnect(rdev, dev, reason); |
1147 | 1171 | ||
1172 | /* | ||
1173 | * Clear ssid_len unless we actually were fully connected, | ||
1174 | * in which case cfg80211_disconnected() will take care of | ||
1175 | * this later. | ||
1176 | */ | ||
1177 | if (!wdev->current_bss) | ||
1178 | wdev->ssid_len = 0; | ||
1179 | |||
1148 | return err; | 1180 | return err; |
1149 | } | 1181 | } |
1150 | 1182 | ||
diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index acf00104ef31..30e5746085b8 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c | |||
@@ -91,6 +91,7 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, | |||
91 | } | 91 | } |
92 | 92 | ||
93 | if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) { | 93 | if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) { |
94 | xso->dev = NULL; | ||
94 | dev_put(dev); | 95 | dev_put(dev); |
95 | return 0; | 96 | return 0; |
96 | } | 97 | } |
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index 2515cd2bc5db..8ac9d32fb79d 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c | |||
@@ -429,7 +429,8 @@ resume: | |||
429 | nf_reset(skb); | 429 | nf_reset(skb); |
430 | 430 | ||
431 | if (decaps) { | 431 | if (decaps) { |
432 | skb->sp->olen = 0; | 432 | if (skb->sp) |
433 | skb->sp->olen = 0; | ||
433 | skb_dst_drop(skb); | 434 | skb_dst_drop(skb); |
434 | gro_cells_receive(&gro_cells, skb); | 435 | gro_cells_receive(&gro_cells, skb); |
435 | return 0; | 436 | return 0; |
@@ -440,7 +441,8 @@ resume: | |||
440 | 441 | ||
441 | err = x->inner_mode->afinfo->transport_finish(skb, xfrm_gro || async); | 442 | err = x->inner_mode->afinfo->transport_finish(skb, xfrm_gro || async); |
442 | if (xfrm_gro) { | 443 | if (xfrm_gro) { |
443 | skb->sp->olen = 0; | 444 | if (skb->sp) |
445 | skb->sp->olen = 0; | ||
444 | skb_dst_drop(skb); | 446 | skb_dst_drop(skb); |
445 | gro_cells_receive(&gro_cells, skb); | 447 | gro_cells_receive(&gro_cells, skb); |
446 | return err; | 448 | return err; |
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index f06253969972..2746b62a8944 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -1573,6 +1573,14 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, | |||
1573 | goto put_states; | 1573 | goto put_states; |
1574 | } | 1574 | } |
1575 | 1575 | ||
1576 | if (!dst_prev) | ||
1577 | dst0 = dst1; | ||
1578 | else | ||
1579 | /* Ref count is taken during xfrm_alloc_dst() | ||
1580 | * No need to do dst_clone() on dst1 | ||
1581 | */ | ||
1582 | dst_prev->child = dst1; | ||
1583 | |||
1576 | if (xfrm[i]->sel.family == AF_UNSPEC) { | 1584 | if (xfrm[i]->sel.family == AF_UNSPEC) { |
1577 | inner_mode = xfrm_ip2inner_mode(xfrm[i], | 1585 | inner_mode = xfrm_ip2inner_mode(xfrm[i], |
1578 | xfrm_af2proto(family)); | 1586 | xfrm_af2proto(family)); |
@@ -1584,14 +1592,6 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, | |||
1584 | } else | 1592 | } else |
1585 | inner_mode = xfrm[i]->inner_mode; | 1593 | inner_mode = xfrm[i]->inner_mode; |
1586 | 1594 | ||
1587 | if (!dst_prev) | ||
1588 | dst0 = dst1; | ||
1589 | else | ||
1590 | /* Ref count is taken during xfrm_alloc_dst() | ||
1591 | * No need to do dst_clone() on dst1 | ||
1592 | */ | ||
1593 | dst_prev->child = dst1; | ||
1594 | |||
1595 | xdst->route = dst; | 1595 | xdst->route = dst; |
1596 | dst_copy_metrics(dst1, dst); | 1596 | dst_copy_metrics(dst1, dst); |
1597 | 1597 | ||
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 0dab1cd79ce4..12213477cd3a 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c | |||
@@ -732,12 +732,12 @@ restart: | |||
732 | } | 732 | } |
733 | } | 733 | } |
734 | } | 734 | } |
735 | out: | ||
736 | spin_unlock_bh(&net->xfrm.xfrm_state_lock); | ||
735 | if (cnt) { | 737 | if (cnt) { |
736 | err = 0; | 738 | err = 0; |
737 | xfrm_policy_cache_flush(); | 739 | xfrm_policy_cache_flush(); |
738 | } | 740 | } |
739 | out: | ||
740 | spin_unlock_bh(&net->xfrm.xfrm_state_lock); | ||
741 | return err; | 741 | return err; |
742 | } | 742 | } |
743 | EXPORT_SYMBOL(xfrm_state_flush); | 743 | EXPORT_SYMBOL(xfrm_state_flush); |
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 2bfbd9121e3b..e44a0fed48dd 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c | |||
@@ -657,6 +657,7 @@ static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
657 | 657 | ||
658 | if (err < 0) { | 658 | if (err < 0) { |
659 | x->km.state = XFRM_STATE_DEAD; | 659 | x->km.state = XFRM_STATE_DEAD; |
660 | xfrm_dev_state_delete(x); | ||
660 | __xfrm_state_put(x); | 661 | __xfrm_state_put(x); |
661 | goto out; | 662 | goto out; |
662 | } | 663 | } |
@@ -1692,32 +1693,34 @@ static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr | |||
1692 | 1693 | ||
1693 | static int xfrm_dump_policy_done(struct netlink_callback *cb) | 1694 | static int xfrm_dump_policy_done(struct netlink_callback *cb) |
1694 | { | 1695 | { |
1695 | struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1]; | 1696 | struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args; |
1696 | struct net *net = sock_net(cb->skb->sk); | 1697 | struct net *net = sock_net(cb->skb->sk); |
1697 | 1698 | ||
1698 | xfrm_policy_walk_done(walk, net); | 1699 | xfrm_policy_walk_done(walk, net); |
1699 | return 0; | 1700 | return 0; |
1700 | } | 1701 | } |
1701 | 1702 | ||
1703 | static int xfrm_dump_policy_start(struct netlink_callback *cb) | ||
1704 | { | ||
1705 | struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args; | ||
1706 | |||
1707 | BUILD_BUG_ON(sizeof(*walk) > sizeof(cb->args)); | ||
1708 | |||
1709 | xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY); | ||
1710 | return 0; | ||
1711 | } | ||
1712 | |||
1702 | static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb) | 1713 | static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb) |
1703 | { | 1714 | { |
1704 | struct net *net = sock_net(skb->sk); | 1715 | struct net *net = sock_net(skb->sk); |
1705 | struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *) &cb->args[1]; | 1716 | struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args; |
1706 | struct xfrm_dump_info info; | 1717 | struct xfrm_dump_info info; |
1707 | 1718 | ||
1708 | BUILD_BUG_ON(sizeof(struct xfrm_policy_walk) > | ||
1709 | sizeof(cb->args) - sizeof(cb->args[0])); | ||
1710 | |||
1711 | info.in_skb = cb->skb; | 1719 | info.in_skb = cb->skb; |
1712 | info.out_skb = skb; | 1720 | info.out_skb = skb; |
1713 | info.nlmsg_seq = cb->nlh->nlmsg_seq; | 1721 | info.nlmsg_seq = cb->nlh->nlmsg_seq; |
1714 | info.nlmsg_flags = NLM_F_MULTI; | 1722 | info.nlmsg_flags = NLM_F_MULTI; |
1715 | 1723 | ||
1716 | if (!cb->args[0]) { | ||
1717 | cb->args[0] = 1; | ||
1718 | xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY); | ||
1719 | } | ||
1720 | |||
1721 | (void) xfrm_policy_walk(net, walk, dump_one_policy, &info); | 1724 | (void) xfrm_policy_walk(net, walk, dump_one_policy, &info); |
1722 | 1725 | ||
1723 | return skb->len; | 1726 | return skb->len; |
@@ -2473,6 +2476,7 @@ static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = { | |||
2473 | 2476 | ||
2474 | static const struct xfrm_link { | 2477 | static const struct xfrm_link { |
2475 | int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **); | 2478 | int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **); |
2479 | int (*start)(struct netlink_callback *); | ||
2476 | int (*dump)(struct sk_buff *, struct netlink_callback *); | 2480 | int (*dump)(struct sk_buff *, struct netlink_callback *); |
2477 | int (*done)(struct netlink_callback *); | 2481 | int (*done)(struct netlink_callback *); |
2478 | const struct nla_policy *nla_pol; | 2482 | const struct nla_policy *nla_pol; |
@@ -2486,6 +2490,7 @@ static const struct xfrm_link { | |||
2486 | [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy }, | 2490 | [XFRM_MSG_NEWPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_add_policy }, |
2487 | [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy }, | 2491 | [XFRM_MSG_DELPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy }, |
2488 | [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy, | 2492 | [XFRM_MSG_GETPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_get_policy, |
2493 | .start = xfrm_dump_policy_start, | ||
2489 | .dump = xfrm_dump_policy, | 2494 | .dump = xfrm_dump_policy, |
2490 | .done = xfrm_dump_policy_done }, | 2495 | .done = xfrm_dump_policy_done }, |
2491 | [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi }, | 2496 | [XFRM_MSG_ALLOCSPI - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi }, |
@@ -2538,6 +2543,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, | |||
2538 | 2543 | ||
2539 | { | 2544 | { |
2540 | struct netlink_dump_control c = { | 2545 | struct netlink_dump_control c = { |
2546 | .start = link->start, | ||
2541 | .dump = link->dump, | 2547 | .dump = link->dump, |
2542 | .done = link->done, | 2548 | .done = link->done, |
2543 | }; | 2549 | }; |
diff --git a/samples/sockmap/sockmap_kern.c b/samples/sockmap/sockmap_kern.c index f9b38ef82dc2..52b0053274f4 100644 --- a/samples/sockmap/sockmap_kern.c +++ b/samples/sockmap/sockmap_kern.c | |||
@@ -62,7 +62,7 @@ int bpf_prog2(struct __sk_buff *skb) | |||
62 | ret = 1; | 62 | ret = 1; |
63 | 63 | ||
64 | bpf_printk("sockmap: %d -> %d @ %d\n", lport, bpf_ntohl(rport), ret); | 64 | bpf_printk("sockmap: %d -> %d @ %d\n", lport, bpf_ntohl(rport), ret); |
65 | return bpf_sk_redirect_map(&sock_map, ret, 0); | 65 | return bpf_sk_redirect_map(skb, &sock_map, ret, 0); |
66 | } | 66 | } |
67 | 67 | ||
68 | SEC("sockops") | 68 | SEC("sockops") |
diff --git a/samples/trace_events/trace-events-sample.c b/samples/trace_events/trace-events-sample.c index bc7fcf010a5b..5522692100ba 100644 --- a/samples/trace_events/trace-events-sample.c +++ b/samples/trace_events/trace-events-sample.c | |||
@@ -78,29 +78,37 @@ static int simple_thread_fn(void *arg) | |||
78 | } | 78 | } |
79 | 79 | ||
80 | static DEFINE_MUTEX(thread_mutex); | 80 | static DEFINE_MUTEX(thread_mutex); |
81 | static int simple_thread_cnt; | ||
81 | 82 | ||
82 | int foo_bar_reg(void) | 83 | int foo_bar_reg(void) |
83 | { | 84 | { |
85 | mutex_lock(&thread_mutex); | ||
86 | if (simple_thread_cnt++) | ||
87 | goto out; | ||
88 | |||
84 | pr_info("Starting thread for foo_bar_fn\n"); | 89 | pr_info("Starting thread for foo_bar_fn\n"); |
85 | /* | 90 | /* |
86 | * We shouldn't be able to start a trace when the module is | 91 | * We shouldn't be able to start a trace when the module is |
87 | * unloading (there's other locks to prevent that). But | 92 | * unloading (there's other locks to prevent that). But |
88 | * for consistency sake, we still take the thread_mutex. | 93 | * for consistency sake, we still take the thread_mutex. |
89 | */ | 94 | */ |
90 | mutex_lock(&thread_mutex); | ||
91 | simple_tsk_fn = kthread_run(simple_thread_fn, NULL, "event-sample-fn"); | 95 | simple_tsk_fn = kthread_run(simple_thread_fn, NULL, "event-sample-fn"); |
96 | out: | ||
92 | mutex_unlock(&thread_mutex); | 97 | mutex_unlock(&thread_mutex); |
93 | return 0; | 98 | return 0; |
94 | } | 99 | } |
95 | 100 | ||
96 | void foo_bar_unreg(void) | 101 | void foo_bar_unreg(void) |
97 | { | 102 | { |
98 | pr_info("Killing thread for foo_bar_fn\n"); | ||
99 | /* protect against module unloading */ | ||
100 | mutex_lock(&thread_mutex); | 103 | mutex_lock(&thread_mutex); |
104 | if (--simple_thread_cnt) | ||
105 | goto out; | ||
106 | |||
107 | pr_info("Killing thread for foo_bar_fn\n"); | ||
101 | if (simple_tsk_fn) | 108 | if (simple_tsk_fn) |
102 | kthread_stop(simple_tsk_fn); | 109 | kthread_stop(simple_tsk_fn); |
103 | simple_tsk_fn = NULL; | 110 | simple_tsk_fn = NULL; |
111 | out: | ||
104 | mutex_unlock(&thread_mutex); | 112 | mutex_unlock(&thread_mutex); |
105 | } | 113 | } |
106 | 114 | ||
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost index 16923ba4b5b1..756d14f0d763 100644 --- a/scripts/Makefile.modpost +++ b/scripts/Makefile.modpost | |||
@@ -97,7 +97,6 @@ vmlinux.o: FORCE | |||
97 | $(call cmd,kernel-mod) | 97 | $(call cmd,kernel-mod) |
98 | 98 | ||
99 | # Declare generated files as targets for modpost | 99 | # Declare generated files as targets for modpost |
100 | $(symverfile): __modpost ; | ||
101 | $(modules:.ko=.mod.c): __modpost ; | 100 | $(modules:.ko=.mod.c): __modpost ; |
102 | 101 | ||
103 | 102 | ||
diff --git a/scripts/faddr2line b/scripts/faddr2line index 29df825d375c..2f6ce802397d 100755 --- a/scripts/faddr2line +++ b/scripts/faddr2line | |||
@@ -103,11 +103,12 @@ __faddr2line() { | |||
103 | 103 | ||
104 | # Go through each of the object's symbols which match the func name. | 104 | # Go through each of the object's symbols which match the func name. |
105 | # In rare cases there might be duplicates. | 105 | # In rare cases there might be duplicates. |
106 | file_end=$(size -Ax $objfile | awk '$1 == ".text" {print $2}') | ||
106 | while read symbol; do | 107 | while read symbol; do |
107 | local fields=($symbol) | 108 | local fields=($symbol) |
108 | local sym_base=0x${fields[0]} | 109 | local sym_base=0x${fields[0]} |
109 | local sym_type=${fields[1]} | 110 | local sym_type=${fields[1]} |
110 | local sym_end=0x${fields[3]} | 111 | local sym_end=${fields[3]} |
111 | 112 | ||
112 | # calculate the size | 113 | # calculate the size |
113 | local sym_size=$(($sym_end - $sym_base)) | 114 | local sym_size=$(($sym_end - $sym_base)) |
@@ -157,7 +158,7 @@ __faddr2line() { | |||
157 | addr2line -fpie $objfile $addr | sed "s; $dir_prefix\(\./\)*; ;" | 158 | addr2line -fpie $objfile $addr | sed "s; $dir_prefix\(\./\)*; ;" |
158 | DONE=1 | 159 | DONE=1 |
159 | 160 | ||
160 | done < <(nm -n $objfile | awk -v fn=$func '$3 == fn { found=1; line=$0; start=$1; next } found == 1 { found=0; print line, $1 }') | 161 | done < <(nm -n $objfile | awk -v fn=$func -v end=$file_end '$3 == fn { found=1; line=$0; start=$1; next } found == 1 { found=0; print line, "0x"$1 } END {if (found == 1) print line, end; }') |
161 | } | 162 | } |
162 | 163 | ||
163 | [[ $# -lt 2 ]] && usage | 164 | [[ $# -lt 2 ]] && usage |
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c index 5d554419170b..9ee9bf7fd1a2 100644 --- a/scripts/kallsyms.c +++ b/scripts/kallsyms.c | |||
@@ -158,7 +158,7 @@ static int read_symbol(FILE *in, struct sym_entry *s) | |||
158 | else if (str[0] == '$') | 158 | else if (str[0] == '$') |
159 | return -1; | 159 | return -1; |
160 | /* exclude debugging symbols */ | 160 | /* exclude debugging symbols */ |
161 | else if (stype == 'N') | 161 | else if (stype == 'N' || stype == 'n') |
162 | return -1; | 162 | return -1; |
163 | 163 | ||
164 | /* include the type field in the symbol name, so that it gets | 164 | /* include the type field in the symbol name, so that it gets |
diff --git a/security/apparmor/.gitignore b/security/apparmor/.gitignore index d5b291e94264..9cdec70d72b8 100644 --- a/security/apparmor/.gitignore +++ b/security/apparmor/.gitignore | |||
@@ -1,6 +1,5 @@ | |||
1 | # | 1 | # |
2 | # Generated include files | 2 | # Generated include files |
3 | # | 3 | # |
4 | net_names.h | ||
5 | capability_names.h | 4 | capability_names.h |
6 | rlim_names.h | 5 | rlim_names.h |
diff --git a/security/apparmor/Makefile b/security/apparmor/Makefile index dafdd387d42b..81a34426d024 100644 --- a/security/apparmor/Makefile +++ b/security/apparmor/Makefile | |||
@@ -4,44 +4,11 @@ obj-$(CONFIG_SECURITY_APPARMOR) += apparmor.o | |||
4 | 4 | ||
5 | apparmor-y := apparmorfs.o audit.o capability.o context.o ipc.o lib.o match.o \ | 5 | apparmor-y := apparmorfs.o audit.o capability.o context.o ipc.o lib.o match.o \ |
6 | path.o domain.o policy.o policy_unpack.o procattr.o lsm.o \ | 6 | path.o domain.o policy.o policy_unpack.o procattr.o lsm.o \ |
7 | resource.o secid.o file.o policy_ns.o label.o mount.o net.o | 7 | resource.o secid.o file.o policy_ns.o label.o mount.o |
8 | apparmor-$(CONFIG_SECURITY_APPARMOR_HASH) += crypto.o | 8 | apparmor-$(CONFIG_SECURITY_APPARMOR_HASH) += crypto.o |
9 | 9 | ||
10 | clean-files := capability_names.h rlim_names.h net_names.h | 10 | clean-files := capability_names.h rlim_names.h |
11 | 11 | ||
12 | # Build a lower case string table of address family names | ||
13 | # Transform lines from | ||
14 | # #define AF_LOCAL 1 /* POSIX name for AF_UNIX */ | ||
15 | # #define AF_INET 2 /* Internet IP Protocol */ | ||
16 | # to | ||
17 | # [1] = "local", | ||
18 | # [2] = "inet", | ||
19 | # | ||
20 | # and build the securityfs entries for the mapping. | ||
21 | # Transforms lines from | ||
22 | # #define AF_INET 2 /* Internet IP Protocol */ | ||
23 | # to | ||
24 | # #define AA_SFS_AF_MASK "local inet" | ||
25 | quiet_cmd_make-af = GEN $@ | ||
26 | cmd_make-af = echo "static const char *address_family_names[] = {" > $@ ;\ | ||
27 | sed $< >>$@ -r -n -e "/AF_MAX/d" -e "/AF_LOCAL/d" -e "/AF_ROUTE/d" -e \ | ||
28 | 's/^\#define[ \t]+AF_([A-Z0-9_]+)[ \t]+([0-9]+)(.*)/[\2] = "\L\1",/p';\ | ||
29 | echo "};" >> $@ ;\ | ||
30 | printf '%s' '\#define AA_SFS_AF_MASK "' >> $@ ;\ | ||
31 | sed -r -n -e "/AF_MAX/d" -e "/AF_LOCAL/d" -e "/AF_ROUTE/d" -e \ | ||
32 | 's/^\#define[ \t]+AF_([A-Z0-9_]+)[ \t]+([0-9]+)(.*)/\L\1/p'\ | ||
33 | $< | tr '\n' ' ' | sed -e 's/ $$/"\n/' >> $@ | ||
34 | |||
35 | # Build a lower case string table of sock type names | ||
36 | # Transform lines from | ||
37 | # SOCK_STREAM = 1, | ||
38 | # to | ||
39 | # [1] = "stream", | ||
40 | quiet_cmd_make-sock = GEN $@ | ||
41 | cmd_make-sock = echo "static const char *sock_type_names[] = {" >> $@ ;\ | ||
42 | sed $^ >>$@ -r -n \ | ||
43 | -e 's/^\tSOCK_([A-Z0-9_]+)[\t]+=[ \t]+([0-9]+)(.*)/[\2] = "\L\1",/p';\ | ||
44 | echo "};" >> $@ | ||
45 | 12 | ||
46 | # Build a lower case string table of capability names | 13 | # Build a lower case string table of capability names |
47 | # Transforms lines from | 14 | # Transforms lines from |
@@ -94,7 +61,6 @@ cmd_make-rlim = echo "static const char *const rlim_names[RLIM_NLIMITS] = {" \ | |||
94 | tr '\n' ' ' | sed -e 's/ $$/"\n/' >> $@ | 61 | tr '\n' ' ' | sed -e 's/ $$/"\n/' >> $@ |
95 | 62 | ||
96 | $(obj)/capability.o : $(obj)/capability_names.h | 63 | $(obj)/capability.o : $(obj)/capability_names.h |
97 | $(obj)/net.o : $(obj)/net_names.h | ||
98 | $(obj)/resource.o : $(obj)/rlim_names.h | 64 | $(obj)/resource.o : $(obj)/rlim_names.h |
99 | $(obj)/capability_names.h : $(srctree)/include/uapi/linux/capability.h \ | 65 | $(obj)/capability_names.h : $(srctree)/include/uapi/linux/capability.h \ |
100 | $(src)/Makefile | 66 | $(src)/Makefile |
@@ -102,8 +68,3 @@ $(obj)/capability_names.h : $(srctree)/include/uapi/linux/capability.h \ | |||
102 | $(obj)/rlim_names.h : $(srctree)/include/uapi/asm-generic/resource.h \ | 68 | $(obj)/rlim_names.h : $(srctree)/include/uapi/asm-generic/resource.h \ |
103 | $(src)/Makefile | 69 | $(src)/Makefile |
104 | $(call cmd,make-rlim) | 70 | $(call cmd,make-rlim) |
105 | $(obj)/net_names.h : $(srctree)/include/linux/socket.h \ | ||
106 | $(srctree)/include/linux/net.h \ | ||
107 | $(src)/Makefile | ||
108 | $(call cmd,make-af) | ||
109 | $(call cmd,make-sock) | ||
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c index 518d5928661b..caaf51dda648 100644 --- a/security/apparmor/apparmorfs.c +++ b/security/apparmor/apparmorfs.c | |||
@@ -2202,7 +2202,6 @@ static struct aa_sfs_entry aa_sfs_entry_features[] = { | |||
2202 | AA_SFS_DIR("policy", aa_sfs_entry_policy), | 2202 | AA_SFS_DIR("policy", aa_sfs_entry_policy), |
2203 | AA_SFS_DIR("domain", aa_sfs_entry_domain), | 2203 | AA_SFS_DIR("domain", aa_sfs_entry_domain), |
2204 | AA_SFS_DIR("file", aa_sfs_entry_file), | 2204 | AA_SFS_DIR("file", aa_sfs_entry_file), |
2205 | AA_SFS_DIR("network", aa_sfs_entry_network), | ||
2206 | AA_SFS_DIR("mount", aa_sfs_entry_mount), | 2205 | AA_SFS_DIR("mount", aa_sfs_entry_mount), |
2207 | AA_SFS_DIR("namespaces", aa_sfs_entry_ns), | 2206 | AA_SFS_DIR("namespaces", aa_sfs_entry_ns), |
2208 | AA_SFS_FILE_U64("capability", VFS_CAP_FLAGS_MASK), | 2207 | AA_SFS_FILE_U64("capability", VFS_CAP_FLAGS_MASK), |
diff --git a/security/apparmor/file.c b/security/apparmor/file.c index db80221891c6..3382518b87fa 100644 --- a/security/apparmor/file.c +++ b/security/apparmor/file.c | |||
@@ -21,7 +21,6 @@ | |||
21 | #include "include/context.h" | 21 | #include "include/context.h" |
22 | #include "include/file.h" | 22 | #include "include/file.h" |
23 | #include "include/match.h" | 23 | #include "include/match.h" |
24 | #include "include/net.h" | ||
25 | #include "include/path.h" | 24 | #include "include/path.h" |
26 | #include "include/policy.h" | 25 | #include "include/policy.h" |
27 | #include "include/label.h" | 26 | #include "include/label.h" |
@@ -567,32 +566,6 @@ static int __file_path_perm(const char *op, struct aa_label *label, | |||
567 | return error; | 566 | return error; |
568 | } | 567 | } |
569 | 568 | ||
570 | static int __file_sock_perm(const char *op, struct aa_label *label, | ||
571 | struct aa_label *flabel, struct file *file, | ||
572 | u32 request, u32 denied) | ||
573 | { | ||
574 | struct socket *sock = (struct socket *) file->private_data; | ||
575 | int error; | ||
576 | |||
577 | AA_BUG(!sock); | ||
578 | |||
579 | /* revalidation due to label out of date. No revocation at this time */ | ||
580 | if (!denied && aa_label_is_subset(flabel, label)) | ||
581 | return 0; | ||
582 | |||
583 | /* TODO: improve to skip profiles cached in flabel */ | ||
584 | error = aa_sock_file_perm(label, op, request, sock); | ||
585 | if (denied) { | ||
586 | /* TODO: improve to skip profiles checked above */ | ||
587 | /* check every profile in file label to is cached */ | ||
588 | last_error(error, aa_sock_file_perm(flabel, op, request, sock)); | ||
589 | } | ||
590 | if (!error) | ||
591 | update_file_ctx(file_ctx(file), label, request); | ||
592 | |||
593 | return error; | ||
594 | } | ||
595 | |||
596 | /** | 569 | /** |
597 | * aa_file_perm - do permission revalidation check & audit for @file | 570 | * aa_file_perm - do permission revalidation check & audit for @file |
598 | * @op: operation being checked | 571 | * @op: operation being checked |
@@ -637,9 +610,6 @@ int aa_file_perm(const char *op, struct aa_label *label, struct file *file, | |||
637 | error = __file_path_perm(op, label, flabel, file, request, | 610 | error = __file_path_perm(op, label, flabel, file, request, |
638 | denied); | 611 | denied); |
639 | 612 | ||
640 | else if (S_ISSOCK(file_inode(file)->i_mode)) | ||
641 | error = __file_sock_perm(op, label, flabel, file, request, | ||
642 | denied); | ||
643 | done: | 613 | done: |
644 | rcu_read_unlock(); | 614 | rcu_read_unlock(); |
645 | 615 | ||
diff --git a/security/apparmor/include/audit.h b/security/apparmor/include/audit.h index ff4316e1068d..620e81169659 100644 --- a/security/apparmor/include/audit.h +++ b/security/apparmor/include/audit.h | |||
@@ -121,29 +121,21 @@ struct apparmor_audit_data { | |||
121 | /* these entries require a custom callback fn */ | 121 | /* these entries require a custom callback fn */ |
122 | struct { | 122 | struct { |
123 | struct aa_label *peer; | 123 | struct aa_label *peer; |
124 | union { | 124 | struct { |
125 | struct { | 125 | const char *target; |
126 | kuid_t ouid; | 126 | kuid_t ouid; |
127 | const char *target; | 127 | } fs; |
128 | } fs; | ||
129 | struct { | ||
130 | int type, protocol; | ||
131 | struct sock *peer_sk; | ||
132 | void *addr; | ||
133 | int addrlen; | ||
134 | } net; | ||
135 | int signal; | ||
136 | struct { | ||
137 | int rlim; | ||
138 | unsigned long max; | ||
139 | } rlim; | ||
140 | }; | ||
141 | }; | 128 | }; |
142 | struct { | 129 | struct { |
143 | struct aa_profile *profile; | 130 | struct aa_profile *profile; |
144 | const char *ns; | 131 | const char *ns; |
145 | long pos; | 132 | long pos; |
146 | } iface; | 133 | } iface; |
134 | int signal; | ||
135 | struct { | ||
136 | int rlim; | ||
137 | unsigned long max; | ||
138 | } rlim; | ||
147 | struct { | 139 | struct { |
148 | const char *src_name; | 140 | const char *src_name; |
149 | const char *type; | 141 | const char *type; |
diff --git a/security/apparmor/include/net.h b/security/apparmor/include/net.h deleted file mode 100644 index 140c8efcf364..000000000000 --- a/security/apparmor/include/net.h +++ /dev/null | |||
@@ -1,114 +0,0 @@ | |||
1 | /* | ||
2 | * AppArmor security module | ||
3 | * | ||
4 | * This file contains AppArmor network mediation definitions. | ||
5 | * | ||
6 | * Copyright (C) 1998-2008 Novell/SUSE | ||
7 | * Copyright 2009-2017 Canonical Ltd. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License as | ||
11 | * published by the Free Software Foundation, version 2 of the | ||
12 | * License. | ||
13 | */ | ||
14 | |||
15 | #ifndef __AA_NET_H | ||
16 | #define __AA_NET_H | ||
17 | |||
18 | #include <net/sock.h> | ||
19 | #include <linux/path.h> | ||
20 | |||
21 | #include "apparmorfs.h" | ||
22 | #include "label.h" | ||
23 | #include "perms.h" | ||
24 | #include "policy.h" | ||
25 | |||
26 | #define AA_MAY_SEND AA_MAY_WRITE | ||
27 | #define AA_MAY_RECEIVE AA_MAY_READ | ||
28 | |||
29 | #define AA_MAY_SHUTDOWN AA_MAY_DELETE | ||
30 | |||
31 | #define AA_MAY_CONNECT AA_MAY_OPEN | ||
32 | #define AA_MAY_ACCEPT 0x00100000 | ||
33 | |||
34 | #define AA_MAY_BIND 0x00200000 | ||
35 | #define AA_MAY_LISTEN 0x00400000 | ||
36 | |||
37 | #define AA_MAY_SETOPT 0x01000000 | ||
38 | #define AA_MAY_GETOPT 0x02000000 | ||
39 | |||
40 | #define NET_PERMS_MASK (AA_MAY_SEND | AA_MAY_RECEIVE | AA_MAY_CREATE | \ | ||
41 | AA_MAY_SHUTDOWN | AA_MAY_BIND | AA_MAY_LISTEN | \ | ||
42 | AA_MAY_CONNECT | AA_MAY_ACCEPT | AA_MAY_SETATTR | \ | ||
43 | AA_MAY_GETATTR | AA_MAY_SETOPT | AA_MAY_GETOPT) | ||
44 | |||
45 | #define NET_FS_PERMS (AA_MAY_SEND | AA_MAY_RECEIVE | AA_MAY_CREATE | \ | ||
46 | AA_MAY_SHUTDOWN | AA_MAY_CONNECT | AA_MAY_RENAME |\ | ||
47 | AA_MAY_SETATTR | AA_MAY_GETATTR | AA_MAY_CHMOD | \ | ||
48 | AA_MAY_CHOWN | AA_MAY_CHGRP | AA_MAY_LOCK | \ | ||
49 | AA_MAY_MPROT) | ||
50 | |||
51 | #define NET_PEER_MASK (AA_MAY_SEND | AA_MAY_RECEIVE | AA_MAY_CONNECT | \ | ||
52 | AA_MAY_ACCEPT) | ||
53 | struct aa_sk_ctx { | ||
54 | struct aa_label *label; | ||
55 | struct aa_label *peer; | ||
56 | struct path path; | ||
57 | }; | ||
58 | |||
59 | #define SK_CTX(X) ((X)->sk_security) | ||
60 | #define SOCK_ctx(X) SOCK_INODE(X)->i_security | ||
61 | #define DEFINE_AUDIT_NET(NAME, OP, SK, F, T, P) \ | ||
62 | struct lsm_network_audit NAME ## _net = { .sk = (SK), \ | ||
63 | .family = (F)}; \ | ||
64 | DEFINE_AUDIT_DATA(NAME, \ | ||
65 | ((SK) && (F) != AF_UNIX) ? LSM_AUDIT_DATA_NET : \ | ||
66 | LSM_AUDIT_DATA_NONE, \ | ||
67 | OP); \ | ||
68 | NAME.u.net = &(NAME ## _net); \ | ||
69 | aad(&NAME)->net.type = (T); \ | ||
70 | aad(&NAME)->net.protocol = (P) | ||
71 | |||
72 | #define DEFINE_AUDIT_SK(NAME, OP, SK) \ | ||
73 | DEFINE_AUDIT_NET(NAME, OP, SK, (SK)->sk_family, (SK)->sk_type, \ | ||
74 | (SK)->sk_protocol) | ||
75 | |||
76 | /* struct aa_net - network confinement data | ||
77 | * @allow: basic network families permissions | ||
78 | * @audit: which network permissions to force audit | ||
79 | * @quiet: which network permissions to quiet rejects | ||
80 | */ | ||
81 | struct aa_net { | ||
82 | u16 allow[AF_MAX]; | ||
83 | u16 audit[AF_MAX]; | ||
84 | u16 quiet[AF_MAX]; | ||
85 | }; | ||
86 | |||
87 | |||
88 | extern struct aa_sfs_entry aa_sfs_entry_network[]; | ||
89 | |||
90 | void audit_net_cb(struct audit_buffer *ab, void *va); | ||
91 | int aa_profile_af_perm(struct aa_profile *profile, struct common_audit_data *sa, | ||
92 | u32 request, u16 family, int type); | ||
93 | int aa_af_perm(struct aa_label *label, const char *op, u32 request, u16 family, | ||
94 | int type, int protocol); | ||
95 | static inline int aa_profile_af_sk_perm(struct aa_profile *profile, | ||
96 | struct common_audit_data *sa, | ||
97 | u32 request, | ||
98 | struct sock *sk) | ||
99 | { | ||
100 | return aa_profile_af_perm(profile, sa, request, sk->sk_family, | ||
101 | sk->sk_type); | ||
102 | } | ||
103 | int aa_sk_perm(const char *op, u32 request, struct sock *sk); | ||
104 | |||
105 | int aa_sock_file_perm(struct aa_label *label, const char *op, u32 request, | ||
106 | struct socket *sock); | ||
107 | |||
108 | |||
109 | static inline void aa_free_net_rules(struct aa_net *new) | ||
110 | { | ||
111 | /* NOP */ | ||
112 | } | ||
113 | |||
114 | #endif /* __AA_NET_H */ | ||
diff --git a/security/apparmor/include/perms.h b/security/apparmor/include/perms.h index af04d5a7d73d..2b27bb79aec4 100644 --- a/security/apparmor/include/perms.h +++ b/security/apparmor/include/perms.h | |||
@@ -135,10 +135,9 @@ extern struct aa_perms allperms; | |||
135 | 135 | ||
136 | 136 | ||
137 | void aa_perm_mask_to_str(char *str, const char *chrs, u32 mask); | 137 | void aa_perm_mask_to_str(char *str, const char *chrs, u32 mask); |
138 | void aa_audit_perm_names(struct audit_buffer *ab, const char * const *names, | 138 | void aa_audit_perm_names(struct audit_buffer *ab, const char **names, u32 mask); |
139 | u32 mask); | ||
140 | void aa_audit_perm_mask(struct audit_buffer *ab, u32 mask, const char *chrs, | 139 | void aa_audit_perm_mask(struct audit_buffer *ab, u32 mask, const char *chrs, |
141 | u32 chrsmask, const char * const *names, u32 namesmask); | 140 | u32 chrsmask, const char **names, u32 namesmask); |
142 | void aa_apply_modes_to_perms(struct aa_profile *profile, | 141 | void aa_apply_modes_to_perms(struct aa_profile *profile, |
143 | struct aa_perms *perms); | 142 | struct aa_perms *perms); |
144 | void aa_compute_perms(struct aa_dfa *dfa, unsigned int state, | 143 | void aa_compute_perms(struct aa_dfa *dfa, unsigned int state, |
diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h index 4364088a0b9e..17fe41a9cac3 100644 --- a/security/apparmor/include/policy.h +++ b/security/apparmor/include/policy.h | |||
@@ -30,7 +30,6 @@ | |||
30 | #include "file.h" | 30 | #include "file.h" |
31 | #include "lib.h" | 31 | #include "lib.h" |
32 | #include "label.h" | 32 | #include "label.h" |
33 | #include "net.h" | ||
34 | #include "perms.h" | 33 | #include "perms.h" |
35 | #include "resource.h" | 34 | #include "resource.h" |
36 | 35 | ||
@@ -112,7 +111,6 @@ struct aa_data { | |||
112 | * @policy: general match rules governing policy | 111 | * @policy: general match rules governing policy |
113 | * @file: The set of rules governing basic file access and domain transitions | 112 | * @file: The set of rules governing basic file access and domain transitions |
114 | * @caps: capabilities for the profile | 113 | * @caps: capabilities for the profile |
115 | * @net: network controls for the profile | ||
116 | * @rlimits: rlimits for the profile | 114 | * @rlimits: rlimits for the profile |
117 | * | 115 | * |
118 | * @dents: dentries for the profiles file entries in apparmorfs | 116 | * @dents: dentries for the profiles file entries in apparmorfs |
@@ -150,7 +148,6 @@ struct aa_profile { | |||
150 | struct aa_policydb policy; | 148 | struct aa_policydb policy; |
151 | struct aa_file_rules file; | 149 | struct aa_file_rules file; |
152 | struct aa_caps caps; | 150 | struct aa_caps caps; |
153 | struct aa_net net; | ||
154 | struct aa_rlimit rlimits; | 151 | struct aa_rlimit rlimits; |
155 | 152 | ||
156 | struct aa_loaddata *rawdata; | 153 | struct aa_loaddata *rawdata; |
@@ -223,16 +220,6 @@ static inline unsigned int PROFILE_MEDIATES_SAFE(struct aa_profile *profile, | |||
223 | return 0; | 220 | return 0; |
224 | } | 221 | } |
225 | 222 | ||
226 | static inline unsigned int PROFILE_MEDIATES_AF(struct aa_profile *profile, | ||
227 | u16 AF) { | ||
228 | unsigned int state = PROFILE_MEDIATES(profile, AA_CLASS_NET); | ||
229 | u16 be_af = cpu_to_be16(AF); | ||
230 | |||
231 | if (!state) | ||
232 | return 0; | ||
233 | return aa_dfa_match_len(profile->policy.dfa, state, (char *) &be_af, 2); | ||
234 | } | ||
235 | |||
236 | /** | 223 | /** |
237 | * aa_get_profile - increment refcount on profile @p | 224 | * aa_get_profile - increment refcount on profile @p |
238 | * @p: profile (MAYBE NULL) | 225 | * @p: profile (MAYBE NULL) |
diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c index 8818621b5d95..08ca26bcca77 100644 --- a/security/apparmor/lib.c +++ b/security/apparmor/lib.c | |||
@@ -211,8 +211,7 @@ void aa_perm_mask_to_str(char *str, const char *chrs, u32 mask) | |||
211 | *str = '\0'; | 211 | *str = '\0'; |
212 | } | 212 | } |
213 | 213 | ||
214 | void aa_audit_perm_names(struct audit_buffer *ab, const char * const *names, | 214 | void aa_audit_perm_names(struct audit_buffer *ab, const char **names, u32 mask) |
215 | u32 mask) | ||
216 | { | 215 | { |
217 | const char *fmt = "%s"; | 216 | const char *fmt = "%s"; |
218 | unsigned int i, perm = 1; | 217 | unsigned int i, perm = 1; |
@@ -230,7 +229,7 @@ void aa_audit_perm_names(struct audit_buffer *ab, const char * const *names, | |||
230 | } | 229 | } |
231 | 230 | ||
232 | void aa_audit_perm_mask(struct audit_buffer *ab, u32 mask, const char *chrs, | 231 | void aa_audit_perm_mask(struct audit_buffer *ab, u32 mask, const char *chrs, |
233 | u32 chrsmask, const char * const *names, u32 namesmask) | 232 | u32 chrsmask, const char **names, u32 namesmask) |
234 | { | 233 | { |
235 | char str[33]; | 234 | char str[33]; |
236 | 235 | ||
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c index 72b915dfcaf7..1346ee5be04f 100644 --- a/security/apparmor/lsm.c +++ b/security/apparmor/lsm.c | |||
@@ -33,7 +33,6 @@ | |||
33 | #include "include/context.h" | 33 | #include "include/context.h" |
34 | #include "include/file.h" | 34 | #include "include/file.h" |
35 | #include "include/ipc.h" | 35 | #include "include/ipc.h" |
36 | #include "include/net.h" | ||
37 | #include "include/path.h" | 36 | #include "include/path.h" |
38 | #include "include/label.h" | 37 | #include "include/label.h" |
39 | #include "include/policy.h" | 38 | #include "include/policy.h" |
@@ -737,368 +736,6 @@ static int apparmor_task_kill(struct task_struct *target, struct siginfo *info, | |||
737 | return error; | 736 | return error; |
738 | } | 737 | } |
739 | 738 | ||
740 | /** | ||
741 | * apparmor_sk_alloc_security - allocate and attach the sk_security field | ||
742 | */ | ||
743 | static int apparmor_sk_alloc_security(struct sock *sk, int family, gfp_t flags) | ||
744 | { | ||
745 | struct aa_sk_ctx *ctx; | ||
746 | |||
747 | ctx = kzalloc(sizeof(*ctx), flags); | ||
748 | if (!ctx) | ||
749 | return -ENOMEM; | ||
750 | |||
751 | SK_CTX(sk) = ctx; | ||
752 | |||
753 | return 0; | ||
754 | } | ||
755 | |||
756 | /** | ||
757 | * apparmor_sk_free_security - free the sk_security field | ||
758 | */ | ||
759 | static void apparmor_sk_free_security(struct sock *sk) | ||
760 | { | ||
761 | struct aa_sk_ctx *ctx = SK_CTX(sk); | ||
762 | |||
763 | SK_CTX(sk) = NULL; | ||
764 | aa_put_label(ctx->label); | ||
765 | aa_put_label(ctx->peer); | ||
766 | path_put(&ctx->path); | ||
767 | kfree(ctx); | ||
768 | } | ||
769 | |||
770 | /** | ||
771 | * apparmor_clone_security - clone the sk_security field | ||
772 | */ | ||
773 | static void apparmor_sk_clone_security(const struct sock *sk, | ||
774 | struct sock *newsk) | ||
775 | { | ||
776 | struct aa_sk_ctx *ctx = SK_CTX(sk); | ||
777 | struct aa_sk_ctx *new = SK_CTX(newsk); | ||
778 | |||
779 | new->label = aa_get_label(ctx->label); | ||
780 | new->peer = aa_get_label(ctx->peer); | ||
781 | new->path = ctx->path; | ||
782 | path_get(&new->path); | ||
783 | } | ||
784 | |||
785 | static int aa_sock_create_perm(struct aa_label *label, int family, int type, | ||
786 | int protocol) | ||
787 | { | ||
788 | AA_BUG(!label); | ||
789 | AA_BUG(in_interrupt()); | ||
790 | |||
791 | return aa_af_perm(label, OP_CREATE, AA_MAY_CREATE, family, type, | ||
792 | protocol); | ||
793 | } | ||
794 | |||
795 | |||
796 | /** | ||
797 | * apparmor_socket_create - check perms before creating a new socket | ||
798 | */ | ||
799 | static int apparmor_socket_create(int family, int type, int protocol, int kern) | ||
800 | { | ||
801 | struct aa_label *label; | ||
802 | int error = 0; | ||
803 | |||
804 | label = begin_current_label_crit_section(); | ||
805 | if (!(kern || unconfined(label))) | ||
806 | error = aa_sock_create_perm(label, family, type, protocol); | ||
807 | end_current_label_crit_section(label); | ||
808 | |||
809 | return error; | ||
810 | } | ||
811 | |||
812 | /** | ||
813 | * apparmor_socket_post_create - setup the per-socket security struct | ||
814 | * | ||
815 | * Note: | ||
816 | * - kernel sockets currently labeled unconfined but we may want to | ||
817 | * move to a special kernel label | ||
818 | * - socket may not have sk here if created with sock_create_lite or | ||
819 | * sock_alloc. These should be accept cases which will be handled in | ||
820 | * sock_graft. | ||
821 | */ | ||
822 | static int apparmor_socket_post_create(struct socket *sock, int family, | ||
823 | int type, int protocol, int kern) | ||
824 | { | ||
825 | struct aa_label *label; | ||
826 | |||
827 | if (kern) { | ||
828 | struct aa_ns *ns = aa_get_current_ns(); | ||
829 | |||
830 | label = aa_get_label(ns_unconfined(ns)); | ||
831 | aa_put_ns(ns); | ||
832 | } else | ||
833 | label = aa_get_current_label(); | ||
834 | |||
835 | if (sock->sk) { | ||
836 | struct aa_sk_ctx *ctx = SK_CTX(sock->sk); | ||
837 | |||
838 | aa_put_label(ctx->label); | ||
839 | ctx->label = aa_get_label(label); | ||
840 | } | ||
841 | aa_put_label(label); | ||
842 | |||
843 | return 0; | ||
844 | } | ||
845 | |||
846 | /** | ||
847 | * apparmor_socket_bind - check perms before bind addr to socket | ||
848 | */ | ||
849 | static int apparmor_socket_bind(struct socket *sock, | ||
850 | struct sockaddr *address, int addrlen) | ||
851 | { | ||
852 | AA_BUG(!sock); | ||
853 | AA_BUG(!sock->sk); | ||
854 | AA_BUG(!address); | ||
855 | AA_BUG(in_interrupt()); | ||
856 | |||
857 | return aa_sk_perm(OP_BIND, AA_MAY_BIND, sock->sk); | ||
858 | } | ||
859 | |||
860 | /** | ||
861 | * apparmor_socket_connect - check perms before connecting @sock to @address | ||
862 | */ | ||
863 | static int apparmor_socket_connect(struct socket *sock, | ||
864 | struct sockaddr *address, int addrlen) | ||
865 | { | ||
866 | AA_BUG(!sock); | ||
867 | AA_BUG(!sock->sk); | ||
868 | AA_BUG(!address); | ||
869 | AA_BUG(in_interrupt()); | ||
870 | |||
871 | return aa_sk_perm(OP_CONNECT, AA_MAY_CONNECT, sock->sk); | ||
872 | } | ||
873 | |||
874 | /** | ||
875 | * apparmor_socket_list - check perms before allowing listen | ||
876 | */ | ||
877 | static int apparmor_socket_listen(struct socket *sock, int backlog) | ||
878 | { | ||
879 | AA_BUG(!sock); | ||
880 | AA_BUG(!sock->sk); | ||
881 | AA_BUG(in_interrupt()); | ||
882 | |||
883 | return aa_sk_perm(OP_LISTEN, AA_MAY_LISTEN, sock->sk); | ||
884 | } | ||
885 | |||
886 | /** | ||
887 | * apparmor_socket_accept - check perms before accepting a new connection. | ||
888 | * | ||
889 | * Note: while @newsock is created and has some information, the accept | ||
890 | * has not been done. | ||
891 | */ | ||
892 | static int apparmor_socket_accept(struct socket *sock, struct socket *newsock) | ||
893 | { | ||
894 | AA_BUG(!sock); | ||
895 | AA_BUG(!sock->sk); | ||
896 | AA_BUG(!newsock); | ||
897 | AA_BUG(in_interrupt()); | ||
898 | |||
899 | return aa_sk_perm(OP_ACCEPT, AA_MAY_ACCEPT, sock->sk); | ||
900 | } | ||
901 | |||
902 | static int aa_sock_msg_perm(const char *op, u32 request, struct socket *sock, | ||
903 | struct msghdr *msg, int size) | ||
904 | { | ||
905 | AA_BUG(!sock); | ||
906 | AA_BUG(!sock->sk); | ||
907 | AA_BUG(!msg); | ||
908 | AA_BUG(in_interrupt()); | ||
909 | |||
910 | return aa_sk_perm(op, request, sock->sk); | ||
911 | } | ||
912 | |||
913 | /** | ||
914 | * apparmor_socket_sendmsg - check perms before sending msg to another socket | ||
915 | */ | ||
916 | static int apparmor_socket_sendmsg(struct socket *sock, | ||
917 | struct msghdr *msg, int size) | ||
918 | { | ||
919 | return aa_sock_msg_perm(OP_SENDMSG, AA_MAY_SEND, sock, msg, size); | ||
920 | } | ||
921 | |||
922 | /** | ||
923 | * apparmor_socket_recvmsg - check perms before receiving a message | ||
924 | */ | ||
925 | static int apparmor_socket_recvmsg(struct socket *sock, | ||
926 | struct msghdr *msg, int size, int flags) | ||
927 | { | ||
928 | return aa_sock_msg_perm(OP_RECVMSG, AA_MAY_RECEIVE, sock, msg, size); | ||
929 | } | ||
930 | |||
931 | /* revaliation, get/set attr, shutdown */ | ||
932 | static int aa_sock_perm(const char *op, u32 request, struct socket *sock) | ||
933 | { | ||
934 | AA_BUG(!sock); | ||
935 | AA_BUG(!sock->sk); | ||
936 | AA_BUG(in_interrupt()); | ||
937 | |||
938 | return aa_sk_perm(op, request, sock->sk); | ||
939 | } | ||
940 | |||
941 | /** | ||
942 | * apparmor_socket_getsockname - check perms before getting the local address | ||
943 | */ | ||
944 | static int apparmor_socket_getsockname(struct socket *sock) | ||
945 | { | ||
946 | return aa_sock_perm(OP_GETSOCKNAME, AA_MAY_GETATTR, sock); | ||
947 | } | ||
948 | |||
949 | /** | ||
950 | * apparmor_socket_getpeername - check perms before getting remote address | ||
951 | */ | ||
952 | static int apparmor_socket_getpeername(struct socket *sock) | ||
953 | { | ||
954 | return aa_sock_perm(OP_GETPEERNAME, AA_MAY_GETATTR, sock); | ||
955 | } | ||
956 | |||
957 | /* revaliation, get/set attr, opt */ | ||
958 | static int aa_sock_opt_perm(const char *op, u32 request, struct socket *sock, | ||
959 | int level, int optname) | ||
960 | { | ||
961 | AA_BUG(!sock); | ||
962 | AA_BUG(!sock->sk); | ||
963 | AA_BUG(in_interrupt()); | ||
964 | |||
965 | return aa_sk_perm(op, request, sock->sk); | ||
966 | } | ||
967 | |||
968 | /** | ||
969 | * apparmor_getsockopt - check perms before getting socket options | ||
970 | */ | ||
971 | static int apparmor_socket_getsockopt(struct socket *sock, int level, | ||
972 | int optname) | ||
973 | { | ||
974 | return aa_sock_opt_perm(OP_GETSOCKOPT, AA_MAY_GETOPT, sock, | ||
975 | level, optname); | ||
976 | } | ||
977 | |||
978 | /** | ||
979 | * apparmor_setsockopt - check perms before setting socket options | ||
980 | */ | ||
981 | static int apparmor_socket_setsockopt(struct socket *sock, int level, | ||
982 | int optname) | ||
983 | { | ||
984 | return aa_sock_opt_perm(OP_SETSOCKOPT, AA_MAY_SETOPT, sock, | ||
985 | level, optname); | ||
986 | } | ||
987 | |||
988 | /** | ||
989 | * apparmor_socket_shutdown - check perms before shutting down @sock conn | ||
990 | */ | ||
991 | static int apparmor_socket_shutdown(struct socket *sock, int how) | ||
992 | { | ||
993 | return aa_sock_perm(OP_SHUTDOWN, AA_MAY_SHUTDOWN, sock); | ||
994 | } | ||
995 | |||
996 | /** | ||
997 | * apparmor_socket_sock_recv_skb - check perms before associating skb to sk | ||
998 | * | ||
999 | * Note: can not sleep may be called with locks held | ||
1000 | * | ||
1001 | * dont want protocol specific in __skb_recv_datagram() | ||
1002 | * to deny an incoming connection socket_sock_rcv_skb() | ||
1003 | */ | ||
1004 | static int apparmor_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb) | ||
1005 | { | ||
1006 | return 0; | ||
1007 | } | ||
1008 | |||
1009 | |||
1010 | static struct aa_label *sk_peer_label(struct sock *sk) | ||
1011 | { | ||
1012 | struct aa_sk_ctx *ctx = SK_CTX(sk); | ||
1013 | |||
1014 | if (ctx->peer) | ||
1015 | return ctx->peer; | ||
1016 | |||
1017 | return ERR_PTR(-ENOPROTOOPT); | ||
1018 | } | ||
1019 | |||
1020 | /** | ||
1021 | * apparmor_socket_getpeersec_stream - get security context of peer | ||
1022 | * | ||
1023 | * Note: for tcp only valid if using ipsec or cipso on lan | ||
1024 | */ | ||
1025 | static int apparmor_socket_getpeersec_stream(struct socket *sock, | ||
1026 | char __user *optval, | ||
1027 | int __user *optlen, | ||
1028 | unsigned int len) | ||
1029 | { | ||
1030 | char *name; | ||
1031 | int slen, error = 0; | ||
1032 | struct aa_label *label; | ||
1033 | struct aa_label *peer; | ||
1034 | |||
1035 | label = begin_current_label_crit_section(); | ||
1036 | peer = sk_peer_label(sock->sk); | ||
1037 | if (IS_ERR(peer)) { | ||
1038 | error = PTR_ERR(peer); | ||
1039 | goto done; | ||
1040 | } | ||
1041 | slen = aa_label_asxprint(&name, labels_ns(label), peer, | ||
1042 | FLAG_SHOW_MODE | FLAG_VIEW_SUBNS | | ||
1043 | FLAG_HIDDEN_UNCONFINED, GFP_KERNEL); | ||
1044 | /* don't include terminating \0 in slen, it breaks some apps */ | ||
1045 | if (slen < 0) { | ||
1046 | error = -ENOMEM; | ||
1047 | } else { | ||
1048 | if (slen > len) { | ||
1049 | error = -ERANGE; | ||
1050 | } else if (copy_to_user(optval, name, slen)) { | ||
1051 | error = -EFAULT; | ||
1052 | goto out; | ||
1053 | } | ||
1054 | if (put_user(slen, optlen)) | ||
1055 | error = -EFAULT; | ||
1056 | out: | ||
1057 | kfree(name); | ||
1058 | |||
1059 | } | ||
1060 | |||
1061 | done: | ||
1062 | end_current_label_crit_section(label); | ||
1063 | |||
1064 | return error; | ||
1065 | } | ||
1066 | |||
1067 | /** | ||
1068 | * apparmor_socket_getpeersec_dgram - get security label of packet | ||
1069 | * @sock: the peer socket | ||
1070 | * @skb: packet data | ||
1071 | * @secid: pointer to where to put the secid of the packet | ||
1072 | * | ||
1073 | * Sets the netlabel socket state on sk from parent | ||
1074 | */ | ||
1075 | static int apparmor_socket_getpeersec_dgram(struct socket *sock, | ||
1076 | struct sk_buff *skb, u32 *secid) | ||
1077 | |||
1078 | { | ||
1079 | /* TODO: requires secid support */ | ||
1080 | return -ENOPROTOOPT; | ||
1081 | } | ||
1082 | |||
1083 | /** | ||
1084 | * apparmor_sock_graft - Initialize newly created socket | ||
1085 | * @sk: child sock | ||
1086 | * @parent: parent socket | ||
1087 | * | ||
1088 | * Note: could set off of SOCK_CTX(parent) but need to track inode and we can | ||
1089 | * just set sk security information off of current creating process label | ||
1090 | * Labeling of sk for accept case - probably should be sock based | ||
1091 | * instead of task, because of the case where an implicitly labeled | ||
1092 | * socket is shared by different tasks. | ||
1093 | */ | ||
1094 | static void apparmor_sock_graft(struct sock *sk, struct socket *parent) | ||
1095 | { | ||
1096 | struct aa_sk_ctx *ctx = SK_CTX(sk); | ||
1097 | |||
1098 | if (!ctx->label) | ||
1099 | ctx->label = aa_get_current_label(); | ||
1100 | } | ||
1101 | |||
1102 | static struct security_hook_list apparmor_hooks[] __lsm_ro_after_init = { | 739 | static struct security_hook_list apparmor_hooks[] __lsm_ro_after_init = { |
1103 | LSM_HOOK_INIT(ptrace_access_check, apparmor_ptrace_access_check), | 740 | LSM_HOOK_INIT(ptrace_access_check, apparmor_ptrace_access_check), |
1104 | LSM_HOOK_INIT(ptrace_traceme, apparmor_ptrace_traceme), | 741 | LSM_HOOK_INIT(ptrace_traceme, apparmor_ptrace_traceme), |
@@ -1133,30 +770,6 @@ static struct security_hook_list apparmor_hooks[] __lsm_ro_after_init = { | |||
1133 | LSM_HOOK_INIT(getprocattr, apparmor_getprocattr), | 770 | LSM_HOOK_INIT(getprocattr, apparmor_getprocattr), |
1134 | LSM_HOOK_INIT(setprocattr, apparmor_setprocattr), | 771 | LSM_HOOK_INIT(setprocattr, apparmor_setprocattr), |
1135 | 772 | ||
1136 | LSM_HOOK_INIT(sk_alloc_security, apparmor_sk_alloc_security), | ||
1137 | LSM_HOOK_INIT(sk_free_security, apparmor_sk_free_security), | ||
1138 | LSM_HOOK_INIT(sk_clone_security, apparmor_sk_clone_security), | ||
1139 | |||
1140 | LSM_HOOK_INIT(socket_create, apparmor_socket_create), | ||
1141 | LSM_HOOK_INIT(socket_post_create, apparmor_socket_post_create), | ||
1142 | LSM_HOOK_INIT(socket_bind, apparmor_socket_bind), | ||
1143 | LSM_HOOK_INIT(socket_connect, apparmor_socket_connect), | ||
1144 | LSM_HOOK_INIT(socket_listen, apparmor_socket_listen), | ||
1145 | LSM_HOOK_INIT(socket_accept, apparmor_socket_accept), | ||
1146 | LSM_HOOK_INIT(socket_sendmsg, apparmor_socket_sendmsg), | ||
1147 | LSM_HOOK_INIT(socket_recvmsg, apparmor_socket_recvmsg), | ||
1148 | LSM_HOOK_INIT(socket_getsockname, apparmor_socket_getsockname), | ||
1149 | LSM_HOOK_INIT(socket_getpeername, apparmor_socket_getpeername), | ||
1150 | LSM_HOOK_INIT(socket_getsockopt, apparmor_socket_getsockopt), | ||
1151 | LSM_HOOK_INIT(socket_setsockopt, apparmor_socket_setsockopt), | ||
1152 | LSM_HOOK_INIT(socket_shutdown, apparmor_socket_shutdown), | ||
1153 | LSM_HOOK_INIT(socket_sock_rcv_skb, apparmor_socket_sock_rcv_skb), | ||
1154 | LSM_HOOK_INIT(socket_getpeersec_stream, | ||
1155 | apparmor_socket_getpeersec_stream), | ||
1156 | LSM_HOOK_INIT(socket_getpeersec_dgram, | ||
1157 | apparmor_socket_getpeersec_dgram), | ||
1158 | LSM_HOOK_INIT(sock_graft, apparmor_sock_graft), | ||
1159 | |||
1160 | LSM_HOOK_INIT(cred_alloc_blank, apparmor_cred_alloc_blank), | 773 | LSM_HOOK_INIT(cred_alloc_blank, apparmor_cred_alloc_blank), |
1161 | LSM_HOOK_INIT(cred_free, apparmor_cred_free), | 774 | LSM_HOOK_INIT(cred_free, apparmor_cred_free), |
1162 | LSM_HOOK_INIT(cred_prepare, apparmor_cred_prepare), | 775 | LSM_HOOK_INIT(cred_prepare, apparmor_cred_prepare), |
diff --git a/security/apparmor/net.c b/security/apparmor/net.c deleted file mode 100644 index 33d54435f8d6..000000000000 --- a/security/apparmor/net.c +++ /dev/null | |||
@@ -1,184 +0,0 @@ | |||
1 | /* | ||
2 | * AppArmor security module | ||
3 | * | ||
4 | * This file contains AppArmor network mediation | ||
5 | * | ||
6 | * Copyright (C) 1998-2008 Novell/SUSE | ||
7 | * Copyright 2009-2017 Canonical Ltd. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License as | ||
11 | * published by the Free Software Foundation, version 2 of the | ||
12 | * License. | ||
13 | */ | ||
14 | |||
15 | #include "include/apparmor.h" | ||
16 | #include "include/audit.h" | ||
17 | #include "include/context.h" | ||
18 | #include "include/label.h" | ||
19 | #include "include/net.h" | ||
20 | #include "include/policy.h" | ||
21 | |||
22 | #include "net_names.h" | ||
23 | |||
24 | |||
25 | struct aa_sfs_entry aa_sfs_entry_network[] = { | ||
26 | AA_SFS_FILE_STRING("af_mask", AA_SFS_AF_MASK), | ||
27 | { } | ||
28 | }; | ||
29 | |||
30 | static const char * const net_mask_names[] = { | ||
31 | "unknown", | ||
32 | "send", | ||
33 | "receive", | ||
34 | "unknown", | ||
35 | |||
36 | "create", | ||
37 | "shutdown", | ||
38 | "connect", | ||
39 | "unknown", | ||
40 | |||
41 | "setattr", | ||
42 | "getattr", | ||
43 | "setcred", | ||
44 | "getcred", | ||
45 | |||
46 | "chmod", | ||
47 | "chown", | ||
48 | "chgrp", | ||
49 | "lock", | ||
50 | |||
51 | "mmap", | ||
52 | "mprot", | ||
53 | "unknown", | ||
54 | "unknown", | ||
55 | |||
56 | "accept", | ||
57 | "bind", | ||
58 | "listen", | ||
59 | "unknown", | ||
60 | |||
61 | "setopt", | ||
62 | "getopt", | ||
63 | "unknown", | ||
64 | "unknown", | ||
65 | |||
66 | "unknown", | ||
67 | "unknown", | ||
68 | "unknown", | ||
69 | "unknown", | ||
70 | }; | ||
71 | |||
72 | |||
73 | /* audit callback for net specific fields */ | ||
74 | void audit_net_cb(struct audit_buffer *ab, void *va) | ||
75 | { | ||
76 | struct common_audit_data *sa = va; | ||
77 | |||
78 | audit_log_format(ab, " family="); | ||
79 | if (address_family_names[sa->u.net->family]) | ||
80 | audit_log_string(ab, address_family_names[sa->u.net->family]); | ||
81 | else | ||
82 | audit_log_format(ab, "\"unknown(%d)\"", sa->u.net->family); | ||
83 | audit_log_format(ab, " sock_type="); | ||
84 | if (sock_type_names[aad(sa)->net.type]) | ||
85 | audit_log_string(ab, sock_type_names[aad(sa)->net.type]); | ||
86 | else | ||
87 | audit_log_format(ab, "\"unknown(%d)\"", aad(sa)->net.type); | ||
88 | audit_log_format(ab, " protocol=%d", aad(sa)->net.protocol); | ||
89 | |||
90 | if (aad(sa)->request & NET_PERMS_MASK) { | ||
91 | audit_log_format(ab, " requested_mask="); | ||
92 | aa_audit_perm_mask(ab, aad(sa)->request, NULL, 0, | ||
93 | net_mask_names, NET_PERMS_MASK); | ||
94 | |||
95 | if (aad(sa)->denied & NET_PERMS_MASK) { | ||
96 | audit_log_format(ab, " denied_mask="); | ||
97 | aa_audit_perm_mask(ab, aad(sa)->denied, NULL, 0, | ||
98 | net_mask_names, NET_PERMS_MASK); | ||
99 | } | ||
100 | } | ||
101 | if (aad(sa)->peer) { | ||
102 | audit_log_format(ab, " peer="); | ||
103 | aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer, | ||
104 | FLAGS_NONE, GFP_ATOMIC); | ||
105 | } | ||
106 | } | ||
107 | |||
108 | |||
109 | /* Generic af perm */ | ||
110 | int aa_profile_af_perm(struct aa_profile *profile, struct common_audit_data *sa, | ||
111 | u32 request, u16 family, int type) | ||
112 | { | ||
113 | struct aa_perms perms = { }; | ||
114 | |||
115 | AA_BUG(family >= AF_MAX); | ||
116 | AA_BUG(type < 0 || type >= SOCK_MAX); | ||
117 | |||
118 | if (profile_unconfined(profile)) | ||
119 | return 0; | ||
120 | |||
121 | perms.allow = (profile->net.allow[family] & (1 << type)) ? | ||
122 | ALL_PERMS_MASK : 0; | ||
123 | perms.audit = (profile->net.audit[family] & (1 << type)) ? | ||
124 | ALL_PERMS_MASK : 0; | ||
125 | perms.quiet = (profile->net.quiet[family] & (1 << type)) ? | ||
126 | ALL_PERMS_MASK : 0; | ||
127 | aa_apply_modes_to_perms(profile, &perms); | ||
128 | |||
129 | return aa_check_perms(profile, &perms, request, sa, audit_net_cb); | ||
130 | } | ||
131 | |||
132 | int aa_af_perm(struct aa_label *label, const char *op, u32 request, u16 family, | ||
133 | int type, int protocol) | ||
134 | { | ||
135 | struct aa_profile *profile; | ||
136 | DEFINE_AUDIT_NET(sa, op, NULL, family, type, protocol); | ||
137 | |||
138 | return fn_for_each_confined(label, profile, | ||
139 | aa_profile_af_perm(profile, &sa, request, family, | ||
140 | type)); | ||
141 | } | ||
142 | |||
143 | static int aa_label_sk_perm(struct aa_label *label, const char *op, u32 request, | ||
144 | struct sock *sk) | ||
145 | { | ||
146 | struct aa_profile *profile; | ||
147 | DEFINE_AUDIT_SK(sa, op, sk); | ||
148 | |||
149 | AA_BUG(!label); | ||
150 | AA_BUG(!sk); | ||
151 | |||
152 | if (unconfined(label)) | ||
153 | return 0; | ||
154 | |||
155 | return fn_for_each_confined(label, profile, | ||
156 | aa_profile_af_sk_perm(profile, &sa, request, sk)); | ||
157 | } | ||
158 | |||
159 | int aa_sk_perm(const char *op, u32 request, struct sock *sk) | ||
160 | { | ||
161 | struct aa_label *label; | ||
162 | int error; | ||
163 | |||
164 | AA_BUG(!sk); | ||
165 | AA_BUG(in_interrupt()); | ||
166 | |||
167 | /* TODO: switch to begin_current_label ???? */ | ||
168 | label = begin_current_label_crit_section(); | ||
169 | error = aa_label_sk_perm(label, op, request, sk); | ||
170 | end_current_label_crit_section(label); | ||
171 | |||
172 | return error; | ||
173 | } | ||
174 | |||
175 | |||
176 | int aa_sock_file_perm(struct aa_label *label, const char *op, u32 request, | ||
177 | struct socket *sock) | ||
178 | { | ||
179 | AA_BUG(!label); | ||
180 | AA_BUG(!sock); | ||
181 | AA_BUG(!sock->sk); | ||
182 | |||
183 | return aa_label_sk_perm(label, op, request, sock->sk); | ||
184 | } | ||
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c index 5a2aec358322..4ede87c30f8b 100644 --- a/security/apparmor/policy_unpack.c +++ b/security/apparmor/policy_unpack.c | |||
@@ -275,19 +275,6 @@ fail: | |||
275 | return 0; | 275 | return 0; |
276 | } | 276 | } |
277 | 277 | ||
278 | static bool unpack_u16(struct aa_ext *e, u16 *data, const char *name) | ||
279 | { | ||
280 | if (unpack_nameX(e, AA_U16, name)) { | ||
281 | if (!inbounds(e, sizeof(u16))) | ||
282 | return 0; | ||
283 | if (data) | ||
284 | *data = le16_to_cpu(get_unaligned((__le16 *) e->pos)); | ||
285 | e->pos += sizeof(u16); | ||
286 | return 1; | ||
287 | } | ||
288 | return 0; | ||
289 | } | ||
290 | |||
291 | static bool unpack_u32(struct aa_ext *e, u32 *data, const char *name) | 278 | static bool unpack_u32(struct aa_ext *e, u32 *data, const char *name) |
292 | { | 279 | { |
293 | if (unpack_nameX(e, AA_U32, name)) { | 280 | if (unpack_nameX(e, AA_U32, name)) { |
@@ -597,7 +584,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name) | |||
597 | struct aa_profile *profile = NULL; | 584 | struct aa_profile *profile = NULL; |
598 | const char *tmpname, *tmpns = NULL, *name = NULL; | 585 | const char *tmpname, *tmpns = NULL, *name = NULL; |
599 | const char *info = "failed to unpack profile"; | 586 | const char *info = "failed to unpack profile"; |
600 | size_t size = 0, ns_len; | 587 | size_t ns_len; |
601 | struct rhashtable_params params = { 0 }; | 588 | struct rhashtable_params params = { 0 }; |
602 | char *key = NULL; | 589 | char *key = NULL; |
603 | struct aa_data *data; | 590 | struct aa_data *data; |
@@ -730,38 +717,6 @@ static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name) | |||
730 | goto fail; | 717 | goto fail; |
731 | } | 718 | } |
732 | 719 | ||
733 | size = unpack_array(e, "net_allowed_af"); | ||
734 | if (size) { | ||
735 | |||
736 | for (i = 0; i < size; i++) { | ||
737 | /* discard extraneous rules that this kernel will | ||
738 | * never request | ||
739 | */ | ||
740 | if (i >= AF_MAX) { | ||
741 | u16 tmp; | ||
742 | |||
743 | if (!unpack_u16(e, &tmp, NULL) || | ||
744 | !unpack_u16(e, &tmp, NULL) || | ||
745 | !unpack_u16(e, &tmp, NULL)) | ||
746 | goto fail; | ||
747 | continue; | ||
748 | } | ||
749 | if (!unpack_u16(e, &profile->net.allow[i], NULL)) | ||
750 | goto fail; | ||
751 | if (!unpack_u16(e, &profile->net.audit[i], NULL)) | ||
752 | goto fail; | ||
753 | if (!unpack_u16(e, &profile->net.quiet[i], NULL)) | ||
754 | goto fail; | ||
755 | } | ||
756 | if (!unpack_nameX(e, AA_ARRAYEND, NULL)) | ||
757 | goto fail; | ||
758 | } | ||
759 | if (VERSION_LT(e->version, v7)) { | ||
760 | /* pre v7 policy always allowed these */ | ||
761 | profile->net.allow[AF_UNIX] = 0xffff; | ||
762 | profile->net.allow[AF_NETLINK] = 0xffff; | ||
763 | } | ||
764 | |||
765 | if (unpack_nameX(e, AA_STRUCT, "policydb")) { | 720 | if (unpack_nameX(e, AA_STRUCT, "policydb")) { |
766 | /* generic policy dfa - optional and may be NULL */ | 721 | /* generic policy dfa - optional and may be NULL */ |
767 | info = "failed to unpack policydb"; | 722 | info = "failed to unpack policydb"; |
diff --git a/security/commoncap.c b/security/commoncap.c index c25e0d27537f..fc46f5b85251 100644 --- a/security/commoncap.c +++ b/security/commoncap.c | |||
@@ -585,13 +585,14 @@ int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data | |||
585 | struct vfs_ns_cap_data data, *nscaps = &data; | 585 | struct vfs_ns_cap_data data, *nscaps = &data; |
586 | struct vfs_cap_data *caps = (struct vfs_cap_data *) &data; | 586 | struct vfs_cap_data *caps = (struct vfs_cap_data *) &data; |
587 | kuid_t rootkuid; | 587 | kuid_t rootkuid; |
588 | struct user_namespace *fs_ns = inode->i_sb->s_user_ns; | 588 | struct user_namespace *fs_ns; |
589 | 589 | ||
590 | memset(cpu_caps, 0, sizeof(struct cpu_vfs_cap_data)); | 590 | memset(cpu_caps, 0, sizeof(struct cpu_vfs_cap_data)); |
591 | 591 | ||
592 | if (!inode) | 592 | if (!inode) |
593 | return -ENODATA; | 593 | return -ENODATA; |
594 | 594 | ||
595 | fs_ns = inode->i_sb->s_user_ns; | ||
595 | size = __vfs_getxattr((struct dentry *)dentry, inode, | 596 | size = __vfs_getxattr((struct dentry *)dentry, inode, |
596 | XATTR_NAME_CAPS, &data, XATTR_CAPS_SZ); | 597 | XATTR_NAME_CAPS, &data, XATTR_CAPS_SZ); |
597 | if (size == -ENODATA || size == -EOPNOTSUPP) | 598 | if (size == -ENODATA || size == -EOPNOTSUPP) |
diff --git a/security/keys/Kconfig b/security/keys/Kconfig index 91eafada3164..6462e6654ccf 100644 --- a/security/keys/Kconfig +++ b/security/keys/Kconfig | |||
@@ -45,6 +45,7 @@ config BIG_KEYS | |||
45 | bool "Large payload keys" | 45 | bool "Large payload keys" |
46 | depends on KEYS | 46 | depends on KEYS |
47 | depends on TMPFS | 47 | depends on TMPFS |
48 | select CRYPTO | ||
48 | select CRYPTO_AES | 49 | select CRYPTO_AES |
49 | select CRYPTO_GCM | 50 | select CRYPTO_GCM |
50 | help | 51 | help |
diff --git a/security/keys/big_key.c b/security/keys/big_key.c index e607830b6154..929e14978c42 100644 --- a/security/keys/big_key.c +++ b/security/keys/big_key.c | |||
@@ -247,7 +247,7 @@ void big_key_revoke(struct key *key) | |||
247 | 247 | ||
248 | /* clear the quota */ | 248 | /* clear the quota */ |
249 | key_payload_reserve(key, 0); | 249 | key_payload_reserve(key, 0); |
250 | if (key_is_instantiated(key) && | 250 | if (key_is_positive(key) && |
251 | (size_t)key->payload.data[big_key_len] > BIG_KEY_FILE_THRESHOLD) | 251 | (size_t)key->payload.data[big_key_len] > BIG_KEY_FILE_THRESHOLD) |
252 | vfs_truncate(path, 0); | 252 | vfs_truncate(path, 0); |
253 | } | 253 | } |
@@ -279,7 +279,7 @@ void big_key_describe(const struct key *key, struct seq_file *m) | |||
279 | 279 | ||
280 | seq_puts(m, key->description); | 280 | seq_puts(m, key->description); |
281 | 281 | ||
282 | if (key_is_instantiated(key)) | 282 | if (key_is_positive(key)) |
283 | seq_printf(m, ": %zu [%s]", | 283 | seq_printf(m, ": %zu [%s]", |
284 | datalen, | 284 | datalen, |
285 | datalen > BIG_KEY_FILE_THRESHOLD ? "file" : "buff"); | 285 | datalen > BIG_KEY_FILE_THRESHOLD ? "file" : "buff"); |
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c index 69855ba0d3b3..d92cbf9687c3 100644 --- a/security/keys/encrypted-keys/encrypted.c +++ b/security/keys/encrypted-keys/encrypted.c | |||
@@ -309,6 +309,13 @@ static struct key *request_user_key(const char *master_desc, const u8 **master_k | |||
309 | 309 | ||
310 | down_read(&ukey->sem); | 310 | down_read(&ukey->sem); |
311 | upayload = user_key_payload_locked(ukey); | 311 | upayload = user_key_payload_locked(ukey); |
312 | if (!upayload) { | ||
313 | /* key was revoked before we acquired its semaphore */ | ||
314 | up_read(&ukey->sem); | ||
315 | key_put(ukey); | ||
316 | ukey = ERR_PTR(-EKEYREVOKED); | ||
317 | goto error; | ||
318 | } | ||
312 | *master_key = upayload->data; | 319 | *master_key = upayload->data; |
313 | *master_keylen = upayload->datalen; | 320 | *master_keylen = upayload->datalen; |
314 | error: | 321 | error: |
@@ -847,7 +854,7 @@ static int encrypted_update(struct key *key, struct key_preparsed_payload *prep) | |||
847 | size_t datalen = prep->datalen; | 854 | size_t datalen = prep->datalen; |
848 | int ret = 0; | 855 | int ret = 0; |
849 | 856 | ||
850 | if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) | 857 | if (key_is_negative(key)) |
851 | return -ENOKEY; | 858 | return -ENOKEY; |
852 | if (datalen <= 0 || datalen > 32767 || !prep->data) | 859 | if (datalen <= 0 || datalen > 32767 || !prep->data) |
853 | return -EINVAL; | 860 | return -EINVAL; |
diff --git a/security/keys/gc.c b/security/keys/gc.c index 87cb260e4890..f01d48cb3de1 100644 --- a/security/keys/gc.c +++ b/security/keys/gc.c | |||
@@ -129,15 +129,15 @@ static noinline void key_gc_unused_keys(struct list_head *keys) | |||
129 | while (!list_empty(keys)) { | 129 | while (!list_empty(keys)) { |
130 | struct key *key = | 130 | struct key *key = |
131 | list_entry(keys->next, struct key, graveyard_link); | 131 | list_entry(keys->next, struct key, graveyard_link); |
132 | short state = key->state; | ||
133 | |||
132 | list_del(&key->graveyard_link); | 134 | list_del(&key->graveyard_link); |
133 | 135 | ||
134 | kdebug("- %u", key->serial); | 136 | kdebug("- %u", key->serial); |
135 | key_check(key); | 137 | key_check(key); |
136 | 138 | ||
137 | /* Throw away the key data if the key is instantiated */ | 139 | /* Throw away the key data if the key is instantiated */ |
138 | if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags) && | 140 | if (state == KEY_IS_POSITIVE && key->type->destroy) |
139 | !test_bit(KEY_FLAG_NEGATIVE, &key->flags) && | ||
140 | key->type->destroy) | ||
141 | key->type->destroy(key); | 141 | key->type->destroy(key); |
142 | 142 | ||
143 | security_key_free(key); | 143 | security_key_free(key); |
@@ -151,7 +151,7 @@ static noinline void key_gc_unused_keys(struct list_head *keys) | |||
151 | } | 151 | } |
152 | 152 | ||
153 | atomic_dec(&key->user->nkeys); | 153 | atomic_dec(&key->user->nkeys); |
154 | if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) | 154 | if (state != KEY_IS_UNINSTANTIATED) |
155 | atomic_dec(&key->user->nikeys); | 155 | atomic_dec(&key->user->nikeys); |
156 | 156 | ||
157 | key_user_put(key->user); | 157 | key_user_put(key->user); |
diff --git a/security/keys/key.c b/security/keys/key.c index eb914a838840..83bf4b4afd49 100644 --- a/security/keys/key.c +++ b/security/keys/key.c | |||
@@ -402,6 +402,18 @@ int key_payload_reserve(struct key *key, size_t datalen) | |||
402 | EXPORT_SYMBOL(key_payload_reserve); | 402 | EXPORT_SYMBOL(key_payload_reserve); |
403 | 403 | ||
404 | /* | 404 | /* |
405 | * Change the key state to being instantiated. | ||
406 | */ | ||
407 | static void mark_key_instantiated(struct key *key, int reject_error) | ||
408 | { | ||
409 | /* Commit the payload before setting the state; barrier versus | ||
410 | * key_read_state(). | ||
411 | */ | ||
412 | smp_store_release(&key->state, | ||
413 | (reject_error < 0) ? reject_error : KEY_IS_POSITIVE); | ||
414 | } | ||
415 | |||
416 | /* | ||
405 | * Instantiate a key and link it into the target keyring atomically. Must be | 417 | * Instantiate a key and link it into the target keyring atomically. Must be |
406 | * called with the target keyring's semaphore writelocked. The target key's | 418 | * called with the target keyring's semaphore writelocked. The target key's |
407 | * semaphore need not be locked as instantiation is serialised by | 419 | * semaphore need not be locked as instantiation is serialised by |
@@ -424,14 +436,14 @@ static int __key_instantiate_and_link(struct key *key, | |||
424 | mutex_lock(&key_construction_mutex); | 436 | mutex_lock(&key_construction_mutex); |
425 | 437 | ||
426 | /* can't instantiate twice */ | 438 | /* can't instantiate twice */ |
427 | if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) { | 439 | if (key->state == KEY_IS_UNINSTANTIATED) { |
428 | /* instantiate the key */ | 440 | /* instantiate the key */ |
429 | ret = key->type->instantiate(key, prep); | 441 | ret = key->type->instantiate(key, prep); |
430 | 442 | ||
431 | if (ret == 0) { | 443 | if (ret == 0) { |
432 | /* mark the key as being instantiated */ | 444 | /* mark the key as being instantiated */ |
433 | atomic_inc(&key->user->nikeys); | 445 | atomic_inc(&key->user->nikeys); |
434 | set_bit(KEY_FLAG_INSTANTIATED, &key->flags); | 446 | mark_key_instantiated(key, 0); |
435 | 447 | ||
436 | if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) | 448 | if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) |
437 | awaken = 1; | 449 | awaken = 1; |
@@ -577,13 +589,10 @@ int key_reject_and_link(struct key *key, | |||
577 | mutex_lock(&key_construction_mutex); | 589 | mutex_lock(&key_construction_mutex); |
578 | 590 | ||
579 | /* can't instantiate twice */ | 591 | /* can't instantiate twice */ |
580 | if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) { | 592 | if (key->state == KEY_IS_UNINSTANTIATED) { |
581 | /* mark the key as being negatively instantiated */ | 593 | /* mark the key as being negatively instantiated */ |
582 | atomic_inc(&key->user->nikeys); | 594 | atomic_inc(&key->user->nikeys); |
583 | key->reject_error = -error; | 595 | mark_key_instantiated(key, -error); |
584 | smp_wmb(); | ||
585 | set_bit(KEY_FLAG_NEGATIVE, &key->flags); | ||
586 | set_bit(KEY_FLAG_INSTANTIATED, &key->flags); | ||
587 | now = current_kernel_time(); | 596 | now = current_kernel_time(); |
588 | key->expiry = now.tv_sec + timeout; | 597 | key->expiry = now.tv_sec + timeout; |
589 | key_schedule_gc(key->expiry + key_gc_delay); | 598 | key_schedule_gc(key->expiry + key_gc_delay); |
@@ -752,8 +761,8 @@ static inline key_ref_t __key_update(key_ref_t key_ref, | |||
752 | 761 | ||
753 | ret = key->type->update(key, prep); | 762 | ret = key->type->update(key, prep); |
754 | if (ret == 0) | 763 | if (ret == 0) |
755 | /* updating a negative key instantiates it */ | 764 | /* Updating a negative key positively instantiates it */ |
756 | clear_bit(KEY_FLAG_NEGATIVE, &key->flags); | 765 | mark_key_instantiated(key, 0); |
757 | 766 | ||
758 | up_write(&key->sem); | 767 | up_write(&key->sem); |
759 | 768 | ||
@@ -936,6 +945,16 @@ error: | |||
936 | */ | 945 | */ |
937 | __key_link_end(keyring, &index_key, edit); | 946 | __key_link_end(keyring, &index_key, edit); |
938 | 947 | ||
948 | key = key_ref_to_ptr(key_ref); | ||
949 | if (test_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) { | ||
950 | ret = wait_for_key_construction(key, true); | ||
951 | if (ret < 0) { | ||
952 | key_ref_put(key_ref); | ||
953 | key_ref = ERR_PTR(ret); | ||
954 | goto error_free_prep; | ||
955 | } | ||
956 | } | ||
957 | |||
939 | key_ref = __key_update(key_ref, &prep); | 958 | key_ref = __key_update(key_ref, &prep); |
940 | goto error_free_prep; | 959 | goto error_free_prep; |
941 | } | 960 | } |
@@ -986,8 +1005,8 @@ int key_update(key_ref_t key_ref, const void *payload, size_t plen) | |||
986 | 1005 | ||
987 | ret = key->type->update(key, &prep); | 1006 | ret = key->type->update(key, &prep); |
988 | if (ret == 0) | 1007 | if (ret == 0) |
989 | /* updating a negative key instantiates it */ | 1008 | /* Updating a negative key positively instantiates it */ |
990 | clear_bit(KEY_FLAG_NEGATIVE, &key->flags); | 1009 | mark_key_instantiated(key, 0); |
991 | 1010 | ||
992 | up_write(&key->sem); | 1011 | up_write(&key->sem); |
993 | 1012 | ||
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c index 365ff85d7e27..76d22f726ae4 100644 --- a/security/keys/keyctl.c +++ b/security/keys/keyctl.c | |||
@@ -766,10 +766,9 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen) | |||
766 | 766 | ||
767 | key = key_ref_to_ptr(key_ref); | 767 | key = key_ref_to_ptr(key_ref); |
768 | 768 | ||
769 | if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) { | 769 | ret = key_read_state(key); |
770 | ret = -ENOKEY; | 770 | if (ret < 0) |
771 | goto error2; | 771 | goto error2; /* Negatively instantiated */ |
772 | } | ||
773 | 772 | ||
774 | /* see if we can read it directly */ | 773 | /* see if we can read it directly */ |
775 | ret = key_permission(key_ref, KEY_NEED_READ); | 774 | ret = key_permission(key_ref, KEY_NEED_READ); |
@@ -901,7 +900,7 @@ long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group) | |||
901 | atomic_dec(&key->user->nkeys); | 900 | atomic_dec(&key->user->nkeys); |
902 | atomic_inc(&newowner->nkeys); | 901 | atomic_inc(&newowner->nkeys); |
903 | 902 | ||
904 | if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) { | 903 | if (key->state != KEY_IS_UNINSTANTIATED) { |
905 | atomic_dec(&key->user->nikeys); | 904 | atomic_dec(&key->user->nikeys); |
906 | atomic_inc(&newowner->nikeys); | 905 | atomic_inc(&newowner->nikeys); |
907 | } | 906 | } |
diff --git a/security/keys/keyring.c b/security/keys/keyring.c index 4fa82a8a9c0e..a7e51f793867 100644 --- a/security/keys/keyring.c +++ b/security/keys/keyring.c | |||
@@ -414,7 +414,7 @@ static void keyring_describe(const struct key *keyring, struct seq_file *m) | |||
414 | else | 414 | else |
415 | seq_puts(m, "[anon]"); | 415 | seq_puts(m, "[anon]"); |
416 | 416 | ||
417 | if (key_is_instantiated(keyring)) { | 417 | if (key_is_positive(keyring)) { |
418 | if (keyring->keys.nr_leaves_on_tree != 0) | 418 | if (keyring->keys.nr_leaves_on_tree != 0) |
419 | seq_printf(m, ": %lu", keyring->keys.nr_leaves_on_tree); | 419 | seq_printf(m, ": %lu", keyring->keys.nr_leaves_on_tree); |
420 | else | 420 | else |
@@ -553,7 +553,8 @@ static int keyring_search_iterator(const void *object, void *iterator_data) | |||
553 | { | 553 | { |
554 | struct keyring_search_context *ctx = iterator_data; | 554 | struct keyring_search_context *ctx = iterator_data; |
555 | const struct key *key = keyring_ptr_to_key(object); | 555 | const struct key *key = keyring_ptr_to_key(object); |
556 | unsigned long kflags = key->flags; | 556 | unsigned long kflags = READ_ONCE(key->flags); |
557 | short state = READ_ONCE(key->state); | ||
557 | 558 | ||
558 | kenter("{%d}", key->serial); | 559 | kenter("{%d}", key->serial); |
559 | 560 | ||
@@ -565,6 +566,8 @@ static int keyring_search_iterator(const void *object, void *iterator_data) | |||
565 | 566 | ||
566 | /* skip invalidated, revoked and expired keys */ | 567 | /* skip invalidated, revoked and expired keys */ |
567 | if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) { | 568 | if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) { |
569 | time_t expiry = READ_ONCE(key->expiry); | ||
570 | |||
568 | if (kflags & ((1 << KEY_FLAG_INVALIDATED) | | 571 | if (kflags & ((1 << KEY_FLAG_INVALIDATED) | |
569 | (1 << KEY_FLAG_REVOKED))) { | 572 | (1 << KEY_FLAG_REVOKED))) { |
570 | ctx->result = ERR_PTR(-EKEYREVOKED); | 573 | ctx->result = ERR_PTR(-EKEYREVOKED); |
@@ -572,7 +575,7 @@ static int keyring_search_iterator(const void *object, void *iterator_data) | |||
572 | goto skipped; | 575 | goto skipped; |
573 | } | 576 | } |
574 | 577 | ||
575 | if (key->expiry && ctx->now.tv_sec >= key->expiry) { | 578 | if (expiry && ctx->now.tv_sec >= expiry) { |
576 | if (!(ctx->flags & KEYRING_SEARCH_SKIP_EXPIRED)) | 579 | if (!(ctx->flags & KEYRING_SEARCH_SKIP_EXPIRED)) |
577 | ctx->result = ERR_PTR(-EKEYEXPIRED); | 580 | ctx->result = ERR_PTR(-EKEYEXPIRED); |
578 | kleave(" = %d [expire]", ctx->skipped_ret); | 581 | kleave(" = %d [expire]", ctx->skipped_ret); |
@@ -597,9 +600,8 @@ static int keyring_search_iterator(const void *object, void *iterator_data) | |||
597 | 600 | ||
598 | if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) { | 601 | if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) { |
599 | /* we set a different error code if we pass a negative key */ | 602 | /* we set a different error code if we pass a negative key */ |
600 | if (kflags & (1 << KEY_FLAG_NEGATIVE)) { | 603 | if (state < 0) { |
601 | smp_rmb(); | 604 | ctx->result = ERR_PTR(state); |
602 | ctx->result = ERR_PTR(key->reject_error); | ||
603 | kleave(" = %d [neg]", ctx->skipped_ret); | 605 | kleave(" = %d [neg]", ctx->skipped_ret); |
604 | goto skipped; | 606 | goto skipped; |
605 | } | 607 | } |
diff --git a/security/keys/permission.c b/security/keys/permission.c index 732cc0beffdf..a72b4dd70c8a 100644 --- a/security/keys/permission.c +++ b/security/keys/permission.c | |||
@@ -88,7 +88,8 @@ EXPORT_SYMBOL(key_task_permission); | |||
88 | */ | 88 | */ |
89 | int key_validate(const struct key *key) | 89 | int key_validate(const struct key *key) |
90 | { | 90 | { |
91 | unsigned long flags = key->flags; | 91 | unsigned long flags = READ_ONCE(key->flags); |
92 | time_t expiry = READ_ONCE(key->expiry); | ||
92 | 93 | ||
93 | if (flags & (1 << KEY_FLAG_INVALIDATED)) | 94 | if (flags & (1 << KEY_FLAG_INVALIDATED)) |
94 | return -ENOKEY; | 95 | return -ENOKEY; |
@@ -99,9 +100,9 @@ int key_validate(const struct key *key) | |||
99 | return -EKEYREVOKED; | 100 | return -EKEYREVOKED; |
100 | 101 | ||
101 | /* check it hasn't expired */ | 102 | /* check it hasn't expired */ |
102 | if (key->expiry) { | 103 | if (expiry) { |
103 | struct timespec now = current_kernel_time(); | 104 | struct timespec now = current_kernel_time(); |
104 | if (now.tv_sec >= key->expiry) | 105 | if (now.tv_sec >= expiry) |
105 | return -EKEYEXPIRED; | 106 | return -EKEYEXPIRED; |
106 | } | 107 | } |
107 | 108 | ||
diff --git a/security/keys/proc.c b/security/keys/proc.c index de834309d100..6d1fcbba1e09 100644 --- a/security/keys/proc.c +++ b/security/keys/proc.c | |||
@@ -179,9 +179,12 @@ static int proc_keys_show(struct seq_file *m, void *v) | |||
179 | struct rb_node *_p = v; | 179 | struct rb_node *_p = v; |
180 | struct key *key = rb_entry(_p, struct key, serial_node); | 180 | struct key *key = rb_entry(_p, struct key, serial_node); |
181 | struct timespec now; | 181 | struct timespec now; |
182 | time_t expiry; | ||
182 | unsigned long timo; | 183 | unsigned long timo; |
184 | unsigned long flags; | ||
183 | key_ref_t key_ref, skey_ref; | 185 | key_ref_t key_ref, skey_ref; |
184 | char xbuf[16]; | 186 | char xbuf[16]; |
187 | short state; | ||
185 | int rc; | 188 | int rc; |
186 | 189 | ||
187 | struct keyring_search_context ctx = { | 190 | struct keyring_search_context ctx = { |
@@ -217,12 +220,13 @@ static int proc_keys_show(struct seq_file *m, void *v) | |||
217 | rcu_read_lock(); | 220 | rcu_read_lock(); |
218 | 221 | ||
219 | /* come up with a suitable timeout value */ | 222 | /* come up with a suitable timeout value */ |
220 | if (key->expiry == 0) { | 223 | expiry = READ_ONCE(key->expiry); |
224 | if (expiry == 0) { | ||
221 | memcpy(xbuf, "perm", 5); | 225 | memcpy(xbuf, "perm", 5); |
222 | } else if (now.tv_sec >= key->expiry) { | 226 | } else if (now.tv_sec >= expiry) { |
223 | memcpy(xbuf, "expd", 5); | 227 | memcpy(xbuf, "expd", 5); |
224 | } else { | 228 | } else { |
225 | timo = key->expiry - now.tv_sec; | 229 | timo = expiry - now.tv_sec; |
226 | 230 | ||
227 | if (timo < 60) | 231 | if (timo < 60) |
228 | sprintf(xbuf, "%lus", timo); | 232 | sprintf(xbuf, "%lus", timo); |
@@ -236,18 +240,21 @@ static int proc_keys_show(struct seq_file *m, void *v) | |||
236 | sprintf(xbuf, "%luw", timo / (60*60*24*7)); | 240 | sprintf(xbuf, "%luw", timo / (60*60*24*7)); |
237 | } | 241 | } |
238 | 242 | ||
239 | #define showflag(KEY, LETTER, FLAG) \ | 243 | state = key_read_state(key); |
240 | (test_bit(FLAG, &(KEY)->flags) ? LETTER : '-') | ||
241 | 244 | ||
245 | #define showflag(FLAGS, LETTER, FLAG) \ | ||
246 | ((FLAGS & (1 << FLAG)) ? LETTER : '-') | ||
247 | |||
248 | flags = READ_ONCE(key->flags); | ||
242 | seq_printf(m, "%08x %c%c%c%c%c%c%c %5d %4s %08x %5d %5d %-9.9s ", | 249 | seq_printf(m, "%08x %c%c%c%c%c%c%c %5d %4s %08x %5d %5d %-9.9s ", |
243 | key->serial, | 250 | key->serial, |
244 | showflag(key, 'I', KEY_FLAG_INSTANTIATED), | 251 | state != KEY_IS_UNINSTANTIATED ? 'I' : '-', |
245 | showflag(key, 'R', KEY_FLAG_REVOKED), | 252 | showflag(flags, 'R', KEY_FLAG_REVOKED), |
246 | showflag(key, 'D', KEY_FLAG_DEAD), | 253 | showflag(flags, 'D', KEY_FLAG_DEAD), |
247 | showflag(key, 'Q', KEY_FLAG_IN_QUOTA), | 254 | showflag(flags, 'Q', KEY_FLAG_IN_QUOTA), |
248 | showflag(key, 'U', KEY_FLAG_USER_CONSTRUCT), | 255 | showflag(flags, 'U', KEY_FLAG_USER_CONSTRUCT), |
249 | showflag(key, 'N', KEY_FLAG_NEGATIVE), | 256 | state < 0 ? 'N' : '-', |
250 | showflag(key, 'i', KEY_FLAG_INVALIDATED), | 257 | showflag(flags, 'i', KEY_FLAG_INVALIDATED), |
251 | refcount_read(&key->usage), | 258 | refcount_read(&key->usage), |
252 | xbuf, | 259 | xbuf, |
253 | key->perm, | 260 | key->perm, |
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c index 293d3598153b..740affd65ee9 100644 --- a/security/keys/process_keys.c +++ b/security/keys/process_keys.c | |||
@@ -730,7 +730,7 @@ try_again: | |||
730 | 730 | ||
731 | ret = -EIO; | 731 | ret = -EIO; |
732 | if (!(lflags & KEY_LOOKUP_PARTIAL) && | 732 | if (!(lflags & KEY_LOOKUP_PARTIAL) && |
733 | !test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) | 733 | key_read_state(key) == KEY_IS_UNINSTANTIATED) |
734 | goto invalid_key; | 734 | goto invalid_key; |
735 | 735 | ||
736 | /* check the permissions */ | 736 | /* check the permissions */ |
diff --git a/security/keys/request_key.c b/security/keys/request_key.c index 63e63a42db3c..e8036cd0ad54 100644 --- a/security/keys/request_key.c +++ b/security/keys/request_key.c | |||
@@ -595,10 +595,9 @@ int wait_for_key_construction(struct key *key, bool intr) | |||
595 | intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); | 595 | intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); |
596 | if (ret) | 596 | if (ret) |
597 | return -ERESTARTSYS; | 597 | return -ERESTARTSYS; |
598 | if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) { | 598 | ret = key_read_state(key); |
599 | smp_rmb(); | 599 | if (ret < 0) |
600 | return key->reject_error; | 600 | return ret; |
601 | } | ||
602 | return key_validate(key); | 601 | return key_validate(key); |
603 | } | 602 | } |
604 | EXPORT_SYMBOL(wait_for_key_construction); | 603 | EXPORT_SYMBOL(wait_for_key_construction); |
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c index 6ebf1af8fce9..424e1d90412e 100644 --- a/security/keys/request_key_auth.c +++ b/security/keys/request_key_auth.c | |||
@@ -73,7 +73,7 @@ static void request_key_auth_describe(const struct key *key, | |||
73 | 73 | ||
74 | seq_puts(m, "key:"); | 74 | seq_puts(m, "key:"); |
75 | seq_puts(m, key->description); | 75 | seq_puts(m, key->description); |
76 | if (key_is_instantiated(key)) | 76 | if (key_is_positive(key)) |
77 | seq_printf(m, " pid:%d ci:%zu", rka->pid, rka->callout_len); | 77 | seq_printf(m, " pid:%d ci:%zu", rka->pid, rka->callout_len); |
78 | } | 78 | } |
79 | 79 | ||
diff --git a/security/keys/trusted.c b/security/keys/trusted.c index ddfaebf60fc8..bd85315cbfeb 100644 --- a/security/keys/trusted.c +++ b/security/keys/trusted.c | |||
@@ -1066,7 +1066,7 @@ static int trusted_update(struct key *key, struct key_preparsed_payload *prep) | |||
1066 | char *datablob; | 1066 | char *datablob; |
1067 | int ret = 0; | 1067 | int ret = 0; |
1068 | 1068 | ||
1069 | if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) | 1069 | if (key_is_negative(key)) |
1070 | return -ENOKEY; | 1070 | return -ENOKEY; |
1071 | p = key->payload.data[0]; | 1071 | p = key->payload.data[0]; |
1072 | if (!p->migratable) | 1072 | if (!p->migratable) |
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c index 3d8c68eba516..9f558bedba23 100644 --- a/security/keys/user_defined.c +++ b/security/keys/user_defined.c | |||
@@ -114,7 +114,7 @@ int user_update(struct key *key, struct key_preparsed_payload *prep) | |||
114 | 114 | ||
115 | /* attach the new data, displacing the old */ | 115 | /* attach the new data, displacing the old */ |
116 | key->expiry = prep->expiry; | 116 | key->expiry = prep->expiry; |
117 | if (!test_bit(KEY_FLAG_NEGATIVE, &key->flags)) | 117 | if (key_is_positive(key)) |
118 | zap = dereference_key_locked(key); | 118 | zap = dereference_key_locked(key); |
119 | rcu_assign_keypointer(key, prep->payload.data[0]); | 119 | rcu_assign_keypointer(key, prep->payload.data[0]); |
120 | prep->payload.data[0] = NULL; | 120 | prep->payload.data[0] = NULL; |
@@ -162,7 +162,7 @@ EXPORT_SYMBOL_GPL(user_destroy); | |||
162 | void user_describe(const struct key *key, struct seq_file *m) | 162 | void user_describe(const struct key *key, struct seq_file *m) |
163 | { | 163 | { |
164 | seq_puts(m, key->description); | 164 | seq_puts(m, key->description); |
165 | if (key_is_instantiated(key)) | 165 | if (key_is_positive(key)) |
166 | seq_printf(m, ": %u", key->datalen); | 166 | seq_printf(m, ": %u", key->datalen); |
167 | } | 167 | } |
168 | 168 | ||
diff --git a/sound/soc/codecs/adau17x1.c b/sound/soc/codecs/adau17x1.c index 2c1bd2763864..6758f789b712 100644 --- a/sound/soc/codecs/adau17x1.c +++ b/sound/soc/codecs/adau17x1.c | |||
@@ -90,6 +90,27 @@ static int adau17x1_pll_event(struct snd_soc_dapm_widget *w, | |||
90 | return 0; | 90 | return 0; |
91 | } | 91 | } |
92 | 92 | ||
93 | static int adau17x1_adc_fixup(struct snd_soc_dapm_widget *w, | ||
94 | struct snd_kcontrol *kcontrol, int event) | ||
95 | { | ||
96 | struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm); | ||
97 | struct adau *adau = snd_soc_codec_get_drvdata(codec); | ||
98 | |||
99 | /* | ||
100 | * If we are capturing, toggle the ADOSR bit in Converter Control 0 to | ||
101 | * avoid losing SNR (workaround from ADI). This must be done after | ||
102 | * the ADC(s) have been enabled. According to the data sheet, it is | ||
103 | * normally illegal to set this bit when the sampling rate is 96 kHz, | ||
104 | * but according to ADI it is acceptable for this workaround. | ||
105 | */ | ||
106 | regmap_update_bits(adau->regmap, ADAU17X1_CONVERTER0, | ||
107 | ADAU17X1_CONVERTER0_ADOSR, ADAU17X1_CONVERTER0_ADOSR); | ||
108 | regmap_update_bits(adau->regmap, ADAU17X1_CONVERTER0, | ||
109 | ADAU17X1_CONVERTER0_ADOSR, 0); | ||
110 | |||
111 | return 0; | ||
112 | } | ||
113 | |||
93 | static const char * const adau17x1_mono_stereo_text[] = { | 114 | static const char * const adau17x1_mono_stereo_text[] = { |
94 | "Stereo", | 115 | "Stereo", |
95 | "Mono Left Channel (L+R)", | 116 | "Mono Left Channel (L+R)", |
@@ -121,7 +142,8 @@ static const struct snd_soc_dapm_widget adau17x1_dapm_widgets[] = { | |||
121 | SND_SOC_DAPM_MUX("Right DAC Mode Mux", SND_SOC_NOPM, 0, 0, | 142 | SND_SOC_DAPM_MUX("Right DAC Mode Mux", SND_SOC_NOPM, 0, 0, |
122 | &adau17x1_dac_mode_mux), | 143 | &adau17x1_dac_mode_mux), |
123 | 144 | ||
124 | SND_SOC_DAPM_ADC("Left Decimator", NULL, ADAU17X1_ADC_CONTROL, 0, 0), | 145 | SND_SOC_DAPM_ADC_E("Left Decimator", NULL, ADAU17X1_ADC_CONTROL, 0, 0, |
146 | adau17x1_adc_fixup, SND_SOC_DAPM_POST_PMU), | ||
125 | SND_SOC_DAPM_ADC("Right Decimator", NULL, ADAU17X1_ADC_CONTROL, 1, 0), | 147 | SND_SOC_DAPM_ADC("Right Decimator", NULL, ADAU17X1_ADC_CONTROL, 1, 0), |
126 | SND_SOC_DAPM_DAC("Left DAC", NULL, ADAU17X1_DAC_CONTROL0, 0, 0), | 148 | SND_SOC_DAPM_DAC("Left DAC", NULL, ADAU17X1_DAC_CONTROL0, 0, 0), |
127 | SND_SOC_DAPM_DAC("Right DAC", NULL, ADAU17X1_DAC_CONTROL0, 1, 0), | 149 | SND_SOC_DAPM_DAC("Right DAC", NULL, ADAU17X1_DAC_CONTROL0, 1, 0), |
diff --git a/sound/soc/codecs/adau17x1.h b/sound/soc/codecs/adau17x1.h index bf04b7efee40..db350035fad7 100644 --- a/sound/soc/codecs/adau17x1.h +++ b/sound/soc/codecs/adau17x1.h | |||
@@ -129,5 +129,7 @@ bool adau17x1_has_dsp(struct adau *adau); | |||
129 | 129 | ||
130 | #define ADAU17X1_CONVERTER0_CONVSR_MASK 0x7 | 130 | #define ADAU17X1_CONVERTER0_CONVSR_MASK 0x7 |
131 | 131 | ||
132 | #define ADAU17X1_CONVERTER0_ADOSR BIT(3) | ||
133 | |||
132 | 134 | ||
133 | #endif | 135 | #endif |
diff --git a/sound/soc/codecs/rt5514-spi.c b/sound/soc/codecs/rt5514-spi.c index ed6e5373916c..12f2ecf3a4fe 100644 --- a/sound/soc/codecs/rt5514-spi.c +++ b/sound/soc/codecs/rt5514-spi.c | |||
@@ -145,9 +145,8 @@ done: | |||
145 | mutex_unlock(&rt5514_dsp->dma_lock); | 145 | mutex_unlock(&rt5514_dsp->dma_lock); |
146 | } | 146 | } |
147 | 147 | ||
148 | static irqreturn_t rt5514_spi_irq(int irq, void *data) | 148 | static void rt5514_schedule_copy(struct rt5514_dsp *rt5514_dsp) |
149 | { | 149 | { |
150 | struct rt5514_dsp *rt5514_dsp = data; | ||
151 | u8 buf[8]; | 150 | u8 buf[8]; |
152 | 151 | ||
153 | rt5514_dsp->get_size = 0; | 152 | rt5514_dsp->get_size = 0; |
@@ -180,6 +179,13 @@ static irqreturn_t rt5514_spi_irq(int irq, void *data) | |||
180 | if (rt5514_dsp->buf_base && rt5514_dsp->buf_limit && | 179 | if (rt5514_dsp->buf_base && rt5514_dsp->buf_limit && |
181 | rt5514_dsp->buf_rp && rt5514_dsp->buf_size) | 180 | rt5514_dsp->buf_rp && rt5514_dsp->buf_size) |
182 | schedule_delayed_work(&rt5514_dsp->copy_work, 0); | 181 | schedule_delayed_work(&rt5514_dsp->copy_work, 0); |
182 | } | ||
183 | |||
184 | static irqreturn_t rt5514_spi_irq(int irq, void *data) | ||
185 | { | ||
186 | struct rt5514_dsp *rt5514_dsp = data; | ||
187 | |||
188 | rt5514_schedule_copy(rt5514_dsp); | ||
183 | 189 | ||
184 | return IRQ_HANDLED; | 190 | return IRQ_HANDLED; |
185 | } | 191 | } |
@@ -199,12 +205,19 @@ static int rt5514_spi_hw_params(struct snd_pcm_substream *substream, | |||
199 | struct rt5514_dsp *rt5514_dsp = | 205 | struct rt5514_dsp *rt5514_dsp = |
200 | snd_soc_platform_get_drvdata(rtd->platform); | 206 | snd_soc_platform_get_drvdata(rtd->platform); |
201 | int ret; | 207 | int ret; |
208 | u8 buf[8]; | ||
202 | 209 | ||
203 | mutex_lock(&rt5514_dsp->dma_lock); | 210 | mutex_lock(&rt5514_dsp->dma_lock); |
204 | ret = snd_pcm_lib_alloc_vmalloc_buffer(substream, | 211 | ret = snd_pcm_lib_alloc_vmalloc_buffer(substream, |
205 | params_buffer_bytes(hw_params)); | 212 | params_buffer_bytes(hw_params)); |
206 | rt5514_dsp->substream = substream; | 213 | rt5514_dsp->substream = substream; |
207 | rt5514_dsp->dma_offset = 0; | 214 | rt5514_dsp->dma_offset = 0; |
215 | |||
216 | /* Read IRQ status and schedule copy accordingly. */ | ||
217 | rt5514_spi_burst_read(RT5514_IRQ_CTRL, (u8 *)&buf, sizeof(buf)); | ||
218 | if (buf[0] & RT5514_IRQ_STATUS_BIT) | ||
219 | rt5514_schedule_copy(rt5514_dsp); | ||
220 | |||
208 | mutex_unlock(&rt5514_dsp->dma_lock); | 221 | mutex_unlock(&rt5514_dsp->dma_lock); |
209 | 222 | ||
210 | return ret; | 223 | return ret; |
diff --git a/sound/soc/codecs/rt5514-spi.h b/sound/soc/codecs/rt5514-spi.h index a6434ee6ff03..c1a36647c119 100644 --- a/sound/soc/codecs/rt5514-spi.h +++ b/sound/soc/codecs/rt5514-spi.h | |||
@@ -20,6 +20,9 @@ | |||
20 | #define RT5514_BUFFER_VOICE_BASE 0x18000200 | 20 | #define RT5514_BUFFER_VOICE_BASE 0x18000200 |
21 | #define RT5514_BUFFER_VOICE_LIMIT 0x18000204 | 21 | #define RT5514_BUFFER_VOICE_LIMIT 0x18000204 |
22 | #define RT5514_BUFFER_VOICE_WP 0x1800020c | 22 | #define RT5514_BUFFER_VOICE_WP 0x1800020c |
23 | #define RT5514_IRQ_CTRL 0x18002094 | ||
24 | |||
25 | #define RT5514_IRQ_STATUS_BIT (0x1 << 5) | ||
23 | 26 | ||
24 | /* SPI Command */ | 27 | /* SPI Command */ |
25 | enum { | 28 | enum { |
diff --git a/sound/soc/codecs/rt5514.c b/sound/soc/codecs/rt5514.c index 0945d212b8dc..d7956ababd11 100644 --- a/sound/soc/codecs/rt5514.c +++ b/sound/soc/codecs/rt5514.c | |||
@@ -338,39 +338,6 @@ static int rt5514_dsp_voice_wake_up_put(struct snd_kcontrol *kcontrol, | |||
338 | fw = NULL; | 338 | fw = NULL; |
339 | } | 339 | } |
340 | 340 | ||
341 | if (rt5514->model_buf && rt5514->model_len) { | ||
342 | #if IS_ENABLED(CONFIG_SND_SOC_RT5514_SPI) | ||
343 | int ret; | ||
344 | |||
345 | ret = rt5514_spi_burst_write(0x4ff80000, | ||
346 | rt5514->model_buf, | ||
347 | ((rt5514->model_len / 8) + 1) * 8); | ||
348 | if (ret) { | ||
349 | dev_err(codec->dev, | ||
350 | "Model load failed %d\n", ret); | ||
351 | return ret; | ||
352 | } | ||
353 | #else | ||
354 | dev_err(codec->dev, | ||
355 | "No SPI driver for loading firmware\n"); | ||
356 | #endif | ||
357 | } else { | ||
358 | request_firmware(&fw, RT5514_FIRMWARE3, | ||
359 | codec->dev); | ||
360 | if (fw) { | ||
361 | #if IS_ENABLED(CONFIG_SND_SOC_RT5514_SPI) | ||
362 | rt5514_spi_burst_write(0x4ff80000, | ||
363 | fw->data, | ||
364 | ((fw->size/8)+1)*8); | ||
365 | #else | ||
366 | dev_err(codec->dev, | ||
367 | "No SPI driver to load fw\n"); | ||
368 | #endif | ||
369 | release_firmware(fw); | ||
370 | fw = NULL; | ||
371 | } | ||
372 | } | ||
373 | |||
374 | /* DSP run */ | 341 | /* DSP run */ |
375 | regmap_write(rt5514->i2c_regmap, 0x18002f00, | 342 | regmap_write(rt5514->i2c_regmap, 0x18002f00, |
376 | 0x00055148); | 343 | 0x00055148); |
@@ -385,34 +352,6 @@ static int rt5514_dsp_voice_wake_up_put(struct snd_kcontrol *kcontrol, | |||
385 | return 0; | 352 | return 0; |
386 | } | 353 | } |
387 | 354 | ||
388 | static int rt5514_hotword_model_put(struct snd_kcontrol *kcontrol, | ||
389 | const unsigned int __user *bytes, unsigned int size) | ||
390 | { | ||
391 | struct snd_soc_component *component = snd_kcontrol_chip(kcontrol); | ||
392 | struct rt5514_priv *rt5514 = snd_soc_component_get_drvdata(component); | ||
393 | struct snd_soc_codec *codec = rt5514->codec; | ||
394 | int ret = 0; | ||
395 | |||
396 | if (rt5514->model_buf || rt5514->model_len < size) { | ||
397 | if (rt5514->model_buf) | ||
398 | devm_kfree(codec->dev, rt5514->model_buf); | ||
399 | rt5514->model_buf = devm_kmalloc(codec->dev, size, GFP_KERNEL); | ||
400 | if (!rt5514->model_buf) { | ||
401 | ret = -ENOMEM; | ||
402 | goto done; | ||
403 | } | ||
404 | } | ||
405 | |||
406 | /* Skips the TLV header. */ | ||
407 | bytes += 2; | ||
408 | |||
409 | if (copy_from_user(rt5514->model_buf, bytes, size)) | ||
410 | ret = -EFAULT; | ||
411 | done: | ||
412 | rt5514->model_len = (ret ? 0 : size); | ||
413 | return ret; | ||
414 | } | ||
415 | |||
416 | static const struct snd_kcontrol_new rt5514_snd_controls[] = { | 355 | static const struct snd_kcontrol_new rt5514_snd_controls[] = { |
417 | SOC_DOUBLE_TLV("MIC Boost Volume", RT5514_ANA_CTRL_MICBST, | 356 | SOC_DOUBLE_TLV("MIC Boost Volume", RT5514_ANA_CTRL_MICBST, |
418 | RT5514_SEL_BSTL_SFT, RT5514_SEL_BSTR_SFT, 8, 0, bst_tlv), | 357 | RT5514_SEL_BSTL_SFT, RT5514_SEL_BSTR_SFT, 8, 0, bst_tlv), |
@@ -424,8 +363,6 @@ static const struct snd_kcontrol_new rt5514_snd_controls[] = { | |||
424 | adc_vol_tlv), | 363 | adc_vol_tlv), |
425 | SOC_SINGLE_EXT("DSP Voice Wake Up", SND_SOC_NOPM, 0, 1, 0, | 364 | SOC_SINGLE_EXT("DSP Voice Wake Up", SND_SOC_NOPM, 0, 1, 0, |
426 | rt5514_dsp_voice_wake_up_get, rt5514_dsp_voice_wake_up_put), | 365 | rt5514_dsp_voice_wake_up_get, rt5514_dsp_voice_wake_up_put), |
427 | SND_SOC_BYTES_TLV("Hotword Model", 0x8504, | ||
428 | NULL, rt5514_hotword_model_put), | ||
429 | }; | 366 | }; |
430 | 367 | ||
431 | /* ADC Mixer*/ | 368 | /* ADC Mixer*/ |
diff --git a/sound/soc/codecs/rt5514.h b/sound/soc/codecs/rt5514.h index 803311cb7e2a..2dc40e6d8b3f 100644 --- a/sound/soc/codecs/rt5514.h +++ b/sound/soc/codecs/rt5514.h | |||
@@ -255,7 +255,6 @@ | |||
255 | 255 | ||
256 | #define RT5514_FIRMWARE1 "rt5514_dsp_fw1.bin" | 256 | #define RT5514_FIRMWARE1 "rt5514_dsp_fw1.bin" |
257 | #define RT5514_FIRMWARE2 "rt5514_dsp_fw2.bin" | 257 | #define RT5514_FIRMWARE2 "rt5514_dsp_fw2.bin" |
258 | #define RT5514_FIRMWARE3 "rt5514_dsp_fw3.bin" | ||
259 | 258 | ||
260 | /* System Clock Source */ | 259 | /* System Clock Source */ |
261 | enum { | 260 | enum { |
@@ -282,8 +281,6 @@ struct rt5514_priv { | |||
282 | int pll_in; | 281 | int pll_in; |
283 | int pll_out; | 282 | int pll_out; |
284 | int dsp_enabled; | 283 | int dsp_enabled; |
285 | u8 *model_buf; | ||
286 | unsigned int model_len; | ||
287 | }; | 284 | }; |
288 | 285 | ||
289 | #endif /* __RT5514_H__ */ | 286 | #endif /* __RT5514_H__ */ |
diff --git a/sound/soc/codecs/rt5616.c b/sound/soc/codecs/rt5616.c index c94e94fe8297..0e5f54a9bc7e 100644 --- a/sound/soc/codecs/rt5616.c +++ b/sound/soc/codecs/rt5616.c | |||
@@ -98,7 +98,7 @@ static const struct reg_default rt5616_reg[] = { | |||
98 | { 0x8e, 0x0004 }, | 98 | { 0x8e, 0x0004 }, |
99 | { 0x8f, 0x1100 }, | 99 | { 0x8f, 0x1100 }, |
100 | { 0x90, 0x0000 }, | 100 | { 0x90, 0x0000 }, |
101 | { 0x91, 0x0000 }, | 101 | { 0x91, 0x0c00 }, |
102 | { 0x92, 0x0000 }, | 102 | { 0x92, 0x0000 }, |
103 | { 0x93, 0x2000 }, | 103 | { 0x93, 0x2000 }, |
104 | { 0x94, 0x0200 }, | 104 | { 0x94, 0x0200 }, |
diff --git a/sound/soc/codecs/rt5659.c b/sound/soc/codecs/rt5659.c index 71216db15eab..fa66b11df8d4 100644 --- a/sound/soc/codecs/rt5659.c +++ b/sound/soc/codecs/rt5659.c | |||
@@ -2744,7 +2744,8 @@ static const struct snd_soc_dapm_widget rt5659_dapm_widgets[] = { | |||
2744 | SND_SOC_DAPM_PRE_PMU), | 2744 | SND_SOC_DAPM_PRE_PMU), |
2745 | SND_SOC_DAPM_PGA_S("HP Amp", 1, SND_SOC_NOPM, 0, 0, rt5659_hp_event, | 2745 | SND_SOC_DAPM_PGA_S("HP Amp", 1, SND_SOC_NOPM, 0, 0, rt5659_hp_event, |
2746 | SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU), | 2746 | SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMU), |
2747 | SND_SOC_DAPM_PGA("LOUT Amp", SND_SOC_NOPM, 0, 0, NULL, 0), | 2747 | SND_SOC_DAPM_PGA_S("LOUT Amp", 1, RT5659_PWR_ANLG_1, RT5659_PWR_LM_BIT, |
2748 | 0, NULL, 0), | ||
2748 | 2749 | ||
2749 | SND_SOC_DAPM_SUPPLY("Charge Pump", SND_SOC_NOPM, 0, 0, | 2750 | SND_SOC_DAPM_SUPPLY("Charge Pump", SND_SOC_NOPM, 0, 0, |
2750 | rt5659_charge_pump_event, SND_SOC_DAPM_PRE_PMU | | 2751 | rt5659_charge_pump_event, SND_SOC_DAPM_PRE_PMU | |
@@ -3208,6 +3209,7 @@ static const struct snd_soc_dapm_route rt5659_dapm_routes[] = { | |||
3208 | { "LOUT R MIX", "OUTVOL R Switch", "OUTVOL R" }, | 3209 | { "LOUT R MIX", "OUTVOL R Switch", "OUTVOL R" }, |
3209 | { "LOUT Amp", NULL, "LOUT L MIX" }, | 3210 | { "LOUT Amp", NULL, "LOUT L MIX" }, |
3210 | { "LOUT Amp", NULL, "LOUT R MIX" }, | 3211 | { "LOUT Amp", NULL, "LOUT R MIX" }, |
3212 | { "LOUT Amp", NULL, "Charge Pump" }, | ||
3211 | { "LOUT Amp", NULL, "SYS CLK DET" }, | 3213 | { "LOUT Amp", NULL, "SYS CLK DET" }, |
3212 | { "LOUT L Playback", "Switch", "LOUT Amp" }, | 3214 | { "LOUT L Playback", "Switch", "LOUT Amp" }, |
3213 | { "LOUT R Playback", "Switch", "LOUT Amp" }, | 3215 | { "LOUT R Playback", "Switch", "LOUT Amp" }, |
diff --git a/sound/soc/codecs/rt5663.c b/sound/soc/codecs/rt5663.c index ab9e0ebff5a7..e45b895d8279 100644 --- a/sound/soc/codecs/rt5663.c +++ b/sound/soc/codecs/rt5663.c | |||
@@ -1639,7 +1639,8 @@ static irqreturn_t rt5663_irq(int irq, void *data) | |||
1639 | { | 1639 | { |
1640 | struct rt5663_priv *rt5663 = data; | 1640 | struct rt5663_priv *rt5663 = data; |
1641 | 1641 | ||
1642 | dev_dbg(rt5663->codec->dev, "%s IRQ queue work\n", __func__); | 1642 | dev_dbg(regmap_get_device(rt5663->regmap), "%s IRQ queue work\n", |
1643 | __func__); | ||
1643 | 1644 | ||
1644 | queue_delayed_work(system_wq, &rt5663->jack_detect_work, | 1645 | queue_delayed_work(system_wq, &rt5663->jack_detect_work, |
1645 | msecs_to_jiffies(250)); | 1646 | msecs_to_jiffies(250)); |
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c index dd471d2c0266..01a50413c66f 100644 --- a/sound/soc/soc-topology.c +++ b/sound/soc/soc-topology.c | |||
@@ -1301,7 +1301,7 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_denum_create( | |||
1301 | /* validate kcontrol */ | 1301 | /* validate kcontrol */ |
1302 | if (strnlen(ec->hdr.name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) == | 1302 | if (strnlen(ec->hdr.name, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) == |
1303 | SNDRV_CTL_ELEM_ID_NAME_MAXLEN) | 1303 | SNDRV_CTL_ELEM_ID_NAME_MAXLEN) |
1304 | return NULL; | 1304 | goto err; |
1305 | 1305 | ||
1306 | se = kzalloc(sizeof(*se), GFP_KERNEL); | 1306 | se = kzalloc(sizeof(*se), GFP_KERNEL); |
1307 | if (se == NULL) | 1307 | if (se == NULL) |
@@ -1378,6 +1378,9 @@ err_se: | |||
1378 | for (; i >= 0; i--) { | 1378 | for (; i >= 0; i--) { |
1379 | /* free values and texts */ | 1379 | /* free values and texts */ |
1380 | se = (struct soc_enum *)kc[i].private_value; | 1380 | se = (struct soc_enum *)kc[i].private_value; |
1381 | if (!se) | ||
1382 | continue; | ||
1383 | |||
1381 | kfree(se->dobj.control.dvalues); | 1384 | kfree(se->dobj.control.dvalues); |
1382 | for (j = 0; j < ec->items; j++) | 1385 | for (j = 0; j < ec->items; j++) |
1383 | kfree(se->dobj.control.dtexts[j]); | 1386 | kfree(se->dobj.control.dtexts[j]); |
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 43ab5c402f98..c174971afbe6 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h | |||
@@ -312,7 +312,7 @@ union bpf_attr { | |||
312 | * jump into another BPF program | 312 | * jump into another BPF program |
313 | * @ctx: context pointer passed to next program | 313 | * @ctx: context pointer passed to next program |
314 | * @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY | 314 | * @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY |
315 | * @index: index inside array that selects specific program to run | 315 | * @index: 32-bit index inside array that selects specific program to run |
316 | * Return: 0 on success or negative error | 316 | * Return: 0 on success or negative error |
317 | * | 317 | * |
318 | * int bpf_clone_redirect(skb, ifindex, flags) | 318 | * int bpf_clone_redirect(skb, ifindex, flags) |
@@ -569,9 +569,10 @@ union bpf_attr { | |||
569 | * @flags: reserved for future use | 569 | * @flags: reserved for future use |
570 | * Return: 0 on success or negative error code | 570 | * Return: 0 on success or negative error code |
571 | * | 571 | * |
572 | * int bpf_sk_redirect_map(map, key, flags) | 572 | * int bpf_sk_redirect_map(skb, map, key, flags) |
573 | * Redirect skb to a sock in map using key as a lookup key for the | 573 | * Redirect skb to a sock in map using key as a lookup key for the |
574 | * sock in map. | 574 | * sock in map. |
575 | * @skb: pointer to skb | ||
575 | * @map: pointer to sockmap | 576 | * @map: pointer to sockmap |
576 | * @key: key to lookup sock in map | 577 | * @key: key to lookup sock in map |
577 | * @flags: reserved for future use | 578 | * @flags: reserved for future use |
@@ -786,8 +787,8 @@ struct xdp_md { | |||
786 | }; | 787 | }; |
787 | 788 | ||
788 | enum sk_action { | 789 | enum sk_action { |
789 | SK_ABORTED = 0, | 790 | SK_DROP = 0, |
790 | SK_DROP, | 791 | SK_PASS, |
791 | SK_REDIRECT, | 792 | SK_REDIRECT, |
792 | }; | 793 | }; |
793 | 794 | ||
diff --git a/tools/objtool/check.c b/tools/objtool/check.c index a0c518ecf085..c0e26ad1fa7e 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c | |||
@@ -267,12 +267,13 @@ static int decode_instructions(struct objtool_file *file) | |||
267 | &insn->immediate, | 267 | &insn->immediate, |
268 | &insn->stack_op); | 268 | &insn->stack_op); |
269 | if (ret) | 269 | if (ret) |
270 | return ret; | 270 | goto err; |
271 | 271 | ||
272 | if (!insn->type || insn->type > INSN_LAST) { | 272 | if (!insn->type || insn->type > INSN_LAST) { |
273 | WARN_FUNC("invalid instruction type %d", | 273 | WARN_FUNC("invalid instruction type %d", |
274 | insn->sec, insn->offset, insn->type); | 274 | insn->sec, insn->offset, insn->type); |
275 | return -1; | 275 | ret = -1; |
276 | goto err; | ||
276 | } | 277 | } |
277 | 278 | ||
278 | hash_add(file->insn_hash, &insn->hash, insn->offset); | 279 | hash_add(file->insn_hash, &insn->hash, insn->offset); |
@@ -296,6 +297,10 @@ static int decode_instructions(struct objtool_file *file) | |||
296 | } | 297 | } |
297 | 298 | ||
298 | return 0; | 299 | return 0; |
300 | |||
301 | err: | ||
302 | free(insn); | ||
303 | return ret; | ||
299 | } | 304 | } |
300 | 305 | ||
301 | /* | 306 | /* |
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt index e397453e5a46..63526f4416ea 100644 --- a/tools/perf/Documentation/perf-record.txt +++ b/tools/perf/Documentation/perf-record.txt | |||
@@ -8,8 +8,8 @@ perf-record - Run a command and record its profile into perf.data | |||
8 | SYNOPSIS | 8 | SYNOPSIS |
9 | -------- | 9 | -------- |
10 | [verse] | 10 | [verse] |
11 | 'perf record' [-e <EVENT> | --event=EVENT] [-l] [-a] <command> | 11 | 'perf record' [-e <EVENT> | --event=EVENT] [-a] <command> |
12 | 'perf record' [-e <EVENT> | --event=EVENT] [-l] [-a] -- <command> [<options>] | 12 | 'perf record' [-e <EVENT> | --event=EVENT] [-a] -- <command> [<options>] |
13 | 13 | ||
14 | DESCRIPTION | 14 | DESCRIPTION |
15 | ----------- | 15 | ----------- |
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index 3d4c3b5e1868..0c977b6e0f8b 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c | |||
@@ -586,7 +586,7 @@ static void print_sample_brstack(struct perf_sample *sample, | |||
586 | thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, to, &alt); | 586 | thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, to, &alt); |
587 | } | 587 | } |
588 | 588 | ||
589 | printf("0x%"PRIx64, from); | 589 | printf(" 0x%"PRIx64, from); |
590 | if (PRINT_FIELD(DSO)) { | 590 | if (PRINT_FIELD(DSO)) { |
591 | printf("("); | 591 | printf("("); |
592 | map__fprintf_dsoname(alf.map, stdout); | 592 | map__fprintf_dsoname(alf.map, stdout); |
@@ -681,7 +681,7 @@ static void print_sample_brstackoff(struct perf_sample *sample, | |||
681 | if (alt.map && !alt.map->dso->adjust_symbols) | 681 | if (alt.map && !alt.map->dso->adjust_symbols) |
682 | to = map__map_ip(alt.map, to); | 682 | to = map__map_ip(alt.map, to); |
683 | 683 | ||
684 | printf("0x%"PRIx64, from); | 684 | printf(" 0x%"PRIx64, from); |
685 | if (PRINT_FIELD(DSO)) { | 685 | if (PRINT_FIELD(DSO)) { |
686 | printf("("); | 686 | printf("("); |
687 | map__fprintf_dsoname(alf.map, stdout); | 687 | map__fprintf_dsoname(alf.map, stdout); |
diff --git a/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh b/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh index 462fc755092e..7a84d73324e3 100755 --- a/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh +++ b/tools/perf/tests/shell/trace+probe_libc_inet_pton.sh | |||
@@ -10,6 +10,9 @@ | |||
10 | 10 | ||
11 | . $(dirname $0)/lib/probe.sh | 11 | . $(dirname $0)/lib/probe.sh |
12 | 12 | ||
13 | ld=$(realpath /lib64/ld*.so.* | uniq) | ||
14 | libc=$(echo $ld | sed 's/ld/libc/g') | ||
15 | |||
13 | trace_libc_inet_pton_backtrace() { | 16 | trace_libc_inet_pton_backtrace() { |
14 | idx=0 | 17 | idx=0 |
15 | expected[0]="PING.*bytes" | 18 | expected[0]="PING.*bytes" |
@@ -18,8 +21,8 @@ trace_libc_inet_pton_backtrace() { | |||
18 | expected[3]=".*packets transmitted.*" | 21 | expected[3]=".*packets transmitted.*" |
19 | expected[4]="rtt min.*" | 22 | expected[4]="rtt min.*" |
20 | expected[5]="[0-9]+\.[0-9]+[[:space:]]+probe_libc:inet_pton:\([[:xdigit:]]+\)" | 23 | expected[5]="[0-9]+\.[0-9]+[[:space:]]+probe_libc:inet_pton:\([[:xdigit:]]+\)" |
21 | expected[6]=".*inet_pton[[:space:]]\(/usr/lib.*/libc-[0-9]+\.[0-9]+\.so\)$" | 24 | expected[6]=".*inet_pton[[:space:]]\($libc\)$" |
22 | expected[7]="getaddrinfo[[:space:]]\(/usr/lib.*/libc-[0-9]+\.[0-9]+\.so\)$" | 25 | expected[7]="getaddrinfo[[:space:]]\($libc\)$" |
23 | expected[8]=".*\(.*/bin/ping.*\)$" | 26 | expected[8]=".*\(.*/bin/ping.*\)$" |
24 | 27 | ||
25 | perf trace --no-syscalls -e probe_libc:inet_pton/max-stack=3/ ping -6 -c 1 ::1 2>&1 | grep -v ^$ | while read line ; do | 28 | perf trace --no-syscalls -e probe_libc:inet_pton/max-stack=3/ ping -6 -c 1 ::1 2>&1 | grep -v ^$ | while read line ; do |
@@ -35,7 +38,7 @@ trace_libc_inet_pton_backtrace() { | |||
35 | } | 38 | } |
36 | 39 | ||
37 | skip_if_no_perf_probe && \ | 40 | skip_if_no_perf_probe && \ |
38 | perf probe -q /lib64/libc-*.so inet_pton && \ | 41 | perf probe -q $libc inet_pton && \ |
39 | trace_libc_inet_pton_backtrace | 42 | trace_libc_inet_pton_backtrace |
40 | err=$? | 43 | err=$? |
41 | rm -f ${file} | 44 | rm -f ${file} |
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c index ddb2c6fbdf91..db79017a6e56 100644 --- a/tools/perf/ui/hist.c +++ b/tools/perf/ui/hist.c | |||
@@ -532,7 +532,7 @@ void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list, | |||
532 | 532 | ||
533 | void perf_hpp__column_unregister(struct perf_hpp_fmt *format) | 533 | void perf_hpp__column_unregister(struct perf_hpp_fmt *format) |
534 | { | 534 | { |
535 | list_del(&format->list); | 535 | list_del_init(&format->list); |
536 | } | 536 | } |
537 | 537 | ||
538 | void perf_hpp__cancel_cumulate(void) | 538 | void perf_hpp__cancel_cumulate(void) |
@@ -606,6 +606,13 @@ next: | |||
606 | 606 | ||
607 | static void fmt_free(struct perf_hpp_fmt *fmt) | 607 | static void fmt_free(struct perf_hpp_fmt *fmt) |
608 | { | 608 | { |
609 | /* | ||
610 | * At this point fmt should be completely | ||
611 | * unhooked, if not it's a bug. | ||
612 | */ | ||
613 | BUG_ON(!list_empty(&fmt->list)); | ||
614 | BUG_ON(!list_empty(&fmt->sort_list)); | ||
615 | |||
609 | if (fmt->free) | 616 | if (fmt->free) |
610 | fmt->free(fmt); | 617 | fmt->free(fmt); |
611 | } | 618 | } |
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c index be09d77cade0..a971caf3759d 100644 --- a/tools/perf/util/callchain.c +++ b/tools/perf/util/callchain.c | |||
@@ -685,6 +685,8 @@ static enum match_result match_chain(struct callchain_cursor_node *node, | |||
685 | { | 685 | { |
686 | struct symbol *sym = node->sym; | 686 | struct symbol *sym = node->sym; |
687 | u64 left, right; | 687 | u64 left, right; |
688 | struct dso *left_dso = NULL; | ||
689 | struct dso *right_dso = NULL; | ||
688 | 690 | ||
689 | if (callchain_param.key == CCKEY_SRCLINE) { | 691 | if (callchain_param.key == CCKEY_SRCLINE) { |
690 | enum match_result match = match_chain_srcline(node, cnode); | 692 | enum match_result match = match_chain_srcline(node, cnode); |
@@ -696,12 +698,14 @@ static enum match_result match_chain(struct callchain_cursor_node *node, | |||
696 | if (cnode->ms.sym && sym && callchain_param.key == CCKEY_FUNCTION) { | 698 | if (cnode->ms.sym && sym && callchain_param.key == CCKEY_FUNCTION) { |
697 | left = cnode->ms.sym->start; | 699 | left = cnode->ms.sym->start; |
698 | right = sym->start; | 700 | right = sym->start; |
701 | left_dso = cnode->ms.map->dso; | ||
702 | right_dso = node->map->dso; | ||
699 | } else { | 703 | } else { |
700 | left = cnode->ip; | 704 | left = cnode->ip; |
701 | right = node->ip; | 705 | right = node->ip; |
702 | } | 706 | } |
703 | 707 | ||
704 | if (left == right) { | 708 | if (left == right && left_dso == right_dso) { |
705 | if (node->branch) { | 709 | if (node->branch) { |
706 | cnode->branch_count++; | 710 | cnode->branch_count++; |
707 | 711 | ||
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index f6257fb4f08c..39b15968eab1 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c | |||
@@ -309,10 +309,11 @@ static char *get_config_name(struct list_head *head_terms) | |||
309 | static struct perf_evsel * | 309 | static struct perf_evsel * |
310 | __add_event(struct list_head *list, int *idx, | 310 | __add_event(struct list_head *list, int *idx, |
311 | struct perf_event_attr *attr, | 311 | struct perf_event_attr *attr, |
312 | char *name, struct cpu_map *cpus, | 312 | char *name, struct perf_pmu *pmu, |
313 | struct list_head *config_terms, bool auto_merge_stats) | 313 | struct list_head *config_terms, bool auto_merge_stats) |
314 | { | 314 | { |
315 | struct perf_evsel *evsel; | 315 | struct perf_evsel *evsel; |
316 | struct cpu_map *cpus = pmu ? pmu->cpus : NULL; | ||
316 | 317 | ||
317 | event_attr_init(attr); | 318 | event_attr_init(attr); |
318 | 319 | ||
@@ -323,7 +324,7 @@ __add_event(struct list_head *list, int *idx, | |||
323 | (*idx)++; | 324 | (*idx)++; |
324 | evsel->cpus = cpu_map__get(cpus); | 325 | evsel->cpus = cpu_map__get(cpus); |
325 | evsel->own_cpus = cpu_map__get(cpus); | 326 | evsel->own_cpus = cpu_map__get(cpus); |
326 | evsel->system_wide = !!cpus; | 327 | evsel->system_wide = pmu ? pmu->is_uncore : false; |
327 | evsel->auto_merge_stats = auto_merge_stats; | 328 | evsel->auto_merge_stats = auto_merge_stats; |
328 | 329 | ||
329 | if (name) | 330 | if (name) |
@@ -1233,7 +1234,7 @@ static int __parse_events_add_pmu(struct parse_events_state *parse_state, | |||
1233 | 1234 | ||
1234 | if (!head_config) { | 1235 | if (!head_config) { |
1235 | attr.type = pmu->type; | 1236 | attr.type = pmu->type; |
1236 | evsel = __add_event(list, &parse_state->idx, &attr, NULL, pmu->cpus, NULL, auto_merge_stats); | 1237 | evsel = __add_event(list, &parse_state->idx, &attr, NULL, pmu, NULL, auto_merge_stats); |
1237 | return evsel ? 0 : -ENOMEM; | 1238 | return evsel ? 0 : -ENOMEM; |
1238 | } | 1239 | } |
1239 | 1240 | ||
@@ -1254,7 +1255,7 @@ static int __parse_events_add_pmu(struct parse_events_state *parse_state, | |||
1254 | return -EINVAL; | 1255 | return -EINVAL; |
1255 | 1256 | ||
1256 | evsel = __add_event(list, &parse_state->idx, &attr, | 1257 | evsel = __add_event(list, &parse_state->idx, &attr, |
1257 | get_config_name(head_config), pmu->cpus, | 1258 | get_config_name(head_config), pmu, |
1258 | &config_terms, auto_merge_stats); | 1259 | &config_terms, auto_merge_stats); |
1259 | if (evsel) { | 1260 | if (evsel) { |
1260 | evsel->unit = info.unit; | 1261 | evsel->unit = info.unit; |
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l index c42edeac451f..dcfdafdc2f1c 100644 --- a/tools/perf/util/parse-events.l +++ b/tools/perf/util/parse-events.l | |||
@@ -8,6 +8,9 @@ | |||
8 | 8 | ||
9 | %{ | 9 | %{ |
10 | #include <errno.h> | 10 | #include <errno.h> |
11 | #include <sys/types.h> | ||
12 | #include <sys/stat.h> | ||
13 | #include <unistd.h> | ||
11 | #include "../perf.h" | 14 | #include "../perf.h" |
12 | #include "parse-events.h" | 15 | #include "parse-events.h" |
13 | #include "parse-events-bison.h" | 16 | #include "parse-events-bison.h" |
@@ -53,9 +56,8 @@ static int str(yyscan_t scanner, int token) | |||
53 | return token; | 56 | return token; |
54 | } | 57 | } |
55 | 58 | ||
56 | static bool isbpf(yyscan_t scanner) | 59 | static bool isbpf_suffix(char *text) |
57 | { | 60 | { |
58 | char *text = parse_events_get_text(scanner); | ||
59 | int len = strlen(text); | 61 | int len = strlen(text); |
60 | 62 | ||
61 | if (len < 2) | 63 | if (len < 2) |
@@ -68,6 +70,17 @@ static bool isbpf(yyscan_t scanner) | |||
68 | return false; | 70 | return false; |
69 | } | 71 | } |
70 | 72 | ||
73 | static bool isbpf(yyscan_t scanner) | ||
74 | { | ||
75 | char *text = parse_events_get_text(scanner); | ||
76 | struct stat st; | ||
77 | |||
78 | if (!isbpf_suffix(text)) | ||
79 | return false; | ||
80 | |||
81 | return stat(text, &st) == 0; | ||
82 | } | ||
83 | |||
71 | /* | 84 | /* |
72 | * This function is called when the parser gets two kind of input: | 85 | * This function is called when the parser gets two kind of input: |
73 | * | 86 | * |
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index ac16a9db1fb5..1c4d7b4e4fb5 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c | |||
@@ -470,17 +470,36 @@ static void pmu_read_sysfs(void) | |||
470 | closedir(dir); | 470 | closedir(dir); |
471 | } | 471 | } |
472 | 472 | ||
473 | static struct cpu_map *__pmu_cpumask(const char *path) | ||
474 | { | ||
475 | FILE *file; | ||
476 | struct cpu_map *cpus; | ||
477 | |||
478 | file = fopen(path, "r"); | ||
479 | if (!file) | ||
480 | return NULL; | ||
481 | |||
482 | cpus = cpu_map__read(file); | ||
483 | fclose(file); | ||
484 | return cpus; | ||
485 | } | ||
486 | |||
487 | /* | ||
488 | * Uncore PMUs have a "cpumask" file under sysfs. CPU PMUs (e.g. on arm/arm64) | ||
489 | * may have a "cpus" file. | ||
490 | */ | ||
491 | #define CPUS_TEMPLATE_UNCORE "%s/bus/event_source/devices/%s/cpumask" | ||
492 | #define CPUS_TEMPLATE_CPU "%s/bus/event_source/devices/%s/cpus" | ||
493 | |||
473 | static struct cpu_map *pmu_cpumask(const char *name) | 494 | static struct cpu_map *pmu_cpumask(const char *name) |
474 | { | 495 | { |
475 | struct stat st; | ||
476 | char path[PATH_MAX]; | 496 | char path[PATH_MAX]; |
477 | FILE *file; | ||
478 | struct cpu_map *cpus; | 497 | struct cpu_map *cpus; |
479 | const char *sysfs = sysfs__mountpoint(); | 498 | const char *sysfs = sysfs__mountpoint(); |
480 | const char *templates[] = { | 499 | const char *templates[] = { |
481 | "%s/bus/event_source/devices/%s/cpumask", | 500 | CPUS_TEMPLATE_UNCORE, |
482 | "%s/bus/event_source/devices/%s/cpus", | 501 | CPUS_TEMPLATE_CPU, |
483 | NULL | 502 | NULL |
484 | }; | 503 | }; |
485 | const char **template; | 504 | const char **template; |
486 | 505 | ||
@@ -489,20 +508,25 @@ static struct cpu_map *pmu_cpumask(const char *name) | |||
489 | 508 | ||
490 | for (template = templates; *template; template++) { | 509 | for (template = templates; *template; template++) { |
491 | snprintf(path, PATH_MAX, *template, sysfs, name); | 510 | snprintf(path, PATH_MAX, *template, sysfs, name); |
492 | if (stat(path, &st) == 0) | 511 | cpus = __pmu_cpumask(path); |
493 | break; | 512 | if (cpus) |
513 | return cpus; | ||
494 | } | 514 | } |
495 | 515 | ||
496 | if (!*template) | 516 | return NULL; |
497 | return NULL; | 517 | } |
498 | 518 | ||
499 | file = fopen(path, "r"); | 519 | static bool pmu_is_uncore(const char *name) |
500 | if (!file) | 520 | { |
501 | return NULL; | 521 | char path[PATH_MAX]; |
522 | struct cpu_map *cpus; | ||
523 | const char *sysfs = sysfs__mountpoint(); | ||
502 | 524 | ||
503 | cpus = cpu_map__read(file); | 525 | snprintf(path, PATH_MAX, CPUS_TEMPLATE_UNCORE, sysfs, name); |
504 | fclose(file); | 526 | cpus = __pmu_cpumask(path); |
505 | return cpus; | 527 | cpu_map__put(cpus); |
528 | |||
529 | return !!cpus; | ||
506 | } | 530 | } |
507 | 531 | ||
508 | /* | 532 | /* |
@@ -617,6 +641,8 @@ static struct perf_pmu *pmu_lookup(const char *name) | |||
617 | 641 | ||
618 | pmu->cpus = pmu_cpumask(name); | 642 | pmu->cpus = pmu_cpumask(name); |
619 | 643 | ||
644 | pmu->is_uncore = pmu_is_uncore(name); | ||
645 | |||
620 | INIT_LIST_HEAD(&pmu->format); | 646 | INIT_LIST_HEAD(&pmu->format); |
621 | INIT_LIST_HEAD(&pmu->aliases); | 647 | INIT_LIST_HEAD(&pmu->aliases); |
622 | list_splice(&format, &pmu->format); | 648 | list_splice(&format, &pmu->format); |
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h index 389e9729331f..fe0de0502ce2 100644 --- a/tools/perf/util/pmu.h +++ b/tools/perf/util/pmu.h | |||
@@ -22,6 +22,7 @@ struct perf_pmu { | |||
22 | char *name; | 22 | char *name; |
23 | __u32 type; | 23 | __u32 type; |
24 | bool selectable; | 24 | bool selectable; |
25 | bool is_uncore; | ||
25 | struct perf_event_attr *default_config; | 26 | struct perf_event_attr *default_config; |
26 | struct cpu_map *cpus; | 27 | struct cpu_map *cpus; |
27 | struct list_head format; /* HEAD struct perf_pmu_format -> list */ | 28 | struct list_head format; /* HEAD struct perf_pmu_format -> list */ |
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index a7ebd9fe8e40..76ab0709a20c 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c | |||
@@ -374,6 +374,8 @@ void perf_tool__fill_defaults(struct perf_tool *tool) | |||
374 | tool->mmap2 = process_event_stub; | 374 | tool->mmap2 = process_event_stub; |
375 | if (tool->comm == NULL) | 375 | if (tool->comm == NULL) |
376 | tool->comm = process_event_stub; | 376 | tool->comm = process_event_stub; |
377 | if (tool->namespaces == NULL) | ||
378 | tool->namespaces = process_event_stub; | ||
377 | if (tool->fork == NULL) | 379 | if (tool->fork == NULL) |
378 | tool->fork = process_event_stub; | 380 | tool->fork = process_event_stub; |
379 | if (tool->exit == NULL) | 381 | if (tool->exit == NULL) |
diff --git a/tools/perf/util/xyarray.h b/tools/perf/util/xyarray.h index 4ba726c90870..54af60462130 100644 --- a/tools/perf/util/xyarray.h +++ b/tools/perf/util/xyarray.h | |||
@@ -23,12 +23,12 @@ static inline void *xyarray__entry(struct xyarray *xy, int x, int y) | |||
23 | 23 | ||
24 | static inline int xyarray__max_y(struct xyarray *xy) | 24 | static inline int xyarray__max_y(struct xyarray *xy) |
25 | { | 25 | { |
26 | return xy->max_x; | 26 | return xy->max_y; |
27 | } | 27 | } |
28 | 28 | ||
29 | static inline int xyarray__max_x(struct xyarray *xy) | 29 | static inline int xyarray__max_x(struct xyarray *xy) |
30 | { | 30 | { |
31 | return xy->max_y; | 31 | return xy->max_x; |
32 | } | 32 | } |
33 | 33 | ||
34 | #endif /* _PERF_XYARRAY_H_ */ | 34 | #endif /* _PERF_XYARRAY_H_ */ |
diff --git a/tools/power/cpupower/Makefile b/tools/power/cpupower/Makefile index 4c5a481a850c..d6e1c02ddcfe 100644 --- a/tools/power/cpupower/Makefile +++ b/tools/power/cpupower/Makefile | |||
@@ -26,7 +26,7 @@ endif | |||
26 | 26 | ||
27 | ifneq ($(OUTPUT),) | 27 | ifneq ($(OUTPUT),) |
28 | # check that the output directory actually exists | 28 | # check that the output directory actually exists |
29 | OUTDIR := $(realpath $(OUTPUT)) | 29 | OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd) |
30 | $(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist)) | 30 | $(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist)) |
31 | endif | 31 | endif |
32 | 32 | ||
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 0dafba2c1e7d..bd9c6b31a504 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c | |||
@@ -92,7 +92,6 @@ unsigned int do_ring_perf_limit_reasons; | |||
92 | unsigned int crystal_hz; | 92 | unsigned int crystal_hz; |
93 | unsigned long long tsc_hz; | 93 | unsigned long long tsc_hz; |
94 | int base_cpu; | 94 | int base_cpu; |
95 | int do_migrate; | ||
96 | double discover_bclk(unsigned int family, unsigned int model); | 95 | double discover_bclk(unsigned int family, unsigned int model); |
97 | unsigned int has_hwp; /* IA32_PM_ENABLE, IA32_HWP_CAPABILITIES */ | 96 | unsigned int has_hwp; /* IA32_PM_ENABLE, IA32_HWP_CAPABILITIES */ |
98 | /* IA32_HWP_REQUEST, IA32_HWP_STATUS */ | 97 | /* IA32_HWP_REQUEST, IA32_HWP_STATUS */ |
@@ -303,9 +302,6 @@ int for_all_cpus(int (func)(struct thread_data *, struct core_data *, struct pkg | |||
303 | 302 | ||
304 | int cpu_migrate(int cpu) | 303 | int cpu_migrate(int cpu) |
305 | { | 304 | { |
306 | if (!do_migrate) | ||
307 | return 0; | ||
308 | |||
309 | CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set); | 305 | CPU_ZERO_S(cpu_affinity_setsize, cpu_affinity_set); |
310 | CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set); | 306 | CPU_SET_S(cpu, cpu_affinity_setsize, cpu_affinity_set); |
311 | if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1) | 307 | if (sched_setaffinity(0, cpu_affinity_setsize, cpu_affinity_set) == -1) |
@@ -5007,7 +5003,6 @@ void cmdline(int argc, char **argv) | |||
5007 | {"hide", required_argument, 0, 'H'}, // meh, -h taken by --help | 5003 | {"hide", required_argument, 0, 'H'}, // meh, -h taken by --help |
5008 | {"Joules", no_argument, 0, 'J'}, | 5004 | {"Joules", no_argument, 0, 'J'}, |
5009 | {"list", no_argument, 0, 'l'}, | 5005 | {"list", no_argument, 0, 'l'}, |
5010 | {"migrate", no_argument, 0, 'm'}, | ||
5011 | {"out", required_argument, 0, 'o'}, | 5006 | {"out", required_argument, 0, 'o'}, |
5012 | {"quiet", no_argument, 0, 'q'}, | 5007 | {"quiet", no_argument, 0, 'q'}, |
5013 | {"show", required_argument, 0, 's'}, | 5008 | {"show", required_argument, 0, 's'}, |
@@ -5019,7 +5014,7 @@ void cmdline(int argc, char **argv) | |||
5019 | 5014 | ||
5020 | progname = argv[0]; | 5015 | progname = argv[0]; |
5021 | 5016 | ||
5022 | while ((opt = getopt_long_only(argc, argv, "+C:c:Ddhi:Jmo:qST:v", | 5017 | while ((opt = getopt_long_only(argc, argv, "+C:c:Ddhi:JM:m:o:qST:v", |
5023 | long_options, &option_index)) != -1) { | 5018 | long_options, &option_index)) != -1) { |
5024 | switch (opt) { | 5019 | switch (opt) { |
5025 | case 'a': | 5020 | case 'a': |
@@ -5062,9 +5057,6 @@ void cmdline(int argc, char **argv) | |||
5062 | list_header_only++; | 5057 | list_header_only++; |
5063 | quiet++; | 5058 | quiet++; |
5064 | break; | 5059 | break; |
5065 | case 'm': | ||
5066 | do_migrate = 1; | ||
5067 | break; | ||
5068 | case 'o': | 5060 | case 'o': |
5069 | outf = fopen_or_die(optarg, "w"); | 5061 | outf = fopen_or_die(optarg, "w"); |
5070 | break; | 5062 | break; |
diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include index 9dc8f078a83c..1e8b6116ba3c 100644 --- a/tools/scripts/Makefile.include +++ b/tools/scripts/Makefile.include | |||
@@ -1,7 +1,7 @@ | |||
1 | ifneq ($(O),) | 1 | ifneq ($(O),) |
2 | ifeq ($(origin O), command line) | 2 | ifeq ($(origin O), command line) |
3 | ABSOLUTE_O := $(realpath $(O)) | 3 | dummy := $(if $(shell test -d $(O) || echo $(O)),$(error O=$(O) does not exist),) |
4 | dummy := $(if $(ABSOLUTE_O),,$(error O=$(O) does not exist)) | 4 | ABSOLUTE_O := $(shell cd $(O) ; pwd) |
5 | OUTPUT := $(ABSOLUTE_O)/$(if $(subdir),$(subdir)/) | 5 | OUTPUT := $(ABSOLUTE_O)/$(if $(subdir),$(subdir)/) |
6 | COMMAND_O := O=$(ABSOLUTE_O) | 6 | COMMAND_O := O=$(ABSOLUTE_O) |
7 | ifeq ($(objtree),) | 7 | ifeq ($(objtree),) |
@@ -12,7 +12,7 @@ endif | |||
12 | 12 | ||
13 | # check that the output directory actually exists | 13 | # check that the output directory actually exists |
14 | ifneq ($(OUTPUT),) | 14 | ifneq ($(OUTPUT),) |
15 | OUTDIR := $(realpath $(OUTPUT)) | 15 | OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd) |
16 | $(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist)) | 16 | $(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist)) |
17 | endif | 17 | endif |
18 | 18 | ||
diff --git a/tools/testing/selftests/bpf/bpf_helpers.h b/tools/testing/selftests/bpf/bpf_helpers.h index 36fb9161b34a..b2e02bdcd098 100644 --- a/tools/testing/selftests/bpf/bpf_helpers.h +++ b/tools/testing/selftests/bpf/bpf_helpers.h | |||
@@ -65,7 +65,7 @@ static int (*bpf_xdp_adjust_head)(void *ctx, int offset) = | |||
65 | static int (*bpf_setsockopt)(void *ctx, int level, int optname, void *optval, | 65 | static int (*bpf_setsockopt)(void *ctx, int level, int optname, void *optval, |
66 | int optlen) = | 66 | int optlen) = |
67 | (void *) BPF_FUNC_setsockopt; | 67 | (void *) BPF_FUNC_setsockopt; |
68 | static int (*bpf_sk_redirect_map)(void *map, int key, int flags) = | 68 | static int (*bpf_sk_redirect_map)(void *ctx, void *map, int key, int flags) = |
69 | (void *) BPF_FUNC_sk_redirect_map; | 69 | (void *) BPF_FUNC_sk_redirect_map; |
70 | static int (*bpf_sock_map_update)(void *map, void *key, void *value, | 70 | static int (*bpf_sock_map_update)(void *map, void *key, void *value, |
71 | unsigned long long flags) = | 71 | unsigned long long flags) = |
diff --git a/tools/testing/selftests/bpf/sockmap_verdict_prog.c b/tools/testing/selftests/bpf/sockmap_verdict_prog.c index 9b99bd10807d..2cd2d552938b 100644 --- a/tools/testing/selftests/bpf/sockmap_verdict_prog.c +++ b/tools/testing/selftests/bpf/sockmap_verdict_prog.c | |||
@@ -61,8 +61,8 @@ int bpf_prog2(struct __sk_buff *skb) | |||
61 | bpf_printk("verdict: data[0] = redir(%u:%u)\n", map, sk); | 61 | bpf_printk("verdict: data[0] = redir(%u:%u)\n", map, sk); |
62 | 62 | ||
63 | if (!map) | 63 | if (!map) |
64 | return bpf_sk_redirect_map(&sock_map_rx, sk, 0); | 64 | return bpf_sk_redirect_map(skb, &sock_map_rx, sk, 0); |
65 | return bpf_sk_redirect_map(&sock_map_tx, sk, 0); | 65 | return bpf_sk_redirect_map(skb, &sock_map_tx, sk, 0); |
66 | } | 66 | } |
67 | 67 | ||
68 | char _license[] SEC("license") = "GPL"; | 68 | char _license[] SEC("license") = "GPL"; |
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index fe3a443a1102..50ce52d2013d 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c | |||
@@ -466,7 +466,7 @@ static void test_sockmap(int tasks, void *data) | |||
466 | int one = 1, map_fd_rx, map_fd_tx, map_fd_break, s, sc, rc; | 466 | int one = 1, map_fd_rx, map_fd_tx, map_fd_break, s, sc, rc; |
467 | struct bpf_map *bpf_map_rx, *bpf_map_tx, *bpf_map_break; | 467 | struct bpf_map *bpf_map_rx, *bpf_map_tx, *bpf_map_break; |
468 | int ports[] = {50200, 50201, 50202, 50204}; | 468 | int ports[] = {50200, 50201, 50202, 50204}; |
469 | int err, i, fd, sfd[6] = {0xdeadbeef}; | 469 | int err, i, fd, udp, sfd[6] = {0xdeadbeef}; |
470 | u8 buf[20] = {0x0, 0x5, 0x3, 0x2, 0x1, 0x0}; | 470 | u8 buf[20] = {0x0, 0x5, 0x3, 0x2, 0x1, 0x0}; |
471 | int parse_prog, verdict_prog; | 471 | int parse_prog, verdict_prog; |
472 | struct sockaddr_in addr; | 472 | struct sockaddr_in addr; |
@@ -548,6 +548,16 @@ static void test_sockmap(int tasks, void *data) | |||
548 | goto out_sockmap; | 548 | goto out_sockmap; |
549 | } | 549 | } |
550 | 550 | ||
551 | /* Test update with unsupported UDP socket */ | ||
552 | udp = socket(AF_INET, SOCK_DGRAM, 0); | ||
553 | i = 0; | ||
554 | err = bpf_map_update_elem(fd, &i, &udp, BPF_ANY); | ||
555 | if (!err) { | ||
556 | printf("Failed socket SOCK_DGRAM allowed '%i:%i'\n", | ||
557 | i, udp); | ||
558 | goto out_sockmap; | ||
559 | } | ||
560 | |||
551 | /* Test update without programs */ | 561 | /* Test update without programs */ |
552 | for (i = 0; i < 6; i++) { | 562 | for (i = 0; i < 6; i++) { |
553 | err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_ANY); | 563 | err = bpf_map_update_elem(fd, &i, &sfd[i], BPF_ANY); |
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c index 26f3250bdcd2..64ae21f64489 100644 --- a/tools/testing/selftests/bpf/test_verifier.c +++ b/tools/testing/selftests/bpf/test_verifier.c | |||
@@ -1130,15 +1130,27 @@ static struct bpf_test tests[] = { | |||
1130 | .errstr = "invalid bpf_context access", | 1130 | .errstr = "invalid bpf_context access", |
1131 | }, | 1131 | }, |
1132 | { | 1132 | { |
1133 | "check skb->mark is writeable by SK_SKB", | 1133 | "invalid access of skb->mark for SK_SKB", |
1134 | .insns = { | ||
1135 | BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, | ||
1136 | offsetof(struct __sk_buff, mark)), | ||
1137 | BPF_EXIT_INSN(), | ||
1138 | }, | ||
1139 | .result = REJECT, | ||
1140 | .prog_type = BPF_PROG_TYPE_SK_SKB, | ||
1141 | .errstr = "invalid bpf_context access", | ||
1142 | }, | ||
1143 | { | ||
1144 | "check skb->mark is not writeable by SK_SKB", | ||
1134 | .insns = { | 1145 | .insns = { |
1135 | BPF_MOV64_IMM(BPF_REG_0, 0), | 1146 | BPF_MOV64_IMM(BPF_REG_0, 0), |
1136 | BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, | 1147 | BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, |
1137 | offsetof(struct __sk_buff, mark)), | 1148 | offsetof(struct __sk_buff, mark)), |
1138 | BPF_EXIT_INSN(), | 1149 | BPF_EXIT_INSN(), |
1139 | }, | 1150 | }, |
1140 | .result = ACCEPT, | 1151 | .result = REJECT, |
1141 | .prog_type = BPF_PROG_TYPE_SK_SKB, | 1152 | .prog_type = BPF_PROG_TYPE_SK_SKB, |
1153 | .errstr = "invalid bpf_context access", | ||
1142 | }, | 1154 | }, |
1143 | { | 1155 | { |
1144 | "check skb->tc_index is writeable by SK_SKB", | 1156 | "check skb->tc_index is writeable by SK_SKB", |
@@ -6645,6 +6657,500 @@ static struct bpf_test tests[] = { | |||
6645 | .errstr = "BPF_END uses reserved fields", | 6657 | .errstr = "BPF_END uses reserved fields", |
6646 | .result = REJECT, | 6658 | .result = REJECT, |
6647 | }, | 6659 | }, |
6660 | { | ||
6661 | "arithmetic ops make PTR_TO_CTX unusable", | ||
6662 | .insns = { | ||
6663 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, | ||
6664 | offsetof(struct __sk_buff, data) - | ||
6665 | offsetof(struct __sk_buff, mark)), | ||
6666 | BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, | ||
6667 | offsetof(struct __sk_buff, mark)), | ||
6668 | BPF_EXIT_INSN(), | ||
6669 | }, | ||
6670 | .errstr = "dereference of modified ctx ptr R1 off=68+8, ctx+const is allowed, ctx+const+const is not", | ||
6671 | .result = REJECT, | ||
6672 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | ||
6673 | }, | ||
6674 | { | ||
6675 | "XDP pkt read, pkt_end mangling, bad access 1", | ||
6676 | .insns = { | ||
6677 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, | ||
6678 | offsetof(struct xdp_md, data)), | ||
6679 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, | ||
6680 | offsetof(struct xdp_md, data_end)), | ||
6681 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), | ||
6682 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), | ||
6683 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8), | ||
6684 | BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), | ||
6685 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), | ||
6686 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
6687 | BPF_EXIT_INSN(), | ||
6688 | }, | ||
6689 | .errstr = "R1 offset is outside of the packet", | ||
6690 | .result = REJECT, | ||
6691 | .prog_type = BPF_PROG_TYPE_XDP, | ||
6692 | }, | ||
6693 | { | ||
6694 | "XDP pkt read, pkt_end mangling, bad access 2", | ||
6695 | .insns = { | ||
6696 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, | ||
6697 | offsetof(struct xdp_md, data)), | ||
6698 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, | ||
6699 | offsetof(struct xdp_md, data_end)), | ||
6700 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), | ||
6701 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), | ||
6702 | BPF_ALU64_IMM(BPF_SUB, BPF_REG_3, 8), | ||
6703 | BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), | ||
6704 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), | ||
6705 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
6706 | BPF_EXIT_INSN(), | ||
6707 | }, | ||
6708 | .errstr = "R1 offset is outside of the packet", | ||
6709 | .result = REJECT, | ||
6710 | .prog_type = BPF_PROG_TYPE_XDP, | ||
6711 | }, | ||
6712 | { | ||
6713 | "XDP pkt read, pkt_data' > pkt_end, good access", | ||
6714 | .insns = { | ||
6715 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, | ||
6716 | offsetof(struct xdp_md, data)), | ||
6717 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, | ||
6718 | offsetof(struct xdp_md, data_end)), | ||
6719 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), | ||
6720 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), | ||
6721 | BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), | ||
6722 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), | ||
6723 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
6724 | BPF_EXIT_INSN(), | ||
6725 | }, | ||
6726 | .result = ACCEPT, | ||
6727 | .prog_type = BPF_PROG_TYPE_XDP, | ||
6728 | }, | ||
6729 | { | ||
6730 | "XDP pkt read, pkt_data' > pkt_end, bad access 1", | ||
6731 | .insns = { | ||
6732 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, | ||
6733 | offsetof(struct xdp_md, data)), | ||
6734 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, | ||
6735 | offsetof(struct xdp_md, data_end)), | ||
6736 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), | ||
6737 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), | ||
6738 | BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1), | ||
6739 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4), | ||
6740 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
6741 | BPF_EXIT_INSN(), | ||
6742 | }, | ||
6743 | .errstr = "R1 offset is outside of the packet", | ||
6744 | .result = REJECT, | ||
6745 | .prog_type = BPF_PROG_TYPE_XDP, | ||
6746 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
6747 | }, | ||
6748 | { | ||
6749 | "XDP pkt read, pkt_data' > pkt_end, bad access 2", | ||
6750 | .insns = { | ||
6751 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, | ||
6752 | offsetof(struct xdp_md, data)), | ||
6753 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, | ||
6754 | offsetof(struct xdp_md, data_end)), | ||
6755 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), | ||
6756 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), | ||
6757 | BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 0), | ||
6758 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), | ||
6759 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
6760 | BPF_EXIT_INSN(), | ||
6761 | }, | ||
6762 | .errstr = "R1 offset is outside of the packet", | ||
6763 | .result = REJECT, | ||
6764 | .prog_type = BPF_PROG_TYPE_XDP, | ||
6765 | }, | ||
6766 | { | ||
6767 | "XDP pkt read, pkt_end > pkt_data', good access", | ||
6768 | .insns = { | ||
6769 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, | ||
6770 | offsetof(struct xdp_md, data)), | ||
6771 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, | ||
6772 | offsetof(struct xdp_md, data_end)), | ||
6773 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), | ||
6774 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), | ||
6775 | BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), | ||
6776 | BPF_JMP_IMM(BPF_JA, 0, 0, 1), | ||
6777 | BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5), | ||
6778 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
6779 | BPF_EXIT_INSN(), | ||
6780 | }, | ||
6781 | .result = ACCEPT, | ||
6782 | .prog_type = BPF_PROG_TYPE_XDP, | ||
6783 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
6784 | }, | ||
6785 | { | ||
6786 | "XDP pkt read, pkt_end > pkt_data', bad access 1", | ||
6787 | .insns = { | ||
6788 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, | ||
6789 | offsetof(struct xdp_md, data)), | ||
6790 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, | ||
6791 | offsetof(struct xdp_md, data_end)), | ||
6792 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), | ||
6793 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), | ||
6794 | BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), | ||
6795 | BPF_JMP_IMM(BPF_JA, 0, 0, 1), | ||
6796 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), | ||
6797 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
6798 | BPF_EXIT_INSN(), | ||
6799 | }, | ||
6800 | .errstr = "R1 offset is outside of the packet", | ||
6801 | .result = REJECT, | ||
6802 | .prog_type = BPF_PROG_TYPE_XDP, | ||
6803 | }, | ||
6804 | { | ||
6805 | "XDP pkt read, pkt_end > pkt_data', bad access 2", | ||
6806 | .insns = { | ||
6807 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, | ||
6808 | offsetof(struct xdp_md, data)), | ||
6809 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, | ||
6810 | offsetof(struct xdp_md, data_end)), | ||
6811 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), | ||
6812 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), | ||
6813 | BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_1, 1), | ||
6814 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), | ||
6815 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
6816 | BPF_EXIT_INSN(), | ||
6817 | }, | ||
6818 | .errstr = "R1 offset is outside of the packet", | ||
6819 | .result = REJECT, | ||
6820 | .prog_type = BPF_PROG_TYPE_XDP, | ||
6821 | }, | ||
6822 | { | ||
6823 | "XDP pkt read, pkt_data' < pkt_end, good access", | ||
6824 | .insns = { | ||
6825 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, | ||
6826 | offsetof(struct xdp_md, data)), | ||
6827 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, | ||
6828 | offsetof(struct xdp_md, data_end)), | ||
6829 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), | ||
6830 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), | ||
6831 | BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), | ||
6832 | BPF_JMP_IMM(BPF_JA, 0, 0, 1), | ||
6833 | BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5), | ||
6834 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
6835 | BPF_EXIT_INSN(), | ||
6836 | }, | ||
6837 | .result = ACCEPT, | ||
6838 | .prog_type = BPF_PROG_TYPE_XDP, | ||
6839 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
6840 | }, | ||
6841 | { | ||
6842 | "XDP pkt read, pkt_data' < pkt_end, bad access 1", | ||
6843 | .insns = { | ||
6844 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, | ||
6845 | offsetof(struct xdp_md, data)), | ||
6846 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, | ||
6847 | offsetof(struct xdp_md, data_end)), | ||
6848 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), | ||
6849 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), | ||
6850 | BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), | ||
6851 | BPF_JMP_IMM(BPF_JA, 0, 0, 1), | ||
6852 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), | ||
6853 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
6854 | BPF_EXIT_INSN(), | ||
6855 | }, | ||
6856 | .errstr = "R1 offset is outside of the packet", | ||
6857 | .result = REJECT, | ||
6858 | .prog_type = BPF_PROG_TYPE_XDP, | ||
6859 | }, | ||
6860 | { | ||
6861 | "XDP pkt read, pkt_data' < pkt_end, bad access 2", | ||
6862 | .insns = { | ||
6863 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, | ||
6864 | offsetof(struct xdp_md, data)), | ||
6865 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, | ||
6866 | offsetof(struct xdp_md, data_end)), | ||
6867 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), | ||
6868 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), | ||
6869 | BPF_JMP_REG(BPF_JLT, BPF_REG_1, BPF_REG_3, 1), | ||
6870 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), | ||
6871 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
6872 | BPF_EXIT_INSN(), | ||
6873 | }, | ||
6874 | .errstr = "R1 offset is outside of the packet", | ||
6875 | .result = REJECT, | ||
6876 | .prog_type = BPF_PROG_TYPE_XDP, | ||
6877 | }, | ||
6878 | { | ||
6879 | "XDP pkt read, pkt_end < pkt_data', good access", | ||
6880 | .insns = { | ||
6881 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, | ||
6882 | offsetof(struct xdp_md, data)), | ||
6883 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, | ||
6884 | offsetof(struct xdp_md, data_end)), | ||
6885 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), | ||
6886 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), | ||
6887 | BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1), | ||
6888 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), | ||
6889 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
6890 | BPF_EXIT_INSN(), | ||
6891 | }, | ||
6892 | .result = ACCEPT, | ||
6893 | .prog_type = BPF_PROG_TYPE_XDP, | ||
6894 | }, | ||
6895 | { | ||
6896 | "XDP pkt read, pkt_end < pkt_data', bad access 1", | ||
6897 | .insns = { | ||
6898 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, | ||
6899 | offsetof(struct xdp_md, data)), | ||
6900 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, | ||
6901 | offsetof(struct xdp_md, data_end)), | ||
6902 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), | ||
6903 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), | ||
6904 | BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 1), | ||
6905 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4), | ||
6906 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
6907 | BPF_EXIT_INSN(), | ||
6908 | }, | ||
6909 | .errstr = "R1 offset is outside of the packet", | ||
6910 | .result = REJECT, | ||
6911 | .prog_type = BPF_PROG_TYPE_XDP, | ||
6912 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
6913 | }, | ||
6914 | { | ||
6915 | "XDP pkt read, pkt_end < pkt_data', bad access 2", | ||
6916 | .insns = { | ||
6917 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, | ||
6918 | offsetof(struct xdp_md, data)), | ||
6919 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, | ||
6920 | offsetof(struct xdp_md, data_end)), | ||
6921 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), | ||
6922 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), | ||
6923 | BPF_JMP_REG(BPF_JLT, BPF_REG_3, BPF_REG_1, 0), | ||
6924 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), | ||
6925 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
6926 | BPF_EXIT_INSN(), | ||
6927 | }, | ||
6928 | .errstr = "R1 offset is outside of the packet", | ||
6929 | .result = REJECT, | ||
6930 | .prog_type = BPF_PROG_TYPE_XDP, | ||
6931 | }, | ||
6932 | { | ||
6933 | "XDP pkt read, pkt_data' >= pkt_end, good access", | ||
6934 | .insns = { | ||
6935 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, | ||
6936 | offsetof(struct xdp_md, data)), | ||
6937 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, | ||
6938 | offsetof(struct xdp_md, data_end)), | ||
6939 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), | ||
6940 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), | ||
6941 | BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), | ||
6942 | BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5), | ||
6943 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
6944 | BPF_EXIT_INSN(), | ||
6945 | }, | ||
6946 | .result = ACCEPT, | ||
6947 | .prog_type = BPF_PROG_TYPE_XDP, | ||
6948 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
6949 | }, | ||
6950 | { | ||
6951 | "XDP pkt read, pkt_data' >= pkt_end, bad access 1", | ||
6952 | .insns = { | ||
6953 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, | ||
6954 | offsetof(struct xdp_md, data)), | ||
6955 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, | ||
6956 | offsetof(struct xdp_md, data_end)), | ||
6957 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), | ||
6958 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), | ||
6959 | BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 1), | ||
6960 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), | ||
6961 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
6962 | BPF_EXIT_INSN(), | ||
6963 | }, | ||
6964 | .errstr = "R1 offset is outside of the packet", | ||
6965 | .result = REJECT, | ||
6966 | .prog_type = BPF_PROG_TYPE_XDP, | ||
6967 | }, | ||
6968 | { | ||
6969 | "XDP pkt read, pkt_data' >= pkt_end, bad access 2", | ||
6970 | .insns = { | ||
6971 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, | ||
6972 | offsetof(struct xdp_md, data)), | ||
6973 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, | ||
6974 | offsetof(struct xdp_md, data_end)), | ||
6975 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), | ||
6976 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), | ||
6977 | BPF_JMP_REG(BPF_JGE, BPF_REG_1, BPF_REG_3, 0), | ||
6978 | BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5), | ||
6979 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
6980 | BPF_EXIT_INSN(), | ||
6981 | }, | ||
6982 | .errstr = "R1 offset is outside of the packet", | ||
6983 | .result = REJECT, | ||
6984 | .prog_type = BPF_PROG_TYPE_XDP, | ||
6985 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
6986 | }, | ||
6987 | { | ||
6988 | "XDP pkt read, pkt_end >= pkt_data', good access", | ||
6989 | .insns = { | ||
6990 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, | ||
6991 | offsetof(struct xdp_md, data)), | ||
6992 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, | ||
6993 | offsetof(struct xdp_md, data_end)), | ||
6994 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), | ||
6995 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), | ||
6996 | BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1), | ||
6997 | BPF_JMP_IMM(BPF_JA, 0, 0, 1), | ||
6998 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), | ||
6999 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
7000 | BPF_EXIT_INSN(), | ||
7001 | }, | ||
7002 | .result = ACCEPT, | ||
7003 | .prog_type = BPF_PROG_TYPE_XDP, | ||
7004 | }, | ||
7005 | { | ||
7006 | "XDP pkt read, pkt_end >= pkt_data', bad access 1", | ||
7007 | .insns = { | ||
7008 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, | ||
7009 | offsetof(struct xdp_md, data)), | ||
7010 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, | ||
7011 | offsetof(struct xdp_md, data_end)), | ||
7012 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), | ||
7013 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), | ||
7014 | BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1), | ||
7015 | BPF_JMP_IMM(BPF_JA, 0, 0, 1), | ||
7016 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4), | ||
7017 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
7018 | BPF_EXIT_INSN(), | ||
7019 | }, | ||
7020 | .errstr = "R1 offset is outside of the packet", | ||
7021 | .result = REJECT, | ||
7022 | .prog_type = BPF_PROG_TYPE_XDP, | ||
7023 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
7024 | }, | ||
7025 | { | ||
7026 | "XDP pkt read, pkt_end >= pkt_data', bad access 2", | ||
7027 | .insns = { | ||
7028 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, | ||
7029 | offsetof(struct xdp_md, data)), | ||
7030 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, | ||
7031 | offsetof(struct xdp_md, data_end)), | ||
7032 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), | ||
7033 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), | ||
7034 | BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 1), | ||
7035 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), | ||
7036 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
7037 | BPF_EXIT_INSN(), | ||
7038 | }, | ||
7039 | .errstr = "R1 offset is outside of the packet", | ||
7040 | .result = REJECT, | ||
7041 | .prog_type = BPF_PROG_TYPE_XDP, | ||
7042 | }, | ||
7043 | { | ||
7044 | "XDP pkt read, pkt_data' <= pkt_end, good access", | ||
7045 | .insns = { | ||
7046 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, | ||
7047 | offsetof(struct xdp_md, data)), | ||
7048 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, | ||
7049 | offsetof(struct xdp_md, data_end)), | ||
7050 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), | ||
7051 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), | ||
7052 | BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1), | ||
7053 | BPF_JMP_IMM(BPF_JA, 0, 0, 1), | ||
7054 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), | ||
7055 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
7056 | BPF_EXIT_INSN(), | ||
7057 | }, | ||
7058 | .result = ACCEPT, | ||
7059 | .prog_type = BPF_PROG_TYPE_XDP, | ||
7060 | }, | ||
7061 | { | ||
7062 | "XDP pkt read, pkt_data' <= pkt_end, bad access 1", | ||
7063 | .insns = { | ||
7064 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, | ||
7065 | offsetof(struct xdp_md, data)), | ||
7066 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, | ||
7067 | offsetof(struct xdp_md, data_end)), | ||
7068 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), | ||
7069 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), | ||
7070 | BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1), | ||
7071 | BPF_JMP_IMM(BPF_JA, 0, 0, 1), | ||
7072 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -4), | ||
7073 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
7074 | BPF_EXIT_INSN(), | ||
7075 | }, | ||
7076 | .errstr = "R1 offset is outside of the packet", | ||
7077 | .result = REJECT, | ||
7078 | .prog_type = BPF_PROG_TYPE_XDP, | ||
7079 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
7080 | }, | ||
7081 | { | ||
7082 | "XDP pkt read, pkt_data' <= pkt_end, bad access 2", | ||
7083 | .insns = { | ||
7084 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, | ||
7085 | offsetof(struct xdp_md, data)), | ||
7086 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, | ||
7087 | offsetof(struct xdp_md, data_end)), | ||
7088 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), | ||
7089 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), | ||
7090 | BPF_JMP_REG(BPF_JLE, BPF_REG_1, BPF_REG_3, 1), | ||
7091 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), | ||
7092 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
7093 | BPF_EXIT_INSN(), | ||
7094 | }, | ||
7095 | .errstr = "R1 offset is outside of the packet", | ||
7096 | .result = REJECT, | ||
7097 | .prog_type = BPF_PROG_TYPE_XDP, | ||
7098 | }, | ||
7099 | { | ||
7100 | "XDP pkt read, pkt_end <= pkt_data', good access", | ||
7101 | .insns = { | ||
7102 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, | ||
7103 | offsetof(struct xdp_md, data)), | ||
7104 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, | ||
7105 | offsetof(struct xdp_md, data_end)), | ||
7106 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), | ||
7107 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), | ||
7108 | BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), | ||
7109 | BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5), | ||
7110 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
7111 | BPF_EXIT_INSN(), | ||
7112 | }, | ||
7113 | .result = ACCEPT, | ||
7114 | .prog_type = BPF_PROG_TYPE_XDP, | ||
7115 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
7116 | }, | ||
7117 | { | ||
7118 | "XDP pkt read, pkt_end <= pkt_data', bad access 1", | ||
7119 | .insns = { | ||
7120 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, | ||
7121 | offsetof(struct xdp_md, data)), | ||
7122 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, | ||
7123 | offsetof(struct xdp_md, data_end)), | ||
7124 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), | ||
7125 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), | ||
7126 | BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 1), | ||
7127 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8), | ||
7128 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
7129 | BPF_EXIT_INSN(), | ||
7130 | }, | ||
7131 | .errstr = "R1 offset is outside of the packet", | ||
7132 | .result = REJECT, | ||
7133 | .prog_type = BPF_PROG_TYPE_XDP, | ||
7134 | }, | ||
7135 | { | ||
7136 | "XDP pkt read, pkt_end <= pkt_data', bad access 2", | ||
7137 | .insns = { | ||
7138 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, | ||
7139 | offsetof(struct xdp_md, data)), | ||
7140 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, | ||
7141 | offsetof(struct xdp_md, data_end)), | ||
7142 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_2), | ||
7143 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8), | ||
7144 | BPF_JMP_REG(BPF_JLE, BPF_REG_3, BPF_REG_1, 0), | ||
7145 | BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, -5), | ||
7146 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
7147 | BPF_EXIT_INSN(), | ||
7148 | }, | ||
7149 | .errstr = "R1 offset is outside of the packet", | ||
7150 | .result = REJECT, | ||
7151 | .prog_type = BPF_PROG_TYPE_XDP, | ||
7152 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
7153 | }, | ||
6648 | }; | 7154 | }; |
6649 | 7155 | ||
6650 | static int probe_filter_length(const struct bpf_insn *fp) | 7156 | static int probe_filter_length(const struct bpf_insn *fp) |
diff --git a/tools/testing/selftests/mqueue/Makefile b/tools/testing/selftests/mqueue/Makefile index 0f5e347b068d..152823b6cb21 100644 --- a/tools/testing/selftests/mqueue/Makefile +++ b/tools/testing/selftests/mqueue/Makefile | |||
@@ -5,8 +5,8 @@ TEST_GEN_PROGS := mq_open_tests mq_perf_tests | |||
5 | include ../lib.mk | 5 | include ../lib.mk |
6 | 6 | ||
7 | override define RUN_TESTS | 7 | override define RUN_TESTS |
8 | $(OUTPUT)/mq_open_tests /test1 || echo "selftests: mq_open_tests [FAIL]" | 8 | @$(OUTPUT)/mq_open_tests /test1 || echo "selftests: mq_open_tests [FAIL]" |
9 | $(OUTPUT)//mq_perf_tests || echo "selftests: mq_perf_tests [FAIL]" | 9 | @$(OUTPUT)/mq_perf_tests || echo "selftests: mq_perf_tests [FAIL]" |
10 | endef | 10 | endef |
11 | 11 | ||
12 | override define EMIT_TESTS | 12 | override define EMIT_TESTS |
diff --git a/tools/testing/selftests/networking/timestamping/rxtimestamp.c b/tools/testing/selftests/networking/timestamping/rxtimestamp.c index 00f286661dcd..dd4162fc0419 100644 --- a/tools/testing/selftests/networking/timestamping/rxtimestamp.c +++ b/tools/testing/selftests/networking/timestamping/rxtimestamp.c | |||
@@ -341,7 +341,7 @@ int main(int argc, char **argv) | |||
341 | return 0; | 341 | return 0; |
342 | case 'n': | 342 | case 'n': |
343 | t = atoi(optarg); | 343 | t = atoi(optarg); |
344 | if (t > ARRAY_SIZE(test_cases)) | 344 | if (t >= ARRAY_SIZE(test_cases)) |
345 | error(1, 0, "Invalid test case: %d", t); | 345 | error(1, 0, "Invalid test case: %d", t); |
346 | all_tests = false; | 346 | all_tests = false; |
347 | test_cases[t].enabled = true; | 347 | test_cases[t].enabled = true; |
diff --git a/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json b/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json index c727b96a59b0..5fa02d86b35f 100644 --- a/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json +++ b/tools/testing/selftests/tc-testing/tc-tests/filters/tests.json | |||
@@ -17,5 +17,26 @@ | |||
17 | "teardown": [ | 17 | "teardown": [ |
18 | "$TC qdisc del dev $DEV1 ingress" | 18 | "$TC qdisc del dev $DEV1 ingress" |
19 | ] | 19 | ] |
20 | }, | ||
21 | { | ||
22 | "id": "d052", | ||
23 | "name": "Add 1M filters with the same action", | ||
24 | "category": [ | ||
25 | "filter", | ||
26 | "flower" | ||
27 | ], | ||
28 | "setup": [ | ||
29 | "$TC qdisc add dev $DEV2 ingress", | ||
30 | "./tdc_batch.py $DEV2 $BATCH_FILE --share_action -n 1000000" | ||
31 | ], | ||
32 | "cmdUnderTest": "$TC -b $BATCH_FILE", | ||
33 | "expExitCode": "0", | ||
34 | "verifyCmd": "$TC actions list action gact", | ||
35 | "matchPattern": "action order 0: gact action drop.*index 1 ref 1000000 bind 1000000", | ||
36 | "matchCount": "1", | ||
37 | "teardown": [ | ||
38 | "$TC qdisc del dev $DEV2 ingress", | ||
39 | "/bin/rm $BATCH_FILE" | ||
40 | ] | ||
20 | } | 41 | } |
21 | ] \ No newline at end of file | 42 | ] |
diff --git a/tools/testing/selftests/tc-testing/tdc.py b/tools/testing/selftests/tc-testing/tdc.py index cd61b7844c0d..5f11f5d7456e 100755 --- a/tools/testing/selftests/tc-testing/tdc.py +++ b/tools/testing/selftests/tc-testing/tdc.py | |||
@@ -88,7 +88,7 @@ def prepare_env(cmdlist): | |||
88 | exit(1) | 88 | exit(1) |
89 | 89 | ||
90 | 90 | ||
91 | def test_runner(filtered_tests): | 91 | def test_runner(filtered_tests, args): |
92 | """ | 92 | """ |
93 | Driver function for the unit tests. | 93 | Driver function for the unit tests. |
94 | 94 | ||
@@ -105,6 +105,8 @@ def test_runner(filtered_tests): | |||
105 | for tidx in testlist: | 105 | for tidx in testlist: |
106 | result = True | 106 | result = True |
107 | tresult = "" | 107 | tresult = "" |
108 | if "flower" in tidx["category"] and args.device == None: | ||
109 | continue | ||
108 | print("Test " + tidx["id"] + ": " + tidx["name"]) | 110 | print("Test " + tidx["id"] + ": " + tidx["name"]) |
109 | prepare_env(tidx["setup"]) | 111 | prepare_env(tidx["setup"]) |
110 | (p, procout) = exec_cmd(tidx["cmdUnderTest"]) | 112 | (p, procout) = exec_cmd(tidx["cmdUnderTest"]) |
@@ -152,6 +154,10 @@ def ns_create(): | |||
152 | exec_cmd(cmd, False) | 154 | exec_cmd(cmd, False) |
153 | cmd = 'ip -s $NS link set $DEV1 up' | 155 | cmd = 'ip -s $NS link set $DEV1 up' |
154 | exec_cmd(cmd, False) | 156 | exec_cmd(cmd, False) |
157 | cmd = 'ip link set $DEV2 netns $NS' | ||
158 | exec_cmd(cmd, False) | ||
159 | cmd = 'ip -s $NS link set $DEV2 up' | ||
160 | exec_cmd(cmd, False) | ||
155 | 161 | ||
156 | 162 | ||
157 | def ns_destroy(): | 163 | def ns_destroy(): |
@@ -211,7 +217,8 @@ def set_args(parser): | |||
211 | help='Execute the single test case with specified ID') | 217 | help='Execute the single test case with specified ID') |
212 | parser.add_argument('-i', '--id', action='store_true', dest='gen_id', | 218 | parser.add_argument('-i', '--id', action='store_true', dest='gen_id', |
213 | help='Generate ID numbers for new test cases') | 219 | help='Generate ID numbers for new test cases') |
214 | return parser | 220 | parser.add_argument('-d', '--device', |
221 | help='Execute the test case in flower category') | ||
215 | return parser | 222 | return parser |
216 | 223 | ||
217 | 224 | ||
@@ -225,6 +232,8 @@ def check_default_settings(args): | |||
225 | 232 | ||
226 | if args.path != None: | 233 | if args.path != None: |
227 | NAMES['TC'] = args.path | 234 | NAMES['TC'] = args.path |
235 | if args.device != None: | ||
236 | NAMES['DEV2'] = args.device | ||
228 | if not os.path.isfile(NAMES['TC']): | 237 | if not os.path.isfile(NAMES['TC']): |
229 | print("The specified tc path " + NAMES['TC'] + " does not exist.") | 238 | print("The specified tc path " + NAMES['TC'] + " does not exist.") |
230 | exit(1) | 239 | exit(1) |
@@ -381,14 +390,17 @@ def set_operation_mode(args): | |||
381 | if (len(alltests) == 0): | 390 | if (len(alltests) == 0): |
382 | print("Cannot find a test case with ID matching " + target_id) | 391 | print("Cannot find a test case with ID matching " + target_id) |
383 | exit(1) | 392 | exit(1) |
384 | catresults = test_runner(alltests) | 393 | catresults = test_runner(alltests, args) |
385 | print("All test results: " + "\n\n" + catresults) | 394 | print("All test results: " + "\n\n" + catresults) |
386 | elif (len(target_category) > 0): | 395 | elif (len(target_category) > 0): |
396 | if (target_category == "flower") and args.device == None: | ||
397 | print("Please specify a NIC device (-d) to run category flower") | ||
398 | exit(1) | ||
387 | if (target_category not in ucat): | 399 | if (target_category not in ucat): |
388 | print("Specified category is not present in this file.") | 400 | print("Specified category is not present in this file.") |
389 | exit(1) | 401 | exit(1) |
390 | else: | 402 | else: |
391 | catresults = test_runner(testcases[target_category]) | 403 | catresults = test_runner(testcases[target_category], args) |
392 | print("Category " + target_category + "\n\n" + catresults) | 404 | print("Category " + target_category + "\n\n" + catresults) |
393 | 405 | ||
394 | ns_destroy() | 406 | ns_destroy() |
diff --git a/tools/testing/selftests/tc-testing/tdc_batch.py b/tools/testing/selftests/tc-testing/tdc_batch.py new file mode 100755 index 000000000000..707c6bfef689 --- /dev/null +++ b/tools/testing/selftests/tc-testing/tdc_batch.py | |||
@@ -0,0 +1,62 @@ | |||
1 | #!/usr/bin/python3 | ||
2 | |||
3 | """ | ||
4 | tdc_batch.py - a script to generate TC batch file | ||
5 | |||
6 | Copyright (C) 2017 Chris Mi <chrism@mellanox.com> | ||
7 | """ | ||
8 | |||
9 | import argparse | ||
10 | |||
11 | parser = argparse.ArgumentParser(description='TC batch file generator') | ||
12 | parser.add_argument("device", help="device name") | ||
13 | parser.add_argument("file", help="batch file name") | ||
14 | parser.add_argument("-n", "--number", type=int, | ||
15 | help="how many lines in batch file") | ||
16 | parser.add_argument("-o", "--skip_sw", | ||
17 | help="skip_sw (offload), by default skip_hw", | ||
18 | action="store_true") | ||
19 | parser.add_argument("-s", "--share_action", | ||
20 | help="all filters share the same action", | ||
21 | action="store_true") | ||
22 | parser.add_argument("-p", "--prio", | ||
23 | help="all filters have different prio", | ||
24 | action="store_true") | ||
25 | args = parser.parse_args() | ||
26 | |||
27 | device = args.device | ||
28 | file = open(args.file, 'w') | ||
29 | |||
30 | number = 1 | ||
31 | if args.number: | ||
32 | number = args.number | ||
33 | |||
34 | skip = "skip_hw" | ||
35 | if args.skip_sw: | ||
36 | skip = "skip_sw" | ||
37 | |||
38 | share_action = "" | ||
39 | if args.share_action: | ||
40 | share_action = "index 1" | ||
41 | |||
42 | prio = "prio 1" | ||
43 | if args.prio: | ||
44 | prio = "" | ||
45 | if number > 0x4000: | ||
46 | number = 0x4000 | ||
47 | |||
48 | index = 0 | ||
49 | for i in range(0x100): | ||
50 | for j in range(0x100): | ||
51 | for k in range(0x100): | ||
52 | mac = ("%02x:%02x:%02x" % (i, j, k)) | ||
53 | src_mac = "e4:11:00:" + mac | ||
54 | dst_mac = "e4:12:00:" + mac | ||
55 | cmd = ("filter add dev %s %s protocol ip parent ffff: flower %s " | ||
56 | "src_mac %s dst_mac %s action drop %s" % | ||
57 | (device, prio, skip, src_mac, dst_mac, share_action)) | ||
58 | file.write("%s\n" % cmd) | ||
59 | index += 1 | ||
60 | if index >= number: | ||
61 | file.close() | ||
62 | exit(0) | ||
diff --git a/tools/testing/selftests/tc-testing/tdc_config.py b/tools/testing/selftests/tc-testing/tdc_config.py index 01087375a7c3..b6352515c1b5 100644 --- a/tools/testing/selftests/tc-testing/tdc_config.py +++ b/tools/testing/selftests/tc-testing/tdc_config.py | |||
@@ -12,6 +12,8 @@ NAMES = { | |||
12 | # Name of veth devices to be created for the namespace | 12 | # Name of veth devices to be created for the namespace |
13 | 'DEV0': 'v0p0', | 13 | 'DEV0': 'v0p0', |
14 | 'DEV1': 'v0p1', | 14 | 'DEV1': 'v0p1', |
15 | 'DEV2': '', | ||
16 | 'BATCH_FILE': './batch.txt', | ||
15 | # Name of the namespace to use | 17 | # Name of the namespace to use |
16 | 'NS': 'tcut' | 18 | 'NS': 'tcut' |
17 | } | 19 | } |
diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c index a2c53a3d223d..de2f9ec8a87f 100644 --- a/tools/testing/selftests/vm/userfaultfd.c +++ b/tools/testing/selftests/vm/userfaultfd.c | |||
@@ -397,7 +397,7 @@ static void retry_copy_page(int ufd, struct uffdio_copy *uffdio_copy, | |||
397 | } | 397 | } |
398 | } | 398 | } |
399 | 399 | ||
400 | static int copy_page(int ufd, unsigned long offset) | 400 | static int __copy_page(int ufd, unsigned long offset, bool retry) |
401 | { | 401 | { |
402 | struct uffdio_copy uffdio_copy; | 402 | struct uffdio_copy uffdio_copy; |
403 | 403 | ||
@@ -418,7 +418,7 @@ static int copy_page(int ufd, unsigned long offset) | |||
418 | fprintf(stderr, "UFFDIO_COPY unexpected copy %Ld\n", | 418 | fprintf(stderr, "UFFDIO_COPY unexpected copy %Ld\n", |
419 | uffdio_copy.copy), exit(1); | 419 | uffdio_copy.copy), exit(1); |
420 | } else { | 420 | } else { |
421 | if (test_uffdio_copy_eexist) { | 421 | if (test_uffdio_copy_eexist && retry) { |
422 | test_uffdio_copy_eexist = false; | 422 | test_uffdio_copy_eexist = false; |
423 | retry_copy_page(ufd, &uffdio_copy, offset); | 423 | retry_copy_page(ufd, &uffdio_copy, offset); |
424 | } | 424 | } |
@@ -427,6 +427,16 @@ static int copy_page(int ufd, unsigned long offset) | |||
427 | return 0; | 427 | return 0; |
428 | } | 428 | } |
429 | 429 | ||
430 | static int copy_page_retry(int ufd, unsigned long offset) | ||
431 | { | ||
432 | return __copy_page(ufd, offset, true); | ||
433 | } | ||
434 | |||
435 | static int copy_page(int ufd, unsigned long offset) | ||
436 | { | ||
437 | return __copy_page(ufd, offset, false); | ||
438 | } | ||
439 | |||
430 | static void *uffd_poll_thread(void *arg) | 440 | static void *uffd_poll_thread(void *arg) |
431 | { | 441 | { |
432 | unsigned long cpu = (unsigned long) arg; | 442 | unsigned long cpu = (unsigned long) arg; |
@@ -544,7 +554,7 @@ static void *background_thread(void *arg) | |||
544 | for (page_nr = cpu * nr_pages_per_cpu; | 554 | for (page_nr = cpu * nr_pages_per_cpu; |
545 | page_nr < (cpu+1) * nr_pages_per_cpu; | 555 | page_nr < (cpu+1) * nr_pages_per_cpu; |
546 | page_nr++) | 556 | page_nr++) |
547 | copy_page(uffd, page_nr * page_size); | 557 | copy_page_retry(uffd, page_nr * page_size); |
548 | 558 | ||
549 | return NULL; | 559 | return NULL; |
550 | } | 560 | } |
@@ -779,7 +789,7 @@ static void retry_uffdio_zeropage(int ufd, | |||
779 | } | 789 | } |
780 | } | 790 | } |
781 | 791 | ||
782 | static int uffdio_zeropage(int ufd, unsigned long offset) | 792 | static int __uffdio_zeropage(int ufd, unsigned long offset, bool retry) |
783 | { | 793 | { |
784 | struct uffdio_zeropage uffdio_zeropage; | 794 | struct uffdio_zeropage uffdio_zeropage; |
785 | int ret; | 795 | int ret; |
@@ -814,7 +824,7 @@ static int uffdio_zeropage(int ufd, unsigned long offset) | |||
814 | fprintf(stderr, "UFFDIO_ZEROPAGE unexpected %Ld\n", | 824 | fprintf(stderr, "UFFDIO_ZEROPAGE unexpected %Ld\n", |
815 | uffdio_zeropage.zeropage), exit(1); | 825 | uffdio_zeropage.zeropage), exit(1); |
816 | } else { | 826 | } else { |
817 | if (test_uffdio_zeropage_eexist) { | 827 | if (test_uffdio_zeropage_eexist && retry) { |
818 | test_uffdio_zeropage_eexist = false; | 828 | test_uffdio_zeropage_eexist = false; |
819 | retry_uffdio_zeropage(ufd, &uffdio_zeropage, | 829 | retry_uffdio_zeropage(ufd, &uffdio_zeropage, |
820 | offset); | 830 | offset); |
@@ -830,6 +840,11 @@ static int uffdio_zeropage(int ufd, unsigned long offset) | |||
830 | return 0; | 840 | return 0; |
831 | } | 841 | } |
832 | 842 | ||
843 | static int uffdio_zeropage(int ufd, unsigned long offset) | ||
844 | { | ||
845 | return __uffdio_zeropage(ufd, offset, false); | ||
846 | } | ||
847 | |||
833 | /* exercise UFFDIO_ZEROPAGE */ | 848 | /* exercise UFFDIO_ZEROPAGE */ |
834 | static int userfaultfd_zeropage_test(void) | 849 | static int userfaultfd_zeropage_test(void) |
835 | { | 850 | { |
diff --git a/tools/testing/selftests/x86/Makefile b/tools/testing/selftests/x86/Makefile index 97f187e2663f..0a74a20ca32b 100644 --- a/tools/testing/selftests/x86/Makefile +++ b/tools/testing/selftests/x86/Makefile | |||
@@ -20,7 +20,7 @@ BINARIES_64 := $(TARGETS_C_64BIT_ALL:%=%_64) | |||
20 | BINARIES_32 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_32)) | 20 | BINARIES_32 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_32)) |
21 | BINARIES_64 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_64)) | 21 | BINARIES_64 := $(patsubst %,$(OUTPUT)/%,$(BINARIES_64)) |
22 | 22 | ||
23 | CFLAGS := -O2 -g -std=gnu99 -pthread -Wall | 23 | CFLAGS := -O2 -g -std=gnu99 -pthread -Wall -no-pie |
24 | 24 | ||
25 | UNAME_M := $(shell uname -m) | 25 | UNAME_M := $(shell uname -m) |
26 | CAN_BUILD_I386 := $(shell ./check_cc.sh $(CC) trivial_32bit_program.c -m32) | 26 | CAN_BUILD_I386 := $(shell ./check_cc.sh $(CC) trivial_32bit_program.c -m32) |