114 files changed, 571 insertions, 878 deletions
diff --git a/Documentation/core-api/idr.rst b/Documentation/core-api/idr.rst
index d351e880a2f6..a2738050c4f0 100644
--- a/Documentation/core-api/idr.rst
+++ b/Documentation/core-api/idr.rst
@@ -1,4 +1,4 @@ | |||
1 | .. SPDX-License-Identifier: CC-BY-SA-4.0 | 1 | .. SPDX-License-Identifier: GPL-2.0+ |
2 | 2 | ||
3 | ============= | 3 | ============= |
4 | ID Allocation | 4 | ID Allocation |
diff --git a/LICENSES/other/CC-BY-SA-4.0 b/LICENSES/other/CC-BY-SA-4.0
deleted file mode 100644
index f9158e831e79..000000000000
--- a/LICENSES/other/CC-BY-SA-4.0
+++ /dev/null
@@ -1,397 +0,0 @@ | |||
1 | Valid-License-Identifier: CC-BY-SA-4.0 | ||
2 | SPDX-URL: https://spdx.org/licenses/CC-BY-SA-4.0 | ||
3 | Usage-Guide: | ||
4 | To use the Creative Commons Attribution Share Alike 4.0 International | ||
5 | license put the following SPDX tag/value pair into a comment according to | ||
6 | the placement guidelines in the licensing rules documentation: | ||
7 | SPDX-License-Identifier: CC-BY-SA-4.0 | ||
8 | License-Text: | ||
9 | |||
10 | Creative Commons Attribution-ShareAlike 4.0 International | ||
11 | |||
12 | Creative Commons Corporation ("Creative Commons") is not a law firm and | ||
13 | does not provide legal services or legal advice. Distribution of Creative | ||
14 | Commons public licenses does not create a lawyer-client or other | ||
15 | relationship. Creative Commons makes its licenses and related information | ||
16 | available on an "as-is" basis. Creative Commons gives no warranties | ||
17 | regarding its licenses, any material licensed under their terms and | ||
18 | conditions, or any related information. Creative Commons disclaims all | ||
19 | liability for damages resulting from their use to the fullest extent | ||
20 | possible. | ||
21 | |||
22 | Using Creative Commons Public Licenses | ||
23 | |||
24 | Creative Commons public licenses provide a standard set of terms and | ||
25 | conditions that creators and other rights holders may use to share original | ||
26 | works of authorship and other material subject to copyright and certain | ||
27 | other rights specified in the public license below. The following | ||
28 | considerations are for informational purposes only, are not exhaustive, and | ||
29 | do not form part of our licenses. | ||
30 | |||
31 | Considerations for licensors: Our public licenses are intended for use by | ||
32 | those authorized to give the public permission to use material in ways | ||
33 | otherwise restricted by copyright and certain other rights. Our licenses | ||
34 | are irrevocable. Licensors should read and understand the terms and | ||
35 | conditions of the license they choose before applying it. Licensors should | ||
36 | also secure all rights necessary before applying our licenses so that the | ||
37 | public can reuse the material as expected. Licensors should clearly mark | ||
38 | any material not subject to the license. This includes other CC-licensed | ||
39 | material, or material used under an exception or limitation to | ||
40 | copyright. More considerations for licensors : | ||
41 | wiki.creativecommons.org/Considerations_for_licensors | ||
42 | |||
43 | Considerations for the public: By using one of our public licenses, a | ||
44 | licensor grants the public permission to use the licensed material under | ||
45 | specified terms and conditions. If the licensor's permission is not | ||
46 | necessary for any reason - for example, because of any applicable exception | ||
47 | or limitation to copyright - then that use is not regulated by the | ||
48 | license. Our licenses grant only permissions under copyright and certain | ||
49 | other rights that a licensor has authority to grant. Use of the licensed | ||
50 | material may still be restricted for other reasons, including because | ||
51 | others have copyright or other rights in the material. A licensor may make | ||
52 | special requests, such as asking that all changes be marked or described. | ||
53 | |||
54 | Although not required by our licenses, you are encouraged to respect those | ||
55 | requests where reasonable. More considerations for the public : | ||
56 | wiki.creativecommons.org/Considerations_for_licensees | ||
57 | |||
58 | Creative Commons Attribution-ShareAlike 4.0 International Public License | ||
59 | |||
60 | By exercising the Licensed Rights (defined below), You accept and agree to | ||
61 | be bound by the terms and conditions of this Creative Commons | ||
62 | Attribution-ShareAlike 4.0 International Public License ("Public | ||
63 | License"). To the extent this Public License may be interpreted as a | ||
64 | contract, You are granted the Licensed Rights in consideration of Your | ||
65 | acceptance of these terms and conditions, and the Licensor grants You such | ||
66 | rights in consideration of benefits the Licensor receives from making the | ||
67 | Licensed Material available under these terms and conditions. | ||
68 | |||
69 | Section 1 - Definitions. | ||
70 | |||
71 | a. Adapted Material means material subject to Copyright and Similar | ||
72 | Rights that is derived from or based upon the Licensed Material and | ||
73 | in which the Licensed Material is translated, altered, arranged, | ||
74 | transformed, or otherwise modified in a manner requiring permission | ||
75 | under the Copyright and Similar Rights held by the Licensor. For | ||
76 | purposes of this Public License, where the Licensed Material is a | ||
77 | musical work, performance, or sound recording, Adapted Material is | ||
78 | always produced where the Licensed Material is synched in timed | ||
79 | relation with a moving image. | ||
80 | |||
81 | b. Adapter's License means the license You apply to Your Copyright and | ||
82 | Similar Rights in Your contributions to Adapted Material in | ||
83 | accordance with the terms and conditions of this Public License. | ||
84 | |||
85 | c. BY-SA Compatible License means a license listed at | ||
86 | creativecommons.org/compatiblelicenses, approved by Creative Commons | ||
87 | as essentially the equivalent of this Public License. | ||
88 | |||
89 | d. Copyright and Similar Rights means copyright and/or similar rights | ||
90 | closely related to copyright including, without limitation, | ||
91 | performance, broadcast, sound recording, and Sui Generis Database | ||
92 | Rights, without regard to how the rights are labeled or | ||
93 | categorized. For purposes of this Public License, the rights | ||
94 | specified in Section 2(b)(1)-(2) are not Copyright and Similar | ||
95 | Rights. | ||
96 | |||
97 | e. Effective Technological Measures means those measures that, in the | ||
98 | absence of proper authority, may not be circumvented under laws | ||
99 | fulfilling obligations under Article 11 of the WIPO Copyright Treaty | ||
100 | adopted on December 20, 1996, and/or similar international | ||
101 | agreements. | ||
102 | |||
103 | f. Exceptions and Limitations means fair use, fair dealing, and/or any | ||
104 | other exception or limitation to Copyright and Similar Rights that | ||
105 | applies to Your use of the Licensed Material. | ||
106 | |||
107 | g. License Elements means the license attributes listed in the name of | ||
108 | a Creative Commons Public License. The License Elements of this | ||
109 | Public License are Attribution and ShareAlike. | ||
110 | |||
111 | h. Licensed Material means the artistic or literary work, database, or | ||
112 | other material to which the Licensor applied this Public License. | ||
113 | |||
114 | i. Licensed Rights means the rights granted to You subject to the terms | ||
115 | and conditions of this Public License, which are limited to all | ||
116 | Copyright and Similar Rights that apply to Your use of the Licensed | ||
117 | Material and that the Licensor has authority to license. | ||
118 | |||
119 | j. Licensor means the individual(s) or entity(ies) granting rights | ||
120 | under this Public License. | ||
121 | |||
122 | k. Share means to provide material to the public by any means or | ||
123 | process that requires permission under the Licensed Rights, such as | ||
124 | reproduction, public display, public performance, distribution, | ||
125 | dissemination, communication, or importation, and to make material | ||
126 | available to the public including in ways that members of the public | ||
127 | may access the material from a place and at a time individually | ||
128 | chosen by them. | ||
129 | |||
130 | l. Sui Generis Database Rights means rights other than copyright | ||
131 | resulting from Directive 96/9/EC of the European Parliament and of | ||
132 | the Council of 11 March 1996 on the legal protection of databases, | ||
133 | as amended and/or succeeded, as well as other essentially equivalent | ||
134 | rights anywhere in the world. m. You means the individual or entity | ||
135 | exercising the Licensed Rights under this Public License. Your has a | ||
136 | corresponding meaning. | ||
137 | |||
138 | Section 2 - Scope. | ||
139 | |||
140 | a. License grant. | ||
141 | |||
142 | 1. Subject to the terms and conditions of this Public License, the | ||
143 | Licensor hereby grants You a worldwide, royalty-free, | ||
144 | non-sublicensable, non-exclusive, irrevocable license to | ||
145 | exercise the Licensed Rights in the Licensed Material to: | ||
146 | |||
147 | A. reproduce and Share the Licensed Material, in whole or in part; and | ||
148 | |||
149 | B. produce, reproduce, and Share Adapted Material. | ||
150 | |||
151 | 2. Exceptions and Limitations. For the avoidance of doubt, where | ||
152 | Exceptions and Limitations apply to Your use, this Public | ||
153 | License does not apply, and You do not need to comply with its | ||
154 | terms and conditions. | ||
155 | |||
156 | 3. Term. The term of this Public License is specified in Section 6(a). | ||
157 | |||
158 | 4. Media and formats; technical modifications allowed. The Licensor | ||
159 | authorizes You to exercise the Licensed Rights in all media and | ||
160 | formats whether now known or hereafter created, and to make | ||
161 | technical modifications necessary to do so. The Licensor waives | ||
162 | and/or agrees not to assert any right or authority to forbid You | ||
163 | from making technical modifications necessary to exercise the | ||
164 | Licensed Rights, including technical modifications necessary to | ||
165 | circumvent Effective Technological Measures. For purposes of | ||
166 | this Public License, simply making modifications authorized by | ||
167 | this Section 2(a)(4) never produces Adapted Material. | ||
168 | |||
169 | 5. Downstream recipients. | ||
170 | |||
171 | A. Offer from the Licensor - Licensed Material. Every recipient | ||
172 | of the Licensed Material automatically receives an offer | ||
173 | from the Licensor to exercise the Licensed Rights under the | ||
174 | terms and conditions of this Public License. | ||
175 | |||
176 | B. Additional offer from the Licensor - Adapted Material. Every | ||
177 | recipient of Adapted Material from You automatically | ||
178 | receives an offer from the Licensor to exercise the Licensed | ||
179 | Rights in the Adapted Material under the conditions of the | ||
180 | Adapter's License You apply. | ||
181 | |||
182 | C. No downstream restrictions. You may not offer or impose any | ||
183 | additional or different terms or conditions on, or apply any | ||
184 | Effective Technological Measures to, the Licensed Material | ||
185 | if doing so restricts exercise of the Licensed Rights by any | ||
186 | recipient of the Licensed Material. | ||
187 | |||
188 | 6. No endorsement. Nothing in this Public License constitutes or | ||
189 | may be construed as permission to assert or imply that You are, | ||
190 | or that Your use of the Licensed Material is, connected with, or | ||
191 | sponsored, endorsed, or granted official status by, the Licensor | ||
192 | or others designated to receive attribution as provided in | ||
193 | Section 3(a)(1)(A)(i). | ||
194 | |||
195 | b. Other rights. | ||
196 | |||
197 | 1. Moral rights, such as the right of integrity, are not licensed | ||
198 | under this Public License, nor are publicity, privacy, and/or | ||
199 | other similar personality rights; however, to the extent | ||
200 | possible, the Licensor waives and/or agrees not to assert any | ||
201 | such rights held by the Licensor to the limited extent necessary | ||
202 | to allow You to exercise the Licensed Rights, but not otherwise. | ||
203 | |||
204 | 2. Patent and trademark rights are not licensed under this Public | ||
205 | License. | ||
206 | |||
207 | 3. To the extent possible, the Licensor waives any right to collect | ||
208 | royalties from You for the exercise of the Licensed Rights, | ||
209 | whether directly or through a collecting society under any | ||
210 | voluntary or waivable statutory or compulsory licensing | ||
211 | scheme. In all other cases the Licensor expressly reserves any | ||
212 | right to collect such royalties. | ||
213 | |||
214 | Section 3 - License Conditions. | ||
215 | |||
216 | Your exercise of the Licensed Rights is expressly made subject to the | ||
217 | following conditions. | ||
218 | |||
219 | a. Attribution. | ||
220 | |||
221 | 1. If You Share the Licensed Material (including in modified form), | ||
222 | You must: | ||
223 | |||
224 | A. retain the following if it is supplied by the Licensor with | ||
225 | the Licensed Material: | ||
226 | |||
227 | i. identification of the creator(s) of the Licensed | ||
228 | Material and any others designated to receive | ||
229 | attribution, in any reasonable manner requested by the | ||
230 | Licensor (including by pseudonym if designated); | ||
231 | |||
232 | ii. a copyright notice; | ||
233 | |||
234 | iii. a notice that refers to this Public License; | ||
235 | |||
236 | iv. a notice that refers to the disclaimer of warranties; | ||
237 | |||
238 | v. a URI or hyperlink to the Licensed Material to the extent reasonably practicable; | ||
239 | |||
240 | B. indicate if You modified the Licensed Material and retain an | ||
241 | indication of any previous modifications; and | ||
242 | |||
243 | C. indicate the Licensed Material is licensed under this Public | ||
244 | License, and include the text of, or the URI or hyperlink to, | ||
245 | this Public License. | ||
246 | |||
247 | 2. You may satisfy the conditions in Section 3(a)(1) in any | ||
248 | reasonable manner based on the medium, means, and context in | ||
249 | which You Share the Licensed Material. For example, it may be | ||
250 | reasonable to satisfy the conditions by providing a URI or | ||
251 | hyperlink to a resource that includes the required information. | ||
252 | |||
253 | 3. If requested by the Licensor, You must remove any of the | ||
254 | information required by Section 3(a)(1)(A) to the extent | ||
255 | reasonably practicable. b. ShareAlike.In addition to the | ||
256 | conditions in Section 3(a), if You Share Adapted Material You | ||
257 | produce, the following conditions also apply. | ||
258 | |||
259 | 1. The Adapter's License You apply must be a Creative Commons | ||
260 | license with the same License Elements, this version or | ||
261 | later, or a BY-SA Compatible License. | ||
262 | |||
263 | 2. You must include the text of, or the URI or hyperlink to, the | ||
264 | Adapter's License You apply. You may satisfy this condition | ||
265 | in any reasonable manner based on the medium, means, and | ||
266 | context in which You Share Adapted Material. | ||
267 | |||
268 | 3. You may not offer or impose any additional or different terms | ||
269 | or conditions on, or apply any Effective Technological | ||
270 | Measures to, Adapted Material that restrict exercise of the | ||
271 | rights granted under the Adapter's License You apply. | ||
272 | |||
273 | Section 4 - Sui Generis Database Rights. | ||
274 | |||
275 | Where the Licensed Rights include Sui Generis Database Rights that apply to | ||
276 | Your use of the Licensed Material: | ||
277 | |||
278 | a. for the avoidance of doubt, Section 2(a)(1) grants You the right to | ||
279 | extract, reuse, reproduce, and Share all or a substantial portion of | ||
280 | the contents of the database; | ||
281 | |||
282 | b. if You include all or a substantial portion of the database contents | ||
283 | in a database in which You have Sui Generis Database Rights, then | ||
284 | the database in which You have Sui Generis Database Rights (but not | ||
285 | its individual contents) is Adapted Material, including for purposes | ||
286 | of Section 3(b); and | ||
287 | |||
288 | c. You must comply with the conditions in Section 3(a) if You Share all | ||
289 | or a substantial portion of the contents of the database. | ||
290 | |||
291 | For the avoidance of doubt, this Section 4 supplements and does not | ||
292 | replace Your obligations under this Public License where the Licensed | ||
293 | Rights include other Copyright and Similar Rights. | ||
294 | |||
295 | Section 5 - Disclaimer of Warranties and Limitation of Liability. | ||
296 | |||
297 | a. Unless otherwise separately undertaken by the Licensor, to the | ||
298 | extent possible, the Licensor offers the Licensed Material as-is and | ||
299 | as-available, and makes no representations or warranties of any kind | ||
300 | concerning the Licensed Material, whether express, implied, | ||
301 | statutory, or other. This includes, without limitation, warranties | ||
302 | of title, merchantability, fitness for a particular purpose, | ||
303 | non-infringement, absence of latent or other defects, accuracy, or | ||
304 | the presence or absence of errors, whether or not known or | ||
305 | discoverable. Where disclaimers of warranties are not allowed in | ||
306 | full or in part, this disclaimer may not apply to You. | ||
307 | |||
308 | b. To the extent possible, in no event will the Licensor be liable to | ||
309 | You on any legal theory (including, without limitation, negligence) | ||
310 | or otherwise for any direct, special, indirect, incidental, | ||
311 | consequential, punitive, exemplary, or other losses, costs, | ||
312 | expenses, or damages arising out of this Public License or use of | ||
313 | the Licensed Material, even if the Licensor has been advised of the | ||
314 | possibility of such losses, costs, expenses, or damages. Where a | ||
315 | limitation of liability is not allowed in full or in part, this | ||
316 | limitation may not apply to You. | ||
317 | |||
318 | c. The disclaimer of warranties and limitation of liability provided | ||
319 | above shall be interpreted in a manner that, to the extent possible, | ||
320 | most closely approximates an absolute disclaimer and waiver of all | ||
321 | liability. | ||
322 | |||
323 | Section 6 - Term and Termination. | ||
324 | |||
325 | a. This Public License applies for the term of the Copyright and | ||
326 | Similar Rights licensed here. However, if You fail to comply with | ||
327 | this Public License, then Your rights under this Public License | ||
328 | terminate automatically. | ||
329 | |||
330 | b. Where Your right to use the Licensed Material has terminated under | ||
331 | Section 6(a), it reinstates: | ||
332 | |||
333 | 1. automatically as of the date the violation is cured, provided it | ||
334 | is cured within 30 days of Your discovery of the violation; or | ||
335 | |||
336 | 2. upon express reinstatement by the Licensor. | ||
337 | |||
338 | c. For the avoidance of doubt, this Section 6(b) does not affect any | ||
339 | right the Licensor may have to seek remedies for Your violations of | ||
340 | this Public License. | ||
341 | |||
342 | d. For the avoidance of doubt, the Licensor may also offer the Licensed | ||
343 | Material under separate terms or conditions or stop distributing the | ||
344 | Licensed Material at any time; however, doing so will not terminate | ||
345 | this Public License. | ||
346 | |||
347 | e. Sections 1, 5, 6, 7, and 8 survive termination of this Public License. | ||
348 | |||
349 | Section 7 - Other Terms and Conditions. | ||
350 | |||
351 | a. The Licensor shall not be bound by any additional or different terms | ||
352 | or conditions communicated by You unless expressly agreed. | ||
353 | |||
354 | b. Any arrangements, understandings, or agreements regarding the | ||
355 | Licensed Material not stated herein are separate from and | ||
356 | independent of the terms and conditions of this Public License. | ||
357 | |||
358 | Section 8 - Interpretation. | ||
359 | |||
360 | a. For the avoidance of doubt, this Public License does not, and shall | ||
361 | not be interpreted to, reduce, limit, restrict, or impose conditions | ||
362 | on any use of the Licensed Material that could lawfully be made | ||
363 | without permission under this Public License. | ||
364 | |||
365 | b. To the extent possible, if any provision of this Public License is | ||
366 | deemed unenforceable, it shall be automatically reformed to the | ||
367 | minimum extent necessary to make it enforceable. If the provision | ||
368 | cannot be reformed, it shall be severed from this Public License | ||
369 | without affecting the enforceability of the remaining terms and | ||
370 | conditions. | ||
371 | |||
372 | c. No term or condition of this Public License will be waived and no | ||
373 | failure to comply consented to unless expressly agreed to by the | ||
374 | Licensor. | ||
375 | |||
376 | d. Nothing in this Public License constitutes or may be interpreted as | ||
377 | a limitation upon, or waiver of, any privileges and immunities that | ||
378 | apply to the Licensor or You, including from the legal processes of | ||
379 | any jurisdiction or authority. | ||
380 | |||
381 | Creative Commons is not a party to its public licenses. Notwithstanding, | ||
382 | Creative Commons may elect to apply one of its public licenses to material | ||
383 | it publishes and in those instances will be considered the "Licensor." The | ||
384 | text of the Creative Commons public licenses is dedicated to the public | ||
385 | domain under the CC0 Public Domain Dedication. Except for the limited | ||
386 | purpose of indicating that material is shared under a Creative Commons | ||
387 | public license or as otherwise permitted by the Creative Commons policies | ||
388 | published at creativecommons.org/policies, Creative Commons does not | ||
389 | authorize the use of the trademark "Creative Commons" or any other | ||
390 | trademark or logo of Creative Commons without its prior written consent | ||
391 | including, without limitation, in connection with any unauthorized | ||
392 | modifications to any of its public licenses or any other arrangements, | ||
393 | understandings, or agreements concerning use of licensed material. For the | ||
394 | avoidance of doubt, this paragraph does not form part of the public | ||
395 | licenses. | ||
396 | |||
397 | Creative Commons may be contacted at creativecommons.org. | ||
diff --git a/MAINTAINERS b/MAINTAINERS
index a3e46109ceb2..e0eb8ec08789 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -10161,7 +10161,6 @@ L: netdev@vger.kernel.org | |||
10161 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec.git | 10161 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec.git |
10162 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next.git | 10162 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next.git |
10163 | S: Maintained | 10163 | S: Maintained |
10164 | F: net/core/flow.c | ||
10165 | F: net/xfrm/ | 10164 | F: net/xfrm/ |
10166 | F: net/key/ | 10165 | F: net/key/ |
10167 | F: net/ipv4/xfrm* | 10166 | F: net/ipv4/xfrm* |
@@ -13101,7 +13100,7 @@ SELINUX SECURITY MODULE | |||
13101 | M: Paul Moore <paul@paul-moore.com> | 13100 | M: Paul Moore <paul@paul-moore.com> |
13102 | M: Stephen Smalley <sds@tycho.nsa.gov> | 13101 | M: Stephen Smalley <sds@tycho.nsa.gov> |
13103 | M: Eric Paris <eparis@parisplace.org> | 13102 | M: Eric Paris <eparis@parisplace.org> |
13104 | L: selinux@tycho.nsa.gov (moderated for non-subscribers) | 13103 | L: selinux@vger.kernel.org |
13105 | W: https://selinuxproject.org | 13104 | W: https://selinuxproject.org |
13106 | W: https://github.com/SELinuxProject | 13105 | W: https://github.com/SELinuxProject |
13107 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/pcmoore/selinux.git | 13106 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/pcmoore/selinux.git |
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@ | |||
2 | VERSION = 4 | 2 | VERSION = 4 |
3 | PATCHLEVEL = 19 | 3 | PATCHLEVEL = 19 |
4 | SUBLEVEL = 0 | 4 | SUBLEVEL = 0 |
5 | EXTRAVERSION = -rc7 | 5 | EXTRAVERSION = -rc8 |
6 | NAME = Merciless Moray | 6 | NAME = Merciless Moray |
7 | 7 | ||
8 | # *DOCUMENTATION* | 8 | # *DOCUMENTATION* |
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 450c7a4fbc8a..cb094e55dc5f 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -478,15 +478,15 @@ static const struct coproc_reg cp15_regs[] = { | |||
478 | 478 | ||
479 | /* ICC_SGI1R */ | 479 | /* ICC_SGI1R */ |
480 | { CRm64(12), Op1( 0), is64, access_gic_sgi}, | 480 | { CRm64(12), Op1( 0), is64, access_gic_sgi}, |
481 | /* ICC_ASGI1R */ | ||
482 | { CRm64(12), Op1( 1), is64, access_gic_sgi}, | ||
483 | /* ICC_SGI0R */ | ||
484 | { CRm64(12), Op1( 2), is64, access_gic_sgi}, | ||
485 | 481 | ||
486 | /* VBAR: swapped by interrupt.S. */ | 482 | /* VBAR: swapped by interrupt.S. */ |
487 | { CRn(12), CRm( 0), Op1( 0), Op2( 0), is32, | 483 | { CRn(12), CRm( 0), Op1( 0), Op2( 0), is32, |
488 | NULL, reset_val, c12_VBAR, 0x00000000 }, | 484 | NULL, reset_val, c12_VBAR, 0x00000000 }, |
489 | 485 | ||
486 | /* ICC_ASGI1R */ | ||
487 | { CRm64(12), Op1( 1), is64, access_gic_sgi}, | ||
488 | /* ICC_SGI0R */ | ||
489 | { CRm64(12), Op1( 2), is64, access_gic_sgi}, | ||
490 | /* ICC_SRE */ | 490 | /* ICC_SRE */ |
491 | { CRn(12), CRm(12), Op1( 0), Op2(5), is32, access_gic_sre }, | 491 | { CRn(12), CRm(12), Op1( 0), Op2(5), is32, access_gic_sre }, |
492 | 492 | ||
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 8e38d5267f22..e213f8e867f6 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -966,6 +966,12 @@ static int armv8pmu_set_event_filter(struct hw_perf_event *event, | |||
966 | return 0; | 966 | return 0; |
967 | } | 967 | } |
968 | 968 | ||
969 | static int armv8pmu_filter_match(struct perf_event *event) | ||
970 | { | ||
971 | unsigned long evtype = event->hw.config_base & ARMV8_PMU_EVTYPE_EVENT; | ||
972 | return evtype != ARMV8_PMUV3_PERFCTR_CHAIN; | ||
973 | } | ||
974 | |||
969 | static void armv8pmu_reset(void *info) | 975 | static void armv8pmu_reset(void *info) |
970 | { | 976 | { |
971 | struct arm_pmu *cpu_pmu = (struct arm_pmu *)info; | 977 | struct arm_pmu *cpu_pmu = (struct arm_pmu *)info; |
@@ -1114,6 +1120,7 @@ static int armv8_pmu_init(struct arm_pmu *cpu_pmu) | |||
1114 | cpu_pmu->stop = armv8pmu_stop, | 1120 | cpu_pmu->stop = armv8pmu_stop, |
1115 | cpu_pmu->reset = armv8pmu_reset, | 1121 | cpu_pmu->reset = armv8pmu_reset, |
1116 | cpu_pmu->set_event_filter = armv8pmu_set_event_filter; | 1122 | cpu_pmu->set_event_filter = armv8pmu_set_event_filter; |
1123 | cpu_pmu->filter_match = armv8pmu_filter_match; | ||
1117 | 1124 | ||
1118 | return 0; | 1125 | return 0; |
1119 | } | 1126 | } |
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 5b4fac434c84..b3354ff94e79 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -64,6 +64,9 @@ | |||
64 | #include <asm/xen/hypervisor.h> | 64 | #include <asm/xen/hypervisor.h> |
65 | #include <asm/mmu_context.h> | 65 | #include <asm/mmu_context.h> |
66 | 66 | ||
67 | static int num_standard_resources; | ||
68 | static struct resource *standard_resources; | ||
69 | |||
67 | phys_addr_t __fdt_pointer __initdata; | 70 | phys_addr_t __fdt_pointer __initdata; |
68 | 71 | ||
69 | /* | 72 | /* |
@@ -206,14 +209,19 @@ static void __init request_standard_resources(void) | |||
206 | { | 209 | { |
207 | struct memblock_region *region; | 210 | struct memblock_region *region; |
208 | struct resource *res; | 211 | struct resource *res; |
212 | unsigned long i = 0; | ||
209 | 213 | ||
210 | kernel_code.start = __pa_symbol(_text); | 214 | kernel_code.start = __pa_symbol(_text); |
211 | kernel_code.end = __pa_symbol(__init_begin - 1); | 215 | kernel_code.end = __pa_symbol(__init_begin - 1); |
212 | kernel_data.start = __pa_symbol(_sdata); | 216 | kernel_data.start = __pa_symbol(_sdata); |
213 | kernel_data.end = __pa_symbol(_end - 1); | 217 | kernel_data.end = __pa_symbol(_end - 1); |
214 | 218 | ||
219 | num_standard_resources = memblock.memory.cnt; | ||
220 | standard_resources = alloc_bootmem_low(num_standard_resources * | ||
221 | sizeof(*standard_resources)); | ||
222 | |||
215 | for_each_memblock(memory, region) { | 223 | for_each_memblock(memory, region) { |
216 | res = alloc_bootmem_low(sizeof(*res)); | 224 | res = &standard_resources[i++]; |
217 | if (memblock_is_nomap(region)) { | 225 | if (memblock_is_nomap(region)) { |
218 | res->name = "reserved"; | 226 | res->name = "reserved"; |
219 | res->flags = IORESOURCE_MEM; | 227 | res->flags = IORESOURCE_MEM; |
@@ -243,36 +251,26 @@ static void __init request_standard_resources(void) | |||
243 | 251 | ||
244 | static int __init reserve_memblock_reserved_regions(void) | 252 | static int __init reserve_memblock_reserved_regions(void) |
245 | { | 253 | { |
246 | phys_addr_t start, end, roundup_end = 0; | 254 | u64 i, j; |
247 | struct resource *mem, *res; | 255 | |
248 | u64 i; | 256 | for (i = 0; i < num_standard_resources; ++i) { |
249 | 257 | struct resource *mem = &standard_resources[i]; | |
250 | for_each_reserved_mem_region(i, &start, &end) { | 258 | phys_addr_t r_start, r_end, mem_size = resource_size(mem); |
251 | if (end <= roundup_end) | 259 | |
252 | continue; /* done already */ | 260 | if (!memblock_is_region_reserved(mem->start, mem_size)) |
253 | |||
254 | start = __pfn_to_phys(PFN_DOWN(start)); | ||
255 | end = __pfn_to_phys(PFN_UP(end)) - 1; | ||
256 | roundup_end = end; | ||
257 | |||
258 | res = kzalloc(sizeof(*res), GFP_ATOMIC); | ||
259 | if (WARN_ON(!res)) | ||
260 | return -ENOMEM; | ||
261 | res->start = start; | ||
262 | res->end = end; | ||
263 | res->name = "reserved"; | ||
264 | res->flags = IORESOURCE_MEM; | ||
265 | |||
266 | mem = request_resource_conflict(&iomem_resource, res); | ||
267 | /* | ||
268 | * We expected memblock_reserve() regions to conflict with | ||
269 | * memory created by request_standard_resources(). | ||
270 | */ | ||
271 | if (WARN_ON_ONCE(!mem)) | ||
272 | continue; | 261 | continue; |
273 | kfree(res); | ||
274 | 262 | ||
275 | reserve_region_with_split(mem, start, end, "reserved"); | 263 | for_each_reserved_mem_region(j, &r_start, &r_end) { |
264 | resource_size_t start, end; | ||
265 | |||
266 | start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start); | ||
267 | end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end); | ||
268 | |||
269 | if (start > mem->end || end < mem->start) | ||
270 | continue; | ||
271 | |||
272 | reserve_region_with_split(mem, start, end, "reserved"); | ||
273 | } | ||
276 | } | 274 | } |
277 | 275 | ||
278 | return 0; | 276 | return 0; |
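
Aside (not part of the patch): the reworked reserve_memblock_reserved_regions() above clamps each reserved memblock region to the standard resource it overlaps, taking the max of the start addresses and the min of the end addresses before calling reserve_region_with_split(). A minimal user-space sketch of that interval-clamping step, with made-up types and values:

#include <stdio.h>

/* Hypothetical stand-ins for a standard resource and a reserved region. */
struct range { unsigned long long start, end; };	/* inclusive ends */

/* Clamp a reserved region to one resource; return 0 when they do not overlap. */
static int clamp_to_resource(const struct range *res, const struct range *rsv,
			     struct range *out)
{
	unsigned long long start = rsv->start > res->start ? rsv->start : res->start;
	unsigned long long end   = rsv->end   < res->end   ? rsv->end   : res->end;

	if (start > res->end || end < res->start)
		return 0;	/* no overlap, skip (mirrors the 'continue' above) */
	out->start = start;
	out->end = end;
	return 1;
}

int main(void)
{
	struct range mem = { 0x80000000ULL, 0x8fffffffULL };	/* standard resource */
	struct range rsv = { 0x8ff00000ULL, 0x90000fffULL };	/* reserved region   */
	struct range hit;

	if (clamp_to_resource(&mem, &rsv, &hit))
		printf("reserve [%llx-%llx]\n", hit.start, hit.end);
	return 0;
}
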
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index f329b466e68f..2d14f17838d2 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -426,7 +426,7 @@ void unwind_frame_init_task(struct unwind_frame_info *info, | |||
426 | r.gr[30] = get_parisc_stackpointer(); | 426 | r.gr[30] = get_parisc_stackpointer(); |
427 | regs = &r; | 427 | regs = &r; |
428 | } | 428 | } |
429 | unwind_frame_init(info, task, &r); | 429 | unwind_frame_init(info, task, regs); |
430 | } else { | 430 | } else { |
431 | unwind_frame_init_from_blocked_task(info, task); | 431 | unwind_frame_init_from_blocked_task(info, task); |
432 | } | 432 | } |
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 2fdc865ca374..2a2486526d1f 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -114,7 +114,7 @@ | |||
114 | */ | 114 | */ |
115 | #define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \ | 115 | #define _HPAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \ |
116 | _PAGE_ACCESSED | H_PAGE_THP_HUGE | _PAGE_PTE | \ | 116 | _PAGE_ACCESSED | H_PAGE_THP_HUGE | _PAGE_PTE | \ |
117 | _PAGE_SOFT_DIRTY) | 117 | _PAGE_SOFT_DIRTY | _PAGE_DEVMAP) |
118 | /* | 118 | /* |
119 | * user access blocked by key | 119 | * user access blocked by key |
120 | */ | 120 | */ |
@@ -132,7 +132,7 @@ | |||
132 | */ | 132 | */ |
133 | #define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \ | 133 | #define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \ |
134 | _PAGE_ACCESSED | _PAGE_SPECIAL | _PAGE_PTE | \ | 134 | _PAGE_ACCESSED | _PAGE_SPECIAL | _PAGE_PTE | \ |
135 | _PAGE_SOFT_DIRTY) | 135 | _PAGE_SOFT_DIRTY | _PAGE_DEVMAP) |
136 | 136 | ||
137 | #define H_PTE_PKEY (H_PTE_PKEY_BIT0 | H_PTE_PKEY_BIT1 | H_PTE_PKEY_BIT2 | \ | 137 | #define H_PTE_PKEY (H_PTE_PKEY_BIT0 | H_PTE_PKEY_BIT1 | H_PTE_PKEY_BIT2 | \ |
138 | H_PTE_PKEY_BIT3 | H_PTE_PKEY_BIT4) | 138 | H_PTE_PKEY_BIT3 | H_PTE_PKEY_BIT4) |
diff --git a/arch/sparc/include/asm/cpudata_64.h b/arch/sparc/include/asm/cpudata_64.h
index 666d6b5c0440..9c3fc03abe9a 100644
--- a/arch/sparc/include/asm/cpudata_64.h
+++ b/arch/sparc/include/asm/cpudata_64.h
@@ -28,7 +28,7 @@ typedef struct { | |||
28 | unsigned short sock_id; /* physical package */ | 28 | unsigned short sock_id; /* physical package */ |
29 | unsigned short core_id; | 29 | unsigned short core_id; |
30 | unsigned short max_cache_id; /* groupings of highest shared cache */ | 30 | unsigned short max_cache_id; /* groupings of highest shared cache */ |
31 | unsigned short proc_id; /* strand (aka HW thread) id */ | 31 | signed short proc_id; /* strand (aka HW thread) id */ |
32 | } cpuinfo_sparc; | 32 | } cpuinfo_sparc; |
33 | 33 | ||
34 | DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data); | 34 | DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data); |
diff --git a/arch/sparc/include/uapi/asm/unistd.h b/arch/sparc/include/uapi/asm/unistd.h
index 09acf0ddec10..45b4bf1875e6 100644
--- a/arch/sparc/include/uapi/asm/unistd.h
+++ b/arch/sparc/include/uapi/asm/unistd.h
@@ -427,8 +427,9 @@ | |||
427 | #define __NR_preadv2 358 | 427 | #define __NR_preadv2 358 |
428 | #define __NR_pwritev2 359 | 428 | #define __NR_pwritev2 359 |
429 | #define __NR_statx 360 | 429 | #define __NR_statx 360 |
430 | #define __NR_io_pgetevents 361 | ||
430 | 431 | ||
431 | #define NR_syscalls 361 | 432 | #define NR_syscalls 362 |
432 | 433 | ||
433 | /* Bitmask values returned from kern_features system call. */ | 434 | /* Bitmask values returned from kern_features system call. */ |
434 | #define KERN_FEATURE_MIXED_MODE_STACK 0x00000001 | 435 | #define KERN_FEATURE_MIXED_MODE_STACK 0x00000001 |
diff --git a/arch/sparc/kernel/auxio_64.c b/arch/sparc/kernel/auxio_64.c
index cc42225c20f3..4e8f56c3793c 100644
--- a/arch/sparc/kernel/auxio_64.c
+++ b/arch/sparc/kernel/auxio_64.c
@@ -115,8 +115,8 @@ static int auxio_probe(struct platform_device *dev) | |||
115 | auxio_devtype = AUXIO_TYPE_SBUS; | 115 | auxio_devtype = AUXIO_TYPE_SBUS; |
116 | size = 1; | 116 | size = 1; |
117 | } else { | 117 | } else { |
118 | printk("auxio: Unknown parent bus type [%pOFn]\n", | 118 | printk("auxio: Unknown parent bus type [%s]\n", |
119 | dp->parent); | 119 | dp->parent->name); |
120 | return -ENODEV; | 120 | return -ENODEV; |
121 | } | 121 | } |
122 | auxio_register = of_ioremap(&dev->resource[0], 0, size, "auxio"); | 122 | auxio_register = of_ioremap(&dev->resource[0], 0, size, "auxio"); |
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index d3149baaa33c..67b3e6b3ce5d 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -24,6 +24,7 @@ | |||
24 | #include <asm/cpudata.h> | 24 | #include <asm/cpudata.h> |
25 | #include <linux/uaccess.h> | 25 | #include <linux/uaccess.h> |
26 | #include <linux/atomic.h> | 26 | #include <linux/atomic.h> |
27 | #include <linux/sched/clock.h> | ||
27 | #include <asm/nmi.h> | 28 | #include <asm/nmi.h> |
28 | #include <asm/pcr.h> | 29 | #include <asm/pcr.h> |
29 | #include <asm/cacheflush.h> | 30 | #include <asm/cacheflush.h> |
@@ -927,6 +928,8 @@ static void read_in_all_counters(struct cpu_hw_events *cpuc) | |||
927 | sparc_perf_event_update(cp, &cp->hw, | 928 | sparc_perf_event_update(cp, &cp->hw, |
928 | cpuc->current_idx[i]); | 929 | cpuc->current_idx[i]); |
929 | cpuc->current_idx[i] = PIC_NO_INDEX; | 930 | cpuc->current_idx[i] = PIC_NO_INDEX; |
931 | if (cp->hw.state & PERF_HES_STOPPED) | ||
932 | cp->hw.state |= PERF_HES_ARCH; | ||
930 | } | 933 | } |
931 | } | 934 | } |
932 | } | 935 | } |
@@ -959,10 +962,12 @@ static void calculate_single_pcr(struct cpu_hw_events *cpuc) | |||
959 | 962 | ||
960 | enc = perf_event_get_enc(cpuc->events[i]); | 963 | enc = perf_event_get_enc(cpuc->events[i]); |
961 | cpuc->pcr[0] &= ~mask_for_index(idx); | 964 | cpuc->pcr[0] &= ~mask_for_index(idx); |
962 | if (hwc->state & PERF_HES_STOPPED) | 965 | if (hwc->state & PERF_HES_ARCH) { |
963 | cpuc->pcr[0] |= nop_for_index(idx); | 966 | cpuc->pcr[0] |= nop_for_index(idx); |
964 | else | 967 | } else { |
965 | cpuc->pcr[0] |= event_encoding(enc, idx); | 968 | cpuc->pcr[0] |= event_encoding(enc, idx); |
969 | hwc->state = 0; | ||
970 | } | ||
966 | } | 971 | } |
967 | out: | 972 | out: |
968 | cpuc->pcr[0] |= cpuc->event[0]->hw.config_base; | 973 | cpuc->pcr[0] |= cpuc->event[0]->hw.config_base; |
@@ -988,6 +993,9 @@ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc) | |||
988 | 993 | ||
989 | cpuc->current_idx[i] = idx; | 994 | cpuc->current_idx[i] = idx; |
990 | 995 | ||
996 | if (cp->hw.state & PERF_HES_ARCH) | ||
997 | continue; | ||
998 | |||
991 | sparc_pmu_start(cp, PERF_EF_RELOAD); | 999 | sparc_pmu_start(cp, PERF_EF_RELOAD); |
992 | } | 1000 | } |
993 | out: | 1001 | out: |
@@ -1079,6 +1087,8 @@ static void sparc_pmu_start(struct perf_event *event, int flags) | |||
1079 | event->hw.state = 0; | 1087 | event->hw.state = 0; |
1080 | 1088 | ||
1081 | sparc_pmu_enable_event(cpuc, &event->hw, idx); | 1089 | sparc_pmu_enable_event(cpuc, &event->hw, idx); |
1090 | |||
1091 | perf_event_update_userpage(event); | ||
1082 | } | 1092 | } |
1083 | 1093 | ||
1084 | static void sparc_pmu_stop(struct perf_event *event, int flags) | 1094 | static void sparc_pmu_stop(struct perf_event *event, int flags) |
@@ -1371,9 +1381,9 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags) | |||
1371 | cpuc->events[n0] = event->hw.event_base; | 1381 | cpuc->events[n0] = event->hw.event_base; |
1372 | cpuc->current_idx[n0] = PIC_NO_INDEX; | 1382 | cpuc->current_idx[n0] = PIC_NO_INDEX; |
1373 | 1383 | ||
1374 | event->hw.state = PERF_HES_UPTODATE; | 1384 | event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; |
1375 | if (!(ef_flags & PERF_EF_START)) | 1385 | if (!(ef_flags & PERF_EF_START)) |
1376 | event->hw.state |= PERF_HES_STOPPED; | 1386 | event->hw.state |= PERF_HES_ARCH; |
1377 | 1387 | ||
1378 | /* | 1388 | /* |
1379 | * If group events scheduling transaction was started, | 1389 | * If group events scheduling transaction was started, |
@@ -1603,6 +1613,8 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self, | |||
1603 | struct perf_sample_data data; | 1613 | struct perf_sample_data data; |
1604 | struct cpu_hw_events *cpuc; | 1614 | struct cpu_hw_events *cpuc; |
1605 | struct pt_regs *regs; | 1615 | struct pt_regs *regs; |
1616 | u64 finish_clock; | ||
1617 | u64 start_clock; | ||
1606 | int i; | 1618 | int i; |
1607 | 1619 | ||
1608 | if (!atomic_read(&active_events)) | 1620 | if (!atomic_read(&active_events)) |
@@ -1616,6 +1628,8 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self, | |||
1616 | return NOTIFY_DONE; | 1628 | return NOTIFY_DONE; |
1617 | } | 1629 | } |
1618 | 1630 | ||
1631 | start_clock = sched_clock(); | ||
1632 | |||
1619 | regs = args->regs; | 1633 | regs = args->regs; |
1620 | 1634 | ||
1621 | cpuc = this_cpu_ptr(&cpu_hw_events); | 1635 | cpuc = this_cpu_ptr(&cpu_hw_events); |
@@ -1654,6 +1668,10 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self, | |||
1654 | sparc_pmu_stop(event, 0); | 1668 | sparc_pmu_stop(event, 0); |
1655 | } | 1669 | } |
1656 | 1670 | ||
1671 | finish_clock = sched_clock(); | ||
1672 | |||
1673 | perf_sample_event_took(finish_clock - start_clock); | ||
1674 | |||
1657 | return NOTIFY_STOP; | 1675 | return NOTIFY_STOP; |
1658 | } | 1676 | } |
1659 | 1677 | ||
diff --git a/arch/sparc/kernel/power.c b/arch/sparc/kernel/power.c
index d941875dd718..92627abce311 100644
--- a/arch/sparc/kernel/power.c
+++ b/arch/sparc/kernel/power.c
@@ -41,8 +41,8 @@ static int power_probe(struct platform_device *op) | |||
41 | 41 | ||
42 | power_reg = of_ioremap(res, 0, 0x4, "power"); | 42 | power_reg = of_ioremap(res, 0, 0x4, "power"); |
43 | 43 | ||
44 | printk(KERN_INFO "%pOFn: Control reg at %llx\n", | 44 | printk(KERN_INFO "%s: Control reg at %llx\n", |
45 | op->dev.of_node, res->start); | 45 | op->dev.of_node->name, res->start); |
46 | 46 | ||
47 | if (has_button_interrupt(irq, op->dev.of_node)) { | 47 | if (has_button_interrupt(irq, op->dev.of_node)) { |
48 | if (request_irq(irq, | 48 | if (request_irq(irq, |
diff --git a/arch/sparc/kernel/prom_32.c b/arch/sparc/kernel/prom_32.c
index 17c87d29ff20..b51cbb9e87dc 100644
--- a/arch/sparc/kernel/prom_32.c
+++ b/arch/sparc/kernel/prom_32.c
@@ -68,8 +68,8 @@ static void __init sparc32_path_component(struct device_node *dp, char *tmp_buf) | |||
68 | return; | 68 | return; |
69 | 69 | ||
70 | regs = rprop->value; | 70 | regs = rprop->value; |
71 | sprintf(tmp_buf, "%pOFn@%x,%x", | 71 | sprintf(tmp_buf, "%s@%x,%x", |
72 | dp, | 72 | dp->name, |
73 | regs->which_io, regs->phys_addr); | 73 | regs->which_io, regs->phys_addr); |
74 | } | 74 | } |
75 | 75 | ||
@@ -84,8 +84,8 @@ static void __init sbus_path_component(struct device_node *dp, char *tmp_buf) | |||
84 | return; | 84 | return; |
85 | 85 | ||
86 | regs = prop->value; | 86 | regs = prop->value; |
87 | sprintf(tmp_buf, "%pOFn@%x,%x", | 87 | sprintf(tmp_buf, "%s@%x,%x", |
88 | dp, | 88 | dp->name, |
89 | regs->which_io, | 89 | regs->which_io, |
90 | regs->phys_addr); | 90 | regs->phys_addr); |
91 | } | 91 | } |
@@ -104,13 +104,13 @@ static void __init pci_path_component(struct device_node *dp, char *tmp_buf) | |||
104 | regs = prop->value; | 104 | regs = prop->value; |
105 | devfn = (regs->phys_hi >> 8) & 0xff; | 105 | devfn = (regs->phys_hi >> 8) & 0xff; |
106 | if (devfn & 0x07) { | 106 | if (devfn & 0x07) { |
107 | sprintf(tmp_buf, "%pOFn@%x,%x", | 107 | sprintf(tmp_buf, "%s@%x,%x", |
108 | dp, | 108 | dp->name, |
109 | devfn >> 3, | 109 | devfn >> 3, |
110 | devfn & 0x07); | 110 | devfn & 0x07); |
111 | } else { | 111 | } else { |
112 | sprintf(tmp_buf, "%pOFn@%x", | 112 | sprintf(tmp_buf, "%s@%x", |
113 | dp, | 113 | dp->name, |
114 | devfn >> 3); | 114 | devfn >> 3); |
115 | } | 115 | } |
116 | } | 116 | } |
@@ -127,8 +127,8 @@ static void __init ebus_path_component(struct device_node *dp, char *tmp_buf) | |||
127 | 127 | ||
128 | regs = prop->value; | 128 | regs = prop->value; |
129 | 129 | ||
130 | sprintf(tmp_buf, "%pOFn@%x,%x", | 130 | sprintf(tmp_buf, "%s@%x,%x", |
131 | dp, | 131 | dp->name, |
132 | regs->which_io, regs->phys_addr); | 132 | regs->which_io, regs->phys_addr); |
133 | } | 133 | } |
134 | 134 | ||
@@ -167,8 +167,8 @@ static void __init ambapp_path_component(struct device_node *dp, char *tmp_buf) | |||
167 | return; | 167 | return; |
168 | device = prop->value; | 168 | device = prop->value; |
169 | 169 | ||
170 | sprintf(tmp_buf, "%pOFn:%d:%d@%x,%x", | 170 | sprintf(tmp_buf, "%s:%d:%d@%x,%x", |
171 | dp, *vendor, *device, | 171 | dp->name, *vendor, *device, |
172 | *intr, reg0); | 172 | *intr, reg0); |
173 | } | 173 | } |
174 | 174 | ||
@@ -201,7 +201,7 @@ char * __init build_path_component(struct device_node *dp) | |||
201 | tmp_buf[0] = '\0'; | 201 | tmp_buf[0] = '\0'; |
202 | __build_path_component(dp, tmp_buf); | 202 | __build_path_component(dp, tmp_buf); |
203 | if (tmp_buf[0] == '\0') | 203 | if (tmp_buf[0] == '\0') |
204 | snprintf(tmp_buf, sizeof(tmp_buf), "%pOFn", dp); | 204 | strcpy(tmp_buf, dp->name); |
205 | 205 | ||
206 | n = prom_early_alloc(strlen(tmp_buf) + 1); | 206 | n = prom_early_alloc(strlen(tmp_buf) + 1); |
207 | strcpy(n, tmp_buf); | 207 | strcpy(n, tmp_buf); |
diff --git a/arch/sparc/kernel/prom_64.c b/arch/sparc/kernel/prom_64.c
index 6220411ce8fc..baeaeed64993 100644
--- a/arch/sparc/kernel/prom_64.c
+++ b/arch/sparc/kernel/prom_64.c
@@ -82,8 +82,8 @@ static void __init sun4v_path_component(struct device_node *dp, char *tmp_buf) | |||
82 | 82 | ||
83 | regs = rprop->value; | 83 | regs = rprop->value; |
84 | if (!of_node_is_root(dp->parent)) { | 84 | if (!of_node_is_root(dp->parent)) { |
85 | sprintf(tmp_buf, "%pOFn@%x,%x", | 85 | sprintf(tmp_buf, "%s@%x,%x", |
86 | dp, | 86 | dp->name, |
87 | (unsigned int) (regs->phys_addr >> 32UL), | 87 | (unsigned int) (regs->phys_addr >> 32UL), |
88 | (unsigned int) (regs->phys_addr & 0xffffffffUL)); | 88 | (unsigned int) (regs->phys_addr & 0xffffffffUL)); |
89 | return; | 89 | return; |
@@ -97,17 +97,17 @@ static void __init sun4v_path_component(struct device_node *dp, char *tmp_buf) | |||
97 | const char *prefix = (type == 0) ? "m" : "i"; | 97 | const char *prefix = (type == 0) ? "m" : "i"; |
98 | 98 | ||
99 | if (low_bits) | 99 | if (low_bits) |
100 | sprintf(tmp_buf, "%pOFn@%s%x,%x", | 100 | sprintf(tmp_buf, "%s@%s%x,%x", |
101 | dp, prefix, | 101 | dp->name, prefix, |
102 | high_bits, low_bits); | 102 | high_bits, low_bits); |
103 | else | 103 | else |
104 | sprintf(tmp_buf, "%pOFn@%s%x", | 104 | sprintf(tmp_buf, "%s@%s%x", |
105 | dp, | 105 | dp->name, |
106 | prefix, | 106 | prefix, |
107 | high_bits); | 107 | high_bits); |
108 | } else if (type == 12) { | 108 | } else if (type == 12) { |
109 | sprintf(tmp_buf, "%pOFn@%x", | 109 | sprintf(tmp_buf, "%s@%x", |
110 | dp, high_bits); | 110 | dp->name, high_bits); |
111 | } | 111 | } |
112 | } | 112 | } |
113 | 113 | ||
@@ -122,8 +122,8 @@ static void __init sun4u_path_component(struct device_node *dp, char *tmp_buf) | |||
122 | 122 | ||
123 | regs = prop->value; | 123 | regs = prop->value; |
124 | if (!of_node_is_root(dp->parent)) { | 124 | if (!of_node_is_root(dp->parent)) { |
125 | sprintf(tmp_buf, "%pOFn@%x,%x", | 125 | sprintf(tmp_buf, "%s@%x,%x", |
126 | dp, | 126 | dp->name, |
127 | (unsigned int) (regs->phys_addr >> 32UL), | 127 | (unsigned int) (regs->phys_addr >> 32UL), |
128 | (unsigned int) (regs->phys_addr & 0xffffffffUL)); | 128 | (unsigned int) (regs->phys_addr & 0xffffffffUL)); |
129 | return; | 129 | return; |
@@ -138,8 +138,8 @@ static void __init sun4u_path_component(struct device_node *dp, char *tmp_buf) | |||
138 | if (tlb_type >= cheetah) | 138 | if (tlb_type >= cheetah) |
139 | mask = 0x7fffff; | 139 | mask = 0x7fffff; |
140 | 140 | ||
141 | sprintf(tmp_buf, "%pOFn@%x,%x", | 141 | sprintf(tmp_buf, "%s@%x,%x", |
142 | dp, | 142 | dp->name, |
143 | *(u32 *)prop->value, | 143 | *(u32 *)prop->value, |
144 | (unsigned int) (regs->phys_addr & mask)); | 144 | (unsigned int) (regs->phys_addr & mask)); |
145 | } | 145 | } |
@@ -156,8 +156,8 @@ static void __init sbus_path_component(struct device_node *dp, char *tmp_buf) | |||
156 | return; | 156 | return; |
157 | 157 | ||
158 | regs = prop->value; | 158 | regs = prop->value; |
159 | sprintf(tmp_buf, "%pOFn@%x,%x", | 159 | sprintf(tmp_buf, "%s@%x,%x", |
160 | dp, | 160 | dp->name, |
161 | regs->which_io, | 161 | regs->which_io, |
162 | regs->phys_addr); | 162 | regs->phys_addr); |
163 | } | 163 | } |
@@ -176,13 +176,13 @@ static void __init pci_path_component(struct device_node *dp, char *tmp_buf) | |||
176 | regs = prop->value; | 176 | regs = prop->value; |
177 | devfn = (regs->phys_hi >> 8) & 0xff; | 177 | devfn = (regs->phys_hi >> 8) & 0xff; |
178 | if (devfn & 0x07) { | 178 | if (devfn & 0x07) { |
179 | sprintf(tmp_buf, "%pOFn@%x,%x", | 179 | sprintf(tmp_buf, "%s@%x,%x", |
180 | dp, | 180 | dp->name, |
181 | devfn >> 3, | 181 | devfn >> 3, |
182 | devfn & 0x07); | 182 | devfn & 0x07); |
183 | } else { | 183 | } else { |
184 | sprintf(tmp_buf, "%pOFn@%x", | 184 | sprintf(tmp_buf, "%s@%x", |
185 | dp, | 185 | dp->name, |
186 | devfn >> 3); | 186 | devfn >> 3); |
187 | } | 187 | } |
188 | } | 188 | } |
@@ -203,8 +203,8 @@ static void __init upa_path_component(struct device_node *dp, char *tmp_buf) | |||
203 | if (!prop) | 203 | if (!prop) |
204 | return; | 204 | return; |
205 | 205 | ||
206 | sprintf(tmp_buf, "%pOFn@%x,%x", | 206 | sprintf(tmp_buf, "%s@%x,%x", |
207 | dp, | 207 | dp->name, |
208 | *(u32 *) prop->value, | 208 | *(u32 *) prop->value, |
209 | (unsigned int) (regs->phys_addr & 0xffffffffUL)); | 209 | (unsigned int) (regs->phys_addr & 0xffffffffUL)); |
210 | } | 210 | } |
@@ -221,7 +221,7 @@ static void __init vdev_path_component(struct device_node *dp, char *tmp_buf) | |||
221 | 221 | ||
222 | regs = prop->value; | 222 | regs = prop->value; |
223 | 223 | ||
224 | sprintf(tmp_buf, "%pOFn@%x", dp, *regs); | 224 | sprintf(tmp_buf, "%s@%x", dp->name, *regs); |
225 | } | 225 | } |
226 | 226 | ||
227 | /* "name@addrhi,addrlo" */ | 227 | /* "name@addrhi,addrlo" */ |
@@ -236,8 +236,8 @@ static void __init ebus_path_component(struct device_node *dp, char *tmp_buf) | |||
236 | 236 | ||
237 | regs = prop->value; | 237 | regs = prop->value; |
238 | 238 | ||
239 | sprintf(tmp_buf, "%pOFn@%x,%x", | 239 | sprintf(tmp_buf, "%s@%x,%x", |
240 | dp, | 240 | dp->name, |
241 | (unsigned int) (regs->phys_addr >> 32UL), | 241 | (unsigned int) (regs->phys_addr >> 32UL), |
242 | (unsigned int) (regs->phys_addr & 0xffffffffUL)); | 242 | (unsigned int) (regs->phys_addr & 0xffffffffUL)); |
243 | } | 243 | } |
@@ -257,8 +257,8 @@ static void __init i2c_path_component(struct device_node *dp, char *tmp_buf) | |||
257 | /* This actually isn't right... should look at the #address-cells | 257 | /* This actually isn't right... should look at the #address-cells |
258 | * property of the i2c bus node etc. etc. | 258 | * property of the i2c bus node etc. etc. |
259 | */ | 259 | */ |
260 | sprintf(tmp_buf, "%pOFn@%x,%x", | 260 | sprintf(tmp_buf, "%s@%x,%x", |
261 | dp, regs[0], regs[1]); | 261 | dp->name, regs[0], regs[1]); |
262 | } | 262 | } |
263 | 263 | ||
264 | /* "name@reg0[,reg1]" */ | 264 | /* "name@reg0[,reg1]" */ |
@@ -274,11 +274,11 @@ static void __init usb_path_component(struct device_node *dp, char *tmp_buf) | |||
274 | regs = prop->value; | 274 | regs = prop->value; |
275 | 275 | ||
276 | if (prop->length == sizeof(u32) || regs[1] == 1) { | 276 | if (prop->length == sizeof(u32) || regs[1] == 1) { |
277 | sprintf(tmp_buf, "%pOFn@%x", | 277 | sprintf(tmp_buf, "%s@%x", |
278 | dp, regs[0]); | 278 | dp->name, regs[0]); |
279 | } else { | 279 | } else { |
280 | sprintf(tmp_buf, "%pOFn@%x,%x", | 280 | sprintf(tmp_buf, "%s@%x,%x", |
281 | dp, regs[0], regs[1]); | 281 | dp->name, regs[0], regs[1]); |
282 | } | 282 | } |
283 | } | 283 | } |
284 | 284 | ||
@@ -295,11 +295,11 @@ static void __init ieee1394_path_component(struct device_node *dp, char *tmp_buf | |||
295 | regs = prop->value; | 295 | regs = prop->value; |
296 | 296 | ||
297 | if (regs[2] || regs[3]) { | 297 | if (regs[2] || regs[3]) { |
298 | sprintf(tmp_buf, "%pOFn@%08x%08x,%04x%08x", | 298 | sprintf(tmp_buf, "%s@%08x%08x,%04x%08x", |
299 | dp, regs[0], regs[1], regs[2], regs[3]); | 299 | dp->name, regs[0], regs[1], regs[2], regs[3]); |
300 | } else { | 300 | } else { |
301 | sprintf(tmp_buf, "%pOFn@%08x%08x", | 301 | sprintf(tmp_buf, "%s@%08x%08x", |
302 | dp, regs[0], regs[1]); | 302 | dp->name, regs[0], regs[1]); |
303 | } | 303 | } |
304 | } | 304 | } |
305 | 305 | ||
@@ -361,7 +361,7 @@ char * __init build_path_component(struct device_node *dp) | |||
361 | tmp_buf[0] = '\0'; | 361 | tmp_buf[0] = '\0'; |
362 | __build_path_component(dp, tmp_buf); | 362 | __build_path_component(dp, tmp_buf); |
363 | if (tmp_buf[0] == '\0') | 363 | if (tmp_buf[0] == '\0') |
364 | snprintf(tmp_buf, sizeof(tmp_buf), "%pOFn", dp); | 364 | strcpy(tmp_buf, dp->name); |
365 | 365 | ||
366 | n = prom_early_alloc(strlen(tmp_buf) + 1); | 366 | n = prom_early_alloc(strlen(tmp_buf) + 1); |
367 | strcpy(n, tmp_buf); | 367 | strcpy(n, tmp_buf); |
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
index f6528884a2c8..4073e2b87dd0 100644
--- a/arch/sparc/kernel/rtrap_64.S
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -84,8 +84,9 @@ __handle_signal: | |||
84 | ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1 | 84 | ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1 |
85 | sethi %hi(0xf << 20), %l4 | 85 | sethi %hi(0xf << 20), %l4 |
86 | and %l1, %l4, %l4 | 86 | and %l1, %l4, %l4 |
87 | andn %l1, %l4, %l1 | ||
87 | ba,pt %xcc, __handle_preemption_continue | 88 | ba,pt %xcc, __handle_preemption_continue |
88 | andn %l1, %l4, %l1 | 89 | srl %l4, 20, %l4 |
89 | 90 | ||
90 | /* When returning from a NMI (%pil==15) interrupt we want to | 91 | /* When returning from a NMI (%pil==15) interrupt we want to |
91 | * avoid running softirqs, doing IRQ tracing, preempting, etc. | 92 | * avoid running softirqs, doing IRQ tracing, preempting, etc. |
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 12bee14b552c..621a363098ec 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -90,4 +90,4 @@ sys_call_table: | |||
90 | /*345*/ .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf | 90 | /*345*/ .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf |
91 | /*350*/ .long sys_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen | 91 | /*350*/ .long sys_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen |
92 | /*355*/ .long sys_setsockopt, sys_mlock2, sys_copy_file_range, sys_preadv2, sys_pwritev2 | 92 | /*355*/ .long sys_setsockopt, sys_mlock2, sys_copy_file_range, sys_preadv2, sys_pwritev2 |
93 | /*360*/ .long sys_statx | 93 | /*360*/ .long sys_statx, sys_io_pgetevents |
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index 387ef993880a..bb68c805b891 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -91,7 +91,7 @@ sys_call_table32: | |||
91 | .word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf | 91 | .word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf |
92 | /*350*/ .word sys32_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen | 92 | /*350*/ .word sys32_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen |
93 | .word compat_sys_setsockopt, sys_mlock2, sys_copy_file_range, compat_sys_preadv2, compat_sys_pwritev2 | 93 | .word compat_sys_setsockopt, sys_mlock2, sys_copy_file_range, compat_sys_preadv2, compat_sys_pwritev2 |
94 | /*360*/ .word sys_statx | 94 | /*360*/ .word sys_statx, compat_sys_io_pgetevents |
95 | 95 | ||
96 | #endif /* CONFIG_COMPAT */ | 96 | #endif /* CONFIG_COMPAT */ |
97 | 97 | ||
@@ -173,4 +173,4 @@ sys_call_table: | |||
173 | .word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf | 173 | .word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf |
174 | /*350*/ .word sys64_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen | 174 | /*350*/ .word sys64_execveat, sys_membarrier, sys_userfaultfd, sys_bind, sys_listen |
175 | .word sys_setsockopt, sys_mlock2, sys_copy_file_range, sys_preadv2, sys_pwritev2 | 175 | .word sys_setsockopt, sys_mlock2, sys_copy_file_range, sys_preadv2, sys_pwritev2 |
176 | /*360*/ .word sys_statx | 176 | /*360*/ .word sys_statx, sys_io_pgetevents |
diff --git a/arch/sparc/vdso/vclock_gettime.c b/arch/sparc/vdso/vclock_gettime.c
index 3feb3d960ca5..75dca9aab737 100644
--- a/arch/sparc/vdso/vclock_gettime.c
+++ b/arch/sparc/vdso/vclock_gettime.c
@@ -33,9 +33,19 @@ | |||
33 | #define TICK_PRIV_BIT (1ULL << 63) | 33 | #define TICK_PRIV_BIT (1ULL << 63) |
34 | #endif | 34 | #endif |
35 | 35 | ||
36 | #ifdef CONFIG_SPARC64 | ||
36 | #define SYSCALL_STRING \ | 37 | #define SYSCALL_STRING \ |
37 | "ta 0x6d;" \ | 38 | "ta 0x6d;" \ |
38 | "sub %%g0, %%o0, %%o0;" \ | 39 | "bcs,a 1f;" \ |
40 | " sub %%g0, %%o0, %%o0;" \ | ||
41 | "1:" | ||
42 | #else | ||
43 | #define SYSCALL_STRING \ | ||
44 | "ta 0x10;" \ | ||
45 | "bcs,a 1f;" \ | ||
46 | " sub %%g0, %%o0, %%o0;" \ | ||
47 | "1:" | ||
48 | #endif | ||
39 | 49 | ||
40 | #define SYSCALL_CLOBBERS \ | 50 | #define SYSCALL_CLOBBERS \ |
41 | "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", \ | 51 | "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", \ |
diff --git a/arch/sparc/vdso/vma.c b/arch/sparc/vdso/vma.c index f51595f861b8..5eaff3c1aa0c 100644 --- a/arch/sparc/vdso/vma.c +++ b/arch/sparc/vdso/vma.c | |||
@@ -262,7 +262,9 @@ static __init int vdso_setup(char *s) | |||
262 | unsigned long val; | 262 | unsigned long val; |
263 | 263 | ||
264 | err = kstrtoul(s, 10, &val); | 264 | err = kstrtoul(s, 10, &val); |
265 | if (err) | ||
266 | return err; | ||
265 | vdso_enabled = val; | 267 | vdso_enabled = val; |
266 | return err; | 268 | return 0; |
267 | } | 269 | } |
268 | __setup("vdso=", vdso_setup); | 270 | __setup("vdso=", vdso_setup); |
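The vdso_setup() hunk above moves the kstrtoul() error check ahead of the assignment and returns 0 on success instead of echoing a stale error code. A rough standalone analog of that parse-then-commit pattern, with userspace strtoul() standing in for kstrtoul() and invented names:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static unsigned long demo_enabled;   /* stand-in for vdso_enabled */

/* Parse "s" as base 10; only update the setting if parsing succeeded. */
static int demo_setup(const char *s)
{
    char *end;
    unsigned long val;

    errno = 0;
    val = strtoul(s, &end, 10);
    if (errno || end == s || *end != '\0')
        return -EINVAL;          /* leave demo_enabled untouched on error */

    demo_enabled = val;
    return 0;                    /* report success, not a stale error */
}

int main(void)
{
    int err = demo_setup("1");
    printf("%d %lu\n", err, demo_enabled);   /* 0 1 */
    err = demo_setup("x");
    printf("%d %lu\n", err, demo_enabled);   /* error, value preserved */
    return 0;
}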
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index b64acb08a62b..106b7d0e2dae 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h | |||
@@ -124,7 +124,7 @@ | |||
124 | */ | 124 | */ |
125 | #define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \ | 125 | #define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \ |
126 | _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \ | 126 | _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \ |
127 | _PAGE_SOFT_DIRTY) | 127 | _PAGE_SOFT_DIRTY | _PAGE_DEVMAP) |
128 | #define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE) | 128 | #define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE) |
129 | 129 | ||
130 | /* | 130 | /* |
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index d96092b35936..61ccfb13899e 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -436,14 +436,18 @@ static inline struct kvm_svm *to_kvm_svm(struct kvm *kvm) | |||
436 | 436 | ||
437 | static inline bool svm_sev_enabled(void) | 437 | static inline bool svm_sev_enabled(void) |
438 | { | 438 | { |
439 | return max_sev_asid; | 439 | return IS_ENABLED(CONFIG_KVM_AMD_SEV) ? max_sev_asid : 0; |
440 | } | 440 | } |
441 | 441 | ||
442 | static inline bool sev_guest(struct kvm *kvm) | 442 | static inline bool sev_guest(struct kvm *kvm) |
443 | { | 443 | { |
444 | #ifdef CONFIG_KVM_AMD_SEV | ||
444 | struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; | 445 | struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info; |
445 | 446 | ||
446 | return sev->active; | 447 | return sev->active; |
448 | #else | ||
449 | return false; | ||
450 | #endif | ||
447 | } | 451 | } |
448 | 452 | ||
449 | static inline int sev_get_asid(struct kvm *kvm) | 453 | static inline int sev_get_asid(struct kvm *kvm) |
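The svm.c hunk above folds the Kconfig state into the helpers so the SEV-only paths become dead code when CONFIG_KVM_AMD_SEV is off. A loose userspace sketch of that compile-time gating, with an invented FEATURE macro standing in for the real config symbol:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for a Kconfig option; set to 0 to "compile out" the feature. */
#define FEATURE_SEV_ENABLED 1

static unsigned int max_asid = 16;   /* pretend firmware reported this */

static bool feature_enabled(void)
{
    /* With a constant condition the compiler folds this to 16 or 0. */
    return FEATURE_SEV_ENABLED ? max_asid : 0;
}

static bool guest_uses_feature(bool active_flag)
{
#if FEATURE_SEV_ENABLED
    return active_flag;          /* only meaningful when built in */
#else
    return false;                /* feature compiled out: never active */
#endif
}

int main(void)
{
    printf("%d %d\n", feature_enabled(), guest_uses_feature(true));
    return 0;
}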
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 612fd17be635..e665aa7167cf 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -1572,8 +1572,12 @@ static int vmx_hv_remote_flush_tlb(struct kvm *kvm) | |||
1572 | goto out; | 1572 | goto out; |
1573 | } | 1573 | } |
1574 | 1574 | ||
1575 | /* | ||
1576 | * FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE hypercall needs the address of the | ||
1577 | * base of EPT PML4 table, strip off EPT configuration information. | ||
1578 | */ | ||
1575 | ret = hyperv_flush_guest_mapping( | 1579 | ret = hyperv_flush_guest_mapping( |
1576 | to_vmx(kvm_get_vcpu(kvm, 0))->ept_pointer); | 1580 | to_vmx(kvm_get_vcpu(kvm, 0))->ept_pointer & PAGE_MASK); |
1577 | 1581 | ||
1578 | out: | 1582 | out: |
1579 | spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock); | 1583 | spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock); |
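The vmx.c hunk masks the cached EPT pointer with PAGE_MASK so only the page-aligned table base reaches the Hyper-V flush hypercall, with the low configuration bits stripped. A standalone sketch of that masking, assuming 4 KiB pages and an invented pointer value (not the real EPTP bit layout):

#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12
#define DEMO_PAGE_MASK  (~((uint64_t)(1 << DEMO_PAGE_SHIFT) - 1))

int main(void)
{
    /* Pretend pointer: page-aligned base 0x123456000 plus low config bits. */
    uint64_t eptp = 0x123456000ULL | 0x5e;
    uint64_t base = eptp & DEMO_PAGE_MASK;   /* keep only the page address */

    printf("eptp=0x%llx base=0x%llx\n",
           (unsigned long long)eptp, (unsigned long long)base);
    return 0;
}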
diff --git a/block/blk-wbt.c b/block/blk-wbt.c index 8e20a0677dcf..8ac93fcbaa2e 100644 --- a/block/blk-wbt.c +++ b/block/blk-wbt.c | |||
@@ -310,6 +310,7 @@ static void scale_up(struct rq_wb *rwb) | |||
310 | rq_depth_scale_up(&rwb->rq_depth); | 310 | rq_depth_scale_up(&rwb->rq_depth); |
311 | calc_wb_limits(rwb); | 311 | calc_wb_limits(rwb); |
312 | rwb->unknown_cnt = 0; | 312 | rwb->unknown_cnt = 0; |
313 | rwb_wake_all(rwb); | ||
313 | rwb_trace_step(rwb, "scale up"); | 314 | rwb_trace_step(rwb, "scale up"); |
314 | } | 315 | } |
315 | 316 | ||
@@ -318,7 +319,6 @@ static void scale_down(struct rq_wb *rwb, bool hard_throttle) | |||
318 | rq_depth_scale_down(&rwb->rq_depth, hard_throttle); | 319 | rq_depth_scale_down(&rwb->rq_depth, hard_throttle); |
319 | calc_wb_limits(rwb); | 320 | calc_wb_limits(rwb); |
320 | rwb->unknown_cnt = 0; | 321 | rwb->unknown_cnt = 0; |
321 | rwb_wake_all(rwb); | ||
322 | rwb_trace_step(rwb, "scale down"); | 322 | rwb_trace_step(rwb, "scale down"); |
323 | } | 323 | } |
324 | 324 | ||
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c index 5ca56bfae63c..f68e9baffad7 100644 --- a/drivers/block/sunvdc.c +++ b/drivers/block/sunvdc.c | |||
@@ -36,6 +36,10 @@ MODULE_VERSION(DRV_MODULE_VERSION); | |||
36 | #define VDC_TX_RING_SIZE 512 | 36 | #define VDC_TX_RING_SIZE 512 |
37 | #define VDC_DEFAULT_BLK_SIZE 512 | 37 | #define VDC_DEFAULT_BLK_SIZE 512 |
38 | 38 | ||
39 | #define MAX_XFER_BLKS (128 * 1024) | ||
40 | #define MAX_XFER_SIZE (MAX_XFER_BLKS / VDC_DEFAULT_BLK_SIZE) | ||
41 | #define MAX_RING_COOKIES ((MAX_XFER_BLKS / PAGE_SIZE) + 2) | ||
42 | |||
39 | #define WAITING_FOR_LINK_UP 0x01 | 43 | #define WAITING_FOR_LINK_UP 0x01 |
40 | #define WAITING_FOR_TX_SPACE 0x02 | 44 | #define WAITING_FOR_TX_SPACE 0x02 |
41 | #define WAITING_FOR_GEN_CMD 0x04 | 45 | #define WAITING_FOR_GEN_CMD 0x04 |
@@ -450,7 +454,7 @@ static int __send_request(struct request *req) | |||
450 | { | 454 | { |
451 | struct vdc_port *port = req->rq_disk->private_data; | 455 | struct vdc_port *port = req->rq_disk->private_data; |
452 | struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; | 456 | struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING]; |
453 | struct scatterlist sg[port->ring_cookies]; | 457 | struct scatterlist sg[MAX_RING_COOKIES]; |
454 | struct vdc_req_entry *rqe; | 458 | struct vdc_req_entry *rqe; |
455 | struct vio_disk_desc *desc; | 459 | struct vio_disk_desc *desc; |
456 | unsigned int map_perm; | 460 | unsigned int map_perm; |
@@ -458,6 +462,9 @@ static int __send_request(struct request *req) | |||
458 | u64 len; | 462 | u64 len; |
459 | u8 op; | 463 | u8 op; |
460 | 464 | ||
465 | if (WARN_ON(port->ring_cookies > MAX_RING_COOKIES)) | ||
466 | return -EINVAL; | ||
467 | |||
461 | map_perm = LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO; | 468 | map_perm = LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_IO; |
462 | 469 | ||
463 | if (rq_data_dir(req) == READ) { | 470 | if (rq_data_dir(req) == READ) { |
@@ -984,9 +991,8 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) | |||
984 | goto err_out_free_port; | 991 | goto err_out_free_port; |
985 | 992 | ||
986 | port->vdisk_block_size = VDC_DEFAULT_BLK_SIZE; | 993 | port->vdisk_block_size = VDC_DEFAULT_BLK_SIZE; |
987 | port->max_xfer_size = ((128 * 1024) / port->vdisk_block_size); | 994 | port->max_xfer_size = MAX_XFER_SIZE; |
988 | port->ring_cookies = ((port->max_xfer_size * | 995 | port->ring_cookies = MAX_RING_COOKIES; |
989 | port->vdisk_block_size) / PAGE_SIZE) + 2; | ||
990 | 996 | ||
991 | err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port); | 997 | err = vio_ldc_alloc(&port->vio, &vdc_ldc_cfg, port); |
992 | if (err) | 998 | if (err) |
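The sunvdc hunk replaces the variable-length scatterlist on the stack with compile-time bounds derived from the same arithmetic the probe path used. With the 128 KiB transfer limit and 512-byte block size from the hunk, plus an assumed 4 KiB PAGE_SIZE, the numbers work out as in this standalone sketch:

#include <stdio.h>

#define MAX_XFER_BLKS        (128 * 1024)
#define DEFAULT_BLK_SIZE     512
#define DEMO_PAGE_SIZE       4096                       /* assumption */

#define MAX_XFER_SIZE        (MAX_XFER_BLKS / DEFAULT_BLK_SIZE)
#define MAX_RING_COOKIES     ((MAX_XFER_BLKS / DEMO_PAGE_SIZE) + 2)

int main(void)
{
    /* 128 KiB / 512 B = 256 blocks; 128 KiB / 4 KiB + 2 = 34 cookies. */
    printf("max_xfer_size=%d max_ring_cookies=%d\n",
           MAX_XFER_SIZE, MAX_RING_COOKIES);

    /* The request path can size its scatterlist at compile time and only
     * sanity-check the negotiated cookie count against the fixed bound. */
    int ring_cookies = 30;                              /* example value */
    if (ring_cookies > MAX_RING_COOKIES)
        return 1;                                        /* driver returns -EINVAL */
    return 0;
}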
diff --git a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c index ffa5dac221e4..129ebd2588fd 100644 --- a/drivers/clk/sunxi-ng/ccu-sun4i-a10.c +++ b/drivers/clk/sunxi-ng/ccu-sun4i-a10.c | |||
@@ -1434,8 +1434,16 @@ static void __init sun4i_ccu_init(struct device_node *node, | |||
1434 | return; | 1434 | return; |
1435 | } | 1435 | } |
1436 | 1436 | ||
1437 | /* Force the PLL-Audio-1x divider to 1 */ | ||
1438 | val = readl(reg + SUN4I_PLL_AUDIO_REG); | 1437 | val = readl(reg + SUN4I_PLL_AUDIO_REG); |
1438 | |||
1439 | /* | ||
1440 | * Force VCO and PLL bias current to lowest setting. Higher | ||
1441 | * settings interfere with sigma-delta modulation and result | ||
1442 | * in audible noise and distortions when using SPDIF or I2S. | ||
1443 | */ | ||
1444 | val &= ~GENMASK(25, 16); | ||
1445 | |||
1446 | /* Force the PLL-Audio-1x divider to 1 */ | ||
1439 | val &= ~GENMASK(29, 26); | 1447 | val &= ~GENMASK(29, 26); |
1440 | writel(val | (1 << 26), reg + SUN4I_PLL_AUDIO_REG); | 1448 | writel(val | (1 << 26), reg + SUN4I_PLL_AUDIO_REG); |
1441 | 1449 | ||
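The sun4i CCU hunk clears bits 25:16 (VCO/PLL bias) and 29:26 (the 1x divider field) of the PLL-Audio register before forcing the divider to 1. A standalone sketch of that read-modify-write, using a simplified stand-in for the kernel's GENMASK() and a made-up register value:

#include <stdint.h>
#include <stdio.h>

/* Simplified 32-bit version of the kernel's GENMASK(h, l). */
#define DEMO_GENMASK(h, l) \
    ((uint32_t)((0xffffffffu >> (31 - (h))) & (0xffffffffu << (l))))

int main(void)
{
    uint32_t val = 0xdeadbeef;          /* pretend readl(PLL_AUDIO_REG) */

    val &= ~DEMO_GENMASK(25, 16);       /* VCO/PLL bias current -> lowest */
    val &= ~DEMO_GENMASK(29, 26);       /* clear the 1x divider field */
    val |= 1u << 26;                    /* force the 1x divider to 1 */

    printf("0x%08x\n", (unsigned)val);  /* driver writes this back with writel() */
    return 0;
}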
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index bae43938c8f6..9cbe8f5c9aca 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c | |||
@@ -567,9 +567,9 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, | |||
567 | struct drm_mode_crtc *crtc_req = data; | 567 | struct drm_mode_crtc *crtc_req = data; |
568 | struct drm_crtc *crtc; | 568 | struct drm_crtc *crtc; |
569 | struct drm_plane *plane; | 569 | struct drm_plane *plane; |
570 | struct drm_connector **connector_set = NULL, *connector; | 570 | struct drm_connector **connector_set, *connector; |
571 | struct drm_framebuffer *fb = NULL; | 571 | struct drm_framebuffer *fb; |
572 | struct drm_display_mode *mode = NULL; | 572 | struct drm_display_mode *mode; |
573 | struct drm_mode_set set; | 573 | struct drm_mode_set set; |
574 | uint32_t __user *set_connectors_ptr; | 574 | uint32_t __user *set_connectors_ptr; |
575 | struct drm_modeset_acquire_ctx ctx; | 575 | struct drm_modeset_acquire_ctx ctx; |
@@ -598,6 +598,10 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data, | |||
598 | mutex_lock(&crtc->dev->mode_config.mutex); | 598 | mutex_lock(&crtc->dev->mode_config.mutex); |
599 | drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); | 599 | drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); |
600 | retry: | 600 | retry: |
601 | connector_set = NULL; | ||
602 | fb = NULL; | ||
603 | mode = NULL; | ||
604 | |||
601 | ret = drm_modeset_lock_all_ctx(crtc->dev, &ctx); | 605 | ret = drm_modeset_lock_all_ctx(crtc->dev, &ctx); |
602 | if (ret) | 606 | if (ret) |
603 | goto out; | 607 | goto out; |
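The drm_crtc hunk re-initialises connector_set, fb and mode after the retry: label so a modeset-lock backoff cannot reach the out: cleanup a second time with the same stale pointers. A minimal standalone sketch of that retry-with-clean-state shape (the lock and allocation helpers are invented):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Pretend lock acquisition that "backs off" once before succeeding. */
static int try_lock_all(int attempt)
{
    return attempt == 0 ? -11 /* EAGAIN-style: drop locks and retry */ : 0;
}

static int do_setcrtc(void)
{
    char *connector_set;
    int ret, attempt = 0;

retry:
    connector_set = NULL;        /* reset per attempt, never reused stale */

    ret = try_lock_all(attempt);
    if (ret)
        goto out;

    connector_set = malloc(16);
    if (!connector_set) {
        ret = -12;               /* ENOMEM-style failure */
        goto out;
    }
    memset(connector_set, 0, 16);
    ret = 0;
out:
    free(connector_set);         /* safe: NULL or this attempt's buffer */
    if (ret == -11 && attempt++ < 3)
        goto retry;              /* backoff path restarts from a clean state */
    return ret;
}

int main(void)
{
    printf("%d\n", do_setcrtc());
    return 0;
}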
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 3c9fc99648b7..ff0bfc65a8c1 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c | |||
@@ -113,6 +113,9 @@ static const struct edid_quirk { | |||
113 | /* AEO model 0 reports 8 bpc, but is a 6 bpc panel */ | 113 | /* AEO model 0 reports 8 bpc, but is a 6 bpc panel */ |
114 | { "AEO", 0, EDID_QUIRK_FORCE_6BPC }, | 114 | { "AEO", 0, EDID_QUIRK_FORCE_6BPC }, |
115 | 115 | ||
116 | /* BOE model on HP Pavilion 15-n233sl reports 8 bpc, but is a 6 bpc panel */ | ||
117 | { "BOE", 0x78b, EDID_QUIRK_FORCE_6BPC }, | ||
118 | |||
116 | /* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */ | 119 | /* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */ |
117 | { "CPT", 0x17df, EDID_QUIRK_FORCE_6BPC }, | 120 | { "CPT", 0x17df, EDID_QUIRK_FORCE_6BPC }, |
118 | 121 | ||
@@ -4279,7 +4282,7 @@ static void drm_parse_ycbcr420_deep_color_info(struct drm_connector *connector, | |||
4279 | struct drm_hdmi_info *hdmi = &connector->display_info.hdmi; | 4282 | struct drm_hdmi_info *hdmi = &connector->display_info.hdmi; |
4280 | 4283 | ||
4281 | dc_mask = db[7] & DRM_EDID_YCBCR420_DC_MASK; | 4284 | dc_mask = db[7] & DRM_EDID_YCBCR420_DC_MASK; |
4282 | hdmi->y420_dc_modes |= dc_mask; | 4285 | hdmi->y420_dc_modes = dc_mask; |
4283 | } | 4286 | } |
4284 | 4287 | ||
4285 | static void drm_parse_hdmi_forum_vsdb(struct drm_connector *connector, | 4288 | static void drm_parse_hdmi_forum_vsdb(struct drm_connector *connector, |
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 515a7aec57ac..9628dd617826 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
@@ -1580,6 +1580,25 @@ unlock: | |||
1580 | } | 1580 | } |
1581 | EXPORT_SYMBOL(drm_fb_helper_ioctl); | 1581 | EXPORT_SYMBOL(drm_fb_helper_ioctl); |
1582 | 1582 | ||
1583 | static bool drm_fb_pixel_format_equal(const struct fb_var_screeninfo *var_1, | ||
1584 | const struct fb_var_screeninfo *var_2) | ||
1585 | { | ||
1586 | return var_1->bits_per_pixel == var_2->bits_per_pixel && | ||
1587 | var_1->grayscale == var_2->grayscale && | ||
1588 | var_1->red.offset == var_2->red.offset && | ||
1589 | var_1->red.length == var_2->red.length && | ||
1590 | var_1->red.msb_right == var_2->red.msb_right && | ||
1591 | var_1->green.offset == var_2->green.offset && | ||
1592 | var_1->green.length == var_2->green.length && | ||
1593 | var_1->green.msb_right == var_2->green.msb_right && | ||
1594 | var_1->blue.offset == var_2->blue.offset && | ||
1595 | var_1->blue.length == var_2->blue.length && | ||
1596 | var_1->blue.msb_right == var_2->blue.msb_right && | ||
1597 | var_1->transp.offset == var_2->transp.offset && | ||
1598 | var_1->transp.length == var_2->transp.length && | ||
1599 | var_1->transp.msb_right == var_2->transp.msb_right; | ||
1600 | } | ||
1601 | |||
1583 | /** | 1602 | /** |
1584 | * drm_fb_helper_check_var - implementation for &fb_ops.fb_check_var | 1603 | * drm_fb_helper_check_var - implementation for &fb_ops.fb_check_var |
1585 | * @var: screeninfo to check | 1604 | * @var: screeninfo to check |
@@ -1590,7 +1609,6 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var, | |||
1590 | { | 1609 | { |
1591 | struct drm_fb_helper *fb_helper = info->par; | 1610 | struct drm_fb_helper *fb_helper = info->par; |
1592 | struct drm_framebuffer *fb = fb_helper->fb; | 1611 | struct drm_framebuffer *fb = fb_helper->fb; |
1593 | int depth; | ||
1594 | 1612 | ||
1595 | if (var->pixclock != 0 || in_dbg_master()) | 1613 | if (var->pixclock != 0 || in_dbg_master()) |
1596 | return -EINVAL; | 1614 | return -EINVAL; |
@@ -1610,72 +1628,15 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var, | |||
1610 | return -EINVAL; | 1628 | return -EINVAL; |
1611 | } | 1629 | } |
1612 | 1630 | ||
1613 | switch (var->bits_per_pixel) { | 1631 | /* |
1614 | case 16: | 1632 | * drm fbdev emulation doesn't support changing the pixel format at all, |
1615 | depth = (var->green.length == 6) ? 16 : 15; | 1633 | * so reject all pixel format changing requests. |
1616 | break; | 1634 | */ |
1617 | case 32: | 1635 | if (!drm_fb_pixel_format_equal(var, &info->var)) { |
1618 | depth = (var->transp.length > 0) ? 32 : 24; | 1636 | DRM_DEBUG("fbdev emulation doesn't support changing the pixel format\n"); |
1619 | break; | ||
1620 | default: | ||
1621 | depth = var->bits_per_pixel; | ||
1622 | break; | ||
1623 | } | ||
1624 | |||
1625 | switch (depth) { | ||
1626 | case 8: | ||
1627 | var->red.offset = 0; | ||
1628 | var->green.offset = 0; | ||
1629 | var->blue.offset = 0; | ||
1630 | var->red.length = 8; | ||
1631 | var->green.length = 8; | ||
1632 | var->blue.length = 8; | ||
1633 | var->transp.length = 0; | ||
1634 | var->transp.offset = 0; | ||
1635 | break; | ||
1636 | case 15: | ||
1637 | var->red.offset = 10; | ||
1638 | var->green.offset = 5; | ||
1639 | var->blue.offset = 0; | ||
1640 | var->red.length = 5; | ||
1641 | var->green.length = 5; | ||
1642 | var->blue.length = 5; | ||
1643 | var->transp.length = 1; | ||
1644 | var->transp.offset = 15; | ||
1645 | break; | ||
1646 | case 16: | ||
1647 | var->red.offset = 11; | ||
1648 | var->green.offset = 5; | ||
1649 | var->blue.offset = 0; | ||
1650 | var->red.length = 5; | ||
1651 | var->green.length = 6; | ||
1652 | var->blue.length = 5; | ||
1653 | var->transp.length = 0; | ||
1654 | var->transp.offset = 0; | ||
1655 | break; | ||
1656 | case 24: | ||
1657 | var->red.offset = 16; | ||
1658 | var->green.offset = 8; | ||
1659 | var->blue.offset = 0; | ||
1660 | var->red.length = 8; | ||
1661 | var->green.length = 8; | ||
1662 | var->blue.length = 8; | ||
1663 | var->transp.length = 0; | ||
1664 | var->transp.offset = 0; | ||
1665 | break; | ||
1666 | case 32: | ||
1667 | var->red.offset = 16; | ||
1668 | var->green.offset = 8; | ||
1669 | var->blue.offset = 0; | ||
1670 | var->red.length = 8; | ||
1671 | var->green.length = 8; | ||
1672 | var->blue.length = 8; | ||
1673 | var->transp.length = 8; | ||
1674 | var->transp.offset = 24; | ||
1675 | break; | ||
1676 | default: | ||
1677 | return -EINVAL; | 1637 | return -EINVAL; |
1678 | } | 1638 | } |
1639 | |||
1679 | return 0; | 1640 | return 0; |
1680 | } | 1641 | } |
1681 | EXPORT_SYMBOL(drm_fb_helper_check_var); | 1642 | EXPORT_SYMBOL(drm_fb_helper_check_var); |
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c index 9ee9a15e7134..9200e349f29e 100644 --- a/drivers/i2c/i2c-core-base.c +++ b/drivers/i2c/i2c-core-base.c | |||
@@ -2270,7 +2270,7 @@ EXPORT_SYMBOL(i2c_put_adapter); | |||
2270 | * | 2270 | * |
2271 | * Return: NULL if a DMA safe buffer was not obtained. Use msg->buf with PIO. | 2271 | * Return: NULL if a DMA safe buffer was not obtained. Use msg->buf with PIO. |
2272 | * Or a valid pointer to be used with DMA. After use, release it by | 2272 | * Or a valid pointer to be used with DMA. After use, release it by |
2273 | * calling i2c_release_dma_safe_msg_buf(). | 2273 | * calling i2c_put_dma_safe_msg_buf(). |
2274 | * | 2274 | * |
2275 | * This function must only be called from process context! | 2275 | * This function must only be called from process context! |
2276 | */ | 2276 | */ |
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c index faa9e6116b2f..73332b9a25b5 100644 --- a/drivers/infiniband/core/ucm.c +++ b/drivers/infiniband/core/ucm.c | |||
@@ -46,6 +46,8 @@ | |||
46 | #include <linux/mutex.h> | 46 | #include <linux/mutex.h> |
47 | #include <linux/slab.h> | 47 | #include <linux/slab.h> |
48 | 48 | ||
49 | #include <linux/nospec.h> | ||
50 | |||
49 | #include <linux/uaccess.h> | 51 | #include <linux/uaccess.h> |
50 | 52 | ||
51 | #include <rdma/ib.h> | 53 | #include <rdma/ib.h> |
@@ -1120,6 +1122,7 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf, | |||
1120 | 1122 | ||
1121 | if (hdr.cmd >= ARRAY_SIZE(ucm_cmd_table)) | 1123 | if (hdr.cmd >= ARRAY_SIZE(ucm_cmd_table)) |
1122 | return -EINVAL; | 1124 | return -EINVAL; |
1125 | hdr.cmd = array_index_nospec(hdr.cmd, ARRAY_SIZE(ucm_cmd_table)); | ||
1123 | 1126 | ||
1124 | if (hdr.in + sizeof(hdr) > len) | 1127 | if (hdr.in + sizeof(hdr) > len) |
1125 | return -EINVAL; | 1128 | return -EINVAL; |
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index 21863ddde63e..01d68ed46c1b 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c | |||
@@ -44,6 +44,8 @@ | |||
44 | #include <linux/module.h> | 44 | #include <linux/module.h> |
45 | #include <linux/nsproxy.h> | 45 | #include <linux/nsproxy.h> |
46 | 46 | ||
47 | #include <linux/nospec.h> | ||
48 | |||
47 | #include <rdma/rdma_user_cm.h> | 49 | #include <rdma/rdma_user_cm.h> |
48 | #include <rdma/ib_marshall.h> | 50 | #include <rdma/ib_marshall.h> |
49 | #include <rdma/rdma_cm.h> | 51 | #include <rdma/rdma_cm.h> |
@@ -1676,6 +1678,7 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf, | |||
1676 | 1678 | ||
1677 | if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table)) | 1679 | if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table)) |
1678 | return -EINVAL; | 1680 | return -EINVAL; |
1681 | hdr.cmd = array_index_nospec(hdr.cmd, ARRAY_SIZE(ucma_cmd_table)); | ||
1679 | 1682 | ||
1680 | if (hdr.in + sizeof(hdr) > len) | 1683 | if (hdr.in + sizeof(hdr) > len) |
1681 | return -EINVAL; | 1684 | return -EINVAL; |
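Both the ucm.c and ucma.c hunks clamp the user-supplied command index with array_index_nospec() after the bounds check, so a mispredicted branch cannot speculatively index past the dispatch table. The kernel helper builds a branch-free mask; a simplified standalone sketch of that idea (not the kernel's exact implementation):

#include <stddef.h>
#include <stdio.h>

/* Branch-free clamp: returns idx when idx < size, and 0 otherwise,
 * without a conditional branch the CPU could mispredict. */
static size_t demo_index_nospec(size_t idx, size_t size)
{
    /* mask is all-ones when idx < size, all-zeroes otherwise. */
    size_t mask = (size_t)0 - (idx < size);
    return idx & mask;
}

static const char *cmd_table[] = { "create", "destroy", "query" };

int main(void)
{
    size_t idx = 2;   /* pretend hdr.cmd, already range-checked below */

    if (idx >= sizeof(cmd_table) / sizeof(cmd_table[0]))
        return 1;     /* architectural bounds check, as in the hunks above */
    idx = demo_index_nospec(idx, sizeof(cmd_table) / sizeof(cmd_table[0]));

    printf("%s\n", cmd_table[idx]);
    return 0;
}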
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index b756fc79424e..35564a8a48f9 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c | |||
@@ -320,9 +320,12 @@ int bcmgenet_mii_probe(struct net_device *dev) | |||
320 | phydev->advertising = phydev->supported; | 320 | phydev->advertising = phydev->supported; |
321 | 321 | ||
322 | /* The internal PHY has its link interrupts routed to the | 322 | /* The internal PHY has its link interrupts routed to the |
323 | * Ethernet MAC ISRs | 323 | * Ethernet MAC ISRs. On GENETv5 there is a hardware issue |
324 | * that prevents the signaling of link UP interrupts when | ||
325 | * the link operates at 10Mbps, so fallback to polling for | ||
326 | * those versions of GENET. | ||
324 | */ | 327 | */ |
325 | if (priv->internal_phy) | 328 | if (priv->internal_phy && !GENET_IS_V5(priv)) |
326 | dev->phydev->irq = PHY_IGNORE_INTERRUPT; | 329 | dev->phydev->irq = PHY_IGNORE_INTERRUPT; |
327 | 330 | ||
328 | return 0; | 331 | return 0; |
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 4778b663653e..bf80855dd0dd 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h | |||
@@ -452,6 +452,10 @@ struct bufdesc_ex { | |||
452 | * initialisation. | 452 | * initialisation. |
453 | */ | 453 | */ |
454 | #define FEC_QUIRK_MIB_CLEAR (1 << 15) | 454 | #define FEC_QUIRK_MIB_CLEAR (1 << 15) |
455 | /* Only i.MX25/i.MX27/i.MX28 controller supports FRBR,FRSR registers, | ||
456 | * those FIFO receive registers are resolved in other platforms. | ||
457 | */ | ||
458 | #define FEC_QUIRK_HAS_FRREG (1 << 16) | ||
455 | 459 | ||
456 | struct bufdesc_prop { | 460 | struct bufdesc_prop { |
457 | int qid; | 461 | int qid; |
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index a17cc973d9a3..6db69ba30dcd 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c | |||
@@ -91,14 +91,16 @@ static struct platform_device_id fec_devtype[] = { | |||
91 | .driver_data = 0, | 91 | .driver_data = 0, |
92 | }, { | 92 | }, { |
93 | .name = "imx25-fec", | 93 | .name = "imx25-fec", |
94 | .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR, | 94 | .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR | |
95 | FEC_QUIRK_HAS_FRREG, | ||
95 | }, { | 96 | }, { |
96 | .name = "imx27-fec", | 97 | .name = "imx27-fec", |
97 | .driver_data = FEC_QUIRK_MIB_CLEAR, | 98 | .driver_data = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG, |
98 | }, { | 99 | }, { |
99 | .name = "imx28-fec", | 100 | .name = "imx28-fec", |
100 | .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME | | 101 | .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME | |
101 | FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC, | 102 | FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC | |
103 | FEC_QUIRK_HAS_FRREG, | ||
102 | }, { | 104 | }, { |
103 | .name = "imx6q-fec", | 105 | .name = "imx6q-fec", |
104 | .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | | 106 | .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | |
@@ -2162,7 +2164,13 @@ static void fec_enet_get_regs(struct net_device *ndev, | |||
2162 | memset(buf, 0, regs->len); | 2164 | memset(buf, 0, regs->len); |
2163 | 2165 | ||
2164 | for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) { | 2166 | for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) { |
2165 | off = fec_enet_register_offset[i] / 4; | 2167 | off = fec_enet_register_offset[i]; |
2168 | |||
2169 | if ((off == FEC_R_BOUND || off == FEC_R_FSTART) && | ||
2170 | !(fep->quirks & FEC_QUIRK_HAS_FRREG)) | ||
2171 | continue; | ||
2172 | |||
2173 | off >>= 2; | ||
2166 | buf[off] = readl(&theregs[off]); | 2174 | buf[off] = readl(&theregs[off]); |
2167 | } | 2175 | } |
2168 | } | 2176 | } |
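The fec_main.c hunk keeps the table in byte offsets, skips FRBR/FRSR on controllers without the quirk, and only then shifts the offset down to a u32 index into the dump buffer. A standalone sketch of that offset-to-index loop (the register names, offsets and quirk bit below are invented):

#include <stdint.h>
#include <stdio.h>

#define DEMO_QUIRK_HAS_FRREG  (1u << 0)
#define DEMO_REG_FRBR         0x19c        /* invented byte offsets */
#define DEMO_REG_FRSR         0x150
#define DEMO_REG_ECR          0x024

static const uint32_t reg_offsets[] = { DEMO_REG_ECR, DEMO_REG_FRSR, DEMO_REG_FRBR };

int main(void)
{
    uint32_t regs[0x200 / 4] = { 0 };      /* pretend register file */
    uint32_t buf[0x200 / 4] = { 0 };
    uint32_t quirks = 0;                    /* FRREG not supported here */
    size_t i;

    regs[DEMO_REG_ECR / 4] = 0xf0000000;    /* fake hardware contents */

    for (i = 0; i < sizeof(reg_offsets) / sizeof(reg_offsets[0]); i++) {
        uint32_t off = reg_offsets[i];

        /* Skip FIFO receive registers this controller does not implement. */
        if ((off == DEMO_REG_FRBR || off == DEMO_REG_FRSR) &&
            !(quirks & DEMO_QUIRK_HAS_FRREG))
            continue;

        off >>= 2;                          /* byte offset -> u32 index */
        buf[off] = regs[off];               /* readl() in the driver */
    }

    printf("0x%08x\n", (unsigned)buf[DEMO_REG_ECR / 4]);
    return 0;
}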
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index f19067c94272..2f7fb8de6967 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | |||
@@ -433,10 +433,9 @@ static inline u16 mlx5e_icosq_wrap_cnt(struct mlx5e_icosq *sq) | |||
433 | 433 | ||
434 | static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq, | 434 | static inline void mlx5e_fill_icosq_frag_edge(struct mlx5e_icosq *sq, |
435 | struct mlx5_wq_cyc *wq, | 435 | struct mlx5_wq_cyc *wq, |
436 | u16 pi, u16 frag_pi) | 436 | u16 pi, u16 nnops) |
437 | { | 437 | { |
438 | struct mlx5e_sq_wqe_info *edge_wi, *wi = &sq->db.ico_wqe[pi]; | 438 | struct mlx5e_sq_wqe_info *edge_wi, *wi = &sq->db.ico_wqe[pi]; |
439 | u8 nnops = mlx5_wq_cyc_get_frag_size(wq) - frag_pi; | ||
440 | 439 | ||
441 | edge_wi = wi + nnops; | 440 | edge_wi = wi + nnops; |
442 | 441 | ||
@@ -455,15 +454,14 @@ static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) | |||
455 | struct mlx5_wq_cyc *wq = &sq->wq; | 454 | struct mlx5_wq_cyc *wq = &sq->wq; |
456 | struct mlx5e_umr_wqe *umr_wqe; | 455 | struct mlx5e_umr_wqe *umr_wqe; |
457 | u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1); | 456 | u16 xlt_offset = ix << (MLX5E_LOG_ALIGNED_MPWQE_PPW - 1); |
458 | u16 pi, frag_pi; | 457 | u16 pi, contig_wqebbs_room; |
459 | int err; | 458 | int err; |
460 | int i; | 459 | int i; |
461 | 460 | ||
462 | pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); | 461 | pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); |
463 | frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc); | 462 | contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi); |
464 | 463 | if (unlikely(contig_wqebbs_room < MLX5E_UMR_WQEBBS)) { | |
465 | if (unlikely(frag_pi + MLX5E_UMR_WQEBBS > mlx5_wq_cyc_get_frag_size(wq))) { | 464 | mlx5e_fill_icosq_frag_edge(sq, wq, pi, contig_wqebbs_room); |
466 | mlx5e_fill_icosq_frag_edge(sq, wq, pi, frag_pi); | ||
467 | pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); | 465 | pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); |
468 | } | 466 | } |
469 | 467 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index ae73ea992845..6dacaeba2fbf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | |||
@@ -290,10 +290,9 @@ dma_unmap_wqe_err: | |||
290 | 290 | ||
291 | static inline void mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq, | 291 | static inline void mlx5e_fill_sq_frag_edge(struct mlx5e_txqsq *sq, |
292 | struct mlx5_wq_cyc *wq, | 292 | struct mlx5_wq_cyc *wq, |
293 | u16 pi, u16 frag_pi) | 293 | u16 pi, u16 nnops) |
294 | { | 294 | { |
295 | struct mlx5e_tx_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi]; | 295 | struct mlx5e_tx_wqe_info *edge_wi, *wi = &sq->db.wqe_info[pi]; |
296 | u8 nnops = mlx5_wq_cyc_get_frag_size(wq) - frag_pi; | ||
297 | 296 | ||
298 | edge_wi = wi + nnops; | 297 | edge_wi = wi + nnops; |
299 | 298 | ||
@@ -348,8 +347,8 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, | |||
348 | struct mlx5e_tx_wqe_info *wi; | 347 | struct mlx5e_tx_wqe_info *wi; |
349 | 348 | ||
350 | struct mlx5e_sq_stats *stats = sq->stats; | 349 | struct mlx5e_sq_stats *stats = sq->stats; |
350 | u16 headlen, ihs, contig_wqebbs_room; | ||
351 | u16 ds_cnt, ds_cnt_inl = 0; | 351 | u16 ds_cnt, ds_cnt_inl = 0; |
352 | u16 headlen, ihs, frag_pi; | ||
353 | u8 num_wqebbs, opcode; | 352 | u8 num_wqebbs, opcode; |
354 | u32 num_bytes; | 353 | u32 num_bytes; |
355 | int num_dma; | 354 | int num_dma; |
@@ -386,9 +385,9 @@ netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, | |||
386 | } | 385 | } |
387 | 386 | ||
388 | num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); | 387 | num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); |
389 | frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc); | 388 | contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi); |
390 | if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) { | 389 | if (unlikely(contig_wqebbs_room < num_wqebbs)) { |
391 | mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi); | 390 | mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room); |
392 | mlx5e_sq_fetch_wqe(sq, &wqe, &pi); | 391 | mlx5e_sq_fetch_wqe(sq, &wqe, &pi); |
393 | } | 392 | } |
394 | 393 | ||
@@ -636,7 +635,7 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, | |||
636 | struct mlx5e_tx_wqe_info *wi; | 635 | struct mlx5e_tx_wqe_info *wi; |
637 | 636 | ||
638 | struct mlx5e_sq_stats *stats = sq->stats; | 637 | struct mlx5e_sq_stats *stats = sq->stats; |
639 | u16 headlen, ihs, pi, frag_pi; | 638 | u16 headlen, ihs, pi, contig_wqebbs_room; |
640 | u16 ds_cnt, ds_cnt_inl = 0; | 639 | u16 ds_cnt, ds_cnt_inl = 0; |
641 | u8 num_wqebbs, opcode; | 640 | u8 num_wqebbs, opcode; |
642 | u32 num_bytes; | 641 | u32 num_bytes; |
@@ -672,13 +671,14 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, | |||
672 | } | 671 | } |
673 | 672 | ||
674 | num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); | 673 | num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS); |
675 | frag_pi = mlx5_wq_cyc_ctr2fragix(wq, sq->pc); | 674 | pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); |
676 | if (unlikely(frag_pi + num_wqebbs > mlx5_wq_cyc_get_frag_size(wq))) { | 675 | contig_wqebbs_room = mlx5_wq_cyc_get_contig_wqebbs(wq, pi); |
676 | if (unlikely(contig_wqebbs_room < num_wqebbs)) { | ||
677 | mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room); | ||
677 | pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); | 678 | pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); |
678 | mlx5e_fill_sq_frag_edge(sq, wq, pi, frag_pi); | ||
679 | } | 679 | } |
680 | 680 | ||
681 | mlx5i_sq_fetch_wqe(sq, &wqe, &pi); | 681 | mlx5i_sq_fetch_wqe(sq, &wqe, pi); |
682 | 682 | ||
683 | /* fill wqe */ | 683 | /* fill wqe */ |
684 | wi = &sq->db.wqe_info[pi]; | 684 | wi = &sq->db.wqe_info[pi]; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 48864f4988a4..c1e1a16a9b07 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c | |||
@@ -273,7 +273,7 @@ static void eq_pf_process(struct mlx5_eq *eq) | |||
273 | case MLX5_PFAULT_SUBTYPE_WQE: | 273 | case MLX5_PFAULT_SUBTYPE_WQE: |
274 | /* WQE based event */ | 274 | /* WQE based event */ |
275 | pfault->type = | 275 | pfault->type = |
276 | be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24; | 276 | (be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24) & 0x7; |
277 | pfault->token = | 277 | pfault->token = |
278 | be32_to_cpu(pf_eqe->wqe.token); | 278 | be32_to_cpu(pf_eqe->wqe.token); |
279 | pfault->wqe.wq_num = | 279 | pfault->wqe.wq_num = |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c index 28aa8c968a80..515e3d6de051 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c | |||
@@ -245,7 +245,7 @@ static void *mlx5_fpga_ipsec_cmd_exec(struct mlx5_core_dev *mdev, | |||
245 | return ERR_PTR(res); | 245 | return ERR_PTR(res); |
246 | } | 246 | } |
247 | 247 | ||
248 | /* Context will be freed by wait func after completion */ | 248 | /* Context should be freed by the caller after completion. */ |
249 | return context; | 249 | return context; |
250 | } | 250 | } |
251 | 251 | ||
@@ -418,10 +418,8 @@ static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags) | |||
418 | cmd.cmd = htonl(MLX5_FPGA_IPSEC_CMD_OP_SET_CAP); | 418 | cmd.cmd = htonl(MLX5_FPGA_IPSEC_CMD_OP_SET_CAP); |
419 | cmd.flags = htonl(flags); | 419 | cmd.flags = htonl(flags); |
420 | context = mlx5_fpga_ipsec_cmd_exec(mdev, &cmd, sizeof(cmd)); | 420 | context = mlx5_fpga_ipsec_cmd_exec(mdev, &cmd, sizeof(cmd)); |
421 | if (IS_ERR(context)) { | 421 | if (IS_ERR(context)) |
422 | err = PTR_ERR(context); | 422 | return PTR_ERR(context); |
423 | goto out; | ||
424 | } | ||
425 | 423 | ||
426 | err = mlx5_fpga_ipsec_cmd_wait(context); | 424 | err = mlx5_fpga_ipsec_cmd_wait(context); |
427 | if (err) | 425 | if (err) |
@@ -435,6 +433,7 @@ static int mlx5_fpga_ipsec_set_caps(struct mlx5_core_dev *mdev, u32 flags) | |||
435 | } | 433 | } |
436 | 434 | ||
437 | out: | 435 | out: |
436 | kfree(context); | ||
438 | return err; | 437 | return err; |
439 | } | 438 | } |
440 | 439 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h index 5ef3ef0072b4..9165ca567047 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h | |||
@@ -110,12 +110,11 @@ struct mlx5i_tx_wqe { | |||
110 | 110 | ||
111 | static inline void mlx5i_sq_fetch_wqe(struct mlx5e_txqsq *sq, | 111 | static inline void mlx5i_sq_fetch_wqe(struct mlx5e_txqsq *sq, |
112 | struct mlx5i_tx_wqe **wqe, | 112 | struct mlx5i_tx_wqe **wqe, |
113 | u16 *pi) | 113 | u16 pi) |
114 | { | 114 | { |
115 | struct mlx5_wq_cyc *wq = &sq->wq; | 115 | struct mlx5_wq_cyc *wq = &sq->wq; |
116 | 116 | ||
117 | *pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); | 117 | *wqe = mlx5_wq_cyc_get_wqe(wq, pi); |
118 | *wqe = mlx5_wq_cyc_get_wqe(wq, *pi); | ||
119 | memset(*wqe, 0, sizeof(**wqe)); | 118 | memset(*wqe, 0, sizeof(**wqe)); |
120 | } | 119 | } |
121 | 120 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c index 9007e91ad53f..2dcbf1ebfd6a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c | |||
@@ -39,11 +39,6 @@ u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq) | |||
39 | return (u32)wq->fbc.sz_m1 + 1; | 39 | return (u32)wq->fbc.sz_m1 + 1; |
40 | } | 40 | } |
41 | 41 | ||
42 | u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq) | ||
43 | { | ||
44 | return wq->fbc.frag_sz_m1 + 1; | ||
45 | } | ||
46 | |||
47 | u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq) | 42 | u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq) |
48 | { | 43 | { |
49 | return wq->fbc.sz_m1 + 1; | 44 | return wq->fbc.sz_m1 + 1; |
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h index 3a1a170bb2d7..b1293d153a58 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h | |||
@@ -80,7 +80,6 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, | |||
80 | void *wqc, struct mlx5_wq_cyc *wq, | 80 | void *wqc, struct mlx5_wq_cyc *wq, |
81 | struct mlx5_wq_ctrl *wq_ctrl); | 81 | struct mlx5_wq_ctrl *wq_ctrl); |
82 | u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq); | 82 | u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq); |
83 | u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq); | ||
84 | 83 | ||
85 | int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, | 84 | int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, |
86 | void *qpc, struct mlx5_wq_qp *wq, | 85 | void *qpc, struct mlx5_wq_qp *wq, |
@@ -140,11 +139,6 @@ static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr) | |||
140 | return ctr & wq->fbc.sz_m1; | 139 | return ctr & wq->fbc.sz_m1; |
141 | } | 140 | } |
142 | 141 | ||
143 | static inline u16 mlx5_wq_cyc_ctr2fragix(struct mlx5_wq_cyc *wq, u16 ctr) | ||
144 | { | ||
145 | return ctr & wq->fbc.frag_sz_m1; | ||
146 | } | ||
147 | |||
148 | static inline u16 mlx5_wq_cyc_get_head(struct mlx5_wq_cyc *wq) | 142 | static inline u16 mlx5_wq_cyc_get_head(struct mlx5_wq_cyc *wq) |
149 | { | 143 | { |
150 | return mlx5_wq_cyc_ctr2ix(wq, wq->wqe_ctr); | 144 | return mlx5_wq_cyc_ctr2ix(wq, wq->wqe_ctr); |
@@ -160,6 +154,11 @@ static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix) | |||
160 | return mlx5_frag_buf_get_wqe(&wq->fbc, ix); | 154 | return mlx5_frag_buf_get_wqe(&wq->fbc, ix); |
161 | } | 155 | } |
162 | 156 | ||
157 | static inline u16 mlx5_wq_cyc_get_contig_wqebbs(struct mlx5_wq_cyc *wq, u16 ix) | ||
158 | { | ||
159 | return mlx5_frag_buf_get_idx_last_contig_stride(&wq->fbc, ix) - ix + 1; | ||
160 | } | ||
161 | |||
163 | static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2) | 162 | static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2) |
164 | { | 163 | { |
165 | int equal = (cc1 == cc2); | 164 | int equal = (cc1 == cc2); |
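The wq.h hunk replaces the per-fragment wrap check with a helper that reports how many WQE basic blocks remain contiguous from a given index before the fragment ends. With a power-of-two fragment size that is just the distance to the fragment edge, as in this standalone sketch (names and sizes invented):

#include <stdint.h>
#include <stdio.h>

#define DEMO_FRAG_SZ   64u            /* WQEBBs per fragment, power of two */

/* How many entries from ix to the end of ix's fragment, inclusive. */
static uint16_t demo_contig_room(uint16_t ix)
{
    uint16_t last_in_frag = (uint16_t)(ix | (DEMO_FRAG_SZ - 1));
    return (uint16_t)(last_in_frag - ix + 1);
}

int main(void)
{
    /* A 3-WQEBB posting at index 62 does not fit in the 2 remaining slots,
     * so the producer pads with NOPs to the fragment edge first. */
    uint16_t pi = 62, num_wqebbs = 3;
    uint16_t room = demo_contig_room(pi);

    printf("room=%u pad=%u\n", (unsigned)room,
           (unsigned)(room < num_wqebbs ? room : 0));
    return 0;
}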
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index 81533d7f395c..937d0ace699a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c | |||
@@ -1055,6 +1055,7 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, | |||
1055 | err_driver_init: | 1055 | err_driver_init: |
1056 | mlxsw_thermal_fini(mlxsw_core->thermal); | 1056 | mlxsw_thermal_fini(mlxsw_core->thermal); |
1057 | err_thermal_init: | 1057 | err_thermal_init: |
1058 | mlxsw_hwmon_fini(mlxsw_core->hwmon); | ||
1058 | err_hwmon_init: | 1059 | err_hwmon_init: |
1059 | if (!reload) | 1060 | if (!reload) |
1060 | devlink_unregister(devlink); | 1061 | devlink_unregister(devlink); |
@@ -1088,6 +1089,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, | |||
1088 | if (mlxsw_core->driver->fini) | 1089 | if (mlxsw_core->driver->fini) |
1089 | mlxsw_core->driver->fini(mlxsw_core); | 1090 | mlxsw_core->driver->fini(mlxsw_core); |
1090 | mlxsw_thermal_fini(mlxsw_core->thermal); | 1091 | mlxsw_thermal_fini(mlxsw_core->thermal); |
1092 | mlxsw_hwmon_fini(mlxsw_core->hwmon); | ||
1091 | if (!reload) | 1093 | if (!reload) |
1092 | devlink_unregister(devlink); | 1094 | devlink_unregister(devlink); |
1093 | mlxsw_emad_fini(mlxsw_core); | 1095 | mlxsw_emad_fini(mlxsw_core); |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index 655ddd204ab2..c35be477856f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h | |||
@@ -359,6 +359,10 @@ static inline int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core, | |||
359 | return 0; | 359 | return 0; |
360 | } | 360 | } |
361 | 361 | ||
362 | static inline void mlxsw_hwmon_fini(struct mlxsw_hwmon *mlxsw_hwmon) | ||
363 | { | ||
364 | } | ||
365 | |||
362 | #endif | 366 | #endif |
363 | 367 | ||
364 | struct mlxsw_thermal; | 368 | struct mlxsw_thermal; |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c index f6cf2896d337..e04e8162aa14 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c | |||
@@ -303,8 +303,7 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core, | |||
303 | struct device *hwmon_dev; | 303 | struct device *hwmon_dev; |
304 | int err; | 304 | int err; |
305 | 305 | ||
306 | mlxsw_hwmon = devm_kzalloc(mlxsw_bus_info->dev, sizeof(*mlxsw_hwmon), | 306 | mlxsw_hwmon = kzalloc(sizeof(*mlxsw_hwmon), GFP_KERNEL); |
307 | GFP_KERNEL); | ||
308 | if (!mlxsw_hwmon) | 307 | if (!mlxsw_hwmon) |
309 | return -ENOMEM; | 308 | return -ENOMEM; |
310 | mlxsw_hwmon->core = mlxsw_core; | 309 | mlxsw_hwmon->core = mlxsw_core; |
@@ -321,10 +320,9 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core, | |||
321 | mlxsw_hwmon->groups[0] = &mlxsw_hwmon->group; | 320 | mlxsw_hwmon->groups[0] = &mlxsw_hwmon->group; |
322 | mlxsw_hwmon->group.attrs = mlxsw_hwmon->attrs; | 321 | mlxsw_hwmon->group.attrs = mlxsw_hwmon->attrs; |
323 | 322 | ||
324 | hwmon_dev = devm_hwmon_device_register_with_groups(mlxsw_bus_info->dev, | 323 | hwmon_dev = hwmon_device_register_with_groups(mlxsw_bus_info->dev, |
325 | "mlxsw", | 324 | "mlxsw", mlxsw_hwmon, |
326 | mlxsw_hwmon, | 325 | mlxsw_hwmon->groups); |
327 | mlxsw_hwmon->groups); | ||
328 | if (IS_ERR(hwmon_dev)) { | 326 | if (IS_ERR(hwmon_dev)) { |
329 | err = PTR_ERR(hwmon_dev); | 327 | err = PTR_ERR(hwmon_dev); |
330 | goto err_hwmon_register; | 328 | goto err_hwmon_register; |
@@ -337,5 +335,12 @@ int mlxsw_hwmon_init(struct mlxsw_core *mlxsw_core, | |||
337 | err_hwmon_register: | 335 | err_hwmon_register: |
338 | err_fans_init: | 336 | err_fans_init: |
339 | err_temp_init: | 337 | err_temp_init: |
338 | kfree(mlxsw_hwmon); | ||
340 | return err; | 339 | return err; |
341 | } | 340 | } |
341 | |||
342 | void mlxsw_hwmon_fini(struct mlxsw_hwmon *mlxsw_hwmon) | ||
343 | { | ||
344 | hwmon_device_unregister(mlxsw_hwmon->hwmon_dev); | ||
345 | kfree(mlxsw_hwmon); | ||
346 | } | ||
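The hwmon hunks switch from devm-managed allocation and registration to explicit ones, paired with a new mlxsw_hwmon_fini() that both the error-unwind and removal paths call. A minimal standalone sketch of that init/fini pairing, with userspace calloc/free standing in for kzalloc/kfree and invented names:

#include <stdio.h>
#include <stdlib.h>

struct demo_hwmon { int registered; };

static int demo_hwmon_init(struct demo_hwmon **out)
{
    struct demo_hwmon *h = calloc(1, sizeof(*h));   /* kzalloc() stand-in */

    if (!h)
        return -12;                                 /* ENOMEM-style failure */
    h->registered = 1;                              /* register stand-in */
    *out = h;
    return 0;
}

static void demo_hwmon_fini(struct demo_hwmon *h)
{
    h->registered = 0;                              /* unregister stand-in */
    free(h);                                        /* kfree() stand-in */
}

int main(void)
{
    struct demo_hwmon *h;
    int err = demo_hwmon_init(&h);

    if (err)
        return 1;
    /* ... core runs; both error unwind and removal call the fini helper. */
    demo_hwmon_fini(h);
    return 0;
}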
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 9b8a17ee3cb3..3238b9ee42f3 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c | |||
@@ -133,9 +133,9 @@ static inline int ocelot_vlant_wait_for_completion(struct ocelot *ocelot) | |||
133 | { | 133 | { |
134 | unsigned int val, timeout = 10; | 134 | unsigned int val, timeout = 10; |
135 | 135 | ||
136 | /* Wait for the issued mac table command to be completed, or timeout. | 136 | /* Wait for the issued vlan table command to be completed, or timeout. |
137 | * When the command read from ANA_TABLES_MACACCESS is | 137 | * When the command read from ANA_TABLES_VLANACCESS is |
138 | * MACACCESS_CMD_IDLE, the issued command completed successfully. | 138 | * VLANACCESS_CMD_IDLE, the issued command completed successfully. |
139 | */ | 139 | */ |
140 | do { | 140 | do { |
141 | val = ocelot_read(ocelot, ANA_TABLES_VLANACCESS); | 141 | val = ocelot_read(ocelot, ANA_TABLES_VLANACCESS); |
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index f2b1938236fe..244dc261006e 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c | |||
@@ -399,12 +399,14 @@ nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off, | |||
399 | 399 | ||
400 | switch (off) { | 400 | switch (off) { |
401 | case offsetof(struct iphdr, daddr): | 401 | case offsetof(struct iphdr, daddr): |
402 | set_ip_addr->ipv4_dst_mask = mask; | 402 | set_ip_addr->ipv4_dst_mask |= mask; |
403 | set_ip_addr->ipv4_dst = exact; | 403 | set_ip_addr->ipv4_dst &= ~mask; |
404 | set_ip_addr->ipv4_dst |= exact & mask; | ||
404 | break; | 405 | break; |
405 | case offsetof(struct iphdr, saddr): | 406 | case offsetof(struct iphdr, saddr): |
406 | set_ip_addr->ipv4_src_mask = mask; | 407 | set_ip_addr->ipv4_src_mask |= mask; |
407 | set_ip_addr->ipv4_src = exact; | 408 | set_ip_addr->ipv4_src &= ~mask; |
409 | set_ip_addr->ipv4_src |= exact & mask; | ||
408 | break; | 410 | break; |
409 | default: | 411 | default: |
410 | return -EOPNOTSUPP; | 412 | return -EOPNOTSUPP; |
@@ -418,11 +420,12 @@ nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off, | |||
418 | } | 420 | } |
419 | 421 | ||
420 | static void | 422 | static void |
421 | nfp_fl_set_ip6_helper(int opcode_tag, int idx, __be32 exact, __be32 mask, | 423 | nfp_fl_set_ip6_helper(int opcode_tag, u8 word, __be32 exact, __be32 mask, |
422 | struct nfp_fl_set_ipv6_addr *ip6) | 424 | struct nfp_fl_set_ipv6_addr *ip6) |
423 | { | 425 | { |
424 | ip6->ipv6[idx % 4].mask = mask; | 426 | ip6->ipv6[word].mask |= mask; |
425 | ip6->ipv6[idx % 4].exact = exact; | 427 | ip6->ipv6[word].exact &= ~mask; |
428 | ip6->ipv6[word].exact |= exact & mask; | ||
426 | 429 | ||
427 | ip6->reserved = cpu_to_be16(0); | 430 | ip6->reserved = cpu_to_be16(0); |
428 | ip6->head.jump_id = opcode_tag; | 431 | ip6->head.jump_id = opcode_tag; |
@@ -435,6 +438,7 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off, | |||
435 | struct nfp_fl_set_ipv6_addr *ip_src) | 438 | struct nfp_fl_set_ipv6_addr *ip_src) |
436 | { | 439 | { |
437 | __be32 exact, mask; | 440 | __be32 exact, mask; |
441 | u8 word; | ||
438 | 442 | ||
439 | /* We are expecting tcf_pedit to return a big endian value */ | 443 | /* We are expecting tcf_pedit to return a big endian value */ |
440 | mask = (__force __be32)~tcf_pedit_mask(action, idx); | 444 | mask = (__force __be32)~tcf_pedit_mask(action, idx); |
@@ -443,17 +447,20 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off, | |||
443 | if (exact & ~mask) | 447 | if (exact & ~mask) |
444 | return -EOPNOTSUPP; | 448 | return -EOPNOTSUPP; |
445 | 449 | ||
446 | if (off < offsetof(struct ipv6hdr, saddr)) | 450 | if (off < offsetof(struct ipv6hdr, saddr)) { |
447 | return -EOPNOTSUPP; | 451 | return -EOPNOTSUPP; |
448 | else if (off < offsetof(struct ipv6hdr, daddr)) | 452 | } else if (off < offsetof(struct ipv6hdr, daddr)) { |
449 | nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, idx, | 453 | word = (off - offsetof(struct ipv6hdr, saddr)) / sizeof(exact); |
454 | nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, word, | ||
450 | exact, mask, ip_src); | 455 | exact, mask, ip_src); |
451 | else if (off < offsetof(struct ipv6hdr, daddr) + | 456 | } else if (off < offsetof(struct ipv6hdr, daddr) + |
452 | sizeof(struct in6_addr)) | 457 | sizeof(struct in6_addr)) { |
453 | nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, idx, | 458 | word = (off - offsetof(struct ipv6hdr, daddr)) / sizeof(exact); |
459 | nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, word, | ||
454 | exact, mask, ip_dst); | 460 | exact, mask, ip_dst); |
455 | else | 461 | } else { |
456 | return -EOPNOTSUPP; | 462 | return -EOPNOTSUPP; |
463 | } | ||
457 | 464 | ||
458 | return 0; | 465 | return 0; |
459 | } | 466 | } |
@@ -511,7 +518,7 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow, | |||
511 | struct nfp_fl_set_eth set_eth; | 518 | struct nfp_fl_set_eth set_eth; |
512 | enum pedit_header_type htype; | 519 | enum pedit_header_type htype; |
513 | int idx, nkeys, err; | 520 | int idx, nkeys, err; |
514 | size_t act_size; | 521 | size_t act_size = 0; |
515 | u32 offset, cmd; | 522 | u32 offset, cmd; |
516 | u8 ip_proto = 0; | 523 | u8 ip_proto = 0; |
517 | 524 | ||
@@ -569,7 +576,9 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow, | |||
569 | act_size = sizeof(set_eth); | 576 | act_size = sizeof(set_eth); |
570 | memcpy(nfp_action, &set_eth, act_size); | 577 | memcpy(nfp_action, &set_eth, act_size); |
571 | *a_len += act_size; | 578 | *a_len += act_size; |
572 | } else if (set_ip_addr.head.len_lw) { | 579 | } |
580 | if (set_ip_addr.head.len_lw) { | ||
581 | nfp_action += act_size; | ||
573 | act_size = sizeof(set_ip_addr); | 582 | act_size = sizeof(set_ip_addr); |
574 | memcpy(nfp_action, &set_ip_addr, act_size); | 583 | memcpy(nfp_action, &set_ip_addr, act_size); |
575 | *a_len += act_size; | 584 | *a_len += act_size; |
@@ -577,10 +586,12 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow, | |||
577 | /* Hardware will automatically fix IPv4 and TCP/UDP checksum. */ | 586 | /* Hardware will automatically fix IPv4 and TCP/UDP checksum. */ |
578 | *csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR | | 587 | *csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR | |
579 | nfp_fl_csum_l4_to_flag(ip_proto); | 588 | nfp_fl_csum_l4_to_flag(ip_proto); |
580 | } else if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) { | 589 | } |
590 | if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) { | ||
581 | /* TC compiles set src and dst IPv6 address as a single action, | 591 | /* TC compiles set src and dst IPv6 address as a single action, |
582 | * the hardware requires this to be 2 separate actions. | 592 | * the hardware requires this to be 2 separate actions. |
583 | */ | 593 | */ |
594 | nfp_action += act_size; | ||
584 | act_size = sizeof(set_ip6_src); | 595 | act_size = sizeof(set_ip6_src); |
585 | memcpy(nfp_action, &set_ip6_src, act_size); | 596 | memcpy(nfp_action, &set_ip6_src, act_size); |
586 | *a_len += act_size; | 597 | *a_len += act_size; |
@@ -593,6 +604,7 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow, | |||
593 | /* Hardware will automatically fix TCP/UDP checksum. */ | 604 | /* Hardware will automatically fix TCP/UDP checksum. */ |
594 | *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto); | 605 | *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto); |
595 | } else if (set_ip6_dst.head.len_lw) { | 606 | } else if (set_ip6_dst.head.len_lw) { |
607 | nfp_action += act_size; | ||
596 | act_size = sizeof(set_ip6_dst); | 608 | act_size = sizeof(set_ip6_dst); |
597 | memcpy(nfp_action, &set_ip6_dst, act_size); | 609 | memcpy(nfp_action, &set_ip6_dst, act_size); |
598 | *a_len += act_size; | 610 | *a_len += act_size; |
@@ -600,13 +612,16 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow, | |||
600 | /* Hardware will automatically fix TCP/UDP checksum. */ | 612 | /* Hardware will automatically fix TCP/UDP checksum. */ |
601 | *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto); | 613 | *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto); |
602 | } else if (set_ip6_src.head.len_lw) { | 614 | } else if (set_ip6_src.head.len_lw) { |
615 | nfp_action += act_size; | ||
603 | act_size = sizeof(set_ip6_src); | 616 | act_size = sizeof(set_ip6_src); |
604 | memcpy(nfp_action, &set_ip6_src, act_size); | 617 | memcpy(nfp_action, &set_ip6_src, act_size); |
605 | *a_len += act_size; | 618 | *a_len += act_size; |
606 | 619 | ||
607 | /* Hardware will automatically fix TCP/UDP checksum. */ | 620 | /* Hardware will automatically fix TCP/UDP checksum. */ |
608 | *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto); | 621 | *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto); |
609 | } else if (set_tport.head.len_lw) { | 622 | } |
623 | if (set_tport.head.len_lw) { | ||
624 | nfp_action += act_size; | ||
610 | act_size = sizeof(set_tport); | 625 | act_size = sizeof(set_tport); |
611 | memcpy(nfp_action, &set_tport, act_size); | 626 | memcpy(nfp_action, &set_tport, act_size); |
612 | *a_len += act_size; | 627 | *a_len += act_size; |
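The nfp pedit hunks accumulate multiple set-field keys instead of overwriting: the mask bits are OR-ed in, the exact value is folded in as (old & ~mask) | (exact & mask), and the IPv6 word is derived from the header offset rather than the key index. A standalone sketch of both computations (struct and field names invented):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_ip6hdr { uint32_t misc[2]; uint32_t saddr[4]; uint32_t daddr[4]; };

struct demo_set_addr { uint32_t exact; uint32_t mask; };

/* Fold one pedit key into an accumulated set-address action. */
static void demo_merge(struct demo_set_addr *a, uint32_t exact, uint32_t mask)
{
    a->mask  |= mask;
    a->exact &= ~mask;
    a->exact |= exact & mask;
}

int main(void)
{
    struct demo_set_addr a = { 0, 0 };

    demo_merge(&a, 0x11220000, 0xffff0000);   /* first key: upper half */
    demo_merge(&a, 0x00003344, 0x0000ffff);   /* second key: lower half */
    printf("exact=0x%08x mask=0x%08x\n", (unsigned)a.exact, (unsigned)a.mask);

    /* Which 32-bit word of the source address does a given offset touch? */
    size_t off = offsetof(struct demo_ip6hdr, saddr) + 8;
    size_t word = (off - offsetof(struct demo_ip6hdr, saddr)) / sizeof(uint32_t);
    printf("word=%zu\n", word);               /* 2: third word of saddr */
    return 0;
}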
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index af3a28ec04eb..0f0aba793352 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c | |||
@@ -228,7 +228,7 @@ static int qed_grc_attn_cb(struct qed_hwfn *p_hwfn) | |||
228 | attn_master_to_str(GET_FIELD(tmp, QED_GRC_ATTENTION_MASTER)), | 228 | attn_master_to_str(GET_FIELD(tmp, QED_GRC_ATTENTION_MASTER)), |
229 | GET_FIELD(tmp2, QED_GRC_ATTENTION_PF), | 229 | GET_FIELD(tmp2, QED_GRC_ATTENTION_PF), |
230 | (GET_FIELD(tmp2, QED_GRC_ATTENTION_PRIV) == | 230 | (GET_FIELD(tmp2, QED_GRC_ATTENTION_PRIV) == |
231 | QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Ireelevant)", | 231 | QED_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant)", |
232 | GET_FIELD(tmp2, QED_GRC_ATTENTION_VF)); | 232 | GET_FIELD(tmp2, QED_GRC_ATTENTION_VF)); |
233 | 233 | ||
234 | out: | 234 | out: |
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index b48f76182049..10b075bc5959 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c | |||
@@ -380,8 +380,6 @@ static void fm93c56a_select(struct ql3_adapter *qdev) | |||
380 | 380 | ||
381 | qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1; | 381 | qdev->eeprom_cmd_data = AUBURN_EEPROM_CS_1; |
382 | ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); | 382 | ql_write_nvram_reg(qdev, spir, ISP_NVRAM_MASK | qdev->eeprom_cmd_data); |
383 | ql_write_nvram_reg(qdev, spir, | ||
384 | ((ISP_NVRAM_MASK << 16) | qdev->eeprom_cmd_data)); | ||
385 | } | 383 | } |
386 | 384 | ||
387 | /* | 385 | /* |
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index 8c4f49adcf9e..f42d4903a199 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
@@ -6528,17 +6528,15 @@ static int rtl8169_poll(struct napi_struct *napi, int budget) | |||
6528 | struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi); | 6528 | struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi); |
6529 | struct net_device *dev = tp->dev; | 6529 | struct net_device *dev = tp->dev; |
6530 | u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow; | 6530 | u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow; |
6531 | int work_done= 0; | 6531 | int work_done; |
6532 | u16 status; | 6532 | u16 status; |
6533 | 6533 | ||
6534 | status = rtl_get_events(tp); | 6534 | status = rtl_get_events(tp); |
6535 | rtl_ack_events(tp, status & ~tp->event_slow); | 6535 | rtl_ack_events(tp, status & ~tp->event_slow); |
6536 | 6536 | ||
6537 | if (status & RTL_EVENT_NAPI_RX) | 6537 | work_done = rtl_rx(dev, tp, (u32) budget); |
6538 | work_done = rtl_rx(dev, tp, (u32) budget); | ||
6539 | 6538 | ||
6540 | if (status & RTL_EVENT_NAPI_TX) | 6539 | rtl_tx(dev, tp); |
6541 | rtl_tx(dev, tp); | ||
6542 | 6540 | ||
6543 | if (status & tp->event_slow) { | 6541 | if (status & tp->event_slow) { |
6544 | enable_mask &= ~tp->event_slow; | 6542 | enable_mask &= ~tp->event_slow; |
@@ -7071,20 +7069,12 @@ static int rtl_alloc_irq(struct rtl8169_private *tp) | |||
7071 | { | 7069 | { |
7072 | unsigned int flags; | 7070 | unsigned int flags; |
7073 | 7071 | ||
7074 | switch (tp->mac_version) { | 7072 | if (tp->mac_version <= RTL_GIGA_MAC_VER_06) { |
7075 | case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_06: | ||
7076 | RTL_W8(tp, Cfg9346, Cfg9346_Unlock); | 7073 | RTL_W8(tp, Cfg9346, Cfg9346_Unlock); |
7077 | RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable); | 7074 | RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable); |
7078 | RTL_W8(tp, Cfg9346, Cfg9346_Lock); | 7075 | RTL_W8(tp, Cfg9346, Cfg9346_Lock); |
7079 | flags = PCI_IRQ_LEGACY; | 7076 | flags = PCI_IRQ_LEGACY; |
7080 | break; | 7077 | } else { |
7081 | case RTL_GIGA_MAC_VER_39 ... RTL_GIGA_MAC_VER_40: | ||
7082 | /* This version was reported to have issues with resume | ||
7083 | * from suspend when using MSI-X | ||
7084 | */ | ||
7085 | flags = PCI_IRQ_LEGACY | PCI_IRQ_MSI; | ||
7086 | break; | ||
7087 | default: | ||
7088 | flags = PCI_IRQ_ALL_TYPES; | 7078 | flags = PCI_IRQ_ALL_TYPES; |
7089 | } | 7079 | } |
7090 | 7080 | ||
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index 82eccc930c5c..a0cd1c41cf5f 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c | |||
@@ -831,12 +831,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, | |||
831 | if (IS_ERR(rt)) | 831 | if (IS_ERR(rt)) |
832 | return PTR_ERR(rt); | 832 | return PTR_ERR(rt); |
833 | 833 | ||
834 | if (skb_dst(skb)) { | 834 | skb_tunnel_check_pmtu(skb, &rt->dst, |
835 | int mtu = dst_mtu(&rt->dst) - GENEVE_IPV4_HLEN - | 835 | GENEVE_IPV4_HLEN + info->options_len); |
836 | info->options_len; | ||
837 | |||
838 | skb_dst_update_pmtu(skb, mtu); | ||
839 | } | ||
840 | 836 | ||
841 | sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); | 837 | sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); |
842 | if (geneve->collect_md) { | 838 | if (geneve->collect_md) { |
@@ -881,11 +877,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, | |||
881 | if (IS_ERR(dst)) | 877 | if (IS_ERR(dst)) |
882 | return PTR_ERR(dst); | 878 | return PTR_ERR(dst); |
883 | 879 | ||
884 | if (skb_dst(skb)) { | 880 | skb_tunnel_check_pmtu(skb, dst, GENEVE_IPV6_HLEN + info->options_len); |
885 | int mtu = dst_mtu(dst) - GENEVE_IPV6_HLEN - info->options_len; | ||
886 | |||
887 | skb_dst_update_pmtu(skb, mtu); | ||
888 | } | ||
889 | 881 | ||
890 | sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); | 882 | sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); |
891 | if (geneve->collect_md) { | 883 | if (geneve->collect_md) { |
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 3f5aa59c37b7..3e2c041d76ac 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c | |||
@@ -2267,8 +2267,9 @@ static void virtnet_freeze_down(struct virtio_device *vdev) | |||
2267 | /* Make sure no work handler is accessing the device */ | 2267 | /* Make sure no work handler is accessing the device */ |
2268 | flush_work(&vi->config_work); | 2268 | flush_work(&vi->config_work); |
2269 | 2269 | ||
2270 | netif_tx_lock_bh(vi->dev); | ||
2270 | netif_device_detach(vi->dev); | 2271 | netif_device_detach(vi->dev); |
2271 | netif_tx_disable(vi->dev); | 2272 | netif_tx_unlock_bh(vi->dev); |
2272 | cancel_delayed_work_sync(&vi->refill); | 2273 | cancel_delayed_work_sync(&vi->refill); |
2273 | 2274 | ||
2274 | if (netif_running(vi->dev)) { | 2275 | if (netif_running(vi->dev)) { |
@@ -2304,7 +2305,9 @@ static int virtnet_restore_up(struct virtio_device *vdev) | |||
2304 | } | 2305 | } |
2305 | } | 2306 | } |
2306 | 2307 | ||
2308 | netif_tx_lock_bh(vi->dev); | ||
2307 | netif_device_attach(vi->dev); | 2309 | netif_device_attach(vi->dev); |
2310 | netif_tx_unlock_bh(vi->dev); | ||
2308 | return err; | 2311 | return err; |
2309 | } | 2312 | } |
2310 | 2313 | ||
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 1d74f90d6f5d..297cdeaef479 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c | |||
@@ -2262,11 +2262,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | |||
2262 | } | 2262 | } |
2263 | 2263 | ||
2264 | ndst = &rt->dst; | 2264 | ndst = &rt->dst; |
2265 | if (skb_dst(skb)) { | 2265 | skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM); |
2266 | int mtu = dst_mtu(ndst) - VXLAN_HEADROOM; | ||
2267 | |||
2268 | skb_dst_update_pmtu(skb, mtu); | ||
2269 | } | ||
2270 | 2266 | ||
2271 | tos = ip_tunnel_ecn_encap(tos, old_iph, skb); | 2267 | tos = ip_tunnel_ecn_encap(tos, old_iph, skb); |
2272 | ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); | 2268 | ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); |
@@ -2303,11 +2299,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, | |||
2303 | goto out_unlock; | 2299 | goto out_unlock; |
2304 | } | 2300 | } |
2305 | 2301 | ||
2306 | if (skb_dst(skb)) { | 2302 | skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM); |
2307 | int mtu = dst_mtu(ndst) - VXLAN6_HEADROOM; | ||
2308 | |||
2309 | skb_dst_update_pmtu(skb, mtu); | ||
2310 | } | ||
2311 | 2303 | ||
2312 | tos = ip_tunnel_ecn_encap(tos, old_iph, skb); | 2304 | tos = ip_tunnel_ecn_encap(tos, old_iph, skb); |
2313 | ttl = ttl ? : ip6_dst_hoplimit(ndst); | 2305 | ttl = ttl ? : ip6_dst_hoplimit(ndst); |
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index 7f01f6f60b87..d0b7dd8fb184 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c | |||
@@ -485,7 +485,13 @@ static int armpmu_filter_match(struct perf_event *event) | |||
485 | { | 485 | { |
486 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | 486 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); |
487 | unsigned int cpu = smp_processor_id(); | 487 | unsigned int cpu = smp_processor_id(); |
488 | return cpumask_test_cpu(cpu, &armpmu->supported_cpus); | 488 | int ret; |
489 | |||
490 | ret = cpumask_test_cpu(cpu, &armpmu->supported_cpus); | ||
491 | if (ret && armpmu->filter_match) | ||
492 | return armpmu->filter_match(event); | ||
493 | |||
494 | return ret; | ||
489 | } | 495 | } |
490 | 496 | ||
491 | static ssize_t armpmu_cpumask_show(struct device *dev, | 497 | static ssize_t armpmu_cpumask_show(struct device *dev, |
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c index 01b0e2bb3319..2012551d93e0 100644 --- a/drivers/ptp/ptp_chardev.c +++ b/drivers/ptp/ptp_chardev.c | |||
@@ -24,6 +24,8 @@ | |||
24 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
25 | #include <linux/timekeeping.h> | 25 | #include <linux/timekeeping.h> |
26 | 26 | ||
27 | #include <linux/nospec.h> | ||
28 | |||
27 | #include "ptp_private.h" | 29 | #include "ptp_private.h" |
28 | 30 | ||
29 | static int ptp_disable_pinfunc(struct ptp_clock_info *ops, | 31 | static int ptp_disable_pinfunc(struct ptp_clock_info *ops, |
@@ -248,6 +250,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) | |||
248 | err = -EINVAL; | 250 | err = -EINVAL; |
249 | break; | 251 | break; |
250 | } | 252 | } |
253 | pin_index = array_index_nospec(pin_index, ops->n_pins); | ||
251 | if (mutex_lock_interruptible(&ptp->pincfg_mux)) | 254 | if (mutex_lock_interruptible(&ptp->pincfg_mux)) |
252 | return -ERESTARTSYS; | 255 | return -ERESTARTSYS; |
253 | pd = ops->pin_config[pin_index]; | 256 | pd = ops->pin_config[pin_index]; |
@@ -266,6 +269,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) | |||
266 | err = -EINVAL; | 269 | err = -EINVAL; |
267 | break; | 270 | break; |
268 | } | 271 | } |
272 | pin_index = array_index_nospec(pin_index, ops->n_pins); | ||
269 | if (mutex_lock_interruptible(&ptp->pincfg_mux)) | 273 | if (mutex_lock_interruptible(&ptp->pincfg_mux)) |
270 | return -ERESTARTSYS; | 274 | return -ERESTARTSYS; |
271 | err = ptp_set_pinfunc(ptp, pin_index, pd.func, pd.chan); | 275 | err = ptp_set_pinfunc(ptp, pin_index, pd.func, pd.chan); |
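For context on the two array_index_nospec() hunks above, here is a minimal hedged sketch of the bounds-check-then-index pattern they harden. The my_pin_* names are placeholders invented for the example, not the ptp structures; array_index_nospec() clamps the index even under misspeculation, so a mispredicted bounds check cannot be used to read out of bounds.

    #include <linux/errno.h>
    #include <linux/nospec.h>

    struct my_pin_cfg {
            int func;
    };

    struct my_pin_dev {
            unsigned int n_pins;
            struct my_pin_cfg *pin_config;
    };

    static int my_get_pin_func(struct my_pin_dev *dev, unsigned int idx, int *func)
    {
            if (idx >= dev->n_pins)                 /* architectural bounds check */
                    return -EINVAL;
            idx = array_index_nospec(idx, dev->n_pins); /* clamp to [0, n_pins) */
            *func = dev->pin_config[idx].func;      /* dependent load is now safe */
            return 0;
    }

As in the ptp_ioctl() hunks, the clamp sits after the range check and before the dependent array access.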
diff --git a/fs/afs/rxrpc.c b/fs/afs/rxrpc.c index 35f2ae30f31f..77a83790a31f 100644 --- a/fs/afs/rxrpc.c +++ b/fs/afs/rxrpc.c | |||
@@ -690,8 +690,6 @@ static void afs_process_async_call(struct work_struct *work) | |||
690 | } | 690 | } |
691 | 691 | ||
692 | if (call->state == AFS_CALL_COMPLETE) { | 692 | if (call->state == AFS_CALL_COMPLETE) { |
693 | call->reply[0] = NULL; | ||
694 | |||
695 | /* We have two refs to release - one from the alloc and one | 693 | /* We have two refs to release - one from the alloc and one |
696 | * queued with the work item - and we can't just deallocate the | 694 | * queued with the work item - and we can't just deallocate the |
697 | * call because the work item may be queued again. | 695 | * call because the work item may be queued again. |
diff --git a/fs/afs/server.c b/fs/afs/server.c index 2f306c0cc4ee..1d329e6981d5 100644 --- a/fs/afs/server.c +++ b/fs/afs/server.c | |||
@@ -199,11 +199,9 @@ static struct afs_server *afs_install_server(struct afs_net *net, | |||
199 | 199 | ||
200 | write_sequnlock(&net->fs_addr_lock); | 200 | write_sequnlock(&net->fs_addr_lock); |
201 | ret = 0; | 201 | ret = 0; |
202 | goto out; | ||
203 | 202 | ||
204 | exists: | 203 | exists: |
205 | afs_get_server(server); | 204 | afs_get_server(server); |
206 | out: | ||
207 | write_sequnlock(&net->fs_lock); | 205 | write_sequnlock(&net->fs_lock); |
208 | return server; | 206 | return server; |
209 | } | 207 | } |
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c index af2b17b21b94..95983c744164 100644 --- a/fs/cachefiles/namei.c +++ b/fs/cachefiles/namei.c | |||
@@ -343,7 +343,7 @@ try_again: | |||
343 | trap = lock_rename(cache->graveyard, dir); | 343 | trap = lock_rename(cache->graveyard, dir); |
344 | 344 | ||
345 | /* do some checks before getting the grave dentry */ | 345 | /* do some checks before getting the grave dentry */ |
346 | if (rep->d_parent != dir) { | 346 | if (rep->d_parent != dir || IS_DEADDIR(d_inode(rep))) { |
347 | /* the entry was probably culled when we dropped the parent dir | 347 | /* the entry was probably culled when we dropped the parent dir |
348 | * lock */ | 348 | * lock */ |
349 | unlock_rename(cache->graveyard, dir); | 349 | unlock_rename(cache->graveyard, dir); |
diff --git a/fs/dax.c b/fs/dax.c --- a/fs/dax.c +++ b/fs/dax.c | |||
@@ -666,6 +666,8 @@ struct page *dax_layout_busy_page(struct address_space *mapping) | |||
666 | while (index < end && pagevec_lookup_entries(&pvec, mapping, index, | 666 | while (index < end && pagevec_lookup_entries(&pvec, mapping, index, |
667 | min(end - index, (pgoff_t)PAGEVEC_SIZE), | 667 | min(end - index, (pgoff_t)PAGEVEC_SIZE), |
668 | indices)) { | 668 | indices)) { |
669 | pgoff_t nr_pages = 1; | ||
670 | |||
669 | for (i = 0; i < pagevec_count(&pvec); i++) { | 671 | for (i = 0; i < pagevec_count(&pvec); i++) { |
670 | struct page *pvec_ent = pvec.pages[i]; | 672 | struct page *pvec_ent = pvec.pages[i]; |
671 | void *entry; | 673 | void *entry; |
@@ -680,8 +682,15 @@ struct page *dax_layout_busy_page(struct address_space *mapping) | |||
680 | 682 | ||
681 | xa_lock_irq(&mapping->i_pages); | 683 | xa_lock_irq(&mapping->i_pages); |
682 | entry = get_unlocked_mapping_entry(mapping, index, NULL); | 684 | entry = get_unlocked_mapping_entry(mapping, index, NULL); |
683 | if (entry) | 685 | if (entry) { |
684 | page = dax_busy_page(entry); | 686 | page = dax_busy_page(entry); |
687 | /* | ||
688 | * Account for multi-order entries at | ||
689 | * the end of the pagevec. | ||
690 | */ | ||
691 | if (i + 1 >= pagevec_count(&pvec)) | ||
692 | nr_pages = 1UL << dax_radix_order(entry); | ||
693 | } | ||
685 | put_unlocked_mapping_entry(mapping, index, entry); | 694 | put_unlocked_mapping_entry(mapping, index, entry); |
686 | xa_unlock_irq(&mapping->i_pages); | 695 | xa_unlock_irq(&mapping->i_pages); |
687 | if (page) | 696 | if (page) |
@@ -696,7 +705,7 @@ struct page *dax_layout_busy_page(struct address_space *mapping) | |||
696 | */ | 705 | */ |
697 | pagevec_remove_exceptionals(&pvec); | 706 | pagevec_remove_exceptionals(&pvec); |
698 | pagevec_release(&pvec); | 707 | pagevec_release(&pvec); |
699 | index++; | 708 | index += nr_pages; |
700 | 709 | ||
701 | if (page) | 710 | if (page) |
702 | break; | 711 | break; |
diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c index defc2168de91..f58c0cacc531 100644 --- a/fs/fat/fatent.c +++ b/fs/fat/fatent.c | |||
@@ -682,6 +682,7 @@ int fat_count_free_clusters(struct super_block *sb) | |||
682 | if (ops->ent_get(&fatent) == FAT_ENT_FREE) | 682 | if (ops->ent_get(&fatent) == FAT_ENT_FREE) |
683 | free++; | 683 | free++; |
684 | } while (fat_ent_next(sbi, &fatent)); | 684 | } while (fat_ent_next(sbi, &fatent)); |
685 | cond_resched(); | ||
685 | } | 686 | } |
686 | sbi->free_clusters = free; | 687 | sbi->free_clusters = free; |
687 | sbi->free_clus_valid = 1; | 688 | sbi->free_clus_valid = 1; |
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c index 83bfe04456b6..c550512ce335 100644 --- a/fs/fscache/cookie.c +++ b/fs/fscache/cookie.c | |||
@@ -70,20 +70,7 @@ void fscache_free_cookie(struct fscache_cookie *cookie) | |||
70 | } | 70 | } |
71 | 71 | ||
72 | /* | 72 | /* |
73 | * initialise an cookie jar slab element prior to any use | 73 | * Set the index key in a cookie. The cookie struct has space for a 16-byte |
74 | */ | ||
75 | void fscache_cookie_init_once(void *_cookie) | ||
76 | { | ||
77 | struct fscache_cookie *cookie = _cookie; | ||
78 | |||
79 | memset(cookie, 0, sizeof(*cookie)); | ||
80 | spin_lock_init(&cookie->lock); | ||
81 | spin_lock_init(&cookie->stores_lock); | ||
82 | INIT_HLIST_HEAD(&cookie->backing_objects); | ||
83 | } | ||
84 | |||
85 | /* | ||
86 | * Set the index key in a cookie. The cookie struct has space for a 12-byte | ||
87 | * key plus length and hash, but if that's not big enough, it's instead a | 74 | * key plus length and hash, but if that's not big enough, it's instead a |
88 | * pointer to a buffer containing 3 bytes of hash, 1 byte of length and then | 75 | * pointer to a buffer containing 3 bytes of hash, 1 byte of length and then |
89 | * the key data. | 76 | * the key data. |
@@ -93,20 +80,18 @@ static int fscache_set_key(struct fscache_cookie *cookie, | |||
93 | { | 80 | { |
94 | unsigned long long h; | 81 | unsigned long long h; |
95 | u32 *buf; | 82 | u32 *buf; |
83 | int bufs; | ||
96 | int i; | 84 | int i; |
97 | 85 | ||
98 | cookie->key_len = index_key_len; | 86 | bufs = DIV_ROUND_UP(index_key_len, sizeof(*buf)); |
99 | 87 | ||
100 | if (index_key_len > sizeof(cookie->inline_key)) { | 88 | if (index_key_len > sizeof(cookie->inline_key)) { |
101 | buf = kzalloc(index_key_len, GFP_KERNEL); | 89 | buf = kcalloc(bufs, sizeof(*buf), GFP_KERNEL); |
102 | if (!buf) | 90 | if (!buf) |
103 | return -ENOMEM; | 91 | return -ENOMEM; |
104 | cookie->key = buf; | 92 | cookie->key = buf; |
105 | } else { | 93 | } else { |
106 | buf = (u32 *)cookie->inline_key; | 94 | buf = (u32 *)cookie->inline_key; |
107 | buf[0] = 0; | ||
108 | buf[1] = 0; | ||
109 | buf[2] = 0; | ||
110 | } | 95 | } |
111 | 96 | ||
112 | memcpy(buf, index_key, index_key_len); | 97 | memcpy(buf, index_key, index_key_len); |
@@ -116,7 +101,8 @@ static int fscache_set_key(struct fscache_cookie *cookie, | |||
116 | */ | 101 | */ |
117 | h = (unsigned long)cookie->parent; | 102 | h = (unsigned long)cookie->parent; |
118 | h += index_key_len + cookie->type; | 103 | h += index_key_len + cookie->type; |
119 | for (i = 0; i < (index_key_len + sizeof(u32) - 1) / sizeof(u32); i++) | 104 | |
105 | for (i = 0; i < bufs; i++) | ||
120 | h += buf[i]; | 106 | h += buf[i]; |
121 | 107 | ||
122 | cookie->key_hash = h ^ (h >> 32); | 108 | cookie->key_hash = h ^ (h >> 32); |
@@ -161,7 +147,7 @@ struct fscache_cookie *fscache_alloc_cookie( | |||
161 | struct fscache_cookie *cookie; | 147 | struct fscache_cookie *cookie; |
162 | 148 | ||
163 | /* allocate and initialise a cookie */ | 149 | /* allocate and initialise a cookie */ |
164 | cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL); | 150 | cookie = kmem_cache_zalloc(fscache_cookie_jar, GFP_KERNEL); |
165 | if (!cookie) | 151 | if (!cookie) |
166 | return NULL; | 152 | return NULL; |
167 | 153 | ||
@@ -192,6 +178,9 @@ struct fscache_cookie *fscache_alloc_cookie( | |||
192 | cookie->netfs_data = netfs_data; | 178 | cookie->netfs_data = netfs_data; |
193 | cookie->flags = (1 << FSCACHE_COOKIE_NO_DATA_YET); | 179 | cookie->flags = (1 << FSCACHE_COOKIE_NO_DATA_YET); |
194 | cookie->type = def->type; | 180 | cookie->type = def->type; |
181 | spin_lock_init(&cookie->lock); | ||
182 | spin_lock_init(&cookie->stores_lock); | ||
183 | INIT_HLIST_HEAD(&cookie->backing_objects); | ||
195 | 184 | ||
196 | /* radix tree insertion won't use the preallocation pool unless it's | 185 | /* radix tree insertion won't use the preallocation pool unless it's |
197 | * told it may not wait */ | 186 | * told it may not wait */ |
diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h index f83328a7f048..d6209022e965 100644 --- a/fs/fscache/internal.h +++ b/fs/fscache/internal.h | |||
@@ -51,7 +51,6 @@ extern struct fscache_cache *fscache_select_cache_for_object( | |||
51 | extern struct kmem_cache *fscache_cookie_jar; | 51 | extern struct kmem_cache *fscache_cookie_jar; |
52 | 52 | ||
53 | extern void fscache_free_cookie(struct fscache_cookie *); | 53 | extern void fscache_free_cookie(struct fscache_cookie *); |
54 | extern void fscache_cookie_init_once(void *); | ||
55 | extern struct fscache_cookie *fscache_alloc_cookie(struct fscache_cookie *, | 54 | extern struct fscache_cookie *fscache_alloc_cookie(struct fscache_cookie *, |
56 | const struct fscache_cookie_def *, | 55 | const struct fscache_cookie_def *, |
57 | const void *, size_t, | 56 | const void *, size_t, |
diff --git a/fs/fscache/main.c b/fs/fscache/main.c index 7dce110bf17d..30ad89db1efc 100644 --- a/fs/fscache/main.c +++ b/fs/fscache/main.c | |||
@@ -143,9 +143,7 @@ static int __init fscache_init(void) | |||
143 | 143 | ||
144 | fscache_cookie_jar = kmem_cache_create("fscache_cookie_jar", | 144 | fscache_cookie_jar = kmem_cache_create("fscache_cookie_jar", |
145 | sizeof(struct fscache_cookie), | 145 | sizeof(struct fscache_cookie), |
146 | 0, | 146 | 0, 0, NULL); |
147 | 0, | ||
148 | fscache_cookie_init_once); | ||
149 | if (!fscache_cookie_jar) { | 147 | if (!fscache_cookie_jar) { |
150 | pr_notice("Failed to allocate a cookie jar\n"); | 148 | pr_notice("Failed to allocate a cookie jar\n"); |
151 | ret = -ENOMEM; | 149 | ret = -ENOMEM; |
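One way to read the cookie-jar change above, as a hedged sketch with invented widget names: a kmem_cache constructor runs only when an object is first set up in a newly populated slab and is not re-run when a freed object is recycled, so per-allocation state is better handled by kmem_cache_zalloc() plus explicit init at allocation time, which is the shape fscache_alloc_cookie() now has.

    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct widget {
            spinlock_t lock;
            int state;
    };

    static struct kmem_cache *widget_cache;         /* created with a NULL ctor */

    static struct widget *widget_alloc(void)
    {
            /* zeroed on every allocation, unlike a one-time constructor */
            struct widget *w = kmem_cache_zalloc(widget_cache, GFP_KERNEL);

            if (!w)
                    return NULL;
            spin_lock_init(&w->lock);               /* per-allocation init */
            return w;
    }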
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c index 3c159a7f9a9e..84544a4f012d 100644 --- a/fs/gfs2/bmap.c +++ b/fs/gfs2/bmap.c | |||
@@ -975,10 +975,6 @@ static void gfs2_iomap_journaled_page_done(struct inode *inode, loff_t pos, | |||
975 | { | 975 | { |
976 | struct gfs2_inode *ip = GFS2_I(inode); | 976 | struct gfs2_inode *ip = GFS2_I(inode); |
977 | 977 | ||
978 | if (!page_has_buffers(page)) { | ||
979 | create_empty_buffers(page, inode->i_sb->s_blocksize, | ||
980 | (1 << BH_Dirty)|(1 << BH_Uptodate)); | ||
981 | } | ||
982 | gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied); | 978 | gfs2_page_add_databufs(ip, page, offset_in_page(pos), copied); |
983 | } | 979 | } |
984 | 980 | ||
@@ -1061,7 +1057,7 @@ static int gfs2_iomap_begin_write(struct inode *inode, loff_t pos, | |||
1061 | } | 1057 | } |
1062 | } | 1058 | } |
1063 | release_metapath(&mp); | 1059 | release_metapath(&mp); |
1064 | if (gfs2_is_jdata(ip)) | 1060 | if (!gfs2_is_stuffed(ip) && gfs2_is_jdata(ip)) |
1065 | iomap->page_done = gfs2_iomap_journaled_page_done; | 1061 | iomap->page_done = gfs2_iomap_journaled_page_done; |
1066 | return 0; | 1062 | return 0; |
1067 | 1063 | ||
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c index 8e712b614e6e..933aac5da193 100644 --- a/fs/ocfs2/dlmglue.c +++ b/fs/ocfs2/dlmglue.c | |||
@@ -96,7 +96,9 @@ struct ocfs2_unblock_ctl { | |||
96 | }; | 96 | }; |
97 | 97 | ||
98 | /* Lockdep class keys */ | 98 | /* Lockdep class keys */ |
99 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
99 | static struct lock_class_key lockdep_keys[OCFS2_NUM_LOCK_TYPES]; | 100 | static struct lock_class_key lockdep_keys[OCFS2_NUM_LOCK_TYPES]; |
101 | #endif | ||
100 | 102 | ||
101 | static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres, | 103 | static int ocfs2_check_meta_downconvert(struct ocfs2_lock_res *lockres, |
102 | int new_level); | 104 | int new_level); |
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index bf000c8aeffb..fec62e9dfbe6 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c | |||
@@ -2337,8 +2337,8 @@ late_initcall(ubifs_init); | |||
2337 | 2337 | ||
2338 | static void __exit ubifs_exit(void) | 2338 | static void __exit ubifs_exit(void) |
2339 | { | 2339 | { |
2340 | WARN_ON(list_empty(&ubifs_infos)); | 2340 | WARN_ON(!list_empty(&ubifs_infos)); |
2341 | WARN_ON(atomic_long_read(&ubifs_clean_zn_cnt) == 0); | 2341 | WARN_ON(atomic_long_read(&ubifs_clean_zn_cnt) != 0); |
2342 | 2342 | ||
2343 | dbg_debugfs_exit(); | 2343 | dbg_debugfs_exit(); |
2344 | ubifs_compressors_exit(); | 2344 | ubifs_compressors_exit(); |
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index b25d12ef120a..e3c404833115 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h | |||
@@ -214,9 +214,9 @@ struct detailed_timing { | |||
214 | #define DRM_EDID_HDMI_DC_Y444 (1 << 3) | 214 | #define DRM_EDID_HDMI_DC_Y444 (1 << 3) |
215 | 215 | ||
216 | /* YCBCR 420 deep color modes */ | 216 | /* YCBCR 420 deep color modes */ |
217 | #define DRM_EDID_YCBCR420_DC_48 (1 << 6) | 217 | #define DRM_EDID_YCBCR420_DC_48 (1 << 2) |
218 | #define DRM_EDID_YCBCR420_DC_36 (1 << 5) | 218 | #define DRM_EDID_YCBCR420_DC_36 (1 << 1) |
219 | #define DRM_EDID_YCBCR420_DC_30 (1 << 4) | 219 | #define DRM_EDID_YCBCR420_DC_30 (1 << 0) |
220 | #define DRM_EDID_YCBCR420_DC_MASK (DRM_EDID_YCBCR420_DC_48 | \ | 220 | #define DRM_EDID_YCBCR420_DC_MASK (DRM_EDID_YCBCR420_DC_48 | \ |
221 | DRM_EDID_YCBCR420_DC_36 | \ | 221 | DRM_EDID_YCBCR420_DC_36 | \ |
222 | DRM_EDID_YCBCR420_DC_30) | 222 | DRM_EDID_YCBCR420_DC_30) |
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 99c19b06d9a4..fdcb45999b26 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h | |||
@@ -43,7 +43,7 @@ extern int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, | |||
43 | unsigned char *vec); | 43 | unsigned char *vec); |
44 | extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, | 44 | extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, |
45 | unsigned long new_addr, unsigned long old_end, | 45 | unsigned long new_addr, unsigned long old_end, |
46 | pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush); | 46 | pmd_t *old_pmd, pmd_t *new_pmd); |
47 | extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, | 47 | extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, |
48 | unsigned long addr, pgprot_t newprot, | 48 | unsigned long addr, pgprot_t newprot, |
49 | int prot_numa); | 49 | int prot_numa); |
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 696ed3f7f894..31460eeb6fe0 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h | |||
@@ -1027,6 +1027,14 @@ static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc, | |||
1027 | return fbc->frags[frag].buf + ((fbc->frag_sz_m1 & ix) << fbc->log_stride); | 1027 | return fbc->frags[frag].buf + ((fbc->frag_sz_m1 & ix) << fbc->log_stride); |
1028 | } | 1028 | } |
1029 | 1029 | ||
1030 | static inline u32 | ||
1031 | mlx5_frag_buf_get_idx_last_contig_stride(struct mlx5_frag_buf_ctrl *fbc, u32 ix) | ||
1032 | { | ||
1033 | u32 last_frag_stride_idx = (ix + fbc->strides_offset) | fbc->frag_sz_m1; | ||
1034 | |||
1035 | return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1); | ||
1036 | } | ||
1037 | |||
1030 | int mlx5_cmd_init(struct mlx5_core_dev *dev); | 1038 | int mlx5_cmd_init(struct mlx5_core_dev *dev); |
1031 | void mlx5_cmd_cleanup(struct mlx5_core_dev *dev); | 1039 | void mlx5_cmd_cleanup(struct mlx5_core_dev *dev); |
1032 | void mlx5_cmd_use_events(struct mlx5_core_dev *dev); | 1040 | void mlx5_cmd_use_events(struct mlx5_core_dev *dev); |
diff --git a/include/linux/module.h b/include/linux/module.h index f807f15bebbe..e19ae08c7fb8 100644 --- a/include/linux/module.h +++ b/include/linux/module.h | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/export.h> | 20 | #include <linux/export.h> |
21 | #include <linux/rbtree_latch.h> | 21 | #include <linux/rbtree_latch.h> |
22 | #include <linux/error-injection.h> | 22 | #include <linux/error-injection.h> |
23 | #include <linux/tracepoint-defs.h> | ||
23 | 24 | ||
24 | #include <linux/percpu.h> | 25 | #include <linux/percpu.h> |
25 | #include <asm/module.h> | 26 | #include <asm/module.h> |
@@ -430,7 +431,7 @@ struct module { | |||
430 | 431 | ||
431 | #ifdef CONFIG_TRACEPOINTS | 432 | #ifdef CONFIG_TRACEPOINTS |
432 | unsigned int num_tracepoints; | 433 | unsigned int num_tracepoints; |
433 | struct tracepoint * const *tracepoints_ptrs; | 434 | tracepoint_ptr_t *tracepoints_ptrs; |
434 | #endif | 435 | #endif |
435 | #ifdef HAVE_JUMP_LABEL | 436 | #ifdef HAVE_JUMP_LABEL |
436 | struct jump_entry *jump_entries; | 437 | struct jump_entry *jump_entries; |
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index 10f92e1d8e7b..bf309ff6f244 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h | |||
@@ -99,6 +99,7 @@ struct arm_pmu { | |||
99 | void (*stop)(struct arm_pmu *); | 99 | void (*stop)(struct arm_pmu *); |
100 | void (*reset)(void *); | 100 | void (*reset)(void *); |
101 | int (*map_event)(struct perf_event *event); | 101 | int (*map_event)(struct perf_event *event); |
102 | int (*filter_match)(struct perf_event *event); | ||
102 | int num_events; | 103 | int num_events; |
103 | bool secure_access; /* 32-bit ARM only */ | 104 | bool secure_access; /* 32-bit ARM only */ |
104 | #define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40 | 105 | #define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40 |
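To show how the new optional hook is meant to be wired up, a hedged sketch of a hypothetical arm_pmu driver follows; the my_pmu_* names are placeholders. Per the armpmu_filter_match() change above, the core consults filter_match() only after the supported_cpus test has passed.

    #include <linux/perf/arm_pmu.h>

    /* placeholder policy: accept every event; a real driver would check the
     * event's attributes against what this PMU instance can actually count */
    static int my_pmu_filter_match(struct perf_event *event)
    {
            return 1;
    }

    static int my_pmu_init(struct arm_pmu *pmu)
    {
            pmu->filter_match = my_pmu_filter_match;        /* hook is optional */
            return 0;
    }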
diff --git a/include/linux/tracepoint-defs.h b/include/linux/tracepoint-defs.h index 22c5a46e9693..49ba9cde7e4b 100644 --- a/include/linux/tracepoint-defs.h +++ b/include/linux/tracepoint-defs.h | |||
@@ -35,6 +35,12 @@ struct tracepoint { | |||
35 | struct tracepoint_func __rcu *funcs; | 35 | struct tracepoint_func __rcu *funcs; |
36 | }; | 36 | }; |
37 | 37 | ||
38 | #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS | ||
39 | typedef const int tracepoint_ptr_t; | ||
40 | #else | ||
41 | typedef struct tracepoint * const tracepoint_ptr_t; | ||
42 | #endif | ||
43 | |||
38 | struct bpf_raw_event_map { | 44 | struct bpf_raw_event_map { |
39 | struct tracepoint *tp; | 45 | struct tracepoint *tp; |
40 | void *bpf_func; | 46 | void *bpf_func; |
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 041f7e56a289..538ba1a58f5b 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h | |||
@@ -99,6 +99,29 @@ extern void syscall_unregfunc(void); | |||
99 | #define TRACE_DEFINE_ENUM(x) | 99 | #define TRACE_DEFINE_ENUM(x) |
100 | #define TRACE_DEFINE_SIZEOF(x) | 100 | #define TRACE_DEFINE_SIZEOF(x) |
101 | 101 | ||
102 | #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS | ||
103 | static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) | ||
104 | { | ||
105 | return offset_to_ptr(p); | ||
106 | } | ||
107 | |||
108 | #define __TRACEPOINT_ENTRY(name) \ | ||
109 | asm(" .section \"__tracepoints_ptrs\", \"a\" \n" \ | ||
110 | " .balign 4 \n" \ | ||
111 | " .long __tracepoint_" #name " - . \n" \ | ||
112 | " .previous \n") | ||
113 | #else | ||
114 | static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) | ||
115 | { | ||
116 | return *p; | ||
117 | } | ||
118 | |||
119 | #define __TRACEPOINT_ENTRY(name) \ | ||
120 | static tracepoint_ptr_t __tracepoint_ptr_##name __used \ | ||
121 | __attribute__((section("__tracepoints_ptrs"))) = \ | ||
122 | &__tracepoint_##name | ||
123 | #endif | ||
124 | |||
102 | #endif /* _LINUX_TRACEPOINT_H */ | 125 | #endif /* _LINUX_TRACEPOINT_H */ |
103 | 126 | ||
104 | /* | 127 | /* |
@@ -253,19 +276,6 @@ extern void syscall_unregfunc(void); | |||
253 | return static_key_false(&__tracepoint_##name.key); \ | 276 | return static_key_false(&__tracepoint_##name.key); \ |
254 | } | 277 | } |
255 | 278 | ||
256 | #ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS | ||
257 | #define __TRACEPOINT_ENTRY(name) \ | ||
258 | asm(" .section \"__tracepoints_ptrs\", \"a\" \n" \ | ||
259 | " .balign 4 \n" \ | ||
260 | " .long __tracepoint_" #name " - . \n" \ | ||
261 | " .previous \n") | ||
262 | #else | ||
263 | #define __TRACEPOINT_ENTRY(name) \ | ||
264 | static struct tracepoint * const __tracepoint_ptr_##name __used \ | ||
265 | __attribute__((section("__tracepoints_ptrs"))) = \ | ||
266 | &__tracepoint_##name | ||
267 | #endif | ||
268 | |||
269 | /* | 279 | /* |
270 | * We have no guarantee that gcc and the linker won't up-align the tracepoint | 280 | * We have no guarantee that gcc and the linker won't up-align the tracepoint |
271 | * structures, so we create an array of pointers that will be used for iteration | 281 | * structures, so we create an array of pointers that will be used for iteration |
diff --git a/include/net/dst.h b/include/net/dst.h index 7f735e76ca73..6cf0870414c7 100644 --- a/include/net/dst.h +++ b/include/net/dst.h | |||
@@ -527,4 +527,14 @@ static inline void skb_dst_update_pmtu(struct sk_buff *skb, u32 mtu) | |||
527 | dst->ops->update_pmtu(dst, NULL, skb, mtu); | 527 | dst->ops->update_pmtu(dst, NULL, skb, mtu); |
528 | } | 528 | } |
529 | 529 | ||
530 | static inline void skb_tunnel_check_pmtu(struct sk_buff *skb, | ||
531 | struct dst_entry *encap_dst, | ||
532 | int headroom) | ||
533 | { | ||
534 | u32 encap_mtu = dst_mtu(encap_dst); | ||
535 | |||
536 | if (skb->len > encap_mtu - headroom) | ||
537 | skb_dst_update_pmtu(skb, encap_mtu - headroom); | ||
538 | } | ||
539 | |||
530 | #endif /* _NET_DST_H */ | 540 | #endif /* _NET_DST_H */ |
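The geneve and vxlan hunks earlier in this series replace their open-coded dst_mtu()-minus-headroom updates with this helper. A hedged usage sketch, with my_encap_check_pmtu() and tunnel_hlen as placeholders (tunnel_hlen stands for the outer-header size, e.g. GENEVE_IPV4_HLEN + info->options_len above):

    #include <net/dst.h>

    static void my_encap_check_pmtu(struct sk_buff *skb,
                                    struct dst_entry *encap_dst,
                                    int tunnel_hlen)
    {
            /* updates the PMTU on skb's own dst only when this packet would
             * no longer fit once the tunnel headroom is added */
            skb_tunnel_check_pmtu(skb, encap_dst, tunnel_hlen);
    }

Unlike the removed per-driver code, the helper skips the update entirely for packets that already fit within the encapsulation MTU.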
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h index caabfd84a098..84097010237c 100644 --- a/include/net/ip6_fib.h +++ b/include/net/ip6_fib.h | |||
@@ -159,6 +159,10 @@ struct fib6_info { | |||
159 | struct rt6_info * __percpu *rt6i_pcpu; | 159 | struct rt6_info * __percpu *rt6i_pcpu; |
160 | struct rt6_exception_bucket __rcu *rt6i_exception_bucket; | 160 | struct rt6_exception_bucket __rcu *rt6i_exception_bucket; |
161 | 161 | ||
162 | #ifdef CONFIG_IPV6_ROUTER_PREF | ||
163 | unsigned long last_probe; | ||
164 | #endif | ||
165 | |||
162 | u32 fib6_metric; | 166 | u32 fib6_metric; |
163 | u8 fib6_protocol; | 167 | u8 fib6_protocol; |
164 | u8 fib6_type; | 168 | u8 fib6_type; |
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index 5ef1bad81ef5..9e3d32746430 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h | |||
@@ -347,7 +347,7 @@ static inline __u16 sctp_data_size(struct sctp_chunk *chunk) | |||
347 | __u16 size; | 347 | __u16 size; |
348 | 348 | ||
349 | size = ntohs(chunk->chunk_hdr->length); | 349 | size = ntohs(chunk->chunk_hdr->length); |
350 | size -= sctp_datahdr_len(&chunk->asoc->stream); | 350 | size -= sctp_datachk_len(&chunk->asoc->stream); |
351 | 351 | ||
352 | return size; | 352 | return size; |
353 | } | 353 | } |
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 28a7c8e44636..a11f93790476 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h | |||
@@ -876,6 +876,8 @@ struct sctp_transport { | |||
876 | unsigned long sackdelay; | 876 | unsigned long sackdelay; |
877 | __u32 sackfreq; | 877 | __u32 sackfreq; |
878 | 878 | ||
879 | atomic_t mtu_info; | ||
880 | |||
879 | /* When was the last time that we heard from this transport? We use | 881 | /* When was the last time that we heard from this transport? We use |
880 | * this to pick new active and retran paths. | 882 | * this to pick new active and retran paths. |
881 | */ | 883 | */ |
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h index b479db5c71d9..34dd3d497f2c 100644 --- a/include/uapi/linux/sctp.h +++ b/include/uapi/linux/sctp.h | |||
@@ -301,6 +301,7 @@ enum sctp_sinfo_flags { | |||
301 | SCTP_SACK_IMMEDIATELY = (1 << 3), /* SACK should be sent without delay. */ | 301 | SCTP_SACK_IMMEDIATELY = (1 << 3), /* SACK should be sent without delay. */ |
302 | /* 2 bits here have been used by SCTP_PR_SCTP_MASK */ | 302 | /* 2 bits here have been used by SCTP_PR_SCTP_MASK */ |
303 | SCTP_SENDALL = (1 << 6), | 303 | SCTP_SENDALL = (1 << 6), |
304 | SCTP_PR_SCTP_ALL = (1 << 7), | ||
304 | SCTP_NOTIFICATION = MSG_NOTIFICATION, /* Next message is not user msg but notification. */ | 305 | SCTP_NOTIFICATION = MSG_NOTIFICATION, /* Next message is not user msg but notification. */ |
305 | SCTP_EOF = MSG_FIN, /* Initiate graceful shutdown process. */ | 306 | SCTP_EOF = MSG_FIN, /* Initiate graceful shutdown process. */ |
306 | }; | 307 | }; |
diff --git a/kernel/bpf/xskmap.c b/kernel/bpf/xskmap.c index ef0b7b6ef8a5..686d244e798d 100644 --- a/kernel/bpf/xskmap.c +++ b/kernel/bpf/xskmap.c | |||
@@ -192,11 +192,8 @@ static int xsk_map_update_elem(struct bpf_map *map, void *key, void *value, | |||
192 | sock_hold(sock->sk); | 192 | sock_hold(sock->sk); |
193 | 193 | ||
194 | old_xs = xchg(&m->xsk_map[i], xs); | 194 | old_xs = xchg(&m->xsk_map[i], xs); |
195 | if (old_xs) { | 195 | if (old_xs) |
196 | /* Make sure we've flushed everything. */ | ||
197 | synchronize_net(); | ||
198 | sock_put((struct sock *)old_xs); | 196 | sock_put((struct sock *)old_xs); |
199 | } | ||
200 | 197 | ||
201 | sockfd_put(sock); | 198 | sockfd_put(sock); |
202 | return 0; | 199 | return 0; |
@@ -212,11 +209,8 @@ static int xsk_map_delete_elem(struct bpf_map *map, void *key) | |||
212 | return -EINVAL; | 209 | return -EINVAL; |
213 | 210 | ||
214 | old_xs = xchg(&m->xsk_map[k], NULL); | 211 | old_xs = xchg(&m->xsk_map[k], NULL); |
215 | if (old_xs) { | 212 | if (old_xs) |
216 | /* Make sure we've flushed everything. */ | ||
217 | synchronize_net(); | ||
218 | sock_put((struct sock *)old_xs); | 213 | sock_put((struct sock *)old_xs); |
219 | } | ||
220 | 214 | ||
221 | return 0; | 215 | return 0; |
222 | } | 216 | } |
diff --git a/kernel/trace/preemptirq_delay_test.c b/kernel/trace/preemptirq_delay_test.c index f704390db9fc..d8765c952fab 100644 --- a/kernel/trace/preemptirq_delay_test.c +++ b/kernel/trace/preemptirq_delay_test.c | |||
@@ -5,12 +5,12 @@ | |||
5 | * Copyright (C) 2018 Joel Fernandes (Google) <joel@joelfernandes.org> | 5 | * Copyright (C) 2018 Joel Fernandes (Google) <joel@joelfernandes.org> |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include <linux/trace_clock.h> | ||
8 | #include <linux/delay.h> | 9 | #include <linux/delay.h> |
9 | #include <linux/interrupt.h> | 10 | #include <linux/interrupt.h> |
10 | #include <linux/irq.h> | 11 | #include <linux/irq.h> |
11 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
12 | #include <linux/kthread.h> | 13 | #include <linux/kthread.h> |
13 | #include <linux/ktime.h> | ||
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
15 | #include <linux/printk.h> | 15 | #include <linux/printk.h> |
16 | #include <linux/string.h> | 16 | #include <linux/string.h> |
@@ -25,13 +25,13 @@ MODULE_PARM_DESC(test_mode, "Mode of the test such as preempt or irq (default ir | |||
25 | 25 | ||
26 | static void busy_wait(ulong time) | 26 | static void busy_wait(ulong time) |
27 | { | 27 | { |
28 | ktime_t start, end; | 28 | u64 start, end; |
29 | start = ktime_get(); | 29 | start = trace_clock_local(); |
30 | do { | 30 | do { |
31 | end = ktime_get(); | 31 | end = trace_clock_local(); |
32 | if (kthread_should_stop()) | 32 | if (kthread_should_stop()) |
33 | break; | 33 | break; |
34 | } while (ktime_to_ns(ktime_sub(end, start)) < (time * 1000)); | 34 | } while ((end - start) < (time * 1000)); |
35 | } | 35 | } |
36 | 36 | ||
37 | static int preemptirq_delay_run(void *data) | 37 | static int preemptirq_delay_run(void *data) |
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index bf2c06ef9afc..a3be42304485 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c | |||
@@ -28,8 +28,8 @@ | |||
28 | #include <linux/sched/task.h> | 28 | #include <linux/sched/task.h> |
29 | #include <linux/static_key.h> | 29 | #include <linux/static_key.h> |
30 | 30 | ||
31 | extern struct tracepoint * const __start___tracepoints_ptrs[]; | 31 | extern tracepoint_ptr_t __start___tracepoints_ptrs[]; |
32 | extern struct tracepoint * const __stop___tracepoints_ptrs[]; | 32 | extern tracepoint_ptr_t __stop___tracepoints_ptrs[]; |
33 | 33 | ||
34 | DEFINE_SRCU(tracepoint_srcu); | 34 | DEFINE_SRCU(tracepoint_srcu); |
35 | EXPORT_SYMBOL_GPL(tracepoint_srcu); | 35 | EXPORT_SYMBOL_GPL(tracepoint_srcu); |
@@ -371,25 +371,17 @@ int tracepoint_probe_unregister(struct tracepoint *tp, void *probe, void *data) | |||
371 | } | 371 | } |
372 | EXPORT_SYMBOL_GPL(tracepoint_probe_unregister); | 372 | EXPORT_SYMBOL_GPL(tracepoint_probe_unregister); |
373 | 373 | ||
374 | static void for_each_tracepoint_range(struct tracepoint * const *begin, | 374 | static void for_each_tracepoint_range( |
375 | struct tracepoint * const *end, | 375 | tracepoint_ptr_t *begin, tracepoint_ptr_t *end, |
376 | void (*fct)(struct tracepoint *tp, void *priv), | 376 | void (*fct)(struct tracepoint *tp, void *priv), |
377 | void *priv) | 377 | void *priv) |
378 | { | 378 | { |
379 | tracepoint_ptr_t *iter; | ||
380 | |||
379 | if (!begin) | 381 | if (!begin) |
380 | return; | 382 | return; |
381 | 383 | for (iter = begin; iter < end; iter++) | |
382 | if (IS_ENABLED(CONFIG_HAVE_ARCH_PREL32_RELOCATIONS)) { | 384 | fct(tracepoint_ptr_deref(iter), priv); |
383 | const int *iter; | ||
384 | |||
385 | for (iter = (const int *)begin; iter < (const int *)end; iter++) | ||
386 | fct(offset_to_ptr(iter), priv); | ||
387 | } else { | ||
388 | struct tracepoint * const *iter; | ||
389 | |||
390 | for (iter = begin; iter < end; iter++) | ||
391 | fct(*iter, priv); | ||
392 | } | ||
393 | } | 385 | } |
394 | 386 | ||
395 | #ifdef CONFIG_MODULES | 387 | #ifdef CONFIG_MODULES |
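A short hedged sketch of what the unified iteration above buys callers: with tracepoint_ptr_t and tracepoint_ptr_deref(), a walker such as for_each_kernel_tracepoint() no longer cares whether __tracepoints_ptrs holds absolute pointers or 32-bit place-relative offsets. The dump_* names below are invented for the example.

    #include <linux/tracepoint.h>
    #include <linux/printk.h>

    static void dump_tp_name(struct tracepoint *tp, void *priv)
    {
            pr_info("tracepoint: %s\n", tp->name);
    }

    static void dump_all_tracepoints(void)
    {
            /* walks __start/__stop___tracepoints_ptrs via
             * for_each_tracepoint_range(), dereferencing each entry with
             * tracepoint_ptr_deref() */
            for_each_kernel_tracepoint(dump_tp_name, NULL);
    }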
diff --git a/lib/test_ida.c b/lib/test_ida.c index 2d1637d8136b..b06880625961 100644 --- a/lib/test_ida.c +++ b/lib/test_ida.c | |||
@@ -150,10 +150,10 @@ static void ida_check_conv(struct ida *ida) | |||
150 | IDA_BUG_ON(ida, !ida_is_empty(ida)); | 150 | IDA_BUG_ON(ida, !ida_is_empty(ida)); |
151 | } | 151 | } |
152 | 152 | ||
153 | static DEFINE_IDA(ida); | ||
154 | |||
153 | static int ida_checks(void) | 155 | static int ida_checks(void) |
154 | { | 156 | { |
155 | DEFINE_IDA(ida); | ||
156 | |||
157 | IDA_BUG_ON(&ida, !ida_is_empty(&ida)); | 157 | IDA_BUG_ON(&ida, !ida_is_empty(&ida)); |
158 | ida_check_alloc(&ida); | 158 | ida_check_alloc(&ida); |
159 | ida_check_destroy(&ida); | 159 | ida_check_destroy(&ida); |
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 00704060b7f7..deed97fba979 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
@@ -1780,7 +1780,7 @@ static pmd_t move_soft_dirty_pmd(pmd_t pmd) | |||
1780 | 1780 | ||
1781 | bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, | 1781 | bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, |
1782 | unsigned long new_addr, unsigned long old_end, | 1782 | unsigned long new_addr, unsigned long old_end, |
1783 | pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush) | 1783 | pmd_t *old_pmd, pmd_t *new_pmd) |
1784 | { | 1784 | { |
1785 | spinlock_t *old_ptl, *new_ptl; | 1785 | spinlock_t *old_ptl, *new_ptl; |
1786 | pmd_t pmd; | 1786 | pmd_t pmd; |
@@ -1811,7 +1811,7 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, | |||
1811 | if (new_ptl != old_ptl) | 1811 | if (new_ptl != old_ptl) |
1812 | spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); | 1812 | spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); |
1813 | pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd); | 1813 | pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd); |
1814 | if (pmd_present(pmd) && pmd_dirty(pmd)) | 1814 | if (pmd_present(pmd)) |
1815 | force_flush = true; | 1815 | force_flush = true; |
1816 | VM_BUG_ON(!pmd_none(*new_pmd)); | 1816 | VM_BUG_ON(!pmd_none(*new_pmd)); |
1817 | 1817 | ||
@@ -1822,12 +1822,10 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, | |||
1822 | } | 1822 | } |
1823 | pmd = move_soft_dirty_pmd(pmd); | 1823 | pmd = move_soft_dirty_pmd(pmd); |
1824 | set_pmd_at(mm, new_addr, new_pmd, pmd); | 1824 | set_pmd_at(mm, new_addr, new_pmd, pmd); |
1825 | if (new_ptl != old_ptl) | ||
1826 | spin_unlock(new_ptl); | ||
1827 | if (force_flush) | 1825 | if (force_flush) |
1828 | flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE); | 1826 | flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE); |
1829 | else | 1827 | if (new_ptl != old_ptl) |
1830 | *need_flush = true; | 1828 | spin_unlock(new_ptl); |
1831 | spin_unlock(old_ptl); | 1829 | spin_unlock(old_ptl); |
1832 | return true; | 1830 | return true; |
1833 | } | 1831 | } |
@@ -2885,9 +2883,6 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw, | |||
2885 | if (!(pvmw->pmd && !pvmw->pte)) | 2883 | if (!(pvmw->pmd && !pvmw->pte)) |
2886 | return; | 2884 | return; |
2887 | 2885 | ||
2888 | mmu_notifier_invalidate_range_start(mm, address, | ||
2889 | address + HPAGE_PMD_SIZE); | ||
2890 | |||
2891 | flush_cache_range(vma, address, address + HPAGE_PMD_SIZE); | 2886 | flush_cache_range(vma, address, address + HPAGE_PMD_SIZE); |
2892 | pmdval = *pvmw->pmd; | 2887 | pmdval = *pvmw->pmd; |
2893 | pmdp_invalidate(vma, address, pvmw->pmd); | 2888 | pmdp_invalidate(vma, address, pvmw->pmd); |
@@ -2900,9 +2895,6 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw, | |||
2900 | set_pmd_at(mm, address, pvmw->pmd, pmdswp); | 2895 | set_pmd_at(mm, address, pvmw->pmd, pmdswp); |
2901 | page_remove_rmap(page, true); | 2896 | page_remove_rmap(page, true); |
2902 | put_page(page); | 2897 | put_page(page); |
2903 | |||
2904 | mmu_notifier_invalidate_range_end(mm, address, | ||
2905 | address + HPAGE_PMD_SIZE); | ||
2906 | } | 2898 | } |
2907 | 2899 | ||
2908 | void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new) | 2900 | void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new) |
diff --git a/mm/mmap.c b/mm/mmap.c --- a/mm/mmap.c +++ b/mm/mmap.c | |||
@@ -1410,7 +1410,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr, | |||
1410 | if (flags & MAP_FIXED_NOREPLACE) { | 1410 | if (flags & MAP_FIXED_NOREPLACE) { |
1411 | struct vm_area_struct *vma = find_vma(mm, addr); | 1411 | struct vm_area_struct *vma = find_vma(mm, addr); |
1412 | 1412 | ||
1413 | if (vma && vma->vm_start <= addr) | 1413 | if (vma && vma->vm_start < addr + len) |
1414 | return -EEXIST; | 1414 | return -EEXIST; |
1415 | } | 1415 | } |
1416 | 1416 | ||
diff --git a/mm/mremap.c b/mm/mremap.c index 5c2e18505f75..a9617e72e6b7 100644 --- a/mm/mremap.c +++ b/mm/mremap.c | |||
@@ -115,7 +115,7 @@ static pte_t move_soft_dirty_pte(pte_t pte) | |||
115 | static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, | 115 | static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, |
116 | unsigned long old_addr, unsigned long old_end, | 116 | unsigned long old_addr, unsigned long old_end, |
117 | struct vm_area_struct *new_vma, pmd_t *new_pmd, | 117 | struct vm_area_struct *new_vma, pmd_t *new_pmd, |
118 | unsigned long new_addr, bool need_rmap_locks, bool *need_flush) | 118 | unsigned long new_addr, bool need_rmap_locks) |
119 | { | 119 | { |
120 | struct mm_struct *mm = vma->vm_mm; | 120 | struct mm_struct *mm = vma->vm_mm; |
121 | pte_t *old_pte, *new_pte, pte; | 121 | pte_t *old_pte, *new_pte, pte; |
@@ -163,15 +163,17 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, | |||
163 | 163 | ||
164 | pte = ptep_get_and_clear(mm, old_addr, old_pte); | 164 | pte = ptep_get_and_clear(mm, old_addr, old_pte); |
165 | /* | 165 | /* |
166 | * If we are remapping a dirty PTE, make sure | 166 | * If we are remapping a valid PTE, make sure |
167 | * to flush TLB before we drop the PTL for the | 167 | * to flush TLB before we drop the PTL for the |
168 | * old PTE or we may race with page_mkclean(). | 168 | * PTE. |
169 | * | 169 | * |
170 | * This check has to be done after we removed the | 170 | * NOTE! Both old and new PTL matter: the old one |
171 | * old PTE from page tables or another thread may | 171 | * for racing with page_mkclean(), the new one to |
172 | * dirty it after the check and before the removal. | 172 | * make sure the physical page stays valid until |
173 | * the TLB entry for the old mapping has been | ||
174 | * flushed. | ||
173 | */ | 175 | */ |
174 | if (pte_present(pte) && pte_dirty(pte)) | 176 | if (pte_present(pte)) |
175 | force_flush = true; | 177 | force_flush = true; |
176 | pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr); | 178 | pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr); |
177 | pte = move_soft_dirty_pte(pte); | 179 | pte = move_soft_dirty_pte(pte); |
@@ -179,13 +181,11 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, | |||
179 | } | 181 | } |
180 | 182 | ||
181 | arch_leave_lazy_mmu_mode(); | 183 | arch_leave_lazy_mmu_mode(); |
184 | if (force_flush) | ||
185 | flush_tlb_range(vma, old_end - len, old_end); | ||
182 | if (new_ptl != old_ptl) | 186 | if (new_ptl != old_ptl) |
183 | spin_unlock(new_ptl); | 187 | spin_unlock(new_ptl); |
184 | pte_unmap(new_pte - 1); | 188 | pte_unmap(new_pte - 1); |
185 | if (force_flush) | ||
186 | flush_tlb_range(vma, old_end - len, old_end); | ||
187 | else | ||
188 | *need_flush = true; | ||
189 | pte_unmap_unlock(old_pte - 1, old_ptl); | 189 | pte_unmap_unlock(old_pte - 1, old_ptl); |
190 | if (need_rmap_locks) | 190 | if (need_rmap_locks) |
191 | drop_rmap_locks(vma); | 191 | drop_rmap_locks(vma); |
@@ -198,7 +198,6 @@ unsigned long move_page_tables(struct vm_area_struct *vma, | |||
198 | { | 198 | { |
199 | unsigned long extent, next, old_end; | 199 | unsigned long extent, next, old_end; |
200 | pmd_t *old_pmd, *new_pmd; | 200 | pmd_t *old_pmd, *new_pmd; |
201 | bool need_flush = false; | ||
202 | unsigned long mmun_start; /* For mmu_notifiers */ | 201 | unsigned long mmun_start; /* For mmu_notifiers */ |
203 | unsigned long mmun_end; /* For mmu_notifiers */ | 202 | unsigned long mmun_end; /* For mmu_notifiers */ |
204 | 203 | ||
@@ -229,8 +228,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma, | |||
229 | if (need_rmap_locks) | 228 | if (need_rmap_locks) |
230 | take_rmap_locks(vma); | 229 | take_rmap_locks(vma); |
231 | moved = move_huge_pmd(vma, old_addr, new_addr, | 230 | moved = move_huge_pmd(vma, old_addr, new_addr, |
232 | old_end, old_pmd, new_pmd, | 231 | old_end, old_pmd, new_pmd); |
233 | &need_flush); | ||
234 | if (need_rmap_locks) | 232 | if (need_rmap_locks) |
235 | drop_rmap_locks(vma); | 233 | drop_rmap_locks(vma); |
236 | if (moved) | 234 | if (moved) |
@@ -246,10 +244,8 @@ unsigned long move_page_tables(struct vm_area_struct *vma, | |||
246 | if (extent > next - new_addr) | 244 | if (extent > next - new_addr) |
247 | extent = next - new_addr; | 245 | extent = next - new_addr; |
248 | move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma, | 246 | move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma, |
249 | new_pmd, new_addr, need_rmap_locks, &need_flush); | 247 | new_pmd, new_addr, need_rmap_locks); |
250 | } | 248 | } |
251 | if (need_flush) | ||
252 | flush_tlb_range(vma, old_end-len, old_addr); | ||
253 | 249 | ||
254 | mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); | 250 | mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end); |
255 | 251 | ||
diff --git a/net/bpfilter/bpfilter_kern.c b/net/bpfilter/bpfilter_kern.c index b64e1649993b..94e88f510c5b 100644 --- a/net/bpfilter/bpfilter_kern.c +++ b/net/bpfilter/bpfilter_kern.c | |||
@@ -23,9 +23,11 @@ static void shutdown_umh(struct umh_info *info) | |||
23 | 23 | ||
24 | if (!info->pid) | 24 | if (!info->pid) |
25 | return; | 25 | return; |
26 | tsk = pid_task(find_vpid(info->pid), PIDTYPE_PID); | 26 | tsk = get_pid_task(find_vpid(info->pid), PIDTYPE_PID); |
27 | if (tsk) | 27 | if (tsk) { |
28 | force_sig(SIGKILL, tsk); | 28 | force_sig(SIGKILL, tsk); |
29 | put_task_struct(tsk); | ||
30 | } | ||
29 | fput(info->pipe_to_umh); | 31 | fput(info->pipe_to_umh); |
30 | fput(info->pipe_from_umh); | 32 | fput(info->pipe_from_umh); |
31 | info->pid = 0; | 33 | info->pid = 0; |
diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 4cc603dfc9ef..d05402868575 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c | |||
@@ -928,6 +928,9 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev, | |||
928 | return -EINVAL; | 928 | return -EINVAL; |
929 | } | 929 | } |
930 | 930 | ||
931 | if (info.cmd != cmd) | ||
932 | return -EINVAL; | ||
933 | |||
931 | if (info.cmd == ETHTOOL_GRXCLSRLALL) { | 934 | if (info.cmd == ETHTOOL_GRXCLSRLALL) { |
932 | if (info.rule_cnt > 0) { | 935 | if (info.rule_cnt > 0) { |
933 | if (info.rule_cnt <= KMALLOC_MAX_SIZE / sizeof(u32)) | 936 | if (info.rule_cnt <= KMALLOC_MAX_SIZE / sizeof(u32)) |
@@ -2392,13 +2395,17 @@ roll_back: | |||
2392 | return ret; | 2395 | return ret; |
2393 | } | 2396 | } |
2394 | 2397 | ||
2395 | static int ethtool_set_per_queue(struct net_device *dev, void __user *useraddr) | 2398 | static int ethtool_set_per_queue(struct net_device *dev, |
2399 | void __user *useraddr, u32 sub_cmd) | ||
2396 | { | 2400 | { |
2397 | struct ethtool_per_queue_op per_queue_opt; | 2401 | struct ethtool_per_queue_op per_queue_opt; |
2398 | 2402 | ||
2399 | if (copy_from_user(&per_queue_opt, useraddr, sizeof(per_queue_opt))) | 2403 | if (copy_from_user(&per_queue_opt, useraddr, sizeof(per_queue_opt))) |
2400 | return -EFAULT; | 2404 | return -EFAULT; |
2401 | 2405 | ||
2406 | if (per_queue_opt.sub_command != sub_cmd) | ||
2407 | return -EINVAL; | ||
2408 | |||
2402 | switch (per_queue_opt.sub_command) { | 2409 | switch (per_queue_opt.sub_command) { |
2403 | case ETHTOOL_GCOALESCE: | 2410 | case ETHTOOL_GCOALESCE: |
2404 | return ethtool_get_per_queue_coalesce(dev, useraddr, &per_queue_opt); | 2411 | return ethtool_get_per_queue_coalesce(dev, useraddr, &per_queue_opt); |
@@ -2769,7 +2776,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr) | |||
2769 | rc = ethtool_get_phy_stats(dev, useraddr); | 2776 | rc = ethtool_get_phy_stats(dev, useraddr); |
2770 | break; | 2777 | break; |
2771 | case ETHTOOL_PERQUEUE: | 2778 | case ETHTOOL_PERQUEUE: |
2772 | rc = ethtool_set_per_queue(dev, useraddr); | 2779 | rc = ethtool_set_per_queue(dev, useraddr, sub_cmd); |
2773 | break; | 2780 | break; |
2774 | case ETHTOOL_GLINKSETTINGS: | 2781 | case ETHTOOL_GLINKSETTINGS: |
2775 | rc = ethtool_get_link_ksettings(dev, useraddr); | 2782 | rc = ethtool_get_link_ksettings(dev, useraddr); |
diff --git a/net/core/netpoll.c b/net/core/netpoll.c index de1d1ba92f2d..3ae899805f8b 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c | |||
@@ -312,7 +312,6 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, | |||
312 | /* It is up to the caller to keep npinfo alive. */ | 312 | /* It is up to the caller to keep npinfo alive. */ |
313 | struct netpoll_info *npinfo; | 313 | struct netpoll_info *npinfo; |
314 | 314 | ||
315 | rcu_read_lock_bh(); | ||
316 | lockdep_assert_irqs_disabled(); | 315 | lockdep_assert_irqs_disabled(); |
317 | 316 | ||
318 | npinfo = rcu_dereference_bh(np->dev->npinfo); | 317 | npinfo = rcu_dereference_bh(np->dev->npinfo); |
@@ -357,7 +356,6 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, | |||
357 | skb_queue_tail(&npinfo->txq, skb); | 356 | skb_queue_tail(&npinfo->txq, skb); |
358 | schedule_delayed_work(&npinfo->tx_work,0); | 357 | schedule_delayed_work(&npinfo->tx_work,0); |
359 | } | 358 | } |
360 | rcu_read_unlock_bh(); | ||
361 | } | 359 | } |
362 | EXPORT_SYMBOL(netpoll_send_skb_on_dev); | 360 | EXPORT_SYMBOL(netpoll_send_skb_on_dev); |
363 | 361 | ||
diff --git a/net/ipv4/ipmr_base.c b/net/ipv4/ipmr_base.c index 844806120f44..3e614cc824f7 100644 --- a/net/ipv4/ipmr_base.c +++ b/net/ipv4/ipmr_base.c | |||
@@ -315,8 +315,6 @@ int mr_table_dump(struct mr_table *mrt, struct sk_buff *skb, | |||
315 | next_entry: | 315 | next_entry: |
316 | e++; | 316 | e++; |
317 | } | 317 | } |
318 | e = 0; | ||
319 | s_e = 0; | ||
320 | 318 | ||
321 | spin_lock_bh(lock); | 319 | spin_lock_bh(lock); |
322 | list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) { | 320 | list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) { |
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index a0b6932c3afd..a9d06d4dd057 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -1184,11 +1184,6 @@ route_lookup: | |||
1184 | } | 1184 | } |
1185 | skb_dst_set(skb, dst); | 1185 | skb_dst_set(skb, dst); |
1186 | 1186 | ||
1187 | if (encap_limit >= 0) { | ||
1188 | init_tel_txopt(&opt, encap_limit); | ||
1189 | ipv6_push_frag_opts(skb, &opt.ops, &proto); | ||
1190 | } | ||
1191 | |||
1192 | if (hop_limit == 0) { | 1187 | if (hop_limit == 0) { |
1193 | if (skb->protocol == htons(ETH_P_IP)) | 1188 | if (skb->protocol == htons(ETH_P_IP)) |
1194 | hop_limit = ip_hdr(skb)->ttl; | 1189 | hop_limit = ip_hdr(skb)->ttl; |
@@ -1210,6 +1205,11 @@ route_lookup: | |||
1210 | if (err) | 1205 | if (err) |
1211 | return err; | 1206 | return err; |
1212 | 1207 | ||
1208 | if (encap_limit >= 0) { | ||
1209 | init_tel_txopt(&opt, encap_limit); | ||
1210 | ipv6_push_frag_opts(skb, &opt.ops, &proto); | ||
1211 | } | ||
1212 | |||
1213 | skb_push(skb, sizeof(struct ipv6hdr)); | 1213 | skb_push(skb, sizeof(struct ipv6hdr)); |
1214 | skb_reset_network_header(skb); | 1214 | skb_reset_network_header(skb); |
1215 | ipv6h = ipv6_hdr(skb); | 1215 | ipv6h = ipv6_hdr(skb); |
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 6895e1dc0b03..21f6deb2aec9 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c | |||
@@ -2436,17 +2436,17 @@ static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml, | |||
2436 | { | 2436 | { |
2437 | int err; | 2437 | int err; |
2438 | 2438 | ||
2439 | /* callers have the socket lock and rtnl lock | 2439 | write_lock_bh(&iml->sflock); |
2440 | * so no other readers or writers of iml or its sflist | ||
2441 | */ | ||
2442 | if (!iml->sflist) { | 2440 | if (!iml->sflist) { |
2443 | /* any-source empty exclude case */ | 2441 | /* any-source empty exclude case */ |
2444 | return ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0); | 2442 | err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode, 0, NULL, 0); |
2443 | } else { | ||
2444 | err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode, | ||
2445 | iml->sflist->sl_count, iml->sflist->sl_addr, 0); | ||
2446 | sock_kfree_s(sk, iml->sflist, IP6_SFLSIZE(iml->sflist->sl_max)); | ||
2447 | iml->sflist = NULL; | ||
2445 | } | 2448 | } |
2446 | err = ip6_mc_del_src(idev, &iml->addr, iml->sfmode, | 2449 | write_unlock_bh(&iml->sflock); |
2447 | iml->sflist->sl_count, iml->sflist->sl_addr, 0); | ||
2448 | sock_kfree_s(sk, iml->sflist, IP6_SFLSIZE(iml->sflist->sl_max)); | ||
2449 | iml->sflist = NULL; | ||
2450 | return err; | 2450 | return err; |
2451 | } | 2451 | } |
2452 | 2452 | ||
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 9fd600e42f9d..e3226284e480 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -517,10 +517,11 @@ static void rt6_probe_deferred(struct work_struct *w) | |||
517 | 517 | ||
518 | static void rt6_probe(struct fib6_info *rt) | 518 | static void rt6_probe(struct fib6_info *rt) |
519 | { | 519 | { |
520 | struct __rt6_probe_work *work; | 520 | struct __rt6_probe_work *work = NULL; |
521 | const struct in6_addr *nh_gw; | 521 | const struct in6_addr *nh_gw; |
522 | struct neighbour *neigh; | 522 | struct neighbour *neigh; |
523 | struct net_device *dev; | 523 | struct net_device *dev; |
524 | struct inet6_dev *idev; | ||
524 | 525 | ||
525 | /* | 526 | /* |
526 | * Okay, this does not seem to be appropriate | 527 | * Okay, this does not seem to be appropriate |
@@ -536,15 +537,12 @@ static void rt6_probe(struct fib6_info *rt) | |||
536 | nh_gw = &rt->fib6_nh.nh_gw; | 537 | nh_gw = &rt->fib6_nh.nh_gw; |
537 | dev = rt->fib6_nh.nh_dev; | 538 | dev = rt->fib6_nh.nh_dev; |
538 | rcu_read_lock_bh(); | 539 | rcu_read_lock_bh(); |
540 | idev = __in6_dev_get(dev); | ||
539 | neigh = __ipv6_neigh_lookup_noref(dev, nh_gw); | 541 | neigh = __ipv6_neigh_lookup_noref(dev, nh_gw); |
540 | if (neigh) { | 542 | if (neigh) { |
541 | struct inet6_dev *idev; | ||
542 | |||
543 | if (neigh->nud_state & NUD_VALID) | 543 | if (neigh->nud_state & NUD_VALID) |
544 | goto out; | 544 | goto out; |
545 | 545 | ||
546 | idev = __in6_dev_get(dev); | ||
547 | work = NULL; | ||
548 | write_lock(&neigh->lock); | 546 | write_lock(&neigh->lock); |
549 | if (!(neigh->nud_state & NUD_VALID) && | 547 | if (!(neigh->nud_state & NUD_VALID) && |
550 | time_after(jiffies, | 548 | time_after(jiffies, |
@@ -554,11 +552,13 @@ static void rt6_probe(struct fib6_info *rt) | |||
554 | __neigh_set_probe_once(neigh); | 552 | __neigh_set_probe_once(neigh); |
555 | } | 553 | } |
556 | write_unlock(&neigh->lock); | 554 | write_unlock(&neigh->lock); |
557 | } else { | 555 | } else if (time_after(jiffies, rt->last_probe + |
556 | idev->cnf.rtr_probe_interval)) { | ||
558 | work = kmalloc(sizeof(*work), GFP_ATOMIC); | 557 | work = kmalloc(sizeof(*work), GFP_ATOMIC); |
559 | } | 558 | } |
560 | 559 | ||
561 | if (work) { | 560 | if (work) { |
561 | rt->last_probe = jiffies; | ||
562 | INIT_WORK(&work->work, rt6_probe_deferred); | 562 | INIT_WORK(&work->work, rt6_probe_deferred); |
563 | work->target = *nh_gw; | 563 | work->target = *nh_gw; |
564 | dev_hold(dev); | 564 | dev_hold(dev); |
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 374e7d302f26..06d17ff3562f 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c | |||
@@ -766,11 +766,9 @@ static int udp6_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb, | |||
766 | 766 | ||
767 | ret = udpv6_queue_rcv_skb(sk, skb); | 767 | ret = udpv6_queue_rcv_skb(sk, skb); |
768 | 768 | ||
769 | /* a return value > 0 means to resubmit the input, but | 769 | /* a return value > 0 means to resubmit the input */ |
770 | * it wants the return to be -protocol, or 0 | ||
771 | */ | ||
772 | if (ret > 0) | 770 | if (ret > 0) |
773 | return -ret; | 771 | return ret; |
774 | return 0; | 772 | return 0; |
775 | } | 773 | } |
776 | 774 | ||
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index ef3defaf43b9..d35bcf92969c 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c | |||
@@ -146,8 +146,8 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse) | |||
146 | fl6->daddr = reverse ? hdr->saddr : hdr->daddr; | 146 | fl6->daddr = reverse ? hdr->saddr : hdr->daddr; |
147 | fl6->saddr = reverse ? hdr->daddr : hdr->saddr; | 147 | fl6->saddr = reverse ? hdr->daddr : hdr->saddr; |
148 | 148 | ||
149 | while (nh + offset + 1 < skb->data || | 149 | while (nh + offset + sizeof(*exthdr) < skb->data || |
150 | pskb_may_pull(skb, nh + offset + 1 - skb->data)) { | 150 | pskb_may_pull(skb, nh + offset + sizeof(*exthdr) - skb->data)) { |
151 | nh = skb_network_header(skb); | 151 | nh = skb_network_header(skb); |
152 | exthdr = (struct ipv6_opt_hdr *)(nh + offset); | 152 | exthdr = (struct ipv6_opt_hdr *)(nh + offset); |
153 | 153 | ||
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c index c0ac522b48a1..4ff89cb7c86f 100644 --- a/net/llc/llc_conn.c +++ b/net/llc/llc_conn.c | |||
@@ -734,6 +734,7 @@ void llc_sap_add_socket(struct llc_sap *sap, struct sock *sk) | |||
734 | llc_sk(sk)->sap = sap; | 734 | llc_sk(sk)->sap = sap; |
735 | 735 | ||
736 | spin_lock_bh(&sap->sk_lock); | 736 | spin_lock_bh(&sap->sk_lock); |
737 | sock_set_flag(sk, SOCK_RCU_FREE); | ||
737 | sap->sk_count++; | 738 | sap->sk_count++; |
738 | sk_nulls_add_node_rcu(sk, laddr_hb); | 739 | sk_nulls_add_node_rcu(sk, laddr_hb); |
739 | hlist_add_head(&llc->dev_hash_node, dev_hb); | 740 | hlist_add_head(&llc->dev_hash_node, dev_hb); |
diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c index e0d8ca03169a..44860505246d 100644 --- a/net/rxrpc/call_accept.c +++ b/net/rxrpc/call_accept.c | |||
@@ -337,7 +337,7 @@ struct rxrpc_call *rxrpc_new_incoming_call(struct rxrpc_local *local, | |||
337 | { | 337 | { |
338 | struct rxrpc_skb_priv *sp = rxrpc_skb(skb); | 338 | struct rxrpc_skb_priv *sp = rxrpc_skb(skb); |
339 | struct rxrpc_connection *conn; | 339 | struct rxrpc_connection *conn; |
340 | struct rxrpc_peer *peer; | 340 | struct rxrpc_peer *peer = NULL; |
341 | struct rxrpc_call *call; | 341 | struct rxrpc_call *call; |
342 | 342 | ||
343 | _enter(""); | 343 | _enter(""); |
diff --git a/net/rxrpc/local_object.c b/net/rxrpc/local_object.c index cad0691c2bb4..0906e51d3cfb 100644 --- a/net/rxrpc/local_object.c +++ b/net/rxrpc/local_object.c | |||
@@ -139,7 +139,7 @@ static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net) | |||
139 | udp_sk(usk)->gro_complete = NULL; | 139 | udp_sk(usk)->gro_complete = NULL; |
140 | 140 | ||
141 | udp_encap_enable(); | 141 | udp_encap_enable(); |
142 | #if IS_ENABLED(CONFIG_IPV6) | 142 | #if IS_ENABLED(CONFIG_AF_RXRPC_IPV6) |
143 | if (local->srx.transport.family == AF_INET6) | 143 | if (local->srx.transport.family == AF_INET6) |
144 | udpv6_encap_enable(); | 144 | udpv6_encap_enable(); |
145 | #endif | 145 | #endif |
diff --git a/net/rxrpc/output.c b/net/rxrpc/output.c index 0f0b499d1202..189418888839 100644 --- a/net/rxrpc/output.c +++ b/net/rxrpc/output.c | |||
@@ -572,7 +572,8 @@ void rxrpc_reject_packets(struct rxrpc_local *local) | |||
572 | whdr.flags ^= RXRPC_CLIENT_INITIATED; | 572 | whdr.flags ^= RXRPC_CLIENT_INITIATED; |
573 | whdr.flags &= RXRPC_CLIENT_INITIATED; | 573 | whdr.flags &= RXRPC_CLIENT_INITIATED; |
574 | 574 | ||
575 | ret = kernel_sendmsg(local->socket, &msg, iov, 2, size); | 575 | ret = kernel_sendmsg(local->socket, &msg, |
576 | iov, ioc, size); | ||
576 | if (ret < 0) | 577 | if (ret < 0) |
577 | trace_rxrpc_tx_fail(local->debug_id, 0, ret, | 578 | trace_rxrpc_tx_fail(local->debug_id, 0, ret, |
578 | rxrpc_tx_point_reject); | 579 | rxrpc_tx_point_reject); |
diff --git a/net/rxrpc/peer_event.c b/net/rxrpc/peer_event.c index 7feb611c7258..bc05af89fc38 100644 --- a/net/rxrpc/peer_event.c +++ b/net/rxrpc/peer_event.c | |||
@@ -197,6 +197,7 @@ void rxrpc_error_report(struct sock *sk) | |||
197 | rxrpc_store_error(peer, serr); | 197 | rxrpc_store_error(peer, serr); |
198 | rcu_read_unlock(); | 198 | rcu_read_unlock(); |
199 | rxrpc_free_skb(skb, rxrpc_skb_rx_freed); | 199 | rxrpc_free_skb(skb, rxrpc_skb_rx_freed); |
200 | rxrpc_put_peer(peer); | ||
200 | 201 | ||
201 | _leave(""); | 202 | _leave(""); |
202 | } | 203 | } |
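
The added rxrpc_put_peer() drops the reference taken when the peer was looked up earlier in rxrpc_error_report(), so this path no longer leaks a peer refcount. A generic, standalone sketch of the get/put pairing (this models the principle, not the rxrpc refcount API itself):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct peer {
        atomic_int refcnt;
};

static struct peer *peer_get(struct peer *p)
{
        atomic_fetch_add(&p->refcnt, 1);
        return p;
}

static void peer_put(struct peer *p)
{
        /* Free when the last reference is dropped. */
        if (atomic_fetch_sub(&p->refcnt, 1) == 1)
                free(p);
}

int main(void)
{
        struct peer *p = calloc(1, sizeof(*p));

        atomic_init(&p->refcnt, 1);
        peer_put(peer_get(p));   /* every successful get needs a matching put */
        peer_put(p);             /* drop the original reference */
        return 0;
}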
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index 43c8559aca56..f427a1e00e7e 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c | |||
@@ -31,6 +31,8 @@ | |||
31 | #include <net/pkt_sched.h> | 31 | #include <net/pkt_sched.h> |
32 | #include <net/pkt_cls.h> | 32 | #include <net/pkt_cls.h> |
33 | 33 | ||
34 | extern const struct nla_policy rtm_tca_policy[TCA_MAX + 1]; | ||
35 | |||
34 | /* The list of all installed classifier types */ | 36 | /* The list of all installed classifier types */ |
35 | static LIST_HEAD(tcf_proto_base); | 37 | static LIST_HEAD(tcf_proto_base); |
36 | 38 | ||
@@ -1304,7 +1306,7 @@ static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n, | |||
1304 | replay: | 1306 | replay: |
1305 | tp_created = 0; | 1307 | tp_created = 0; |
1306 | 1308 | ||
1307 | err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack); | 1309 | err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack); |
1308 | if (err < 0) | 1310 | if (err < 0) |
1309 | return err; | 1311 | return err; |
1310 | 1312 | ||
@@ -1454,7 +1456,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n, | |||
1454 | if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) | 1456 | if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) |
1455 | return -EPERM; | 1457 | return -EPERM; |
1456 | 1458 | ||
1457 | err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack); | 1459 | err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack); |
1458 | if (err < 0) | 1460 | if (err < 0) |
1459 | return err; | 1461 | return err; |
1460 | 1462 | ||
@@ -1570,7 +1572,7 @@ static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n, | |||
1570 | void *fh = NULL; | 1572 | void *fh = NULL; |
1571 | int err; | 1573 | int err; |
1572 | 1574 | ||
1573 | err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack); | 1575 | err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack); |
1574 | if (err < 0) | 1576 | if (err < 0) |
1575 | return err; | 1577 | return err; |
1576 | 1578 | ||
@@ -1937,7 +1939,7 @@ static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n, | |||
1937 | return -EPERM; | 1939 | return -EPERM; |
1938 | 1940 | ||
1939 | replay: | 1941 | replay: |
1940 | err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, NULL, extack); | 1942 | err = nlmsg_parse(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack); |
1941 | if (err < 0) | 1943 | if (err < 0) |
1942 | return err; | 1944 | return err; |
1943 | 1945 | ||
@@ -2055,7 +2057,7 @@ static int tc_dump_chain(struct sk_buff *skb, struct netlink_callback *cb) | |||
2055 | if (nlmsg_len(cb->nlh) < sizeof(*tcm)) | 2057 | if (nlmsg_len(cb->nlh) < sizeof(*tcm)) |
2056 | return skb->len; | 2058 | return skb->len; |
2057 | 2059 | ||
2058 | err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, NULL, | 2060 | err = nlmsg_parse(cb->nlh, sizeof(*tcm), tca, TCA_MAX, rtm_tca_policy, |
2059 | cb->extack); | 2061 | cb->extack); |
2060 | if (err) | 2062 | if (err) |
2061 | return err; | 2063 | return err; |
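
All of these cls_api entry points previously parsed TCA attributes with a NULL policy, which skips per-attribute validation; they now share rtm_tca_policy from sch_api.c (declared via the extern added above). A kernel-style sketch of the pattern using the nlmsg_parse() signature seen in the diff; the demo_ identifiers are illustrative only:

static const struct nla_policy demo_policy[TCA_MAX + 1] = {
        [TCA_KIND]    = { .type = NLA_STRING },
        [TCA_OPTIONS] = { .type = NLA_NESTED },
};

static int demo_parse(struct nlmsghdr *n, struct netlink_ext_ack *extack)
{
        struct nlattr *tb[TCA_MAX + 1];

        /* hdrlen skips the struct tcmsg that precedes the attributes;
         * passing a policy makes malformed attributes fail here with an
         * extack message instead of being accepted unchecked. */
        return nlmsg_parse(n, sizeof(struct tcmsg), tb, TCA_MAX,
                           demo_policy, extack);
}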
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index cf5c714ae786..022bca98bde6 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
@@ -1318,10 +1318,6 @@ check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w) | |||
1318 | return 0; | 1318 | return 0; |
1319 | } | 1319 | } |
1320 | 1320 | ||
1321 | /* | ||
1322 | * Delete/get qdisc. | ||
1323 | */ | ||
1324 | |||
1325 | const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = { | 1321 | const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = { |
1326 | [TCA_KIND] = { .type = NLA_STRING }, | 1322 | [TCA_KIND] = { .type = NLA_STRING }, |
1327 | [TCA_OPTIONS] = { .type = NLA_NESTED }, | 1323 | [TCA_OPTIONS] = { .type = NLA_NESTED }, |
@@ -1334,6 +1330,10 @@ const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = { | |||
1334 | [TCA_EGRESS_BLOCK] = { .type = NLA_U32 }, | 1330 | [TCA_EGRESS_BLOCK] = { .type = NLA_U32 }, |
1335 | }; | 1331 | }; |
1336 | 1332 | ||
1333 | /* | ||
1334 | * Delete/get qdisc. | ||
1335 | */ | ||
1336 | |||
1337 | static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, | 1337 | static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, |
1338 | struct netlink_ext_ack *extack) | 1338 | struct netlink_ext_ack *extack) |
1339 | { | 1339 | { |
@@ -2070,7 +2070,8 @@ static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb, | |||
2070 | 2070 | ||
2071 | if (tcm->tcm_parent) { | 2071 | if (tcm->tcm_parent) { |
2072 | q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent)); | 2072 | q = qdisc_match_from_root(root, TC_H_MAJ(tcm->tcm_parent)); |
2073 | if (q && tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0) | 2073 | if (q && q != root && |
2074 | tc_dump_tclass_qdisc(q, skb, tcm, cb, t_p, s_t) < 0) | ||
2074 | return -1; | 2075 | return -1; |
2075 | return 0; | 2076 | return 0; |
2076 | } | 2077 | } |
diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 297d9cf960b9..a827a1f562bf 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c | |||
@@ -1450,7 +1450,8 @@ void sctp_assoc_sync_pmtu(struct sctp_association *asoc) | |||
1450 | /* Get the lowest pmtu of all the transports. */ | 1450 | /* Get the lowest pmtu of all the transports. */ |
1451 | list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) { | 1451 | list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) { |
1452 | if (t->pmtu_pending && t->dst) { | 1452 | if (t->pmtu_pending && t->dst) { |
1453 | sctp_transport_update_pmtu(t, sctp_dst_mtu(t->dst)); | 1453 | sctp_transport_update_pmtu(t, |
1454 | atomic_read(&t->mtu_info)); | ||
1454 | t->pmtu_pending = 0; | 1455 | t->pmtu_pending = 0; |
1455 | } | 1456 | } |
1456 | if (!pmtu || (t->pathmtu < pmtu)) | 1457 | if (!pmtu || (t->pathmtu < pmtu)) |
diff --git a/net/sctp/input.c b/net/sctp/input.c index 9bbc5f92c941..5c36a99882ed 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c | |||
@@ -395,6 +395,7 @@ void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc, | |||
395 | return; | 395 | return; |
396 | 396 | ||
397 | if (sock_owned_by_user(sk)) { | 397 | if (sock_owned_by_user(sk)) { |
398 | atomic_set(&t->mtu_info, pmtu); | ||
398 | asoc->pmtu_pending = 1; | 399 | asoc->pmtu_pending = 1; |
399 | t->pmtu_pending = 1; | 400 | t->pmtu_pending = 1; |
400 | return; | 401 | return; |
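
The ICMP path cannot take the socket lock, so it now stashes the reported PMTU in t->mtu_info atomically and lets the lock holder apply it later (the associola.c hunk above reads it back with atomic_read()). A userspace model of that deferred-update pattern, using C11 atomics as a stand-in for the kernel's atomic_t:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct transport {
        atomic_uint  mtu_info;       /* written from the "ICMP" context */
        bool         pmtu_pending;   /* consumed by the lock holder */
        unsigned int pathmtu;
};

static void icmp_frag_needed(struct transport *t, unsigned int pmtu)
{
        atomic_store(&t->mtu_info, pmtu);  /* record the reported PMTU */
        t->pmtu_pending = true;            /* ask the owner to sync later */
}

static void sync_pmtu(struct transport *t)
{
        if (t->pmtu_pending) {
                t->pathmtu = atomic_load(&t->mtu_info);
                t->pmtu_pending = false;
        }
}

int main(void)
{
        struct transport t = { .pathmtu = 1500 };

        icmp_frag_needed(&t, 1280);
        sync_pmtu(&t);
        printf("pathmtu = %u\n", t.pathmtu);
        return 0;
}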
diff --git a/net/sctp/output.c b/net/sctp/output.c index 7f849b01ec8e..67939ad99c01 100644 --- a/net/sctp/output.c +++ b/net/sctp/output.c | |||
@@ -120,6 +120,12 @@ void sctp_packet_config(struct sctp_packet *packet, __u32 vtag, | |||
120 | sctp_assoc_sync_pmtu(asoc); | 120 | sctp_assoc_sync_pmtu(asoc); |
121 | } | 121 | } |
122 | 122 | ||
123 | if (asoc->pmtu_pending) { | ||
124 | if (asoc->param_flags & SPP_PMTUD_ENABLE) | ||
125 | sctp_assoc_sync_pmtu(asoc); | ||
126 | asoc->pmtu_pending = 0; | ||
127 | } | ||
128 | |||
123 | /* If there a is a prepend chunk stick it on the list before | 129 | /* If there a is a prepend chunk stick it on the list before |
124 | * any other chunks get appended. | 130 | * any other chunks get appended. |
125 | */ | 131 | */ |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 111ebd89f0ab..fc0386e8ff23 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -253,11 +253,10 @@ struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id) | |||
253 | 253 | ||
254 | spin_lock_bh(&sctp_assocs_id_lock); | 254 | spin_lock_bh(&sctp_assocs_id_lock); |
255 | asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id); | 255 | asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id); |
256 | if (asoc && (asoc->base.sk != sk || asoc->base.dead)) | ||
257 | asoc = NULL; | ||
256 | spin_unlock_bh(&sctp_assocs_id_lock); | 258 | spin_unlock_bh(&sctp_assocs_id_lock); |
257 | 259 | ||
258 | if (!asoc || (asoc->base.sk != sk) || asoc->base.dead) | ||
259 | return NULL; | ||
260 | |||
261 | return asoc; | 260 | return asoc; |
262 | } | 261 | } |
263 | 262 | ||
@@ -1928,8 +1927,10 @@ static int sctp_sendmsg_to_asoc(struct sctp_association *asoc, | |||
1928 | if (sp->strm_interleave) { | 1927 | if (sp->strm_interleave) { |
1929 | timeo = sock_sndtimeo(sk, 0); | 1928 | timeo = sock_sndtimeo(sk, 0); |
1930 | err = sctp_wait_for_connect(asoc, &timeo); | 1929 | err = sctp_wait_for_connect(asoc, &timeo); |
1931 | if (err) | 1930 | if (err) { |
1931 | err = -ESRCH; | ||
1932 | goto err; | 1932 | goto err; |
1933 | } | ||
1933 | } else { | 1934 | } else { |
1934 | wait_connect = true; | 1935 | wait_connect = true; |
1935 | } | 1936 | } |
@@ -7082,14 +7083,14 @@ static int sctp_getsockopt_pr_assocstatus(struct sock *sk, int len, | |||
7082 | } | 7083 | } |
7083 | 7084 | ||
7084 | policy = params.sprstat_policy; | 7085 | policy = params.sprstat_policy; |
7085 | if (policy & ~SCTP_PR_SCTP_MASK) | 7086 | if (!policy || (policy & ~(SCTP_PR_SCTP_MASK | SCTP_PR_SCTP_ALL))) |
7086 | goto out; | 7087 | goto out; |
7087 | 7088 | ||
7088 | asoc = sctp_id2assoc(sk, params.sprstat_assoc_id); | 7089 | asoc = sctp_id2assoc(sk, params.sprstat_assoc_id); |
7089 | if (!asoc) | 7090 | if (!asoc) |
7090 | goto out; | 7091 | goto out; |
7091 | 7092 | ||
7092 | if (policy == SCTP_PR_SCTP_NONE) { | 7093 | if (policy & SCTP_PR_SCTP_ALL) { |
7093 | params.sprstat_abandoned_unsent = 0; | 7094 | params.sprstat_abandoned_unsent = 0; |
7094 | params.sprstat_abandoned_sent = 0; | 7095 | params.sprstat_abandoned_sent = 0; |
7095 | for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) { | 7096 | for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) { |
@@ -7141,7 +7142,7 @@ static int sctp_getsockopt_pr_streamstatus(struct sock *sk, int len, | |||
7141 | } | 7142 | } |
7142 | 7143 | ||
7143 | policy = params.sprstat_policy; | 7144 | policy = params.sprstat_policy; |
7144 | if (policy & ~SCTP_PR_SCTP_MASK) | 7145 | if (!policy || (policy & ~(SCTP_PR_SCTP_MASK | SCTP_PR_SCTP_ALL))) |
7145 | goto out; | 7146 | goto out; |
7146 | 7147 | ||
7147 | asoc = sctp_id2assoc(sk, params.sprstat_assoc_id); | 7148 | asoc = sctp_id2assoc(sk, params.sprstat_assoc_id); |
@@ -7157,7 +7158,7 @@ static int sctp_getsockopt_pr_streamstatus(struct sock *sk, int len, | |||
7157 | goto out; | 7158 | goto out; |
7158 | } | 7159 | } |
7159 | 7160 | ||
7160 | if (policy == SCTP_PR_SCTP_NONE) { | 7161 | if (policy == SCTP_PR_SCTP_ALL) { |
7161 | params.sprstat_abandoned_unsent = 0; | 7162 | params.sprstat_abandoned_unsent = 0; |
7162 | params.sprstat_abandoned_sent = 0; | 7163 | params.sprstat_abandoned_sent = 0; |
7163 | for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) { | 7164 | for (policy = 0; policy <= SCTP_PR_INDEX(MAX); policy++) { |
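
Both getsockopt handlers now reject a zero policy and accept the SCTP_PR_SCTP_ALL selector alongside the per-policy mask; as the hunks show, the assocstatus path tests the ALL bit while the streamstatus path compares for exact equality. A tiny standalone version of the validation check, with made-up bit values standing in for the real constants:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PR_POLICY_MASK 0x0030u   /* invented stand-in for SCTP_PR_SCTP_MASK */
#define PR_ALL         0x0040u   /* invented stand-in for SCTP_PR_SCTP_ALL  */

static bool pr_policy_valid(uint32_t policy)
{
        if (!policy)
                return false;                   /* zero selects nothing */
        /* reject any bit outside the known policy bits and the ALL bit */
        return (policy & ~(PR_POLICY_MASK | PR_ALL)) == 0;
}

int main(void)
{
        printf("%d %d %d\n", pr_policy_valid(0),
               pr_policy_valid(0x0010), pr_policy_valid(0x0100));
        return 0;
}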
diff --git a/net/socket.c b/net/socket.c index 713dc4833d40..b68801c7d0ab 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -2875,9 +2875,14 @@ static int ethtool_ioctl(struct net *net, struct compat_ifreq __user *ifr32) | |||
2875 | copy_in_user(&rxnfc->fs.ring_cookie, | 2875 | copy_in_user(&rxnfc->fs.ring_cookie, |
2876 | &compat_rxnfc->fs.ring_cookie, | 2876 | &compat_rxnfc->fs.ring_cookie, |
2877 | (void __user *)(&rxnfc->fs.location + 1) - | 2877 | (void __user *)(&rxnfc->fs.location + 1) - |
2878 | (void __user *)&rxnfc->fs.ring_cookie) || | 2878 | (void __user *)&rxnfc->fs.ring_cookie)) |
2879 | copy_in_user(&rxnfc->rule_cnt, &compat_rxnfc->rule_cnt, | 2879 | return -EFAULT; |
2880 | sizeof(rxnfc->rule_cnt))) | 2880 | if (ethcmd == ETHTOOL_GRXCLSRLALL) { |
2881 | if (put_user(rule_cnt, &rxnfc->rule_cnt)) | ||
2882 | return -EFAULT; | ||
2883 | } else if (copy_in_user(&rxnfc->rule_cnt, | ||
2884 | &compat_rxnfc->rule_cnt, | ||
2885 | sizeof(rxnfc->rule_cnt))) | ||
2881 | return -EFAULT; | 2886 | return -EFAULT; |
2882 | } | 2887 | } |
2883 | 2888 | ||
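
For ETHTOOL_GRXCLSRLALL the compat path now writes back the rule_cnt value it captured on the first copy from userspace instead of copying the field from the compat struct a second time, closing the window in which userspace could change the count between the two reads. A userspace model of that single-fetch pattern (the 128-rule cap is invented for the example):

#include <stdint.h>
#include <stdio.h>

struct req {
        uint32_t rule_cnt;
};

static uint32_t handle(volatile struct req *untrusted)
{
        uint32_t cnt = untrusted->rule_cnt;     /* single fetch */

        if (cnt > 128)
                cnt = 128;                      /* validate the captured copy */
        /* ... size buffers and act based on cnt, never re-reading it ... */
        return cnt;                             /* report the value actually used */
}

int main(void)
{
        struct req r = { .rule_cnt = 1000 };

        printf("used %u rules\n", handle(&r));
        return 0;
}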
diff --git a/net/tipc/group.c b/net/tipc/group.c index e82f13cb2dc5..06fee142f09f 100644 --- a/net/tipc/group.c +++ b/net/tipc/group.c | |||
@@ -666,6 +666,7 @@ static void tipc_group_create_event(struct tipc_group *grp, | |||
666 | struct sk_buff *skb; | 666 | struct sk_buff *skb; |
667 | struct tipc_msg *hdr; | 667 | struct tipc_msg *hdr; |
668 | 668 | ||
669 | memset(&evt, 0, sizeof(evt)); | ||
669 | evt.event = event; | 670 | evt.event = event; |
670 | evt.found_lower = m->instance; | 671 | evt.found_lower = m->instance; |
671 | evt.found_upper = m->instance; | 672 | evt.found_upper = m->instance; |
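
tipc_group_create_event() builds the event descriptor on the stack and sets only a few of its fields before it is copied into the outgoing message, so the added memset() keeps uninitialized stack bytes and padding holes off the wire. A standalone sketch of the same pattern, with an invented struct layout:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct member_event {            /* invented layout, not TIPC's real struct */
        uint32_t event;
        uint32_t found_lower;
        uint32_t found_upper;
        uint8_t  reserved[4];    /* would otherwise carry stale stack bytes */
};

static void build_event(struct member_event *evt, uint32_t event, uint32_t inst)
{
        memset(evt, 0, sizeof(*evt));   /* zero everything not set below */
        evt->event = event;
        evt->found_lower = inst;
        evt->found_upper = inst;
}

int main(void)
{
        struct member_event evt;

        build_event(&evt, 1, 42);
        printf("event=%u lower=%u upper=%u\n",
               evt.event, evt.found_lower, evt.found_upper);
        return 0;
}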
diff --git a/net/tipc/link.c b/net/tipc/link.c index f6552e4f4b43..201c3b5bc96b 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c | |||
@@ -1041,6 +1041,7 @@ static int tipc_link_retrans(struct tipc_link *l, struct tipc_link *r, | |||
1041 | if (r->last_retransm != buf_seqno(skb)) { | 1041 | if (r->last_retransm != buf_seqno(skb)) { |
1042 | r->last_retransm = buf_seqno(skb); | 1042 | r->last_retransm = buf_seqno(skb); |
1043 | r->stale_limit = jiffies + msecs_to_jiffies(r->tolerance); | 1043 | r->stale_limit = jiffies + msecs_to_jiffies(r->tolerance); |
1044 | r->stale_cnt = 0; | ||
1044 | } else if (++r->stale_cnt > 99 && time_after(jiffies, r->stale_limit)) { | 1045 | } else if (++r->stale_cnt > 99 && time_after(jiffies, r->stale_limit)) { |
1045 | link_retransmit_failure(l, skb); | 1046 | link_retransmit_failure(l, skb); |
1046 | if (link_is_bc_sndlink(l)) | 1047 | if (link_is_bc_sndlink(l)) |
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c index 51b4b96f89db..3cfeb9df64b0 100644 --- a/net/tipc/name_distr.c +++ b/net/tipc/name_distr.c | |||
@@ -115,7 +115,7 @@ struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ) | |||
115 | struct sk_buff *buf; | 115 | struct sk_buff *buf; |
116 | struct distr_item *item; | 116 | struct distr_item *item; |
117 | 117 | ||
118 | list_del(&publ->binding_node); | 118 | list_del_rcu(&publ->binding_node); |
119 | 119 | ||
120 | if (publ->scope == TIPC_NODE_SCOPE) | 120 | if (publ->scope == TIPC_NODE_SCOPE) |
121 | return NULL; | 121 | return NULL; |
@@ -147,7 +147,7 @@ static void named_distribute(struct net *net, struct sk_buff_head *list, | |||
147 | ITEM_SIZE) * ITEM_SIZE; | 147 | ITEM_SIZE) * ITEM_SIZE; |
148 | u32 msg_rem = msg_dsz; | 148 | u32 msg_rem = msg_dsz; |
149 | 149 | ||
150 | list_for_each_entry(publ, pls, binding_node) { | 150 | list_for_each_entry_rcu(publ, pls, binding_node) { |
151 | /* Prepare next buffer: */ | 151 | /* Prepare next buffer: */ |
152 | if (!skb) { | 152 | if (!skb) { |
153 | skb = named_prepare_buf(net, PUBLICATION, msg_rem, | 153 | skb = named_prepare_buf(net, PUBLICATION, msg_rem, |
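
named_distribute() now walks the publication list with list_for_each_entry_rcu(), so withdrawals must unlink with list_del_rcu() to keep a concurrent reader from following a just-poisoned pointer. A kernel-style sketch of that pairing; freeing the entry still has to wait for a grace period (for example via kfree_rcu()), which is assumed to happen elsewhere:

struct pub {
        struct list_head binding_node;
        u32 key;
};

/* Writer side: unlink so an RCU reader mid-walk still sees a valid next
 * pointer; the entry may only be freed after a grace period. */
static void demo_withdraw(struct pub *p)
{
        list_del_rcu(&p->binding_node);
}

/* Reader side: the _rcu iterator plus rcu_read_lock() lets the walk run
 * without taking the writer's lock. */
static void demo_walk(struct list_head *head)
{
        struct pub *p;

        rcu_read_lock();
        list_for_each_entry_rcu(p, head, binding_node)
                (void)p->key;
        rcu_read_unlock();
}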
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index 0577cd49aa72..07156f43d295 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c | |||
@@ -754,6 +754,8 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol, | |||
754 | sk->sk_destruct = xsk_destruct; | 754 | sk->sk_destruct = xsk_destruct; |
755 | sk_refcnt_debug_inc(sk); | 755 | sk_refcnt_debug_inc(sk); |
756 | 756 | ||
757 | sock_set_flag(sk, SOCK_RCU_FREE); | ||
758 | |||
757 | xs = xdp_sk(sk); | 759 | xs = xdp_sk(sk); |
758 | mutex_init(&xs->mutex); | 760 | mutex_init(&xs->mutex); |
759 | spin_lock_init(&xs->tx_completion_lock); | 761 | spin_lock_init(&xs->tx_completion_lock); |
diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c index abafd49cc65d..d679fa0f44b3 100644 --- a/net/xfrm/xfrm_interface.c +++ b/net/xfrm/xfrm_interface.c | |||
@@ -116,6 +116,9 @@ static void xfrmi_unlink(struct xfrmi_net *xfrmn, struct xfrm_if *xi) | |||
116 | 116 | ||
117 | static void xfrmi_dev_free(struct net_device *dev) | 117 | static void xfrmi_dev_free(struct net_device *dev) |
118 | { | 118 | { |
119 | struct xfrm_if *xi = netdev_priv(dev); | ||
120 | |||
121 | gro_cells_destroy(&xi->gro_cells); | ||
119 | free_percpu(dev->tstats); | 122 | free_percpu(dev->tstats); |
120 | } | 123 | } |
121 | 124 | ||
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index f094d4b3520d..119a427d9b2b 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -632,9 +632,9 @@ static void xfrm_hash_rebuild(struct work_struct *work) | |||
632 | break; | 632 | break; |
633 | } | 633 | } |
634 | if (newpos) | 634 | if (newpos) |
635 | hlist_add_behind(&policy->bydst, newpos); | 635 | hlist_add_behind_rcu(&policy->bydst, newpos); |
636 | else | 636 | else |
637 | hlist_add_head(&policy->bydst, chain); | 637 | hlist_add_head_rcu(&policy->bydst, chain); |
638 | } | 638 | } |
639 | 639 | ||
640 | spin_unlock_bh(&net->xfrm.xfrm_policy_lock); | 640 | spin_unlock_bh(&net->xfrm.xfrm_policy_lock); |
@@ -774,9 +774,9 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) | |||
774 | break; | 774 | break; |
775 | } | 775 | } |
776 | if (newpos) | 776 | if (newpos) |
777 | hlist_add_behind(&policy->bydst, newpos); | 777 | hlist_add_behind_rcu(&policy->bydst, newpos); |
778 | else | 778 | else |
779 | hlist_add_head(&policy->bydst, chain); | 779 | hlist_add_head_rcu(&policy->bydst, chain); |
780 | __xfrm_policy_link(policy, dir); | 780 | __xfrm_policy_link(policy, dir); |
781 | 781 | ||
782 | /* After previous checking, family can either be AF_INET or AF_INET6 */ | 782 | /* After previous checking, family can either be AF_INET or AF_INET6 */ |
diff --git a/tools/testing/selftests/net/reuseport_bpf.c b/tools/testing/selftests/net/reuseport_bpf.c index cad14cd0ea92..b5277106df1f 100644 --- a/tools/testing/selftests/net/reuseport_bpf.c +++ b/tools/testing/selftests/net/reuseport_bpf.c | |||
@@ -437,14 +437,19 @@ void enable_fastopen(void) | |||
437 | } | 437 | } |
438 | } | 438 | } |
439 | 439 | ||
440 | static struct rlimit rlim_old, rlim_new; | 440 | static struct rlimit rlim_old; |
441 | 441 | ||
442 | static __attribute__((constructor)) void main_ctor(void) | 442 | static __attribute__((constructor)) void main_ctor(void) |
443 | { | 443 | { |
444 | getrlimit(RLIMIT_MEMLOCK, &rlim_old); | 444 | getrlimit(RLIMIT_MEMLOCK, &rlim_old); |
445 | rlim_new.rlim_cur = rlim_old.rlim_cur + (1UL << 20); | 445 | |
446 | rlim_new.rlim_max = rlim_old.rlim_max + (1UL << 20); | 446 | if (rlim_old.rlim_cur != RLIM_INFINITY) { |
447 | setrlimit(RLIMIT_MEMLOCK, &rlim_new); | 447 | struct rlimit rlim_new; |
448 | |||
449 | rlim_new.rlim_cur = rlim_old.rlim_cur + (1UL << 20); | ||
450 | rlim_new.rlim_max = rlim_old.rlim_max + (1UL << 20); | ||
451 | setrlimit(RLIMIT_MEMLOCK, &rlim_new); | ||
452 | } | ||
448 | } | 453 | } |
449 | 454 | ||
450 | static __attribute__((destructor)) void main_dtor(void) | 455 | static __attribute__((destructor)) void main_dtor(void) |
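
main_ctor() now bumps RLIMIT_MEMLOCK only when the current limit is finite: adding 1 MiB to RLIM_INFINITY (an all-ones value) would wrap around and effectively lower the limit. A standalone version of the guarded bump:

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
        struct rlimit old, bumped;

        if (getrlimit(RLIMIT_MEMLOCK, &old))
                return 1;

        if (old.rlim_cur != RLIM_INFINITY) {
                /* raise both limits by 1 MiB, mirroring the test's bump */
                bumped.rlim_cur = old.rlim_cur + (1UL << 20);
                bumped.rlim_max = old.rlim_max + (1UL << 20);
                if (setrlimit(RLIMIT_MEMLOCK, &bumped))
                        perror("setrlimit");
        } else {
                puts("RLIMIT_MEMLOCK already unlimited, nothing to do");
        }

        return 0;
}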