335 files changed, 2388 insertions, 3023 deletions
diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt index 304bf22bb83c..fc1c884fea10 100644 --- a/Documentation/arm64/silicon-errata.txt +++ b/Documentation/arm64/silicon-errata.txt | |||
| @@ -75,3 +75,4 @@ stable kernels. | |||
| 75 | | Qualcomm Tech. | Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 | | 75 | | Qualcomm Tech. | Falkor v1 | E1003 | QCOM_FALKOR_ERRATUM_1003 | |
| 76 | | Qualcomm Tech. | Falkor v1 | E1009 | QCOM_FALKOR_ERRATUM_1009 | | 76 | | Qualcomm Tech. | Falkor v1 | E1009 | QCOM_FALKOR_ERRATUM_1009 | |
| 77 | | Qualcomm Tech. | QDF2400 ITS | E0065 | QCOM_QDF2400_ERRATUM_0065 | | 77 | | Qualcomm Tech. | QDF2400 ITS | E0065 | QCOM_QDF2400_ERRATUM_0065 | |
| 78 | | Qualcomm Tech. | Falkor v{1,2} | E1041 | QCOM_FALKOR_ERRATUM_1041 | | ||
diff --git a/Documentation/cgroup-v2.txt b/Documentation/cgroup-v2.txt index 779211fbb69f..2cddab7efb20 100644 --- a/Documentation/cgroup-v2.txt +++ b/Documentation/cgroup-v2.txt | |||
| @@ -898,6 +898,13 @@ controller implements weight and absolute bandwidth limit models for | |||
| 898 | normal scheduling policy and absolute bandwidth allocation model for | 898 | normal scheduling policy and absolute bandwidth allocation model for |
| 899 | realtime scheduling policy. | 899 | realtime scheduling policy. |
| 900 | 900 | ||
| 901 | WARNING: cgroup2 doesn't yet support control of realtime processes and | ||
| 902 | the cpu controller can only be enabled when all RT processes are in | ||
| 903 | the root cgroup. Be aware that system management software may already | ||
| 904 | have placed RT processes into nonroot cgroups during the system boot | ||
| 905 | process, and these processes may need to be moved to the root cgroup | ||
| 906 | before the cpu controller can be enabled. | ||
| 907 | |||
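For illustration, the remediation described above can also be done
programmatically. A minimal userspace sketch (the PID and the cgroup2
mount point are placeholder assumptions, not part of this document):

    /*
     * Sketch only: move one (assumed RT) task to the root cgroup,
     * then enable the cpu controller for child cgroups.
     */
    #include <stdio.h>

    static int write_str(const char *path, const char *val)
    {
            FILE *f = fopen(path, "w");

            if (!f)
                    return -1;
            fputs(val, f);
            return fclose(f);
    }

    int main(void)
    {
            /* move the RT task (placeholder PID) to the root cgroup */
            write_str("/sys/fs/cgroup/cgroup.procs", "1234");
            /* now the cpu controller can be enabled */
            return write_str("/sys/fs/cgroup/cgroup.subtree_control", "+cpu");
    }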
| 901 | 908 | ||
| 902 | CPU Interface Files | 909 | CPU Interface Files |
| 903 | ~~~~~~~~~~~~~~~~~~~ | 910 | ~~~~~~~~~~~~~~~~~~~ |
diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt index 8caa60734647..e6a5f4912b6d 100644 --- a/Documentation/filesystems/overlayfs.txt +++ b/Documentation/filesystems/overlayfs.txt | |||
| @@ -156,6 +156,40 @@ handle it in two different ways: | |||
| 156 | root of the overlay. Finally the directory is moved to the new | 156 | root of the overlay. Finally the directory is moved to the new |
| 157 | location. | 157 | location. |
| 158 | 158 | ||
| 159 | There are several ways to tune the "redirect_dir" feature. | ||
| 160 | |||
| 161 | Kernel config options: | ||
| 162 | |||
| 163 | - OVERLAY_FS_REDIRECT_DIR: | ||
| 164 | If this is enabled, then redirect_dir is turned on by default. | ||
| 165 | - OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW: | ||
| 166 | If this is enabled, then redirects are always followed by default. Enabling | ||
| 167 | this results in a less secure configuration. Enable this option only when | ||
| 168 | worried about backward compatibility with kernels that have the redirect_dir | ||
| 169 | feature and follow redirects even if turned off. | ||
| 170 | |||
| 171 | Module options (can also be changed through /sys/module/overlay/parameters/*): | ||
| 172 | |||
| 173 | - "redirect_dir=BOOL": | ||
| 174 | See OVERLAY_FS_REDIRECT_DIR kernel config option above. | ||
| 175 | - "redirect_always_follow=BOOL": | ||
| 176 | See OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW kernel config option above. | ||
| 177 | - "redirect_max=NUM": | ||
| 178 | The maximum number of bytes in an absolute redirect (default is 256). | ||
| 179 | |||
| 180 | Mount options (a usage sketch follows this list): | ||
| 181 | |||
| 182 | - "redirect_dir=on": | ||
| 183 | Redirects are enabled. | ||
| 184 | - "redirect_dir=follow": | ||
| 185 | Redirects are not created, but followed. | ||
| 186 | - "redirect_dir=off": | ||
| 187 | Redirects are not created and only followed if "redirect_always_follow" | ||
| 188 | feature is enabled in the kernel/module config. | ||
| 189 | - "redirect_dir=nofollow": | ||
| 190 | Redirects are not created and not followed (equivalent to "redirect_dir=off" | ||
| 191 | if "redirect_always_follow" feature is not enabled). | ||
| 192 | |||
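As a usage sketch for the mount options above (the lower/upper/work and
mount-point paths are placeholders, not taken from this document):

    /* Sketch only: mount an overlay with redirects enabled. */
    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            const char *opts = "lowerdir=/lower,upperdir=/upper,"
                               "workdir=/work,redirect_dir=on";

            if (mount("overlay", "/merged", "overlay", 0, opts))
                    perror("mount");
            return 0;
    }
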
| 159 | Non-directories | 193 | Non-directories |
| 160 | --------------- | 194 | --------------- |
| 161 | 195 | ||
diff --git a/Documentation/locking/crossrelease.txt b/Documentation/locking/crossrelease.txt deleted file mode 100644 index bdf1423d5f99..000000000000 --- a/Documentation/locking/crossrelease.txt +++ /dev/null | |||
| @@ -1,874 +0,0 @@ | |||
| 1 | Crossrelease | ||
| 2 | ============ | ||
| 3 | |||
| 4 | Started by Byungchul Park <byungchul.park@lge.com> | ||
| 5 | |||
| 6 | Contents: | ||
| 7 | |||
| 8 | (*) Background | ||
| 9 | |||
| 10 | - What causes deadlock | ||
| 11 | - How lockdep works | ||
| 12 | |||
| 13 | (*) Limitation | ||
| 14 | |||
| 15 | - Limit lockdep | ||
| 16 | - Pros from the limitation | ||
| 17 | - Cons from the limitation | ||
| 18 | - Relax the limitation | ||
| 19 | |||
| 20 | (*) Crossrelease | ||
| 21 | |||
| 22 | - Introduce crossrelease | ||
| 23 | - Introduce commit | ||
| 24 | |||
| 25 | (*) Implementation | ||
| 26 | |||
| 27 | - Data structures | ||
| 28 | - How crossrelease works | ||
| 29 | |||
| 30 | (*) Optimizations | ||
| 31 | |||
| 32 | - Avoid duplication | ||
| 33 | - Lockless for hot paths | ||
| 34 | |||
| 35 | (*) APPENDIX A: What lockdep does to work aggressively | ||
| 36 | |||
| 37 | (*) APPENDIX B: How to avoid adding false dependencies | ||
| 38 | |||
| 39 | |||
| 40 | ========== | ||
| 41 | Background | ||
| 42 | ========== | ||
| 43 | |||
| 44 | What causes deadlock | ||
| 45 | -------------------- | ||
| 46 | |||
| 47 | A deadlock occurs when a context is waiting for an event to happen, | ||
| 48 | which is impossible because another (or the same) context that can | ||
| 49 | trigger the event is also waiting for another (or the same) event to | ||
| 50 | happen, which is likewise impossible for the same reason. | ||
| 51 | |||
| 52 | For example: | ||
| 53 | |||
| 54 | A context going to trigger event C is waiting for event A to happen. | ||
| 55 | A context going to trigger event A is waiting for event B to happen. | ||
| 56 | A context going to trigger event B is waiting for event C to happen. | ||
| 57 | |||
| 58 | A deadlock occurs when these three wait operations run at the same time, | ||
| 59 | because event C cannot be triggered if event A does not happen, which in | ||
| 60 | turn cannot be triggered if event B does not happen, which in turn | ||
| 61 | cannot be triggered if event C does not happen. After all, no event can | ||
| 62 | be triggered since none of them ever meets its condition to wake up. | ||
| 63 | |||
| 64 | A dependency might exist between two waiters and a deadlock might happen | ||
| 65 | due to an incorrect relationship between dependencies. Thus, we must | ||
| 66 | define what a dependency is first. A dependency exists between them if: | ||
| 67 | |||
| 68 | 1. There are two waiters waiting for each event at a given time. | ||
| 69 | 2. The only way to wake up each waiter is to trigger its event. | ||
| 70 | 3. Whether one can be woken up depends on whether the other can. | ||
| 71 | |||
| 72 | Each wait in the example creates its dependency like: | ||
| 73 | |||
| 74 | Event C depends on event A. | ||
| 75 | Event A depends on event B. | ||
| 76 | Event B depends on event C. | ||
| 77 | |||
| 78 | NOTE: Precisely speaking, a dependency is one between whether a | ||
| 79 | waiter for an event can be woken up and whether another waiter for | ||
| 80 | another event can be woken up. However from now on, we will describe | ||
| 81 | a dependency as if it's one between an event and another event for | ||
| 82 | simplicity. | ||
| 83 | |||
| 84 | And they form circular dependencies like: | ||
| 85 | |||
| 86 | -> C -> A -> B - | ||
| 87 | / \ | ||
| 88 | \ / | ||
| 89 | ---------------- | ||
| 90 | |||
| 91 | where 'A -> B' means that event A depends on event B. | ||
| 92 | |||
| 93 | Such circular dependencies lead to a deadlock since no waiter can meet | ||
| 94 | its condition to wake up as described. | ||
| 95 | |||
| 96 | CONCLUSION | ||
| 97 | |||
| 98 | Circular dependencies cause a deadlock. | ||
| 99 | |||
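As a concrete illustration (a userspace analogue, not from the original
text): two threads acquiring two mutexes in opposite orders create
exactly such a cycle, 'A -> B' and 'B -> A'.

    /* Can genuinely deadlock if both threads run concurrently. */
    #include <pthread.h>

    static pthread_mutex_t A = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t B = PTHREAD_MUTEX_INITIALIZER;

    static void *t1(void *arg)
    {
            pthread_mutex_lock(&A);
            pthread_mutex_lock(&B);         /* dependency 'A -> B' */
            pthread_mutex_unlock(&B);
            pthread_mutex_unlock(&A);
            return NULL;
    }

    static void *t2(void *arg)
    {
            pthread_mutex_lock(&B);
            pthread_mutex_lock(&A);         /* dependency 'B -> A': a cycle */
            pthread_mutex_unlock(&A);
            pthread_mutex_unlock(&B);
            return NULL;
    }

    int main(void)
    {
            pthread_t x, y;

            pthread_create(&x, NULL, t1, NULL);
            pthread_create(&y, NULL, t2, NULL);
            pthread_join(x, NULL);
            pthread_join(y, NULL);
            return 0;
    }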
| 100 | |||
| 101 | How lockdep works | ||
| 102 | ----------------- | ||
| 103 | |||
| 104 | Lockdep tries to detect a deadlock by checking dependencies created by | ||
| 105 | lock operations, acquire and release. Waiting for a lock corresponds to | ||
| 106 | waiting for an event, and releasing a lock corresponds to triggering an | ||
| 107 | event, in the terms of the previous section. | ||
| 108 | |||
| 109 | In short, lockdep does: | ||
| 110 | |||
| 111 | 1. Detect a new dependency. | ||
| 112 | 2. Add the dependency into a global graph. | ||
| 113 | 3. Check if that makes dependencies circular. | ||
| 114 | 4. Report a deadlock or its possibility if so. | ||
| 115 | |||
| 116 | For example, consider a graph built by lockdep that looks like: | ||
| 117 | |||
| 118 | A -> B - | ||
| 119 | \ | ||
| 120 | -> E | ||
| 121 | / | ||
| 122 | C -> D - | ||
| 123 | |||
| 124 | where A, B,..., E are different lock classes. | ||
| 125 | |||
| 126 | Lockdep will add a dependency into the graph on detection of a new | ||
| 127 | dependency. For example, it will add a dependency 'E -> C' when a new | ||
| 128 | dependency between lock E and lock C is detected. Then the graph will be: | ||
| 129 | |||
| 130 | A -> B - | ||
| 131 | \ | ||
| 132 | -> E - | ||
| 133 | / \ | ||
| 134 | -> C -> D - \ | ||
| 135 | / / | ||
| 136 | \ / | ||
| 137 | ------------------ | ||
| 138 | |||
| 139 | where A, B,..., E are different lock classes. | ||
| 140 | |||
| 141 | This graph contains a subgraph which demonstrates circular dependencies: | ||
| 142 | |||
| 143 | -> E - | ||
| 144 | / \ | ||
| 145 | -> C -> D - \ | ||
| 146 | / / | ||
| 147 | \ / | ||
| 148 | ------------------ | ||
| 149 | |||
| 150 | where C, D and E are different lock classes. | ||
| 151 | |||
| 152 | This is the condition under which a deadlock might occur. Lockdep | ||
| 153 | reports it on detection after adding a new dependency. This is how | ||
| 154 | lockdep works. | ||
| 155 | |||
| 156 | CONCLUSION | ||
| 157 | |||
| 158 | Lockdep detects a deadlock or its possibility by checking if circular | ||
| 159 | dependencies were created after adding each new dependency. | ||
| 160 | |||
| 161 | |||
| 162 | ========== | ||
| 163 | Limitation | ||
| 164 | ========== | ||
| 165 | |||
| 166 | Limit lockdep | ||
| 167 | ------------- | ||
| 168 | |||
| 169 | By limiting lockdep to work on only typical locks, e.g. spin locks and | ||
| 170 | mutexes, which are released within the acquire context, the | ||
| 171 | implementation becomes simple but its capacity for detection becomes | ||
| 172 | limited. Let's check the pros and cons in the next sections. | ||
| 173 | |||
| 174 | |||
| 175 | Pros from the limitation | ||
| 176 | ------------------------ | ||
| 177 | |||
| 178 | Given the limitation, when acquiring a lock, the locks in held_locks | ||
| 179 | cannot be released if the context cannot acquire the new lock and has | ||
| 180 | to wait for it, which means all waiters for the locks in held_locks are | ||
| 181 | stuck. This is exactly the case that creates dependencies between each | ||
| 182 | lock in held_locks and the lock to acquire. | ||
| 183 | |||
| 184 | For example: | ||
| 185 | |||
| 186 | CONTEXT X | ||
| 187 | --------- | ||
| 188 | acquire A | ||
| 189 | acquire B /* Add a dependency 'A -> B' */ | ||
| 190 | release B | ||
| 191 | release A | ||
| 192 | |||
| 193 | where A and B are different lock classes. | ||
| 194 | |||
| 195 | When acquiring lock A, the held_locks of CONTEXT X is empty thus no | ||
| 196 | dependency is added. But when acquiring lock B, lockdep detects and adds | ||
| 197 | a new dependency 'A -> B' between lock A in the held_locks and lock B. | ||
| 198 | They can be simply added whenever acquiring each lock. | ||
| 199 | |||
| 200 | And the data required by lockdep exists in a local structure, | ||
| 201 | held_locks, embedded in task_struct. By forcing the data to be accessed | ||
| 202 | only within its owner context, lockdep can avoid races without explicit | ||
| 203 | locks while handling the local data. | ||
| 204 | |||
| 205 | Lastly, lockdep only needs to keep the locks currently being held, to | ||
| 206 | build a dependency graph. However, with the limitation relaxed, it must | ||
| 207 | keep even locks already released, because the decision about whether | ||
| 208 | they created dependencies might be long-deferred. | ||
| 209 | |||
| 210 | To sum up, we can expect several advantages from the limitation: | ||
| 211 | |||
| 212 | 1. Lockdep can easily identify a dependency when acquiring a lock. | ||
| 213 | 2. Races are avoidable while accessing local locks in a held_locks. | ||
| 214 | 3. Lockdep only needs to keep locks currently being held. | ||
| 215 | |||
| 216 | CONCLUSION | ||
| 217 | |||
| 218 | Given the limitation, the implementation becomes simple and efficient. | ||
| 219 | |||
| 220 | |||
| 221 | Cons from the limitation | ||
| 222 | ------------------------ | ||
| 223 | |||
| 224 | Given the limitation, lockdep is applicable only to typical locks. For | ||
| 225 | example, page locks for page access or completions for synchronization | ||
| 226 | cannot work with lockdep. | ||
| 227 | |||
| 228 | Can we detect deadlocks below, under the limitation? | ||
| 229 | |||
| 230 | Example 1: | ||
| 231 | |||
| 232 | CONTEXT X CONTEXT Y CONTEXT Z | ||
| 233 | --------- --------- ---------- | ||
| 234 | mutex_lock A | ||
| 235 | lock_page B | ||
| 236 | lock_page B | ||
| 237 | mutex_lock A /* DEADLOCK */ | ||
| 238 | unlock_page B held by X | ||
| 239 | unlock_page B | ||
| 240 | mutex_unlock A | ||
| 241 | mutex_unlock A | ||
| 242 | |||
| 243 | where A and B are different lock classes. | ||
| 244 | |||
| 245 | No, we cannot. | ||
| 246 | |||
| 247 | Example 2: | ||
| 248 | |||
| 249 | CONTEXT X CONTEXT Y | ||
| 250 | --------- --------- | ||
| 251 | mutex_lock A | ||
| 252 | mutex_lock A | ||
| 253 | wait_for_complete B /* DEADLOCK */ | ||
| 254 | complete B | ||
| 255 | mutex_unlock A | ||
| 256 | mutex_unlock A | ||
| 257 | |||
| 258 | where A is a lock class and B is a completion variable. | ||
| 259 | |||
| 260 | No, we cannot. | ||
| 261 | |||
| 262 | CONCLUSION | ||
| 263 | |||
| 264 | Given the limitation, lockdep cannot detect a deadlock or its | ||
| 265 | possibility caused by page locks or completions. | ||
| 266 | |||
| 267 | |||
| 268 | Relax the limitation | ||
| 269 | -------------------- | ||
| 270 | |||
| 271 | Under the limitation, the things that create dependencies are limited | ||
| 272 | to typical locks. However, synchronization primitives like page locks | ||
| 273 | and completions, which are allowed to be released in any context, also | ||
| 274 | create dependencies and can cause a deadlock. So lockdep should track | ||
| 275 | these locks to do a better job. We have to relax the limitation for | ||
| 276 | these locks to work with lockdep. | ||
| 277 | |||
| 278 | Detecting dependencies is very important for lockdep to work because | ||
| 279 | adding a dependency means adding an opportunity to check whether it | ||
| 280 | causes a deadlock. The more dependencies lockdep adds, the more | ||
| 281 | thoroughly it works. Thus lockdep has to do its best to detect and add | ||
| 282 | as many true dependencies into the graph as possible. | ||
| 283 | |||
| 284 | For example, considering only typical locks, lockdep builds a graph like: | ||
| 285 | |||
| 286 | A -> B - | ||
| 287 | \ | ||
| 288 | -> E | ||
| 289 | / | ||
| 290 | C -> D - | ||
| 291 | |||
| 292 | where A, B,..., E are different lock classes. | ||
| 293 | |||
| 294 | On the other hand, under the relaxation, additional dependencies might | ||
| 295 | be created and added. Assuming additional 'FX -> C' and 'E -> GX' are | ||
| 296 | added thanks to the relaxation, the graph will be: | ||
| 297 | |||
| 298 | A -> B - | ||
| 299 | \ | ||
| 300 | -> E -> GX | ||
| 301 | / | ||
| 302 | FX -> C -> D - | ||
| 303 | |||
| 304 | where A, B,..., E, FX and GX are different lock classes, and a suffix | ||
| 305 | 'X' is added on non-typical locks. | ||
| 306 | |||
| 307 | The latter graph gives us more chances to check circular dependencies | ||
| 308 | than the former. However, it might suffer performance degradation, | ||
| 309 | since relaxing the limitation, which is what makes an efficient lockdep | ||
| 310 | design possible, inevitably introduces some inefficiency. So lockdep | ||
| 311 | should provide two options, strong detection and efficient detection. | ||
| 312 | |||
| 313 | Choosing efficient detection: | ||
| 314 | |||
| 315 | Lockdep works only with locks restricted to be released within the | ||
| 316 | acquire context. However, lockdep works efficiently. | ||
| 317 | |||
| 318 | Choosing strong detection: | ||
| 319 | |||
| 320 | Lockdep works with all synchronization primitives. However, lockdep | ||
| 321 | suffers performance degradation. | ||
| 322 | |||
| 323 | CONCLUSION | ||
| 324 | |||
| 325 | Relaxing the limitation, lockdep can add additional dependencies giving | ||
| 326 | additional opportunities to check circular dependencies. | ||
| 327 | |||
| 328 | |||
| 329 | ============ | ||
| 330 | Crossrelease | ||
| 331 | ============ | ||
| 332 | |||
| 333 | Introduce crossrelease | ||
| 334 | ---------------------- | ||
| 335 | |||
| 336 | In order to allow lockdep to handle additional dependencies created by | ||
| 337 | locks that might be released in any context, namely 'crosslocks', we | ||
| 338 | have to be able to identify the dependencies they create. The proposed | ||
| 339 | 'crossrelease' feature provides a way to do that. | ||
| 340 | |||
| 341 | The crossrelease feature has to: | ||
| 342 | |||
| 343 | 1. Identify dependencies created by crosslocks. | ||
| 344 | 2. Add the dependencies into a dependency graph. | ||
| 345 | |||
| 346 | That's all. Once a meaningful dependency is added into the graph, | ||
| 347 | lockdep works with the graph as it always did. The most important thing | ||
| 348 | the crossrelease feature has to do is correctly identify and add true | ||
| 349 | dependencies into the global graph. | ||
| 350 | |||
| 351 | A dependency, e.g. 'A -> B', can be identified only in A's release | ||
| 352 | context, because the decision required to identify the dependency can | ||
| 353 | be made only in the release context. That decision is whether A can be | ||
| 354 | released so that a waiter for A can be woken up, and it cannot be made | ||
| 355 | anywhere other than A's release context. | ||
| 356 | |||
| 357 | This doesn't matter for typical locks because each acquire context is | ||
| 358 | the same as its release context, thus lockdep can decide whether a lock | ||
| 359 | can be released in the acquire context. However for crosslocks, lockdep | ||
| 360 | cannot make the decision in the acquire context but has to wait until | ||
| 361 | the release context is identified. | ||
| 362 | |||
| 363 | Therefore, a deadlock caused by crosslocks cannot be detected just | ||
| 364 | when it happens, because it cannot be identified until the crosslocks | ||
| 365 | are released. However, deadlock possibilities can be detected, and that | ||
| 366 | is very worthwhile. See the 'APPENDIX A' section to check why. | ||
| 367 | |||
| 368 | CONCLUSION | ||
| 369 | |||
| 370 | Using crossrelease feature, lockdep can work with what might be released | ||
| 371 | in any context, namely crosslock. | ||
| 372 | |||
| 373 | |||
| 374 | Introduce commit | ||
| 375 | ---------------- | ||
| 376 | |||
| 377 | Since crossrelease defers the work of adding true dependencies of | ||
| 378 | crosslocks until they are actually released, crossrelease has to queue | ||
| 379 | all acquisitions which might create dependencies with the crosslocks. | ||
| 380 | Then it identifies dependencies using the queued data in batches at a | ||
| 381 | proper time. We call it 'commit'. | ||
| 382 | |||
| 383 | There are four types of dependencies: | ||
| 384 | |||
| 385 | 1. TT type: 'typical lock A -> typical lock B' | ||
| 386 | |||
| 387 | Just when acquiring B, lockdep can see it's in A's release | ||
| 388 | context. So the dependency between A and B can be identified | ||
| 389 | immediately. Commit is unnecessary. | ||
| 390 | |||
| 391 | 2. TC type: 'typical lock A -> crosslock BX' | ||
| 392 | |||
| 393 | Just when acquiring BX, lockdep can see it's in A's release | ||
| 394 | context. So the dependency between A and BX can be identified | ||
| 395 | immediately. Commit is unnecessary, too. | ||
| 396 | |||
| 397 | 3. CT type: 'crosslock AX -> typical lock B' | ||
| 398 | |||
| 399 | When acquiring B, lockdep cannot identify the dependency because | ||
| 400 | there's no way to know if it's in AX's release context. It has | ||
| 401 | to wait until the decision can be made. Commit is necessary. | ||
| 402 | |||
| 403 | 4. CC type: 'crosslock AX -> crosslock BX' | ||
| 404 | |||
| 405 | When acquiring BX, lockdep cannot identify the dependency because | ||
| 406 | there's no way to know if it's in AX's release context. It has | ||
| 407 | to wait until the decision can be made. Commit is necessary. | ||
| 408 | But, handling CC type is not implemented yet; it's future work. | ||
| 409 | |||
| 410 | Lockdep can work without commit for typical locks, but the commit step | ||
| 411 | is necessary once crosslocks are involved. With commit introduced, | ||
| 412 | lockdep performs three steps. What lockdep does in each step is: | ||
| 413 | |||
| 414 | 1. Acquisition: For typical locks, lockdep does what it originally did | ||
| 415 | and queues the lock so that CT type dependencies can be checked using | ||
| 416 | it at the commit step. For crosslocks, it saves data which will be | ||
| 417 | used at the commit step and increases a reference count for it. | ||
| 418 | |||
| 419 | 2. Commit: No action is required for typical locks. For crosslocks, | ||
| 420 | lockdep adds CT type dependencies using the data saved at the | ||
| 421 | acquisition step. | ||
| 422 | |||
| 423 | 3. Release: No changes are required for typical locks. When a crosslock | ||
| 424 | is released, it decreases a reference count for it. | ||
| 425 | |||
| 426 | CONCLUSION | ||
| 427 | |||
| 428 | Crossrelease introduces commit step to handle dependencies of crosslocks | ||
| 429 | in batches at a proper time. | ||
| 430 | |||
| 431 | |||
| 432 | ============== | ||
| 433 | Implementation | ||
| 434 | ============== | ||
| 435 | |||
| 436 | Data structures | ||
| 437 | --------------- | ||
| 438 | |||
| 439 | Crossrelease introduces two main data structures, sketched below. | ||
| 440 | |||
| 441 | 1. hist_lock | ||
| 442 | |||
| 443 | This is an array embedded in task_struct, for keeping lock history so | ||
| 444 | that dependencies can be added using it at the commit step. Since | ||
| 445 | it's local data, it can be accessed locklessly in the owner context. | ||
| 446 | The array is filled at the acquisition step and consumed at the | ||
| 447 | commit step. And it's managed in a circular manner. | ||
| 448 | |||
| 449 | 2. cross_lock | ||
| 450 | |||
| 451 | One exists per lockdep_map. This is for keeping data of crosslocks | ||
| 452 | and used at the commit step. | ||
| 453 | |||
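A rough sketch of the shapes these structures could take (the field
names here are hypothetical illustrations, not the actual
implementation):

    /* Hypothetical sketch only; the real fields differed. */
    struct hist_lock {
            unsigned int    hist_id;        /* position in the circular array */
            int             class_idx;      /* lock class of the acquisition */
            unsigned long   acquire_ip;     /* where it was acquired */
    };

    struct cross_lock {
            int             nr_acquire;     /* reference count while held */
            struct hist_lock xhlock;        /* state saved at acquisition */
    };
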
| 454 | |||
| 455 | How crossrelease works | ||
| 456 | ---------------------- | ||
| 457 | |||
| 458 | The key to how crossrelease works is to defer the necessary work to an | ||
| 459 | appropriate point in time and perform it all at once at the commit step. | ||
| 460 | Let's take a look with examples step by step, starting from how lockdep | ||
| 461 | works without crossrelease for typical locks. | ||
| 462 | |||
| 463 | acquire A /* Push A onto held_locks */ | ||
| 464 | acquire B /* Push B onto held_locks and add 'A -> B' */ | ||
| 465 | acquire C /* Push C onto held_locks and add 'B -> C' */ | ||
| 466 | release C /* Pop C from held_locks */ | ||
| 467 | release B /* Pop B from held_locks */ | ||
| 468 | release A /* Pop A from held_locks */ | ||
| 469 | |||
| 470 | where A, B and C are different lock classes. | ||
| 471 | |||
| 472 | NOTE: This document assumes that readers already understand how | ||
| 473 | lockdep works without crossrelease thus omits details. But there's | ||
| 474 | one thing to note. Lockdep pretends to pop a lock from held_locks | ||
| 475 | when releasing it. But it's subtly different from the original pop | ||
| 476 | operation because lockdep allows entries other than the top to be popped. | ||
| 477 | |||
| 478 | In this case, lockdep adds 'the top of held_locks -> the lock to acquire' | ||
| 479 | dependency every time acquiring a lock. | ||
| 480 | |||
| 481 | After adding 'A -> B', a dependency graph will be: | ||
| 482 | |||
| 483 | A -> B | ||
| 484 | |||
| 485 | where A and B are different lock classes. | ||
| 486 | |||
| 487 | And after adding 'B -> C', the graph will be: | ||
| 488 | |||
| 489 | A -> B -> C | ||
| 490 | |||
| 491 | where A, B and C are different lock classes. | ||
| 492 | |||
| 493 | Let's perform the commit step even for typical locks to add | ||
| 494 | dependencies. Of course, the commit step is not necessary for them; | ||
| 495 | however, it works just as well because it is a more general way. | ||
| 496 | |||
| 497 | acquire A | ||
| 498 | /* | ||
| 499 | * Queue A into hist_locks | ||
| 500 | * | ||
| 501 | * In hist_locks: A | ||
| 502 | * In graph: Empty | ||
| 503 | */ | ||
| 504 | |||
| 505 | acquire B | ||
| 506 | /* | ||
| 507 | * Queue B into hist_locks | ||
| 508 | * | ||
| 509 | * In hist_locks: A, B | ||
| 510 | * In graph: Empty | ||
| 511 | */ | ||
| 512 | |||
| 513 | acquire C | ||
| 514 | /* | ||
| 515 | * Queue C into hist_locks | ||
| 516 | * | ||
| 517 | * In hist_locks: A, B, C | ||
| 518 | * In graph: Empty | ||
| 519 | */ | ||
| 520 | |||
| 521 | commit C | ||
| 522 | /* | ||
| 523 | * Add 'C -> ?' | ||
| 524 | * Answer the following to decide '?' | ||
| 525 | * What has been queued since acquire C: Nothing | ||
| 526 | * | ||
| 527 | * In hist_locks: A, B, C | ||
| 528 | * In graph: Empty | ||
| 529 | */ | ||
| 530 | |||
| 531 | release C | ||
| 532 | |||
| 533 | commit B | ||
| 534 | /* | ||
| 535 | * Add 'B -> ?' | ||
| 536 | * Answer the following to decide '?' | ||
| 537 | * What has been queued since acquire B: C | ||
| 538 | * | ||
| 539 | * In hist_locks: A, B, C | ||
| 540 | * In graph: 'B -> C' | ||
| 541 | */ | ||
| 542 | |||
| 543 | release B | ||
| 544 | |||
| 545 | commit A | ||
| 546 | /* | ||
| 547 | * Add 'A -> ?' | ||
| 548 | * Answer the following to decide '?' | ||
| 549 | * What has been queued since acquire A: B, C | ||
| 550 | * | ||
| 551 | * In hist_locks: A, B, C | ||
| 552 | * In graph: 'B -> C', 'A -> B', 'A -> C' | ||
| 553 | */ | ||
| 554 | |||
| 555 | release A | ||
| 556 | |||
| 557 | where A, B and C are different lock classes. | ||
| 558 | |||
| 559 | In this case, dependencies are added at the commit step as described. | ||
| 560 | |||
| 561 | After commits for A, B and C, the graph will be: | ||
| 562 | |||
| 563 | A -> B -> C | ||
| 564 | |||
| 565 | where A, B and C are different lock classes. | ||
| 566 | |||
| 567 | NOTE: A dependency 'A -> C' is optimized out. | ||
| 568 | |||
| 569 | We can see the former graph, built without the commit step, is the same | ||
| 570 | as the latter graph built using commit steps. Of course the former way | ||
| 571 | finishes building the graph earlier, which means we can detect a | ||
| 572 | deadlock or its possibility sooner. So the former way would be preferred | ||
| 573 | when possible. But we cannot avoid using the latter way for crosslocks. | ||
| 574 | |||
| 575 | Let's look at how commit steps work for crosslocks. In this case, the | ||
| 576 | commit step is actually performed only on the crosslock BX. And it assumes | ||
| 577 | that the BX release context is different from the BX acquire context. | ||
| 578 | |||
| 579 | BX RELEASE CONTEXT BX ACQUIRE CONTEXT | ||
| 580 | ------------------ ------------------ | ||
| 581 | acquire A | ||
| 582 | /* | ||
| 583 | * Push A onto held_locks | ||
| 584 | * Queue A into hist_locks | ||
| 585 | * | ||
| 586 | * In held_locks: A | ||
| 587 | * In hist_locks: A | ||
| 588 | * In graph: Empty | ||
| 589 | */ | ||
| 590 | |||
| 591 | acquire BX | ||
| 592 | /* | ||
| 593 | * Add 'the top of held_locks -> BX' | ||
| 594 | * | ||
| 595 | * In held_locks: A | ||
| 596 | * In hist_locks: A | ||
| 597 | * In graph: 'A -> BX' | ||
| 598 | */ | ||
| 599 | |||
| 600 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
| 601 | It must be guaranteed that the following operations are globally seen | ||
| 602 | as happening after acquiring BX. It can be done by things like barriers. | ||
| 603 | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
| 604 | |||
| 605 | acquire C | ||
| 606 | /* | ||
| 607 | * Push C onto held_locks | ||
| 608 | * Queue C into hist_locks | ||
| 609 | * | ||
| 610 | * In held_locks: C | ||
| 611 | * In hist_locks: C | ||
| 612 | * In graph: 'A -> BX' | ||
| 613 | */ | ||
| 614 | |||
| 615 | release C | ||
| 616 | /* | ||
| 617 | * Pop C from held_locks | ||
| 618 | * | ||
| 619 | * In held_locks: Empty | ||
| 620 | * In hist_locks: C | ||
| 621 | * In graph: 'A -> BX' | ||
| 622 | */ | ||
| 623 | acquire D | ||
| 624 | /* | ||
| 625 | * Push D onto held_locks | ||
| 626 | * Queue D into hist_locks | ||
| 627 | * Add 'the top of held_locks -> D' | ||
| 628 | * | ||
| 629 | * In held_locks: A, D | ||
| 630 | * In hist_locks: A, D | ||
| 631 | * In graph: 'A -> BX', 'A -> D' | ||
| 632 | */ | ||
| 633 | acquire E | ||
| 634 | /* | ||
| 635 | * Push E onto held_locks | ||
| 636 | * Queue E into hist_locks | ||
| 637 | * | ||
| 638 | * In held_locks: E | ||
| 639 | * In hist_locks: C, E | ||
| 640 | * In graph: 'A -> BX', 'A -> D' | ||
| 641 | */ | ||
| 642 | |||
| 643 | release E | ||
| 644 | /* | ||
| 645 | * Pop E from held_locks | ||
| 646 | * | ||
| 647 | * In held_locks: Empty | ||
| 648 | * In hist_locks: D, E | ||
| 649 | * In graph: 'A -> BX', 'A -> D' | ||
| 650 | */ | ||
| 651 | release D | ||
| 652 | /* | ||
| 653 | * Pop D from held_locks | ||
| 654 | * | ||
| 655 | * In held_locks: A | ||
| 656 | * In hist_locks: A, D | ||
| 657 | * In graph: 'A -> BX', 'A -> D' | ||
| 658 | */ | ||
| 659 | commit BX | ||
| 660 | /* | ||
| 661 | * Add 'BX -> ?' | ||
| 662 | * What has been queued since acquire BX: C, E | ||
| 663 | * | ||
| 664 | * In held_locks: Empty | ||
| 665 | * In hist_locks: D, E | ||
| 666 | * In graph: 'A -> BX', 'A -> D', | ||
| 667 | * 'BX -> C', 'BX -> E' | ||
| 668 | */ | ||
| 669 | |||
| 670 | release BX | ||
| 671 | /* | ||
| 672 | * In held_locks: Empty | ||
| 673 | * In hist_locks: D, E | ||
| 674 | * In graph: 'A -> BX', 'A -> D', | ||
| 675 | * 'BX -> C', 'BX -> E' | ||
| 676 | */ | ||
| 677 | release A | ||
| 678 | /* | ||
| 679 | * Pop A from held_locks | ||
| 680 | * | ||
| 681 | * In held_locks: Empty | ||
| 682 | * In hist_locks: A, D | ||
| 683 | * In graph: 'A -> BX', 'A -> D', | ||
| 684 | * 'BX -> C', 'BX -> E' | ||
| 685 | */ | ||
| 686 | |||
| 687 | where A, BX, C,..., E are different lock classes, and a suffix 'X' is | ||
| 688 | added on crosslocks. | ||
| 689 | |||
| 690 | Crossrelease considers all acquisitions after acquiring BX to be | ||
| 691 | candidates which might create dependencies with BX. True dependencies | ||
| 692 | will be determined when identifying the release context of BX. Meanwhile, | ||
| 693 | all typical locks are queued so that they can be used at the commit step. | ||
| 694 | And then two dependencies 'BX -> C' and 'BX -> E' are added at the | ||
| 695 | commit step when identifying the release context. | ||
| 696 | |||
| 697 | The final graph will be, with crossrelease: | ||
| 698 | |||
| 699 | -> C | ||
| 700 | / | ||
| 701 | -> BX - | ||
| 702 | / \ | ||
| 703 | A - -> E | ||
| 704 | \ | ||
| 705 | -> D | ||
| 706 | |||
| 707 | where A, BX, C,..., E are different lock classes, and a suffix 'X' is | ||
| 708 | added on crosslocks. | ||
| 709 | |||
| 710 | However, the final graph will be, without crossrelease: | ||
| 711 | |||
| 712 | A -> D | ||
| 713 | |||
| 714 | where A and D are different lock classes. | ||
| 715 | |||
| 716 | The former graph has three more dependencies, 'A -> BX', 'BX -> C' and | ||
| 717 | 'BX -> E', giving additional opportunities to check if they cause | ||
| 718 | deadlocks. This way lockdep can detect a deadlock or its possibility | ||
| 719 | caused by crosslocks. | ||
| 720 | |||
| 721 | CONCLUSION | ||
| 722 | |||
| 723 | We checked how crossrelease works with several examples. | ||
| 724 | |||
| 725 | |||
| 726 | ============= | ||
| 727 | Optimizations | ||
| 728 | ============= | ||
| 729 | |||
| 730 | Avoid duplication | ||
| 731 | ----------------- | ||
| 732 | |||
| 733 | The crossrelease feature uses a cache like the one lockdep already uses | ||
| 734 | for dependency chains, but this one caches CT type dependencies. Once a | ||
| 735 | dependency is cached, the same one will never be added again. | ||
| 736 | |||
| 737 | |||
| 738 | Lockless for hot paths | ||
| 739 | ---------------------- | ||
| 740 | |||
| 741 | To keep all locks for later use at the commit step, crossrelease adopts | ||
| 742 | a local array embedded in task_struct, which makes access to the data | ||
| 743 | lockless by forcing it to happen only within the owner context. It's | ||
| 744 | like how lockdep handles held_locks. Lockless implementation is important | ||
| 745 | since typical locks are very frequently acquired and released. | ||
| 746 | |||
| 747 | |||
| 748 | ================================================== | ||
| 749 | APPENDIX A: What lockdep does to work aggressively | ||
| 750 | ================================================== | ||
| 751 | |||
| 752 | A deadlock actually occurs when all wait operations creating circular | ||
| 753 | dependencies run at the same time. Even when they don't, a potential | ||
| 754 | deadlock exists if the problematic dependencies exist. Thus it's | ||
| 755 | meaningful to detect not only an actual deadlock but also its | ||
| 756 | possibility. The latter is rather more valuable. When a deadlock | ||
| 757 | actually occurs, we can identify what happens in the system by some | ||
| 758 | means or other even without lockdep. However, there's no way to detect | ||
| 759 | a possibility without lockdep unless the whole code is parsed in one's | ||
| 760 | head, which is terrible. Lockdep does both, and crossrelease focuses only on the latter. | ||
| 761 | |||
| 762 | Whether or not a deadlock actually occurs depends on several factors. | ||
| 763 | For example, what order contexts are switched in is a factor. Assuming | ||
| 764 | circular dependencies exist, a deadlock would occur when contexts are | ||
| 765 | switched so that all wait operations creating the dependencies run | ||
| 766 | simultaneously. Thus to detect a deadlock possibility even in the case | ||
| 767 | that it has not occurred yet, lockdep should consider all possible | ||
| 768 | combinations of dependencies, trying to: | ||
| 769 | |||
| 770 | 1. Use a global dependency graph. | ||
| 771 | |||
| 772 | Lockdep combines all dependencies into one global graph and uses them, | ||
| 773 | regardless of which context generates them or what order contexts are | ||
| 774 | switched in. Only the aggregated dependencies are considered, so they | ||
| 775 | are prone to become circular if a problem exists. | ||
| 776 | |||
| 777 | 2. Check dependencies between classes instead of instances. | ||
| 778 | |||
| 779 | What actually causes a deadlock are lock instances. However, | ||
| 780 | lockdep checks dependencies between classes instead of instances. | ||
| 781 | This way lockdep can detect a deadlock which has not happened yet but | ||
| 782 | might happen in future with other instances of the same class. | ||
| 783 | |||
| 784 | 3. Assume all acquisitions lead to waiting. | ||
| 785 | |||
| 786 | Although locks might be acquired without waiting, and waiting is what | ||
| 787 | creates dependencies, lockdep assumes all acquisitions lead to | ||
| 788 | waiting since that might be true at some time or another. | ||
| 789 | |||
| 790 | CONCLUSION | ||
| 791 | |||
| 792 | Lockdep detects not only an actual deadlock but also its possibility, | ||
| 793 | and the latter is more valuable. | ||
| 794 | |||
| 795 | |||
| 796 | ================================================== | ||
| 797 | APPENDIX B: How to avoid adding false dependencies | ||
| 798 | ================================================== | ||
| 799 | |||
| 800 | Recall what a dependency is. A dependency exists if: | ||
| 801 | |||
| 802 | 1. There are two waiters waiting for each event at a given time. | ||
| 803 | 2. The only way to wake up each waiter is to trigger its event. | ||
| 804 | 3. Whether one can be woken up depends on whether the other can. | ||
| 805 | |||
| 806 | For example: | ||
| 807 | |||
| 808 | acquire A | ||
| 809 | acquire B /* A dependency 'A -> B' exists */ | ||
| 810 | release B | ||
| 811 | release A | ||
| 812 | |||
| 813 | where A and B are different lock classes. | ||
| 814 | |||
| 815 | A dependency 'A -> B' exists since: | ||
| 816 | |||
| 817 | 1. A waiter for A and a waiter for B might exist when acquiring B. | ||
| 818 | 2. The only way to wake up each is to release what it waits for. | ||
| 819 | 3. Whether the waiter for A can be woken up depends on whether the | ||
| 820 | other can. IOW, TASK X cannot release A if it fails to acquire B. | ||
| 821 | |||
| 822 | For another example: | ||
| 823 | |||
| 824 | TASK X TASK Y | ||
| 825 | ------ ------ | ||
| 826 | acquire AX | ||
| 827 | acquire B /* A dependency 'AX -> B' exists */ | ||
| 828 | release B | ||
| 829 | release AX held by Y | ||
| 830 | |||
| 831 | where AX and B are different lock classes, and a suffix 'X' is added | ||
| 832 | on crosslocks. | ||
| 833 | |||
| 834 | Even in this case involving crosslocks, the same rule can be applied. A | ||
| 835 | dependency 'AX -> B' exists since: | ||
| 836 | |||
| 837 | 1. A waiter for AX and a waiter for B might exist when acquiring B. | ||
| 838 | 2. The only way to wake up each is to release what it waits for. | ||
| 839 | 3. Whether the waiter for AX can be woken up depends on whether the | ||
| 840 | other can. IOW, TASK X cannot release AX if it fails to acquire B. | ||
| 841 | |||
| 842 | Let's take a look at a more complicated example: | ||
| 843 | |||
| 844 | TASK X TASK Y | ||
| 845 | ------ ------ | ||
| 846 | acquire B | ||
| 847 | release B | ||
| 848 | fork Y | ||
| 849 | acquire AX | ||
| 850 | acquire C /* A dependency 'AX -> C' exists */ | ||
| 851 | release C | ||
| 852 | release AX held by Y | ||
| 853 | |||
| 854 | where AX, B and C are different lock classes, and a suffix 'X' is | ||
| 855 | added on crosslocks. | ||
| 856 | |||
| 857 | Does a dependency 'AX -> B' exist? Nope. | ||
| 858 | |||
| 859 | Two waiters are essential to create a dependency. However, waiters for | ||
| 860 | AX and B to create 'AX -> B' cannot exist at the same time in this | ||
| 861 | example. Thus the dependency 'AX -> B' cannot be created. | ||
| 862 | |||
| 863 | It would be ideal if the full set of true dependencies could be | ||
| 864 | considered, but we can be sure of nothing except what actually | ||
| 865 | happened. By relying on what actually happens at runtime, we can at | ||
| 866 | least add only true dependencies, though they might be a subset of all | ||
| 867 | the true ones. It's similar to how lockdep works for typical locks: | ||
| 868 | there might be more true dependencies than lockdep has detected at | ||
| 869 | runtime, but lockdep has no choice but to rely on what actually happens. Crossrelease also relies on it. | ||
| 870 | |||
| 871 | CONCLUSION | ||
| 872 | |||
| 873 | Relying on what actually happens, lockdep can avoid adding false | ||
| 874 | dependencies. | ||
diff --git a/Documentation/vm/zswap.txt b/Documentation/vm/zswap.txt index 89fff7d611cc..0b3a1148f9f0 100644 --- a/Documentation/vm/zswap.txt +++ b/Documentation/vm/zswap.txt | |||
| @@ -98,5 +98,25 @@ request is made for a page in an old zpool, it is uncompressed using its | |||
| 98 | original compressor. Once all pages are removed from an old zpool, the zpool | 98 | original compressor. Once all pages are removed from an old zpool, the zpool |
| 99 | and its compressor are freed. | 99 | and its compressor are freed. |
| 100 | 100 | ||
| 101 | Some of the pages in zswap are same-value filled pages (i.e. the contents of | ||
| 102 | the page are a single repeated value or pattern). These pages, which include | ||
| 103 | zero-filled pages, are handled differently. During a store operation, a page | ||
| 104 | is checked to see whether it is a same-value filled page before it is | ||
| 105 | compressed. If it is, the compressed length of the page is set to zero and | ||
| 106 | the pattern or same-filled value is stored. | ||
| 107 | |||
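A sketch of how such a check can work, modeled on zswap's approach (the
function name here is illustrative, not the literal kernel symbol):

    /* Returns nonzero and stores the repeated word in *value when
     * every machine word in the page is identical. */
    static int page_same_filled(void *ptr, unsigned long *value)
    {
            unsigned long *page = ptr;
            unsigned int pos;

            for (pos = 1; pos < PAGE_SIZE / sizeof(*page); pos++) {
                    if (page[pos] != page[0])
                            return 0;
            }
            *value = page[0];
            return 1;
    }
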
| 108 | The same-value filled page identification feature is enabled by default and can be | ||
| 109 | disabled at boot time by setting the "same_filled_pages_enabled" attribute to 0, | ||
| 110 | e.g. zswap.same_filled_pages_enabled=0. It can also be enabled and disabled at | ||
| 111 | runtime using the sysfs "same_filled_pages_enabled" attribute, e.g. | ||
| 112 | |||
| 113 | echo 1 > /sys/module/zswap/parameters/same_filled_pages_enabled | ||
| 114 | |||
| 115 | When zswap same-filled page identification is disabled at runtime, it will stop | ||
| 116 | checking for same-value filled pages during store operations. However, the | ||
| 117 | existing pages which are marked as same-value filled pages remain stored | ||
| 118 | unchanged in zswap until they are either loaded or invalidated. | ||
| 119 | |||
| 101 | A debugfs interface is provided for various statistics about pool size, number | 120 | A debugfs interface is provided for various statistics about pool size, number
| 102 | of pages stored, and various counters for the reasons pages are rejected. | 121 | of pages stored, same-value filled pages and various counters for the reasons |
| 122 | pages are rejected. | ||
diff --git a/MAINTAINERS b/MAINTAINERS index 82ad0eabce4f..a6e86e20761e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
| @@ -5431,7 +5431,7 @@ F: drivers/media/tuners/fc2580* | |||
| 5431 | 5431 | ||
| 5432 | FCOE SUBSYSTEM (libfc, libfcoe, fcoe) | 5432 | FCOE SUBSYSTEM (libfc, libfcoe, fcoe) |
| 5433 | M: Johannes Thumshirn <jth@kernel.org> | 5433 | M: Johannes Thumshirn <jth@kernel.org> |
| 5434 | L: fcoe-devel@open-fcoe.org | 5434 | L: linux-scsi@vger.kernel.org |
| 5435 | W: www.Open-FCoE.org | 5435 | W: www.Open-FCoE.org |
| 5436 | S: Supported | 5436 | S: Supported |
| 5437 | F: drivers/scsi/libfc/ | 5437 | F: drivers/scsi/libfc/ |
| @@ -13117,6 +13117,7 @@ F: drivers/dma/dw/ | |||
| 13117 | 13117 | ||
| 13118 | SYNOPSYS DESIGNWARE ENTERPRISE ETHERNET DRIVER | 13118 | SYNOPSYS DESIGNWARE ENTERPRISE ETHERNET DRIVER |
| 13119 | M: Jie Deng <jiedeng@synopsys.com> | 13119 | M: Jie Deng <jiedeng@synopsys.com> |
| 13120 | M: Jose Abreu <Jose.Abreu@synopsys.com> | ||
| 13120 | L: netdev@vger.kernel.org | 13121 | L: netdev@vger.kernel.org |
| 13121 | S: Supported | 13122 | S: Supported |
| 13122 | F: drivers/net/ethernet/synopsys/ | 13123 | F: drivers/net/ethernet/synopsys/ |
diff --git a/arch/arm/boot/dts/vf610-zii-dev-rev-c.dts b/arch/arm/boot/dts/vf610-zii-dev-rev-c.dts index fbb3758ca2e3..4b8edc8982cf 100644 --- a/arch/arm/boot/dts/vf610-zii-dev-rev-c.dts +++ b/arch/arm/boot/dts/vf610-zii-dev-rev-c.dts | |||
| @@ -121,7 +121,7 @@ | |||
| 121 | switch0port10: port@10 { | 121 | switch0port10: port@10 { |
| 122 | reg = <10>; | 122 | reg = <10>; |
| 123 | label = "dsa"; | 123 | label = "dsa"; |
| 124 | phy-mode = "xgmii"; | 124 | phy-mode = "xaui"; |
| 125 | link = <&switch1port10>; | 125 | link = <&switch1port10>; |
| 126 | }; | 126 | }; |
| 127 | }; | 127 | }; |
| @@ -208,7 +208,7 @@ | |||
| 208 | switch1port10: port@10 { | 208 | switch1port10: port@10 { |
| 209 | reg = <10>; | 209 | reg = <10>; |
| 210 | label = "dsa"; | 210 | label = "dsa"; |
| 211 | phy-mode = "xgmii"; | 211 | phy-mode = "xaui"; |
| 212 | link = <&switch0port10>; | 212 | link = <&switch0port10>; |
| 213 | }; | 213 | }; |
| 214 | }; | 214 | }; |
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index a93339f5178f..c9a7e9e1414f 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig | |||
| @@ -557,7 +557,6 @@ config QCOM_QDF2400_ERRATUM_0065 | |||
| 557 | 557 | ||
| 558 | If unsure, say Y. | 558 | If unsure, say Y. |
| 559 | 559 | ||
| 560 | |||
| 561 | config SOCIONEXT_SYNQUACER_PREITS | 560 | config SOCIONEXT_SYNQUACER_PREITS |
| 562 | bool "Socionext Synquacer: Workaround for GICv3 pre-ITS" | 561 | bool "Socionext Synquacer: Workaround for GICv3 pre-ITS" |
| 563 | default y | 562 | default y |
| @@ -576,6 +575,17 @@ config HISILICON_ERRATUM_161600802 | |||
| 576 | a 128kB offset to be applied to the target address in these commands. | 575 | a 128kB offset to be applied to the target address in these commands.
| 577 | 576 | ||
| 578 | If unsure, say Y. | 577 | If unsure, say Y. |
| 578 | |||
| 579 | config QCOM_FALKOR_ERRATUM_E1041 | ||
| 580 | bool "Falkor E1041: Speculative instruction fetches might cause errant memory access" | ||
| 581 | default y | ||
| 582 | help | ||
| 583 | Falkor CPU may speculatively fetch instructions from an improper | ||
| 584 | memory location when MMU translation is changed from SCTLR_ELn[M]=1 | ||
| 585 | to SCTLR_ELn[M]=0. Prefix an ISB instruction to fix the problem. | ||
| 586 | |||
| 587 | If unsure, say Y. | ||
| 588 | |||
| 579 | endmenu | 589 | endmenu |
| 580 | 590 | ||
| 581 | 591 | ||
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index aef72d886677..8b168280976f 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h | |||
| @@ -512,4 +512,14 @@ alternative_else_nop_endif | |||
| 512 | #endif | 512 | #endif |
| 513 | .endm | 513 | .endm |
| 514 | 514 | ||
| 515 | /** | ||
| 516 | * Errata workaround prior to disabling the MMU. Insert an ISB immediately prior | ||
| 517 | * to executing the MSR that will change SCTLR_ELn[M] from a value of 1 to 0. | ||
| 518 | */ | ||
| 519 | .macro pre_disable_mmu_workaround | ||
| 520 | #ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041 | ||
| 521 | isb | ||
| 522 | #endif | ||
| 523 | .endm | ||
| 524 | |||
| 515 | #endif /* __ASM_ASSEMBLER_H */ | 525 | #endif /* __ASM_ASSEMBLER_H */ |
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index ac67cfc2585a..060e3a4008ab 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h | |||
| @@ -60,6 +60,9 @@ enum ftr_type { | |||
| 60 | #define FTR_VISIBLE true /* Feature visible to the user space */ | 60 | #define FTR_VISIBLE true /* Feature visible to the user space */ |
| 61 | #define FTR_HIDDEN false /* Feature is hidden from the user */ | 61 | #define FTR_HIDDEN false /* Feature is hidden from the user */ |
| 62 | 62 | ||
| 63 | #define FTR_VISIBLE_IF_IS_ENABLED(config) \ | ||
| 64 | (IS_ENABLED(config) ? FTR_VISIBLE : FTR_HIDDEN) | ||
| 65 | |||
| 63 | struct arm64_ftr_bits { | 66 | struct arm64_ftr_bits { |
| 64 | bool sign; /* Value is signed ? */ | 67 | bool sign; /* Value is signed ? */ |
| 65 | bool visible; | 68 | bool visible; |
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index 235e77d98261..cbf08d7cbf30 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h | |||
| @@ -91,6 +91,7 @@ | |||
| 91 | #define BRCM_CPU_PART_VULCAN 0x516 | 91 | #define BRCM_CPU_PART_VULCAN 0x516 |
| 92 | 92 | ||
| 93 | #define QCOM_CPU_PART_FALKOR_V1 0x800 | 93 | #define QCOM_CPU_PART_FALKOR_V1 0x800 |
| 94 | #define QCOM_CPU_PART_FALKOR 0xC00 | ||
| 94 | 95 | ||
| 95 | #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) | 96 | #define MIDR_CORTEX_A53 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) |
| 96 | #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57) | 97 | #define MIDR_CORTEX_A57 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57) |
| @@ -99,6 +100,7 @@ | |||
| 99 | #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX) | 100 | #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX) |
| 100 | #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX) | 101 | #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX) |
| 101 | #define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1) | 102 | #define MIDR_QCOM_FALKOR_V1 MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR_V1) |
| 103 | #define MIDR_QCOM_FALKOR MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, QCOM_CPU_PART_FALKOR) | ||
| 102 | 104 | ||
| 103 | #ifndef __ASSEMBLY__ | 105 | #ifndef __ASSEMBLY__ |
| 104 | 106 | ||
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 149d05fb9421..bdcc7f1c9d06 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h | |||
| @@ -42,6 +42,8 @@ | |||
| 42 | #include <asm/cmpxchg.h> | 42 | #include <asm/cmpxchg.h> |
| 43 | #include <asm/fixmap.h> | 43 | #include <asm/fixmap.h> |
| 44 | #include <linux/mmdebug.h> | 44 | #include <linux/mmdebug.h> |
| 45 | #include <linux/mm_types.h> | ||
| 46 | #include <linux/sched.h> | ||
| 45 | 47 | ||
| 46 | extern void __pte_error(const char *file, int line, unsigned long val); | 48 | extern void __pte_error(const char *file, int line, unsigned long val); |
| 47 | extern void __pmd_error(const char *file, int line, unsigned long val); | 49 | extern void __pmd_error(const char *file, int line, unsigned long val); |
| @@ -149,12 +151,20 @@ static inline pte_t pte_mkwrite(pte_t pte) | |||
| 149 | 151 | ||
| 150 | static inline pte_t pte_mkclean(pte_t pte) | 152 | static inline pte_t pte_mkclean(pte_t pte) |
| 151 | { | 153 | { |
| 152 | return clear_pte_bit(pte, __pgprot(PTE_DIRTY)); | 154 | pte = clear_pte_bit(pte, __pgprot(PTE_DIRTY)); |
| 155 | pte = set_pte_bit(pte, __pgprot(PTE_RDONLY)); | ||
| 156 | |||
| 157 | return pte; | ||
| 153 | } | 158 | } |
| 154 | 159 | ||
| 155 | static inline pte_t pte_mkdirty(pte_t pte) | 160 | static inline pte_t pte_mkdirty(pte_t pte) |
| 156 | { | 161 | { |
| 157 | return set_pte_bit(pte, __pgprot(PTE_DIRTY)); | 162 | pte = set_pte_bit(pte, __pgprot(PTE_DIRTY)); |
| 163 | |||
| 164 | if (pte_write(pte)) | ||
| 165 | pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY)); | ||
| 166 | |||
| 167 | return pte; | ||
| 158 | } | 168 | } |
| 159 | 169 | ||
| 160 | static inline pte_t pte_mkold(pte_t pte) | 170 | static inline pte_t pte_mkold(pte_t pte) |
| @@ -207,9 +217,6 @@ static inline void set_pte(pte_t *ptep, pte_t pte) | |||
| 207 | } | 217 | } |
| 208 | } | 218 | } |
| 209 | 219 | ||
| 210 | struct mm_struct; | ||
| 211 | struct vm_area_struct; | ||
| 212 | |||
| 213 | extern void __sync_icache_dcache(pte_t pteval, unsigned long addr); | 220 | extern void __sync_icache_dcache(pte_t pteval, unsigned long addr); |
| 214 | 221 | ||
| 215 | /* | 222 | /* |
| @@ -238,7 +245,8 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, | |||
| 238 | * hardware updates of the pte (ptep_set_access_flags safely changes | 245 | * hardware updates of the pte (ptep_set_access_flags safely changes |
| 239 | * valid ptes without going through an invalid entry). | 246 | * valid ptes without going through an invalid entry). |
| 240 | */ | 247 | */ |
| 241 | if (pte_valid(*ptep) && pte_valid(pte)) { | 248 | if (IS_ENABLED(CONFIG_DEBUG_VM) && pte_valid(*ptep) && pte_valid(pte) && |
| 249 | (mm == current->active_mm || atomic_read(&mm->mm_users) > 1)) { | ||
| 242 | VM_WARN_ONCE(!pte_young(pte), | 250 | VM_WARN_ONCE(!pte_young(pte), |
| 243 | "%s: racy access flag clearing: 0x%016llx -> 0x%016llx", | 251 | "%s: racy access flag clearing: 0x%016llx -> 0x%016llx", |
| 244 | __func__, pte_val(*ptep), pte_val(pte)); | 252 | __func__, pte_val(*ptep), pte_val(pte)); |
| @@ -641,28 +649,23 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, | |||
| 641 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ | 649 | #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ |
| 642 | 650 | ||
| 643 | /* | 651 | /* |
| 644 | * ptep_set_wrprotect - mark read-only while preserving the hardware update of | 652 | * ptep_set_wrprotect - mark read-only while transferring potential hardware
| 645 | * the Access Flag. | 653 | * dirty status (PTE_DBM && !PTE_RDONLY) to the software PTE_DIRTY bit. |
| 646 | */ | 654 | */ |
| 647 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT | 655 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT |
| 648 | static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep) | 656 | static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep) |
| 649 | { | 657 | { |
| 650 | pte_t old_pte, pte; | 658 | pte_t old_pte, pte; |
| 651 | 659 | ||
| 652 | /* | ||
| 653 | * ptep_set_wrprotect() is only called on CoW mappings which are | ||
| 654 | * private (!VM_SHARED) with the pte either read-only (!PTE_WRITE && | ||
| 655 | * PTE_RDONLY) or writable and software-dirty (PTE_WRITE && | ||
| 656 | * !PTE_RDONLY && PTE_DIRTY); see is_cow_mapping() and | ||
| 657 | * protection_map[]. There is no race with the hardware update of the | ||
| 658 | * dirty state: clearing of PTE_RDONLY when PTE_WRITE (a.k.a. PTE_DBM) | ||
| 659 | * is set. | ||
| 660 | */ | ||
| 661 | VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(*ptep), | ||
| 662 | "%s: potential race with hardware DBM", __func__); | ||
| 663 | pte = READ_ONCE(*ptep); | 660 | pte = READ_ONCE(*ptep); |
| 664 | do { | 661 | do { |
| 665 | old_pte = pte; | 662 | old_pte = pte; |
| 663 | /* | ||
| 664 | * If hardware-dirty (PTE_WRITE/DBM bit set and PTE_RDONLY | ||
| 665 | * clear), set the PTE_DIRTY bit. | ||
| 666 | */ | ||
| 667 | if (pte_hw_dirty(pte)) | ||
| 668 | pte = pte_mkdirty(pte); | ||
| 666 | pte = pte_wrprotect(pte); | 669 | pte = pte_wrprotect(pte); |
| 667 | pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep), | 670 | pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep), |
| 668 | pte_val(old_pte), pte_val(pte)); | 671 | pte_val(old_pte), pte_val(pte)); |
diff --git a/arch/arm64/kernel/cpu-reset.S b/arch/arm64/kernel/cpu-reset.S index 65f42d257414..2a752cb2a0f3 100644 --- a/arch/arm64/kernel/cpu-reset.S +++ b/arch/arm64/kernel/cpu-reset.S | |||
| @@ -37,6 +37,7 @@ ENTRY(__cpu_soft_restart) | |||
| 37 | mrs x12, sctlr_el1 | 37 | mrs x12, sctlr_el1 |
| 38 | ldr x13, =SCTLR_ELx_FLAGS | 38 | ldr x13, =SCTLR_ELx_FLAGS |
| 39 | bic x12, x12, x13 | 39 | bic x12, x12, x13 |
| 40 | pre_disable_mmu_workaround | ||
| 40 | msr sctlr_el1, x12 | 41 | msr sctlr_el1, x12 |
| 41 | isb | 42 | isb |
| 42 | 43 | ||
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index c5ba0097887f..a73a5928f09b 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c | |||
| @@ -145,7 +145,8 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = { | |||
| 145 | }; | 145 | }; |
| 146 | 146 | ||
| 147 | static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = { | 147 | static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = { |
| 148 | ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0), | 148 | ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE), |
| 149 | FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0), | ||
| 149 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0), | 150 | ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0), |
| 150 | S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI), | 151 | S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI), |
| 151 | S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI), | 152 | S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI), |
diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S index 4e6ad355bd05..6b9736c3fb56 100644 --- a/arch/arm64/kernel/efi-entry.S +++ b/arch/arm64/kernel/efi-entry.S | |||
| @@ -96,6 +96,7 @@ ENTRY(entry) | |||
| 96 | mrs x0, sctlr_el2 | 96 | mrs x0, sctlr_el2 |
| 97 | bic x0, x0, #1 << 0 // clear SCTLR.M | 97 | bic x0, x0, #1 << 0 // clear SCTLR.M |
| 98 | bic x0, x0, #1 << 2 // clear SCTLR.C | 98 | bic x0, x0, #1 << 2 // clear SCTLR.C |
| 99 | pre_disable_mmu_workaround | ||
| 99 | msr sctlr_el2, x0 | 100 | msr sctlr_el2, x0 |
| 100 | isb | 101 | isb |
| 101 | b 2f | 102 | b 2f |
| @@ -103,6 +104,7 @@ ENTRY(entry) | |||
| 103 | mrs x0, sctlr_el1 | 104 | mrs x0, sctlr_el1 |
| 104 | bic x0, x0, #1 << 0 // clear SCTLR.M | 105 | bic x0, x0, #1 << 0 // clear SCTLR.M |
| 105 | bic x0, x0, #1 << 2 // clear SCTLR.C | 106 | bic x0, x0, #1 << 2 // clear SCTLR.C |
| 107 | pre_disable_mmu_workaround | ||
| 106 | msr sctlr_el1, x0 | 108 | msr sctlr_el1, x0 |
| 107 | isb | 109 | isb |
| 108 | 2: | 110 | 2: |
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index 540a1e010eb5..fae81f7964b4 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c | |||
| @@ -1043,7 +1043,7 @@ void fpsimd_update_current_state(struct fpsimd_state *state) | |||
| 1043 | 1043 | ||
| 1044 | local_bh_disable(); | 1044 | local_bh_disable(); |
| 1045 | 1045 | ||
| 1046 | current->thread.fpsimd_state = *state; | 1046 | current->thread.fpsimd_state.user_fpsimd = state->user_fpsimd; |
| 1047 | if (system_supports_sve() && test_thread_flag(TIF_SVE)) | 1047 | if (system_supports_sve() && test_thread_flag(TIF_SVE)) |
| 1048 | fpsimd_to_sve(current); | 1048 | fpsimd_to_sve(current); |
| 1049 | 1049 | ||
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 67e86a0f57ac..e3cb9fbf96b6 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S | |||
| @@ -750,6 +750,7 @@ __primary_switch: | |||
| 750 | * to take into account by discarding the current kernel mapping and | 750 | * to take into account by discarding the current kernel mapping and |
| 751 | * creating a new one. | 751 | * creating a new one. |
| 752 | */ | 752 | */ |
| 753 | pre_disable_mmu_workaround | ||
| 753 | msr sctlr_el1, x20 // disable the MMU | 754 | msr sctlr_el1, x20 // disable the MMU |
| 754 | isb | 755 | isb |
| 755 | bl __create_page_tables // recreate kernel mapping | 756 | bl __create_page_tables // recreate kernel mapping |
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c index 749f81779420..74bb56f656ef 100644 --- a/arch/arm64/kernel/hw_breakpoint.c +++ b/arch/arm64/kernel/hw_breakpoint.c | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <linux/perf_event.h> | 28 | #include <linux/perf_event.h> |
| 29 | #include <linux/ptrace.h> | 29 | #include <linux/ptrace.h> |
| 30 | #include <linux/smp.h> | 30 | #include <linux/smp.h> |
| 31 | #include <linux/uaccess.h> | ||
| 31 | 32 | ||
| 32 | #include <asm/compat.h> | 33 | #include <asm/compat.h> |
| 33 | #include <asm/current.h> | 34 | #include <asm/current.h> |
| @@ -36,7 +37,6 @@ | |||
| 36 | #include <asm/traps.h> | 37 | #include <asm/traps.h> |
| 37 | #include <asm/cputype.h> | 38 | #include <asm/cputype.h> |
| 38 | #include <asm/system_misc.h> | 39 | #include <asm/system_misc.h> |
| 39 | #include <asm/uaccess.h> | ||
| 40 | 40 | ||
| 41 | /* Breakpoint currently in use for each BRP. */ | 41 | /* Breakpoint currently in use for each BRP. */ |
| 42 | static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]); | 42 | static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]); |
diff --git a/arch/arm64/kernel/relocate_kernel.S b/arch/arm64/kernel/relocate_kernel.S index ce704a4aeadd..f407e422a720 100644 --- a/arch/arm64/kernel/relocate_kernel.S +++ b/arch/arm64/kernel/relocate_kernel.S | |||
| @@ -45,6 +45,7 @@ ENTRY(arm64_relocate_new_kernel) | |||
| 45 | mrs x0, sctlr_el2 | 45 | mrs x0, sctlr_el2 |
| 46 | ldr x1, =SCTLR_ELx_FLAGS | 46 | ldr x1, =SCTLR_ELx_FLAGS |
| 47 | bic x0, x0, x1 | 47 | bic x0, x0, x1 |
| 48 | pre_disable_mmu_workaround | ||
| 48 | msr sctlr_el2, x0 | 49 | msr sctlr_el2, x0 |
| 49 | isb | 50 | isb |
| 50 | 1: | 51 | 1: |
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S index 3f9615582377..870828c364c5 100644 --- a/arch/arm64/kvm/hyp-init.S +++ b/arch/arm64/kvm/hyp-init.S | |||
| @@ -151,6 +151,7 @@ reset: | |||
| 151 | mrs x5, sctlr_el2 | 151 | mrs x5, sctlr_el2 |
| 152 | ldr x6, =SCTLR_ELx_FLAGS | 152 | ldr x6, =SCTLR_ELx_FLAGS |
| 153 | bic x5, x5, x6 // Clear SCTL_M and etc | 153 | bic x5, x5, x6 // Clear SCTL_M and etc |
| 154 | pre_disable_mmu_workaround | ||
| 154 | msr sctlr_el2, x5 | 155 | msr sctlr_el2, x5 |
| 155 | isb | 156 | isb |
| 156 | 157 | ||
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c index ca74a2aace42..7b60d62ac593 100644 --- a/arch/arm64/mm/dump.c +++ b/arch/arm64/mm/dump.c | |||
| @@ -389,7 +389,7 @@ void ptdump_check_wx(void) | |||
| 389 | .check_wx = true, | 389 | .check_wx = true, |
| 390 | }; | 390 | }; |
| 391 | 391 | ||
| 392 | walk_pgd(&st, &init_mm, 0); | 392 | walk_pgd(&st, &init_mm, VA_START); |
| 393 | note_page(&st, 0, 0, 0); | 393 | note_page(&st, 0, 0, 0); |
| 394 | if (st.wx_pages || st.uxn_pages) | 394 | if (st.wx_pages || st.uxn_pages) |
| 395 | pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found, %lu non-UXN pages found\n", | 395 | pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found, %lu non-UXN pages found\n", |
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 22168cd0dde7..9b7f89df49db 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c | |||
| @@ -574,7 +574,6 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs) | |||
| 574 | { | 574 | { |
| 575 | struct siginfo info; | 575 | struct siginfo info; |
| 576 | const struct fault_info *inf; | 576 | const struct fault_info *inf; |
| 577 | int ret = 0; | ||
| 578 | 577 | ||
| 579 | inf = esr_to_fault_info(esr); | 578 | inf = esr_to_fault_info(esr); |
| 580 | pr_err("Synchronous External Abort: %s (0x%08x) at 0x%016lx\n", | 579 | pr_err("Synchronous External Abort: %s (0x%08x) at 0x%016lx\n", |
| @@ -589,7 +588,7 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs) | |||
| 589 | if (interrupts_enabled(regs)) | 588 | if (interrupts_enabled(regs)) |
| 590 | nmi_enter(); | 589 | nmi_enter(); |
| 591 | 590 | ||
| 592 | ret = ghes_notify_sea(); | 591 | ghes_notify_sea(); |
| 593 | 592 | ||
| 594 | if (interrupts_enabled(regs)) | 593 | if (interrupts_enabled(regs)) |
| 595 | nmi_exit(); | 594 | nmi_exit(); |
| @@ -604,7 +603,7 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs) | |||
| 604 | info.si_addr = (void __user *)addr; | 603 | info.si_addr = (void __user *)addr; |
| 605 | arm64_notify_die("", regs, &info, esr); | 604 | arm64_notify_die("", regs, &info, esr); |
| 606 | 605 | ||
| 607 | return ret; | 606 | return 0; |
| 608 | } | 607 | } |
| 609 | 608 | ||
| 610 | static const struct fault_info fault_info[] = { | 609 | static const struct fault_info fault_info[] = { |
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 5960bef0170d..00e7b900ca41 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c | |||
| @@ -476,6 +476,8 @@ void __init arm64_memblock_init(void) | |||
| 476 | 476 | ||
| 477 | reserve_elfcorehdr(); | 477 | reserve_elfcorehdr(); |
| 478 | 478 | ||
| 479 | high_memory = __va(memblock_end_of_DRAM() - 1) + 1; | ||
| 480 | |||
| 479 | dma_contiguous_reserve(arm64_dma_phys_limit); | 481 | dma_contiguous_reserve(arm64_dma_phys_limit); |
| 480 | 482 | ||
| 481 | memblock_allow_resize(); | 483 | memblock_allow_resize(); |
| @@ -502,7 +504,6 @@ void __init bootmem_init(void) | |||
| 502 | sparse_init(); | 504 | sparse_init(); |
| 503 | zone_sizes_init(min, max); | 505 | zone_sizes_init(min, max); |
| 504 | 506 | ||
| 505 | high_memory = __va((max << PAGE_SHIFT) - 1) + 1; | ||
| 506 | memblock_dump_all(); | 507 | memblock_dump_all(); |
| 507 | } | 508 | } |
| 508 | 509 | ||
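The high_memory assignment moves from bootmem_init() into arm64_memblock_init(), ahead of dma_contiguous_reserve(), presumably so the CMA reservation path no longer runs with high_memory unset. The -1/+1 idiom translates the last byte of DRAM rather than the one-past-the-end address, which may not itself have a valid linear-map translation. A toy illustration with an assumed flat virt = phys + PAGE_OFFSET map, not the real arm64 layout:

#include <stdint.h>
#include <assert.h>

/* Assumed flat linear map; toy_va() stands in for __va(). */
#define PAGE_OFFSET 0xffff000000000000ull

static uint64_t toy_va(uint64_t phys)
{
        return phys + PAGE_OFFSET;
}

int main(void)
{
        uint64_t end = 0x80000000ull;   /* one past the last byte of DRAM */

        /* Translate the last byte that actually exists, then step one
         * past it in virtual space, instead of translating an address
         * that is itself outside of RAM. */
        uint64_t high_memory = toy_va(end - 1) + 1;

        assert(high_memory == PAGE_OFFSET + end);
        return 0;
}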
diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h index 773c4e039cd7..c0319cbf1eec 100644 --- a/arch/riscv/include/asm/barrier.h +++ b/arch/riscv/include/asm/barrier.h | |||
| @@ -38,6 +38,25 @@ | |||
| 38 | #define smp_rmb() RISCV_FENCE(r,r) | 38 | #define smp_rmb() RISCV_FENCE(r,r) |
| 39 | #define smp_wmb() RISCV_FENCE(w,w) | 39 | #define smp_wmb() RISCV_FENCE(w,w) |
| 40 | 40 | ||
| 41 | /* | ||
| 42 | * This is a very specific barrier: it's currently only used in two places in | ||
| 43 | * the kernel, both in the scheduler. See include/linux/spinlock.h for the two | ||
| 44 | * orderings it guarantees, but the "critical section is RCsc" guarantee | ||
| 45 | * mandates a barrier on RISC-V. The sequence looks like: | ||
| 46 | * | ||
| 47 | * lr.aq lock | ||
| 48 | * sc lock <= LOCKED | ||
| 49 | * smp_mb__after_spinlock() | ||
| 50 | * // critical section | ||
| 51 | * lr lock | ||
| 52 | * sc.rl lock <= UNLOCKED | ||
| 53 | * | ||
| 54 | * The AQ/RL pair provides a RCpc critical section, but there's not really any | ||
| 55 | * way we can take advantage of that here because the ordering is only enforced | ||
| 56 | * on that one lock. Thus, we're just doing a full fence. | ||
| 57 | */ | ||
| 58 | #define smp_mb__after_spinlock() RISCV_FENCE(rw,rw) | ||
| 59 | |||
| 41 | #include <asm-generic/barrier.h> | 60 | #include <asm-generic/barrier.h> |
| 42 | 61 | ||
| 43 | #endif /* __ASSEMBLY__ */ | 62 | #endif /* __ASSEMBLY__ */ |
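The new comment explains why a full fence is needed: lr.aq/sc.rl makes the lock acquisition RCpc, which orders later accesses against the acquire but lets earlier plain accesses sink into the critical section. A rough C11 analogue of the same upgrade, using an acquire CAS plus a seq_cst fence in place of the kernel's spin_lock()/smp_mb__after_spinlock() pair; this is illustrative only, not the kernel API:

#include <stdatomic.h>

static _Atomic int lock;
static int a, b;

void locked_update(void)
{
        int expected = 0;

        /* Acquire-only CAS: later accesses cannot move above it, but
         * earlier plain accesses may still sink below it on a weakly
         * ordered machine -- the RCpc behaviour the comment describes. */
        while (!atomic_compare_exchange_weak_explicit(&lock, &expected, 1,
                        memory_order_acquire, memory_order_relaxed))
                expected = 0;

        /* Full fence: the analogue of smp_mb__after_spinlock(), giving
         * the RCsc-like ordering the scheduler relies on. */
        atomic_thread_fence(memory_order_seq_cst);

        a = b + 1;                      /* critical section */

        atomic_store_explicit(&lock, 0, memory_order_release);
}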
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index 8fbb6749910d..cb7b0c63014e 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c | |||
| @@ -38,10 +38,6 @@ | |||
| 38 | #include <asm/tlbflush.h> | 38 | #include <asm/tlbflush.h> |
| 39 | #include <asm/thread_info.h> | 39 | #include <asm/thread_info.h> |
| 40 | 40 | ||
| 41 | #ifdef CONFIG_HVC_RISCV_SBI | ||
| 42 | #include <asm/hvc_riscv_sbi.h> | ||
| 43 | #endif | ||
| 44 | |||
| 45 | #ifdef CONFIG_DUMMY_CONSOLE | 41 | #ifdef CONFIG_DUMMY_CONSOLE |
| 46 | struct screen_info screen_info = { | 42 | struct screen_info screen_info = { |
| 47 | .orig_video_lines = 30, | 43 | .orig_video_lines = 30, |
| @@ -212,13 +208,6 @@ static void __init setup_bootmem(void) | |||
| 212 | 208 | ||
| 213 | void __init setup_arch(char **cmdline_p) | 209 | void __init setup_arch(char **cmdline_p) |
| 214 | { | 210 | { |
| 215 | #if defined(CONFIG_HVC_RISCV_SBI) | ||
| 216 | if (likely(early_console == NULL)) { | ||
| 217 | early_console = &riscv_sbi_early_console_dev; | ||
| 218 | register_console(early_console); | ||
| 219 | } | ||
| 220 | #endif | ||
| 221 | |||
| 222 | #ifdef CONFIG_CMDLINE_BOOL | 211 | #ifdef CONFIG_CMDLINE_BOOL |
| 223 | #ifdef CONFIG_CMDLINE_OVERRIDE | 212 | #ifdef CONFIG_CMDLINE_OVERRIDE |
| 224 | strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); | 213 | strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); |
diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c index a2ae936a093e..79c78668258e 100644 --- a/arch/riscv/kernel/sys_riscv.c +++ b/arch/riscv/kernel/sys_riscv.c | |||
| @@ -70,7 +70,7 @@ SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end, | |||
| 70 | bool local = (flags & SYS_RISCV_FLUSH_ICACHE_LOCAL) != 0; | 70 | bool local = (flags & SYS_RISCV_FLUSH_ICACHE_LOCAL) != 0; |
| 71 | 71 | ||
| 72 | /* Check the reserved flags. */ | 72 | /* Check the reserved flags. */ |
| 73 | if (unlikely(flags & !SYS_RISCV_FLUSH_ICACHE_ALL)) | 73 | if (unlikely(flags & ~SYS_RISCV_FLUSH_ICACHE_ALL)) |
| 74 | return -EINVAL; | 74 | return -EINVAL; |
| 75 | 75 | ||
| 76 | flush_icache_mm(mm, local); | 76 | flush_icache_mm(mm, local); |
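The one-character fix replaces a logical NOT with a bitwise complement. !SYS_RISCV_FLUSH_ICACHE_ALL evaluates to 0, so flags & 0 never rejected anything; ~SYS_RISCV_FLUSH_ICACHE_ALL keeps exactly the bits outside the known flag set. A self-contained demonstration, with the flag values assumed for illustration:

#include <assert.h>

#define FLUSH_ICACHE_LOCAL 1UL
#define FLUSH_ICACHE_ALL   FLUSH_ICACHE_LOCAL  /* mask of all known flags */

int main(void)
{
        unsigned long flags = 0x2;      /* a reserved, unknown flag */

        /* Buggy form: !MASK collapses to 0, so no flag was ever rejected. */
        assert((flags & !FLUSH_ICACHE_ALL) == 0);

        /* Fixed form: ~MASK keeps every bit outside the known set. */
        assert((flags & ~FLUSH_ICACHE_ALL) != 0);
        return 0;
}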
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 57d7bc92e0b8..0a6b0286c32e 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h | |||
| @@ -1264,12 +1264,6 @@ static inline pud_t pud_mkwrite(pud_t pud) | |||
| 1264 | return pud; | 1264 | return pud; |
| 1265 | } | 1265 | } |
| 1266 | 1266 | ||
| 1267 | #define pud_write pud_write | ||
| 1268 | static inline int pud_write(pud_t pud) | ||
| 1269 | { | ||
| 1270 | return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0; | ||
| 1271 | } | ||
| 1272 | |||
| 1273 | static inline pud_t pud_mkclean(pud_t pud) | 1267 | static inline pud_t pud_mkclean(pud_t pud) |
| 1274 | { | 1268 | { |
| 1275 | if (pud_large(pud)) { | 1269 | if (pud_large(pud)) { |
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c index f04db3779b34..59eea9c65d3e 100644 --- a/arch/s390/kernel/compat_linux.c +++ b/arch/s390/kernel/compat_linux.c | |||
| @@ -263,6 +263,7 @@ COMPAT_SYSCALL_DEFINE2(s390_setgroups16, int, gidsetsize, u16 __user *, grouplis | |||
| 263 | return retval; | 263 | return retval; |
| 264 | } | 264 | } |
| 265 | 265 | ||
| 266 | groups_sort(group_info); | ||
| 266 | retval = set_current_groups(group_info); | 267 | retval = set_current_groups(group_info); |
| 267 | put_group_info(group_info); | 268 | put_group_info(group_info); |
| 268 | 269 | ||
diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c index 33c0f8bb0f33..5335ba3c850e 100644 --- a/arch/sparc/mm/gup.c +++ b/arch/sparc/mm/gup.c | |||
| @@ -75,7 +75,7 @@ static int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr, | |||
| 75 | if (!(pmd_val(pmd) & _PAGE_VALID)) | 75 | if (!(pmd_val(pmd) & _PAGE_VALID)) |
| 76 | return 0; | 76 | return 0; |
| 77 | 77 | ||
| 78 | if (!pmd_access_permitted(pmd, write)) | 78 | if (write && !pmd_write(pmd)) |
| 79 | return 0; | 79 | return 0; |
| 80 | 80 | ||
| 81 | refs = 0; | 81 | refs = 0; |
| @@ -114,7 +114,7 @@ static int gup_huge_pud(pud_t *pudp, pud_t pud, unsigned long addr, | |||
| 114 | if (!(pud_val(pud) & _PAGE_VALID)) | 114 | if (!(pud_val(pud) & _PAGE_VALID)) |
| 115 | return 0; | 115 | return 0; |
| 116 | 116 | ||
| 117 | if (!pud_access_permitted(pud, write)) | 117 | if (write && !pud_write(pud)) |
| 118 | return 0; | 118 | return 0; |
| 119 | 119 | ||
| 120 | refs = 0; | 120 | refs = 0; |
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild index 50a32c33d729..73c57f614c9e 100644 --- a/arch/um/include/asm/Kbuild +++ b/arch/um/include/asm/Kbuild | |||
| @@ -1,4 +1,5 @@ | |||
| 1 | generic-y += barrier.h | 1 | generic-y += barrier.h |
| 2 | generic-y += bpf_perf_event.h | ||
| 2 | generic-y += bug.h | 3 | generic-y += bug.h |
| 3 | generic-y += clkdev.h | 4 | generic-y += clkdev.h |
| 4 | generic-y += current.h | 5 | generic-y += current.h |
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index 6293a8768a91..672441c008c7 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug | |||
| @@ -400,6 +400,7 @@ config UNWINDER_FRAME_POINTER | |||
| 400 | config UNWINDER_GUESS | 400 | config UNWINDER_GUESS |
| 401 | bool "Guess unwinder" | 401 | bool "Guess unwinder" |
| 402 | depends on EXPERT | 402 | depends on EXPERT |
| 403 | depends on !STACKDEPOT | ||
| 403 | ---help--- | 404 | ---help--- |
| 404 | This option enables the "guess" unwinder for unwinding kernel stack | 405 | This option enables the "guess" unwinder for unwinding kernel stack |
| 405 | traces. It scans the stack and reports every kernel text address it | 406 | traces. It scans the stack and reports every kernel text address it |
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 1e9c322e973a..f25e1530e064 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile | |||
| @@ -80,6 +80,7 @@ vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/kaslr.o | |||
| 80 | ifdef CONFIG_X86_64 | 80 | ifdef CONFIG_X86_64 |
| 81 | vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/pagetable.o | 81 | vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/pagetable.o |
| 82 | vmlinux-objs-y += $(obj)/mem_encrypt.o | 82 | vmlinux-objs-y += $(obj)/mem_encrypt.o |
| 83 | vmlinux-objs-y += $(obj)/pgtable_64.o | ||
| 83 | endif | 84 | endif |
| 84 | 85 | ||
| 85 | $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone | 86 | $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone |
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index 20919b4f3133..fc313e29fe2c 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S | |||
| @@ -305,10 +305,18 @@ ENTRY(startup_64) | |||
| 305 | leaq boot_stack_end(%rbx), %rsp | 305 | leaq boot_stack_end(%rbx), %rsp |
| 306 | 306 | ||
| 307 | #ifdef CONFIG_X86_5LEVEL | 307 | #ifdef CONFIG_X86_5LEVEL |
| 308 | /* Check if 5-level paging has already enabled */ | 308 | /* |
| 309 | movq %cr4, %rax | 309 | * Check if we need to enable 5-level paging. |
| 310 | testl $X86_CR4_LA57, %eax | 310 | * RSI holds real mode data and needs to be preserved across |
| 311 | jnz lvl5 | 311 | * a function call. |
| 312 | */ | ||
| 313 | pushq %rsi | ||
| 314 | call l5_paging_required | ||
| 315 | popq %rsi | ||
| 316 | |||
| 317 | /* If l5_paging_required() returned zero, we're done here. */ | ||
| 318 | cmpq $0, %rax | ||
| 319 | je lvl5 | ||
| 312 | 320 | ||
| 313 | /* | 321 | /* |
| 314 | * At this point we are in long mode with 4-level paging enabled, | 322 | * At this point we are in long mode with 4-level paging enabled, |
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index b50c42455e25..98761a1576ce 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c | |||
| @@ -169,6 +169,16 @@ void __puthex(unsigned long value) | |||
| 169 | } | 169 | } |
| 170 | } | 170 | } |
| 171 | 171 | ||
| 172 | static bool l5_supported(void) | ||
| 173 | { | ||
| 174 | /* Check if leaf 7 is supported. */ | ||
| 175 | if (native_cpuid_eax(0) < 7) | ||
| 176 | return 0; | ||
| 177 | |||
| 178 | /* Check if la57 is supported. */ | ||
| 179 | return native_cpuid_ecx(7) & (1 << (X86_FEATURE_LA57 & 31)); | ||
| 180 | } | ||
| 181 | |||
| 172 | #if CONFIG_X86_NEED_RELOCS | 182 | #if CONFIG_X86_NEED_RELOCS |
| 173 | static void handle_relocations(void *output, unsigned long output_len, | 183 | static void handle_relocations(void *output, unsigned long output_len, |
| 174 | unsigned long virt_addr) | 184 | unsigned long virt_addr) |
| @@ -362,6 +372,12 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap, | |||
| 362 | console_init(); | 372 | console_init(); |
| 363 | debug_putstr("early console in extract_kernel\n"); | 373 | debug_putstr("early console in extract_kernel\n"); |
| 364 | 374 | ||
| 375 | if (IS_ENABLED(CONFIG_X86_5LEVEL) && !l5_supported()) { | ||
| 376 | error("This linux kernel as configured requires 5-level paging\n" | ||
| 377 | "This CPU does not support the required 'cr4.la57' feature\n" | ||
| 378 | "Unable to boot - please use a kernel appropriate for your CPU\n"); | ||
| 379 | } | ||
| 380 | |||
| 365 | free_mem_ptr = heap; /* Heap */ | 381 | free_mem_ptr = heap; /* Heap */ |
| 366 | free_mem_end_ptr = heap + BOOT_HEAP_SIZE; | 382 | free_mem_end_ptr = heap + BOOT_HEAP_SIZE; |
| 367 | 383 | ||
diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c new file mode 100644 index 000000000000..b4469a37e9a1 --- /dev/null +++ b/arch/x86/boot/compressed/pgtable_64.c | |||
| @@ -0,0 +1,28 @@ | |||
| 1 | #include <asm/processor.h> | ||
| 2 | |||
| 3 | /* | ||
| 4 | * __force_order is used by special_insns.h asm code to force instruction | ||
| 5 | * serialization. | ||
| 6 | * | ||
| 7 | * It is not referenced from the code, but GCC < 5 with -fPIE would fail | ||
| 8 | * due to an undefined symbol. Define it to make these ancient GCCs work. | ||
| 9 | */ | ||
| 10 | unsigned long __force_order; | ||
| 11 | |||
| 12 | int l5_paging_required(void) | ||
| 13 | { | ||
| 14 | /* Check if leaf 7 is supported. */ | ||
| 15 | |||
| 16 | if (native_cpuid_eax(0) < 7) | ||
| 17 | return 0; | ||
| 18 | |||
| 19 | /* Check if la57 is supported. */ | ||
| 20 | if (!(native_cpuid_ecx(7) & (1 << (X86_FEATURE_LA57 & 31)))) | ||
| 21 | return 0; | ||
| 22 | |||
| 23 | /* Check if 5-level paging has already been enabled. */ | ||
| 24 | if (native_read_cr4() & X86_CR4_LA57) | ||
| 25 | return 0; | ||
| 26 | |||
| 27 | return 1; | ||
| 28 | } | ||
diff --git a/arch/x86/boot/genimage.sh b/arch/x86/boot/genimage.sh index 49f4970f693b..c9e8499fbfe7 100644 --- a/arch/x86/boot/genimage.sh +++ b/arch/x86/boot/genimage.sh | |||
| @@ -44,9 +44,9 @@ FDINITRD=$6 | |||
| 44 | 44 | ||
| 45 | # Make sure the files actually exist | 45 | # Make sure the files actually exist |
| 46 | verify "$FBZIMAGE" | 46 | verify "$FBZIMAGE" |
| 47 | verify "$MTOOLSRC" | ||
| 48 | 47 | ||
| 49 | genbzdisk() { | 48 | genbzdisk() { |
| 49 | verify "$MTOOLSRC" | ||
| 50 | mformat a: | 50 | mformat a: |
| 51 | syslinux $FIMAGE | 51 | syslinux $FIMAGE |
| 52 | echo "$KCMDLINE" | mcopy - a:syslinux.cfg | 52 | echo "$KCMDLINE" | mcopy - a:syslinux.cfg |
| @@ -57,6 +57,7 @@ genbzdisk() { | |||
| 57 | } | 57 | } |
| 58 | 58 | ||
| 59 | genfdimage144() { | 59 | genfdimage144() { |
| 60 | verify "$MTOOLSRC" | ||
| 60 | dd if=/dev/zero of=$FIMAGE bs=1024 count=1440 2> /dev/null | 61 | dd if=/dev/zero of=$FIMAGE bs=1024 count=1440 2> /dev/null |
| 61 | mformat v: | 62 | mformat v: |
| 62 | syslinux $FIMAGE | 63 | syslinux $FIMAGE |
| @@ -68,6 +69,7 @@ genfdimage144() { | |||
| 68 | } | 69 | } |
| 69 | 70 | ||
| 70 | genfdimage288() { | 71 | genfdimage288() { |
| 72 | verify "$MTOOLSRC" | ||
| 71 | dd if=/dev/zero of=$FIMAGE bs=1024 count=2880 2> /dev/null | 73 | dd if=/dev/zero of=$FIMAGE bs=1024 count=2880 2> /dev/null |
| 72 | mformat w: | 74 | mformat w: |
| 73 | syslinux $FIMAGE | 75 | syslinux $FIMAGE |
diff --git a/arch/x86/crypto/salsa20_glue.c b/arch/x86/crypto/salsa20_glue.c index 399a29d067d6..cb91a64a99e7 100644 --- a/arch/x86/crypto/salsa20_glue.c +++ b/arch/x86/crypto/salsa20_glue.c | |||
| @@ -59,13 +59,6 @@ static int encrypt(struct blkcipher_desc *desc, | |||
| 59 | 59 | ||
| 60 | salsa20_ivsetup(ctx, walk.iv); | 60 | salsa20_ivsetup(ctx, walk.iv); |
| 61 | 61 | ||
| 62 | if (likely(walk.nbytes == nbytes)) | ||
| 63 | { | ||
| 64 | salsa20_encrypt_bytes(ctx, walk.src.virt.addr, | ||
| 65 | walk.dst.virt.addr, nbytes); | ||
| 66 | return blkcipher_walk_done(desc, &walk, 0); | ||
| 67 | } | ||
| 68 | |||
| 69 | while (walk.nbytes >= 64) { | 62 | while (walk.nbytes >= 64) { |
| 70 | salsa20_encrypt_bytes(ctx, walk.src.virt.addr, | 63 | salsa20_encrypt_bytes(ctx, walk.src.virt.addr, |
| 71 | walk.dst.virt.addr, | 64 | walk.dst.virt.addr, |
diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h index 982c325dad33..8be6afb58471 100644 --- a/arch/x86/include/asm/suspend_32.h +++ b/arch/x86/include/asm/suspend_32.h | |||
| @@ -12,7 +12,13 @@ | |||
| 12 | 12 | ||
| 13 | /* image of the saved processor state */ | 13 | /* image of the saved processor state */ |
| 14 | struct saved_context { | 14 | struct saved_context { |
| 15 | u16 es, fs, gs, ss; | 15 | /* |
| 16 | * On x86_32, all segment registers, with the possible exception of | ||
| 17 | * gs, are saved at kernel entry in pt_regs. | ||
| 18 | */ | ||
| 19 | #ifdef CONFIG_X86_32_LAZY_GS | ||
| 20 | u16 gs; | ||
| 21 | #endif | ||
| 16 | unsigned long cr0, cr2, cr3, cr4; | 22 | unsigned long cr0, cr2, cr3, cr4; |
| 17 | u64 misc_enable; | 23 | u64 misc_enable; |
| 18 | bool misc_enable_saved; | 24 | bool misc_enable_saved; |
diff --git a/arch/x86/include/asm/suspend_64.h b/arch/x86/include/asm/suspend_64.h index 7306e911faee..a7af9f53c0cb 100644 --- a/arch/x86/include/asm/suspend_64.h +++ b/arch/x86/include/asm/suspend_64.h | |||
| @@ -20,8 +20,20 @@ | |||
| 20 | */ | 20 | */ |
| 21 | struct saved_context { | 21 | struct saved_context { |
| 22 | struct pt_regs regs; | 22 | struct pt_regs regs; |
| 23 | u16 ds, es, fs, gs, ss; | 23 | |
| 24 | unsigned long gs_base, gs_kernel_base, fs_base; | 24 | /* |
| 25 | * User CS and SS are saved in current_pt_regs(). The rest of the | ||
| 26 | * segment selectors need to be saved and restored here. | ||
| 27 | */ | ||
| 28 | u16 ds, es, fs, gs; | ||
| 29 | |||
| 30 | /* | ||
| 31 | * Usermode FSBASE and GSBASE may not match the fs and gs selectors, | ||
| 32 | * so we save them separately. We save the kernelmode GSBASE to | ||
| 33 | * restore percpu access after resume. | ||
| 34 | */ | ||
| 35 | unsigned long kernelmode_gs_base, usermode_gs_base, fs_base; | ||
| 36 | |||
| 25 | unsigned long cr0, cr2, cr3, cr4, cr8; | 37 | unsigned long cr0, cr2, cr3, cr4, cr8; |
| 26 | u64 misc_enable; | 38 | u64 misc_enable; |
| 27 | bool misc_enable_saved; | 39 | bool misc_enable_saved; |
| @@ -30,8 +42,7 @@ struct saved_context { | |||
| 30 | u16 gdt_pad; /* Unused */ | 42 | u16 gdt_pad; /* Unused */ |
| 31 | struct desc_ptr gdt_desc; | 43 | struct desc_ptr gdt_desc; |
| 32 | u16 idt_pad; | 44 | u16 idt_pad; |
| 33 | u16 idt_limit; | 45 | struct desc_ptr idt; |
| 34 | unsigned long idt_base; | ||
| 35 | u16 ldt; | 46 | u16 ldt; |
| 36 | u16 tss; | 47 | u16 tss; |
| 37 | unsigned long tr; | 48 | unsigned long tr; |
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 05a97d5fe298..35cb20994e32 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
| @@ -106,7 +106,7 @@ EXPORT_SYMBOL(__max_logical_packages); | |||
| 106 | static unsigned int logical_packages __read_mostly; | 106 | static unsigned int logical_packages __read_mostly; |
| 107 | 107 | ||
| 108 | /* Maximum number of SMT threads on any online core */ | 108 | /* Maximum number of SMT threads on any online core */ |
| 109 | int __max_smt_threads __read_mostly; | 109 | int __read_mostly __max_smt_threads = 1; |
| 110 | 110 | ||
| 111 | /* Flag to indicate if a complete sched domain rebuild is required */ | 111 | /* Flag to indicate if a complete sched domain rebuild is required */ |
| 112 | bool x86_topology_update; | 112 | bool x86_topology_update; |
| @@ -1304,7 +1304,7 @@ void __init native_smp_cpus_done(unsigned int max_cpus) | |||
| 1304 | * Today neither Intel nor AMD support heterogenous systems so | 1304 | * Today neither Intel nor AMD support heterogenous systems so |
| 1305 | * extrapolate the boot cpu's data to all packages. | 1305 | * extrapolate the boot cpu's data to all packages. |
| 1306 | */ | 1306 | */ |
| 1307 | ncpus = cpu_data(0).booted_cores * smp_num_siblings; | 1307 | ncpus = cpu_data(0).booted_cores * topology_max_smt_threads(); |
| 1308 | __max_logical_packages = DIV_ROUND_UP(nr_cpu_ids, ncpus); | 1308 | __max_logical_packages = DIV_ROUND_UP(nr_cpu_ids, ncpus); |
| 1309 | pr_info("Max logical packages: %u\n", __max_logical_packages); | 1309 | pr_info("Max logical packages: %u\n", __max_logical_packages); |
| 1310 | 1310 | ||
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt index c4d55919fac1..e0b85930dd77 100644 --- a/arch/x86/lib/x86-opcode-map.txt +++ b/arch/x86/lib/x86-opcode-map.txt | |||
| @@ -607,7 +607,7 @@ fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1) | |||
| 607 | fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1) | 607 | fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1) |
| 608 | fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1) | 608 | fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1) |
| 609 | fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1) | 609 | fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1) |
| 610 | ff: | 610 | ff: UD0 |
| 611 | EndTable | 611 | EndTable |
| 612 | 612 | ||
| 613 | Table: 3-byte opcode 1 (0x0f 0x38) | 613 | Table: 3-byte opcode 1 (0x0f 0x38) |
| @@ -717,7 +717,7 @@ AVXcode: 2 | |||
| 717 | 7e: vpermt2d/q Vx,Hx,Wx (66),(ev) | 717 | 7e: vpermt2d/q Vx,Hx,Wx (66),(ev) |
| 718 | 7f: vpermt2ps/d Vx,Hx,Wx (66),(ev) | 718 | 7f: vpermt2ps/d Vx,Hx,Wx (66),(ev) |
| 719 | 80: INVEPT Gy,Mdq (66) | 719 | 80: INVEPT Gy,Mdq (66) |
| 720 | 81: INVPID Gy,Mdq (66) | 720 | 81: INVVPID Gy,Mdq (66) |
| 721 | 82: INVPCID Gy,Mdq (66) | 721 | 82: INVPCID Gy,Mdq (66) |
| 722 | 83: vpmultishiftqb Vx,Hx,Wx (66),(ev) | 722 | 83: vpmultishiftqb Vx,Hx,Wx (66),(ev) |
| 723 | 88: vexpandps/d Vpd,Wpd (66),(ev) | 723 | 88: vexpandps/d Vpd,Wpd (66),(ev) |
| @@ -970,6 +970,15 @@ GrpTable: Grp9 | |||
| 970 | EndTable | 970 | EndTable |
| 971 | 971 | ||
| 972 | GrpTable: Grp10 | 972 | GrpTable: Grp10 |
| 973 | # all are UD1 | ||
| 974 | 0: UD1 | ||
| 975 | 1: UD1 | ||
| 976 | 2: UD1 | ||
| 977 | 3: UD1 | ||
| 978 | 4: UD1 | ||
| 979 | 5: UD1 | ||
| 980 | 6: UD1 | ||
| 981 | 7: UD1 | ||
| 973 | EndTable | 982 | EndTable |
| 974 | 983 | ||
| 975 | # Grp11A and Grp11B are expressed as Grp11 in Intel SDM | 984 | # Grp11A and Grp11B are expressed as Grp11 in Intel SDM |
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 6e4573b1da34..c45b6ec5357b 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c | |||
| @@ -404,11 +404,11 @@ void iounmap(volatile void __iomem *addr) | |||
| 404 | return; | 404 | return; |
| 405 | } | 405 | } |
| 406 | 406 | ||
| 407 | mmiotrace_iounmap(addr); | ||
| 408 | |||
| 407 | addr = (volatile void __iomem *) | 409 | addr = (volatile void __iomem *) |
| 408 | (PAGE_MASK & (unsigned long __force)addr); | 410 | (PAGE_MASK & (unsigned long __force)addr); |
| 409 | 411 | ||
| 410 | mmiotrace_iounmap(addr); | ||
| 411 | |||
| 412 | /* Use the vm area unlocked, assuming the caller | 412 | /* Use the vm area unlocked, assuming the caller |
| 413 | ensures there isn't another iounmap for the same address | 413 | ensures there isn't another iounmap for the same address |
| 414 | in parallel. Reuse of the virtual address is prevented by | 414 | in parallel. Reuse of the virtual address is prevented by |
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c index c21c2ed04612..58477ec3d66d 100644 --- a/arch/x86/mm/kmmio.c +++ b/arch/x86/mm/kmmio.c | |||
| @@ -435,17 +435,18 @@ int register_kmmio_probe(struct kmmio_probe *p) | |||
| 435 | unsigned long flags; | 435 | unsigned long flags; |
| 436 | int ret = 0; | 436 | int ret = 0; |
| 437 | unsigned long size = 0; | 437 | unsigned long size = 0; |
| 438 | unsigned long addr = p->addr & PAGE_MASK; | ||
| 438 | const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK); | 439 | const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK); |
| 439 | unsigned int l; | 440 | unsigned int l; |
| 440 | pte_t *pte; | 441 | pte_t *pte; |
| 441 | 442 | ||
| 442 | spin_lock_irqsave(&kmmio_lock, flags); | 443 | spin_lock_irqsave(&kmmio_lock, flags); |
| 443 | if (get_kmmio_probe(p->addr)) { | 444 | if (get_kmmio_probe(addr)) { |
| 444 | ret = -EEXIST; | 445 | ret = -EEXIST; |
| 445 | goto out; | 446 | goto out; |
| 446 | } | 447 | } |
| 447 | 448 | ||
| 448 | pte = lookup_address(p->addr, &l); | 449 | pte = lookup_address(addr, &l); |
| 449 | if (!pte) { | 450 | if (!pte) { |
| 450 | ret = -EINVAL; | 451 | ret = -EINVAL; |
| 451 | goto out; | 452 | goto out; |
| @@ -454,7 +455,7 @@ int register_kmmio_probe(struct kmmio_probe *p) | |||
| 454 | kmmio_count++; | 455 | kmmio_count++; |
| 455 | list_add_rcu(&p->list, &kmmio_probes); | 456 | list_add_rcu(&p->list, &kmmio_probes); |
| 456 | while (size < size_lim) { | 457 | while (size < size_lim) { |
| 457 | if (add_kmmio_fault_page(p->addr + size)) | 458 | if (add_kmmio_fault_page(addr + size)) |
| 458 | pr_err("Unable to set page fault.\n"); | 459 | pr_err("Unable to set page fault.\n"); |
| 459 | size += page_level_size(l); | 460 | size += page_level_size(l); |
| 460 | } | 461 | } |
| @@ -528,19 +529,20 @@ void unregister_kmmio_probe(struct kmmio_probe *p) | |||
| 528 | { | 529 | { |
| 529 | unsigned long flags; | 530 | unsigned long flags; |
| 530 | unsigned long size = 0; | 531 | unsigned long size = 0; |
| 532 | unsigned long addr = p->addr & PAGE_MASK; | ||
| 531 | const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK); | 533 | const unsigned long size_lim = p->len + (p->addr & ~PAGE_MASK); |
| 532 | struct kmmio_fault_page *release_list = NULL; | 534 | struct kmmio_fault_page *release_list = NULL; |
| 533 | struct kmmio_delayed_release *drelease; | 535 | struct kmmio_delayed_release *drelease; |
| 534 | unsigned int l; | 536 | unsigned int l; |
| 535 | pte_t *pte; | 537 | pte_t *pte; |
| 536 | 538 | ||
| 537 | pte = lookup_address(p->addr, &l); | 539 | pte = lookup_address(addr, &l); |
| 538 | if (!pte) | 540 | if (!pte) |
| 539 | return; | 541 | return; |
| 540 | 542 | ||
| 541 | spin_lock_irqsave(&kmmio_lock, flags); | 543 | spin_lock_irqsave(&kmmio_lock, flags); |
| 542 | while (size < size_lim) { | 544 | while (size < size_lim) { |
| 543 | release_kmmio_fault_page(p->addr + size, &release_list); | 545 | release_kmmio_fault_page(addr + size, &release_list); |
| 544 | size += page_level_size(l); | 546 | size += page_level_size(l); |
| 545 | } | 547 | } |
| 546 | list_del_rcu(&p->list); | 548 | list_del_rcu(&p->list); |
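register_kmmio_probe() and unregister_kmmio_probe() now mask the probe address down to its page base once and use that consistently for lookup, arming and disarming; size_lim already includes the sub-page offset, so the loop still covers every page the probe touches. The base/offset split in isolation:

#include <assert.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long p_addr = 0x1234abcdUL;    /* probe address */
        unsigned long p_len  = 100;             /* probe length  */

        unsigned long addr     = p_addr & PAGE_MASK;            /* page base  */
        unsigned long size_lim = p_len + (p_addr & ~PAGE_MASK); /* len+offset */

        assert(addr == 0x1234a000UL);
        assert(size_lim == 100 + 0xbcd);        /* walks the whole page span */
        return 0;
}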
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c index 1e996df687a3..e663d6bf1328 100644 --- a/arch/x86/pci/fixup.c +++ b/arch/x86/pci/fixup.c | |||
| @@ -665,6 +665,16 @@ static void pci_amd_enable_64bit_bar(struct pci_dev *dev) | |||
| 665 | unsigned i; | 665 | unsigned i; |
| 666 | u32 base, limit, high; | 666 | u32 base, limit, high; |
| 667 | struct resource *res, *conflict; | 667 | struct resource *res, *conflict; |
| 668 | struct pci_dev *other; | ||
| 669 | |||
| 670 | /* Check that we are the only device of that type */ | ||
| 671 | other = pci_get_device(dev->vendor, dev->device, NULL); | ||
| 672 | if (other != dev || | ||
| 673 | (other = pci_get_device(dev->vendor, dev->device, other))) { | ||
| 674 | /* This is a multi-socket system, don't touch it for now */ | ||
| 675 | pci_dev_put(other); | ||
| 676 | return; | ||
| 677 | } | ||
| 668 | 678 | ||
| 669 | for (i = 0; i < 8; i++) { | 679 | for (i = 0; i < 8; i++) { |
| 670 | pci_read_config_dword(dev, AMD_141b_MMIO_BASE(i), &base); | 680 | pci_read_config_dword(dev, AMD_141b_MMIO_BASE(i), &base); |
| @@ -696,8 +706,13 @@ static void pci_amd_enable_64bit_bar(struct pci_dev *dev) | |||
| 696 | res->end = 0xfd00000000ull - 1; | 706 | res->end = 0xfd00000000ull - 1; |
| 697 | 707 | ||
| 698 | /* Just grab the free area behind system memory for this */ | 708 | /* Just grab the free area behind system memory for this */ |
| 699 | while ((conflict = request_resource_conflict(&iomem_resource, res))) | 709 | while ((conflict = request_resource_conflict(&iomem_resource, res))) { |
| 710 | if (conflict->end >= res->end) { | ||
| 711 | kfree(res); | ||
| 712 | return; | ||
| 713 | } | ||
| 700 | res->start = conflict->end + 1; | 714 | res->start = conflict->end + 1; |
| 715 | } | ||
| 701 | 716 | ||
| 702 | dev_info(&dev->dev, "adding root bus resource %pR\n", res); | 717 | dev_info(&dev->dev, "adding root bus resource %pR\n", res); |
| 703 | 718 | ||
| @@ -714,10 +729,10 @@ static void pci_amd_enable_64bit_bar(struct pci_dev *dev) | |||
| 714 | 729 | ||
| 715 | pci_bus_add_resource(dev->bus, res, 0); | 730 | pci_bus_add_resource(dev->bus, res, 0); |
| 716 | } | 731 | } |
| 717 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1401, pci_amd_enable_64bit_bar); | 732 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1401, pci_amd_enable_64bit_bar); |
| 718 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x141b, pci_amd_enable_64bit_bar); | 733 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x141b, pci_amd_enable_64bit_bar); |
| 719 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar); | 734 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar); |
| 720 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar); | 735 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar); |
| 721 | DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar); | 736 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar); |
| 722 | 737 | ||
| 723 | #endif | 738 | #endif |
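The allocation loop above now terminates: once a conflicting range reaches (or passes) the end of the candidate window there is nothing left to slide into, so the resource is freed and the fixup backs off instead of looping forever. A stand-alone sketch of the bounded-retry shape, with a toy one-entry conflict table in place of iomem_resource:

#include <stdlib.h>

struct res { unsigned long start, end; };

/* Toy one-entry conflict table standing in for iomem_resource. */
static const struct res reserved = { 0x1000, 0x1fff };

static const struct res *find_conflict(const struct res *r)
{
        return (r->start <= reserved.end && r->end >= reserved.start)
                ? &reserved : NULL;
}

/* Bounded retry, mirroring the fixed fixup: give up once a conflict
 * reaches the end of the window instead of looping forever. */
static struct res *claim_window(struct res *r)
{
        const struct res *conflict;

        while ((conflict = find_conflict(r))) {
                if (conflict->end >= r->end) {
                        free(r);
                        return NULL;
                }
                r->start = conflict->end + 1;
        }
        return r;
}

int main(void)
{
        struct res *r = malloc(sizeof(*r));

        r->start = 0x0;
        r->end   = 0x2fff;
        return claim_window(r) ? 0 : 1; /* slides past 0x1fff, succeeds */
}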
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c index 5191de14f4df..36a28eddb435 100644 --- a/arch/x86/power/cpu.c +++ b/arch/x86/power/cpu.c | |||
| @@ -82,12 +82,8 @@ static void __save_processor_state(struct saved_context *ctxt) | |||
| 82 | /* | 82 | /* |
| 83 | * descriptor tables | 83 | * descriptor tables |
| 84 | */ | 84 | */ |
| 85 | #ifdef CONFIG_X86_32 | ||
| 86 | store_idt(&ctxt->idt); | 85 | store_idt(&ctxt->idt); |
| 87 | #else | 86 | |
| 88 | /* CONFIG_X86_64 */ | ||
| 89 | store_idt((struct desc_ptr *)&ctxt->idt_limit); | ||
| 90 | #endif | ||
| 91 | /* | 87 | /* |
| 92 | * We save it here, but restore it only in the hibernate case. | 88 | * We save it here, but restore it only in the hibernate case. |
| 93 | * For ACPI S3 resume, this is loaded via 'early_gdt_desc' in 64-bit | 89 | * For ACPI S3 resume, this is loaded via 'early_gdt_desc' in 64-bit |
| @@ -103,22 +99,18 @@ static void __save_processor_state(struct saved_context *ctxt) | |||
| 103 | /* | 99 | /* |
| 104 | * segment registers | 100 | * segment registers |
| 105 | */ | 101 | */ |
| 106 | #ifdef CONFIG_X86_32 | 102 | #ifdef CONFIG_X86_32_LAZY_GS |
| 107 | savesegment(es, ctxt->es); | ||
| 108 | savesegment(fs, ctxt->fs); | ||
| 109 | savesegment(gs, ctxt->gs); | 103 | savesegment(gs, ctxt->gs); |
| 110 | savesegment(ss, ctxt->ss); | 104 | #endif |
| 111 | #else | 105 | #ifdef CONFIG_X86_64 |
| 112 | /* CONFIG_X86_64 */ | 106 | savesegment(gs, ctxt->gs); |
| 113 | asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds)); | 107 | savesegment(fs, ctxt->fs); |
| 114 | asm volatile ("movw %%es, %0" : "=m" (ctxt->es)); | 108 | savesegment(ds, ctxt->ds); |
| 115 | asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs)); | 109 | savesegment(es, ctxt->es); |
| 116 | asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs)); | ||
| 117 | asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss)); | ||
| 118 | 110 | ||
| 119 | rdmsrl(MSR_FS_BASE, ctxt->fs_base); | 111 | rdmsrl(MSR_FS_BASE, ctxt->fs_base); |
| 120 | rdmsrl(MSR_GS_BASE, ctxt->gs_base); | 112 | rdmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base); |
| 121 | rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base); | 113 | rdmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base); |
| 122 | mtrr_save_fixed_ranges(NULL); | 114 | mtrr_save_fixed_ranges(NULL); |
| 123 | 115 | ||
| 124 | rdmsrl(MSR_EFER, ctxt->efer); | 116 | rdmsrl(MSR_EFER, ctxt->efer); |
| @@ -178,6 +170,9 @@ static void fix_processor_context(void) | |||
| 178 | write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS); | 170 | write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS); |
| 179 | 171 | ||
| 180 | syscall_init(); /* This sets MSR_*STAR and related */ | 172 | syscall_init(); /* This sets MSR_*STAR and related */ |
| 173 | #else | ||
| 174 | if (boot_cpu_has(X86_FEATURE_SEP)) | ||
| 175 | enable_sep_cpu(); | ||
| 181 | #endif | 176 | #endif |
| 182 | load_TR_desc(); /* This does ltr */ | 177 | load_TR_desc(); /* This does ltr */ |
| 183 | load_mm_ldt(current->active_mm); /* This does lldt */ | 178 | load_mm_ldt(current->active_mm); /* This does lldt */ |
| @@ -190,9 +185,12 @@ static void fix_processor_context(void) | |||
| 190 | } | 185 | } |
| 191 | 186 | ||
| 192 | /** | 187 | /** |
| 193 | * __restore_processor_state - restore the contents of CPU registers saved | 188 | * __restore_processor_state - restore the contents of CPU registers saved |
| 194 | * by __save_processor_state() | 189 | * by __save_processor_state() |
| 195 | * @ctxt - structure to load the registers contents from | 190 | * @ctxt - structure to load the registers contents from |
| 191 | * | ||
| 192 | * The asm code that gets us here will have restored a usable GDT, although | ||
| 193 | * it will be pointing to the wrong alias. | ||
| 196 | */ | 194 | */ |
| 197 | static void notrace __restore_processor_state(struct saved_context *ctxt) | 195 | static void notrace __restore_processor_state(struct saved_context *ctxt) |
| 198 | { | 196 | { |
| @@ -215,57 +213,50 @@ static void notrace __restore_processor_state(struct saved_context *ctxt) | |||
| 215 | write_cr2(ctxt->cr2); | 213 | write_cr2(ctxt->cr2); |
| 216 | write_cr0(ctxt->cr0); | 214 | write_cr0(ctxt->cr0); |
| 217 | 215 | ||
| 216 | /* Restore the IDT. */ | ||
| 217 | load_idt(&ctxt->idt); | ||
| 218 | |||
| 218 | /* | 219 | /* |
| 219 | * now restore the descriptor tables to their proper values | 220 | * Just in case the asm code got us here with the SS, DS, or ES |
| 220 | * ltr is done i fix_processor_context(). | 221 | * out of sync with the GDT, update them. |
| 221 | */ | 222 | */ |
| 222 | #ifdef CONFIG_X86_32 | 223 | loadsegment(ss, __KERNEL_DS); |
| 223 | load_idt(&ctxt->idt); | 224 | loadsegment(ds, __USER_DS); |
| 224 | #else | 225 | loadsegment(es, __USER_DS); |
| 225 | /* CONFIG_X86_64 */ | ||
| 226 | load_idt((const struct desc_ptr *)&ctxt->idt_limit); | ||
| 227 | #endif | ||
| 228 | 226 | ||
| 229 | #ifdef CONFIG_X86_64 | ||
| 230 | /* | 227 | /* |
| 231 | * We need GSBASE restored before percpu access can work. | 228 | * Restore percpu access. Percpu access can happen in exception |
| 232 | * percpu access can happen in exception handlers or in complicated | 229 | * handlers or in complicated helpers like load_gs_index(). |
| 233 | * helpers like load_gs_index(). | ||
| 234 | */ | 230 | */ |
| 235 | wrmsrl(MSR_GS_BASE, ctxt->gs_base); | 231 | #ifdef CONFIG_X86_64 |
| 232 | wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base); | ||
| 233 | #else | ||
| 234 | loadsegment(fs, __KERNEL_PERCPU); | ||
| 235 | loadsegment(gs, __KERNEL_STACK_CANARY); | ||
| 236 | #endif | 236 | #endif |
| 237 | 237 | ||
| 238 | /* Restore the TSS, RO GDT, LDT, and usermode-relevant MSRs. */ | ||
| 238 | fix_processor_context(); | 239 | fix_processor_context(); |
| 239 | 240 | ||
| 240 | /* | 241 | /* |
| 241 | * Restore segment registers. This happens after restoring the GDT | 242 | * Now that we have descriptor tables fully restored and working |
| 242 | * and LDT, which happen in fix_processor_context(). | 243 | * exception handling, restore the usermode segments. |
| 243 | */ | 244 | */ |
| 244 | #ifdef CONFIG_X86_32 | 245 | #ifdef CONFIG_X86_64 |
| 246 | loadsegment(ds, ctxt->es); | ||
| 245 | loadsegment(es, ctxt->es); | 247 | loadsegment(es, ctxt->es); |
| 246 | loadsegment(fs, ctxt->fs); | 248 | loadsegment(fs, ctxt->fs); |
| 247 | loadsegment(gs, ctxt->gs); | ||
| 248 | loadsegment(ss, ctxt->ss); | ||
| 249 | |||
| 250 | /* | ||
| 251 | * sysenter MSRs | ||
| 252 | */ | ||
| 253 | if (boot_cpu_has(X86_FEATURE_SEP)) | ||
| 254 | enable_sep_cpu(); | ||
| 255 | #else | ||
| 256 | /* CONFIG_X86_64 */ | ||
| 257 | asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds)); | ||
| 258 | asm volatile ("movw %0, %%es" :: "r" (ctxt->es)); | ||
| 259 | asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs)); | ||
| 260 | load_gs_index(ctxt->gs); | 249 | load_gs_index(ctxt->gs); |
| 261 | asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss)); | ||
| 262 | 250 | ||
| 263 | /* | 251 | /* |
| 264 | * Restore FSBASE and user GSBASE after reloading the respective | 252 | * Restore FSBASE and GSBASE after restoring the selectors, since |
| 265 | * segment selectors. | 253 | * restoring the selectors clobbers the bases. Keep in mind |
| 254 | * that MSR_KERNEL_GS_BASE is horribly misnamed. | ||
| 266 | */ | 255 | */ |
| 267 | wrmsrl(MSR_FS_BASE, ctxt->fs_base); | 256 | wrmsrl(MSR_FS_BASE, ctxt->fs_base); |
| 268 | wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base); | 257 | wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base); |
| 258 | #elif defined(CONFIG_X86_32_LAZY_GS) | ||
| 259 | loadsegment(gs, ctxt->gs); | ||
| 269 | #endif | 260 | #endif |
| 270 | 261 | ||
| 271 | do_fpu_end(); | 262 | do_fpu_end(); |
diff --git a/arch/x86/xen/apic.c b/arch/x86/xen/apic.c index 6b830d4cb4c8..de58533d3664 100644 --- a/arch/x86/xen/apic.c +++ b/arch/x86/xen/apic.c | |||
| @@ -57,7 +57,7 @@ static u32 xen_apic_read(u32 reg) | |||
| 57 | return 0; | 57 | return 0; |
| 58 | 58 | ||
| 59 | if (reg == APIC_LVR) | 59 | if (reg == APIC_LVR) |
| 60 | return 0x10; | 60 | return 0x14; |
| 61 | #ifdef CONFIG_X86_32 | 61 | #ifdef CONFIG_X86_32 |
| 62 | if (reg == APIC_LDR) | 62 | if (reg == APIC_LDR) |
| 63 | return SET_APIC_LOGICAL_ID(1UL << smp_processor_id()); | 63 | return SET_APIC_LOGICAL_ID(1UL << smp_processor_id()); |
diff --git a/crypto/af_alg.c b/crypto/af_alg.c index 358749c38894..415a54ced4d6 100644 --- a/crypto/af_alg.c +++ b/crypto/af_alg.c | |||
| @@ -672,14 +672,15 @@ void af_alg_free_areq_sgls(struct af_alg_async_req *areq) | |||
| 672 | } | 672 | } |
| 673 | 673 | ||
| 674 | tsgl = areq->tsgl; | 674 | tsgl = areq->tsgl; |
| 675 | for_each_sg(tsgl, sg, areq->tsgl_entries, i) { | 675 | if (tsgl) { |
| 676 | if (!sg_page(sg)) | 676 | for_each_sg(tsgl, sg, areq->tsgl_entries, i) { |
| 677 | continue; | 677 | if (!sg_page(sg)) |
| 678 | put_page(sg_page(sg)); | 678 | continue; |
| 679 | } | 679 | put_page(sg_page(sg)); |
| 680 | } | ||
| 680 | 681 | ||
| 681 | if (areq->tsgl && areq->tsgl_entries) | ||
| 682 | sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl)); | 682 | sock_kfree_s(sk, tsgl, areq->tsgl_entries * sizeof(*tsgl)); |
| 683 | } | ||
| 683 | } | 684 | } |
| 684 | EXPORT_SYMBOL_GPL(af_alg_free_areq_sgls); | 685 | EXPORT_SYMBOL_GPL(af_alg_free_areq_sgls); |
| 685 | 686 | ||
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c index 805f485ddf1b..48b34e9c6834 100644 --- a/crypto/algif_aead.c +++ b/crypto/algif_aead.c | |||
| @@ -503,6 +503,7 @@ static void aead_release(void *private) | |||
| 503 | struct aead_tfm *tfm = private; | 503 | struct aead_tfm *tfm = private; |
| 504 | 504 | ||
| 505 | crypto_free_aead(tfm->aead); | 505 | crypto_free_aead(tfm->aead); |
| 506 | crypto_put_default_null_skcipher2(); | ||
| 506 | kfree(tfm); | 507 | kfree(tfm); |
| 507 | } | 508 | } |
| 508 | 509 | ||
| @@ -535,7 +536,6 @@ static void aead_sock_destruct(struct sock *sk) | |||
| 535 | unsigned int ivlen = crypto_aead_ivsize(tfm); | 536 | unsigned int ivlen = crypto_aead_ivsize(tfm); |
| 536 | 537 | ||
| 537 | af_alg_pull_tsgl(sk, ctx->used, NULL, 0); | 538 | af_alg_pull_tsgl(sk, ctx->used, NULL, 0); |
| 538 | crypto_put_default_null_skcipher2(); | ||
| 539 | sock_kzfree_s(sk, ctx->iv, ivlen); | 539 | sock_kzfree_s(sk, ctx->iv, ivlen); |
| 540 | sock_kfree_s(sk, ctx, ctx->len); | 540 | sock_kfree_s(sk, ctx, ctx->len); |
| 541 | af_alg_release_parent(sk); | 541 | af_alg_release_parent(sk); |
diff --git a/crypto/hmac.c b/crypto/hmac.c index 92871dc2a63e..e74730224f0a 100644 --- a/crypto/hmac.c +++ b/crypto/hmac.c | |||
| @@ -195,11 +195,15 @@ static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) | |||
| 195 | salg = shash_attr_alg(tb[1], 0, 0); | 195 | salg = shash_attr_alg(tb[1], 0, 0); |
| 196 | if (IS_ERR(salg)) | 196 | if (IS_ERR(salg)) |
| 197 | return PTR_ERR(salg); | 197 | return PTR_ERR(salg); |
| 198 | alg = &salg->base; | ||
| 198 | 199 | ||
| 200 | /* The underlying hash algorithm must be unkeyed */ | ||
| 199 | err = -EINVAL; | 201 | err = -EINVAL; |
| 202 | if (crypto_shash_alg_has_setkey(salg)) | ||
| 203 | goto out_put_alg; | ||
| 204 | |||
| 200 | ds = salg->digestsize; | 205 | ds = salg->digestsize; |
| 201 | ss = salg->statesize; | 206 | ss = salg->statesize; |
| 202 | alg = &salg->base; | ||
| 203 | if (ds > alg->cra_blocksize || | 207 | if (ds > alg->cra_blocksize || |
| 204 | ss < alg->cra_blocksize) | 208 | ss < alg->cra_blocksize) |
| 205 | goto out_put_alg; | 209 | goto out_put_alg; |
diff --git a/crypto/rsa_helper.c b/crypto/rsa_helper.c index 0b66dc824606..cad395d70d78 100644 --- a/crypto/rsa_helper.c +++ b/crypto/rsa_helper.c | |||
| @@ -30,7 +30,7 @@ int rsa_get_n(void *context, size_t hdrlen, unsigned char tag, | |||
| 30 | return -EINVAL; | 30 | return -EINVAL; |
| 31 | 31 | ||
| 32 | if (fips_enabled) { | 32 | if (fips_enabled) { |
| 33 | while (!*ptr && n_sz) { | 33 | while (n_sz && !*ptr) { |
| 34 | ptr++; | 34 | ptr++; |
| 35 | n_sz--; | 35 | n_sz--; |
| 36 | } | 36 | } |
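The swap matters because && short-circuits left to right: the old "!*ptr && n_sz" dereferenced ptr before checking the remaining length, reading one byte past the key buffer once n_sz hit zero. Checking the length first makes the dereference conditional. The fixed shape in isolation:

#include <stddef.h>

/* Strip leading zero bytes without ever reading past the buffer: test
 * the remaining length BEFORE dereferencing the pointer, so *ptr is
 * only evaluated while n_sz > 0. */
static const unsigned char *skip_leading_zeros(const unsigned char *ptr,
                                               size_t *n_sz)
{
        while (*n_sz && !*ptr) {
                ptr++;
                (*n_sz)--;
        }
        return ptr;
}

int main(void)
{
        unsigned char key[] = { 0x00, 0x00, 0xab };
        size_t n = sizeof(key);

        return (skip_leading_zeros(key, &n)[0] == 0xab && n == 1) ? 0 : 1;
}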
diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c index f550b5d94630..d7da0eea5622 100644 --- a/crypto/salsa20_generic.c +++ b/crypto/salsa20_generic.c | |||
| @@ -188,13 +188,6 @@ static int encrypt(struct blkcipher_desc *desc, | |||
| 188 | 188 | ||
| 189 | salsa20_ivsetup(ctx, walk.iv); | 189 | salsa20_ivsetup(ctx, walk.iv); |
| 190 | 190 | ||
| 191 | if (likely(walk.nbytes == nbytes)) | ||
| 192 | { | ||
| 193 | salsa20_encrypt_bytes(ctx, walk.dst.virt.addr, | ||
| 194 | walk.src.virt.addr, nbytes); | ||
| 195 | return blkcipher_walk_done(desc, &walk, 0); | ||
| 196 | } | ||
| 197 | |||
| 198 | while (walk.nbytes >= 64) { | 191 | while (walk.nbytes >= 64) { |
| 199 | salsa20_encrypt_bytes(ctx, walk.dst.virt.addr, | 192 | salsa20_encrypt_bytes(ctx, walk.dst.virt.addr, |
| 200 | walk.src.virt.addr, | 193 | walk.src.virt.addr, |
diff --git a/crypto/shash.c b/crypto/shash.c index 325a14da5827..e849d3ee2e27 100644 --- a/crypto/shash.c +++ b/crypto/shash.c | |||
| @@ -25,11 +25,12 @@ | |||
| 25 | 25 | ||
| 26 | static const struct crypto_type crypto_shash_type; | 26 | static const struct crypto_type crypto_shash_type; |
| 27 | 27 | ||
| 28 | static int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, | 28 | int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, |
| 29 | unsigned int keylen) | 29 | unsigned int keylen) |
| 30 | { | 30 | { |
| 31 | return -ENOSYS; | 31 | return -ENOSYS; |
| 32 | } | 32 | } |
| 33 | EXPORT_SYMBOL_GPL(shash_no_setkey); | ||
| 33 | 34 | ||
| 34 | static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key, | 35 | static int shash_setkey_unaligned(struct crypto_shash *tfm, const u8 *key, |
| 35 | unsigned int keylen) | 36 | unsigned int keylen) |
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index e4ffaeec9ec2..a4c8ad98560d 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c | |||
| @@ -1138,7 +1138,7 @@ int acpi_subsys_thaw_noirq(struct device *dev) | |||
| 1138 | * skip all of the subsequent "thaw" callbacks for the device. | 1138 | * skip all of the subsequent "thaw" callbacks for the device. |
| 1139 | */ | 1139 | */ |
| 1140 | if (dev_pm_smart_suspend_and_suspended(dev)) { | 1140 | if (dev_pm_smart_suspend_and_suspended(dev)) { |
| 1141 | dev->power.direct_complete = true; | 1141 | dev_pm_skip_next_resume_phases(dev); |
| 1142 | return 0; | 1142 | return 0; |
| 1143 | } | 1143 | } |
| 1144 | 1144 | ||
diff --git a/drivers/ata/ahci_mtk.c b/drivers/ata/ahci_mtk.c index 80854f71559a..0ae6971c2a4c 100644 --- a/drivers/ata/ahci_mtk.c +++ b/drivers/ata/ahci_mtk.c | |||
| @@ -1,5 +1,5 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * MeidaTek AHCI SATA driver | 2 | * MediaTek AHCI SATA driver |
| 3 | * | 3 | * |
| 4 | * Copyright (c) 2017 MediaTek Inc. | 4 | * Copyright (c) 2017 MediaTek Inc. |
| 5 | * Author: Ryder Lee <ryder.lee@mediatek.com> | 5 | * Author: Ryder Lee <ryder.lee@mediatek.com> |
| @@ -25,7 +25,7 @@ | |||
| 25 | #include <linux/reset.h> | 25 | #include <linux/reset.h> |
| 26 | #include "ahci.h" | 26 | #include "ahci.h" |
| 27 | 27 | ||
| 28 | #define DRV_NAME "ahci" | 28 | #define DRV_NAME "ahci-mtk" |
| 29 | 29 | ||
| 30 | #define SYS_CFG 0x14 | 30 | #define SYS_CFG 0x14 |
| 31 | #define SYS_CFG_SATA_MSK GENMASK(31, 30) | 31 | #define SYS_CFG_SATA_MSK GENMASK(31, 30) |
| @@ -192,5 +192,5 @@ static struct platform_driver mtk_ahci_driver = { | |||
| 192 | }; | 192 | }; |
| 193 | module_platform_driver(mtk_ahci_driver); | 193 | module_platform_driver(mtk_ahci_driver); |
| 194 | 194 | ||
| 195 | MODULE_DESCRIPTION("MeidaTek SATA AHCI Driver"); | 195 | MODULE_DESCRIPTION("MediaTek SATA AHCI Driver"); |
| 196 | MODULE_LICENSE("GPL v2"); | 196 | MODULE_LICENSE("GPL v2"); |
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c index b6b0bf76dfc7..2685f28160f7 100644 --- a/drivers/ata/ahci_qoriq.c +++ b/drivers/ata/ahci_qoriq.c | |||
| @@ -35,6 +35,8 @@ | |||
| 35 | 35 | ||
| 36 | /* port register default value */ | 36 | /* port register default value */ |
| 37 | #define AHCI_PORT_PHY_1_CFG 0xa003fffe | 37 | #define AHCI_PORT_PHY_1_CFG 0xa003fffe |
| 38 | #define AHCI_PORT_PHY2_CFG 0x28184d1f | ||
| 39 | #define AHCI_PORT_PHY3_CFG 0x0e081509 | ||
| 38 | #define AHCI_PORT_TRANS_CFG 0x08000029 | 40 | #define AHCI_PORT_TRANS_CFG 0x08000029 |
| 39 | #define AHCI_PORT_AXICC_CFG 0x3fffffff | 41 | #define AHCI_PORT_AXICC_CFG 0x3fffffff |
| 40 | 42 | ||
| @@ -183,6 +185,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv) | |||
| 183 | writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2, | 185 | writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2, |
| 184 | qpriv->ecc_addr); | 186 | qpriv->ecc_addr); |
| 185 | writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); | 187 | writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); |
| 188 | writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2); | ||
| 189 | writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3); | ||
| 186 | writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); | 190 | writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); |
| 187 | if (qpriv->is_dmacoherent) | 191 | if (qpriv->is_dmacoherent) |
| 188 | writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC); | 192 | writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC); |
| @@ -190,6 +194,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv) | |||
| 190 | 194 | ||
| 191 | case AHCI_LS2080A: | 195 | case AHCI_LS2080A: |
| 192 | writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); | 196 | writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); |
| 197 | writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2); | ||
| 198 | writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3); | ||
| 193 | writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); | 199 | writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); |
| 194 | if (qpriv->is_dmacoherent) | 200 | if (qpriv->is_dmacoherent) |
| 195 | writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC); | 201 | writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC); |
| @@ -201,6 +207,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv) | |||
| 201 | writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2, | 207 | writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2, |
| 202 | qpriv->ecc_addr); | 208 | qpriv->ecc_addr); |
| 203 | writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); | 209 | writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); |
| 210 | writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2); | ||
| 211 | writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3); | ||
| 204 | writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); | 212 | writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); |
| 205 | if (qpriv->is_dmacoherent) | 213 | if (qpriv->is_dmacoherent) |
| 206 | writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC); | 214 | writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC); |
| @@ -212,6 +220,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv) | |||
| 212 | writel(readl(qpriv->ecc_addr) | ECC_DIS_LS1088A, | 220 | writel(readl(qpriv->ecc_addr) | ECC_DIS_LS1088A, |
| 213 | qpriv->ecc_addr); | 221 | qpriv->ecc_addr); |
| 214 | writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); | 222 | writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); |
| 223 | writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2); | ||
| 224 | writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3); | ||
| 215 | writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); | 225 | writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); |
| 216 | if (qpriv->is_dmacoherent) | 226 | if (qpriv->is_dmacoherent) |
| 217 | writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC); | 227 | writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC); |
| @@ -219,6 +229,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv) | |||
| 219 | 229 | ||
| 220 | case AHCI_LS2088A: | 230 | case AHCI_LS2088A: |
| 221 | writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); | 231 | writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1); |
| 232 | writel(AHCI_PORT_PHY2_CFG, reg_base + PORT_PHY2); | ||
| 233 | writel(AHCI_PORT_PHY3_CFG, reg_base + PORT_PHY3); | ||
| 222 | writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); | 234 | writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS); |
| 223 | if (qpriv->is_dmacoherent) | 235 | if (qpriv->is_dmacoherent) |
| 224 | writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC); | 236 | writel(AHCI_PORT_AXICC_CFG, reg_base + PORT_AXICC); |
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 2a882929de4a..8193b38a1cae 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c | |||
| @@ -3082,13 +3082,19 @@ int sata_down_spd_limit(struct ata_link *link, u32 spd_limit) | |||
| 3082 | bit = fls(mask) - 1; | 3082 | bit = fls(mask) - 1; |
| 3083 | mask &= ~(1 << bit); | 3083 | mask &= ~(1 << bit); |
| 3084 | 3084 | ||
| 3085 | /* Mask off all speeds higher than or equal to the current | 3085 | /* |
| 3086 | * one. Force 1.5Gbps if current SPD is not available. | 3086 | * Mask off all speeds higher than or equal to the current one. At |
| 3087 | * this point, if current SPD is not available and we previously | ||
| 3088 | * recorded the link speed from SStatus, the driver has already | ||
| 3089 | * masked off the highest bit so mask should already be 1 or 0. | ||
| 3090 | * Otherwise, we should not force 1.5Gbps on a link where we have | ||
| 3091 | * not previously recorded speed from SStatus. Just return in this | ||
| 3092 | * case. | ||
| 3087 | */ | 3093 | */ |
| 3088 | if (spd > 1) | 3094 | if (spd > 1) |
| 3089 | mask &= (1 << (spd - 1)) - 1; | 3095 | mask &= (1 << (spd - 1)) - 1; |
| 3090 | else | 3096 | else |
| 3091 | mask &= 1; | 3097 | return -EINVAL; |
| 3092 | 3098 | ||
| 3093 | /* were we already at the bottom? */ | 3099 | /* were we already at the bottom? */ |
| 3094 | if (!mask) | 3100 | if (!mask) |
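
The libata hunk above stops forcing 1.5 Gbps when the current link speed is unknown and instead bails out with -EINVAL. A runnable user-space demo of the same mask arithmetic, with illustrative SATA speed values (not read from a real SStatus register):

    #include <stdio.h>

    static int fls(unsigned int x)            /* find last set bit, 1-based */
    {
        return x ? 32 - __builtin_clz(x) : 0;
    }

    int main(void)
    {
        unsigned int mask = 0x7;              /* bits 0..2: 1.5/3.0/6.0 Gbps */
        int spd = 2;                          /* current speed: 3.0 Gbps */
        int bit = fls(mask) - 1;

        mask &= ~(1u << bit);                 /* drop the highest speed */

        if (spd > 1)
            mask &= (1u << (spd - 1)) - 1;    /* keep speeds below current */
        else
            return 1;                         /* speed unknown: bail out */

        printf("new mask: %#x\n", mask);      /* prints 0x1: 1.5 Gbps only */
        return 0;
    }
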
diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c index ffd8d33c6e0f..6db2e34bd52f 100644 --- a/drivers/ata/pata_pdc2027x.c +++ b/drivers/ata/pata_pdc2027x.c | |||
| @@ -82,7 +82,7 @@ static int pdc2027x_set_mode(struct ata_link *link, struct ata_device **r_failed | |||
| 82 | * is issued to the device. However, if the controller clock is 133MHz, | 82 | * is issued to the device. However, if the controller clock is 133MHz, |
| 83 | * the following tables must be used. | 83 | * the following tables must be used. |
| 84 | */ | 84 | */ |
| 85 | static struct pdc2027x_pio_timing { | 85 | static const struct pdc2027x_pio_timing { |
| 86 | u8 value0, value1, value2; | 86 | u8 value0, value1, value2; |
| 87 | } pdc2027x_pio_timing_tbl[] = { | 87 | } pdc2027x_pio_timing_tbl[] = { |
| 88 | { 0xfb, 0x2b, 0xac }, /* PIO mode 0 */ | 88 | { 0xfb, 0x2b, 0xac }, /* PIO mode 0 */ |
| @@ -92,7 +92,7 @@ static struct pdc2027x_pio_timing { | |||
| 92 | { 0x23, 0x09, 0x25 }, /* PIO mode 4, IORDY on, Prefetch off */ | 92 | { 0x23, 0x09, 0x25 }, /* PIO mode 4, IORDY on, Prefetch off */ |
| 93 | }; | 93 | }; |
| 94 | 94 | ||
| 95 | static struct pdc2027x_mdma_timing { | 95 | static const struct pdc2027x_mdma_timing { |
| 96 | u8 value0, value1; | 96 | u8 value0, value1; |
| 97 | } pdc2027x_mdma_timing_tbl[] = { | 97 | } pdc2027x_mdma_timing_tbl[] = { |
| 98 | { 0xdf, 0x5f }, /* MDMA mode 0 */ | 98 | { 0xdf, 0x5f }, /* MDMA mode 0 */ |
| @@ -100,7 +100,7 @@ static struct pdc2027x_mdma_timing { | |||
| 100 | { 0x69, 0x25 }, /* MDMA mode 2 */ | 100 | { 0x69, 0x25 }, /* MDMA mode 2 */ |
| 101 | }; | 101 | }; |
| 102 | 102 | ||
| 103 | static struct pdc2027x_udma_timing { | 103 | static const struct pdc2027x_udma_timing { |
| 104 | u8 value0, value1, value2; | 104 | u8 value0, value1, value2; |
| 105 | } pdc2027x_udma_timing_tbl[] = { | 105 | } pdc2027x_udma_timing_tbl[] = { |
| 106 | { 0x4a, 0x0f, 0xd5 }, /* UDMA mode 0 */ | 106 | { 0x4a, 0x0f, 0xd5 }, /* UDMA mode 0 */ |
| @@ -649,7 +649,7 @@ static long pdc_detect_pll_input_clock(struct ata_host *host) | |||
| 649 | * @host: target ATA host | 649 | * @host: target ATA host |
| 650 | * @board_idx: board identifier | 650 | * @board_idx: board identifier |
| 651 | */ | 651 | */ |
| 652 | static int pdc_hardware_init(struct ata_host *host, unsigned int board_idx) | 652 | static void pdc_hardware_init(struct ata_host *host, unsigned int board_idx) |
| 653 | { | 653 | { |
| 654 | long pll_clock; | 654 | long pll_clock; |
| 655 | 655 | ||
| @@ -665,8 +665,6 @@ static int pdc_hardware_init(struct ata_host *host, unsigned int board_idx) | |||
| 665 | 665 | ||
| 666 | /* Adjust PLL control register */ | 666 | /* Adjust PLL control register */ |
| 667 | pdc_adjust_pll(host, pll_clock, board_idx); | 667 | pdc_adjust_pll(host, pll_clock, board_idx); |
| 668 | |||
| 669 | return 0; | ||
| 670 | } | 668 | } |
| 671 | 669 | ||
| 672 | /** | 670 | /** |
| @@ -753,8 +751,7 @@ static int pdc2027x_init_one(struct pci_dev *pdev, | |||
| 753 | //pci_enable_intx(pdev); | 751 | //pci_enable_intx(pdev); |
| 754 | 752 | ||
| 755 | /* initialize adapter */ | 753 | /* initialize adapter */ |
| 756 | if (pdc_hardware_init(host, board_idx) != 0) | 754 | pdc_hardware_init(host, board_idx); |
| 757 | return -EIO; | ||
| 758 | 755 | ||
| 759 | pci_set_master(pdev); | 756 | pci_set_master(pdev); |
| 760 | return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, | 757 | return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, |
| @@ -778,8 +775,7 @@ static int pdc2027x_reinit_one(struct pci_dev *pdev) | |||
| 778 | else | 775 | else |
| 779 | board_idx = PDC_UDMA_133; | 776 | board_idx = PDC_UDMA_133; |
| 780 | 777 | ||
| 781 | if (pdc_hardware_init(host, board_idx)) | 778 | pdc_hardware_init(host, board_idx); |
| 782 | return -EIO; | ||
| 783 | 779 | ||
| 784 | ata_host_resume(host); | 780 | ata_host_resume(host); |
| 785 | return 0; | 781 | return 0; |
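
Two independent cleanups in the pdc2027x diff: the timing tables become const, and pdc_hardware_init(), which could never fail, becomes void so its callers lose their dead -EIO paths. A minimal sketch of the const-table half (only the PIO mode 0 row from the hunk is shown):

    /* Read-only lookup data: const moves the table to .rodata and lets
     * the compiler reject accidental writes. */
    static const struct pio_timing {
        unsigned char value0, value1, value2;
    } pio_timing_tbl[] = {
        { 0xfb, 0x2b, 0xac },   /* PIO mode 0 */
        /* remaining modes elided */
    };
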
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index db2f04415927..08744b572af6 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c | |||
| @@ -526,6 +526,21 @@ static void dpm_watchdog_clear(struct dpm_watchdog *wd) | |||
| 526 | /*------------------------- Resume routines -------------------------*/ | 526 | /*------------------------- Resume routines -------------------------*/ |
| 527 | 527 | ||
| 528 | /** | 528 | /** |
| 529 | * dev_pm_skip_next_resume_phases - Skip next system resume phases for device. | ||
| 530 | * @dev: Target device. | ||
| 531 | * | ||
| 532 | * Make the core skip the "early resume" and "resume" phases for @dev. | ||
| 533 | * | ||
| 534 | * This function can be called by middle-layer code during the "noirq" phase of | ||
| 535 | * system resume if necessary, but not by device drivers. | ||
| 536 | */ | ||
| 537 | void dev_pm_skip_next_resume_phases(struct device *dev) | ||
| 538 | { | ||
| 539 | dev->power.is_late_suspended = false; | ||
| 540 | dev->power.is_suspended = false; | ||
| 541 | } | ||
| 542 | |||
| 543 | /** | ||
| 529 | * device_resume_noirq - Execute a "noirq resume" callback for given device. | 544 | * device_resume_noirq - Execute a "noirq resume" callback for given device. |
| 530 | * @dev: Device to handle. | 545 | * @dev: Device to handle. |
| 531 | * @state: PM transition of the system being carried out. | 546 | * @state: PM transition of the system being carried out. |
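
dev_pm_skip_next_resume_phases() is meant for middle-layer "noirq" resume code, not drivers. A hypothetical sketch of how a bus type might use it; foo_device_already_restored() is an assumed helper, not a real API:

    static int foo_bus_resume_noirq(struct device *dev)
    {
        /* Firmware already brought the device fully up, so the "early
         * resume" and "resume" phases would be redundant. */
        if (foo_device_already_restored(dev)) {
            dev_pm_skip_next_resume_phases(dev);
            return 0;
        }
        return pm_generic_resume_noirq(dev);
    }
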
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 779869ed32b1..71fad747c0c7 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c | |||
| @@ -199,6 +199,9 @@ struct smi_info { | |||
| 199 | /* The timer for this si. */ | 199 | /* The timer for this si. */ |
| 200 | struct timer_list si_timer; | 200 | struct timer_list si_timer; |
| 201 | 201 | ||
| 202 | /* This flag is set if the timer can be started */ | ||
| 203 | bool timer_can_start; | ||
| 204 | |||
| 202 | /* This flag is set if the timer is running (timer_pending() isn't enough) */ | 205 | /* This flag is set if the timer is running (timer_pending() isn't enough) */ |
| 203 | bool timer_running; | 206 | bool timer_running; |
| 204 | 207 | ||
| @@ -355,6 +358,8 @@ out: | |||
| 355 | 358 | ||
| 356 | static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val) | 359 | static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val) |
| 357 | { | 360 | { |
| 361 | if (!smi_info->timer_can_start) | ||
| 362 | return; | ||
| 358 | smi_info->last_timeout_jiffies = jiffies; | 363 | smi_info->last_timeout_jiffies = jiffies; |
| 359 | mod_timer(&smi_info->si_timer, new_val); | 364 | mod_timer(&smi_info->si_timer, new_val); |
| 360 | smi_info->timer_running = true; | 365 | smi_info->timer_running = true; |
| @@ -374,21 +379,18 @@ static void start_new_msg(struct smi_info *smi_info, unsigned char *msg, | |||
| 374 | smi_info->handlers->start_transaction(smi_info->si_sm, msg, size); | 379 | smi_info->handlers->start_transaction(smi_info->si_sm, msg, size); |
| 375 | } | 380 | } |
| 376 | 381 | ||
| 377 | static void start_check_enables(struct smi_info *smi_info, bool start_timer) | 382 | static void start_check_enables(struct smi_info *smi_info) |
| 378 | { | 383 | { |
| 379 | unsigned char msg[2]; | 384 | unsigned char msg[2]; |
| 380 | 385 | ||
| 381 | msg[0] = (IPMI_NETFN_APP_REQUEST << 2); | 386 | msg[0] = (IPMI_NETFN_APP_REQUEST << 2); |
| 382 | msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; | 387 | msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; |
| 383 | 388 | ||
| 384 | if (start_timer) | 389 | start_new_msg(smi_info, msg, 2); |
| 385 | start_new_msg(smi_info, msg, 2); | ||
| 386 | else | ||
| 387 | smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); | ||
| 388 | smi_info->si_state = SI_CHECKING_ENABLES; | 390 | smi_info->si_state = SI_CHECKING_ENABLES; |
| 389 | } | 391 | } |
| 390 | 392 | ||
| 391 | static void start_clear_flags(struct smi_info *smi_info, bool start_timer) | 393 | static void start_clear_flags(struct smi_info *smi_info) |
| 392 | { | 394 | { |
| 393 | unsigned char msg[3]; | 395 | unsigned char msg[3]; |
| 394 | 396 | ||
| @@ -397,10 +399,7 @@ static void start_clear_flags(struct smi_info *smi_info, bool start_timer) | |||
| 397 | msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD; | 399 | msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD; |
| 398 | msg[2] = WDT_PRE_TIMEOUT_INT; | 400 | msg[2] = WDT_PRE_TIMEOUT_INT; |
| 399 | 401 | ||
| 400 | if (start_timer) | 402 | start_new_msg(smi_info, msg, 3); |
| 401 | start_new_msg(smi_info, msg, 3); | ||
| 402 | else | ||
| 403 | smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3); | ||
| 404 | smi_info->si_state = SI_CLEARING_FLAGS; | 403 | smi_info->si_state = SI_CLEARING_FLAGS; |
| 405 | } | 404 | } |
| 406 | 405 | ||
| @@ -435,11 +434,11 @@ static void start_getting_events(struct smi_info *smi_info) | |||
| 435 | * Note that we cannot just use disable_irq(), since the interrupt may | 434 | * Note that we cannot just use disable_irq(), since the interrupt may |
| 436 | * be shared. | 435 | * be shared. |
| 437 | */ | 436 | */ |
| 438 | static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer) | 437 | static inline bool disable_si_irq(struct smi_info *smi_info) |
| 439 | { | 438 | { |
| 440 | if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) { | 439 | if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) { |
| 441 | smi_info->interrupt_disabled = true; | 440 | smi_info->interrupt_disabled = true; |
| 442 | start_check_enables(smi_info, start_timer); | 441 | start_check_enables(smi_info); |
| 443 | return true; | 442 | return true; |
| 444 | } | 443 | } |
| 445 | return false; | 444 | return false; |
| @@ -449,7 +448,7 @@ static inline bool enable_si_irq(struct smi_info *smi_info) | |||
| 449 | { | 448 | { |
| 450 | if ((smi_info->io.irq) && (smi_info->interrupt_disabled)) { | 449 | if ((smi_info->io.irq) && (smi_info->interrupt_disabled)) { |
| 451 | smi_info->interrupt_disabled = false; | 450 | smi_info->interrupt_disabled = false; |
| 452 | start_check_enables(smi_info, true); | 451 | start_check_enables(smi_info); |
| 453 | return true; | 452 | return true; |
| 454 | } | 453 | } |
| 455 | return false; | 454 | return false; |
| @@ -467,7 +466,7 @@ static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info) | |||
| 467 | 466 | ||
| 468 | msg = ipmi_alloc_smi_msg(); | 467 | msg = ipmi_alloc_smi_msg(); |
| 469 | if (!msg) { | 468 | if (!msg) { |
| 470 | if (!disable_si_irq(smi_info, true)) | 469 | if (!disable_si_irq(smi_info)) |
| 471 | smi_info->si_state = SI_NORMAL; | 470 | smi_info->si_state = SI_NORMAL; |
| 472 | } else if (enable_si_irq(smi_info)) { | 471 | } else if (enable_si_irq(smi_info)) { |
| 473 | ipmi_free_smi_msg(msg); | 472 | ipmi_free_smi_msg(msg); |
| @@ -483,7 +482,7 @@ retry: | |||
| 483 | /* Watchdog pre-timeout */ | 482 | /* Watchdog pre-timeout */ |
| 484 | smi_inc_stat(smi_info, watchdog_pretimeouts); | 483 | smi_inc_stat(smi_info, watchdog_pretimeouts); |
| 485 | 484 | ||
| 486 | start_clear_flags(smi_info, true); | 485 | start_clear_flags(smi_info); |
| 487 | smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; | 486 | smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; |
| 488 | if (smi_info->intf) | 487 | if (smi_info->intf) |
| 489 | ipmi_smi_watchdog_pretimeout(smi_info->intf); | 488 | ipmi_smi_watchdog_pretimeout(smi_info->intf); |
| @@ -866,7 +865,7 @@ restart: | |||
| 866 | * disable and messages disabled. | 865 | * disable and messages disabled. |
| 867 | */ | 866 | */ |
| 868 | if (smi_info->supports_event_msg_buff || smi_info->io.irq) { | 867 | if (smi_info->supports_event_msg_buff || smi_info->io.irq) { |
| 869 | start_check_enables(smi_info, true); | 868 | start_check_enables(smi_info); |
| 870 | } else { | 869 | } else { |
| 871 | smi_info->curr_msg = alloc_msg_handle_irq(smi_info); | 870 | smi_info->curr_msg = alloc_msg_handle_irq(smi_info); |
| 872 | if (!smi_info->curr_msg) | 871 | if (!smi_info->curr_msg) |
| @@ -1167,6 +1166,7 @@ static int smi_start_processing(void *send_info, | |||
| 1167 | 1166 | ||
| 1168 | /* Set up the timer that drives the interface. */ | 1167 | /* Set up the timer that drives the interface. */ |
| 1169 | timer_setup(&new_smi->si_timer, smi_timeout, 0); | 1168 | timer_setup(&new_smi->si_timer, smi_timeout, 0); |
| 1169 | new_smi->timer_can_start = true; | ||
| 1170 | smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES); | 1170 | smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES); |
| 1171 | 1171 | ||
| 1172 | /* Try to claim any interrupts. */ | 1172 | /* Try to claim any interrupts. */ |
| @@ -1936,10 +1936,12 @@ static void check_for_broken_irqs(struct smi_info *smi_info) | |||
| 1936 | check_set_rcv_irq(smi_info); | 1936 | check_set_rcv_irq(smi_info); |
| 1937 | } | 1937 | } |
| 1938 | 1938 | ||
| 1939 | static inline void wait_for_timer_and_thread(struct smi_info *smi_info) | 1939 | static inline void stop_timer_and_thread(struct smi_info *smi_info) |
| 1940 | { | 1940 | { |
| 1941 | if (smi_info->thread != NULL) | 1941 | if (smi_info->thread != NULL) |
| 1942 | kthread_stop(smi_info->thread); | 1942 | kthread_stop(smi_info->thread); |
| 1943 | |||
| 1944 | smi_info->timer_can_start = false; | ||
| 1943 | if (smi_info->timer_running) | 1945 | if (smi_info->timer_running) |
| 1944 | del_timer_sync(&smi_info->si_timer); | 1946 | del_timer_sync(&smi_info->si_timer); |
| 1945 | } | 1947 | } |
| @@ -2152,7 +2154,7 @@ static int try_smi_init(struct smi_info *new_smi) | |||
| 2152 | * Start clearing the flags before we enable interrupts or the | 2154 | * Start clearing the flags before we enable interrupts or the |
| 2153 | * timer to avoid racing with the timer. | 2155 | * timer to avoid racing with the timer. |
| 2154 | */ | 2156 | */ |
| 2155 | start_clear_flags(new_smi, false); | 2157 | start_clear_flags(new_smi); |
| 2156 | 2158 | ||
| 2157 | /* | 2159 | /* |
| 2158 | * IRQ is defined to be set when non-zero. req_events will | 2160 | * IRQ is defined to be set when non-zero. req_events will |
| @@ -2238,7 +2240,7 @@ out_err_remove_attrs: | |||
| 2238 | dev_set_drvdata(new_smi->io.dev, NULL); | 2240 | dev_set_drvdata(new_smi->io.dev, NULL); |
| 2239 | 2241 | ||
| 2240 | out_err_stop_timer: | 2242 | out_err_stop_timer: |
| 2241 | wait_for_timer_and_thread(new_smi); | 2243 | stop_timer_and_thread(new_smi); |
| 2242 | 2244 | ||
| 2243 | out_err: | 2245 | out_err: |
| 2244 | new_smi->interrupt_disabled = true; | 2246 | new_smi->interrupt_disabled = true; |
| @@ -2388,7 +2390,7 @@ static void cleanup_one_si(struct smi_info *to_clean) | |||
| 2388 | */ | 2390 | */ |
| 2389 | if (to_clean->io.irq_cleanup) | 2391 | if (to_clean->io.irq_cleanup) |
| 2390 | to_clean->io.irq_cleanup(&to_clean->io); | 2392 | to_clean->io.irq_cleanup(&to_clean->io); |
| 2391 | wait_for_timer_and_thread(to_clean); | 2393 | stop_timer_and_thread(to_clean); |
| 2392 | 2394 | ||
| 2393 | /* | 2395 | /* |
| 2394 | * Timeouts are stopped, now make sure the interrupts are off | 2396 | * Timeouts are stopped, now make sure the interrupts are off |
| @@ -2400,7 +2402,7 @@ static void cleanup_one_si(struct smi_info *to_clean) | |||
| 2400 | schedule_timeout_uninterruptible(1); | 2402 | schedule_timeout_uninterruptible(1); |
| 2401 | } | 2403 | } |
| 2402 | if (to_clean->handlers) | 2404 | if (to_clean->handlers) |
| 2403 | disable_si_irq(to_clean, false); | 2405 | disable_si_irq(to_clean); |
| 2404 | while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { | 2406 | while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { |
| 2405 | poll(to_clean); | 2407 | poll(to_clean); |
| 2406 | schedule_timeout_uninterruptible(1); | 2408 | schedule_timeout_uninterruptible(1); |
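
The ipmi_si change replaces the start_timer flag threaded through every call chain with a single timer_can_start bit that gates smi_mod_timer(), closing the race where the timer could be re-armed during teardown. The generic shape of the pattern, with a hypothetical struct foo standing in for smi_info:

    struct foo {
        struct timer_list timer;
        bool timer_can_start;
    };

    static void foo_mod_timer(struct foo *f, unsigned long expires)
    {
        if (!f->timer_can_start)
            return;                      /* torn down: never re-arm */
        mod_timer(&f->timer, expires);
    }

    static void foo_stop(struct foo *f)
    {
        f->timer_can_start = false;      /* block future re-arms... */
        del_timer_sync(&f->timer);       /* ...then wait out the handler */
    }
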
diff --git a/drivers/char/ipmi/ipmi_si_parisc.c b/drivers/char/ipmi/ipmi_si_parisc.c index 090b073ab441..6b10f0e18a95 100644 --- a/drivers/char/ipmi/ipmi_si_parisc.c +++ b/drivers/char/ipmi/ipmi_si_parisc.c | |||
| @@ -10,6 +10,8 @@ static int __init ipmi_parisc_probe(struct parisc_device *dev) | |||
| 10 | { | 10 | { |
| 11 | struct si_sm_io io; | 11 | struct si_sm_io io; |
| 12 | 12 | ||
| 13 | memset(&io, 0, sizeof(io)); | ||
| 14 | |||
| 13 | io.si_type = SI_KCS; | 15 | io.si_type = SI_KCS; |
| 14 | io.addr_source = SI_DEVICETREE; | 16 | io.addr_source = SI_DEVICETREE; |
| 15 | io.addr_type = IPMI_MEM_ADDR_SPACE; | 17 | io.addr_type = IPMI_MEM_ADDR_SPACE; |
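
The parisc probe keeps struct si_sm_io on the stack, so any field it does not assign holds garbage; the memset() gives every unset field a deterministic zero. A designated initializer achieves the same in one step, since C zero-initializes all members not named:

    struct si_sm_io io = {
        .si_type     = SI_KCS,
        .addr_source = SI_DEVICETREE,
        .addr_type   = IPMI_MEM_ADDR_SPACE,
        /* everything else is implicitly zeroed */
    };
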
diff --git a/drivers/char/ipmi/ipmi_si_pci.c b/drivers/char/ipmi/ipmi_si_pci.c index 99771f5cad07..27dd11c49d21 100644 --- a/drivers/char/ipmi/ipmi_si_pci.c +++ b/drivers/char/ipmi/ipmi_si_pci.c | |||
| @@ -103,10 +103,13 @@ static int ipmi_pci_probe(struct pci_dev *pdev, | |||
| 103 | io.addr_source_cleanup = ipmi_pci_cleanup; | 103 | io.addr_source_cleanup = ipmi_pci_cleanup; |
| 104 | io.addr_source_data = pdev; | 104 | io.addr_source_data = pdev; |
| 105 | 105 | ||
| 106 | if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) | 106 | if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) { |
| 107 | io.addr_type = IPMI_IO_ADDR_SPACE; | 107 | io.addr_type = IPMI_IO_ADDR_SPACE; |
| 108 | else | 108 | io.io_setup = ipmi_si_port_setup; |
| 109 | } else { | ||
| 109 | io.addr_type = IPMI_MEM_ADDR_SPACE; | 110 | io.addr_type = IPMI_MEM_ADDR_SPACE; |
| 111 | io.io_setup = ipmi_si_mem_setup; | ||
| 112 | } | ||
| 110 | io.addr_data = pci_resource_start(pdev, 0); | 113 | io.addr_data = pci_resource_start(pdev, 0); |
| 111 | 114 | ||
| 112 | io.regspacing = ipmi_pci_probe_regspacing(&io); | 115 | io.regspacing = ipmi_pci_probe_regspacing(&io); |
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index fbab271b3bf9..a861b5b4d443 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c | |||
| @@ -708,7 +708,7 @@ atc_prep_dma_interleaved(struct dma_chan *chan, | |||
| 708 | unsigned long flags) | 708 | unsigned long flags) |
| 709 | { | 709 | { |
| 710 | struct at_dma_chan *atchan = to_at_dma_chan(chan); | 710 | struct at_dma_chan *atchan = to_at_dma_chan(chan); |
| 711 | struct data_chunk *first = xt->sgl; | 711 | struct data_chunk *first; |
| 712 | struct at_desc *desc = NULL; | 712 | struct at_desc *desc = NULL; |
| 713 | size_t xfer_count; | 713 | size_t xfer_count; |
| 714 | unsigned int dwidth; | 714 | unsigned int dwidth; |
| @@ -720,6 +720,8 @@ atc_prep_dma_interleaved(struct dma_chan *chan, | |||
| 720 | if (unlikely(!xt || xt->numf != 1 || !xt->frame_size)) | 720 | if (unlikely(!xt || xt->numf != 1 || !xt->frame_size)) |
| 721 | return NULL; | 721 | return NULL; |
| 722 | 722 | ||
| 723 | first = xt->sgl; | ||
| 724 | |||
| 723 | dev_info(chan2dev(chan), | 725 | dev_info(chan2dev(chan), |
| 724 | "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n", | 726 | "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n", |
| 725 | __func__, &xt->src_start, &xt->dst_start, xt->numf, | 727 | __func__, &xt->src_start, &xt->dst_start, xt->numf, |
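
In at_hdmac the initializer first = xt->sgl dereferenced xt before the !xt check could run; the fix moves the assignment below the validation. The general shape, with hypothetical types:

    struct chunk;
    struct xfer { struct chunk *sgl; int frame_size; };

    static struct chunk *first_chunk(struct xfer *xt)
    {
        struct chunk *first;

        if (!xt || !xt->frame_size)   /* validate before any dereference */
            return NULL;

        first = xt->sgl;              /* safe: xt is known non-NULL here */
        return first;
    }
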
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c index d50273fed715..afd5e10f8927 100644 --- a/drivers/dma/dma-jz4740.c +++ b/drivers/dma/dma-jz4740.c | |||
| @@ -555,7 +555,7 @@ static int jz4740_dma_probe(struct platform_device *pdev) | |||
| 555 | 555 | ||
| 556 | ret = dma_async_device_register(dd); | 556 | ret = dma_async_device_register(dd); |
| 557 | if (ret) | 557 | if (ret) |
| 558 | return ret; | 558 | goto err_clk; |
| 559 | 559 | ||
| 560 | irq = platform_get_irq(pdev, 0); | 560 | irq = platform_get_irq(pdev, 0); |
| 561 | ret = request_irq(irq, jz4740_dma_irq, 0, dev_name(&pdev->dev), dmadev); | 561 | ret = request_irq(irq, jz4740_dma_irq, 0, dev_name(&pdev->dev), dmadev); |
| @@ -568,6 +568,8 @@ static int jz4740_dma_probe(struct platform_device *pdev) | |||
| 568 | 568 | ||
| 569 | err_unregister: | 569 | err_unregister: |
| 570 | dma_async_device_unregister(dd); | 570 | dma_async_device_unregister(dd); |
| 571 | err_clk: | ||
| 572 | clk_disable_unprepare(dmadev->clk); | ||
| 571 | return ret; | 573 | return ret; |
| 572 | } | 574 | } |
| 573 | 575 | ||
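
The jz4740 fix adds the missing clk_disable_unprepare() when dma_async_device_register() fails after the clock was already enabled. This is the canonical goto unwind ladder; a sketch with declarations elided, undoing each successful step in reverse order:

    static int foo_probe(struct platform_device *pdev)
    {
        int ret;                       /* dmadev, dd, irq elided */

        ret = clk_prepare_enable(dmadev->clk);
        if (ret)
            return ret;

        ret = dma_async_device_register(dd);
        if (ret)
            goto err_clk;

        ret = request_irq(irq, foo_irq, 0, dev_name(&pdev->dev), dmadev);
        if (ret)
            goto err_unregister;

        return 0;

    err_unregister:
        dma_async_device_unregister(dd);
    err_clk:
        clk_disable_unprepare(dmadev->clk);
        return ret;
    }
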
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 47edc7fbf91f..ec5f9d2bc820 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c | |||
| @@ -155,6 +155,12 @@ MODULE_PARM_DESC(run, "Run the test (default: false)"); | |||
| 155 | #define PATTERN_COUNT_MASK 0x1f | 155 | #define PATTERN_COUNT_MASK 0x1f |
| 156 | #define PATTERN_MEMSET_IDX 0x01 | 156 | #define PATTERN_MEMSET_IDX 0x01 |
| 157 | 157 | ||
| 158 | /* poor man's completion - we want to use wait_event_freezable() on it */ | ||
| 159 | struct dmatest_done { | ||
| 160 | bool done; | ||
| 161 | wait_queue_head_t *wait; | ||
| 162 | }; | ||
| 163 | |||
| 158 | struct dmatest_thread { | 164 | struct dmatest_thread { |
| 159 | struct list_head node; | 165 | struct list_head node; |
| 160 | struct dmatest_info *info; | 166 | struct dmatest_info *info; |
| @@ -165,6 +171,8 @@ struct dmatest_thread { | |||
| 165 | u8 **dsts; | 171 | u8 **dsts; |
| 166 | u8 **udsts; | 172 | u8 **udsts; |
| 167 | enum dma_transaction_type type; | 173 | enum dma_transaction_type type; |
| 174 | wait_queue_head_t done_wait; | ||
| 175 | struct dmatest_done test_done; | ||
| 168 | bool done; | 176 | bool done; |
| 169 | }; | 177 | }; |
| 170 | 178 | ||
| @@ -342,18 +350,25 @@ static unsigned int dmatest_verify(u8 **bufs, unsigned int start, | |||
| 342 | return error_count; | 350 | return error_count; |
| 343 | } | 351 | } |
| 344 | 352 | ||
| 345 | /* poor man's completion - we want to use wait_event_freezable() on it */ | ||
| 346 | struct dmatest_done { | ||
| 347 | bool done; | ||
| 348 | wait_queue_head_t *wait; | ||
| 349 | }; | ||
| 350 | 353 | ||
| 351 | static void dmatest_callback(void *arg) | 354 | static void dmatest_callback(void *arg) |
| 352 | { | 355 | { |
| 353 | struct dmatest_done *done = arg; | 356 | struct dmatest_done *done = arg; |
| 354 | 357 | struct dmatest_thread *thread = | |
| 355 | done->done = true; | 358 | container_of(done, struct dmatest_thread, test_done); |
| 356 | wake_up_all(done->wait); | 359 | if (!thread->done) { |
| 360 | done->done = true; | ||
| 361 | wake_up_all(done->wait); | ||
| 362 | } else { | ||
| 363 | /* | ||
| 364 | * If thread->done, it means that this callback occurred | ||
| 365 | * after the parent thread has cleaned up. This can | ||
| 366 | * happen in the case that driver doesn't implement | ||
| 367 | * the terminate_all() functionality and a dma operation | ||
| 368 | * did not occur within the timeout period | ||
| 369 | */ | ||
| 370 | WARN(1, "dmatest: Kernel memory may be corrupted!!\n"); | ||
| 371 | } | ||
| 357 | } | 372 | } |
| 358 | 373 | ||
| 359 | static unsigned int min_odd(unsigned int x, unsigned int y) | 374 | static unsigned int min_odd(unsigned int x, unsigned int y) |
| @@ -424,9 +439,8 @@ static unsigned long long dmatest_KBs(s64 runtime, unsigned long long len) | |||
| 424 | */ | 439 | */ |
| 425 | static int dmatest_func(void *data) | 440 | static int dmatest_func(void *data) |
| 426 | { | 441 | { |
| 427 | DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_wait); | ||
| 428 | struct dmatest_thread *thread = data; | 442 | struct dmatest_thread *thread = data; |
| 429 | struct dmatest_done done = { .wait = &done_wait }; | 443 | struct dmatest_done *done = &thread->test_done; |
| 430 | struct dmatest_info *info; | 444 | struct dmatest_info *info; |
| 431 | struct dmatest_params *params; | 445 | struct dmatest_params *params; |
| 432 | struct dma_chan *chan; | 446 | struct dma_chan *chan; |
| @@ -673,9 +687,9 @@ static int dmatest_func(void *data) | |||
| 673 | continue; | 687 | continue; |
| 674 | } | 688 | } |
| 675 | 689 | ||
| 676 | done.done = false; | 690 | done->done = false; |
| 677 | tx->callback = dmatest_callback; | 691 | tx->callback = dmatest_callback; |
| 678 | tx->callback_param = &done; | 692 | tx->callback_param = done; |
| 679 | cookie = tx->tx_submit(tx); | 693 | cookie = tx->tx_submit(tx); |
| 680 | 694 | ||
| 681 | if (dma_submit_error(cookie)) { | 695 | if (dma_submit_error(cookie)) { |
| @@ -688,21 +702,12 @@ static int dmatest_func(void *data) | |||
| 688 | } | 702 | } |
| 689 | dma_async_issue_pending(chan); | 703 | dma_async_issue_pending(chan); |
| 690 | 704 | ||
| 691 | wait_event_freezable_timeout(done_wait, done.done, | 705 | wait_event_freezable_timeout(thread->done_wait, done->done, |
| 692 | msecs_to_jiffies(params->timeout)); | 706 | msecs_to_jiffies(params->timeout)); |
| 693 | 707 | ||
| 694 | status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); | 708 | status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); |
| 695 | 709 | ||
| 696 | if (!done.done) { | 710 | if (!done->done) { |
| 697 | /* | ||
| 698 | * We're leaving the timed out dma operation with | ||
| 699 | * dangling pointer to done_wait. To make this | ||
| 700 | * correct, we'll need to allocate wait_done for | ||
| 701 | * each test iteration and perform "who's gonna | ||
| 702 | * free it this time?" dancing. For now, just | ||
| 703 | * leave it dangling. | ||
| 704 | */ | ||
| 705 | WARN(1, "dmatest: Kernel stack may be corrupted!!\n"); | ||
| 706 | dmaengine_unmap_put(um); | 711 | dmaengine_unmap_put(um); |
| 707 | result("test timed out", total_tests, src_off, dst_off, | 712 | result("test timed out", total_tests, src_off, dst_off, |
| 708 | len, 0); | 713 | len, 0); |
| @@ -789,7 +794,7 @@ err_thread_type: | |||
| 789 | dmatest_KBs(runtime, total_len), ret); | 794 | dmatest_KBs(runtime, total_len), ret); |
| 790 | 795 | ||
| 791 | /* terminate all transfers on specified channels */ | 796 | /* terminate all transfers on specified channels */ |
| 792 | if (ret) | 797 | if (ret || failed_tests) |
| 793 | dmaengine_terminate_all(chan); | 798 | dmaengine_terminate_all(chan); |
| 794 | 799 | ||
| 795 | thread->done = true; | 800 | thread->done = true; |
| @@ -849,6 +854,8 @@ static int dmatest_add_threads(struct dmatest_info *info, | |||
| 849 | thread->info = info; | 854 | thread->info = info; |
| 850 | thread->chan = dtc->chan; | 855 | thread->chan = dtc->chan; |
| 851 | thread->type = type; | 856 | thread->type = type; |
| 857 | thread->test_done.wait = &thread->done_wait; | ||
| 858 | init_waitqueue_head(&thread->done_wait); | ||
| 852 | smp_wmb(); | 859 | smp_wmb(); |
| 853 | thread->task = kthread_create(dmatest_func, thread, "%s-%s%u", | 860 | thread->task = kthread_create(dmatest_func, thread, "%s-%s%u", |
| 854 | dma_chan_name(chan), op, i); | 861 | dma_chan_name(chan), op, i); |
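
The dmatest rework moves the wait queue and completion flag off dmatest_func()'s stack into struct dmatest_thread, so a completion that fires after the timeout dereferences memory that is still valid and can be reported instead of corrupting the stack. A condensed sketch of the pattern with hypothetical names:

    struct job_done {
        bool done;
        wait_queue_head_t *wait;
    };

    struct worker {                  /* lives as long as the kthread */
        wait_queue_head_t done_wait;
        struct job_done test_done;
        bool done;                   /* worker has given up / exited */
    };

    static void job_callback(void *arg)
    {
        struct job_done *jd = arg;
        struct worker *w = container_of(jd, struct worker, test_done);

        if (!w->done) {
            jd->done = true;         /* normal completion */
            wake_up_all(jd->wait);
        } else {
            /* late completion after the waiter timed out: the memory
             * is still valid because it lives in struct worker */
            WARN_ONCE(1, "completion after timeout\n");
        }
    }
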
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c index 6775f2c74e25..c7568869284e 100644 --- a/drivers/dma/fsl-edma.c +++ b/drivers/dma/fsl-edma.c | |||
| @@ -863,11 +863,11 @@ static void fsl_edma_irq_exit( | |||
| 863 | } | 863 | } |
| 864 | } | 864 | } |
| 865 | 865 | ||
| 866 | static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma) | 866 | static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma, int nr_clocks) |
| 867 | { | 867 | { |
| 868 | int i; | 868 | int i; |
| 869 | 869 | ||
| 870 | for (i = 0; i < DMAMUX_NR; i++) | 870 | for (i = 0; i < nr_clocks; i++) |
| 871 | clk_disable_unprepare(fsl_edma->muxclk[i]); | 871 | clk_disable_unprepare(fsl_edma->muxclk[i]); |
| 872 | } | 872 | } |
| 873 | 873 | ||
| @@ -904,25 +904,25 @@ static int fsl_edma_probe(struct platform_device *pdev) | |||
| 904 | 904 | ||
| 905 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i); | 905 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i); |
| 906 | fsl_edma->muxbase[i] = devm_ioremap_resource(&pdev->dev, res); | 906 | fsl_edma->muxbase[i] = devm_ioremap_resource(&pdev->dev, res); |
| 907 | if (IS_ERR(fsl_edma->muxbase[i])) | 907 | if (IS_ERR(fsl_edma->muxbase[i])) { |
| 908 | /* on error: disable all previously enabled clks */ | ||
| 909 | fsl_disable_clocks(fsl_edma, i); | ||
| 908 | return PTR_ERR(fsl_edma->muxbase[i]); | 910 | return PTR_ERR(fsl_edma->muxbase[i]); |
| 911 | } | ||
| 909 | 912 | ||
| 910 | sprintf(clkname, "dmamux%d", i); | 913 | sprintf(clkname, "dmamux%d", i); |
| 911 | fsl_edma->muxclk[i] = devm_clk_get(&pdev->dev, clkname); | 914 | fsl_edma->muxclk[i] = devm_clk_get(&pdev->dev, clkname); |
| 912 | if (IS_ERR(fsl_edma->muxclk[i])) { | 915 | if (IS_ERR(fsl_edma->muxclk[i])) { |
| 913 | dev_err(&pdev->dev, "Missing DMAMUX block clock.\n"); | 916 | dev_err(&pdev->dev, "Missing DMAMUX block clock.\n"); |
| 917 | /* on error: disable all previously enabled clks */ | ||
| 918 | fsl_disable_clocks(fsl_edma, i); | ||
| 914 | return PTR_ERR(fsl_edma->muxclk[i]); | 919 | return PTR_ERR(fsl_edma->muxclk[i]); |
| 915 | } | 920 | } |
| 916 | 921 | ||
| 917 | ret = clk_prepare_enable(fsl_edma->muxclk[i]); | 922 | ret = clk_prepare_enable(fsl_edma->muxclk[i]); |
| 918 | if (ret) { | 923 | if (ret) |
| 919 | /* disable only clks which were enabled on error */ | 924 | /* on error: disable all previously enabled clks */ |
| 920 | for (; i >= 0; i--) | 925 | fsl_disable_clocks(fsl_edma, i); |
| 921 | clk_disable_unprepare(fsl_edma->muxclk[i]); | ||
| 922 | |||
| 923 | dev_err(&pdev->dev, "DMAMUX clk block failed.\n"); | ||
| 924 | return ret; | ||
| 925 | } | ||
| 926 | 926 | ||
| 927 | } | 927 | } |
| 928 | 928 | ||
| @@ -976,7 +976,7 @@ static int fsl_edma_probe(struct platform_device *pdev) | |||
| 976 | if (ret) { | 976 | if (ret) { |
| 977 | dev_err(&pdev->dev, | 977 | dev_err(&pdev->dev, |
| 978 | "Can't register Freescale eDMA engine. (%d)\n", ret); | 978 | "Can't register Freescale eDMA engine. (%d)\n", ret); |
| 979 | fsl_disable_clocks(fsl_edma); | 979 | fsl_disable_clocks(fsl_edma, DMAMUX_NR); |
| 980 | return ret; | 980 | return ret; |
| 981 | } | 981 | } |
| 982 | 982 | ||
| @@ -985,7 +985,7 @@ static int fsl_edma_probe(struct platform_device *pdev) | |||
| 985 | dev_err(&pdev->dev, | 985 | dev_err(&pdev->dev, |
| 986 | "Can't register Freescale eDMA of_dma. (%d)\n", ret); | 986 | "Can't register Freescale eDMA of_dma. (%d)\n", ret); |
| 987 | dma_async_device_unregister(&fsl_edma->dma_dev); | 987 | dma_async_device_unregister(&fsl_edma->dma_dev); |
| 988 | fsl_disable_clocks(fsl_edma); | 988 | fsl_disable_clocks(fsl_edma, DMAMUX_NR); |
| 989 | return ret; | 989 | return ret; |
| 990 | } | 990 | } |
| 991 | 991 | ||
| @@ -1015,7 +1015,7 @@ static int fsl_edma_remove(struct platform_device *pdev) | |||
| 1015 | fsl_edma_cleanup_vchan(&fsl_edma->dma_dev); | 1015 | fsl_edma_cleanup_vchan(&fsl_edma->dma_dev); |
| 1016 | of_dma_controller_free(np); | 1016 | of_dma_controller_free(np); |
| 1017 | dma_async_device_unregister(&fsl_edma->dma_dev); | 1017 | dma_async_device_unregister(&fsl_edma->dma_dev); |
| 1018 | fsl_disable_clocks(fsl_edma); | 1018 | fsl_disable_clocks(fsl_edma, DMAMUX_NR); |
| 1019 | 1019 | ||
| 1020 | return 0; | 1020 | return 0; |
| 1021 | } | 1021 | } |
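
fsl_disable_clocks() now takes the number of clocks to disable, so the same helper serves full cleanup (DMAMUX_NR) and the partial unwind inside the probe loop, where only clocks 0..i-1 were enabled when step i failed. The counted-unwind shape in isolation:

    static void disable_clocks(struct clk **clks, int nr)
    {
        int i;

        for (i = 0; i < nr; i++)
            clk_disable_unprepare(clks[i]);
    }

    /* probe loop, abridged */
    for (i = 0; i < NR_CLKS; i++) {
        ret = clk_prepare_enable(clks[i]);
        if (ret) {
            disable_clocks(clks, i);   /* unwind only what succeeded */
            return ret;
        }
    }
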
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c index 2f31d3d0caa6..7792a9186f9c 100644 --- a/drivers/dma/ioat/init.c +++ b/drivers/dma/ioat/init.c | |||
| @@ -390,7 +390,7 @@ static int ioat_dma_self_test(struct ioatdma_device *ioat_dma) | |||
| 390 | if (memcmp(src, dest, IOAT_TEST_SIZE)) { | 390 | if (memcmp(src, dest, IOAT_TEST_SIZE)) { |
| 391 | dev_err(dev, "Self-test copy failed compare, disabling\n"); | 391 | dev_err(dev, "Self-test copy failed compare, disabling\n"); |
| 392 | err = -ENODEV; | 392 | err = -ENODEV; |
| 393 | goto free_resources; | 393 | goto unmap_dma; |
| 394 | } | 394 | } |
| 395 | 395 | ||
| 396 | unmap_dma: | 396 | unmap_dma: |
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 482014137953..9ae236036e32 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c | |||
| @@ -152,14 +152,23 @@ static void drm_connector_free(struct kref *kref) | |||
| 152 | connector->funcs->destroy(connector); | 152 | connector->funcs->destroy(connector); |
| 153 | } | 153 | } |
| 154 | 154 | ||
| 155 | static void drm_connector_free_work_fn(struct work_struct *work) | 155 | void drm_connector_free_work_fn(struct work_struct *work) |
| 156 | { | 156 | { |
| 157 | struct drm_connector *connector = | 157 | struct drm_connector *connector, *n; |
| 158 | container_of(work, struct drm_connector, free_work); | 158 | struct drm_device *dev = |
| 159 | struct drm_device *dev = connector->dev; | 159 | container_of(work, struct drm_device, mode_config.connector_free_work); |
| 160 | struct drm_mode_config *config = &dev->mode_config; | ||
| 161 | unsigned long flags; | ||
| 162 | struct llist_node *freed; | ||
| 160 | 163 | ||
| 161 | drm_mode_object_unregister(dev, &connector->base); | 164 | spin_lock_irqsave(&config->connector_list_lock, flags); |
| 162 | connector->funcs->destroy(connector); | 165 | freed = llist_del_all(&config->connector_free_list); |
| 166 | spin_unlock_irqrestore(&config->connector_list_lock, flags); | ||
| 167 | |||
| 168 | llist_for_each_entry_safe(connector, n, freed, free_node) { | ||
| 169 | drm_mode_object_unregister(dev, &connector->base); | ||
| 170 | connector->funcs->destroy(connector); | ||
| 171 | } | ||
| 163 | } | 172 | } |
| 164 | 173 | ||
| 165 | /** | 174 | /** |
| @@ -191,8 +200,6 @@ int drm_connector_init(struct drm_device *dev, | |||
| 191 | if (ret) | 200 | if (ret) |
| 192 | return ret; | 201 | return ret; |
| 193 | 202 | ||
| 194 | INIT_WORK(&connector->free_work, drm_connector_free_work_fn); | ||
| 195 | |||
| 196 | connector->base.properties = &connector->properties; | 203 | connector->base.properties = &connector->properties; |
| 197 | connector->dev = dev; | 204 | connector->dev = dev; |
| 198 | connector->funcs = funcs; | 205 | connector->funcs = funcs; |
| @@ -547,10 +554,17 @@ EXPORT_SYMBOL(drm_connector_list_iter_begin); | |||
| 547 | * actually release the connector when dropping our final reference. | 554 | * actually release the connector when dropping our final reference. |
| 548 | */ | 555 | */ |
| 549 | static void | 556 | static void |
| 550 | drm_connector_put_safe(struct drm_connector *conn) | 557 | __drm_connector_put_safe(struct drm_connector *conn) |
| 551 | { | 558 | { |
| 552 | if (refcount_dec_and_test(&conn->base.refcount.refcount)) | 559 | struct drm_mode_config *config = &conn->dev->mode_config; |
| 553 | schedule_work(&conn->free_work); | 560 | |
| 561 | lockdep_assert_held(&config->connector_list_lock); | ||
| 562 | |||
| 563 | if (!refcount_dec_and_test(&conn->base.refcount.refcount)) | ||
| 564 | return; | ||
| 565 | |||
| 566 | llist_add(&conn->free_node, &config->connector_free_list); | ||
| 567 | schedule_work(&config->connector_free_work); | ||
| 554 | } | 568 | } |
| 555 | 569 | ||
| 556 | /** | 570 | /** |
| @@ -582,10 +596,10 @@ drm_connector_list_iter_next(struct drm_connector_list_iter *iter) | |||
| 582 | 596 | ||
| 583 | /* loop until it's not a zombie connector */ | 597 | /* loop until it's not a zombie connector */ |
| 584 | } while (!kref_get_unless_zero(&iter->conn->base.refcount)); | 598 | } while (!kref_get_unless_zero(&iter->conn->base.refcount)); |
| 585 | spin_unlock_irqrestore(&config->connector_list_lock, flags); | ||
| 586 | 599 | ||
| 587 | if (old_conn) | 600 | if (old_conn) |
| 588 | drm_connector_put_safe(old_conn); | 601 | __drm_connector_put_safe(old_conn); |
| 602 | spin_unlock_irqrestore(&config->connector_list_lock, flags); | ||
| 589 | 603 | ||
| 590 | return iter->conn; | 604 | return iter->conn; |
| 591 | } | 605 | } |
| @@ -602,9 +616,15 @@ EXPORT_SYMBOL(drm_connector_list_iter_next); | |||
| 602 | */ | 616 | */ |
| 603 | void drm_connector_list_iter_end(struct drm_connector_list_iter *iter) | 617 | void drm_connector_list_iter_end(struct drm_connector_list_iter *iter) |
| 604 | { | 618 | { |
| 619 | struct drm_mode_config *config = &iter->dev->mode_config; | ||
| 620 | unsigned long flags; | ||
| 621 | |||
| 605 | iter->dev = NULL; | 622 | iter->dev = NULL; |
| 606 | if (iter->conn) | 623 | if (iter->conn) { |
| 607 | drm_connector_put_safe(iter->conn); | 624 | spin_lock_irqsave(&config->connector_list_lock, flags); |
| 625 | __drm_connector_put_safe(iter->conn); | ||
| 626 | spin_unlock_irqrestore(&config->connector_list_lock, flags); | ||
| 627 | } | ||
| 608 | lock_release(&connector_list_iter_dep_map, 0, _RET_IP_); | 628 | lock_release(&connector_list_iter_dep_map, 0, _RET_IP_); |
| 609 | } | 629 | } |
| 610 | EXPORT_SYMBOL(drm_connector_list_iter_end); | 630 | EXPORT_SYMBOL(drm_connector_list_iter_end); |
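
Instead of embedding one work item in every connector, dead connectors are now pushed onto a lock-free llist and a single per-device work item drains them all. A minimal sketch of that llist-plus-worker pattern with hypothetical object names:

    struct dead_obj {
        struct llist_node free_node;
    };

    struct obj_pool {
        struct llist_head free_list;
        struct work_struct free_work;
    };

    /* producer: safe from atomic context */
    static void obj_defer_free(struct obj_pool *pool, struct dead_obj *obj)
    {
        llist_add(&obj->free_node, &pool->free_list);
        schedule_work(&pool->free_work);
    }

    /* consumer: one work item drains the whole list */
    static void obj_free_work_fn(struct work_struct *work)
    {
        struct obj_pool *pool =
            container_of(work, struct obj_pool, free_work);
        struct dead_obj *obj, *n;

        llist_for_each_entry_safe(obj, n,
                                  llist_del_all(&pool->free_list),
                                  free_node)
            kfree(obj);
    }
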
| @@ -1231,6 +1251,19 @@ int drm_mode_connector_update_edid_property(struct drm_connector *connector, | |||
| 1231 | if (edid) | 1251 | if (edid) |
| 1232 | size = EDID_LENGTH * (1 + edid->extensions); | 1252 | size = EDID_LENGTH * (1 + edid->extensions); |
| 1233 | 1253 | ||
| 1254 | /* Set the display info, using edid if available, otherwise | ||
| 1255 | * resetting the values to defaults. This duplicates the work | ||
| 1256 | * done in drm_add_edid_modes, but that function is not | ||
| 1257 | * consistently called before this one in all drivers and the | ||
| 1258 | * computation is cheap enough that it seems better to | ||
| 1259 | * duplicate it rather than attempt to ensure some arbitrary | ||
| 1260 | * ordering of calls. | ||
| 1261 | */ | ||
| 1262 | if (edid) | ||
| 1263 | drm_add_display_info(connector, edid); | ||
| 1264 | else | ||
| 1265 | drm_reset_display_info(connector); | ||
| 1266 | |||
| 1234 | drm_object_property_set_value(&connector->base, | 1267 | drm_object_property_set_value(&connector->base, |
| 1235 | dev->mode_config.non_desktop_property, | 1268 | dev->mode_config.non_desktop_property, |
| 1236 | connector->display_info.non_desktop); | 1269 | connector->display_info.non_desktop); |
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h index 9ebb8841778c..af00f42ba269 100644 --- a/drivers/gpu/drm/drm_crtc_internal.h +++ b/drivers/gpu/drm/drm_crtc_internal.h | |||
| @@ -142,6 +142,7 @@ int drm_mode_connector_set_obj_prop(struct drm_mode_object *obj, | |||
| 142 | uint64_t value); | 142 | uint64_t value); |
| 143 | int drm_connector_create_standard_properties(struct drm_device *dev); | 143 | int drm_connector_create_standard_properties(struct drm_device *dev); |
| 144 | const char *drm_get_connector_force_name(enum drm_connector_force force); | 144 | const char *drm_get_connector_force_name(enum drm_connector_force force); |
| 145 | void drm_connector_free_work_fn(struct work_struct *work); | ||
| 145 | 146 | ||
| 146 | /* IOCTL */ | 147 | /* IOCTL */ |
| 147 | int drm_mode_connector_property_set_ioctl(struct drm_device *dev, | 148 | int drm_mode_connector_property_set_ioctl(struct drm_device *dev, |
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 5dfe14763871..cb487148359a 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c | |||
| @@ -1731,7 +1731,7 @@ EXPORT_SYMBOL(drm_edid_duplicate); | |||
| 1731 | * | 1731 | * |
| 1732 | * Returns true if @vendor is in @edid, false otherwise | 1732 | * Returns true if @vendor is in @edid, false otherwise |
| 1733 | */ | 1733 | */ |
| 1734 | static bool edid_vendor(struct edid *edid, const char *vendor) | 1734 | static bool edid_vendor(const struct edid *edid, const char *vendor) |
| 1735 | { | 1735 | { |
| 1736 | char edid_vendor[3]; | 1736 | char edid_vendor[3]; |
| 1737 | 1737 | ||
| @@ -1749,7 +1749,7 @@ static bool edid_vendor(struct edid *edid, const char *vendor) | |||
| 1749 | * | 1749 | * |
| 1750 | * This tells subsequent routines what fixes they need to apply. | 1750 | * This tells subsequent routines what fixes they need to apply. |
| 1751 | */ | 1751 | */ |
| 1752 | static u32 edid_get_quirks(struct edid *edid) | 1752 | static u32 edid_get_quirks(const struct edid *edid) |
| 1753 | { | 1753 | { |
| 1754 | const struct edid_quirk *quirk; | 1754 | const struct edid_quirk *quirk; |
| 1755 | int i; | 1755 | int i; |
| @@ -2813,7 +2813,7 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid, | |||
| 2813 | /* | 2813 | /* |
| 2814 | * Search EDID for CEA extension block. | 2814 | * Search EDID for CEA extension block. |
| 2815 | */ | 2815 | */ |
| 2816 | static u8 *drm_find_edid_extension(struct edid *edid, int ext_id) | 2816 | static u8 *drm_find_edid_extension(const struct edid *edid, int ext_id) |
| 2817 | { | 2817 | { |
| 2818 | u8 *edid_ext = NULL; | 2818 | u8 *edid_ext = NULL; |
| 2819 | int i; | 2819 | int i; |
| @@ -2835,12 +2835,12 @@ static u8 *drm_find_edid_extension(struct edid *edid, int ext_id) | |||
| 2835 | return edid_ext; | 2835 | return edid_ext; |
| 2836 | } | 2836 | } |
| 2837 | 2837 | ||
| 2838 | static u8 *drm_find_cea_extension(struct edid *edid) | 2838 | static u8 *drm_find_cea_extension(const struct edid *edid) |
| 2839 | { | 2839 | { |
| 2840 | return drm_find_edid_extension(edid, CEA_EXT); | 2840 | return drm_find_edid_extension(edid, CEA_EXT); |
| 2841 | } | 2841 | } |
| 2842 | 2842 | ||
| 2843 | static u8 *drm_find_displayid_extension(struct edid *edid) | 2843 | static u8 *drm_find_displayid_extension(const struct edid *edid) |
| 2844 | { | 2844 | { |
| 2845 | return drm_find_edid_extension(edid, DISPLAYID_EXT); | 2845 | return drm_find_edid_extension(edid, DISPLAYID_EXT); |
| 2846 | } | 2846 | } |
| @@ -4363,7 +4363,7 @@ drm_parse_hdmi_vsdb_video(struct drm_connector *connector, const u8 *db) | |||
| 4363 | } | 4363 | } |
| 4364 | 4364 | ||
| 4365 | static void drm_parse_cea_ext(struct drm_connector *connector, | 4365 | static void drm_parse_cea_ext(struct drm_connector *connector, |
| 4366 | struct edid *edid) | 4366 | const struct edid *edid) |
| 4367 | { | 4367 | { |
| 4368 | struct drm_display_info *info = &connector->display_info; | 4368 | struct drm_display_info *info = &connector->display_info; |
| 4369 | const u8 *edid_ext; | 4369 | const u8 *edid_ext; |
| @@ -4397,11 +4397,33 @@ static void drm_parse_cea_ext(struct drm_connector *connector, | |||
| 4397 | } | 4397 | } |
| 4398 | } | 4398 | } |
| 4399 | 4399 | ||
| 4400 | static void drm_add_display_info(struct drm_connector *connector, | 4400 | /* A connector has no EDID information, so we've got no EDID to compute quirks from. Reset |
| 4401 | struct edid *edid, u32 quirks) | 4401 | * all of the values which would have been set from EDID |
| 4402 | */ | ||
| 4403 | void | ||
| 4404 | drm_reset_display_info(struct drm_connector *connector) | ||
| 4402 | { | 4405 | { |
| 4403 | struct drm_display_info *info = &connector->display_info; | 4406 | struct drm_display_info *info = &connector->display_info; |
| 4404 | 4407 | ||
| 4408 | info->width_mm = 0; | ||
| 4409 | info->height_mm = 0; | ||
| 4410 | |||
| 4411 | info->bpc = 0; | ||
| 4412 | info->color_formats = 0; | ||
| 4413 | info->cea_rev = 0; | ||
| 4414 | info->max_tmds_clock = 0; | ||
| 4415 | info->dvi_dual = false; | ||
| 4416 | |||
| 4417 | info->non_desktop = 0; | ||
| 4418 | } | ||
| 4419 | EXPORT_SYMBOL_GPL(drm_reset_display_info); | ||
| 4420 | |||
| 4421 | u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid) | ||
| 4422 | { | ||
| 4423 | struct drm_display_info *info = &connector->display_info; | ||
| 4424 | |||
| 4425 | u32 quirks = edid_get_quirks(edid); | ||
| 4426 | |||
| 4405 | info->width_mm = edid->width_cm * 10; | 4427 | info->width_mm = edid->width_cm * 10; |
| 4406 | info->height_mm = edid->height_cm * 10; | 4428 | info->height_mm = edid->height_cm * 10; |
| 4407 | 4429 | ||
| @@ -4414,11 +4436,13 @@ static void drm_add_display_info(struct drm_connector *connector, | |||
| 4414 | 4436 | ||
| 4415 | info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP); | 4437 | info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP); |
| 4416 | 4438 | ||
| 4439 | DRM_DEBUG_KMS("non_desktop set to %d\n", info->non_desktop); | ||
| 4440 | |||
| 4417 | if (edid->revision < 3) | 4441 | if (edid->revision < 3) |
| 4418 | return; | 4442 | return quirks; |
| 4419 | 4443 | ||
| 4420 | if (!(edid->input & DRM_EDID_INPUT_DIGITAL)) | 4444 | if (!(edid->input & DRM_EDID_INPUT_DIGITAL)) |
| 4421 | return; | 4445 | return quirks; |
| 4422 | 4446 | ||
| 4423 | drm_parse_cea_ext(connector, edid); | 4447 | drm_parse_cea_ext(connector, edid); |
| 4424 | 4448 | ||
| @@ -4438,7 +4462,7 @@ static void drm_add_display_info(struct drm_connector *connector, | |||
| 4438 | 4462 | ||
| 4439 | /* Only defined for 1.4 with digital displays */ | 4463 | /* Only defined for 1.4 with digital displays */ |
| 4440 | if (edid->revision < 4) | 4464 | if (edid->revision < 4) |
| 4441 | return; | 4465 | return quirks; |
| 4442 | 4466 | ||
| 4443 | switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) { | 4467 | switch (edid->input & DRM_EDID_DIGITAL_DEPTH_MASK) { |
| 4444 | case DRM_EDID_DIGITAL_DEPTH_6: | 4468 | case DRM_EDID_DIGITAL_DEPTH_6: |
| @@ -4473,7 +4497,9 @@ static void drm_add_display_info(struct drm_connector *connector, | |||
| 4473 | info->color_formats |= DRM_COLOR_FORMAT_YCRCB444; | 4497 | info->color_formats |= DRM_COLOR_FORMAT_YCRCB444; |
| 4474 | if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422) | 4498 | if (edid->features & DRM_EDID_FEATURE_RGB_YCRCB422) |
| 4475 | info->color_formats |= DRM_COLOR_FORMAT_YCRCB422; | 4499 | info->color_formats |= DRM_COLOR_FORMAT_YCRCB422; |
| 4500 | return quirks; | ||
| 4476 | } | 4501 | } |
| 4502 | EXPORT_SYMBOL_GPL(drm_add_display_info); | ||
| 4477 | 4503 | ||
| 4478 | static int validate_displayid(u8 *displayid, int length, int idx) | 4504 | static int validate_displayid(u8 *displayid, int length, int idx) |
| 4479 | { | 4505 | { |
| @@ -4627,14 +4653,12 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid) | |||
| 4627 | return 0; | 4653 | return 0; |
| 4628 | } | 4654 | } |
| 4629 | 4655 | ||
| 4630 | quirks = edid_get_quirks(edid); | ||
| 4631 | |||
| 4632 | /* | 4656 | /* |
| 4633 | * CEA-861-F adds ycbcr capability map block, for HDMI 2.0 sinks. | 4657 | * CEA-861-F adds ycbcr capability map block, for HDMI 2.0 sinks. |
| 4634 | * To avoid multiple parsing of same block, lets parse that map | 4658 | * To avoid multiple parsing of same block, lets parse that map |
| 4635 | * from sink info, before parsing CEA modes. | 4659 | * from sink info, before parsing CEA modes. |
| 4636 | */ | 4660 | */ |
| 4637 | drm_add_display_info(connector, edid, quirks); | 4661 | quirks = drm_add_display_info(connector, edid); |
| 4638 | 4662 | ||
| 4639 | /* | 4663 | /* |
| 4640 | * EDID spec says modes should be preferred in this order: | 4664 | * EDID spec says modes should be preferred in this order: |
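
After this refactor drm_add_display_info() computes the EDID quirks itself and returns them, while drm_reset_display_info() clears stale display info when a connector loses its EDID. The resulting call shape in the two consumers, condensed from the hunks above:

    /* in drm_mode_connector_update_edid_property() */
    if (edid)
        drm_add_display_info(connector, edid);
    else
        drm_reset_display_info(connector);

    /* in drm_add_edid_modes(): quirks now come from the same parse */
    quirks = drm_add_display_info(connector, edid);
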
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c index d1eb56a1eff4..59849f02e2ad 100644 --- a/drivers/gpu/drm/drm_lease.c +++ b/drivers/gpu/drm/drm_lease.c | |||
| @@ -254,10 +254,10 @@ static struct drm_master *drm_lease_create(struct drm_master *lessor, struct idr | |||
| 254 | return lessee; | 254 | return lessee; |
| 255 | 255 | ||
| 256 | out_lessee: | 256 | out_lessee: |
| 257 | drm_master_put(&lessee); | ||
| 258 | |||
| 259 | mutex_unlock(&dev->mode_config.idr_mutex); | 257 | mutex_unlock(&dev->mode_config.idr_mutex); |
| 260 | 258 | ||
| 259 | drm_master_put(&lessee); | ||
| 260 | |||
| 261 | return ERR_PTR(error); | 261 | return ERR_PTR(error); |
| 262 | } | 262 | } |
| 263 | 263 | ||
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 61a1c8ea74bc..c3c79ee6119e 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c | |||
| @@ -575,21 +575,23 @@ EXPORT_SYMBOL(drm_mm_remove_node); | |||
| 575 | */ | 575 | */ |
| 576 | void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new) | 576 | void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new) |
| 577 | { | 577 | { |
| 578 | struct drm_mm *mm = old->mm; | ||
| 579 | |||
| 578 | DRM_MM_BUG_ON(!old->allocated); | 580 | DRM_MM_BUG_ON(!old->allocated); |
| 579 | 581 | ||
| 580 | *new = *old; | 582 | *new = *old; |
| 581 | 583 | ||
| 582 | list_replace(&old->node_list, &new->node_list); | 584 | list_replace(&old->node_list, &new->node_list); |
| 583 | rb_replace_node(&old->rb, &new->rb, &old->mm->interval_tree.rb_root); | 585 | rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree); |
| 584 | 586 | ||
| 585 | if (drm_mm_hole_follows(old)) { | 587 | if (drm_mm_hole_follows(old)) { |
| 586 | list_replace(&old->hole_stack, &new->hole_stack); | 588 | list_replace(&old->hole_stack, &new->hole_stack); |
| 587 | rb_replace_node(&old->rb_hole_size, | 589 | rb_replace_node(&old->rb_hole_size, |
| 588 | &new->rb_hole_size, | 590 | &new->rb_hole_size, |
| 589 | &old->mm->holes_size); | 591 | &mm->holes_size); |
| 590 | rb_replace_node(&old->rb_hole_addr, | 592 | rb_replace_node(&old->rb_hole_addr, |
| 591 | &new->rb_hole_addr, | 593 | &new->rb_hole_addr, |
| 592 | &old->mm->holes_addr); | 594 | &mm->holes_addr); |
| 593 | } | 595 | } |
| 594 | 596 | ||
| 595 | old->allocated = false; | 597 | old->allocated = false; |
diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c index cc78b3d9e5e4..256de7313612 100644 --- a/drivers/gpu/drm/drm_mode_config.c +++ b/drivers/gpu/drm/drm_mode_config.c | |||
| @@ -382,6 +382,9 @@ void drm_mode_config_init(struct drm_device *dev) | |||
| 382 | ida_init(&dev->mode_config.connector_ida); | 382 | ida_init(&dev->mode_config.connector_ida); |
| 383 | spin_lock_init(&dev->mode_config.connector_list_lock); | 383 | spin_lock_init(&dev->mode_config.connector_list_lock); |
| 384 | 384 | ||
| 385 | init_llist_head(&dev->mode_config.connector_free_list); | ||
| 386 | INIT_WORK(&dev->mode_config.connector_free_work, drm_connector_free_work_fn); | ||
| 387 | |||
| 385 | drm_mode_create_standard_properties(dev); | 388 | drm_mode_create_standard_properties(dev); |
| 386 | 389 | ||
| 387 | /* Just to be sure */ | 390 | /* Just to be sure */ |
| @@ -432,7 +435,7 @@ void drm_mode_config_cleanup(struct drm_device *dev) | |||
| 432 | } | 435 | } |
| 433 | drm_connector_list_iter_end(&conn_iter); | 436 | drm_connector_list_iter_end(&conn_iter); |
| 434 | /* connector_iter drops references in a work item. */ | 437 | /* connector_iter drops references in a work item. */ |
| 435 | flush_scheduled_work(); | 438 | flush_work(&dev->mode_config.connector_free_work); |
| 436 | if (WARN_ON(!list_empty(&dev->mode_config.connector_list))) { | 439 | if (WARN_ON(!list_empty(&dev->mode_config.connector_list))) { |
| 437 | drm_connector_list_iter_begin(dev, &conn_iter); | 440 | drm_connector_list_iter_begin(dev, &conn_iter); |
| 438 | drm_for_each_connector_iter(connector, &conn_iter) | 441 | drm_for_each_connector_iter(connector, &conn_iter) |
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index 6c32c89a83a9..638540943c61 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c | |||
| @@ -888,8 +888,10 @@ vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec) | |||
| 888 | /* If we got force-completed because of GPU reset rather than | 888 | /* If we got force-completed because of GPU reset rather than |
| 889 | * through our IRQ handler, signal the fence now. | 889 | * through our IRQ handler, signal the fence now. |
| 890 | */ | 890 | */ |
| 891 | if (exec->fence) | 891 | if (exec->fence) { |
| 892 | dma_fence_signal(exec->fence); | 892 | dma_fence_signal(exec->fence); |
| 893 | dma_fence_put(exec->fence); | ||
| 894 | } | ||
| 893 | 895 | ||
| 894 | if (exec->bo) { | 896 | if (exec->bo) { |
| 895 | for (i = 0; i < exec->bo_count; i++) { | 897 | for (i = 0; i < exec->bo_count; i++) { |
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c index 61b2e5377993..26eddbb62893 100644 --- a/drivers/gpu/drm/vc4/vc4_irq.c +++ b/drivers/gpu/drm/vc4/vc4_irq.c | |||
| @@ -139,6 +139,7 @@ vc4_irq_finish_render_job(struct drm_device *dev) | |||
| 139 | list_move_tail(&exec->head, &vc4->job_done_list); | 139 | list_move_tail(&exec->head, &vc4->job_done_list); |
| 140 | if (exec->fence) { | 140 | if (exec->fence) { |
| 141 | dma_fence_signal_locked(exec->fence); | 141 | dma_fence_signal_locked(exec->fence); |
| 142 | dma_fence_put(exec->fence); | ||
| 142 | exec->fence = NULL; | 143 | exec->fence = NULL; |
| 143 | } | 144 | } |
| 144 | vc4_submit_next_render_job(dev); | 145 | vc4_submit_next_render_job(dev); |
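
Both vc4 hunks fix the same reference leak: dma_fence_signal() wakes waiters but does not drop the reference the driver held in exec->fence, so each signalling path now pairs it with dma_fence_put() (the IRQ path uses dma_fence_signal_locked() under the job lock). The rule in miniature:

    if (exec->fence) {
        dma_fence_signal(exec->fence);   /* wake the waiters */
        dma_fence_put(exec->fence);      /* drop the driver's reference */
        exec->fence = NULL;              /* never signal or put twice */
    }
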
diff --git a/drivers/hwtracing/stm/ftrace.c b/drivers/hwtracing/stm/ftrace.c index bd126a7c6da2..7da75644c750 100644 --- a/drivers/hwtracing/stm/ftrace.c +++ b/drivers/hwtracing/stm/ftrace.c | |||
| @@ -42,9 +42,11 @@ static struct stm_ftrace { | |||
| 42 | * @len: length of the data packet | 42 | * @len: length of the data packet |
| 43 | */ | 43 | */ |
| 44 | static void notrace | 44 | static void notrace |
| 45 | stm_ftrace_write(const void *buf, unsigned int len) | 45 | stm_ftrace_write(struct trace_export *export, const void *buf, unsigned int len) |
| 46 | { | 46 | { |
| 47 | stm_source_write(&stm_ftrace.data, STM_FTRACE_CHAN, buf, len); | 47 | struct stm_ftrace *stm = container_of(export, struct stm_ftrace, ftrace); |
| 48 | |||
| 49 | stm_source_write(&stm->data, STM_FTRACE_CHAN, buf, len); | ||
| 48 | } | 50 | } |
| 49 | 51 | ||
| 50 | static int stm_ftrace_link(struct stm_source_data *data) | 52 | static int stm_ftrace_link(struct stm_source_data *data) |
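
The trace_export write callback gained an export argument, so stm_ftrace recovers its state with container_of() instead of reaching for the file-scope singleton. The same idiom works for any hypothetical wrapper around struct trace_export:

    struct my_export {
        struct trace_export ftrace;
        int chan;
    };

    static void notrace
    my_write(struct trace_export *export, const void *buf, unsigned int len)
    {
        struct my_export *me =
            container_of(export, struct my_export, ftrace);

        do_write(me->chan, buf, len);    /* assumed transport helper */
    }
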
diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c index 0d05dadb2dc5..44cffad43701 100644 --- a/drivers/i2c/busses/i2c-cht-wc.c +++ b/drivers/i2c/busses/i2c-cht-wc.c | |||
| @@ -379,7 +379,7 @@ static int cht_wc_i2c_adap_i2c_remove(struct platform_device *pdev) | |||
| 379 | return 0; | 379 | return 0; |
| 380 | } | 380 | } |
| 381 | 381 | ||
| 382 | static struct platform_device_id cht_wc_i2c_adap_id_table[] = { | 382 | static const struct platform_device_id cht_wc_i2c_adap_id_table[] = { |
| 383 | { .name = "cht_wcove_ext_chgr" }, | 383 | { .name = "cht_wcove_ext_chgr" }, |
| 384 | {}, | 384 | {}, |
| 385 | }; | 385 | }; |
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index 174579d32e5f..462948e2c535 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c | |||
| @@ -983,7 +983,7 @@ static void piix4_adap_remove(struct i2c_adapter *adap) | |||
| 983 | 983 | ||
| 984 | if (adapdata->smba) { | 984 | if (adapdata->smba) { |
| 985 | i2c_del_adapter(adap); | 985 | i2c_del_adapter(adap); |
| 986 | if (adapdata->port == (0 << 1)) { | 986 | if (adapdata->port == (0 << piix4_port_shift_sb800)) { |
| 987 | release_region(adapdata->smba, SMBIOSIZE); | 987 | release_region(adapdata->smba, SMBIOSIZE); |
| 988 | if (adapdata->sb800_main) | 988 | if (adapdata->sb800_main) |
| 989 | release_region(SB800_PIIX4_SMB_IDX, 2); | 989 | release_region(SB800_PIIX4_SMB_IDX, 2); |
diff --git a/drivers/i2c/busses/i2c-stm32.h b/drivers/i2c/busses/i2c-stm32.h index dab51761f8c5..d4f9cef251ac 100644 --- a/drivers/i2c/busses/i2c-stm32.h +++ b/drivers/i2c/busses/i2c-stm32.h | |||
| @@ -1,10 +1,11 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 1 | /* | 2 | /* |
| 2 | * i2c-stm32.h | 3 | * i2c-stm32.h |
| 3 | * | 4 | * |
| 4 | * Copyright (C) M'boumba Cedric Madianga 2017 | 5 | * Copyright (C) M'boumba Cedric Madianga 2017 |
| 6 | * Copyright (C) STMicroelectronics 2017 | ||
| 5 | * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com> | 7 | * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com> |
| 6 | * | 8 | * |
| 7 | * License terms: GNU General Public License (GPL), version 2 | ||
| 8 | */ | 9 | */ |
| 9 | 10 | ||
| 10 | #ifndef _I2C_STM32_H | 11 | #ifndef _I2C_STM32_H |
diff --git a/drivers/i2c/busses/i2c-stm32f4.c b/drivers/i2c/busses/i2c-stm32f4.c index 4ec108496f15..47c8d00de53f 100644 --- a/drivers/i2c/busses/i2c-stm32f4.c +++ b/drivers/i2c/busses/i2c-stm32f4.c | |||
| @@ -1,3 +1,4 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 1 | /* | 2 | /* |
| 2 | * Driver for STMicroelectronics STM32 I2C controller | 3 | * Driver for STMicroelectronics STM32 I2C controller |
| 3 | * | 4 | * |
| @@ -6,11 +7,11 @@ | |||
| 6 | * http://www.st.com/resource/en/reference_manual/DM00031020.pdf | 7 | * http://www.st.com/resource/en/reference_manual/DM00031020.pdf |
| 7 | * | 8 | * |
| 8 | * Copyright (C) M'boumba Cedric Madianga 2016 | 9 | * Copyright (C) M'boumba Cedric Madianga 2016 |
| 10 | * Copyright (C) STMicroelectronics 2017 | ||
| 9 | * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com> | 11 | * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com> |
| 10 | * | 12 | * |
| 11 | * This driver is based on i2c-st.c | 13 | * This driver is based on i2c-st.c |
| 12 | * | 14 | * |
| 13 | * License terms: GNU General Public License (GPL), version 2 | ||
| 14 | */ | 15 | */ |
| 15 | 16 | ||
| 16 | #include <linux/clk.h> | 17 | #include <linux/clk.h> |
diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c index d4a6e9c2e9aa..b445b3bb0bb1 100644 --- a/drivers/i2c/busses/i2c-stm32f7.c +++ b/drivers/i2c/busses/i2c-stm32f7.c | |||
| @@ -1,3 +1,4 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 1 | /* | 2 | /* |
| 2 | * Driver for STMicroelectronics STM32F7 I2C controller | 3 | * Driver for STMicroelectronics STM32F7 I2C controller |
| 3 | * | 4 | * |
| @@ -7,11 +8,11 @@ | |||
| 7 | * http://www.st.com/resource/en/reference_manual/dm00124865.pdf | 8 | * http://www.st.com/resource/en/reference_manual/dm00124865.pdf |
| 8 | * | 9 | * |
| 9 | * Copyright (C) M'boumba Cedric Madianga 2017 | 10 | * Copyright (C) M'boumba Cedric Madianga 2017 |
| 11 | * Copyright (C) STMicroelectronics 2017 | ||
| 10 | * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com> | 12 | * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com> |
| 11 | * | 13 | * |
| 12 | * This driver is based on i2c-stm32f4.c | 14 | * This driver is based on i2c-stm32f4.c |
| 13 | * | 15 | * |
| 14 | * License terms: GNU General Public License (GPL), version 2 | ||
| 15 | */ | 16 | */ |
| 16 | #include <linux/clk.h> | 17 | #include <linux/clk.h> |
| 17 | #include <linux/delay.h> | 18 | #include <linux/delay.h> |
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index f6983357145d..6294a7001d33 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c | |||
| @@ -4458,7 +4458,7 @@ out: | |||
| 4458 | return skb->len; | 4458 | return skb->len; |
| 4459 | } | 4459 | } |
| 4460 | 4460 | ||
| 4461 | static const struct rdma_nl_cbs cma_cb_table[] = { | 4461 | static const struct rdma_nl_cbs cma_cb_table[RDMA_NL_RDMA_CM_NUM_OPS] = { |
| 4462 | [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats}, | 4462 | [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats}, |
| 4463 | }; | 4463 | }; |
| 4464 | 4464 | ||
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 5e1be4949d5f..30914f3baa5f 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c | |||
| @@ -1146,7 +1146,7 @@ struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, | |||
| 1146 | } | 1146 | } |
| 1147 | EXPORT_SYMBOL(ib_get_net_dev_by_params); | 1147 | EXPORT_SYMBOL(ib_get_net_dev_by_params); |
| 1148 | 1148 | ||
| 1149 | static const struct rdma_nl_cbs ibnl_ls_cb_table[] = { | 1149 | static const struct rdma_nl_cbs ibnl_ls_cb_table[RDMA_NL_LS_NUM_OPS] = { |
| 1150 | [RDMA_NL_LS_OP_RESOLVE] = { | 1150 | [RDMA_NL_LS_OP_RESOLVE] = { |
| 1151 | .doit = ib_nl_handle_resolve_resp, | 1151 | .doit = ib_nl_handle_resolve_resp, |
| 1152 | .flags = RDMA_NL_ADMIN_PERM, | 1152 | .flags = RDMA_NL_ADMIN_PERM, |
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c index e9e189ec7502..5d676cff41f4 100644 --- a/drivers/infiniband/core/iwcm.c +++ b/drivers/infiniband/core/iwcm.c | |||
| @@ -80,7 +80,7 @@ const char *__attribute_const__ iwcm_reject_msg(int reason) | |||
| 80 | } | 80 | } |
| 81 | EXPORT_SYMBOL(iwcm_reject_msg); | 81 | EXPORT_SYMBOL(iwcm_reject_msg); |
| 82 | 82 | ||
| 83 | static struct rdma_nl_cbs iwcm_nl_cb_table[] = { | 83 | static struct rdma_nl_cbs iwcm_nl_cb_table[RDMA_NL_IWPM_NUM_OPS] = { |
| 84 | [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb}, | 84 | [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb}, |
| 85 | [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb}, | 85 | [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb}, |
| 86 | [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb}, | 86 | [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb}, |
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index 2fae850a3eff..9a05245a1acf 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c | |||
| @@ -303,7 +303,7 @@ out: cb->args[0] = idx; | |||
| 303 | return skb->len; | 303 | return skb->len; |
| 304 | } | 304 | } |
| 305 | 305 | ||
| 306 | static const struct rdma_nl_cbs nldev_cb_table[] = { | 306 | static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = { |
| 307 | [RDMA_NLDEV_CMD_GET] = { | 307 | [RDMA_NLDEV_CMD_GET] = { |
| 308 | .doit = nldev_get_doit, | 308 | .doit = nldev_get_doit, |
| 309 | .dump = nldev_get_dumpit, | 309 | .dump = nldev_get_dumpit, |
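
A note on the rdma_nl_cbs hunks above and this one: sizing each callback table with its *_NUM_OPS constant, instead of letting the compiler infer the length from the highest designated initializer, guarantees that every opcode below the limit indexes a valid slot, with unlisted entries zero-filled. The netlink dispatcher can then test for a missing handler rather than read past the end of the array. A self-contained sketch of the C idiom, using simplified stand-in types rather than the rdma_nl API:

    #include <stdio.h>

    enum { OP_GET, OP_SET, OP_NUM_OPS };

    struct cb { int (*doit)(void); };

    static int do_get(void) { return 0; }

    /* Explicit length: every opcode < OP_NUM_OPS is a valid index,
     * and unlisted entries are zero, i.e. "no handler". */
    static const struct cb cb_table[OP_NUM_OPS] = {
            [OP_GET] = { .doit = do_get },
    };

    int main(void)
    {
            for (int op = 0; op < OP_NUM_OPS; op++)
                    printf("op %d: %s\n", op,
                           cb_table[op].doit ? "handled" : "unsupported");
            return 0;
    }
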
diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c index a337386652b0..feafdb961c48 100644 --- a/drivers/infiniband/core/security.c +++ b/drivers/infiniband/core/security.c | |||
| @@ -739,8 +739,11 @@ int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index) | |||
| 739 | if (!rdma_protocol_ib(map->agent.device, map->agent.port_num)) | 739 | if (!rdma_protocol_ib(map->agent.device, map->agent.port_num)) |
| 740 | return 0; | 740 | return 0; |
| 741 | 741 | ||
| 742 | if (map->agent.qp->qp_type == IB_QPT_SMI && !map->agent.smp_allowed) | 742 | if (map->agent.qp->qp_type == IB_QPT_SMI) { |
| 743 | return -EACCES; | 743 | if (!map->agent.smp_allowed) |
| 744 | return -EACCES; | ||
| 745 | return 0; | ||
| 746 | } | ||
| 744 | 747 | ||
| 745 | return ib_security_pkey_access(map->agent.device, | 748 | return ib_security_pkey_access(map->agent.device, |
| 746 | map->agent.port_num, | 749 | map->agent.port_num, |
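
The restructured check makes the SMI branch terminal: an SMI QP either fails the smp_allowed test with -EACCES or succeeds outright, and only non-SMI QPs fall through to the pkey access check. A control-flow sketch with stand-in names (check_pkey_access() is a placeholder, not the kernel helper):

    #include <stdbool.h>

    #define EACCES 13

    static int check_pkey_access(void) { return 0; }   /* placeholder */

    static int enforce_security(bool is_smi_qp, bool smp_allowed)
    {
            if (is_smi_qp)          /* decided here, never falls through */
                    return smp_allowed ? 0 : -EACCES;

            return check_pkey_access();     /* non-SMI QPs only */
    }
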
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 16d55710b116..d0202bb176a4 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c | |||
| @@ -1971,6 +1971,12 @@ static int modify_qp(struct ib_uverbs_file *file, | |||
| 1971 | goto release_qp; | 1971 | goto release_qp; |
| 1972 | } | 1972 | } |
| 1973 | 1973 | ||
| 1974 | if ((cmd->base.attr_mask & IB_QP_ALT_PATH) && | ||
| 1975 | !rdma_is_port_valid(qp->device, cmd->base.alt_port_num)) { | ||
| 1976 | ret = -EINVAL; | ||
| 1977 | goto release_qp; | ||
| 1978 | } | ||
| 1979 | |||
| 1974 | attr->qp_state = cmd->base.qp_state; | 1980 | attr->qp_state = cmd->base.qp_state; |
| 1975 | attr->cur_qp_state = cmd->base.cur_qp_state; | 1981 | attr->cur_qp_state = cmd->base.cur_qp_state; |
| 1976 | attr->path_mtu = cmd->base.path_mtu; | 1982 | attr->path_mtu = cmd->base.path_mtu; |
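
The added block mirrors the existing port_num validation: the user-supplied alternate port number is range-checked before any attribute from the command is handed to the driver, so a malformed IB_QP_ALT_PATH request fails cleanly with -EINVAL. The general validate-before-copy shape, with stand-in names rather than the uverbs code:

    /* Sketch: reject out-of-range user input before it reaches
     * driver state; unwind through the common error path. */
    if ((cmd->attr_mask & WANTS_ALT_PATH) &&
        !port_is_valid(dev, cmd->alt_port_num)) {
            ret = -EINVAL;
            goto release_qp;
    }
    /* ...only now copy cmd fields into attr... */
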
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index ea55e95cd2c5..b7bfc536e00f 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c | |||
| @@ -395,6 +395,11 @@ next_cqe: | |||
| 395 | 395 | ||
| 396 | static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq) | 396 | static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq) |
| 397 | { | 397 | { |
| 398 | if (CQE_OPCODE(cqe) == C4IW_DRAIN_OPCODE) { | ||
| 399 | WARN_ONCE(1, "Unexpected DRAIN CQE qp id %u!\n", wq->sq.qid); | ||
| 400 | return 0; | ||
| 401 | } | ||
| 402 | |||
| 398 | if (CQE_OPCODE(cqe) == FW_RI_TERMINATE) | 403 | if (CQE_OPCODE(cqe) == FW_RI_TERMINATE) |
| 399 | return 0; | 404 | return 0; |
| 400 | 405 | ||
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 5ee7fe433136..38bddd02a943 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c | |||
| @@ -868,7 +868,12 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
| 868 | 868 | ||
| 869 | qhp = to_c4iw_qp(ibqp); | 869 | qhp = to_c4iw_qp(ibqp); |
| 870 | spin_lock_irqsave(&qhp->lock, flag); | 870 | spin_lock_irqsave(&qhp->lock, flag); |
| 871 | if (t4_wq_in_error(&qhp->wq)) { | 871 | |
| 872 | /* | ||
| 873 | * If the qp has been flushed, then just insert a special | ||
| 874 | * drain cqe. | ||
| 875 | */ | ||
| 876 | if (qhp->wq.flushed) { | ||
| 872 | spin_unlock_irqrestore(&qhp->lock, flag); | 877 | spin_unlock_irqrestore(&qhp->lock, flag); |
| 873 | complete_sq_drain_wr(qhp, wr); | 878 | complete_sq_drain_wr(qhp, wr); |
| 874 | return err; | 879 | return err; |
| @@ -1011,7 +1016,12 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |||
| 1011 | 1016 | ||
| 1012 | qhp = to_c4iw_qp(ibqp); | 1017 | qhp = to_c4iw_qp(ibqp); |
| 1013 | spin_lock_irqsave(&qhp->lock, flag); | 1018 | spin_lock_irqsave(&qhp->lock, flag); |
| 1014 | if (t4_wq_in_error(&qhp->wq)) { | 1019 | |
| 1020 | /* | ||
| 1021 | * If the qp has been flushed, then just insert a special | ||
| 1022 | * drain cqe. | ||
| 1023 | */ | ||
| 1024 | if (qhp->wq.flushed) { | ||
| 1015 | spin_unlock_irqrestore(&qhp->lock, flag); | 1025 | spin_unlock_irqrestore(&qhp->lock, flag); |
| 1016 | complete_rq_drain_wr(qhp, wr); | 1026 | complete_rq_drain_wr(qhp, wr); |
| 1017 | return err; | 1027 | return err; |
| @@ -1285,21 +1295,21 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp, | |||
| 1285 | spin_unlock_irqrestore(&rchp->lock, flag); | 1295 | spin_unlock_irqrestore(&rchp->lock, flag); |
| 1286 | 1296 | ||
| 1287 | if (schp == rchp) { | 1297 | if (schp == rchp) { |
| 1288 | if (t4_clear_cq_armed(&rchp->cq) && | 1298 | if ((rq_flushed || sq_flushed) && |
| 1289 | (rq_flushed || sq_flushed)) { | 1299 | t4_clear_cq_armed(&rchp->cq)) { |
| 1290 | spin_lock_irqsave(&rchp->comp_handler_lock, flag); | 1300 | spin_lock_irqsave(&rchp->comp_handler_lock, flag); |
| 1291 | (*rchp->ibcq.comp_handler)(&rchp->ibcq, | 1301 | (*rchp->ibcq.comp_handler)(&rchp->ibcq, |
| 1292 | rchp->ibcq.cq_context); | 1302 | rchp->ibcq.cq_context); |
| 1293 | spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); | 1303 | spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); |
| 1294 | } | 1304 | } |
| 1295 | } else { | 1305 | } else { |
| 1296 | if (t4_clear_cq_armed(&rchp->cq) && rq_flushed) { | 1306 | if (rq_flushed && t4_clear_cq_armed(&rchp->cq)) { |
| 1297 | spin_lock_irqsave(&rchp->comp_handler_lock, flag); | 1307 | spin_lock_irqsave(&rchp->comp_handler_lock, flag); |
| 1298 | (*rchp->ibcq.comp_handler)(&rchp->ibcq, | 1308 | (*rchp->ibcq.comp_handler)(&rchp->ibcq, |
| 1299 | rchp->ibcq.cq_context); | 1309 | rchp->ibcq.cq_context); |
| 1300 | spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); | 1310 | spin_unlock_irqrestore(&rchp->comp_handler_lock, flag); |
| 1301 | } | 1311 | } |
| 1302 | if (t4_clear_cq_armed(&schp->cq) && sq_flushed) { | 1312 | if (sq_flushed && t4_clear_cq_armed(&schp->cq)) { |
| 1303 | spin_lock_irqsave(&schp->comp_handler_lock, flag); | 1313 | spin_lock_irqsave(&schp->comp_handler_lock, flag); |
| 1304 | (*schp->ibcq.comp_handler)(&schp->ibcq, | 1314 | (*schp->ibcq.comp_handler)(&schp->ibcq, |
| 1305 | schp->ibcq.cq_context); | 1315 | schp->ibcq.cq_context); |
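
Two related changes here. Posted work requests are turned into drain completions once the QP has been flushed (qhp->wq.flushed) rather than merely when the queue is in error, matching the drain-CQE handling added in cq.c above. And because t4_clear_cq_armed() has a side effect (it disarms the CQ), the flushed checks move to the left of the &&, so short-circuit evaluation disarms the CQ only when something was actually flushed. A standalone illustration of why operand order matters when one operand has side effects:

    #include <stdio.h>

    static int armed = 1;

    static int clear_armed(void)        /* side effect: disarms */
    {
            int was_armed = armed;
            armed = 0;
            return was_armed;
    }

    int main(void)
    {
            int flushed = 0;

            /* Old order, clear_armed() && flushed: disarms even when
             * nothing was flushed, losing a future notification. */

            /* New order: the side effect happens only if flushed. */
            if (flushed && clear_armed())
                    printf("run completion handler\n");

            printf("still armed: %d\n", armed);     /* prints 1 */
            return 0;
    }
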
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 013049bcdb53..caf490ab24c8 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c | |||
| @@ -666,6 +666,19 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx, | |||
| 666 | return (-EOPNOTSUPP); | 666 | return (-EOPNOTSUPP); |
| 667 | } | 667 | } |
| 668 | 668 | ||
| 669 | if (ucmd->rx_hash_fields_mask & ~(MLX4_IB_RX_HASH_SRC_IPV4 | | ||
| 670 | MLX4_IB_RX_HASH_DST_IPV4 | | ||
| 671 | MLX4_IB_RX_HASH_SRC_IPV6 | | ||
| 672 | MLX4_IB_RX_HASH_DST_IPV6 | | ||
| 673 | MLX4_IB_RX_HASH_SRC_PORT_TCP | | ||
| 674 | MLX4_IB_RX_HASH_DST_PORT_TCP | | ||
| 675 | MLX4_IB_RX_HASH_SRC_PORT_UDP | | ||
| 676 | MLX4_IB_RX_HASH_DST_PORT_UDP)) { | ||
| 677 | pr_debug("RX Hash fields_mask has unsupported mask (0x%llx)\n", | ||
| 678 | ucmd->rx_hash_fields_mask); | ||
| 679 | return (-EOPNOTSUPP); | ||
| 680 | } | ||
| 681 | |||
| 669 | if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV4) && | 682 | if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_IPV4) && |
| 670 | (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV4)) { | 683 | (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_IPV4)) { |
| 671 | rss_ctx->flags = MLX4_RSS_IPV4; | 684 | rss_ctx->flags = MLX4_RSS_IPV4; |
| @@ -691,11 +704,11 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx, | |||
| 691 | return (-EOPNOTSUPP); | 704 | return (-EOPNOTSUPP); |
| 692 | } | 705 | } |
| 693 | 706 | ||
| 694 | if (rss_ctx->flags & MLX4_RSS_IPV4) { | 707 | if (rss_ctx->flags & MLX4_RSS_IPV4) |
| 695 | rss_ctx->flags |= MLX4_RSS_UDP_IPV4; | 708 | rss_ctx->flags |= MLX4_RSS_UDP_IPV4; |
| 696 | } else if (rss_ctx->flags & MLX4_RSS_IPV6) { | 709 | if (rss_ctx->flags & MLX4_RSS_IPV6) |
| 697 | rss_ctx->flags |= MLX4_RSS_UDP_IPV6; | 710 | rss_ctx->flags |= MLX4_RSS_UDP_IPV6; |
| 698 | } else { | 711 | if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) { |
| 699 | pr_debug("RX Hash fields_mask is not supported - UDP must be set with IPv4 or IPv6\n"); | 712 | pr_debug("RX Hash fields_mask is not supported - UDP must be set with IPv4 or IPv6\n"); |
| 700 | return (-EOPNOTSUPP); | 713 | return (-EOPNOTSUPP); |
| 701 | } | 714 | } |
| @@ -707,15 +720,14 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx, | |||
| 707 | 720 | ||
| 708 | if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) && | 721 | if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) && |
| 709 | (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) { | 722 | (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) { |
| 710 | if (rss_ctx->flags & MLX4_RSS_IPV4) { | 723 | if (rss_ctx->flags & MLX4_RSS_IPV4) |
| 711 | rss_ctx->flags |= MLX4_RSS_TCP_IPV4; | 724 | rss_ctx->flags |= MLX4_RSS_TCP_IPV4; |
| 712 | } else if (rss_ctx->flags & MLX4_RSS_IPV6) { | 725 | if (rss_ctx->flags & MLX4_RSS_IPV6) |
| 713 | rss_ctx->flags |= MLX4_RSS_TCP_IPV6; | 726 | rss_ctx->flags |= MLX4_RSS_TCP_IPV6; |
| 714 | } else { | 727 | if (!(rss_ctx->flags & (MLX4_RSS_IPV6 | MLX4_RSS_IPV4))) { |
| 715 | pr_debug("RX Hash fields_mask is not supported - TCP must be set with IPv4 or IPv6\n"); | 728 | pr_debug("RX Hash fields_mask is not supported - TCP must be set with IPv4 or IPv6\n"); |
| 716 | return (-EOPNOTSUPP); | 729 | return (-EOPNOTSUPP); |
| 717 | } | 730 | } |
| 718 | |||
| 719 | } else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) || | 731 | } else if ((ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_SRC_PORT_TCP) || |
| 720 | (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) { | 732 | (ucmd->rx_hash_fields_mask & MLX4_IB_RX_HASH_DST_PORT_TCP)) { |
| 721 | pr_debug("RX Hash fields_mask is not supported - both TCP SRC and DST must be set\n"); | 733 | pr_debug("RX Hash fields_mask is not supported - both TCP SRC and DST must be set\n"); |
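
The rewrite first rejects any bit outside the supported RX-hash set, then turns the IPv4/IPv6 else-if chains into independent ifs, so a mask that selects both address families enables UDP (or TCP) hashing for both; only a mask with neither family is refused. A reduced standalone model of the new logic (the flag values are illustrative, not the mlx4 encodings):

    #include <stdio.h>

    #define H_SRC_IPV4 (1u << 0)
    #define H_DST_IPV4 (1u << 1)
    #define H_SRC_IPV6 (1u << 2)
    #define H_DST_IPV6 (1u << 3)
    #define SUPPORTED  (H_SRC_IPV4 | H_DST_IPV4 | H_SRC_IPV6 | H_DST_IPV6)

    #define RSS_IPV4 1u
    #define RSS_IPV6 2u

    int main(void)
    {
            unsigned int mask = SUPPORTED;  /* both families requested */
            unsigned int flags = 0;

            if (mask & ~SUPPORTED)          /* new up-front validation */
                    return 1;

            if ((mask & H_SRC_IPV4) && (mask & H_DST_IPV4))
                    flags |= RSS_IPV4;
            if ((mask & H_SRC_IPV6) && (mask & H_DST_IPV6))
                    flags |= RSS_IPV6;      /* no longer an else-if */

            if (!(flags & (RSS_IPV4 | RSS_IPV6)))
                    return 1;               /* neither family set */

            printf("flags=%u\n", flags);    /* 3: both families hashed */
            return 0;
    }
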
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 87f4bd99cdf7..2c13123bfd69 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c | |||
| @@ -1145,6 +1145,7 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn, | |||
| 1145 | noio_flag = memalloc_noio_save(); | 1145 | noio_flag = memalloc_noio_save(); |
| 1146 | p->tx_ring = vzalloc(ipoib_sendq_size * sizeof(*p->tx_ring)); | 1146 | p->tx_ring = vzalloc(ipoib_sendq_size * sizeof(*p->tx_ring)); |
| 1147 | if (!p->tx_ring) { | 1147 | if (!p->tx_ring) { |
| 1148 | memalloc_noio_restore(noio_flag); | ||
| 1148 | ret = -ENOMEM; | 1149 | ret = -ENOMEM; |
| 1149 | goto err_tx; | 1150 | goto err_tx; |
| 1150 | } | 1151 | } |
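
memalloc_noio_save() and memalloc_noio_restore() must pair on every exit path; before this one-liner the allocation-failure branch returned with the task stuck in NOIO mode. The general save/restore discipline, sketched with a stand-in context struct:

    /* Sketch: saved task state is restored on the error path too. */
    static int setup_ring(struct ctx *c, size_t n)  /* ctx is a stand-in */
    {
            unsigned int noio = memalloc_noio_save();

            c->ring = vzalloc(n * sizeof(*c->ring));
            if (!c->ring) {
                    memalloc_noio_restore(noio);    /* easy to forget */
                    return -ENOMEM;
            }
            /* ...further setup... */
            memalloc_noio_restore(noio);            /* normal path */
            return 0;
    }
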
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index b8ac591aaaa7..c546b567f3b5 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c | |||
| @@ -1611,7 +1611,8 @@ static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan, | |||
| 1611 | int l; | 1611 | int l; |
| 1612 | struct dm_buffer *b, *tmp; | 1612 | struct dm_buffer *b, *tmp; |
| 1613 | unsigned long freed = 0; | 1613 | unsigned long freed = 0; |
| 1614 | unsigned long count = nr_to_scan; | 1614 | unsigned long count = c->n_buffers[LIST_CLEAN] + |
| 1615 | c->n_buffers[LIST_DIRTY]; | ||
| 1615 | unsigned long retain_target = get_retain_buffers(c); | 1616 | unsigned long retain_target = get_retain_buffers(c); |
| 1616 | 1617 | ||
| 1617 | for (l = 0; l < LIST_SIZE; l++) { | 1618 | for (l = 0; l < LIST_SIZE; l++) { |
| @@ -1647,8 +1648,11 @@ static unsigned long | |||
| 1647 | dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc) | 1648 | dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc) |
| 1648 | { | 1649 | { |
| 1649 | struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker); | 1650 | struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker); |
| 1651 | unsigned long count = READ_ONCE(c->n_buffers[LIST_CLEAN]) + | ||
| 1652 | READ_ONCE(c->n_buffers[LIST_DIRTY]); | ||
| 1653 | unsigned long retain_target = get_retain_buffers(c); | ||
| 1650 | 1654 | ||
| 1651 | return READ_ONCE(c->n_buffers[LIST_CLEAN]) + READ_ONCE(c->n_buffers[LIST_DIRTY]); | 1655 | return (count < retain_target) ? 0 : (count - retain_target); |
| 1652 | } | 1656 | } |
| 1653 | 1657 | ||
| 1654 | /* | 1658 | /* |
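
The shrinker contract changes on both sides. dm_bufio_shrink_count() now advertises only the buffers beyond the retain target, clamped at zero, and __scan() walks the full clean-plus-dirty population rather than trusting nr_to_scan alone. A worked example of the new count arithmetic:

    #include <stdio.h>

    static unsigned long shrink_count(unsigned long clean, unsigned long dirty,
                                      unsigned long retain_target)
    {
            unsigned long count = clean + dirty;

            /* Never advertise buffers the client intends to retain. */
            return (count < retain_target) ? 0 : (count - retain_target);
    }

    int main(void)
    {
            printf("%lu\n", shrink_count(100, 50, 120)); /* 30 reclaimable */
            printf("%lu\n", shrink_count(40, 20, 120));  /* 0: under target */
            return 0;
    }
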
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index cf23a14f9c6a..47407e43b96a 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c | |||
| @@ -3472,18 +3472,18 @@ static int __init dm_cache_init(void) | |||
| 3472 | { | 3472 | { |
| 3473 | int r; | 3473 | int r; |
| 3474 | 3474 | ||
| 3475 | r = dm_register_target(&cache_target); | ||
| 3476 | if (r) { | ||
| 3477 | DMERR("cache target registration failed: %d", r); | ||
| 3478 | return r; | ||
| 3479 | } | ||
| 3480 | |||
| 3481 | migration_cache = KMEM_CACHE(dm_cache_migration, 0); | 3475 | migration_cache = KMEM_CACHE(dm_cache_migration, 0); |
| 3482 | if (!migration_cache) { | 3476 | if (!migration_cache) { |
| 3483 | dm_unregister_target(&cache_target); | 3477 | dm_unregister_target(&cache_target); |
| 3484 | return -ENOMEM; | 3478 | return -ENOMEM; |
| 3485 | } | 3479 | } |
| 3486 | 3480 | ||
| 3481 | r = dm_register_target(&cache_target); | ||
| 3482 | if (r) { | ||
| 3483 | DMERR("cache target registration failed: %d", r); | ||
| 3484 | return r; | ||
| 3485 | } | ||
| 3486 | |||
| 3487 | return 0; | 3487 | return 0; |
| 3488 | } | 3488 | } |
| 3489 | 3489 | ||
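
This hunk, and the matching dm-mpath, dm-snap and dm-thin hunks below, enforce the same rule: dm_register_target() must be the last step of module init, because a registered target is usable immediately and must find its caches and workqueues already allocated. As reordered here, though, the allocation-failure branch still calls dm_unregister_target() on a target that was never registered, and the registration-failure branch does not free migration_cache. A cleaned-up sketch of the intended shape (my reading, not part of this patch):

    static int __init dm_cache_init(void)
    {
            int r;

            migration_cache = KMEM_CACHE(dm_cache_migration, 0);
            if (!migration_cache)
                    return -ENOMEM;                 /* nothing registered yet */

            r = dm_register_target(&cache_target);  /* last: target is live now */
            if (r) {
                    DMERR("cache target registration failed: %d", r);
                    kmem_cache_destroy(migration_cache);
                    return r;
            }
            return 0;
    }
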
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index c8faa2b85842..f7810cc869ac 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c | |||
| @@ -458,6 +458,38 @@ do { \ | |||
| 458 | } while (0) | 458 | } while (0) |
| 459 | 459 | ||
| 460 | /* | 460 | /* |
| 461 | * Check whether bios must be queued in the device-mapper core rather | ||
| 462 | * than here in the target. | ||
| 463 | * | ||
| 464 | * If MPATHF_QUEUE_IF_NO_PATH and MPATHF_SAVED_QUEUE_IF_NO_PATH hold | ||
| 465 | * the same value then we are not between multipath_presuspend() | ||
| 466 | * and multipath_resume() calls and we have no need to check | ||
| 467 | * for the DMF_NOFLUSH_SUSPENDING flag. | ||
| 468 | */ | ||
| 469 | static bool __must_push_back(struct multipath *m, unsigned long flags) | ||
| 470 | { | ||
| 471 | return ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) != | ||
| 472 | test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &flags)) && | ||
| 473 | dm_noflush_suspending(m->ti)); | ||
| 474 | } | ||
| 475 | |||
| 476 | /* | ||
| 477 | * The following functions use READ_ONCE to get atomic access to | ||
| 478 | * all of m->flags without taking the spinlock | ||
| 479 | */ | ||
| 480 | static bool must_push_back_rq(struct multipath *m) | ||
| 481 | { | ||
| 482 | unsigned long flags = READ_ONCE(m->flags); | ||
| 483 | return test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) || __must_push_back(m, flags); | ||
| 484 | } | ||
| 485 | |||
| 486 | static bool must_push_back_bio(struct multipath *m) | ||
| 487 | { | ||
| 488 | unsigned long flags = READ_ONCE(m->flags); | ||
| 489 | return __must_push_back(m, flags); | ||
| 490 | } | ||
| 491 | |||
| 492 | /* | ||
| 461 | * Map cloned requests (request-based multipath) | 493 | * Map cloned requests (request-based multipath) |
| 462 | */ | 494 | */ |
| 463 | static int multipath_clone_and_map(struct dm_target *ti, struct request *rq, | 495 | static int multipath_clone_and_map(struct dm_target *ti, struct request *rq, |
| @@ -478,7 +510,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq, | |||
| 478 | pgpath = choose_pgpath(m, nr_bytes); | 510 | pgpath = choose_pgpath(m, nr_bytes); |
| 479 | 511 | ||
| 480 | if (!pgpath) { | 512 | if (!pgpath) { |
| 481 | if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) | 513 | if (must_push_back_rq(m)) |
| 482 | return DM_MAPIO_DELAY_REQUEUE; | 514 | return DM_MAPIO_DELAY_REQUEUE; |
| 483 | dm_report_EIO(m); /* Failed */ | 515 | dm_report_EIO(m); /* Failed */ |
| 484 | return DM_MAPIO_KILL; | 516 | return DM_MAPIO_KILL; |
| @@ -553,7 +585,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m | |||
| 553 | } | 585 | } |
| 554 | 586 | ||
| 555 | if (!pgpath) { | 587 | if (!pgpath) { |
| 556 | if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) | 588 | if (must_push_back_bio(m)) |
| 557 | return DM_MAPIO_REQUEUE; | 589 | return DM_MAPIO_REQUEUE; |
| 558 | dm_report_EIO(m); | 590 | dm_report_EIO(m); |
| 559 | return DM_MAPIO_KILL; | 591 | return DM_MAPIO_KILL; |
| @@ -651,8 +683,7 @@ static int queue_if_no_path(struct multipath *m, bool queue_if_no_path, | |||
| 651 | assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags, | 683 | assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags, |
| 652 | (save_old_value && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) || | 684 | (save_old_value && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) || |
| 653 | (!save_old_value && queue_if_no_path)); | 685 | (!save_old_value && queue_if_no_path)); |
| 654 | assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, | 686 | assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path); |
| 655 | queue_if_no_path || dm_noflush_suspending(m->ti)); | ||
| 656 | spin_unlock_irqrestore(&m->lock, flags); | 687 | spin_unlock_irqrestore(&m->lock, flags); |
| 657 | 688 | ||
| 658 | if (!queue_if_no_path) { | 689 | if (!queue_if_no_path) { |
| @@ -1486,7 +1517,7 @@ static int multipath_end_io(struct dm_target *ti, struct request *clone, | |||
| 1486 | fail_path(pgpath); | 1517 | fail_path(pgpath); |
| 1487 | 1518 | ||
| 1488 | if (atomic_read(&m->nr_valid_paths) == 0 && | 1519 | if (atomic_read(&m->nr_valid_paths) == 0 && |
| 1489 | !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) { | 1520 | !must_push_back_rq(m)) { |
| 1490 | if (error == BLK_STS_IOERR) | 1521 | if (error == BLK_STS_IOERR) |
| 1491 | dm_report_EIO(m); | 1522 | dm_report_EIO(m); |
| 1492 | /* complete with the original error */ | 1523 | /* complete with the original error */ |
| @@ -1521,8 +1552,12 @@ static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone, | |||
| 1521 | 1552 | ||
| 1522 | if (atomic_read(&m->nr_valid_paths) == 0 && | 1553 | if (atomic_read(&m->nr_valid_paths) == 0 && |
| 1523 | !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) { | 1554 | !test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) { |
| 1524 | dm_report_EIO(m); | 1555 | if (must_push_back_bio(m)) { |
| 1525 | *error = BLK_STS_IOERR; | 1556 | r = DM_ENDIO_REQUEUE; |
| 1557 | } else { | ||
| 1558 | dm_report_EIO(m); | ||
| 1559 | *error = BLK_STS_IOERR; | ||
| 1560 | } | ||
| 1526 | goto done; | 1561 | goto done; |
| 1527 | } | 1562 | } |
| 1528 | 1563 | ||
| @@ -1957,13 +1992,6 @@ static int __init dm_multipath_init(void) | |||
| 1957 | { | 1992 | { |
| 1958 | int r; | 1993 | int r; |
| 1959 | 1994 | ||
| 1960 | r = dm_register_target(&multipath_target); | ||
| 1961 | if (r < 0) { | ||
| 1962 | DMERR("request-based register failed %d", r); | ||
| 1963 | r = -EINVAL; | ||
| 1964 | goto bad_register_target; | ||
| 1965 | } | ||
| 1966 | |||
| 1967 | kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0); | 1995 | kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0); |
| 1968 | if (!kmultipathd) { | 1996 | if (!kmultipathd) { |
| 1969 | DMERR("failed to create workqueue kmpathd"); | 1997 | DMERR("failed to create workqueue kmpathd"); |
| @@ -1985,13 +2013,20 @@ static int __init dm_multipath_init(void) | |||
| 1985 | goto bad_alloc_kmpath_handlerd; | 2013 | goto bad_alloc_kmpath_handlerd; |
| 1986 | } | 2014 | } |
| 1987 | 2015 | ||
| 2016 | r = dm_register_target(&multipath_target); | ||
| 2017 | if (r < 0) { | ||
| 2018 | DMERR("request-based register failed %d", r); | ||
| 2019 | r = -EINVAL; | ||
| 2020 | goto bad_register_target; | ||
| 2021 | } | ||
| 2022 | |||
| 1988 | return 0; | 2023 | return 0; |
| 1989 | 2024 | ||
| 2025 | bad_register_target: | ||
| 2026 | destroy_workqueue(kmpath_handlerd); | ||
| 1990 | bad_alloc_kmpath_handlerd: | 2027 | bad_alloc_kmpath_handlerd: |
| 1991 | destroy_workqueue(kmultipathd); | 2028 | destroy_workqueue(kmultipathd); |
| 1992 | bad_alloc_kmultipathd: | 2029 | bad_alloc_kmultipathd: |
| 1993 | dm_unregister_target(&multipath_target); | ||
| 1994 | bad_register_target: | ||
| 1995 | return r; | 2030 | return r; |
| 1996 | } | 2031 | } |
| 1997 | 2032 | ||
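
The new helpers let the I/O path decide whether to requeue without taking m->lock. A single READ_ONCE() snapshot of m->flags feeds every test_bit(), so the comparison between MPATHF_QUEUE_IF_NO_PATH and its saved copy cannot be torn by a concurrent update; queue_if_no_path() in turn stops folding dm_noflush_suspending() into the stored bit, since suspension is now evaluated at decision time, and the bio end_io path gains a DM_ENDIO_REQUEUE outcome instead of always failing with EIO. The snapshot idiom, annotated (code as in the hunk):

    static bool must_push_back_rq(struct multipath *m)
    {
            /* One snapshot: every test below sees the same value even
             * if m->flags is updated concurrently. */
            unsigned long flags = READ_ONCE(m->flags);

            return test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) ||
                   ((test_bit(MPATHF_QUEUE_IF_NO_PATH, &flags) !=
                     test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &flags)) &&
                    dm_noflush_suspending(m->ti));
    }
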
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 1113b42e1eda..a0613bd8ed00 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c | |||
| @@ -2411,24 +2411,6 @@ static int __init dm_snapshot_init(void) | |||
| 2411 | return r; | 2411 | return r; |
| 2412 | } | 2412 | } |
| 2413 | 2413 | ||
| 2414 | r = dm_register_target(&snapshot_target); | ||
| 2415 | if (r < 0) { | ||
| 2416 | DMERR("snapshot target register failed %d", r); | ||
| 2417 | goto bad_register_snapshot_target; | ||
| 2418 | } | ||
| 2419 | |||
| 2420 | r = dm_register_target(&origin_target); | ||
| 2421 | if (r < 0) { | ||
| 2422 | DMERR("Origin target register failed %d", r); | ||
| 2423 | goto bad_register_origin_target; | ||
| 2424 | } | ||
| 2425 | |||
| 2426 | r = dm_register_target(&merge_target); | ||
| 2427 | if (r < 0) { | ||
| 2428 | DMERR("Merge target register failed %d", r); | ||
| 2429 | goto bad_register_merge_target; | ||
| 2430 | } | ||
| 2431 | |||
| 2432 | r = init_origin_hash(); | 2414 | r = init_origin_hash(); |
| 2433 | if (r) { | 2415 | if (r) { |
| 2434 | DMERR("init_origin_hash failed."); | 2416 | DMERR("init_origin_hash failed."); |
| @@ -2449,19 +2431,37 @@ static int __init dm_snapshot_init(void) | |||
| 2449 | goto bad_pending_cache; | 2431 | goto bad_pending_cache; |
| 2450 | } | 2432 | } |
| 2451 | 2433 | ||
| 2434 | r = dm_register_target(&snapshot_target); | ||
| 2435 | if (r < 0) { | ||
| 2436 | DMERR("snapshot target register failed %d", r); | ||
| 2437 | goto bad_register_snapshot_target; | ||
| 2438 | } | ||
| 2439 | |||
| 2440 | r = dm_register_target(&origin_target); | ||
| 2441 | if (r < 0) { | ||
| 2442 | DMERR("Origin target register failed %d", r); | ||
| 2443 | goto bad_register_origin_target; | ||
| 2444 | } | ||
| 2445 | |||
| 2446 | r = dm_register_target(&merge_target); | ||
| 2447 | if (r < 0) { | ||
| 2448 | DMERR("Merge target register failed %d", r); | ||
| 2449 | goto bad_register_merge_target; | ||
| 2450 | } | ||
| 2451 | |||
| 2452 | return 0; | 2452 | return 0; |
| 2453 | 2453 | ||
| 2454 | bad_pending_cache: | ||
| 2455 | kmem_cache_destroy(exception_cache); | ||
| 2456 | bad_exception_cache: | ||
| 2457 | exit_origin_hash(); | ||
| 2458 | bad_origin_hash: | ||
| 2459 | dm_unregister_target(&merge_target); | ||
| 2460 | bad_register_merge_target: | 2454 | bad_register_merge_target: |
| 2461 | dm_unregister_target(&origin_target); | 2455 | dm_unregister_target(&origin_target); |
| 2462 | bad_register_origin_target: | 2456 | bad_register_origin_target: |
| 2463 | dm_unregister_target(&snapshot_target); | 2457 | dm_unregister_target(&snapshot_target); |
| 2464 | bad_register_snapshot_target: | 2458 | bad_register_snapshot_target: |
| 2459 | kmem_cache_destroy(pending_cache); | ||
| 2460 | bad_pending_cache: | ||
| 2461 | kmem_cache_destroy(exception_cache); | ||
| 2462 | bad_exception_cache: | ||
| 2463 | exit_origin_hash(); | ||
| 2464 | bad_origin_hash: | ||
| 2465 | dm_exception_store_exit(); | 2465 | dm_exception_store_exit(); |
| 2466 | 2466 | ||
| 2467 | return r; | 2467 | return r; |
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 88130b5d95f9..aaffd0c0ee9a 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c | |||
| @@ -453,14 +453,15 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode, | |||
| 453 | 453 | ||
| 454 | refcount_set(&dd->count, 1); | 454 | refcount_set(&dd->count, 1); |
| 455 | list_add(&dd->list, &t->devices); | 455 | list_add(&dd->list, &t->devices); |
| 456 | goto out; | ||
| 456 | 457 | ||
| 457 | } else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) { | 458 | } else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) { |
| 458 | r = upgrade_mode(dd, mode, t->md); | 459 | r = upgrade_mode(dd, mode, t->md); |
| 459 | if (r) | 460 | if (r) |
| 460 | return r; | 461 | return r; |
| 461 | refcount_inc(&dd->count); | ||
| 462 | } | 462 | } |
| 463 | 463 | refcount_inc(&dd->count); | |
| 464 | out: | ||
| 464 | *result = dd->dm_dev; | 465 | *result = dd->dm_dev; |
| 465 | return 0; | 466 | return 0; |
| 466 | } | 467 | } |
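
The refcount_inc() is hoisted out of the upgrade branch so that a device already open with a sufficient mode, which previously fell through both branches without taking a reference, is counted too; a brand-new entry skips it via the added goto because refcount_set(&dd->count, 1) already accounts for this caller. The three lookup outcomes in outline (mode_needs_upgrade() stands in for the inline mode comparison):

    if (!dd) {                              /* 1: new entry */
            refcount_set(&dd->count, 1);    /*    counts this caller */
            list_add(&dd->list, &t->devices);
            goto out;
    } else if (mode_needs_upgrade(dd, mode)) {      /* 2: upgrade */
            r = upgrade_mode(dd, mode, t->md);
            if (r)
                    return r;
    }
    refcount_inc(&dd->count);               /* cases 2 and 3 (mode ok) */
    out:
    *result = dd->dm_dev;
    return 0;
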
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 89e5dff9b4cf..f91d771fff4b 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c | |||
| @@ -4355,30 +4355,28 @@ static struct target_type thin_target = { | |||
| 4355 | 4355 | ||
| 4356 | static int __init dm_thin_init(void) | 4356 | static int __init dm_thin_init(void) |
| 4357 | { | 4357 | { |
| 4358 | int r; | 4358 | int r = -ENOMEM; |
| 4359 | 4359 | ||
| 4360 | pool_table_init(); | 4360 | pool_table_init(); |
| 4361 | 4361 | ||
| 4362 | _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0); | ||
| 4363 | if (!_new_mapping_cache) | ||
| 4364 | return r; | ||
| 4365 | |||
| 4362 | r = dm_register_target(&thin_target); | 4366 | r = dm_register_target(&thin_target); |
| 4363 | if (r) | 4367 | if (r) |
| 4364 | return r; | 4368 | goto bad_new_mapping_cache; |
| 4365 | 4369 | ||
| 4366 | r = dm_register_target(&pool_target); | 4370 | r = dm_register_target(&pool_target); |
| 4367 | if (r) | 4371 | if (r) |
| 4368 | goto bad_pool_target; | 4372 | goto bad_thin_target; |
| 4369 | |||
| 4370 | r = -ENOMEM; | ||
| 4371 | |||
| 4372 | _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0); | ||
| 4373 | if (!_new_mapping_cache) | ||
| 4374 | goto bad_new_mapping_cache; | ||
| 4375 | 4373 | ||
| 4376 | return 0; | 4374 | return 0; |
| 4377 | 4375 | ||
| 4378 | bad_new_mapping_cache: | 4376 | bad_thin_target: |
| 4379 | dm_unregister_target(&pool_target); | ||
| 4380 | bad_pool_target: | ||
| 4381 | dm_unregister_target(&thin_target); | 4377 | dm_unregister_target(&thin_target); |
| 4378 | bad_new_mapping_cache: | ||
| 4379 | kmem_cache_destroy(_new_mapping_cache); | ||
| 4382 | 4380 | ||
| 4383 | return r; | 4381 | return r; |
| 4384 | } | 4382 | } |
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index 305a7a464d09..4d63ac8a82e0 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c | |||
| @@ -562,7 +562,7 @@ static ssize_t at24_eeprom_write_i2c(struct at24_data *at24, const char *buf, | |||
| 562 | static int at24_read(void *priv, unsigned int off, void *val, size_t count) | 562 | static int at24_read(void *priv, unsigned int off, void *val, size_t count) |
| 563 | { | 563 | { |
| 564 | struct at24_data *at24 = priv; | 564 | struct at24_data *at24 = priv; |
| 565 | struct i2c_client *client; | 565 | struct device *dev = &at24->client[0]->dev; |
| 566 | char *buf = val; | 566 | char *buf = val; |
| 567 | int ret; | 567 | int ret; |
| 568 | 568 | ||
| @@ -572,11 +572,9 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count) | |||
| 572 | if (off + count > at24->chip.byte_len) | 572 | if (off + count > at24->chip.byte_len) |
| 573 | return -EINVAL; | 573 | return -EINVAL; |
| 574 | 574 | ||
| 575 | client = at24_translate_offset(at24, &off); | 575 | ret = pm_runtime_get_sync(dev); |
| 576 | |||
| 577 | ret = pm_runtime_get_sync(&client->dev); | ||
| 578 | if (ret < 0) { | 576 | if (ret < 0) { |
| 579 | pm_runtime_put_noidle(&client->dev); | 577 | pm_runtime_put_noidle(dev); |
| 580 | return ret; | 578 | return ret; |
| 581 | } | 579 | } |
| 582 | 580 | ||
| @@ -592,7 +590,7 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count) | |||
| 592 | status = at24->read_func(at24, buf, off, count); | 590 | status = at24->read_func(at24, buf, off, count); |
| 593 | if (status < 0) { | 591 | if (status < 0) { |
| 594 | mutex_unlock(&at24->lock); | 592 | mutex_unlock(&at24->lock); |
| 595 | pm_runtime_put(&client->dev); | 593 | pm_runtime_put(dev); |
| 596 | return status; | 594 | return status; |
| 597 | } | 595 | } |
| 598 | buf += status; | 596 | buf += status; |
| @@ -602,7 +600,7 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count) | |||
| 602 | 600 | ||
| 603 | mutex_unlock(&at24->lock); | 601 | mutex_unlock(&at24->lock); |
| 604 | 602 | ||
| 605 | pm_runtime_put(&client->dev); | 603 | pm_runtime_put(dev); |
| 606 | 604 | ||
| 607 | return 0; | 605 | return 0; |
| 608 | } | 606 | } |
| @@ -610,7 +608,7 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count) | |||
| 610 | static int at24_write(void *priv, unsigned int off, void *val, size_t count) | 608 | static int at24_write(void *priv, unsigned int off, void *val, size_t count) |
| 611 | { | 609 | { |
| 612 | struct at24_data *at24 = priv; | 610 | struct at24_data *at24 = priv; |
| 613 | struct i2c_client *client; | 611 | struct device *dev = &at24->client[0]->dev; |
| 614 | char *buf = val; | 612 | char *buf = val; |
| 615 | int ret; | 613 | int ret; |
| 616 | 614 | ||
| @@ -620,11 +618,9 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count) | |||
| 620 | if (off + count > at24->chip.byte_len) | 618 | if (off + count > at24->chip.byte_len) |
| 621 | return -EINVAL; | 619 | return -EINVAL; |
| 622 | 620 | ||
| 623 | client = at24_translate_offset(at24, &off); | 621 | ret = pm_runtime_get_sync(dev); |
| 624 | |||
| 625 | ret = pm_runtime_get_sync(&client->dev); | ||
| 626 | if (ret < 0) { | 622 | if (ret < 0) { |
| 627 | pm_runtime_put_noidle(&client->dev); | 623 | pm_runtime_put_noidle(dev); |
| 628 | return ret; | 624 | return ret; |
| 629 | } | 625 | } |
| 630 | 626 | ||
| @@ -640,7 +636,7 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count) | |||
| 640 | status = at24->write_func(at24, buf, off, count); | 636 | status = at24->write_func(at24, buf, off, count); |
| 641 | if (status < 0) { | 637 | if (status < 0) { |
| 642 | mutex_unlock(&at24->lock); | 638 | mutex_unlock(&at24->lock); |
| 643 | pm_runtime_put(&client->dev); | 639 | pm_runtime_put(dev); |
| 644 | return status; | 640 | return status; |
| 645 | } | 641 | } |
| 646 | buf += status; | 642 | buf += status; |
| @@ -650,7 +646,7 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count) | |||
| 650 | 646 | ||
| 651 | mutex_unlock(&at24->lock); | 647 | mutex_unlock(&at24->lock); |
| 652 | 648 | ||
| 653 | pm_runtime_put(&client->dev); | 649 | pm_runtime_put(dev); |
| 654 | 650 | ||
| 655 | return 0; | 651 | return 0; |
| 656 | } | 652 | } |
| @@ -880,7 +876,7 @@ static int at24_probe(struct i2c_client *client, const struct i2c_device_id *id) | |||
| 880 | at24->nvmem_config.reg_read = at24_read; | 876 | at24->nvmem_config.reg_read = at24_read; |
| 881 | at24->nvmem_config.reg_write = at24_write; | 877 | at24->nvmem_config.reg_write = at24_write; |
| 882 | at24->nvmem_config.priv = at24; | 878 | at24->nvmem_config.priv = at24; |
| 883 | at24->nvmem_config.stride = 4; | 879 | at24->nvmem_config.stride = 1; |
| 884 | at24->nvmem_config.word_size = 1; | 880 | at24->nvmem_config.word_size = 1; |
| 885 | at24->nvmem_config.size = chip.byte_len; | 881 | at24->nvmem_config.size = chip.byte_len; |
| 886 | 882 | ||
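
Two independent fixes share this file. The runtime-PM calls now always target the base client's device (at24->client[0]) rather than whichever dummy client at24_translate_offset() selected, since only the base device is runtime-PM enabled; and the nvmem stride drops from 4 to 1, because an EEPROM has no access-alignment requirement and a stride of 4 wrongly rejected byte-granular offsets. A standalone model of the stride rule, simplified from the nvmem core's alignment check:

    #include <stdio.h>

    /* Simplified: reads whose offset is not stride-aligned are rejected. */
    static int offset_allowed(unsigned int off, unsigned int stride)
    {
            return (off % stride) == 0;
    }

    int main(void)
    {
            printf("off 6, stride 4: %s\n", offset_allowed(6, 4) ? "ok" : "rejected");
            printf("off 6, stride 1: %s\n", offset_allowed(6, 1) ? "ok" : "rejected");
            return 0;
    }
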
diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h index f06cd91964ce..79a5b985ccf5 100644 --- a/drivers/mmc/core/card.h +++ b/drivers/mmc/core/card.h | |||
| @@ -75,9 +75,11 @@ struct mmc_fixup { | |||
| 75 | #define EXT_CSD_REV_ANY (-1u) | 75 | #define EXT_CSD_REV_ANY (-1u) |
| 76 | 76 | ||
| 77 | #define CID_MANFID_SANDISK 0x2 | 77 | #define CID_MANFID_SANDISK 0x2 |
| 78 | #define CID_MANFID_ATP 0x9 | ||
| 78 | #define CID_MANFID_TOSHIBA 0x11 | 79 | #define CID_MANFID_TOSHIBA 0x11 |
| 79 | #define CID_MANFID_MICRON 0x13 | 80 | #define CID_MANFID_MICRON 0x13 |
| 80 | #define CID_MANFID_SAMSUNG 0x15 | 81 | #define CID_MANFID_SAMSUNG 0x15 |
| 82 | #define CID_MANFID_APACER 0x27 | ||
| 81 | #define CID_MANFID_KINGSTON 0x70 | 83 | #define CID_MANFID_KINGSTON 0x70 |
| 82 | #define CID_MANFID_HYNIX 0x90 | 84 | #define CID_MANFID_HYNIX 0x90 |
| 83 | 85 | ||
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index d209fb466979..208a762b87ef 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c | |||
| @@ -1290,7 +1290,7 @@ out_err: | |||
| 1290 | 1290 | ||
| 1291 | static void mmc_select_driver_type(struct mmc_card *card) | 1291 | static void mmc_select_driver_type(struct mmc_card *card) |
| 1292 | { | 1292 | { |
| 1293 | int card_drv_type, drive_strength, drv_type; | 1293 | int card_drv_type, drive_strength, drv_type = 0; |
| 1294 | int fixed_drv_type = card->host->fixed_drv_type; | 1294 | int fixed_drv_type = card->host->fixed_drv_type; |
| 1295 | 1295 | ||
| 1296 | card_drv_type = card->ext_csd.raw_driver_strength | | 1296 | card_drv_type = card->ext_csd.raw_driver_strength | |
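
drv_type is an out-parameter that is only written on some paths; when the host supplies a fixed_drv_type (read in the context line above), the drive-strength callback that would set it is never invoked, so the later "if (drv_type)" test could act on uninitialized stack data. Initializing to 0, the "leave the driver type unchanged" value, makes every path safe. In outline, with stand-in helper names:

    int drv_type = 0;       /* 0 = leave the driver type unchanged */

    if (fixed_drv_type >= 0)
            drive_strength = pick_fixed(card_drv_type, fixed_drv_type);
    else
            drive_strength = select_drive_strength(card, max_dtr,
                                                   card_drv_type, &drv_type);

    if (drv_type)           /* safe: still 0 unless the callback chose one */
            set_driver_type(host, drv_type);
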
diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h index f664e9cbc9f8..75d317623852 100644 --- a/drivers/mmc/core/quirks.h +++ b/drivers/mmc/core/quirks.h | |||
| @@ -53,6 +53,14 @@ static const struct mmc_fixup mmc_blk_fixups[] = { | |||
| 53 | MMC_QUIRK_BLK_NO_CMD23), | 53 | MMC_QUIRK_BLK_NO_CMD23), |
| 54 | 54 | ||
| 55 | /* | 55 | /* |
| 56 | * Some SD cards lock up while using CMD23 multiblock transfers. | ||
| 57 | */ | ||
| 58 | MMC_FIXUP("AF SD", CID_MANFID_ATP, CID_OEMID_ANY, add_quirk_sd, | ||
| 59 | MMC_QUIRK_BLK_NO_CMD23), | ||
| 60 | MMC_FIXUP("APUSD", CID_MANFID_APACER, 0x5048, add_quirk_sd, | ||
| 61 | MMC_QUIRK_BLK_NO_CMD23), | ||
| 62 | |||
| 63 | /* | ||
| 56 | * Some MMC cards need longer data read timeout than indicated in CSD. | 64 | * Some MMC cards need longer data read timeout than indicated in CSD. |
| 57 | */ | 65 | */ |
| 58 | MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc, | 66 | MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc, |
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index a7801f6668a5..6315774d72b3 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c | |||
| @@ -338,6 +338,7 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, | |||
| 338 | cmode = MV88E6XXX_PORT_STS_CMODE_2500BASEX; | 338 | cmode = MV88E6XXX_PORT_STS_CMODE_2500BASEX; |
| 339 | break; | 339 | break; |
| 340 | case PHY_INTERFACE_MODE_XGMII: | 340 | case PHY_INTERFACE_MODE_XGMII: |
| 341 | case PHY_INTERFACE_MODE_XAUI: | ||
| 341 | cmode = MV88E6XXX_PORT_STS_CMODE_XAUI; | 342 | cmode = MV88E6XXX_PORT_STS_CMODE_XAUI; |
| 342 | break; | 343 | break; |
| 343 | case PHY_INTERFACE_MODE_RXAUI: | 344 | case PHY_INTERFACE_MODE_RXAUI: |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h index 57e796870595..105fdb958cef 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h | |||
| @@ -50,7 +50,7 @@ | |||
| 50 | #define AQ_CFG_PCI_FUNC_MSIX_IRQS 9U | 50 | #define AQ_CFG_PCI_FUNC_MSIX_IRQS 9U |
| 51 | #define AQ_CFG_PCI_FUNC_PORTS 2U | 51 | #define AQ_CFG_PCI_FUNC_PORTS 2U |
| 52 | 52 | ||
| 53 | #define AQ_CFG_SERVICE_TIMER_INTERVAL (2 * HZ) | 53 | #define AQ_CFG_SERVICE_TIMER_INTERVAL (1 * HZ) |
| 54 | #define AQ_CFG_POLLING_TIMER_INTERVAL ((unsigned int)(2 * HZ)) | 54 | #define AQ_CFG_POLLING_TIMER_INTERVAL ((unsigned int)(2 * HZ)) |
| 55 | 55 | ||
| 56 | #define AQ_CFG_SKB_FRAGS_MAX 32U | 56 | #define AQ_CFG_SKB_FRAGS_MAX 32U |
| @@ -80,6 +80,7 @@ | |||
| 80 | #define AQ_CFG_DRV_VERSION __stringify(NIC_MAJOR_DRIVER_VERSION)"."\ | 80 | #define AQ_CFG_DRV_VERSION __stringify(NIC_MAJOR_DRIVER_VERSION)"."\ |
| 81 | __stringify(NIC_MINOR_DRIVER_VERSION)"."\ | 81 | __stringify(NIC_MINOR_DRIVER_VERSION)"."\ |
| 82 | __stringify(NIC_BUILD_DRIVER_VERSION)"."\ | 82 | __stringify(NIC_BUILD_DRIVER_VERSION)"."\ |
| 83 | __stringify(NIC_REVISION_DRIVER_VERSION) | 83 | __stringify(NIC_REVISION_DRIVER_VERSION) \ |
| 84 | AQ_CFG_DRV_VERSION_SUFFIX | ||
| 84 | 85 | ||
| 85 | #endif /* AQ_CFG_H */ | 86 | #endif /* AQ_CFG_H */ |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index 70efb7467bf3..f2d8063a2cef 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c | |||
| @@ -66,14 +66,14 @@ static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = { | |||
| 66 | "OutUCast", | 66 | "OutUCast", |
| 67 | "OutMCast", | 67 | "OutMCast", |
| 68 | "OutBCast", | 68 | "OutBCast", |
| 69 | "InUCastOctects", | 69 | "InUCastOctets", |
| 70 | "OutUCastOctects", | 70 | "OutUCastOctets", |
| 71 | "InMCastOctects", | 71 | "InMCastOctets", |
| 72 | "OutMCastOctects", | 72 | "OutMCastOctets", |
| 73 | "InBCastOctects", | 73 | "InBCastOctets", |
| 74 | "OutBCastOctects", | 74 | "OutBCastOctets", |
| 75 | "InOctects", | 75 | "InOctets", |
| 76 | "OutOctects", | 76 | "OutOctets", |
| 77 | "InPacketsDma", | 77 | "InPacketsDma", |
| 78 | "OutPacketsDma", | 78 | "OutPacketsDma", |
| 79 | "InOctetsDma", | 79 | "InOctetsDma", |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h index 0207927dc8a6..b3825de6cdfb 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h | |||
| @@ -46,6 +46,28 @@ struct aq_hw_link_status_s { | |||
| 46 | unsigned int mbps; | 46 | unsigned int mbps; |
| 47 | }; | 47 | }; |
| 48 | 48 | ||
| 49 | struct aq_stats_s { | ||
| 50 | u64 uprc; | ||
| 51 | u64 mprc; | ||
| 52 | u64 bprc; | ||
| 53 | u64 erpt; | ||
| 54 | u64 uptc; | ||
| 55 | u64 mptc; | ||
| 56 | u64 bptc; | ||
| 57 | u64 erpr; | ||
| 58 | u64 mbtc; | ||
| 59 | u64 bbtc; | ||
| 60 | u64 mbrc; | ||
| 61 | u64 bbrc; | ||
| 62 | u64 ubrc; | ||
| 63 | u64 ubtc; | ||
| 64 | u64 dpc; | ||
| 65 | u64 dma_pkt_rc; | ||
| 66 | u64 dma_pkt_tc; | ||
| 67 | u64 dma_oct_rc; | ||
| 68 | u64 dma_oct_tc; | ||
| 69 | }; | ||
| 70 | |||
| 49 | #define AQ_HW_IRQ_INVALID 0U | 71 | #define AQ_HW_IRQ_INVALID 0U |
| 50 | #define AQ_HW_IRQ_LEGACY 1U | 72 | #define AQ_HW_IRQ_LEGACY 1U |
| 51 | #define AQ_HW_IRQ_MSI 2U | 73 | #define AQ_HW_IRQ_MSI 2U |
| @@ -85,7 +107,9 @@ struct aq_hw_ops { | |||
| 85 | void (*destroy)(struct aq_hw_s *self); | 107 | void (*destroy)(struct aq_hw_s *self); |
| 86 | 108 | ||
| 87 | int (*get_hw_caps)(struct aq_hw_s *self, | 109 | int (*get_hw_caps)(struct aq_hw_s *self, |
| 88 | struct aq_hw_caps_s *aq_hw_caps); | 110 | struct aq_hw_caps_s *aq_hw_caps, |
| 111 | unsigned short device, | ||
| 112 | unsigned short subsystem_device); | ||
| 89 | 113 | ||
| 90 | int (*hw_ring_tx_xmit)(struct aq_hw_s *self, struct aq_ring_s *aq_ring, | 114 | int (*hw_ring_tx_xmit)(struct aq_hw_s *self, struct aq_ring_s *aq_ring, |
| 91 | unsigned int frags); | 115 | unsigned int frags); |
| @@ -164,8 +188,7 @@ struct aq_hw_ops { | |||
| 164 | 188 | ||
| 165 | int (*hw_update_stats)(struct aq_hw_s *self); | 189 | int (*hw_update_stats)(struct aq_hw_s *self); |
| 166 | 190 | ||
| 167 | int (*hw_get_hw_stats)(struct aq_hw_s *self, u64 *data, | 191 | struct aq_stats_s *(*hw_get_hw_stats)(struct aq_hw_s *self); |
| 168 | unsigned int *p_count); | ||
| 169 | 192 | ||
| 170 | int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version); | 193 | int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version); |
| 171 | 194 | ||
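
The ops refactor replaces the old fill-a-caller-buffer contract with direct access to the driver-owned counter block, and get_hw_caps() now receives the PCI device and subsystem IDs so the capability mask can depend on the exact board (used by the hw_atl_a0/b0 hunks below). The two signatures side by side, as taken from this hunk:

    /* Old contract: copy counters into the caller's array, report count. */
    int (*hw_get_hw_stats)(struct aq_hw_s *self, u64 *data,
                           unsigned int *p_count);

    /* New contract: return the driver-owned block; callers pick the
     * fields they need, and NULL signals "no statistics available". */
    struct aq_stats_s *(*hw_get_hw_stats)(struct aq_hw_s *self);
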
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 78dfb2ab78ce..75a894a9251c 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c | |||
| @@ -37,6 +37,8 @@ static unsigned int aq_itr_rx; | |||
| 37 | module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644); | 37 | module_param_named(aq_itr_rx, aq_itr_rx, uint, 0644); |
| 38 | MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate"); | 38 | MODULE_PARM_DESC(aq_itr_rx, "RX interrupt throttle rate"); |
| 39 | 39 | ||
| 40 | static void aq_nic_update_ndev_stats(struct aq_nic_s *self); | ||
| 41 | |||
| 40 | static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues) | 42 | static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues) |
| 41 | { | 43 | { |
| 42 | struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg; | 44 | struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg; |
| @@ -166,11 +168,8 @@ static int aq_nic_update_link_status(struct aq_nic_s *self) | |||
| 166 | static void aq_nic_service_timer_cb(struct timer_list *t) | 168 | static void aq_nic_service_timer_cb(struct timer_list *t) |
| 167 | { | 169 | { |
| 168 | struct aq_nic_s *self = from_timer(self, t, service_timer); | 170 | struct aq_nic_s *self = from_timer(self, t, service_timer); |
| 169 | struct net_device *ndev = aq_nic_get_ndev(self); | 171 | int ctimer = AQ_CFG_SERVICE_TIMER_INTERVAL; |
| 170 | int err = 0; | 172 | int err = 0; |
| 171 | unsigned int i = 0U; | ||
| 172 | struct aq_ring_stats_rx_s stats_rx; | ||
| 173 | struct aq_ring_stats_tx_s stats_tx; | ||
| 174 | 173 | ||
| 175 | if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY)) | 174 | if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY)) |
| 176 | goto err_exit; | 175 | goto err_exit; |
| @@ -182,23 +181,14 @@ static void aq_nic_service_timer_cb(struct timer_list *t) | |||
| 182 | if (self->aq_hw_ops.hw_update_stats) | 181 | if (self->aq_hw_ops.hw_update_stats) |
| 183 | self->aq_hw_ops.hw_update_stats(self->aq_hw); | 182 | self->aq_hw_ops.hw_update_stats(self->aq_hw); |
| 184 | 183 | ||
| 185 | memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s)); | 184 | aq_nic_update_ndev_stats(self); |
| 186 | memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s)); | ||
| 187 | for (i = AQ_DIMOF(self->aq_vec); i--;) { | ||
| 188 | if (self->aq_vec[i]) | ||
| 189 | aq_vec_add_stats(self->aq_vec[i], &stats_rx, &stats_tx); | ||
| 190 | } | ||
| 191 | 185 | ||
| 192 | ndev->stats.rx_packets = stats_rx.packets; | 186 | /* If no link - use faster timer rate to detect link up asap */ |
| 193 | ndev->stats.rx_bytes = stats_rx.bytes; | 187 | if (!netif_carrier_ok(self->ndev)) |
| 194 | ndev->stats.rx_errors = stats_rx.errors; | 188 | ctimer = max(ctimer / 2, 1); |
| 195 | ndev->stats.tx_packets = stats_tx.packets; | ||
| 196 | ndev->stats.tx_bytes = stats_tx.bytes; | ||
| 197 | ndev->stats.tx_errors = stats_tx.errors; | ||
| 198 | 189 | ||
| 199 | err_exit: | 190 | err_exit: |
| 200 | mod_timer(&self->service_timer, | 191 | mod_timer(&self->service_timer, jiffies + ctimer); |
| 201 | jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL); | ||
| 202 | } | 192 | } |
| 203 | 193 | ||
| 204 | static void aq_nic_polling_timer_cb(struct timer_list *t) | 194 | static void aq_nic_polling_timer_cb(struct timer_list *t) |
| @@ -222,7 +212,7 @@ static struct net_device *aq_nic_ndev_alloc(void) | |||
| 222 | 212 | ||
| 223 | struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, | 213 | struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, |
| 224 | const struct ethtool_ops *et_ops, | 214 | const struct ethtool_ops *et_ops, |
| 225 | struct device *dev, | 215 | struct pci_dev *pdev, |
| 226 | struct aq_pci_func_s *aq_pci_func, | 216 | struct aq_pci_func_s *aq_pci_func, |
| 227 | unsigned int port, | 217 | unsigned int port, |
| 228 | const struct aq_hw_ops *aq_hw_ops) | 218 | const struct aq_hw_ops *aq_hw_ops) |
| @@ -242,7 +232,7 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, | |||
| 242 | ndev->netdev_ops = ndev_ops; | 232 | ndev->netdev_ops = ndev_ops; |
| 243 | ndev->ethtool_ops = et_ops; | 233 | ndev->ethtool_ops = et_ops; |
| 244 | 234 | ||
| 245 | SET_NETDEV_DEV(ndev, dev); | 235 | SET_NETDEV_DEV(ndev, &pdev->dev); |
| 246 | 236 | ||
| 247 | ndev->if_port = port; | 237 | ndev->if_port = port; |
| 248 | self->ndev = ndev; | 238 | self->ndev = ndev; |
| @@ -254,7 +244,8 @@ struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, | |||
| 254 | 244 | ||
| 255 | self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port, | 245 | self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port, |
| 256 | &self->aq_hw_ops); | 246 | &self->aq_hw_ops); |
| 257 | err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps); | 247 | err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps, |
| 248 | pdev->device, pdev->subsystem_device); | ||
| 258 | if (err < 0) | 249 | if (err < 0) |
| 259 | goto err_exit; | 250 | goto err_exit; |
| 260 | 251 | ||
| @@ -749,16 +740,40 @@ int aq_nic_get_regs_count(struct aq_nic_s *self) | |||
| 749 | 740 | ||
| 750 | void aq_nic_get_stats(struct aq_nic_s *self, u64 *data) | 741 | void aq_nic_get_stats(struct aq_nic_s *self, u64 *data) |
| 751 | { | 742 | { |
| 752 | struct aq_vec_s *aq_vec = NULL; | ||
| 753 | unsigned int i = 0U; | 743 | unsigned int i = 0U; |
| 754 | unsigned int count = 0U; | 744 | unsigned int count = 0U; |
| 755 | int err = 0; | 745 | struct aq_vec_s *aq_vec = NULL; |
| 746 | struct aq_stats_s *stats = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw); | ||
| 756 | 747 | ||
| 757 | err = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw, data, &count); | 748 | if (!stats) |
| 758 | if (err < 0) | ||
| 759 | goto err_exit; | 749 | goto err_exit; |
| 760 | 750 | ||
| 761 | data += count; | 751 | data[i] = stats->uprc + stats->mprc + stats->bprc; |
| 752 | data[++i] = stats->uprc; | ||
| 753 | data[++i] = stats->mprc; | ||
| 754 | data[++i] = stats->bprc; | ||
| 755 | data[++i] = stats->erpt; | ||
| 756 | data[++i] = stats->uptc + stats->mptc + stats->bptc; | ||
| 757 | data[++i] = stats->uptc; | ||
| 758 | data[++i] = stats->mptc; | ||
| 759 | data[++i] = stats->bptc; | ||
| 760 | data[++i] = stats->ubrc; | ||
| 761 | data[++i] = stats->ubtc; | ||
| 762 | data[++i] = stats->mbrc; | ||
| 763 | data[++i] = stats->mbtc; | ||
| 764 | data[++i] = stats->bbrc; | ||
| 765 | data[++i] = stats->bbtc; | ||
| 766 | data[++i] = stats->ubrc + stats->mbrc + stats->bbrc; | ||
| 767 | data[++i] = stats->ubtc + stats->mbtc + stats->bbtc; | ||
| 768 | data[++i] = stats->dma_pkt_rc; | ||
| 769 | data[++i] = stats->dma_pkt_tc; | ||
| 770 | data[++i] = stats->dma_oct_rc; | ||
| 771 | data[++i] = stats->dma_oct_tc; | ||
| 772 | data[++i] = stats->dpc; | ||
| 773 | |||
| 774 | i++; | ||
| 775 | |||
| 776 | data += i; | ||
| 762 | count = 0U; | 777 | count = 0U; |
| 763 | 778 | ||
| 764 | for (i = 0U, aq_vec = self->aq_vec[0]; | 779 | for (i = 0U, aq_vec = self->aq_vec[0]; |
| @@ -768,7 +783,20 @@ void aq_nic_get_stats(struct aq_nic_s *self, u64 *data) | |||
| 768 | } | 783 | } |
| 769 | 784 | ||
| 770 | err_exit:; | 785 | err_exit:; |
| 771 | (void)err; | 786 | } |
| 787 | |||
| 788 | static void aq_nic_update_ndev_stats(struct aq_nic_s *self) | ||
| 789 | { | ||
| 790 | struct net_device *ndev = self->ndev; | ||
| 791 | struct aq_stats_s *stats = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw); | ||
| 792 | |||
| 793 | ndev->stats.rx_packets = stats->uprc + stats->mprc + stats->bprc; | ||
| 794 | ndev->stats.rx_bytes = stats->ubrc + stats->mbrc + stats->bbrc; | ||
| 795 | ndev->stats.rx_errors = stats->erpr; | ||
| 796 | ndev->stats.tx_packets = stats->uptc + stats->mptc + stats->bptc; | ||
| 797 | ndev->stats.tx_bytes = stats->ubtc + stats->mbtc + stats->bbtc; | ||
| 798 | ndev->stats.tx_errors = stats->erpt; | ||
| 799 | ndev->stats.multicast = stats->mprc; | ||
| 772 | } | 800 | } |
| 773 | 801 | ||
| 774 | void aq_nic_get_link_ksettings(struct aq_nic_s *self, | 802 | void aq_nic_get_link_ksettings(struct aq_nic_s *self, |
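
Two behavioral points in this file. The service timer now runs at the shortened AQ_CFG_SERVICE_TIMER_INTERVAL and halves its period while the carrier is down, so link-up is detected sooner. And aq_nic_get_stats() fills the ethtool array positionally with the data[++i] idiom, which must stay in lockstep with aq_ethtool_stat_names (whose "Octects" spellings become "Octets" in the same series); the bare i++ after the final store advances i to the number of slots written before data += i. A minimal standalone demonstration of that idiom:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long data[3] = {0};
            unsigned int i = 0;

            data[i]   = 10;     /* slot 0: first store uses i as-is */
            data[++i] = 11;     /* slot 1: pre-increment keeps i in step */
            data[++i] = 12;     /* slot 2 */
            i++;                /* i == 3: count of slots written */

            printf("wrote %u slots, last value %llu\n", i, data[i - 1]);
            return 0;
    }
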
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h index 4309983acdd6..3c9f8db03d5f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h | |||
| @@ -71,7 +71,7 @@ struct aq_nic_cfg_s { | |||
| 71 | 71 | ||
| 72 | struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, | 72 | struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops, |
| 73 | const struct ethtool_ops *et_ops, | 73 | const struct ethtool_ops *et_ops, |
| 74 | struct device *dev, | 74 | struct pci_dev *pdev, |
| 75 | struct aq_pci_func_s *aq_pci_func, | 75 | struct aq_pci_func_s *aq_pci_func, |
| 76 | unsigned int port, | 76 | unsigned int port, |
| 77 | const struct aq_hw_ops *aq_hw_ops); | 77 | const struct aq_hw_ops *aq_hw_ops); |
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index cadaa646c89f..58c29d04b186 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c | |||
| @@ -51,7 +51,8 @@ struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops, | |||
| 51 | pci_set_drvdata(pdev, self); | 51 | pci_set_drvdata(pdev, self); |
| 52 | self->pdev = pdev; | 52 | self->pdev = pdev; |
| 53 | 53 | ||
| 54 | err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps); | 54 | err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps, pdev->device, |
| 55 | pdev->subsystem_device); | ||
| 55 | if (err < 0) | 56 | if (err < 0) |
| 56 | goto err_exit; | 57 | goto err_exit; |
| 57 | 58 | ||
| @@ -59,7 +60,7 @@ struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops, | |||
| 59 | 60 | ||
| 60 | for (port = 0; port < self->ports; ++port) { | 61 | for (port = 0; port < self->ports; ++port) { |
| 61 | struct aq_nic_s *aq_nic = aq_nic_alloc_cold(ndev_ops, eth_ops, | 62 | struct aq_nic_s *aq_nic = aq_nic_alloc_cold(ndev_ops, eth_ops, |
| 62 | &pdev->dev, self, | 63 | pdev, self, |
| 63 | port, aq_hw_ops); | 64 | port, aq_hw_ops); |
| 64 | 65 | ||
| 65 | if (!aq_nic) { | 66 | if (!aq_nic) { |
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c index 07b3c49a16a4..f18dce14c93c 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c | |||
| @@ -18,9 +18,20 @@ | |||
| 18 | #include "hw_atl_a0_internal.h" | 18 | #include "hw_atl_a0_internal.h" |
| 19 | 19 | ||
| 20 | static int hw_atl_a0_get_hw_caps(struct aq_hw_s *self, | 20 | static int hw_atl_a0_get_hw_caps(struct aq_hw_s *self, |
| 21 | struct aq_hw_caps_s *aq_hw_caps) | 21 | struct aq_hw_caps_s *aq_hw_caps, |
| 22 | unsigned short device, | ||
| 23 | unsigned short subsystem_device) | ||
| 22 | { | 24 | { |
| 23 | memcpy(aq_hw_caps, &hw_atl_a0_hw_caps_, sizeof(*aq_hw_caps)); | 25 | memcpy(aq_hw_caps, &hw_atl_a0_hw_caps_, sizeof(*aq_hw_caps)); |
| 26 | |||
| 27 | if (device == HW_ATL_DEVICE_ID_D108 && subsystem_device == 0x0001) | ||
| 28 | aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_10G; | ||
| 29 | |||
| 30 | if (device == HW_ATL_DEVICE_ID_D109 && subsystem_device == 0x0001) { | ||
| 31 | aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_10G; | ||
| 32 | aq_hw_caps->link_speed_msk &= ~HW_ATL_A0_RATE_5G; | ||
| 33 | } | ||
| 34 | |||
| 24 | return 0; | 35 | return 0; |
| 25 | } | 36 | } |
| 26 | 37 | ||
| @@ -333,6 +344,10 @@ static int hw_atl_a0_hw_init(struct aq_hw_s *self, | |||
| 333 | hw_atl_a0_hw_rss_set(self, &aq_nic_cfg->aq_rss); | 344 | hw_atl_a0_hw_rss_set(self, &aq_nic_cfg->aq_rss); |
| 334 | hw_atl_a0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss); | 345 | hw_atl_a0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss); |
| 335 | 346 | ||
| 347 | /* Reset link status and read out initial hardware counters */ | ||
| 348 | self->aq_link_status.mbps = 0; | ||
| 349 | hw_atl_utils_update_stats(self); | ||
| 350 | |||
| 336 | err = aq_hw_err_from_flags(self); | 351 | err = aq_hw_err_from_flags(self); |
| 337 | if (err < 0) | 352 | if (err < 0) |
| 338 | goto err_exit; | 353 | goto err_exit; |
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index ec68c20efcbd..e4a22ce7bf09 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c | |||
| @@ -16,11 +16,23 @@ | |||
| 16 | #include "hw_atl_utils.h" | 16 | #include "hw_atl_utils.h" |
| 17 | #include "hw_atl_llh.h" | 17 | #include "hw_atl_llh.h" |
| 18 | #include "hw_atl_b0_internal.h" | 18 | #include "hw_atl_b0_internal.h" |
| 19 | #include "hw_atl_llh_internal.h" | ||
| 19 | 20 | ||
| 20 | static int hw_atl_b0_get_hw_caps(struct aq_hw_s *self, | 21 | static int hw_atl_b0_get_hw_caps(struct aq_hw_s *self, |
| 21 | struct aq_hw_caps_s *aq_hw_caps) | 22 | struct aq_hw_caps_s *aq_hw_caps, |
| 23 | unsigned short device, | ||
| 24 | unsigned short subsystem_device) | ||
| 22 | { | 25 | { |
| 23 | memcpy(aq_hw_caps, &hw_atl_b0_hw_caps_, sizeof(*aq_hw_caps)); | 26 | memcpy(aq_hw_caps, &hw_atl_b0_hw_caps_, sizeof(*aq_hw_caps)); |
| 27 | |||
| 28 | if (device == HW_ATL_DEVICE_ID_D108 && subsystem_device == 0x0001) | ||
| 29 | aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_10G; | ||
| 30 | |||
| 31 | if (device == HW_ATL_DEVICE_ID_D109 && subsystem_device == 0x0001) { | ||
| 32 | aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_10G; | ||
| 33 | aq_hw_caps->link_speed_msk &= ~HW_ATL_B0_RATE_5G; | ||
| 34 | } | ||
| 35 | |||
| 24 | return 0; | 36 | return 0; |
| 25 | } | 37 | } |
| 26 | 38 | ||
| @@ -357,6 +369,7 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self, | |||
| 357 | }; | 369 | }; |
| 358 | 370 | ||
| 359 | int err = 0; | 371 | int err = 0; |
| 372 | u32 val; | ||
| 360 | 373 | ||
| 361 | self->aq_nic_cfg = aq_nic_cfg; | 374 | self->aq_nic_cfg = aq_nic_cfg; |
| 362 | 375 | ||
| @@ -374,6 +387,20 @@ static int hw_atl_b0_hw_init(struct aq_hw_s *self, | |||
| 374 | hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss); | 387 | hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss); |
| 375 | hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss); | 388 | hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss); |
| 376 | 389 | ||
| 390 | /* Force limit MRRS on RDM/TDM to 2K */ | ||
| 391 | val = aq_hw_read_reg(self, pci_reg_control6_adr); | ||
| 392 | aq_hw_write_reg(self, pci_reg_control6_adr, (val & ~0x707) | 0x404); | ||
| 393 | |||
| 394 | /* TX DMA total request limit. B0 hardware is not capable of | ||
| 395 | * handling more than (8K-MRRS) bytes of incoming DMA data. | ||
| 396 | * The value 24 is in units of 256 bytes. | ||
| 397 | */ | ||
| 398 | aq_hw_write_reg(self, tx_dma_total_req_limit_adr, 24); | ||
| 399 | |||
| 400 | /* Reset link status and read out initial hardware counters */ | ||
| 401 | self->aq_link_status.mbps = 0; | ||
| 402 | hw_atl_utils_update_stats(self); | ||
| 403 | |||
| 377 | err = aq_hw_err_from_flags(self); | 404 | err = aq_hw_err_from_flags(self); |
| 378 | if (err < 0) | 405 | if (err < 0) |
| 379 | goto err_exit; | 406 | goto err_exit; |
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h index 5527fc0e5942..93450ec930e8 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h | |||
| @@ -2343,6 +2343,9 @@ | |||
| 2343 | #define tx_dma_desc_base_addrmsw_adr(descriptor) \ | 2343 | #define tx_dma_desc_base_addrmsw_adr(descriptor) \ |
| 2344 | (0x00007c04u + (descriptor) * 0x40) | 2344 | (0x00007c04u + (descriptor) * 0x40) |
| 2345 | 2345 | ||
| 2346 | /* tx dma total request limit */ | ||
| 2347 | #define tx_dma_total_req_limit_adr 0x00007b20u | ||
| 2348 | |||
| 2346 | /* tx interrupt moderation control register definitions | 2349 | /* tx interrupt moderation control register definitions |
| 2347 | * Preprocessor definitions for TX Interrupt Moderation Control Register | 2350 | * Preprocessor definitions for TX Interrupt Moderation Control Register |
| 2348 | * Base Address: 0x00008980 | 2351 | * Base Address: 0x00008980 |
| @@ -2369,6 +2372,9 @@ | |||
| 2369 | /* default value of bitfield reg_res_dsbl */ | 2372 | /* default value of bitfield reg_res_dsbl */ |
| 2370 | #define pci_reg_res_dsbl_default 0x1 | 2373 | #define pci_reg_res_dsbl_default 0x1 |
| 2371 | 2374 | ||
| 2375 | /* PCI core control register */ | ||
| 2376 | #define pci_reg_control6_adr 0x1014u | ||
| 2377 | |||
| 2372 | /* global microprocessor scratch pad definitions */ | 2378 | /* global microprocessor scratch pad definitions */ |
| 2373 | #define glb_cpu_scratch_scp_adr(scratch_scp) (0x00000300u + (scratch_scp) * 0x4) | 2379 | #define glb_cpu_scratch_scp_adr(scratch_scp) (0x00000300u + (scratch_scp) * 0x4) |
| 2374 | 2380 | ||
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index 1fe016fc4bc7..f2ce12ed4218 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c | |||
| @@ -503,73 +503,43 @@ int hw_atl_utils_update_stats(struct aq_hw_s *self) | |||
| 503 | struct hw_atl_s *hw_self = PHAL_ATLANTIC; | 503 | struct hw_atl_s *hw_self = PHAL_ATLANTIC; |
| 504 | struct hw_aq_atl_utils_mbox mbox; | 504 | struct hw_aq_atl_utils_mbox mbox; |
| 505 | 505 | ||
| 506 | if (!self->aq_link_status.mbps) | ||
| 507 | return 0; | ||
| 508 | |||
| 509 | hw_atl_utils_mpi_read_stats(self, &mbox); | 506 | hw_atl_utils_mpi_read_stats(self, &mbox); |
| 510 | 507 | ||
| 511 | #define AQ_SDELTA(_N_) (hw_self->curr_stats._N_ += \ | 508 | #define AQ_SDELTA(_N_) (hw_self->curr_stats._N_ += \ |
| 512 | mbox.stats._N_ - hw_self->last_stats._N_) | 509 | mbox.stats._N_ - hw_self->last_stats._N_) |
| 513 | 510 | if (self->aq_link_status.mbps) { | |
| 514 | AQ_SDELTA(uprc); | 511 | AQ_SDELTA(uprc); |
| 515 | AQ_SDELTA(mprc); | 512 | AQ_SDELTA(mprc); |
| 516 | AQ_SDELTA(bprc); | 513 | AQ_SDELTA(bprc); |
| 517 | AQ_SDELTA(erpt); | 514 | AQ_SDELTA(erpt); |
| 518 | 515 | ||
| 519 | AQ_SDELTA(uptc); | 516 | AQ_SDELTA(uptc); |
| 520 | AQ_SDELTA(mptc); | 517 | AQ_SDELTA(mptc); |
| 521 | AQ_SDELTA(bptc); | 518 | AQ_SDELTA(bptc); |
| 522 | AQ_SDELTA(erpr); | 519 | AQ_SDELTA(erpr); |
| 523 | 520 | ||
| 524 | AQ_SDELTA(ubrc); | 521 | AQ_SDELTA(ubrc); |
| 525 | AQ_SDELTA(ubtc); | 522 | AQ_SDELTA(ubtc); |
| 526 | AQ_SDELTA(mbrc); | 523 | AQ_SDELTA(mbrc); |
| 527 | AQ_SDELTA(mbtc); | 524 | AQ_SDELTA(mbtc); |
| 528 | AQ_SDELTA(bbrc); | 525 | AQ_SDELTA(bbrc); |
| 529 | AQ_SDELTA(bbtc); | 526 | AQ_SDELTA(bbtc); |
| 530 | AQ_SDELTA(dpc); | 527 | AQ_SDELTA(dpc); |
| 531 | 528 | } | |
| 532 | #undef AQ_SDELTA | 529 | #undef AQ_SDELTA |
| 530 | hw_self->curr_stats.dma_pkt_rc = stats_rx_dma_good_pkt_counterlsw_get(self); | ||
| 531 | hw_self->curr_stats.dma_pkt_tc = stats_tx_dma_good_pkt_counterlsw_get(self); | ||
| 532 | hw_self->curr_stats.dma_oct_rc = stats_rx_dma_good_octet_counterlsw_get(self); | ||
| 533 | hw_self->curr_stats.dma_oct_tc = stats_tx_dma_good_octet_counterlsw_get(self); | ||
| 533 | 534 | ||
| 534 | memcpy(&hw_self->last_stats, &mbox.stats, sizeof(mbox.stats)); | 535 | memcpy(&hw_self->last_stats, &mbox.stats, sizeof(mbox.stats)); |
| 535 | 536 | ||
| 536 | return 0; | 537 | return 0; |
| 537 | } | 538 | } |
| 538 | 539 | ||
| 539 | int hw_atl_utils_get_hw_stats(struct aq_hw_s *self, | 540 | struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self) |
| 540 | u64 *data, unsigned int *p_count) | ||
| 541 | { | 541 | { |
| 542 | struct hw_atl_s *hw_self = PHAL_ATLANTIC; | 542 | return &PHAL_ATLANTIC->curr_stats; |
| 543 | struct hw_atl_stats_s *stats = &hw_self->curr_stats; | ||
| 544 | int i = 0; | ||
| 545 | |||
| 546 | data[i] = stats->uprc + stats->mprc + stats->bprc; | ||
| 547 | data[++i] = stats->uprc; | ||
| 548 | data[++i] = stats->mprc; | ||
| 549 | data[++i] = stats->bprc; | ||
| 550 | data[++i] = stats->erpt; | ||
| 551 | data[++i] = stats->uptc + stats->mptc + stats->bptc; | ||
| 552 | data[++i] = stats->uptc; | ||
| 553 | data[++i] = stats->mptc; | ||
| 554 | data[++i] = stats->bptc; | ||
| 555 | data[++i] = stats->ubrc; | ||
| 556 | data[++i] = stats->ubtc; | ||
| 557 | data[++i] = stats->mbrc; | ||
| 558 | data[++i] = stats->mbtc; | ||
| 559 | data[++i] = stats->bbrc; | ||
| 560 | data[++i] = stats->bbtc; | ||
| 561 | data[++i] = stats->ubrc + stats->mbrc + stats->bbrc; | ||
| 562 | data[++i] = stats->ubtc + stats->mbtc + stats->bbtc; | ||
| 563 | data[++i] = stats_rx_dma_good_pkt_counterlsw_get(self); | ||
| 564 | data[++i] = stats_tx_dma_good_pkt_counterlsw_get(self); | ||
| 565 | data[++i] = stats_rx_dma_good_octet_counterlsw_get(self); | ||
| 566 | data[++i] = stats_tx_dma_good_octet_counterlsw_get(self); | ||
| 567 | data[++i] = stats->dpc; | ||
| 568 | |||
| 569 | if (p_count) | ||
| 570 | *p_count = ++i; | ||
| 571 | |||
| 572 | return 0; | ||
| 573 | } | 543 | } |
| 574 | 544 | ||
| 575 | static const u32 hw_atl_utils_hw_mac_regs[] = { | 545 | static const u32 hw_atl_utils_hw_mac_regs[] = { |
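
The update_stats rewrite keeps the AQ_SDELTA idiom: the firmware mailbox delivers counter snapshots, so each poll adds (new - last) to a running total and then saves the snapshot, which also lets the totals survive firmware counter resets. A reduced, compilable model of the idiom (field names are illustrative):

    #include <assert.h>

    struct raw_stats   { unsigned int uprc, uptc; };        /* fw snapshot */
    struct accum_stats { unsigned long long uprc, uptc; };  /* running sum */

    static void update_stats(struct accum_stats *curr,
                             struct raw_stats *last,
                             const struct raw_stats *mbox)
    {
    #define AQ_SDELTA(f) (curr->f += mbox->f - last->f)  /* wrap-safe: unsigned */
        AQ_SDELTA(uprc);
        AQ_SDELTA(uptc);
    #undef AQ_SDELTA
        *last = *mbox;      /* remember the snapshot for the next poll */
    }

    int main(void)
    {
        struct accum_stats curr = {0};
        struct raw_stats last = {0}, mbox = { 10, 20 };

        update_stats(&curr, &last, &mbox);
        mbox.uprc = 15;     /* 5 more packets since the last poll */
        update_stats(&curr, &last, &mbox);
        assert(curr.uprc == 15 && curr.uptc == 20);
        return 0;
    }
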
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h index c99cc690e425..21aeca6908d3 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h | |||
| @@ -129,7 +129,7 @@ struct __packed hw_aq_atl_utils_mbox { | |||
| 129 | struct __packed hw_atl_s { | 129 | struct __packed hw_atl_s { |
| 130 | struct aq_hw_s base; | 130 | struct aq_hw_s base; |
| 131 | struct hw_atl_stats_s last_stats; | 131 | struct hw_atl_stats_s last_stats; |
| 132 | struct hw_atl_stats_s curr_stats; | 132 | struct aq_stats_s curr_stats; |
| 133 | u64 speed; | 133 | u64 speed; |
| 134 | unsigned int chip_features; | 134 | unsigned int chip_features; |
| 135 | u32 fw_ver_actual; | 135 | u32 fw_ver_actual; |
| @@ -207,8 +207,6 @@ int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version); | |||
| 207 | 207 | ||
| 208 | int hw_atl_utils_update_stats(struct aq_hw_s *self); | 208 | int hw_atl_utils_update_stats(struct aq_hw_s *self); |
| 209 | 209 | ||
| 210 | int hw_atl_utils_get_hw_stats(struct aq_hw_s *self, | 210 | struct aq_stats_s *hw_atl_utils_get_hw_stats(struct aq_hw_s *self); |
| 211 | u64 *data, | ||
| 212 | unsigned int *p_count); | ||
| 213 | 211 | ||
| 214 | #endif /* HW_ATL_UTILS_H */ | 212 | #endif /* HW_ATL_UTILS_H */ |
diff --git a/drivers/net/ethernet/aquantia/atlantic/ver.h b/drivers/net/ethernet/aquantia/atlantic/ver.h index 0de858d215c2..9009f2651e70 100644 --- a/drivers/net/ethernet/aquantia/atlantic/ver.h +++ b/drivers/net/ethernet/aquantia/atlantic/ver.h | |||
| @@ -11,8 +11,10 @@ | |||
| 11 | #define VER_H | 11 | #define VER_H |
| 12 | 12 | ||
| 13 | #define NIC_MAJOR_DRIVER_VERSION 1 | 13 | #define NIC_MAJOR_DRIVER_VERSION 1 |
| 14 | #define NIC_MINOR_DRIVER_VERSION 5 | 14 | #define NIC_MINOR_DRIVER_VERSION 6 |
| 15 | #define NIC_BUILD_DRIVER_VERSION 345 | 15 | #define NIC_BUILD_DRIVER_VERSION 13 |
| 16 | #define NIC_REVISION_DRIVER_VERSION 0 | 16 | #define NIC_REVISION_DRIVER_VERSION 0 |
| 17 | 17 | ||
| 18 | #define AQ_CFG_DRV_VERSION_SUFFIX "-kern" | ||
| 19 | |||
| 18 | #endif /* VER_H */ | 20 | #endif /* VER_H */ |
diff --git a/drivers/net/ethernet/arc/emac_rockchip.c b/drivers/net/ethernet/arc/emac_rockchip.c index c6163874e4e7..16f9bee992fe 100644 --- a/drivers/net/ethernet/arc/emac_rockchip.c +++ b/drivers/net/ethernet/arc/emac_rockchip.c | |||
| @@ -199,9 +199,11 @@ static int emac_rockchip_probe(struct platform_device *pdev) | |||
| 199 | 199 | ||
| 200 | /* RMII interface always needs a rate of 50MHz */ | 200 | /* RMII interface always needs a rate of 50MHz */ |
| 201 | err = clk_set_rate(priv->refclk, 50000000); | 201 | err = clk_set_rate(priv->refclk, 50000000); |
| 202 | if (err) | 202 | if (err) { |
| 203 | dev_err(dev, | 203 | dev_err(dev, |
| 204 | "failed to change reference clock rate (%d)\n", err); | 204 | "failed to change reference clock rate (%d)\n", err); |
| 205 | goto out_regulator_disable; | ||
| 206 | } | ||
| 205 | 207 | ||
| 206 | if (priv->soc_data->need_div_macclk) { | 208 | if (priv->soc_data->need_div_macclk) { |
| 207 | priv->macclk = devm_clk_get(dev, "macclk"); | 209 | priv->macclk = devm_clk_get(dev, "macclk"); |
| @@ -230,12 +232,14 @@ static int emac_rockchip_probe(struct platform_device *pdev) | |||
| 230 | err = arc_emac_probe(ndev, interface); | 232 | err = arc_emac_probe(ndev, interface); |
| 231 | if (err) { | 233 | if (err) { |
| 232 | dev_err(dev, "failed to probe arc emac (%d)\n", err); | 234 | dev_err(dev, "failed to probe arc emac (%d)\n", err); |
| 233 | goto out_regulator_disable; | 235 | goto out_clk_disable_macclk; |
| 234 | } | 236 | } |
| 235 | 237 | ||
| 236 | return 0; | 238 | return 0; |
| 239 | |||
| 237 | out_clk_disable_macclk: | 240 | out_clk_disable_macclk: |
| 238 | clk_disable_unprepare(priv->macclk); | 241 | if (priv->soc_data->need_div_macclk) |
| 242 | clk_disable_unprepare(priv->macclk); | ||
| 239 | out_regulator_disable: | 243 | out_regulator_disable: |
| 240 | if (priv->regulator) | 244 | if (priv->regulator) |
| 241 | regulator_disable(priv->regulator); | 245 | regulator_disable(priv->regulator); |
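
The emac_rockchip fix restores the canonical goto-unwind shape: each failure jumps to a label that releases exactly what was set up so far, and the conditionally acquired macclk is released under the same condition that acquired it. A self-contained sketch with stub helpers; every name below is hypothetical:

    struct device;

    /* stand-in helpers; the real driver uses regulator/clk APIs */
    static int  regulator_on(struct device *d)  { (void)d; return 0; }
    static void regulator_off(struct device *d) { (void)d; }
    static int  macclk_on(struct device *d)     { (void)d; return 0; }
    static void macclk_off(struct device *d)    { (void)d; }
    static int  core_probe(struct device *d)    { (void)d; return 0; }

    static int example_probe(struct device *dev, int need_macclk)
    {
        int err;

        err = regulator_on(dev);
        if (err)
            return err;

        if (need_macclk) {
            err = macclk_on(dev);
            if (err)
                goto out_regulator_disable;
        }

        err = core_probe(dev);
        if (err)
            goto out_clk_disable_macclk;  /* not out_regulator_disable */

        return 0;

    out_clk_disable_macclk:
        if (need_macclk)    /* only undo what was actually acquired */
            macclk_off(dev);
    out_regulator_disable:
        regulator_off(dev);
        return err;
    }

    int main(void) { return example_probe(0, 1); }
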
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 6e423f098a60..31efc47c847e 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c | |||
| @@ -4081,7 +4081,6 @@ static void skge_remove(struct pci_dev *pdev) | |||
| 4081 | if (hw->ports > 1) { | 4081 | if (hw->ports > 1) { |
| 4082 | skge_write32(hw, B0_IMSK, 0); | 4082 | skge_write32(hw, B0_IMSK, 0); |
| 4083 | skge_read32(hw, B0_IMSK); | 4083 | skge_read32(hw, B0_IMSK); |
| 4084 | free_irq(pdev->irq, hw); | ||
| 4085 | } | 4084 | } |
| 4086 | spin_unlock_irq(&hw->hw_lock); | 4085 | spin_unlock_irq(&hw->hw_lock); |
| 4087 | 4086 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c index e0eb695318e6..1fa4849a6f56 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_port.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c | |||
| @@ -188,7 +188,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) | |||
| 188 | struct net_device *dev = mdev->pndev[port]; | 188 | struct net_device *dev = mdev->pndev[port]; |
| 189 | struct mlx4_en_priv *priv = netdev_priv(dev); | 189 | struct mlx4_en_priv *priv = netdev_priv(dev); |
| 190 | struct net_device_stats *stats = &dev->stats; | 190 | struct net_device_stats *stats = &dev->stats; |
| 191 | struct mlx4_cmd_mailbox *mailbox; | 191 | struct mlx4_cmd_mailbox *mailbox, *mailbox_priority; |
| 192 | u64 in_mod = reset << 8 | port; | 192 | u64 in_mod = reset << 8 | port; |
| 193 | int err; | 193 | int err; |
| 194 | int i, counter_index; | 194 | int i, counter_index; |
| @@ -198,6 +198,13 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) | |||
| 198 | mailbox = mlx4_alloc_cmd_mailbox(mdev->dev); | 198 | mailbox = mlx4_alloc_cmd_mailbox(mdev->dev); |
| 199 | if (IS_ERR(mailbox)) | 199 | if (IS_ERR(mailbox)) |
| 200 | return PTR_ERR(mailbox); | 200 | return PTR_ERR(mailbox); |
| 201 | |||
| 202 | mailbox_priority = mlx4_alloc_cmd_mailbox(mdev->dev); | ||
| 203 | if (IS_ERR(mailbox_priority)) { | ||
| 204 | mlx4_free_cmd_mailbox(mdev->dev, mailbox); | ||
| 205 | return PTR_ERR(mailbox_priority); | ||
| 206 | } | ||
| 207 | |||
| 201 | err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0, | 208 | err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, in_mod, 0, |
| 202 | MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B, | 209 | MLX4_CMD_DUMP_ETH_STATS, MLX4_CMD_TIME_CLASS_B, |
| 203 | MLX4_CMD_NATIVE); | 210 | MLX4_CMD_NATIVE); |
| @@ -206,6 +213,28 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) | |||
| 206 | 213 | ||
| 207 | mlx4_en_stats = mailbox->buf; | 214 | mlx4_en_stats = mailbox->buf; |
| 208 | 215 | ||
| 216 | memset(&tmp_counter_stats, 0, sizeof(tmp_counter_stats)); | ||
| 217 | counter_index = mlx4_get_default_counter_index(mdev->dev, port); | ||
| 218 | err = mlx4_get_counter_stats(mdev->dev, counter_index, | ||
| 219 | &tmp_counter_stats, reset); | ||
| 220 | |||
| 221 | /* 0xffs indicates invalid value */ | ||
| 222 | memset(mailbox_priority->buf, 0xff, | ||
| 223 | sizeof(*flowstats) * MLX4_NUM_PRIORITIES); | ||
| 224 | |||
| 225 | if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) { | ||
| 226 | memset(mailbox_priority->buf, 0, | ||
| 227 | sizeof(*flowstats) * MLX4_NUM_PRIORITIES); | ||
| 228 | err = mlx4_cmd_box(mdev->dev, 0, mailbox_priority->dma, | ||
| 229 | in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL, | ||
| 230 | 0, MLX4_CMD_DUMP_ETH_STATS, | ||
| 231 | MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); | ||
| 232 | if (err) | ||
| 233 | goto out; | ||
| 234 | } | ||
| 235 | |||
| 236 | flowstats = mailbox_priority->buf; | ||
| 237 | |||
| 209 | spin_lock_bh(&priv->stats_lock); | 238 | spin_lock_bh(&priv->stats_lock); |
| 210 | 239 | ||
| 211 | mlx4_en_fold_software_stats(dev); | 240 | mlx4_en_fold_software_stats(dev); |
| @@ -345,31 +374,6 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) | |||
| 345 | priv->pkstats.tx_prio[8][0] = be64_to_cpu(mlx4_en_stats->TTOT_novlan); | 374 | priv->pkstats.tx_prio[8][0] = be64_to_cpu(mlx4_en_stats->TTOT_novlan); |
| 346 | priv->pkstats.tx_prio[8][1] = be64_to_cpu(mlx4_en_stats->TOCT_novlan); | 375 | priv->pkstats.tx_prio[8][1] = be64_to_cpu(mlx4_en_stats->TOCT_novlan); |
| 347 | 376 | ||
| 348 | spin_unlock_bh(&priv->stats_lock); | ||
| 349 | |||
| 350 | memset(&tmp_counter_stats, 0, sizeof(tmp_counter_stats)); | ||
| 351 | counter_index = mlx4_get_default_counter_index(mdev->dev, port); | ||
| 352 | err = mlx4_get_counter_stats(mdev->dev, counter_index, | ||
| 353 | &tmp_counter_stats, reset); | ||
| 354 | |||
| 355 | /* 0xffs indicates invalid value */ | ||
| 356 | memset(mailbox->buf, 0xff, sizeof(*flowstats) * MLX4_NUM_PRIORITIES); | ||
| 357 | |||
| 358 | if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FLOWSTATS_EN) { | ||
| 359 | memset(mailbox->buf, 0, | ||
| 360 | sizeof(*flowstats) * MLX4_NUM_PRIORITIES); | ||
| 361 | err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, | ||
| 362 | in_mod | MLX4_DUMP_ETH_STATS_FLOW_CONTROL, | ||
| 363 | 0, MLX4_CMD_DUMP_ETH_STATS, | ||
| 364 | MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); | ||
| 365 | if (err) | ||
| 366 | goto out; | ||
| 367 | } | ||
| 368 | |||
| 369 | flowstats = mailbox->buf; | ||
| 370 | |||
| 371 | spin_lock_bh(&priv->stats_lock); | ||
| 372 | |||
| 373 | if (tmp_counter_stats.counter_mode == 0) { | 377 | if (tmp_counter_stats.counter_mode == 0) { |
| 374 | priv->pf_stats.rx_bytes = be64_to_cpu(tmp_counter_stats.rx_bytes); | 378 | priv->pf_stats.rx_bytes = be64_to_cpu(tmp_counter_stats.rx_bytes); |
| 375 | priv->pf_stats.tx_bytes = be64_to_cpu(tmp_counter_stats.tx_bytes); | 379 | priv->pf_stats.tx_bytes = be64_to_cpu(tmp_counter_stats.tx_bytes); |
| @@ -410,6 +414,7 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset) | |||
| 410 | 414 | ||
| 411 | out: | 415 | out: |
| 412 | mlx4_free_cmd_mailbox(mdev->dev, mailbox); | 416 | mlx4_free_cmd_mailbox(mdev->dev, mailbox); |
| 417 | mlx4_free_cmd_mailbox(mdev->dev, mailbox_priority); | ||
| 413 | return err; | 418 | return err; |
| 414 | } | 419 | } |
| 415 | 420 | ||
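
The mlx4 hunk allocates the second mailbox up front and frees both on the common exit path; the key detail is releasing the first mailbox when the second allocation fails. The same shape in plain C, with malloc()/free() standing in for the mailbox helpers:

    #include <stdlib.h>

    static int alloc_pair(void **a, void **b, size_t n)
    {
        *a = malloc(n);
        if (!*a)
            return -1;

        *b = malloc(n);
        if (!*b) {
            free(*a);   /* don't leak the first allocation */
            *a = NULL;
            return -1;
        }
        return 0;   /* caller frees both on its common exit path */
    }

    int main(void)
    {
        void *a, *b;

        if (alloc_pair(&a, &b, 64))
            return 1;
        free(a);
        free(b);
        return 0;
    }
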
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c index 88699b181946..946d9db7c8c2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c | |||
| @@ -185,7 +185,7 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf) | |||
| 185 | if (priv->mdev->dev->caps.flags & | 185 | if (priv->mdev->dev->caps.flags & |
| 186 | MLX4_DEV_CAP_FLAG_UC_LOOPBACK) { | 186 | MLX4_DEV_CAP_FLAG_UC_LOOPBACK) { |
| 187 | buf[3] = mlx4_en_test_registers(priv); | 187 | buf[3] = mlx4_en_test_registers(priv); |
| 188 | if (priv->port_up) | 188 | if (priv->port_up && dev->mtu >= MLX4_SELFTEST_LB_MIN_MTU) |
| 189 | buf[4] = mlx4_en_test_loopback(priv); | 189 | buf[4] = mlx4_en_test_loopback(priv); |
| 190 | } | 190 | } |
| 191 | 191 | ||
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 1856e279a7e0..2b72677eccd4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | |||
| @@ -153,6 +153,9 @@ | |||
| 153 | #define SMALL_PACKET_SIZE (256 - NET_IP_ALIGN) | 153 | #define SMALL_PACKET_SIZE (256 - NET_IP_ALIGN) |
| 154 | #define HEADER_COPY_SIZE (128 - NET_IP_ALIGN) | 154 | #define HEADER_COPY_SIZE (128 - NET_IP_ALIGN) |
| 155 | #define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN) | 155 | #define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN) |
| 156 | #define PREAMBLE_LEN 8 | ||
| 157 | #define MLX4_SELFTEST_LB_MIN_MTU (MLX4_LOOPBACK_TEST_PAYLOAD + NET_IP_ALIGN + \ | ||
| 158 | ETH_HLEN + PREAMBLE_LEN) | ||
| 156 | 159 | ||
| 157 | #define MLX4_EN_MIN_MTU 46 | 160 | #define MLX4_EN_MIN_MTU 46 |
| 158 | /* VLAN_HLEN is added twice, to support skb vlan tagged with multiple | 161 |
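
The new MLX4_SELFTEST_LB_MIN_MTU is just the loopback payload plus the headroom around it. Assuming the common NET_IP_ALIGN of 2 and ETH_HLEN of 14 (both properties of the build, not spelled out in the hunk), the payload is 112 bytes and the minimum MTU works out to 112 + 2 + 14 + 8 = 136, which this small check confirms:

    #include <assert.h>

    #define NET_IP_ALIGN 2      /* assumption: the usual value */
    #define ETH_HLEN     14
    #define HEADER_COPY_SIZE (128 - NET_IP_ALIGN)
    #define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN)
    #define PREAMBLE_LEN 8
    #define MLX4_SELFTEST_LB_MIN_MTU (MLX4_LOOPBACK_TEST_PAYLOAD + NET_IP_ALIGN + \
                                      ETH_HLEN + PREAMBLE_LEN)

    int main(void)
    {
        assert(MLX4_LOOPBACK_TEST_PAYLOAD == 112);
        assert(MLX4_SELFTEST_LB_MIN_MTU == 136);
        return 0;
    }
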
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 04304dd894c6..606a0e0beeae 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | |||
| @@ -611,7 +611,6 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev) | |||
| 611 | MLX4_MAX_PORTS; | 611 | MLX4_MAX_PORTS; |
| 612 | else | 612 | else |
| 613 | res_alloc->guaranteed[t] = 0; | 613 | res_alloc->guaranteed[t] = 0; |
| 614 | res_alloc->res_free -= res_alloc->guaranteed[t]; | ||
| 615 | break; | 614 | break; |
| 616 | default: | 615 | default: |
| 617 | break; | 616 | break; |
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 2d0897b7d860..9bd8d28de152 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c | |||
| @@ -4300,6 +4300,7 @@ static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, | |||
| 4300 | 4300 | ||
| 4301 | static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port) | 4301 | static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port) |
| 4302 | { | 4302 | { |
| 4303 | u16 vid = 1; | ||
| 4303 | int err; | 4304 | int err; |
| 4304 | 4305 | ||
| 4305 | err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true); | 4306 | err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true); |
| @@ -4312,8 +4313,19 @@ static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port) | |||
| 4312 | true, false); | 4313 | true, false); |
| 4313 | if (err) | 4314 | if (err) |
| 4314 | goto err_port_vlan_set; | 4315 | goto err_port_vlan_set; |
| 4316 | |||
| 4317 | for (; vid <= VLAN_N_VID - 1; vid++) { | ||
| 4318 | err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, | ||
| 4319 | vid, false); | ||
| 4320 | if (err) | ||
| 4321 | goto err_vid_learning_set; | ||
| 4322 | } | ||
| 4323 | |||
| 4315 | return 0; | 4324 | return 0; |
| 4316 | 4325 | ||
| 4326 | err_vid_learning_set: | ||
| 4327 | for (vid--; vid >= 1; vid--) | ||
| 4328 | mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); | ||
| 4317 | err_port_vlan_set: | 4329 | err_port_vlan_set: |
| 4318 | mlxsw_sp_port_stp_set(mlxsw_sp_port, false); | 4330 | mlxsw_sp_port_stp_set(mlxsw_sp_port, false); |
| 4319 | err_port_stp_set: | 4331 | err_port_stp_set: |
| @@ -4323,6 +4335,12 @@ err_port_stp_set: | |||
| 4323 | 4335 | ||
| 4324 | static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port) | 4336 | static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port) |
| 4325 | { | 4337 | { |
| 4338 | u16 vid; | ||
| 4339 | |||
| 4340 | for (vid = VLAN_N_VID - 1; vid >= 1; vid--) | ||
| 4341 | mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, | ||
| 4342 | vid, true); | ||
| 4343 | |||
| 4326 | mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1, | 4344 | mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1, |
| 4327 | false, false); | 4345 | false, false); |
| 4328 | mlxsw_sp_port_stp_set(mlxsw_sp_port, false); | 4346 | mlxsw_sp_port_stp_set(mlxsw_sp_port, false); |
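
The mlxsw hunk is the apply-all-or-roll-back loop: learning is disabled VID by VID, and on failure the error path walks back from vid - 1, re-enabling only what was actually changed, while the leave path undoes the whole range. A generic, compilable sketch of that control flow:

    static int set_stub(int id, int on) { (void)id; (void)on; return 0; }

    static int apply_range(int first, int last,
                           int (*set)(int id, int on))
    {
        int id, err;

        for (id = first; id <= last; id++) {
            err = set(id, 1);
            if (err)
                goto rollback;
        }
        return 0;

    rollback:
        for (id--; id >= first; id--)   /* undo only what succeeded */
            set(id, 0);
        return err;
    }

    /* 1..4095 mirrors the driver's vid range (VLAN_N_VID - 1) */
    int main(void) { return apply_range(1, 4095, set_stub); }
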
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.c b/drivers/net/ethernet/qualcomm/emac/emac-phy.c index 18461fcb9815..53dbf1e163a8 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac-phy.c +++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.c | |||
| @@ -47,6 +47,7 @@ | |||
| 47 | #define MDIO_CLK_25_28 7 | 47 | #define MDIO_CLK_25_28 7 |
| 48 | 48 | ||
| 49 | #define MDIO_WAIT_TIMES 1000 | 49 | #define MDIO_WAIT_TIMES 1000 |
| 50 | #define MDIO_STATUS_DELAY_TIME 1 | ||
| 50 | 51 | ||
| 51 | static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum) | 52 | static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum) |
| 52 | { | 53 | { |
| @@ -65,7 +66,7 @@ static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum) | |||
| 65 | 66 | ||
| 66 | if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg, | 67 | if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg, |
| 67 | !(reg & (MDIO_START | MDIO_BUSY)), | 68 | !(reg & (MDIO_START | MDIO_BUSY)), |
| 68 | 100, MDIO_WAIT_TIMES * 100)) | 69 | MDIO_STATUS_DELAY_TIME, MDIO_WAIT_TIMES * 100)) |
| 69 | return -EIO; | 70 | return -EIO; |
| 70 | 71 | ||
| 71 | return (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK; | 72 | return (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK; |
| @@ -88,8 +89,8 @@ static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val) | |||
| 88 | writel(reg, adpt->base + EMAC_MDIO_CTRL); | 89 | writel(reg, adpt->base + EMAC_MDIO_CTRL); |
| 89 | 90 | ||
| 90 | if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg, | 91 | if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg, |
| 91 | !(reg & (MDIO_START | MDIO_BUSY)), 100, | 92 | !(reg & (MDIO_START | MDIO_BUSY)), |
| 92 | MDIO_WAIT_TIMES * 100)) | 93 | MDIO_STATUS_DELAY_TIME, MDIO_WAIT_TIMES * 100)) |
| 93 | return -EIO; | 94 | return -EIO; |
| 94 | 95 | ||
| 95 | return 0; | 96 | return 0; |
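
The emac-phy change replaces a magic 100 with the named MDIO_STATUS_DELAY_TIME (1 us), so the done bit is polled every microsecond while the overall budget stays MDIO_WAIT_TIMES * 100 us. A user-space model of the readl_poll_timeout() loop it feeds into; the register read is a stub:

    #include <stdbool.h>
    #include <unistd.h>

    #define MDIO_STATUS_DELAY_TIME 1        /* microseconds per check */
    #define MDIO_WAIT_TIMES        1000

    static bool mdio_idle(void) { return true; }    /* stub for the MMIO read */

    static int wait_mdio_done(void)
    {
        unsigned long waited_us = 0;

        while (waited_us < MDIO_WAIT_TIMES * 100) { /* total budget: 100 ms */
            if (mdio_idle())
                return 0;
            usleep(MDIO_STATUS_DELAY_TIME);
            waited_us += MDIO_STATUS_DELAY_TIME;
        }
        return -1;  /* the driver returns -EIO here */
    }

    int main(void) { return wait_mdio_done(); }
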
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 2b962d349f5f..009780df664b 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c | |||
| @@ -2308,32 +2308,9 @@ static int __maybe_unused ravb_resume(struct device *dev) | |||
| 2308 | struct ravb_private *priv = netdev_priv(ndev); | 2308 | struct ravb_private *priv = netdev_priv(ndev); |
| 2309 | int ret = 0; | 2309 | int ret = 0; |
| 2310 | 2310 | ||
| 2311 | if (priv->wol_enabled) { | 2311 | /* If WoL is enabled, set reset mode to rearm the WoL logic */ |
| 2312 | /* Reduce the usecount of the clock to zero and then | 2312 | if (priv->wol_enabled) |
| 2313 | * restore it to its original value. This is done to force | ||
| 2314 | * the clock to be re-enabled which is a workaround | ||
| 2315 | * for renesas-cpg-mssr driver which do not enable clocks | ||
| 2316 | * when resuming from PSCI suspend/resume. | ||
| 2317 | * | ||
| 2318 | * Without this workaround the driver fails to communicate | ||
| 2319 | * with the hardware if WoL was enabled when the system | ||
| 2320 | * entered PSCI suspend. This is due to that if WoL is enabled | ||
| 2321 | * we explicitly keep the clock from being turned off when | ||
| 2322 | * suspending, but in PSCI sleep power is cut so the clock | ||
| 2323 | * is disabled anyhow, the clock driver is not aware of this | ||
| 2324 | * so the clock is not turned back on when resuming. | ||
| 2325 | * | ||
| 2326 | * TODO: once the renesas-cpg-mssr suspend/resume is working | ||
| 2327 | * this clock dance should be removed. | ||
| 2328 | */ | ||
| 2329 | clk_disable(priv->clk); | ||
| 2330 | clk_disable(priv->clk); | ||
| 2331 | clk_enable(priv->clk); | ||
| 2332 | clk_enable(priv->clk); | ||
| 2333 | |||
| 2334 | /* Set reset mode to rearm the WoL logic */ | ||
| 2335 | ravb_write(ndev, CCC_OPC_RESET, CCC); | 2313 | ravb_write(ndev, CCC_OPC_RESET, CCC); |
| 2336 | } | ||
| 2337 | 2314 | ||
| 2338 | /* All registers have been reset to default values. | 2315 |
| 2339 | * Restore all registers which were set up at probe time and | 2316 |
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index db72d13cebb9..75323000c364 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c | |||
| @@ -1892,6 +1892,16 @@ static int sh_eth_phy_init(struct net_device *ndev) | |||
| 1892 | return PTR_ERR(phydev); | 1892 | return PTR_ERR(phydev); |
| 1893 | } | 1893 | } |
| 1894 | 1894 | ||
| 1895 | /* mask with MAC supported features */ | ||
| 1896 | if (mdp->cd->register_type != SH_ETH_REG_GIGABIT) { | ||
| 1897 | int err = phy_set_max_speed(phydev, SPEED_100); | ||
| 1898 | if (err) { | ||
| 1899 | netdev_err(ndev, "failed to limit PHY to 100 Mbit/s\n"); | ||
| 1900 | phy_disconnect(phydev); | ||
| 1901 | return err; | ||
| 1902 | } | ||
| 1903 | } | ||
| 1904 | |||
| 1895 | phy_attached_info(phydev); | 1905 | phy_attached_info(phydev); |
| 1896 | 1906 | ||
| 1897 | return 0; | 1907 | return 0; |
diff --git a/drivers/net/hippi/rrunner.c b/drivers/net/hippi/rrunner.c index 8483f03d5a41..1ab97d99b9ba 100644 --- a/drivers/net/hippi/rrunner.c +++ b/drivers/net/hippi/rrunner.c | |||
| @@ -1379,8 +1379,8 @@ static int rr_close(struct net_device *dev) | |||
| 1379 | rrpriv->info_dma); | 1379 | rrpriv->info_dma); |
| 1380 | rrpriv->info = NULL; | 1380 | rrpriv->info = NULL; |
| 1381 | 1381 | ||
| 1382 | free_irq(pdev->irq, dev); | ||
| 1383 | spin_unlock_irqrestore(&rrpriv->lock, flags); | 1382 | spin_unlock_irqrestore(&rrpriv->lock, flags); |
| 1383 | free_irq(pdev->irq, dev); | ||
| 1384 | 1384 | ||
| 1385 | return 0; | 1385 | return 0; |
| 1386 | } | 1386 | } |
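
Both the skge hunk above and this rrunner one move free_irq() out from under a spinlock: free_irq() waits for any in-flight handler, and a handler that needs the same lock would deadlock against the caller. A small pthread model of the corrected ordering, with a joined worker standing in for free_irq(); joining before the unlock would hang:

    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static void *irq_handler(void *arg)  /* grabs the device lock, like an ISR */
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t;

        pthread_mutex_lock(&lock);
        pthread_create(&t, NULL, irq_handler, NULL);
        /* ... teardown that genuinely needs the lock ... */
        pthread_mutex_unlock(&lock);    /* release first ...            */
        pthread_join(t, NULL);          /* ... then wait, like free_irq */
        return 0;
    }
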
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index 5f93e6add563..e911e4990b20 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c | |||
| @@ -239,14 +239,10 @@ static int at803x_resume(struct phy_device *phydev) | |||
| 239 | { | 239 | { |
| 240 | int value; | 240 | int value; |
| 241 | 241 | ||
| 242 | mutex_lock(&phydev->lock); | ||
| 243 | |||
| 244 | value = phy_read(phydev, MII_BMCR); | 242 | value = phy_read(phydev, MII_BMCR); |
| 245 | value &= ~(BMCR_PDOWN | BMCR_ISOLATE); | 243 | value &= ~(BMCR_PDOWN | BMCR_ISOLATE); |
| 246 | phy_write(phydev, MII_BMCR, value); | 244 | phy_write(phydev, MII_BMCR, value); |
| 247 | 245 | ||
| 248 | mutex_unlock(&phydev->lock); | ||
| 249 | |||
| 250 | return 0; | 246 | return 0; |
| 251 | } | 247 | } |
| 252 | 248 | ||
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 4d02b27df044..b5a8f750e433 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c | |||
| @@ -637,6 +637,10 @@ static int m88e1510_config_aneg(struct phy_device *phydev) | |||
| 637 | if (err < 0) | 637 | if (err < 0) |
| 638 | goto error; | 638 | goto error; |
| 639 | 639 | ||
| 640 | /* Do not touch the fiber page if we're in copper->sgmii mode */ | ||
| 641 | if (phydev->interface == PHY_INTERFACE_MODE_SGMII) | ||
| 642 | return 0; | ||
| 643 | |||
| 640 | /* Then the fiber link */ | 644 | /* Then the fiber link */ |
| 641 | err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE); | 645 | err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE); |
| 642 | if (err < 0) | 646 | if (err < 0) |
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 2df7b62c1a36..54d00a1d2bef 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c | |||
| @@ -270,6 +270,7 @@ static void of_mdiobus_link_mdiodev(struct mii_bus *bus, | |||
| 270 | 270 | ||
| 271 | if (addr == mdiodev->addr) { | 271 | if (addr == mdiodev->addr) { |
| 272 | dev->of_node = child; | 272 | dev->of_node = child; |
| 273 | dev->fwnode = of_fwnode_handle(child); | ||
| 273 | return; | 274 | return; |
| 274 | } | 275 | } |
| 275 | } | 276 | } |
diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c index 1ea69b7585d9..842eb871a6e3 100644 --- a/drivers/net/phy/meson-gxl.c +++ b/drivers/net/phy/meson-gxl.c | |||
| @@ -22,6 +22,7 @@ | |||
| 22 | #include <linux/ethtool.h> | 22 | #include <linux/ethtool.h> |
| 23 | #include <linux/phy.h> | 23 | #include <linux/phy.h> |
| 24 | #include <linux/netdevice.h> | 24 | #include <linux/netdevice.h> |
| 25 | #include <linux/bitfield.h> | ||
| 25 | 26 | ||
| 26 | static int meson_gxl_config_init(struct phy_device *phydev) | 27 | static int meson_gxl_config_init(struct phy_device *phydev) |
| 27 | { | 28 | { |
| @@ -50,6 +51,77 @@ static int meson_gxl_config_init(struct phy_device *phydev) | |||
| 50 | return 0; | 51 | return 0; |
| 51 | } | 52 | } |
| 52 | 53 | ||
| 54 | /* This function is provided to cope with the possible failures of this PHY | ||
| 55 | * during the aneg process. When aneg fails, the PHY reports that aneg is done | ||
| 56 | * but the value found in MII_LPA is wrong: | ||
| 57 | * - Early failures: MII_LPA is just 0x0001. If MII_EXPANSION reports that | ||
| 58 | * the link partner (LP) supports aneg but the LP never acked our base | ||
| 59 | * code word, it is likely that we never sent it to begin with. | ||
| 60 | * - Late failures: MII_LPA is filled with a value which seems to make sense | ||
| 61 | * but it actually is not what the LP is advertising. It seems that we | ||
| 62 | * can detect this using a magic bit in the WOL bank (reg 12 - bit 12). | ||
| 63 | * If this particular bit is not set when aneg is reported as done, | ||
| 64 | * it means MII_LPA is likely to be wrong. | ||
| 65 | * | ||
| 66 | * In both cases, forcing a restart of the aneg process solves the problem. | ||
| 67 | * When this failure happens, the first retry is usually successful but, | ||
| 68 | * in some cases, it may take up to 6 retries to get a decent result. | ||
| 69 | */ | ||
| 70 | static int meson_gxl_read_status(struct phy_device *phydev) | ||
| 71 | { | ||
| 72 | int ret, wol, lpa, exp; | ||
| 73 | |||
| 74 | if (phydev->autoneg == AUTONEG_ENABLE) { | ||
| 75 | ret = genphy_aneg_done(phydev); | ||
| 76 | if (ret < 0) | ||
| 77 | return ret; | ||
| 78 | else if (!ret) | ||
| 79 | goto read_status_continue; | ||
| 80 | |||
| 81 | /* Need to access WOL bank, make sure the access is open */ | ||
| 82 | ret = phy_write(phydev, 0x14, 0x0000); | ||
| 83 | if (ret) | ||
| 84 | return ret; | ||
| 85 | ret = phy_write(phydev, 0x14, 0x0400); | ||
| 86 | if (ret) | ||
| 87 | return ret; | ||
| 88 | ret = phy_write(phydev, 0x14, 0x0000); | ||
| 89 | if (ret) | ||
| 90 | return ret; | ||
| 91 | ret = phy_write(phydev, 0x14, 0x0400); | ||
| 92 | if (ret) | ||
| 93 | return ret; | ||
| 94 | |||
| 95 | /* Request LPI_STATUS WOL register */ | ||
| 96 | ret = phy_write(phydev, 0x14, 0x8D80); | ||
| 97 | if (ret) | ||
| 98 | return ret; | ||
| 99 | |||
| 100 | /* Read LPI_STATUS value */ | ||
| 101 | wol = phy_read(phydev, 0x15); | ||
| 102 | if (wol < 0) | ||
| 103 | return wol; | ||
| 104 | |||
| 105 | lpa = phy_read(phydev, MII_LPA); | ||
| 106 | if (lpa < 0) | ||
| 107 | return lpa; | ||
| 108 | |||
| 109 | exp = phy_read(phydev, MII_EXPANSION); | ||
| 110 | if (exp < 0) | ||
| 111 | return exp; | ||
| 112 | |||
| 113 | if (!(wol & BIT(12)) || | ||
| 114 | ((exp & EXPANSION_NWAY) && !(lpa & LPA_LPACK))) { | ||
| 115 | /* Looks like aneg failed after all */ | ||
| 116 | phydev_dbg(phydev, "LPA corruption - aneg restart\n"); | ||
| 117 | return genphy_restart_aneg(phydev); | ||
| 118 | } | ||
| 119 | } | ||
| 120 | |||
| 121 | read_status_continue: | ||
| 122 | return genphy_read_status(phydev); | ||
| 123 | } | ||
| 124 | |||
| 53 | static struct phy_driver meson_gxl_phy[] = { | 125 | static struct phy_driver meson_gxl_phy[] = { |
| 54 | { | 126 | { |
| 55 | .phy_id = 0x01814400, | 127 | .phy_id = 0x01814400, |
| @@ -60,7 +132,7 @@ static struct phy_driver meson_gxl_phy[] = { | |||
| 60 | .config_init = meson_gxl_config_init, | 132 | .config_init = meson_gxl_config_init, |
| 61 | .config_aneg = genphy_config_aneg, | 133 | .config_aneg = genphy_config_aneg, |
| 62 | .aneg_done = genphy_aneg_done, | 134 | .aneg_done = genphy_aneg_done, |
| 63 | .read_status = genphy_read_status, | 135 | .read_status = meson_gxl_read_status, |
| 64 | .suspend = genphy_suspend, | 136 | .suspend = genphy_suspend, |
| 65 | .resume = genphy_resume, | 137 | .resume = genphy_resume, |
| 66 | }, | 138 | }, |
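
The heart of meson_gxl_read_status() is a two-part plausibility test on the aneg result. Factored out below with the standard mii.h values (EXPANSION_NWAY is 0x0001, LPA_LPACK is 0x4000); bit 12 of the WOL-bank LPI_STATUS register is the driver's own magic bit, taken on trust from the comment above:

    #define EXPANSION_NWAY 0x0001   /* link partner can do aneg */
    #define LPA_LPACK      0x4000   /* link partner acked our code word */

    /* Returns nonzero when MII_LPA cannot be trusted and aneg
     * should be restarted, mirroring the checks in the hunk above. */
    static int lpa_looks_corrupted(int wol, int exp, int lpa)
    {
        return !(wol & (1 << 12)) ||
               ((exp & EXPANSION_NWAY) && !(lpa & LPA_LPACK));
    }

    int main(void)
    {
        /* late failure: magic bit clear despite a plausible LPA */
        return !lpa_looks_corrupted(0x0000, EXPANSION_NWAY, LPA_LPACK);
    }
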
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 2b1e67bc1e73..ed10d1fc8f59 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c | |||
| @@ -828,7 +828,6 @@ EXPORT_SYMBOL(phy_stop); | |||
| 828 | */ | 828 | */ |
| 829 | void phy_start(struct phy_device *phydev) | 829 | void phy_start(struct phy_device *phydev) |
| 830 | { | 830 | { |
| 831 | bool do_resume = false; | ||
| 832 | int err = 0; | 831 | int err = 0; |
| 833 | 832 | ||
| 834 | mutex_lock(&phydev->lock); | 833 | mutex_lock(&phydev->lock); |
| @@ -841,6 +840,9 @@ void phy_start(struct phy_device *phydev) | |||
| 841 | phydev->state = PHY_UP; | 840 | phydev->state = PHY_UP; |
| 842 | break; | 841 | break; |
| 843 | case PHY_HALTED: | 842 | case PHY_HALTED: |
| 843 | /* if phy was suspended, bring the physical link up again */ | ||
| 844 | phy_resume(phydev); | ||
| 845 | |||
| 844 | /* make sure interrupts are re-enabled for the PHY */ | 846 | /* make sure interrupts are re-enabled for the PHY */ |
| 845 | if (phydev->irq != PHY_POLL) { | 847 | if (phydev->irq != PHY_POLL) { |
| 846 | err = phy_enable_interrupts(phydev); | 848 | err = phy_enable_interrupts(phydev); |
| @@ -849,17 +851,12 @@ void phy_start(struct phy_device *phydev) | |||
| 849 | } | 851 | } |
| 850 | 852 | ||
| 851 | phydev->state = PHY_RESUMING; | 853 | phydev->state = PHY_RESUMING; |
| 852 | do_resume = true; | ||
| 853 | break; | 854 | break; |
| 854 | default: | 855 | default: |
| 855 | break; | 856 | break; |
| 856 | } | 857 | } |
| 857 | mutex_unlock(&phydev->lock); | 858 | mutex_unlock(&phydev->lock); |
| 858 | 859 | ||
| 859 | /* if phy was suspended, bring the physical link up again */ | ||
| 860 | if (do_resume) | ||
| 861 | phy_resume(phydev); | ||
| 862 | |||
| 863 | phy_trigger_machine(phydev, true); | 860 | phy_trigger_machine(phydev, true); |
| 864 | } | 861 | } |
| 865 | EXPORT_SYMBOL(phy_start); | 862 | EXPORT_SYMBOL(phy_start); |
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 67f25ac29025..b15b31ca2618 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c | |||
| @@ -135,7 +135,9 @@ static int mdio_bus_phy_resume(struct device *dev) | |||
| 135 | if (!mdio_bus_phy_may_suspend(phydev)) | 135 | if (!mdio_bus_phy_may_suspend(phydev)) |
| 136 | goto no_resume; | 136 | goto no_resume; |
| 137 | 137 | ||
| 138 | mutex_lock(&phydev->lock); | ||
| 138 | ret = phy_resume(phydev); | 139 | ret = phy_resume(phydev); |
| 140 | mutex_unlock(&phydev->lock); | ||
| 139 | if (ret < 0) | 141 | if (ret < 0) |
| 140 | return ret; | 142 | return ret; |
| 141 | 143 | ||
| @@ -1026,7 +1028,9 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, | |||
| 1026 | if (err) | 1028 | if (err) |
| 1027 | goto error; | 1029 | goto error; |
| 1028 | 1030 | ||
| 1031 | mutex_lock(&phydev->lock); | ||
| 1029 | phy_resume(phydev); | 1032 | phy_resume(phydev); |
| 1033 | mutex_unlock(&phydev->lock); | ||
| 1030 | phy_led_triggers_register(phydev); | 1034 | phy_led_triggers_register(phydev); |
| 1031 | 1035 | ||
| 1032 | return err; | 1036 | return err; |
| @@ -1157,6 +1161,8 @@ int phy_resume(struct phy_device *phydev) | |||
| 1157 | struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver); | 1161 | struct phy_driver *phydrv = to_phy_driver(phydev->mdio.dev.driver); |
| 1158 | int ret = 0; | 1162 | int ret = 0; |
| 1159 | 1163 | ||
| 1164 | WARN_ON(!mutex_is_locked(&phydev->lock)); | ||
| 1165 | |||
| 1160 | if (phydev->drv && phydrv->resume) | 1166 | if (phydev->drv && phydrv->resume) |
| 1161 | ret = phydrv->resume(phydev); | 1167 | ret = phydrv->resume(phydev); |
| 1162 | 1168 | ||
| @@ -1639,13 +1645,9 @@ int genphy_resume(struct phy_device *phydev) | |||
| 1639 | { | 1645 | { |
| 1640 | int value; | 1646 | int value; |
| 1641 | 1647 | ||
| 1642 | mutex_lock(&phydev->lock); | ||
| 1643 | |||
| 1644 | value = phy_read(phydev, MII_BMCR); | 1648 | value = phy_read(phydev, MII_BMCR); |
| 1645 | phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN); | 1649 | phy_write(phydev, MII_BMCR, value & ~BMCR_PDOWN); |
| 1646 | 1650 | ||
| 1647 | mutex_unlock(&phydev->lock); | ||
| 1648 | |||
| 1649 | return 0; | 1651 | return 0; |
| 1650 | } | 1652 | } |
| 1651 | EXPORT_SYMBOL(genphy_resume); | 1653 | EXPORT_SYMBOL(genphy_resume); |
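
The phy_resume() changes convert an internal lock into a caller-held contract: the resume helpers stop taking phydev->lock, every caller takes it instead, and the WARN_ON(!mutex_is_locked(...)) documents the invariant at the entry point. A compact user-space rendering of the same contract; the shadow flag approximates mutex_is_locked(), which has no portable pthread equivalent:

    #include <assert.h>
    #include <pthread.h>

    static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
    static int dev_lock_held;   /* debug shadow of mutex_is_locked() */

    static void device_resume_locked(void)
    {
        assert(dev_lock_held);  /* mirrors WARN_ON(!mutex_is_locked()) */
        /* ... poke hardware, safely serialized by the caller ... */
    }

    int main(void)
    {
        pthread_mutex_lock(&dev_lock);
        dev_lock_held = 1;
        device_resume_locked(); /* callee never re-takes the lock */
        dev_lock_held = 0;
        pthread_mutex_unlock(&dev_lock);
        return 0;
    }
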
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 304ec6555cd8..3000ddd1c7e2 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
| @@ -1204,12 +1204,14 @@ static const struct usb_device_id products[] = { | |||
| 1204 | {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */ | 1204 | {QMI_FIXED_INTF(0x1199, 0x9079, 10)}, /* Sierra Wireless EM74xx */ |
| 1205 | {QMI_FIXED_INTF(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */ | 1205 | {QMI_FIXED_INTF(0x1199, 0x907b, 8)}, /* Sierra Wireless EM74xx */ |
| 1206 | {QMI_FIXED_INTF(0x1199, 0x907b, 10)}, /* Sierra Wireless EM74xx */ | 1206 | {QMI_FIXED_INTF(0x1199, 0x907b, 10)}, /* Sierra Wireless EM74xx */ |
| 1207 | {QMI_FIXED_INTF(0x1199, 0x9091, 8)}, /* Sierra Wireless EM7565 */ | ||
| 1207 | {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ | 1208 | {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ |
| 1208 | {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ | 1209 | {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ |
| 1209 | {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ | 1210 | {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ |
| 1210 | {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ | 1211 | {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ |
| 1211 | {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ | 1212 | {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ |
| 1212 | {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ | 1213 | {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */ |
| 1214 | {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */ | ||
| 1213 | {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ | 1215 | {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ |
| 1214 | {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */ | 1216 | {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */ |
| 1215 | {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */ | 1217 | {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */ |
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c index 98258583abb0..3481e69738b5 100644 --- a/drivers/of/of_mdio.c +++ b/drivers/of/of_mdio.c | |||
| @@ -81,6 +81,7 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, | |||
| 81 | * can be looked up later */ | 81 | * can be looked up later */ |
| 82 | of_node_get(child); | 82 | of_node_get(child); |
| 83 | phy->mdio.dev.of_node = child; | 83 | phy->mdio.dev.of_node = child; |
| 84 | phy->mdio.dev.fwnode = of_fwnode_handle(child); | ||
| 84 | 85 | ||
| 85 | /* All data is now stored in the phy struct; | 86 | /* All data is now stored in the phy struct; |
| 86 | * register it */ | 87 | * register it */ |
| @@ -111,6 +112,7 @@ static int of_mdiobus_register_device(struct mii_bus *mdio, | |||
| 111 | */ | 112 | */ |
| 112 | of_node_get(child); | 113 | of_node_get(child); |
| 113 | mdiodev->dev.of_node = child; | 114 | mdiodev->dev.of_node = child; |
| 115 | mdiodev->dev.fwnode = of_fwnode_handle(child); | ||
| 114 | 116 | ||
| 115 | /* All data is now stored in the mdiodev struct; register it. */ | 117 | /* All data is now stored in the mdiodev struct; register it. */ |
| 116 | rc = mdio_device_register(mdiodev); | 118 | rc = mdio_device_register(mdiodev); |
| @@ -206,6 +208,7 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) | |||
| 206 | mdio->phy_mask = ~0; | 208 | mdio->phy_mask = ~0; |
| 207 | 209 | ||
| 208 | mdio->dev.of_node = np; | 210 | mdio->dev.of_node = np; |
| 211 | mdio->dev.fwnode = of_fwnode_handle(np); | ||
| 209 | 212 | ||
| 210 | /* Get bus level PHY reset GPIO details */ | 213 | /* Get bus level PHY reset GPIO details */ |
| 211 | mdio->reset_delay_us = DEFAULT_GPIO_RESET_DELAY; | 214 | mdio->reset_delay_us = DEFAULT_GPIO_RESET_DELAY; |
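
Three spots in of_mdio.c (plus the mdio_bus.c hunk earlier) now set dev.fwnode whenever dev.of_node is assigned, so fwnode-based lookups see the DT node too. The repeated two-line pattern suggests a helper; here is a sketch of one, with opaque stand-in types and a purely illustrative cast where the kernel would use of_fwnode_handle():

    struct device_node;
    struct fwnode_handle;

    struct device {
        struct device_node   *of_node;
        struct fwnode_handle *fwnode;
    };

    static struct fwnode_handle *to_fwnode(struct device_node *np)
    {
        return (struct fwnode_handle *)np;  /* stand-in conversion */
    }

    static void device_set_node_stub(struct device *dev, struct device_node *np)
    {
        dev->of_node = np;
        dev->fwnode  = to_fwnode(np);   /* keep both handles in sync */
    }

    int main(void)
    {
        struct device dev = { 0 };

        device_set_node_stub(&dev, 0);
        return dev.of_node != (struct device_node *)dev.fwnode;
    }
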
diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c index 12796eccb2be..52ab3cb0a0bf 100644 --- a/drivers/pci/host/pcie-rcar.c +++ b/drivers/pci/host/pcie-rcar.c | |||
| @@ -1128,12 +1128,12 @@ static int rcar_pcie_probe(struct platform_device *pdev) | |||
| 1128 | err = rcar_pcie_get_resources(pcie); | 1128 | err = rcar_pcie_get_resources(pcie); |
| 1129 | if (err < 0) { | 1129 | if (err < 0) { |
| 1130 | dev_err(dev, "failed to request resources: %d\n", err); | 1130 | dev_err(dev, "failed to request resources: %d\n", err); |
| 1131 | goto err_free_bridge; | 1131 | goto err_free_resource_list; |
| 1132 | } | 1132 | } |
| 1133 | 1133 | ||
| 1134 | err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node); | 1134 | err = rcar_pcie_parse_map_dma_ranges(pcie, dev->of_node); |
| 1135 | if (err) | 1135 | if (err) |
| 1136 | goto err_free_bridge; | 1136 | goto err_free_resource_list; |
| 1137 | 1137 | ||
| 1138 | pm_runtime_enable(dev); | 1138 | pm_runtime_enable(dev); |
| 1139 | err = pm_runtime_get_sync(dev); | 1139 | err = pm_runtime_get_sync(dev); |
| @@ -1176,9 +1176,9 @@ err_pm_put: | |||
| 1176 | err_pm_disable: | 1176 | err_pm_disable: |
| 1177 | pm_runtime_disable(dev); | 1177 | pm_runtime_disable(dev); |
| 1178 | 1178 | ||
| 1179 | err_free_bridge: | 1179 | err_free_resource_list: |
| 1180 | pci_free_host_bridge(bridge); | ||
| 1181 | pci_free_resource_list(&pcie->resources); | 1180 | pci_free_resource_list(&pcie->resources); |
| 1181 | pci_free_host_bridge(bridge); | ||
| 1182 | 1182 | ||
| 1183 | return err; | 1183 | return err; |
| 1184 | } | 1184 | } |
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 7f47bb72bf30..945099d49f8f 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c | |||
| @@ -999,7 +999,7 @@ static int pci_pm_thaw_noirq(struct device *dev) | |||
| 999 | * the subsequent "thaw" callbacks for the device. | 999 | * the subsequent "thaw" callbacks for the device. |
| 1000 | */ | 1000 | */ |
| 1001 | if (dev_pm_smart_suspend_and_suspended(dev)) { | 1001 | if (dev_pm_smart_suspend_and_suspended(dev)) { |
| 1002 | dev->power.direct_complete = true; | 1002 | dev_pm_skip_next_resume_phases(dev); |
| 1003 | return 0; | 1003 | return 0; |
| 1004 | } | 1004 | } |
| 1005 | 1005 | ||
diff --git a/drivers/platform/x86/asus-wireless.c b/drivers/platform/x86/asus-wireless.c index f3796164329e..d4aeac3477f5 100644 --- a/drivers/platform/x86/asus-wireless.c +++ b/drivers/platform/x86/asus-wireless.c | |||
| @@ -118,6 +118,7 @@ static void asus_wireless_notify(struct acpi_device *adev, u32 event) | |||
| 118 | return; | 118 | return; |
| 119 | } | 119 | } |
| 120 | input_report_key(data->idev, KEY_RFKILL, 1); | 120 | input_report_key(data->idev, KEY_RFKILL, 1); |
| 121 | input_sync(data->idev); | ||
| 121 | input_report_key(data->idev, KEY_RFKILL, 0); | 122 | input_report_key(data->idev, KEY_RFKILL, 0); |
| 122 | input_sync(data->idev); | 123 | input_sync(data->idev); |
| 123 | } | 124 | } |
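
The asus-wireless fix inserts input_sync() between the press and the release: EV_SYN ends an event packet, and without the boundary the back-to-back 1 and 0 writes to the same key can be merged so the press never reaches userspace. A print-based model of the corrected sequence (KEY_RFKILL is code 247 in the input event tables):

    #include <stdio.h>

    #define KEY_RFKILL 247

    static void report_key(int code, int value)
    {
        printf("EV_KEY code=%d value=%d\n", code, value);
    }

    static void sync_events(void)   /* stands in for input_sync() */
    {
        printf("EV_SYN -- packet boundary\n");
    }

    int main(void)
    {
        report_key(KEY_RFKILL, 1);
        sync_events();      /* flush the press as its own packet ... */
        report_key(KEY_RFKILL, 0);
        sync_events();      /* ... so the release is a second event */
        return 0;
    }
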
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index bf897b1832b1..cd4725e7e0b5 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c | |||
| @@ -37,6 +37,7 @@ | |||
| 37 | 37 | ||
| 38 | struct quirk_entry { | 38 | struct quirk_entry { |
| 39 | u8 touchpad_led; | 39 | u8 touchpad_led; |
| 40 | u8 kbd_led_levels_off_1; | ||
| 40 | 41 | ||
| 41 | int needs_kbd_timeouts; | 42 | int needs_kbd_timeouts; |
| 42 | /* | 43 | /* |
| @@ -67,6 +68,10 @@ static struct quirk_entry quirk_dell_xps13_9333 = { | |||
| 67 | .kbd_timeouts = { 0, 5, 15, 60, 5 * 60, 15 * 60, -1 }, | 68 | .kbd_timeouts = { 0, 5, 15, 60, 5 * 60, 15 * 60, -1 }, |
| 68 | }; | 69 | }; |
| 69 | 70 | ||
| 71 | static struct quirk_entry quirk_dell_latitude_e6410 = { | ||
| 72 | .kbd_led_levels_off_1 = 1, | ||
| 73 | }; | ||
| 74 | |||
| 70 | static struct platform_driver platform_driver = { | 75 | static struct platform_driver platform_driver = { |
| 71 | .driver = { | 76 | .driver = { |
| 72 | .name = "dell-laptop", | 77 | .name = "dell-laptop", |
| @@ -269,6 +274,15 @@ static const struct dmi_system_id dell_quirks[] __initconst = { | |||
| 269 | }, | 274 | }, |
| 270 | .driver_data = &quirk_dell_xps13_9333, | 275 | .driver_data = &quirk_dell_xps13_9333, |
| 271 | }, | 276 | }, |
| 277 | { | ||
| 278 | .callback = dmi_matched, | ||
| 279 | .ident = "Dell Latitude E6410", | ||
| 280 | .matches = { | ||
| 281 | DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), | ||
| 282 | DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6410"), | ||
| 283 | }, | ||
| 284 | .driver_data = &quirk_dell_latitude_e6410, | ||
| 285 | }, | ||
| 272 | { } | 286 | { } |
| 273 | }; | 287 | }; |
| 274 | 288 | ||
| @@ -1149,6 +1163,9 @@ static int kbd_get_info(struct kbd_info *info) | |||
| 1149 | units = (buffer->output[2] >> 8) & 0xFF; | 1163 | units = (buffer->output[2] >> 8) & 0xFF; |
| 1150 | info->levels = (buffer->output[2] >> 16) & 0xFF; | 1164 | info->levels = (buffer->output[2] >> 16) & 0xFF; |
| 1151 | 1165 | ||
| 1166 | if (quirks && quirks->kbd_led_levels_off_1 && info->levels) | ||
| 1167 | info->levels--; | ||
| 1168 | |||
| 1152 | if (units & BIT(0)) | 1169 | if (units & BIT(0)) |
| 1153 | info->seconds = (buffer->output[3] >> 0) & 0xFF; | 1170 | info->seconds = (buffer->output[3] >> 0) & 0xFF; |
| 1154 | if (units & BIT(1)) | 1171 | if (units & BIT(1)) |
diff --git a/drivers/platform/x86/dell-wmi.c b/drivers/platform/x86/dell-wmi.c index 39d2f4518483..fb25b20df316 100644 --- a/drivers/platform/x86/dell-wmi.c +++ b/drivers/platform/x86/dell-wmi.c | |||
| @@ -639,6 +639,8 @@ static int dell_wmi_events_set_enabled(bool enable) | |||
| 639 | int ret; | 639 | int ret; |
| 640 | 640 | ||
| 641 | buffer = kzalloc(sizeof(struct calling_interface_buffer), GFP_KERNEL); | 641 | buffer = kzalloc(sizeof(struct calling_interface_buffer), GFP_KERNEL); |
| 642 | if (!buffer) | ||
| 643 | return -ENOMEM; | ||
| 642 | buffer->cmd_class = CLASS_INFO; | 644 | buffer->cmd_class = CLASS_INFO; |
| 643 | buffer->cmd_select = SELECT_APP_REGISTRATION; | 645 | buffer->cmd_select = SELECT_APP_REGISTRATION; |
| 644 | buffer->input[0] = 0x10000; | 646 | buffer->input[0] = 0x10000; |
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 15015a24f8ad..badf42acbf95 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h | |||
| @@ -565,9 +565,9 @@ enum qeth_cq { | |||
| 565 | }; | 565 | }; |
| 566 | 566 | ||
| 567 | struct qeth_ipato { | 567 | struct qeth_ipato { |
| 568 | int enabled; | 568 | bool enabled; |
| 569 | int invert4; | 569 | bool invert4; |
| 570 | int invert6; | 570 | bool invert6; |
| 571 | struct list_head entries; | 571 | struct list_head entries; |
| 572 | }; | 572 | }; |
| 573 | 573 | ||
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 430e3214f7e2..6c815207f4f5 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c | |||
| @@ -1480,9 +1480,9 @@ static int qeth_setup_card(struct qeth_card *card) | |||
| 1480 | qeth_set_intial_options(card); | 1480 | qeth_set_intial_options(card); |
| 1481 | /* IP address takeover */ | 1481 | /* IP address takeover */ |
| 1482 | INIT_LIST_HEAD(&card->ipato.entries); | 1482 | INIT_LIST_HEAD(&card->ipato.entries); |
| 1483 | card->ipato.enabled = 0; | 1483 | card->ipato.enabled = false; |
| 1484 | card->ipato.invert4 = 0; | 1484 | card->ipato.invert4 = false; |
| 1485 | card->ipato.invert6 = 0; | 1485 | card->ipato.invert6 = false; |
| 1486 | /* init QDIO stuff */ | 1486 | /* init QDIO stuff */ |
| 1487 | qeth_init_qdio_info(card); | 1487 | qeth_init_qdio_info(card); |
| 1488 | INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work); | 1488 | INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work); |
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h index 194ae9b577cc..e5833837b799 100644 --- a/drivers/s390/net/qeth_l3.h +++ b/drivers/s390/net/qeth_l3.h | |||
| @@ -82,7 +82,7 @@ void qeth_l3_del_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *); | |||
| 82 | int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *); | 82 | int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *); |
| 83 | void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions, | 83 | void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions, |
| 84 | const u8 *); | 84 | const u8 *); |
| 85 | int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *, struct qeth_ipaddr *); | 85 | void qeth_l3_update_ipato(struct qeth_card *card); |
| 86 | struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions); | 86 | struct qeth_ipaddr *qeth_l3_get_addr_buffer(enum qeth_prot_versions); |
| 87 | int qeth_l3_add_ip(struct qeth_card *, struct qeth_ipaddr *); | 87 | int qeth_l3_add_ip(struct qeth_card *, struct qeth_ipaddr *); |
| 88 | int qeth_l3_delete_ip(struct qeth_card *, struct qeth_ipaddr *); | 88 | int qeth_l3_delete_ip(struct qeth_card *, struct qeth_ipaddr *); |
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 6a73894b0cb5..ef0961e18686 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c | |||
| @@ -164,8 +164,8 @@ static void qeth_l3_convert_addr_to_bits(u8 *addr, u8 *bits, int len) | |||
| 164 | } | 164 | } |
| 165 | } | 165 | } |
| 166 | 166 | ||
| 167 | int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card, | 167 | static bool qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card, |
| 168 | struct qeth_ipaddr *addr) | 168 | struct qeth_ipaddr *addr) |
| 169 | { | 169 | { |
| 170 | struct qeth_ipato_entry *ipatoe; | 170 | struct qeth_ipato_entry *ipatoe; |
| 171 | u8 addr_bits[128] = {0, }; | 171 | u8 addr_bits[128] = {0, }; |
| @@ -174,6 +174,8 @@ int qeth_l3_is_addr_covered_by_ipato(struct qeth_card *card, | |||
| 174 | 174 | ||
| 175 | if (!card->ipato.enabled) | 175 | if (!card->ipato.enabled) |
| 176 | return 0; | 176 | return 0; |
| 177 | if (addr->type != QETH_IP_TYPE_NORMAL) | ||
| 178 | return 0; | ||
| 177 | 179 | ||
| 178 | qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits, | 180 | qeth_l3_convert_addr_to_bits((u8 *) &addr->u, addr_bits, |
| 179 | (addr->proto == QETH_PROT_IPV4)? 4:16); | 181 | (addr->proto == QETH_PROT_IPV4)? 4:16); |
| @@ -290,8 +292,7 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) | |||
| 290 | memcpy(addr, tmp_addr, sizeof(struct qeth_ipaddr)); | 292 | memcpy(addr, tmp_addr, sizeof(struct qeth_ipaddr)); |
| 291 | addr->ref_counter = 1; | 293 | addr->ref_counter = 1; |
| 292 | 294 | ||
| 293 | if (addr->type == QETH_IP_TYPE_NORMAL && | 295 | if (qeth_l3_is_addr_covered_by_ipato(card, addr)) { |
| 294 | qeth_l3_is_addr_covered_by_ipato(card, addr)) { | ||
| 295 | QETH_CARD_TEXT(card, 2, "tkovaddr"); | 296 | QETH_CARD_TEXT(card, 2, "tkovaddr"); |
| 296 | addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG; | 297 | addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG; |
| 297 | } | 298 | } |
| @@ -605,6 +606,27 @@ int qeth_l3_setrouting_v6(struct qeth_card *card) | |||
| 605 | /* | 606 | /* |
| 606 | * IP address takeover related functions | 607 | * IP address takeover related functions |
| 607 | */ | 608 | */ |
| 609 | |||
| 610 | /** | ||
| 611 | * qeth_l3_update_ipato() - Update 'takeover' property, for all NORMAL IPs. | ||
| 612 | * | ||
| 613 | * Caller must hold ip_lock. | ||
| 614 | */ | ||
| 615 | void qeth_l3_update_ipato(struct qeth_card *card) | ||
| 616 | { | ||
| 617 | struct qeth_ipaddr *addr; | ||
| 618 | unsigned int i; | ||
| 619 | |||
| 620 | hash_for_each(card->ip_htable, i, addr, hnode) { | ||
| 621 | if (addr->type != QETH_IP_TYPE_NORMAL) | ||
| 622 | continue; | ||
| 623 | if (qeth_l3_is_addr_covered_by_ipato(card, addr)) | ||
| 624 | addr->set_flags |= QETH_IPA_SETIP_TAKEOVER_FLAG; | ||
| 625 | else | ||
| 626 | addr->set_flags &= ~QETH_IPA_SETIP_TAKEOVER_FLAG; | ||
| 627 | } | ||
| 628 | } | ||
| 629 | |||
| 608 | static void qeth_l3_clear_ipato_list(struct qeth_card *card) | 630 | static void qeth_l3_clear_ipato_list(struct qeth_card *card) |
| 609 | { | 631 | { |
| 610 | struct qeth_ipato_entry *ipatoe, *tmp; | 632 | struct qeth_ipato_entry *ipatoe, *tmp; |
| @@ -616,6 +638,7 @@ static void qeth_l3_clear_ipato_list(struct qeth_card *card) | |||
| 616 | kfree(ipatoe); | 638 | kfree(ipatoe); |
| 617 | } | 639 | } |
| 618 | 640 | ||
| 641 | qeth_l3_update_ipato(card); | ||
| 619 | spin_unlock_bh(&card->ip_lock); | 642 | spin_unlock_bh(&card->ip_lock); |
| 620 | } | 643 | } |
| 621 | 644 | ||
| @@ -640,8 +663,10 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card, | |||
| 640 | } | 663 | } |
| 641 | } | 664 | } |
| 642 | 665 | ||
| 643 | if (!rc) | 666 | if (!rc) { |
| 644 | list_add_tail(&new->entry, &card->ipato.entries); | 667 | list_add_tail(&new->entry, &card->ipato.entries); |
| 668 | qeth_l3_update_ipato(card); | ||
| 669 | } | ||
| 645 | 670 | ||
| 646 | spin_unlock_bh(&card->ip_lock); | 671 | spin_unlock_bh(&card->ip_lock); |
| 647 | 672 | ||
| @@ -664,6 +689,7 @@ void qeth_l3_del_ipato_entry(struct qeth_card *card, | |||
| 664 | (proto == QETH_PROT_IPV4)? 4:16) && | 689 | (proto == QETH_PROT_IPV4)? 4:16) && |
| 665 | (ipatoe->mask_bits == mask_bits)) { | 690 | (ipatoe->mask_bits == mask_bits)) { |
| 666 | list_del(&ipatoe->entry); | 691 | list_del(&ipatoe->entry); |
| 692 | qeth_l3_update_ipato(card); | ||
| 667 | kfree(ipatoe); | 693 | kfree(ipatoe); |
| 668 | } | 694 | } |
| 669 | } | 695 | } |
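
The qeth hunks above replace per-call-site flag twiddling with one helper: every change to the IPATO configuration (enable flag, invert flags, entry add or remove) now ends with qeth_l3_update_ipato() recomputing the takeover flag of every NORMAL address while ip_lock is held, per its kdoc. A minimal user-space sketch of that "recompute all derived state after a config change" shape, with hypothetical types standing in for the qeth structures:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the qeth structures. */
struct addr { int id; bool takeover; };
struct cfg  { bool enabled; };

/* Placeholder coverage predicate; the real one matches address bits
 * against the IPATO entry list. */
static bool covered(const struct cfg *c, const struct addr *a)
{
	return c->enabled && (a->id % 2 == 0);
}

/* Analogue of qeth_l3_update_ipato(): derive every flag from the
 * current config instead of patching flags at each call site. */
static void update_all(const struct cfg *c, struct addr *v, int n)
{
	for (int i = 0; i < n; i++)
		v[i].takeover = covered(c, &v[i]);
}

int main(void)
{
	struct addr v[] = { { 1, false }, { 2, false }, { 3, false } };
	struct cfg c = { .enabled = true };

	update_all(&c, v, 3);	/* config changed, so recompute */
	for (int i = 0; i < 3; i++)
		printf("id=%d takeover=%d\n", v[i].id, v[i].takeover);
	return 0;
}
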
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index bd12fdf678be..6ea2b528a64e 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c | |||
| @@ -370,8 +370,8 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev, | |||
| 370 | struct device_attribute *attr, const char *buf, size_t count) | 370 | struct device_attribute *attr, const char *buf, size_t count) |
| 371 | { | 371 | { |
| 372 | struct qeth_card *card = dev_get_drvdata(dev); | 372 | struct qeth_card *card = dev_get_drvdata(dev); |
| 373 | struct qeth_ipaddr *addr; | 373 | bool enable; |
| 374 | int i, rc = 0; | 374 | int rc = 0; |
| 375 | 375 | ||
| 376 | if (!card) | 376 | if (!card) |
| 377 | return -EINVAL; | 377 | return -EINVAL; |
| @@ -384,25 +384,18 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev, | |||
| 384 | } | 384 | } |
| 385 | 385 | ||
| 386 | if (sysfs_streq(buf, "toggle")) { | 386 | if (sysfs_streq(buf, "toggle")) { |
| 387 | card->ipato.enabled = (card->ipato.enabled)? 0 : 1; | 387 | enable = !card->ipato.enabled; |
| 388 | } else if (sysfs_streq(buf, "1")) { | 388 | } else if (kstrtobool(buf, &enable)) { |
| 389 | card->ipato.enabled = 1; | ||
| 390 | hash_for_each(card->ip_htable, i, addr, hnode) { | ||
| 391 | if ((addr->type == QETH_IP_TYPE_NORMAL) && | ||
| 392 | qeth_l3_is_addr_covered_by_ipato(card, addr)) | ||
| 393 | addr->set_flags |= | ||
| 394 | QETH_IPA_SETIP_TAKEOVER_FLAG; | ||
| 395 | } | ||
| 396 | } else if (sysfs_streq(buf, "0")) { | ||
| 397 | card->ipato.enabled = 0; | ||
| 398 | hash_for_each(card->ip_htable, i, addr, hnode) { | ||
| 399 | if (addr->set_flags & | ||
| 400 | QETH_IPA_SETIP_TAKEOVER_FLAG) | ||
| 401 | addr->set_flags &= | ||
| 402 | ~QETH_IPA_SETIP_TAKEOVER_FLAG; | ||
| 403 | } | ||
| 404 | } else | ||
| 405 | rc = -EINVAL; | 389 | rc = -EINVAL; |
| 390 | goto out; | ||
| 391 | } | ||
| 392 | |||
| 393 | if (card->ipato.enabled != enable) { | ||
| 394 | card->ipato.enabled = enable; | ||
| 395 | spin_lock_bh(&card->ip_lock); | ||
| 396 | qeth_l3_update_ipato(card); | ||
| 397 | spin_unlock_bh(&card->ip_lock); | ||
| 398 | } | ||
| 406 | out: | 399 | out: |
| 407 | mutex_unlock(&card->conf_mutex); | 400 | mutex_unlock(&card->conf_mutex); |
| 408 | return rc ? rc : count; | 401 | return rc ? rc : count; |
| @@ -428,20 +421,27 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev, | |||
| 428 | const char *buf, size_t count) | 421 | const char *buf, size_t count) |
| 429 | { | 422 | { |
| 430 | struct qeth_card *card = dev_get_drvdata(dev); | 423 | struct qeth_card *card = dev_get_drvdata(dev); |
| 424 | bool invert; | ||
| 431 | int rc = 0; | 425 | int rc = 0; |
| 432 | 426 | ||
| 433 | if (!card) | 427 | if (!card) |
| 434 | return -EINVAL; | 428 | return -EINVAL; |
| 435 | 429 | ||
| 436 | mutex_lock(&card->conf_mutex); | 430 | mutex_lock(&card->conf_mutex); |
| 437 | if (sysfs_streq(buf, "toggle")) | 431 | if (sysfs_streq(buf, "toggle")) { |
| 438 | card->ipato.invert4 = (card->ipato.invert4)? 0 : 1; | 432 | invert = !card->ipato.invert4; |
| 439 | else if (sysfs_streq(buf, "1")) | 433 | } else if (kstrtobool(buf, &invert)) { |
| 440 | card->ipato.invert4 = 1; | ||
| 441 | else if (sysfs_streq(buf, "0")) | ||
| 442 | card->ipato.invert4 = 0; | ||
| 443 | else | ||
| 444 | rc = -EINVAL; | 434 | rc = -EINVAL; |
| 435 | goto out; | ||
| 436 | } | ||
| 437 | |||
| 438 | if (card->ipato.invert4 != invert) { | ||
| 439 | card->ipato.invert4 = invert; | ||
| 440 | spin_lock_bh(&card->ip_lock); | ||
| 441 | qeth_l3_update_ipato(card); | ||
| 442 | spin_unlock_bh(&card->ip_lock); | ||
| 443 | } | ||
| 444 | out: | ||
| 445 | mutex_unlock(&card->conf_mutex); | 445 | mutex_unlock(&card->conf_mutex); |
| 446 | return rc ? rc : count; | 446 | return rc ? rc : count; |
| 447 | } | 447 | } |
| @@ -607,20 +607,27 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev, | |||
| 607 | struct device_attribute *attr, const char *buf, size_t count) | 607 | struct device_attribute *attr, const char *buf, size_t count) |
| 608 | { | 608 | { |
| 609 | struct qeth_card *card = dev_get_drvdata(dev); | 609 | struct qeth_card *card = dev_get_drvdata(dev); |
| 610 | bool invert; | ||
| 610 | int rc = 0; | 611 | int rc = 0; |
| 611 | 612 | ||
| 612 | if (!card) | 613 | if (!card) |
| 613 | return -EINVAL; | 614 | return -EINVAL; |
| 614 | 615 | ||
| 615 | mutex_lock(&card->conf_mutex); | 616 | mutex_lock(&card->conf_mutex); |
| 616 | if (sysfs_streq(buf, "toggle")) | 617 | if (sysfs_streq(buf, "toggle")) { |
| 617 | card->ipato.invert6 = (card->ipato.invert6)? 0 : 1; | 618 | invert = !card->ipato.invert6; |
| 618 | else if (sysfs_streq(buf, "1")) | 619 | } else if (kstrtobool(buf, &invert)) { |
| 619 | card->ipato.invert6 = 1; | ||
| 620 | else if (sysfs_streq(buf, "0")) | ||
| 621 | card->ipato.invert6 = 0; | ||
| 622 | else | ||
| 623 | rc = -EINVAL; | 620 | rc = -EINVAL; |
| 621 | goto out; | ||
| 622 | } | ||
| 623 | |||
| 624 | if (card->ipato.invert6 != invert) { | ||
| 625 | card->ipato.invert6 = invert; | ||
| 626 | spin_lock_bh(&card->ip_lock); | ||
| 627 | qeth_l3_update_ipato(card); | ||
| 628 | spin_unlock_bh(&card->ip_lock); | ||
| 629 | } | ||
| 630 | out: | ||
| 624 | mutex_unlock(&card->conf_mutex); | 631 | mutex_unlock(&card->conf_mutex); |
| 625 | return rc ? rc : count; | 632 | return rc ? rc : count; |
| 626 | } | 633 | } |
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index bec9f3193f60..80a8cb26cdea 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c | |||
| @@ -2482,8 +2482,8 @@ int aac_command_thread(void *data) | |||
| 2482 | /* Synchronize our watches */ | 2482 | /* Synchronize our watches */ |
| 2483 | if (((NSEC_PER_SEC - (NSEC_PER_SEC / HZ)) > now.tv_nsec) | 2483 | if (((NSEC_PER_SEC - (NSEC_PER_SEC / HZ)) > now.tv_nsec) |
| 2484 | && (now.tv_nsec > (NSEC_PER_SEC / HZ))) | 2484 | && (now.tv_nsec > (NSEC_PER_SEC / HZ))) |
| 2485 | difference = (((NSEC_PER_SEC - now.tv_nsec) * HZ) | 2485 | difference = HZ + HZ / 2 - |
| 2486 | + NSEC_PER_SEC / 2) / NSEC_PER_SEC; | 2486 | now.tv_nsec / (NSEC_PER_SEC / HZ); |
| 2487 | else { | 2487 | else { |
| 2488 | if (now.tv_nsec > NSEC_PER_SEC / 2) | 2488 | if (now.tv_nsec > NSEC_PER_SEC / 2) |
| 2489 | ++now.tv_sec; | 2489 | ++now.tv_sec; |
| @@ -2507,6 +2507,10 @@ int aac_command_thread(void *data) | |||
| 2507 | if (kthread_should_stop()) | 2507 | if (kthread_should_stop()) |
| 2508 | break; | 2508 | break; |
| 2509 | 2509 | ||
| 2510 | /* | ||
| 2511 | * we probably want usleep_range() here instead of the | ||
| 2512 | * jiffies computation | ||
| 2513 | */ | ||
| 2510 | schedule_timeout(difference); | 2514 | schedule_timeout(difference); |
| 2511 | 2515 | ||
| 2512 | if (kthread_should_stop()) | 2516 | if (kthread_should_stop()) |
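
The rewritten aacraid difference above is simpler than it looks: NSEC_PER_SEC / HZ is the length of one jiffy in nanoseconds, so tv_nsec / (NSEC_PER_SEC / HZ) is the current fractional second expressed in jiffies, and subtracting it from HZ + HZ / 2 lands the wake-up about half a second past the next whole second. A standalone check of the arithmetic (the HZ value is just an assumption for illustration):

#include <stdio.h>

#define HZ		250L
#define NSEC_PER_SEC	1000000000L

int main(void)
{
	long samples[] = { 0L, 250000000L, 500000000L, 900000000L };

	for (int i = 0; i < 4; i++) {
		long tv_nsec = samples[i];
		long difference = HZ + HZ / 2 - tv_nsec / (NSEC_PER_SEC / HZ);

		/* fractional part of now + difference is always ~0.5 s */
		printf("tv_nsec=%9ld -> %3ld jiffies (%.3f s)\n",
		       tv_nsec, difference, (double)difference / HZ);
	}
	return 0;
}
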
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c index 72ca2a2e08e2..b2fa195adc7a 100644 --- a/drivers/scsi/bfa/bfad_bsg.c +++ b/drivers/scsi/bfa/bfad_bsg.c | |||
| @@ -3135,7 +3135,8 @@ bfad_im_bsg_vendor_request(struct bsg_job *job) | |||
| 3135 | struct fc_bsg_request *bsg_request = job->request; | 3135 | struct fc_bsg_request *bsg_request = job->request; |
| 3136 | struct fc_bsg_reply *bsg_reply = job->reply; | 3136 | struct fc_bsg_reply *bsg_reply = job->reply; |
| 3137 | uint32_t vendor_cmd = bsg_request->rqst_data.h_vendor.vendor_cmd[0]; | 3137 | uint32_t vendor_cmd = bsg_request->rqst_data.h_vendor.vendor_cmd[0]; |
| 3138 | struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job)); | 3138 | struct Scsi_Host *shost = fc_bsg_to_shost(job); |
| 3139 | struct bfad_im_port_s *im_port = bfad_get_im_port(shost); | ||
| 3139 | struct bfad_s *bfad = im_port->bfad; | 3140 | struct bfad_s *bfad = im_port->bfad; |
| 3140 | void *payload_kbuf; | 3141 | void *payload_kbuf; |
| 3141 | int rc = -EINVAL; | 3142 | int rc = -EINVAL; |
| @@ -3350,7 +3351,8 @@ int | |||
| 3350 | bfad_im_bsg_els_ct_request(struct bsg_job *job) | 3351 | bfad_im_bsg_els_ct_request(struct bsg_job *job) |
| 3351 | { | 3352 | { |
| 3352 | struct bfa_bsg_data *bsg_data; | 3353 | struct bfa_bsg_data *bsg_data; |
| 3353 | struct bfad_im_port_s *im_port = shost_priv(fc_bsg_to_shost(job)); | 3354 | struct Scsi_Host *shost = fc_bsg_to_shost(job); |
| 3355 | struct bfad_im_port_s *im_port = bfad_get_im_port(shost); | ||
| 3354 | struct bfad_s *bfad = im_port->bfad; | 3356 | struct bfad_s *bfad = im_port->bfad; |
| 3355 | bfa_bsg_fcpt_t *bsg_fcpt; | 3357 | bfa_bsg_fcpt_t *bsg_fcpt; |
| 3356 | struct bfad_fcxp *drv_fcxp; | 3358 | struct bfad_fcxp *drv_fcxp; |
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c index 24e657a4ec80..c05d6e91e4bd 100644 --- a/drivers/scsi/bfa/bfad_im.c +++ b/drivers/scsi/bfa/bfad_im.c | |||
| @@ -546,6 +546,7 @@ int | |||
| 546 | bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port, | 546 | bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port, |
| 547 | struct device *dev) | 547 | struct device *dev) |
| 548 | { | 548 | { |
| 549 | struct bfad_im_port_pointer *im_portp; | ||
| 549 | int error = 1; | 550 | int error = 1; |
| 550 | 551 | ||
| 551 | mutex_lock(&bfad_mutex); | 552 | mutex_lock(&bfad_mutex); |
| @@ -564,7 +565,8 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port, | |||
| 564 | goto out_free_idr; | 565 | goto out_free_idr; |
| 565 | } | 566 | } |
| 566 | 567 | ||
| 567 | im_port->shost->hostdata[0] = (unsigned long)im_port; | 568 | im_portp = shost_priv(im_port->shost); |
| 569 | im_portp->p = im_port; | ||
| 568 | im_port->shost->unique_id = im_port->idr_id; | 570 | im_port->shost->unique_id = im_port->idr_id; |
| 569 | im_port->shost->this_id = -1; | 571 | im_port->shost->this_id = -1; |
| 570 | im_port->shost->max_id = MAX_FCP_TARGET; | 572 | im_port->shost->max_id = MAX_FCP_TARGET; |
| @@ -748,7 +750,7 @@ bfad_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad) | |||
| 748 | 750 | ||
| 749 | sht->sg_tablesize = bfad->cfg_data.io_max_sge; | 751 | sht->sg_tablesize = bfad->cfg_data.io_max_sge; |
| 750 | 752 | ||
| 751 | return scsi_host_alloc(sht, sizeof(unsigned long)); | 753 | return scsi_host_alloc(sht, sizeof(struct bfad_im_port_pointer)); |
| 752 | } | 754 | } |
| 753 | 755 | ||
| 754 | void | 756 | void |
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h index c81ec2a77ef5..06ce4ba2b7bc 100644 --- a/drivers/scsi/bfa/bfad_im.h +++ b/drivers/scsi/bfa/bfad_im.h | |||
| @@ -69,6 +69,16 @@ struct bfad_im_port_s { | |||
| 69 | struct fc_vport *fc_vport; | 69 | struct fc_vport *fc_vport; |
| 70 | }; | 70 | }; |
| 71 | 71 | ||
| 72 | struct bfad_im_port_pointer { | ||
| 73 | struct bfad_im_port_s *p; | ||
| 74 | }; | ||
| 75 | |||
| 76 | static inline struct bfad_im_port_s *bfad_get_im_port(struct Scsi_Host *host) | ||
| 77 | { | ||
| 78 | struct bfad_im_port_pointer *im_portp = shost_priv(host); | ||
| 79 | return im_portp->p; | ||
| 80 | } | ||
| 81 | |||
| 72 | enum bfad_itnim_state { | 82 | enum bfad_itnim_state { |
| 73 | ITNIM_STATE_NONE, | 83 | ITNIM_STATE_NONE, |
| 74 | ITNIM_STATE_ONLINE, | 84 | ITNIM_STATE_ONLINE, |
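
Instead of smuggling the im_port pointer through hostdata[0] with casts, the bfa patch above sizes the Scsi_Host private area as a real one-member struct and reads it back through bfad_get_im_port(). A user-space sketch of the same typed-private-area idea (allocation details are simplified; only the names mirror the patch):

#include <stdio.h>
#include <stdlib.h>

struct im_port { int id; };

/* Typed private area, as in struct bfad_im_port_pointer. */
struct im_port_pointer { struct im_port *p; };

struct host {
	int unique_id;
	unsigned long hostdata[];	/* like Scsi_Host::hostdata */
};

static struct host *host_alloc(size_t privsz)
{
	return calloc(1, sizeof(struct host) + privsz);
}

/* Analogue of bfad_get_im_port(): no raw unsigned long casts. */
static struct im_port *get_im_port(struct host *h)
{
	return ((struct im_port_pointer *)h->hostdata)->p;
}

int main(void)
{
	struct im_port port = { .id = 42 };
	struct host *h = host_alloc(sizeof(struct im_port_pointer));

	((struct im_port_pointer *)h->hostdata)->p = &port;	/* attach */
	printf("im_port id = %d\n", get_im_port(h)->id);
	free(h);
	return 0;
}
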
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c index 5da46052e179..21be672679fb 100644 --- a/drivers/scsi/libfc/fc_lport.c +++ b/drivers/scsi/libfc/fc_lport.c | |||
| @@ -904,10 +904,14 @@ static void fc_lport_recv_els_req(struct fc_lport *lport, | |||
| 904 | case ELS_FLOGI: | 904 | case ELS_FLOGI: |
| 905 | if (!lport->point_to_multipoint) | 905 | if (!lport->point_to_multipoint) |
| 906 | fc_lport_recv_flogi_req(lport, fp); | 906 | fc_lport_recv_flogi_req(lport, fp); |
| 907 | else | ||
| 908 | fc_rport_recv_req(lport, fp); | ||
| 907 | break; | 909 | break; |
| 908 | case ELS_LOGO: | 910 | case ELS_LOGO: |
| 909 | if (fc_frame_sid(fp) == FC_FID_FLOGI) | 911 | if (fc_frame_sid(fp) == FC_FID_FLOGI) |
| 910 | fc_lport_recv_logo_req(lport, fp); | 912 | fc_lport_recv_logo_req(lport, fp); |
| 913 | else | ||
| 914 | fc_rport_recv_req(lport, fp); | ||
| 911 | break; | 915 | break; |
| 912 | case ELS_RSCN: | 916 | case ELS_RSCN: |
| 913 | lport->tt.disc_recv_req(lport, fp); | 917 | lport->tt.disc_recv_req(lport, fp); |
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index ca1566237ae7..3183d63de4da 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c | |||
| @@ -2145,7 +2145,7 @@ void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, | |||
| 2145 | struct sas_rphy *rphy) | 2145 | struct sas_rphy *rphy) |
| 2146 | { | 2146 | { |
| 2147 | struct domain_device *dev; | 2147 | struct domain_device *dev; |
| 2148 | unsigned int reslen = 0; | 2148 | unsigned int rcvlen = 0; |
| 2149 | int ret = -EINVAL; | 2149 | int ret = -EINVAL; |
| 2150 | 2150 | ||
| 2151 | /* no rphy means no smp target support (ie aic94xx host) */ | 2151 | /* no rphy means no smp target support (ie aic94xx host) */ |
| @@ -2179,12 +2179,12 @@ void sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, | |||
| 2179 | 2179 | ||
| 2180 | ret = smp_execute_task_sg(dev, job->request_payload.sg_list, | 2180 | ret = smp_execute_task_sg(dev, job->request_payload.sg_list, |
| 2181 | job->reply_payload.sg_list); | 2181 | job->reply_payload.sg_list); |
| 2182 | if (ret > 0) { | 2182 | if (ret >= 0) { |
| 2183 | /* positive number is the untransferred residual */ | 2183 | /* bsg_job_done() requires the length received */ |
| 2184 | reslen = ret; | 2184 | rcvlen = job->reply_payload.payload_len - ret; |
| 2185 | ret = 0; | 2185 | ret = 0; |
| 2186 | } | 2186 | } |
| 2187 | 2187 | ||
| 2188 | out: | 2188 | out: |
| 2189 | bsg_job_done(job, ret, reslen); | 2189 | bsg_job_done(job, ret, rcvlen); |
| 2190 | } | 2190 | } |
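
The conversion above matters because smp_execute_task_sg() reports the untransferred residual while bsg_job_done() expects the number of bytes actually received; the fix also accepts ret == 0, a complete transfer, which the old "> 0" test reported as zero bytes received. The conversion in isolation:

#include <stdio.h>

/* residual from the transport -> received length for bsg_job_done() */
static unsigned int received_len(unsigned int payload_len,
				 unsigned int residual)
{
	return payload_len - residual;
}

int main(void)
{
	printf("%u\n", received_len(1024, 0));	/* full reply: 1024 */
	printf("%u\n", received_len(1024, 24));	/* short reply: 1000 */
	return 0;
}
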
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index 56faeb049b4a..87c08ff37ddd 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c | |||
| @@ -753,12 +753,12 @@ lpfc_rq_buf_free(struct lpfc_hba *phba, struct lpfc_dmabuf *mp) | |||
| 753 | drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys); | 753 | drqe.address_hi = putPaddrHigh(rqb_entry->dbuf.phys); |
| 754 | rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe); | 754 | rc = lpfc_sli4_rq_put(rqb_entry->hrq, rqb_entry->drq, &hrqe, &drqe); |
| 755 | if (rc < 0) { | 755 | if (rc < 0) { |
| 756 | (rqbp->rqb_free_buffer)(phba, rqb_entry); | ||
| 757 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 756 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
| 758 | "6409 Cannot post to RQ %d: %x %x\n", | 757 | "6409 Cannot post to RQ %d: %x %x\n", |
| 759 | rqb_entry->hrq->queue_id, | 758 | rqb_entry->hrq->queue_id, |
| 760 | rqb_entry->hrq->host_index, | 759 | rqb_entry->hrq->host_index, |
| 761 | rqb_entry->hrq->hba_index); | 760 | rqb_entry->hrq->hba_index); |
| 761 | (rqbp->rqb_free_buffer)(phba, rqb_entry); | ||
| 762 | } else { | 762 | } else { |
| 763 | list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list); | 763 | list_add_tail(&rqb_entry->hbuf.list, &rqbp->rqb_buffer_list); |
| 764 | rqbp->buffer_count++; | 764 | rqbp->buffer_count++; |
diff --git a/drivers/scsi/scsi_debugfs.c b/drivers/scsi/scsi_debugfs.c index 01f08c03f2c1..c3765d29fd3f 100644 --- a/drivers/scsi/scsi_debugfs.c +++ b/drivers/scsi/scsi_debugfs.c | |||
| @@ -8,9 +8,11 @@ void scsi_show_rq(struct seq_file *m, struct request *rq) | |||
| 8 | { | 8 | { |
| 9 | struct scsi_cmnd *cmd = container_of(scsi_req(rq), typeof(*cmd), req); | 9 | struct scsi_cmnd *cmd = container_of(scsi_req(rq), typeof(*cmd), req); |
| 10 | int msecs = jiffies_to_msecs(jiffies - cmd->jiffies_at_alloc); | 10 | int msecs = jiffies_to_msecs(jiffies - cmd->jiffies_at_alloc); |
| 11 | char buf[80]; | 11 | const u8 *const cdb = READ_ONCE(cmd->cmnd); |
| 12 | char buf[80] = "(?)"; | ||
| 12 | 13 | ||
| 13 | __scsi_format_command(buf, sizeof(buf), cmd->cmnd, cmd->cmd_len); | 14 | if (cdb) |
| 15 | __scsi_format_command(buf, sizeof(buf), cdb, cmd->cmd_len); | ||
| 14 | seq_printf(m, ", .cmd=%s, .retries=%d, allocated %d.%03d s ago", buf, | 16 | seq_printf(m, ", .cmd=%s, .retries=%d, allocated %d.%03d s ago", buf, |
| 15 | cmd->retries, msecs / 1000, msecs % 1000); | 17 | cmd->retries, msecs / 1000, msecs % 1000); |
| 16 | } | 18 | } |
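
The debugfs fix assumes cmd->cmnd can become NULL underneath it (sd_uninit_command, a few hunks below, does exactly that), so it takes one READ_ONCE() snapshot, tests the snapshot, and formats only from it; the "(?)" initializer covers the NULL case. A user-space sketch of the snapshot-then-check pattern, with a volatile access standing in for READ_ONCE():

#include <stdio.h>

/* Crude READ_ONCE() stand-in: force a single, non-cached load. */
#define READ_ONCE(x) (*(const char * volatile *)&(x))

static const char *shared_cmd;	/* may be cleared by another context */

static void show(void)
{
	const char *cmd = READ_ONCE(shared_cmd);	/* one snapshot */
	char buf[80] = "(?)";

	if (cmd)					/* test the snapshot... */
		snprintf(buf, sizeof(buf), "%s", cmd);	/* ...and use only it */
	printf(".cmd=%s\n", buf);
}

int main(void)
{
	shared_cmd = "READ(10)";
	show();			/* .cmd=READ(10) */
	shared_cmd = NULL;
	show();			/* .cmd=(?) */
	return 0;
}
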
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index 78d4aa8df675..449ef5adbb2b 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c | |||
| @@ -34,7 +34,6 @@ struct scsi_dev_info_list_table { | |||
| 34 | }; | 34 | }; |
| 35 | 35 | ||
| 36 | 36 | ||
| 37 | static const char spaces[] = " "; /* 16 of them */ | ||
| 38 | static blist_flags_t scsi_default_dev_flags; | 37 | static blist_flags_t scsi_default_dev_flags; |
| 39 | static LIST_HEAD(scsi_dev_info_list); | 38 | static LIST_HEAD(scsi_dev_info_list); |
| 40 | static char scsi_dev_flags[256]; | 39 | static char scsi_dev_flags[256]; |
| @@ -298,20 +297,13 @@ static void scsi_strcpy_devinfo(char *name, char *to, size_t to_length, | |||
| 298 | size_t from_length; | 297 | size_t from_length; |
| 299 | 298 | ||
| 300 | from_length = strlen(from); | 299 | from_length = strlen(from); |
| 301 | strncpy(to, from, min(to_length, from_length)); | 300 | /* This zero-pads the destination */ |
| 302 | if (from_length < to_length) { | 301 | strncpy(to, from, to_length); |
| 303 | if (compatible) { | 302 | if (from_length < to_length && !compatible) { |
| 304 | /* | 303 | /* |
| 305 | * NUL terminate the string if it is short. | 304 | * space pad the string if it is short. |
| 306 | */ | 305 | */ |
| 307 | to[from_length] = '\0'; | 306 | memset(&to[from_length], ' ', to_length - from_length); |
| 308 | } else { | ||
| 309 | /* | ||
| 310 | * space pad the string if it is short. | ||
| 311 | */ | ||
| 312 | strncpy(&to[from_length], spaces, | ||
| 313 | to_length - from_length); | ||
| 314 | } | ||
| 315 | } | 307 | } |
| 316 | if (from_length > to_length) | 308 | if (from_length > to_length) |
| 317 | printk(KERN_WARNING "%s: %s string '%s' is too long\n", | 309 | printk(KERN_WARNING "%s: %s string '%s' is too long\n", |
| @@ -458,7 +450,8 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor, | |||
| 458 | /* | 450 | /* |
| 459 | * vendor strings must be an exact match | 451 | * vendor strings must be an exact match |
| 460 | */ | 452 | */ |
| 461 | if (vmax != strlen(devinfo->vendor) || | 453 | if (vmax != strnlen(devinfo->vendor, |
| 454 | sizeof(devinfo->vendor)) || | ||
| 462 | memcmp(devinfo->vendor, vskip, vmax)) | 455 | memcmp(devinfo->vendor, vskip, vmax)) |
| 463 | continue; | 456 | continue; |
| 464 | 457 | ||
| @@ -466,7 +459,7 @@ static struct scsi_dev_info_list *scsi_dev_info_list_find(const char *vendor, | |||
| 466 | * @model specifies the full string, and | 459 | * @model specifies the full string, and |
| 467 | * must be larger or equal to devinfo->model | 460 | * must be larger or equal to devinfo->model |
| 468 | */ | 461 | */ |
| 469 | mlen = strlen(devinfo->model); | 462 | mlen = strnlen(devinfo->model, sizeof(devinfo->model)); |
| 470 | if (mmax < mlen || memcmp(devinfo->model, mskip, mlen)) | 463 | if (mmax < mlen || memcmp(devinfo->model, mskip, mlen)) |
| 471 | continue; | 464 | continue; |
| 472 | return devinfo; | 465 | return devinfo; |
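
Two C library behaviors make the scsi_devinfo rewrite above work, and both are easy to misremember: strncpy(to, from, n) zero-fills the tail of the destination whenever the source is shorter than n, which is why the explicit NUL-terminate branch was redundant, and strnlen() bounds its scan, so a vendor or model array filled to capacity without a terminator cannot be overrun. A quick demonstration:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char to[8];

	/* strncpy zero-pads the destination when the source is short */
	strncpy(to, "ab", sizeof(to));
	printf("tail is NUL: %d\n", to[2] == '\0' && to[7] == '\0');

	/* the non-"compatible" mode then space-pads over those NULs */
	size_t from_len = strlen("ab");
	memset(&to[from_len], ' ', sizeof(to) - from_len);
	printf("space-padded: [%.8s]\n", to);

	/* strnlen stops at the buffer bound even with no terminator */
	char model[4] = { 'H', 'D', 'D', 'X' };
	printf("strnlen: %zu\n", strnlen(model, sizeof(model)));	/* 4 */
	return 0;
}
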
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 00742c50cd44..d9ca1dfab154 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
| @@ -1967,6 +1967,8 @@ static bool scsi_mq_get_budget(struct blk_mq_hw_ctx *hctx) | |||
| 1967 | out_put_device: | 1967 | out_put_device: |
| 1968 | put_device(&sdev->sdev_gendev); | 1968 | put_device(&sdev->sdev_gendev); |
| 1969 | out: | 1969 | out: |
| 1970 | if (atomic_read(&sdev->device_busy) == 0 && !scsi_device_blocked(sdev)) | ||
| 1971 | blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY); | ||
| 1970 | return false; | 1972 | return false; |
| 1971 | } | 1973 | } |
| 1972 | 1974 | ||
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 24fe68522716..a028ab3322a9 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
| @@ -1312,6 +1312,7 @@ static int sd_init_command(struct scsi_cmnd *cmd) | |||
| 1312 | static void sd_uninit_command(struct scsi_cmnd *SCpnt) | 1312 | static void sd_uninit_command(struct scsi_cmnd *SCpnt) |
| 1313 | { | 1313 | { |
| 1314 | struct request *rq = SCpnt->request; | 1314 | struct request *rq = SCpnt->request; |
| 1315 | u8 *cmnd; | ||
| 1315 | 1316 | ||
| 1316 | if (SCpnt->flags & SCMD_ZONE_WRITE_LOCK) | 1317 | if (SCpnt->flags & SCMD_ZONE_WRITE_LOCK) |
| 1317 | sd_zbc_write_unlock_zone(SCpnt); | 1318 | sd_zbc_write_unlock_zone(SCpnt); |
| @@ -1320,9 +1321,10 @@ static void sd_uninit_command(struct scsi_cmnd *SCpnt) | |||
| 1320 | __free_page(rq->special_vec.bv_page); | 1321 | __free_page(rq->special_vec.bv_page); |
| 1321 | 1322 | ||
| 1322 | if (SCpnt->cmnd != scsi_req(rq)->cmd) { | 1323 | if (SCpnt->cmnd != scsi_req(rq)->cmd) { |
| 1323 | mempool_free(SCpnt->cmnd, sd_cdb_pool); | 1324 | cmnd = SCpnt->cmnd; |
| 1324 | SCpnt->cmnd = NULL; | 1325 | SCpnt->cmnd = NULL; |
| 1325 | SCpnt->cmd_len = 0; | 1326 | SCpnt->cmd_len = 0; |
| 1327 | mempool_free(cmnd, sd_cdb_pool); | ||
| 1326 | } | 1328 | } |
| 1327 | } | 1329 | } |
| 1328 | 1330 | ||
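
The sd_uninit_command reordering closes a window for readers such as the debugfs dump earlier in this series: the CDB pointer is unpublished (set to NULL) before the buffer goes back to the mempool, so a concurrent observer sees either the live buffer or NULL, never a dangling pointer. Ordering alone is not a complete synchronization story, but the shape is worth isolating:

#include <stdlib.h>

static char *shared_buf;	/* visible to other contexts */

/* Unsafe order: free first, clear second. A reader running between
 * the two statements dereferences freed memory.
 *
 *	free(shared_buf);
 *	shared_buf = NULL;
 *
 * Safe order, as in the patch: unpublish, then free. */
static void teardown(void)
{
	char *buf = shared_buf;	/* keep a local reference */

	shared_buf = NULL;	/* readers now see NULL */
	free(buf);		/* no longer reachable, safe to free */
}

int main(void)
{
	shared_buf = malloc(16);
	teardown();
	return 0;
}
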
diff --git a/drivers/staging/ccree/ssi_hash.c b/drivers/staging/ccree/ssi_hash.c index 1799d3f26a9e..2035835b62dc 100644 --- a/drivers/staging/ccree/ssi_hash.c +++ b/drivers/staging/ccree/ssi_hash.c | |||
| @@ -1769,7 +1769,7 @@ static int ssi_ahash_import(struct ahash_request *req, const void *in) | |||
| 1769 | struct device *dev = drvdata_to_dev(ctx->drvdata); | 1769 | struct device *dev = drvdata_to_dev(ctx->drvdata); |
| 1770 | struct ahash_req_ctx *state = ahash_request_ctx(req); | 1770 | struct ahash_req_ctx *state = ahash_request_ctx(req); |
| 1771 | u32 tmp; | 1771 | u32 tmp; |
| 1772 | int rc; | 1772 | int rc = 0; |
| 1773 | 1773 | ||
| 1774 | memcpy(&tmp, in, sizeof(u32)); | 1774 | memcpy(&tmp, in, sizeof(u32)); |
| 1775 | if (tmp != CC_EXPORT_MAGIC) { | 1775 | if (tmp != CC_EXPORT_MAGIC) { |
diff --git a/drivers/staging/pi433/rf69.c b/drivers/staging/pi433/rf69.c index e69a2153c999..12c9df9cddde 100644 --- a/drivers/staging/pi433/rf69.c +++ b/drivers/staging/pi433/rf69.c | |||
| @@ -102,7 +102,7 @@ enum modulation rf69_get_modulation(struct spi_device *spi) | |||
| 102 | 102 | ||
| 103 | currentValue = READ_REG(REG_DATAMODUL); | 103 | currentValue = READ_REG(REG_DATAMODUL); |
| 104 | 104 | ||
| 105 | switch (currentValue & MASK_DATAMODUL_MODULATION_TYPE >> 3) { // TODO improvement: change 3 to define | 105 | switch (currentValue & MASK_DATAMODUL_MODULATION_TYPE) { |
| 106 | case DATAMODUL_MODULATION_TYPE_OOK: return OOK; | 106 | case DATAMODUL_MODULATION_TYPE_OOK: return OOK; |
| 107 | case DATAMODUL_MODULATION_TYPE_FSK: return FSK; | 107 | case DATAMODUL_MODULATION_TYPE_FSK: return FSK; |
| 108 | default: return undefined; | 108 | default: return undefined; |
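
The pi433 bug above is pure operator precedence: in C, >> binds tighter than &, so "value & MASK >> 3" parses as "value & (MASK >> 3)" and masks the wrong bits. A demonstration with a hypothetical two-bit field in bits 4:3 (mask 0x18; the real register layout may differ):

#include <stdio.h>

#define MASK 0x18	/* hypothetical modulation field, bits 4:3 */

int main(void)
{
	unsigned int reg = 0x08;	/* field holds value 01 */

	/* what was intended vs. what the old code computed */
	printf("(reg & MASK) >> 3 = %u\n", (reg & MASK) >> 3);	/* 1 */
	printf("reg & MASK >> 3   = %u\n", reg & MASK >> 3);	/* 0 */

	/* the fix compares the unshifted field directly */
	printf("reg & MASK        = 0x%02x\n", reg & MASK);	/* 0x08 */
	return 0;
}
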
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 55b198ba629b..78e92d29f8d9 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c | |||
| @@ -555,6 +555,9 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx, | |||
| 555 | unsigned iad_num = 0; | 555 | unsigned iad_num = 0; |
| 556 | 556 | ||
| 557 | memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE); | 557 | memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE); |
| 558 | nintf = nintf_orig = config->desc.bNumInterfaces; | ||
| 559 | config->desc.bNumInterfaces = 0; // Adjusted later | ||
| 560 | |||
| 558 | if (config->desc.bDescriptorType != USB_DT_CONFIG || | 561 | if (config->desc.bDescriptorType != USB_DT_CONFIG || |
| 559 | config->desc.bLength < USB_DT_CONFIG_SIZE || | 562 | config->desc.bLength < USB_DT_CONFIG_SIZE || |
| 560 | config->desc.bLength > size) { | 563 | config->desc.bLength > size) { |
| @@ -568,7 +571,6 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx, | |||
| 568 | buffer += config->desc.bLength; | 571 | buffer += config->desc.bLength; |
| 569 | size -= config->desc.bLength; | 572 | size -= config->desc.bLength; |
| 570 | 573 | ||
| 571 | nintf = nintf_orig = config->desc.bNumInterfaces; | ||
| 572 | if (nintf > USB_MAXINTERFACES) { | 574 | if (nintf > USB_MAXINTERFACES) { |
| 573 | dev_warn(ddev, "config %d has too many interfaces: %d, " | 575 | dev_warn(ddev, "config %d has too many interfaces: %d, " |
| 574 | "using maximum allowed: %d\n", | 576 | "using maximum allowed: %d\n", |
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h index f66c94130cac..31749c79045f 100644 --- a/drivers/usb/dwc2/core.h +++ b/drivers/usb/dwc2/core.h | |||
| @@ -537,6 +537,7 @@ struct dwc2_core_params { | |||
| 537 | * 2 - Internal DMA | 537 | * 2 - Internal DMA |
| 538 | * @power_optimized Are power optimizations enabled? | 538 | * @power_optimized Are power optimizations enabled? |
| 539 | * @num_dev_ep Number of device endpoints available | 539 | * @num_dev_ep Number of device endpoints available |
| 540 | * @num_dev_in_eps Number of device IN endpoints available | ||
| 540 | * @num_dev_perio_in_ep Number of device periodic IN endpoints | 541 | * @num_dev_perio_in_ep Number of device periodic IN endpoints |
| 541 | * available | 542 | * available |
| 542 | * @dev_token_q_depth Device Mode IN Token Sequence Learning Queue | 543 | * @dev_token_q_depth Device Mode IN Token Sequence Learning Queue |
| @@ -565,6 +566,7 @@ struct dwc2_core_params { | |||
| 565 | * 2 - 8 or 16 bits | 566 | * 2 - 8 or 16 bits |
| 566 | * @snpsid: Value from SNPSID register | 567 | * @snpsid: Value from SNPSID register |
| 567 | * @dev_ep_dirs: Direction of device endpoints (GHWCFG1) | 568 | * @dev_ep_dirs: Direction of device endpoints (GHWCFG1) |
| 569 | * @g_tx_fifo_size[] Power-on values of TxFIFO sizes | ||
| 568 | */ | 570 | */ |
| 569 | struct dwc2_hw_params { | 571 | struct dwc2_hw_params { |
| 570 | unsigned op_mode:3; | 572 | unsigned op_mode:3; |
| @@ -586,12 +588,14 @@ struct dwc2_hw_params { | |||
| 586 | unsigned fs_phy_type:2; | 588 | unsigned fs_phy_type:2; |
| 587 | unsigned i2c_enable:1; | 589 | unsigned i2c_enable:1; |
| 588 | unsigned num_dev_ep:4; | 590 | unsigned num_dev_ep:4; |
| 591 | unsigned num_dev_in_eps : 4; | ||
| 589 | unsigned num_dev_perio_in_ep:4; | 592 | unsigned num_dev_perio_in_ep:4; |
| 590 | unsigned total_fifo_size:16; | 593 | unsigned total_fifo_size:16; |
| 591 | unsigned power_optimized:1; | 594 | unsigned power_optimized:1; |
| 592 | unsigned utmi_phy_data_width:2; | 595 | unsigned utmi_phy_data_width:2; |
| 593 | u32 snpsid; | 596 | u32 snpsid; |
| 594 | u32 dev_ep_dirs; | 597 | u32 dev_ep_dirs; |
| 598 | u32 g_tx_fifo_size[MAX_EPS_CHANNELS]; | ||
| 595 | }; | 599 | }; |
| 596 | 600 | ||
| 597 | /* Size of control and EP0 buffers */ | 601 | /* Size of control and EP0 buffers */ |
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 88529d092503..e4c3ce0de5de 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c | |||
| @@ -195,55 +195,18 @@ int dwc2_hsotg_tx_fifo_count(struct dwc2_hsotg *hsotg) | |||
| 195 | { | 195 | { |
| 196 | if (hsotg->hw_params.en_multiple_tx_fifo) | 196 | if (hsotg->hw_params.en_multiple_tx_fifo) |
| 197 | /* In dedicated FIFO mode we need count of IN EPs */ | 197 | /* In dedicated FIFO mode we need count of IN EPs */ |
| 198 | return (dwc2_readl(hsotg->regs + GHWCFG4) & | 198 | return hsotg->hw_params.num_dev_in_eps; |
| 199 | GHWCFG4_NUM_IN_EPS_MASK) >> GHWCFG4_NUM_IN_EPS_SHIFT; | ||
| 200 | else | 199 | else |
| 201 | /* In shared FIFO mode we need count of Periodic IN EPs */ | 200 | /* In shared FIFO mode we need count of Periodic IN EPs */ |
| 202 | return hsotg->hw_params.num_dev_perio_in_ep; | 201 | return hsotg->hw_params.num_dev_perio_in_ep; |
| 203 | } | 202 | } |
| 204 | 203 | ||
| 205 | /** | 204 | /** |
| 206 | * dwc2_hsotg_ep_info_size - return Endpoint Info Control block size in DWORDs | ||
| 207 | */ | ||
| 208 | static int dwc2_hsotg_ep_info_size(struct dwc2_hsotg *hsotg) | ||
| 209 | { | ||
| 210 | int val = 0; | ||
| 211 | int i; | ||
| 212 | u32 ep_dirs; | ||
| 213 | |||
| 214 | /* | ||
| 215 | * Don't need additional space for ep info control registers in | ||
| 216 | * slave mode. | ||
| 217 | */ | ||
| 218 | if (!using_dma(hsotg)) { | ||
| 219 | dev_dbg(hsotg->dev, "Buffer DMA ep info size 0\n"); | ||
| 220 | return 0; | ||
| 221 | } | ||
| 222 | |||
| 223 | /* | ||
| 224 | * Buffer DMA mode - 1 location per endpoint | ||
| 225 | * Descriptor DMA mode - 4 locations per endpoint | ||
| 226 | */ | ||
| 227 | ep_dirs = hsotg->hw_params.dev_ep_dirs; | ||
| 228 | |||
| 229 | for (i = 0; i <= hsotg->hw_params.num_dev_ep; i++) { | ||
| 230 | val += ep_dirs & 3 ? 1 : 2; | ||
| 231 | ep_dirs >>= 2; | ||
| 232 | } | ||
| 233 | |||
| 234 | if (using_desc_dma(hsotg)) | ||
| 235 | val = val * 4; | ||
| 236 | |||
| 237 | return val; | ||
| 238 | } | ||
| 239 | |||
| 240 | /** | ||
| 241 | * dwc2_hsotg_tx_fifo_total_depth - return total FIFO depth available for | 205 | * dwc2_hsotg_tx_fifo_total_depth - return total FIFO depth available for |
| 242 | * device mode TX FIFOs | 206 | * device mode TX FIFOs |
| 243 | */ | 207 | */ |
| 244 | int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg) | 208 | int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg) |
| 245 | { | 209 | { |
| 246 | int ep_info_size; | ||
| 247 | int addr; | 210 | int addr; |
| 248 | int tx_addr_max; | 211 | int tx_addr_max; |
| 249 | u32 np_tx_fifo_size; | 212 | u32 np_tx_fifo_size; |
| @@ -252,8 +215,7 @@ int dwc2_hsotg_tx_fifo_total_depth(struct dwc2_hsotg *hsotg) | |||
| 252 | hsotg->params.g_np_tx_fifo_size); | 215 | hsotg->params.g_np_tx_fifo_size); |
| 253 | 216 | ||
| 254 | /* Get Endpoint Info Control block size in DWORDs. */ | 217 | /* Get Endpoint Info Control block size in DWORDs. */ |
| 255 | ep_info_size = dwc2_hsotg_ep_info_size(hsotg); | 218 | tx_addr_max = hsotg->hw_params.total_fifo_size; |
| 256 | tx_addr_max = hsotg->hw_params.total_fifo_size - ep_info_size; | ||
| 257 | 219 | ||
| 258 | addr = hsotg->params.g_rx_fifo_size + np_tx_fifo_size; | 220 | addr = hsotg->params.g_rx_fifo_size + np_tx_fifo_size; |
| 259 | if (tx_addr_max <= addr) | 221 | if (tx_addr_max <= addr) |
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c index ef73af6e03a9..03fd20f0b496 100644 --- a/drivers/usb/dwc2/params.c +++ b/drivers/usb/dwc2/params.c | |||
| @@ -484,8 +484,7 @@ static void dwc2_check_param_tx_fifo_sizes(struct dwc2_hsotg *hsotg) | |||
| 484 | } | 484 | } |
| 485 | 485 | ||
| 486 | for (fifo = 1; fifo <= fifo_count; fifo++) { | 486 | for (fifo = 1; fifo <= fifo_count; fifo++) { |
| 487 | dptxfszn = (dwc2_readl(hsotg->regs + DPTXFSIZN(fifo)) & | 487 | dptxfszn = hsotg->hw_params.g_tx_fifo_size[fifo]; |
| 488 | FIFOSIZE_DEPTH_MASK) >> FIFOSIZE_DEPTH_SHIFT; | ||
| 489 | 488 | ||
| 490 | if (hsotg->params.g_tx_fifo_size[fifo] < min || | 489 | if (hsotg->params.g_tx_fifo_size[fifo] < min || |
| 491 | hsotg->params.g_tx_fifo_size[fifo] > dptxfszn) { | 490 | hsotg->params.g_tx_fifo_size[fifo] > dptxfszn) { |
| @@ -609,6 +608,7 @@ static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg) | |||
| 609 | struct dwc2_hw_params *hw = &hsotg->hw_params; | 608 | struct dwc2_hw_params *hw = &hsotg->hw_params; |
| 610 | bool forced; | 609 | bool forced; |
| 611 | u32 gnptxfsiz; | 610 | u32 gnptxfsiz; |
| 611 | int fifo, fifo_count; | ||
| 612 | 612 | ||
| 613 | if (hsotg->dr_mode == USB_DR_MODE_HOST) | 613 | if (hsotg->dr_mode == USB_DR_MODE_HOST) |
| 614 | return; | 614 | return; |
| @@ -617,6 +617,14 @@ static void dwc2_get_dev_hwparams(struct dwc2_hsotg *hsotg) | |||
| 617 | 617 | ||
| 618 | gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ); | 618 | gnptxfsiz = dwc2_readl(hsotg->regs + GNPTXFSIZ); |
| 619 | 619 | ||
| 620 | fifo_count = dwc2_hsotg_tx_fifo_count(hsotg); | ||
| 621 | |||
| 622 | for (fifo = 1; fifo <= fifo_count; fifo++) { | ||
| 623 | hw->g_tx_fifo_size[fifo] = | ||
| 624 | (dwc2_readl(hsotg->regs + DPTXFSIZN(fifo)) & | ||
| 625 | FIFOSIZE_DEPTH_MASK) >> FIFOSIZE_DEPTH_SHIFT; | ||
| 626 | } | ||
| 627 | |||
| 620 | if (forced) | 628 | if (forced) |
| 621 | dwc2_clear_force_mode(hsotg); | 629 | dwc2_clear_force_mode(hsotg); |
| 622 | 630 | ||
| @@ -661,14 +669,6 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg) | |||
| 661 | hwcfg4 = dwc2_readl(hsotg->regs + GHWCFG4); | 669 | hwcfg4 = dwc2_readl(hsotg->regs + GHWCFG4); |
| 662 | grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ); | 670 | grxfsiz = dwc2_readl(hsotg->regs + GRXFSIZ); |
| 663 | 671 | ||
| 664 | /* | ||
| 665 | * Host specific hardware parameters. Reading these parameters | ||
| 666 | * requires the controller to be in host mode. The mode will | ||
| 667 | * be forced, if necessary, to read these values. | ||
| 668 | */ | ||
| 669 | dwc2_get_host_hwparams(hsotg); | ||
| 670 | dwc2_get_dev_hwparams(hsotg); | ||
| 671 | |||
| 672 | /* hwcfg1 */ | 672 | /* hwcfg1 */ |
| 673 | hw->dev_ep_dirs = hwcfg1; | 673 | hw->dev_ep_dirs = hwcfg1; |
| 674 | 674 | ||
| @@ -711,6 +711,8 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg) | |||
| 711 | hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN); | 711 | hw->en_multiple_tx_fifo = !!(hwcfg4 & GHWCFG4_DED_FIFO_EN); |
| 712 | hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >> | 712 | hw->num_dev_perio_in_ep = (hwcfg4 & GHWCFG4_NUM_DEV_PERIO_IN_EP_MASK) >> |
| 713 | GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT; | 713 | GHWCFG4_NUM_DEV_PERIO_IN_EP_SHIFT; |
| 714 | hw->num_dev_in_eps = (hwcfg4 & GHWCFG4_NUM_IN_EPS_MASK) >> | ||
| 715 | GHWCFG4_NUM_IN_EPS_SHIFT; | ||
| 714 | hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA); | 716 | hw->dma_desc_enable = !!(hwcfg4 & GHWCFG4_DESC_DMA); |
| 715 | hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ); | 717 | hw->power_optimized = !!(hwcfg4 & GHWCFG4_POWER_OPTIMIZ); |
| 716 | hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >> | 718 | hw->utmi_phy_data_width = (hwcfg4 & GHWCFG4_UTMI_PHY_DATA_WIDTH_MASK) >> |
| @@ -719,6 +721,13 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg) | |||
| 719 | /* fifo sizes */ | 721 | /* fifo sizes */ |
| 720 | hw->rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >> | 722 | hw->rx_fifo_size = (grxfsiz & GRXFSIZ_DEPTH_MASK) >> |
| 721 | GRXFSIZ_DEPTH_SHIFT; | 723 | GRXFSIZ_DEPTH_SHIFT; |
| 724 | /* | ||
| 725 | * Host specific hardware parameters. Reading these parameters | ||
| 726 | * requires the controller to be in host mode. The mode will | ||
| 727 | * be forced, if necessary, to read these values. | ||
| 728 | */ | ||
| 729 | dwc2_get_host_hwparams(hsotg); | ||
| 730 | dwc2_get_dev_hwparams(hsotg); | ||
| 722 | 731 | ||
| 723 | return 0; | 732 | return 0; |
| 724 | } | 733 | } |
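
The dwc2 change caches the power-on DPTXFSIZN depths into hw_params.g_tx_fifo_size[] while the controller is still held in the forced device mode, so dwc2_check_param_tx_fifo_sizes() can validate user-requested sizes against that snapshot instead of touching the registers outside that window; the host and device hwparam reads also move below the hwcfg4 parsing because dwc2_get_dev_hwparams() now needs the FIFO count derived from it. A reduced snapshot-then-validate model:

#include <stdio.h>

#define NFIFO 4

/* stands in for the DPTXFSIZN registers */
static int regs[NFIFO + 1] = { 0, 256, 256, 128, 128 };

/* stands in for hw_params.g_tx_fifo_size[] */
static int power_on[NFIFO + 1];

static void snapshot_defaults(void)
{
	for (int f = 1; f <= NFIFO; f++)
		power_on[f] = regs[f];
}

/* validate a request against the snapshot, not the live register */
static int check_request(int fifo, int req, int min)
{
	return (req < min || req > power_on[fifo]) ? -1 : 0;
}

int main(void)
{
	snapshot_defaults();
	regs[1] = 16;		/* register later reprogrammed */

	printf("req 200: %s\n", check_request(1, 200, 16) ? "rej" : "ok");
	printf("req 512: %s\n", check_request(1, 512, 16) ? "rej" : "ok");
	return 0;
}
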
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c index c4a4d7bd2766..7ae0eefc7cc7 100644 --- a/drivers/usb/dwc3/dwc3-of-simple.c +++ b/drivers/usb/dwc3/dwc3-of-simple.c | |||
| @@ -51,8 +51,10 @@ static int dwc3_of_simple_clk_init(struct dwc3_of_simple *simple, int count) | |||
| 51 | 51 | ||
| 52 | clk = of_clk_get(np, i); | 52 | clk = of_clk_get(np, i); |
| 53 | if (IS_ERR(clk)) { | 53 | if (IS_ERR(clk)) { |
| 54 | while (--i >= 0) | 54 | while (--i >= 0) { |
| 55 | clk_disable_unprepare(simple->clks[i]); | ||
| 55 | clk_put(simple->clks[i]); | 56 | clk_put(simple->clks[i]); |
| 57 | } | ||
| 56 | return PTR_ERR(clk); | 58 | return PTR_ERR(clk); |
| 57 | } | 59 | } |
| 58 | 60 | ||
| @@ -203,6 +205,7 @@ static struct platform_driver dwc3_of_simple_driver = { | |||
| 203 | .driver = { | 205 | .driver = { |
| 204 | .name = "dwc3-of-simple", | 206 | .name = "dwc3-of-simple", |
| 205 | .of_match_table = of_dwc3_simple_match, | 207 | .of_match_table = of_dwc3_simple_match, |
| 208 | .pm = &dwc3_of_simple_dev_pm_ops, | ||
| 206 | }, | 209 | }, |
| 207 | }; | 210 | }; |
| 208 | 211 | ||
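
The dwc3-of-simple fix is the classic acquire-loop unwind: when acquisition fails at index i, every earlier index was both gotten and prepared/enabled, so the error path has to undo both steps, not just the clk_put() the old loop did. The generic shape, with stub functions in place of the clock API:

#include <stdio.h>

#define N 4

static int  get(int i)	   { return i == 2 ? -1 : 0; }	/* fails at 2 */
static void enable(int i)  { printf("enable %d\n", i); }
static void disable(int i) { printf("disable %d\n", i); }
static void put(int i)	   { printf("put %d\n", i); }

static int init_all(void)
{
	int i;

	for (i = 0; i < N; i++) {
		if (get(i) < 0)
			goto err;
		enable(i);
	}
	return 0;

err:
	/* undo both steps for every fully acquired earlier index */
	while (--i >= 0) {
		disable(i);	/* the step the old loop missed */
		put(i);
	}
	return -1;
}

int main(void)
{
	return init_all() ? 1 : 0;
}
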
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 981fd986cf82..639dd1b163a0 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c | |||
| @@ -259,7 +259,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd, | |||
| 259 | { | 259 | { |
| 260 | const struct usb_endpoint_descriptor *desc = dep->endpoint.desc; | 260 | const struct usb_endpoint_descriptor *desc = dep->endpoint.desc; |
| 261 | struct dwc3 *dwc = dep->dwc; | 261 | struct dwc3 *dwc = dep->dwc; |
| 262 | u32 timeout = 500; | 262 | u32 timeout = 1000; |
| 263 | u32 reg; | 263 | u32 reg; |
| 264 | 264 | ||
| 265 | int cmd_status = 0; | 265 | int cmd_status = 0; |
| @@ -912,7 +912,7 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb, | |||
| 912 | */ | 912 | */ |
| 913 | if (speed == USB_SPEED_HIGH) { | 913 | if (speed == USB_SPEED_HIGH) { |
| 914 | struct usb_ep *ep = &dep->endpoint; | 914 | struct usb_ep *ep = &dep->endpoint; |
| 915 | unsigned int mult = ep->mult - 1; | 915 | unsigned int mult = 2; |
| 916 | unsigned int maxp = usb_endpoint_maxp(ep->desc); | 916 | unsigned int maxp = usb_endpoint_maxp(ep->desc); |
| 917 | 917 | ||
| 918 | if (length <= (2 * maxp)) | 918 | if (length <= (2 * maxp)) |
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index 0a19a76645ad..31cce7805eb2 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig | |||
| @@ -508,8 +508,8 @@ choice | |||
| 508 | controller, and the relevant drivers for each function declared | 508 | controller, and the relevant drivers for each function declared |
| 509 | by the device. | 509 | by the device. |
| 510 | 510 | ||
| 511 | endchoice | ||
| 512 | |||
| 513 | source "drivers/usb/gadget/legacy/Kconfig" | 511 | source "drivers/usb/gadget/legacy/Kconfig" |
| 514 | 512 | ||
| 513 | endchoice | ||
| 514 | |||
| 515 | endif # USB_GADGET | 515 | endif # USB_GADGET |
diff --git a/drivers/usb/gadget/legacy/Kconfig b/drivers/usb/gadget/legacy/Kconfig index 9570bbeced4f..784bf86dad4f 100644 --- a/drivers/usb/gadget/legacy/Kconfig +++ b/drivers/usb/gadget/legacy/Kconfig | |||
| @@ -13,14 +13,6 @@ | |||
| 13 | # both kinds of controller can also support "USB On-the-Go" (CONFIG_USB_OTG). | 13 | # both kinds of controller can also support "USB On-the-Go" (CONFIG_USB_OTG). |
| 14 | # | 14 | # |
| 15 | 15 | ||
| 16 | menuconfig USB_GADGET_LEGACY | ||
| 17 | bool "Legacy USB Gadget Support" | ||
| 18 | help | ||
| 19 | Legacy USB gadgets are USB gadgets that do not use the USB gadget | ||
| 20 | configfs interface. | ||
| 21 | |||
| 22 | if USB_GADGET_LEGACY | ||
| 23 | |||
| 24 | config USB_ZERO | 16 | config USB_ZERO |
| 25 | tristate "Gadget Zero (DEVELOPMENT)" | 17 | tristate "Gadget Zero (DEVELOPMENT)" |
| 26 | select USB_LIBCOMPOSITE | 18 | select USB_LIBCOMPOSITE |
| @@ -487,7 +479,7 @@ endif | |||
| 487 | # or video class gadget drivers), or specific hardware, here. | 479 | # or video class gadget drivers), or specific hardware, here. |
| 488 | config USB_G_WEBCAM | 480 | config USB_G_WEBCAM |
| 489 | tristate "USB Webcam Gadget" | 481 | tristate "USB Webcam Gadget" |
| 490 | depends on VIDEO_DEV | 482 | depends on VIDEO_V4L2 |
| 491 | select USB_LIBCOMPOSITE | 483 | select USB_LIBCOMPOSITE |
| 492 | select VIDEOBUF2_VMALLOC | 484 | select VIDEOBUF2_VMALLOC |
| 493 | select USB_F_UVC | 485 | select USB_F_UVC |
| @@ -498,5 +490,3 @@ config USB_G_WEBCAM | |||
| 498 | 490 | ||
| 499 | Say "y" to link the driver statically, or "m" to build a | 491 | Say "y" to link the driver statically, or "m" to build a |
| 500 | dynamically linked module called "g_webcam". | 492 | dynamically linked module called "g_webcam". |
| 501 | |||
| 502 | endif | ||
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 15f7d422885f..3a29b32a3bd0 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c | |||
| @@ -971,10 +971,9 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, | |||
| 971 | return 0; | 971 | return 0; |
| 972 | } | 972 | } |
| 973 | 973 | ||
| 974 | xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags); | 974 | dev = kzalloc(sizeof(*dev), flags); |
| 975 | if (!xhci->devs[slot_id]) | 975 | if (!dev) |
| 976 | return 0; | 976 | return 0; |
| 977 | dev = xhci->devs[slot_id]; | ||
| 978 | 977 | ||
| 979 | /* Allocate the (output) device context that will be used in the HC. */ | 978 | /* Allocate the (output) device context that will be used in the HC. */ |
| 980 | dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags); | 979 | dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags); |
| @@ -1015,9 +1014,17 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, | |||
| 1015 | 1014 | ||
| 1016 | trace_xhci_alloc_virt_device(dev); | 1015 | trace_xhci_alloc_virt_device(dev); |
| 1017 | 1016 | ||
| 1017 | xhci->devs[slot_id] = dev; | ||
| 1018 | |||
| 1018 | return 1; | 1019 | return 1; |
| 1019 | fail: | 1020 | fail: |
| 1020 | xhci_free_virt_device(xhci, slot_id); | 1021 | |
| 1022 | if (dev->in_ctx) | ||
| 1023 | xhci_free_container_ctx(xhci, dev->in_ctx); | ||
| 1024 | if (dev->out_ctx) | ||
| 1025 | xhci_free_container_ctx(xhci, dev->out_ctx); | ||
| 1026 | kfree(dev); | ||
| 1027 | |||
| 1021 | return 0; | 1028 | return 0; |
| 1022 | } | 1029 | } |
| 1023 | 1030 | ||
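
xhci_alloc_virt_device() used to store the half-built device into xhci->devs[slot_id] immediately and rely on the generic free routine for cleanup, which meant both the failure path and any other reader of devs[slot_id] could see a partially initialized entry. The fix builds the object in a local, publishes it only after full initialization, and on failure frees exactly what this function allocated. A reduced model of the publish-on-success pattern:

#include <stdio.h>
#include <stdlib.h>

struct dev { void *in_ctx, *out_ctx; };

static struct dev *devs[8];	/* globally visible table */

static int alloc_virt_device(int slot, int fail_in_ctx)
{
	struct dev *dev = calloc(1, sizeof(*dev));

	if (!dev)
		return 0;
	dev->out_ctx = malloc(32);
	if (!dev->out_ctx)
		goto fail;
	dev->in_ctx = fail_in_ctx ? NULL : malloc(32);
	if (!dev->in_ctx)
		goto fail;

	devs[slot] = dev;	/* publish only when fully built */
	return 1;

fail:
	/* free what we allocated; devs[slot] was never touched */
	free(dev->in_ctx);
	free(dev->out_ctx);
	free(dev);
	return 0;
}

int main(void)
{
	printf("ok=%d devs[1]=%p\n", alloc_virt_device(1, 1), (void *)devs[1]);
	printf("ok=%d devs[2]=%p\n", alloc_virt_device(2, 0), (void *)devs[2]);
	return 0;
}
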
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 6eb87c6e4d24..c5cbc685c691 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
| @@ -3112,7 +3112,7 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred, | |||
| 3112 | { | 3112 | { |
| 3113 | u32 maxp, total_packet_count; | 3113 | u32 maxp, total_packet_count; |
| 3114 | 3114 | ||
| 3115 | /* MTK xHCI is mostly 0.97 but contains some features from 1.0 */ | 3115 | /* MTK xHCI 0.96 contains some features from 1.0 */ |
| 3116 | if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST)) | 3116 | if (xhci->hci_version < 0x100 && !(xhci->quirks & XHCI_MTK_HOST)) |
| 3117 | return ((td_total_len - transferred) >> 10); | 3117 | return ((td_total_len - transferred) >> 10); |
| 3118 | 3118 | ||
| @@ -3121,8 +3121,8 @@ static u32 xhci_td_remainder(struct xhci_hcd *xhci, int transferred, | |||
| 3121 | trb_buff_len == td_total_len) | 3121 | trb_buff_len == td_total_len) |
| 3122 | return 0; | 3122 | return 0; |
| 3123 | 3123 | ||
| 3124 | /* for MTK xHCI, TD size doesn't include this TRB */ | 3124 | /* for MTK xHCI 0.96, TD size include this TRB, but not in 1.x */ |
| 3125 | if (xhci->quirks & XHCI_MTK_HOST) | 3125 | if ((xhci->quirks & XHCI_MTK_HOST) && (xhci->hci_version < 0x100)) |
| 3126 | trb_buff_len = 0; | 3126 | trb_buff_len = 0; |
| 3127 | 3127 | ||
| 3128 | maxp = usb_endpoint_maxp(&urb->ep->desc); | 3128 | maxp = usb_endpoint_maxp(&urb->ep->desc); |
diff --git a/drivers/usb/musb/da8xx.c b/drivers/usb/musb/da8xx.c index 0397606a211b..6c036de63272 100644 --- a/drivers/usb/musb/da8xx.c +++ b/drivers/usb/musb/da8xx.c | |||
| @@ -284,7 +284,15 @@ static irqreturn_t da8xx_musb_interrupt(int irq, void *hci) | |||
| 284 | musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE; | 284 | musb->xceiv->otg->state = OTG_STATE_A_WAIT_VRISE; |
| 285 | portstate(musb->port1_status |= USB_PORT_STAT_POWER); | 285 | portstate(musb->port1_status |= USB_PORT_STAT_POWER); |
| 286 | del_timer(&musb->dev_timer); | 286 | del_timer(&musb->dev_timer); |
| 287 | } else { | 287 | } else if (!(musb->int_usb & MUSB_INTR_BABBLE)) { |
| 288 | /* | ||
| 289 | * When babble condition happens, drvvbus interrupt | ||
| 290 | * is also generated. Ignore this drvvbus interrupt | ||
| 291 | * and let the babble interrupt handler recover the | ||
| 292 | * controller; otherwise, the host-mode flag is lost | ||
| 293 | * due to the MUSB_DEV_MODE() call below and babble | ||
| 294 | * recovery logic will not be called. | ||
| 295 | */ | ||
| 288 | musb->is_active = 0; | 296 | musb->is_active = 0; |
| 289 | MUSB_DEV_MODE(musb); | 297 | MUSB_DEV_MODE(musb); |
| 290 | otg->default_a = 0; | 298 | otg->default_a = 0; |
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 2968046e7c05..f72d045ee9ef 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h | |||
| @@ -2100,6 +2100,13 @@ UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0116, | |||
| 2100 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | 2100 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
| 2101 | US_FL_BROKEN_FUA ), | 2101 | US_FL_BROKEN_FUA ), |
| 2102 | 2102 | ||
| 2103 | /* Reported by David Kozub <zub@linux.fjfi.cvut.cz> */ | ||
| 2104 | UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999, | ||
| 2105 | "JMicron", | ||
| 2106 | "JMS567", | ||
| 2107 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | ||
| 2108 | US_FL_BROKEN_FUA), | ||
| 2109 | |||
| 2103 | /* | 2110 | /* |
| 2104 | * Reported by Alexandre Oliva <oliva@lsd.ic.unicamp.br> | 2111 | * Reported by Alexandre Oliva <oliva@lsd.ic.unicamp.br> |
| 2105 | * JMicron responds to USN and several other SCSI ioctls with a | 2112 | * JMicron responds to USN and several other SCSI ioctls with a |
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index d520374a824e..e6127fb21c12 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h | |||
| @@ -129,6 +129,13 @@ UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999, | |||
| 129 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | 129 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, |
| 130 | US_FL_BROKEN_FUA | US_FL_NO_REPORT_OPCODES), | 130 | US_FL_BROKEN_FUA | US_FL_NO_REPORT_OPCODES), |
| 131 | 131 | ||
| 132 | /* Reported-by: David Kozub <zub@linux.fjfi.cvut.cz> */ | ||
| 133 | UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999, | ||
| 134 | "JMicron", | ||
| 135 | "JMS567", | ||
| 136 | USB_SC_DEVICE, USB_PR_DEVICE, NULL, | ||
| 137 | US_FL_BROKEN_FUA), | ||
| 138 | |||
| 132 | /* Reported-by: Hans de Goede <hdegoede@redhat.com> */ | 139 | /* Reported-by: Hans de Goede <hdegoede@redhat.com> */ |
| 133 | UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999, | 140 | UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999, |
| 134 | "VIA", | 141 | "VIA", |
diff --git a/drivers/usb/usbip/stub_rx.c b/drivers/usb/usbip/stub_rx.c index 536e037f541f..493ac2928391 100644 --- a/drivers/usb/usbip/stub_rx.c +++ b/drivers/usb/usbip/stub_rx.c | |||
| @@ -322,23 +322,34 @@ static struct stub_priv *stub_priv_alloc(struct stub_device *sdev, | |||
| 322 | return priv; | 322 | return priv; |
| 323 | } | 323 | } |
| 324 | 324 | ||
| 325 | static int get_pipe(struct stub_device *sdev, int epnum, int dir) | 325 | static int get_pipe(struct stub_device *sdev, struct usbip_header *pdu) |
| 326 | { | 326 | { |
| 327 | struct usb_device *udev = sdev->udev; | 327 | struct usb_device *udev = sdev->udev; |
| 328 | struct usb_host_endpoint *ep; | 328 | struct usb_host_endpoint *ep; |
| 329 | struct usb_endpoint_descriptor *epd = NULL; | 329 | struct usb_endpoint_descriptor *epd = NULL; |
| 330 | int epnum = pdu->base.ep; | ||
| 331 | int dir = pdu->base.direction; | ||
| 332 | |||
| 333 | if (epnum < 0 || epnum > 15) | ||
| 334 | goto err_ret; | ||
| 330 | 335 | ||
| 331 | if (dir == USBIP_DIR_IN) | 336 | if (dir == USBIP_DIR_IN) |
| 332 | ep = udev->ep_in[epnum & 0x7f]; | 337 | ep = udev->ep_in[epnum & 0x7f]; |
| 333 | else | 338 | else |
| 334 | ep = udev->ep_out[epnum & 0x7f]; | 339 | ep = udev->ep_out[epnum & 0x7f]; |
| 335 | if (!ep) { | 340 | if (!ep) |
| 336 | dev_err(&sdev->udev->dev, "no such endpoint?, %d\n", | 341 | goto err_ret; |
| 337 | epnum); | ||
| 338 | BUG(); | ||
| 339 | } | ||
| 340 | 342 | ||
| 341 | epd = &ep->desc; | 343 | epd = &ep->desc; |
| 344 | |||
| 345 | /* validate transfer_buffer_length */ | ||
| 346 | if (pdu->u.cmd_submit.transfer_buffer_length > INT_MAX) { | ||
| 347 | dev_err(&sdev->udev->dev, | ||
| 348 | "CMD_SUBMIT: -EMSGSIZE transfer_buffer_length %d\n", | ||
| 349 | pdu->u.cmd_submit.transfer_buffer_length); | ||
| 350 | return -1; | ||
| 351 | } | ||
| 352 | |||
| 342 | if (usb_endpoint_xfer_control(epd)) { | 353 | if (usb_endpoint_xfer_control(epd)) { |
| 343 | if (dir == USBIP_DIR_OUT) | 354 | if (dir == USBIP_DIR_OUT) |
| 344 | return usb_sndctrlpipe(udev, epnum); | 355 | return usb_sndctrlpipe(udev, epnum); |
| @@ -361,15 +372,31 @@ static int get_pipe(struct stub_device *sdev, int epnum, int dir) | |||
| 361 | } | 372 | } |
| 362 | 373 | ||
| 363 | if (usb_endpoint_xfer_isoc(epd)) { | 374 | if (usb_endpoint_xfer_isoc(epd)) { |
| 375 | /* validate packet size and number of packets */ | ||
| 376 | unsigned int maxp, packets, bytes; | ||
| 377 | |||
| 378 | maxp = usb_endpoint_maxp(epd); | ||
| 379 | maxp *= usb_endpoint_maxp_mult(epd); | ||
| 380 | bytes = pdu->u.cmd_submit.transfer_buffer_length; | ||
| 381 | packets = DIV_ROUND_UP(bytes, maxp); | ||
| 382 | |||
| 383 | if (pdu->u.cmd_submit.number_of_packets < 0 || | ||
| 384 | pdu->u.cmd_submit.number_of_packets > packets) { | ||
| 385 | dev_err(&sdev->udev->dev, | ||
| 386 | "CMD_SUBMIT: isoc invalid num packets %d\n", | ||
| 387 | pdu->u.cmd_submit.number_of_packets); | ||
| 388 | return -1; | ||
| 389 | } | ||
| 364 | if (dir == USBIP_DIR_OUT) | 390 | if (dir == USBIP_DIR_OUT) |
| 365 | return usb_sndisocpipe(udev, epnum); | 391 | return usb_sndisocpipe(udev, epnum); |
| 366 | else | 392 | else |
| 367 | return usb_rcvisocpipe(udev, epnum); | 393 | return usb_rcvisocpipe(udev, epnum); |
| 368 | } | 394 | } |
| 369 | 395 | ||
| 396 | err_ret: | ||
| 370 | /* NOT REACHED */ | 397 | /* NOT REACHED */ |
| 371 | dev_err(&sdev->udev->dev, "get pipe, epnum %d\n", epnum); | 398 | dev_err(&sdev->udev->dev, "CMD_SUBMIT: invalid epnum %d\n", epnum); |
| 372 | return 0; | 399 | return -1; |
| 373 | } | 400 | } |
| 374 | 401 | ||
| 375 | static void masking_bogus_flags(struct urb *urb) | 402 | static void masking_bogus_flags(struct urb *urb) |
| @@ -433,7 +460,10 @@ static void stub_recv_cmd_submit(struct stub_device *sdev, | |||
| 433 | struct stub_priv *priv; | 460 | struct stub_priv *priv; |
| 434 | struct usbip_device *ud = &sdev->ud; | 461 | struct usbip_device *ud = &sdev->ud; |
| 435 | struct usb_device *udev = sdev->udev; | 462 | struct usb_device *udev = sdev->udev; |
| 436 | int pipe = get_pipe(sdev, pdu->base.ep, pdu->base.direction); | 463 | int pipe = get_pipe(sdev, pdu); |
| 464 | |||
| 465 | if (pipe == -1) | ||
| 466 | return; | ||
| 437 | 467 | ||
| 438 | priv = stub_priv_alloc(sdev, pdu); | 468 | priv = stub_priv_alloc(sdev, pdu); |
| 439 | if (!priv) | 469 | if (!priv) |
| @@ -452,7 +482,8 @@ static void stub_recv_cmd_submit(struct stub_device *sdev, | |||
| 452 | } | 482 | } |
| 453 | 483 | ||
| 454 | /* allocate urb transfer buffer, if needed */ | 484 | /* allocate urb transfer buffer, if needed */ |
| 455 | if (pdu->u.cmd_submit.transfer_buffer_length > 0) { | 485 | if (pdu->u.cmd_submit.transfer_buffer_length > 0 && |
| 486 | pdu->u.cmd_submit.transfer_buffer_length <= INT_MAX) { | ||
| 456 | priv->urb->transfer_buffer = | 487 | priv->urb->transfer_buffer = |
| 457 | kzalloc(pdu->u.cmd_submit.transfer_buffer_length, | 488 | kzalloc(pdu->u.cmd_submit.transfer_buffer_length, |
| 458 | GFP_KERNEL); | 489 | GFP_KERNEL); |
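
The stub_rx changes treat the network-supplied PDU as hostile input: the endpoint number is range-checked, a missing endpoint now returns an error instead of calling BUG(), the transfer length is capped at INT_MAX, and for isochronous endpoints the claimed number_of_packets is checked against the most the buffer could hold, DIV_ROUND_UP(bytes, maxp). A user-space version of those checks (field names follow the patch; the maxp values are illustrative):

#include <limits.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

struct cmd_submit {
	int ep;
	long long transfer_buffer_length;
	int number_of_packets;
};

static int validate(const struct cmd_submit *p, unsigned int maxp)
{
	if (p->ep < 0 || p->ep > 15)
		return -1;	/* invalid endpoint number */
	if (p->transfer_buffer_length < 0 ||
	    p->transfer_buffer_length > INT_MAX)
		return -1;	/* oversized buffer claim */

	/* isoc: the claimed packet count must fit the buffer */
	unsigned int bytes = (unsigned int)p->transfer_buffer_length;
	unsigned int packets = DIV_ROUND_UP(bytes, maxp);

	if (p->number_of_packets < 0 ||
	    (unsigned int)p->number_of_packets > packets)
		return -1;
	return 0;
}

int main(void)
{
	struct cmd_submit ok  = { 1, 3000, 2 };
	struct cmd_submit bad = { 1, 3000, 9999 };

	printf("ok:  %d\n", validate(&ok, 1024));	/* 0 */
	printf("bad: %d\n", validate(&bad, 1024));	/* -1 */
	return 0;
}
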
diff --git a/drivers/usb/usbip/stub_tx.c b/drivers/usb/usbip/stub_tx.c index b18bce96c212..53172b1f6257 100644 --- a/drivers/usb/usbip/stub_tx.c +++ b/drivers/usb/usbip/stub_tx.c | |||
| @@ -167,6 +167,13 @@ static int stub_send_ret_submit(struct stub_device *sdev) | |||
| 167 | memset(&pdu_header, 0, sizeof(pdu_header)); | 167 | memset(&pdu_header, 0, sizeof(pdu_header)); |
| 168 | memset(&msg, 0, sizeof(msg)); | 168 | memset(&msg, 0, sizeof(msg)); |
| 169 | 169 | ||
| 170 | if (urb->actual_length > 0 && !urb->transfer_buffer) { | ||
| 171 | dev_err(&sdev->udev->dev, | ||
| 172 | "urb: actual_length %d transfer_buffer null\n", | ||
| 173 | urb->actual_length); | ||
| 174 | return -1; | ||
| 175 | } | ||
| 176 | |||
| 170 | if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) | 177 | if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS) |
| 171 | iovnum = 2 + urb->number_of_packets; | 178 | iovnum = 2 + urb->number_of_packets; |
| 172 | else | 179 | else |
diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h index e5de35c8c505..473fb8a87289 100644 --- a/drivers/usb/usbip/usbip_common.h +++ b/drivers/usb/usbip/usbip_common.h | |||
| @@ -256,6 +256,7 @@ struct usbip_device { | |||
| 256 | /* lock for status */ | 256 | /* lock for status */ |
| 257 | spinlock_t lock; | 257 | spinlock_t lock; |
| 258 | 258 | ||
| 259 | int sockfd; | ||
| 259 | struct socket *tcp_socket; | 260 | struct socket *tcp_socket; |
| 260 | 261 | ||
| 261 | struct task_struct *tcp_rx; | 262 | struct task_struct *tcp_rx; |
diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c index e78f7472cac4..091f76b7196d 100644 --- a/drivers/usb/usbip/vhci_sysfs.c +++ b/drivers/usb/usbip/vhci_sysfs.c | |||
| @@ -17,15 +17,20 @@ | |||
| 17 | 17 | ||
| 18 | /* | 18 | /* |
| 19 | * output example: | 19 | * output example: |
| 20 | * hub port sta spd dev socket local_busid | 20 | * hub port sta spd dev sockfd local_busid |
| 21 | * hs 0000 004 000 00000000 c5a7bb80 1-2.3 | 21 | * hs 0000 004 000 00000000 3 1-2.3 |
| 22 | * ................................................ | 22 | * ................................................ |
| 23 | * ss 0008 004 000 00000000 d8cee980 2-3.4 | 23 | * ss 0008 004 000 00000000 4 2-3.4 |
| 24 | * ................................................ | 24 | * ................................................ |
| 25 | * | 25 | * |
| 26 | * IP address can be retrieved from a socket pointer address by looking | 26 | * Output includes socket fd instead of socket pointer address to avoid |
| 27 | * up /proc/net/{tcp,tcp6}. Also, a userland program may remember a | 27 | * leaking kernel memory address in: |
| 28 | * port number and its peer IP address. | 28 | * /sys/devices/platform/vhci_hcd.0/status and in debug output. |
| 29 | * The socket pointer address is not used at the moment and it was made | ||
| 30 | * visible as a convenient way to find IP address from socket pointer | ||
| 31 | * address by looking up /proc/net/{tcp,tcp6}. As this opens a security | ||
| 32 | * hole, the change is made to use sockfd instead. | ||
| 33 | * | ||
| 29 | */ | 34 | */ |
| 30 | static void port_show_vhci(char **out, int hub, int port, struct vhci_device *vdev) | 35 | static void port_show_vhci(char **out, int hub, int port, struct vhci_device *vdev) |
| 31 | { | 36 | { |
| @@ -39,8 +44,8 @@ static void port_show_vhci(char **out, int hub, int port, struct vhci_device *vd | |||
| 39 | if (vdev->ud.status == VDEV_ST_USED) { | 44 | if (vdev->ud.status == VDEV_ST_USED) { |
| 40 | *out += sprintf(*out, "%03u %08x ", | 45 | *out += sprintf(*out, "%03u %08x ", |
| 41 | vdev->speed, vdev->devid); | 46 | vdev->speed, vdev->devid); |
| 42 | *out += sprintf(*out, "%16p %s", | 47 | *out += sprintf(*out, "%u %s", |
| 43 | vdev->ud.tcp_socket, | 48 | vdev->ud.sockfd, |
| 44 | dev_name(&vdev->udev->dev)); | 49 | dev_name(&vdev->udev->dev)); |
| 45 | 50 | ||
| 46 | } else { | 51 | } else { |
| @@ -160,7 +165,8 @@ static ssize_t nports_show(struct device *dev, struct device_attribute *attr, | |||
| 160 | char *s = out; | 165 | char *s = out; |
| 161 | 166 | ||
| 162 | /* | 167 | /* |
| 163 | * Half the ports are for SPEED_HIGH and half for SPEED_SUPER, thus the * 2. | 168 | * Half the ports are for SPEED_HIGH and half for SPEED_SUPER, |
| 169 | * thus the * 2. | ||
| 164 | */ | 170 | */ |
| 165 | out += sprintf(out, "%d\n", VHCI_PORTS * vhci_num_controllers); | 171 | out += sprintf(out, "%d\n", VHCI_PORTS * vhci_num_controllers); |
| 166 | return out - s; | 172 | return out - s; |
| @@ -366,6 +372,7 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr, | |||
| 366 | 372 | ||
| 367 | vdev->devid = devid; | 373 | vdev->devid = devid; |
| 368 | vdev->speed = speed; | 374 | vdev->speed = speed; |
| 375 | vdev->ud.sockfd = sockfd; | ||
| 369 | vdev->ud.tcp_socket = socket; | 376 | vdev->ud.tcp_socket = socket; |
| 370 | vdev->ud.status = VDEV_ST_NOTASSIGNED; | 377 | vdev->ud.status = VDEV_ST_NOTASSIGNED; |
| 371 | 378 | ||
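To illustrate the effect of this interface change, here is a minimal userspace sketch (an assumption for illustration, not part of the patch) that consumes the new sockfd column. It follows the "hub port sta spd dev sockfd local_busid" layout from the example output in the comment above; lines for unused ports have a different shape and are not handled, and all names here are illustrative:

    #include <stdio.h>

    int main(void)
    {
    	char hub[4], busid[32];
    	unsigned int port, sta, spd, devid, sockfd;
    	FILE *f = fopen("/sys/devices/platform/vhci_hcd.0/status", "r");

    	if (!f)
    		return 1;
    	if (fscanf(f, "%*[^\n] ") < 0)	/* skip the header line */
    		return 1;
    	/* parse in-use ports: hub port sta spd dev sockfd local_busid */
    	while (fscanf(f, "%3s %u %u %u %x %u %31s",
    		      hub, &port, &sta, &spd, &devid, &sockfd, busid) == 7)
    		printf("port %u -> sockfd %u (%s)\n", port, sockfd, busid);
    	fclose(f);
    	return 0;
    }

Because the kernel now prints the small per-process file descriptor instead of a `%p` pointer, nothing in this output reveals kernel addresses.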
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index a9192fe4f345..c92131edfaba 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c | |||
| @@ -522,10 +522,8 @@ static int virtio_mmio_probe(struct platform_device *pdev) | |||
| 522 | return -EBUSY; | 522 | return -EBUSY; |
| 523 | 523 | ||
| 524 | vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL); | 524 | vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL); |
| 525 | if (!vm_dev) { | 525 | if (!vm_dev) |
| 526 | rc = -ENOMEM; | 526 | return -ENOMEM; |
| 527 | goto free_mem; | ||
| 528 | } | ||
| 529 | 527 | ||
| 530 | vm_dev->vdev.dev.parent = &pdev->dev; | 528 | vm_dev->vdev.dev.parent = &pdev->dev; |
| 531 | vm_dev->vdev.dev.release = virtio_mmio_release_dev; | 529 | vm_dev->vdev.dev.release = virtio_mmio_release_dev; |
| @@ -535,17 +533,14 @@ static int virtio_mmio_probe(struct platform_device *pdev) | |||
| 535 | spin_lock_init(&vm_dev->lock); | 533 | spin_lock_init(&vm_dev->lock); |
| 536 | 534 | ||
| 537 | vm_dev->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem)); | 535 | vm_dev->base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem)); |
| 538 | if (vm_dev->base == NULL) { | 536 | if (vm_dev->base == NULL) |
| 539 | rc = -EFAULT; | 537 | return -EFAULT; |
| 540 | goto free_vmdev; | ||
| 541 | } | ||
| 542 | 538 | ||
| 543 | /* Check magic value */ | 539 | /* Check magic value */ |
| 544 | magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE); | 540 | magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE); |
| 545 | if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) { | 541 | if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) { |
| 546 | dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic); | 542 | dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic); |
| 547 | rc = -ENODEV; | 543 | return -ENODEV; |
| 548 | goto unmap; | ||
| 549 | } | 544 | } |
| 550 | 545 | ||
| 551 | /* Check device version */ | 546 | /* Check device version */ |
| @@ -553,8 +548,7 @@ static int virtio_mmio_probe(struct platform_device *pdev) | |||
| 553 | if (vm_dev->version < 1 || vm_dev->version > 2) { | 548 | if (vm_dev->version < 1 || vm_dev->version > 2) { |
| 554 | dev_err(&pdev->dev, "Version %ld not supported!\n", | 549 | dev_err(&pdev->dev, "Version %ld not supported!\n", |
| 555 | vm_dev->version); | 550 | vm_dev->version); |
| 556 | rc = -ENXIO; | 551 | return -ENXIO; |
| 557 | goto unmap; | ||
| 558 | } | 552 | } |
| 559 | 553 | ||
| 560 | vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID); | 554 | vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID); |
| @@ -563,8 +557,7 @@ static int virtio_mmio_probe(struct platform_device *pdev) | |||
| 563 | * virtio-mmio device with an ID 0 is a (dummy) placeholder | 557 | * virtio-mmio device with an ID 0 is a (dummy) placeholder |
| 564 | * with no function. End probing now with no error reported. | 558 | * with no function. End probing now with no error reported. |
| 565 | */ | 559 | */ |
| 566 | rc = -ENODEV; | 560 | return -ENODEV; |
| 567 | goto unmap; | ||
| 568 | } | 561 | } |
| 569 | vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID); | 562 | vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID); |
| 570 | 563 | ||
| @@ -590,33 +583,15 @@ static int virtio_mmio_probe(struct platform_device *pdev) | |||
| 590 | platform_set_drvdata(pdev, vm_dev); | 583 | platform_set_drvdata(pdev, vm_dev); |
| 591 | 584 | ||
| 592 | rc = register_virtio_device(&vm_dev->vdev); | 585 | rc = register_virtio_device(&vm_dev->vdev); |
| 593 | if (rc) { | 586 | if (rc) |
| 594 | iounmap(vm_dev->base); | ||
| 595 | devm_release_mem_region(&pdev->dev, mem->start, | ||
| 596 | resource_size(mem)); | ||
| 597 | put_device(&vm_dev->vdev.dev); | 587 | put_device(&vm_dev->vdev.dev); |
| 598 | } | 588 | |
| 599 | return rc; | ||
| 600 | unmap: | ||
| 601 | iounmap(vm_dev->base); | ||
| 602 | free_mem: | ||
| 603 | devm_release_mem_region(&pdev->dev, mem->start, | ||
| 604 | resource_size(mem)); | ||
| 605 | free_vmdev: | ||
| 606 | devm_kfree(&pdev->dev, vm_dev); | ||
| 607 | return rc; | 589 | return rc; |
| 608 | } | 590 | } |
| 609 | 591 | ||
| 610 | static int virtio_mmio_remove(struct platform_device *pdev) | 592 | static int virtio_mmio_remove(struct platform_device *pdev) |
| 611 | { | 593 | { |
| 612 | struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev); | 594 | struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev); |
| 613 | struct resource *mem; | ||
| 614 | |||
| 615 | iounmap(vm_dev->base); | ||
| 616 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
| 617 | if (mem) | ||
| 618 | devm_release_mem_region(&pdev->dev, mem->start, | ||
| 619 | resource_size(mem)); | ||
| 620 | unregister_virtio_device(&vm_dev->vdev); | 595 | unregister_virtio_device(&vm_dev->vdev); |
| 621 | 596 | ||
| 622 | return 0; | 597 | return 0; |
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index d8dd54678ab7..e5d0c28372ea 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig | |||
| @@ -269,7 +269,7 @@ config XEN_ACPI_HOTPLUG_CPU | |||
| 269 | 269 | ||
| 270 | config XEN_ACPI_PROCESSOR | 270 | config XEN_ACPI_PROCESSOR |
| 271 | tristate "Xen ACPI processor" | 271 | tristate "Xen ACPI processor" |
| 272 | depends on XEN && X86 && ACPI_PROCESSOR && CPU_FREQ | 272 | depends on XEN && XEN_DOM0 && X86 && ACPI_PROCESSOR && CPU_FREQ |
| 273 | default m | 273 | default m |
| 274 | help | 274 | help |
| 275 | This ACPI processor uploads Power Management information to the Xen | 275 | This ACPI processor uploads Power Management information to the Xen |
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c index 8fc41705c7cd..961a12dc6dc8 100644 --- a/fs/autofs4/waitq.c +++ b/fs/autofs4/waitq.c | |||
| @@ -170,7 +170,6 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi, | |||
| 170 | 170 | ||
| 171 | mutex_unlock(&sbi->wq_mutex); | 171 | mutex_unlock(&sbi->wq_mutex); |
| 172 | 172 | ||
| 173 | if (autofs4_write(sbi, pipe, &pkt, pktsz)) | ||
| 174 | switch (ret = autofs4_write(sbi, pipe, &pkt, pktsz)) { | 173 | switch (ret = autofs4_write(sbi, pipe, &pkt, pktsz)) { |
| 175 | case 0: | 174 | case 0: |
| 176 | break; | 175 | break; |
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index ab69dcb70e8a..1b468250e947 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c | |||
| @@ -1440,6 +1440,29 @@ static int __close_session(struct ceph_mds_client *mdsc, | |||
| 1440 | return request_close_session(mdsc, session); | 1440 | return request_close_session(mdsc, session); |
| 1441 | } | 1441 | } |
| 1442 | 1442 | ||
| 1443 | static bool drop_negative_children(struct dentry *dentry) | ||
| 1444 | { | ||
| 1445 | struct dentry *child; | ||
| 1446 | bool all_negative = true; | ||
| 1447 | |||
| 1448 | if (!d_is_dir(dentry)) | ||
| 1449 | goto out; | ||
| 1450 | |||
| 1451 | spin_lock(&dentry->d_lock); | ||
| 1452 | list_for_each_entry(child, &dentry->d_subdirs, d_child) { | ||
| 1453 | if (d_really_is_positive(child)) { | ||
| 1454 | all_negative = false; | ||
| 1455 | break; | ||
| 1456 | } | ||
| 1457 | } | ||
| 1458 | spin_unlock(&dentry->d_lock); | ||
| 1459 | |||
| 1460 | if (all_negative) | ||
| 1461 | shrink_dcache_parent(dentry); | ||
| 1462 | out: | ||
| 1463 | return all_negative; | ||
| 1464 | } | ||
| 1465 | |||
| 1443 | /* | 1466 | /* |
| 1444 | * Trim old(er) caps. | 1467 | * Trim old(er) caps. |
| 1445 | * | 1468 | * |
| @@ -1490,16 +1513,27 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg) | |||
| 1490 | if ((used | wanted) & ~oissued & mine) | 1513 | if ((used | wanted) & ~oissued & mine) |
| 1491 | goto out; /* we need these caps */ | 1514 | goto out; /* we need these caps */ |
| 1492 | 1515 | ||
| 1493 | session->s_trim_caps--; | ||
| 1494 | if (oissued) { | 1516 | if (oissued) { |
| 1495 | /* we aren't the only cap.. just remove us */ | 1517 | /* we aren't the only cap.. just remove us */ |
| 1496 | __ceph_remove_cap(cap, true); | 1518 | __ceph_remove_cap(cap, true); |
| 1519 | session->s_trim_caps--; | ||
| 1497 | } else { | 1520 | } else { |
| 1521 | struct dentry *dentry; | ||
| 1498 | /* try dropping referring dentries */ | 1522 | /* try dropping referring dentries */ |
| 1499 | spin_unlock(&ci->i_ceph_lock); | 1523 | spin_unlock(&ci->i_ceph_lock); |
| 1500 | d_prune_aliases(inode); | 1524 | dentry = d_find_any_alias(inode); |
| 1501 | dout("trim_caps_cb %p cap %p pruned, count now %d\n", | 1525 | if (dentry && drop_negative_children(dentry)) { |
| 1502 | inode, cap, atomic_read(&inode->i_count)); | 1526 | int count; |
| 1527 | dput(dentry); | ||
| 1528 | d_prune_aliases(inode); | ||
| 1529 | count = atomic_read(&inode->i_count); | ||
| 1530 | if (count == 1) | ||
| 1531 | session->s_trim_caps--; | ||
| 1532 | dout("trim_caps_cb %p cap %p pruned, count now %d\n", | ||
| 1533 | inode, cap, count); | ||
| 1534 | } else { | ||
| 1535 | dput(dentry); | ||
| 1536 | } | ||
| 1503 | return 0; | 1537 | return 0; |
| 1504 | } | 1538 | } |
| 1505 | 1539 | ||
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index e06740436b92..ed88ab8a4774 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c | |||
| @@ -1406,7 +1406,8 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses, | |||
| 1406 | } while (rc == -EAGAIN); | 1406 | } while (rc == -EAGAIN); |
| 1407 | 1407 | ||
| 1408 | if (rc) { | 1408 | if (rc) { |
| 1409 | cifs_dbg(VFS, "ioctl error in smb2_get_dfs_refer rc=%d\n", rc); | 1409 | if (rc != -ENOENT) |
| 1410 | cifs_dbg(VFS, "ioctl error in smb2_get_dfs_refer rc=%d\n", rc); | ||
| 1410 | goto out; | 1411 | goto out; |
| 1411 | } | 1412 | } |
| 1412 | 1413 | ||
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c index 5331631386a2..01346b8b6edb 100644 --- a/fs/cifs/smb2pdu.c +++ b/fs/cifs/smb2pdu.c | |||
| @@ -2678,27 +2678,27 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms, | |||
| 2678 | cifs_small_buf_release(req); | 2678 | cifs_small_buf_release(req); |
| 2679 | 2679 | ||
| 2680 | rsp = (struct smb2_read_rsp *)rsp_iov.iov_base; | 2680 | rsp = (struct smb2_read_rsp *)rsp_iov.iov_base; |
| 2681 | shdr = get_sync_hdr(rsp); | ||
| 2682 | 2681 | ||
| 2683 | if (shdr->Status == STATUS_END_OF_FILE) { | 2682 | if (rc) { |
| 2683 | if (rc != -ENODATA) { | ||
| 2684 | cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE); | ||
| 2685 | cifs_dbg(VFS, "Send error in read = %d\n", rc); | ||
| 2686 | } | ||
| 2684 | free_rsp_buf(resp_buftype, rsp_iov.iov_base); | 2687 | free_rsp_buf(resp_buftype, rsp_iov.iov_base); |
| 2685 | return 0; | 2688 | return rc == -ENODATA ? 0 : rc; |
| 2686 | } | 2689 | } |
| 2687 | 2690 | ||
| 2688 | if (rc) { | 2691 | *nbytes = le32_to_cpu(rsp->DataLength); |
| 2689 | cifs_stats_fail_inc(io_parms->tcon, SMB2_READ_HE); | 2692 | if ((*nbytes > CIFS_MAX_MSGSIZE) || |
| 2690 | cifs_dbg(VFS, "Send error in read = %d\n", rc); | 2693 | (*nbytes > io_parms->length)) { |
| 2691 | } else { | 2694 | cifs_dbg(FYI, "bad length %d for count %d\n", |
| 2692 | *nbytes = le32_to_cpu(rsp->DataLength); | 2695 | *nbytes, io_parms->length); |
| 2693 | if ((*nbytes > CIFS_MAX_MSGSIZE) || | 2696 | rc = -EIO; |
| 2694 | (*nbytes > io_parms->length)) { | 2697 | *nbytes = 0; |
| 2695 | cifs_dbg(FYI, "bad length %d for count %d\n", | ||
| 2696 | *nbytes, io_parms->length); | ||
| 2697 | rc = -EIO; | ||
| 2698 | *nbytes = 0; | ||
| 2699 | } | ||
| 2700 | } | 2698 | } |
| 2701 | 2699 | ||
| 2700 | shdr = get_sync_hdr(rsp); | ||
| 2701 | |||
| 2702 | if (*buf) { | 2702 | if (*buf) { |
| 2703 | memcpy(*buf, (char *)shdr + rsp->DataOffset, *nbytes); | 2703 | memcpy(*buf, (char *)shdr + rsp->DataOffset, *nbytes); |
| 2704 | free_rsp_buf(resp_buftype, rsp_iov.iov_base); | 2704 | free_rsp_buf(resp_buftype, rsp_iov.iov_base); |
diff --git a/fs/cramfs/Kconfig b/fs/cramfs/Kconfig index f937082f3244..58e2fe40b2a0 100644 --- a/fs/cramfs/Kconfig +++ b/fs/cramfs/Kconfig | |||
| @@ -34,6 +34,7 @@ config CRAMFS_BLOCKDEV | |||
| 34 | config CRAMFS_MTD | 34 | config CRAMFS_MTD |
| 35 | bool "Support CramFs image directly mapped in physical memory" | 35 | bool "Support CramFs image directly mapped in physical memory" |
| 36 | depends on CRAMFS && MTD | 36 | depends on CRAMFS && MTD |
| 37 | depends on CRAMFS=m || MTD=y | ||
| 37 | default y if !CRAMFS_BLOCKDEV | 38 | default y if !CRAMFS_BLOCKDEV |
| 38 | help | 39 | help |
| 39 | This option allows the CramFs driver to load data directly from | 40 | This option allows the CramFs driver to load data directly from |
diff --git a/fs/dax.c b/fs/dax.c --- a/fs/dax.c +++ b/fs/dax.c | |||
| @@ -627,8 +627,7 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping, | |||
| 627 | 627 | ||
| 628 | if (pfn != pmd_pfn(*pmdp)) | 628 | if (pfn != pmd_pfn(*pmdp)) |
| 629 | goto unlock_pmd; | 629 | goto unlock_pmd; |
| 630 | if (!pmd_dirty(*pmdp) | 630 | if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp)) |
| 631 | && !pmd_access_permitted(*pmdp, WRITE)) | ||
| 632 | goto unlock_pmd; | 631 | goto unlock_pmd; |
| 633 | 632 | ||
| 634 | flush_cache_page(vma, address, pfn); | 633 | flush_cache_page(vma, address, pfn); |
diff --git a/fs/exec.c b/fs/exec.c --- a/fs/exec.c +++ b/fs/exec.c | |||
| @@ -1216,15 +1216,14 @@ killed: | |||
| 1216 | return -EAGAIN; | 1216 | return -EAGAIN; |
| 1217 | } | 1217 | } |
| 1218 | 1218 | ||
| 1219 | char *get_task_comm(char *buf, struct task_struct *tsk) | 1219 | char *__get_task_comm(char *buf, size_t buf_size, struct task_struct *tsk) |
| 1220 | { | 1220 | { |
| 1221 | /* buf must be at least sizeof(tsk->comm) in size */ | ||
| 1222 | task_lock(tsk); | 1221 | task_lock(tsk); |
| 1223 | strncpy(buf, tsk->comm, sizeof(tsk->comm)); | 1222 | strncpy(buf, tsk->comm, buf_size); |
| 1224 | task_unlock(tsk); | 1223 | task_unlock(tsk); |
| 1225 | return buf; | 1224 | return buf; |
| 1226 | } | 1225 | } |
| 1227 | EXPORT_SYMBOL_GPL(get_task_comm); | 1226 | EXPORT_SYMBOL_GPL(__get_task_comm); |
| 1228 | 1227 | ||
| 1229 | /* | 1228 | /* |
| 1230 | * These functions flush out all traces of the currently running executable | 1229 |
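Callers of the old get_task_comm() interface presumably keep working through a size-forwarding wrapper; that wrapper is not visible in this hunk, so the following is a sketch of its likely shape rather than the committed definition:

    /* Assumed wrapper accompanying the rename to __get_task_comm(): */
    #define get_task_comm(buf, tsk) ({			\
    	BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN);	\
    	__get_task_comm(buf, sizeof(buf), tsk);		\
    })

Passing sizeof(buf) explicitly lets the compiler catch callers whose buffer is not TASK_COMM_LEN bytes, which the old "buf must be at least sizeof(tsk->comm)" comment could only request.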
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 07bca11749d4..c941251ac0c0 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c | |||
| @@ -4722,6 +4722,7 @@ retry: | |||
| 4722 | EXT4_INODE_EOFBLOCKS); | 4722 | EXT4_INODE_EOFBLOCKS); |
| 4723 | } | 4723 | } |
| 4724 | ext4_mark_inode_dirty(handle, inode); | 4724 | ext4_mark_inode_dirty(handle, inode); |
| 4725 | ext4_update_inode_fsync_trans(handle, inode, 1); | ||
| 4725 | ret2 = ext4_journal_stop(handle); | 4726 | ret2 = ext4_journal_stop(handle); |
| 4726 | if (ret2) | 4727 | if (ret2) |
| 4727 | break; | 4728 | break; |
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index b4267d72f249..b32cf263750d 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c | |||
| @@ -816,6 +816,8 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir, | |||
| 816 | #ifdef CONFIG_EXT4_FS_POSIX_ACL | 816 | #ifdef CONFIG_EXT4_FS_POSIX_ACL |
| 817 | struct posix_acl *p = get_acl(dir, ACL_TYPE_DEFAULT); | 817 | struct posix_acl *p = get_acl(dir, ACL_TYPE_DEFAULT); |
| 818 | 818 | ||
| 819 | if (IS_ERR(p)) | ||
| 820 | return ERR_CAST(p); | ||
| 819 | if (p) { | 821 | if (p) { |
| 820 | int acl_size = p->a_count * sizeof(ext4_acl_entry); | 822 | int acl_size = p->a_count * sizeof(ext4_acl_entry); |
| 821 | 823 | ||
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 7df2c5644e59..534a9130f625 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
| @@ -149,6 +149,15 @@ static int ext4_meta_trans_blocks(struct inode *inode, int lblocks, | |||
| 149 | */ | 149 | */ |
| 150 | int ext4_inode_is_fast_symlink(struct inode *inode) | 150 | int ext4_inode_is_fast_symlink(struct inode *inode) |
| 151 | { | 151 | { |
| 152 | if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) { | ||
| 153 | int ea_blocks = EXT4_I(inode)->i_file_acl ? | ||
| 154 | EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0; | ||
| 155 | |||
| 156 | if (ext4_has_inline_data(inode)) | ||
| 157 | return 0; | ||
| 158 | |||
| 159 | return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0); | ||
| 160 | } | ||
| 152 | return S_ISLNK(inode->i_mode) && inode->i_size && | 161 | return S_ISLNK(inode->i_mode) && inode->i_size && |
| 153 | (inode->i_size < EXT4_N_BLOCKS * 4); | 162 | (inode->i_size < EXT4_N_BLOCKS * 4); |
| 154 | } | 163 | } |
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 798b3ac680db..e750d68fbcb5 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c | |||
| @@ -1399,6 +1399,10 @@ static struct buffer_head * ext4_find_entry (struct inode *dir, | |||
| 1399 | "falling back\n")); | 1399 | "falling back\n")); |
| 1400 | } | 1400 | } |
| 1401 | nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb); | 1401 | nblocks = dir->i_size >> EXT4_BLOCK_SIZE_BITS(sb); |
| 1402 | if (!nblocks) { | ||
| 1403 | ret = NULL; | ||
| 1404 | goto cleanup_and_exit; | ||
| 1405 | } | ||
| 1402 | start = EXT4_I(dir)->i_dir_start_lookup; | 1406 | start = EXT4_I(dir)->i_dir_start_lookup; |
| 1403 | if (start >= nblocks) | 1407 | if (start >= nblocks) |
| 1404 | start = 0; | 1408 | start = 0; |
diff --git a/fs/namespace.c b/fs/namespace.c index e158ec6b527b..9d1374ab6e06 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
| @@ -2826,6 +2826,7 @@ long do_mount(const char *dev_name, const char __user *dir_name, | |||
| 2826 | SB_DIRSYNC | | 2826 | SB_DIRSYNC | |
| 2827 | SB_SILENT | | 2827 | SB_SILENT | |
| 2828 | SB_POSIXACL | | 2828 | SB_POSIXACL | |
| 2829 | SB_LAZYTIME | | ||
| 2829 | SB_I_VERSION); | 2830 | SB_I_VERSION); |
| 2830 | 2831 | ||
| 2831 | if (flags & MS_REMOUNT) | 2832 | if (flags & MS_REMOUNT) |
diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 0ac2fb1c6b63..b9129e2befea 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c | |||
| @@ -291,12 +291,23 @@ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *dat | |||
| 291 | const struct sockaddr *sap = data->addr; | 291 | const struct sockaddr *sap = data->addr; |
| 292 | struct nfs_net *nn = net_generic(data->net, nfs_net_id); | 292 | struct nfs_net *nn = net_generic(data->net, nfs_net_id); |
| 293 | 293 | ||
| 294 | again: | ||
| 294 | list_for_each_entry(clp, &nn->nfs_client_list, cl_share_link) { | 295 | list_for_each_entry(clp, &nn->nfs_client_list, cl_share_link) { |
| 295 | const struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr; | 296 | const struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr; |
| 296 | /* Don't match clients that failed to initialise properly */ | 297 | /* Don't match clients that failed to initialise properly */ |
| 297 | if (clp->cl_cons_state < 0) | 298 | if (clp->cl_cons_state < 0) |
| 298 | continue; | 299 | continue; |
| 299 | 300 | ||
| 301 | /* If a client is still initializing then we need to wait */ | ||
| 302 | if (clp->cl_cons_state > NFS_CS_READY) { | ||
| 303 | refcount_inc(&clp->cl_count); | ||
| 304 | spin_unlock(&nn->nfs_client_lock); | ||
| 305 | nfs_wait_client_init_complete(clp); | ||
| 306 | nfs_put_client(clp); | ||
| 307 | spin_lock(&nn->nfs_client_lock); | ||
| 308 | goto again; | ||
| 309 | } | ||
| 310 | |||
| 300 | /* Different NFS versions cannot share the same nfs_client */ | 311 | /* Different NFS versions cannot share the same nfs_client */ |
| 301 | if (clp->rpc_ops != data->nfs_mod->rpc_ops) | 312 | if (clp->rpc_ops != data->nfs_mod->rpc_ops) |
| 302 | continue; | 313 | continue; |
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index 12bbab0becb4..65a7e5da508c 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c | |||
| @@ -404,15 +404,19 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp, | |||
| 404 | if (error < 0) | 404 | if (error < 0) |
| 405 | goto error; | 405 | goto error; |
| 406 | 406 | ||
| 407 | if (!nfs4_has_session(clp)) | ||
| 408 | nfs_mark_client_ready(clp, NFS_CS_READY); | ||
| 409 | |||
| 410 | error = nfs4_discover_server_trunking(clp, &old); | 407 | error = nfs4_discover_server_trunking(clp, &old); |
| 411 | if (error < 0) | 408 | if (error < 0) |
| 412 | goto error; | 409 | goto error; |
| 413 | 410 | ||
| 414 | if (clp != old) | 411 | if (clp != old) { |
| 415 | clp->cl_preserve_clid = true; | 412 | clp->cl_preserve_clid = true; |
| 413 | /* | ||
| 414 | * Mark the client as having failed initialization so other | ||
| 415 | * processes walking the nfs_client_list in nfs_match_client() | ||
| 416 | * won't try to use it. | ||
| 417 | */ | ||
| 418 | nfs_mark_client_ready(clp, -EPERM); | ||
| 419 | } | ||
| 416 | nfs_put_client(clp); | 420 | nfs_put_client(clp); |
| 417 | clear_bit(NFS_CS_TSM_POSSIBLE, &clp->cl_flags); | 421 | clear_bit(NFS_CS_TSM_POSSIBLE, &clp->cl_flags); |
| 418 | return old; | 422 | return old; |
| @@ -539,6 +543,9 @@ int nfs40_walk_client_list(struct nfs_client *new, | |||
| 539 | spin_lock(&nn->nfs_client_lock); | 543 | spin_lock(&nn->nfs_client_lock); |
| 540 | list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) { | 544 | list_for_each_entry(pos, &nn->nfs_client_list, cl_share_link) { |
| 541 | 545 | ||
| 546 | if (pos == new) | ||
| 547 | goto found; | ||
| 548 | |||
| 542 | status = nfs4_match_client(pos, new, &prev, nn); | 549 | status = nfs4_match_client(pos, new, &prev, nn); |
| 543 | if (status < 0) | 550 | if (status < 0) |
| 544 | goto out_unlock; | 551 | goto out_unlock; |
| @@ -559,6 +566,7 @@ int nfs40_walk_client_list(struct nfs_client *new, | |||
| 559 | * way that a SETCLIENTID_CONFIRM to pos can succeed is | 566 | * way that a SETCLIENTID_CONFIRM to pos can succeed is |
| 560 | * if new and pos point to the same server: | 567 | * if new and pos point to the same server: |
| 561 | */ | 568 | */ |
| 569 | found: | ||
| 562 | refcount_inc(&pos->cl_count); | 570 | refcount_inc(&pos->cl_count); |
| 563 | spin_unlock(&nn->nfs_client_lock); | 571 | spin_unlock(&nn->nfs_client_lock); |
| 564 | 572 | ||
| @@ -572,6 +580,7 @@ int nfs40_walk_client_list(struct nfs_client *new, | |||
| 572 | case 0: | 580 | case 0: |
| 573 | nfs4_swap_callback_idents(pos, new); | 581 | nfs4_swap_callback_idents(pos, new); |
| 574 | pos->cl_confirm = new->cl_confirm; | 582 | pos->cl_confirm = new->cl_confirm; |
| 583 | nfs_mark_client_ready(pos, NFS_CS_READY); | ||
| 575 | 584 | ||
| 576 | prev = NULL; | 585 | prev = NULL; |
| 577 | *result = pos; | 586 | *result = pos; |
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 5b5f464f6f2a..4a379d7918f2 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
| @@ -1890,6 +1890,8 @@ int nfs_commit_inode(struct inode *inode, int how) | |||
| 1890 | if (res) | 1890 | if (res) |
| 1891 | error = nfs_generic_commit_list(inode, &head, how, &cinfo); | 1891 | error = nfs_generic_commit_list(inode, &head, how, &cinfo); |
| 1892 | nfs_commit_end(cinfo.mds); | 1892 | nfs_commit_end(cinfo.mds); |
| 1893 | if (res == 0) | ||
| 1894 | return res; | ||
| 1893 | if (error < 0) | 1895 | if (error < 0) |
| 1894 | goto out_error; | 1896 | goto out_error; |
| 1895 | if (!may_wait) | 1897 | if (!may_wait) |
diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c index 697f8ae7792d..f650e475d8f0 100644 --- a/fs/nfsd/auth.c +++ b/fs/nfsd/auth.c | |||
| @@ -60,6 +60,9 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp) | |||
| 60 | gi->gid[i] = exp->ex_anon_gid; | 60 | gi->gid[i] = exp->ex_anon_gid; |
| 61 | else | 61 | else |
| 62 | gi->gid[i] = rqgi->gid[i]; | 62 | gi->gid[i] = rqgi->gid[i]; |
| 63 | |||
| 64 | /* Each thread allocates its own gi, no race */ | ||
| 65 | groups_sort(gi); | ||
| 63 | } | 66 | } |
| 64 | } else { | 67 | } else { |
| 65 | gi = get_group_info(rqgi); | 68 | gi = get_group_info(rqgi); |
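The groups_sort(gi) call matters because group membership lookups binary-search the gid array, so an unsorted array can produce false negatives. A sketch of the kind of lookup the sort protects (illustrative; the in-tree helper this mirrors is groups_search() in kernel/groups.c):

    #include <linux/cred.h>
    #include <linux/uidgid.h>

    /* Illustrative binary search over a sorted gid array. */
    static bool example_in_group(const struct group_info *gi, kgid_t grp)
    {
    	unsigned int left = 0, right = gi->ngroups;

    	while (left < right) {
    		unsigned int mid = (left + right) / 2;

    		if (gid_gt(grp, gi->gid[mid]))
    			left = mid + 1;
    		else if (gid_lt(grp, gi->gid[mid]))
    			right = mid;
    		else
    			return true;
    	}
    	return false;
    }

As the added comment notes, each nfsd thread builds its own gi here, so sorting in place is race-free.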
diff --git a/fs/overlayfs/Kconfig b/fs/overlayfs/Kconfig index cbfc196e5dc5..5ac415466861 100644 --- a/fs/overlayfs/Kconfig +++ b/fs/overlayfs/Kconfig | |||
| @@ -24,6 +24,16 @@ config OVERLAY_FS_REDIRECT_DIR | |||
| 24 | an overlay which has redirects on a kernel that doesn't support this | 24 | an overlay which has redirects on a kernel that doesn't support this |
| 25 | feature will have unexpected results. | 25 | feature will have unexpected results. |
| 26 | 26 | ||
| 27 | config OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW | ||
| 28 | bool "Overlayfs: follow redirects even if redirects are turned off" | ||
| 29 | default y | ||
| 30 | depends on OVERLAY_FS | ||
| 31 | help | ||
| 32 | Disable this to get a possibly more secure configuration, but that | ||
| 33 | might not be backward compatible with previous kernels. | ||
| 34 | |||
| 35 | For more information, see Documentation/filesystems/overlayfs.txt | ||
| 36 | |||
| 27 | config OVERLAY_FS_INDEX | 37 | config OVERLAY_FS_INDEX |
| 28 | bool "Overlayfs: turn on inodes index feature by default" | 38 | bool "Overlayfs: turn on inodes index feature by default" |
| 29 | depends on OVERLAY_FS | 39 | depends on OVERLAY_FS |
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c index e13921824c70..f9788bc116a8 100644 --- a/fs/overlayfs/dir.c +++ b/fs/overlayfs/dir.c | |||
| @@ -887,7 +887,8 @@ static int ovl_set_redirect(struct dentry *dentry, bool samedir) | |||
| 887 | spin_unlock(&dentry->d_lock); | 887 | spin_unlock(&dentry->d_lock); |
| 888 | } else { | 888 | } else { |
| 889 | kfree(redirect); | 889 | kfree(redirect); |
| 890 | pr_warn_ratelimited("overlay: failed to set redirect (%i)\n", err); | 890 | pr_warn_ratelimited("overlayfs: failed to set redirect (%i)\n", |
| 891 | err); | ||
| 891 | /* Fall back to userspace copy-up */ | 892 | /* Fall back to userspace copy-up */ |
| 892 | err = -EXDEV; | 893 | err = -EXDEV; |
| 893 | } | 894 | } |
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c index 625ed8066570..beb945e1963c 100644 --- a/fs/overlayfs/namei.c +++ b/fs/overlayfs/namei.c | |||
| @@ -435,7 +435,7 @@ int ovl_verify_index(struct dentry *index, struct ovl_path *lower, | |||
| 435 | 435 | ||
| 436 | /* Check if index is orphan and don't warn before cleaning it */ | 436 | /* Check if index is orphan and don't warn before cleaning it */ |
| 437 | if (d_inode(index)->i_nlink == 1 && | 437 | if (d_inode(index)->i_nlink == 1 && |
| 438 | ovl_get_nlink(index, origin.dentry, 0) == 0) | 438 | ovl_get_nlink(origin.dentry, index, 0) == 0) |
| 439 | err = -ENOENT; | 439 | err = -ENOENT; |
| 440 | 440 | ||
| 441 | dput(origin.dentry); | 441 | dput(origin.dentry); |
| @@ -681,6 +681,22 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, | |||
| 681 | if (d.stop) | 681 | if (d.stop) |
| 682 | break; | 682 | break; |
| 683 | 683 | ||
| 684 | /* | ||
| 685 | * Following redirects can have security consequences: it's like | ||
| 686 | * a symlink into the lower layer without the permission checks. | ||
| 687 | * This is only a problem if the upper layer is untrusted (e.g. | ||
| 688 | * comes from a USB drive). This can allow a non-readable file | ||
| 689 | * or directory to become readable. | ||
| 690 | * | ||
| 691 | * Following redirects only when they are enabled closes this | ||
| 692 | * attack vector when it is not needed. | ||
| 693 | */ | ||
| 694 | err = -EPERM; | ||
| 695 | if (d.redirect && !ofs->config.redirect_follow) { | ||
| 696 | pr_warn_ratelimited("overlay: refusing to follow redirect for (%pd2)\n", dentry); | ||
| 697 | goto out_put; | ||
| 698 | } | ||
| 699 | |||
| 684 | if (d.redirect && d.redirect[0] == '/' && poe != roe) { | 700 | if (d.redirect && d.redirect[0] == '/' && poe != roe) { |
| 685 | poe = roe; | 701 | poe = roe; |
| 686 | 702 | ||
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h index 13eab09a6b6f..b489099ccd49 100644 --- a/fs/overlayfs/overlayfs.h +++ b/fs/overlayfs/overlayfs.h | |||
| @@ -180,7 +180,7 @@ static inline int ovl_do_whiteout(struct inode *dir, struct dentry *dentry) | |||
| 180 | static inline struct dentry *ovl_do_tmpfile(struct dentry *dentry, umode_t mode) | 180 | static inline struct dentry *ovl_do_tmpfile(struct dentry *dentry, umode_t mode) |
| 181 | { | 181 | { |
| 182 | struct dentry *ret = vfs_tmpfile(dentry, mode, 0); | 182 | struct dentry *ret = vfs_tmpfile(dentry, mode, 0); |
| 183 | int err = IS_ERR(ret) ? PTR_ERR(ret) : 0; | 183 | int err = PTR_ERR_OR_ZERO(ret); |
| 184 | 184 | ||
| 185 | pr_debug("tmpfile(%pd2, 0%o) = %i\n", dentry, mode, err); | 185 | pr_debug("tmpfile(%pd2, 0%o) = %i\n", dentry, mode, err); |
| 186 | return ret; | 186 | return ret; |
diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h index 752bab645879..9d0bc03bf6e4 100644 --- a/fs/overlayfs/ovl_entry.h +++ b/fs/overlayfs/ovl_entry.h | |||
| @@ -14,6 +14,8 @@ struct ovl_config { | |||
| 14 | char *workdir; | 14 | char *workdir; |
| 15 | bool default_permissions; | 15 | bool default_permissions; |
| 16 | bool redirect_dir; | 16 | bool redirect_dir; |
| 17 | bool redirect_follow; | ||
| 18 | const char *redirect_mode; | ||
| 17 | bool index; | 19 | bool index; |
| 18 | }; | 20 | }; |
| 19 | 21 | ||
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c index 0daa4354fec4..8c98578d27a1 100644 --- a/fs/overlayfs/readdir.c +++ b/fs/overlayfs/readdir.c | |||
| @@ -499,7 +499,7 @@ out: | |||
| 499 | return err; | 499 | return err; |
| 500 | 500 | ||
| 501 | fail: | 501 | fail: |
| 502 | pr_warn_ratelimited("overlay: failed to look up (%s) for ino (%i)\n", | 502 | pr_warn_ratelimited("overlayfs: failed to look up (%s) for ino (%i)\n", |
| 503 | p->name, err); | 503 | p->name, err); |
| 504 | goto out; | 504 | goto out; |
| 505 | } | 505 | } |
| @@ -663,7 +663,10 @@ static int ovl_iterate_real(struct file *file, struct dir_context *ctx) | |||
| 663 | return PTR_ERR(rdt.cache); | 663 | return PTR_ERR(rdt.cache); |
| 664 | } | 664 | } |
| 665 | 665 | ||
| 666 | return iterate_dir(od->realfile, &rdt.ctx); | 666 | err = iterate_dir(od->realfile, &rdt.ctx); |
| 667 | ctx->pos = rdt.ctx.pos; | ||
| 668 | |||
| 669 | return err; | ||
| 667 | } | 670 | } |
| 668 | 671 | ||
| 669 | 672 | ||
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 288d20f9a55a..76440feb79f6 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c | |||
| @@ -33,6 +33,13 @@ module_param_named(redirect_dir, ovl_redirect_dir_def, bool, 0644); | |||
| 33 | MODULE_PARM_DESC(ovl_redirect_dir_def, | 33 | MODULE_PARM_DESC(ovl_redirect_dir_def, |
| 34 | "Default to on or off for the redirect_dir feature"); | 34 | "Default to on or off for the redirect_dir feature"); |
| 35 | 35 | ||
| 36 | static bool ovl_redirect_always_follow = | ||
| 37 | IS_ENABLED(CONFIG_OVERLAY_FS_REDIRECT_ALWAYS_FOLLOW); | ||
| 38 | module_param_named(redirect_always_follow, ovl_redirect_always_follow, | ||
| 39 | bool, 0644); | ||
| 40 | MODULE_PARM_DESC(ovl_redirect_always_follow, | ||
| 41 | "Follow redirects even if redirect_dir feature is turned off"); | ||
| 42 | |||
| 36 | static bool ovl_index_def = IS_ENABLED(CONFIG_OVERLAY_FS_INDEX); | 43 | static bool ovl_index_def = IS_ENABLED(CONFIG_OVERLAY_FS_INDEX); |
| 37 | module_param_named(index, ovl_index_def, bool, 0644); | 44 | module_param_named(index, ovl_index_def, bool, 0644); |
| 38 | MODULE_PARM_DESC(ovl_index_def, | 45 | MODULE_PARM_DESC(ovl_index_def, |
| @@ -232,6 +239,7 @@ static void ovl_free_fs(struct ovl_fs *ofs) | |||
| 232 | kfree(ofs->config.lowerdir); | 239 | kfree(ofs->config.lowerdir); |
| 233 | kfree(ofs->config.upperdir); | 240 | kfree(ofs->config.upperdir); |
| 234 | kfree(ofs->config.workdir); | 241 | kfree(ofs->config.workdir); |
| 242 | kfree(ofs->config.redirect_mode); | ||
| 235 | if (ofs->creator_cred) | 243 | if (ofs->creator_cred) |
| 236 | put_cred(ofs->creator_cred); | 244 | put_cred(ofs->creator_cred); |
| 237 | kfree(ofs); | 245 | kfree(ofs); |
| @@ -244,6 +252,7 @@ static void ovl_put_super(struct super_block *sb) | |||
| 244 | ovl_free_fs(ofs); | 252 | ovl_free_fs(ofs); |
| 245 | } | 253 | } |
| 246 | 254 | ||
| 255 | /* Sync real dirty inodes in upper filesystem (if it exists) */ | ||
| 247 | static int ovl_sync_fs(struct super_block *sb, int wait) | 256 | static int ovl_sync_fs(struct super_block *sb, int wait) |
| 248 | { | 257 | { |
| 249 | struct ovl_fs *ofs = sb->s_fs_info; | 258 | struct ovl_fs *ofs = sb->s_fs_info; |
| @@ -252,14 +261,24 @@ static int ovl_sync_fs(struct super_block *sb, int wait) | |||
| 252 | 261 | ||
| 253 | if (!ofs->upper_mnt) | 262 | if (!ofs->upper_mnt) |
| 254 | return 0; | 263 | return 0; |
| 255 | upper_sb = ofs->upper_mnt->mnt_sb; | 264 | |
| 256 | if (!upper_sb->s_op->sync_fs) | 265 | /* |
| 266 | * If this is a sync(2) call or an emergency sync, all the super blocks | ||
| 267 | * will be iterated, including upper_sb, so no need to do anything. | ||
| 268 | * | ||
| 269 | * If this is a syncfs(2) call, then we do need to call | ||
| 270 | * sync_filesystem() on upper_sb, but it is enough to do so when | ||
| 271 | * we are called with wait == 1. | ||
| 272 | */ | ||
| 273 | if (!wait) | ||
| 257 | return 0; | 274 | return 0; |
| 258 | 275 | ||
| 259 | /* real inodes have already been synced by sync_filesystem(ovl_sb) */ | 276 | upper_sb = ofs->upper_mnt->mnt_sb; |
| 277 | |||
| 260 | down_read(&upper_sb->s_umount); | 278 | down_read(&upper_sb->s_umount); |
| 261 | ret = upper_sb->s_op->sync_fs(upper_sb, wait); | 279 | ret = sync_filesystem(upper_sb); |
| 262 | up_read(&upper_sb->s_umount); | 280 | up_read(&upper_sb->s_umount); |
| 281 | |||
| 263 | return ret; | 282 | return ret; |
| 264 | } | 283 | } |
| 265 | 284 | ||
| @@ -295,6 +314,11 @@ static bool ovl_force_readonly(struct ovl_fs *ofs) | |||
| 295 | return (!ofs->upper_mnt || !ofs->workdir); | 314 | return (!ofs->upper_mnt || !ofs->workdir); |
| 296 | } | 315 | } |
| 297 | 316 | ||
| 317 | static const char *ovl_redirect_mode_def(void) | ||
| 318 | { | ||
| 319 | return ovl_redirect_dir_def ? "on" : "off"; | ||
| 320 | } | ||
| 321 | |||
| 298 | /** | 322 | /** |
| 299 | * ovl_show_options | 323 | * ovl_show_options |
| 300 | * | 324 | * |
| @@ -313,12 +337,10 @@ static int ovl_show_options(struct seq_file *m, struct dentry *dentry) | |||
| 313 | } | 337 | } |
| 314 | if (ofs->config.default_permissions) | 338 | if (ofs->config.default_permissions) |
| 315 | seq_puts(m, ",default_permissions"); | 339 | seq_puts(m, ",default_permissions"); |
| 316 | if (ofs->config.redirect_dir != ovl_redirect_dir_def) | 340 | if (strcmp(ofs->config.redirect_mode, ovl_redirect_mode_def()) != 0) |
| 317 | seq_printf(m, ",redirect_dir=%s", | 341 | seq_printf(m, ",redirect_dir=%s", ofs->config.redirect_mode); |
| 318 | ofs->config.redirect_dir ? "on" : "off"); | ||
| 319 | if (ofs->config.index != ovl_index_def) | 342 | if (ofs->config.index != ovl_index_def) |
| 320 | seq_printf(m, ",index=%s", | 343 | seq_printf(m, ",index=%s", ofs->config.index ? "on" : "off"); |
| 321 | ofs->config.index ? "on" : "off"); | ||
| 322 | return 0; | 344 | return 0; |
| 323 | } | 345 | } |
| 324 | 346 | ||
| @@ -348,8 +370,7 @@ enum { | |||
| 348 | OPT_UPPERDIR, | 370 | OPT_UPPERDIR, |
| 349 | OPT_WORKDIR, | 371 | OPT_WORKDIR, |
| 350 | OPT_DEFAULT_PERMISSIONS, | 372 | OPT_DEFAULT_PERMISSIONS, |
| 351 | OPT_REDIRECT_DIR_ON, | 373 | OPT_REDIRECT_DIR, |
| 352 | OPT_REDIRECT_DIR_OFF, | ||
| 353 | OPT_INDEX_ON, | 374 | OPT_INDEX_ON, |
| 354 | OPT_INDEX_OFF, | 375 | OPT_INDEX_OFF, |
| 355 | OPT_ERR, | 376 | OPT_ERR, |
| @@ -360,8 +381,7 @@ static const match_table_t ovl_tokens = { | |||
| 360 | {OPT_UPPERDIR, "upperdir=%s"}, | 381 | {OPT_UPPERDIR, "upperdir=%s"}, |
| 361 | {OPT_WORKDIR, "workdir=%s"}, | 382 | {OPT_WORKDIR, "workdir=%s"}, |
| 362 | {OPT_DEFAULT_PERMISSIONS, "default_permissions"}, | 383 | {OPT_DEFAULT_PERMISSIONS, "default_permissions"}, |
| 363 | {OPT_REDIRECT_DIR_ON, "redirect_dir=on"}, | 384 | {OPT_REDIRECT_DIR, "redirect_dir=%s"}, |
| 364 | {OPT_REDIRECT_DIR_OFF, "redirect_dir=off"}, | ||
| 365 | {OPT_INDEX_ON, "index=on"}, | 385 | {OPT_INDEX_ON, "index=on"}, |
| 366 | {OPT_INDEX_OFF, "index=off"}, | 386 | {OPT_INDEX_OFF, "index=off"}, |
| 367 | {OPT_ERR, NULL} | 387 | {OPT_ERR, NULL} |
| @@ -390,10 +410,37 @@ static char *ovl_next_opt(char **s) | |||
| 390 | return sbegin; | 410 | return sbegin; |
| 391 | } | 411 | } |
| 392 | 412 | ||
| 413 | static int ovl_parse_redirect_mode(struct ovl_config *config, const char *mode) | ||
| 414 | { | ||
| 415 | if (strcmp(mode, "on") == 0) { | ||
| 416 | config->redirect_dir = true; | ||
| 417 | /* | ||
| 418 | * Does not make sense to have redirect creation without | ||
| 419 | * redirect following. | ||
| 420 | */ | ||
| 421 | config->redirect_follow = true; | ||
| 422 | } else if (strcmp(mode, "follow") == 0) { | ||
| 423 | config->redirect_follow = true; | ||
| 424 | } else if (strcmp(mode, "off") == 0) { | ||
| 425 | if (ovl_redirect_always_follow) | ||
| 426 | config->redirect_follow = true; | ||
| 427 | } else if (strcmp(mode, "nofollow") != 0) { | ||
| 428 | pr_err("overlayfs: bad mount option \"redirect_dir=%s\"\n", | ||
| 429 | mode); | ||
| 430 | return -EINVAL; | ||
| 431 | } | ||
| 432 | |||
| 433 | return 0; | ||
| 434 | } | ||
| 435 | |||
| 393 | static int ovl_parse_opt(char *opt, struct ovl_config *config) | 436 | static int ovl_parse_opt(char *opt, struct ovl_config *config) |
| 394 | { | 437 | { |
| 395 | char *p; | 438 | char *p; |
| 396 | 439 | ||
| 440 | config->redirect_mode = kstrdup(ovl_redirect_mode_def(), GFP_KERNEL); | ||
| 441 | if (!config->redirect_mode) | ||
| 442 | return -ENOMEM; | ||
| 443 | |||
| 397 | while ((p = ovl_next_opt(&opt)) != NULL) { | 444 | while ((p = ovl_next_opt(&opt)) != NULL) { |
| 398 | int token; | 445 | int token; |
| 399 | substring_t args[MAX_OPT_ARGS]; | 446 | substring_t args[MAX_OPT_ARGS]; |
| @@ -428,12 +475,11 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config) | |||
| 428 | config->default_permissions = true; | 475 | config->default_permissions = true; |
| 429 | break; | 476 | break; |
| 430 | 477 | ||
| 431 | case OPT_REDIRECT_DIR_ON: | 478 | case OPT_REDIRECT_DIR: |
| 432 | config->redirect_dir = true; | 479 | kfree(config->redirect_mode); |
| 433 | break; | 480 | config->redirect_mode = match_strdup(&args[0]); |
| 434 | 481 | if (!config->redirect_mode) | |
| 435 | case OPT_REDIRECT_DIR_OFF: | 482 | return -ENOMEM; |
| 436 | config->redirect_dir = false; | ||
| 437 | break; | 483 | break; |
| 438 | 484 | ||
| 439 | case OPT_INDEX_ON: | 485 | case OPT_INDEX_ON: |
| @@ -458,7 +504,7 @@ static int ovl_parse_opt(char *opt, struct ovl_config *config) | |||
| 458 | config->workdir = NULL; | 504 | config->workdir = NULL; |
| 459 | } | 505 | } |
| 460 | 506 | ||
| 461 | return 0; | 507 | return ovl_parse_redirect_mode(config, config->redirect_mode); |
| 462 | } | 508 | } |
| 463 | 509 | ||
| 464 | #define OVL_WORKDIR_NAME "work" | 510 | #define OVL_WORKDIR_NAME "work" |
| @@ -1160,7 +1206,6 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) | |||
| 1160 | if (!cred) | 1206 | if (!cred) |
| 1161 | goto out_err; | 1207 | goto out_err; |
| 1162 | 1208 | ||
| 1163 | ofs->config.redirect_dir = ovl_redirect_dir_def; | ||
| 1164 | ofs->config.index = ovl_index_def; | 1209 | ofs->config.index = ovl_index_def; |
| 1165 | err = ovl_parse_opt((char *) data, &ofs->config); | 1210 | err = ovl_parse_opt((char *) data, &ofs->config); |
| 1166 | if (err) | 1211 | if (err) |
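For quick reference, the mode-to-flags mapping that ovl_parse_redirect_mode() above implements (restated from the parser, not additional behavior; redirect_dir and redirect_follow start out false from the zeroed config):

    redirect_dir=   config->redirect_dir   config->redirect_follow
    "on"            true                   true
    "follow"        false                  true
    "off"           false                  false (true when the
                                           redirect_always_follow
                                           module/Kconfig default is set)
    "nofollow"      false                  false
    anything else   -EINVAL

So "off" now means "do not create redirects", while "nofollow" is the strict mode that also refuses to follow existing ones.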
diff --git a/fs/super.c b/fs/super.c index d4e33e8f1e6f..7ff1349609e4 100644 --- a/fs/super.c +++ b/fs/super.c | |||
| @@ -191,6 +191,24 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags, | |||
| 191 | 191 | ||
| 192 | INIT_LIST_HEAD(&s->s_mounts); | 192 | INIT_LIST_HEAD(&s->s_mounts); |
| 193 | s->s_user_ns = get_user_ns(user_ns); | 193 | s->s_user_ns = get_user_ns(user_ns); |
| 194 | init_rwsem(&s->s_umount); | ||
| 195 | lockdep_set_class(&s->s_umount, &type->s_umount_key); | ||
| 196 | /* | ||
| 197 | * sget() can have s_umount recursion. | ||
| 198 | * | ||
| 199 | * When it cannot find a suitable sb, it allocates a new | ||
| 200 | * one (this one), and tries again to find a suitable old | ||
| 201 | * one. | ||
| 202 | * | ||
| 203 | * In case that succeeds, it will acquire the s_umount | ||
| 204 | * lock of the old one. Since these are clearly distinct | ||
| 205 | * locks, and this object isn't exposed yet, there's no | ||
| 206 | * risk of deadlocks. | ||
| 207 | * | ||
| 208 | * Annotate this by putting this lock in a different | ||
| 209 | * subclass. | ||
| 210 | */ | ||
| 211 | down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING); | ||
| 194 | 212 | ||
| 195 | if (security_sb_alloc(s)) | 213 | if (security_sb_alloc(s)) |
| 196 | goto fail; | 214 | goto fail; |
| @@ -218,25 +236,6 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags, | |||
| 218 | goto fail; | 236 | goto fail; |
| 219 | if (list_lru_init_memcg(&s->s_inode_lru)) | 237 | if (list_lru_init_memcg(&s->s_inode_lru)) |
| 220 | goto fail; | 238 | goto fail; |
| 221 | |||
| 222 | init_rwsem(&s->s_umount); | ||
| 223 | lockdep_set_class(&s->s_umount, &type->s_umount_key); | ||
| 224 | /* | ||
| 225 | * sget() can have s_umount recursion. | ||
| 226 | * | ||
| 227 | * When it cannot find a suitable sb, it allocates a new | ||
| 228 | * one (this one), and tries again to find a suitable old | ||
| 229 | * one. | ||
| 230 | * | ||
| 231 | * In case that succeeds, it will acquire the s_umount | ||
| 232 | * lock of the old one. Since these are clearly distinct | ||
| 233 | * locks, and this object isn't exposed yet, there's no | ||
| 234 | * risk of deadlocks. | ||
| 235 | * | ||
| 236 | * Annotate this by putting this lock in a different | ||
| 237 | * subclass. | ||
| 238 | */ | ||
| 239 | down_write_nested(&s->s_umount, SINGLE_DEPTH_NESTING); | ||
| 240 | s->s_count = 1; | 239 | s->s_count = 1; |
| 241 | atomic_set(&s->s_active, 1); | 240 | atomic_set(&s->s_active, 1); |
| 242 | mutex_init(&s->s_vfs_rename_mutex); | 241 | mutex_init(&s->s_vfs_rename_mutex); |
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c index de3f04a98656..3b57ef0f2f76 100644 --- a/fs/xfs/libxfs/xfs_ialloc.c +++ b/fs/xfs/libxfs/xfs_ialloc.c | |||
| @@ -920,8 +920,7 @@ STATIC xfs_agnumber_t | |||
| 920 | xfs_ialloc_ag_select( | 920 | xfs_ialloc_ag_select( |
| 921 | xfs_trans_t *tp, /* transaction pointer */ | 921 | xfs_trans_t *tp, /* transaction pointer */ |
| 922 | xfs_ino_t parent, /* parent directory inode number */ | 922 | xfs_ino_t parent, /* parent directory inode number */ |
| 923 | umode_t mode, /* bits set to indicate file type */ | 923 | umode_t mode) /* bits set to indicate file type */ |
| 924 | int okalloc) /* ok to allocate more space */ | ||
| 925 | { | 924 | { |
| 926 | xfs_agnumber_t agcount; /* number of ag's in the filesystem */ | 925 | xfs_agnumber_t agcount; /* number of ag's in the filesystem */ |
| 927 | xfs_agnumber_t agno; /* current ag number */ | 926 | xfs_agnumber_t agno; /* current ag number */ |
| @@ -978,9 +977,6 @@ xfs_ialloc_ag_select( | |||
| 978 | return agno; | 977 | return agno; |
| 979 | } | 978 | } |
| 980 | 979 | ||
| 981 | if (!okalloc) | ||
| 982 | goto nextag; | ||
| 983 | |||
| 984 | if (!pag->pagf_init) { | 980 | if (!pag->pagf_init) { |
| 985 | error = xfs_alloc_pagf_init(mp, tp, agno, flags); | 981 | error = xfs_alloc_pagf_init(mp, tp, agno, flags); |
| 986 | if (error) | 982 | if (error) |
| @@ -1680,7 +1676,6 @@ xfs_dialloc( | |||
| 1680 | struct xfs_trans *tp, | 1676 | struct xfs_trans *tp, |
| 1681 | xfs_ino_t parent, | 1677 | xfs_ino_t parent, |
| 1682 | umode_t mode, | 1678 | umode_t mode, |
| 1683 | int okalloc, | ||
| 1684 | struct xfs_buf **IO_agbp, | 1679 | struct xfs_buf **IO_agbp, |
| 1685 | xfs_ino_t *inop) | 1680 | xfs_ino_t *inop) |
| 1686 | { | 1681 | { |
| @@ -1692,6 +1687,7 @@ xfs_dialloc( | |||
| 1692 | int noroom = 0; | 1687 | int noroom = 0; |
| 1693 | xfs_agnumber_t start_agno; | 1688 | xfs_agnumber_t start_agno; |
| 1694 | struct xfs_perag *pag; | 1689 | struct xfs_perag *pag; |
| 1690 | int okalloc = 1; | ||
| 1695 | 1691 | ||
| 1696 | if (*IO_agbp) { | 1692 | if (*IO_agbp) { |
| 1697 | /* | 1693 | /* |
| @@ -1707,7 +1703,7 @@ xfs_dialloc( | |||
| 1707 | * We do not have an agbp, so select an initial allocation | 1703 | * We do not have an agbp, so select an initial allocation |
| 1708 | * group for inode allocation. | 1704 | * group for inode allocation. |
| 1709 | */ | 1705 | */ |
| 1710 | start_agno = xfs_ialloc_ag_select(tp, parent, mode, okalloc); | 1706 | start_agno = xfs_ialloc_ag_select(tp, parent, mode); |
| 1711 | if (start_agno == NULLAGNUMBER) { | 1707 | if (start_agno == NULLAGNUMBER) { |
| 1712 | *inop = NULLFSINO; | 1708 | *inop = NULLFSINO; |
| 1713 | return 0; | 1709 | return 0; |
diff --git a/fs/xfs/libxfs/xfs_ialloc.h b/fs/xfs/libxfs/xfs_ialloc.h index d2bdcd5e7312..66a8de0b1caa 100644 --- a/fs/xfs/libxfs/xfs_ialloc.h +++ b/fs/xfs/libxfs/xfs_ialloc.h | |||
| @@ -81,7 +81,6 @@ xfs_dialloc( | |||
| 81 | struct xfs_trans *tp, /* transaction pointer */ | 81 | struct xfs_trans *tp, /* transaction pointer */ |
| 82 | xfs_ino_t parent, /* parent inode (directory) */ | 82 | xfs_ino_t parent, /* parent inode (directory) */ |
| 83 | umode_t mode, /* mode bits for new inode */ | 83 | umode_t mode, /* mode bits for new inode */ |
| 84 | int okalloc, /* ok to allocate more space */ | ||
| 85 | struct xfs_buf **agbp, /* buf for a.g. inode header */ | 84 | struct xfs_buf **agbp, /* buf for a.g. inode header */ |
| 86 | xfs_ino_t *inop); /* inode number allocated */ | 85 | xfs_ino_t *inop); /* inode number allocated */ |
| 87 | 86 | ||
diff --git a/fs/xfs/scrub/scrub.c b/fs/xfs/scrub/scrub.c index 9c42c4efd01e..ab3aef2ae823 100644 --- a/fs/xfs/scrub/scrub.c +++ b/fs/xfs/scrub/scrub.c | |||
| @@ -46,7 +46,6 @@ | |||
| 46 | #include "scrub/scrub.h" | 46 | #include "scrub/scrub.h" |
| 47 | #include "scrub/common.h" | 47 | #include "scrub/common.h" |
| 48 | #include "scrub/trace.h" | 48 | #include "scrub/trace.h" |
| 49 | #include "scrub/scrub.h" | ||
| 50 | #include "scrub/btree.h" | 49 | #include "scrub/btree.h" |
| 51 | 50 | ||
| 52 | /* | 51 | /* |
diff --git a/fs/xfs/scrub/trace.c b/fs/xfs/scrub/trace.c index 472080e75788..86daed0e3a45 100644 --- a/fs/xfs/scrub/trace.c +++ b/fs/xfs/scrub/trace.c | |||
| @@ -26,7 +26,6 @@ | |||
| 26 | #include "xfs_mount.h" | 26 | #include "xfs_mount.h" |
| 27 | #include "xfs_defer.h" | 27 | #include "xfs_defer.h" |
| 28 | #include "xfs_da_format.h" | 28 | #include "xfs_da_format.h" |
| 29 | #include "xfs_defer.h" | ||
| 30 | #include "xfs_inode.h" | 29 | #include "xfs_inode.h" |
| 31 | #include "xfs_btree.h" | 30 | #include "xfs_btree.h" |
| 32 | #include "xfs_trans.h" | 31 | #include "xfs_trans.h" |
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index 801274126648..b41952a4ddd8 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c | |||
| @@ -749,7 +749,6 @@ xfs_ialloc( | |||
| 749 | xfs_nlink_t nlink, | 749 | xfs_nlink_t nlink, |
| 750 | dev_t rdev, | 750 | dev_t rdev, |
| 751 | prid_t prid, | 751 | prid_t prid, |
| 752 | int okalloc, | ||
| 753 | xfs_buf_t **ialloc_context, | 752 | xfs_buf_t **ialloc_context, |
| 754 | xfs_inode_t **ipp) | 753 | xfs_inode_t **ipp) |
| 755 | { | 754 | { |
| @@ -765,7 +764,7 @@ xfs_ialloc( | |||
| 765 | * Call the space management code to pick | 764 | * Call the space management code to pick |
| 766 | * the on-disk inode to be allocated. | 765 | * the on-disk inode to be allocated. |
| 767 | */ | 766 | */ |
| 768 | error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, okalloc, | 767 | error = xfs_dialloc(tp, pip ? pip->i_ino : 0, mode, |
| 769 | ialloc_context, &ino); | 768 | ialloc_context, &ino); |
| 770 | if (error) | 769 | if (error) |
| 771 | return error; | 770 | return error; |
| @@ -957,7 +956,6 @@ xfs_dir_ialloc( | |||
| 957 | xfs_nlink_t nlink, | 956 | xfs_nlink_t nlink, |
| 958 | dev_t rdev, | 957 | dev_t rdev, |
| 959 | prid_t prid, /* project id */ | 958 | prid_t prid, /* project id */ |
| 960 | int okalloc, /* ok to allocate new space */ | ||
| 961 | xfs_inode_t **ipp, /* pointer to inode; it will be | 959 | xfs_inode_t **ipp, /* pointer to inode; it will be |
| 962 | locked. */ | 960 | locked. */ |
| 963 | int *committed) | 961 | int *committed) |
| @@ -988,8 +986,8 @@ xfs_dir_ialloc( | |||
| 988 | * transaction commit so that no other process can steal | 986 | * transaction commit so that no other process can steal |
| 989 | * the inode(s) that we've just allocated. | 987 | * the inode(s) that we've just allocated. |
| 990 | */ | 988 | */ |
| 991 | code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, okalloc, | 989 | code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, &ialloc_context, |
| 992 | &ialloc_context, &ip); | 990 | &ip); |
| 993 | 991 | ||
| 994 | /* | 992 | /* |
| 995 | * Return an error if we were unable to allocate a new inode. | 993 | * Return an error if we were unable to allocate a new inode. |
| @@ -1061,7 +1059,7 @@ xfs_dir_ialloc( | |||
| 1061 | * this call should always succeed. | 1059 | * this call should always succeed. |
| 1062 | */ | 1060 | */ |
| 1063 | code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, | 1061 | code = xfs_ialloc(tp, dp, mode, nlink, rdev, prid, |
| 1064 | okalloc, &ialloc_context, &ip); | 1062 | &ialloc_context, &ip); |
| 1065 | 1063 | ||
| 1066 | /* | 1064 | /* |
| 1067 | * If we get an error at this point, return to the caller | 1065 | * If we get an error at this point, return to the caller |
| @@ -1182,11 +1180,6 @@ xfs_create( | |||
| 1182 | xfs_flush_inodes(mp); | 1180 | xfs_flush_inodes(mp); |
| 1183 | error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp); | 1181 | error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp); |
| 1184 | } | 1182 | } |
| 1185 | if (error == -ENOSPC) { | ||
| 1186 | /* No space at all so try a "no-allocation" reservation */ | ||
| 1187 | resblks = 0; | ||
| 1188 | error = xfs_trans_alloc(mp, tres, 0, 0, 0, &tp); | ||
| 1189 | } | ||
| 1190 | if (error) | 1183 | if (error) |
| 1191 | goto out_release_inode; | 1184 | goto out_release_inode; |
| 1192 | 1185 | ||
| @@ -1203,19 +1196,13 @@ xfs_create( | |||
| 1203 | if (error) | 1196 | if (error) |
| 1204 | goto out_trans_cancel; | 1197 | goto out_trans_cancel; |
| 1205 | 1198 | ||
| 1206 | if (!resblks) { | ||
| 1207 | error = xfs_dir_canenter(tp, dp, name); | ||
| 1208 | if (error) | ||
| 1209 | goto out_trans_cancel; | ||
| 1210 | } | ||
| 1211 | |||
| 1212 | /* | 1199 | /* |
| 1213 | * A newly created regular or special file just has one directory | 1200 | * A newly created regular or special file just has one directory |
| 1214 | * entry pointing to it, but a directory also has the "." entry | 1201 | * entry pointing to it, but a directory also has the "." entry |
| 1215 | * pointing to itself. | 1202 | * pointing to itself. |
| 1216 | */ | 1203 | */ |
| 1217 | error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, | 1204 | error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev, prid, &ip, |
| 1218 | prid, resblks > 0, &ip, NULL); | 1205 | NULL); |
| 1219 | if (error) | 1206 | if (error) |
| 1220 | goto out_trans_cancel; | 1207 | goto out_trans_cancel; |
| 1221 | 1208 | ||
| @@ -1340,11 +1327,6 @@ xfs_create_tmpfile( | |||
| 1340 | tres = &M_RES(mp)->tr_create_tmpfile; | 1327 | tres = &M_RES(mp)->tr_create_tmpfile; |
| 1341 | 1328 | ||
| 1342 | error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp); | 1329 | error = xfs_trans_alloc(mp, tres, resblks, 0, 0, &tp); |
| 1343 | if (error == -ENOSPC) { | ||
| 1344 | /* No space at all so try a "no-allocation" reservation */ | ||
| 1345 | resblks = 0; | ||
| 1346 | error = xfs_trans_alloc(mp, tres, 0, 0, 0, &tp); | ||
| 1347 | } | ||
| 1348 | if (error) | 1330 | if (error) |
| 1349 | goto out_release_inode; | 1331 | goto out_release_inode; |
| 1350 | 1332 | ||
| @@ -1353,8 +1335,7 @@ xfs_create_tmpfile( | |||
| 1353 | if (error) | 1335 | if (error) |
| 1354 | goto out_trans_cancel; | 1336 | goto out_trans_cancel; |
| 1355 | 1337 | ||
| 1356 | error = xfs_dir_ialloc(&tp, dp, mode, 1, 0, | 1338 | error = xfs_dir_ialloc(&tp, dp, mode, 1, 0, prid, &ip, NULL); |
| 1357 | prid, resblks > 0, &ip, NULL); | ||
| 1358 | if (error) | 1339 | if (error) |
| 1359 | goto out_trans_cancel; | 1340 | goto out_trans_cancel; |
| 1360 | 1341 | ||
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index cc13c3763721..b2136af9289f 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h | |||
| @@ -428,7 +428,7 @@ xfs_extlen_t xfs_get_extsz_hint(struct xfs_inode *ip); | |||
| 428 | xfs_extlen_t xfs_get_cowextsz_hint(struct xfs_inode *ip); | 428 | xfs_extlen_t xfs_get_cowextsz_hint(struct xfs_inode *ip); |
| 429 | 429 | ||
| 430 | int xfs_dir_ialloc(struct xfs_trans **, struct xfs_inode *, umode_t, | 430 | int xfs_dir_ialloc(struct xfs_trans **, struct xfs_inode *, umode_t, |
| 431 | xfs_nlink_t, dev_t, prid_t, int, | 431 | xfs_nlink_t, dev_t, prid_t, |
| 432 | struct xfs_inode **, int *); | 432 | struct xfs_inode **, int *); |
| 433 | 433 | ||
| 434 | /* from xfs_file.c */ | 434 | /* from xfs_file.c */ |
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c index 33eb4fb2e3fd..7ab52a8bc0a9 100644 --- a/fs/xfs/xfs_iomap.c +++ b/fs/xfs/xfs_iomap.c | |||
| @@ -1213,7 +1213,7 @@ xfs_xattr_iomap_begin( | |||
| 1213 | 1213 | ||
| 1214 | ASSERT(ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL); | 1214 | ASSERT(ip->i_d.di_aformat != XFS_DINODE_FMT_LOCAL); |
| 1215 | error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap, | 1215 | error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap, |
| 1216 | &nimaps, XFS_BMAPI_ENTIRE | XFS_BMAPI_ATTRFORK); | 1216 | &nimaps, XFS_BMAPI_ATTRFORK); |
| 1217 | out_unlock: | 1217 | out_unlock: |
| 1218 | xfs_iunlock(ip, lockmode); | 1218 | xfs_iunlock(ip, lockmode); |
| 1219 | 1219 | ||
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c index 010a13a201aa..ec952dfad359 100644 --- a/fs/xfs/xfs_qm.c +++ b/fs/xfs/xfs_qm.c | |||
| @@ -793,8 +793,8 @@ xfs_qm_qino_alloc( | |||
| 793 | return error; | 793 | return error; |
| 794 | 794 | ||
| 795 | if (need_alloc) { | 795 | if (need_alloc) { |
| 796 | error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip, | 796 | error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, ip, |
| 797 | &committed); | 797 | &committed); |
| 798 | if (error) { | 798 | if (error) { |
| 799 | xfs_trans_cancel(tp); | 799 | xfs_trans_cancel(tp); |
| 800 | return error; | 800 | return error; |
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c index cc041a29eb70..cf7c8f81bebb 100644 --- a/fs/xfs/xfs_reflink.c +++ b/fs/xfs/xfs_reflink.c | |||
| @@ -49,8 +49,6 @@ | |||
| 49 | #include "xfs_alloc.h" | 49 | #include "xfs_alloc.h" |
| 50 | #include "xfs_quota_defs.h" | 50 | #include "xfs_quota_defs.h" |
| 51 | #include "xfs_quota.h" | 51 | #include "xfs_quota.h" |
| 52 | #include "xfs_btree.h" | ||
| 53 | #include "xfs_bmap_btree.h" | ||
| 54 | #include "xfs_reflink.h" | 52 | #include "xfs_reflink.h" |
| 55 | #include "xfs_iomap.h" | 53 | #include "xfs_iomap.h" |
| 56 | #include "xfs_rmap_btree.h" | 54 | #include "xfs_rmap_btree.h" |
diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c index 68d3ca2c4968..2e9e793a8f9d 100644 --- a/fs/xfs/xfs_symlink.c +++ b/fs/xfs/xfs_symlink.c | |||
| @@ -232,11 +232,6 @@ xfs_symlink( | |||
| 232 | resblks = XFS_SYMLINK_SPACE_RES(mp, link_name->len, fs_blocks); | 232 | resblks = XFS_SYMLINK_SPACE_RES(mp, link_name->len, fs_blocks); |
| 233 | 233 | ||
| 234 | error = xfs_trans_alloc(mp, &M_RES(mp)->tr_symlink, resblks, 0, 0, &tp); | 234 | error = xfs_trans_alloc(mp, &M_RES(mp)->tr_symlink, resblks, 0, 0, &tp); |
| 235 | if (error == -ENOSPC && fs_blocks == 0) { | ||
| 236 | resblks = 0; | ||
| 237 | error = xfs_trans_alloc(mp, &M_RES(mp)->tr_symlink, 0, 0, 0, | ||
| 238 | &tp); | ||
| 239 | } | ||
| 240 | if (error) | 235 | if (error) |
| 241 | goto out_release_inode; | 236 | goto out_release_inode; |
| 242 | 237 | ||
| @@ -260,14 +255,6 @@ xfs_symlink( | |||
| 260 | goto out_trans_cancel; | 255 | goto out_trans_cancel; |
| 261 | 256 | ||
| 262 | /* | 257 | /* |
| 263 | * Check for ability to enter directory entry, if no space reserved. | ||
| 264 | */ | ||
| 265 | if (!resblks) { | ||
| 266 | error = xfs_dir_canenter(tp, dp, link_name); | ||
| 267 | if (error) | ||
| 268 | goto out_trans_cancel; | ||
| 269 | } | ||
| 270 | /* | ||
| 271 | * Initialize the bmap freelist prior to calling either | 258 | * Initialize the bmap freelist prior to calling either |
| 272 | * bmapi or the directory create code. | 259 | * bmapi or the directory create code. |
| 273 | */ | 260 | */ |
| @@ -277,7 +264,7 @@ xfs_symlink( | |||
| 277 | * Allocate an inode for the symlink. | 264 | * Allocate an inode for the symlink. |
| 278 | */ | 265 | */ |
| 279 | error = xfs_dir_ialloc(&tp, dp, S_IFLNK | (mode & ~S_IFMT), 1, 0, | 266 | error = xfs_dir_ialloc(&tp, dp, S_IFLNK | (mode & ~S_IFMT), 1, 0, |
| 280 | prid, resblks > 0, &ip, NULL); | 267 | prid, &ip, NULL); |
| 281 | if (error) | 268 | if (error) |
| 282 | goto out_trans_cancel; | 269 | goto out_trans_cancel; |
| 283 | 270 | ||
diff --git a/fs/xfs/xfs_trace.c b/fs/xfs/xfs_trace.c index 5d95fe348294..35f3546b6af5 100644 --- a/fs/xfs/xfs_trace.c +++ b/fs/xfs/xfs_trace.c | |||
| @@ -24,7 +24,6 @@ | |||
| 24 | #include "xfs_mount.h" | 24 | #include "xfs_mount.h" |
| 25 | #include "xfs_defer.h" | 25 | #include "xfs_defer.h" |
| 26 | #include "xfs_da_format.h" | 26 | #include "xfs_da_format.h" |
| 27 | #include "xfs_defer.h" | ||
| 28 | #include "xfs_inode.h" | 27 | #include "xfs_inode.h" |
| 29 | #include "xfs_btree.h" | 28 | #include "xfs_btree.h" |
| 30 | #include "xfs_da_btree.h" | 29 | #include "xfs_da_btree.h" |
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index f0b44c16e88f..c2bae8da642c 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h | |||
| @@ -82,6 +82,14 @@ int ahash_register_instance(struct crypto_template *tmpl, | |||
| 82 | struct ahash_instance *inst); | 82 | struct ahash_instance *inst); |
| 83 | void ahash_free_instance(struct crypto_instance *inst); | 83 | void ahash_free_instance(struct crypto_instance *inst); |
| 84 | 84 | ||
| 85 | int shash_no_setkey(struct crypto_shash *tfm, const u8 *key, | ||
| 86 | unsigned int keylen); | ||
| 87 | |||
| 88 | static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg) | ||
| 89 | { | ||
| 90 | return alg->setkey != shash_no_setkey; | ||
| 91 | } | ||
| 92 | |||
| 85 | int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn, | 93 | int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn, |
| 86 | struct hash_alg_common *alg, | 94 | struct hash_alg_common *alg, |
| 87 | struct crypto_instance *inst); | 95 | struct crypto_instance *inst); |
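Editor's note: a hedged sketch of how a caller might use the new predicate; the function name and error choice below are illustrative, not taken from this patch:

    /* Refuse to build on top of an shash that requires a key but would
     * never receive one (illustrative use of the new helper). */
    static int example_check_base(struct shash_alg *salg)
    {
            if (crypto_shash_alg_has_setkey(salg))
                    return -EINVAL; /* base algorithm needs a key */
            return 0;
    }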
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h index a4649c56ca2f..5971577016a2 100644 --- a/include/drm/drm_connector.h +++ b/include/drm/drm_connector.h | |||
| @@ -24,6 +24,7 @@ | |||
| 24 | #define __DRM_CONNECTOR_H__ | 24 | #define __DRM_CONNECTOR_H__ |
| 25 | 25 | ||
| 26 | #include <linux/list.h> | 26 | #include <linux/list.h> |
| 27 | #include <linux/llist.h> | ||
| 27 | #include <linux/ctype.h> | 28 | #include <linux/ctype.h> |
| 28 | #include <linux/hdmi.h> | 29 | #include <linux/hdmi.h> |
| 29 | #include <drm/drm_mode_object.h> | 30 | #include <drm/drm_mode_object.h> |
| @@ -918,12 +919,13 @@ struct drm_connector { | |||
| 918 | uint16_t tile_h_size, tile_v_size; | 919 | uint16_t tile_h_size, tile_v_size; |
| 919 | 920 | ||
| 920 | /** | 921 | /** |
| 921 | * @free_work: | 922 | * @free_node: |
| 922 | * | 923 | * |
| 923 | * Work used only by &drm_connector_iter to be able to clean up a | 924 | * List used only by &drm_connector_iter to be able to clean up a |
| 924 | * connector from any context. | 925 | * connector from any context, in conjunction with |
| 926 | * &drm_mode_config.connector_free_work. | ||
| 925 | */ | 927 | */ |
| 926 | struct work_struct free_work; | 928 | struct llist_node free_node; |
| 927 | }; | 929 | }; |
| 928 | 930 | ||
| 929 | #define obj_to_connector(x) container_of(x, struct drm_connector, base) | 931 | #define obj_to_connector(x) container_of(x, struct drm_connector, base) |
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h index 2ec41d032e56..efe6d5a8e834 100644 --- a/include/drm/drm_edid.h +++ b/include/drm/drm_edid.h | |||
| @@ -465,6 +465,8 @@ struct edid *drm_get_edid(struct drm_connector *connector, | |||
| 465 | struct edid *drm_get_edid_switcheroo(struct drm_connector *connector, | 465 | struct edid *drm_get_edid_switcheroo(struct drm_connector *connector, |
| 466 | struct i2c_adapter *adapter); | 466 | struct i2c_adapter *adapter); |
| 467 | struct edid *drm_edid_duplicate(const struct edid *edid); | 467 | struct edid *drm_edid_duplicate(const struct edid *edid); |
| 468 | void drm_reset_display_info(struct drm_connector *connector); | ||
| 469 | u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid); | ||
| 468 | int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid); | 470 | int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid); |
| 469 | 471 | ||
| 470 | u8 drm_match_cea_mode(const struct drm_display_mode *to_match); | 472 | u8 drm_match_cea_mode(const struct drm_display_mode *to_match); |
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h index b21e827c5c78..b0ce26d71296 100644 --- a/include/drm/drm_mode_config.h +++ b/include/drm/drm_mode_config.h | |||
| @@ -27,6 +27,7 @@ | |||
| 27 | #include <linux/types.h> | 27 | #include <linux/types.h> |
| 28 | #include <linux/idr.h> | 28 | #include <linux/idr.h> |
| 29 | #include <linux/workqueue.h> | 29 | #include <linux/workqueue.h> |
| 30 | #include <linux/llist.h> | ||
| 30 | 31 | ||
| 31 | #include <drm/drm_modeset_lock.h> | 32 | #include <drm/drm_modeset_lock.h> |
| 32 | 33 | ||
| @@ -393,7 +394,7 @@ struct drm_mode_config { | |||
| 393 | 394 | ||
| 394 | /** | 395 | /** |
| 395 | * @connector_list_lock: Protects @num_connector and | 396 | * @connector_list_lock: Protects @num_connector and |
| 396 | * @connector_list. | 397 | * @connector_list and @connector_free_list. |
| 397 | */ | 398 | */ |
| 398 | spinlock_t connector_list_lock; | 399 | spinlock_t connector_list_lock; |
| 399 | /** | 400 | /** |
| @@ -414,6 +415,21 @@ struct drm_mode_config { | |||
| 414 | */ | 415 | */ |
| 415 | struct list_head connector_list; | 416 | struct list_head connector_list; |
| 416 | /** | 417 | /** |
| 418 | * @connector_free_list: | ||
| 419 | * | ||
| 420 | * List of connector objects linked with &drm_connector.free_node. | ||
| 421 | * Protected by @connector_list_lock. Used by | ||
| 422 | * drm_for_each_connector_iter() and | ||
| 423 | * &struct drm_connector_list_iter to safely free connectors using | ||
| 424 | * @connector_free_work. | ||
| 425 | */ | ||
| 426 | struct llist_head connector_free_list; | ||
| 427 | /** | ||
| 428 | * @connector_free_work: Work to clean up @connector_free_list. | ||
| 429 | */ | ||
| 430 | struct work_struct connector_free_work; | ||
| 431 | |||
| 432 | /** | ||
| 417 | * @num_encoder: | 433 | * @num_encoder: |
| 418 | * | 434 | * |
| 419 | * Number of encoders on this device. This is invariant over the | 435 | * Number of encoders on this device. This is invariant over the |
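Editor's note: taken together, the drm_connector.h and drm_mode_config.h hunks switch connector cleanup from one work item per connector to a single lock-free list drained by one work item. A minimal sketch of that pattern; only the field names come from this patch, the handler body is illustrative:

    static void example_connector_free_work_fn(struct work_struct *work)
    {
            struct drm_mode_config *config =
                    container_of(work, struct drm_mode_config,
                                 connector_free_work);
            struct llist_node *freed;
            struct drm_connector *connector, *n;

            /* Atomically take the whole list; llist_add() can race in more
             * entries from any context without taking a lock here. */
            freed = llist_del_all(&config->connector_free_list);

            llist_for_each_entry_safe(connector, n, freed, free_node)
                    connector->funcs->destroy(connector);
    }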
diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 188ed9f65517..52e611ab9a6c 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h | |||
| @@ -220,21 +220,21 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s | |||
| 220 | /* | 220 | /* |
| 221 | * Prevent the compiler from merging or refetching reads or writes. The | 221 | * Prevent the compiler from merging or refetching reads or writes. The |
| 222 | * compiler is also forbidden from reordering successive instances of | 222 | * compiler is also forbidden from reordering successive instances of |
| 223 | * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the | 223 | * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some |
| 224 | * compiler is aware of some particular ordering. One way to make the | 224 | * particular ordering. One way to make the compiler aware of ordering is to |
| 225 | * compiler aware of ordering is to put the two invocations of READ_ONCE, | 225 | * put the two invocations of READ_ONCE or WRITE_ONCE in different C |
| 226 | * WRITE_ONCE or ACCESS_ONCE() in different C statements. | 226 | * statements. |
| 227 | * | 227 | * |
| 228 | * In contrast to ACCESS_ONCE these two macros will also work on aggregate | 228 | * These two macros will also work on aggregate data types like structs or |
| 229 | * data types like structs or unions. If the size of the accessed data | 229 | * unions. If the size of the accessed data type exceeds the word size of |
| 230 | * type exceeds the word size of the machine (e.g., 32 bits or 64 bits) | 230 | * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will |
| 231 | * READ_ONCE() and WRITE_ONCE() will fall back to memcpy(). There's at | 231 | * fall back to memcpy(). There's at least two memcpy()s: one for the |
| 232 | * least two memcpy()s: one for the __builtin_memcpy() and then one for | 232 | * __builtin_memcpy() and then one for the macro doing the copy of variable |
| 233 | * the macro doing the copy of variable - '__u' allocated on the stack. | 233 | * - '__u' allocated on the stack. |
| 234 | * | 234 | * |
| 235 | * Their two major use cases are: (1) Mediating communication between | 235 | * Their two major use cases are: (1) Mediating communication between |
| 236 | * process-level code and irq/NMI handlers, all running on the same CPU, | 236 | * process-level code and irq/NMI handlers, all running on the same CPU, |
| 237 | * and (2) Ensuring that the compiler does not fold, spindle, or otherwise | 237 | * and (2) Ensuring that the compiler does not fold, spindle, or otherwise |
| 238 | * mutilate accesses that either do not require ordering or that interact | 238 | * mutilate accesses that either do not require ordering or that interact |
| 239 | * with an explicit memory barrier or atomic instruction that provides the | 239 | * with an explicit memory barrier or atomic instruction that provides the |
| 240 | * required ordering. | 240 | * required ordering. |
| @@ -327,29 +327,4 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s | |||
| 327 | compiletime_assert(__native_word(t), \ | 327 | compiletime_assert(__native_word(t), \ |
| 328 | "Need native word sized stores/loads for atomicity.") | 328 | "Need native word sized stores/loads for atomicity.") |
| 329 | 329 | ||
| 330 | /* | ||
| 331 | * Prevent the compiler from merging or refetching accesses. The compiler | ||
| 332 | * is also forbidden from reordering successive instances of ACCESS_ONCE(), | ||
| 333 | * but only when the compiler is aware of some particular ordering. One way | ||
| 334 | * to make the compiler aware of ordering is to put the two invocations of | ||
| 335 | * ACCESS_ONCE() in different C statements. | ||
| 336 | * | ||
| 337 | * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE | ||
| 338 | * on a union member will work as long as the size of the member matches the | ||
| 339 | * size of the union and the size is smaller than word size. | ||
| 340 | * | ||
| 341 | * The major use cases of ACCESS_ONCE used to be (1) Mediating communication | ||
| 342 | * between process-level code and irq/NMI handlers, all running on the same CPU, | ||
| 343 | * and (2) Ensuring that the compiler does not fold, spindle, or otherwise | ||
| 344 | * mutilate accesses that either do not require ordering or that interact | ||
| 345 | * with an explicit memory barrier or atomic instruction that provides the | ||
| 346 | * required ordering. | ||
| 347 | * | ||
| 348 | * If possible use READ_ONCE()/WRITE_ONCE() instead. | ||
| 349 | */ | ||
| 350 | #define __ACCESS_ONCE(x) ({ \ | ||
| 351 | __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \ | ||
| 352 | (volatile typeof(x) *)&(x); }) | ||
| 353 | #define ACCESS_ONCE(x) (*__ACCESS_ONCE(x)) | ||
| 354 | |||
| 355 | #endif /* __LINUX_COMPILER_H */ | 330 | #endif /* __LINUX_COMPILER_H */ |
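Editor's note: as the retained comment says, ordering is preserved only between invocations in different C statements. A minimal sketch of the READ_ONCE() idiom that replaces the removed ACCESS_ONCE(); struct shared_state is an illustrative type, not from this file:

    /* 'shared' is written by an IRQ handler. The marked accesses are
     * distinct volatile loads the compiler may neither merge, refetch,
     * nor reorder against each other. */
    static int example_wait(struct shared_state *shared)
    {
            while (!READ_ONCE(shared->ready))
                    cpu_relax();
            return READ_ONCE(shared->data); /* old: ACCESS_ONCE(...) */
    }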
diff --git a/include/linux/completion.h b/include/linux/completion.h index 0662a417febe..94a59ba7d422 100644 --- a/include/linux/completion.h +++ b/include/linux/completion.h | |||
| @@ -10,9 +10,6 @@ | |||
| 10 | */ | 10 | */ |
| 11 | 11 | ||
| 12 | #include <linux/wait.h> | 12 | #include <linux/wait.h> |
| 13 | #ifdef CONFIG_LOCKDEP_COMPLETIONS | ||
| 14 | #include <linux/lockdep.h> | ||
| 15 | #endif | ||
| 16 | 13 | ||
| 17 | /* | 14 | /* |
| 18 | * struct completion - structure used to maintain state for a "completion" | 15 | * struct completion - structure used to maintain state for a "completion" |
| @@ -29,58 +26,16 @@ | |||
| 29 | struct completion { | 26 | struct completion { |
| 30 | unsigned int done; | 27 | unsigned int done; |
| 31 | wait_queue_head_t wait; | 28 | wait_queue_head_t wait; |
| 32 | #ifdef CONFIG_LOCKDEP_COMPLETIONS | ||
| 33 | struct lockdep_map_cross map; | ||
| 34 | #endif | ||
| 35 | }; | 29 | }; |
| 36 | 30 | ||
| 37 | #ifdef CONFIG_LOCKDEP_COMPLETIONS | ||
| 38 | static inline void complete_acquire(struct completion *x) | ||
| 39 | { | ||
| 40 | lock_acquire_exclusive((struct lockdep_map *)&x->map, 0, 0, NULL, _RET_IP_); | ||
| 41 | } | ||
| 42 | |||
| 43 | static inline void complete_release(struct completion *x) | ||
| 44 | { | ||
| 45 | lock_release((struct lockdep_map *)&x->map, 0, _RET_IP_); | ||
| 46 | } | ||
| 47 | |||
| 48 | static inline void complete_release_commit(struct completion *x) | ||
| 49 | { | ||
| 50 | lock_commit_crosslock((struct lockdep_map *)&x->map); | ||
| 51 | } | ||
| 52 | |||
| 53 | #define init_completion_map(x, m) \ | ||
| 54 | do { \ | ||
| 55 | lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map, \ | ||
| 56 | (m)->name, (m)->key, 0); \ | ||
| 57 | __init_completion(x); \ | ||
| 58 | } while (0) | ||
| 59 | |||
| 60 | #define init_completion(x) \ | ||
| 61 | do { \ | ||
| 62 | static struct lock_class_key __key; \ | ||
| 63 | lockdep_init_map_crosslock((struct lockdep_map *)&(x)->map, \ | ||
| 64 | "(completion)" #x, \ | ||
| 65 | &__key, 0); \ | ||
| 66 | __init_completion(x); \ | ||
| 67 | } while (0) | ||
| 68 | #else | ||
| 69 | #define init_completion_map(x, m) __init_completion(x) | 31 | #define init_completion_map(x, m) __init_completion(x) |
| 70 | #define init_completion(x) __init_completion(x) | 32 | #define init_completion(x) __init_completion(x) |
| 71 | static inline void complete_acquire(struct completion *x) {} | 33 | static inline void complete_acquire(struct completion *x) {} |
| 72 | static inline void complete_release(struct completion *x) {} | 34 | static inline void complete_release(struct completion *x) {} |
| 73 | static inline void complete_release_commit(struct completion *x) {} | 35 | static inline void complete_release_commit(struct completion *x) {} |
| 74 | #endif | ||
| 75 | 36 | ||
| 76 | #ifdef CONFIG_LOCKDEP_COMPLETIONS | ||
| 77 | #define COMPLETION_INITIALIZER(work) \ | ||
| 78 | { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait), \ | ||
| 79 | STATIC_CROSS_LOCKDEP_MAP_INIT("(completion)" #work, &(work)) } | ||
| 80 | #else | ||
| 81 | #define COMPLETION_INITIALIZER(work) \ | 37 | #define COMPLETION_INITIALIZER(work) \ |
| 82 | { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } | 38 | { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } |
| 83 | #endif | ||
| 84 | 39 | ||
| 85 | #define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \ | 40 | #define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \ |
| 86 | (*({ init_completion_map(&(work), &(map)); &(work); })) | 41 | (*({ init_completion_map(&(work), &(map)); &(work); })) |
diff --git a/include/linux/cred.h b/include/linux/cred.h index 099058e1178b..631286535d0f 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h | |||
| @@ -83,6 +83,7 @@ extern int set_current_groups(struct group_info *); | |||
| 83 | extern void set_groups(struct cred *, struct group_info *); | 83 | extern void set_groups(struct cred *, struct group_info *); |
| 84 | extern int groups_search(const struct group_info *, kgid_t); | 84 | extern int groups_search(const struct group_info *, kgid_t); |
| 85 | extern bool may_setgroups(void); | 85 | extern bool may_setgroups(void); |
| 86 | extern void groups_sort(struct group_info *); | ||
| 86 | 87 | ||
| 87 | /* | 88 | /* |
| 88 | * The security context of a task | 89 | * The security context of a task |
diff --git a/include/linux/idr.h b/include/linux/idr.h index 7c3a365f7e12..fa14f834e4ed 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | #include <linux/radix-tree.h> | 15 | #include <linux/radix-tree.h> |
| 16 | #include <linux/gfp.h> | 16 | #include <linux/gfp.h> |
| 17 | #include <linux/percpu.h> | 17 | #include <linux/percpu.h> |
| 18 | #include <linux/bug.h> | ||
| 18 | 19 | ||
| 19 | struct idr { | 20 | struct idr { |
| 20 | struct radix_tree_root idr_rt; | 21 | struct radix_tree_root idr_rt; |
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index a842551fe044..2e75dc34bff5 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h | |||
| @@ -158,12 +158,6 @@ struct lockdep_map { | |||
| 158 | int cpu; | 158 | int cpu; |
| 159 | unsigned long ip; | 159 | unsigned long ip; |
| 160 | #endif | 160 | #endif |
| 161 | #ifdef CONFIG_LOCKDEP_CROSSRELEASE | ||
| 162 | /* | ||
| 163 | * Whether it's a crosslock. | ||
| 164 | */ | ||
| 165 | int cross; | ||
| 166 | #endif | ||
| 167 | }; | 161 | }; |
| 168 | 162 | ||
| 169 | static inline void lockdep_copy_map(struct lockdep_map *to, | 163 | static inline void lockdep_copy_map(struct lockdep_map *to, |
| @@ -267,96 +261,9 @@ struct held_lock { | |||
| 267 | unsigned int hardirqs_off:1; | 261 | unsigned int hardirqs_off:1; |
| 268 | unsigned int references:12; /* 32 bits */ | 262 | unsigned int references:12; /* 32 bits */ |
| 269 | unsigned int pin_count; | 263 | unsigned int pin_count; |
| 270 | #ifdef CONFIG_LOCKDEP_CROSSRELEASE | ||
| 271 | /* | ||
| 272 | * Generation id. | ||
| 273 | * | ||
| 274 | * A value of cross_gen_id will be stored when holding this, | ||
| 275 | * which is globally increased whenever each crosslock is held. | ||
| 276 | */ | ||
| 277 | unsigned int gen_id; | ||
| 278 | #endif | ||
| 279 | }; | ||
| 280 | |||
| 281 | #ifdef CONFIG_LOCKDEP_CROSSRELEASE | ||
| 282 | #define MAX_XHLOCK_TRACE_ENTRIES 5 | ||
| 283 | |||
| 284 | /* | ||
| 285 | * This is for keeping locks waiting for commit so that true dependencies | ||
| 286 | * can be added at commit step. | ||
| 287 | */ | ||
| 288 | struct hist_lock { | ||
| 289 | /* | ||
| 290 | * Id for each entry in the ring buffer. This is used to | ||
| 291 | * decide whether the ring buffer was overwritten or not. | ||
| 292 | * | ||
| 293 | * For example, | ||
| 294 | * | ||
| 295 | * |<----------- hist_lock ring buffer size ------->| | ||
| 296 | * pppppppppppppppppppppiiiiiiiiiiiiiiiiiiiiiiiiiiiii | ||
| 297 | * wrapped > iiiiiiiiiiiiiiiiiiiiiiiiiii....................... | ||
| 298 | * | ||
| 299 | * where 'p' represents an acquisition in process | ||
| 300 | * context, 'i' represents an acquisition in irq | ||
| 301 | * context. | ||
| 302 | * | ||
| 303 | * In this example, the ring buffer was overwritten by | ||
| 304 | * acquisitions in irq context, that should be detected on | ||
| 305 | * rollback or commit. | ||
| 306 | */ | ||
| 307 | unsigned int hist_id; | ||
| 308 | |||
| 309 | /* | ||
| 310 | * Separate stack_trace data. This will be used at commit step. | ||
| 311 | */ | ||
| 312 | struct stack_trace trace; | ||
| 313 | unsigned long trace_entries[MAX_XHLOCK_TRACE_ENTRIES]; | ||
| 314 | |||
| 315 | /* | ||
| 316 | * Separate hlock instance. This will be used at commit step. | ||
| 317 | * | ||
| 318 | * TODO: Use a smaller data structure containing only necessary | ||
| 319 | * data. However, we should make lockdep code able to handle the | ||
| 320 | * smaller one first. | ||
| 321 | */ | ||
| 322 | struct held_lock hlock; | ||
| 323 | }; | 264 | }; |
| 324 | 265 | ||
| 325 | /* | 266 | /* |
| 326 | * To initialize a lock as crosslock, lockdep_init_map_crosslock() should | ||
| 327 | * be called instead of lockdep_init_map(). | ||
| 328 | */ | ||
| 329 | struct cross_lock { | ||
| 330 | /* | ||
| 331 | * When more than one acquisition of crosslocks are overlapped, | ||
| 332 | * we have to perform commit for them based on cross_gen_id of | ||
| 333 | * the first acquisition, which allows us to add more true | ||
| 334 | * dependencies. | ||
| 335 | * | ||
| 336 | * Moreover, when no acquisition of a crosslock is in progress, | ||
| 337 | * we should not perform commit because the lock might not exist | ||
| 338 | * any more, which might cause incorrect memory access. So we | ||
| 339 | * have to track the number of acquisitions of a crosslock. | ||
| 340 | */ | ||
| 341 | int nr_acquire; | ||
| 342 | |||
| 343 | /* | ||
| 344 | * Separate hlock instance. This will be used at commit step. | ||
| 345 | * | ||
| 346 | * TODO: Use a smaller data structure containing only necessary | ||
| 347 | * data. However, we should make lockdep code able to handle the | ||
| 348 | * smaller one first. | ||
| 349 | */ | ||
| 350 | struct held_lock hlock; | ||
| 351 | }; | ||
| 352 | |||
| 353 | struct lockdep_map_cross { | ||
| 354 | struct lockdep_map map; | ||
| 355 | struct cross_lock xlock; | ||
| 356 | }; | ||
| 357 | #endif | ||
| 358 | |||
| 359 | /* | ||
| 360 | * Initialization, self-test and debugging-output methods: | 267 | * Initialization, self-test and debugging-output methods: |
| 361 | */ | 268 | */ |
| 362 | extern void lockdep_info(void); | 269 | extern void lockdep_info(void); |
| @@ -560,37 +467,6 @@ enum xhlock_context_t { | |||
| 560 | XHLOCK_CTX_NR, | 467 | XHLOCK_CTX_NR, |
| 561 | }; | 468 | }; |
| 562 | 469 | ||
| 563 | #ifdef CONFIG_LOCKDEP_CROSSRELEASE | ||
| 564 | extern void lockdep_init_map_crosslock(struct lockdep_map *lock, | ||
| 565 | const char *name, | ||
| 566 | struct lock_class_key *key, | ||
| 567 | int subclass); | ||
| 568 | extern void lock_commit_crosslock(struct lockdep_map *lock); | ||
| 569 | |||
| 570 | /* | ||
| 571 | * What we essentially have to initialize is 'nr_acquire'. Other members | ||
| 572 | * will be initialized in add_xlock(). | ||
| 573 | */ | ||
| 574 | #define STATIC_CROSS_LOCK_INIT() \ | ||
| 575 | { .nr_acquire = 0,} | ||
| 576 | |||
| 577 | #define STATIC_CROSS_LOCKDEP_MAP_INIT(_name, _key) \ | ||
| 578 | { .map.name = (_name), .map.key = (void *)(_key), \ | ||
| 579 | .map.cross = 1, .xlock = STATIC_CROSS_LOCK_INIT(), } | ||
| 580 | |||
| 581 | /* | ||
| 582 | * To initialize a lockdep_map statically use this macro. | ||
| 583 | * Note that _name must not be NULL. | ||
| 584 | */ | ||
| 585 | #define STATIC_LOCKDEP_MAP_INIT(_name, _key) \ | ||
| 586 | { .name = (_name), .key = (void *)(_key), .cross = 0, } | ||
| 587 | |||
| 588 | extern void crossrelease_hist_start(enum xhlock_context_t c); | ||
| 589 | extern void crossrelease_hist_end(enum xhlock_context_t c); | ||
| 590 | extern void lockdep_invariant_state(bool force); | ||
| 591 | extern void lockdep_init_task(struct task_struct *task); | ||
| 592 | extern void lockdep_free_task(struct task_struct *task); | ||
| 593 | #else /* !CROSSRELEASE */ | ||
| 594 | #define lockdep_init_map_crosslock(m, n, k, s) do {} while (0) | 470 | #define lockdep_init_map_crosslock(m, n, k, s) do {} while (0) |
| 595 | /* | 471 | /* |
| 596 | * To initialize a lockdep_map statically use this macro. | 472 | * To initialize a lockdep_map statically use this macro. |
| @@ -604,7 +480,6 @@ static inline void crossrelease_hist_end(enum xhlock_context_t c) {} | |||
| 604 | static inline void lockdep_invariant_state(bool force) {} | 480 | static inline void lockdep_invariant_state(bool force) {} |
| 605 | static inline void lockdep_init_task(struct task_struct *task) {} | 481 | static inline void lockdep_init_task(struct task_struct *task) {} |
| 606 | static inline void lockdep_free_task(struct task_struct *task) {} | 482 | static inline void lockdep_free_task(struct task_struct *task) {} |
| 607 | #endif /* CROSSRELEASE */ | ||
| 608 | 483 | ||
| 609 | #ifdef CONFIG_LOCK_STAT | 484 | #ifdef CONFIG_LOCK_STAT |
| 610 | 485 | ||
diff --git a/include/linux/oom.h b/include/linux/oom.h index 01c91d874a57..5bad038ac012 100644 --- a/include/linux/oom.h +++ b/include/linux/oom.h | |||
| @@ -67,6 +67,15 @@ static inline bool tsk_is_oom_victim(struct task_struct * tsk) | |||
| 67 | } | 67 | } |
| 68 | 68 | ||
| 69 | /* | 69 | /* |
| 70 | * Use this helper if tsk->mm != mm and the victim mm needs special | ||
| 71 | * handling. Once set, this is guaranteed to stay true. | ||
| 72 | */ | ||
| 73 | static inline bool mm_is_oom_victim(struct mm_struct *mm) | ||
| 74 | { | ||
| 75 | return test_bit(MMF_OOM_VICTIM, &mm->flags); | ||
| 76 | } | ||
| 77 | |||
| 78 | /* | ||
| 70 | * Checks whether a page fault on the given mm is still reliable. | 79 | * Checks whether a page fault on the given mm is still reliable. |
| 71 | * This is no longer true if the oom reaper started to reap the | 80 | * This is no longer true if the oom reaper started to reap the |
| 72 | * address space which is reflected by MMF_UNSTABLE flag set in | 81 | * address space which is reflected by MMF_UNSTABLE flag set in |
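Editor's note: a hedged sketch of the intended call pattern. Code that tears down an mm on behalf of another task can now consult the flag directly; the surrounding function is illustrative:

    static void example_teardown(struct mm_struct *mm)
    {
            if (mm_is_oom_victim(mm)) {
                    /* The oom reaper may be walking this mm concurrently;
                     * synchronize with it before freeing page tables. */
            }
    }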
diff --git a/include/linux/pci.h b/include/linux/pci.h index 0403894147a3..c170c9250c8b 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
| @@ -1674,6 +1674,9 @@ static inline struct pci_dev *pci_get_slot(struct pci_bus *bus, | |||
| 1674 | static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus, | 1674 | static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus, |
| 1675 | unsigned int devfn) | 1675 | unsigned int devfn) |
| 1676 | { return NULL; } | 1676 | { return NULL; } |
| 1677 | static inline struct pci_dev *pci_get_domain_bus_and_slot(int domain, | ||
| 1678 | unsigned int bus, unsigned int devfn) | ||
| 1679 | { return NULL; } | ||
| 1677 | 1680 | ||
| 1678 | static inline int pci_domain_nr(struct pci_bus *bus) { return 0; } | 1681 | static inline int pci_domain_nr(struct pci_bus *bus) { return 0; } |
| 1679 | static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; } | 1682 | static inline struct pci_dev *pci_dev_get(struct pci_dev *dev) { return NULL; } |
diff --git a/include/linux/pm.h b/include/linux/pm.h index 65d39115f06d..492ed473ba7e 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h | |||
| @@ -765,6 +765,7 @@ extern int pm_generic_poweroff_late(struct device *dev); | |||
| 765 | extern int pm_generic_poweroff(struct device *dev); | 765 | extern int pm_generic_poweroff(struct device *dev); |
| 766 | extern void pm_generic_complete(struct device *dev); | 766 | extern void pm_generic_complete(struct device *dev); |
| 767 | 767 | ||
| 768 | extern void dev_pm_skip_next_resume_phases(struct device *dev); | ||
| 768 | extern bool dev_pm_smart_suspend_and_suspended(struct device *dev); | 769 | extern bool dev_pm_smart_suspend_and_suspended(struct device *dev); |
| 769 | 770 | ||
| 770 | #else /* !CONFIG_PM_SLEEP */ | 771 | #else /* !CONFIG_PM_SLEEP */ |
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index 37b4bb2545b3..6866df4f31b5 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h | |||
| @@ -101,12 +101,18 @@ static inline bool ptr_ring_full_bh(struct ptr_ring *r) | |||
| 101 | 101 | ||
| 102 | /* Note: callers invoking this in a loop must use a compiler barrier, | 102 | /* Note: callers invoking this in a loop must use a compiler barrier, |
| 103 | * for example cpu_relax(). Callers must hold producer_lock. | 103 | * for example cpu_relax(). Callers must hold producer_lock. |
| 104 | * Callers are responsible for making sure the pointer that is being | ||
| 105 | * queued points to valid data. | ||
| 104 | */ | 106 | */ |
| 105 | static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr) | 107 | static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr) |
| 106 | { | 108 | { |
| 107 | if (unlikely(!r->size) || r->queue[r->producer]) | 109 | if (unlikely(!r->size) || r->queue[r->producer]) |
| 108 | return -ENOSPC; | 110 | return -ENOSPC; |
| 109 | 111 | ||
| 112 | /* Make sure the pointer we are storing points to valid data. */ | ||
| 113 | /* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */ | ||
| 114 | smp_wmb(); | ||
| 115 | |||
| 110 | r->queue[r->producer++] = ptr; | 116 | r->queue[r->producer++] = ptr; |
| 111 | if (unlikely(r->producer >= r->size)) | 117 | if (unlikely(r->producer >= r->size)) |
| 112 | r->producer = 0; | 118 | r->producer = 0; |
| @@ -275,6 +281,9 @@ static inline void *__ptr_ring_consume(struct ptr_ring *r) | |||
| 275 | if (ptr) | 281 | if (ptr) |
| 276 | __ptr_ring_discard_one(r); | 282 | __ptr_ring_discard_one(r); |
| 277 | 283 | ||
| 284 | /* Make sure anyone accessing data through the pointer is up to date. */ | ||
| 285 | /* Pairs with smp_wmb in __ptr_ring_produce. */ | ||
| 286 | smp_read_barrier_depends(); | ||
| 278 | return ptr; | 287 | return ptr; |
| 279 | } | 288 | } |
| 280 | 289 | ||
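Editor's note: the new barrier pair is what makes payload initialization visible to consumers. A sketch of the guarantee, with an illustrative payload field:

    /* Producer: initialize the payload first; __ptr_ring_produce()
     * issues smp_wmb() before publishing, so this store cannot be
     * reordered past the pointer becoming visible. */
    obj->val = 42;
    err = __ptr_ring_produce(r, obj);

    /* Consumer: __ptr_ring_consume() orders the pointer load before
     * dependent loads through it, so the payload reads as 42. */
    obj = __ptr_ring_consume(r);
    if (obj)
            pr_info("%d\n", obj->val);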
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h index d574361943ea..fcbeed4053ef 100644 --- a/include/linux/rbtree.h +++ b/include/linux/rbtree.h | |||
| @@ -99,6 +99,8 @@ extern void rb_replace_node(struct rb_node *victim, struct rb_node *new, | |||
| 99 | struct rb_root *root); | 99 | struct rb_root *root); |
| 100 | extern void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new, | 100 | extern void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new, |
| 101 | struct rb_root *root); | 101 | struct rb_root *root); |
| 102 | extern void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new, | ||
| 103 | struct rb_root_cached *root); | ||
| 102 | 104 | ||
| 103 | static inline void rb_link_node(struct rb_node *node, struct rb_node *parent, | 105 | static inline void rb_link_node(struct rb_node *node, struct rb_node *parent, |
| 104 | struct rb_node **rb_link) | 106 | struct rb_node **rb_link) |
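Editor's note: the cached variant exists so that replacing a node also fixes up the cached leftmost pointer. A hedged usage sketch; node and field names are illustrative:

    struct rb_root_cached tree = RB_ROOT_CACHED;

    /* ... victim was inserted earlier and may currently be the node
     * that rb_first_cached(&tree) returns ... */
    rb_replace_node_cached(&victim->rb, &replacement->rb, &tree);
    /* the cached leftmost pointer now names 'replacement' if needed */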
diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h index cc0072e93e36..857a72ceb794 100644 --- a/include/linux/rwlock_types.h +++ b/include/linux/rwlock_types.h | |||
| @@ -10,9 +10,6 @@ | |||
| 10 | */ | 10 | */ |
| 11 | typedef struct { | 11 | typedef struct { |
| 12 | arch_rwlock_t raw_lock; | 12 | arch_rwlock_t raw_lock; |
| 13 | #ifdef CONFIG_GENERIC_LOCKBREAK | ||
| 14 | unsigned int break_lock; | ||
| 15 | #endif | ||
| 16 | #ifdef CONFIG_DEBUG_SPINLOCK | 13 | #ifdef CONFIG_DEBUG_SPINLOCK |
| 17 | unsigned int magic, owner_cpu; | 14 | unsigned int magic, owner_cpu; |
| 18 | void *owner; | 15 | void *owner; |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 21991d668d35..d2588263a989 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -849,17 +849,6 @@ struct task_struct { | |||
| 849 | struct held_lock held_locks[MAX_LOCK_DEPTH]; | 849 | struct held_lock held_locks[MAX_LOCK_DEPTH]; |
| 850 | #endif | 850 | #endif |
| 851 | 851 | ||
| 852 | #ifdef CONFIG_LOCKDEP_CROSSRELEASE | ||
| 853 | #define MAX_XHLOCKS_NR 64UL | ||
| 854 | struct hist_lock *xhlocks; /* Crossrelease history locks */ | ||
| 855 | unsigned int xhlock_idx; | ||
| 856 | /* For restoring at history boundaries */ | ||
| 857 | unsigned int xhlock_idx_hist[XHLOCK_CTX_NR]; | ||
| 858 | unsigned int hist_id; | ||
| 859 | /* For overwrite check at each context exit */ | ||
| 860 | unsigned int hist_id_save[XHLOCK_CTX_NR]; | ||
| 861 | #endif | ||
| 862 | |||
| 863 | #ifdef CONFIG_UBSAN | 852 | #ifdef CONFIG_UBSAN |
| 864 | unsigned int in_ubsan; | 853 | unsigned int in_ubsan; |
| 865 | #endif | 854 | #endif |
| @@ -1503,7 +1492,11 @@ static inline void set_task_comm(struct task_struct *tsk, const char *from) | |||
| 1503 | __set_task_comm(tsk, from, false); | 1492 | __set_task_comm(tsk, from, false); |
| 1504 | } | 1493 | } |
| 1505 | 1494 | ||
| 1506 | extern char *get_task_comm(char *to, struct task_struct *tsk); | 1495 | extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk); |
| 1496 | #define get_task_comm(buf, tsk) ({ \ | ||
| 1497 | BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN); \ | ||
| 1498 | __get_task_comm(buf, sizeof(buf), tsk); \ | ||
| 1499 | }) | ||
| 1507 | 1500 | ||
| 1508 | #ifdef CONFIG_SMP | 1501 | #ifdef CONFIG_SMP |
| 1509 | void scheduler_ipi(void); | 1502 | void scheduler_ipi(void); |
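Editor's note: the macro wrapper turns a silent truncation hazard into a build failure, since it only accepts buffers whose size is known to be TASK_COMM_LEN at compile time. For example:

    char comm[TASK_COMM_LEN];

    get_task_comm(comm, tsk);  /* ok: sizeof(comm) == TASK_COMM_LEN */

    char *buf = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
    get_task_comm(buf, tsk);   /* BUILD_BUG_ON fires: sizeof(buf) is
                                * a pointer size, not a buffer length */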
diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h index 9c8847395b5e..ec912d01126f 100644 --- a/include/linux/sched/coredump.h +++ b/include/linux/sched/coredump.h | |||
| @@ -70,6 +70,7 @@ static inline int get_dumpable(struct mm_struct *mm) | |||
| 70 | #define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */ | 70 | #define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */ |
| 71 | #define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */ | 71 | #define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */ |
| 72 | #define MMF_DISABLE_THP 24 /* disable THP for all VMAs */ | 72 | #define MMF_DISABLE_THP 24 /* disable THP for all VMAs */ |
| 73 | #define MMF_OOM_VICTIM 25 /* mm is the oom victim */ | ||
| 73 | #define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP) | 74 | #define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP) |
| 74 | 75 | ||
| 75 | #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\ | 76 | #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\ |
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index a39186194cd6..3bf273538840 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h | |||
| @@ -107,16 +107,11 @@ do { \ | |||
| 107 | 107 | ||
| 108 | #define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock) | 108 | #define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock) |
| 109 | 109 | ||
| 110 | #ifdef CONFIG_GENERIC_LOCKBREAK | ||
| 111 | #define raw_spin_is_contended(lock) ((lock)->break_lock) | ||
| 112 | #else | ||
| 113 | |||
| 114 | #ifdef arch_spin_is_contended | 110 | #ifdef arch_spin_is_contended |
| 115 | #define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock) | 111 | #define raw_spin_is_contended(lock) arch_spin_is_contended(&(lock)->raw_lock) |
| 116 | #else | 112 | #else |
| 117 | #define raw_spin_is_contended(lock) (((void)(lock), 0)) | 113 | #define raw_spin_is_contended(lock) (((void)(lock), 0)) |
| 118 | #endif /*arch_spin_is_contended*/ | 114 | #endif /*arch_spin_is_contended*/ |
| 119 | #endif | ||
| 120 | 115 | ||
| 121 | /* | 116 | /* |
| 122 | * This barrier must provide two things: | 117 | * This barrier must provide two things: |
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h index 73548eb13a5d..24b4e6f2c1a2 100644 --- a/include/linux/spinlock_types.h +++ b/include/linux/spinlock_types.h | |||
| @@ -19,9 +19,6 @@ | |||
| 19 | 19 | ||
| 20 | typedef struct raw_spinlock { | 20 | typedef struct raw_spinlock { |
| 21 | arch_spinlock_t raw_lock; | 21 | arch_spinlock_t raw_lock; |
| 22 | #ifdef CONFIG_GENERIC_LOCKBREAK | ||
| 23 | unsigned int break_lock; | ||
| 24 | #endif | ||
| 25 | #ifdef CONFIG_DEBUG_SPINLOCK | 22 | #ifdef CONFIG_DEBUG_SPINLOCK |
| 26 | unsigned int magic, owner_cpu; | 23 | unsigned int magic, owner_cpu; |
| 27 | void *owner; | 24 | void *owner; |
diff --git a/include/linux/string.h b/include/linux/string.h index 410ecf17de3c..cfd83eb2f926 100644 --- a/include/linux/string.h +++ b/include/linux/string.h | |||
| @@ -259,7 +259,10 @@ __FORTIFY_INLINE __kernel_size_t strlen(const char *p) | |||
| 259 | { | 259 | { |
| 260 | __kernel_size_t ret; | 260 | __kernel_size_t ret; |
| 261 | size_t p_size = __builtin_object_size(p, 0); | 261 | size_t p_size = __builtin_object_size(p, 0); |
| 262 | if (p_size == (size_t)-1) | 262 | |
| 263 | /* Work around gcc excess stack consumption issue */ | ||
| 264 | if (p_size == (size_t)-1 || | ||
| 265 | (__builtin_constant_p(p[p_size - 1]) && p[p_size - 1] == '\0')) | ||
| 263 | return __builtin_strlen(p); | 266 | return __builtin_strlen(p); |
| 264 | ret = strnlen(p, p_size); | 267 | ret = strnlen(p, p_size); |
| 265 | if (p_size <= ret) | 268 | if (p_size <= ret) |
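Editor's note: the added __builtin_constant_p() test lets gcc fold the fortified strlen() on string constants instead of emitting the runtime strnlen() path that was inflating stack usage. For instance:

    size_t n = strlen("static literal"); /* now folds to a constant 14 */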
diff --git a/include/linux/trace.h b/include/linux/trace.h index d24991c1fef3..b95ffb2188ab 100644 --- a/include/linux/trace.h +++ b/include/linux/trace.h | |||
| @@ -18,7 +18,7 @@ | |||
| 18 | */ | 18 | */ |
| 19 | struct trace_export { | 19 | struct trace_export { |
| 20 | struct trace_export __rcu *next; | 20 | struct trace_export __rcu *next; |
| 21 | void (*write)(const void *, unsigned int); | 21 | void (*write)(struct trace_export *, const void *, unsigned int); |
| 22 | }; | 22 | }; |
| 23 | 23 | ||
| 24 | int register_ftrace_export(struct trace_export *export); | 24 | int register_ftrace_export(struct trace_export *export); |
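Editor's note: passing the struct trace_export itself to ->write() lets an exporter reach per-instance state via container_of(). A hedged sketch; all names below are illustrative:

    struct my_exporter {
            struct trace_export export;
            void *dst;                 /* per-instance destination */
    };

    static void my_write(struct trace_export *export,
                         const void *buf, unsigned int len)
    {
            struct my_exporter *me =
                    container_of(export, struct my_exporter, export);

            memcpy(me->dst, buf, len); /* ship the trace entry out */
    }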
diff --git a/include/net/gue.h b/include/net/gue.h index 2fdb29ca74c2..fdad41469b65 100644 --- a/include/net/gue.h +++ b/include/net/gue.h | |||
| @@ -44,10 +44,10 @@ struct guehdr { | |||
| 44 | #else | 44 | #else |
| 45 | #error "Please fix <asm/byteorder.h>" | 45 | #error "Please fix <asm/byteorder.h>" |
| 46 | #endif | 46 | #endif |
| 47 | __u8 proto_ctype; | 47 | __u8 proto_ctype; |
| 48 | __u16 flags; | 48 | __be16 flags; |
| 49 | }; | 49 | }; |
| 50 | __u32 word; | 50 | __be32 word; |
| 51 | }; | 51 | }; |
| 52 | }; | 52 | }; |
| 53 | 53 | ||
| @@ -84,11 +84,10 @@ static inline size_t guehdr_priv_flags_len(__be32 flags) | |||
| 84 | * if there are unknown standard or private flags, or the options length for | 84 | * if there are unknown standard or private flags, or the options length for |
| 85 | * the flags exceeds the options length specified in hlen of the GUE header. | 85 | * the flags exceeds the options length specified in hlen of the GUE header. |
| 86 | */ | 86 | */ |
| 87 | static inline int validate_gue_flags(struct guehdr *guehdr, | 87 | static inline int validate_gue_flags(struct guehdr *guehdr, size_t optlen) |
| 88 | size_t optlen) | ||
| 89 | { | 88 | { |
| 89 | __be16 flags = guehdr->flags; | ||
| 90 | size_t len; | 90 | size_t len; |
| 91 | __be32 flags = guehdr->flags; | ||
| 92 | 91 | ||
| 93 | if (flags & ~GUE_FLAGS_ALL) | 92 | if (flags & ~GUE_FLAGS_ALL) |
| 94 | return 1; | 93 | return 1; |
| @@ -101,12 +100,13 @@ static inline int validate_gue_flags(struct guehdr *guehdr, | |||
| 101 | /* Private flags are last four bytes accounted in | 100 | /* Private flags are last four bytes accounted in |
| 102 | * guehdr_flags_len | 101 | * guehdr_flags_len |
| 103 | */ | 102 | */ |
| 104 | flags = *(__be32 *)((void *)&guehdr[1] + len - GUE_LEN_PRIV); | 103 | __be32 pflags = *(__be32 *)((void *)&guehdr[1] + |
| 104 | len - GUE_LEN_PRIV); | ||
| 105 | 105 | ||
| 106 | if (flags & ~GUE_PFLAGS_ALL) | 106 | if (pflags & ~GUE_PFLAGS_ALL) |
| 107 | return 1; | 107 | return 1; |
| 108 | 108 | ||
| 109 | len += guehdr_priv_flags_len(flags); | 109 | len += guehdr_priv_flags_len(pflags); |
| 110 | if (len > optlen) | 110 | if (len > optlen) |
| 111 | return 1; | 111 | return 1; |
| 112 | } | 112 | } |
diff --git a/include/net/ip.h b/include/net/ip.h index 9896f46cbbf1..af8addbaa3c1 100644 --- a/include/net/ip.h +++ b/include/net/ip.h | |||
| @@ -34,6 +34,7 @@ | |||
| 34 | #include <net/flow_dissector.h> | 34 | #include <net/flow_dissector.h> |
| 35 | 35 | ||
| 36 | #define IPV4_MAX_PMTU 65535U /* RFC 2675, Section 5.1 */ | 36 | #define IPV4_MAX_PMTU 65535U /* RFC 2675, Section 5.1 */ |
| 37 | #define IPV4_MIN_MTU 68 /* RFC 791 */ | ||
| 37 | 38 | ||
| 38 | struct sock; | 39 | struct sock; |
| 39 | 40 | ||
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 65d0d25f2648..83a3e47d5845 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h | |||
| @@ -71,6 +71,7 @@ struct Qdisc { | |||
| 71 | * qdisc_tree_decrease_qlen() should stop. | 71 | * qdisc_tree_decrease_qlen() should stop. |
| 72 | */ | 72 | */ |
| 73 | #define TCQ_F_INVISIBLE 0x80 /* invisible by default in dump */ | 73 | #define TCQ_F_INVISIBLE 0x80 /* invisible by default in dump */ |
| 74 | #define TCQ_F_OFFLOADED 0x200 /* qdisc is offloaded to HW */ | ||
| 74 | u32 limit; | 75 | u32 limit; |
| 75 | const struct Qdisc_ops *ops; | 76 | const struct Qdisc_ops *ops; |
| 76 | struct qdisc_size_table __rcu *stab; | 77 | struct qdisc_size_table __rcu *stab; |
diff --git a/include/trace/events/preemptirq.h b/include/trace/events/preemptirq.h index f5024c560d8f..9c4eb33c5a1d 100644 --- a/include/trace/events/preemptirq.h +++ b/include/trace/events/preemptirq.h | |||
| @@ -56,15 +56,18 @@ DEFINE_EVENT(preemptirq_template, preempt_enable, | |||
| 56 | 56 | ||
| 57 | #include <trace/define_trace.h> | 57 | #include <trace/define_trace.h> |
| 58 | 58 | ||
| 59 | #else /* !CONFIG_PREEMPTIRQ_EVENTS */ | 59 | #endif /* !CONFIG_PREEMPTIRQ_EVENTS */ |
| 60 | 60 | ||
| 61 | #if !defined(CONFIG_PREEMPTIRQ_EVENTS) || defined(CONFIG_PROVE_LOCKING) | ||
| 61 | #define trace_irq_enable(...) | 62 | #define trace_irq_enable(...) |
| 62 | #define trace_irq_disable(...) | 63 | #define trace_irq_disable(...) |
| 63 | #define trace_preempt_enable(...) | ||
| 64 | #define trace_preempt_disable(...) | ||
| 65 | #define trace_irq_enable_rcuidle(...) | 64 | #define trace_irq_enable_rcuidle(...) |
| 66 | #define trace_irq_disable_rcuidle(...) | 65 | #define trace_irq_disable_rcuidle(...) |
| 66 | #endif | ||
| 67 | |||
| 68 | #if !defined(CONFIG_PREEMPTIRQ_EVENTS) || !defined(CONFIG_DEBUG_PREEMPT) | ||
| 69 | #define trace_preempt_enable(...) | ||
| 70 | #define trace_preempt_disable(...) | ||
| 67 | #define trace_preempt_enable_rcuidle(...) | 71 | #define trace_preempt_enable_rcuidle(...) |
| 68 | #define trace_preempt_disable_rcuidle(...) | 72 | #define trace_preempt_disable_rcuidle(...) |
| 69 | |||
| 70 | #endif | 73 | #endif |
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h index af3cc2f4e1ad..37b5096ae97b 100644 --- a/include/uapi/linux/pkt_sched.h +++ b/include/uapi/linux/pkt_sched.h | |||
| @@ -256,7 +256,6 @@ struct tc_red_qopt { | |||
| 256 | #define TC_RED_ECN 1 | 256 | #define TC_RED_ECN 1 |
| 257 | #define TC_RED_HARDDROP 2 | 257 | #define TC_RED_HARDDROP 2 |
| 258 | #define TC_RED_ADAPTATIVE 4 | 258 | #define TC_RED_ADAPTATIVE 4 |
| 259 | #define TC_RED_OFFLOADED 8 | ||
| 260 | }; | 259 | }; |
| 261 | 260 | ||
| 262 | struct tc_red_xstats { | 261 | struct tc_red_xstats { |
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index d8b5f80c2ea6..843e29aa3cac 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h | |||
| @@ -557,6 +557,7 @@ enum { | |||
| 557 | TCA_PAD, | 557 | TCA_PAD, |
| 558 | TCA_DUMP_INVISIBLE, | 558 | TCA_DUMP_INVISIBLE, |
| 559 | TCA_CHAIN, | 559 | TCA_CHAIN, |
| 560 | TCA_HW_OFFLOAD, | ||
| 560 | __TCA_MAX | 561 | __TCA_MAX |
| 561 | }; | 562 | }; |
| 562 | 563 | ||
diff --git a/init/main.c b/init/main.c index dfec3809e740..e96e3a14533c 100644 --- a/init/main.c +++ b/init/main.c | |||
| @@ -589,6 +589,12 @@ asmlinkage __visible void __init start_kernel(void) | |||
| 589 | radix_tree_init(); | 589 | radix_tree_init(); |
| 590 | 590 | ||
| 591 | /* | 591 | /* |
| 592 | * Set up housekeeping before setting up workqueues to allow the unbound | ||
| 593 | * workqueue to take non-housekeeping into account. | ||
| 594 | */ | ||
| 595 | housekeeping_init(); | ||
| 596 | |||
| 597 | /* | ||
| 592 | * Allow workqueue creation and work item queueing/cancelling | 598 | * Allow workqueue creation and work item queueing/cancelling |
| 593 | * early. Work item execution depends on kthreads and starts after | 599 | * early. Work item execution depends on kthreads and starts after |
| 594 | * workqueue_init(). | 600 | * workqueue_init(). |
| @@ -605,7 +611,6 @@ asmlinkage __visible void __init start_kernel(void) | |||
| 605 | early_irq_init(); | 611 | early_irq_init(); |
| 606 | init_IRQ(); | 612 | init_IRQ(); |
| 607 | tick_init(); | 613 | tick_init(); |
| 608 | housekeeping_init(); | ||
| 609 | rcu_init_nohz(); | 614 | rcu_init_nohz(); |
| 610 | init_timers(); | 615 | init_timers(); |
| 611 | hrtimers_init(); | 616 | hrtimers_init(); |
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index e469e05c8e83..3905d4bc5b80 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c | |||
| @@ -114,6 +114,7 @@ static void htab_free_elems(struct bpf_htab *htab) | |||
| 114 | pptr = htab_elem_get_ptr(get_htab_elem(htab, i), | 114 | pptr = htab_elem_get_ptr(get_htab_elem(htab, i), |
| 115 | htab->map.key_size); | 115 | htab->map.key_size); |
| 116 | free_percpu(pptr); | 116 | free_percpu(pptr); |
| 117 | cond_resched(); | ||
| 117 | } | 118 | } |
| 118 | free_elems: | 119 | free_elems: |
| 119 | bpf_map_area_free(htab->elems); | 120 | bpf_map_area_free(htab->elems); |
| @@ -159,6 +160,7 @@ static int prealloc_init(struct bpf_htab *htab) | |||
| 159 | goto free_elems; | 160 | goto free_elems; |
| 160 | htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size, | 161 | htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size, |
| 161 | pptr); | 162 | pptr); |
| 163 | cond_resched(); | ||
| 162 | } | 164 | } |
| 163 | 165 | ||
| 164 | skip_percpu_elems: | 166 | skip_percpu_elems: |
diff --git a/kernel/cgroup/debug.c b/kernel/cgroup/debug.c index 5f780d8f6a9d..9caeda610249 100644 --- a/kernel/cgroup/debug.c +++ b/kernel/cgroup/debug.c | |||
| @@ -50,7 +50,7 @@ static int current_css_set_read(struct seq_file *seq, void *v) | |||
| 50 | 50 | ||
| 51 | spin_lock_irq(&css_set_lock); | 51 | spin_lock_irq(&css_set_lock); |
| 52 | rcu_read_lock(); | 52 | rcu_read_lock(); |
| 53 | cset = rcu_dereference(current->cgroups); | 53 | cset = task_css_set(current); |
| 54 | refcnt = refcount_read(&cset->refcount); | 54 | refcnt = refcount_read(&cset->refcount); |
| 55 | seq_printf(seq, "css_set %pK %d", cset, refcnt); | 55 | seq_printf(seq, "css_set %pK %d", cset, refcnt); |
| 56 | if (refcnt > cset->nr_tasks) | 56 | if (refcnt > cset->nr_tasks) |
| @@ -96,7 +96,7 @@ static int current_css_set_cg_links_read(struct seq_file *seq, void *v) | |||
| 96 | 96 | ||
| 97 | spin_lock_irq(&css_set_lock); | 97 | spin_lock_irq(&css_set_lock); |
| 98 | rcu_read_lock(); | 98 | rcu_read_lock(); |
| 99 | cset = rcu_dereference(current->cgroups); | 99 | cset = task_css_set(current); |
| 100 | list_for_each_entry(link, &cset->cgrp_links, cgrp_link) { | 100 | list_for_each_entry(link, &cset->cgrp_links, cgrp_link) { |
| 101 | struct cgroup *c = link->cgrp; | 101 | struct cgroup *c = link->cgrp; |
| 102 | 102 | ||
diff --git a/kernel/cgroup/stat.c b/kernel/cgroup/stat.c index 133b465691d6..1e111dd455c4 100644 --- a/kernel/cgroup/stat.c +++ b/kernel/cgroup/stat.c | |||
| @@ -296,8 +296,12 @@ int cgroup_stat_init(struct cgroup *cgrp) | |||
| 296 | } | 296 | } |
| 297 | 297 | ||
| 298 | /* ->updated_children list is self terminated */ | 298 | /* ->updated_children list is self terminated */ |
| 299 | for_each_possible_cpu(cpu) | 299 | for_each_possible_cpu(cpu) { |
| 300 | cgroup_cpu_stat(cgrp, cpu)->updated_children = cgrp; | 300 | struct cgroup_cpu_stat *cstat = cgroup_cpu_stat(cgrp, cpu); |
| 301 | |||
| 302 | cstat->updated_children = cgrp; | ||
| 303 | u64_stats_init(&cstat->sync); | ||
| 304 | } | ||
| 301 | 305 | ||
| 302 | prev_cputime_init(&cgrp->stat.prev_cputime); | 306 | prev_cputime_init(&cgrp->stat.prev_cputime); |
| 303 | 307 | ||
diff --git a/kernel/exit.c b/kernel/exit.c index 6b4298a41167..df0c91d5606c 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
| @@ -1755,3 +1755,11 @@ Efault: | |||
| 1755 | return -EFAULT; | 1755 | return -EFAULT; |
| 1756 | } | 1756 | } |
| 1757 | #endif | 1757 | #endif |
| 1758 | |||
| 1759 | __weak void abort(void) | ||
| 1760 | { | ||
| 1761 | BUG(); | ||
| 1762 | |||
| 1763 | /* if that doesn't kill us, halt */ | ||
| 1764 | panic("Oops failed to kill thread"); | ||
| 1765 | } | ||
diff --git a/kernel/groups.c b/kernel/groups.c index e357bc800111..daae2f2dc6d4 100644 --- a/kernel/groups.c +++ b/kernel/groups.c | |||
| @@ -86,11 +86,12 @@ static int gid_cmp(const void *_a, const void *_b) | |||
| 86 | return gid_gt(a, b) - gid_lt(a, b); | 86 | return gid_gt(a, b) - gid_lt(a, b); |
| 87 | } | 87 | } |
| 88 | 88 | ||
| 89 | static void groups_sort(struct group_info *group_info) | 89 | void groups_sort(struct group_info *group_info) |
| 90 | { | 90 | { |
| 91 | sort(group_info->gid, group_info->ngroups, sizeof(*group_info->gid), | 91 | sort(group_info->gid, group_info->ngroups, sizeof(*group_info->gid), |
| 92 | gid_cmp, NULL); | 92 | gid_cmp, NULL); |
| 93 | } | 93 | } |
| 94 | EXPORT_SYMBOL(groups_sort); | ||
| 94 | 95 | ||
| 95 | /* a simple bsearch */ | 96 | /* a simple bsearch */ |
| 96 | int groups_search(const struct group_info *group_info, kgid_t grp) | 97 | int groups_search(const struct group_info *group_info, kgid_t grp) |
| @@ -122,7 +123,6 @@ int groups_search(const struct group_info *group_info, kgid_t grp) | |||
| 122 | void set_groups(struct cred *new, struct group_info *group_info) | 123 | void set_groups(struct cred *new, struct group_info *group_info) |
| 123 | { | 124 | { |
| 124 | put_group_info(new->group_info); | 125 | put_group_info(new->group_info); |
| 125 | groups_sort(group_info); | ||
| 126 | get_group_info(group_info); | 126 | get_group_info(group_info); |
| 127 | new->group_info = group_info; | 127 | new->group_info = group_info; |
| 128 | } | 128 | } |
| @@ -206,6 +206,7 @@ SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist) | |||
| 206 | return retval; | 206 | return retval; |
| 207 | } | 207 | } |
| 208 | 208 | ||
| 209 | groups_sort(group_info); | ||
| 209 | retval = set_current_groups(group_info); | 210 | retval = set_current_groups(group_info); |
| 210 | put_group_info(group_info); | 211 | put_group_info(group_info); |
| 211 | 212 | ||
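Editor's note: with sorting hoisted out of set_groups(), the contract is now that whoever builds a group_info sorts it exactly once before committing it, so a group_info shared between creds is never re-sorted in place. The resulting call sequence, as in the setgroups() hunk above (a sketch with error handling trimmed):

    struct group_info *gi = groups_alloc(ngroups);
    /* ... copy gids from userspace into gi ... */
    groups_sort(gi);                 /* sort once, before anyone shares it */
    retval = set_current_groups(gi); /* set_groups() no longer sorts */
    put_group_info(gi);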
diff --git a/kernel/kcov.c b/kernel/kcov.c index 15f33faf4013..7594c033d98a 100644 --- a/kernel/kcov.c +++ b/kernel/kcov.c | |||
| @@ -157,7 +157,7 @@ void notrace __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2) | |||
| 157 | } | 157 | } |
| 158 | EXPORT_SYMBOL(__sanitizer_cov_trace_cmp2); | 158 | EXPORT_SYMBOL(__sanitizer_cov_trace_cmp2); |
| 159 | 159 | ||
| 160 | void notrace __sanitizer_cov_trace_cmp4(u16 arg1, u16 arg2) | 160 | void notrace __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2) |
| 161 | { | 161 | { |
| 162 | write_comp_data(KCOV_CMP_SIZE(2), arg1, arg2, _RET_IP_); | 162 | write_comp_data(KCOV_CMP_SIZE(2), arg1, arg2, _RET_IP_); |
| 163 | } | 163 | } |
| @@ -183,7 +183,7 @@ void notrace __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2) | |||
| 183 | } | 183 | } |
| 184 | EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp2); | 184 | EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp2); |
| 185 | 185 | ||
| 186 | void notrace __sanitizer_cov_trace_const_cmp4(u16 arg1, u16 arg2) | 186 | void notrace __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2) |
| 187 | { | 187 | { |
| 188 | write_comp_data(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2, | 188 | write_comp_data(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2, |
| 189 | _RET_IP_); | 189 | _RET_IP_); |
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 670d8d7d8087..5fa1324a4f29 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c | |||
| @@ -57,10 +57,6 @@ | |||
| 57 | #define CREATE_TRACE_POINTS | 57 | #define CREATE_TRACE_POINTS |
| 58 | #include <trace/events/lock.h> | 58 | #include <trace/events/lock.h> |
| 59 | 59 | ||
| 60 | #ifdef CONFIG_LOCKDEP_CROSSRELEASE | ||
| 61 | #include <linux/slab.h> | ||
| 62 | #endif | ||
| 63 | |||
| 64 | #ifdef CONFIG_PROVE_LOCKING | 60 | #ifdef CONFIG_PROVE_LOCKING |
| 65 | int prove_locking = 1; | 61 | int prove_locking = 1; |
| 66 | module_param(prove_locking, int, 0644); | 62 | module_param(prove_locking, int, 0644); |
| @@ -75,19 +71,6 @@ module_param(lock_stat, int, 0644); | |||
| 75 | #define lock_stat 0 | 71 | #define lock_stat 0 |
| 76 | #endif | 72 | #endif |
| 77 | 73 | ||
| 78 | #ifdef CONFIG_BOOTPARAM_LOCKDEP_CROSSRELEASE_FULLSTACK | ||
| 79 | static int crossrelease_fullstack = 1; | ||
| 80 | #else | ||
| 81 | static int crossrelease_fullstack; | ||
| 82 | #endif | ||
| 83 | static int __init allow_crossrelease_fullstack(char *str) | ||
| 84 | { | ||
| 85 | crossrelease_fullstack = 1; | ||
| 86 | return 0; | ||
| 87 | } | ||
| 88 | |||
| 89 | early_param("crossrelease_fullstack", allow_crossrelease_fullstack); | ||
| 90 | |||
| 91 | /* | 74 | /* |
| 92 | * lockdep_lock: protects the lockdep graph, the hashes and the | 75 | * lockdep_lock: protects the lockdep graph, the hashes and the |
| 93 | * class/list/hash allocators. | 76 | * class/list/hash allocators. |
| @@ -740,18 +723,6 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass) | |||
| 740 | return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL); | 723 | return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL); |
| 741 | } | 724 | } |
| 742 | 725 | ||
| 743 | #ifdef CONFIG_LOCKDEP_CROSSRELEASE | ||
| 744 | static void cross_init(struct lockdep_map *lock, int cross); | ||
| 745 | static int cross_lock(struct lockdep_map *lock); | ||
| 746 | static int lock_acquire_crosslock(struct held_lock *hlock); | ||
| 747 | static int lock_release_crosslock(struct lockdep_map *lock); | ||
| 748 | #else | ||
| 749 | static inline void cross_init(struct lockdep_map *lock, int cross) {} | ||
| 750 | static inline int cross_lock(struct lockdep_map *lock) { return 0; } | ||
| 751 | static inline int lock_acquire_crosslock(struct held_lock *hlock) { return 2; } | ||
| 752 | static inline int lock_release_crosslock(struct lockdep_map *lock) { return 2; } | ||
| 753 | #endif | ||
| 754 | |||
| 755 | /* | 726 | /* |
| 756 | * Register a lock's class in the hash-table, if the class is not present | 727 | * Register a lock's class in the hash-table, if the class is not present |
| 757 | * yet. Otherwise we look it up. We cache the result in the lock object | 728 | * yet. Otherwise we look it up. We cache the result in the lock object |
| @@ -1151,41 +1122,22 @@ print_circular_lock_scenario(struct held_lock *src, | |||
| 1151 | printk(KERN_CONT "\n\n"); | 1122 | printk(KERN_CONT "\n\n"); |
| 1152 | } | 1123 | } |
| 1153 | 1124 | ||
| 1154 | if (cross_lock(tgt->instance)) { | 1125 | printk(" Possible unsafe locking scenario:\n\n"); |
| 1155 | printk(" Possible unsafe locking scenario by crosslock:\n\n"); | 1126 | printk(" CPU0 CPU1\n"); |
| 1156 | printk(" CPU0 CPU1\n"); | 1127 | printk(" ---- ----\n"); |
| 1157 | printk(" ---- ----\n"); | 1128 | printk(" lock("); |
| 1158 | printk(" lock("); | 1129 | __print_lock_name(target); |
| 1159 | __print_lock_name(parent); | 1130 | printk(KERN_CONT ");\n"); |
| 1160 | printk(KERN_CONT ");\n"); | 1131 | printk(" lock("); |
| 1161 | printk(" lock("); | 1132 | __print_lock_name(parent); |
| 1162 | __print_lock_name(target); | 1133 | printk(KERN_CONT ");\n"); |
| 1163 | printk(KERN_CONT ");\n"); | 1134 | printk(" lock("); |
| 1164 | printk(" lock("); | 1135 | __print_lock_name(target); |
| 1165 | __print_lock_name(source); | 1136 | printk(KERN_CONT ");\n"); |
| 1166 | printk(KERN_CONT ");\n"); | 1137 | printk(" lock("); |
| 1167 | printk(" unlock("); | 1138 | __print_lock_name(source); |
| 1168 | __print_lock_name(target); | 1139 | printk(KERN_CONT ");\n"); |
| 1169 | printk(KERN_CONT ");\n"); | 1140 | printk("\n *** DEADLOCK ***\n\n"); |
| 1170 | printk("\n *** DEADLOCK ***\n\n"); | ||
| 1171 | } else { | ||
| 1172 | printk(" Possible unsafe locking scenario:\n\n"); | ||
| 1173 | printk(" CPU0 CPU1\n"); | ||
| 1174 | printk(" ---- ----\n"); | ||
| 1175 | printk(" lock("); | ||
| 1176 | __print_lock_name(target); | ||
| 1177 | printk(KERN_CONT ");\n"); | ||
| 1178 | printk(" lock("); | ||
| 1179 | __print_lock_name(parent); | ||
| 1180 | printk(KERN_CONT ");\n"); | ||
| 1181 | printk(" lock("); | ||
| 1182 | __print_lock_name(target); | ||
| 1183 | printk(KERN_CONT ");\n"); | ||
| 1184 | printk(" lock("); | ||
| 1185 | __print_lock_name(source); | ||
| 1186 | printk(KERN_CONT ");\n"); | ||
| 1187 | printk("\n *** DEADLOCK ***\n\n"); | ||
| 1188 | } | ||
| 1189 | } | 1141 | } |
| 1190 | 1142 | ||
| 1191 | /* | 1143 | /* |
| @@ -1211,10 +1163,7 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth, | |||
| 1211 | curr->comm, task_pid_nr(curr)); | 1163 | curr->comm, task_pid_nr(curr)); |
| 1212 | print_lock(check_src); | 1164 | print_lock(check_src); |
| 1213 | 1165 | ||
| 1214 | if (cross_lock(check_tgt->instance)) | 1166 | pr_warn("\nbut task is already holding lock:\n"); |
| 1215 | pr_warn("\nbut now in release context of a crosslock acquired at the following:\n"); | ||
| 1216 | else | ||
| 1217 | pr_warn("\nbut task is already holding lock:\n"); | ||
| 1218 | 1167 | ||
| 1219 | print_lock(check_tgt); | 1168 | print_lock(check_tgt); |
| 1220 | pr_warn("\nwhich lock already depends on the new lock.\n\n"); | 1169 | pr_warn("\nwhich lock already depends on the new lock.\n\n"); |
| @@ -1244,9 +1193,7 @@ static noinline int print_circular_bug(struct lock_list *this, | |||
| 1244 | if (!debug_locks_off_graph_unlock() || debug_locks_silent) | 1193 | if (!debug_locks_off_graph_unlock() || debug_locks_silent) |
| 1245 | return 0; | 1194 | return 0; |
| 1246 | 1195 | ||
| 1247 | if (cross_lock(check_tgt->instance)) | 1196 | if (!save_trace(&this->trace)) |
| 1248 | this->trace = *trace; | ||
| 1249 | else if (!save_trace(&this->trace)) | ||
| 1250 | return 0; | 1197 | return 0; |
| 1251 | 1198 | ||
| 1252 | depth = get_lock_depth(target); | 1199 | depth = get_lock_depth(target); |
| @@ -1850,9 +1797,6 @@ check_deadlock(struct task_struct *curr, struct held_lock *next, | |||
| 1850 | if (nest) | 1797 | if (nest) |
| 1851 | return 2; | 1798 | return 2; |
| 1852 | 1799 | ||
| 1853 | if (cross_lock(prev->instance)) | ||
| 1854 | continue; | ||
| 1855 | |||
| 1856 | return print_deadlock_bug(curr, prev, next); | 1800 | return print_deadlock_bug(curr, prev, next); |
| 1857 | } | 1801 | } |
| 1858 | return 1; | 1802 | return 1; |
| @@ -2018,31 +1962,26 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next) | |||
| 2018 | for (;;) { | 1962 | for (;;) { |
| 2019 | int distance = curr->lockdep_depth - depth + 1; | 1963 | int distance = curr->lockdep_depth - depth + 1; |
| 2020 | hlock = curr->held_locks + depth - 1; | 1964 | hlock = curr->held_locks + depth - 1; |
| 1965 | |||
| 2021 | /* | 1966 | /* |
| 2022 | * Only non-crosslock entries get new dependencies added. | 1967 | * Only non-recursive-read entries get new dependencies |
| 2023 | * Crosslock entries will be added by commit later: | 1968 | * added: |
| 2024 | */ | 1969 | */ |
| 2025 | if (!cross_lock(hlock->instance)) { | 1970 | if (hlock->read != 2 && hlock->check) { |
| 1971 | int ret = check_prev_add(curr, hlock, next, distance, &trace, save_trace); | ||
| 1972 | if (!ret) | ||
| 1973 | return 0; | ||
| 1974 | |||
| 2026 | /* | 1975 | /* |
| 2027 | * Only non-recursive-read entries get new dependencies | 1976 | * Stop after the first non-trylock entry, |
| 2028 | * added: | 1977 | * as non-trylock entries have added their |
| 1978 | * own direct dependencies already, so this | ||
| 1979 | * lock is connected to them indirectly: | ||
| 2029 | */ | 1980 | */ |
| 2030 | if (hlock->read != 2 && hlock->check) { | 1981 | if (!hlock->trylock) |
| 2031 | int ret = check_prev_add(curr, hlock, next, | 1982 | break; |
| 2032 | distance, &trace, save_trace); | ||
| 2033 | if (!ret) | ||
| 2034 | return 0; | ||
| 2035 | |||
| 2036 | /* | ||
| 2037 | * Stop after the first non-trylock entry, | ||
| 2038 | * as non-trylock entries have added their | ||
| 2039 | * own direct dependencies already, so this | ||
| 2040 | * lock is connected to them indirectly: | ||
| 2041 | */ | ||
| 2042 | if (!hlock->trylock) | ||
| 2043 | break; | ||
| 2044 | } | ||
| 2045 | } | 1983 | } |
| 1984 | |||
| 2046 | depth--; | 1985 | depth--; |
| 2047 | /* | 1986 | /* |
| 2048 | * End of lock-stack? | 1987 | * End of lock-stack? |
| @@ -3292,21 +3231,10 @@ static void __lockdep_init_map(struct lockdep_map *lock, const char *name, | |||
| 3292 | void lockdep_init_map(struct lockdep_map *lock, const char *name, | 3231 | void lockdep_init_map(struct lockdep_map *lock, const char *name, |
| 3293 | struct lock_class_key *key, int subclass) | 3232 | struct lock_class_key *key, int subclass) |
| 3294 | { | 3233 | { |
| 3295 | cross_init(lock, 0); | ||
| 3296 | __lockdep_init_map(lock, name, key, subclass); | 3234 | __lockdep_init_map(lock, name, key, subclass); |
| 3297 | } | 3235 | } |
| 3298 | EXPORT_SYMBOL_GPL(lockdep_init_map); | 3236 | EXPORT_SYMBOL_GPL(lockdep_init_map); |
| 3299 | 3237 | ||
| 3300 | #ifdef CONFIG_LOCKDEP_CROSSRELEASE | ||
| 3301 | void lockdep_init_map_crosslock(struct lockdep_map *lock, const char *name, | ||
| 3302 | struct lock_class_key *key, int subclass) | ||
| 3303 | { | ||
| 3304 | cross_init(lock, 1); | ||
| 3305 | __lockdep_init_map(lock, name, key, subclass); | ||
| 3306 | } | ||
| 3307 | EXPORT_SYMBOL_GPL(lockdep_init_map_crosslock); | ||
| 3308 | #endif | ||
| 3309 | |||
| 3310 | struct lock_class_key __lockdep_no_validate__; | 3238 | struct lock_class_key __lockdep_no_validate__; |
| 3311 | EXPORT_SYMBOL_GPL(__lockdep_no_validate__); | 3239 | EXPORT_SYMBOL_GPL(__lockdep_no_validate__); |
| 3312 | 3240 | ||
| @@ -3362,7 +3290,6 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
| 3362 | int chain_head = 0; | 3290 | int chain_head = 0; |
| 3363 | int class_idx; | 3291 | int class_idx; |
| 3364 | u64 chain_key; | 3292 | u64 chain_key; |
| 3365 | int ret; | ||
| 3366 | 3293 | ||
| 3367 | if (unlikely(!debug_locks)) | 3294 | if (unlikely(!debug_locks)) |
| 3368 | return 0; | 3295 | return 0; |
| @@ -3411,8 +3338,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
| 3411 | 3338 | ||
| 3412 | class_idx = class - lock_classes + 1; | 3339 | class_idx = class - lock_classes + 1; |
| 3413 | 3340 | ||
| 3414 | /* TODO: nest_lock is not implemented for crosslock yet. */ | 3341 | if (depth) { |
| 3415 | if (depth && !cross_lock(lock)) { | ||
| 3416 | hlock = curr->held_locks + depth - 1; | 3342 | hlock = curr->held_locks + depth - 1; |
| 3417 | if (hlock->class_idx == class_idx && nest_lock) { | 3343 | if (hlock->class_idx == class_idx && nest_lock) { |
| 3418 | if (hlock->references) { | 3344 | if (hlock->references) { |
| @@ -3500,14 +3426,6 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, | |||
| 3500 | if (!validate_chain(curr, lock, hlock, chain_head, chain_key)) | 3426 | if (!validate_chain(curr, lock, hlock, chain_head, chain_key)) |
| 3501 | return 0; | 3427 | return 0; |
| 3502 | 3428 | ||
| 3503 | ret = lock_acquire_crosslock(hlock); | ||
| 3504 | /* | ||
| 3505 | * 2 means normal acquire operations are needed. Otherwise, it's | ||
| 3506 | * ok just to return with '0:fail, 1:success'. | ||
| 3507 | */ | ||
| 3508 | if (ret != 2) | ||
| 3509 | return ret; | ||
| 3510 | |||
| 3511 | curr->curr_chain_key = chain_key; | 3429 | curr->curr_chain_key = chain_key; |
| 3512 | curr->lockdep_depth++; | 3430 | curr->lockdep_depth++; |
| 3513 | check_chain_key(curr); | 3431 | check_chain_key(curr); |
| @@ -3745,19 +3663,11 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip) | |||
| 3745 | struct task_struct *curr = current; | 3663 | struct task_struct *curr = current; |
| 3746 | struct held_lock *hlock; | 3664 | struct held_lock *hlock; |
| 3747 | unsigned int depth; | 3665 | unsigned int depth; |
| 3748 | int ret, i; | 3666 | int i; |
| 3749 | 3667 | ||
| 3750 | if (unlikely(!debug_locks)) | 3668 | if (unlikely(!debug_locks)) |
| 3751 | return 0; | 3669 | return 0; |
| 3752 | 3670 | ||
| 3753 | ret = lock_release_crosslock(lock); | ||
| 3754 | /* | ||
| 3755 | * 2 means normal release operations are needed. Otherwise, it's | ||
| 3756 | * ok just to return with '0:fail, 1:success'. | ||
| 3757 | */ | ||
| 3758 | if (ret != 2) | ||
| 3759 | return ret; | ||
| 3760 | |||
| 3761 | depth = curr->lockdep_depth; | 3671 | depth = curr->lockdep_depth; |
| 3762 | /* | 3672 | /* |
| 3763 | * So we're all set to release this lock.. wait what lock? We don't | 3673 | * So we're all set to release this lock.. wait what lock? We don't |
| @@ -4675,495 +4585,3 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s) | |||
| 4675 | dump_stack(); | 4585 | dump_stack(); |
| 4676 | } | 4586 | } |
| 4677 | EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious); | 4587 | EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious); |
| 4678 | |||
| 4679 | #ifdef CONFIG_LOCKDEP_CROSSRELEASE | ||
| 4680 | |||
| 4681 | /* | ||
| 4682 | * Crossrelease works by recording a lock history for each thread and | ||
| 4683 | * connecting those historic locks that were taken after the | ||
| 4684 | * wait_for_completion() in the complete() context. | ||
| 4685 | * | ||
| 4686 | * Task-A Task-B | ||
| 4687 | * | ||
| 4688 | * mutex_lock(&A); | ||
| 4689 | * mutex_unlock(&A); | ||
| 4690 | * | ||
| 4691 | * wait_for_completion(&C); | ||
| 4692 | * lock_acquire_crosslock(); | ||
| 4693 | * atomic_inc_return(&cross_gen_id); | ||
| 4694 | * | | ||
| 4695 | * | mutex_lock(&B); | ||
| 4696 | * | mutex_unlock(&B); | ||
| 4697 | * | | ||
| 4698 | * | complete(&C); | ||
| 4699 | * `-- lock_commit_crosslock(); | ||
| 4700 | * | ||
| 4701 | * Which will then add a dependency between B and C. | ||
| 4702 | */ | ||
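As a hedged sketch of the pattern described in the comment above (illustrative only; this caller code is not part of the patch), a waiter that holds a lock across wait_for_completion() deadlocks against a completer that needs the same lock — exactly the class of bug crossrelease connected via the completion:

	static DEFINE_MUTEX(A);
	static DECLARE_COMPLETION(C);

	static void task_a(void)
	{
		mutex_lock(&A);
		wait_for_completion(&C);	/* waits for task_b() */
		mutex_unlock(&A);
	}

	static void task_b(void)
	{
		mutex_lock(&A);			/* blocks: task_a() holds A */
		mutex_unlock(&A);
		complete(&C);			/* never reached: deadlock */
	}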
| 4703 | |||
| 4704 | #define xhlock(i) (current->xhlocks[(i) % MAX_XHLOCKS_NR]) | ||
| 4705 | |||
| 4706 | /* | ||
| 4707 | * Whenever a crosslock is held, cross_gen_id will be increased. | ||
| 4708 | */ | ||
| 4709 | static atomic_t cross_gen_id; /* Can be wrapped */ | ||
| 4710 | |||
| 4711 | /* | ||
| 4712 | * Make an entry of the ring buffer invalid. | ||
| 4713 | */ | ||
| 4714 | static inline void invalidate_xhlock(struct hist_lock *xhlock) | ||
| 4715 | { | ||
| 4716 | /* | ||
| 4717 | * Normally, xhlock->hlock.instance must be !NULL. | ||
| 4718 | */ | ||
| 4719 | xhlock->hlock.instance = NULL; | ||
| 4720 | } | ||
| 4721 | |||
| 4722 | /* | ||
| 4723 | * Lock history stacks; we have 2 nested lock history stacks: | ||
| 4724 | * | ||
| 4725 | * HARD(IRQ) | ||
| 4726 | * SOFT(IRQ) | ||
| 4727 | * | ||
| 4728 | * The thing is that once we complete a HARD/SOFT IRQ the future task locks | ||
| 4729 | * should not depend on any of the locks observed while running the IRQ. So | ||
| 4730 | * what we do is rewind the history buffer and erase all our knowledge of that | ||
| 4731 | * temporal event. | ||
| 4732 | */ | ||
| 4733 | |||
| 4734 | void crossrelease_hist_start(enum xhlock_context_t c) | ||
| 4735 | { | ||
| 4736 | struct task_struct *cur = current; | ||
| 4737 | |||
| 4738 | if (!cur->xhlocks) | ||
| 4739 | return; | ||
| 4740 | |||
| 4741 | cur->xhlock_idx_hist[c] = cur->xhlock_idx; | ||
| 4742 | cur->hist_id_save[c] = cur->hist_id; | ||
| 4743 | } | ||
| 4744 | |||
| 4745 | void crossrelease_hist_end(enum xhlock_context_t c) | ||
| 4746 | { | ||
| 4747 | struct task_struct *cur = current; | ||
| 4748 | |||
| 4749 | if (cur->xhlocks) { | ||
| 4750 | unsigned int idx = cur->xhlock_idx_hist[c]; | ||
| 4751 | struct hist_lock *h = &xhlock(idx); | ||
| 4752 | |||
| 4753 | cur->xhlock_idx = idx; | ||
| 4754 | |||
| 4755 | /* Check if the ring was overwritten. */ | ||
| 4756 | if (h->hist_id != cur->hist_id_save[c]) | ||
| 4757 | invalidate_xhlock(h); | ||
| 4758 | } | ||
| 4759 | } | ||
| 4760 | |||
| 4761 | /* | ||
| 4762 | * lockdep_invariant_state() is used to annotate independence inside a task, to | ||
| 4763 | * make one task look like multiple independent 'tasks'. | ||
| 4764 | * | ||
| 4765 | * Take for instance workqueues; each work is independent of the last. The | ||
| 4766 | * completion of a future work does not depend on the completion of a past work | ||
| 4767 | * (in general). Therefore we must not carry that (lock) dependency across | ||
| 4768 | * works. | ||
| 4769 | * | ||
| 4770 | * This is true for many things; pretty much all kthreads fall into this | ||
| 4771 | * pattern, where they have an invariant state and future completions do not | ||
| 4772 | * depend on past completions. It's just that since they all have the 'same' | ||
| 4773 | * form -- the kthread does the same over and over -- it doesn't typically | ||
| 4774 | * matter. | ||
| 4775 | * | ||
| 4776 | * The same is true for system-calls, once a system call is completed (we've | ||
| 4777 | * returned to userspace) the next system call does not depend on the lock | ||
| 4778 | * history of the previous system call. | ||
| 4779 | * | ||
| 4780 | * The key property for independence, this invariant state, is that it must be | ||
| 4781 | * a point where we hold no locks and have no history. Because if we were to | ||
| 4782 | * hold locks, the restore at _end() would not necessarily recover its history | ||
| 4783 | * entry. Similarly, independence by definition means it does not depend on | ||
| 4784 | * prior state. | ||
| 4785 | */ | ||
| 4786 | void lockdep_invariant_state(bool force) | ||
| 4787 | { | ||
| 4788 | /* | ||
| 4789 | * We call this at an invariant point, no current state, no history. | ||
| 4790 | * Verify the former, enforce the latter. | ||
| 4791 | */ | ||
| 4792 | WARN_ON_ONCE(!force && current->lockdep_depth); | ||
| 4793 | if (current->xhlocks) | ||
| 4794 | invalidate_xhlock(&xhlock(current->xhlock_idx)); | ||
| 4795 | } | ||
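A hedged sketch of the intended call pattern (the loop and get_next_work() are hypothetical stand-ins; only lockdep_invariant_state() is from this file): between independent units of work, with no locks held, the history is wiped.

	for (;;) {
		struct work_struct *work = get_next_work();	/* hypothetical */

		/* No locks held, no useful history: forget it. */
		lockdep_invariant_state(true);
		work->func(work);
	}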
| 4796 | |||
| 4797 | static int cross_lock(struct lockdep_map *lock) | ||
| 4798 | { | ||
| 4799 | return lock ? lock->cross : 0; | ||
| 4800 | } | ||
| 4801 | |||
| 4802 | /* | ||
| 4803 | * This is needed to decide the ordering of variables that can wrap around. | ||
| 4804 | */ | ||
| 4805 | static inline int before(unsigned int a, unsigned int b) | ||
| 4806 | { | ||
| 4807 | return (int)(a - b) < 0; | ||
| 4808 | } | ||
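A brief worked example of the wrap-safe comparison above (values chosen purely for illustration):

	unsigned int a = UINT_MAX;	/* id handed out just before the wrap */
	unsigned int b = 2;		/* id handed out just after the wrap */

	/*
	 * a - b == 0xfffffffd, which as an int is -3, so before(a, b)
	 * is true: a is treated as older than b even though a > b
	 * numerically.
	 */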
| 4809 | |||
| 4810 | static inline struct lock_class *xhlock_class(struct hist_lock *xhlock) | ||
| 4811 | { | ||
| 4812 | return hlock_class(&xhlock->hlock); | ||
| 4813 | } | ||
| 4814 | |||
| 4815 | static inline struct lock_class *xlock_class(struct cross_lock *xlock) | ||
| 4816 | { | ||
| 4817 | return hlock_class(&xlock->hlock); | ||
| 4818 | } | ||
| 4819 | |||
| 4820 | /* | ||
| 4821 | * Should we check a dependency with the previous one? | ||
| 4822 | */ | ||
| 4823 | static inline int depend_before(struct held_lock *hlock) | ||
| 4824 | { | ||
| 4825 | return hlock->read != 2 && hlock->check && !hlock->trylock; | ||
| 4826 | } | ||
| 4827 | |||
| 4828 | /* | ||
| 4829 | * Should we check a dependency with the next one? | ||
| 4830 | */ | ||
| 4831 | static inline int depend_after(struct held_lock *hlock) | ||
| 4832 | { | ||
| 4833 | return hlock->read != 2 && hlock->check; | ||
| 4834 | } | ||
| 4835 | |||
| 4836 | /* | ||
| 4837 | * Check if the xhlock is valid, which would be false if, | ||
| 4838 | * | ||
| 4839 | * 1. It has not been used since initialization. | ||
| 4840 | * 2. Got invalidated. | ||
| 4841 | * | ||
| 4842 | * Remember that hist_lock is implemented as a ring buffer. | ||
| 4843 | */ | ||
| 4844 | static inline int xhlock_valid(struct hist_lock *xhlock) | ||
| 4845 | { | ||
| 4846 | /* | ||
| 4847 | * xhlock->hlock.instance must be !NULL. | ||
| 4848 | */ | ||
| 4849 | return !!xhlock->hlock.instance; | ||
| 4850 | } | ||
| 4851 | |||
| 4852 | /* | ||
| 4853 | * Record a hist_lock entry. | ||
| 4854 | * | ||
| 4855 | * The only requirement is that IRQs are disabled. | ||
| 4856 | */ | ||
| 4857 | static void add_xhlock(struct held_lock *hlock) | ||
| 4858 | { | ||
| 4859 | unsigned int idx = ++current->xhlock_idx; | ||
| 4860 | struct hist_lock *xhlock = &xhlock(idx); | ||
| 4861 | |||
| 4862 | #ifdef CONFIG_DEBUG_LOCKDEP | ||
| 4863 | /* | ||
| 4864 | * This can be done locklessly because they are all task-local | ||
| 4865 | * state; we must, however, ensure IRQs are disabled. | ||
| 4866 | */ | ||
| 4867 | WARN_ON_ONCE(!irqs_disabled()); | ||
| 4868 | #endif | ||
| 4869 | |||
| 4870 | /* Initialize hist_lock's members */ | ||
| 4871 | xhlock->hlock = *hlock; | ||
| 4872 | xhlock->hist_id = ++current->hist_id; | ||
| 4873 | |||
| 4874 | xhlock->trace.nr_entries = 0; | ||
| 4875 | xhlock->trace.max_entries = MAX_XHLOCK_TRACE_ENTRIES; | ||
| 4876 | xhlock->trace.entries = xhlock->trace_entries; | ||
| 4877 | |||
| 4878 | if (crossrelease_fullstack) { | ||
| 4879 | xhlock->trace.skip = 3; | ||
| 4880 | save_stack_trace(&xhlock->trace); | ||
| 4881 | } else { | ||
| 4882 | xhlock->trace.nr_entries = 1; | ||
| 4883 | xhlock->trace.entries[0] = hlock->acquire_ip; | ||
| 4884 | } | ||
| 4885 | } | ||
| 4886 | |||
| 4887 | static inline int same_context_xhlock(struct hist_lock *xhlock) | ||
| 4888 | { | ||
| 4889 | return xhlock->hlock.irq_context == task_irq_context(current); | ||
| 4890 | } | ||
| 4891 | |||
| 4892 | /* | ||
| 4893 | * This should be as lockless as possible because it is | ||
| 4894 | * called very frequently. | ||
| 4895 | */ | ||
| 4896 | static void check_add_xhlock(struct held_lock *hlock) | ||
| 4897 | { | ||
| 4898 | /* | ||
| 4899 | * Record a hist_lock only in case acquisitions ahead | ||
| 4900 | * could depend on the held_lock. For example, if the held_lock | ||
| 4901 | * is a trylock then acquisitions ahead never depend on it. | ||
| 4902 | * In that case, we don't need to record it. Just return. | ||
| 4903 | */ | ||
| 4904 | if (!current->xhlocks || !depend_before(hlock)) | ||
| 4905 | return; | ||
| 4906 | |||
| 4907 | add_xhlock(hlock); | ||
| 4908 | } | ||
| 4909 | |||
| 4910 | /* | ||
| 4911 | * For crosslock. | ||
| 4912 | */ | ||
| 4913 | static int add_xlock(struct held_lock *hlock) | ||
| 4914 | { | ||
| 4915 | struct cross_lock *xlock; | ||
| 4916 | unsigned int gen_id; | ||
| 4917 | |||
| 4918 | if (!graph_lock()) | ||
| 4919 | return 0; | ||
| 4920 | |||
| 4921 | xlock = &((struct lockdep_map_cross *)hlock->instance)->xlock; | ||
| 4922 | |||
| 4923 | /* | ||
| 4924 | * When acquisitions for a crosslock are overlapped, we use | ||
| 4925 | * nr_acquire to perform commit for them, based on cross_gen_id | ||
| 4926 | * of the first acquisition, which allows us to add additional | ||
| 4927 | * dependencies. | ||
| 4928 | * | ||
| 4929 | * Moreover, when no acquisition of a crosslock is in progress, | ||
| 4930 | * we should not perform commit because the lock might not exist | ||
| 4931 | * any more, which might cause incorrect memory access. So we | ||
| 4932 | * have to track the number of acquisitions of a crosslock. | ||
| 4933 | * | ||
| 4934 | * depend_after() is necessary to initialize only the first | ||
| 4935 | * valid xlock so that the xlock can be used on its commit. | ||
| 4936 | */ | ||
| 4937 | if (xlock->nr_acquire++ && depend_after(&xlock->hlock)) | ||
| 4938 | goto unlock; | ||
| 4939 | |||
| 4940 | gen_id = (unsigned int)atomic_inc_return(&cross_gen_id); | ||
| 4941 | xlock->hlock = *hlock; | ||
| 4942 | xlock->hlock.gen_id = gen_id; | ||
| 4943 | unlock: | ||
| 4944 | graph_unlock(); | ||
| 4945 | return 1; | ||
| 4946 | } | ||
| 4947 | |||
| 4948 | /* | ||
| 4949 | * Called for both normal and crosslock acquires. Normal locks will be | ||
| 4950 | * pushed on the hist_lock queue. Cross locks will record state and | ||
| 4951 | * stop regular lock_acquire() to avoid being placed on the held_lock | ||
| 4952 | * stack. | ||
| 4953 | * | ||
| 4954 | * Return: 0 - failure; | ||
| 4955 | * 1 - crosslock, done; | ||
| 4956 | * 2 - normal lock, continue to held_lock[] ops. | ||
| 4957 | */ | ||
| 4958 | static int lock_acquire_crosslock(struct held_lock *hlock) | ||
| 4959 | { | ||
| 4960 | /* | ||
| 4961 | * CONTEXT 1 CONTEXT 2 | ||
| 4962 | * --------- --------- | ||
| 4963 | * lock A (cross) | ||
| 4964 | * X = atomic_inc_return(&cross_gen_id) | ||
| 4965 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ | ||
| 4966 | * Y = atomic_read_acquire(&cross_gen_id) | ||
| 4967 | * lock B | ||
| 4968 | * | ||
| 4969 | * atomic_read_acquire() is for ordering between A and B, | ||
| 4970 | * IOW, A happens before B, when CONTEXT 2 sees Y >= X. | ||
| 4971 | * | ||
| 4972 | * Pairs with atomic_inc_return() in add_xlock(). | ||
| 4973 | */ | ||
| 4974 | hlock->gen_id = (unsigned int)atomic_read_acquire(&cross_gen_id); | ||
| 4975 | |||
| 4976 | if (cross_lock(hlock->instance)) | ||
| 4977 | return add_xlock(hlock); | ||
| 4978 | |||
| 4979 | check_add_xhlock(hlock); | ||
| 4980 | return 2; | ||
| 4981 | } | ||
| 4982 | |||
| 4983 | static int copy_trace(struct stack_trace *trace) | ||
| 4984 | { | ||
| 4985 | unsigned long *buf = stack_trace + nr_stack_trace_entries; | ||
| 4986 | unsigned int max_nr = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries; | ||
| 4987 | unsigned int nr = min(max_nr, trace->nr_entries); | ||
| 4988 | |||
| 4989 | trace->nr_entries = nr; | ||
| 4990 | memcpy(buf, trace->entries, nr * sizeof(trace->entries[0])); | ||
| 4991 | trace->entries = buf; | ||
| 4992 | nr_stack_trace_entries += nr; | ||
| 4993 | |||
| 4994 | if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) { | ||
| 4995 | if (!debug_locks_off_graph_unlock()) | ||
| 4996 | return 0; | ||
| 4997 | |||
| 4998 | print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!"); | ||
| 4999 | dump_stack(); | ||
| 5000 | |||
| 5001 | return 0; | ||
| 5002 | } | ||
| 5003 | |||
| 5004 | return 1; | ||
| 5005 | } | ||
| 5006 | |||
| 5007 | static int commit_xhlock(struct cross_lock *xlock, struct hist_lock *xhlock) | ||
| 5008 | { | ||
| 5009 | unsigned int xid, pid; | ||
| 5010 | u64 chain_key; | ||
| 5011 | |||
| 5012 | xid = xlock_class(xlock) - lock_classes; | ||
| 5013 | chain_key = iterate_chain_key((u64)0, xid); | ||
| 5014 | pid = xhlock_class(xhlock) - lock_classes; | ||
| 5015 | chain_key = iterate_chain_key(chain_key, pid); | ||
| 5016 | |||
| 5017 | if (lookup_chain_cache(chain_key)) | ||
| 5018 | return 1; | ||
| 5019 | |||
| 5020 | if (!add_chain_cache_classes(xid, pid, xhlock->hlock.irq_context, | ||
| 5021 | chain_key)) | ||
| 5022 | return 0; | ||
| 5023 | |||
| 5024 | if (!check_prev_add(current, &xlock->hlock, &xhlock->hlock, 1, | ||
| 5025 | &xhlock->trace, copy_trace)) | ||
| 5026 | return 0; | ||
| 5027 | |||
| 5028 | return 1; | ||
| 5029 | } | ||
| 5030 | |||
| 5031 | static void commit_xhlocks(struct cross_lock *xlock) | ||
| 5032 | { | ||
| 5033 | unsigned int cur = current->xhlock_idx; | ||
| 5034 | unsigned int prev_hist_id = xhlock(cur).hist_id; | ||
| 5035 | unsigned int i; | ||
| 5036 | |||
| 5037 | if (!graph_lock()) | ||
| 5038 | return; | ||
| 5039 | |||
| 5040 | if (xlock->nr_acquire) { | ||
| 5041 | for (i = 0; i < MAX_XHLOCKS_NR; i++) { | ||
| 5042 | struct hist_lock *xhlock = &xhlock(cur - i); | ||
| 5043 | |||
| 5044 | if (!xhlock_valid(xhlock)) | ||
| 5045 | break; | ||
| 5046 | |||
| 5047 | if (before(xhlock->hlock.gen_id, xlock->hlock.gen_id)) | ||
| 5048 | break; | ||
| 5049 | |||
| 5050 | if (!same_context_xhlock(xhlock)) | ||
| 5051 | break; | ||
| 5052 | |||
| 5053 | /* | ||
| 5054 | * Filter out the cases where the ring buffer was | ||
| 5055 | * overwritten and the current entry has a bigger | ||
| 5056 | * hist_id than the previous one, which is impossible | ||
| 5057 | * otherwise: | ||
| 5058 | */ | ||
| 5059 | if (unlikely(before(prev_hist_id, xhlock->hist_id))) | ||
| 5060 | break; | ||
| 5061 | |||
| 5062 | prev_hist_id = xhlock->hist_id; | ||
| 5063 | |||
| 5064 | /* | ||
| 5065 | * commit_xhlock() returns 0 with graph_lock already | ||
| 5066 | * released on failure. | ||
| 5067 | */ | ||
| 5068 | if (!commit_xhlock(xlock, xhlock)) | ||
| 5069 | return; | ||
| 5070 | } | ||
| 5071 | } | ||
| 5072 | |||
| 5073 | graph_unlock(); | ||
| 5074 | } | ||
| 5075 | |||
| 5076 | void lock_commit_crosslock(struct lockdep_map *lock) | ||
| 5077 | { | ||
| 5078 | struct cross_lock *xlock; | ||
| 5079 | unsigned long flags; | ||
| 5080 | |||
| 5081 | if (unlikely(!debug_locks || current->lockdep_recursion)) | ||
| 5082 | return; | ||
| 5083 | |||
| 5084 | if (!current->xhlocks) | ||
| 5085 | return; | ||
| 5086 | |||
| 5087 | /* | ||
| 5088 | * Commit hist_locks with the cross_lock only in case the | ||
| 5089 | * cross_lock could depend on acquisitions after it. | ||
| 5090 | * | ||
| 5091 | * For example, if the cross_lock does not have the 'check' flag | ||
| 5092 | * then we don't need to check dependencies and commit for that. | ||
| 5093 | * Just skip it. In that case, of course, the cross_lock does | ||
| 5094 | * not depend on acquisitions ahead, either. | ||
| 5095 | * | ||
| 5096 | * WARNING: Don't do that in add_xlock() in advance. When an | ||
| 5097 | * acquisition context is different from the commit context, | ||
| 5098 | * an invalid (skipped) cross_lock might be accessed. | ||
| 5099 | */ | ||
| 5100 | if (!depend_after(&((struct lockdep_map_cross *)lock)->xlock.hlock)) | ||
| 5101 | return; | ||
| 5102 | |||
| 5103 | raw_local_irq_save(flags); | ||
| 5104 | check_flags(flags); | ||
| 5105 | current->lockdep_recursion = 1; | ||
| 5106 | xlock = &((struct lockdep_map_cross *)lock)->xlock; | ||
| 5107 | commit_xhlocks(xlock); | ||
| 5108 | current->lockdep_recursion = 0; | ||
| 5109 | raw_local_irq_restore(flags); | ||
| 5110 | } | ||
| 5111 | EXPORT_SYMBOL_GPL(lock_commit_crosslock); | ||
| 5112 | |||
| 5113 | /* | ||
| 5114 | * Return: 0 - failure; | ||
| 5115 | * 1 - crosslock, done; | ||
| 5116 | * 2 - normal lock, continue to held_lock[] ops. | ||
| 5117 | */ | ||
| 5118 | static int lock_release_crosslock(struct lockdep_map *lock) | ||
| 5119 | { | ||
| 5120 | if (cross_lock(lock)) { | ||
| 5121 | if (!graph_lock()) | ||
| 5122 | return 0; | ||
| 5123 | ((struct lockdep_map_cross *)lock)->xlock.nr_acquire--; | ||
| 5124 | graph_unlock(); | ||
| 5125 | return 1; | ||
| 5126 | } | ||
| 5127 | return 2; | ||
| 5128 | } | ||
| 5129 | |||
| 5130 | static void cross_init(struct lockdep_map *lock, int cross) | ||
| 5131 | { | ||
| 5132 | if (cross) | ||
| 5133 | ((struct lockdep_map_cross *)lock)->xlock.nr_acquire = 0; | ||
| 5134 | |||
| 5135 | lock->cross = cross; | ||
| 5136 | |||
| 5137 | /* | ||
| 5138 | * Crossrelease assumes that the ring buffer size of xhlocks | ||
| 5139 | * is a power of 2. So enforce it at build time. | ||
| 5140 | */ | ||
| 5141 | BUILD_BUG_ON(MAX_XHLOCKS_NR & (MAX_XHLOCKS_NR - 1)); | ||
| 5142 | } | ||
| 5143 | |||
| 5144 | void lockdep_init_task(struct task_struct *task) | ||
| 5145 | { | ||
| 5146 | int i; | ||
| 5147 | |||
| 5148 | task->xhlock_idx = UINT_MAX; | ||
| 5149 | task->hist_id = 0; | ||
| 5150 | |||
| 5151 | for (i = 0; i < XHLOCK_CTX_NR; i++) { | ||
| 5152 | task->xhlock_idx_hist[i] = UINT_MAX; | ||
| 5153 | task->hist_id_save[i] = 0; | ||
| 5154 | } | ||
| 5155 | |||
| 5156 | task->xhlocks = kzalloc(sizeof(struct hist_lock) * MAX_XHLOCKS_NR, | ||
| 5157 | GFP_KERNEL); | ||
| 5158 | } | ||
| 5159 | |||
| 5160 | void lockdep_free_task(struct task_struct *task) | ||
| 5161 | { | ||
| 5162 | if (task->xhlocks) { | ||
| 5163 | void *tmp = task->xhlocks; | ||
| 5164 | /* Disable crossrelease for current */ | ||
| 5165 | task->xhlocks = NULL; | ||
| 5166 | kfree(tmp); | ||
| 5167 | } | ||
| 5168 | } | ||
| 5169 | #endif | ||
diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c index 1fd1a7543cdd..936f3d14dd6b 100644 --- a/kernel/locking/spinlock.c +++ b/kernel/locking/spinlock.c | |||
| @@ -66,12 +66,8 @@ void __lockfunc __raw_##op##_lock(locktype##_t *lock) \ | |||
| 66 | break; \ | 66 | break; \ |
| 67 | preempt_enable(); \ | 67 | preempt_enable(); \ |
| 68 | \ | 68 | \ |
| 69 | if (!(lock)->break_lock) \ | 69 | arch_##op##_relax(&lock->raw_lock); \ |
| 70 | (lock)->break_lock = 1; \ | ||
| 71 | while ((lock)->break_lock) \ | ||
| 72 | arch_##op##_relax(&lock->raw_lock); \ | ||
| 73 | } \ | 70 | } \ |
| 74 | (lock)->break_lock = 0; \ | ||
| 75 | } \ | 71 | } \ |
| 76 | \ | 72 | \ |
| 77 | unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \ | 73 | unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \ |
| @@ -86,12 +82,9 @@ unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \ | |||
| 86 | local_irq_restore(flags); \ | 82 | local_irq_restore(flags); \ |
| 87 | preempt_enable(); \ | 83 | preempt_enable(); \ |
| 88 | \ | 84 | \ |
| 89 | if (!(lock)->break_lock) \ | 85 | arch_##op##_relax(&lock->raw_lock); \ |
| 90 | (lock)->break_lock = 1; \ | ||
| 91 | while ((lock)->break_lock) \ | ||
| 92 | arch_##op##_relax(&lock->raw_lock); \ | ||
| 93 | } \ | 86 | } \ |
| 94 | (lock)->break_lock = 0; \ | 87 | \ |
| 95 | return flags; \ | 88 | return flags; \ |
| 96 | } \ | 89 | } \ |
| 97 | \ | 90 | \ |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 75554f366fd3..644fa2e3d993 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
| @@ -5097,17 +5097,6 @@ SYSCALL_DEFINE1(sched_get_priority_min, int, policy) | |||
| 5097 | return ret; | 5097 | return ret; |
| 5098 | } | 5098 | } |
| 5099 | 5099 | ||
| 5100 | /** | ||
| 5101 | * sys_sched_rr_get_interval - return the default timeslice of a process. | ||
| 5102 | * @pid: pid of the process. | ||
| 5103 | * @interval: userspace pointer to the timeslice value. | ||
| 5104 | * | ||
| 5105 | * this syscall writes the default timeslice value of a given process | ||
| 5106 | * into the user-space timespec buffer. A value of '0' means infinity. | ||
| 5107 | * | ||
| 5108 | * Return: On success, 0 and the timeslice is in @interval. Otherwise, | ||
| 5109 | * an error code. | ||
| 5110 | */ | ||
| 5111 | static int sched_rr_get_interval(pid_t pid, struct timespec64 *t) | 5100 | static int sched_rr_get_interval(pid_t pid, struct timespec64 *t) |
| 5112 | { | 5101 | { |
| 5113 | struct task_struct *p; | 5102 | struct task_struct *p; |
| @@ -5144,6 +5133,17 @@ out_unlock: | |||
| 5144 | return retval; | 5133 | return retval; |
| 5145 | } | 5134 | } |
| 5146 | 5135 | ||
| 5136 | /** | ||
| 5137 | * sys_sched_rr_get_interval - return the default timeslice of a process. | ||
| 5138 | * @pid: pid of the process. | ||
| 5139 | * @interval: userspace pointer to the timeslice value. | ||
| 5140 | * | ||
| 5141 | * this syscall writes the default timeslice value of a given process | ||
| 5142 | * into the user-space timespec buffer. A value of '0' means infinity. | ||
| 5143 | * | ||
| 5144 | * Return: On success, 0 and the timeslice is in @interval. Otherwise, | ||
| 5145 | * an error code. | ||
| 5146 | */ | ||
| 5147 | SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, | 5147 | SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, |
| 5148 | struct timespec __user *, interval) | 5148 | struct timespec __user *, interval) |
| 5149 | { | 5149 | { |
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 4056c19ca3f0..665ace2fc558 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c | |||
| @@ -2034,8 +2034,9 @@ static void pull_rt_task(struct rq *this_rq) | |||
| 2034 | bool resched = false; | 2034 | bool resched = false; |
| 2035 | struct task_struct *p; | 2035 | struct task_struct *p; |
| 2036 | struct rq *src_rq; | 2036 | struct rq *src_rq; |
| 2037 | int rt_overload_count = rt_overloaded(this_rq); | ||
| 2037 | 2038 | ||
| 2038 | if (likely(!rt_overloaded(this_rq))) | 2039 | if (likely(!rt_overload_count)) |
| 2039 | return; | 2040 | return; |
| 2040 | 2041 | ||
| 2041 | /* | 2042 | /* |
| @@ -2044,6 +2045,11 @@ static void pull_rt_task(struct rq *this_rq) | |||
| 2044 | */ | 2045 | */ |
| 2045 | smp_rmb(); | 2046 | smp_rmb(); |
| 2046 | 2047 | ||
| 2048 | /* If we are the only overloaded CPU, do nothing */ | ||
| 2049 | if (rt_overload_count == 1 && | ||
| 2050 | cpumask_test_cpu(this_rq->cpu, this_rq->rd->rto_mask)) | ||
| 2051 | return; | ||
| 2052 | |||
| 2047 | #ifdef HAVE_RT_PUSH_IPI | 2053 | #ifdef HAVE_RT_PUSH_IPI |
| 2048 | if (sched_feat(RT_PUSH_IPI)) { | 2054 | if (sched_feat(RT_PUSH_IPI)) { |
| 2049 | tell_cpu_to_push(this_rq); | 2055 | tell_cpu_to_push(this_rq); |
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index af7dad126c13..904c952ac383 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig | |||
| @@ -164,6 +164,7 @@ config PREEMPTIRQ_EVENTS | |||
| 164 | bool "Enable trace events for preempt and irq disable/enable" | 164 | bool "Enable trace events for preempt and irq disable/enable" |
| 165 | select TRACE_IRQFLAGS | 165 | select TRACE_IRQFLAGS |
| 166 | depends on DEBUG_PREEMPT || !PROVE_LOCKING | 166 | depends on DEBUG_PREEMPT || !PROVE_LOCKING |
| 167 | depends on TRACING | ||
| 167 | default n | 168 | default n |
| 168 | help | 169 | help |
| 169 | Enable tracing of disable and enable events for preemption and irqs. | 170 | Enable tracing of disable and enable events for preemption and irqs. |
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 0ce99c379c30..40207c2a4113 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c | |||
| @@ -343,14 +343,13 @@ static const struct bpf_func_proto bpf_perf_event_read_value_proto = { | |||
| 343 | .arg4_type = ARG_CONST_SIZE, | 343 | .arg4_type = ARG_CONST_SIZE, |
| 344 | }; | 344 | }; |
| 345 | 345 | ||
| 346 | static DEFINE_PER_CPU(struct perf_sample_data, bpf_sd); | 346 | static DEFINE_PER_CPU(struct perf_sample_data, bpf_trace_sd); |
| 347 | 347 | ||
| 348 | static __always_inline u64 | 348 | static __always_inline u64 |
| 349 | __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map, | 349 | __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map, |
| 350 | u64 flags, struct perf_raw_record *raw) | 350 | u64 flags, struct perf_sample_data *sd) |
| 351 | { | 351 | { |
| 352 | struct bpf_array *array = container_of(map, struct bpf_array, map); | 352 | struct bpf_array *array = container_of(map, struct bpf_array, map); |
| 353 | struct perf_sample_data *sd = this_cpu_ptr(&bpf_sd); | ||
| 354 | unsigned int cpu = smp_processor_id(); | 353 | unsigned int cpu = smp_processor_id(); |
| 355 | u64 index = flags & BPF_F_INDEX_MASK; | 354 | u64 index = flags & BPF_F_INDEX_MASK; |
| 356 | struct bpf_event_entry *ee; | 355 | struct bpf_event_entry *ee; |
| @@ -373,8 +372,6 @@ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map, | |||
| 373 | if (unlikely(event->oncpu != cpu)) | 372 | if (unlikely(event->oncpu != cpu)) |
| 374 | return -EOPNOTSUPP; | 373 | return -EOPNOTSUPP; |
| 375 | 374 | ||
| 376 | perf_sample_data_init(sd, 0, 0); | ||
| 377 | sd->raw = raw; | ||
| 378 | perf_event_output(event, sd, regs); | 375 | perf_event_output(event, sd, regs); |
| 379 | return 0; | 376 | return 0; |
| 380 | } | 377 | } |
| @@ -382,6 +379,7 @@ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map, | |||
| 382 | BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map, | 379 | BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map, |
| 383 | u64, flags, void *, data, u64, size) | 380 | u64, flags, void *, data, u64, size) |
| 384 | { | 381 | { |
| 382 | struct perf_sample_data *sd = this_cpu_ptr(&bpf_trace_sd); | ||
| 385 | struct perf_raw_record raw = { | 383 | struct perf_raw_record raw = { |
| 386 | .frag = { | 384 | .frag = { |
| 387 | .size = size, | 385 | .size = size, |
| @@ -392,7 +390,10 @@ BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map, | |||
| 392 | if (unlikely(flags & ~(BPF_F_INDEX_MASK))) | 390 | if (unlikely(flags & ~(BPF_F_INDEX_MASK))) |
| 393 | return -EINVAL; | 391 | return -EINVAL; |
| 394 | 392 | ||
| 395 | return __bpf_perf_event_output(regs, map, flags, &raw); | 393 | perf_sample_data_init(sd, 0, 0); |
| 394 | sd->raw = &raw; | ||
| 395 | |||
| 396 | return __bpf_perf_event_output(regs, map, flags, sd); | ||
| 396 | } | 397 | } |
| 397 | 398 | ||
| 398 | static const struct bpf_func_proto bpf_perf_event_output_proto = { | 399 | static const struct bpf_func_proto bpf_perf_event_output_proto = { |
| @@ -407,10 +408,12 @@ static const struct bpf_func_proto bpf_perf_event_output_proto = { | |||
| 407 | }; | 408 | }; |
| 408 | 409 | ||
| 409 | static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs); | 410 | static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs); |
| 411 | static DEFINE_PER_CPU(struct perf_sample_data, bpf_misc_sd); | ||
| 410 | 412 | ||
| 411 | u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, | 413 | u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, |
| 412 | void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy) | 414 | void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy) |
| 413 | { | 415 | { |
| 416 | struct perf_sample_data *sd = this_cpu_ptr(&bpf_misc_sd); | ||
| 414 | struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs); | 417 | struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs); |
| 415 | struct perf_raw_frag frag = { | 418 | struct perf_raw_frag frag = { |
| 416 | .copy = ctx_copy, | 419 | .copy = ctx_copy, |
| @@ -428,8 +431,10 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, | |||
| 428 | }; | 431 | }; |
| 429 | 432 | ||
| 430 | perf_fetch_caller_regs(regs); | 433 | perf_fetch_caller_regs(regs); |
| 434 | perf_sample_data_init(sd, 0, 0); | ||
| 435 | sd->raw = &raw; | ||
| 431 | 436 | ||
| 432 | return __bpf_perf_event_output(regs, map, flags, &raw); | 437 | return __bpf_perf_event_output(regs, map, flags, sd); |
| 433 | } | 438 | } |
| 434 | 439 | ||
| 435 | BPF_CALL_0(bpf_get_current_task) | 440 | BPF_CALL_0(bpf_get_current_task) |
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 91874a95060d..c87766c1c204 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
| @@ -1799,12 +1799,6 @@ void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val) | |||
| 1799 | } | 1799 | } |
| 1800 | EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite); | 1800 | EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite); |
| 1801 | 1801 | ||
| 1802 | static __always_inline void * | ||
| 1803 | __rb_data_page_index(struct buffer_data_page *bpage, unsigned index) | ||
| 1804 | { | ||
| 1805 | return bpage->data + index; | ||
| 1806 | } | ||
| 1807 | |||
| 1808 | static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index) | 1802 | static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index) |
| 1809 | { | 1803 | { |
| 1810 | return bpage->page->data + index; | 1804 | return bpage->page->data + index; |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 73e67b68c53b..59518b8126d0 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
| @@ -362,7 +362,7 @@ trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct | |||
| 362 | } | 362 | } |
| 363 | 363 | ||
| 364 | /** | 364 | /** |
| 365 | * trace_pid_filter_add_remove - Add or remove a task from a pid_list | 365 | * trace_pid_filter_add_remove_task - Add or remove a task from a pid_list |
| 366 | * @pid_list: The list to modify | 366 | * @pid_list: The list to modify |
| 367 | * @self: The current task for fork or NULL for exit | 367 | * @self: The current task for fork or NULL for exit |
| 368 | * @task: The task to add or remove | 368 | * @task: The task to add or remove |
| @@ -925,7 +925,7 @@ static void tracing_snapshot_instance(struct trace_array *tr) | |||
| 925 | } | 925 | } |
| 926 | 926 | ||
| 927 | /** | 927 | /** |
| 928 | * trace_snapshot - take a snapshot of the current buffer. | 928 | * tracing_snapshot - take a snapshot of the current buffer. |
| 929 | * | 929 | * |
| 930 | * This causes a swap between the snapshot buffer and the current live | 930 | * This causes a swap between the snapshot buffer and the current live |
| 931 | * tracing buffer. You can use this to take snapshots of the live | 931 | * tracing buffer. You can use this to take snapshots of the live |
| @@ -1004,9 +1004,9 @@ int tracing_alloc_snapshot(void) | |||
| 1004 | EXPORT_SYMBOL_GPL(tracing_alloc_snapshot); | 1004 | EXPORT_SYMBOL_GPL(tracing_alloc_snapshot); |
| 1005 | 1005 | ||
| 1006 | /** | 1006 | /** |
| 1007 | * trace_snapshot_alloc - allocate and take a snapshot of the current buffer. | 1007 | * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer. |
| 1008 | * | 1008 | * |
| 1009 | * This is similar to trace_snapshot(), but it will allocate the | 1009 | * This is similar to tracing_snapshot(), but it will allocate the |
| 1010 | * snapshot buffer if it isn't already allocated. Use this only | 1010 | * snapshot buffer if it isn't already allocated. Use this only |
| 1011 | * where it is safe to sleep, as the allocation may sleep. | 1011 | * where it is safe to sleep, as the allocation may sleep. |
| 1012 | * | 1012 | * |
| @@ -1303,7 +1303,7 @@ unsigned long __read_mostly tracing_thresh; | |||
| 1303 | /* | 1303 | /* |
| 1304 | * Copy the new maximum trace into the separate maximum-trace | 1304 | * Copy the new maximum trace into the separate maximum-trace |
| 1305 | * structure. (this way the maximum trace is permanently saved, | 1305 | * structure. (this way the maximum trace is permanently saved, |
| 1306 | * for later retrieval via /sys/kernel/debug/tracing/latency_trace) | 1306 | * for later retrieval via /sys/kernel/tracing/tracing_max_latency) |
| 1307 | */ | 1307 | */ |
| 1308 | static void | 1308 | static void |
| 1309 | __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) | 1309 | __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) |
| @@ -2415,7 +2415,7 @@ trace_process_export(struct trace_export *export, | |||
| 2415 | 2415 | ||
| 2416 | entry = ring_buffer_event_data(event); | 2416 | entry = ring_buffer_event_data(event); |
| 2417 | size = ring_buffer_event_length(event); | 2417 | size = ring_buffer_event_length(event); |
| 2418 | export->write(entry, size); | 2418 | export->write(export, entry, size); |
| 2419 | } | 2419 | } |
| 2420 | 2420 | ||
| 2421 | static DEFINE_MUTEX(ftrace_export_lock); | 2421 | static DEFINE_MUTEX(ftrace_export_lock); |
| @@ -4178,37 +4178,30 @@ static const struct file_operations show_traces_fops = { | |||
| 4178 | .llseek = seq_lseek, | 4178 | .llseek = seq_lseek, |
| 4179 | }; | 4179 | }; |
| 4180 | 4180 | ||
| 4181 | /* | ||
| 4182 | * The tracer itself will not take this lock, but still we want | ||
| 4183 | * to provide a consistent cpumask to user-space: | ||
| 4184 | */ | ||
| 4185 | static DEFINE_MUTEX(tracing_cpumask_update_lock); | ||
| 4186 | |||
| 4187 | /* | ||
| 4188 | * Temporary storage for the character representation of the | ||
| 4189 | * CPU bitmask (and one more byte for the newline): | ||
| 4190 | */ | ||
| 4191 | static char mask_str[NR_CPUS + 1]; | ||
| 4192 | |||
| 4193 | static ssize_t | 4181 | static ssize_t |
| 4194 | tracing_cpumask_read(struct file *filp, char __user *ubuf, | 4182 | tracing_cpumask_read(struct file *filp, char __user *ubuf, |
| 4195 | size_t count, loff_t *ppos) | 4183 | size_t count, loff_t *ppos) |
| 4196 | { | 4184 | { |
| 4197 | struct trace_array *tr = file_inode(filp)->i_private; | 4185 | struct trace_array *tr = file_inode(filp)->i_private; |
| 4186 | char *mask_str; | ||
| 4198 | int len; | 4187 | int len; |
| 4199 | 4188 | ||
| 4200 | mutex_lock(&tracing_cpumask_update_lock); | 4189 | len = snprintf(NULL, 0, "%*pb\n", |
| 4190 | cpumask_pr_args(tr->tracing_cpumask)) + 1; | ||
| 4191 | mask_str = kmalloc(len, GFP_KERNEL); | ||
| 4192 | if (!mask_str) | ||
| 4193 | return -ENOMEM; | ||
| 4201 | 4194 | ||
| 4202 | len = snprintf(mask_str, count, "%*pb\n", | 4195 | len = snprintf(mask_str, len, "%*pb\n", |
| 4203 | cpumask_pr_args(tr->tracing_cpumask)); | 4196 | cpumask_pr_args(tr->tracing_cpumask)); |
| 4204 | if (len >= count) { | 4197 | if (len >= count) { |
| 4205 | count = -EINVAL; | 4198 | count = -EINVAL; |
| 4206 | goto out_err; | 4199 | goto out_err; |
| 4207 | } | 4200 | } |
| 4208 | count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1); | 4201 | count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len); |
| 4209 | 4202 | ||
| 4210 | out_err: | 4203 | out_err: |
| 4211 | mutex_unlock(&tracing_cpumask_update_lock); | 4204 | kfree(mask_str); |
| 4212 | 4205 | ||
| 4213 | return count; | 4206 | return count; |
| 4214 | } | 4207 | } |
| @@ -4228,8 +4221,6 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf, | |||
| 4228 | if (err) | 4221 | if (err) |
| 4229 | goto err_unlock; | 4222 | goto err_unlock; |
| 4230 | 4223 | ||
| 4231 | mutex_lock(&tracing_cpumask_update_lock); | ||
| 4232 | |||
| 4233 | local_irq_disable(); | 4224 | local_irq_disable(); |
| 4234 | arch_spin_lock(&tr->max_lock); | 4225 | arch_spin_lock(&tr->max_lock); |
| 4235 | for_each_tracing_cpu(cpu) { | 4226 | for_each_tracing_cpu(cpu) { |
| @@ -4252,8 +4243,6 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf, | |||
| 4252 | local_irq_enable(); | 4243 | local_irq_enable(); |
| 4253 | 4244 | ||
| 4254 | cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new); | 4245 | cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new); |
| 4255 | |||
| 4256 | mutex_unlock(&tracing_cpumask_update_lock); | ||
| 4257 | free_cpumask_var(tracing_cpumask_new); | 4246 | free_cpumask_var(tracing_cpumask_new); |
| 4258 | 4247 | ||
| 4259 | return count; | 4248 | return count; |
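The rewritten tracing_cpumask_read() above replaces the fixed mask_str[NR_CPUS + 1] buffer with an exact-fit allocation. The sizing trick is the standard two-pass snprintf() idiom; a hedged, stand-alone sketch:

	char *buf;
	int len;

	/* Pass 1: a NULL buffer with size 0 makes snprintf() return the
	 * length that would have been written, plus one for the NUL. */
	len = snprintf(NULL, 0, "%*pb\n", cpumask_pr_args(mask)) + 1;
	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	/* Pass 2: format for real into the right-sized buffer. */
	len = snprintf(buf, len, "%*pb\n", cpumask_pr_args(mask));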
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index 734accc02418..3c7bfc4bf5e9 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c | |||
| @@ -209,6 +209,10 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip, | |||
| 209 | if (__this_cpu_read(disable_stack_tracer) != 1) | 209 | if (__this_cpu_read(disable_stack_tracer) != 1) |
| 210 | goto out; | 210 | goto out; |
| 211 | 211 | ||
| 212 | /* If RCU is not watching, then saving the stack trace can fail */ | ||
| 213 | if (!rcu_is_watching()) | ||
| 214 | goto out; | ||
| 215 | |||
| 212 | ip += MCOUNT_INSN_SIZE; | 216 | ip += MCOUNT_INSN_SIZE; |
| 213 | 217 | ||
| 214 | check_stack(ip, &stack); | 218 | check_stack(ip, &stack); |
diff --git a/kernel/uid16.c b/kernel/uid16.c index ce74a4901d2b..ef1da2a5f9bd 100644 --- a/kernel/uid16.c +++ b/kernel/uid16.c | |||
| @@ -192,6 +192,7 @@ SYSCALL_DEFINE2(setgroups16, int, gidsetsize, old_gid_t __user *, grouplist) | |||
| 192 | return retval; | 192 | return retval; |
| 193 | } | 193 | } |
| 194 | 194 | ||
| 195 | groups_sort(group_info); | ||
| 195 | retval = set_current_groups(group_info); | 196 | retval = set_current_groups(group_info); |
| 196 | put_group_info(group_info); | 197 | put_group_info(group_info); |
| 197 | 198 | ||
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 8fdb710bfdd7..43d18cb46308 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
| @@ -38,7 +38,6 @@ | |||
| 38 | #include <linux/hardirq.h> | 38 | #include <linux/hardirq.h> |
| 39 | #include <linux/mempolicy.h> | 39 | #include <linux/mempolicy.h> |
| 40 | #include <linux/freezer.h> | 40 | #include <linux/freezer.h> |
| 41 | #include <linux/kallsyms.h> | ||
| 42 | #include <linux/debug_locks.h> | 41 | #include <linux/debug_locks.h> |
| 43 | #include <linux/lockdep.h> | 42 | #include <linux/lockdep.h> |
| 44 | #include <linux/idr.h> | 43 | #include <linux/idr.h> |
| @@ -48,6 +47,7 @@ | |||
| 48 | #include <linux/nodemask.h> | 47 | #include <linux/nodemask.h> |
| 49 | #include <linux/moduleparam.h> | 48 | #include <linux/moduleparam.h> |
| 50 | #include <linux/uaccess.h> | 49 | #include <linux/uaccess.h> |
| 50 | #include <linux/sched/isolation.h> | ||
| 51 | 51 | ||
| 52 | #include "workqueue_internal.h" | 52 | #include "workqueue_internal.h" |
| 53 | 53 | ||
| @@ -1634,7 +1634,7 @@ static void worker_enter_idle(struct worker *worker) | |||
| 1634 | mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT); | 1634 | mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT); |
| 1635 | 1635 | ||
| 1636 | /* | 1636 | /* |
| 1637 | * Sanity check nr_running. Because wq_unbind_fn() releases | 1637 | * Sanity check nr_running. Because unbind_workers() releases |
| 1638 | * pool->lock between setting %WORKER_UNBOUND and zapping | 1638 | * pool->lock between setting %WORKER_UNBOUND and zapping |
| 1639 | * nr_running, the warning may trigger spuriously. Check iff | 1639 | * nr_running, the warning may trigger spuriously. Check iff |
| 1640 | * unbind is not in progress. | 1640 | * unbind is not in progress. |
| @@ -4510,9 +4510,8 @@ void show_workqueue_state(void) | |||
| 4510 | * cpu comes back online. | 4510 | * cpu comes back online. |
| 4511 | */ | 4511 | */ |
| 4512 | 4512 | ||
| 4513 | static void wq_unbind_fn(struct work_struct *work) | 4513 | static void unbind_workers(int cpu) |
| 4514 | { | 4514 | { |
| 4515 | int cpu = smp_processor_id(); | ||
| 4516 | struct worker_pool *pool; | 4515 | struct worker_pool *pool; |
| 4517 | struct worker *worker; | 4516 | struct worker *worker; |
| 4518 | 4517 | ||
| @@ -4589,16 +4588,6 @@ static void rebind_workers(struct worker_pool *pool) | |||
| 4589 | 4588 | ||
| 4590 | spin_lock_irq(&pool->lock); | 4589 | spin_lock_irq(&pool->lock); |
| 4591 | 4590 | ||
| 4592 | /* | ||
| 4593 | * XXX: CPU hotplug notifiers are weird and can call DOWN_FAILED | ||
| 4594 | * w/o preceding DOWN_PREPARE. Work around it. CPU hotplug is | ||
| 4595 | * being reworked and this can go away in time. | ||
| 4596 | */ | ||
| 4597 | if (!(pool->flags & POOL_DISASSOCIATED)) { | ||
| 4598 | spin_unlock_irq(&pool->lock); | ||
| 4599 | return; | ||
| 4600 | } | ||
| 4601 | |||
| 4602 | pool->flags &= ~POOL_DISASSOCIATED; | 4591 | pool->flags &= ~POOL_DISASSOCIATED; |
| 4603 | 4592 | ||
| 4604 | for_each_pool_worker(worker, pool) { | 4593 | for_each_pool_worker(worker, pool) { |
| @@ -4709,12 +4698,13 @@ int workqueue_online_cpu(unsigned int cpu) | |||
| 4709 | 4698 | ||
| 4710 | int workqueue_offline_cpu(unsigned int cpu) | 4699 | int workqueue_offline_cpu(unsigned int cpu) |
| 4711 | { | 4700 | { |
| 4712 | struct work_struct unbind_work; | ||
| 4713 | struct workqueue_struct *wq; | 4701 | struct workqueue_struct *wq; |
| 4714 | 4702 | ||
| 4715 | /* unbinding per-cpu workers should happen on the local CPU */ | 4703 | /* unbinding per-cpu workers should happen on the local CPU */ |
| 4716 | INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn); | 4704 | if (WARN_ON(cpu != smp_processor_id())) |
| 4717 | queue_work_on(cpu, system_highpri_wq, &unbind_work); | 4705 | return -1; |
| 4706 | |||
| 4707 | unbind_workers(cpu); | ||
| 4718 | 4708 | ||
| 4719 | /* update NUMA affinity of unbound workqueues */ | 4709 | /* update NUMA affinity of unbound workqueues */ |
| 4720 | mutex_lock(&wq_pool_mutex); | 4710 | mutex_lock(&wq_pool_mutex); |
| @@ -4722,9 +4712,6 @@ int workqueue_offline_cpu(unsigned int cpu) | |||
| 4722 | wq_update_unbound_numa(wq, cpu, false); | 4712 | wq_update_unbound_numa(wq, cpu, false); |
| 4723 | mutex_unlock(&wq_pool_mutex); | 4713 | mutex_unlock(&wq_pool_mutex); |
| 4724 | 4714 | ||
| 4725 | /* wait for per-cpu unbinding to finish */ | ||
| 4726 | flush_work(&unbind_work); | ||
| 4727 | destroy_work_on_stack(&unbind_work); | ||
| 4728 | return 0; | 4715 | return 0; |
| 4729 | } | 4716 | } |
| 4730 | 4717 | ||
| @@ -4957,6 +4944,10 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask) | |||
| 4957 | if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL)) | 4944 | if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL)) |
| 4958 | return -ENOMEM; | 4945 | return -ENOMEM; |
| 4959 | 4946 | ||
| 4947 | /* | ||
| 4948 | * Not excluding isolated cpus on purpose. | ||
| 4949 | * If the user wishes to include them, we allow that. | ||
| 4950 | */ | ||
| 4960 | cpumask_and(cpumask, cpumask, cpu_possible_mask); | 4951 | cpumask_and(cpumask, cpumask, cpu_possible_mask); |
| 4961 | if (!cpumask_empty(cpumask)) { | 4952 | if (!cpumask_empty(cpumask)) { |
| 4962 | apply_wqattrs_lock(); | 4953 | apply_wqattrs_lock(); |
| @@ -5555,7 +5546,7 @@ int __init workqueue_init_early(void) | |||
| 5555 | WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long)); | 5546 | WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long)); |
| 5556 | 5547 | ||
| 5557 | BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL)); | 5548 | BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL)); |
| 5558 | cpumask_copy(wq_unbound_cpumask, cpu_possible_mask); | 5549 | cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(HK_FLAG_DOMAIN)); |
| 5559 | 5550 | ||
| 5560 | pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC); | 5551 | pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC); |
| 5561 | 5552 | ||
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 947d3e2ed5c2..9d5b78aad4c5 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug | |||
| @@ -1099,8 +1099,6 @@ config PROVE_LOCKING | |||
| 1099 | select DEBUG_MUTEXES | 1099 | select DEBUG_MUTEXES |
| 1100 | select DEBUG_RT_MUTEXES if RT_MUTEXES | 1100 | select DEBUG_RT_MUTEXES if RT_MUTEXES |
| 1101 | select DEBUG_LOCK_ALLOC | 1101 | select DEBUG_LOCK_ALLOC |
| 1102 | select LOCKDEP_CROSSRELEASE | ||
| 1103 | select LOCKDEP_COMPLETIONS | ||
| 1104 | select TRACE_IRQFLAGS | 1102 | select TRACE_IRQFLAGS |
| 1105 | default n | 1103 | default n |
| 1106 | help | 1104 | help |
| @@ -1170,37 +1168,6 @@ config LOCK_STAT | |||
| 1170 | CONFIG_LOCK_STAT defines "contended" and "acquired" lock events. | 1168 | CONFIG_LOCK_STAT defines "contended" and "acquired" lock events. |
| 1171 | (CONFIG_LOCKDEP defines "acquire" and "release" events.) | 1169 | (CONFIG_LOCKDEP defines "acquire" and "release" events.) |
| 1172 | 1170 | ||
| 1173 | config LOCKDEP_CROSSRELEASE | ||
| 1174 | bool | ||
| 1175 | help | ||
| 1176 | This makes lockdep work for crosslock which is a lock allowed to | ||
| 1177 | be released in a different context from the acquisition context. | ||
| 1178 | Normally a lock must be released in the context acquiring the lock. | ||
| 1179 | However, relexing this constraint helps synchronization primitives | ||
| 1180 | such as page locks or completions can use the lock correctness | ||
| 1181 | detector, lockdep. | ||
| 1182 | |||
| 1183 | config LOCKDEP_COMPLETIONS | ||
| 1184 | bool | ||
| 1185 | help | ||
| 1186 | A deadlock caused by wait_for_completion() and complete() can be | ||
| 1187 | detected by lockdep using crossrelease feature. | ||
| 1188 | |||
| 1189 | config BOOTPARAM_LOCKDEP_CROSSRELEASE_FULLSTACK | ||
| 1190 | bool "Enable the boot parameter, crossrelease_fullstack" | ||
| 1191 | depends on LOCKDEP_CROSSRELEASE | ||
| 1192 | default n | ||
| 1193 | help | ||
| 1194 | The lockdep "cross-release" feature needs to record stack traces | ||
| 1195 | (of calling functions) for all acquisitions, for eventual later | ||
| 1196 | use during analysis. By default only a single caller is recorded, | ||
| 1197 | because the unwind operation can be very expensive with deeper | ||
| 1198 | stack chains. | ||
| 1199 | |||
| 1200 | However a boot parameter, crossrelease_fullstack, was | ||
| 1201 | introduced since sometimes deeper traces are required for full | ||
| 1202 | analysis. This option turns on the boot parameter. | ||
| 1203 | |||
| 1204 | config DEBUG_LOCKDEP | 1171 | config DEBUG_LOCKDEP |
| 1205 | bool "Lock dependency engine debugging" | 1172 | bool "Lock dependency engine debugging" |
| 1206 | depends on DEBUG_KERNEL && LOCKDEP | 1173 | depends on DEBUG_KERNEL && LOCKDEP |
diff --git a/lib/rbtree.c b/lib/rbtree.c index ba4a9d165f1b..d3ff682fd4b8 100644 --- a/lib/rbtree.c +++ b/lib/rbtree.c | |||
| @@ -603,6 +603,16 @@ void rb_replace_node(struct rb_node *victim, struct rb_node *new, | |||
| 603 | } | 603 | } |
| 604 | EXPORT_SYMBOL(rb_replace_node); | 604 | EXPORT_SYMBOL(rb_replace_node); |
| 605 | 605 | ||
| 606 | void rb_replace_node_cached(struct rb_node *victim, struct rb_node *new, | ||
| 607 | struct rb_root_cached *root) | ||
| 608 | { | ||
| 609 | rb_replace_node(victim, new, &root->rb_root); | ||
| 610 | |||
| 611 | if (root->rb_leftmost == victim) | ||
| 612 | root->rb_leftmost = new; | ||
| 613 | } | ||
| 614 | EXPORT_SYMBOL(rb_replace_node_cached); | ||
| 615 | |||
| 606 | void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new, | 616 | void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new, |
| 607 | struct rb_root *root) | 617 | struct rb_root *root) |
| 608 | { | 618 | { |
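The new rb_replace_node_cached() keeps the cached leftmost pointer coherent when a node is swapped in place. A hedged usage sketch ('struct item' is a hypothetical container; as with rb_replace_node(), the replacement must sort identically to the victim):

	struct item {
		struct rb_node node;
		u64 key;
	};

	static void replace_item(struct item *old, struct item *new,
				 struct rb_root_cached *root)
	{
		new->key = old->key;	/* preserve the sort order */
		rb_replace_node_cached(&old->node, &new->node, root);
	}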
diff --git a/mm/early_ioremap.c b/mm/early_ioremap.c index d04ac1ec0559..1826f191e72c 100644 --- a/mm/early_ioremap.c +++ b/mm/early_ioremap.c | |||
| @@ -111,7 +111,7 @@ __early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot) | |||
| 111 | enum fixed_addresses idx; | 111 | enum fixed_addresses idx; |
| 112 | int i, slot; | 112 | int i, slot; |
| 113 | 113 | ||
| 114 | WARN_ON(system_state != SYSTEM_BOOTING); | 114 | WARN_ON(system_state >= SYSTEM_RUNNING); |
| 115 | 115 | ||
| 116 | slot = -1; | 116 | slot = -1; |
| 117 | for (i = 0; i < FIX_BTMAPS_SLOTS; i++) { | 117 | for (i = 0; i < FIX_BTMAPS_SLOTS; i++) { |
diff --git a/mm/frame_vector.c b/mm/frame_vector.c index 297c7238f7d4..c64dca6e27c2 100644 --- a/mm/frame_vector.c +++ b/mm/frame_vector.c | |||
| @@ -62,8 +62,10 @@ int get_vaddr_frames(unsigned long start, unsigned int nr_frames, | |||
| 62 | * get_user_pages_longterm() and disallow it for filesystem-dax | 62 | * get_user_pages_longterm() and disallow it for filesystem-dax |
| 63 | * mappings. | 63 | * mappings. |
| 64 | */ | 64 | */ |
| 65 | if (vma_is_fsdax(vma)) | 65 | if (vma_is_fsdax(vma)) { |
| 66 | return -EOPNOTSUPP; | 66 | ret = -EOPNOTSUPP; |
| 67 | goto out; | ||
| 68 | } | ||
| 67 | 69 | ||
| 68 | if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) { | 70 | if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) { |
| 69 | vec->got_ref = true; | 71 | vec->got_ref = true; |
diff --git a/mm/gup.c b/mm/gup.c | |||
| @@ -66,7 +66,7 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address, | |||
| 66 | */ | 66 | */ |
| 67 | static inline bool can_follow_write_pte(pte_t pte, unsigned int flags) | 67 | static inline bool can_follow_write_pte(pte_t pte, unsigned int flags) |
| 68 | { | 68 | { |
| 69 | return pte_access_permitted(pte, WRITE) || | 69 | return pte_write(pte) || |
| 70 | ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte)); | 70 | ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte)); |
| 71 | } | 71 | } |
| 72 | 72 | ||
diff --git a/mm/hmm.c b/mm/hmm.c | |||
| @@ -391,11 +391,11 @@ again: | |||
| 391 | if (pmd_protnone(pmd)) | 391 | if (pmd_protnone(pmd)) |
| 392 | return hmm_vma_walk_clear(start, end, walk); | 392 | return hmm_vma_walk_clear(start, end, walk); |
| 393 | 393 | ||
| 394 | if (!pmd_access_permitted(pmd, write_fault)) | 394 | if (write_fault && !pmd_write(pmd)) |
| 395 | return hmm_vma_walk_clear(start, end, walk); | 395 | return hmm_vma_walk_clear(start, end, walk); |
| 396 | 396 | ||
| 397 | pfn = pmd_pfn(pmd) + pte_index(addr); | 397 | pfn = pmd_pfn(pmd) + pte_index(addr); |
| 398 | flag |= pmd_access_permitted(pmd, WRITE) ? HMM_PFN_WRITE : 0; | 398 | flag |= pmd_write(pmd) ? HMM_PFN_WRITE : 0; |
| 399 | for (; addr < end; addr += PAGE_SIZE, i++, pfn++) | 399 | for (; addr < end; addr += PAGE_SIZE, i++, pfn++) |
| 400 | pfns[i] = hmm_pfn_t_from_pfn(pfn) | flag; | 400 | pfns[i] = hmm_pfn_t_from_pfn(pfn) | flag; |
| 401 | return 0; | 401 | return 0; |
| @@ -456,11 +456,11 @@ again: | |||
| 456 | continue; | 456 | continue; |
| 457 | } | 457 | } |
| 458 | 458 | ||
| 459 | if (!pte_access_permitted(pte, write_fault)) | 459 | if (write_fault && !pte_write(pte)) |
| 460 | goto fault; | 460 | goto fault; |
| 461 | 461 | ||
| 462 | pfns[i] = hmm_pfn_t_from_pfn(pte_pfn(pte)) | flag; | 462 | pfns[i] = hmm_pfn_t_from_pfn(pte_pfn(pte)) | flag; |
| 463 | pfns[i] |= pte_access_permitted(pte, WRITE) ? HMM_PFN_WRITE : 0; | 463 | pfns[i] |= pte_write(pte) ? HMM_PFN_WRITE : 0; |
| 464 | continue; | 464 | continue; |
| 465 | 465 | ||
| 466 | fault: | 466 | fault: |
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 2f2f5e774902..0e7ded98d114 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
| @@ -870,7 +870,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, | |||
| 870 | */ | 870 | */ |
| 871 | WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set"); | 871 | WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set"); |
| 872 | 872 | ||
| 873 | if (!pmd_access_permitted(*pmd, flags & FOLL_WRITE)) | 873 | if (flags & FOLL_WRITE && !pmd_write(*pmd)) |
| 874 | return NULL; | 874 | return NULL; |
| 875 | 875 | ||
| 876 | if (pmd_present(*pmd) && pmd_devmap(*pmd)) | 876 | if (pmd_present(*pmd) && pmd_devmap(*pmd)) |
| @@ -1012,7 +1012,7 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr, | |||
| 1012 | 1012 | ||
| 1013 | assert_spin_locked(pud_lockptr(mm, pud)); | 1013 | assert_spin_locked(pud_lockptr(mm, pud)); |
| 1014 | 1014 | ||
| 1015 | if (!pud_access_permitted(*pud, flags & FOLL_WRITE)) | 1015 | if (flags & FOLL_WRITE && !pud_write(*pud)) |
| 1016 | return NULL; | 1016 | return NULL; |
| 1017 | 1017 | ||
| 1018 | if (pud_present(*pud) && pud_devmap(*pud)) | 1018 | if (pud_present(*pud) && pud_devmap(*pud)) |
| @@ -1386,7 +1386,7 @@ out_unlock: | |||
| 1386 | */ | 1386 | */ |
| 1387 | static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags) | 1387 | static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags) |
| 1388 | { | 1388 | { |
| 1389 | return pmd_access_permitted(pmd, WRITE) || | 1389 | return pmd_write(pmd) || |
| 1390 | ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd)); | 1390 | ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd)); |
| 1391 | } | 1391 | } |
| 1392 | 1392 | ||
diff --git a/mm/kmemleak.c b/mm/kmemleak.c index 3d4781756d50..d73c14294f3a 100644 --- a/mm/kmemleak.c +++ b/mm/kmemleak.c | |||
| @@ -1523,7 +1523,7 @@ static void kmemleak_scan(void) | |||
| 1523 | if (page_count(page) == 0) | 1523 | if (page_count(page) == 0) |
| 1524 | continue; | 1524 | continue; |
| 1525 | scan_block(page, page + 1, NULL); | 1525 | scan_block(page, page + 1, NULL); |
| 1526 | if (!(pfn % (MAX_SCAN_SIZE / sizeof(*page)))) | 1526 | if (!(pfn & 63)) |
| 1527 | cond_resched(); | 1527 | cond_resched(); |
| 1528 | } | 1528 | } |
| 1529 | } | 1529 | } |
diff --git a/mm/memory.c b/mm/memory.c index 5eb3d2524bdc..ca5674cbaff2 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
| @@ -3831,7 +3831,8 @@ static inline int create_huge_pmd(struct vm_fault *vmf) | |||
| 3831 | return VM_FAULT_FALLBACK; | 3831 | return VM_FAULT_FALLBACK; |
| 3832 | } | 3832 | } |
| 3833 | 3833 | ||
| 3834 | static int wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd) | 3834 | /* `inline' is required to avoid gcc 4.1.2 build error */ |
| 3835 | static inline int wp_huge_pmd(struct vm_fault *vmf, pmd_t orig_pmd) | ||
| 3835 | { | 3836 | { |
| 3836 | if (vma_is_anonymous(vmf->vma)) | 3837 | if (vma_is_anonymous(vmf->vma)) |
| 3837 | return do_huge_pmd_wp_page(vmf, orig_pmd); | 3838 | return do_huge_pmd_wp_page(vmf, orig_pmd); |
| @@ -3948,7 +3949,7 @@ static int handle_pte_fault(struct vm_fault *vmf) | |||
| 3948 | if (unlikely(!pte_same(*vmf->pte, entry))) | 3949 | if (unlikely(!pte_same(*vmf->pte, entry))) |
| 3949 | goto unlock; | 3950 | goto unlock; |
| 3950 | if (vmf->flags & FAULT_FLAG_WRITE) { | 3951 | if (vmf->flags & FAULT_FLAG_WRITE) { |
| 3951 | if (!pte_access_permitted(entry, WRITE)) | 3952 | if (!pte_write(entry)) |
| 3952 | return do_wp_page(vmf); | 3953 | return do_wp_page(vmf); |
| 3953 | entry = pte_mkdirty(entry); | 3954 | entry = pte_mkdirty(entry); |
| 3954 | } | 3955 | } |
| @@ -4013,7 +4014,7 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address, | |||
| 4013 | 4014 | ||
| 4014 | /* NUMA case for anonymous PUDs would go here */ | 4015 | /* NUMA case for anonymous PUDs would go here */ |
| 4015 | 4016 | ||
| 4016 | if (dirty && !pud_access_permitted(orig_pud, WRITE)) { | 4017 | if (dirty && !pud_write(orig_pud)) { |
| 4017 | ret = wp_huge_pud(&vmf, orig_pud); | 4018 | ret = wp_huge_pud(&vmf, orig_pud); |
| 4018 | if (!(ret & VM_FAULT_FALLBACK)) | 4019 | if (!(ret & VM_FAULT_FALLBACK)) |
| 4019 | return ret; | 4020 | return ret; |
| @@ -4046,7 +4047,7 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address, | |||
| 4046 | if (pmd_protnone(orig_pmd) && vma_is_accessible(vma)) | 4047 | if (pmd_protnone(orig_pmd) && vma_is_accessible(vma)) |
| 4047 | return do_huge_pmd_numa_page(&vmf, orig_pmd); | 4048 | return do_huge_pmd_numa_page(&vmf, orig_pmd); |
| 4048 | 4049 | ||
| 4049 | if (dirty && !pmd_access_permitted(orig_pmd, WRITE)) { | 4050 | if (dirty && !pmd_write(orig_pmd)) { |
| 4050 | ret = wp_huge_pmd(&vmf, orig_pmd); | 4051 | ret = wp_huge_pmd(&vmf, orig_pmd); |
| 4051 | if (!(ret & VM_FAULT_FALLBACK)) | 4052 | if (!(ret & VM_FAULT_FALLBACK)) |
| 4052 | return ret; | 4053 | return ret; |
| @@ -4336,7 +4337,7 @@ int follow_phys(struct vm_area_struct *vma, | |||
| 4336 | goto out; | 4337 | goto out; |
| 4337 | pte = *ptep; | 4338 | pte = *ptep; |
| 4338 | 4339 | ||
| 4339 | if (!pte_access_permitted(pte, flags & FOLL_WRITE)) | 4340 | if ((flags & FOLL_WRITE) && !pte_write(pte)) |
| 4340 | goto unlock; | 4341 | goto unlock; |
| 4341 | 4342 | ||
| 4342 | *prot = pgprot_val(pte_pgprot(pte)); | 4343 | *prot = pgprot_val(pte_pgprot(pte)); |
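
The hunks above all follow one pattern: the pte/pmd/pud_access_permitted(..., WRITE) checks are reverted to the plain *_write() helpers, applied only when the caller actually requested write access. A sketch of the resulting shape; the helper itself is illustrative, not in the tree:

#include <linux/mm.h>

static bool foll_write_ok(pte_t pte, unsigned int flags)
{
	/* enforce write permission only for write requests */
	if ((flags & FOLL_WRITE) && !pte_write(pte))
		return false;
	return true;
}
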
diff --git a/mm/mmap.c b/mm/mmap.c | |||
| @@ -3019,20 +3019,20 @@ void exit_mmap(struct mm_struct *mm) | |||
| 3019 | /* Use -1 here to ensure all VMAs in the mm are unmapped */ | 3019 | /* Use -1 here to ensure all VMAs in the mm are unmapped */ |
| 3020 | unmap_vmas(&tlb, vma, 0, -1); | 3020 | unmap_vmas(&tlb, vma, 0, -1); |
| 3021 | 3021 | ||
| 3022 | set_bit(MMF_OOM_SKIP, &mm->flags); | 3022 | if (unlikely(mm_is_oom_victim(mm))) { |
| 3023 | if (unlikely(tsk_is_oom_victim(current))) { | ||
| 3024 | /* | 3023 | /* |
| 3025 | * Wait for oom_reap_task() to stop working on this | 3024 | * Wait for oom_reap_task() to stop working on this |
| 3026 | * mm. Because MMF_OOM_SKIP is already set before | 3025 | * mm. Because MMF_OOM_SKIP is already set before |
| 3027 | * calling down_read(), oom_reap_task() will not run | 3026 | * calling down_read(), oom_reap_task() will not run |
| 3028 | * on this "mm" post up_write(). | 3027 | * on this "mm" post up_write(). |
| 3029 | * | 3028 | * |
| 3030 | * tsk_is_oom_victim() cannot be set from under us | 3029 | * mm_is_oom_victim() cannot be set from under us |
| 3031 | * either because current->mm is already set to NULL | 3030 | * either because victim->mm is already set to NULL |
| 3032 | * under task_lock before calling mmput and oom_mm is | 3031 | * under task_lock before calling mmput and oom_mm is |
| 3033 | * set not NULL by the OOM killer only if current->mm | 3032 | * set not NULL by the OOM killer only if victim->mm |
| 3034 | * is found not NULL while holding the task_lock. | 3033 | * is found not NULL while holding the task_lock. |
| 3035 | */ | 3034 | */ |
| 3035 | set_bit(MMF_OOM_SKIP, &mm->flags); | ||
| 3036 | down_write(&mm->mmap_sem); | 3036 | down_write(&mm->mmap_sem); |
| 3037 | up_write(&mm->mmap_sem); | 3037 | up_write(&mm->mmap_sem); |
| 3038 | } | 3038 | } |
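
The empty down_write()/up_write() pair in exit_mmap() is a barrier idiom: a write lock cannot be taken while a reader (here, the OOM reaper) is inside its read-side section, so the pair simply waits the reaper out. In isolation:

#include <linux/rwsem.h>

static void wait_out_readers(struct rw_semaphore *sem)
{
	down_write(sem);	/* blocks until all readers are gone */
	up_write(sem);		/* empty section: barrier only */
}
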
diff --git a/mm/oom_kill.c b/mm/oom_kill.c index c957be32b27a..29f855551efe 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c | |||
| @@ -683,8 +683,10 @@ static void mark_oom_victim(struct task_struct *tsk) | |||
| 683 | return; | 683 | return; |
| 684 | 684 | ||
| 685 | /* oom_mm is bound to the signal struct life time. */ | 685 | /* oom_mm is bound to the signal struct life time. */ |
| 686 | if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) | 686 | if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) { |
| 687 | mmgrab(tsk->signal->oom_mm); | 687 | mmgrab(tsk->signal->oom_mm); |
| 688 | set_bit(MMF_OOM_VICTIM, &mm->flags); | ||
| 689 | } | ||
| 688 | 690 | ||
| 689 | /* | 691 | /* |
| 690 | * Make sure that the task is woken up from uninterruptible sleep | 692 | * Make sure that the task is woken up from uninterruptible sleep |
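
The mark_oom_victim() hunk relies on the claim-once cmpxchg() idiom: the compare-and-exchange installs the pointer only if the slot is still NULL, so exactly one task performs the one-time setup. The generic form, with a hypothetical slot name:

#include <linux/atomic.h>
#include <linux/sched/mm.h>

static struct mm_struct *claimed;

static void claim_once(struct mm_struct *mm)
{
	/* cmpxchg() returns the old value: NULL means we won */
	if (!cmpxchg(&claimed, NULL, mm)) {
		mmgrab(mm);	/* winner takes the reference */
		/* ... any other one-time setup ... */
	}
}
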
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 73f5d4556b3d..7e5e775e97f4 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
| @@ -2684,6 +2684,7 @@ void free_unref_page_list(struct list_head *list) | |||
| 2684 | { | 2684 | { |
| 2685 | struct page *page, *next; | 2685 | struct page *page, *next; |
| 2686 | unsigned long flags, pfn; | 2686 | unsigned long flags, pfn; |
| 2687 | int batch_count = 0; | ||
| 2687 | 2688 | ||
| 2688 | /* Prepare pages for freeing */ | 2689 | /* Prepare pages for freeing */ |
| 2689 | list_for_each_entry_safe(page, next, list, lru) { | 2690 | list_for_each_entry_safe(page, next, list, lru) { |
| @@ -2700,6 +2701,16 @@ void free_unref_page_list(struct list_head *list) | |||
| 2700 | set_page_private(page, 0); | 2701 | set_page_private(page, 0); |
| 2701 | trace_mm_page_free_batched(page); | 2702 | trace_mm_page_free_batched(page); |
| 2702 | free_unref_page_commit(page, pfn); | 2703 | free_unref_page_commit(page, pfn); |
| 2704 | |||
| 2705 | /* | ||
| 2706 | * Guard against excessive IRQ disabled times when we get | ||
| 2707 | * a large list of pages to free. | ||
| 2708 | */ | ||
| 2709 | if (++batch_count == SWAP_CLUSTER_MAX) { | ||
| 2710 | local_irq_restore(flags); | ||
| 2711 | batch_count = 0; | ||
| 2712 | local_irq_save(flags); | ||
| 2713 | } | ||
| 2703 | } | 2714 | } |
| 2704 | local_irq_restore(flags); | 2715 | local_irq_restore(flags); |
| 2705 | } | 2716 | } |
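
The free_unref_page_list() change caps IRQ-off latency by briefly re-enabling interrupts every SWAP_CLUSTER_MAX pages. The general shape of the pattern, with an illustrative callback and batch size:

#include <linux/irqflags.h>

#define BATCH	32	/* the kernel uses SWAP_CLUSTER_MAX here */

static void drain_list(void (*free_one)(void *), void **items, int n)
{
	unsigned long flags;
	int i, batch = 0;

	local_irq_save(flags);
	for (i = 0; i < n; i++) {
		free_one(items[i]);
		if (++batch == BATCH) {
			local_irq_restore(flags);	/* let IRQs run */
			batch = 0;
			local_irq_save(flags);
		}
	}
	local_irq_restore(flags);
}
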
diff --git a/mm/percpu.c b/mm/percpu.c index 79e3549cab0f..50e7fdf84055 100644 --- a/mm/percpu.c +++ b/mm/percpu.c | |||
| @@ -2719,7 +2719,11 @@ void __init setup_per_cpu_areas(void) | |||
| 2719 | 2719 | ||
| 2720 | if (pcpu_setup_first_chunk(ai, fc) < 0) | 2720 | if (pcpu_setup_first_chunk(ai, fc) < 0) |
| 2721 | panic("Failed to initialize percpu areas."); | 2721 | panic("Failed to initialize percpu areas."); |
| 2722 | #ifdef CONFIG_CRIS | ||
| 2723 | #warning "the CRIS architecture has physical and virtual addresses confused" | ||
| 2724 | #else | ||
| 2722 | pcpu_free_alloc_info(ai); | 2725 | pcpu_free_alloc_info(ai); |
| 2726 | #endif | ||
| 2723 | } | 2727 | } |
| 2724 | 2728 | ||
| 2725 | #endif /* CONFIG_SMP */ | 2729 | #endif /* CONFIG_SMP */ |
diff --git a/mm/slab.c b/mm/slab.c | |||
| @@ -1584,11 +1584,8 @@ static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines) | |||
| 1584 | *dbg_redzone2(cachep, objp)); | 1584 | *dbg_redzone2(cachep, objp)); |
| 1585 | } | 1585 | } |
| 1586 | 1586 | ||
| 1587 | if (cachep->flags & SLAB_STORE_USER) { | 1587 | if (cachep->flags & SLAB_STORE_USER) |
| 1588 | pr_err("Last user: [<%p>](%pSR)\n", | 1588 | pr_err("Last user: (%pSR)\n", *dbg_userword(cachep, objp)); |
| 1589 | *dbg_userword(cachep, objp), | ||
| 1590 | *dbg_userword(cachep, objp)); | ||
| 1591 | } | ||
| 1592 | realobj = (char *)objp + obj_offset(cachep); | 1589 | realobj = (char *)objp + obj_offset(cachep); |
| 1593 | size = cachep->object_size; | 1590 | size = cachep->object_size; |
| 1594 | for (i = 0; i < size && lines; i += 16, lines--) { | 1591 | for (i = 0; i < size && lines; i += 16, lines--) { |
| @@ -1621,7 +1618,7 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp) | |||
| 1621 | /* Mismatch ! */ | 1618 | /* Mismatch ! */ |
| 1622 | /* Print header */ | 1619 | /* Print header */ |
| 1623 | if (lines == 0) { | 1620 | if (lines == 0) { |
| 1624 | pr_err("Slab corruption (%s): %s start=%p, len=%d\n", | 1621 | pr_err("Slab corruption (%s): %s start=%px, len=%d\n", |
| 1625 | print_tainted(), cachep->name, | 1622 | print_tainted(), cachep->name, |
| 1626 | realobj, size); | 1623 | realobj, size); |
| 1627 | print_objinfo(cachep, objp, 0); | 1624 | print_objinfo(cachep, objp, 0); |
| @@ -1650,13 +1647,13 @@ static void check_poison_obj(struct kmem_cache *cachep, void *objp) | |||
| 1650 | if (objnr) { | 1647 | if (objnr) { |
| 1651 | objp = index_to_obj(cachep, page, objnr - 1); | 1648 | objp = index_to_obj(cachep, page, objnr - 1); |
| 1652 | realobj = (char *)objp + obj_offset(cachep); | 1649 | realobj = (char *)objp + obj_offset(cachep); |
| 1653 | pr_err("Prev obj: start=%p, len=%d\n", realobj, size); | 1650 | pr_err("Prev obj: start=%px, len=%d\n", realobj, size); |
| 1654 | print_objinfo(cachep, objp, 2); | 1651 | print_objinfo(cachep, objp, 2); |
| 1655 | } | 1652 | } |
| 1656 | if (objnr + 1 < cachep->num) { | 1653 | if (objnr + 1 < cachep->num) { |
| 1657 | objp = index_to_obj(cachep, page, objnr + 1); | 1654 | objp = index_to_obj(cachep, page, objnr + 1); |
| 1658 | realobj = (char *)objp + obj_offset(cachep); | 1655 | realobj = (char *)objp + obj_offset(cachep); |
| 1659 | pr_err("Next obj: start=%p, len=%d\n", realobj, size); | 1656 | pr_err("Next obj: start=%px, len=%d\n", realobj, size); |
| 1660 | print_objinfo(cachep, objp, 2); | 1657 | print_objinfo(cachep, objp, 2); |
| 1661 | } | 1658 | } |
| 1662 | } | 1659 | } |
| @@ -2608,7 +2605,7 @@ static void slab_put_obj(struct kmem_cache *cachep, | |||
| 2608 | /* Verify double free bug */ | 2605 | /* Verify double free bug */ |
| 2609 | for (i = page->active; i < cachep->num; i++) { | 2606 | for (i = page->active; i < cachep->num; i++) { |
| 2610 | if (get_free_obj(page, i) == objnr) { | 2607 | if (get_free_obj(page, i) == objnr) { |
| 2611 | pr_err("slab: double free detected in cache '%s', objp %p\n", | 2608 | pr_err("slab: double free detected in cache '%s', objp %px\n", |
| 2612 | cachep->name, objp); | 2609 | cachep->name, objp); |
| 2613 | BUG(); | 2610 | BUG(); |
| 2614 | } | 2611 | } |
| @@ -2772,7 +2769,7 @@ static inline void verify_redzone_free(struct kmem_cache *cache, void *obj) | |||
| 2772 | else | 2769 | else |
| 2773 | slab_error(cache, "memory outside object was overwritten"); | 2770 | slab_error(cache, "memory outside object was overwritten"); |
| 2774 | 2771 | ||
| 2775 | pr_err("%p: redzone 1:0x%llx, redzone 2:0x%llx\n", | 2772 | pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n", |
| 2776 | obj, redzone1, redzone2); | 2773 | obj, redzone1, redzone2); |
| 2777 | } | 2774 | } |
| 2778 | 2775 | ||
| @@ -3078,7 +3075,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, | |||
| 3078 | if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || | 3075 | if (*dbg_redzone1(cachep, objp) != RED_INACTIVE || |
| 3079 | *dbg_redzone2(cachep, objp) != RED_INACTIVE) { | 3076 | *dbg_redzone2(cachep, objp) != RED_INACTIVE) { |
| 3080 | slab_error(cachep, "double free, or memory outside object was overwritten"); | 3077 | slab_error(cachep, "double free, or memory outside object was overwritten"); |
| 3081 | pr_err("%p: redzone 1:0x%llx, redzone 2:0x%llx\n", | 3078 | pr_err("%px: redzone 1:0x%llx, redzone 2:0x%llx\n", |
| 3082 | objp, *dbg_redzone1(cachep, objp), | 3079 | objp, *dbg_redzone1(cachep, objp), |
| 3083 | *dbg_redzone2(cachep, objp)); | 3080 | *dbg_redzone2(cachep, objp)); |
| 3084 | } | 3081 | } |
| @@ -3091,7 +3088,7 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, | |||
| 3091 | cachep->ctor(objp); | 3088 | cachep->ctor(objp); |
| 3092 | if (ARCH_SLAB_MINALIGN && | 3089 | if (ARCH_SLAB_MINALIGN && |
| 3093 | ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) { | 3090 | ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) { |
| 3094 | pr_err("0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n", | 3091 | pr_err("0x%px: not aligned to ARCH_SLAB_MINALIGN=%d\n", |
| 3095 | objp, (int)ARCH_SLAB_MINALIGN); | 3092 | objp, (int)ARCH_SLAB_MINALIGN); |
| 3096 | } | 3093 | } |
| 3097 | return objp; | 3094 | return objp; |
| @@ -4283,7 +4280,7 @@ static void show_symbol(struct seq_file *m, unsigned long address) | |||
| 4283 | return; | 4280 | return; |
| 4284 | } | 4281 | } |
| 4285 | #endif | 4282 | #endif |
| 4286 | seq_printf(m, "%p", (void *)address); | 4283 | seq_printf(m, "%px", (void *)address); |
| 4287 | } | 4284 | } |
| 4288 | 4285 | ||
| 4289 | static int leaks_show(struct seq_file *m, void *p) | 4286 | static int leaks_show(struct seq_file *m, void *p) |
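
These slab hunks are part of the 4.15 pointer-hashing work: a plain %p now prints a hashed identifier so logs do not leak kernel addresses, while the new %px deliberately prints the raw value for debug output, such as the corruption reports above, where the real address matters:

pr_err("object at %px (hashed id %p)\n", objp, objp);
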
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index 1b659ab652fb..bbe8414b6ee7 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c | |||
| @@ -1214,7 +1214,7 @@ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node, | |||
| 1214 | orig_node->last_seen = jiffies; | 1214 | orig_node->last_seen = jiffies; |
| 1215 | 1215 | ||
| 1216 | /* find packet count of corresponding one hop neighbor */ | 1216 | /* find packet count of corresponding one hop neighbor */ |
| 1217 | spin_lock_bh(&orig_node->bat_iv.ogm_cnt_lock); | 1217 | spin_lock_bh(&orig_neigh_node->bat_iv.ogm_cnt_lock); |
| 1218 | if_num = if_incoming->if_num; | 1218 | if_num = if_incoming->if_num; |
| 1219 | orig_eq_count = orig_neigh_node->bat_iv.bcast_own_sum[if_num]; | 1219 | orig_eq_count = orig_neigh_node->bat_iv.bcast_own_sum[if_num]; |
| 1220 | neigh_ifinfo = batadv_neigh_ifinfo_new(neigh_node, if_outgoing); | 1220 | neigh_ifinfo = batadv_neigh_ifinfo_new(neigh_node, if_outgoing); |
| @@ -1224,7 +1224,7 @@ static bool batadv_iv_ogm_calc_tq(struct batadv_orig_node *orig_node, | |||
| 1224 | } else { | 1224 | } else { |
| 1225 | neigh_rq_count = 0; | 1225 | neigh_rq_count = 0; |
| 1226 | } | 1226 | } |
| 1227 | spin_unlock_bh(&orig_node->bat_iv.ogm_cnt_lock); | 1227 | spin_unlock_bh(&orig_neigh_node->bat_iv.ogm_cnt_lock); |
| 1228 | 1228 | ||
| 1229 | /* pay attention to not get a value bigger than 100 % */ | 1229 | /* pay attention to not get a value bigger than 100 % */ |
| 1230 | if (orig_eq_count > neigh_rq_count) | 1230 | if (orig_eq_count > neigh_rq_count) |
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c index 341ceab8338d..e0e2bfcd6b3e 100644 --- a/net/batman-adv/bat_v.c +++ b/net/batman-adv/bat_v.c | |||
| @@ -814,7 +814,7 @@ static bool batadv_v_gw_is_eligible(struct batadv_priv *bat_priv, | |||
| 814 | } | 814 | } |
| 815 | 815 | ||
| 816 | orig_gw = batadv_gw_node_get(bat_priv, orig_node); | 816 | orig_gw = batadv_gw_node_get(bat_priv, orig_node); |
| 817 | if (!orig_node) | 817 | if (!orig_gw) |
| 818 | goto out; | 818 | goto out; |
| 819 | 819 | ||
| 820 | if (batadv_v_gw_throughput_get(orig_gw, &orig_throughput) < 0) | 820 | if (batadv_v_gw_throughput_get(orig_gw, &orig_throughput) < 0) |
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c index a98cf1104a30..ebe6e38934e4 100644 --- a/net/batman-adv/fragmentation.c +++ b/net/batman-adv/fragmentation.c | |||
| @@ -499,6 +499,8 @@ int batadv_frag_send_packet(struct sk_buff *skb, | |||
| 499 | */ | 499 | */ |
| 500 | if (skb->priority >= 256 && skb->priority <= 263) | 500 | if (skb->priority >= 256 && skb->priority <= 263) |
| 501 | frag_header.priority = skb->priority - 256; | 501 | frag_header.priority = skb->priority - 256; |
| 502 | else | ||
| 503 | frag_header.priority = 0; | ||
| 502 | 504 | ||
| 503 | ether_addr_copy(frag_header.orig, primary_if->net_dev->dev_addr); | 505 | ether_addr_copy(frag_header.orig, primary_if->net_dev->dev_addr); |
| 504 | ether_addr_copy(frag_header.dest, orig_node->orig); | 506 | ether_addr_copy(frag_header.dest, orig_node->orig); |
diff --git a/net/batman-adv/tp_meter.c b/net/batman-adv/tp_meter.c index 15cd2139381e..ebc4e2241c77 100644 --- a/net/batman-adv/tp_meter.c +++ b/net/batman-adv/tp_meter.c | |||
| @@ -482,7 +482,7 @@ static void batadv_tp_reset_sender_timer(struct batadv_tp_vars *tp_vars) | |||
| 482 | 482 | ||
| 483 | /** | 483 | /** |
| 484 | * batadv_tp_sender_timeout - timer that fires in case of packet loss | 484 | * batadv_tp_sender_timeout - timer that fires in case of packet loss |
| 485 | * @arg: address of the related tp_vars | 485 | * @t: address to timer_list inside tp_vars |
| 486 | * | 486 | * |
| 487 | * If fired it means that there was packet loss. | 487 | * If fired it means that there was packet loss. |
| 488 | * Switch to Slow Start, set the ss_threshold to half of the current cwnd and | 488 | * Switch to Slow Start, set the ss_threshold to half of the current cwnd and |
| @@ -1106,7 +1106,7 @@ static void batadv_tp_reset_receiver_timer(struct batadv_tp_vars *tp_vars) | |||
| 1106 | /** | 1106 | /** |
| 1107 | * batadv_tp_receiver_shutdown - stop a tp meter receiver when timeout is | 1107 | * batadv_tp_receiver_shutdown - stop a tp meter receiver when timeout is |
| 1108 | * reached without received ack | 1108 | * reached without received ack |
| 1109 | * @arg: address of the related tp_vars | 1109 | * @t: address to timer_list inside tp_vars |
| 1110 | */ | 1110 | */ |
| 1111 | static void batadv_tp_receiver_shutdown(struct timer_list *t) | 1111 | static void batadv_tp_receiver_shutdown(struct timer_list *t) |
| 1112 | { | 1112 | { |
diff --git a/net/core/netprio_cgroup.c b/net/core/netprio_cgroup.c index 1c4810919a0a..b9057478d69c 100644 --- a/net/core/netprio_cgroup.c +++ b/net/core/netprio_cgroup.c | |||
| @@ -14,7 +14,6 @@ | |||
| 14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
| 15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
| 16 | #include <linux/types.h> | 16 | #include <linux/types.h> |
| 17 | #include <linux/module.h> | ||
| 18 | #include <linux/string.h> | 17 | #include <linux/string.h> |
| 19 | #include <linux/errno.h> | 18 | #include <linux/errno.h> |
| 20 | #include <linux/skbuff.h> | 19 | #include <linux/skbuff.h> |
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 6b0ff396fa9d..a592ca025fc4 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
| @@ -4293,7 +4293,7 @@ void skb_complete_tx_timestamp(struct sk_buff *skb, | |||
| 4293 | struct sock *sk = skb->sk; | 4293 | struct sock *sk = skb->sk; |
| 4294 | 4294 | ||
| 4295 | if (!skb_may_tx_timestamp(sk, false)) | 4295 | if (!skb_may_tx_timestamp(sk, false)) |
| 4296 | return; | 4296 | goto err; |
| 4297 | 4297 | ||
| 4298 | /* Take a reference to prevent skb_orphan() from freeing the socket, | 4298 | /* Take a reference to prevent skb_orphan() from freeing the socket, |
| 4299 | * but only if the socket refcount is not zero. | 4299 | * but only if the socket refcount is not zero. |
| @@ -4302,7 +4302,11 @@ void skb_complete_tx_timestamp(struct sk_buff *skb, | |||
| 4302 | *skb_hwtstamps(skb) = *hwtstamps; | 4302 | *skb_hwtstamps(skb) = *hwtstamps; |
| 4303 | __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false); | 4303 | __skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false); |
| 4304 | sock_put(sk); | 4304 | sock_put(sk); |
| 4305 | return; | ||
| 4305 | } | 4306 | } |
| 4307 | |||
| 4308 | err: | ||
| 4309 | kfree_skb(skb); | ||
| 4306 | } | 4310 | } |
| 4307 | EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); | 4311 | EXPORT_SYMBOL_GPL(skb_complete_tx_timestamp); |
| 4308 | 4312 | ||
diff --git a/net/dsa/slave.c b/net/dsa/slave.c index d6e7a642493b..a95a55f79137 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c | |||
| @@ -16,7 +16,6 @@ | |||
| 16 | #include <linux/of_net.h> | 16 | #include <linux/of_net.h> |
| 17 | #include <linux/of_mdio.h> | 17 | #include <linux/of_mdio.h> |
| 18 | #include <linux/mdio.h> | 18 | #include <linux/mdio.h> |
| 19 | #include <linux/list.h> | ||
| 20 | #include <net/rtnetlink.h> | 19 | #include <net/rtnetlink.h> |
| 21 | #include <net/pkt_cls.h> | 20 | #include <net/pkt_cls.h> |
| 22 | #include <net/tc_act/tc_mirred.h> | 21 | #include <net/tc_act/tc_mirred.h> |
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index a4573bccd6da..7a93359fbc72 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c | |||
| @@ -1428,7 +1428,7 @@ skip: | |||
| 1428 | 1428 | ||
| 1429 | static bool inetdev_valid_mtu(unsigned int mtu) | 1429 | static bool inetdev_valid_mtu(unsigned int mtu) |
| 1430 | { | 1430 | { |
| 1431 | return mtu >= 68; | 1431 | return mtu >= IPV4_MIN_MTU; |
| 1432 | } | 1432 | } |
| 1433 | 1433 | ||
| 1434 | static void inetdev_send_gratuitous_arp(struct net_device *dev, | 1434 | static void inetdev_send_gratuitous_arp(struct net_device *dev, |
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index d1f8f302dbf3..726f6b608274 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c | |||
| @@ -89,6 +89,7 @@ | |||
| 89 | #include <linux/rtnetlink.h> | 89 | #include <linux/rtnetlink.h> |
| 90 | #include <linux/times.h> | 90 | #include <linux/times.h> |
| 91 | #include <linux/pkt_sched.h> | 91 | #include <linux/pkt_sched.h> |
| 92 | #include <linux/byteorder/generic.h> | ||
| 92 | 93 | ||
| 93 | #include <net/net_namespace.h> | 94 | #include <net/net_namespace.h> |
| 94 | #include <net/arp.h> | 95 | #include <net/arp.h> |
| @@ -321,6 +322,23 @@ igmp_scount(struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted) | |||
| 321 | return scount; | 322 | return scount; |
| 322 | } | 323 | } |
| 323 | 324 | ||
| 325 | /* source address selection per RFC 3376 section 4.2.13 */ | ||
| 326 | static __be32 igmpv3_get_srcaddr(struct net_device *dev, | ||
| 327 | const struct flowi4 *fl4) | ||
| 328 | { | ||
| 329 | struct in_device *in_dev = __in_dev_get_rcu(dev); | ||
| 330 | |||
| 331 | if (!in_dev) | ||
| 332 | return htonl(INADDR_ANY); | ||
| 333 | |||
| 334 | for_ifa(in_dev) { | ||
| 335 | if (inet_ifa_match(fl4->saddr, ifa)) | ||
| 336 | return fl4->saddr; | ||
| 337 | } endfor_ifa(in_dev); | ||
| 338 | |||
| 339 | return htonl(INADDR_ANY); | ||
| 340 | } | ||
| 341 | |||
| 324 | static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu) | 342 | static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu) |
| 325 | { | 343 | { |
| 326 | struct sk_buff *skb; | 344 | struct sk_buff *skb; |
| @@ -368,7 +386,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu) | |||
| 368 | pip->frag_off = htons(IP_DF); | 386 | pip->frag_off = htons(IP_DF); |
| 369 | pip->ttl = 1; | 387 | pip->ttl = 1; |
| 370 | pip->daddr = fl4.daddr; | 388 | pip->daddr = fl4.daddr; |
| 371 | pip->saddr = fl4.saddr; | 389 | pip->saddr = igmpv3_get_srcaddr(dev, &fl4); |
| 372 | pip->protocol = IPPROTO_IGMP; | 390 | pip->protocol = IPPROTO_IGMP; |
| 373 | pip->tot_len = 0; /* filled in later */ | 391 | pip->tot_len = 0; /* filled in later */ |
| 374 | ip_select_ident(net, skb, NULL); | 392 | ip_select_ident(net, skb, NULL); |
| @@ -404,16 +422,17 @@ static int grec_size(struct ip_mc_list *pmc, int type, int gdel, int sdel) | |||
| 404 | } | 422 | } |
| 405 | 423 | ||
| 406 | static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc, | 424 | static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc, |
| 407 | int type, struct igmpv3_grec **ppgr) | 425 | int type, struct igmpv3_grec **ppgr, unsigned int mtu) |
| 408 | { | 426 | { |
| 409 | struct net_device *dev = pmc->interface->dev; | 427 | struct net_device *dev = pmc->interface->dev; |
| 410 | struct igmpv3_report *pih; | 428 | struct igmpv3_report *pih; |
| 411 | struct igmpv3_grec *pgr; | 429 | struct igmpv3_grec *pgr; |
| 412 | 430 | ||
| 413 | if (!skb) | 431 | if (!skb) { |
| 414 | skb = igmpv3_newpack(dev, dev->mtu); | 432 | skb = igmpv3_newpack(dev, mtu); |
| 415 | if (!skb) | 433 | if (!skb) |
| 416 | return NULL; | 434 | return NULL; |
| 435 | } | ||
| 417 | pgr = skb_put(skb, sizeof(struct igmpv3_grec)); | 436 | pgr = skb_put(skb, sizeof(struct igmpv3_grec)); |
| 418 | pgr->grec_type = type; | 437 | pgr->grec_type = type; |
| 419 | pgr->grec_auxwords = 0; | 438 | pgr->grec_auxwords = 0; |
| @@ -436,12 +455,17 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc, | |||
| 436 | struct igmpv3_grec *pgr = NULL; | 455 | struct igmpv3_grec *pgr = NULL; |
| 437 | struct ip_sf_list *psf, *psf_next, *psf_prev, **psf_list; | 456 | struct ip_sf_list *psf, *psf_next, *psf_prev, **psf_list; |
| 438 | int scount, stotal, first, isquery, truncate; | 457 | int scount, stotal, first, isquery, truncate; |
| 458 | unsigned int mtu; | ||
| 439 | 459 | ||
| 440 | if (pmc->multiaddr == IGMP_ALL_HOSTS) | 460 | if (pmc->multiaddr == IGMP_ALL_HOSTS) |
| 441 | return skb; | 461 | return skb; |
| 442 | if (ipv4_is_local_multicast(pmc->multiaddr) && !net->ipv4.sysctl_igmp_llm_reports) | 462 | if (ipv4_is_local_multicast(pmc->multiaddr) && !net->ipv4.sysctl_igmp_llm_reports) |
| 443 | return skb; | 463 | return skb; |
| 444 | 464 | ||
| 465 | mtu = READ_ONCE(dev->mtu); | ||
| 466 | if (mtu < IPV4_MIN_MTU) | ||
| 467 | return skb; | ||
| 468 | |||
| 445 | isquery = type == IGMPV3_MODE_IS_INCLUDE || | 469 | isquery = type == IGMPV3_MODE_IS_INCLUDE || |
| 446 | type == IGMPV3_MODE_IS_EXCLUDE; | 470 | type == IGMPV3_MODE_IS_EXCLUDE; |
| 447 | truncate = type == IGMPV3_MODE_IS_EXCLUDE || | 471 | truncate = type == IGMPV3_MODE_IS_EXCLUDE || |
| @@ -462,7 +486,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc, | |||
| 462 | AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) { | 486 | AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) { |
| 463 | if (skb) | 487 | if (skb) |
| 464 | igmpv3_sendpack(skb); | 488 | igmpv3_sendpack(skb); |
| 465 | skb = igmpv3_newpack(dev, dev->mtu); | 489 | skb = igmpv3_newpack(dev, mtu); |
| 466 | } | 490 | } |
| 467 | } | 491 | } |
| 468 | first = 1; | 492 | first = 1; |
| @@ -498,12 +522,12 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc, | |||
| 498 | pgr->grec_nsrcs = htons(scount); | 522 | pgr->grec_nsrcs = htons(scount); |
| 499 | if (skb) | 523 | if (skb) |
| 500 | igmpv3_sendpack(skb); | 524 | igmpv3_sendpack(skb); |
| 501 | skb = igmpv3_newpack(dev, dev->mtu); | 525 | skb = igmpv3_newpack(dev, mtu); |
| 502 | first = 1; | 526 | first = 1; |
| 503 | scount = 0; | 527 | scount = 0; |
| 504 | } | 528 | } |
| 505 | if (first) { | 529 | if (first) { |
| 506 | skb = add_grhead(skb, pmc, type, &pgr); | 530 | skb = add_grhead(skb, pmc, type, &pgr, mtu); |
| 507 | first = 0; | 531 | first = 0; |
| 508 | } | 532 | } |
| 509 | if (!skb) | 533 | if (!skb) |
| @@ -538,7 +562,7 @@ empty_source: | |||
| 538 | igmpv3_sendpack(skb); | 562 | igmpv3_sendpack(skb); |
| 539 | skb = NULL; /* add_grhead will get a new one */ | 563 | skb = NULL; /* add_grhead will get a new one */ |
| 540 | } | 564 | } |
| 541 | skb = add_grhead(skb, pmc, type, &pgr); | 565 | skb = add_grhead(skb, pmc, type, &pgr, mtu); |
| 542 | } | 566 | } |
| 543 | } | 567 | } |
| 544 | if (pgr) | 568 | if (pgr) |
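
The igmp.c change samples dev->mtu once per add_grec() call and threads the sample through add_grhead(), instead of re-reading a field that can change concurrently. The pattern, in isolation:

unsigned int mtu = READ_ONCE(dev->mtu);	/* one racy read */

if (mtu < IPV4_MIN_MTU)
	return skb;			/* reject implausible sample */

skb = igmpv3_newpack(dev, mtu);		/* every packet sized from it */
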
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index bb6239169b1a..9c1735632c8c 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
| @@ -266,7 +266,7 @@ static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi, | |||
| 266 | len = gre_hdr_len + sizeof(*ershdr); | 266 | len = gre_hdr_len + sizeof(*ershdr); |
| 267 | 267 | ||
| 268 | if (unlikely(!pskb_may_pull(skb, len))) | 268 | if (unlikely(!pskb_may_pull(skb, len))) |
| 269 | return -ENOMEM; | 269 | return PACKET_REJECT; |
| 270 | 270 | ||
| 271 | iph = ip_hdr(skb); | 271 | iph = ip_hdr(skb); |
| 272 | ershdr = (struct erspanhdr *)(skb->data + gre_hdr_len); | 272 | ershdr = (struct erspanhdr *)(skb->data + gre_hdr_len); |
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index fe6fee728ce4..5ddb1cb52bd4 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c | |||
| @@ -349,8 +349,8 @@ static int ip_tunnel_bind_dev(struct net_device *dev) | |||
| 349 | dev->needed_headroom = t_hlen + hlen; | 349 | dev->needed_headroom = t_hlen + hlen; |
| 350 | mtu -= (dev->hard_header_len + t_hlen); | 350 | mtu -= (dev->hard_header_len + t_hlen); |
| 351 | 351 | ||
| 352 | if (mtu < 68) | 352 | if (mtu < IPV4_MIN_MTU) |
| 353 | mtu = 68; | 353 | mtu = IPV4_MIN_MTU; |
| 354 | 354 | ||
| 355 | return mtu; | 355 | return mtu; |
| 356 | } | 356 | } |
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index f88221aebc9d..0c3c944a7b72 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c | |||
| @@ -373,7 +373,6 @@ static int mark_source_chains(const struct xt_table_info *newinfo, | |||
| 373 | if (!xt_find_jump_offset(offsets, newpos, | 373 | if (!xt_find_jump_offset(offsets, newpos, |
| 374 | newinfo->number)) | 374 | newinfo->number)) |
| 375 | return 0; | 375 | return 0; |
| 376 | e = entry0 + newpos; | ||
| 377 | } else { | 376 | } else { |
| 378 | /* ... this is a fallthru */ | 377 | /* ... this is a fallthru */ |
| 379 | newpos = pos + e->next_offset; | 378 | newpos = pos + e->next_offset; |
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index 4cbe5e80f3bf..2e0d339028bb 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c | |||
| @@ -439,7 +439,6 @@ mark_source_chains(const struct xt_table_info *newinfo, | |||
| 439 | if (!xt_find_jump_offset(offsets, newpos, | 439 | if (!xt_find_jump_offset(offsets, newpos, |
| 440 | newinfo->number)) | 440 | newinfo->number)) |
| 441 | return 0; | 441 | return 0; |
| 442 | e = entry0 + newpos; | ||
| 443 | } else { | 442 | } else { |
| 444 | /* ... this is a fallthru */ | 443 | /* ... this is a fallthru */ |
| 445 | newpos = pos + e->next_offset; | 444 | newpos = pos + e->next_offset; |
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c index 17b4ca562944..69060e3abe85 100644 --- a/net/ipv4/netfilter/ipt_CLUSTERIP.c +++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c | |||
| @@ -813,12 +813,13 @@ static int clusterip_net_init(struct net *net) | |||
| 813 | 813 | ||
| 814 | static void clusterip_net_exit(struct net *net) | 814 | static void clusterip_net_exit(struct net *net) |
| 815 | { | 815 | { |
| 816 | #ifdef CONFIG_PROC_FS | ||
| 817 | struct clusterip_net *cn = net_generic(net, clusterip_net_id); | 816 | struct clusterip_net *cn = net_generic(net, clusterip_net_id); |
| 817 | #ifdef CONFIG_PROC_FS | ||
| 818 | proc_remove(cn->procdir); | 818 | proc_remove(cn->procdir); |
| 819 | cn->procdir = NULL; | 819 | cn->procdir = NULL; |
| 820 | #endif | 820 | #endif |
| 821 | nf_unregister_net_hook(net, &cip_arp_ops); | 821 | nf_unregister_net_hook(net, &cip_arp_ops); |
| 822 | WARN_ON_ONCE(!list_empty(&cn->configs)); | ||
| 822 | } | 823 | } |
| 823 | 824 | ||
| 824 | static struct pernet_operations clusterip_net_ops = { | 825 | static struct pernet_operations clusterip_net_ops = { |
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 33b70bfd1122..125c1eab3eaa 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c | |||
| @@ -513,11 +513,16 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) | |||
| 513 | int err; | 513 | int err; |
| 514 | struct ip_options_data opt_copy; | 514 | struct ip_options_data opt_copy; |
| 515 | struct raw_frag_vec rfv; | 515 | struct raw_frag_vec rfv; |
| 516 | int hdrincl; | ||
| 516 | 517 | ||
| 517 | err = -EMSGSIZE; | 518 | err = -EMSGSIZE; |
| 518 | if (len > 0xFFFF) | 519 | if (len > 0xFFFF) |
| 519 | goto out; | 520 | goto out; |
| 520 | 521 | ||
| 522 | /* hdrincl should be READ_ONCE(inet->hdrincl) | ||
| 523 | * but READ_ONCE() doesn't work with bit fields | ||
| 524 | */ | ||
| 525 | hdrincl = inet->hdrincl; | ||
| 521 | /* | 526 | /* |
| 522 | * Check the flags. | 527 | * Check the flags. |
| 523 | */ | 528 | */ |
| @@ -593,7 +598,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) | |||
| 593 | /* Linux does not mangle headers on raw sockets, | 598 | /* Linux does not mangle headers on raw sockets, |
| 594 | * so that IP options + IP_HDRINCL is non-sense. | 599 | * so that IP options + IP_HDRINCL is non-sense. |
| 595 | */ | 600 | */ |
| 596 | if (inet->hdrincl) | 601 | if (hdrincl) |
| 597 | goto done; | 602 | goto done; |
| 598 | if (ipc.opt->opt.srr) { | 603 | if (ipc.opt->opt.srr) { |
| 599 | if (!daddr) | 604 | if (!daddr) |
| @@ -615,12 +620,12 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) | |||
| 615 | 620 | ||
| 616 | flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos, | 621 | flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos, |
| 617 | RT_SCOPE_UNIVERSE, | 622 | RT_SCOPE_UNIVERSE, |
| 618 | inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol, | 623 | hdrincl ? IPPROTO_RAW : sk->sk_protocol, |
| 619 | inet_sk_flowi_flags(sk) | | 624 | inet_sk_flowi_flags(sk) | |
| 620 | (inet->hdrincl ? FLOWI_FLAG_KNOWN_NH : 0), | 625 | (hdrincl ? FLOWI_FLAG_KNOWN_NH : 0), |
| 621 | daddr, saddr, 0, 0, sk->sk_uid); | 626 | daddr, saddr, 0, 0, sk->sk_uid); |
| 622 | 627 | ||
| 623 | if (!inet->hdrincl) { | 628 | if (!hdrincl) { |
| 624 | rfv.msg = msg; | 629 | rfv.msg = msg; |
| 625 | rfv.hlen = 0; | 630 | rfv.hlen = 0; |
| 626 | 631 | ||
| @@ -645,7 +650,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) | |||
| 645 | goto do_confirm; | 650 | goto do_confirm; |
| 646 | back_from_confirm: | 651 | back_from_confirm: |
| 647 | 652 | ||
| 648 | if (inet->hdrincl) | 653 | if (hdrincl) |
| 649 | err = raw_send_hdrinc(sk, &fl4, msg, len, | 654 | err = raw_send_hdrinc(sk, &fl4, msg, len, |
| 650 | &rt, msg->msg_flags, &ipc.sockc); | 655 | &rt, msg->msg_flags, &ipc.sockc); |
| 651 | 656 | ||
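
The raw_sendmsg() comment is worth unpacking: READ_ONCE(x) works by taking the address of x for a volatile access, and C forbids taking the address of a bit-field such as inet->hdrincl, so the function instead copies it into a plain int once and uses that snapshot throughout:

int hdrincl = inet->hdrincl;	/* single read; stable for the whole call */
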
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 9550cc42de2d..45f750e85714 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
| @@ -508,9 +508,6 @@ static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep) | |||
| 508 | u32 new_sample = tp->rcv_rtt_est.rtt_us; | 508 | u32 new_sample = tp->rcv_rtt_est.rtt_us; |
| 509 | long m = sample; | 509 | long m = sample; |
| 510 | 510 | ||
| 511 | if (m == 0) | ||
| 512 | m = 1; | ||
| 513 | |||
| 514 | if (new_sample != 0) { | 511 | if (new_sample != 0) { |
| 515 | /* If we sample in larger samples in the non-timestamp | 512 | /* If we sample in larger samples in the non-timestamp |
| 516 | * case, we could grossly overestimate the RTT especially | 513 | * case, we could grossly overestimate the RTT especially |
| @@ -547,6 +544,8 @@ static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp) | |||
| 547 | if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq)) | 544 | if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq)) |
| 548 | return; | 545 | return; |
| 549 | delta_us = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcv_rtt_est.time); | 546 | delta_us = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcv_rtt_est.time); |
| 547 | if (!delta_us) | ||
| 548 | delta_us = 1; | ||
| 550 | tcp_rcv_rtt_update(tp, delta_us, 1); | 549 | tcp_rcv_rtt_update(tp, delta_us, 1); |
| 551 | 550 | ||
| 552 | new_measure: | 551 | new_measure: |
| @@ -563,8 +562,11 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk, | |||
| 563 | (TCP_SKB_CB(skb)->end_seq - | 562 | (TCP_SKB_CB(skb)->end_seq - |
| 564 | TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) { | 563 | TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) { |
| 565 | u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr; | 564 | u32 delta = tcp_time_stamp(tp) - tp->rx_opt.rcv_tsecr; |
| 566 | u32 delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ); | 565 | u32 delta_us; |
| 567 | 566 | ||
| 567 | if (!delta) | ||
| 568 | delta = 1; | ||
| 569 | delta_us = delta * (USEC_PER_SEC / TCP_TS_HZ); | ||
| 568 | tcp_rcv_rtt_update(tp, delta_us, 0); | 570 | tcp_rcv_rtt_update(tp, delta_us, 0); |
| 569 | } | 571 | } |
| 570 | } | 572 | } |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 77ea45da0fe9..94e28350f420 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
| @@ -848,7 +848,7 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb, | |||
| 848 | tcp_time_stamp_raw() + tcp_rsk(req)->ts_off, | 848 | tcp_time_stamp_raw() + tcp_rsk(req)->ts_off, |
| 849 | req->ts_recent, | 849 | req->ts_recent, |
| 850 | 0, | 850 | 0, |
| 851 | tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr, | 851 | tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->saddr, |
| 852 | AF_INET), | 852 | AF_INET), |
| 853 | inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0, | 853 | inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0, |
| 854 | ip_hdr(skb)->tos); | 854 | ip_hdr(skb)->tos); |
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 16df6dd44b98..968fda198376 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c | |||
| @@ -264,6 +264,7 @@ void tcp_delack_timer_handler(struct sock *sk) | |||
| 264 | icsk->icsk_ack.pingpong = 0; | 264 | icsk->icsk_ack.pingpong = 0; |
| 265 | icsk->icsk_ack.ato = TCP_ATO_MIN; | 265 | icsk->icsk_ack.ato = TCP_ATO_MIN; |
| 266 | } | 266 | } |
| 267 | tcp_mstamp_refresh(tcp_sk(sk)); | ||
| 267 | tcp_send_ack(sk); | 268 | tcp_send_ack(sk); |
| 268 | __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS); | 269 | __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS); |
| 269 | } | 270 | } |
| @@ -632,6 +633,7 @@ static void tcp_keepalive_timer (struct timer_list *t) | |||
| 632 | goto out; | 633 | goto out; |
| 633 | } | 634 | } |
| 634 | 635 | ||
| 636 | tcp_mstamp_refresh(tp); | ||
| 635 | if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) { | 637 | if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) { |
| 636 | if (tp->linger2 >= 0) { | 638 | if (tp->linger2 >= 0) { |
| 637 | const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN; | 639 | const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN; |
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index fc6d7d143f2c..844642682b83 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c | |||
| @@ -1682,16 +1682,16 @@ static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel) | |||
| 1682 | } | 1682 | } |
| 1683 | 1683 | ||
| 1684 | static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc, | 1684 | static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc, |
| 1685 | int type, struct mld2_grec **ppgr) | 1685 | int type, struct mld2_grec **ppgr, unsigned int mtu) |
| 1686 | { | 1686 | { |
| 1687 | struct net_device *dev = pmc->idev->dev; | ||
| 1688 | struct mld2_report *pmr; | 1687 | struct mld2_report *pmr; |
| 1689 | struct mld2_grec *pgr; | 1688 | struct mld2_grec *pgr; |
| 1690 | 1689 | ||
| 1691 | if (!skb) | 1690 | if (!skb) { |
| 1692 | skb = mld_newpack(pmc->idev, dev->mtu); | 1691 | skb = mld_newpack(pmc->idev, mtu); |
| 1693 | if (!skb) | 1692 | if (!skb) |
| 1694 | return NULL; | 1693 | return NULL; |
| 1694 | } | ||
| 1695 | pgr = skb_put(skb, sizeof(struct mld2_grec)); | 1695 | pgr = skb_put(skb, sizeof(struct mld2_grec)); |
| 1696 | pgr->grec_type = type; | 1696 | pgr->grec_type = type; |
| 1697 | pgr->grec_auxwords = 0; | 1697 | pgr->grec_auxwords = 0; |
| @@ -1714,10 +1714,15 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc, | |||
| 1714 | struct mld2_grec *pgr = NULL; | 1714 | struct mld2_grec *pgr = NULL; |
| 1715 | struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list; | 1715 | struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list; |
| 1716 | int scount, stotal, first, isquery, truncate; | 1716 | int scount, stotal, first, isquery, truncate; |
| 1717 | unsigned int mtu; | ||
| 1717 | 1718 | ||
| 1718 | if (pmc->mca_flags & MAF_NOREPORT) | 1719 | if (pmc->mca_flags & MAF_NOREPORT) |
| 1719 | return skb; | 1720 | return skb; |
| 1720 | 1721 | ||
| 1722 | mtu = READ_ONCE(dev->mtu); | ||
| 1723 | if (mtu < IPV6_MIN_MTU) | ||
| 1724 | return skb; | ||
| 1725 | |||
| 1721 | isquery = type == MLD2_MODE_IS_INCLUDE || | 1726 | isquery = type == MLD2_MODE_IS_INCLUDE || |
| 1722 | type == MLD2_MODE_IS_EXCLUDE; | 1727 | type == MLD2_MODE_IS_EXCLUDE; |
| 1723 | truncate = type == MLD2_MODE_IS_EXCLUDE || | 1728 | truncate = type == MLD2_MODE_IS_EXCLUDE || |
| @@ -1738,7 +1743,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc, | |||
| 1738 | AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) { | 1743 | AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) { |
| 1739 | if (skb) | 1744 | if (skb) |
| 1740 | mld_sendpack(skb); | 1745 | mld_sendpack(skb); |
| 1741 | skb = mld_newpack(idev, dev->mtu); | 1746 | skb = mld_newpack(idev, mtu); |
| 1742 | } | 1747 | } |
| 1743 | } | 1748 | } |
| 1744 | first = 1; | 1749 | first = 1; |
| @@ -1774,12 +1779,12 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc, | |||
| 1774 | pgr->grec_nsrcs = htons(scount); | 1779 | pgr->grec_nsrcs = htons(scount); |
| 1775 | if (skb) | 1780 | if (skb) |
| 1776 | mld_sendpack(skb); | 1781 | mld_sendpack(skb); |
| 1777 | skb = mld_newpack(idev, dev->mtu); | 1782 | skb = mld_newpack(idev, mtu); |
| 1778 | first = 1; | 1783 | first = 1; |
| 1779 | scount = 0; | 1784 | scount = 0; |
| 1780 | } | 1785 | } |
| 1781 | if (first) { | 1786 | if (first) { |
| 1782 | skb = add_grhead(skb, pmc, type, &pgr); | 1787 | skb = add_grhead(skb, pmc, type, &pgr, mtu); |
| 1783 | first = 0; | 1788 | first = 0; |
| 1784 | } | 1789 | } |
| 1785 | if (!skb) | 1790 | if (!skb) |
| @@ -1814,7 +1819,7 @@ empty_source: | |||
| 1814 | mld_sendpack(skb); | 1819 | mld_sendpack(skb); |
| 1815 | skb = NULL; /* add_grhead will get a new one */ | 1820 | skb = NULL; /* add_grhead will get a new one */ |
| 1816 | } | 1821 | } |
| 1817 | skb = add_grhead(skb, pmc, type, &pgr); | 1822 | skb = add_grhead(skb, pmc, type, &pgr, mtu); |
| 1818 | } | 1823 | } |
| 1819 | } | 1824 | } |
| 1820 | if (pgr) | 1825 | if (pgr) |
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index f06e25065a34..1d7ae9366335 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c | |||
| @@ -458,7 +458,6 @@ mark_source_chains(const struct xt_table_info *newinfo, | |||
| 458 | if (!xt_find_jump_offset(offsets, newpos, | 458 | if (!xt_find_jump_offset(offsets, newpos, |
| 459 | newinfo->number)) | 459 | newinfo->number)) |
| 460 | return 0; | 460 | return 0; |
| 461 | e = entry0 + newpos; | ||
| 462 | } else { | 461 | } else { |
| 463 | /* ... this is a fallthru */ | 462 | /* ... this is a fallthru */ |
| 464 | newpos = pos + e->next_offset; | 463 | newpos = pos + e->next_offset; |
diff --git a/net/ipv6/netfilter/ip6t_MASQUERADE.c b/net/ipv6/netfilter/ip6t_MASQUERADE.c index 2b1a15846f9a..92c0047e7e33 100644 --- a/net/ipv6/netfilter/ip6t_MASQUERADE.c +++ b/net/ipv6/netfilter/ip6t_MASQUERADE.c | |||
| @@ -33,13 +33,19 @@ static int masquerade_tg6_checkentry(const struct xt_tgchk_param *par) | |||
| 33 | 33 | ||
| 34 | if (range->flags & NF_NAT_RANGE_MAP_IPS) | 34 | if (range->flags & NF_NAT_RANGE_MAP_IPS) |
| 35 | return -EINVAL; | 35 | return -EINVAL; |
| 36 | return 0; | 36 | return nf_ct_netns_get(par->net, par->family); |
| 37 | } | ||
| 38 | |||
| 39 | static void masquerade_tg6_destroy(const struct xt_tgdtor_param *par) | ||
| 40 | { | ||
| 41 | nf_ct_netns_put(par->net, par->family); | ||
| 37 | } | 42 | } |
| 38 | 43 | ||
| 39 | static struct xt_target masquerade_tg6_reg __read_mostly = { | 44 | static struct xt_target masquerade_tg6_reg __read_mostly = { |
| 40 | .name = "MASQUERADE", | 45 | .name = "MASQUERADE", |
| 41 | .family = NFPROTO_IPV6, | 46 | .family = NFPROTO_IPV6, |
| 42 | .checkentry = masquerade_tg6_checkentry, | 47 | .checkentry = masquerade_tg6_checkentry, |
| 48 | .destroy = masquerade_tg6_destroy, | ||
| 43 | .target = masquerade_tg6, | 49 | .target = masquerade_tg6, |
| 44 | .targetsize = sizeof(struct nf_nat_range), | 50 | .targetsize = sizeof(struct nf_nat_range), |
| 45 | .table = "nat", | 51 | .table = "nat", |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 1f04ec0e4a7a..7178476b3d2f 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
| @@ -994,7 +994,7 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb, | |||
| 994 | req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale, | 994 | req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale, |
| 995 | tcp_time_stamp_raw() + tcp_rsk(req)->ts_off, | 995 | tcp_time_stamp_raw() + tcp_rsk(req)->ts_off, |
| 996 | req->ts_recent, sk->sk_bound_dev_if, | 996 | req->ts_recent, sk->sk_bound_dev_if, |
| 997 | tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr), | 997 | tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr), |
| 998 | 0, 0); | 998 | 0, 0); |
| 999 | } | 999 | } |
| 1000 | 1000 | ||
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c index 167f83b853e6..1621b6ab17ba 100644 --- a/net/mac80211/ht.c +++ b/net/mac80211/ht.c | |||
| @@ -291,16 +291,15 @@ void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta, | |||
| 291 | int i; | 291 | int i; |
| 292 | 292 | ||
| 293 | mutex_lock(&sta->ampdu_mlme.mtx); | 293 | mutex_lock(&sta->ampdu_mlme.mtx); |
| 294 | for (i = 0; i < IEEE80211_NUM_TIDS; i++) { | 294 | for (i = 0; i < IEEE80211_NUM_TIDS; i++) |
| 295 | ___ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT, | 295 | ___ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT, |
| 296 | WLAN_REASON_QSTA_LEAVE_QBSS, | 296 | WLAN_REASON_QSTA_LEAVE_QBSS, |
| 297 | reason != AGG_STOP_DESTROY_STA && | 297 | reason != AGG_STOP_DESTROY_STA && |
| 298 | reason != AGG_STOP_PEER_REQUEST); | 298 | reason != AGG_STOP_PEER_REQUEST); |
| 299 | } | ||
| 300 | mutex_unlock(&sta->ampdu_mlme.mtx); | ||
| 301 | 299 | ||
| 302 | for (i = 0; i < IEEE80211_NUM_TIDS; i++) | 300 | for (i = 0; i < IEEE80211_NUM_TIDS; i++) |
| 303 | ___ieee80211_stop_tx_ba_session(sta, i, reason); | 301 | ___ieee80211_stop_tx_ba_session(sta, i, reason); |
| 302 | mutex_unlock(&sta->ampdu_mlme.mtx); | ||
| 304 | 303 | ||
| 305 | /* stopping might queue the work again - so cancel only afterwards */ | 304 | /* stopping might queue the work again - so cancel only afterwards */ |
| 306 | cancel_work_sync(&sta->ampdu_mlme.work); | 305 | cancel_work_sync(&sta->ampdu_mlme.work); |
diff --git a/net/netfilter/nf_conntrack_h323_asn1.c b/net/netfilter/nf_conntrack_h323_asn1.c index cf1bf2605c10..dc6347342e34 100644 --- a/net/netfilter/nf_conntrack_h323_asn1.c +++ b/net/netfilter/nf_conntrack_h323_asn1.c | |||
| @@ -103,7 +103,6 @@ struct bitstr { | |||
| 103 | #define INC_BIT(bs) if((++(bs)->bit)>7){(bs)->cur++;(bs)->bit=0;} | 103 | #define INC_BIT(bs) if((++(bs)->bit)>7){(bs)->cur++;(bs)->bit=0;} |
| 104 | #define INC_BITS(bs,b) if(((bs)->bit+=(b))>7){(bs)->cur+=(bs)->bit>>3;(bs)->bit&=7;} | 104 | #define INC_BITS(bs,b) if(((bs)->bit+=(b))>7){(bs)->cur+=(bs)->bit>>3;(bs)->bit&=7;} |
| 105 | #define BYTE_ALIGN(bs) if((bs)->bit){(bs)->cur++;(bs)->bit=0;} | 105 | #define BYTE_ALIGN(bs) if((bs)->bit){(bs)->cur++;(bs)->bit=0;} |
| 106 | #define CHECK_BOUND(bs,n) if((bs)->cur+(n)>(bs)->end)return(H323_ERROR_BOUND) | ||
| 107 | static unsigned int get_len(struct bitstr *bs); | 106 | static unsigned int get_len(struct bitstr *bs); |
| 108 | static unsigned int get_bit(struct bitstr *bs); | 107 | static unsigned int get_bit(struct bitstr *bs); |
| 109 | static unsigned int get_bits(struct bitstr *bs, unsigned int b); | 108 | static unsigned int get_bits(struct bitstr *bs, unsigned int b); |
| @@ -165,6 +164,19 @@ static unsigned int get_len(struct bitstr *bs) | |||
| 165 | return v; | 164 | return v; |
| 166 | } | 165 | } |
| 167 | 166 | ||
| 167 | static int nf_h323_error_boundary(struct bitstr *bs, size_t bytes, size_t bits) | ||
| 168 | { | ||
| 169 | bits += bs->bit; | ||
| 170 | bytes += bits / BITS_PER_BYTE; | ||
| 171 | if (bits % BITS_PER_BYTE > 0) | ||
| 172 | bytes++; | ||
| 173 | |||
| 174 | if (*bs->cur + bytes > *bs->end) | ||
| 175 | return 1; | ||
| 176 | |||
| 177 | return 0; | ||
| 178 | } | ||
| 179 | |||
| 168 | /****************************************************************************/ | 180 | /****************************************************************************/ |
| 169 | static unsigned int get_bit(struct bitstr *bs) | 181 | static unsigned int get_bit(struct bitstr *bs) |
| 170 | { | 182 | { |
| @@ -279,8 +291,8 @@ static int decode_bool(struct bitstr *bs, const struct field_t *f, | |||
| 279 | PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); | 291 | PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); |
| 280 | 292 | ||
| 281 | INC_BIT(bs); | 293 | INC_BIT(bs); |
| 282 | 294 | if (nf_h323_error_boundary(bs, 0, 0)) | |
| 283 | CHECK_BOUND(bs, 0); | 295 | return H323_ERROR_BOUND; |
| 284 | return H323_ERROR_NONE; | 296 | return H323_ERROR_NONE; |
| 285 | } | 297 | } |
| 286 | 298 | ||
| @@ -293,11 +305,14 @@ static int decode_oid(struct bitstr *bs, const struct field_t *f, | |||
| 293 | PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); | 305 | PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); |
| 294 | 306 | ||
| 295 | BYTE_ALIGN(bs); | 307 | BYTE_ALIGN(bs); |
| 296 | CHECK_BOUND(bs, 1); | 308 | if (nf_h323_error_boundary(bs, 1, 0)) |
| 309 | return H323_ERROR_BOUND; | ||
| 310 | |||
| 297 | len = *bs->cur++; | 311 | len = *bs->cur++; |
| 298 | bs->cur += len; | 312 | bs->cur += len; |
| 313 | if (nf_h323_error_boundary(bs, 0, 0)) | ||
| 314 | return H323_ERROR_BOUND; | ||
| 299 | 315 | ||
| 300 | CHECK_BOUND(bs, 0); | ||
| 301 | return H323_ERROR_NONE; | 316 | return H323_ERROR_NONE; |
| 302 | } | 317 | } |
| 303 | 318 | ||
| @@ -319,6 +334,8 @@ static int decode_int(struct bitstr *bs, const struct field_t *f, | |||
| 319 | bs->cur += 2; | 334 | bs->cur += 2; |
| 320 | break; | 335 | break; |
| 321 | case CONS: /* 64K < Range < 4G */ | 336 | case CONS: /* 64K < Range < 4G */ |
| 337 | if (nf_h323_error_boundary(bs, 0, 2)) | ||
| 338 | return H323_ERROR_BOUND; | ||
| 322 | len = get_bits(bs, 2) + 1; | 339 | len = get_bits(bs, 2) + 1; |
| 323 | BYTE_ALIGN(bs); | 340 | BYTE_ALIGN(bs); |
| 324 | if (base && (f->attr & DECODE)) { /* timeToLive */ | 341 | if (base && (f->attr & DECODE)) { /* timeToLive */ |
| @@ -330,7 +347,8 @@ static int decode_int(struct bitstr *bs, const struct field_t *f, | |||
| 330 | break; | 347 | break; |
| 331 | case UNCO: | 348 | case UNCO: |
| 332 | BYTE_ALIGN(bs); | 349 | BYTE_ALIGN(bs); |
| 333 | CHECK_BOUND(bs, 2); | 350 | if (nf_h323_error_boundary(bs, 2, 0)) |
| 351 | return H323_ERROR_BOUND; | ||
| 334 | len = get_len(bs); | 352 | len = get_len(bs); |
| 335 | bs->cur += len; | 353 | bs->cur += len; |
| 336 | break; | 354 | break; |
| @@ -341,7 +359,8 @@ static int decode_int(struct bitstr *bs, const struct field_t *f, | |||
| 341 | 359 | ||
| 342 | PRINT("\n"); | 360 | PRINT("\n"); |
| 343 | 361 | ||
| 344 | CHECK_BOUND(bs, 0); | 362 | if (nf_h323_error_boundary(bs, 0, 0)) |
| 363 | return H323_ERROR_BOUND; | ||
| 345 | return H323_ERROR_NONE; | 364 | return H323_ERROR_NONE; |
| 346 | } | 365 | } |
| 347 | 366 | ||
| @@ -357,7 +376,8 @@ static int decode_enum(struct bitstr *bs, const struct field_t *f, | |||
| 357 | INC_BITS(bs, f->sz); | 376 | INC_BITS(bs, f->sz); |
| 358 | } | 377 | } |
| 359 | 378 | ||
| 360 | CHECK_BOUND(bs, 0); | 379 | if (nf_h323_error_boundary(bs, 0, 0)) |
| 380 | return H323_ERROR_BOUND; | ||
| 361 | return H323_ERROR_NONE; | 381 | return H323_ERROR_NONE; |
| 362 | } | 382 | } |
| 363 | 383 | ||
| @@ -375,12 +395,14 @@ static int decode_bitstr(struct bitstr *bs, const struct field_t *f, | |||
| 375 | len = f->lb; | 395 | len = f->lb; |
| 376 | break; | 396 | break; |
| 377 | case WORD: /* 2-byte length */ | 397 | case WORD: /* 2-byte length */ |
| 378 | CHECK_BOUND(bs, 2); | 398 | if (nf_h323_error_boundary(bs, 2, 0)) |
| 399 | return H323_ERROR_BOUND; | ||
| 379 | len = (*bs->cur++) << 8; | 400 | len = (*bs->cur++) << 8; |
| 380 | len += (*bs->cur++) + f->lb; | 401 | len += (*bs->cur++) + f->lb; |
| 381 | break; | 402 | break; |
| 382 | case SEMI: | 403 | case SEMI: |
| 383 | CHECK_BOUND(bs, 2); | 404 | if (nf_h323_error_boundary(bs, 2, 0)) |
| 405 | return H323_ERROR_BOUND; | ||
| 384 | len = get_len(bs); | 406 | len = get_len(bs); |
| 385 | break; | 407 | break; |
| 386 | default: | 408 | default: |
| @@ -391,7 +413,8 @@ static int decode_bitstr(struct bitstr *bs, const struct field_t *f, | |||
| 391 | bs->cur += len >> 3; | 413 | bs->cur += len >> 3; |
| 392 | bs->bit = len & 7; | 414 | bs->bit = len & 7; |
| 393 | 415 | ||
| 394 | CHECK_BOUND(bs, 0); | 416 | if (nf_h323_error_boundary(bs, 0, 0)) |
| 417 | return H323_ERROR_BOUND; | ||
| 395 | return H323_ERROR_NONE; | 418 | return H323_ERROR_NONE; |
| 396 | } | 419 | } |
| 397 | 420 | ||
| @@ -404,12 +427,15 @@ static int decode_numstr(struct bitstr *bs, const struct field_t *f, | |||
| 404 | PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); | 427 | PRINT("%*.s%s\n", level * TAB_SIZE, " ", f->name); |
| 405 | 428 | ||
| 406 | /* 2 <= Range <= 255 */ | 429 | /* 2 <= Range <= 255 */ |
| 430 | if (nf_h323_error_boundary(bs, 0, f->sz)) | ||
| 431 | return H323_ERROR_BOUND; | ||
| 407 | len = get_bits(bs, f->sz) + f->lb; | 432 | len = get_bits(bs, f->sz) + f->lb; |
| 408 | 433 | ||
| 409 | BYTE_ALIGN(bs); | 434 | BYTE_ALIGN(bs); |
| 410 | INC_BITS(bs, (len << 2)); | 435 | INC_BITS(bs, (len << 2)); |
| 411 | 436 | ||
| 412 | CHECK_BOUND(bs, 0); | 437 | if (nf_h323_error_boundary(bs, 0, 0)) |
| 438 | return H323_ERROR_BOUND; | ||
| 413 | return H323_ERROR_NONE; | 439 | return H323_ERROR_NONE; |
| 414 | } | 440 | } |
| 415 | 441 | ||
| @@ -440,15 +466,19 @@ static int decode_octstr(struct bitstr *bs, const struct field_t *f, | |||
| 440 | break; | 466 | break; |
| 441 | case BYTE: /* Range == 256 */ | 467 | case BYTE: /* Range == 256 */ |
| 442 | BYTE_ALIGN(bs); | 468 | BYTE_ALIGN(bs); |
| 443 | CHECK_BOUND(bs, 1); | 469 | if (nf_h323_error_boundary(bs, 1, 0)) |
| 470 | return H323_ERROR_BOUND; | ||
| 444 | len = (*bs->cur++) + f->lb; | 471 | len = (*bs->cur++) + f->lb; |
| 445 | break; | 472 | break; |
| 446 | case SEMI: | 473 | case SEMI: |
| 447 | BYTE_ALIGN(bs); | 474 | BYTE_ALIGN(bs); |
| 448 | CHECK_BOUND(bs, 2); | 475 | if (nf_h323_error_boundary(bs, 2, 0)) |
| 476 | return H323_ERROR_BOUND; | ||
| 449 | len = get_len(bs) + f->lb; | 477 | len = get_len(bs) + f->lb; |
| 450 | break; | 478 | break; |
| 451 | default: /* 2 <= Range <= 255 */ | 479 | default: /* 2 <= Range <= 255 */ |
| 480 | if (nf_h323_error_boundary(bs, 0, f->sz)) | ||
| 481 | return H323_ERROR_BOUND; | ||
| 452 | len = get_bits(bs, f->sz) + f->lb; | 482 | len = get_bits(bs, f->sz) + f->lb; |
| 453 | BYTE_ALIGN(bs); | 483 | BYTE_ALIGN(bs); |
| 454 | break; | 484 | break; |
| @@ -458,7 +488,8 @@ static int decode_octstr(struct bitstr *bs, const struct field_t *f, | |||
| 458 | 488 | ||
| 459 | PRINT("\n"); | 489 | PRINT("\n"); |
| 460 | 490 | ||
| 461 | CHECK_BOUND(bs, 0); | 491 | if (nf_h323_error_boundary(bs, 0, 0)) |
| 492 | return H323_ERROR_BOUND; | ||
| 462 | return H323_ERROR_NONE; | 493 | return H323_ERROR_NONE; |
| 463 | } | 494 | } |
| 464 | 495 | ||
| @@ -473,10 +504,13 @@ static int decode_bmpstr(struct bitstr *bs, const struct field_t *f, | |||
| 473 | switch (f->sz) { | 504 | switch (f->sz) { |
| 474 | case BYTE: /* Range == 256 */ | 505 | case BYTE: /* Range == 256 */ |
| 475 | BYTE_ALIGN(bs); | 506 | BYTE_ALIGN(bs); |
| 476 | CHECK_BOUND(bs, 1); | 507 | if (nf_h323_error_boundary(bs, 1, 0)) |
| 508 | return H323_ERROR_BOUND; | ||
| 477 | len = (*bs->cur++) + f->lb; | 509 | len = (*bs->cur++) + f->lb; |
| 478 | break; | 510 | break; |
| 479 | default: /* 2 <= Range <= 255 */ | 511 | default: /* 2 <= Range <= 255 */ |
| 512 | if (nf_h323_error_boundary(bs, 0, f->sz)) | ||
| 513 | return H323_ERROR_BOUND; | ||
| 480 | len = get_bits(bs, f->sz) + f->lb; | 514 | len = get_bits(bs, f->sz) + f->lb; |
| 481 | BYTE_ALIGN(bs); | 515 | BYTE_ALIGN(bs); |
| 482 | break; | 516 | break; |
| @@ -484,7 +518,8 @@ static int decode_bmpstr(struct bitstr *bs, const struct field_t *f, | |||
| 484 | 518 | ||
| 485 | bs->cur += len << 1; | 519 | bs->cur += len << 1; |
| 486 | 520 | ||
| 487 | CHECK_BOUND(bs, 0); | 521 | if (nf_h323_error_boundary(bs, 0, 0)) |
| 522 | return H323_ERROR_BOUND; | ||
| 488 | return H323_ERROR_NONE; | 523 | return H323_ERROR_NONE; |
| 489 | } | 524 | } |
| 490 | 525 | ||
| @@ -503,9 +538,13 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f, | |||
| 503 | base = (base && (f->attr & DECODE)) ? base + f->offset : NULL; | 538 | base = (base && (f->attr & DECODE)) ? base + f->offset : NULL; |
| 504 | 539 | ||
| 505 | /* Extensible? */ | 540 | /* Extensible? */ |
| 541 | if (nf_h323_error_boundary(bs, 0, 1)) | ||
| 542 | return H323_ERROR_BOUND; | ||
| 506 | ext = (f->attr & EXT) ? get_bit(bs) : 0; | 543 | ext = (f->attr & EXT) ? get_bit(bs) : 0; |
| 507 | 544 | ||
| 508 | /* Get fields bitmap */ | 545 | /* Get fields bitmap */ |
| 546 | if (nf_h323_error_boundary(bs, 0, f->sz)) | ||
| 547 | return H323_ERROR_BOUND; | ||
| 509 | bmp = get_bitmap(bs, f->sz); | 548 | bmp = get_bitmap(bs, f->sz); |
| 510 | if (base) | 549 | if (base) |
| 511 | *(unsigned int *)base = bmp; | 550 | *(unsigned int *)base = bmp; |
| @@ -525,9 +564,11 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f, | |||
| 525 | 564 | ||
| 526 | /* Decode */ | 565 | /* Decode */ |
| 527 | if (son->attr & OPEN) { /* Open field */ | 566 | if (son->attr & OPEN) { /* Open field */ |
| 528 | CHECK_BOUND(bs, 2); | 567 | if (nf_h323_error_boundary(bs, 2, 0)) |
| 568 | return H323_ERROR_BOUND; | ||
| 529 | len = get_len(bs); | 569 | len = get_len(bs); |
| 530 | CHECK_BOUND(bs, len); | 570 | if (nf_h323_error_boundary(bs, len, 0)) |
| 571 | return H323_ERROR_BOUND; | ||
| 531 | if (!base || !(son->attr & DECODE)) { | 572 | if (!base || !(son->attr & DECODE)) { |
| 532 | PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, | 573 | PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, |
| 533 | " ", son->name); | 574 | " ", son->name); |
| @@ -555,8 +596,11 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f, | |||
| 555 | return H323_ERROR_NONE; | 596 | return H323_ERROR_NONE; |
| 556 | 597 | ||
| 557 | /* Get the extension bitmap */ | 598 | /* Get the extension bitmap */ |
| 599 | if (nf_h323_error_boundary(bs, 0, 7)) | ||
| 600 | return H323_ERROR_BOUND; | ||
| 558 | bmp2_len = get_bits(bs, 7) + 1; | 601 | bmp2_len = get_bits(bs, 7) + 1; |
| 559 | CHECK_BOUND(bs, (bmp2_len + 7) >> 3); | 602 | if (nf_h323_error_boundary(bs, 0, bmp2_len)) |
| 603 | return H323_ERROR_BOUND; | ||
| 560 | bmp2 = get_bitmap(bs, bmp2_len); | 604 | bmp2 = get_bitmap(bs, bmp2_len); |
| 561 | bmp |= bmp2 >> f->sz; | 605 | bmp |= bmp2 >> f->sz; |
| 562 | if (base) | 606 | if (base) |
| @@ -567,9 +611,11 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f, | |||
| 567 | for (opt = 0; opt < bmp2_len; opt++, i++, son++) { | 611 | for (opt = 0; opt < bmp2_len; opt++, i++, son++) { |
| 568 | /* Check Range */ | 612 | /* Check Range */ |
| 569 | if (i >= f->ub) { /* Newer Version? */ | 613 | if (i >= f->ub) { /* Newer Version? */ |
| 570 | CHECK_BOUND(bs, 2); | 614 | if (nf_h323_error_boundary(bs, 2, 0)) |
| 615 | return H323_ERROR_BOUND; | ||
| 571 | len = get_len(bs); | 616 | len = get_len(bs); |
| 572 | CHECK_BOUND(bs, len); | 617 | if (nf_h323_error_boundary(bs, len, 0)) |
| 618 | return H323_ERROR_BOUND; | ||
| 573 | bs->cur += len; | 619 | bs->cur += len; |
| 574 | continue; | 620 | continue; |
| 575 | } | 621 | } |
| @@ -583,9 +629,11 @@ static int decode_seq(struct bitstr *bs, const struct field_t *f, | |||
| 583 | if (!((0x80000000 >> opt) & bmp2)) /* Not present */ | 629 | if (!((0x80000000 >> opt) & bmp2)) /* Not present */ |
| 584 | continue; | 630 | continue; |
| 585 | 631 | ||
| 586 | CHECK_BOUND(bs, 2); | 632 | if (nf_h323_error_boundary(bs, 2, 0)) |
| 633 | return H323_ERROR_BOUND; | ||
| 587 | len = get_len(bs); | 634 | len = get_len(bs); |
| 588 | CHECK_BOUND(bs, len); | 635 | if (nf_h323_error_boundary(bs, len, 0)) |
| 636 | return H323_ERROR_BOUND; | ||
| 589 | if (!base || !(son->attr & DECODE)) { | 637 | if (!base || !(son->attr & DECODE)) { |
| 590 | PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, " ", | 638 | PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, " ", |
| 591 | son->name); | 639 | son->name); |
| @@ -623,22 +671,27 @@ static int decode_seqof(struct bitstr *bs, const struct field_t *f, | |||
| 623 | switch (f->sz) { | 671 | switch (f->sz) { |
| 624 | case BYTE: | 672 | case BYTE: |
| 625 | BYTE_ALIGN(bs); | 673 | BYTE_ALIGN(bs); |
| 626 | CHECK_BOUND(bs, 1); | 674 | if (nf_h323_error_boundary(bs, 1, 0)) |
| 675 | return H323_ERROR_BOUND; | ||
| 627 | count = *bs->cur++; | 676 | count = *bs->cur++; |
| 628 | break; | 677 | break; |
| 629 | case WORD: | 678 | case WORD: |
| 630 | BYTE_ALIGN(bs); | 679 | BYTE_ALIGN(bs); |
| 631 | CHECK_BOUND(bs, 2); | 680 | if (nf_h323_error_boundary(bs, 2, 0)) |
| 681 | return H323_ERROR_BOUND; | ||
| 632 | count = *bs->cur++; | 682 | count = *bs->cur++; |
| 633 | count <<= 8; | 683 | count <<= 8; |
| 634 | count += *bs->cur++; | 684 | count += *bs->cur++; |
| 635 | break; | 685 | break; |
| 636 | case SEMI: | 686 | case SEMI: |
| 637 | BYTE_ALIGN(bs); | 687 | BYTE_ALIGN(bs); |
| 638 | CHECK_BOUND(bs, 2); | 688 | if (nf_h323_error_boundary(bs, 2, 0)) |
| 689 | return H323_ERROR_BOUND; | ||
| 639 | count = get_len(bs); | 690 | count = get_len(bs); |
| 640 | break; | 691 | break; |
| 641 | default: | 692 | default: |
| 693 | if (nf_h323_error_boundary(bs, 0, f->sz)) | ||
| 694 | return H323_ERROR_BOUND; | ||
| 642 | count = get_bits(bs, f->sz); | 695 | count = get_bits(bs, f->sz); |
| 643 | break; | 696 | break; |
| 644 | } | 697 | } |
| @@ -658,8 +711,11 @@ static int decode_seqof(struct bitstr *bs, const struct field_t *f, | |||
| 658 | for (i = 0; i < count; i++) { | 711 | for (i = 0; i < count; i++) { |
| 659 | if (son->attr & OPEN) { | 712 | if (son->attr & OPEN) { |
| 660 | BYTE_ALIGN(bs); | 713 | BYTE_ALIGN(bs); |
| 714 | if (nf_h323_error_boundary(bs, 2, 0)) | ||
| 715 | return H323_ERROR_BOUND; | ||
| 661 | len = get_len(bs); | 716 | len = get_len(bs); |
| 662 | CHECK_BOUND(bs, len); | 717 | if (nf_h323_error_boundary(bs, len, 0)) |
| 718 | return H323_ERROR_BOUND; | ||
| 663 | if (!base || !(son->attr & DECODE)) { | 719 | if (!base || !(son->attr & DECODE)) { |
| 664 | PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, | 720 | PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, |
| 665 | " ", son->name); | 721 | " ", son->name); |
| @@ -710,11 +766,17 @@ static int decode_choice(struct bitstr *bs, const struct field_t *f, | |||
| 710 | base = (base && (f->attr & DECODE)) ? base + f->offset : NULL; | 766 | base = (base && (f->attr & DECODE)) ? base + f->offset : NULL; |
| 711 | 767 | ||
| 712 | /* Decode the choice index number */ | 768 | /* Decode the choice index number */ |
| 769 | if (nf_h323_error_boundary(bs, 0, 1)) | ||
| 770 | return H323_ERROR_BOUND; | ||
| 713 | if ((f->attr & EXT) && get_bit(bs)) { | 771 | if ((f->attr & EXT) && get_bit(bs)) { |
| 714 | ext = 1; | 772 | ext = 1; |
| 773 | if (nf_h323_error_boundary(bs, 0, 7)) | ||
| 774 | return H323_ERROR_BOUND; | ||
| 715 | type = get_bits(bs, 7) + f->lb; | 775 | type = get_bits(bs, 7) + f->lb; |
| 716 | } else { | 776 | } else { |
| 717 | ext = 0; | 777 | ext = 0; |
| 778 | if (nf_h323_error_boundary(bs, 0, f->sz)) | ||
| 779 | return H323_ERROR_BOUND; | ||
| 718 | type = get_bits(bs, f->sz); | 780 | type = get_bits(bs, f->sz); |
| 719 | if (type >= f->lb) | 781 | if (type >= f->lb) |
| 720 | return H323_ERROR_RANGE; | 782 | return H323_ERROR_RANGE; |
| @@ -727,8 +789,11 @@ static int decode_choice(struct bitstr *bs, const struct field_t *f, | |||
| 727 | /* Check Range */ | 789 | /* Check Range */ |
| 728 | if (type >= f->ub) { /* Newer version? */ | 790 | if (type >= f->ub) { /* Newer version? */ |
| 729 | BYTE_ALIGN(bs); | 791 | BYTE_ALIGN(bs); |
| 792 | if (nf_h323_error_boundary(bs, 2, 0)) | ||
| 793 | return H323_ERROR_BOUND; | ||
| 730 | len = get_len(bs); | 794 | len = get_len(bs); |
| 731 | CHECK_BOUND(bs, len); | 795 | if (nf_h323_error_boundary(bs, len, 0)) |
| 796 | return H323_ERROR_BOUND; | ||
| 732 | bs->cur += len; | 797 | bs->cur += len; |
| 733 | return H323_ERROR_NONE; | 798 | return H323_ERROR_NONE; |
| 734 | } | 799 | } |
| @@ -742,8 +807,11 @@ static int decode_choice(struct bitstr *bs, const struct field_t *f, | |||
| 742 | 807 | ||
| 743 | if (ext || (son->attr & OPEN)) { | 808 | if (ext || (son->attr & OPEN)) { |
| 744 | BYTE_ALIGN(bs); | 809 | BYTE_ALIGN(bs); |
| 810 | if (nf_h323_error_boundary(bs, 2, 0)) | ||
| 811 | return H323_ERROR_BOUND; | ||
| 745 | len = get_len(bs); | 812 | len = get_len(bs); |
| 746 | CHECK_BOUND(bs, len); | 813 | if (nf_h323_error_boundary(bs, len, 0)) |
| 814 | return H323_ERROR_BOUND; | ||
| 747 | if (!base || !(son->attr & DECODE)) { | 815 | if (!base || !(son->attr & DECODE)) { |
| 748 | PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, " ", | 816 | PRINT("%*.s%s\n", (level + 1) * TAB_SIZE, " ", |
| 749 | son->name); | 817 | son->name); |
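
The new nf_h323_error_boundary() helper replaces the byte-only CHECK_BOUND macro with a check that also accounts for the bit offset inside the current byte, which is what protects the get_bit()/get_bits() call sites added above. A minimal user-space sketch of the same arithmetic (the struct here is a simplified stand-in for the kernel's bitstr, not the real layout):

    #include <stdio.h>
    #include <stddef.h>

    #define BITS_PER_BYTE 8

    struct bitstr {
            const unsigned char *cur;  /* current read position */
            const unsigned char *end;  /* one past the last valid byte */
            size_t bit;                /* bit offset within *cur, 0..7 */
    };

    /* Nonzero if reading 'bytes' whole bytes plus 'bits' extra bits from
     * the current position would run past the end of the buffer. */
    static int error_boundary(const struct bitstr *bs, size_t bytes, size_t bits)
    {
            bits += bs->bit;
            bytes += bits / BITS_PER_BYTE;
            if (bits % BITS_PER_BYTE > 0)
                    bytes++;        /* a partial byte still costs a full byte */
            return bs->cur + bytes > bs->end;
    }

    int main(void)
    {
            unsigned char buf[4] = { 0 };
            /* one byte left in the buffer, already 4 bits into it */
            struct bitstr bs = { buf + 3, buf + 4, 4 };

            /* a byte-only check (bytes = 0) sees nothing wrong ... */
            printf("%d\n", error_boundary(&bs, 0, 0));  /* 0: in bounds */
            /* ... but a 7-bit read crosses into a fifth, nonexistent byte */
            printf("%d\n", error_boundary(&bs, 0, 7));  /* 1: out of bounds */
            return 0;
    }
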
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 59c08997bfdf..382d49792f42 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c | |||
| @@ -45,7 +45,6 @@ | |||
| 45 | #include <net/netfilter/nf_conntrack_zones.h> | 45 | #include <net/netfilter/nf_conntrack_zones.h> |
| 46 | #include <net/netfilter/nf_conntrack_timestamp.h> | 46 | #include <net/netfilter/nf_conntrack_timestamp.h> |
| 47 | #include <net/netfilter/nf_conntrack_labels.h> | 47 | #include <net/netfilter/nf_conntrack_labels.h> |
| 48 | #include <net/netfilter/nf_conntrack_seqadj.h> | ||
| 49 | #include <net/netfilter/nf_conntrack_synproxy.h> | 48 | #include <net/netfilter/nf_conntrack_synproxy.h> |
| 50 | #ifdef CONFIG_NF_NAT_NEEDED | 49 | #ifdef CONFIG_NF_NAT_NEEDED |
| 51 | #include <net/netfilter/nf_nat_core.h> | 50 | #include <net/netfilter/nf_nat_core.h> |
| @@ -1566,9 +1565,11 @@ static int ctnetlink_change_helper(struct nf_conn *ct, | |||
| 1566 | static int ctnetlink_change_timeout(struct nf_conn *ct, | 1565 | static int ctnetlink_change_timeout(struct nf_conn *ct, |
| 1567 | const struct nlattr * const cda[]) | 1566 | const struct nlattr * const cda[]) |
| 1568 | { | 1567 | { |
| 1569 | u_int32_t timeout = ntohl(nla_get_be32(cda[CTA_TIMEOUT])); | 1568 | u64 timeout = (u64)ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ; |
| 1570 | 1569 | ||
| 1571 | ct->timeout = nfct_time_stamp + timeout * HZ; | 1570 | if (timeout > INT_MAX) |
| 1571 | timeout = INT_MAX; | ||
| 1572 | ct->timeout = nfct_time_stamp + (u32)timeout; | ||
| 1572 | 1573 | ||
| 1573 | if (test_bit(IPS_DYING_BIT, &ct->status)) | 1574 | if (test_bit(IPS_DYING_BIT, &ct->status)) |
| 1574 | return -ETIME; | 1575 | return -ETIME; |
| @@ -1768,6 +1769,7 @@ ctnetlink_create_conntrack(struct net *net, | |||
| 1768 | int err = -EINVAL; | 1769 | int err = -EINVAL; |
| 1769 | struct nf_conntrack_helper *helper; | 1770 | struct nf_conntrack_helper *helper; |
| 1770 | struct nf_conn_tstamp *tstamp; | 1771 | struct nf_conn_tstamp *tstamp; |
| 1772 | u64 timeout; | ||
| 1771 | 1773 | ||
| 1772 | ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC); | 1774 | ct = nf_conntrack_alloc(net, zone, otuple, rtuple, GFP_ATOMIC); |
| 1773 | if (IS_ERR(ct)) | 1775 | if (IS_ERR(ct)) |
| @@ -1776,7 +1778,10 @@ ctnetlink_create_conntrack(struct net *net, | |||
| 1776 | if (!cda[CTA_TIMEOUT]) | 1778 | if (!cda[CTA_TIMEOUT]) |
| 1777 | goto err1; | 1779 | goto err1; |
| 1778 | 1780 | ||
| 1779 | ct->timeout = nfct_time_stamp + ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ; | 1781 | timeout = (u64)ntohl(nla_get_be32(cda[CTA_TIMEOUT])) * HZ; |
| 1782 | if (timeout > INT_MAX) | ||
| 1783 | timeout = INT_MAX; | ||
| 1784 | ct->timeout = (u32)timeout + nfct_time_stamp; | ||
| 1780 | 1785 | ||
| 1781 | rcu_read_lock(); | 1786 | rcu_read_lock(); |
| 1782 | if (cda[CTA_HELP]) { | 1787 | if (cda[CTA_HELP]) { |
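
Both hunks above guard the same 32-bit overflow: CTA_TIMEOUT is a count of seconds from userspace, and multiplying it by HZ in 32-bit arithmetic can wrap to a near-zero jiffies value, so an entry meant to live for weeks would expire almost immediately. A standalone demonstration of the clamp (HZ hard-coded to 1000 purely for illustration):

    #include <stdio.h>
    #include <stdint.h>
    #include <limits.h>

    #define HZ 1000

    int main(void)
    {
            unsigned int secs = 4295000;    /* ~49.7 days, from userspace */

            /* old: 32-bit multiply wraps modulo 2^32, here to 32704 */
            unsigned int wrapped = secs * HZ;

            /* new: widen first, then clamp to INT_MAX jiffies */
            uint64_t timeout = (uint64_t)secs * HZ;
            if (timeout > INT_MAX)
                    timeout = INT_MAX;

            printf("wrapped=%u clamped=%u\n", wrapped, (unsigned)timeout);
            return 0;
    }
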
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c index b12fc07111d0..37ef35b861f2 100644 --- a/net/netfilter/nf_conntrack_proto_tcp.c +++ b/net/netfilter/nf_conntrack_proto_tcp.c | |||
| @@ -1039,6 +1039,9 @@ static int tcp_packet(struct nf_conn *ct, | |||
| 1039 | IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED && | 1039 | IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED && |
| 1040 | timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK]) | 1040 | timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK]) |
| 1041 | timeout = timeouts[TCP_CONNTRACK_UNACK]; | 1041 | timeout = timeouts[TCP_CONNTRACK_UNACK]; |
| 1042 | else if (ct->proto.tcp.last_win == 0 && | ||
| 1043 | timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS]) | ||
| 1044 | timeout = timeouts[TCP_CONNTRACK_RETRANS]; | ||
| 1042 | else | 1045 | else |
| 1043 | timeout = timeouts[new_state]; | 1046 | timeout = timeouts[new_state]; |
| 1044 | spin_unlock_bh(&ct->lock); | 1047 | spin_unlock_bh(&ct->lock); |
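
The new branch treats a zero receive window like unacknowledged data: a peer that keeps the window closed can no longer pin an established entry at the long ESTABLISHED timeout, it ages out on the much shorter RETRANS timeout instead. A toy model of the selection order (the retransmission and unacked conditions are abstracted to flags, and the timeout values are invented in the spirit of the defaults):

    #include <stdio.h>

    enum { EST, RETRANS, UNACK, NSTATES };

    static unsigned int pick_timeout(const unsigned int t[NSTATES], int state,
                                     int retransmitting, int unacked,
                                     unsigned int last_win)
    {
            if (retransmitting && t[state] > t[RETRANS])
                    return t[RETRANS];
            else if (unacked && t[state] > t[UNACK])
                    return t[UNACK];
            else if (last_win == 0 && t[state] > t[RETRANS])
                    return t[RETRANS];      /* the clause added above */
            return t[state];
    }

    int main(void)
    {
            /* seconds; 432000 matches the usual 5-day ESTABLISHED default */
            unsigned int t[NSTATES] = { [EST] = 432000, [RETRANS] = 300,
                                        [UNACK] = 300 };

            printf("%u\n", pick_timeout(t, EST, 0, 0, 65535)); /* 432000 */
            printf("%u\n", pick_timeout(t, EST, 0, 0, 0));     /* 300 */
            return 0;
    }
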
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index d8327b43e4dc..10798b357481 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c | |||
| @@ -5847,6 +5847,12 @@ static int __net_init nf_tables_init_net(struct net *net) | |||
| 5847 | return 0; | 5847 | return 0; |
| 5848 | } | 5848 | } |
| 5849 | 5849 | ||
| 5850 | static void __net_exit nf_tables_exit_net(struct net *net) | ||
| 5851 | { | ||
| 5852 | WARN_ON_ONCE(!list_empty(&net->nft.af_info)); | ||
| 5853 | WARN_ON_ONCE(!list_empty(&net->nft.commit_list)); | ||
| 5854 | } | ||
| 5855 | |||
| 5850 | int __nft_release_basechain(struct nft_ctx *ctx) | 5856 | int __nft_release_basechain(struct nft_ctx *ctx) |
| 5851 | { | 5857 | { |
| 5852 | struct nft_rule *rule, *nr; | 5858 | struct nft_rule *rule, *nr; |
| @@ -5917,6 +5923,7 @@ static void __nft_release_afinfo(struct net *net, struct nft_af_info *afi) | |||
| 5917 | 5923 | ||
| 5918 | static struct pernet_operations nf_tables_net_ops = { | 5924 | static struct pernet_operations nf_tables_net_ops = { |
| 5919 | .init = nf_tables_init_net, | 5925 | .init = nf_tables_init_net, |
| 5926 | .exit = nf_tables_exit_net, | ||
| 5920 | }; | 5927 | }; |
| 5921 | 5928 | ||
| 5922 | static int __init nf_tables_module_init(void) | 5929 | static int __init nf_tables_module_init(void) |
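
nf_tables_exit_net() only asserts: by the time a network namespace is torn down, every af_info and every pending transaction must already be gone, and a leftover entry is a leak worth a one-time warning rather than a silent use-after-free later. The same sanity-check-on-exit pattern is added below for nfnetlink_log, nfnetlink_queue and x_tables. Reduced to its essentials (list primitives reimplemented here; not the kernel's list.h):

    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };
    #define LIST_HEAD_INIT(name) { &(name), &(name) }

    static int list_empty(const struct list_head *h)
    {
            return h->next == h;
    }

    struct net { struct list_head af_info, commit_list; };

    static void exit_net(struct net *net)
    {
            /* a pernet ->exit hook that owns no objects, only invariants */
            if (!list_empty(&net->af_info))
                    fprintf(stderr, "WARN: af_info leaked at netns exit\n");
            if (!list_empty(&net->commit_list))
                    fprintf(stderr, "WARN: commit_list leaked at netns exit\n");
    }

    int main(void)
    {
            struct list_head leaked;
            struct net net = { LIST_HEAD_INIT(net.af_info),
                               LIST_HEAD_INIT(net.commit_list) };

            /* simulate a leaked object: hook one node into af_info */
            leaked.next = leaked.prev = &net.af_info;
            net.af_info.next = net.af_info.prev = &leaked;

            exit_net(&net);         /* warns about af_info only */
            return 0;
    }
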
diff --git a/net/netfilter/nfnetlink_cthelper.c b/net/netfilter/nfnetlink_cthelper.c index 41628b393673..d33ce6d5ebce 100644 --- a/net/netfilter/nfnetlink_cthelper.c +++ b/net/netfilter/nfnetlink_cthelper.c | |||
| @@ -17,6 +17,7 @@ | |||
| 17 | #include <linux/types.h> | 17 | #include <linux/types.h> |
| 18 | #include <linux/list.h> | 18 | #include <linux/list.h> |
| 19 | #include <linux/errno.h> | 19 | #include <linux/errno.h> |
| 20 | #include <linux/capability.h> | ||
| 20 | #include <net/netlink.h> | 21 | #include <net/netlink.h> |
| 21 | #include <net/sock.h> | 22 | #include <net/sock.h> |
| 22 | 23 | ||
| @@ -407,6 +408,9 @@ static int nfnl_cthelper_new(struct net *net, struct sock *nfnl, | |||
| 407 | struct nfnl_cthelper *nlcth; | 408 | struct nfnl_cthelper *nlcth; |
| 408 | int ret = 0; | 409 | int ret = 0; |
| 409 | 410 | ||
| 411 | if (!capable(CAP_NET_ADMIN)) | ||
| 412 | return -EPERM; | ||
| 413 | |||
| 410 | if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE]) | 414 | if (!tb[NFCTH_NAME] || !tb[NFCTH_TUPLE]) |
| 411 | return -EINVAL; | 415 | return -EINVAL; |
| 412 | 416 | ||
| @@ -611,6 +615,9 @@ static int nfnl_cthelper_get(struct net *net, struct sock *nfnl, | |||
| 611 | struct nfnl_cthelper *nlcth; | 615 | struct nfnl_cthelper *nlcth; |
| 612 | bool tuple_set = false; | 616 | bool tuple_set = false; |
| 613 | 617 | ||
| 618 | if (!capable(CAP_NET_ADMIN)) | ||
| 619 | return -EPERM; | ||
| 620 | |||
| 614 | if (nlh->nlmsg_flags & NLM_F_DUMP) { | 621 | if (nlh->nlmsg_flags & NLM_F_DUMP) { |
| 615 | struct netlink_dump_control c = { | 622 | struct netlink_dump_control c = { |
| 616 | .dump = nfnl_cthelper_dump_table, | 623 | .dump = nfnl_cthelper_dump_table, |
| @@ -678,6 +685,9 @@ static int nfnl_cthelper_del(struct net *net, struct sock *nfnl, | |||
| 678 | struct nfnl_cthelper *nlcth, *n; | 685 | struct nfnl_cthelper *nlcth, *n; |
| 679 | int j = 0, ret; | 686 | int j = 0, ret; |
| 680 | 687 | ||
| 688 | if (!capable(CAP_NET_ADMIN)) | ||
| 689 | return -EPERM; | ||
| 690 | |||
| 681 | if (tb[NFCTH_NAME]) | 691 | if (tb[NFCTH_NAME]) |
| 682 | helper_name = nla_data(tb[NFCTH_NAME]); | 692 | helper_name = nla_data(tb[NFCTH_NAME]); |
| 683 | 693 | ||
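
All three cthelper operations now require CAP_NET_ADMIN (the xt_osf callbacks below get the same treatment): these handlers configure global state shared across namespaces, so namespace-local root must not reach them. A small simulation of the gate (capability bookkeeping reduced to a bitmask; CAP_NET_ADMIN's value happens to match the kernel's but is incidental here):

    #include <stdio.h>

    #define CAP_NET_ADMIN 12
    #define EPERM 1

    /* stand-in for the kernel's capable(): checks the *initial* user
     * namespace, so root inside a container does not qualify */
    static int capable_sim(unsigned long caps, int cap)
    {
            return !!(caps & (1UL << cap));
    }

    static int cthelper_new(unsigned long caps)
    {
            if (!capable_sim(caps, CAP_NET_ADMIN))
                    return -EPERM;
            return 0;       /* would go on to register the helper */
    }

    int main(void)
    {
            printf("%d\n", cthelper_new(0));                     /* -1: denied */
            printf("%d\n", cthelper_new(1UL << CAP_NET_ADMIN));  /*  0: allowed */
            return 0;
    }
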
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index e5afab86381c..e955bec0acc6 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c | |||
| @@ -1093,10 +1093,15 @@ static int __net_init nfnl_log_net_init(struct net *net) | |||
| 1093 | 1093 | ||
| 1094 | static void __net_exit nfnl_log_net_exit(struct net *net) | 1094 | static void __net_exit nfnl_log_net_exit(struct net *net) |
| 1095 | { | 1095 | { |
| 1096 | struct nfnl_log_net *log = nfnl_log_pernet(net); | ||
| 1097 | unsigned int i; | ||
| 1098 | |||
| 1096 | #ifdef CONFIG_PROC_FS | 1099 | #ifdef CONFIG_PROC_FS |
| 1097 | remove_proc_entry("nfnetlink_log", net->nf.proc_netfilter); | 1100 | remove_proc_entry("nfnetlink_log", net->nf.proc_netfilter); |
| 1098 | #endif | 1101 | #endif |
| 1099 | nf_log_unset(net, &nfulnl_logger); | 1102 | nf_log_unset(net, &nfulnl_logger); |
| 1103 | for (i = 0; i < INSTANCE_BUCKETS; i++) | ||
| 1104 | WARN_ON_ONCE(!hlist_empty(&log->instance_table[i])); | ||
| 1100 | } | 1105 | } |
| 1101 | 1106 | ||
| 1102 | static struct pernet_operations nfnl_log_net_ops = { | 1107 | static struct pernet_operations nfnl_log_net_ops = { |
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c index a16356cacec3..c09b36755ed7 100644 --- a/net/netfilter/nfnetlink_queue.c +++ b/net/netfilter/nfnetlink_queue.c | |||
| @@ -1512,10 +1512,15 @@ static int __net_init nfnl_queue_net_init(struct net *net) | |||
| 1512 | 1512 | ||
| 1513 | static void __net_exit nfnl_queue_net_exit(struct net *net) | 1513 | static void __net_exit nfnl_queue_net_exit(struct net *net) |
| 1514 | { | 1514 | { |
| 1515 | struct nfnl_queue_net *q = nfnl_queue_pernet(net); | ||
| 1516 | unsigned int i; | ||
| 1517 | |||
| 1515 | nf_unregister_queue_handler(net); | 1518 | nf_unregister_queue_handler(net); |
| 1516 | #ifdef CONFIG_PROC_FS | 1519 | #ifdef CONFIG_PROC_FS |
| 1517 | remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter); | 1520 | remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter); |
| 1518 | #endif | 1521 | #endif |
| 1522 | for (i = 0; i < INSTANCE_BUCKETS; i++) | ||
| 1523 | WARN_ON_ONCE(!hlist_empty(&q->instance_table[i])); | ||
| 1519 | } | 1524 | } |
| 1520 | 1525 | ||
| 1521 | static void nfnl_queue_net_exit_batch(struct list_head *net_exit_list) | 1526 | static void nfnl_queue_net_exit_batch(struct list_head *net_exit_list) |
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c index a0a93d987a3b..47ec1046ad11 100644 --- a/net/netfilter/nft_exthdr.c +++ b/net/netfilter/nft_exthdr.c | |||
| @@ -214,6 +214,8 @@ static const struct nla_policy nft_exthdr_policy[NFTA_EXTHDR_MAX + 1] = { | |||
| 214 | [NFTA_EXTHDR_OFFSET] = { .type = NLA_U32 }, | 214 | [NFTA_EXTHDR_OFFSET] = { .type = NLA_U32 }, |
| 215 | [NFTA_EXTHDR_LEN] = { .type = NLA_U32 }, | 215 | [NFTA_EXTHDR_LEN] = { .type = NLA_U32 }, |
| 216 | [NFTA_EXTHDR_FLAGS] = { .type = NLA_U32 }, | 216 | [NFTA_EXTHDR_FLAGS] = { .type = NLA_U32 }, |
| 217 | [NFTA_EXTHDR_OP] = { .type = NLA_U32 }, | ||
| 218 | [NFTA_EXTHDR_SREG] = { .type = NLA_U32 }, | ||
| 217 | }; | 219 | }; |
| 218 | 220 | ||
| 219 | static int nft_exthdr_init(const struct nft_ctx *ctx, | 221 | static int nft_exthdr_init(const struct nft_ctx *ctx, |
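
The two added policy entries close a validation gap: nft_exthdr_init() reads NFTA_EXTHDR_OP and NFTA_EXTHDR_SREG, but without a policy entry nla_parse never verified that the attribute actually carries four bytes, so a short attribute from userspace meant reading past its payload. A toy version of what a policy entry buys (the netlink machinery is reduced to a bare length check):

    #include <stdio.h>

    enum { NLA_UNSPEC, NLA_U32 };

    struct nla_policy { int type; };
    struct nlattr { int payload_len; };     /* heavily simplified */

    static int validate(const struct nlattr *a, const struct nla_policy *p)
    {
            /* NLA_UNSPEC means "no policy": the length is never checked */
            if (p->type == NLA_U32 && a->payload_len != 4)
                    return -22;     /* -EINVAL */
            return 0;
    }

    int main(void)
    {
            struct nlattr short_attr = { .payload_len = 1 };
            struct nla_policy none = { NLA_UNSPEC }, u32p = { NLA_U32 };

            printf("%d\n", validate(&short_attr, &none)); /*   0: accepted */
            printf("%d\n", validate(&short_attr, &u32p)); /* -22: rejected */
            return 0;
    }
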
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c index a77dd514297c..55802e97f906 100644 --- a/net/netfilter/x_tables.c +++ b/net/netfilter/x_tables.c | |||
| @@ -1729,8 +1729,17 @@ static int __net_init xt_net_init(struct net *net) | |||
| 1729 | return 0; | 1729 | return 0; |
| 1730 | } | 1730 | } |
| 1731 | 1731 | ||
| 1732 | static void __net_exit xt_net_exit(struct net *net) | ||
| 1733 | { | ||
| 1734 | int i; | ||
| 1735 | |||
| 1736 | for (i = 0; i < NFPROTO_NUMPROTO; i++) | ||
| 1737 | WARN_ON_ONCE(!list_empty(&net->xt.tables[i])); | ||
| 1738 | } | ||
| 1739 | |||
| 1732 | static struct pernet_operations xt_net_ops = { | 1740 | static struct pernet_operations xt_net_ops = { |
| 1733 | .init = xt_net_init, | 1741 | .init = xt_net_init, |
| 1742 | .exit = xt_net_exit, | ||
| 1734 | }; | 1743 | }; |
| 1735 | 1744 | ||
| 1736 | static int __init xt_init(void) | 1745 | static int __init xt_init(void) |
diff --git a/net/netfilter/xt_bpf.c b/net/netfilter/xt_bpf.c index 041da0d9c06f..1f7fbd3c7e5a 100644 --- a/net/netfilter/xt_bpf.c +++ b/net/netfilter/xt_bpf.c | |||
| @@ -27,6 +27,9 @@ static int __bpf_mt_check_bytecode(struct sock_filter *insns, __u16 len, | |||
| 27 | { | 27 | { |
| 28 | struct sock_fprog_kern program; | 28 | struct sock_fprog_kern program; |
| 29 | 29 | ||
| 30 | if (len > XT_BPF_MAX_NUM_INSTR) | ||
| 31 | return -EINVAL; | ||
| 32 | |||
| 30 | program.len = len; | 33 | program.len = len; |
| 31 | program.filter = insns; | 34 | program.filter = insns; |
| 32 | 35 | ||
| @@ -55,6 +58,9 @@ static int __bpf_mt_check_path(const char *path, struct bpf_prog **ret) | |||
| 55 | mm_segment_t oldfs = get_fs(); | 58 | mm_segment_t oldfs = get_fs(); |
| 56 | int retval, fd; | 59 | int retval, fd; |
| 57 | 60 | ||
| 61 | if (strnlen(path, XT_BPF_PATH_MAX) == XT_BPF_PATH_MAX) | ||
| 62 | return -EINVAL; | ||
| 63 | |||
| 58 | set_fs(KERNEL_DS); | 64 | set_fs(KERNEL_DS); |
| 59 | fd = bpf_obj_get_user(path, 0); | 65 | fd = bpf_obj_get_user(path, 0); |
| 60 | set_fs(oldfs); | 66 | set_fs(oldfs); |
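
Two independent caps on untrusted match data: the instruction count is bounded before the program reaches the BPF core, and the path buffer must contain a NUL terminator strictly inside XT_BPF_PATH_MAX bytes, since strnlen() returning the full size means none was found. The path check in isolation (the constant's value is taken from the uapi header; treat it as illustrative):

    #include <stdio.h>
    #include <string.h>

    #define XT_BPF_PATH_MAX 512     /* from the xt_bpf uapi header */

    static int path_ok(const char buf[XT_BPF_PATH_MAX])
    {
            /* a terminator must appear strictly inside the buffer */
            return strnlen(buf, XT_BPF_PATH_MAX) < XT_BPF_PATH_MAX;
    }

    int main(void)
    {
            char good[XT_BPF_PATH_MAX] = "/sys/fs/bpf/prog";
            char bad[XT_BPF_PATH_MAX];

            memset(bad, 'A', sizeof(bad));          /* no NUL anywhere */
            printf("%d %d\n", path_ok(good), path_ok(bad)); /* 1 0 */
            return 0;
    }
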
diff --git a/net/netfilter/xt_osf.c b/net/netfilter/xt_osf.c index 36e14b1f061d..a34f314a8c23 100644 --- a/net/netfilter/xt_osf.c +++ b/net/netfilter/xt_osf.c | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
| 20 | #include <linux/kernel.h> | 20 | #include <linux/kernel.h> |
| 21 | 21 | ||
| 22 | #include <linux/capability.h> | ||
| 22 | #include <linux/if.h> | 23 | #include <linux/if.h> |
| 23 | #include <linux/inetdevice.h> | 24 | #include <linux/inetdevice.h> |
| 24 | #include <linux/ip.h> | 25 | #include <linux/ip.h> |
| @@ -70,6 +71,9 @@ static int xt_osf_add_callback(struct net *net, struct sock *ctnl, | |||
| 70 | struct xt_osf_finger *kf = NULL, *sf; | 71 | struct xt_osf_finger *kf = NULL, *sf; |
| 71 | int err = 0; | 72 | int err = 0; |
| 72 | 73 | ||
| 74 | if (!capable(CAP_NET_ADMIN)) | ||
| 75 | return -EPERM; | ||
| 76 | |||
| 73 | if (!osf_attrs[OSF_ATTR_FINGER]) | 77 | if (!osf_attrs[OSF_ATTR_FINGER]) |
| 74 | return -EINVAL; | 78 | return -EINVAL; |
| 75 | 79 | ||
| @@ -115,6 +119,9 @@ static int xt_osf_remove_callback(struct net *net, struct sock *ctnl, | |||
| 115 | struct xt_osf_finger *sf; | 119 | struct xt_osf_finger *sf; |
| 116 | int err = -ENOENT; | 120 | int err = -ENOENT; |
| 117 | 121 | ||
| 122 | if (!capable(CAP_NET_ADMIN)) | ||
| 123 | return -EPERM; | ||
| 124 | |||
| 118 | if (!osf_attrs[OSF_ATTR_FINGER]) | 125 | if (!osf_attrs[OSF_ATTR_FINGER]) |
| 119 | return -EINVAL; | 126 | return -EINVAL; |
| 120 | 127 | ||
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index b9e0ee4e22f5..79cc1bf36e4a 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
| @@ -253,6 +253,9 @@ static int __netlink_deliver_tap_skb(struct sk_buff *skb, | |||
| 253 | struct sock *sk = skb->sk; | 253 | struct sock *sk = skb->sk; |
| 254 | int ret = -ENOMEM; | 254 | int ret = -ENOMEM; |
| 255 | 255 | ||
| 256 | if (!net_eq(dev_net(dev), sock_net(sk))) | ||
| 257 | return 0; | ||
| 258 | |||
| 256 | dev_hold(dev); | 259 | dev_hold(dev); |
| 257 | 260 | ||
| 258 | if (is_vmalloc_addr(skb->head)) | 261 | if (is_vmalloc_addr(skb->head)) |
diff --git a/net/sched/act_meta_mark.c b/net/sched/act_meta_mark.c index 1e3f10e5da99..6445184b2759 100644 --- a/net/sched/act_meta_mark.c +++ b/net/sched/act_meta_mark.c | |||
| @@ -22,7 +22,6 @@ | |||
| 22 | #include <net/pkt_sched.h> | 22 | #include <net/pkt_sched.h> |
| 23 | #include <uapi/linux/tc_act/tc_ife.h> | 23 | #include <uapi/linux/tc_act/tc_ife.h> |
| 24 | #include <net/tc_act/tc_ife.h> | 24 | #include <net/tc_act/tc_ife.h> |
| 25 | #include <linux/rtnetlink.h> | ||
| 26 | 25 | ||
| 27 | static int skbmark_encode(struct sk_buff *skb, void *skbdata, | 26 | static int skbmark_encode(struct sk_buff *skb, void *skbdata, |
| 28 | struct tcf_meta_info *e) | 27 | struct tcf_meta_info *e) |
diff --git a/net/sched/act_meta_skbtcindex.c b/net/sched/act_meta_skbtcindex.c index 2ea1f26c9e96..7221437ca3a6 100644 --- a/net/sched/act_meta_skbtcindex.c +++ b/net/sched/act_meta_skbtcindex.c | |||
| @@ -22,7 +22,6 @@ | |||
| 22 | #include <net/pkt_sched.h> | 22 | #include <net/pkt_sched.h> |
| 23 | #include <uapi/linux/tc_act/tc_ife.h> | 23 | #include <uapi/linux/tc_act/tc_ife.h> |
| 24 | #include <net/tc_act/tc_ife.h> | 24 | #include <net/tc_act/tc_ife.h> |
| 25 | #include <linux/rtnetlink.h> | ||
| 26 | 25 | ||
| 27 | static int skbtcindex_encode(struct sk_buff *skb, void *skbdata, | 26 | static int skbtcindex_encode(struct sk_buff *skb, void *skbdata, |
| 28 | struct tcf_meta_info *e) | 27 | struct tcf_meta_info *e) |
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index ddcf04b4ab43..b91ea03e3afa 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c | |||
| @@ -23,7 +23,6 @@ | |||
| 23 | #include <linux/skbuff.h> | 23 | #include <linux/skbuff.h> |
| 24 | #include <linux/init.h> | 24 | #include <linux/init.h> |
| 25 | #include <linux/kmod.h> | 25 | #include <linux/kmod.h> |
| 26 | #include <linux/err.h> | ||
| 27 | #include <linux/slab.h> | 26 | #include <linux/slab.h> |
| 28 | #include <net/net_namespace.h> | 27 | #include <net/net_namespace.h> |
| 29 | #include <net/sock.h> | 28 | #include <net/sock.h> |
| @@ -352,6 +351,8 @@ void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q, | |||
| 352 | { | 351 | { |
| 353 | struct tcf_chain *chain; | 352 | struct tcf_chain *chain; |
| 354 | 353 | ||
| 354 | if (!block) | ||
| 355 | return; | ||
| 355 | /* Hold a refcnt for all chains, except 0, so that they don't disappear | 356 | /* Hold a refcnt for all chains, except 0, so that they don't disappear |
| 356 | * while we are iterating. | 357 | * while we are iterating. |
| 357 | */ | 358 | */ |
| @@ -378,8 +379,6 @@ void tcf_block_put(struct tcf_block *block) | |||
| 378 | { | 379 | { |
| 379 | struct tcf_block_ext_info ei = {0, }; | 380 | struct tcf_block_ext_info ei = {0, }; |
| 380 | 381 | ||
| 381 | if (!block) | ||
| 382 | return; | ||
| 383 | tcf_block_put_ext(block, block->q, &ei); | 382 | tcf_block_put_ext(block, block->q, &ei); |
| 384 | } | 383 | } |
| 385 | 384 | ||
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index ac152b4f4247..507859cdd1cb 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c | |||
| @@ -45,7 +45,6 @@ | |||
| 45 | #include <net/netlink.h> | 45 | #include <net/netlink.h> |
| 46 | #include <net/act_api.h> | 46 | #include <net/act_api.h> |
| 47 | #include <net/pkt_cls.h> | 47 | #include <net/pkt_cls.h> |
| 48 | #include <linux/netdevice.h> | ||
| 49 | #include <linux/idr.h> | 48 | #include <linux/idr.h> |
| 50 | 49 | ||
| 51 | struct tc_u_knode { | 50 | struct tc_u_knode { |
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index b6c4f536876b..0f1eab99ff4e 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c | |||
| @@ -795,6 +795,8 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid, | |||
| 795 | tcm->tcm_info = refcount_read(&q->refcnt); | 795 | tcm->tcm_info = refcount_read(&q->refcnt); |
| 796 | if (nla_put_string(skb, TCA_KIND, q->ops->id)) | 796 | if (nla_put_string(skb, TCA_KIND, q->ops->id)) |
| 797 | goto nla_put_failure; | 797 | goto nla_put_failure; |
| 798 | if (nla_put_u8(skb, TCA_HW_OFFLOAD, !!(q->flags & TCQ_F_OFFLOADED))) | ||
| 799 | goto nla_put_failure; | ||
| 798 | if (q->ops->dump && q->ops->dump(q, skb) < 0) | 800 | if (q->ops->dump && q->ops->dump(q, skb) < 0) |
| 799 | goto nla_put_failure; | 801 | goto nla_put_failure; |
| 800 | qlen = q->q.qlen; | 802 | qlen = q->q.qlen; |
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c index 5ecc38f35d47..fc1286f499c1 100644 --- a/net/sched/sch_ingress.c +++ b/net/sched/sch_ingress.c | |||
| @@ -68,6 +68,8 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt) | |||
| 68 | struct net_device *dev = qdisc_dev(sch); | 68 | struct net_device *dev = qdisc_dev(sch); |
| 69 | int err; | 69 | int err; |
| 70 | 70 | ||
| 71 | net_inc_ingress_queue(); | ||
| 72 | |||
| 71 | mini_qdisc_pair_init(&q->miniqp, sch, &dev->miniq_ingress); | 73 | mini_qdisc_pair_init(&q->miniqp, sch, &dev->miniq_ingress); |
| 72 | 74 | ||
| 73 | q->block_info.binder_type = TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS; | 75 | q->block_info.binder_type = TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS; |
| @@ -78,7 +80,6 @@ static int ingress_init(struct Qdisc *sch, struct nlattr *opt) | |||
| 78 | if (err) | 80 | if (err) |
| 79 | return err; | 81 | return err; |
| 80 | 82 | ||
| 81 | net_inc_ingress_queue(); | ||
| 82 | sch->flags |= TCQ_F_CPUSTATS; | 83 | sch->flags |= TCQ_F_CPUSTATS; |
| 83 | 84 | ||
| 84 | return 0; | 85 | return 0; |
| @@ -172,6 +173,9 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt) | |||
| 172 | struct net_device *dev = qdisc_dev(sch); | 173 | struct net_device *dev = qdisc_dev(sch); |
| 173 | int err; | 174 | int err; |
| 174 | 175 | ||
| 176 | net_inc_ingress_queue(); | ||
| 177 | net_inc_egress_queue(); | ||
| 178 | |||
| 175 | mini_qdisc_pair_init(&q->miniqp_ingress, sch, &dev->miniq_ingress); | 179 | mini_qdisc_pair_init(&q->miniqp_ingress, sch, &dev->miniq_ingress); |
| 176 | 180 | ||
| 177 | q->ingress_block_info.binder_type = TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS; | 181 | q->ingress_block_info.binder_type = TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS; |
| @@ -190,18 +194,11 @@ static int clsact_init(struct Qdisc *sch, struct nlattr *opt) | |||
| 190 | 194 | ||
| 191 | err = tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info); | 195 | err = tcf_block_get_ext(&q->egress_block, sch, &q->egress_block_info); |
| 192 | if (err) | 196 | if (err) |
| 193 | goto err_egress_block_get; | 197 | return err; |
| 194 | |||
| 195 | net_inc_ingress_queue(); | ||
| 196 | net_inc_egress_queue(); | ||
| 197 | 198 | ||
| 198 | sch->flags |= TCQ_F_CPUSTATS; | 199 | sch->flags |= TCQ_F_CPUSTATS; |
| 199 | 200 | ||
| 200 | return 0; | 201 | return 0; |
| 201 | |||
| 202 | err_egress_block_get: | ||
| 203 | tcf_block_put_ext(q->ingress_block, sch, &q->ingress_block_info); | ||
| 204 | return err; | ||
| 205 | } | 202 | } |
| 206 | 203 | ||
| 207 | static void clsact_destroy(struct Qdisc *sch) | 204 | static void clsact_destroy(struct Qdisc *sch) |
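
The reordering here is about the error path: when ->init() fails, the qdisc core still calls ->destroy(), and ingress_destroy()/clsact_destroy() unconditionally decrement the static keys. Incrementing them before the first possible failure keeps increments and decrements paired without a hand-rolled unwind label, which is why the err_egress_block_get label disappears. The pairing, modelled with a plain counter:

    #include <stdio.h>

    static int ingress_needed;      /* models the net_inc/dec static key */

    static int block_get(int fail)
    {
            return fail ? -12 : 0;  /* -ENOMEM */
    }

    static int ingress_init(int fail)
    {
            ingress_needed++;       /* before any failure point */
            return block_get(fail);
    }

    static void ingress_destroy(void)
    {
            ingress_needed--;       /* runs even after a failed init */
    }

    int main(void)
    {
            if (ingress_init(1) < 0)
                    ingress_destroy();
            printf("balance=%d\n", ingress_needed); /* 0 */
            return 0;
    }
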
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c index 9d874e60e032..f0747eb87dc4 100644 --- a/net/sched/sch_red.c +++ b/net/sched/sch_red.c | |||
| @@ -157,6 +157,7 @@ static int red_offload(struct Qdisc *sch, bool enable) | |||
| 157 | .handle = sch->handle, | 157 | .handle = sch->handle, |
| 158 | .parent = sch->parent, | 158 | .parent = sch->parent, |
| 159 | }; | 159 | }; |
| 160 | int err; | ||
| 160 | 161 | ||
| 161 | if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc) | 162 | if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc) |
| 162 | return -EOPNOTSUPP; | 163 | return -EOPNOTSUPP; |
| @@ -171,7 +172,14 @@ static int red_offload(struct Qdisc *sch, bool enable) | |||
| 171 | opt.command = TC_RED_DESTROY; | 172 | opt.command = TC_RED_DESTROY; |
| 172 | } | 173 | } |
| 173 | 174 | ||
| 174 | return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt); | 175 | err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt); |
| 176 | |||
| 177 | if (!err && enable) | ||
| 178 | sch->flags |= TCQ_F_OFFLOADED; | ||
| 179 | else | ||
| 180 | sch->flags &= ~TCQ_F_OFFLOADED; | ||
| 181 | |||
| 182 | return err; | ||
| 175 | } | 183 | } |
| 176 | 184 | ||
| 177 | static void red_destroy(struct Qdisc *sch) | 185 | static void red_destroy(struct Qdisc *sch) |
| @@ -274,7 +282,7 @@ static int red_init(struct Qdisc *sch, struct nlattr *opt) | |||
| 274 | return red_change(sch, opt); | 282 | return red_change(sch, opt); |
| 275 | } | 283 | } |
| 276 | 284 | ||
| 277 | static int red_dump_offload(struct Qdisc *sch, struct tc_red_qopt *opt) | 285 | static int red_dump_offload_stats(struct Qdisc *sch, struct tc_red_qopt *opt) |
| 278 | { | 286 | { |
| 279 | struct net_device *dev = qdisc_dev(sch); | 287 | struct net_device *dev = qdisc_dev(sch); |
| 280 | struct tc_red_qopt_offload hw_stats = { | 288 | struct tc_red_qopt_offload hw_stats = { |
| @@ -286,21 +294,12 @@ static int red_dump_offload(struct Qdisc *sch, struct tc_red_qopt *opt) | |||
| 286 | .stats.qstats = &sch->qstats, | 294 | .stats.qstats = &sch->qstats, |
| 287 | }, | 295 | }, |
| 288 | }; | 296 | }; |
| 289 | int err; | ||
| 290 | 297 | ||
| 291 | opt->flags &= ~TC_RED_OFFLOADED; | 298 | if (!(sch->flags & TCQ_F_OFFLOADED)) |
| 292 | if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc) | ||
| 293 | return 0; | ||
| 294 | |||
| 295 | err = dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, | ||
| 296 | &hw_stats); | ||
| 297 | if (err == -EOPNOTSUPP) | ||
| 298 | return 0; | 299 | return 0; |
| 299 | 300 | ||
| 300 | if (!err) | 301 | return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, |
| 301 | opt->flags |= TC_RED_OFFLOADED; | 302 | &hw_stats); |
| 302 | |||
| 303 | return err; | ||
| 304 | } | 303 | } |
| 305 | 304 | ||
| 306 | static int red_dump(struct Qdisc *sch, struct sk_buff *skb) | 305 | static int red_dump(struct Qdisc *sch, struct sk_buff *skb) |
| @@ -319,7 +318,7 @@ static int red_dump(struct Qdisc *sch, struct sk_buff *skb) | |||
| 319 | int err; | 318 | int err; |
| 320 | 319 | ||
| 321 | sch->qstats.backlog = q->qdisc->qstats.backlog; | 320 | sch->qstats.backlog = q->qdisc->qstats.backlog; |
| 322 | err = red_dump_offload(sch, &opt); | 321 | err = red_dump_offload_stats(sch, &opt); |
| 323 | if (err) | 322 | if (err) |
| 324 | goto nla_put_failure; | 323 | goto nla_put_failure; |
| 325 | 324 | ||
| @@ -347,7 +346,7 @@ static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d) | |||
| 347 | .marked = q->stats.prob_mark + q->stats.forced_mark, | 346 | .marked = q->stats.prob_mark + q->stats.forced_mark, |
| 348 | }; | 347 | }; |
| 349 | 348 | ||
| 350 | if (tc_can_offload(dev) && dev->netdev_ops->ndo_setup_tc) { | 349 | if (sch->flags & TCQ_F_OFFLOADED) { |
| 351 | struct red_stats hw_stats = {0}; | 350 | struct red_stats hw_stats = {0}; |
| 352 | struct tc_red_qopt_offload hw_stats_request = { | 351 | struct tc_red_qopt_offload hw_stats_request = { |
| 353 | .command = TC_RED_XSTATS, | 352 | .command = TC_RED_XSTATS, |
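
Rather than asking the driver again at dump time, red_offload() now records the driver's answer in TCQ_F_OFFLOADED; red_dump_offload_stats() and red_dump_stats() just test the flag, and the sch_api.c hunk above exports it to userspace as TCA_HW_OFFLOAD. The bookkeeping in miniature (the flag value is made up; the real one lives in include/net/sch_generic.h):

    #include <stdio.h>

    #define TCQ_F_OFFLOADED 0x200   /* illustrative bit */

    struct qdisc { unsigned int flags; };

    static int offload(struct qdisc *sch, int enable, int driver_err)
    {
            /* the flag tracks what the driver actually did */
            if (!driver_err && enable)
                    sch->flags |= TCQ_F_OFFLOADED;
            else
                    sch->flags &= ~TCQ_F_OFFLOADED;
            return driver_err;
    }

    int main(void)
    {
            struct qdisc sch = { 0 };

            offload(&sch, 1, 0);    /* driver accepted the offload */
            printf("%d\n", !!(sch.flags & TCQ_F_OFFLOADED));  /* 1 */
            offload(&sch, 1, -95);  /* driver said -EOPNOTSUPP */
            printf("%d\n", !!(sch.flags & TCQ_F_OFFLOADED));  /* 0 */
            return 0;
    }
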
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index eb17a911aa29..3253f724a995 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
| @@ -3891,13 +3891,17 @@ static int sctp_setsockopt_reset_streams(struct sock *sk, | |||
| 3891 | struct sctp_association *asoc; | 3891 | struct sctp_association *asoc; |
| 3892 | int retval = -EINVAL; | 3892 | int retval = -EINVAL; |
| 3893 | 3893 | ||
| 3894 | if (optlen < sizeof(struct sctp_reset_streams)) | 3894 | if (optlen < sizeof(*params)) |
| 3895 | return -EINVAL; | 3895 | return -EINVAL; |
| 3896 | 3896 | ||
| 3897 | params = memdup_user(optval, optlen); | 3897 | params = memdup_user(optval, optlen); |
| 3898 | if (IS_ERR(params)) | 3898 | if (IS_ERR(params)) |
| 3899 | return PTR_ERR(params); | 3899 | return PTR_ERR(params); |
| 3900 | 3900 | ||
| 3901 | if (params->srs_number_streams * sizeof(__u16) > | ||
| 3902 | optlen - sizeof(*params)) | ||
| 3903 | goto out; | ||
| 3904 | |||
| 3901 | asoc = sctp_id2assoc(sk, params->srs_assoc_id); | 3905 | asoc = sctp_id2assoc(sk, params->srs_assoc_id); |
| 3902 | if (!asoc) | 3906 | if (!asoc) |
| 3903 | goto out; | 3907 | goto out; |
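
sctp_reset_streams carries a flexible array of __u16 stream numbers, and the new check ensures the caller-supplied count fits inside the bytes actually copied, so the later walk over srs_stream_list cannot read past the memdup_user() allocation. The arithmetic, in a user-space sketch (the struct is reduced to the fields that matter):

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    struct sctp_reset_streams {
            int32_t  srs_assoc_id;
            uint16_t srs_flags;
            uint16_t srs_number_streams;
            uint16_t srs_stream_list[];     /* flexible array */
    };

    static int streams_fit(const struct sctp_reset_streams *p, size_t optlen)
    {
            if (optlen < sizeof(*p))
                    return 0;
            return (size_t)p->srs_number_streams * sizeof(uint16_t) <=
                   optlen - sizeof(*p);
    }

    int main(void)
    {
            unsigned char small[sizeof(struct sctp_reset_streams)] = { 0 };
            unsigned char big[sizeof(struct sctp_reset_streams) + 2000] = { 0 };
            struct sctp_reset_streams *p = (void *)small, *q = (void *)big;

            p->srs_number_streams = 1000;   /* claims 2000 trailing bytes */
            q->srs_number_streams = 1000;

            printf("%d\n", streams_fit(p, sizeof(small)));  /* 0: rejected */
            printf("%d\n", streams_fit(q, sizeof(big)));    /* 1: fits */
            return 0;
    }
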
diff --git a/net/sunrpc/auth_gss/gss_rpc_xdr.c b/net/sunrpc/auth_gss/gss_rpc_xdr.c index c4778cae58ef..444380f968f1 100644 --- a/net/sunrpc/auth_gss/gss_rpc_xdr.c +++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c | |||
| @@ -231,6 +231,7 @@ static int gssx_dec_linux_creds(struct xdr_stream *xdr, | |||
| 231 | goto out_free_groups; | 231 | goto out_free_groups; |
| 232 | creds->cr_group_info->gid[i] = kgid; | 232 | creds->cr_group_info->gid[i] = kgid; |
| 233 | } | 233 | } |
| 234 | groups_sort(creds->cr_group_info); | ||
| 234 | 235 | ||
| 235 | return 0; | 236 | return 0; |
| 236 | out_free_groups: | 237 | out_free_groups: |
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index 5dd4e6c9fef2..26531193fce4 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c | |||
| @@ -481,6 +481,7 @@ static int rsc_parse(struct cache_detail *cd, | |||
| 481 | goto out; | 481 | goto out; |
| 482 | rsci.cred.cr_group_info->gid[i] = kgid; | 482 | rsci.cred.cr_group_info->gid[i] = kgid; |
| 483 | } | 483 | } |
| 484 | groups_sort(rsci.cred.cr_group_info); | ||
| 484 | 485 | ||
| 485 | /* mech name */ | 486 | /* mech name */ |
| 486 | len = qword_get(&mesg, buf, mlen); | 487 | len = qword_get(&mesg, buf, mlen); |
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c index 740b67d5a733..af7f28fb8102 100644 --- a/net/sunrpc/svcauth_unix.c +++ b/net/sunrpc/svcauth_unix.c | |||
| @@ -520,6 +520,7 @@ static int unix_gid_parse(struct cache_detail *cd, | |||
| 520 | ug.gi->gid[i] = kgid; | 520 | ug.gi->gid[i] = kgid; |
| 521 | } | 521 | } |
| 522 | 522 | ||
| 523 | groups_sort(ug.gi); | ||
| 523 | ugp = unix_gid_lookup(cd, uid); | 524 | ugp = unix_gid_lookup(cd, uid); |
| 524 | if (ugp) { | 525 | if (ugp) { |
| 525 | struct cache_head *ch; | 526 | struct cache_head *ch; |
| @@ -819,6 +820,7 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp) | |||
| 819 | kgid_t kgid = make_kgid(&init_user_ns, svc_getnl(argv)); | 820 | kgid_t kgid = make_kgid(&init_user_ns, svc_getnl(argv)); |
| 820 | cred->cr_group_info->gid[i] = kgid; | 821 | cred->cr_group_info->gid[i] = kgid; |
| 821 | } | 822 | } |
| 823 | groups_sort(cred->cr_group_info); | ||
| 822 | if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) { | 824 | if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) { |
| 823 | *authp = rpc_autherr_badverf; | 825 | *authp = rpc_autherr_badverf; |
| 824 | return SVC_DENIED; | 826 | return SVC_DENIED; |
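
groups_sort() is added at every one of these sites because they fill a group_info by hand, and the kernel's groups_search() does a binary search over the gid array; handing it an unsorted vector makes membership checks, and therefore permission checks, fail at random. The failure mode in user-space terms:

    #include <stdio.h>
    #include <stdlib.h>

    typedef unsigned int gid;

    static int gid_cmp(const void *a, const void *b)
    {
            gid x = *(const gid *)a, y = *(const gid *)b;
            return x < y ? -1 : x > y;
    }

    static int groups_search(const gid *gids, size_t n, gid g)
    {
            /* bsearch has undefined results on unsorted input */
            return bsearch(&g, gids, n, sizeof(*gids), gid_cmp) != NULL;
    }

    int main(void)
    {
            gid gids[] = { 42, 7, 99, 13 };         /* as parsed, unsorted */
            size_t n = sizeof(gids) / sizeof(*gids);

            printf("unsorted: %d\n", groups_search(gids, n, 13)); /* misses */
            qsort(gids, n, sizeof(*gids), gid_cmp); /* the groups_sort() step */
            printf("sorted:   %d\n", groups_search(gids, n, 13)); /* 1 */
            return 0;
    }
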
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index 333b9d697ae5..33b74fd84051 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c | |||
| @@ -1001,6 +1001,7 @@ void xprt_transmit(struct rpc_task *task) | |||
| 1001 | { | 1001 | { |
| 1002 | struct rpc_rqst *req = task->tk_rqstp; | 1002 | struct rpc_rqst *req = task->tk_rqstp; |
| 1003 | struct rpc_xprt *xprt = req->rq_xprt; | 1003 | struct rpc_xprt *xprt = req->rq_xprt; |
| 1004 | unsigned int connect_cookie; | ||
| 1004 | int status, numreqs; | 1005 | int status, numreqs; |
| 1005 | 1006 | ||
| 1006 | dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen); | 1007 | dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen); |
| @@ -1024,6 +1025,7 @@ void xprt_transmit(struct rpc_task *task) | |||
| 1024 | } else if (!req->rq_bytes_sent) | 1025 | } else if (!req->rq_bytes_sent) |
| 1025 | return; | 1026 | return; |
| 1026 | 1027 | ||
| 1028 | connect_cookie = xprt->connect_cookie; | ||
| 1027 | req->rq_xtime = ktime_get(); | 1029 | req->rq_xtime = ktime_get(); |
| 1028 | status = xprt->ops->send_request(task); | 1030 | status = xprt->ops->send_request(task); |
| 1029 | trace_xprt_transmit(xprt, req->rq_xid, status); | 1031 | trace_xprt_transmit(xprt, req->rq_xid, status); |
| @@ -1047,20 +1049,28 @@ void xprt_transmit(struct rpc_task *task) | |||
| 1047 | xprt->stat.bklog_u += xprt->backlog.qlen; | 1049 | xprt->stat.bklog_u += xprt->backlog.qlen; |
| 1048 | xprt->stat.sending_u += xprt->sending.qlen; | 1050 | xprt->stat.sending_u += xprt->sending.qlen; |
| 1049 | xprt->stat.pending_u += xprt->pending.qlen; | 1051 | xprt->stat.pending_u += xprt->pending.qlen; |
| 1052 | spin_unlock_bh(&xprt->transport_lock); | ||
| 1050 | 1053 | ||
| 1051 | /* Don't race with disconnect */ | 1054 | req->rq_connect_cookie = connect_cookie; |
| 1052 | if (!xprt_connected(xprt)) | 1055 | if (rpc_reply_expected(task) && !READ_ONCE(req->rq_reply_bytes_recvd)) { |
| 1053 | task->tk_status = -ENOTCONN; | ||
| 1054 | else { | ||
| 1055 | /* | 1056 | /* |
| 1056 | * Sleep on the pending queue since | 1057 | * Sleep on the pending queue if we're expecting a reply. |
| 1057 | * we're expecting a reply. | 1058 | * The spinlock ensures atomicity between the test of |
| 1059 | * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on(). | ||
| 1058 | */ | 1060 | */ |
| 1059 | if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task)) | 1061 | spin_lock(&xprt->recv_lock); |
| 1062 | if (!req->rq_reply_bytes_recvd) { | ||
| 1060 | rpc_sleep_on(&xprt->pending, task, xprt_timer); | 1063 | rpc_sleep_on(&xprt->pending, task, xprt_timer); |
| 1061 | req->rq_connect_cookie = xprt->connect_cookie; | 1064 | /* |
| 1065 | * Send an extra queue wakeup call if the | ||
| 1066 | * connection was dropped in case the call to | ||
| 1067 | * rpc_sleep_on() raced. | ||
| 1068 | */ | ||
| 1069 | if (!xprt_connected(xprt)) | ||
| 1070 | xprt_wake_pending_tasks(xprt, -ENOTCONN); | ||
| 1071 | } | ||
| 1072 | spin_unlock(&xprt->recv_lock); | ||
| 1062 | } | 1073 | } |
| 1063 | spin_unlock_bh(&xprt->transport_lock); | ||
| 1064 | } | 1074 | } |
| 1065 | 1075 | ||
| 1066 | static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task) | 1076 | static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task) |
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c index ed34dc0f144c..a3f2ab283aeb 100644 --- a/net/sunrpc/xprtrdma/rpc_rdma.c +++ b/net/sunrpc/xprtrdma/rpc_rdma.c | |||
| @@ -1408,11 +1408,7 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep) | |||
| 1408 | dprintk("RPC: %s: reply %p completes request %p (xid 0x%08x)\n", | 1408 | dprintk("RPC: %s: reply %p completes request %p (xid 0x%08x)\n", |
| 1409 | __func__, rep, req, be32_to_cpu(rep->rr_xid)); | 1409 | __func__, rep, req, be32_to_cpu(rep->rr_xid)); |
| 1410 | 1410 | ||
| 1411 | if (list_empty(&req->rl_registered) && | 1411 | queue_work_on(req->rl_cpu, rpcrdma_receive_wq, &rep->rr_work); |
| 1412 | !test_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags)) | ||
| 1413 | rpcrdma_complete_rqst(rep); | ||
| 1414 | else | ||
| 1415 | queue_work(rpcrdma_receive_wq, &rep->rr_work); | ||
| 1416 | return; | 1412 | return; |
| 1417 | 1413 | ||
| 1418 | out_badstatus: | 1414 | out_badstatus: |
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c index 646c24494ea7..6ee1ad8978f3 100644 --- a/net/sunrpc/xprtrdma/transport.c +++ b/net/sunrpc/xprtrdma/transport.c | |||
| @@ -52,6 +52,7 @@ | |||
| 52 | #include <linux/slab.h> | 52 | #include <linux/slab.h> |
| 53 | #include <linux/seq_file.h> | 53 | #include <linux/seq_file.h> |
| 54 | #include <linux/sunrpc/addr.h> | 54 | #include <linux/sunrpc/addr.h> |
| 55 | #include <linux/smp.h> | ||
| 55 | 56 | ||
| 56 | #include "xprt_rdma.h" | 57 | #include "xprt_rdma.h" |
| 57 | 58 | ||
| @@ -656,6 +657,7 @@ xprt_rdma_allocate(struct rpc_task *task) | |||
| 656 | task->tk_pid, __func__, rqst->rq_callsize, | 657 | task->tk_pid, __func__, rqst->rq_callsize, |
| 657 | rqst->rq_rcvsize, req); | 658 | rqst->rq_rcvsize, req); |
| 658 | 659 | ||
| 660 | req->rl_cpu = smp_processor_id(); | ||
| 659 | req->rl_connect_cookie = 0; /* our reserved value */ | 661 | req->rl_connect_cookie = 0; /* our reserved value */ |
| 660 | rpcrdma_set_xprtdata(rqst, req); | 662 | rpcrdma_set_xprtdata(rqst, req); |
| 661 | rqst->rq_buffer = req->rl_sendbuf->rg_base; | 663 | rqst->rq_buffer = req->rl_sendbuf->rg_base; |
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index 710b3f77db82..8607c029c0dd 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c | |||
| @@ -83,7 +83,7 @@ rpcrdma_alloc_wq(void) | |||
| 83 | struct workqueue_struct *recv_wq; | 83 | struct workqueue_struct *recv_wq; |
| 84 | 84 | ||
| 85 | recv_wq = alloc_workqueue("xprtrdma_receive", | 85 | recv_wq = alloc_workqueue("xprtrdma_receive", |
| 86 | WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_HIGHPRI, | 86 | WQ_MEM_RECLAIM | WQ_HIGHPRI, |
| 87 | 0); | 87 | 0); |
| 88 | if (!recv_wq) | 88 | if (!recv_wq) |
| 89 | return -ENOMEM; | 89 | return -ENOMEM; |
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h index 51686d9eac5f..1342f743f1c4 100644 --- a/net/sunrpc/xprtrdma/xprt_rdma.h +++ b/net/sunrpc/xprtrdma/xprt_rdma.h | |||
| @@ -342,6 +342,7 @@ enum { | |||
| 342 | struct rpcrdma_buffer; | 342 | struct rpcrdma_buffer; |
| 343 | struct rpcrdma_req { | 343 | struct rpcrdma_req { |
| 344 | struct list_head rl_list; | 344 | struct list_head rl_list; |
| 345 | int rl_cpu; | ||
| 345 | unsigned int rl_connect_cookie; | 346 | unsigned int rl_connect_cookie; |
| 346 | struct rpcrdma_buffer *rl_buffer; | 347 | struct rpcrdma_buffer *rl_buffer; |
| 347 | struct rpcrdma_rep *rl_reply; | 348 | struct rpcrdma_rep *rl_reply; |
diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 5d18c0caa92b..41127d0b925e 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c | |||
| @@ -1140,7 +1140,7 @@ void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq, | |||
| 1140 | __skb_dequeue(arrvq); | 1140 | __skb_dequeue(arrvq); |
| 1141 | __skb_queue_tail(inputq, skb); | 1141 | __skb_queue_tail(inputq, skb); |
| 1142 | } | 1142 | } |
| 1143 | refcount_dec(&skb->users); | 1143 | kfree_skb(skb); |
| 1144 | spin_unlock_bh(&inputq->lock); | 1144 | spin_unlock_bh(&inputq->lock); |
| 1145 | continue; | 1145 | continue; |
| 1146 | } | 1146 | } |
diff --git a/net/wireless/Makefile b/net/wireless/Makefile index 278d979c211a..d7d6cb00c47b 100644 --- a/net/wireless/Makefile +++ b/net/wireless/Makefile | |||
| @@ -25,17 +25,45 @@ endif | |||
| 25 | 25 | ||
| 26 | $(obj)/shipped-certs.c: $(wildcard $(srctree)/$(src)/certs/*.x509) | 26 | $(obj)/shipped-certs.c: $(wildcard $(srctree)/$(src)/certs/*.x509) |
| 27 | @$(kecho) " GEN $@" | 27 | @$(kecho) " GEN $@" |
| 28 | @echo '#include "reg.h"' > $@ | 28 | @(set -e; \ |
| 29 | @echo 'const u8 shipped_regdb_certs[] = {' >> $@ | 29 | allf=""; \ |
| 30 | @for f in $^ ; do hexdump -v -e '1/1 "0x%.2x," "\n"' < $$f >> $@ ; done | 30 | for f in $^ ; do \ |
| 31 | @echo '};' >> $@ | 31 | # similar to hexdump -v -e '1/1 "0x%.2x," "\n"' \ |
| 32 | @echo 'unsigned int shipped_regdb_certs_len = sizeof(shipped_regdb_certs);' >> $@ | 32 | thisf=$$(od -An -v -tx1 < $$f | \ |
| 33 | sed -e 's/ /\n/g' | \ | ||
| 34 | sed -e 's/^[0-9a-f]\+$$/\0/;t;d' | \ | ||
| 35 | sed -e 's/^/0x/;s/$$/,/'); \ | ||
| 36 | # file should not be empty - maybe command substitution failed? \ | ||
| 37 | test ! -z "$$thisf";\ | ||
| 38 | allf=$$allf$$thisf;\ | ||
| 39 | done; \ | ||
| 40 | ( \ | ||
| 41 | echo '#include "reg.h"'; \ | ||
| 42 | echo 'const u8 shipped_regdb_certs[] = {'; \ | ||
| 43 | echo "$$allf"; \ | ||
| 44 | echo '};'; \ | ||
| 45 | echo 'unsigned int shipped_regdb_certs_len = sizeof(shipped_regdb_certs);'; \ | ||
| 46 | ) >> $@) | ||
| 33 | 47 | ||
| 34 | $(obj)/extra-certs.c: $(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR:"%"=%) \ | 48 | $(obj)/extra-certs.c: $(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR:"%"=%) \ |
| 35 | $(wildcard $(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR:"%"=%)/*.x509) | 49 | $(wildcard $(CONFIG_CFG80211_EXTRA_REGDB_KEYDIR:"%"=%)/*.x509) |
| 36 | @$(kecho) " GEN $@" | 50 | @$(kecho) " GEN $@" |
| 37 | @echo '#include "reg.h"' > $@ | 51 | @(set -e; \ |
| 38 | @echo 'const u8 extra_regdb_certs[] = {' >> $@ | 52 | allf=""; \ |
| 39 | @for f in $^ ; do test -f $$f && hexdump -v -e '1/1 "0x%.2x," "\n"' < $$f >> $@ || true ; done | 53 | for f in $^ ; do \ |
| 40 | @echo '};' >> $@ | 54 | # similar to hexdump -v -e '1/1 "0x%.2x," "\n"' \ |
| 41 | @echo 'unsigned int extra_regdb_certs_len = sizeof(extra_regdb_certs);' >> $@ | 55 | thisf=$$(od -An -v -tx1 < $$f | \ |
| 56 | sed -e 's/ /\n/g' | \ | ||
| 57 | sed -e 's/^[0-9a-f]\+$$/\0/;t;d' | \ | ||
| 58 | sed -e 's/^/0x/;s/$$/,/'); \ | ||
| 59 | # file should not be empty - maybe command substitution failed? \ | ||
| 60 | test ! -z "$$thisf";\ | ||
| 61 | allf=$$allf$$thisf;\ | ||
| 62 | done; \ | ||
| 63 | ( \ | ||
| 64 | echo '#include "reg.h"'; \ | ||
| 65 | echo 'const u8 extra_regdb_certs[] = {'; \ | ||
| 66 | echo "$$allf"; \ | ||
| 67 | echo '};'; \ | ||
| 68 | echo 'unsigned int extra_regdb_certs_len = sizeof(extra_regdb_certs);'; \ | ||
| 69 | ) >> $@) | ||
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 040aa79e1d9d..31031f10fe56 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl | |||
| @@ -6233,28 +6233,6 @@ sub process { | |||
| 6233 | } | 6233 | } |
| 6234 | } | 6234 | } |
| 6235 | 6235 | ||
| 6236 | # whine about ACCESS_ONCE | ||
| 6237 | if ($^V && $^V ge 5.10.0 && | ||
| 6238 | $line =~ /\bACCESS_ONCE\s*$balanced_parens\s*(=(?!=))?\s*($FuncArg)?/) { | ||
| 6239 | my $par = $1; | ||
| 6240 | my $eq = $2; | ||
| 6241 | my $fun = $3; | ||
| 6242 | $par =~ s/^\(\s*(.*)\s*\)$/$1/; | ||
| 6243 | if (defined($eq)) { | ||
| 6244 | if (WARN("PREFER_WRITE_ONCE", | ||
| 6245 | "Prefer WRITE_ONCE(<FOO>, <BAR>) over ACCESS_ONCE(<FOO>) = <BAR>\n" . $herecurr) && | ||
| 6246 | $fix) { | ||
| 6247 | $fixed[$fixlinenr] =~ s/\bACCESS_ONCE\s*\(\s*\Q$par\E\s*\)\s*$eq\s*\Q$fun\E/WRITE_ONCE($par, $fun)/; | ||
| 6248 | } | ||
| 6249 | } else { | ||
| 6250 | if (WARN("PREFER_READ_ONCE", | ||
| 6251 | "Prefer READ_ONCE(<FOO>) over ACCESS_ONCE(<FOO>)\n" . $herecurr) && | ||
| 6252 | $fix) { | ||
| 6253 | $fixed[$fixlinenr] =~ s/\bACCESS_ONCE\s*\(\s*\Q$par\E\s*\)/READ_ONCE($par)/; | ||
| 6254 | } | ||
| 6255 | } | ||
| 6256 | } | ||
| 6257 | |||
| 6258 | # check for mutex_trylock_recursive usage | 6236 | # check for mutex_trylock_recursive usage |
| 6259 | if ($line =~ /mutex_trylock_recursive/) { | 6237 | if ($line =~ /mutex_trylock_recursive/) { |
| 6260 | ERROR("LOCKING", | 6238 | ERROR("LOCKING", |
diff --git a/scripts/faddr2line b/scripts/faddr2line index 39e07d8574dd..7721d5b2b0c0 100755 --- a/scripts/faddr2line +++ b/scripts/faddr2line | |||
| @@ -44,10 +44,10 @@ | |||
| 44 | set -o errexit | 44 | set -o errexit |
| 45 | set -o nounset | 45 | set -o nounset |
| 46 | 46 | ||
| 47 | READELF="${CROSS_COMPILE}readelf" | 47 | READELF="${CROSS_COMPILE:-}readelf" |
| 48 | ADDR2LINE="${CROSS_COMPILE}addr2line" | 48 | ADDR2LINE="${CROSS_COMPILE:-}addr2line" |
| 49 | SIZE="${CROSS_COMPILE}size" | 49 | SIZE="${CROSS_COMPILE:-}size" |
| 50 | NM="${CROSS_COMPILE}nm" | 50 | NM="${CROSS_COMPILE:-}nm" |
| 51 | 51 | ||
| 52 | command -v awk >/dev/null 2>&1 || die "awk isn't installed" | 52 | command -v awk >/dev/null 2>&1 || die "awk isn't installed" |
| 53 | command -v ${READELF} >/dev/null 2>&1 || die "readelf isn't installed" | 53 | command -v ${READELF} >/dev/null 2>&1 || die "readelf isn't installed" |
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h index c0b0e9e8aa66..800104c8a3ed 100644 --- a/tools/arch/x86/include/asm/cpufeatures.h +++ b/tools/arch/x86/include/asm/cpufeatures.h | |||
| @@ -266,6 +266,7 @@ | |||
| 266 | /* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */ | 266 | /* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */ |
| 267 | #define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */ | 267 | #define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */ |
| 268 | #define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */ | 268 | #define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */ |
| 269 | #define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */ | ||
| 269 | 270 | ||
| 270 | /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */ | 271 | /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */ |
| 271 | #define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ | 272 | #define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */ |
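Note: the new bit follows the word/bit encoding used throughout this header: word 13 maps to CPUID leaf 0x80000008 EBX, so (13*32 + 2) is bit 2 of that register. A sketch of testing the bit from userspace with the compiler's cpuid helper (the function below is illustrative, not part of the kernel headers):

    #include <cpuid.h>      /* GCC/clang helper, x86 only */
    #include <stdbool.h>

    /* CPUID leaf 0x80000008, EBX bit 2 == X86_FEATURE_XSAVEERPTR */
    static bool cpu_has_xsaveerptr(void)
    {
            unsigned int eax, ebx, ecx, edx;

            if (!__get_cpuid(0x80000008, &eax, &ebx, &ecx, &edx))
                    return false;
            return ebx & (1u << 2);
    }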
diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h index 07fd03c74a77..04e32f965ad7 100644 --- a/tools/include/linux/compiler.h +++ b/tools/include/linux/compiler.h | |||
| @@ -84,8 +84,6 @@ | |||
| 84 | 84 | ||
| 85 | #define uninitialized_var(x) x = *(&(x)) | 85 | #define uninitialized_var(x) x = *(&(x)) |
| 86 | 86 | ||
| 87 | #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) | ||
| 88 | |||
| 89 | #include <linux/types.h> | 87 | #include <linux/types.h> |
| 90 | 88 | ||
| 91 | /* | 89 | /* |
| @@ -135,20 +133,19 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s | |||
| 135 | /* | 133 | /* |
| 136 | * Prevent the compiler from merging or refetching reads or writes. The | 134 | * Prevent the compiler from merging or refetching reads or writes. The |
| 137 | * compiler is also forbidden from reordering successive instances of | 135 | * compiler is also forbidden from reordering successive instances of |
| 138 | * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the | 136 | * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some |
| 139 | * compiler is aware of some particular ordering. One way to make the | 137 | * particular ordering. One way to make the compiler aware of ordering is to |
| 140 | * compiler aware of ordering is to put the two invocations of READ_ONCE, | 138 | * put the two invocations of READ_ONCE or WRITE_ONCE in different C |
| 141 | * WRITE_ONCE or ACCESS_ONCE() in different C statements. | 139 | * statements. |
| 142 | * | 140 | * |
| 143 | * In contrast to ACCESS_ONCE these two macros will also work on aggregate | 141 | * These two macros will also work on aggregate data types like structs or |
| 144 | * data types like structs or unions. If the size of the accessed data | 142 | * unions. If the size of the accessed data type exceeds the word size of |
| 145 | * type exceeds the word size of the machine (e.g., 32 bits or 64 bits) | 143 | * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will |
| 146 | * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a | 144 | * fall back to memcpy and print a compile-time warning. |
| 147 | * compile-time warning. | ||
| 148 | * | 145 | * |
| 149 | * Their two major use cases are: (1) Mediating communication between | 146 | * Their two major use cases are: (1) Mediating communication between |
| 150 | * process-level code and irq/NMI handlers, all running on the same CPU, | 147 | * process-level code and irq/NMI handlers, all running on the same CPU, |
| 151 | * and (2) Ensuring that the compiler does not fold, spindle, or otherwise | 148 | * and (2) Ensuring that the compiler does not fold, spindle, or otherwise |
| 152 | * mutilate accesses that either do not require ordering or that interact | 149 | * mutilate accesses that either do not require ordering or that interact |
| 153 | * with an explicit memory barrier or atomic instruction that provides the | 150 | * with an explicit memory barrier or atomic instruction that provides the |
| 154 | * required ordering. | 151 | * required ordering. |
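Note: the "different C statements" rule in the rewritten comment is easy to miss: two accesses folded into one statement give the compiler latitude to evaluate them in either order. A brief sketch of the intended pattern, reusing the simplified READ_ONCE() stand-in from the checkpatch note above (variable names are illustrative):

    static int flag, data;

    static void reader(int *r1, int *r2)
    {
            /* two separate C statements: the volatile accesses may not
             * be reordered against each other by the compiler */
            *r1 = READ_ONCE(flag);
            *r2 = READ_ONCE(data);
    }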
diff --git a/tools/include/linux/lockdep.h b/tools/include/linux/lockdep.h index 940c1b075659..6b0c36a58fcb 100644 --- a/tools/include/linux/lockdep.h +++ b/tools/include/linux/lockdep.h | |||
| @@ -48,6 +48,7 @@ static inline int debug_locks_off(void) | |||
| 48 | #define printk(...) dprintf(STDOUT_FILENO, __VA_ARGS__) | 48 | #define printk(...) dprintf(STDOUT_FILENO, __VA_ARGS__) |
| 49 | #define pr_err(format, ...) fprintf (stderr, format, ## __VA_ARGS__) | 49 | #define pr_err(format, ...) fprintf (stderr, format, ## __VA_ARGS__) |
| 50 | #define pr_warn pr_err | 50 | #define pr_warn pr_err |
| 51 | #define pr_cont pr_err | ||
| 51 | 52 | ||
| 52 | #define list_del_rcu list_del | 53 | #define list_del_rcu list_del |
| 53 | 54 | ||
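Note: pr_cont() in the kernel continues the previous log line; the userspace lockdep shim has no equivalent, so it is simply aliased to pr_err() and continuation fragments become separate writes to stderr.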
diff --git a/tools/include/uapi/asm/bpf_perf_event.h b/tools/include/uapi/asm/bpf_perf_event.h new file mode 100644 index 000000000000..13a58531e6fa --- /dev/null +++ b/tools/include/uapi/asm/bpf_perf_event.h | |||
| @@ -0,0 +1,7 @@ | |||
| 1 | #if defined(__aarch64__) | ||
| 2 | #include "../../arch/arm64/include/uapi/asm/bpf_perf_event.h" | ||
| 3 | #elif defined(__s390__) | ||
| 4 | #include "../../arch/s390/include/uapi/asm/bpf_perf_event.h" | ||
| 5 | #else | ||
| 6 | #include <uapi/asm-generic/bpf_perf_event.h> | ||
| 7 | #endif | ||
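Note: this wrapper mirrors the kernel's header layout for tools builds: arm64 and s390 define their own bpf_user_pt_regs_t in arch-specific uapi headers, while every other architecture can fall back to the asm-generic version.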
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h index 282d7613fce8..496e59a2738b 100644 --- a/tools/include/uapi/linux/kvm.h +++ b/tools/include/uapi/linux/kvm.h | |||
| @@ -630,9 +630,9 @@ struct kvm_s390_irq { | |||
| 630 | 630 | ||
| 631 | struct kvm_s390_irq_state { | 631 | struct kvm_s390_irq_state { |
| 632 | __u64 buf; | 632 | __u64 buf; |
| 633 | __u32 flags; | 633 | __u32 flags; /* will stay unused for compatibility reasons */ |
| 634 | __u32 len; | 634 | __u32 len; |
| 635 | __u32 reserved[4]; | 635 | __u32 reserved[4]; /* will stay unused for compatibility reasons */ |
| 636 | }; | 636 | }; |
| 637 | 637 | ||
| 638 | /* for KVM_SET_GUEST_DEBUG */ | 638 | /* for KVM_SET_GUEST_DEBUG */ |
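Note: the kernel has never acted on these fields, so retroactively assigning them meaning could break existing callers; the comments pin flags and reserved as permanently unused to keep the struct layout stable for old userspace.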
diff --git a/tools/objtool/arch/x86/lib/x86-opcode-map.txt b/tools/objtool/arch/x86/lib/x86-opcode-map.txt index 12e377184ee4..e0b85930dd77 100644 --- a/tools/objtool/arch/x86/lib/x86-opcode-map.txt +++ b/tools/objtool/arch/x86/lib/x86-opcode-map.txt | |||
| @@ -607,7 +607,7 @@ fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1) | |||
| 607 | fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1) | 607 | fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1) |
| 608 | fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1) | 608 | fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1) |
| 609 | fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1) | 609 | fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1) |
| 610 | ff: | 610 | ff: UD0 |
| 611 | EndTable | 611 | EndTable |
| 612 | 612 | ||
| 613 | Table: 3-byte opcode 1 (0x0f 0x38) | 613 | Table: 3-byte opcode 1 (0x0f 0x38) |
| @@ -717,7 +717,7 @@ AVXcode: 2 | |||
| 717 | 7e: vpermt2d/q Vx,Hx,Wx (66),(ev) | 717 | 7e: vpermt2d/q Vx,Hx,Wx (66),(ev) |
| 718 | 7f: vpermt2ps/d Vx,Hx,Wx (66),(ev) | 718 | 7f: vpermt2ps/d Vx,Hx,Wx (66),(ev) |
| 719 | 80: INVEPT Gy,Mdq (66) | 719 | 80: INVEPT Gy,Mdq (66) |
| 720 | 81: INVPID Gy,Mdq (66) | 720 | 81: INVVPID Gy,Mdq (66) |
| 721 | 82: INVPCID Gy,Mdq (66) | 721 | 82: INVPCID Gy,Mdq (66) |
| 722 | 83: vpmultishiftqb Vx,Hx,Wx (66),(ev) | 722 | 83: vpmultishiftqb Vx,Hx,Wx (66),(ev) |
| 723 | 88: vexpandps/d Vpd,Wpd (66),(ev) | 723 | 88: vexpandps/d Vpd,Wpd (66),(ev) |
| @@ -896,7 +896,7 @@ EndTable | |||
| 896 | 896 | ||
| 897 | GrpTable: Grp3_1 | 897 | GrpTable: Grp3_1 |
| 898 | 0: TEST Eb,Ib | 898 | 0: TEST Eb,Ib |
| 899 | 1: | 899 | 1: TEST Eb,Ib |
| 900 | 2: NOT Eb | 900 | 2: NOT Eb |
| 901 | 3: NEG Eb | 901 | 3: NEG Eb |
| 902 | 4: MUL AL,Eb | 902 | 4: MUL AL,Eb |
| @@ -970,6 +970,15 @@ GrpTable: Grp9 | |||
| 970 | EndTable | 970 | EndTable |
| 971 | 971 | ||
| 972 | GrpTable: Grp10 | 972 | GrpTable: Grp10 |
| 973 | # all are UD1 | ||
| 974 | 0: UD1 | ||
| 975 | 1: UD1 | ||
| 976 | 2: UD1 | ||
| 977 | 3: UD1 | ||
| 978 | 4: UD1 | ||
| 979 | 5: UD1 | ||
| 980 | 6: UD1 | ||
| 981 | 7: UD1 | ||
| 973 | EndTable | 982 | EndTable |
| 974 | 983 | ||
| 975 | # Grp11A and Grp11B are expressed as Grp11 in Intel SDM | 984 | # Grp11A and Grp11B are expressed as Grp11 in Intel SDM |
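Note: three decoder fixes in one hunk: the INVVPID mnemonic typo, the undocumented TEST Eb,Ib alias at Grp3_1 /1, and explicit entries for the "defined undefined" UD0/UD1 opcodes so objtool's instruction decoder recognizes them. Byte patterns the updated table now covers, as a reference sketch (the 0xc0 ModRM byte for UD1 is an arbitrary illustrative choice):

    static const unsigned char ud0[] = { 0x0f, 0xff };       /* UD0 */
    static const unsigned char ud1[] = { 0x0f, 0xb9, 0xc0 }; /* UD1, Grp10, takes ModRM */
    static const unsigned char ud2[] = { 0x0f, 0x0b };       /* UD2, already decoded */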
diff --git a/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt b/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt index c4d55919fac1..e0b85930dd77 100644 --- a/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt +++ b/tools/perf/util/intel-pt-decoder/x86-opcode-map.txt | |||
| @@ -607,7 +607,7 @@ fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1) | |||
| 607 | fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1) | 607 | fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1) |
| 608 | fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1) | 608 | fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1) |
| 609 | fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1) | 609 | fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1) |
| 610 | ff: | 610 | ff: UD0 |
| 611 | EndTable | 611 | EndTable |
| 612 | 612 | ||
| 613 | Table: 3-byte opcode 1 (0x0f 0x38) | 613 | Table: 3-byte opcode 1 (0x0f 0x38) |
| @@ -717,7 +717,7 @@ AVXcode: 2 | |||
| 717 | 7e: vpermt2d/q Vx,Hx,Wx (66),(ev) | 717 | 7e: vpermt2d/q Vx,Hx,Wx (66),(ev) |
| 718 | 7f: vpermt2ps/d Vx,Hx,Wx (66),(ev) | 718 | 7f: vpermt2ps/d Vx,Hx,Wx (66),(ev) |
| 719 | 80: INVEPT Gy,Mdq (66) | 719 | 80: INVEPT Gy,Mdq (66) |
| 720 | 81: INVPID Gy,Mdq (66) | 720 | 81: INVVPID Gy,Mdq (66) |
| 721 | 82: INVPCID Gy,Mdq (66) | 721 | 82: INVPCID Gy,Mdq (66) |
| 722 | 83: vpmultishiftqb Vx,Hx,Wx (66),(ev) | 722 | 83: vpmultishiftqb Vx,Hx,Wx (66),(ev) |
| 723 | 88: vexpandps/d Vpd,Wpd (66),(ev) | 723 | 88: vexpandps/d Vpd,Wpd (66),(ev) |
| @@ -970,6 +970,15 @@ GrpTable: Grp9 | |||
| 970 | EndTable | 970 | EndTable |
| 971 | 971 | ||
| 972 | GrpTable: Grp10 | 972 | GrpTable: Grp10 |
| 973 | # all are UD1 | ||
| 974 | 0: UD1 | ||
| 975 | 1: UD1 | ||
| 976 | 2: UD1 | ||
| 977 | 3: UD1 | ||
| 978 | 4: UD1 | ||
| 979 | 5: UD1 | ||
| 980 | 6: UD1 | ||
| 981 | 7: UD1 | ||
| 973 | EndTable | 982 | EndTable |
| 974 | 983 | ||
| 975 | # Grp11A and Grp11B are expressed as Grp11 in Intel SDM | 984 | # Grp11A and Grp11B are expressed as Grp11 in Intel SDM |
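Note: the same change applied to perf's private copy of the opcode map; the two files are intentionally identical, and the perf build compares its decoder sources against the kernel's, warning when they drift apart.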
diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h index efd78b827b05..3a5cb5a6e94a 100644 --- a/tools/perf/util/mmap.h +++ b/tools/perf/util/mmap.h | |||
| @@ -70,7 +70,7 @@ void perf_mmap__read_catchup(struct perf_mmap *md); | |||
| 70 | static inline u64 perf_mmap__read_head(struct perf_mmap *mm) | 70 | static inline u64 perf_mmap__read_head(struct perf_mmap *mm) |
| 71 | { | 71 | { |
| 72 | struct perf_event_mmap_page *pc = mm->base; | 72 | struct perf_event_mmap_page *pc = mm->base; |
| 73 | u64 head = ACCESS_ONCE(pc->data_head); | 73 | u64 head = READ_ONCE(pc->data_head); |
| 74 | rmb(); | 74 | rmb(); |
| 75 | return head; | 75 | return head; |
| 76 | } | 76 | } |
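Note: READ_ONCE() here is a straight substitution for the removed ACCESS_ONCE(); the load/rmb() pair is the reader half of the perf ring-buffer protocol. A hypothetical C11-atomics rendering of the same idea (names are illustrative, not perf's API):

    #include <stdatomic.h>
    #include <stdint.h>

    struct ring_page { _Atomic uint64_t data_head; };

    static uint64_t ring_read_head(struct ring_page *pc)
    {
            /* an acquire load plays the role of READ_ONCE() + rmb():
             * data written before the producer advanced data_head is
             * guaranteed visible after this load */
            return atomic_load_explicit(&pc->data_head, memory_order_acquire);
    }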
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile index 21a2d76b67dc..792af7c3b74f 100644 --- a/tools/testing/selftests/bpf/Makefile +++ b/tools/testing/selftests/bpf/Makefile | |||
| @@ -1,19 +1,8 @@ | |||
| 1 | # SPDX-License-Identifier: GPL-2.0 | 1 | # SPDX-License-Identifier: GPL-2.0 |
| 2 | 2 | ||
| 3 | ifeq ($(srctree),) | ||
| 4 | srctree := $(patsubst %/,%,$(dir $(CURDIR))) | ||
| 5 | srctree := $(patsubst %/,%,$(dir $(srctree))) | ||
| 6 | srctree := $(patsubst %/,%,$(dir $(srctree))) | ||
| 7 | srctree := $(patsubst %/,%,$(dir $(srctree))) | ||
| 8 | endif | ||
| 9 | include $(srctree)/tools/scripts/Makefile.arch | ||
| 10 | |||
| 11 | $(call detected_var,SRCARCH) | ||
| 12 | |||
| 13 | LIBDIR := ../../../lib | 3 | LIBDIR := ../../../lib |
| 14 | BPFDIR := $(LIBDIR)/bpf | 4 | BPFDIR := $(LIBDIR)/bpf |
| 15 | APIDIR := ../../../include/uapi | 5 | APIDIR := ../../../include/uapi |
| 16 | ASMDIR:= ../../../arch/$(ARCH)/include/uapi | ||
| 17 | GENDIR := ../../../../include/generated | 6 | GENDIR := ../../../../include/generated |
| 18 | GENHDR := $(GENDIR)/autoconf.h | 7 | GENHDR := $(GENDIR)/autoconf.h |
| 19 | 8 | ||
| @@ -21,7 +10,7 @@ ifneq ($(wildcard $(GENHDR)),) | |||
| 21 | GENFLAGS := -DHAVE_GENHDR | 10 | GENFLAGS := -DHAVE_GENHDR |
| 22 | endif | 11 | endif |
| 23 | 12 | ||
| 24 | CFLAGS += -Wall -O2 -I$(APIDIR) -I$(ASMDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include | 13 | CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include |
| 25 | LDLIBS += -lcap -lelf | 14 | LDLIBS += -lcap -lelf |
| 26 | 15 | ||
| 27 | TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \ | 16 | TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \ |
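Note: the srctree probing and the per-arch ASMDIR include path existed only to locate asm/bpf_perf_event.h; with the dispatch wrapper added under tools/include/uapi/asm/ (see above), the selftests build no longer needs to know the target architecture at all.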
diff --git a/tools/usb/usbip/libsrc/vhci_driver.c b/tools/usb/usbip/libsrc/vhci_driver.c index 8a1cd1616de4..c9c81614a66a 100644 --- a/tools/usb/usbip/libsrc/vhci_driver.c +++ b/tools/usb/usbip/libsrc/vhci_driver.c | |||
| @@ -50,14 +50,14 @@ static int parse_status(const char *value) | |||
| 50 | 50 | ||
| 51 | while (*c != '\0') { | 51 | while (*c != '\0') { |
| 52 | int port, status, speed, devid; | 52 | int port, status, speed, devid; |
| 53 | unsigned long socket; | 53 | int sockfd; |
| 54 | char lbusid[SYSFS_BUS_ID_SIZE]; | 54 | char lbusid[SYSFS_BUS_ID_SIZE]; |
| 55 | struct usbip_imported_device *idev; | 55 | struct usbip_imported_device *idev; |
| 56 | char hub[3]; | 56 | char hub[3]; |
| 57 | 57 | ||
| 58 | ret = sscanf(c, "%2s %d %d %d %x %lx %31s\n", | 58 | ret = sscanf(c, "%2s %d %d %d %x %u %31s\n", |
| 59 | hub, &port, &status, &speed, | 59 | hub, &port, &status, &speed, |
| 60 | &devid, &socket, lbusid); | 60 | &devid, &sockfd, lbusid); |
| 61 | 61 | ||
| 62 | if (ret < 5) { | 62 | if (ret < 5) { |
| 63 | dbg("sscanf failed: %d", ret); | 63 | dbg("sscanf failed: %d", ret); |
| @@ -66,7 +66,7 @@ static int parse_status(const char *value) | |||
| 66 | 66 | ||
| 67 | dbg("hub %s port %d status %d speed %d devid %x", | 67 | dbg("hub %s port %d status %d speed %d devid %x", |
| 68 | hub, port, status, speed, devid); | 68 | hub, port, status, speed, devid); |
| 69 | dbg("socket %lx lbusid %s", socket, lbusid); | 69 | dbg("sockfd %u lbusid %s", sockfd, lbusid); |
| 70 | 70 | ||
| 71 | /* if a device is connected, look at it */ | 71 | /* if a device is connected, look at it */ |
| 72 | idev = &vhci_driver->idev[port]; | 72 | idev = &vhci_driver->idev[port]; |
| @@ -106,7 +106,7 @@ static int parse_status(const char *value) | |||
| 106 | return 0; | 106 | return 0; |
| 107 | } | 107 | } |
| 108 | 108 | ||
| 109 | #define MAX_STATUS_NAME 16 | 109 | #define MAX_STATUS_NAME 18 |
| 110 | 110 | ||
| 111 | static int refresh_imported_device_list(void) | 111 | static int refresh_imported_device_list(void) |
| 112 | { | 112 | { |
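Note: the vhci status attribute used to expose the kernel's socket pointer, leaking a kernel address to userspace; the corresponding kernel change exports the socket file descriptor number instead, so the parser switches from an unsigned long read with %lx to an int read with %u, and MAX_STATUS_NAME is enlarged to match the attribute naming.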
diff --git a/tools/virtio/ringtest/ptr_ring.c b/tools/virtio/ringtest/ptr_ring.c index 38bb171aceba..e6e81305ef46 100644 --- a/tools/virtio/ringtest/ptr_ring.c +++ b/tools/virtio/ringtest/ptr_ring.c | |||
| @@ -16,24 +16,41 @@ | |||
| 16 | #define unlikely(x) (__builtin_expect(!!(x), 0)) | 16 | #define unlikely(x) (__builtin_expect(!!(x), 0)) |
| 17 | #define likely(x) (__builtin_expect(!!(x), 1)) | 17 | #define likely(x) (__builtin_expect(!!(x), 1)) |
| 18 | #define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a)) | 18 | #define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a)) |
| 19 | #define SIZE_MAX (~(size_t)0) | ||
| 20 | |||
| 19 | typedef pthread_spinlock_t spinlock_t; | 21 | typedef pthread_spinlock_t spinlock_t; |
| 20 | 22 | ||
| 21 | typedef int gfp_t; | 23 | typedef int gfp_t; |
| 22 | static void *kmalloc(unsigned size, gfp_t gfp) | 24 | #define __GFP_ZERO 0x1 |
| 23 | { | ||
| 24 | return memalign(64, size); | ||
| 25 | } | ||
| 26 | 25 | ||
| 27 | static void *kzalloc(unsigned size, gfp_t gfp) | 26 | static void *kmalloc(unsigned size, gfp_t gfp) |
| 28 | { | 27 | { |
| 29 | void *p = memalign(64, size); | 28 | void *p = memalign(64, size); |
| 30 | if (!p) | 29 | if (!p) |
| 31 | return p; | 30 | return p; |
| 32 | memset(p, 0, size); | ||
| 33 | 31 | ||
| 32 | if (gfp & __GFP_ZERO) | ||
| 33 | memset(p, 0, size); | ||
| 34 | return p; | 34 | return p; |
| 35 | } | 35 | } |
| 36 | 36 | ||
| 37 | static inline void *kzalloc(unsigned size, gfp_t flags) | ||
| 38 | { | ||
| 39 | return kmalloc(size, flags | __GFP_ZERO); | ||
| 40 | } | ||
| 41 | |||
| 42 | static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags) | ||
| 43 | { | ||
| 44 | if (size != 0 && n > SIZE_MAX / size) | ||
| 45 | return NULL; | ||
| 46 | return kmalloc(n * size, flags); | ||
| 47 | } | ||
| 48 | |||
| 49 | static inline void *kcalloc(size_t n, size_t size, gfp_t flags) | ||
| 50 | { | ||
| 51 | return kmalloc_array(n, size, flags | __GFP_ZERO); | ||
| 52 | } | ||
| 53 | |||
| 37 | static void kfree(void *p) | 54 | static void kfree(void *p) |
| 38 | { | 55 | { |
| 39 | if (p) | 56 | if (p) |
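Note: kmalloc_array()'s "n > SIZE_MAX / size" test is the standard multiplication-overflow guard; without it the byte count can silently wrap and a tiny allocation would "succeed" for a huge request. A standalone demonstration of the wraparound it prevents (32-bit arithmetic chosen to keep the numbers small):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t n = 65536, size = 65536;   /* models a 32-bit size_t */
            uint32_t bytes = n * size;          /* 2^32 wraps to 0 */

            printf("%u * %u = %u bytes requested\n", n, size, bytes);
            printf("guard trips: %d\n", size != 0 && n > UINT32_MAX / size);
            return 0;
    }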
diff --git a/tools/vm/slabinfo-gnuplot.sh b/tools/vm/slabinfo-gnuplot.sh index 35b039864b77..0cf28aa6f21c 100644 --- a/tools/vm/slabinfo-gnuplot.sh +++ b/tools/vm/slabinfo-gnuplot.sh | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | #!/bin/sh | 1 | #!/bin/bash |
| 2 | 2 | ||
| 3 | # Sergey Senozhatsky, 2015 | 3 | # Sergey Senozhatsky, 2015 |
| 4 | # sergey.senozhatsky.work@gmail.com | 4 | # sergey.senozhatsky.work@gmail.com |
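Note: the script relies on bash-specific constructs, so it failed on distributions where /bin/sh is a stricter POSIX shell such as dash; naming bash in the shebang makes the dependency explicit.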
