diff options
191 files changed, 5123 insertions, 2044 deletions
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt index 4a3109b28847..356fd86f4ea8 100644 --- a/Documentation/power/runtime_pm.txt +++ b/Documentation/power/runtime_pm.txt | |||
| @@ -42,80 +42,81 @@ struct dev_pm_ops { | |||
| 42 | ... | 42 | ... |
| 43 | }; | 43 | }; |
| 44 | 44 | ||
| 45 | The ->runtime_suspend() callback is executed by the PM core for the bus type of | 45 | The ->runtime_suspend(), ->runtime_resume() and ->runtime_idle() callbacks are |
| 46 | the device being suspended. The bus type's callback is then _entirely_ | 46 | executed by the PM core for either the bus type, or device type (if the bus |
| 47 | _responsible_ for handling the device as appropriate, which may, but need not | 47 | type's callback is not defined), or device class (if the bus type's and device |
| 48 | include executing the device driver's own ->runtime_suspend() callback (from the | 48 | type's callbacks are not defined) of given device. The bus type, device type |
| 49 | and device class callbacks are referred to as subsystem-level callbacks in what | ||
| 50 | follows. | ||
| 51 | |||
| 52 | The subsystem-level suspend callback is _entirely_ _responsible_ for handling | ||
| 53 | the suspend of the device as appropriate, which may, but need not include | ||
| 54 | executing the device driver's own ->runtime_suspend() callback (from the | ||
| 49 | PM core's point of view it is not necessary to implement a ->runtime_suspend() | 55 | PM core's point of view it is not necessary to implement a ->runtime_suspend() |
| 50 | callback in a device driver as long as the bus type's ->runtime_suspend() knows | 56 | callback in a device driver as long as the subsystem-level suspend callback |
| 51 | what to do to handle the device). | 57 | knows what to do to handle the device). |
| 52 | 58 | ||
| 53 | * Once the bus type's ->runtime_suspend() callback has completed successfully | 59 | * Once the subsystem-level suspend callback has completed successfully |
| 54 | for given device, the PM core regards the device as suspended, which need | 60 | for given device, the PM core regards the device as suspended, which need |
| 55 | not mean that the device has been put into a low power state. It is | 61 | not mean that the device has been put into a low power state. It is |
| 56 | supposed to mean, however, that the device will not process data and will | 62 | supposed to mean, however, that the device will not process data and will |
| 57 | not communicate with the CPU(s) and RAM until its bus type's | 63 | not communicate with the CPU(s) and RAM until the subsystem-level resume |
| 58 | ->runtime_resume() callback is executed for it. The run-time PM status of | 64 | callback is executed for it. The run-time PM status of a device after |
| 59 | a device after successful execution of its bus type's ->runtime_suspend() | 65 | successful execution of the subsystem-level suspend callback is 'suspended'. |
| 60 | callback is 'suspended'. | 66 | |
| 61 | 67 | * If the subsystem-level suspend callback returns -EBUSY or -EAGAIN, | |
| 62 | * If the bus type's ->runtime_suspend() callback returns -EBUSY or -EAGAIN, | 68 | the device's run-time PM status is 'active', which means that the device |
| 63 | the device's run-time PM status is supposed to be 'active', which means that | 69 | _must_ be fully operational afterwards. |
| 64 | the device _must_ be fully operational afterwards. | 70 | |
| 65 | 71 | * If the subsystem-level suspend callback returns an error code different | |
| 66 | * If the bus type's ->runtime_suspend() callback returns an error code | 72 | from -EBUSY or -EAGAIN, the PM core regards this as a fatal error and will |
| 67 | different from -EBUSY or -EAGAIN, the PM core regards this as a fatal | 73 | refuse to run the helper functions described in Section 4 for the device, |
| 68 | error and will refuse to run the helper functions described in Section 4 | 74 | until the status of it is directly set either to 'active', or to 'suspended' |
| 69 | for the device, until the status of it is directly set either to 'active' | 75 | (the PM core provides special helper functions for this purpose). |
| 70 | or to 'suspended' (the PM core provides special helper functions for this | 76 | |
| 71 | purpose). | 77 | In particular, if the driver requires remote wake-up capability (i.e. hardware |
| 72 | 78 | mechanism allowing the device to request a change of its power state, such as | |
| 73 | In particular, if the driver requires remote wakeup capability for proper | 79 | PCI PME) for proper functioning and device_run_wake() returns 'false' for the |
| 74 | functioning and device_run_wake() returns 'false' for the device, then | 80 | device, then ->runtime_suspend() should return -EBUSY. On the other hand, if |
| 75 | ->runtime_suspend() should return -EBUSY. On the other hand, if | 81 | device_run_wake() returns 'true' for the device and the device is put into a low |
| 76 | device_run_wake() returns 'true' for the device and the device is put | 82 | power state during the execution of the subsystem-level suspend callback, it is |
| 77 | into a low power state during the execution of its bus type's | 83 | expected that remote wake-up will be enabled for the device. Generally, remote |
| 78 | ->runtime_suspend(), it is expected that remote wake-up (i.e. hardware mechanism | 84 | wake-up should be enabled for all input devices put into a low power state at |
| 79 | allowing the device to request a change of its power state, such as PCI PME) | 85 | run time. |
| 80 | will be enabled for the device. Generally, remote wake-up should be enabled | 86 | |
| 81 | for all input devices put into a low power state at run time. | 87 | The subsystem-level resume callback is _entirely_ _responsible_ for handling the |
| 82 | 88 | resume of the device as appropriate, which may, but need not include executing | |
| 83 | The ->runtime_resume() callback is executed by the PM core for the bus type of | 89 | the device driver's own ->runtime_resume() callback (from the PM core's point of |
| 84 | the device being woken up. The bus type's callback is then _entirely_ | 90 | view it is not necessary to implement a ->runtime_resume() callback in a device |
| 85 | _responsible_ for handling the device as appropriate, which may, but need not | 91 | driver as long as the subsystem-level resume callback knows what to do to handle |
| 86 | include executing the device driver's own ->runtime_resume() callback (from the | 92 | the device). |
| 87 | PM core's point of view it is not necessary to implement a ->runtime_resume() | 93 | |
| 88 | callback in a device driver as long as the bus type's ->runtime_resume() knows | 94 | * Once the subsystem-level resume callback has completed successfully, the PM |
| 89 | what to do to handle the device). | 95 | core regards the device as fully operational, which means that the device |
| 90 | 96 | _must_ be able to complete I/O operations as needed. The run-time PM status | |
| 91 | * Once the bus type's ->runtime_resume() callback has completed successfully, | 97 | of the device is then 'active'. |
| 92 | the PM core regards the device as fully operational, which means that the | 98 | |
| 93 | device _must_ be able to complete I/O operations as needed. The run-time | 99 | * If the subsystem-level resume callback returns an error code, the PM core |
| 94 | PM status of the device is then 'active'. | 100 | regards this as a fatal error and will refuse to run the helper functions |
| 95 | 101 | described in Section 4 for the device, until its status is directly set | |
| 96 | * If the bus type's ->runtime_resume() callback returns an error code, the PM | 102 | either to 'active' or to 'suspended' (the PM core provides special helper |
| 97 | core regards this as a fatal error and will refuse to run the helper | 103 | functions for this purpose). |
| 98 | functions described in Section 4 for the device, until its status is | 104 | |
| 99 | directly set either to 'active' or to 'suspended' (the PM core provides | 105 | The subsystem-level idle callback is executed by the PM core whenever the device |
| 100 | special helper functions for this purpose). | 106 | appears to be idle, which is indicated to the PM core by two counters, the |
| 101 | 107 | device's usage counter and the counter of 'active' children of the device. | |
| 102 | The ->runtime_idle() callback is executed by the PM core for the bus type of | ||
| 103 | given device whenever the device appears to be idle, which is indicated to the | ||
| 104 | PM core by two counters, the device's usage counter and the counter of 'active' | ||
| 105 | children of the device. | ||
| 106 | 108 | ||
| 107 | * If any of these counters is decreased using a helper function provided by | 109 | * If any of these counters is decreased using a helper function provided by |
| 108 | the PM core and it turns out to be equal to zero, the other counter is | 110 | the PM core and it turns out to be equal to zero, the other counter is |
| 109 | checked. If that counter also is equal to zero, the PM core executes the | 111 | checked. If that counter also is equal to zero, the PM core executes the |
| 110 | device bus type's ->runtime_idle() callback (with the device as an | 112 | subsystem-level idle callback with the device as an argument. |
| 111 | argument). | ||
| 112 | 113 | ||
| 113 | The action performed by a bus type's ->runtime_idle() callback is totally | 114 | The action performed by a subsystem-level idle callback is totally dependent on |
| 114 | dependent on the bus type in question, but the expected and recommended action | 115 | the subsystem in question, but the expected and recommended action is to check |
| 115 | is to check if the device can be suspended (i.e. if all of the conditions | 116 | if the device can be suspended (i.e. if all of the conditions necessary for |
| 116 | necessary for suspending the device are satisfied) and to queue up a suspend | 117 | suspending the device are satisfied) and to queue up a suspend request for the |
| 117 | request for the device in that case. The value returned by this callback is | 118 | device in that case. The value returned by this callback is ignored by the PM |
| 118 | ignored by the PM core. | 119 | core. |
| 119 | 120 | ||
| 120 | The helper functions provided by the PM core, described in Section 4, guarantee | 121 | The helper functions provided by the PM core, described in Section 4, guarantee |
| 121 | that the following constraints are met with respect to the bus type's run-time | 122 | that the following constraints are met with respect to the bus type's run-time |
| @@ -238,41 +239,41 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h: | |||
| 238 | removing the device from device hierarchy | 239 | removing the device from device hierarchy |
| 239 | 240 | ||
| 240 | int pm_runtime_idle(struct device *dev); | 241 | int pm_runtime_idle(struct device *dev); |
| 241 | - execute ->runtime_idle() for the device's bus type; returns 0 on success | 242 | - execute the subsystem-level idle callback for the device; returns 0 on |
| 242 | or error code on failure, where -EINPROGRESS means that ->runtime_idle() | 243 | success or error code on failure, where -EINPROGRESS means that |
| 243 | is already being executed | 244 | ->runtime_idle() is already being executed |
| 244 | 245 | ||
| 245 | int pm_runtime_suspend(struct device *dev); | 246 | int pm_runtime_suspend(struct device *dev); |
| 246 | - execute ->runtime_suspend() for the device's bus type; returns 0 on | 247 | - execute the subsystem-level suspend callback for the device; returns 0 on |
| 247 | success, 1 if the device's run-time PM status was already 'suspended', or | 248 | success, 1 if the device's run-time PM status was already 'suspended', or |
| 248 | error code on failure, where -EAGAIN or -EBUSY means it is safe to attempt | 249 | error code on failure, where -EAGAIN or -EBUSY means it is safe to attempt |
| 249 | to suspend the device again in future | 250 | to suspend the device again in future |
| 250 | 251 | ||
| 251 | int pm_runtime_resume(struct device *dev); | 252 | int pm_runtime_resume(struct device *dev); |
| 252 | - execute ->runtime_resume() for the device's bus type; returns 0 on | 253 | - execute the subsystem-level resume callback for the device; returns 0 on |
| 253 | success, 1 if the device's run-time PM status was already 'active' or | 254 | success, 1 if the device's run-time PM status was already 'active' or |
| 254 | error code on failure, where -EAGAIN means it may be safe to attempt to | 255 | error code on failure, where -EAGAIN means it may be safe to attempt to |
| 255 | resume the device again in future, but 'power.runtime_error' should be | 256 | resume the device again in future, but 'power.runtime_error' should be |
| 256 | checked additionally | 257 | checked additionally |
| 257 | 258 | ||
| 258 | int pm_request_idle(struct device *dev); | 259 | int pm_request_idle(struct device *dev); |
| 259 | - submit a request to execute ->runtime_idle() for the device's bus type | 260 | - submit a request to execute the subsystem-level idle callback for the |
| 260 | (the request is represented by a work item in pm_wq); returns 0 on success | 261 | device (the request is represented by a work item in pm_wq); returns 0 on |
| 261 | or error code if the request has not been queued up | 262 | success or error code if the request has not been queued up |
| 262 | 263 | ||
| 263 | int pm_schedule_suspend(struct device *dev, unsigned int delay); | 264 | int pm_schedule_suspend(struct device *dev, unsigned int delay); |
| 264 | - schedule the execution of ->runtime_suspend() for the device's bus type | 265 | - schedule the execution of the subsystem-level suspend callback for the |
| 265 | in future, where 'delay' is the time to wait before queuing up a suspend | 266 | device in future, where 'delay' is the time to wait before queuing up a |
| 266 | work item in pm_wq, in milliseconds (if 'delay' is zero, the work item is | 267 | suspend work item in pm_wq, in milliseconds (if 'delay' is zero, the work |
| 267 | queued up immediately); returns 0 on success, 1 if the device's PM | 268 | item is queued up immediately); returns 0 on success, 1 if the device's PM |
| 268 | run-time status was already 'suspended', or error code if the request | 269 | run-time status was already 'suspended', or error code if the request |
| 269 | hasn't been scheduled (or queued up if 'delay' is 0); if the execution of | 270 | hasn't been scheduled (or queued up if 'delay' is 0); if the execution of |
| 270 | ->runtime_suspend() is already scheduled and not yet expired, the new | 271 | ->runtime_suspend() is already scheduled and not yet expired, the new |
| 271 | value of 'delay' will be used as the time to wait | 272 | value of 'delay' will be used as the time to wait |
| 272 | 273 | ||
| 273 | int pm_request_resume(struct device *dev); | 274 | int pm_request_resume(struct device *dev); |
| 274 | - submit a request to execute ->runtime_resume() for the device's bus type | 275 | - submit a request to execute the subsystem-level resume callback for the |
| 275 | (the request is represented by a work item in pm_wq); returns 0 on | 276 | device (the request is represented by a work item in pm_wq); returns 0 on |
| 276 | success, 1 if the device's run-time PM status was already 'active', or | 277 | success, 1 if the device's run-time PM status was already 'active', or |
| 277 | error code if the request hasn't been queued up | 278 | error code if the request hasn't been queued up |
| 278 | 279 | ||
| @@ -303,12 +304,12 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h: | |||
| 303 | run-time PM callbacks described in Section 2 | 304 | run-time PM callbacks described in Section 2 |
| 304 | 305 | ||
| 305 | int pm_runtime_disable(struct device *dev); | 306 | int pm_runtime_disable(struct device *dev); |
| 306 | - prevent the run-time PM helper functions from running the device bus | 307 | - prevent the run-time PM helper functions from running subsystem-level |
| 307 | type's run-time PM callbacks, make sure that all of the pending run-time | 308 | run-time PM callbacks for the device, make sure that all of the pending |
| 308 | PM operations on the device are either completed or canceled; returns | 309 | run-time PM operations on the device are either completed or canceled; |
| 309 | 1 if there was a resume request pending and it was necessary to execute | 310 | returns 1 if there was a resume request pending and it was necessary to |
| 310 | ->runtime_resume() for the device's bus type to satisfy that request, | 311 | execute the subsystem-level resume callback for the device to satisfy that |
| 311 | otherwise 0 is returned | 312 | request, otherwise 0 is returned |
| 312 | 313 | ||
| 313 | void pm_suspend_ignore_children(struct device *dev, bool enable); | 314 | void pm_suspend_ignore_children(struct device *dev, bool enable); |
| 314 | - set/unset the power.ignore_children flag of the device | 315 | - set/unset the power.ignore_children flag of the device |
| @@ -378,5 +379,55 @@ pm_runtime_suspend() or pm_runtime_idle() or their asynchronous counterparts, | |||
| 378 | they will fail returning -EAGAIN, because the device's usage counter is | 379 | they will fail returning -EAGAIN, because the device's usage counter is |
| 379 | incremented by the core before executing ->probe() and ->remove(). Still, it | 380 | incremented by the core before executing ->probe() and ->remove(). Still, it |
| 380 | may be desirable to suspend the device as soon as ->probe() or ->remove() has | 381 | may be desirable to suspend the device as soon as ->probe() or ->remove() has |
| 381 | finished, so the PM core uses pm_runtime_idle_sync() to invoke the device bus | 382 | finished, so the PM core uses pm_runtime_idle_sync() to invoke the |
| 382 | type's ->runtime_idle() callback at that time. | 383 | subsystem-level idle callback for the device at that time. |
| 384 | |||
| 385 | 6. Run-time PM and System Sleep | ||
| 386 | |||
| 387 | Run-time PM and system sleep (i.e., system suspend and hibernation, also known | ||
| 388 | as suspend-to-RAM and suspend-to-disk) interact with each other in a couple of | ||
| 389 | ways. If a device is active when a system sleep starts, everything is | ||
| 390 | straightforward. But what should happen if the device is already suspended? | ||
| 391 | |||
| 392 | The device may have different wake-up settings for run-time PM and system sleep. | ||
| 393 | For example, remote wake-up may be enabled for run-time suspend but disallowed | ||
| 394 | for system sleep (device_may_wakeup(dev) returns 'false'). When this happens, | ||
| 395 | the subsystem-level system suspend callback is responsible for changing the | ||
| 396 | device's wake-up setting (it may leave that to the device driver's system | ||
| 397 | suspend routine). It may be necessary to resume the device and suspend it again | ||
| 398 | in order to do so. The same is true if the driver uses different power levels | ||
| 399 | or other settings for run-time suspend and system sleep. | ||
| 400 | |||
| 401 | During system resume, devices generally should be brought back to full power, | ||
| 402 | even if they were suspended before the system sleep began. There are several | ||
| 403 | reasons for this, including: | ||
| 404 | |||
| 405 | * The device might need to switch power levels, wake-up settings, etc. | ||
| 406 | |||
| 407 | * Remote wake-up events might have been lost by the firmware. | ||
| 408 | |||
| 409 | * The device's children may need the device to be at full power in order | ||
| 410 | to resume themselves. | ||
| 411 | |||
| 412 | * The driver's idea of the device state may not agree with the device's | ||
| 413 | physical state. This can happen during resume from hibernation. | ||
| 414 | |||
| 415 | * The device might need to be reset. | ||
| 416 | |||
| 417 | * Even though the device was suspended, if its usage counter was > 0 then most | ||
| 418 | likely it would need a run-time resume in the near future anyway. | ||
| 419 | |||
| 420 | * Always going back to full power is simplest. | ||
| 421 | |||
| 422 | If the device was suspended before the sleep began, then its run-time PM status | ||
| 423 | will have to be updated to reflect the actual post-system sleep status. The way | ||
| 424 | to do this is: | ||
| 425 | |||
| 426 | pm_runtime_disable(dev); | ||
| 427 | pm_runtime_set_active(dev); | ||
| 428 | pm_runtime_enable(dev); | ||
| 429 | |||
| 430 | The PM core always increments the run-time usage counter before calling the | ||
| 431 | ->prepare() callback and decrements it after calling the ->complete() callback. | ||
| 432 | Hence disabling run-time PM temporarily like this will not cause any run-time | ||
| 433 | suspend callbacks to be lost. | ||
diff --git a/Documentation/powerpc/dts-bindings/fsl/mpic.txt b/Documentation/powerpc/dts-bindings/fsl/mpic.txt new file mode 100644 index 000000000000..71e39cf3215b --- /dev/null +++ b/Documentation/powerpc/dts-bindings/fsl/mpic.txt | |||
| @@ -0,0 +1,42 @@ | |||
| 1 | * OpenPIC and its interrupt numbers on Freescale's e500/e600 cores | ||
| 2 | |||
| 3 | The OpenPIC specification does not specify which interrupt source has to | ||
| 4 | become which interrupt number. This is up to the software implementation | ||
| 5 | of the interrupt controller. The only requirement is that every | ||
| 6 | interrupt source has to have a unique interrupt number / vector number. | ||
| 7 | To accomplish this the current implementation assigns the number zero to | ||
| 8 | the first source, the number one to the second source and so on until | ||
| 9 | all interrupt sources have their unique number. | ||
| 10 | Usually the assigned vector number equals the interrupt number mentioned | ||
| 11 | in the documentation for a given core / CPU. This is however not true | ||
| 12 | for the e500 cores (MPC85XX CPUs) where the documentation distinguishes | ||
| 13 | between internal and external interrupt sources and starts counting at | ||
| 14 | zero for both of them. | ||
| 15 | |||
| 16 | So what to write for external interrupt source X or internal interrupt | ||
| 17 | source Y into the device tree? Here is an example: | ||
| 18 | |||
| 19 | The memory map for the interrupt controller in the MPC8544[0] shows, | ||
| 20 | that the first interrupt source starts at 0x5_0000 (PIC Register Address | ||
| 21 | Map-Interrupt Source Configuration Registers). This source becomes the | ||
| 22 | number zero therefore: | ||
| 23 | External interrupt 0 = interrupt number 0 | ||
| 24 | External interrupt 1 = interrupt number 1 | ||
| 25 | External interrupt 2 = interrupt number 2 | ||
| 26 | ... | ||
| 27 | Every interrupt number allocates 0x20 bytes register space. So to get | ||
| 28 | its number it is sufficient to shift the lower 16bits to right by five. | ||
| 29 | So for the external interrupt 10 we have: | ||
| 30 | 0x0140 >> 5 = 10 | ||
| 31 | |||
| 32 | After the external sources, the internal sources follow. The in core I2C | ||
| 33 | controller on the MPC8544 for instance has the internal source number | ||
| 34 | 27. To obtain its interrupt number we take the lower 16bits of its memory | ||
| 35 | address (0x5_0560) and shift it right: | ||
| 36 | 0x0560 >> 5 = 43 | ||
| 37 | |||
| 38 | Therefore the I2C device node for the MPC8544 CPU has to have the | ||
| 39 | interrupt number 43 specified in the device tree. | ||
| 40 | |||
| 41 | [0] MPC8544E PowerQUICCTM III, Integrated Host Processor Family Reference Manual | ||
| 42 | MPC8544ERM Rev. 1 10/2007 | ||
diff --git a/Documentation/trace/events-kmem.txt b/Documentation/trace/events-kmem.txt index 6ef2a8652e17..aa82ee4a5a87 100644 --- a/Documentation/trace/events-kmem.txt +++ b/Documentation/trace/events-kmem.txt | |||
| @@ -1,7 +1,7 @@ | |||
| 1 | Subsystem Trace Points: kmem | 1 | Subsystem Trace Points: kmem |
| 2 | 2 | ||
| 3 | The tracing system kmem captures events related to object and page allocation | 3 | The kmem tracing system captures events related to object and page allocation |
| 4 | within the kernel. Broadly speaking there are four major subheadings. | 4 | within the kernel. Broadly speaking there are five major subheadings. |
| 5 | 5 | ||
| 6 | o Slab allocation of small objects of unknown type (kmalloc) | 6 | o Slab allocation of small objects of unknown type (kmalloc) |
| 7 | o Slab allocation of small objects of known type | 7 | o Slab allocation of small objects of known type |
| @@ -9,7 +9,7 @@ within the kernel. Broadly speaking there are four major subheadings. | |||
| 9 | o Per-CPU Allocator Activity | 9 | o Per-CPU Allocator Activity |
| 10 | o External Fragmentation | 10 | o External Fragmentation |
| 11 | 11 | ||
| 12 | This document will describe what each of the tracepoints are and why they | 12 | This document describes what each of the tracepoints is and why they |
| 13 | might be useful. | 13 | might be useful. |
| 14 | 14 | ||
| 15 | 1. Slab allocation of small objects of unknown type | 15 | 1. Slab allocation of small objects of unknown type |
| @@ -34,7 +34,7 @@ kmem_cache_free call_site=%lx ptr=%p | |||
| 34 | These events are similar in usage to the kmalloc-related events except that | 34 | These events are similar in usage to the kmalloc-related events except that |
| 35 | it is likely easier to pin the event down to a specific cache. At the time | 35 | it is likely easier to pin the event down to a specific cache. At the time |
| 36 | of writing, no information is available on what slab is being allocated from, | 36 | of writing, no information is available on what slab is being allocated from, |
| 37 | but the call_site can usually be used to extrapolate that information | 37 | but the call_site can usually be used to extrapolate that information. |
| 38 | 38 | ||
| 39 | 3. Page allocation | 39 | 3. Page allocation |
| 40 | ================== | 40 | ================== |
| @@ -80,9 +80,9 @@ event indicating whether it is for a percpu_refill or not. | |||
| 80 | When the per-CPU list is too full, a number of pages are freed, each one | 80 | When the per-CPU list is too full, a number of pages are freed, each one |
| 81 | which triggers a mm_page_pcpu_drain event. | 81 | which triggers a mm_page_pcpu_drain event. |
| 82 | 82 | ||
| 83 | The individual nature of the events are so that pages can be tracked | 83 | The individual nature of the events is so that pages can be tracked |
| 84 | between allocation and freeing. A number of drain or refill pages that occur | 84 | between allocation and freeing. A number of drain or refill pages that occur |
| 85 | consecutively imply the zone->lock being taken once. Large amounts of PCP | 85 | consecutively imply the zone->lock being taken once. Large amounts of per-CPU |
| 86 | refills and drains could imply an imbalance between CPUs where too much work | 86 | refills and drains could imply an imbalance between CPUs where too much work |
| 87 | is being concentrated in one place. It could also indicate that the per-CPU | 87 | is being concentrated in one place. It could also indicate that the per-CPU |
| 88 | lists should be a larger size. Finally, large amounts of refills on one CPU | 88 | lists should be a larger size. Finally, large amounts of refills on one CPU |
| @@ -102,6 +102,6 @@ is important. | |||
| 102 | 102 | ||
| 103 | Large numbers of this event implies that memory is fragmenting and | 103 | Large numbers of this event implies that memory is fragmenting and |
| 104 | high-order allocations will start failing at some time in the future. One | 104 | high-order allocations will start failing at some time in the future. One |
| 105 | means of reducing the occurange of this event is to increase the size of | 105 | means of reducing the occurrence of this event is to increase the size of |
| 106 | min_free_kbytes in increments of 3*pageblock_size*nr_online_nodes where | 106 | min_free_kbytes in increments of 3*pageblock_size*nr_online_nodes where |
| 107 | pageblock_size is usually the size of the default hugepage size. | 107 | pageblock_size is usually the size of the default hugepage size. |
diff --git a/MAINTAINERS b/MAINTAINERS index efd2ef2c2660..d5244f1580bc 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
| @@ -1402,6 +1402,8 @@ L: linux-usb@vger.kernel.org | |||
| 1402 | S: Supported | 1402 | S: Supported |
| 1403 | F: Documentation/usb/WUSB-Design-overview.txt | 1403 | F: Documentation/usb/WUSB-Design-overview.txt |
| 1404 | F: Documentation/usb/wusb-cbaf | 1404 | F: Documentation/usb/wusb-cbaf |
| 1405 | F: drivers/usb/host/hwa-hc.c | ||
| 1406 | F: drivers/usb/host/whci/ | ||
| 1405 | F: drivers/usb/wusbcore/ | 1407 | F: drivers/usb/wusbcore/ |
| 1406 | F: include/linux/usb/wusb* | 1408 | F: include/linux/usb/wusb* |
| 1407 | 1409 | ||
| @@ -5430,7 +5432,10 @@ ULTRA-WIDEBAND (UWB) SUBSYSTEM: | |||
| 5430 | M: David Vrabel <david.vrabel@csr.com> | 5432 | M: David Vrabel <david.vrabel@csr.com> |
| 5431 | L: linux-usb@vger.kernel.org | 5433 | L: linux-usb@vger.kernel.org |
| 5432 | S: Supported | 5434 | S: Supported |
| 5433 | F: drivers/uwb/* | 5435 | F: drivers/uwb/ |
| 5436 | X: drivers/uwb/wlp/ | ||
| 5437 | X: drivers/uwb/i1480/i1480u-wlp/ | ||
| 5438 | X: drivers/uwb/i1480/i1480-wlp.h | ||
| 5434 | F: include/linux/uwb.h | 5439 | F: include/linux/uwb.h |
| 5435 | F: include/linux/uwb/ | 5440 | F: include/linux/uwb/ |
| 5436 | 5441 | ||
| @@ -5943,9 +5948,12 @@ W: http://linuxwimax.org | |||
| 5943 | 5948 | ||
| 5944 | WIMEDIA LLC PROTOCOL (WLP) SUBSYSTEM | 5949 | WIMEDIA LLC PROTOCOL (WLP) SUBSYSTEM |
| 5945 | M: David Vrabel <david.vrabel@csr.com> | 5950 | M: David Vrabel <david.vrabel@csr.com> |
| 5951 | L: netdev@vger.kernel.org | ||
| 5946 | S: Maintained | 5952 | S: Maintained |
| 5947 | F: include/linux/wlp.h | 5953 | F: include/linux/wlp.h |
| 5948 | F: drivers/uwb/wlp/ | 5954 | F: drivers/uwb/wlp/ |
| 5955 | F: drivers/uwb/i1480/i1480u-wlp/ | ||
| 5956 | F: drivers/uwb/i1480/i1480-wlp.h | ||
| 5949 | 5957 | ||
| 5950 | WISTRON LAPTOP BUTTON DRIVER | 5958 | WISTRON LAPTOP BUTTON DRIVER |
| 5951 | M: Miloslav Trmac <mitr@volny.cz> | 5959 | M: Miloslav Trmac <mitr@volny.cz> |
diff --git a/arch/powerpc/boot/dts/katmai.dts b/arch/powerpc/boot/dts/katmai.dts index 51eb6ed5da2d..8f345de960cd 100644 --- a/arch/powerpc/boot/dts/katmai.dts +++ b/arch/powerpc/boot/dts/katmai.dts | |||
| @@ -108,12 +108,19 @@ | |||
| 108 | dcr-reg = <0x00c 0x002>; | 108 | dcr-reg = <0x00c 0x002>; |
| 109 | }; | 109 | }; |
| 110 | 110 | ||
| 111 | MQ0: mq { | ||
| 112 | compatible = "ibm,mq-440spe"; | ||
| 113 | dcr-reg = <0x040 0x020>; | ||
| 114 | }; | ||
| 115 | |||
| 111 | plb { | 116 | plb { |
| 112 | compatible = "ibm,plb-440spe", "ibm,plb-440gp", "ibm,plb4"; | 117 | compatible = "ibm,plb-440spe", "ibm,plb-440gp", "ibm,plb4"; |
| 113 | #address-cells = <2>; | 118 | #address-cells = <2>; |
| 114 | #size-cells = <1>; | 119 | #size-cells = <1>; |
| 115 | /* addr-child addr-parent size */ | 120 | /* addr-child addr-parent size */ |
| 116 | ranges = <0x4 0xe0000000 0x4 0xe0000000 0x20000000 | 121 | ranges = <0x4 0x00100000 0x4 0x00100000 0x00001000 |
| 122 | 0x4 0x00200000 0x4 0x00200000 0x00000400 | ||
| 123 | 0x4 0xe0000000 0x4 0xe0000000 0x20000000 | ||
| 117 | 0xc 0x00000000 0xc 0x00000000 0x20000000 | 124 | 0xc 0x00000000 0xc 0x00000000 0x20000000 |
| 118 | 0xd 0x00000000 0xd 0x00000000 0x80000000 | 125 | 0xd 0x00000000 0xd 0x00000000 0x80000000 |
| 119 | 0xd 0x80000000 0xd 0x80000000 0x80000000 | 126 | 0xd 0x80000000 0xd 0x80000000 0x80000000 |
| @@ -400,6 +407,49 @@ | |||
| 400 | 0x0 0x0 0x0 0x3 &UIC3 0xa 0x4 /* swizzled int C */ | 407 | 0x0 0x0 0x0 0x3 &UIC3 0xa 0x4 /* swizzled int C */ |
| 401 | 0x0 0x0 0x0 0x4 &UIC3 0xb 0x4 /* swizzled int D */>; | 408 | 0x0 0x0 0x0 0x4 &UIC3 0xb 0x4 /* swizzled int D */>; |
| 402 | }; | 409 | }; |
| 410 | |||
| 411 | I2O: i2o@400100000 { | ||
| 412 | compatible = "ibm,i2o-440spe"; | ||
| 413 | reg = <0x00000004 0x00100000 0x100>; | ||
| 414 | dcr-reg = <0x060 0x020>; | ||
| 415 | }; | ||
| 416 | |||
| 417 | DMA0: dma0@400100100 { | ||
| 418 | compatible = "ibm,dma-440spe"; | ||
| 419 | cell-index = <0>; | ||
| 420 | reg = <0x00000004 0x00100100 0x100>; | ||
| 421 | dcr-reg = <0x060 0x020>; | ||
| 422 | interrupt-parent = <&DMA0>; | ||
| 423 | interrupts = <0 1>; | ||
| 424 | #interrupt-cells = <1>; | ||
| 425 | #address-cells = <0>; | ||
| 426 | #size-cells = <0>; | ||
| 427 | interrupt-map = < | ||
| 428 | 0 &UIC0 0x14 4 | ||
| 429 | 1 &UIC1 0x16 4>; | ||
| 430 | }; | ||
| 431 | |||
| 432 | DMA1: dma1@400100200 { | ||
| 433 | compatible = "ibm,dma-440spe"; | ||
| 434 | cell-index = <1>; | ||
| 435 | reg = <0x00000004 0x00100200 0x100>; | ||
| 436 | dcr-reg = <0x060 0x020>; | ||
| 437 | interrupt-parent = <&DMA1>; | ||
| 438 | interrupts = <0 1>; | ||
| 439 | #interrupt-cells = <1>; | ||
| 440 | #address-cells = <0>; | ||
| 441 | #size-cells = <0>; | ||
| 442 | interrupt-map = < | ||
| 443 | 0 &UIC0 0x16 4 | ||
| 444 | 1 &UIC1 0x16 4>; | ||
| 445 | }; | ||
| 446 | |||
| 447 | xor-accel@400200000 { | ||
| 448 | compatible = "amcc,xor-accelerator"; | ||
| 449 | reg = <0x00000004 0x00200000 0x400>; | ||
| 450 | interrupt-parent = <&UIC1>; | ||
| 451 | interrupts = <0x1f 4>; | ||
| 452 | }; | ||
| 403 | }; | 453 | }; |
| 404 | 454 | ||
| 405 | chosen { | 455 | chosen { |
diff --git a/arch/powerpc/boot/dts/mpc8315erdb.dts b/arch/powerpc/boot/dts/mpc8315erdb.dts index 32e10f588c1d..8a3a4f3ef831 100644 --- a/arch/powerpc/boot/dts/mpc8315erdb.dts +++ b/arch/powerpc/boot/dts/mpc8315erdb.dts | |||
| @@ -204,6 +204,7 @@ | |||
| 204 | interrupt-parent = <&ipic>; | 204 | interrupt-parent = <&ipic>; |
| 205 | tbi-handle = <&tbi0>; | 205 | tbi-handle = <&tbi0>; |
| 206 | phy-handle = < &phy0 >; | 206 | phy-handle = < &phy0 >; |
| 207 | fsl,magic-packet; | ||
| 207 | 208 | ||
| 208 | mdio@520 { | 209 | mdio@520 { |
| 209 | #address-cells = <1>; | 210 | #address-cells = <1>; |
| @@ -246,6 +247,7 @@ | |||
| 246 | interrupt-parent = <&ipic>; | 247 | interrupt-parent = <&ipic>; |
| 247 | tbi-handle = <&tbi1>; | 248 | tbi-handle = <&tbi1>; |
| 248 | phy-handle = < &phy1 >; | 249 | phy-handle = < &phy1 >; |
| 250 | fsl,magic-packet; | ||
| 249 | 251 | ||
| 250 | mdio@520 { | 252 | mdio@520 { |
| 251 | #address-cells = <1>; | 253 | #address-cells = <1>; |
| @@ -309,6 +311,22 @@ | |||
| 309 | interrupt-parent = <&ipic>; | 311 | interrupt-parent = <&ipic>; |
| 310 | }; | 312 | }; |
| 311 | 313 | ||
| 314 | gtm1: timer@500 { | ||
| 315 | compatible = "fsl,mpc8315-gtm", "fsl,gtm"; | ||
| 316 | reg = <0x500 0x100>; | ||
| 317 | interrupts = <90 8 78 8 84 8 72 8>; | ||
| 318 | interrupt-parent = <&ipic>; | ||
| 319 | clock-frequency = <133333333>; | ||
| 320 | }; | ||
| 321 | |||
| 322 | timer@600 { | ||
| 323 | compatible = "fsl,mpc8315-gtm", "fsl,gtm"; | ||
| 324 | reg = <0x600 0x100>; | ||
| 325 | interrupts = <91 8 79 8 85 8 73 8>; | ||
| 326 | interrupt-parent = <&ipic>; | ||
| 327 | clock-frequency = <133333333>; | ||
| 328 | }; | ||
| 329 | |||
| 312 | /* IPIC | 330 | /* IPIC |
| 313 | * interrupts cell = <intr #, sense> | 331 | * interrupts cell = <intr #, sense> |
| 314 | * sense values match linux IORESOURCE_IRQ_* defines: | 332 | * sense values match linux IORESOURCE_IRQ_* defines: |
| @@ -337,6 +355,15 @@ | |||
| 337 | 0x59 0x8>; | 355 | 0x59 0x8>; |
| 338 | interrupt-parent = < &ipic >; | 356 | interrupt-parent = < &ipic >; |
| 339 | }; | 357 | }; |
| 358 | |||
| 359 | pmc: power@b00 { | ||
| 360 | compatible = "fsl,mpc8315-pmc", "fsl,mpc8313-pmc", | ||
| 361 | "fsl,mpc8349-pmc"; | ||
| 362 | reg = <0xb00 0x100 0xa00 0x100>; | ||
| 363 | interrupts = <80 8>; | ||
| 364 | interrupt-parent = <&ipic>; | ||
| 365 | fsl,mpc8313-wakeup-timer = <>m1>; | ||
| 366 | }; | ||
| 340 | }; | 367 | }; |
| 341 | 368 | ||
| 342 | pci0: pci@e0008500 { | 369 | pci0: pci@e0008500 { |
diff --git a/arch/powerpc/boot/dts/mpc8349emitx.dts b/arch/powerpc/boot/dts/mpc8349emitx.dts index feeeb7f9d609..b53d1df11e2d 100644 --- a/arch/powerpc/boot/dts/mpc8349emitx.dts +++ b/arch/powerpc/boot/dts/mpc8349emitx.dts | |||
| @@ -63,6 +63,24 @@ | |||
| 63 | reg = <0x200 0x100>; | 63 | reg = <0x200 0x100>; |
| 64 | }; | 64 | }; |
| 65 | 65 | ||
| 66 | gpio1: gpio-controller@c00 { | ||
| 67 | #gpio-cells = <2>; | ||
| 68 | compatible = "fsl,mpc8349-gpio"; | ||
| 69 | reg = <0xc00 0x100>; | ||
| 70 | interrupts = <74 0x8>; | ||
| 71 | interrupt-parent = <&ipic>; | ||
| 72 | gpio-controller; | ||
| 73 | }; | ||
| 74 | |||
| 75 | gpio2: gpio-controller@d00 { | ||
| 76 | #gpio-cells = <2>; | ||
| 77 | compatible = "fsl,mpc8349-gpio"; | ||
| 78 | reg = <0xd00 0x100>; | ||
| 79 | interrupts = <75 0x8>; | ||
| 80 | interrupt-parent = <&ipic>; | ||
| 81 | gpio-controller; | ||
| 82 | }; | ||
| 83 | |||
| 66 | i2c@3000 { | 84 | i2c@3000 { |
| 67 | #address-cells = <1>; | 85 | #address-cells = <1>; |
| 68 | #size-cells = <0>; | 86 | #size-cells = <0>; |
| @@ -72,6 +90,12 @@ | |||
| 72 | interrupts = <14 0x8>; | 90 | interrupts = <14 0x8>; |
| 73 | interrupt-parent = <&ipic>; | 91 | interrupt-parent = <&ipic>; |
| 74 | dfsrr; | 92 | dfsrr; |
| 93 | |||
| 94 | eeprom: at24@50 { | ||
| 95 | compatible = "st-micro,24c256"; | ||
| 96 | reg = <0x50>; | ||
| 97 | }; | ||
| 98 | |||
| 75 | }; | 99 | }; |
| 76 | 100 | ||
| 77 | i2c@3100 { | 101 | i2c@3100 { |
| @@ -91,6 +115,25 @@ | |||
| 91 | interrupt-parent = <&ipic>; | 115 | interrupt-parent = <&ipic>; |
| 92 | }; | 116 | }; |
| 93 | 117 | ||
| 118 | pcf1: iexp@38 { | ||
| 119 | #gpio-cells = <2>; | ||
| 120 | compatible = "ti,pcf8574a"; | ||
| 121 | reg = <0x38>; | ||
| 122 | gpio-controller; | ||
| 123 | }; | ||
| 124 | |||
| 125 | pcf2: iexp@39 { | ||
| 126 | #gpio-cells = <2>; | ||
| 127 | compatible = "ti,pcf8574a"; | ||
| 128 | reg = <0x39>; | ||
| 129 | gpio-controller; | ||
| 130 | }; | ||
| 131 | |||
| 132 | spd: at24@51 { | ||
| 133 | compatible = "at24,spd"; | ||
| 134 | reg = <0x51>; | ||
| 135 | }; | ||
| 136 | |||
| 94 | mcu_pio: mcu@a { | 137 | mcu_pio: mcu@a { |
| 95 | #gpio-cells = <2>; | 138 | #gpio-cells = <2>; |
| 96 | compatible = "fsl,mc9s08qg8-mpc8349emitx", | 139 | compatible = "fsl,mc9s08qg8-mpc8349emitx", |
| @@ -275,6 +318,24 @@ | |||
| 275 | reg = <0x700 0x100>; | 318 | reg = <0x700 0x100>; |
| 276 | device_type = "ipic"; | 319 | device_type = "ipic"; |
| 277 | }; | 320 | }; |
| 321 | |||
| 322 | gpio-leds { | ||
| 323 | compatible = "gpio-leds"; | ||
| 324 | |||
| 325 | green { | ||
| 326 | label = "Green"; | ||
| 327 | gpios = <&pcf1 0 1>; | ||
| 328 | linux,default-trigger = "heartbeat"; | ||
| 329 | }; | ||
| 330 | |||
| 331 | yellow { | ||
| 332 | label = "Yellow"; | ||
| 333 | gpios = <&pcf1 1 1>; | ||
| 334 | /* linux,default-trigger = "heartbeat"; */ | ||
| 335 | default-state = "on"; | ||
| 336 | }; | ||
| 337 | }; | ||
| 338 | |||
| 278 | }; | 339 | }; |
| 279 | 340 | ||
| 280 | pci0: pci@e0008500 { | 341 | pci0: pci@e0008500 { |
| @@ -331,7 +392,26 @@ | |||
| 331 | compatible = "fsl,mpc8349e-localbus", | 392 | compatible = "fsl,mpc8349e-localbus", |
| 332 | "fsl,pq2pro-localbus"; | 393 | "fsl,pq2pro-localbus"; |
| 333 | reg = <0xe0005000 0xd8>; | 394 | reg = <0xe0005000 0xd8>; |
| 334 | ranges = <0x3 0x0 0xf0000000 0x210>; | 395 | ranges = <0x0 0x0 0xfe000000 0x1000000 /* flash */ |
| 396 | 0x1 0x0 0xf8000000 0x20000 /* VSC 7385 */ | ||
| 397 | 0x2 0x0 0xf9000000 0x200000 /* exp slot */ | ||
| 398 | 0x3 0x0 0xf0000000 0x210>; /* CF slot */ | ||
| 399 | |||
| 400 | flash@0,0 { | ||
| 401 | compatible = "cfi-flash"; | ||
| 402 | reg = <0x0 0x0 0x800000>; | ||
| 403 | bank-width = <2>; | ||
| 404 | device-width = <1>; | ||
| 405 | }; | ||
| 406 | |||
| 407 | flash@0,800000 { | ||
| 408 | #address-cells = <1>; | ||
| 409 | #size-cells = <1>; | ||
| 410 | compatible = "cfi-flash"; | ||
| 411 | reg = <0x0 0x800000 0x800000>; | ||
| 412 | bank-width = <2>; | ||
| 413 | device-width = <1>; | ||
| 414 | }; | ||
| 335 | 415 | ||
| 336 | pata@3,0 { | 416 | pata@3,0 { |
| 337 | compatible = "fsl,mpc8349emitx-pata", "ata-generic"; | 417 | compatible = "fsl,mpc8349emitx-pata", "ata-generic"; |
diff --git a/arch/powerpc/boot/dts/warp.dts b/arch/powerpc/boot/dts/warp.dts index 31605ee4afb6..e576ee85c42f 100644 --- a/arch/powerpc/boot/dts/warp.dts +++ b/arch/powerpc/boot/dts/warp.dts | |||
| @@ -146,7 +146,7 @@ | |||
| 146 | 146 | ||
| 147 | fpga@2,4000 { | 147 | fpga@2,4000 { |
| 148 | compatible = "pika,fpga-sd"; | 148 | compatible = "pika,fpga-sd"; |
| 149 | reg = <0x00000002 0x00004000 0x00000A00>; | 149 | reg = <0x00000002 0x00004000 0x00004000>; |
| 150 | }; | 150 | }; |
| 151 | 151 | ||
| 152 | nor@0,0 { | 152 | nor@0,0 { |
diff --git a/arch/powerpc/boot/ugecon.c b/arch/powerpc/boot/ugecon.c index 50609ea6ddf8..8f2a6b311534 100644 --- a/arch/powerpc/boot/ugecon.c +++ b/arch/powerpc/boot/ugecon.c | |||
| @@ -86,7 +86,7 @@ static void ug_putc(char ch) | |||
| 86 | 86 | ||
| 87 | while (!ug_is_txfifo_ready() && count--) | 87 | while (!ug_is_txfifo_ready() && count--) |
| 88 | barrier(); | 88 | barrier(); |
| 89 | if (count) | 89 | if (count >= 0) |
| 90 | ug_raw_putc(ch); | 90 | ug_raw_putc(ch); |
| 91 | } | 91 | } |
| 92 | 92 | ||
diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig index fc905924c022..826a65d3f002 100644 --- a/arch/powerpc/configs/g5_defconfig +++ b/arch/powerpc/configs/g5_defconfig | |||
| @@ -757,7 +757,7 @@ CONFIG_SUNGEM=y | |||
| 757 | # CONFIG_B44 is not set | 757 | # CONFIG_B44 is not set |
| 758 | # CONFIG_ATL2 is not set | 758 | # CONFIG_ATL2 is not set |
| 759 | CONFIG_NETDEV_1000=y | 759 | CONFIG_NETDEV_1000=y |
| 760 | CONFIG_ACENIC=y | 760 | CONFIG_ACENIC=m |
| 761 | CONFIG_ACENIC_OMIT_TIGON_I=y | 761 | CONFIG_ACENIC_OMIT_TIGON_I=y |
| 762 | # CONFIG_DL2K is not set | 762 | # CONFIG_DL2K is not set |
| 763 | CONFIG_E1000=y | 763 | CONFIG_E1000=y |
| @@ -794,8 +794,8 @@ CONFIG_NETDEV_10000=y | |||
| 794 | # CONFIG_BNX2X is not set | 794 | # CONFIG_BNX2X is not set |
| 795 | # CONFIG_QLGE is not set | 795 | # CONFIG_QLGE is not set |
| 796 | # CONFIG_SFC is not set | 796 | # CONFIG_SFC is not set |
| 797 | CONFIG_TR=y | 797 | # CONFIG_TR is not set |
| 798 | CONFIG_IBMOL=y | 798 | # CONFIG_IBMOL is not set |
| 799 | # CONFIG_3C359 is not set | 799 | # CONFIG_3C359 is not set |
| 800 | # CONFIG_TMS380TR is not set | 800 | # CONFIG_TMS380TR is not set |
| 801 | 801 | ||
diff --git a/arch/powerpc/configs/iseries_defconfig b/arch/powerpc/configs/iseries_defconfig index f925c555508e..76982c51a4c7 100644 --- a/arch/powerpc/configs/iseries_defconfig +++ b/arch/powerpc/configs/iseries_defconfig | |||
| @@ -714,8 +714,8 @@ CONFIG_NETDEV_10000=y | |||
| 714 | # CONFIG_BNX2X is not set | 714 | # CONFIG_BNX2X is not set |
| 715 | # CONFIG_QLGE is not set | 715 | # CONFIG_QLGE is not set |
| 716 | # CONFIG_SFC is not set | 716 | # CONFIG_SFC is not set |
| 717 | CONFIG_TR=y | 717 | # CONFIG_TR is not set |
| 718 | CONFIG_IBMOL=y | 718 | # CONFIG_IBMOL is not set |
| 719 | # CONFIG_3C359 is not set | 719 | # CONFIG_3C359 is not set |
| 720 | # CONFIG_TMS380TR is not set | 720 | # CONFIG_TMS380TR is not set |
| 721 | 721 | ||
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index 252401824575..7b3804a6e363 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig | |||
| @@ -304,11 +304,11 @@ CONFIG_TICK_ONESHOT=y | |||
| 304 | CONFIG_NO_HZ=y | 304 | CONFIG_NO_HZ=y |
| 305 | CONFIG_HIGH_RES_TIMERS=y | 305 | CONFIG_HIGH_RES_TIMERS=y |
| 306 | CONFIG_GENERIC_CLOCKEVENTS_BUILD=y | 306 | CONFIG_GENERIC_CLOCKEVENTS_BUILD=y |
| 307 | # CONFIG_HZ_100 is not set | 307 | CONFIG_HZ_100=y |
| 308 | CONFIG_HZ_250=y | 308 | # CONFIG_HZ_250 is not set |
| 309 | # CONFIG_HZ_300 is not set | 309 | # CONFIG_HZ_300 is not set |
| 310 | # CONFIG_HZ_1000 is not set | 310 | # CONFIG_HZ_1000 is not set |
| 311 | CONFIG_HZ=250 | 311 | CONFIG_HZ=100 |
| 312 | CONFIG_SCHED_HRTICK=y | 312 | CONFIG_SCHED_HRTICK=y |
| 313 | CONFIG_PREEMPT_NONE=y | 313 | CONFIG_PREEMPT_NONE=y |
| 314 | # CONFIG_PREEMPT_VOLUNTARY is not set | 314 | # CONFIG_PREEMPT_VOLUNTARY is not set |
| @@ -980,7 +980,7 @@ CONFIG_E100=y | |||
| 980 | # CONFIG_SC92031 is not set | 980 | # CONFIG_SC92031 is not set |
| 981 | # CONFIG_ATL2 is not set | 981 | # CONFIG_ATL2 is not set |
| 982 | CONFIG_NETDEV_1000=y | 982 | CONFIG_NETDEV_1000=y |
| 983 | CONFIG_ACENIC=y | 983 | CONFIG_ACENIC=m |
| 984 | CONFIG_ACENIC_OMIT_TIGON_I=y | 984 | CONFIG_ACENIC_OMIT_TIGON_I=y |
| 985 | # CONFIG_DL2K is not set | 985 | # CONFIG_DL2K is not set |
| 986 | CONFIG_E1000=y | 986 | CONFIG_E1000=y |
| @@ -1023,8 +1023,8 @@ CONFIG_PASEMI_MAC=y | |||
| 1023 | # CONFIG_BNX2X is not set | 1023 | # CONFIG_BNX2X is not set |
| 1024 | # CONFIG_QLGE is not set | 1024 | # CONFIG_QLGE is not set |
| 1025 | # CONFIG_SFC is not set | 1025 | # CONFIG_SFC is not set |
| 1026 | CONFIG_TR=y | 1026 | # CONFIG_TR is not set |
| 1027 | CONFIG_IBMOL=y | 1027 | # CONFIG_IBMOL is not set |
| 1028 | # CONFIG_3C359 is not set | 1028 | # CONFIG_3C359 is not set |
| 1029 | # CONFIG_TMS380TR is not set | 1029 | # CONFIG_TMS380TR is not set |
| 1030 | 1030 | ||
| @@ -1863,7 +1863,7 @@ CONFIG_HFSPLUS_FS=m | |||
| 1863 | # CONFIG_BEFS_FS is not set | 1863 | # CONFIG_BEFS_FS is not set |
| 1864 | # CONFIG_BFS_FS is not set | 1864 | # CONFIG_BFS_FS is not set |
| 1865 | # CONFIG_EFS_FS is not set | 1865 | # CONFIG_EFS_FS is not set |
| 1866 | CONFIG_CRAMFS=y | 1866 | CONFIG_CRAMFS=m |
| 1867 | # CONFIG_VXFS_FS is not set | 1867 | # CONFIG_VXFS_FS is not set |
| 1868 | # CONFIG_MINIX_FS is not set | 1868 | # CONFIG_MINIX_FS is not set |
| 1869 | # CONFIG_OMFS_FS is not set | 1869 | # CONFIG_OMFS_FS is not set |
diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig index 18af46036258..8195f1650cbf 100644 --- a/arch/powerpc/configs/ppc64e_defconfig +++ b/arch/powerpc/configs/ppc64e_defconfig | |||
| @@ -1008,8 +1008,8 @@ CONFIG_IXGB=m | |||
| 1008 | # CONFIG_QLGE is not set | 1008 | # CONFIG_QLGE is not set |
| 1009 | # CONFIG_SFC is not set | 1009 | # CONFIG_SFC is not set |
| 1010 | # CONFIG_BE2NET is not set | 1010 | # CONFIG_BE2NET is not set |
| 1011 | CONFIG_TR=y | 1011 | # CONFIG_TR is not set |
| 1012 | CONFIG_IBMOL=y | 1012 | # CONFIG_IBMOL is not set |
| 1013 | # CONFIG_3C359 is not set | 1013 | # CONFIG_3C359 is not set |
| 1014 | # CONFIG_TMS380TR is not set | 1014 | # CONFIG_TMS380TR is not set |
| 1015 | CONFIG_WLAN=y | 1015 | CONFIG_WLAN=y |
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index c568329723b8..ca9ff9aad74a 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig | |||
| @@ -230,11 +230,11 @@ CONFIG_TICK_ONESHOT=y | |||
| 230 | CONFIG_NO_HZ=y | 230 | CONFIG_NO_HZ=y |
| 231 | CONFIG_HIGH_RES_TIMERS=y | 231 | CONFIG_HIGH_RES_TIMERS=y |
| 232 | CONFIG_GENERIC_CLOCKEVENTS_BUILD=y | 232 | CONFIG_GENERIC_CLOCKEVENTS_BUILD=y |
| 233 | # CONFIG_HZ_100 is not set | 233 | CONFIG_HZ_100=y |
| 234 | CONFIG_HZ_250=y | 234 | # CONFIG_HZ_250 is not set |
| 235 | # CONFIG_HZ_300 is not set | 235 | # CONFIG_HZ_300 is not set |
| 236 | # CONFIG_HZ_1000 is not set | 236 | # CONFIG_HZ_1000 is not set |
| 237 | CONFIG_HZ=250 | 237 | CONFIG_HZ=100 |
| 238 | CONFIG_SCHED_HRTICK=y | 238 | CONFIG_SCHED_HRTICK=y |
| 239 | CONFIG_PREEMPT_NONE=y | 239 | CONFIG_PREEMPT_NONE=y |
| 240 | # CONFIG_PREEMPT_VOLUNTARY is not set | 240 | # CONFIG_PREEMPT_VOLUNTARY is not set |
| @@ -796,7 +796,7 @@ CONFIG_E100=y | |||
| 796 | # CONFIG_NET_POCKET is not set | 796 | # CONFIG_NET_POCKET is not set |
| 797 | # CONFIG_ATL2 is not set | 797 | # CONFIG_ATL2 is not set |
| 798 | CONFIG_NETDEV_1000=y | 798 | CONFIG_NETDEV_1000=y |
| 799 | CONFIG_ACENIC=y | 799 | CONFIG_ACENIC=m |
| 800 | CONFIG_ACENIC_OMIT_TIGON_I=y | 800 | CONFIG_ACENIC_OMIT_TIGON_I=y |
| 801 | # CONFIG_DL2K is not set | 801 | # CONFIG_DL2K is not set |
| 802 | CONFIG_E1000=y | 802 | CONFIG_E1000=y |
| @@ -834,8 +834,8 @@ CONFIG_S2IO=m | |||
| 834 | # CONFIG_BNX2X is not set | 834 | # CONFIG_BNX2X is not set |
| 835 | # CONFIG_QLGE is not set | 835 | # CONFIG_QLGE is not set |
| 836 | # CONFIG_SFC is not set | 836 | # CONFIG_SFC is not set |
| 837 | CONFIG_TR=y | 837 | # CONFIG_TR is not set |
| 838 | CONFIG_IBMOL=y | 838 | # CONFIG_IBMOL is not set |
| 839 | # CONFIG_3C359 is not set | 839 | # CONFIG_3C359 is not set |
| 840 | # CONFIG_TMS380TR is not set | 840 | # CONFIG_TMS380TR is not set |
| 841 | 841 | ||
| @@ -1494,7 +1494,7 @@ CONFIG_CONFIGFS_FS=m | |||
| 1494 | # CONFIG_BEFS_FS is not set | 1494 | # CONFIG_BEFS_FS is not set |
| 1495 | # CONFIG_BFS_FS is not set | 1495 | # CONFIG_BFS_FS is not set |
| 1496 | # CONFIG_EFS_FS is not set | 1496 | # CONFIG_EFS_FS is not set |
| 1497 | CONFIG_CRAMFS=y | 1497 | CONFIG_CRAMFS=m |
| 1498 | # CONFIG_VXFS_FS is not set | 1498 | # CONFIG_VXFS_FS is not set |
| 1499 | # CONFIG_MINIX_FS is not set | 1499 | # CONFIG_MINIX_FS is not set |
| 1500 | # CONFIG_OMFS_FS is not set | 1500 | # CONFIG_OMFS_FS is not set |
diff --git a/arch/powerpc/include/asm/bug.h b/arch/powerpc/include/asm/bug.h index 64e1fdca233e..2c15212e1700 100644 --- a/arch/powerpc/include/asm/bug.h +++ b/arch/powerpc/include/asm/bug.h | |||
| @@ -68,7 +68,7 @@ | |||
| 68 | _EMIT_BUG_ENTRY \ | 68 | _EMIT_BUG_ENTRY \ |
| 69 | : : "i" (__FILE__), "i" (__LINE__), \ | 69 | : : "i" (__FILE__), "i" (__LINE__), \ |
| 70 | "i" (0), "i" (sizeof(struct bug_entry))); \ | 70 | "i" (0), "i" (sizeof(struct bug_entry))); \ |
| 71 | for(;;) ; \ | 71 | unreachable(); \ |
| 72 | } while (0) | 72 | } while (0) |
| 73 | 73 | ||
| 74 | #define BUG_ON(x) do { \ | 74 | #define BUG_ON(x) do { \ |
diff --git a/arch/powerpc/include/asm/gpio.h b/arch/powerpc/include/asm/gpio.h index ea04632399d8..38762edb5e58 100644 --- a/arch/powerpc/include/asm/gpio.h +++ b/arch/powerpc/include/asm/gpio.h | |||
| @@ -38,12 +38,9 @@ static inline int gpio_cansleep(unsigned int gpio) | |||
| 38 | return __gpio_cansleep(gpio); | 38 | return __gpio_cansleep(gpio); |
| 39 | } | 39 | } |
| 40 | 40 | ||
| 41 | /* | ||
| 42 | * Not implemented, yet. | ||
| 43 | */ | ||
| 44 | static inline int gpio_to_irq(unsigned int gpio) | 41 | static inline int gpio_to_irq(unsigned int gpio) |
| 45 | { | 42 | { |
| 46 | return -ENOSYS; | 43 | return __gpio_to_irq(gpio); |
| 47 | } | 44 | } |
| 48 | 45 | ||
| 49 | static inline int irq_to_gpio(unsigned int irq) | 46 | static inline int irq_to_gpio(unsigned int irq) |
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c index 3839839f83c7..b876e989220b 100644 --- a/arch/powerpc/kernel/align.c +++ b/arch/powerpc/kernel/align.c | |||
| @@ -642,10 +642,14 @@ static int emulate_spe(struct pt_regs *regs, unsigned int reg, | |||
| 642 | */ | 642 | */ |
| 643 | static int emulate_vsx(unsigned char __user *addr, unsigned int reg, | 643 | static int emulate_vsx(unsigned char __user *addr, unsigned int reg, |
| 644 | unsigned int areg, struct pt_regs *regs, | 644 | unsigned int areg, struct pt_regs *regs, |
| 645 | unsigned int flags, unsigned int length) | 645 | unsigned int flags, unsigned int length, |
| 646 | unsigned int elsize) | ||
| 646 | { | 647 | { |
| 647 | char *ptr; | 648 | char *ptr; |
| 649 | unsigned long *lptr; | ||
| 648 | int ret = 0; | 650 | int ret = 0; |
| 651 | int sw = 0; | ||
| 652 | int i, j; | ||
| 649 | 653 | ||
| 650 | flush_vsx_to_thread(current); | 654 | flush_vsx_to_thread(current); |
| 651 | 655 | ||
| @@ -654,19 +658,35 @@ static int emulate_vsx(unsigned char __user *addr, unsigned int reg, | |||
| 654 | else | 658 | else |
| 655 | ptr = (char *) ¤t->thread.vr[reg - 32]; | 659 | ptr = (char *) ¤t->thread.vr[reg - 32]; |
| 656 | 660 | ||
| 657 | if (flags & ST) | 661 | lptr = (unsigned long *) ptr; |
| 658 | ret = __copy_to_user(addr, ptr, length); | 662 | |
| 659 | else { | 663 | if (flags & SW) |
| 660 | if (flags & SPLT){ | 664 | sw = elsize-1; |
| 661 | ret = __copy_from_user(ptr, addr, length); | 665 | |
| 662 | ptr += length; | 666 | for (j = 0; j < length; j += elsize) { |
| 667 | for (i = 0; i < elsize; ++i) { | ||
| 668 | if (flags & ST) | ||
| 669 | ret |= __put_user(ptr[i^sw], addr + i); | ||
| 670 | else | ||
| 671 | ret |= __get_user(ptr[i^sw], addr + i); | ||
| 663 | } | 672 | } |
| 664 | ret |= __copy_from_user(ptr, addr, length); | 673 | ptr += elsize; |
| 674 | addr += elsize; | ||
| 665 | } | 675 | } |
| 666 | if (flags & U) | 676 | |
| 667 | regs->gpr[areg] = regs->dar; | 677 | if (!ret) { |
| 668 | if (ret) | 678 | if (flags & U) |
| 679 | regs->gpr[areg] = regs->dar; | ||
| 680 | |||
| 681 | /* Splat load copies the same data to top and bottom 8 bytes */ | ||
| 682 | if (flags & SPLT) | ||
| 683 | lptr[1] = lptr[0]; | ||
| 684 | /* For 8 byte loads, zero the top 8 bytes */ | ||
| 685 | else if (!(flags & ST) && (8 == length)) | ||
| 686 | lptr[1] = 0; | ||
| 687 | } else | ||
| 669 | return -EFAULT; | 688 | return -EFAULT; |
| 689 | |||
| 670 | return 1; | 690 | return 1; |
| 671 | } | 691 | } |
| 672 | #endif | 692 | #endif |
| @@ -767,16 +787,25 @@ int fix_alignment(struct pt_regs *regs) | |||
| 767 | 787 | ||
| 768 | #ifdef CONFIG_VSX | 788 | #ifdef CONFIG_VSX |
| 769 | if ((instruction & 0xfc00003e) == 0x7c000018) { | 789 | if ((instruction & 0xfc00003e) == 0x7c000018) { |
| 770 | /* Additional register addressing bit (64 VSX vs 32 FPR/GPR */ | 790 | unsigned int elsize; |
| 791 | |||
| 792 | /* Additional register addressing bit (64 VSX vs 32 FPR/GPR) */ | ||
| 771 | reg |= (instruction & 0x1) << 5; | 793 | reg |= (instruction & 0x1) << 5; |
| 772 | /* Simple inline decoder instead of a table */ | 794 | /* Simple inline decoder instead of a table */ |
| 795 | /* VSX has only 8 and 16 byte memory accesses */ | ||
| 796 | nb = 8; | ||
| 773 | if (instruction & 0x200) | 797 | if (instruction & 0x200) |
| 774 | nb = 16; | 798 | nb = 16; |
| 775 | else if (instruction & 0x080) | 799 | |
| 776 | nb = 8; | 800 | /* Vector stores in little-endian mode swap individual |
| 777 | else | 801 | elements, so process them separately */ |
| 778 | nb = 4; | 802 | elsize = 4; |
| 803 | if (instruction & 0x80) | ||
| 804 | elsize = 8; | ||
| 805 | |||
| 779 | flags = 0; | 806 | flags = 0; |
| 807 | if (regs->msr & MSR_LE) | ||
| 808 | flags |= SW; | ||
| 780 | if (instruction & 0x100) | 809 | if (instruction & 0x100) |
| 781 | flags |= ST; | 810 | flags |= ST; |
| 782 | if (instruction & 0x040) | 811 | if (instruction & 0x040) |
| @@ -787,7 +816,7 @@ int fix_alignment(struct pt_regs *regs) | |||
| 787 | nb = 8; | 816 | nb = 8; |
| 788 | } | 817 | } |
| 789 | PPC_WARN_ALIGNMENT(vsx, regs); | 818 | PPC_WARN_ALIGNMENT(vsx, regs); |
| 790 | return emulate_vsx(addr, reg, areg, regs, flags, nb); | 819 | return emulate_vsx(addr, reg, areg, regs, flags, nb, elsize); |
| 791 | } | 820 | } |
| 792 | #endif | 821 | #endif |
| 793 | /* A size of 0 indicates an instruction we don't support, with | 822 | /* A size of 0 indicates an instruction we don't support, with |
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 50f867d657df..3ecdcec0a39e 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c | |||
| @@ -340,7 +340,7 @@ static int __init htab_dt_scan_page_sizes(unsigned long node, | |||
| 340 | else | 340 | else |
| 341 | def->tlbiel = 0; | 341 | def->tlbiel = 0; |
| 342 | 342 | ||
| 343 | DBG(" %d: shift=%02x, sllp=%04x, avpnm=%08x, " | 343 | DBG(" %d: shift=%02x, sllp=%04lx, avpnm=%08lx, " |
| 344 | "tlbiel=%d, penc=%d\n", | 344 | "tlbiel=%d, penc=%d\n", |
| 345 | idx, shift, def->sllp, def->avpnm, def->tlbiel, | 345 | idx, shift, def->sllp, def->avpnm, def->tlbiel, |
| 346 | def->penc); | 346 | def->penc); |
| @@ -663,7 +663,7 @@ static void __init htab_initialize(void) | |||
| 663 | base = (unsigned long)__va(lmb.memory.region[i].base); | 663 | base = (unsigned long)__va(lmb.memory.region[i].base); |
| 664 | size = lmb.memory.region[i].size; | 664 | size = lmb.memory.region[i].size; |
| 665 | 665 | ||
| 666 | DBG("creating mapping for region: %lx..%lx (prot: %x)\n", | 666 | DBG("creating mapping for region: %lx..%lx (prot: %lx)\n", |
| 667 | base, size, prot); | 667 | base, size, prot); |
| 668 | 668 | ||
| 669 | #ifdef CONFIG_U3_DART | 669 | #ifdef CONFIG_U3_DART |
| @@ -879,7 +879,7 @@ static inline int subpage_protection(struct mm_struct *mm, unsigned long ea) | |||
| 879 | */ | 879 | */ |
| 880 | int hash_page(unsigned long ea, unsigned long access, unsigned long trap) | 880 | int hash_page(unsigned long ea, unsigned long access, unsigned long trap) |
| 881 | { | 881 | { |
| 882 | void *pgdir; | 882 | pgd_t *pgdir; |
| 883 | unsigned long vsid; | 883 | unsigned long vsid; |
| 884 | struct mm_struct *mm; | 884 | struct mm_struct *mm; |
| 885 | pte_t *ptep; | 885 | pte_t *ptep; |
| @@ -1025,7 +1025,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) | |||
| 1025 | else | 1025 | else |
| 1026 | #endif /* CONFIG_PPC_HAS_HASH_64K */ | 1026 | #endif /* CONFIG_PPC_HAS_HASH_64K */ |
| 1027 | { | 1027 | { |
| 1028 | int spp = subpage_protection(pgdir, ea); | 1028 | int spp = subpage_protection(mm, ea); |
| 1029 | if (access & spp) | 1029 | if (access & spp) |
| 1030 | rc = -2; | 1030 | rc = -2; |
| 1031 | else | 1031 | else |
| @@ -1115,7 +1115,7 @@ void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int ssize, | |||
| 1115 | { | 1115 | { |
| 1116 | unsigned long hash, index, shift, hidx, slot; | 1116 | unsigned long hash, index, shift, hidx, slot; |
| 1117 | 1117 | ||
| 1118 | DBG_LOW("flush_hash_page(va=%016x)\n", va); | 1118 | DBG_LOW("flush_hash_page(va=%016lx)\n", va); |
| 1119 | pte_iterate_hashed_subpages(pte, psize, va, index, shift) { | 1119 | pte_iterate_hashed_subpages(pte, psize, va, index, shift) { |
| 1120 | hash = hpt_hash(va, shift, ssize); | 1120 | hash = hpt_hash(va, shift, ssize); |
| 1121 | hidx = __rpte_to_hidx(pte, index); | 1121 | hidx = __rpte_to_hidx(pte, index); |
| @@ -1123,7 +1123,7 @@ void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int ssize, | |||
| 1123 | hash = ~hash; | 1123 | hash = ~hash; |
| 1124 | slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; | 1124 | slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; |
| 1125 | slot += hidx & _PTEIDX_GROUP_IX; | 1125 | slot += hidx & _PTEIDX_GROUP_IX; |
| 1126 | DBG_LOW(" sub %d: hash=%x, hidx=%x\n", index, slot, hidx); | 1126 | DBG_LOW(" sub %ld: hash=%lx, hidx=%lx\n", index, slot, hidx); |
| 1127 | ppc_md.hpte_invalidate(slot, va, psize, ssize, local); | 1127 | ppc_md.hpte_invalidate(slot, va, psize, ssize, local); |
| 1128 | } pte_iterate_hashed_end(); | 1128 | } pte_iterate_hashed_end(); |
| 1129 | } | 1129 | } |
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c index be4f34c30a0b..1044a634b6d0 100644 --- a/arch/powerpc/mm/mmu_context_nohash.c +++ b/arch/powerpc/mm/mmu_context_nohash.c | |||
| @@ -353,7 +353,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self, | |||
| 353 | read_lock(&tasklist_lock); | 353 | read_lock(&tasklist_lock); |
| 354 | for_each_process(p) { | 354 | for_each_process(p) { |
| 355 | if (p->mm) | 355 | if (p->mm) |
| 356 | cpu_mask_clear_cpu(cpu, mm_cpumask(p->mm)); | 356 | cpumask_clear_cpu(cpu, mm_cpumask(p->mm)); |
| 357 | } | 357 | } |
| 358 | read_unlock(&tasklist_lock); | 358 | read_unlock(&tasklist_lock); |
| 359 | break; | 359 | break; |
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index 177e4038b43c..573b3bd1c45b 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c | |||
| @@ -382,7 +382,7 @@ static int __change_page_attr(struct page *page, pgprot_t prot) | |||
| 382 | return 0; | 382 | return 0; |
| 383 | if (!get_pteptr(&init_mm, address, &kpte, &kpmd)) | 383 | if (!get_pteptr(&init_mm, address, &kpte, &kpmd)) |
| 384 | return -EINVAL; | 384 | return -EINVAL; |
| 385 | set_pte_at(&init_mm, address, kpte, mk_pte(page, prot)); | 385 | __set_pte_at(&init_mm, address, kpte, mk_pte(page, prot), 0); |
| 386 | wmb(); | 386 | wmb(); |
| 387 | #ifdef CONFIG_PPC_STD_MMU | 387 | #ifdef CONFIG_PPC_STD_MMU |
| 388 | flush_hash_pages(0, address, pmd_val(*kpmd), 1); | 388 | flush_hash_pages(0, address, pmd_val(*kpmd), 1); |
diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c index d306f07b9aa1..43805348b81e 100644 --- a/arch/powerpc/platforms/83xx/suspend.c +++ b/arch/powerpc/platforms/83xx/suspend.c | |||
| @@ -32,6 +32,7 @@ | |||
| 32 | #define PMCCR1_NEXT_STATE 0x0C /* Next state for power management */ | 32 | #define PMCCR1_NEXT_STATE 0x0C /* Next state for power management */ |
| 33 | #define PMCCR1_NEXT_STATE_SHIFT 2 | 33 | #define PMCCR1_NEXT_STATE_SHIFT 2 |
| 34 | #define PMCCR1_CURR_STATE 0x03 /* Current state for power management*/ | 34 | #define PMCCR1_CURR_STATE 0x03 /* Current state for power management*/ |
| 35 | #define IMMR_SYSCR_OFFSET 0x100 | ||
| 35 | #define IMMR_RCW_OFFSET 0x900 | 36 | #define IMMR_RCW_OFFSET 0x900 |
| 36 | #define RCW_PCI_HOST 0x80000000 | 37 | #define RCW_PCI_HOST 0x80000000 |
| 37 | 38 | ||
| @@ -78,6 +79,22 @@ struct mpc83xx_clock { | |||
| 78 | u32 sccr; | 79 | u32 sccr; |
| 79 | }; | 80 | }; |
| 80 | 81 | ||
| 82 | struct mpc83xx_syscr { | ||
| 83 | __be32 sgprl; | ||
| 84 | __be32 sgprh; | ||
| 85 | __be32 spridr; | ||
| 86 | __be32 :32; | ||
| 87 | __be32 spcr; | ||
| 88 | __be32 sicrl; | ||
| 89 | __be32 sicrh; | ||
| 90 | }; | ||
| 91 | |||
| 92 | struct mpc83xx_saved { | ||
| 93 | u32 sicrl; | ||
| 94 | u32 sicrh; | ||
| 95 | u32 sccr; | ||
| 96 | }; | ||
| 97 | |||
| 81 | struct pmc_type { | 98 | struct pmc_type { |
| 82 | int has_deep_sleep; | 99 | int has_deep_sleep; |
| 83 | }; | 100 | }; |
| @@ -87,6 +104,8 @@ static int has_deep_sleep, deep_sleeping; | |||
| 87 | static int pmc_irq; | 104 | static int pmc_irq; |
| 88 | static struct mpc83xx_pmc __iomem *pmc_regs; | 105 | static struct mpc83xx_pmc __iomem *pmc_regs; |
| 89 | static struct mpc83xx_clock __iomem *clock_regs; | 106 | static struct mpc83xx_clock __iomem *clock_regs; |
| 107 | static struct mpc83xx_syscr __iomem *syscr_regs; | ||
| 108 | static struct mpc83xx_saved saved_regs; | ||
| 90 | static int is_pci_agent, wake_from_pci; | 109 | static int is_pci_agent, wake_from_pci; |
| 91 | static phys_addr_t immrbase; | 110 | static phys_addr_t immrbase; |
| 92 | static int pci_pm_state; | 111 | static int pci_pm_state; |
| @@ -137,6 +156,20 @@ static irqreturn_t pmc_irq_handler(int irq, void *dev_id) | |||
| 137 | return ret; | 156 | return ret; |
| 138 | } | 157 | } |
| 139 | 158 | ||
| 159 | static void mpc83xx_suspend_restore_regs(void) | ||
| 160 | { | ||
| 161 | out_be32(&syscr_regs->sicrl, saved_regs.sicrl); | ||
| 162 | out_be32(&syscr_regs->sicrh, saved_regs.sicrh); | ||
| 163 | out_be32(&clock_regs->sccr, saved_regs.sccr); | ||
| 164 | } | ||
| 165 | |||
| 166 | static void mpc83xx_suspend_save_regs(void) | ||
| 167 | { | ||
| 168 | saved_regs.sicrl = in_be32(&syscr_regs->sicrl); | ||
| 169 | saved_regs.sicrh = in_be32(&syscr_regs->sicrh); | ||
| 170 | saved_regs.sccr = in_be32(&clock_regs->sccr); | ||
| 171 | } | ||
| 172 | |||
| 140 | static int mpc83xx_suspend_enter(suspend_state_t state) | 173 | static int mpc83xx_suspend_enter(suspend_state_t state) |
| 141 | { | 174 | { |
| 142 | int ret = -EAGAIN; | 175 | int ret = -EAGAIN; |
| @@ -166,6 +199,8 @@ static int mpc83xx_suspend_enter(suspend_state_t state) | |||
| 166 | */ | 199 | */ |
| 167 | 200 | ||
| 168 | if (deep_sleeping) { | 201 | if (deep_sleeping) { |
| 202 | mpc83xx_suspend_save_regs(); | ||
| 203 | |||
| 169 | out_be32(&pmc_regs->mask, PMCER_ALL); | 204 | out_be32(&pmc_regs->mask, PMCER_ALL); |
| 170 | 205 | ||
| 171 | out_be32(&pmc_regs->config1, | 206 | out_be32(&pmc_regs->config1, |
| @@ -179,6 +214,8 @@ static int mpc83xx_suspend_enter(suspend_state_t state) | |||
| 179 | in_be32(&pmc_regs->config1) & ~PMCCR1_POWER_OFF); | 214 | in_be32(&pmc_regs->config1) & ~PMCCR1_POWER_OFF); |
| 180 | 215 | ||
| 181 | out_be32(&pmc_regs->mask, PMCER_PMCI); | 216 | out_be32(&pmc_regs->mask, PMCER_PMCI); |
| 217 | |||
| 218 | mpc83xx_suspend_restore_regs(); | ||
| 182 | } else { | 219 | } else { |
| 183 | out_be32(&pmc_regs->mask, PMCER_PMCI); | 220 | out_be32(&pmc_regs->mask, PMCER_PMCI); |
| 184 | 221 | ||
| @@ -194,7 +231,7 @@ out: | |||
| 194 | return ret; | 231 | return ret; |
| 195 | } | 232 | } |
| 196 | 233 | ||
| 197 | static void mpc83xx_suspend_finish(void) | 234 | static void mpc83xx_suspend_end(void) |
| 198 | { | 235 | { |
| 199 | deep_sleeping = 0; | 236 | deep_sleeping = 0; |
| 200 | } | 237 | } |
| @@ -278,7 +315,7 @@ static struct platform_suspend_ops mpc83xx_suspend_ops = { | |||
| 278 | .valid = mpc83xx_suspend_valid, | 315 | .valid = mpc83xx_suspend_valid, |
| 279 | .begin = mpc83xx_suspend_begin, | 316 | .begin = mpc83xx_suspend_begin, |
| 280 | .enter = mpc83xx_suspend_enter, | 317 | .enter = mpc83xx_suspend_enter, |
| 281 | .finish = mpc83xx_suspend_finish, | 318 | .end = mpc83xx_suspend_end, |
| 282 | }; | 319 | }; |
| 283 | 320 | ||
| 284 | static int pmc_probe(struct of_device *ofdev, | 321 | static int pmc_probe(struct of_device *ofdev, |
| @@ -333,12 +370,23 @@ static int pmc_probe(struct of_device *ofdev, | |||
| 333 | goto out_pmc; | 370 | goto out_pmc; |
| 334 | } | 371 | } |
| 335 | 372 | ||
| 373 | if (has_deep_sleep) { | ||
| 374 | syscr_regs = ioremap(immrbase + IMMR_SYSCR_OFFSET, | ||
| 375 | sizeof(*syscr_regs)); | ||
| 376 | if (!syscr_regs) { | ||
| 377 | ret = -ENOMEM; | ||
| 378 | goto out_syscr; | ||
| 379 | } | ||
| 380 | } | ||
| 381 | |||
| 336 | if (is_pci_agent) | 382 | if (is_pci_agent) |
| 337 | mpc83xx_set_agent(); | 383 | mpc83xx_set_agent(); |
| 338 | 384 | ||
| 339 | suspend_set_ops(&mpc83xx_suspend_ops); | 385 | suspend_set_ops(&mpc83xx_suspend_ops); |
| 340 | return 0; | 386 | return 0; |
| 341 | 387 | ||
| 388 | out_syscr: | ||
| 389 | iounmap(clock_regs); | ||
| 342 | out_pmc: | 390 | out_pmc: |
| 343 | iounmap(pmc_regs); | 391 | iounmap(pmc_regs); |
| 344 | out: | 392 | out: |
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c index c5028a2e5a58..21f61b8c445b 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c | |||
| @@ -86,7 +86,7 @@ static int mpc8568_fixup_125_clock(struct phy_device *phydev) | |||
| 86 | scr = phy_read(phydev, MV88E1111_SCR); | 86 | scr = phy_read(phydev, MV88E1111_SCR); |
| 87 | 87 | ||
| 88 | if (scr < 0) | 88 | if (scr < 0) |
| 89 | return err; | 89 | return scr; |
| 90 | 90 | ||
| 91 | err = phy_write(phydev, MV88E1111_SCR, scr | 0x0008); | 91 | err = phy_write(phydev, MV88E1111_SCR, scr | 0x0008); |
| 92 | 92 | ||
diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c index d5963285e3be..c278bd3a8fec 100644 --- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c +++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c | |||
| @@ -102,7 +102,7 @@ static int flipper_pic_map(struct irq_host *h, unsigned int virq, | |||
| 102 | irq_hw_number_t hwirq) | 102 | irq_hw_number_t hwirq) |
| 103 | { | 103 | { |
| 104 | set_irq_chip_data(virq, h->host_data); | 104 | set_irq_chip_data(virq, h->host_data); |
| 105 | get_irq_desc(virq)->status |= IRQ_LEVEL; | 105 | irq_to_desc(virq)->status |= IRQ_LEVEL; |
| 106 | set_irq_chip_and_handler(virq, &flipper_pic, handle_level_irq); | 106 | set_irq_chip_and_handler(virq, &flipper_pic, handle_level_irq); |
| 107 | return 0; | 107 | return 0; |
| 108 | } | 108 | } |
diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c index dd20bff33207..a771f91e215b 100644 --- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c +++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c | |||
| @@ -95,7 +95,7 @@ static int hlwd_pic_map(struct irq_host *h, unsigned int virq, | |||
| 95 | irq_hw_number_t hwirq) | 95 | irq_hw_number_t hwirq) |
| 96 | { | 96 | { |
| 97 | set_irq_chip_data(virq, h->host_data); | 97 | set_irq_chip_data(virq, h->host_data); |
| 98 | get_irq_desc(virq)->status |= IRQ_LEVEL; | 98 | irq_to_desc(virq)->status |= IRQ_LEVEL; |
| 99 | set_irq_chip_and_handler(virq, &hlwd_pic, handle_level_irq); | 99 | set_irq_chip_and_handler(virq, &hlwd_pic, handle_level_irq); |
| 100 | return 0; | 100 | return 0; |
| 101 | } | 101 | } |
| @@ -132,9 +132,9 @@ static void hlwd_pic_irq_cascade(unsigned int cascade_virq, | |||
| 132 | struct irq_host *irq_host = get_irq_data(cascade_virq); | 132 | struct irq_host *irq_host = get_irq_data(cascade_virq); |
| 133 | unsigned int virq; | 133 | unsigned int virq; |
| 134 | 134 | ||
| 135 | spin_lock(&desc->lock); | 135 | raw_spin_lock(&desc->lock); |
| 136 | desc->chip->mask(cascade_virq); /* IRQ_LEVEL */ | 136 | desc->chip->mask(cascade_virq); /* IRQ_LEVEL */ |
| 137 | spin_unlock(&desc->lock); | 137 | raw_spin_unlock(&desc->lock); |
| 138 | 138 | ||
| 139 | virq = __hlwd_pic_get_irq(irq_host); | 139 | virq = __hlwd_pic_get_irq(irq_host); |
| 140 | if (virq != NO_IRQ) | 140 | if (virq != NO_IRQ) |
| @@ -142,11 +142,11 @@ static void hlwd_pic_irq_cascade(unsigned int cascade_virq, | |||
| 142 | else | 142 | else |
| 143 | pr_err("spurious interrupt!\n"); | 143 | pr_err("spurious interrupt!\n"); |
| 144 | 144 | ||
| 145 | spin_lock(&desc->lock); | 145 | raw_spin_lock(&desc->lock); |
| 146 | desc->chip->ack(cascade_virq); /* IRQ_LEVEL */ | 146 | desc->chip->ack(cascade_virq); /* IRQ_LEVEL */ |
| 147 | if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask) | 147 | if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask) |
| 148 | desc->chip->unmask(cascade_virq); | 148 | desc->chip->unmask(cascade_virq); |
| 149 | spin_unlock(&desc->lock); | 149 | raw_spin_unlock(&desc->lock); |
| 150 | } | 150 | } |
| 151 | 151 | ||
| 152 | /* | 152 | /* |
diff --git a/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c b/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c index edc956cc8b13..20a8ed91962e 100644 --- a/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c +++ b/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c | |||
| @@ -120,7 +120,7 @@ static void ug_putc(char ch) | |||
| 120 | 120 | ||
| 121 | while (!ug_is_txfifo_ready() && count--) | 121 | while (!ug_is_txfifo_ready() && count--) |
| 122 | barrier(); | 122 | barrier(); |
| 123 | if (count) | 123 | if (count >= 0) |
| 124 | ug_raw_putc(ch); | 124 | ug_raw_putc(ch); |
| 125 | } | 125 | } |
| 126 | 126 | ||
diff --git a/arch/powerpc/platforms/iseries/mf.c b/arch/powerpc/platforms/iseries/mf.c index 0d9343df35bc..6617915bcb1a 100644 --- a/arch/powerpc/platforms/iseries/mf.c +++ b/arch/powerpc/platforms/iseries/mf.c | |||
| @@ -855,59 +855,58 @@ static int mf_get_boot_rtc(struct rtc_time *tm) | |||
| 855 | } | 855 | } |
| 856 | 856 | ||
| 857 | #ifdef CONFIG_PROC_FS | 857 | #ifdef CONFIG_PROC_FS |
| 858 | 858 | static int mf_cmdline_proc_show(struct seq_file *m, void *v) | |
| 859 | static int proc_mf_dump_cmdline(char *page, char **start, off_t off, | ||
| 860 | int count, int *eof, void *data) | ||
| 861 | { | 859 | { |
| 862 | int len; | 860 | char *page, *p; |
| 863 | char *p; | ||
| 864 | struct vsp_cmd_data vsp_cmd; | 861 | struct vsp_cmd_data vsp_cmd; |
| 865 | int rc; | 862 | int rc; |
| 866 | dma_addr_t dma_addr; | 863 | dma_addr_t dma_addr; |
| 867 | 864 | ||
| 868 | /* The HV appears to return no more than 256 bytes of command line */ | 865 | /* The HV appears to return no more than 256 bytes of command line */ |
| 869 | if (off >= 256) | 866 | page = kmalloc(256, GFP_KERNEL); |
| 870 | return 0; | 867 | if (!page) |
| 871 | if ((off + count) > 256) | 868 | return -ENOMEM; |
| 872 | count = 256 - off; | ||
| 873 | 869 | ||
| 874 | dma_addr = iseries_hv_map(page, off + count, DMA_FROM_DEVICE); | 870 | dma_addr = iseries_hv_map(page, 256, DMA_FROM_DEVICE); |
| 875 | if (dma_addr == DMA_ERROR_CODE) | 871 | if (dma_addr == DMA_ERROR_CODE) { |
| 872 | kfree(page); | ||
| 876 | return -ENOMEM; | 873 | return -ENOMEM; |
| 877 | memset(page, 0, off + count); | 874 | } |
| 875 | memset(page, 0, 256); | ||
| 878 | memset(&vsp_cmd, 0, sizeof(vsp_cmd)); | 876 | memset(&vsp_cmd, 0, sizeof(vsp_cmd)); |
| 879 | vsp_cmd.cmd = 33; | 877 | vsp_cmd.cmd = 33; |
| 880 | vsp_cmd.sub_data.kern.token = dma_addr; | 878 | vsp_cmd.sub_data.kern.token = dma_addr; |
| 881 | vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex; | 879 | vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex; |
| 882 | vsp_cmd.sub_data.kern.side = (u64)data; | 880 | vsp_cmd.sub_data.kern.side = (u64)m->private; |
| 883 | vsp_cmd.sub_data.kern.length = off + count; | 881 | vsp_cmd.sub_data.kern.length = 256; |
| 884 | mb(); | 882 | mb(); |
| 885 | rc = signal_vsp_instruction(&vsp_cmd); | 883 | rc = signal_vsp_instruction(&vsp_cmd); |
| 886 | iseries_hv_unmap(dma_addr, off + count, DMA_FROM_DEVICE); | 884 | iseries_hv_unmap(dma_addr, 256, DMA_FROM_DEVICE); |
| 887 | if (rc) | 885 | if (rc) { |
| 886 | kfree(page); | ||
| 888 | return rc; | 887 | return rc; |
| 889 | if (vsp_cmd.result_code != 0) | 888 | } |
| 889 | if (vsp_cmd.result_code != 0) { | ||
| 890 | kfree(page); | ||
| 890 | return -ENOMEM; | 891 | return -ENOMEM; |
| 892 | } | ||
| 891 | p = page; | 893 | p = page; |
| 892 | len = 0; | 894 | while (p - page < 256) { |
| 893 | while (len < (off + count)) { | 895 | if (*p == '\0' || *p == '\n') { |
| 894 | if ((*p == '\0') || (*p == '\n')) { | 896 | *p = '\n'; |
| 895 | if (*p == '\0') | ||
| 896 | *p = '\n'; | ||
| 897 | p++; | ||
| 898 | len++; | ||
| 899 | *eof = 1; | ||
| 900 | break; | 897 | break; |
| 901 | } | 898 | } |
| 902 | p++; | 899 | p++; |
| 903 | len++; | ||
| 904 | } | ||
| 905 | 900 | ||
| 906 | if (len < off) { | ||
| 907 | *eof = 1; | ||
| 908 | len = 0; | ||
| 909 | } | 901 | } |
| 910 | return len; | 902 | seq_write(m, page, p - page); |
| 903 | kfree(page); | ||
| 904 | return 0; | ||
| 905 | } | ||
| 906 | |||
| 907 | static int mf_cmdline_proc_open(struct inode *inode, struct file *file) | ||
| 908 | { | ||
| 909 | return single_open(file, mf_cmdline_proc_show, PDE(inode)->data); | ||
| 911 | } | 910 | } |
| 912 | 911 | ||
| 913 | #if 0 | 912 | #if 0 |
| @@ -962,10 +961,8 @@ static int proc_mf_dump_vmlinux(char *page, char **start, off_t off, | |||
| 962 | } | 961 | } |
| 963 | #endif | 962 | #endif |
| 964 | 963 | ||
| 965 | static int proc_mf_dump_side(char *page, char **start, off_t off, | 964 | static int mf_side_proc_show(struct seq_file *m, void *v) |
| 966 | int count, int *eof, void *data) | ||
| 967 | { | 965 | { |
| 968 | int len; | ||
| 969 | char mf_current_side = ' '; | 966 | char mf_current_side = ' '; |
| 970 | struct vsp_cmd_data vsp_cmd; | 967 | struct vsp_cmd_data vsp_cmd; |
| 971 | 968 | ||
| @@ -989,21 +986,17 @@ static int proc_mf_dump_side(char *page, char **start, off_t off, | |||
| 989 | } | 986 | } |
| 990 | } | 987 | } |
| 991 | 988 | ||
| 992 | len = sprintf(page, "%c\n", mf_current_side); | 989 | seq_printf(m, "%c\n", mf_current_side); |
| 990 | return 0; | ||
| 991 | } | ||
| 993 | 992 | ||
| 994 | if (len <= (off + count)) | 993 | static int mf_side_proc_open(struct inode *inode, struct file *file) |
| 995 | *eof = 1; | 994 | { |
| 996 | *start = page + off; | 995 | return single_open(file, mf_side_proc_show, NULL); |
| 997 | len -= off; | ||
| 998 | if (len > count) | ||
| 999 | len = count; | ||
| 1000 | if (len < 0) | ||
| 1001 | len = 0; | ||
| 1002 | return len; | ||
| 1003 | } | 996 | } |
| 1004 | 997 | ||
| 1005 | static int proc_mf_change_side(struct file *file, const char __user *buffer, | 998 | static ssize_t mf_side_proc_write(struct file *file, const char __user *buffer, |
| 1006 | unsigned long count, void *data) | 999 | size_t count, loff_t *pos) |
| 1007 | { | 1000 | { |
| 1008 | char side; | 1001 | char side; |
| 1009 | u64 newSide; | 1002 | u64 newSide; |
| @@ -1041,6 +1034,15 @@ static int proc_mf_change_side(struct file *file, const char __user *buffer, | |||
| 1041 | return count; | 1034 | return count; |
| 1042 | } | 1035 | } |
| 1043 | 1036 | ||
| 1037 | static const struct file_operations mf_side_proc_fops = { | ||
| 1038 | .owner = THIS_MODULE, | ||
| 1039 | .open = mf_side_proc_open, | ||
| 1040 | .read = seq_read, | ||
| 1041 | .llseek = seq_lseek, | ||
| 1042 | .release = single_release, | ||
| 1043 | .write = mf_side_proc_write, | ||
| 1044 | }; | ||
| 1045 | |||
| 1044 | #if 0 | 1046 | #if 0 |
| 1045 | static void mf_getSrcHistory(char *buffer, int size) | 1047 | static void mf_getSrcHistory(char *buffer, int size) |
| 1046 | { | 1048 | { |
| @@ -1087,8 +1089,7 @@ static void mf_getSrcHistory(char *buffer, int size) | |||
| 1087 | } | 1089 | } |
| 1088 | #endif | 1090 | #endif |
| 1089 | 1091 | ||
| 1090 | static int proc_mf_dump_src(char *page, char **start, off_t off, | 1092 | static int mf_src_proc_show(struct seq_file *m, void *v) |
| 1091 | int count, int *eof, void *data) | ||
| 1092 | { | 1093 | { |
| 1093 | #if 0 | 1094 | #if 0 |
| 1094 | int len; | 1095 | int len; |
| @@ -1109,8 +1110,13 @@ static int proc_mf_dump_src(char *page, char **start, off_t off, | |||
| 1109 | #endif | 1110 | #endif |
| 1110 | } | 1111 | } |
| 1111 | 1112 | ||
| 1112 | static int proc_mf_change_src(struct file *file, const char __user *buffer, | 1113 | static int mf_src_proc_open(struct inode *inode, struct file *file) |
| 1113 | unsigned long count, void *data) | 1114 | { |
| 1115 | return single_open(file, mf_src_proc_show, NULL); | ||
| 1116 | } | ||
| 1117 | |||
| 1118 | static ssize_t mf_src_proc_write(struct file *file, const char __user *buffer, | ||
| 1119 | size_t count, loff_t *pos) | ||
| 1114 | { | 1120 | { |
| 1115 | char stkbuf[10]; | 1121 | char stkbuf[10]; |
| 1116 | 1122 | ||
| @@ -1135,9 +1141,19 @@ static int proc_mf_change_src(struct file *file, const char __user *buffer, | |||
| 1135 | return count; | 1141 | return count; |
| 1136 | } | 1142 | } |
| 1137 | 1143 | ||
| 1138 | static int proc_mf_change_cmdline(struct file *file, const char __user *buffer, | 1144 | static const struct file_operations mf_src_proc_fops = { |
| 1139 | unsigned long count, void *data) | 1145 | .owner = THIS_MODULE, |
| 1146 | .open = mf_src_proc_open, | ||
| 1147 | .read = seq_read, | ||
| 1148 | .llseek = seq_lseek, | ||
| 1149 | .release = single_release, | ||
| 1150 | .write = mf_src_proc_write, | ||
| 1151 | }; | ||
| 1152 | |||
| 1153 | static ssize_t mf_cmdline_proc_write(struct file *file, const char __user *buffer, | ||
| 1154 | size_t count, loff_t *pos) | ||
| 1140 | { | 1155 | { |
| 1156 | void *data = PDE(file->f_path.dentry->d_inode)->data; | ||
| 1141 | struct vsp_cmd_data vsp_cmd; | 1157 | struct vsp_cmd_data vsp_cmd; |
| 1142 | dma_addr_t dma_addr; | 1158 | dma_addr_t dma_addr; |
| 1143 | char *page; | 1159 | char *page; |
| @@ -1172,6 +1188,15 @@ out: | |||
| 1172 | return ret; | 1188 | return ret; |
| 1173 | } | 1189 | } |
| 1174 | 1190 | ||
| 1191 | static const struct file_operations mf_cmdline_proc_fops = { | ||
| 1192 | .owner = THIS_MODULE, | ||
| 1193 | .open = mf_cmdline_proc_open, | ||
| 1194 | .read = seq_read, | ||
| 1195 | .llseek = seq_lseek, | ||
| 1196 | .release = single_release, | ||
| 1197 | .write = mf_cmdline_proc_write, | ||
| 1198 | }; | ||
| 1199 | |||
| 1175 | static ssize_t proc_mf_change_vmlinux(struct file *file, | 1200 | static ssize_t proc_mf_change_vmlinux(struct file *file, |
| 1176 | const char __user *buf, | 1201 | const char __user *buf, |
| 1177 | size_t count, loff_t *ppos) | 1202 | size_t count, loff_t *ppos) |
| @@ -1246,12 +1271,10 @@ static int __init mf_proc_init(void) | |||
| 1246 | if (!mf) | 1271 | if (!mf) |
| 1247 | return 1; | 1272 | return 1; |
| 1248 | 1273 | ||
| 1249 | ent = create_proc_entry("cmdline", S_IFREG|S_IRUSR|S_IWUSR, mf); | 1274 | ent = proc_create_data("cmdline", S_IRUSR|S_IWUSR, mf, |
| 1275 | &mf_cmdline_proc_fops, (void *)(long)i); | ||
| 1250 | if (!ent) | 1276 | if (!ent) |
| 1251 | return 1; | 1277 | return 1; |
| 1252 | ent->data = (void *)(long)i; | ||
| 1253 | ent->read_proc = proc_mf_dump_cmdline; | ||
| 1254 | ent->write_proc = proc_mf_change_cmdline; | ||
| 1255 | 1278 | ||
| 1256 | if (i == 3) /* no vmlinux entry for 'D' */ | 1279 | if (i == 3) /* no vmlinux entry for 'D' */ |
| 1257 | continue; | 1280 | continue; |
| @@ -1263,19 +1286,15 @@ static int __init mf_proc_init(void) | |||
| 1263 | return 1; | 1286 | return 1; |
| 1264 | } | 1287 | } |
| 1265 | 1288 | ||
| 1266 | ent = create_proc_entry("side", S_IFREG|S_IRUSR|S_IWUSR, mf_proc_root); | 1289 | ent = proc_create("side", S_IFREG|S_IRUSR|S_IWUSR, mf_proc_root, |
| 1290 | &mf_side_proc_fops); | ||
| 1267 | if (!ent) | 1291 | if (!ent) |
| 1268 | return 1; | 1292 | return 1; |
| 1269 | ent->data = (void *)0; | ||
| 1270 | ent->read_proc = proc_mf_dump_side; | ||
| 1271 | ent->write_proc = proc_mf_change_side; | ||
| 1272 | 1293 | ||
| 1273 | ent = create_proc_entry("src", S_IFREG|S_IRUSR|S_IWUSR, mf_proc_root); | 1294 | ent = proc_create("src", S_IFREG|S_IRUSR|S_IWUSR, mf_proc_root, |
| 1295 | &mf_src_proc_fops); | ||
| 1274 | if (!ent) | 1296 | if (!ent) |
| 1275 | return 1; | 1297 | return 1; |
| 1276 | ent->data = (void *)0; | ||
| 1277 | ent->read_proc = proc_mf_dump_src; | ||
| 1278 | ent->write_proc = proc_mf_change_src; | ||
| 1279 | 1298 | ||
| 1280 | return 0; | 1299 | return 0; |
| 1281 | } | 1300 | } |
diff --git a/arch/powerpc/platforms/iseries/viopath.c b/arch/powerpc/platforms/iseries/viopath.c index 49ff4dc422b7..5aea94f30836 100644 --- a/arch/powerpc/platforms/iseries/viopath.c +++ b/arch/powerpc/platforms/iseries/viopath.c | |||
| @@ -116,7 +116,7 @@ static int proc_viopath_show(struct seq_file *m, void *v) | |||
| 116 | u16 vlanMap; | 116 | u16 vlanMap; |
| 117 | dma_addr_t handle; | 117 | dma_addr_t handle; |
| 118 | HvLpEvent_Rc hvrc; | 118 | HvLpEvent_Rc hvrc; |
| 119 | DECLARE_COMPLETION(done); | 119 | DECLARE_COMPLETION_ONSTACK(done); |
| 120 | struct device_node *node; | 120 | struct device_node *node; |
| 121 | const char *sysid; | 121 | const char *sysid; |
| 122 | 122 | ||
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig index 27554c807fd5..c667f0f02c34 100644 --- a/arch/powerpc/platforms/pseries/Kconfig +++ b/arch/powerpc/platforms/pseries/Kconfig | |||
| @@ -2,6 +2,8 @@ config PPC_PSERIES | |||
| 2 | depends on PPC64 && PPC_BOOK3S | 2 | depends on PPC64 && PPC_BOOK3S |
| 3 | bool "IBM pSeries & new (POWER5-based) iSeries" | 3 | bool "IBM pSeries & new (POWER5-based) iSeries" |
| 4 | select MPIC | 4 | select MPIC |
| 5 | select PCI_MSI | ||
| 6 | select XICS | ||
| 5 | select PPC_I8259 | 7 | select PPC_I8259 |
| 6 | select PPC_RTAS | 8 | select PPC_RTAS |
| 7 | select PPC_RTAS_DAEMON | 9 | select PPC_RTAS_DAEMON |
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c index bcdcf0ccc8d7..a277f2e28dbc 100644 --- a/arch/powerpc/platforms/pseries/cmm.c +++ b/arch/powerpc/platforms/pseries/cmm.c | |||
| @@ -38,19 +38,28 @@ | |||
| 38 | #include <asm/mmu.h> | 38 | #include <asm/mmu.h> |
| 39 | #include <asm/pgalloc.h> | 39 | #include <asm/pgalloc.h> |
| 40 | #include <asm/uaccess.h> | 40 | #include <asm/uaccess.h> |
| 41 | #include <linux/memory.h> | ||
| 41 | 42 | ||
| 42 | #include "plpar_wrappers.h" | 43 | #include "plpar_wrappers.h" |
| 43 | 44 | ||
| 44 | #define CMM_DRIVER_VERSION "1.0.0" | 45 | #define CMM_DRIVER_VERSION "1.0.0" |
| 45 | #define CMM_DEFAULT_DELAY 1 | 46 | #define CMM_DEFAULT_DELAY 1 |
| 47 | #define CMM_HOTPLUG_DELAY 5 | ||
| 46 | #define CMM_DEBUG 0 | 48 | #define CMM_DEBUG 0 |
| 47 | #define CMM_DISABLE 0 | 49 | #define CMM_DISABLE 0 |
| 48 | #define CMM_OOM_KB 1024 | 50 | #define CMM_OOM_KB 1024 |
| 49 | #define CMM_MIN_MEM_MB 256 | 51 | #define CMM_MIN_MEM_MB 256 |
| 50 | #define KB2PAGES(_p) ((_p)>>(PAGE_SHIFT-10)) | 52 | #define KB2PAGES(_p) ((_p)>>(PAGE_SHIFT-10)) |
| 51 | #define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10)) | 53 | #define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10)) |
| 54 | /* | ||
| 55 | * The priority level tries to ensure that this notifier is called as | ||
| 56 | * late as possible to reduce thrashing in the shared memory pool. | ||
| 57 | */ | ||
| 58 | #define CMM_MEM_HOTPLUG_PRI 1 | ||
| 59 | #define CMM_MEM_ISOLATE_PRI 15 | ||
| 52 | 60 | ||
| 53 | static unsigned int delay = CMM_DEFAULT_DELAY; | 61 | static unsigned int delay = CMM_DEFAULT_DELAY; |
| 62 | static unsigned int hotplug_delay = CMM_HOTPLUG_DELAY; | ||
| 54 | static unsigned int oom_kb = CMM_OOM_KB; | 63 | static unsigned int oom_kb = CMM_OOM_KB; |
| 55 | static unsigned int cmm_debug = CMM_DEBUG; | 64 | static unsigned int cmm_debug = CMM_DEBUG; |
| 56 | static unsigned int cmm_disabled = CMM_DISABLE; | 65 | static unsigned int cmm_disabled = CMM_DISABLE; |
| @@ -65,6 +74,10 @@ MODULE_VERSION(CMM_DRIVER_VERSION); | |||
| 65 | module_param_named(delay, delay, uint, S_IRUGO | S_IWUSR); | 74 | module_param_named(delay, delay, uint, S_IRUGO | S_IWUSR); |
| 66 | MODULE_PARM_DESC(delay, "Delay (in seconds) between polls to query hypervisor paging requests. " | 75 | MODULE_PARM_DESC(delay, "Delay (in seconds) between polls to query hypervisor paging requests. " |
| 67 | "[Default=" __stringify(CMM_DEFAULT_DELAY) "]"); | 76 | "[Default=" __stringify(CMM_DEFAULT_DELAY) "]"); |
| 77 | module_param_named(hotplug_delay, hotplug_delay, uint, S_IRUGO | S_IWUSR); | ||
| 78 | MODULE_PARM_DESC(delay, "Delay (in seconds) after memory hotplug remove " | ||
| 79 | "before loaning resumes. " | ||
| 80 | "[Default=" __stringify(CMM_HOTPLUG_DELAY) "]"); | ||
| 68 | module_param_named(oom_kb, oom_kb, uint, S_IRUGO | S_IWUSR); | 81 | module_param_named(oom_kb, oom_kb, uint, S_IRUGO | S_IWUSR); |
| 69 | MODULE_PARM_DESC(oom_kb, "Amount of memory in kb to free on OOM. " | 82 | MODULE_PARM_DESC(oom_kb, "Amount of memory in kb to free on OOM. " |
| 70 | "[Default=" __stringify(CMM_OOM_KB) "]"); | 83 | "[Default=" __stringify(CMM_OOM_KB) "]"); |
| @@ -92,6 +105,9 @@ static unsigned long oom_freed_pages; | |||
| 92 | static struct cmm_page_array *cmm_page_list; | 105 | static struct cmm_page_array *cmm_page_list; |
| 93 | static DEFINE_SPINLOCK(cmm_lock); | 106 | static DEFINE_SPINLOCK(cmm_lock); |
| 94 | 107 | ||
| 108 | static DEFINE_MUTEX(hotplug_mutex); | ||
| 109 | static int hotplug_occurred; /* protected by the hotplug mutex */ | ||
| 110 | |||
| 95 | static struct task_struct *cmm_thread_ptr; | 111 | static struct task_struct *cmm_thread_ptr; |
| 96 | 112 | ||
| 97 | /** | 113 | /** |
| @@ -110,6 +126,17 @@ static long cmm_alloc_pages(long nr) | |||
| 110 | cmm_dbg("Begin request for %ld pages\n", nr); | 126 | cmm_dbg("Begin request for %ld pages\n", nr); |
| 111 | 127 | ||
| 112 | while (nr) { | 128 | while (nr) { |
| 129 | /* Exit if a hotplug operation is in progress or occurred */ | ||
| 130 | if (mutex_trylock(&hotplug_mutex)) { | ||
| 131 | if (hotplug_occurred) { | ||
| 132 | mutex_unlock(&hotplug_mutex); | ||
| 133 | break; | ||
| 134 | } | ||
| 135 | mutex_unlock(&hotplug_mutex); | ||
| 136 | } else { | ||
| 137 | break; | ||
| 138 | } | ||
| 139 | |||
| 113 | addr = __get_free_page(GFP_NOIO | __GFP_NOWARN | | 140 | addr = __get_free_page(GFP_NOIO | __GFP_NOWARN | |
| 114 | __GFP_NORETRY | __GFP_NOMEMALLOC); | 141 | __GFP_NORETRY | __GFP_NOMEMALLOC); |
| 115 | if (!addr) | 142 | if (!addr) |
| @@ -119,8 +146,9 @@ static long cmm_alloc_pages(long nr) | |||
| 119 | if (!pa || pa->index >= CMM_NR_PAGES) { | 146 | if (!pa || pa->index >= CMM_NR_PAGES) { |
| 120 | /* Need a new page for the page list. */ | 147 | /* Need a new page for the page list. */ |
| 121 | spin_unlock(&cmm_lock); | 148 | spin_unlock(&cmm_lock); |
| 122 | npa = (struct cmm_page_array *)__get_free_page(GFP_NOIO | __GFP_NOWARN | | 149 | npa = (struct cmm_page_array *)__get_free_page( |
| 123 | __GFP_NORETRY | __GFP_NOMEMALLOC); | 150 | GFP_NOIO | __GFP_NOWARN | |
| 151 | __GFP_NORETRY | __GFP_NOMEMALLOC); | ||
| 124 | if (!npa) { | 152 | if (!npa) { |
| 125 | pr_info("%s: Can not allocate new page list\n", __func__); | 153 | pr_info("%s: Can not allocate new page list\n", __func__); |
| 126 | free_page(addr); | 154 | free_page(addr); |
| @@ -282,9 +310,28 @@ static int cmm_thread(void *dummy) | |||
| 282 | while (1) { | 310 | while (1) { |
| 283 | timeleft = msleep_interruptible(delay * 1000); | 311 | timeleft = msleep_interruptible(delay * 1000); |
| 284 | 312 | ||
| 285 | if (kthread_should_stop() || timeleft) { | 313 | if (kthread_should_stop() || timeleft) |
| 286 | loaned_pages_target = loaned_pages; | ||
| 287 | break; | 314 | break; |
| 315 | |||
| 316 | if (mutex_trylock(&hotplug_mutex)) { | ||
| 317 | if (hotplug_occurred) { | ||
| 318 | hotplug_occurred = 0; | ||
| 319 | mutex_unlock(&hotplug_mutex); | ||
| 320 | cmm_dbg("Hotplug operation has occurred, " | ||
| 321 | "loaning activity suspended " | ||
| 322 | "for %d seconds.\n", | ||
| 323 | hotplug_delay); | ||
| 324 | timeleft = msleep_interruptible(hotplug_delay * | ||
| 325 | 1000); | ||
| 326 | if (kthread_should_stop() || timeleft) | ||
| 327 | break; | ||
| 328 | continue; | ||
| 329 | } | ||
| 330 | mutex_unlock(&hotplug_mutex); | ||
| 331 | } else { | ||
| 332 | cmm_dbg("Hotplug operation in progress, activity " | ||
| 333 | "suspended\n"); | ||
| 334 | continue; | ||
| 288 | } | 335 | } |
| 289 | 336 | ||
| 290 | cmm_get_mpp(); | 337 | cmm_get_mpp(); |
| @@ -414,6 +461,193 @@ static struct notifier_block cmm_reboot_nb = { | |||
| 414 | }; | 461 | }; |
| 415 | 462 | ||
| 416 | /** | 463 | /** |
| 464 | * cmm_count_pages - Count the number of pages loaned in a particular range. | ||
| 465 | * | ||
| 466 | * @arg: memory_isolate_notify structure with address range and count | ||
| 467 | * | ||
| 468 | * Return value: | ||
| 469 | * 0 on success | ||
| 470 | **/ | ||
| 471 | static unsigned long cmm_count_pages(void *arg) | ||
| 472 | { | ||
| 473 | struct memory_isolate_notify *marg = arg; | ||
| 474 | struct cmm_page_array *pa; | ||
| 475 | unsigned long start = (unsigned long)pfn_to_kaddr(marg->start_pfn); | ||
| 476 | unsigned long end = start + (marg->nr_pages << PAGE_SHIFT); | ||
| 477 | unsigned long idx; | ||
| 478 | |||
| 479 | spin_lock(&cmm_lock); | ||
| 480 | pa = cmm_page_list; | ||
| 481 | while (pa) { | ||
| 482 | if ((unsigned long)pa >= start && (unsigned long)pa < end) | ||
| 483 | marg->pages_found++; | ||
| 484 | for (idx = 0; idx < pa->index; idx++) | ||
| 485 | if (pa->page[idx] >= start && pa->page[idx] < end) | ||
| 486 | marg->pages_found++; | ||
| 487 | pa = pa->next; | ||
| 488 | } | ||
| 489 | spin_unlock(&cmm_lock); | ||
| 490 | return 0; | ||
| 491 | } | ||
| 492 | |||
| 493 | /** | ||
| 494 | * cmm_memory_isolate_cb - Handle memory isolation notifier calls | ||
| 495 | * @self: notifier block struct | ||
| 496 | * @action: action to take | ||
| 497 | * @arg: struct memory_isolate_notify data for handler | ||
| 498 | * | ||
| 499 | * Return value: | ||
| 500 | * NOTIFY_OK or notifier error based on subfunction return value | ||
| 501 | **/ | ||
| 502 | static int cmm_memory_isolate_cb(struct notifier_block *self, | ||
| 503 | unsigned long action, void *arg) | ||
| 504 | { | ||
| 505 | int ret = 0; | ||
| 506 | |||
| 507 | if (action == MEM_ISOLATE_COUNT) | ||
| 508 | ret = cmm_count_pages(arg); | ||
| 509 | |||
| 510 | if (ret) | ||
| 511 | ret = notifier_from_errno(ret); | ||
| 512 | else | ||
| 513 | ret = NOTIFY_OK; | ||
| 514 | |||
| 515 | return ret; | ||
| 516 | } | ||
| 517 | |||
| 518 | static struct notifier_block cmm_mem_isolate_nb = { | ||
| 519 | .notifier_call = cmm_memory_isolate_cb, | ||
| 520 | .priority = CMM_MEM_ISOLATE_PRI | ||
| 521 | }; | ||
| 522 | |||
| 523 | /** | ||
| 524 | * cmm_mem_going_offline - Unloan pages where memory is to be removed | ||
| 525 | * @arg: memory_notify structure with page range to be offlined | ||
| 526 | * | ||
| 527 | * Return value: | ||
| 528 | * 0 on success | ||
| 529 | **/ | ||
| 530 | static int cmm_mem_going_offline(void *arg) | ||
| 531 | { | ||
| 532 | struct memory_notify *marg = arg; | ||
| 533 | unsigned long start_page = (unsigned long)pfn_to_kaddr(marg->start_pfn); | ||
| 534 | unsigned long end_page = start_page + (marg->nr_pages << PAGE_SHIFT); | ||
| 535 | struct cmm_page_array *pa_curr, *pa_last, *npa; | ||
| 536 | unsigned long idx; | ||
| 537 | unsigned long freed = 0; | ||
| 538 | |||
| 539 | cmm_dbg("Memory going offline, searching 0x%lx (%ld pages).\n", | ||
| 540 | start_page, marg->nr_pages); | ||
| 541 | spin_lock(&cmm_lock); | ||
| 542 | |||
| 543 | /* Search the page list for pages in the range to be offlined */ | ||
| 544 | pa_last = pa_curr = cmm_page_list; | ||
| 545 | while (pa_curr) { | ||
| 546 | for (idx = (pa_curr->index - 1); (idx + 1) > 0; idx--) { | ||
| 547 | if ((pa_curr->page[idx] < start_page) || | ||
| 548 | (pa_curr->page[idx] >= end_page)) | ||
| 549 | continue; | ||
| 550 | |||
| 551 | plpar_page_set_active(__pa(pa_curr->page[idx])); | ||
| 552 | free_page(pa_curr->page[idx]); | ||
| 553 | freed++; | ||
| 554 | loaned_pages--; | ||
| 555 | totalram_pages++; | ||
| 556 | pa_curr->page[idx] = pa_last->page[--pa_last->index]; | ||
| 557 | if (pa_last->index == 0) { | ||
| 558 | if (pa_curr == pa_last) | ||
| 559 | pa_curr = pa_last->next; | ||
| 560 | pa_last = pa_last->next; | ||
| 561 | free_page((unsigned long)cmm_page_list); | ||
| 562 | cmm_page_list = pa_last; | ||
| 563 | continue; | ||
| 564 | } | ||
| 565 | } | ||
| 566 | pa_curr = pa_curr->next; | ||
| 567 | } | ||
| 568 | |||
| 569 | /* Search for page list structures in the range to be offlined */ | ||
| 570 | pa_last = NULL; | ||
| 571 | pa_curr = cmm_page_list; | ||
| 572 | while (pa_curr) { | ||
| 573 | if (((unsigned long)pa_curr >= start_page) && | ||
| 574 | ((unsigned long)pa_curr < end_page)) { | ||
| 575 | npa = (struct cmm_page_array *)__get_free_page( | ||
| 576 | GFP_NOIO | __GFP_NOWARN | | ||
| 577 | __GFP_NORETRY | __GFP_NOMEMALLOC); | ||
| 578 | if (!npa) { | ||
| 579 | spin_unlock(&cmm_lock); | ||
| 580 | cmm_dbg("Failed to allocate memory for list " | ||
| 581 | "management. Memory hotplug " | ||
| 582 | "failed.\n"); | ||
| 583 | return ENOMEM; | ||
| 584 | } | ||
| 585 | memcpy(npa, pa_curr, PAGE_SIZE); | ||
| 586 | if (pa_curr == cmm_page_list) | ||
| 587 | cmm_page_list = npa; | ||
| 588 | if (pa_last) | ||
| 589 | pa_last->next = npa; | ||
| 590 | free_page((unsigned long) pa_curr); | ||
| 591 | freed++; | ||
| 592 | pa_curr = npa; | ||
| 593 | } | ||
| 594 | |||
| 595 | pa_last = pa_curr; | ||
| 596 | pa_curr = pa_curr->next; | ||
| 597 | } | ||
| 598 | |||
| 599 | spin_unlock(&cmm_lock); | ||
| 600 | cmm_dbg("Released %ld pages in the search range.\n", freed); | ||
| 601 | |||
| 602 | return 0; | ||
| 603 | } | ||
| 604 | |||
| 605 | /** | ||
| 606 | * cmm_memory_cb - Handle memory hotplug notifier calls | ||
| 607 | * @self: notifier block struct | ||
| 608 | * @action: action to take | ||
| 609 | * @arg: struct memory_notify data for handler | ||
| 610 | * | ||
| 611 | * Return value: | ||
| 612 | * NOTIFY_OK or notifier error based on subfunction return value | ||
| 613 | * | ||
| 614 | **/ | ||
| 615 | static int cmm_memory_cb(struct notifier_block *self, | ||
| 616 | unsigned long action, void *arg) | ||
| 617 | { | ||
| 618 | int ret = 0; | ||
| 619 | |||
| 620 | switch (action) { | ||
| 621 | case MEM_GOING_OFFLINE: | ||
| 622 | mutex_lock(&hotplug_mutex); | ||
| 623 | hotplug_occurred = 1; | ||
| 624 | ret = cmm_mem_going_offline(arg); | ||
| 625 | break; | ||
| 626 | case MEM_OFFLINE: | ||
| 627 | case MEM_CANCEL_OFFLINE: | ||
| 628 | mutex_unlock(&hotplug_mutex); | ||
| 629 | cmm_dbg("Memory offline operation complete.\n"); | ||
| 630 | break; | ||
| 631 | case MEM_GOING_ONLINE: | ||
| 632 | case MEM_ONLINE: | ||
| 633 | case MEM_CANCEL_ONLINE: | ||
| 634 | break; | ||
| 635 | } | ||
| 636 | |||
| 637 | if (ret) | ||
| 638 | ret = notifier_from_errno(ret); | ||
| 639 | else | ||
| 640 | ret = NOTIFY_OK; | ||
| 641 | |||
| 642 | return ret; | ||
| 643 | } | ||
| 644 | |||
| 645 | static struct notifier_block cmm_mem_nb = { | ||
| 646 | .notifier_call = cmm_memory_cb, | ||
| 647 | .priority = CMM_MEM_HOTPLUG_PRI | ||
| 648 | }; | ||
| 649 | |||
| 650 | /** | ||
| 417 | * cmm_init - Module initialization | 651 | * cmm_init - Module initialization |
| 418 | * | 652 | * |
| 419 | * Return value: | 653 | * Return value: |
| @@ -435,18 +669,24 @@ static int cmm_init(void) | |||
| 435 | if ((rc = cmm_sysfs_register(&cmm_sysdev))) | 669 | if ((rc = cmm_sysfs_register(&cmm_sysdev))) |
| 436 | goto out_reboot_notifier; | 670 | goto out_reboot_notifier; |
| 437 | 671 | ||
| 672 | if (register_memory_notifier(&cmm_mem_nb) || | ||
| 673 | register_memory_isolate_notifier(&cmm_mem_isolate_nb)) | ||
| 674 | goto out_unregister_notifier; | ||
| 675 | |||
| 438 | if (cmm_disabled) | 676 | if (cmm_disabled) |
| 439 | return rc; | 677 | return rc; |
| 440 | 678 | ||
| 441 | cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread"); | 679 | cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread"); |
| 442 | if (IS_ERR(cmm_thread_ptr)) { | 680 | if (IS_ERR(cmm_thread_ptr)) { |
| 443 | rc = PTR_ERR(cmm_thread_ptr); | 681 | rc = PTR_ERR(cmm_thread_ptr); |
| 444 | goto out_unregister_sysfs; | 682 | goto out_unregister_notifier; |
| 445 | } | 683 | } |
| 446 | 684 | ||
| 447 | return rc; | 685 | return rc; |
| 448 | 686 | ||
| 449 | out_unregister_sysfs: | 687 | out_unregister_notifier: |
| 688 | unregister_memory_notifier(&cmm_mem_nb); | ||
| 689 | unregister_memory_isolate_notifier(&cmm_mem_isolate_nb); | ||
| 450 | cmm_unregister_sysfs(&cmm_sysdev); | 690 | cmm_unregister_sysfs(&cmm_sysdev); |
| 451 | out_reboot_notifier: | 691 | out_reboot_notifier: |
| 452 | unregister_reboot_notifier(&cmm_reboot_nb); | 692 | unregister_reboot_notifier(&cmm_reboot_nb); |
| @@ -467,6 +707,8 @@ static void cmm_exit(void) | |||
| 467 | kthread_stop(cmm_thread_ptr); | 707 | kthread_stop(cmm_thread_ptr); |
| 468 | unregister_oom_notifier(&cmm_oom_nb); | 708 | unregister_oom_notifier(&cmm_oom_nb); |
| 469 | unregister_reboot_notifier(&cmm_reboot_nb); | 709 | unregister_reboot_notifier(&cmm_reboot_nb); |
| 710 | unregister_memory_notifier(&cmm_mem_nb); | ||
| 711 | unregister_memory_isolate_notifier(&cmm_mem_isolate_nb); | ||
| 470 | cmm_free_pages(loaned_pages); | 712 | cmm_free_pages(loaned_pages); |
| 471 | cmm_unregister_sysfs(&cmm_sysdev); | 713 | cmm_unregister_sysfs(&cmm_sysdev); |
| 472 | } | 714 | } |
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index 12df9e8812a9..67b7a10f9fce 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c | |||
| @@ -346,12 +346,14 @@ int dlpar_release_drc(u32 drc_index) | |||
| 346 | 346 | ||
| 347 | static DEFINE_MUTEX(pseries_cpu_hotplug_mutex); | 347 | static DEFINE_MUTEX(pseries_cpu_hotplug_mutex); |
| 348 | 348 | ||
| 349 | void cpu_hotplug_driver_lock() | 349 | void cpu_hotplug_driver_lock(void) |
| 350 | __acquires(pseries_cpu_hotplug_mutex) | ||
| 350 | { | 351 | { |
| 351 | mutex_lock(&pseries_cpu_hotplug_mutex); | 352 | mutex_lock(&pseries_cpu_hotplug_mutex); |
| 352 | } | 353 | } |
| 353 | 354 | ||
| 354 | void cpu_hotplug_driver_unlock() | 355 | void cpu_hotplug_driver_unlock(void) |
| 356 | __releases(pseries_cpu_hotplug_mutex) | ||
| 355 | { | 357 | { |
| 356 | mutex_unlock(&pseries_cpu_hotplug_mutex); | 358 | mutex_unlock(&pseries_cpu_hotplug_mutex); |
| 357 | } | 359 | } |
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c index 8868c012268a..b4886635972c 100644 --- a/arch/powerpc/platforms/pseries/smp.c +++ b/arch/powerpc/platforms/pseries/smp.c | |||
| @@ -144,8 +144,8 @@ static void __devinit smp_pSeries_kick_cpu(int nr) | |||
| 144 | hcpuid = get_hard_smp_processor_id(nr); | 144 | hcpuid = get_hard_smp_processor_id(nr); |
| 145 | rc = plpar_hcall_norets(H_PROD, hcpuid); | 145 | rc = plpar_hcall_norets(H_PROD, hcpuid); |
| 146 | if (rc != H_SUCCESS) | 146 | if (rc != H_SUCCESS) |
| 147 | panic("Error: Prod to wake up processor %d Ret= %ld\n", | 147 | printk(KERN_ERR "Error: Prod to wake up processor %d\ |
| 148 | nr, rc); | 148 | Ret= %ld\n", nr, rc); |
| 149 | } | 149 | } |
| 150 | } | 150 | } |
| 151 | 151 | ||
diff --git a/arch/powerpc/sysdev/cpm2_pic.c b/arch/powerpc/sysdev/cpm2_pic.c index 971483f0dfac..1709ac5aac7c 100644 --- a/arch/powerpc/sysdev/cpm2_pic.c +++ b/arch/powerpc/sysdev/cpm2_pic.c | |||
| @@ -143,13 +143,23 @@ static int cpm2_set_irq_type(unsigned int virq, unsigned int flow_type) | |||
| 143 | struct irq_desc *desc = irq_to_desc(virq); | 143 | struct irq_desc *desc = irq_to_desc(virq); |
| 144 | unsigned int vold, vnew, edibit; | 144 | unsigned int vold, vnew, edibit; |
| 145 | 145 | ||
| 146 | if (flow_type == IRQ_TYPE_NONE) | 146 | /* Port C interrupts are either IRQ_TYPE_EDGE_FALLING or |
| 147 | flow_type = IRQ_TYPE_LEVEL_LOW; | 147 | * IRQ_TYPE_EDGE_BOTH (default). All others are IRQ_TYPE_EDGE_FALLING |
| 148 | 148 | * or IRQ_TYPE_LEVEL_LOW (default) | |
| 149 | if (flow_type & IRQ_TYPE_EDGE_RISING) { | 149 | */ |
| 150 | printk(KERN_ERR "CPM2 PIC: sense type 0x%x not supported\n", | 150 | if (src >= CPM2_IRQ_PORTC15 && src <= CPM2_IRQ_PORTC0) { |
| 151 | flow_type); | 151 | if (flow_type == IRQ_TYPE_NONE) |
| 152 | return -EINVAL; | 152 | flow_type = IRQ_TYPE_EDGE_BOTH; |
| 153 | |||
| 154 | if (flow_type != IRQ_TYPE_EDGE_BOTH && | ||
| 155 | flow_type != IRQ_TYPE_EDGE_FALLING) | ||
| 156 | goto err_sense; | ||
| 157 | } else { | ||
| 158 | if (flow_type == IRQ_TYPE_NONE) | ||
| 159 | flow_type = IRQ_TYPE_LEVEL_LOW; | ||
| 160 | |||
| 161 | if (flow_type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH)) | ||
| 162 | goto err_sense; | ||
| 153 | } | 163 | } |
| 154 | 164 | ||
| 155 | desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL); | 165 | desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL); |
| @@ -181,6 +191,10 @@ static int cpm2_set_irq_type(unsigned int virq, unsigned int flow_type) | |||
| 181 | if (vold != vnew) | 191 | if (vold != vnew) |
| 182 | out_be32(&cpm2_intctl->ic_siexr, vnew); | 192 | out_be32(&cpm2_intctl->ic_siexr, vnew); |
| 183 | return 0; | 193 | return 0; |
| 194 | |||
| 195 | err_sense: | ||
| 196 | pr_err("CPM2 PIC: sense type 0x%x not supported\n", flow_type); | ||
| 197 | return -EINVAL; | ||
| 184 | } | 198 | } |
| 185 | 199 | ||
| 186 | static struct irq_chip cpm2_pic = { | 200 | static struct irq_chip cpm2_pic = { |
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c index 4e3a3e345ab3..e1a028c1f18d 100644 --- a/arch/powerpc/sysdev/fsl_pci.c +++ b/arch/powerpc/sysdev/fsl_pci.c | |||
| @@ -464,8 +464,7 @@ static void __iomem *mpc83xx_pcie_remap_cfg(struct pci_bus *bus, | |||
| 464 | { | 464 | { |
| 465 | struct pci_controller *hose = pci_bus_to_host(bus); | 465 | struct pci_controller *hose = pci_bus_to_host(bus); |
| 466 | struct mpc83xx_pcie_priv *pcie = hose->dn->data; | 466 | struct mpc83xx_pcie_priv *pcie = hose->dn->data; |
| 467 | u8 bus_no = bus->number - hose->first_busno; | 467 | u32 dev_base = bus->number << 24 | devfn << 16; |
| 468 | u32 dev_base = bus_no << 24 | devfn << 16; | ||
| 469 | int ret; | 468 | int ret; |
| 470 | 469 | ||
| 471 | ret = mpc83xx_pcie_exclude_device(bus, devfn); | 470 | ret = mpc83xx_pcie_exclude_device(bus, devfn); |
| @@ -515,12 +514,17 @@ static int mpc83xx_pcie_read_config(struct pci_bus *bus, unsigned int devfn, | |||
| 515 | static int mpc83xx_pcie_write_config(struct pci_bus *bus, unsigned int devfn, | 514 | static int mpc83xx_pcie_write_config(struct pci_bus *bus, unsigned int devfn, |
| 516 | int offset, int len, u32 val) | 515 | int offset, int len, u32 val) |
| 517 | { | 516 | { |
| 517 | struct pci_controller *hose = pci_bus_to_host(bus); | ||
| 518 | void __iomem *cfg_addr; | 518 | void __iomem *cfg_addr; |
| 519 | 519 | ||
| 520 | cfg_addr = mpc83xx_pcie_remap_cfg(bus, devfn, offset); | 520 | cfg_addr = mpc83xx_pcie_remap_cfg(bus, devfn, offset); |
| 521 | if (!cfg_addr) | 521 | if (!cfg_addr) |
| 522 | return PCIBIOS_DEVICE_NOT_FOUND; | 522 | return PCIBIOS_DEVICE_NOT_FOUND; |
| 523 | 523 | ||
| 524 | /* PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS */ | ||
| 525 | if (offset == PCI_PRIMARY_BUS && bus->number == hose->first_busno) | ||
| 526 | val &= 0xffffff00; | ||
| 527 | |||
| 524 | switch (len) { | 528 | switch (len) { |
| 525 | case 1: | 529 | case 1: |
| 526 | out_8(cfg_addr, val); | 530 | out_8(cfg_addr, val); |
diff --git a/arch/powerpc/sysdev/mpc8xxx_gpio.c b/arch/powerpc/sysdev/mpc8xxx_gpio.c index 103eace36194..ee1c0e1cf4a7 100644 --- a/arch/powerpc/sysdev/mpc8xxx_gpio.c +++ b/arch/powerpc/sysdev/mpc8xxx_gpio.c | |||
| @@ -54,6 +54,22 @@ static void mpc8xxx_gpio_save_regs(struct of_mm_gpio_chip *mm) | |||
| 54 | mpc8xxx_gc->data = in_be32(mm->regs + GPIO_DAT); | 54 | mpc8xxx_gc->data = in_be32(mm->regs + GPIO_DAT); |
| 55 | } | 55 | } |
| 56 | 56 | ||
| 57 | /* Workaround GPIO 1 errata on MPC8572/MPC8536. The status of GPIOs | ||
| 58 | * defined as output cannot be determined by reading GPDAT register, | ||
| 59 | * so we use shadow data register instead. The status of input pins | ||
| 60 | * is determined by reading GPDAT register. | ||
| 61 | */ | ||
| 62 | static int mpc8572_gpio_get(struct gpio_chip *gc, unsigned int gpio) | ||
| 63 | { | ||
| 64 | u32 val; | ||
| 65 | struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); | ||
| 66 | struct mpc8xxx_gpio_chip *mpc8xxx_gc = to_mpc8xxx_gpio_chip(mm); | ||
| 67 | |||
| 68 | val = in_be32(mm->regs + GPIO_DAT) & ~in_be32(mm->regs + GPIO_DIR); | ||
| 69 | |||
| 70 | return (val | mpc8xxx_gc->data) & mpc8xxx_gpio2mask(gpio); | ||
| 71 | } | ||
| 72 | |||
| 57 | static int mpc8xxx_gpio_get(struct gpio_chip *gc, unsigned int gpio) | 73 | static int mpc8xxx_gpio_get(struct gpio_chip *gc, unsigned int gpio) |
| 58 | { | 74 | { |
| 59 | struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); | 75 | struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc); |
| @@ -136,7 +152,10 @@ static void __init mpc8xxx_add_controller(struct device_node *np) | |||
| 136 | gc->ngpio = MPC8XXX_GPIO_PINS; | 152 | gc->ngpio = MPC8XXX_GPIO_PINS; |
| 137 | gc->direction_input = mpc8xxx_gpio_dir_in; | 153 | gc->direction_input = mpc8xxx_gpio_dir_in; |
| 138 | gc->direction_output = mpc8xxx_gpio_dir_out; | 154 | gc->direction_output = mpc8xxx_gpio_dir_out; |
| 139 | gc->get = mpc8xxx_gpio_get; | 155 | if (of_device_is_compatible(np, "fsl,mpc8572-gpio")) |
| 156 | gc->get = mpc8572_gpio_get; | ||
| 157 | else | ||
| 158 | gc->get = mpc8xxx_gpio_get; | ||
| 140 | gc->set = mpc8xxx_gpio_set; | 159 | gc->set = mpc8xxx_gpio_set; |
| 141 | 160 | ||
| 142 | ret = of_mm_gpiochip_add(np, mm_gc); | 161 | ret = of_mm_gpiochip_add(np, mm_gc); |
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index aa9d06e5925b..470dc6c11d57 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c | |||
| @@ -567,13 +567,11 @@ static void __init mpic_scan_ht_pics(struct mpic *mpic) | |||
| 567 | #endif /* CONFIG_MPIC_U3_HT_IRQS */ | 567 | #endif /* CONFIG_MPIC_U3_HT_IRQS */ |
| 568 | 568 | ||
| 569 | #ifdef CONFIG_SMP | 569 | #ifdef CONFIG_SMP |
| 570 | static int irq_choose_cpu(unsigned int virt_irq) | 570 | static int irq_choose_cpu(const cpumask_t *mask) |
| 571 | { | 571 | { |
| 572 | cpumask_t mask; | ||
| 573 | int cpuid; | 572 | int cpuid; |
| 574 | 573 | ||
| 575 | cpumask_copy(&mask, irq_to_desc(virt_irq)->affinity); | 574 | if (cpumask_equal(mask, cpu_all_mask)) { |
| 576 | if (cpus_equal(mask, CPU_MASK_ALL)) { | ||
| 577 | static int irq_rover; | 575 | static int irq_rover; |
| 578 | static DEFINE_SPINLOCK(irq_rover_lock); | 576 | static DEFINE_SPINLOCK(irq_rover_lock); |
| 579 | unsigned long flags; | 577 | unsigned long flags; |
| @@ -594,20 +592,15 @@ static int irq_choose_cpu(unsigned int virt_irq) | |||
| 594 | 592 | ||
| 595 | spin_unlock_irqrestore(&irq_rover_lock, flags); | 593 | spin_unlock_irqrestore(&irq_rover_lock, flags); |
| 596 | } else { | 594 | } else { |
| 597 | cpumask_t tmp; | 595 | cpuid = cpumask_first_and(mask, cpu_online_mask); |
| 598 | 596 | if (cpuid >= nr_cpu_ids) | |
| 599 | cpus_and(tmp, cpu_online_map, mask); | ||
| 600 | |||
| 601 | if (cpus_empty(tmp)) | ||
| 602 | goto do_round_robin; | 597 | goto do_round_robin; |
| 603 | |||
| 604 | cpuid = first_cpu(tmp); | ||
| 605 | } | 598 | } |
| 606 | 599 | ||
| 607 | return get_hard_smp_processor_id(cpuid); | 600 | return get_hard_smp_processor_id(cpuid); |
| 608 | } | 601 | } |
| 609 | #else | 602 | #else |
| 610 | static int irq_choose_cpu(unsigned int virt_irq) | 603 | static int irq_choose_cpu(const cpumask_t *mask) |
| 611 | { | 604 | { |
| 612 | return hard_smp_processor_id(); | 605 | return hard_smp_processor_id(); |
| 613 | } | 606 | } |
| @@ -816,7 +809,7 @@ int mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask) | |||
| 816 | unsigned int src = mpic_irq_to_hw(irq); | 809 | unsigned int src = mpic_irq_to_hw(irq); |
| 817 | 810 | ||
| 818 | if (mpic->flags & MPIC_SINGLE_DEST_CPU) { | 811 | if (mpic->flags & MPIC_SINGLE_DEST_CPU) { |
| 819 | int cpuid = irq_choose_cpu(irq); | 812 | int cpuid = irq_choose_cpu(cpumask); |
| 820 | 813 | ||
| 821 | mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid); | 814 | mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid); |
| 822 | } else { | 815 | } else { |
diff --git a/arch/powerpc/sysdev/mpic_msi.c b/arch/powerpc/sysdev/mpic_msi.c index 1d44eee80fa1..0f67cd79d481 100644 --- a/arch/powerpc/sysdev/mpic_msi.c +++ b/arch/powerpc/sysdev/mpic_msi.c | |||
| @@ -39,7 +39,12 @@ static int mpic_msi_reserve_u3_hwirqs(struct mpic *mpic) | |||
| 39 | 39 | ||
| 40 | pr_debug("mpic: found U3, guessing msi allocator setup\n"); | 40 | pr_debug("mpic: found U3, guessing msi allocator setup\n"); |
| 41 | 41 | ||
| 42 | /* Reserve source numbers we know are reserved in the HW */ | 42 | /* Reserve source numbers we know are reserved in the HW. |
| 43 | * | ||
| 44 | * This is a bit of a mix of U3 and U4 reserves but that's going | ||
| 45 | * to work fine, we have plenty enugh numbers left so let's just | ||
| 46 | * mark anything we don't like reserved. | ||
| 47 | */ | ||
| 43 | for (i = 0; i < 8; i++) | 48 | for (i = 0; i < 8; i++) |
| 44 | msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, i); | 49 | msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, i); |
| 45 | 50 | ||
| @@ -49,6 +54,10 @@ static int mpic_msi_reserve_u3_hwirqs(struct mpic *mpic) | |||
| 49 | for (i = 100; i < 105; i++) | 54 | for (i = 100; i < 105; i++) |
| 50 | msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, i); | 55 | msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, i); |
| 51 | 56 | ||
| 57 | for (i = 124; i < mpic->irq_count; i++) | ||
| 58 | msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, i); | ||
| 59 | |||
| 60 | |||
| 52 | np = NULL; | 61 | np = NULL; |
| 53 | while ((np = of_find_all_nodes(np))) { | 62 | while ((np = of_find_all_nodes(np))) { |
| 54 | pr_debug("mpic: mapping hwirqs for %s\n", np->full_name); | 63 | pr_debug("mpic: mapping hwirqs for %s\n", np->full_name); |
diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c index d3caf23e6312..bcbfe79c704b 100644 --- a/arch/powerpc/sysdev/mpic_u3msi.c +++ b/arch/powerpc/sysdev/mpic_u3msi.c | |||
| @@ -64,12 +64,12 @@ static u64 read_ht_magic_addr(struct pci_dev *pdev, unsigned int pos) | |||
| 64 | return addr; | 64 | return addr; |
| 65 | } | 65 | } |
| 66 | 66 | ||
| 67 | static u64 find_ht_magic_addr(struct pci_dev *pdev) | 67 | static u64 find_ht_magic_addr(struct pci_dev *pdev, unsigned int hwirq) |
| 68 | { | 68 | { |
| 69 | struct pci_bus *bus; | 69 | struct pci_bus *bus; |
| 70 | unsigned int pos; | 70 | unsigned int pos; |
| 71 | 71 | ||
| 72 | for (bus = pdev->bus; bus; bus = bus->parent) { | 72 | for (bus = pdev->bus; bus && bus->self; bus = bus->parent) { |
| 73 | pos = pci_find_ht_capability(bus->self, HT_CAPTYPE_MSI_MAPPING); | 73 | pos = pci_find_ht_capability(bus->self, HT_CAPTYPE_MSI_MAPPING); |
| 74 | if (pos) | 74 | if (pos) |
| 75 | return read_ht_magic_addr(bus->self, pos); | 75 | return read_ht_magic_addr(bus->self, pos); |
| @@ -78,13 +78,41 @@ static u64 find_ht_magic_addr(struct pci_dev *pdev) | |||
| 78 | return 0; | 78 | return 0; |
| 79 | } | 79 | } |
| 80 | 80 | ||
| 81 | static u64 find_u4_magic_addr(struct pci_dev *pdev, unsigned int hwirq) | ||
| 82 | { | ||
| 83 | struct pci_controller *hose = pci_bus_to_host(pdev->bus); | ||
| 84 | |||
| 85 | /* U4 PCIe MSIs need to write to the special register in | ||
| 86 | * the bridge that generates interrupts. There should be | ||
| 87 | * theorically a register at 0xf8005000 where you just write | ||
| 88 | * the MSI number and that triggers the right interrupt, but | ||
| 89 | * unfortunately, this is busted in HW, the bridge endian swaps | ||
| 90 | * the value and hits the wrong nibble in the register. | ||
| 91 | * | ||
| 92 | * So instead we use another register set which is used normally | ||
| 93 | * for converting HT interrupts to MPIC interrupts, which decodes | ||
| 94 | * the interrupt number as part of the low address bits | ||
| 95 | * | ||
| 96 | * This will not work if we ever use more than one legacy MSI in | ||
| 97 | * a block but we never do. For one MSI or multiple MSI-X where | ||
| 98 | * each interrupt address can be specified separately, it works | ||
| 99 | * just fine. | ||
| 100 | */ | ||
| 101 | if (of_device_is_compatible(hose->dn, "u4-pcie") || | ||
| 102 | of_device_is_compatible(hose->dn, "U4-pcie")) | ||
| 103 | return 0xf8004000 | (hwirq << 4); | ||
| 104 | |||
| 105 | return 0; | ||
| 106 | } | ||
| 107 | |||
| 81 | static int u3msi_msi_check_device(struct pci_dev *pdev, int nvec, int type) | 108 | static int u3msi_msi_check_device(struct pci_dev *pdev, int nvec, int type) |
| 82 | { | 109 | { |
| 83 | if (type == PCI_CAP_ID_MSIX) | 110 | if (type == PCI_CAP_ID_MSIX) |
| 84 | pr_debug("u3msi: MSI-X untested, trying anyway.\n"); | 111 | pr_debug("u3msi: MSI-X untested, trying anyway.\n"); |
| 85 | 112 | ||
| 86 | /* If we can't find a magic address then MSI ain't gonna work */ | 113 | /* If we can't find a magic address then MSI ain't gonna work */ |
| 87 | if (find_ht_magic_addr(pdev) == 0) { | 114 | if (find_ht_magic_addr(pdev, 0) == 0 && |
| 115 | find_u4_magic_addr(pdev, 0) == 0) { | ||
| 88 | pr_debug("u3msi: no magic address found for %s\n", | 116 | pr_debug("u3msi: no magic address found for %s\n", |
| 89 | pci_name(pdev)); | 117 | pci_name(pdev)); |
| 90 | return -ENXIO; | 118 | return -ENXIO; |
| @@ -118,10 +146,6 @@ static int u3msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) | |||
| 118 | u64 addr; | 146 | u64 addr; |
| 119 | int hwirq; | 147 | int hwirq; |
| 120 | 148 | ||
| 121 | addr = find_ht_magic_addr(pdev); | ||
| 122 | msg.address_lo = addr & 0xFFFFFFFF; | ||
| 123 | msg.address_hi = addr >> 32; | ||
| 124 | |||
| 125 | list_for_each_entry(entry, &pdev->msi_list, list) { | 149 | list_for_each_entry(entry, &pdev->msi_list, list) { |
| 126 | hwirq = msi_bitmap_alloc_hwirqs(&msi_mpic->msi_bitmap, 1); | 150 | hwirq = msi_bitmap_alloc_hwirqs(&msi_mpic->msi_bitmap, 1); |
| 127 | if (hwirq < 0) { | 151 | if (hwirq < 0) { |
| @@ -129,6 +153,12 @@ static int u3msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) | |||
| 129 | return hwirq; | 153 | return hwirq; |
| 130 | } | 154 | } |
| 131 | 155 | ||
| 156 | addr = find_ht_magic_addr(pdev, hwirq); | ||
| 157 | if (addr == 0) | ||
| 158 | addr = find_u4_magic_addr(pdev, hwirq); | ||
| 159 | msg.address_lo = addr & 0xFFFFFFFF; | ||
| 160 | msg.address_hi = addr >> 32; | ||
| 161 | |||
| 132 | virq = irq_create_mapping(msi_mpic->irqhost, hwirq); | 162 | virq = irq_create_mapping(msi_mpic->irqhost, hwirq); |
| 133 | if (virq == NO_IRQ) { | 163 | if (virq == NO_IRQ) { |
| 134 | pr_debug("u3msi: failed mapping hwirq 0x%x\n", hwirq); | 164 | pr_debug("u3msi: failed mapping hwirq 0x%x\n", hwirq); |
| @@ -143,6 +173,8 @@ static int u3msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) | |||
| 143 | pr_debug("u3msi: allocated virq 0x%x (hw 0x%x) addr 0x%lx\n", | 173 | pr_debug("u3msi: allocated virq 0x%x (hw 0x%x) addr 0x%lx\n", |
| 144 | virq, hwirq, (unsigned long)addr); | 174 | virq, hwirq, (unsigned long)addr); |
| 145 | 175 | ||
| 176 | printk("u3msi: allocated virq 0x%x (hw 0x%x) addr 0x%lx\n", | ||
| 177 | virq, hwirq, (unsigned long)addr); | ||
| 146 | msg.data = hwirq; | 178 | msg.data = hwirq; |
| 147 | write_msi_msg(virq, &msg); | 179 | write_msi_msg(virq, &msg); |
| 148 | 180 | ||
diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c index c4b47a3e5446..02c81f12c702 100644 --- a/drivers/ata/pata_bf54x.c +++ b/drivers/ata/pata_bf54x.c | |||
| @@ -1557,6 +1557,25 @@ static unsigned short atapi_io_port[] = { | |||
| 1557 | P_ATAPI_DMARQ, | 1557 | P_ATAPI_DMARQ, |
| 1558 | P_ATAPI_INTRQ, | 1558 | P_ATAPI_INTRQ, |
| 1559 | P_ATAPI_IORDY, | 1559 | P_ATAPI_IORDY, |
| 1560 | P_ATAPI_D0A, | ||
| 1561 | P_ATAPI_D1A, | ||
| 1562 | P_ATAPI_D2A, | ||
| 1563 | P_ATAPI_D3A, | ||
| 1564 | P_ATAPI_D4A, | ||
| 1565 | P_ATAPI_D5A, | ||
| 1566 | P_ATAPI_D6A, | ||
| 1567 | P_ATAPI_D7A, | ||
| 1568 | P_ATAPI_D8A, | ||
| 1569 | P_ATAPI_D9A, | ||
| 1570 | P_ATAPI_D10A, | ||
| 1571 | P_ATAPI_D11A, | ||
| 1572 | P_ATAPI_D12A, | ||
| 1573 | P_ATAPI_D13A, | ||
| 1574 | P_ATAPI_D14A, | ||
| 1575 | P_ATAPI_D15A, | ||
| 1576 | P_ATAPI_A0A, | ||
| 1577 | P_ATAPI_A1A, | ||
| 1578 | P_ATAPI_A2A, | ||
| 1560 | 0 | 1579 | 0 |
| 1561 | }; | 1580 | }; |
| 1562 | 1581 | ||
diff --git a/drivers/base/memory.c b/drivers/base/memory.c index c4c8f2e1dd15..d7d77d4a402c 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c | |||
| @@ -63,6 +63,20 @@ void unregister_memory_notifier(struct notifier_block *nb) | |||
| 63 | } | 63 | } |
| 64 | EXPORT_SYMBOL(unregister_memory_notifier); | 64 | EXPORT_SYMBOL(unregister_memory_notifier); |
| 65 | 65 | ||
| 66 | static ATOMIC_NOTIFIER_HEAD(memory_isolate_chain); | ||
| 67 | |||
| 68 | int register_memory_isolate_notifier(struct notifier_block *nb) | ||
| 69 | { | ||
| 70 | return atomic_notifier_chain_register(&memory_isolate_chain, nb); | ||
| 71 | } | ||
| 72 | EXPORT_SYMBOL(register_memory_isolate_notifier); | ||
| 73 | |||
| 74 | void unregister_memory_isolate_notifier(struct notifier_block *nb) | ||
| 75 | { | ||
| 76 | atomic_notifier_chain_unregister(&memory_isolate_chain, nb); | ||
| 77 | } | ||
| 78 | EXPORT_SYMBOL(unregister_memory_isolate_notifier); | ||
| 79 | |||
| 66 | /* | 80 | /* |
| 67 | * register_memory - Setup a sysfs device for a memory block | 81 | * register_memory - Setup a sysfs device for a memory block |
| 68 | */ | 82 | */ |
| @@ -157,6 +171,11 @@ int memory_notify(unsigned long val, void *v) | |||
| 157 | return blocking_notifier_call_chain(&memory_chain, val, v); | 171 | return blocking_notifier_call_chain(&memory_chain, val, v); |
| 158 | } | 172 | } |
| 159 | 173 | ||
| 174 | int memory_isolate_notify(unsigned long val, void *v) | ||
| 175 | { | ||
| 176 | return atomic_notifier_call_chain(&memory_isolate_chain, val, v); | ||
| 177 | } | ||
| 178 | |||
| 160 | /* | 179 | /* |
| 161 | * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is | 180 | * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is |
| 162 | * OK to have direct references to sparsemem variables in here. | 181 | * OK to have direct references to sparsemem variables in here. |
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 1a216c114a0f..48adf80926a0 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c | |||
| @@ -161,6 +161,32 @@ void device_pm_move_last(struct device *dev) | |||
| 161 | list_move_tail(&dev->power.entry, &dpm_list); | 161 | list_move_tail(&dev->power.entry, &dpm_list); |
| 162 | } | 162 | } |
| 163 | 163 | ||
| 164 | static ktime_t initcall_debug_start(struct device *dev) | ||
| 165 | { | ||
| 166 | ktime_t calltime = ktime_set(0, 0); | ||
| 167 | |||
| 168 | if (initcall_debug) { | ||
| 169 | pr_info("calling %s+ @ %i\n", | ||
| 170 | dev_name(dev), task_pid_nr(current)); | ||
| 171 | calltime = ktime_get(); | ||
| 172 | } | ||
| 173 | |||
| 174 | return calltime; | ||
| 175 | } | ||
| 176 | |||
| 177 | static void initcall_debug_report(struct device *dev, ktime_t calltime, | ||
| 178 | int error) | ||
| 179 | { | ||
| 180 | ktime_t delta, rettime; | ||
| 181 | |||
| 182 | if (initcall_debug) { | ||
| 183 | rettime = ktime_get(); | ||
| 184 | delta = ktime_sub(rettime, calltime); | ||
| 185 | pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev), | ||
| 186 | error, (unsigned long long)ktime_to_ns(delta) >> 10); | ||
| 187 | } | ||
| 188 | } | ||
| 189 | |||
| 164 | /** | 190 | /** |
| 165 | * pm_op - Execute the PM operation appropriate for given PM event. | 191 | * pm_op - Execute the PM operation appropriate for given PM event. |
| 166 | * @dev: Device to handle. | 192 | * @dev: Device to handle. |
| @@ -172,13 +198,9 @@ static int pm_op(struct device *dev, | |||
| 172 | pm_message_t state) | 198 | pm_message_t state) |
| 173 | { | 199 | { |
| 174 | int error = 0; | 200 | int error = 0; |
| 175 | ktime_t calltime, delta, rettime; | 201 | ktime_t calltime; |
| 176 | 202 | ||
| 177 | if (initcall_debug) { | 203 | calltime = initcall_debug_start(dev); |
| 178 | pr_info("calling %s+ @ %i\n", | ||
| 179 | dev_name(dev), task_pid_nr(current)); | ||
| 180 | calltime = ktime_get(); | ||
| 181 | } | ||
| 182 | 204 | ||
| 183 | switch (state.event) { | 205 | switch (state.event) { |
| 184 | #ifdef CONFIG_SUSPEND | 206 | #ifdef CONFIG_SUSPEND |
| @@ -227,12 +249,7 @@ static int pm_op(struct device *dev, | |||
| 227 | error = -EINVAL; | 249 | error = -EINVAL; |
| 228 | } | 250 | } |
| 229 | 251 | ||
| 230 | if (initcall_debug) { | 252 | initcall_debug_report(dev, calltime, error); |
| 231 | rettime = ktime_get(); | ||
| 232 | delta = ktime_sub(rettime, calltime); | ||
| 233 | pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev), | ||
| 234 | error, (unsigned long long)ktime_to_ns(delta) >> 10); | ||
| 235 | } | ||
| 236 | 253 | ||
| 237 | return error; | 254 | return error; |
| 238 | } | 255 | } |
| @@ -309,8 +326,9 @@ static int pm_noirq_op(struct device *dev, | |||
| 309 | if (initcall_debug) { | 326 | if (initcall_debug) { |
| 310 | rettime = ktime_get(); | 327 | rettime = ktime_get(); |
| 311 | delta = ktime_sub(rettime, calltime); | 328 | delta = ktime_sub(rettime, calltime); |
| 312 | printk("initcall %s_i+ returned %d after %Ld usecs\n", dev_name(dev), | 329 | printk("initcall %s_i+ returned %d after %Ld usecs\n", |
| 313 | error, (unsigned long long)ktime_to_ns(delta) >> 10); | 330 | dev_name(dev), error, |
| 331 | (unsigned long long)ktime_to_ns(delta) >> 10); | ||
| 314 | } | 332 | } |
| 315 | 333 | ||
| 316 | return error; | 334 | return error; |
| @@ -354,6 +372,23 @@ static void pm_dev_err(struct device *dev, pm_message_t state, char *info, | |||
| 354 | kobject_name(&dev->kobj), pm_verb(state.event), info, error); | 372 | kobject_name(&dev->kobj), pm_verb(state.event), info, error); |
| 355 | } | 373 | } |
| 356 | 374 | ||
| 375 | static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info) | ||
| 376 | { | ||
| 377 | ktime_t calltime; | ||
| 378 | s64 usecs64; | ||
| 379 | int usecs; | ||
| 380 | |||
| 381 | calltime = ktime_get(); | ||
| 382 | usecs64 = ktime_to_ns(ktime_sub(calltime, starttime)); | ||
| 383 | do_div(usecs64, NSEC_PER_USEC); | ||
| 384 | usecs = usecs64; | ||
| 385 | if (usecs == 0) | ||
| 386 | usecs = 1; | ||
| 387 | pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n", | ||
| 388 | info ?: "", info ? " " : "", pm_verb(state.event), | ||
| 389 | usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC); | ||
| 390 | } | ||
| 391 | |||
| 357 | /*------------------------- Resume routines -------------------------*/ | 392 | /*------------------------- Resume routines -------------------------*/ |
| 358 | 393 | ||
| 359 | /** | 394 | /** |
| @@ -390,6 +425,7 @@ static int device_resume_noirq(struct device *dev, pm_message_t state) | |||
| 390 | void dpm_resume_noirq(pm_message_t state) | 425 | void dpm_resume_noirq(pm_message_t state) |
| 391 | { | 426 | { |
| 392 | struct device *dev; | 427 | struct device *dev; |
| 428 | ktime_t starttime = ktime_get(); | ||
| 393 | 429 | ||
| 394 | mutex_lock(&dpm_list_mtx); | 430 | mutex_lock(&dpm_list_mtx); |
| 395 | transition_started = false; | 431 | transition_started = false; |
| @@ -403,11 +439,32 @@ void dpm_resume_noirq(pm_message_t state) | |||
| 403 | pm_dev_err(dev, state, " early", error); | 439 | pm_dev_err(dev, state, " early", error); |
| 404 | } | 440 | } |
| 405 | mutex_unlock(&dpm_list_mtx); | 441 | mutex_unlock(&dpm_list_mtx); |
| 442 | dpm_show_time(starttime, state, "early"); | ||
| 406 | resume_device_irqs(); | 443 | resume_device_irqs(); |
| 407 | } | 444 | } |
| 408 | EXPORT_SYMBOL_GPL(dpm_resume_noirq); | 445 | EXPORT_SYMBOL_GPL(dpm_resume_noirq); |
| 409 | 446 | ||
| 410 | /** | 447 | /** |
| 448 | * legacy_resume - Execute a legacy (bus or class) resume callback for device. | ||
| 449 | * dev: Device to resume. | ||
| 450 | * cb: Resume callback to execute. | ||
| 451 | */ | ||
| 452 | static int legacy_resume(struct device *dev, int (*cb)(struct device *dev)) | ||
| 453 | { | ||
| 454 | int error; | ||
| 455 | ktime_t calltime; | ||
| 456 | |||
| 457 | calltime = initcall_debug_start(dev); | ||
| 458 | |||
| 459 | error = cb(dev); | ||
| 460 | suspend_report_result(cb, error); | ||
| 461 | |||
| 462 | initcall_debug_report(dev, calltime, error); | ||
| 463 | |||
| 464 | return error; | ||
| 465 | } | ||
| 466 | |||
| 467 | /** | ||
| 411 | * device_resume - Execute "resume" callbacks for given device. | 468 | * device_resume - Execute "resume" callbacks for given device. |
| 412 | * @dev: Device to handle. | 469 | * @dev: Device to handle. |
| 413 | * @state: PM transition of the system being carried out. | 470 | * @state: PM transition of the system being carried out. |
| @@ -427,7 +484,7 @@ static int device_resume(struct device *dev, pm_message_t state) | |||
| 427 | error = pm_op(dev, dev->bus->pm, state); | 484 | error = pm_op(dev, dev->bus->pm, state); |
| 428 | } else if (dev->bus->resume) { | 485 | } else if (dev->bus->resume) { |
| 429 | pm_dev_dbg(dev, state, "legacy "); | 486 | pm_dev_dbg(dev, state, "legacy "); |
| 430 | error = dev->bus->resume(dev); | 487 | error = legacy_resume(dev, dev->bus->resume); |
| 431 | } | 488 | } |
| 432 | if (error) | 489 | if (error) |
| 433 | goto End; | 490 | goto End; |
| @@ -448,7 +505,7 @@ static int device_resume(struct device *dev, pm_message_t state) | |||
| 448 | error = pm_op(dev, dev->class->pm, state); | 505 | error = pm_op(dev, dev->class->pm, state); |
| 449 | } else if (dev->class->resume) { | 506 | } else if (dev->class->resume) { |
| 450 | pm_dev_dbg(dev, state, "legacy class "); | 507 | pm_dev_dbg(dev, state, "legacy class "); |
| 451 | error = dev->class->resume(dev); | 508 | error = legacy_resume(dev, dev->class->resume); |
| 452 | } | 509 | } |
| 453 | } | 510 | } |
| 454 | End: | 511 | End: |
| @@ -468,6 +525,7 @@ static int device_resume(struct device *dev, pm_message_t state) | |||
| 468 | static void dpm_resume(pm_message_t state) | 525 | static void dpm_resume(pm_message_t state) |
| 469 | { | 526 | { |
| 470 | struct list_head list; | 527 | struct list_head list; |
| 528 | ktime_t starttime = ktime_get(); | ||
| 471 | 529 | ||
| 472 | INIT_LIST_HEAD(&list); | 530 | INIT_LIST_HEAD(&list); |
| 473 | mutex_lock(&dpm_list_mtx); | 531 | mutex_lock(&dpm_list_mtx); |
| @@ -496,6 +554,7 @@ static void dpm_resume(pm_message_t state) | |||
| 496 | } | 554 | } |
| 497 | list_splice(&list, &dpm_list); | 555 | list_splice(&list, &dpm_list); |
| 498 | mutex_unlock(&dpm_list_mtx); | 556 | mutex_unlock(&dpm_list_mtx); |
| 557 | dpm_show_time(starttime, state, NULL); | ||
| 499 | } | 558 | } |
| 500 | 559 | ||
| 501 | /** | 560 | /** |
| @@ -548,7 +607,7 @@ static void dpm_complete(pm_message_t state) | |||
| 548 | mutex_unlock(&dpm_list_mtx); | 607 | mutex_unlock(&dpm_list_mtx); |
| 549 | 608 | ||
| 550 | device_complete(dev, state); | 609 | device_complete(dev, state); |
| 551 | pm_runtime_put_noidle(dev); | 610 | pm_runtime_put_sync(dev); |
| 552 | 611 | ||
| 553 | mutex_lock(&dpm_list_mtx); | 612 | mutex_lock(&dpm_list_mtx); |
| 554 | } | 613 | } |
| @@ -628,6 +687,7 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state) | |||
| 628 | int dpm_suspend_noirq(pm_message_t state) | 687 | int dpm_suspend_noirq(pm_message_t state) |
| 629 | { | 688 | { |
| 630 | struct device *dev; | 689 | struct device *dev; |
| 690 | ktime_t starttime = ktime_get(); | ||
| 631 | int error = 0; | 691 | int error = 0; |
| 632 | 692 | ||
| 633 | suspend_device_irqs(); | 693 | suspend_device_irqs(); |
| @@ -643,11 +703,34 @@ int dpm_suspend_noirq(pm_message_t state) | |||
| 643 | mutex_unlock(&dpm_list_mtx); | 703 | mutex_unlock(&dpm_list_mtx); |
| 644 | if (error) | 704 | if (error) |
| 645 | dpm_resume_noirq(resume_event(state)); | 705 | dpm_resume_noirq(resume_event(state)); |
| 706 | else | ||
| 707 | dpm_show_time(starttime, state, "late"); | ||
| 646 | return error; | 708 | return error; |
| 647 | } | 709 | } |
| 648 | EXPORT_SYMBOL_GPL(dpm_suspend_noirq); | 710 | EXPORT_SYMBOL_GPL(dpm_suspend_noirq); |
| 649 | 711 | ||
| 650 | /** | 712 | /** |
| 713 | * legacy_suspend - Execute a legacy (bus or class) suspend callback for device. | ||
| 714 | * dev: Device to suspend. | ||
| 715 | * cb: Suspend callback to execute. | ||
| 716 | */ | ||
| 717 | static int legacy_suspend(struct device *dev, pm_message_t state, | ||
| 718 | int (*cb)(struct device *dev, pm_message_t state)) | ||
| 719 | { | ||
| 720 | int error; | ||
| 721 | ktime_t calltime; | ||
| 722 | |||
| 723 | calltime = initcall_debug_start(dev); | ||
| 724 | |||
| 725 | error = cb(dev, state); | ||
| 726 | suspend_report_result(cb, error); | ||
| 727 | |||
| 728 | initcall_debug_report(dev, calltime, error); | ||
| 729 | |||
| 730 | return error; | ||
| 731 | } | ||
| 732 | |||
| 733 | /** | ||
| 651 | * device_suspend - Execute "suspend" callbacks for given device. | 734 | * device_suspend - Execute "suspend" callbacks for given device. |
| 652 | * @dev: Device to handle. | 735 | * @dev: Device to handle. |
| 653 | * @state: PM transition of the system being carried out. | 736 | * @state: PM transition of the system being carried out. |
| @@ -664,8 +747,7 @@ static int device_suspend(struct device *dev, pm_message_t state) | |||
| 664 | error = pm_op(dev, dev->class->pm, state); | 747 | error = pm_op(dev, dev->class->pm, state); |
| 665 | } else if (dev->class->suspend) { | 748 | } else if (dev->class->suspend) { |
| 666 | pm_dev_dbg(dev, state, "legacy class "); | 749 | pm_dev_dbg(dev, state, "legacy class "); |
| 667 | error = dev->class->suspend(dev, state); | 750 | error = legacy_suspend(dev, state, dev->class->suspend); |
| 668 | suspend_report_result(dev->class->suspend, error); | ||
| 669 | } | 751 | } |
| 670 | if (error) | 752 | if (error) |
| 671 | goto End; | 753 | goto End; |
| @@ -686,8 +768,7 @@ static int device_suspend(struct device *dev, pm_message_t state) | |||
| 686 | error = pm_op(dev, dev->bus->pm, state); | 768 | error = pm_op(dev, dev->bus->pm, state); |
| 687 | } else if (dev->bus->suspend) { | 769 | } else if (dev->bus->suspend) { |
| 688 | pm_dev_dbg(dev, state, "legacy "); | 770 | pm_dev_dbg(dev, state, "legacy "); |
| 689 | error = dev->bus->suspend(dev, state); | 771 | error = legacy_suspend(dev, state, dev->bus->suspend); |
| 690 | suspend_report_result(dev->bus->suspend, error); | ||
| 691 | } | 772 | } |
| 692 | } | 773 | } |
| 693 | End: | 774 | End: |
| @@ -703,6 +784,7 @@ static int device_suspend(struct device *dev, pm_message_t state) | |||
| 703 | static int dpm_suspend(pm_message_t state) | 784 | static int dpm_suspend(pm_message_t state) |
| 704 | { | 785 | { |
| 705 | struct list_head list; | 786 | struct list_head list; |
| 787 | ktime_t starttime = ktime_get(); | ||
| 706 | int error = 0; | 788 | int error = 0; |
| 707 | 789 | ||
| 708 | INIT_LIST_HEAD(&list); | 790 | INIT_LIST_HEAD(&list); |
| @@ -728,6 +810,8 @@ static int dpm_suspend(pm_message_t state) | |||
| 728 | } | 810 | } |
| 729 | list_splice(&list, dpm_list.prev); | 811 | list_splice(&list, dpm_list.prev); |
| 730 | mutex_unlock(&dpm_list_mtx); | 812 | mutex_unlock(&dpm_list_mtx); |
| 813 | if (!error) | ||
| 814 | dpm_show_time(starttime, state, NULL); | ||
| 731 | return error; | 815 | return error; |
| 732 | } | 816 | } |
| 733 | 817 | ||
| @@ -796,7 +880,7 @@ static int dpm_prepare(pm_message_t state) | |||
| 796 | pm_runtime_get_noresume(dev); | 880 | pm_runtime_get_noresume(dev); |
| 797 | if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) { | 881 | if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) { |
| 798 | /* Wake-up requested during system sleep transition. */ | 882 | /* Wake-up requested during system sleep transition. */ |
| 799 | pm_runtime_put_noidle(dev); | 883 | pm_runtime_put_sync(dev); |
| 800 | error = -EBUSY; | 884 | error = -EBUSY; |
| 801 | } else { | 885 | } else { |
| 802 | error = device_prepare(dev, state); | 886 | error = device_prepare(dev, state); |
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 40d7720a4b21..f8b044e8aef7 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c | |||
| @@ -85,6 +85,19 @@ static int __pm_runtime_idle(struct device *dev) | |||
| 85 | dev->bus->pm->runtime_idle(dev); | 85 | dev->bus->pm->runtime_idle(dev); |
| 86 | 86 | ||
| 87 | spin_lock_irq(&dev->power.lock); | 87 | spin_lock_irq(&dev->power.lock); |
| 88 | } else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle) { | ||
| 89 | spin_unlock_irq(&dev->power.lock); | ||
| 90 | |||
| 91 | dev->type->pm->runtime_idle(dev); | ||
| 92 | |||
| 93 | spin_lock_irq(&dev->power.lock); | ||
| 94 | } else if (dev->class && dev->class->pm | ||
| 95 | && dev->class->pm->runtime_idle) { | ||
| 96 | spin_unlock_irq(&dev->power.lock); | ||
| 97 | |||
| 98 | dev->class->pm->runtime_idle(dev); | ||
| 99 | |||
| 100 | spin_lock_irq(&dev->power.lock); | ||
| 88 | } | 101 | } |
| 89 | 102 | ||
| 90 | dev->power.idle_notification = false; | 103 | dev->power.idle_notification = false; |
| @@ -194,6 +207,22 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq) | |||
| 194 | 207 | ||
| 195 | spin_lock_irq(&dev->power.lock); | 208 | spin_lock_irq(&dev->power.lock); |
| 196 | dev->power.runtime_error = retval; | 209 | dev->power.runtime_error = retval; |
| 210 | } else if (dev->type && dev->type->pm | ||
| 211 | && dev->type->pm->runtime_suspend) { | ||
| 212 | spin_unlock_irq(&dev->power.lock); | ||
| 213 | |||
| 214 | retval = dev->type->pm->runtime_suspend(dev); | ||
| 215 | |||
| 216 | spin_lock_irq(&dev->power.lock); | ||
| 217 | dev->power.runtime_error = retval; | ||
| 218 | } else if (dev->class && dev->class->pm | ||
| 219 | && dev->class->pm->runtime_suspend) { | ||
| 220 | spin_unlock_irq(&dev->power.lock); | ||
| 221 | |||
| 222 | retval = dev->class->pm->runtime_suspend(dev); | ||
| 223 | |||
| 224 | spin_lock_irq(&dev->power.lock); | ||
| 225 | dev->power.runtime_error = retval; | ||
| 197 | } else { | 226 | } else { |
| 198 | retval = -ENOSYS; | 227 | retval = -ENOSYS; |
| 199 | } | 228 | } |
| @@ -359,6 +388,22 @@ int __pm_runtime_resume(struct device *dev, bool from_wq) | |||
| 359 | 388 | ||
| 360 | spin_lock_irq(&dev->power.lock); | 389 | spin_lock_irq(&dev->power.lock); |
| 361 | dev->power.runtime_error = retval; | 390 | dev->power.runtime_error = retval; |
| 391 | } else if (dev->type && dev->type->pm | ||
| 392 | && dev->type->pm->runtime_resume) { | ||
| 393 | spin_unlock_irq(&dev->power.lock); | ||
| 394 | |||
| 395 | retval = dev->type->pm->runtime_resume(dev); | ||
| 396 | |||
| 397 | spin_lock_irq(&dev->power.lock); | ||
| 398 | dev->power.runtime_error = retval; | ||
| 399 | } else if (dev->class && dev->class->pm | ||
| 400 | && dev->class->pm->runtime_resume) { | ||
| 401 | spin_unlock_irq(&dev->power.lock); | ||
| 402 | |||
| 403 | retval = dev->class->pm->runtime_resume(dev); | ||
| 404 | |||
| 405 | spin_lock_irq(&dev->power.lock); | ||
| 406 | dev->power.runtime_error = retval; | ||
| 362 | } else { | 407 | } else { |
| 363 | retval = -ENOSYS; | 408 | retval = -ENOSYS; |
| 364 | } | 409 | } |
diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c index d3400b20444f..7d73cd430340 100644 --- a/drivers/char/nozomi.c +++ b/drivers/char/nozomi.c | |||
| @@ -358,7 +358,7 @@ struct port { | |||
| 358 | u8 update_flow_control; | 358 | u8 update_flow_control; |
| 359 | struct ctrl_ul ctrl_ul; | 359 | struct ctrl_ul ctrl_ul; |
| 360 | struct ctrl_dl ctrl_dl; | 360 | struct ctrl_dl ctrl_dl; |
| 361 | struct kfifo *fifo_ul; | 361 | struct kfifo fifo_ul; |
| 362 | void __iomem *dl_addr[2]; | 362 | void __iomem *dl_addr[2]; |
| 363 | u32 dl_size[2]; | 363 | u32 dl_size[2]; |
| 364 | u8 toggle_dl; | 364 | u8 toggle_dl; |
| @@ -685,8 +685,6 @@ static int nozomi_read_config_table(struct nozomi *dc) | |||
| 685 | dump_table(dc); | 685 | dump_table(dc); |
| 686 | 686 | ||
| 687 | for (i = PORT_MDM; i < MAX_PORT; i++) { | 687 | for (i = PORT_MDM; i < MAX_PORT; i++) { |
| 688 | dc->port[i].fifo_ul = | ||
| 689 | kfifo_alloc(FIFO_BUFFER_SIZE_UL, GFP_ATOMIC, NULL); | ||
| 690 | memset(&dc->port[i].ctrl_dl, 0, sizeof(struct ctrl_dl)); | 688 | memset(&dc->port[i].ctrl_dl, 0, sizeof(struct ctrl_dl)); |
| 691 | memset(&dc->port[i].ctrl_ul, 0, sizeof(struct ctrl_ul)); | 689 | memset(&dc->port[i].ctrl_ul, 0, sizeof(struct ctrl_ul)); |
| 692 | } | 690 | } |
| @@ -798,7 +796,7 @@ static int send_data(enum port_type index, struct nozomi *dc) | |||
| 798 | struct tty_struct *tty = tty_port_tty_get(&port->port); | 796 | struct tty_struct *tty = tty_port_tty_get(&port->port); |
| 799 | 797 | ||
| 800 | /* Get data from tty and place in buf for now */ | 798 | /* Get data from tty and place in buf for now */ |
| 801 | size = __kfifo_get(port->fifo_ul, dc->send_buf, | 799 | size = kfifo_out(&port->fifo_ul, dc->send_buf, |
| 802 | ul_size < SEND_BUF_MAX ? ul_size : SEND_BUF_MAX); | 800 | ul_size < SEND_BUF_MAX ? ul_size : SEND_BUF_MAX); |
| 803 | 801 | ||
| 804 | if (size == 0) { | 802 | if (size == 0) { |
| @@ -988,11 +986,11 @@ static int receive_flow_control(struct nozomi *dc) | |||
| 988 | 986 | ||
| 989 | } else if (old_ctrl.CTS == 0 && ctrl_dl.CTS == 1) { | 987 | } else if (old_ctrl.CTS == 0 && ctrl_dl.CTS == 1) { |
| 990 | 988 | ||
| 991 | if (__kfifo_len(dc->port[port].fifo_ul)) { | 989 | if (kfifo_len(&dc->port[port].fifo_ul)) { |
| 992 | DBG1("Enable interrupt (0x%04X) on port: %d", | 990 | DBG1("Enable interrupt (0x%04X) on port: %d", |
| 993 | enable_ier, port); | 991 | enable_ier, port); |
| 994 | DBG1("Data in buffer [%d], enable transmit! ", | 992 | DBG1("Data in buffer [%d], enable transmit! ", |
| 995 | __kfifo_len(dc->port[port].fifo_ul)); | 993 | kfifo_len(&dc->port[port].fifo_ul)); |
| 996 | enable_transmit_ul(port, dc); | 994 | enable_transmit_ul(port, dc); |
| 997 | } else { | 995 | } else { |
| 998 | DBG1("No data in buffer..."); | 996 | DBG1("No data in buffer..."); |
| @@ -1433,6 +1431,16 @@ static int __devinit nozomi_card_init(struct pci_dev *pdev, | |||
| 1433 | goto err_free_sbuf; | 1431 | goto err_free_sbuf; |
| 1434 | } | 1432 | } |
| 1435 | 1433 | ||
| 1434 | for (i = PORT_MDM; i < MAX_PORT; i++) { | ||
| 1435 | if (kfifo_alloc(&dc->port[i].fifo_ul, | ||
| 1436 | FIFO_BUFFER_SIZE_UL, GFP_ATOMIC)) { | ||
| 1437 | dev_err(&pdev->dev, | ||
| 1438 | "Could not allocate kfifo buffer\n"); | ||
| 1439 | ret = -ENOMEM; | ||
| 1440 | goto err_free_kfifo; | ||
| 1441 | } | ||
| 1442 | } | ||
| 1443 | |||
| 1436 | spin_lock_init(&dc->spin_mutex); | 1444 | spin_lock_init(&dc->spin_mutex); |
| 1437 | 1445 | ||
| 1438 | nozomi_setup_private_data(dc); | 1446 | nozomi_setup_private_data(dc); |
| @@ -1445,7 +1453,7 @@ static int __devinit nozomi_card_init(struct pci_dev *pdev, | |||
| 1445 | NOZOMI_NAME, dc); | 1453 | NOZOMI_NAME, dc); |
| 1446 | if (unlikely(ret)) { | 1454 | if (unlikely(ret)) { |
| 1447 | dev_err(&pdev->dev, "can't request irq %d\n", pdev->irq); | 1455 | dev_err(&pdev->dev, "can't request irq %d\n", pdev->irq); |
| 1448 | goto err_free_sbuf; | 1456 | goto err_free_kfifo; |
| 1449 | } | 1457 | } |
| 1450 | 1458 | ||
| 1451 | DBG1("base_addr: %p", dc->base_addr); | 1459 | DBG1("base_addr: %p", dc->base_addr); |
| @@ -1464,13 +1472,28 @@ static int __devinit nozomi_card_init(struct pci_dev *pdev, | |||
| 1464 | dc->state = NOZOMI_STATE_ENABLED; | 1472 | dc->state = NOZOMI_STATE_ENABLED; |
| 1465 | 1473 | ||
| 1466 | for (i = 0; i < MAX_PORT; i++) { | 1474 | for (i = 0; i < MAX_PORT; i++) { |
| 1475 | struct device *tty_dev; | ||
| 1476 | |||
| 1467 | mutex_init(&dc->port[i].tty_sem); | 1477 | mutex_init(&dc->port[i].tty_sem); |
| 1468 | tty_port_init(&dc->port[i].port); | 1478 | tty_port_init(&dc->port[i].port); |
| 1469 | tty_register_device(ntty_driver, dc->index_start + i, | 1479 | tty_dev = tty_register_device(ntty_driver, dc->index_start + i, |
| 1470 | &pdev->dev); | 1480 | &pdev->dev); |
| 1481 | |||
| 1482 | if (IS_ERR(tty_dev)) { | ||
| 1483 | ret = PTR_ERR(tty_dev); | ||
| 1484 | dev_err(&pdev->dev, "Could not allocate tty?\n"); | ||
| 1485 | goto err_free_tty; | ||
| 1486 | } | ||
| 1471 | } | 1487 | } |
| 1488 | |||
| 1472 | return 0; | 1489 | return 0; |
| 1473 | 1490 | ||
| 1491 | err_free_tty: | ||
| 1492 | for (i = dc->index_start; i < dc->index_start + MAX_PORT; ++i) | ||
| 1493 | tty_unregister_device(ntty_driver, i); | ||
| 1494 | err_free_kfifo: | ||
| 1495 | for (i = 0; i < MAX_PORT; i++) | ||
| 1496 | kfifo_free(&dc->port[i].fifo_ul); | ||
| 1474 | err_free_sbuf: | 1497 | err_free_sbuf: |
| 1475 | kfree(dc->send_buf); | 1498 | kfree(dc->send_buf); |
| 1476 | iounmap(dc->base_addr); | 1499 | iounmap(dc->base_addr); |
| @@ -1536,8 +1559,7 @@ static void __devexit nozomi_card_exit(struct pci_dev *pdev) | |||
| 1536 | free_irq(pdev->irq, dc); | 1559 | free_irq(pdev->irq, dc); |
| 1537 | 1560 | ||
| 1538 | for (i = 0; i < MAX_PORT; i++) | 1561 | for (i = 0; i < MAX_PORT; i++) |
| 1539 | if (dc->port[i].fifo_ul) | 1562 | kfifo_free(&dc->port[i].fifo_ul); |
| 1540 | kfifo_free(dc->port[i].fifo_ul); | ||
| 1541 | 1563 | ||
| 1542 | kfree(dc->send_buf); | 1564 | kfree(dc->send_buf); |
| 1543 | 1565 | ||
| @@ -1673,7 +1695,7 @@ static int ntty_write(struct tty_struct *tty, const unsigned char *buffer, | |||
| 1673 | goto exit; | 1695 | goto exit; |
| 1674 | } | 1696 | } |
| 1675 | 1697 | ||
| 1676 | rval = __kfifo_put(port->fifo_ul, (unsigned char *)buffer, count); | 1698 | rval = kfifo_in(&port->fifo_ul, (unsigned char *)buffer, count); |
| 1677 | 1699 | ||
| 1678 | /* notify card */ | 1700 | /* notify card */ |
| 1679 | if (unlikely(dc == NULL)) { | 1701 | if (unlikely(dc == NULL)) { |
| @@ -1721,7 +1743,7 @@ static int ntty_write_room(struct tty_struct *tty) | |||
| 1721 | if (!port->port.count) | 1743 | if (!port->port.count) |
| 1722 | goto exit; | 1744 | goto exit; |
| 1723 | 1745 | ||
| 1724 | room = port->fifo_ul->size - __kfifo_len(port->fifo_ul); | 1746 | room = port->fifo_ul.size - kfifo_len(&port->fifo_ul); |
| 1725 | 1747 | ||
| 1726 | exit: | 1748 | exit: |
| 1727 | mutex_unlock(&port->tty_sem); | 1749 | mutex_unlock(&port->tty_sem); |
| @@ -1878,7 +1900,7 @@ static s32 ntty_chars_in_buffer(struct tty_struct *tty) | |||
| 1878 | goto exit_in_buffer; | 1900 | goto exit_in_buffer; |
| 1879 | } | 1901 | } |
| 1880 | 1902 | ||
| 1881 | rval = __kfifo_len(port->fifo_ul); | 1903 | rval = kfifo_len(&port->fifo_ul); |
| 1882 | 1904 | ||
| 1883 | exit_in_buffer: | 1905 | exit_in_buffer: |
| 1884 | return rval; | 1906 | return rval; |
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c index 8c262aaf7c26..0798754a607c 100644 --- a/drivers/char/sonypi.c +++ b/drivers/char/sonypi.c | |||
| @@ -487,7 +487,7 @@ static struct sonypi_device { | |||
| 487 | int camera_power; | 487 | int camera_power; |
| 488 | int bluetooth_power; | 488 | int bluetooth_power; |
| 489 | struct mutex lock; | 489 | struct mutex lock; |
| 490 | struct kfifo *fifo; | 490 | struct kfifo fifo; |
| 491 | spinlock_t fifo_lock; | 491 | spinlock_t fifo_lock; |
| 492 | wait_queue_head_t fifo_proc_list; | 492 | wait_queue_head_t fifo_proc_list; |
| 493 | struct fasync_struct *fifo_async; | 493 | struct fasync_struct *fifo_async; |
| @@ -496,7 +496,7 @@ static struct sonypi_device { | |||
| 496 | struct input_dev *input_jog_dev; | 496 | struct input_dev *input_jog_dev; |
| 497 | struct input_dev *input_key_dev; | 497 | struct input_dev *input_key_dev; |
| 498 | struct work_struct input_work; | 498 | struct work_struct input_work; |
| 499 | struct kfifo *input_fifo; | 499 | struct kfifo input_fifo; |
| 500 | spinlock_t input_fifo_lock; | 500 | spinlock_t input_fifo_lock; |
| 501 | } sonypi_device; | 501 | } sonypi_device; |
| 502 | 502 | ||
| @@ -777,8 +777,9 @@ static void input_keyrelease(struct work_struct *work) | |||
| 777 | { | 777 | { |
| 778 | struct sonypi_keypress kp; | 778 | struct sonypi_keypress kp; |
| 779 | 779 | ||
| 780 | while (kfifo_get(sonypi_device.input_fifo, (unsigned char *)&kp, | 780 | while (kfifo_out_locked(&sonypi_device.input_fifo, (unsigned char *)&kp, |
| 781 | sizeof(kp)) == sizeof(kp)) { | 781 | sizeof(kp), &sonypi_device.input_fifo_lock) |
| 782 | == sizeof(kp)) { | ||
| 782 | msleep(10); | 783 | msleep(10); |
| 783 | input_report_key(kp.dev, kp.key, 0); | 784 | input_report_key(kp.dev, kp.key, 0); |
| 784 | input_sync(kp.dev); | 785 | input_sync(kp.dev); |
| @@ -827,8 +828,9 @@ static void sonypi_report_input_event(u8 event) | |||
| 827 | if (kp.dev) { | 828 | if (kp.dev) { |
| 828 | input_report_key(kp.dev, kp.key, 1); | 829 | input_report_key(kp.dev, kp.key, 1); |
| 829 | input_sync(kp.dev); | 830 | input_sync(kp.dev); |
| 830 | kfifo_put(sonypi_device.input_fifo, | 831 | kfifo_in_locked(&sonypi_device.input_fifo, |
| 831 | (unsigned char *)&kp, sizeof(kp)); | 832 | (unsigned char *)&kp, sizeof(kp), |
| 833 | &sonypi_device.input_fifo_lock); | ||
| 832 | schedule_work(&sonypi_device.input_work); | 834 | schedule_work(&sonypi_device.input_work); |
| 833 | } | 835 | } |
| 834 | } | 836 | } |
| @@ -880,7 +882,8 @@ found: | |||
| 880 | acpi_bus_generate_proc_event(sonypi_acpi_device, 1, event); | 882 | acpi_bus_generate_proc_event(sonypi_acpi_device, 1, event); |
| 881 | #endif | 883 | #endif |
| 882 | 884 | ||
| 883 | kfifo_put(sonypi_device.fifo, (unsigned char *)&event, sizeof(event)); | 885 | kfifo_in_locked(&sonypi_device.fifo, (unsigned char *)&event, |
| 886 | sizeof(event), &sonypi_device.fifo_lock); | ||
| 884 | kill_fasync(&sonypi_device.fifo_async, SIGIO, POLL_IN); | 887 | kill_fasync(&sonypi_device.fifo_async, SIGIO, POLL_IN); |
| 885 | wake_up_interruptible(&sonypi_device.fifo_proc_list); | 888 | wake_up_interruptible(&sonypi_device.fifo_proc_list); |
| 886 | 889 | ||
| @@ -906,7 +909,7 @@ static int sonypi_misc_open(struct inode *inode, struct file *file) | |||
| 906 | mutex_lock(&sonypi_device.lock); | 909 | mutex_lock(&sonypi_device.lock); |
| 907 | /* Flush input queue on first open */ | 910 | /* Flush input queue on first open */ |
| 908 | if (!sonypi_device.open_count) | 911 | if (!sonypi_device.open_count) |
| 909 | kfifo_reset(sonypi_device.fifo); | 912 | kfifo_reset(&sonypi_device.fifo); |
| 910 | sonypi_device.open_count++; | 913 | sonypi_device.open_count++; |
| 911 | mutex_unlock(&sonypi_device.lock); | 914 | mutex_unlock(&sonypi_device.lock); |
| 912 | unlock_kernel(); | 915 | unlock_kernel(); |
| @@ -919,17 +922,18 @@ static ssize_t sonypi_misc_read(struct file *file, char __user *buf, | |||
| 919 | ssize_t ret; | 922 | ssize_t ret; |
| 920 | unsigned char c; | 923 | unsigned char c; |
| 921 | 924 | ||
| 922 | if ((kfifo_len(sonypi_device.fifo) == 0) && | 925 | if ((kfifo_len(&sonypi_device.fifo) == 0) && |
| 923 | (file->f_flags & O_NONBLOCK)) | 926 | (file->f_flags & O_NONBLOCK)) |
| 924 | return -EAGAIN; | 927 | return -EAGAIN; |
| 925 | 928 | ||
| 926 | ret = wait_event_interruptible(sonypi_device.fifo_proc_list, | 929 | ret = wait_event_interruptible(sonypi_device.fifo_proc_list, |
| 927 | kfifo_len(sonypi_device.fifo) != 0); | 930 | kfifo_len(&sonypi_device.fifo) != 0); |
| 928 | if (ret) | 931 | if (ret) |
| 929 | return ret; | 932 | return ret; |
| 930 | 933 | ||
| 931 | while (ret < count && | 934 | while (ret < count && |
| 932 | (kfifo_get(sonypi_device.fifo, &c, sizeof(c)) == sizeof(c))) { | 935 | (kfifo_out_locked(&sonypi_device.fifo, &c, sizeof(c), |
| 936 | &sonypi_device.fifo_lock) == sizeof(c))) { | ||
| 933 | if (put_user(c, buf++)) | 937 | if (put_user(c, buf++)) |
| 934 | return -EFAULT; | 938 | return -EFAULT; |
| 935 | ret++; | 939 | ret++; |
| @@ -946,7 +950,7 @@ static ssize_t sonypi_misc_read(struct file *file, char __user *buf, | |||
| 946 | static unsigned int sonypi_misc_poll(struct file *file, poll_table *wait) | 950 | static unsigned int sonypi_misc_poll(struct file *file, poll_table *wait) |
| 947 | { | 951 | { |
| 948 | poll_wait(file, &sonypi_device.fifo_proc_list, wait); | 952 | poll_wait(file, &sonypi_device.fifo_proc_list, wait); |
| 949 | if (kfifo_len(sonypi_device.fifo)) | 953 | if (kfifo_len(&sonypi_device.fifo)) |
| 950 | return POLLIN | POLLRDNORM; | 954 | return POLLIN | POLLRDNORM; |
| 951 | return 0; | 955 | return 0; |
| 952 | } | 956 | } |
| @@ -1313,11 +1317,10 @@ static int __devinit sonypi_probe(struct platform_device *dev) | |||
| 1313 | "http://www.linux.it/~malattia/wiki/index.php/Sony_drivers\n"); | 1317 | "http://www.linux.it/~malattia/wiki/index.php/Sony_drivers\n"); |
| 1314 | 1318 | ||
| 1315 | spin_lock_init(&sonypi_device.fifo_lock); | 1319 | spin_lock_init(&sonypi_device.fifo_lock); |
| 1316 | sonypi_device.fifo = kfifo_alloc(SONYPI_BUF_SIZE, GFP_KERNEL, | 1320 | error = kfifo_alloc(&sonypi_device.fifo, SONYPI_BUF_SIZE, GFP_KERNEL); |
| 1317 | &sonypi_device.fifo_lock); | 1321 | if (error) { |
| 1318 | if (IS_ERR(sonypi_device.fifo)) { | ||
| 1319 | printk(KERN_ERR "sonypi: kfifo_alloc failed\n"); | 1322 | printk(KERN_ERR "sonypi: kfifo_alloc failed\n"); |
| 1320 | return PTR_ERR(sonypi_device.fifo); | 1323 | return error; |
| 1321 | } | 1324 | } |
| 1322 | 1325 | ||
| 1323 | init_waitqueue_head(&sonypi_device.fifo_proc_list); | 1326 | init_waitqueue_head(&sonypi_device.fifo_proc_list); |
| @@ -1393,12 +1396,10 @@ static int __devinit sonypi_probe(struct platform_device *dev) | |||
| 1393 | } | 1396 | } |
| 1394 | 1397 | ||
| 1395 | spin_lock_init(&sonypi_device.input_fifo_lock); | 1398 | spin_lock_init(&sonypi_device.input_fifo_lock); |
| 1396 | sonypi_device.input_fifo = | 1399 | error = kfifo_alloc(&sonypi_device.input_fifo, SONYPI_BUF_SIZE, |
| 1397 | kfifo_alloc(SONYPI_BUF_SIZE, GFP_KERNEL, | 1400 | GFP_KERNEL); |
| 1398 | &sonypi_device.input_fifo_lock); | 1401 | if (error) { |
| 1399 | if (IS_ERR(sonypi_device.input_fifo)) { | ||
| 1400 | printk(KERN_ERR "sonypi: kfifo_alloc failed\n"); | 1402 | printk(KERN_ERR "sonypi: kfifo_alloc failed\n"); |
| 1401 | error = PTR_ERR(sonypi_device.input_fifo); | ||
| 1402 | goto err_inpdev_unregister; | 1403 | goto err_inpdev_unregister; |
| 1403 | } | 1404 | } |
| 1404 | 1405 | ||
| @@ -1423,7 +1424,7 @@ static int __devinit sonypi_probe(struct platform_device *dev) | |||
| 1423 | pci_disable_device(pcidev); | 1424 | pci_disable_device(pcidev); |
| 1424 | err_put_pcidev: | 1425 | err_put_pcidev: |
| 1425 | pci_dev_put(pcidev); | 1426 | pci_dev_put(pcidev); |
| 1426 | kfifo_free(sonypi_device.fifo); | 1427 | kfifo_free(&sonypi_device.fifo); |
| 1427 | 1428 | ||
| 1428 | return error; | 1429 | return error; |
| 1429 | } | 1430 | } |
| @@ -1438,7 +1439,7 @@ static int __devexit sonypi_remove(struct platform_device *dev) | |||
| 1438 | if (useinput) { | 1439 | if (useinput) { |
| 1439 | input_unregister_device(sonypi_device.input_key_dev); | 1440 | input_unregister_device(sonypi_device.input_key_dev); |
| 1440 | input_unregister_device(sonypi_device.input_jog_dev); | 1441 | input_unregister_device(sonypi_device.input_jog_dev); |
| 1441 | kfifo_free(sonypi_device.input_fifo); | 1442 | kfifo_free(&sonypi_device.input_fifo); |
| 1442 | } | 1443 | } |
| 1443 | 1444 | ||
| 1444 | misc_deregister(&sonypi_misc_device); | 1445 | misc_deregister(&sonypi_misc_device); |
| @@ -1451,7 +1452,7 @@ static int __devexit sonypi_remove(struct platform_device *dev) | |||
| 1451 | pci_dev_put(sonypi_device.dev); | 1452 | pci_dev_put(sonypi_device.dev); |
| 1452 | } | 1453 | } |
| 1453 | 1454 | ||
| 1454 | kfifo_free(sonypi_device.fifo); | 1455 | kfifo_free(&sonypi_device.fifo); |
| 1455 | 1456 | ||
| 1456 | return 0; | 1457 | return 0; |
| 1457 | } | 1458 | } |
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index ff2f1042cb44..766c46875a20 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c | |||
| @@ -434,11 +434,11 @@ static int drm_version(struct drm_device *dev, void *data, | |||
| 434 | * Looks up the ioctl function in the ::ioctls table, checking for root | 434 | * Looks up the ioctl function in the ::ioctls table, checking for root |
| 435 | * previleges if so required, and dispatches to the respective function. | 435 | * previleges if so required, and dispatches to the respective function. |
| 436 | */ | 436 | */ |
| 437 | int drm_ioctl(struct inode *inode, struct file *filp, | 437 | long drm_ioctl(struct file *filp, |
| 438 | unsigned int cmd, unsigned long arg) | 438 | unsigned int cmd, unsigned long arg) |
| 439 | { | 439 | { |
| 440 | struct drm_file *file_priv = filp->private_data; | 440 | struct drm_file *file_priv = filp->private_data; |
| 441 | struct drm_device *dev = file_priv->minor->dev; | 441 | struct drm_device *dev; |
| 442 | struct drm_ioctl_desc *ioctl; | 442 | struct drm_ioctl_desc *ioctl; |
| 443 | drm_ioctl_t *func; | 443 | drm_ioctl_t *func; |
| 444 | unsigned int nr = DRM_IOCTL_NR(cmd); | 444 | unsigned int nr = DRM_IOCTL_NR(cmd); |
| @@ -446,6 +446,7 @@ int drm_ioctl(struct inode *inode, struct file *filp, | |||
| 446 | char stack_kdata[128]; | 446 | char stack_kdata[128]; |
| 447 | char *kdata = NULL; | 447 | char *kdata = NULL; |
| 448 | 448 | ||
| 449 | dev = file_priv->minor->dev; | ||
| 449 | atomic_inc(&dev->ioctl_count); | 450 | atomic_inc(&dev->ioctl_count); |
| 450 | atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]); | 451 | atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]); |
| 451 | ++file_priv->ioctl_count; | 452 | ++file_priv->ioctl_count; |
| @@ -501,7 +502,13 @@ int drm_ioctl(struct inode *inode, struct file *filp, | |||
| 501 | goto err_i1; | 502 | goto err_i1; |
| 502 | } | 503 | } |
| 503 | } | 504 | } |
| 504 | retcode = func(dev, kdata, file_priv); | 505 | if (ioctl->flags & DRM_UNLOCKED) |
| 506 | retcode = func(dev, kdata, file_priv); | ||
| 507 | else { | ||
| 508 | lock_kernel(); | ||
| 509 | retcode = func(dev, kdata, file_priv); | ||
| 510 | unlock_kernel(); | ||
| 511 | } | ||
| 505 | 512 | ||
| 506 | if (cmd & IOC_OUT) { | 513 | if (cmd & IOC_OUT) { |
| 507 | if (copy_to_user((void __user *)arg, kdata, | 514 | if (copy_to_user((void __user *)arg, kdata, |
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index c39b26f1abed..5c9f79877cbf 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c | |||
| @@ -913,7 +913,7 @@ static int drm_cvt_modes(struct drm_connector *connector, | |||
| 913 | const int rates[] = { 60, 85, 75, 60, 50 }; | 913 | const int rates[] = { 60, 85, 75, 60, 50 }; |
| 914 | 914 | ||
| 915 | for (i = 0; i < 4; i++) { | 915 | for (i = 0; i < 4; i++) { |
| 916 | int width, height; | 916 | int uninitialized_var(width), height; |
| 917 | cvt = &(timing->data.other_data.data.cvt[i]); | 917 | cvt = &(timing->data.other_data.data.cvt[i]); |
| 918 | 918 | ||
| 919 | height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 8) + 1) * 2; | 919 | height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 8) + 1) * 2; |
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c index 282d9fdf9f4e..d61d185cf040 100644 --- a/drivers/gpu/drm/drm_ioc32.c +++ b/drivers/gpu/drm/drm_ioc32.c | |||
| @@ -104,7 +104,7 @@ static int compat_drm_version(struct file *file, unsigned int cmd, | |||
| 104 | &version->desc)) | 104 | &version->desc)) |
| 105 | return -EFAULT; | 105 | return -EFAULT; |
| 106 | 106 | ||
| 107 | err = drm_ioctl(file->f_path.dentry->d_inode, file, | 107 | err = drm_ioctl(file, |
| 108 | DRM_IOCTL_VERSION, (unsigned long)version); | 108 | DRM_IOCTL_VERSION, (unsigned long)version); |
| 109 | if (err) | 109 | if (err) |
| 110 | return err; | 110 | return err; |
| @@ -145,8 +145,7 @@ static int compat_drm_getunique(struct file *file, unsigned int cmd, | |||
| 145 | &u->unique)) | 145 | &u->unique)) |
| 146 | return -EFAULT; | 146 | return -EFAULT; |
| 147 | 147 | ||
| 148 | err = drm_ioctl(file->f_path.dentry->d_inode, file, | 148 | err = drm_ioctl(file, DRM_IOCTL_GET_UNIQUE, (unsigned long)u); |
| 149 | DRM_IOCTL_GET_UNIQUE, (unsigned long)u); | ||
| 150 | if (err) | 149 | if (err) |
| 151 | return err; | 150 | return err; |
| 152 | 151 | ||
| @@ -174,8 +173,7 @@ static int compat_drm_setunique(struct file *file, unsigned int cmd, | |||
| 174 | &u->unique)) | 173 | &u->unique)) |
| 175 | return -EFAULT; | 174 | return -EFAULT; |
| 176 | 175 | ||
| 177 | return drm_ioctl(file->f_path.dentry->d_inode, file, | 176 | return drm_ioctl(file, DRM_IOCTL_SET_UNIQUE, (unsigned long)u); |
| 178 | DRM_IOCTL_SET_UNIQUE, (unsigned long)u); | ||
| 179 | } | 177 | } |
| 180 | 178 | ||
| 181 | typedef struct drm_map32 { | 179 | typedef struct drm_map32 { |
| @@ -205,8 +203,7 @@ static int compat_drm_getmap(struct file *file, unsigned int cmd, | |||
| 205 | if (__put_user(idx, &map->offset)) | 203 | if (__put_user(idx, &map->offset)) |
| 206 | return -EFAULT; | 204 | return -EFAULT; |
| 207 | 205 | ||
| 208 | err = drm_ioctl(file->f_path.dentry->d_inode, file, | 206 | err = drm_ioctl(file, DRM_IOCTL_GET_MAP, (unsigned long)map); |
| 209 | DRM_IOCTL_GET_MAP, (unsigned long)map); | ||
| 210 | if (err) | 207 | if (err) |
| 211 | return err; | 208 | return err; |
| 212 | 209 | ||
| @@ -246,8 +243,7 @@ static int compat_drm_addmap(struct file *file, unsigned int cmd, | |||
| 246 | || __put_user(m32.flags, &map->flags)) | 243 | || __put_user(m32.flags, &map->flags)) |
| 247 | return -EFAULT; | 244 | return -EFAULT; |
| 248 | 245 | ||
| 249 | err = drm_ioctl(file->f_path.dentry->d_inode, file, | 246 | err = drm_ioctl(file, DRM_IOCTL_ADD_MAP, (unsigned long)map); |
| 250 | DRM_IOCTL_ADD_MAP, (unsigned long)map); | ||
| 251 | if (err) | 247 | if (err) |
| 252 | return err; | 248 | return err; |
| 253 | 249 | ||
| @@ -284,8 +280,7 @@ static int compat_drm_rmmap(struct file *file, unsigned int cmd, | |||
| 284 | if (__put_user((void *)(unsigned long)handle, &map->handle)) | 280 | if (__put_user((void *)(unsigned long)handle, &map->handle)) |
| 285 | return -EFAULT; | 281 | return -EFAULT; |
| 286 | 282 | ||
| 287 | return drm_ioctl(file->f_path.dentry->d_inode, file, | 283 | return drm_ioctl(file, DRM_IOCTL_RM_MAP, (unsigned long)map); |
| 288 | DRM_IOCTL_RM_MAP, (unsigned long)map); | ||
| 289 | } | 284 | } |
| 290 | 285 | ||
| 291 | typedef struct drm_client32 { | 286 | typedef struct drm_client32 { |
| @@ -314,8 +309,7 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd, | |||
| 314 | if (__put_user(idx, &client->idx)) | 309 | if (__put_user(idx, &client->idx)) |
| 315 | return -EFAULT; | 310 | return -EFAULT; |
| 316 | 311 | ||
| 317 | err = drm_ioctl(file->f_path.dentry->d_inode, file, | 312 | err = drm_ioctl(file, DRM_IOCTL_GET_CLIENT, (unsigned long)client); |
| 318 | DRM_IOCTL_GET_CLIENT, (unsigned long)client); | ||
| 319 | if (err) | 313 | if (err) |
| 320 | return err; | 314 | return err; |
| 321 | 315 | ||
| @@ -351,8 +345,7 @@ static int compat_drm_getstats(struct file *file, unsigned int cmd, | |||
| 351 | if (!access_ok(VERIFY_WRITE, stats, sizeof(*stats))) | 345 | if (!access_ok(VERIFY_WRITE, stats, sizeof(*stats))) |
| 352 | return -EFAULT; | 346 | return -EFAULT; |
| 353 | 347 | ||
| 354 | err = drm_ioctl(file->f_path.dentry->d_inode, file, | 348 | err = drm_ioctl(file, DRM_IOCTL_GET_STATS, (unsigned long)stats); |
| 355 | DRM_IOCTL_GET_STATS, (unsigned long)stats); | ||
| 356 | if (err) | 349 | if (err) |
| 357 | return err; | 350 | return err; |
| 358 | 351 | ||
| @@ -395,8 +388,7 @@ static int compat_drm_addbufs(struct file *file, unsigned int cmd, | |||
| 395 | || __put_user(agp_start, &buf->agp_start)) | 388 | || __put_user(agp_start, &buf->agp_start)) |
| 396 | return -EFAULT; | 389 | return -EFAULT; |
| 397 | 390 | ||
| 398 | err = drm_ioctl(file->f_path.dentry->d_inode, file, | 391 | err = drm_ioctl(file, DRM_IOCTL_ADD_BUFS, (unsigned long)buf); |
| 399 | DRM_IOCTL_ADD_BUFS, (unsigned long)buf); | ||
| 400 | if (err) | 392 | if (err) |
| 401 | return err; | 393 | return err; |
| 402 | 394 | ||
| @@ -427,8 +419,7 @@ static int compat_drm_markbufs(struct file *file, unsigned int cmd, | |||
| 427 | || __put_user(b32.high_mark, &buf->high_mark)) | 419 | || __put_user(b32.high_mark, &buf->high_mark)) |
| 428 | return -EFAULT; | 420 | return -EFAULT; |
| 429 | 421 | ||
| 430 | return drm_ioctl(file->f_path.dentry->d_inode, file, | 422 | return drm_ioctl(file, DRM_IOCTL_MARK_BUFS, (unsigned long)buf); |
| 431 | DRM_IOCTL_MARK_BUFS, (unsigned long)buf); | ||
| 432 | } | 423 | } |
| 433 | 424 | ||
| 434 | typedef struct drm_buf_info32 { | 425 | typedef struct drm_buf_info32 { |
| @@ -469,8 +460,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd, | |||
| 469 | || __put_user(list, &request->list)) | 460 | || __put_user(list, &request->list)) |
| 470 | return -EFAULT; | 461 | return -EFAULT; |
| 471 | 462 | ||
| 472 | err = drm_ioctl(file->f_path.dentry->d_inode, file, | 463 | err = drm_ioctl(file, DRM_IOCTL_INFO_BUFS, (unsigned long)request); |
| 473 | DRM_IOCTL_INFO_BUFS, (unsigned long)request); | ||
| 474 | if (err) | 464 | if (err) |
| 475 | return err; | 465 | return err; |
| 476 | 466 | ||
| @@ -531,8 +521,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd, | |||
| 531 | || __put_user(list, &request->list)) | 521 | || __put_user(list, &request->list)) |
| 532 | return -EFAULT; | 522 | return -EFAULT; |
| 533 | 523 | ||
| 534 | err = drm_ioctl(file->f_path.dentry->d_inode, file, | 524 | err = drm_ioctl(file, DRM_IOCTL_MAP_BUFS, (unsigned long)request); |
| 535 | DRM_IOCTL_MAP_BUFS, (unsigned long)request); | ||
| 536 | if (err) | 525 | if (err) |
| 537 | return err; | 526 | return err; |
| 538 | 527 | ||
| @@ -578,8 +567,7 @@ static int compat_drm_freebufs(struct file *file, unsigned int cmd, | |||
| 578 | &request->list)) | 567 | &request->list)) |
| 579 | return -EFAULT; | 568 | return -EFAULT; |
| 580 | 569 | ||
| 581 | return drm_ioctl(file->f_path.dentry->d_inode, file, | 570 | return drm_ioctl(file, DRM_IOCTL_FREE_BUFS, (unsigned long)request); |
| 582 | DRM_IOCTL_FREE_BUFS, (unsigned long)request); | ||
| 583 | } | 571 | } |
| 584 | 572 | ||
| 585 | typedef struct drm_ctx_priv_map32 { | 573 | typedef struct drm_ctx_priv_map32 { |
| @@ -605,8 +593,7 @@ static int compat_drm_setsareactx(struct file *file, unsigned int cmd, | |||
| 605 | &request->handle)) | 593 | &request->handle)) |
| 606 | return -EFAULT; | 594 | return -EFAULT; |
| 607 | 595 | ||
| 608 | return drm_ioctl(file->f_path.dentry->d_inode, file, | 596 | return drm_ioctl(file, DRM_IOCTL_SET_SAREA_CTX, (unsigned long)request); |
| 609 | DRM_IOCTL_SET_SAREA_CTX, (unsigned long)request); | ||
| 610 | } | 597 | } |
| 611 | 598 | ||
| 612 | static int compat_drm_getsareactx(struct file *file, unsigned int cmd, | 599 | static int compat_drm_getsareactx(struct file *file, unsigned int cmd, |
| @@ -628,8 +615,7 @@ static int compat_drm_getsareactx(struct file *file, unsigned int cmd, | |||
| 628 | if (__put_user(ctx_id, &request->ctx_id)) | 615 | if (__put_user(ctx_id, &request->ctx_id)) |
| 629 | return -EFAULT; | 616 | return -EFAULT; |
| 630 | 617 | ||
| 631 | err = drm_ioctl(file->f_path.dentry->d_inode, file, | 618 | err = drm_ioctl(file, DRM_IOCTL_GET_SAREA_CTX, (unsigned long)request); |
| 632 | DRM_IOCTL_GET_SAREA_CTX, (unsigned long)request); | ||
| 633 | if (err) | 619 | if (err) |
| 634 | return err; | 620 | return err; |
| 635 | 621 | ||
| @@ -664,8 +650,7 @@ static int compat_drm_resctx(struct file *file, unsigned int cmd, | |||
| 664 | &res->contexts)) | 650 | &res->contexts)) |
| 665 | return -EFAULT; | 651 | return -EFAULT; |
| 666 | 652 | ||
| 667 | err = drm_ioctl(file->f_path.dentry->d_inode, file, | 653 | err = drm_ioctl(file, DRM_IOCTL_RES_CTX, (unsigned long)res); |
| 668 | DRM_IOCTL_RES_CTX, (unsigned long)res); | ||
| 669 | if (err) | 654 | if (err) |
| 670 | return err; | 655 | return err; |
| 671 | 656 | ||
| @@ -718,8 +703,7 @@ static int compat_drm_dma(struct file *file, unsigned int cmd, | |||
| 718 | &d->request_sizes)) | 703 | &d->request_sizes)) |
| 719 | return -EFAULT; | 704 | return -EFAULT; |
| 720 | 705 | ||
| 721 | err = drm_ioctl(file->f_path.dentry->d_inode, file, | 706 | err = drm_ioctl(file, DRM_IOCTL_DMA, (unsigned long)d); |
| 722 | DRM_IOCTL_DMA, (unsigned long)d); | ||
| 723 | if (err) | 707 | if (err) |
| 724 | return err; | 708 | return err; |
| 725 | 709 | ||
| @@ -751,8 +735,7 @@ static int compat_drm_agp_enable(struct file *file, unsigned int cmd, | |||
| 751 | if (put_user(m32.mode, &mode->mode)) | 735 | if (put_user(m32.mode, &mode->mode)) |
| 752 | return -EFAULT; | 736 | return -EFAULT; |
| 753 | 737 | ||
| 754 | return drm_ioctl(file->f_path.dentry->d_inode, file, | 738 | return drm_ioctl(file, DRM_IOCTL_AGP_ENABLE, (unsigned long)mode); |
| 755 | DRM_IOCTL_AGP_ENABLE, (unsigned long)mode); | ||
| 756 | } | 739 | } |
| 757 | 740 | ||
| 758 | typedef struct drm_agp_info32 { | 741 | typedef struct drm_agp_info32 { |
| @@ -781,8 +764,7 @@ static int compat_drm_agp_info(struct file *file, unsigned int cmd, | |||
| 781 | if (!access_ok(VERIFY_WRITE, info, sizeof(*info))) | 764 | if (!access_ok(VERIFY_WRITE, info, sizeof(*info))) |
| 782 | return -EFAULT; | 765 | return -EFAULT; |
| 783 | 766 | ||
| 784 | err = drm_ioctl(file->f_path.dentry->d_inode, file, | 767 | err = drm_ioctl(file, DRM_IOCTL_AGP_INFO, (unsigned long)info); |
| 785 | DRM_IOCTL_AGP_INFO, (unsigned long)info); | ||
| 786 | if (err) | 768 | if (err) |
| 787 | return err; | 769 | return err; |
| 788 | 770 | ||
| @@ -827,16 +809,14 @@ static int compat_drm_agp_alloc(struct file *file, unsigned int cmd, | |||
| 827 | || __put_user(req32.type, &request->type)) | 809 | || __put_user(req32.type, &request->type)) |
| 828 | return -EFAULT; | 810 | return -EFAULT; |
| 829 | 811 | ||
| 830 | err = drm_ioctl(file->f_path.dentry->d_inode, file, | 812 | err = drm_ioctl(file, DRM_IOCTL_AGP_ALLOC, (unsigned long)request); |
| 831 | DRM_IOCTL_AGP_ALLOC, (unsigned long)request); | ||
| 832 | if (err) | 813 | if (err) |
| 833 | return err; | 814 | return err; |
| 834 | 815 | ||
| 835 | if (__get_user(req32.handle, &request->handle) | 816 | if (__get_user(req32.handle, &request->handle) |
| 836 | || __get_user(req32.physical, &request->physical) | 817 | || __get_user(req32.physical, &request->physical) |
| 837 | || copy_to_user(argp, &req32, sizeof(req32))) { | 818 | || copy_to_user(argp, &req32, sizeof(req32))) { |
| 838 | drm_ioctl(file->f_path.dentry->d_inode, file, | 819 | drm_ioctl(file, DRM_IOCTL_AGP_FREE, (unsigned long)request); |
| 839 | DRM_IOCTL_AGP_FREE, (unsigned long)request); | ||
| 840 | return -EFAULT; | 820 | return -EFAULT; |
| 841 | } | 821 | } |
| 842 | 822 | ||
| @@ -856,8 +836,7 @@ static int compat_drm_agp_free(struct file *file, unsigned int cmd, | |||
| 856 | || __put_user(handle, &request->handle)) | 836 | || __put_user(handle, &request->handle)) |
| 857 | return -EFAULT; | 837 | return -EFAULT; |
| 858 | 838 | ||
| 859 | return drm_ioctl(file->f_path.dentry->d_inode, file, | 839 | return drm_ioctl(file, DRM_IOCTL_AGP_FREE, (unsigned long)request); |
| 860 | DRM_IOCTL_AGP_FREE, (unsigned long)request); | ||
| 861 | } | 840 | } |
| 862 | 841 | ||
| 863 | typedef struct drm_agp_binding32 { | 842 | typedef struct drm_agp_binding32 { |
| @@ -881,8 +860,7 @@ static int compat_drm_agp_bind(struct file *file, unsigned int cmd, | |||
| 881 | || __put_user(req32.offset, &request->offset)) | 860 | || __put_user(req32.offset, &request->offset)) |
| 882 | return -EFAULT; | 861 | return -EFAULT; |
| 883 | 862 | ||
| 884 | return drm_ioctl(file->f_path.dentry->d_inode, file, | 863 | return drm_ioctl(file, DRM_IOCTL_AGP_BIND, (unsigned long)request); |
| 885 | DRM_IOCTL_AGP_BIND, (unsigned long)request); | ||
| 886 | } | 864 | } |
| 887 | 865 | ||
| 888 | static int compat_drm_agp_unbind(struct file *file, unsigned int cmd, | 866 | static int compat_drm_agp_unbind(struct file *file, unsigned int cmd, |
| @@ -898,8 +876,7 @@ static int compat_drm_agp_unbind(struct file *file, unsigned int cmd, | |||
| 898 | || __put_user(handle, &request->handle)) | 876 | || __put_user(handle, &request->handle)) |
| 899 | return -EFAULT; | 877 | return -EFAULT; |
| 900 | 878 | ||
| 901 | return drm_ioctl(file->f_path.dentry->d_inode, file, | 879 | return drm_ioctl(file, DRM_IOCTL_AGP_UNBIND, (unsigned long)request); |
| 902 | DRM_IOCTL_AGP_UNBIND, (unsigned long)request); | ||
| 903 | } | 880 | } |
| 904 | #endif /* __OS_HAS_AGP */ | 881 | #endif /* __OS_HAS_AGP */ |
| 905 | 882 | ||
| @@ -923,8 +900,7 @@ static int compat_drm_sg_alloc(struct file *file, unsigned int cmd, | |||
| 923 | || __put_user(x, &request->size)) | 900 | || __put_user(x, &request->size)) |
| 924 | return -EFAULT; | 901 | return -EFAULT; |
| 925 | 902 | ||
| 926 | err = drm_ioctl(file->f_path.dentry->d_inode, file, | 903 | err = drm_ioctl(file, DRM_IOCTL_SG_ALLOC, (unsigned long)request); |
| 927 | DRM_IOCTL_SG_ALLOC, (unsigned long)request); | ||
| 928 | if (err) | 904 | if (err) |
| 929 | return err; | 905 | return err; |
| 930 | 906 | ||
| @@ -950,8 +926,7 @@ static int compat_drm_sg_free(struct file *file, unsigned int cmd, | |||
| 950 | || __put_user(x << PAGE_SHIFT, &request->handle)) | 926 | || __put_user(x << PAGE_SHIFT, &request->handle)) |
| 951 | return -EFAULT; | 927 | return -EFAULT; |
| 952 | 928 | ||
| 953 | return drm_ioctl(file->f_path.dentry->d_inode, file, | 929 | return drm_ioctl(file, DRM_IOCTL_SG_FREE, (unsigned long)request); |
| 954 | DRM_IOCTL_SG_FREE, (unsigned long)request); | ||
| 955 | } | 930 | } |
| 956 | 931 | ||
| 957 | #if defined(CONFIG_X86) || defined(CONFIG_IA64) | 932 | #if defined(CONFIG_X86) || defined(CONFIG_IA64) |
| @@ -981,8 +956,7 @@ static int compat_drm_update_draw(struct file *file, unsigned int cmd, | |||
| 981 | __put_user(update32.data, &request->data)) | 956 | __put_user(update32.data, &request->data)) |
| 982 | return -EFAULT; | 957 | return -EFAULT; |
| 983 | 958 | ||
| 984 | err = drm_ioctl(file->f_path.dentry->d_inode, file, | 959 | err = drm_ioctl(file, DRM_IOCTL_UPDATE_DRAW, (unsigned long)request); |
| 985 | DRM_IOCTL_UPDATE_DRAW, (unsigned long)request); | ||
| 986 | return err; | 960 | return err; |
| 987 | } | 961 | } |
| 988 | #endif | 962 | #endif |
| @@ -1023,8 +997,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd, | |||
| 1023 | || __put_user(req32.request.signal, &request->request.signal)) | 997 | || __put_user(req32.request.signal, &request->request.signal)) |
| 1024 | return -EFAULT; | 998 | return -EFAULT; |
| 1025 | 999 | ||
| 1026 | err = drm_ioctl(file->f_path.dentry->d_inode, file, | 1000 | err = drm_ioctl(file, DRM_IOCTL_WAIT_VBLANK, (unsigned long)request); |
| 1027 | DRM_IOCTL_WAIT_VBLANK, (unsigned long)request); | ||
| 1028 | if (err) | 1001 | if (err) |
| 1029 | return err; | 1002 | return err; |
| 1030 | 1003 | ||
| @@ -1094,16 +1067,14 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | |||
| 1094 | * than always failing. | 1067 | * than always failing. |
| 1095 | */ | 1068 | */ |
| 1096 | if (nr >= ARRAY_SIZE(drm_compat_ioctls)) | 1069 | if (nr >= ARRAY_SIZE(drm_compat_ioctls)) |
| 1097 | return drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); | 1070 | return drm_ioctl(filp, cmd, arg); |
| 1098 | 1071 | ||
| 1099 | fn = drm_compat_ioctls[nr]; | 1072 | fn = drm_compat_ioctls[nr]; |
| 1100 | 1073 | ||
| 1101 | lock_kernel(); /* XXX for now */ | ||
| 1102 | if (fn != NULL) | 1074 | if (fn != NULL) |
| 1103 | ret = (*fn) (filp, cmd, arg); | 1075 | ret = (*fn) (filp, cmd, arg); |
| 1104 | else | 1076 | else |
| 1105 | ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg); | 1077 | ret = drm_ioctl(filp, cmd, arg); |
| 1106 | unlock_kernel(); | ||
| 1107 | 1078 | ||
| 1108 | return ret; | 1079 | return ret; |
| 1109 | } | 1080 | } |
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index d7d7eac3ddd2..cdec32977129 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c | |||
| @@ -358,7 +358,7 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, | |||
| 358 | if (entry->size >= size + wasted) { | 358 | if (entry->size >= size + wasted) { |
| 359 | if (!best_match) | 359 | if (!best_match) |
| 360 | return entry; | 360 | return entry; |
| 361 | if (size < best_size) { | 361 | if (entry->size < best_size) { |
| 362 | best = entry; | 362 | best = entry; |
| 363 | best_size = entry->size; | 363 | best_size = entry->size; |
| 364 | } | 364 | } |
| @@ -408,7 +408,7 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm, | |||
| 408 | if (entry->size >= size + wasted) { | 408 | if (entry->size >= size + wasted) { |
| 409 | if (!best_match) | 409 | if (!best_match) |
| 410 | return entry; | 410 | return entry; |
| 411 | if (size < best_size) { | 411 | if (entry->size < best_size) { |
| 412 | best = entry; | 412 | best = entry; |
| 413 | best_size = entry->size; | 413 | best_size = entry->size; |
| 414 | } | 414 | } |
diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c index 9422a74c8b54..81681a07a806 100644 --- a/drivers/gpu/drm/i2c/ch7006_drv.c +++ b/drivers/gpu/drm/i2c/ch7006_drv.c | |||
| @@ -408,6 +408,11 @@ static int ch7006_probe(struct i2c_client *client, const struct i2c_device_id *i | |||
| 408 | 408 | ||
| 409 | ch7006_info(client, "Detected version ID: %x\n", val); | 409 | ch7006_info(client, "Detected version ID: %x\n", val); |
| 410 | 410 | ||
| 411 | /* I don't know what this is for, but otherwise I get no | ||
| 412 | * signal. | ||
| 413 | */ | ||
| 414 | ch7006_write(client, 0x3d, 0x0); | ||
| 415 | |||
| 411 | return 0; | 416 | return 0; |
| 412 | 417 | ||
| 413 | fail: | 418 | fail: |
diff --git a/drivers/gpu/drm/i2c/ch7006_mode.c b/drivers/gpu/drm/i2c/ch7006_mode.c index 87f5445092e8..e447dfb63890 100644 --- a/drivers/gpu/drm/i2c/ch7006_mode.c +++ b/drivers/gpu/drm/i2c/ch7006_mode.c | |||
| @@ -427,11 +427,6 @@ void ch7006_state_load(struct i2c_client *client, | |||
| 427 | ch7006_load_reg(client, state, CH7006_SUBC_INC7); | 427 | ch7006_load_reg(client, state, CH7006_SUBC_INC7); |
| 428 | ch7006_load_reg(client, state, CH7006_PLL_CONTROL); | 428 | ch7006_load_reg(client, state, CH7006_PLL_CONTROL); |
| 429 | ch7006_load_reg(client, state, CH7006_CALC_SUBC_INC0); | 429 | ch7006_load_reg(client, state, CH7006_CALC_SUBC_INC0); |
| 430 | |||
| 431 | /* I don't know what this is for, but otherwise I get no | ||
| 432 | * signal. | ||
| 433 | */ | ||
| 434 | ch7006_write(client, 0x3d, 0x0); | ||
| 435 | } | 430 | } |
| 436 | 431 | ||
| 437 | void ch7006_state_save(struct i2c_client *client, | 432 | void ch7006_state_save(struct i2c_client *client, |
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c index 7d1d88cdf2dc..de32d22a8c39 100644 --- a/drivers/gpu/drm/i810/i810_dma.c +++ b/drivers/gpu/drm/i810/i810_dma.c | |||
| @@ -115,7 +115,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma) | |||
| 115 | static const struct file_operations i810_buffer_fops = { | 115 | static const struct file_operations i810_buffer_fops = { |
| 116 | .open = drm_open, | 116 | .open = drm_open, |
| 117 | .release = drm_release, | 117 | .release = drm_release, |
| 118 | .ioctl = drm_ioctl, | 118 | .unlocked_ioctl = drm_ioctl, |
| 119 | .mmap = i810_mmap_buffers, | 119 | .mmap = i810_mmap_buffers, |
| 120 | .fasync = drm_fasync, | 120 | .fasync = drm_fasync, |
| 121 | }; | 121 | }; |
diff --git a/drivers/gpu/drm/i810/i810_drv.c b/drivers/gpu/drm/i810/i810_drv.c index fabb9a817966..c1e02752e023 100644 --- a/drivers/gpu/drm/i810/i810_drv.c +++ b/drivers/gpu/drm/i810/i810_drv.c | |||
| @@ -59,7 +59,7 @@ static struct drm_driver driver = { | |||
| 59 | .owner = THIS_MODULE, | 59 | .owner = THIS_MODULE, |
| 60 | .open = drm_open, | 60 | .open = drm_open, |
| 61 | .release = drm_release, | 61 | .release = drm_release, |
| 62 | .ioctl = drm_ioctl, | 62 | .unlocked_ioctl = drm_ioctl, |
| 63 | .mmap = drm_mmap, | 63 | .mmap = drm_mmap, |
| 64 | .poll = drm_poll, | 64 | .poll = drm_poll, |
| 65 | .fasync = drm_fasync, | 65 | .fasync = drm_fasync, |
diff --git a/drivers/gpu/drm/i830/i830_dma.c b/drivers/gpu/drm/i830/i830_dma.c index 877bf6cb14a4..06bd732e6463 100644 --- a/drivers/gpu/drm/i830/i830_dma.c +++ b/drivers/gpu/drm/i830/i830_dma.c | |||
| @@ -117,7 +117,7 @@ static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma) | |||
| 117 | static const struct file_operations i830_buffer_fops = { | 117 | static const struct file_operations i830_buffer_fops = { |
| 118 | .open = drm_open, | 118 | .open = drm_open, |
| 119 | .release = drm_release, | 119 | .release = drm_release, |
| 120 | .ioctl = drm_ioctl, | 120 | .unlocked_ioctl = drm_ioctl, |
| 121 | .mmap = i830_mmap_buffers, | 121 | .mmap = i830_mmap_buffers, |
| 122 | .fasync = drm_fasync, | 122 | .fasync = drm_fasync, |
| 123 | }; | 123 | }; |
diff --git a/drivers/gpu/drm/i830/i830_drv.c b/drivers/gpu/drm/i830/i830_drv.c index 389597e4a623..44f990bed8f4 100644 --- a/drivers/gpu/drm/i830/i830_drv.c +++ b/drivers/gpu/drm/i830/i830_drv.c | |||
| @@ -70,7 +70,7 @@ static struct drm_driver driver = { | |||
| 70 | .owner = THIS_MODULE, | 70 | .owner = THIS_MODULE, |
| 71 | .open = drm_open, | 71 | .open = drm_open, |
| 72 | .release = drm_release, | 72 | .release = drm_release, |
| 73 | .ioctl = drm_ioctl, | 73 | .unlocked_ioctl = drm_ioctl, |
| 74 | .mmap = drm_mmap, | 74 | .mmap = drm_mmap, |
| 75 | .poll = drm_poll, | 75 | .poll = drm_poll, |
| 76 | .fasync = drm_fasync, | 76 | .fasync = drm_fasync, |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 2fa217862058..24286ca168fc 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
| @@ -329,7 +329,7 @@ static struct drm_driver driver = { | |||
| 329 | .owner = THIS_MODULE, | 329 | .owner = THIS_MODULE, |
| 330 | .open = drm_open, | 330 | .open = drm_open, |
| 331 | .release = drm_release, | 331 | .release = drm_release, |
| 332 | .ioctl = drm_ioctl, | 332 | .unlocked_ioctl = drm_ioctl, |
| 333 | .mmap = drm_gem_mmap, | 333 | .mmap = drm_gem_mmap, |
| 334 | .poll = drm_poll, | 334 | .poll = drm_poll, |
| 335 | .fasync = drm_fasync, | 335 | .fasync = drm_fasync, |
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c index 1fe68a251b75..13b028994b2b 100644 --- a/drivers/gpu/drm/i915/i915_ioc32.c +++ b/drivers/gpu/drm/i915/i915_ioc32.c | |||
| @@ -66,8 +66,7 @@ static int compat_i915_batchbuffer(struct file *file, unsigned int cmd, | |||
| 66 | &batchbuffer->cliprects)) | 66 | &batchbuffer->cliprects)) |
| 67 | return -EFAULT; | 67 | return -EFAULT; |
| 68 | 68 | ||
| 69 | return drm_ioctl(file->f_path.dentry->d_inode, file, | 69 | return drm_ioctl(file, DRM_IOCTL_I915_BATCHBUFFER, |
| 70 | DRM_IOCTL_I915_BATCHBUFFER, | ||
| 71 | (unsigned long)batchbuffer); | 70 | (unsigned long)batchbuffer); |
| 72 | } | 71 | } |
| 73 | 72 | ||
| @@ -102,8 +101,8 @@ static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd, | |||
| 102 | &cmdbuffer->cliprects)) | 101 | &cmdbuffer->cliprects)) |
| 103 | return -EFAULT; | 102 | return -EFAULT; |
| 104 | 103 | ||
| 105 | return drm_ioctl(file->f_path.dentry->d_inode, file, | 104 | return drm_ioctl(file, DRM_IOCTL_I915_CMDBUFFER, |
| 106 | DRM_IOCTL_I915_CMDBUFFER, (unsigned long)cmdbuffer); | 105 | (unsigned long)cmdbuffer); |
| 107 | } | 106 | } |
| 108 | 107 | ||
| 109 | typedef struct drm_i915_irq_emit32 { | 108 | typedef struct drm_i915_irq_emit32 { |
| @@ -125,8 +124,8 @@ static int compat_i915_irq_emit(struct file *file, unsigned int cmd, | |||
| 125 | &request->irq_seq)) | 124 | &request->irq_seq)) |
| 126 | return -EFAULT; | 125 | return -EFAULT; |
| 127 | 126 | ||
| 128 | return drm_ioctl(file->f_path.dentry->d_inode, file, | 127 | return drm_ioctl(file, DRM_IOCTL_I915_IRQ_EMIT, |
| 129 | DRM_IOCTL_I915_IRQ_EMIT, (unsigned long)request); | 128 | (unsigned long)request); |
| 130 | } | 129 | } |
| 131 | typedef struct drm_i915_getparam32 { | 130 | typedef struct drm_i915_getparam32 { |
| 132 | int param; | 131 | int param; |
| @@ -149,8 +148,8 @@ static int compat_i915_getparam(struct file *file, unsigned int cmd, | |||
| 149 | &request->value)) | 148 | &request->value)) |
| 150 | return -EFAULT; | 149 | return -EFAULT; |
| 151 | 150 | ||
| 152 | return drm_ioctl(file->f_path.dentry->d_inode, file, | 151 | return drm_ioctl(file, DRM_IOCTL_I915_GETPARAM, |
| 153 | DRM_IOCTL_I915_GETPARAM, (unsigned long)request); | 152 | (unsigned long)request); |
| 154 | } | 153 | } |
| 155 | 154 | ||
| 156 | typedef struct drm_i915_mem_alloc32 { | 155 | typedef struct drm_i915_mem_alloc32 { |
| @@ -178,8 +177,8 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd, | |||
| 178 | &request->region_offset)) | 177 | &request->region_offset)) |
| 179 | return -EFAULT; | 178 | return -EFAULT; |
| 180 | 179 | ||
| 181 | return drm_ioctl(file->f_path.dentry->d_inode, file, | 180 | return drm_ioctl(file, DRM_IOCTL_I915_ALLOC, |
| 182 | DRM_IOCTL_I915_ALLOC, (unsigned long)request); | 181 | (unsigned long)request); |
| 183 | } | 182 | } |
| 184 | 183 | ||
| 185 | drm_ioctl_compat_t *i915_compat_ioctls[] = { | 184 | drm_ioctl_compat_t *i915_compat_ioctls[] = { |
| @@ -211,12 +210,10 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | |||
| 211 | if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls)) | 210 | if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls)) |
| 212 | fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE]; | 211 | fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE]; |
| 213 | 212 | ||
| 214 | lock_kernel(); /* XXX for now */ | ||
| 215 | if (fn != NULL) | 213 | if (fn != NULL) |
| 216 | ret = (*fn) (filp, cmd, arg); | 214 | ret = (*fn) (filp, cmd, arg); |
| 217 | else | 215 | else |
| 218 | ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg); | 216 | ret = drm_ioctl(filp, cmd, arg); |
| 219 | unlock_kernel(); | ||
| 220 | 217 | ||
| 221 | return ret; | 218 | return ret; |
| 222 | } | 219 | } |
diff --git a/drivers/gpu/drm/mga/mga_drv.c b/drivers/gpu/drm/mga/mga_drv.c index 97ee566ef749..ddfe16197b59 100644 --- a/drivers/gpu/drm/mga/mga_drv.c +++ b/drivers/gpu/drm/mga/mga_drv.c | |||
| @@ -68,7 +68,7 @@ static struct drm_driver driver = { | |||
| 68 | .owner = THIS_MODULE, | 68 | .owner = THIS_MODULE, |
| 69 | .open = drm_open, | 69 | .open = drm_open, |
| 70 | .release = drm_release, | 70 | .release = drm_release, |
| 71 | .ioctl = drm_ioctl, | 71 | .unlocked_ioctl = drm_ioctl, |
| 72 | .mmap = drm_mmap, | 72 | .mmap = drm_mmap, |
| 73 | .poll = drm_poll, | 73 | .poll = drm_poll, |
| 74 | .fasync = drm_fasync, | 74 | .fasync = drm_fasync, |
diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c index 30d00478ddee..c1f877b7bac1 100644 --- a/drivers/gpu/drm/mga/mga_ioc32.c +++ b/drivers/gpu/drm/mga/mga_ioc32.c | |||
| @@ -100,8 +100,7 @@ static int compat_mga_init(struct file *file, unsigned int cmd, | |||
| 100 | if (err) | 100 | if (err) |
| 101 | return -EFAULT; | 101 | return -EFAULT; |
| 102 | 102 | ||
| 103 | return drm_ioctl(file->f_path.dentry->d_inode, file, | 103 | return drm_ioctl(file, DRM_IOCTL_MGA_INIT, (unsigned long)init); |
| 104 | DRM_IOCTL_MGA_INIT, (unsigned long)init); | ||
| 105 | } | 104 | } |
| 106 | 105 | ||
| 107 | typedef struct drm_mga_getparam32 { | 106 | typedef struct drm_mga_getparam32 { |
| @@ -125,8 +124,7 @@ static int compat_mga_getparam(struct file *file, unsigned int cmd, | |||
| 125 | &getparam->value)) | 124 | &getparam->value)) |
| 126 | return -EFAULT; | 125 | return -EFAULT; |
| 127 | 126 | ||
| 128 | return drm_ioctl(file->f_path.dentry->d_inode, file, | 127 | return drm_ioctl(file, DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam); |
| 129 | DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam); | ||
| 130 | } | 128 | } |
| 131 | 129 | ||
| 132 | typedef struct drm_mga_drm_bootstrap32 { | 130 | typedef struct drm_mga_drm_bootstrap32 { |
| @@ -166,8 +164,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd, | |||
| 166 | || __put_user(dma_bootstrap32.agp_size, &dma_bootstrap->agp_size)) | 164 | || __put_user(dma_bootstrap32.agp_size, &dma_bootstrap->agp_size)) |
| 167 | return -EFAULT; | 165 | return -EFAULT; |
| 168 | 166 | ||
| 169 | err = drm_ioctl(file->f_path.dentry->d_inode, file, | 167 | err = drm_ioctl(file, DRM_IOCTL_MGA_DMA_BOOTSTRAP, |
| 170 | DRM_IOCTL_MGA_DMA_BOOTSTRAP, | ||
| 171 | (unsigned long)dma_bootstrap); | 168 | (unsigned long)dma_bootstrap); |
| 172 | if (err) | 169 | if (err) |
| 173 | return err; | 170 | return err; |
| @@ -220,12 +217,10 @@ long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | |||
| 220 | if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) | 217 | if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) |
| 221 | fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE]; | 218 | fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE]; |
| 222 | 219 | ||
| 223 | lock_kernel(); /* XXX for now */ | ||
| 224 | if (fn != NULL) | 220 | if (fn != NULL) |
| 225 | ret = (*fn) (filp, cmd, arg); | 221 | ret = (*fn) (filp, cmd, arg); |
| 226 | else | 222 | else |
| 227 | ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg); | 223 | ret = drm_ioctl(filp, cmd, arg); |
| 228 | unlock_kernel(); | ||
| 229 | 224 | ||
| 230 | return ret; | 225 | return ret; |
| 231 | } | 226 | } |
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile index 1d90d4d0144f..48c290b5da8c 100644 --- a/drivers/gpu/drm/nouveau/Makefile +++ b/drivers/gpu/drm/nouveau/Makefile | |||
| @@ -8,14 +8,15 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \ | |||
| 8 | nouveau_sgdma.o nouveau_dma.o \ | 8 | nouveau_sgdma.o nouveau_dma.o \ |
| 9 | nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \ | 9 | nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \ |
| 10 | nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \ | 10 | nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \ |
| 11 | nouveau_display.o nouveau_connector.o nouveau_fbcon.o \ | 11 | nouveau_display.o nouveau_connector.o nouveau_fbcon.o \ |
| 12 | nouveau_dp.o \ | 12 | nouveau_dp.o nouveau_grctx.o \ |
| 13 | nv04_timer.o \ | 13 | nv04_timer.o \ |
| 14 | nv04_mc.o nv40_mc.o nv50_mc.o \ | 14 | nv04_mc.o nv40_mc.o nv50_mc.o \ |
| 15 | nv04_fb.o nv10_fb.o nv40_fb.o \ | 15 | nv04_fb.o nv10_fb.o nv40_fb.o \ |
| 16 | nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \ | 16 | nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \ |
| 17 | nv04_graph.o nv10_graph.o nv20_graph.o \ | 17 | nv04_graph.o nv10_graph.o nv20_graph.o \ |
| 18 | nv40_graph.o nv50_graph.o \ | 18 | nv40_graph.o nv50_graph.o \ |
| 19 | nv40_grctx.o \ | ||
| 19 | nv04_instmem.o nv50_instmem.o \ | 20 | nv04_instmem.o nv50_instmem.o \ |
| 20 | nv50_crtc.o nv50_dac.o nv50_sor.o \ | 21 | nv50_crtc.o nv50_dac.o nv50_sor.o \ |
| 21 | nv50_cursor.o nv50_display.o nv50_fbcon.o \ | 22 | nv50_cursor.o nv50_display.o nv50_fbcon.o \ |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 5eec5ed69489..ba143972769f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c | |||
| @@ -181,43 +181,42 @@ struct methods { | |||
| 181 | const char desc[8]; | 181 | const char desc[8]; |
| 182 | void (*loadbios)(struct drm_device *, uint8_t *); | 182 | void (*loadbios)(struct drm_device *, uint8_t *); |
| 183 | const bool rw; | 183 | const bool rw; |
| 184 | int score; | ||
| 185 | }; | 184 | }; |
| 186 | 185 | ||
| 187 | static struct methods nv04_methods[] = { | 186 | static struct methods nv04_methods[] = { |
| 188 | { "PROM", load_vbios_prom, false }, | 187 | { "PROM", load_vbios_prom, false }, |
| 189 | { "PRAMIN", load_vbios_pramin, true }, | 188 | { "PRAMIN", load_vbios_pramin, true }, |
| 190 | { "PCIROM", load_vbios_pci, true }, | 189 | { "PCIROM", load_vbios_pci, true }, |
| 191 | { } | ||
| 192 | }; | 190 | }; |
| 193 | 191 | ||
| 194 | static struct methods nv50_methods[] = { | 192 | static struct methods nv50_methods[] = { |
| 195 | { "PRAMIN", load_vbios_pramin, true }, | 193 | { "PRAMIN", load_vbios_pramin, true }, |
| 196 | { "PROM", load_vbios_prom, false }, | 194 | { "PROM", load_vbios_prom, false }, |
| 197 | { "PCIROM", load_vbios_pci, true }, | 195 | { "PCIROM", load_vbios_pci, true }, |
| 198 | { } | ||
| 199 | }; | 196 | }; |
| 200 | 197 | ||
| 198 | #define METHODCNT 3 | ||
| 199 | |||
| 201 | static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data) | 200 | static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data) |
| 202 | { | 201 | { |
| 203 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 202 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
| 204 | struct methods *methods, *method; | 203 | struct methods *methods; |
| 204 | int i; | ||
| 205 | int testscore = 3; | 205 | int testscore = 3; |
| 206 | int scores[METHODCNT]; | ||
| 206 | 207 | ||
| 207 | if (nouveau_vbios) { | 208 | if (nouveau_vbios) { |
| 208 | method = nv04_methods; | 209 | methods = nv04_methods; |
| 209 | while (method->loadbios) { | 210 | for (i = 0; i < METHODCNT; i++) |
| 210 | if (!strcasecmp(nouveau_vbios, method->desc)) | 211 | if (!strcasecmp(nouveau_vbios, methods[i].desc)) |
| 211 | break; | 212 | break; |
| 212 | method++; | ||
| 213 | } | ||
| 214 | 213 | ||
| 215 | if (method->loadbios) { | 214 | if (i < METHODCNT) { |
| 216 | NV_INFO(dev, "Attempting to use BIOS image from %s\n", | 215 | NV_INFO(dev, "Attempting to use BIOS image from %s\n", |
| 217 | method->desc); | 216 | methods[i].desc); |
| 218 | 217 | ||
| 219 | method->loadbios(dev, data); | 218 | methods[i].loadbios(dev, data); |
| 220 | if (score_vbios(dev, data, method->rw)) | 219 | if (score_vbios(dev, data, methods[i].rw)) |
| 221 | return true; | 220 | return true; |
| 222 | } | 221 | } |
| 223 | 222 | ||
| @@ -229,28 +228,24 @@ static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data) | |||
| 229 | else | 228 | else |
| 230 | methods = nv50_methods; | 229 | methods = nv50_methods; |
| 231 | 230 | ||
| 232 | method = methods; | 231 | for (i = 0; i < METHODCNT; i++) { |
| 233 | while (method->loadbios) { | ||
| 234 | NV_TRACE(dev, "Attempting to load BIOS image from %s\n", | 232 | NV_TRACE(dev, "Attempting to load BIOS image from %s\n", |
| 235 | method->desc); | 233 | methods[i].desc); |
| 236 | data[0] = data[1] = 0; /* avoid reuse of previous image */ | 234 | data[0] = data[1] = 0; /* avoid reuse of previous image */ |
| 237 | method->loadbios(dev, data); | 235 | methods[i].loadbios(dev, data); |
| 238 | method->score = score_vbios(dev, data, method->rw); | 236 | scores[i] = score_vbios(dev, data, methods[i].rw); |
| 239 | if (method->score == testscore) | 237 | if (scores[i] == testscore) |
| 240 | return true; | 238 | return true; |
| 241 | method++; | ||
| 242 | } | 239 | } |
| 243 | 240 | ||
| 244 | while (--testscore > 0) { | 241 | while (--testscore > 0) { |
| 245 | method = methods; | 242 | for (i = 0; i < METHODCNT; i++) { |
| 246 | while (method->loadbios) { | 243 | if (scores[i] == testscore) { |
| 247 | if (method->score == testscore) { | ||
| 248 | NV_TRACE(dev, "Using BIOS image from %s\n", | 244 | NV_TRACE(dev, "Using BIOS image from %s\n", |
| 249 | method->desc); | 245 | methods[i].desc); |
| 250 | method->loadbios(dev, data); | 246 | methods[i].loadbios(dev, data); |
| 251 | return true; | 247 | return true; |
| 252 | } | 248 | } |
| 253 | method++; | ||
| 254 | } | 249 | } |
| 255 | } | 250 | } |
| 256 | 251 | ||
| @@ -261,10 +256,7 @@ static bool NVShadowVBIOS(struct drm_device *dev, uint8_t *data) | |||
| 261 | struct init_tbl_entry { | 256 | struct init_tbl_entry { |
| 262 | char *name; | 257 | char *name; |
| 263 | uint8_t id; | 258 | uint8_t id; |
| 264 | int length; | 259 | int (*handler)(struct nvbios *, uint16_t, struct init_exec *); |
| 265 | int length_offset; | ||
| 266 | int length_multiplier; | ||
| 267 | bool (*handler)(struct nvbios *, uint16_t, struct init_exec *); | ||
| 268 | }; | 260 | }; |
| 269 | 261 | ||
| 270 | struct bit_entry { | 262 | struct bit_entry { |
| @@ -820,7 +812,7 @@ static uint32_t get_tmds_index_reg(struct drm_device *dev, uint8_t mlv) | |||
| 820 | } | 812 | } |
| 821 | } | 813 | } |
| 822 | 814 | ||
| 823 | static bool | 815 | static int |
| 824 | init_io_restrict_prog(struct nvbios *bios, uint16_t offset, | 816 | init_io_restrict_prog(struct nvbios *bios, uint16_t offset, |
| 825 | struct init_exec *iexec) | 817 | struct init_exec *iexec) |
| 826 | { | 818 | { |
| @@ -852,9 +844,10 @@ init_io_restrict_prog(struct nvbios *bios, uint16_t offset, | |||
| 852 | uint32_t reg = ROM32(bios->data[offset + 7]); | 844 | uint32_t reg = ROM32(bios->data[offset + 7]); |
| 853 | uint8_t config; | 845 | uint8_t config; |
| 854 | uint32_t configval; | 846 | uint32_t configval; |
| 847 | int len = 11 + count * 4; | ||
| 855 | 848 | ||
| 856 | if (!iexec->execute) | 849 | if (!iexec->execute) |
| 857 | return true; | 850 | return len; |
| 858 | 851 | ||
| 859 | BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, " | 852 | BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, " |
| 860 | "Shift: 0x%02X, Count: 0x%02X, Reg: 0x%08X\n", | 853 | "Shift: 0x%02X, Count: 0x%02X, Reg: 0x%08X\n", |
| @@ -865,7 +858,7 @@ init_io_restrict_prog(struct nvbios *bios, uint16_t offset, | |||
| 865 | NV_ERROR(bios->dev, | 858 | NV_ERROR(bios->dev, |
| 866 | "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n", | 859 | "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n", |
| 867 | offset, config, count); | 860 | offset, config, count); |
| 868 | return false; | 861 | return 0; |
| 869 | } | 862 | } |
| 870 | 863 | ||
| 871 | configval = ROM32(bios->data[offset + 11 + config * 4]); | 864 | configval = ROM32(bios->data[offset + 11 + config * 4]); |
| @@ -874,10 +867,10 @@ init_io_restrict_prog(struct nvbios *bios, uint16_t offset, | |||
| 874 | 867 | ||
| 875 | bios_wr32(bios, reg, configval); | 868 | bios_wr32(bios, reg, configval); |
| 876 | 869 | ||
| 877 | return true; | 870 | return len; |
| 878 | } | 871 | } |
| 879 | 872 | ||
| 880 | static bool | 873 | static int |
| 881 | init_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | 874 | init_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 882 | { | 875 | { |
| 883 | /* | 876 | /* |
| @@ -912,10 +905,10 @@ init_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 912 | 905 | ||
| 913 | iexec->repeat = false; | 906 | iexec->repeat = false; |
| 914 | 907 | ||
| 915 | return true; | 908 | return 2; |
| 916 | } | 909 | } |
| 917 | 910 | ||
| 918 | static bool | 911 | static int |
| 919 | init_io_restrict_pll(struct nvbios *bios, uint16_t offset, | 912 | init_io_restrict_pll(struct nvbios *bios, uint16_t offset, |
| 920 | struct init_exec *iexec) | 913 | struct init_exec *iexec) |
| 921 | { | 914 | { |
| @@ -951,9 +944,10 @@ init_io_restrict_pll(struct nvbios *bios, uint16_t offset, | |||
| 951 | uint32_t reg = ROM32(bios->data[offset + 8]); | 944 | uint32_t reg = ROM32(bios->data[offset + 8]); |
| 952 | uint8_t config; | 945 | uint8_t config; |
| 953 | uint16_t freq; | 946 | uint16_t freq; |
| 947 | int len = 12 + count * 2; | ||
| 954 | 948 | ||
| 955 | if (!iexec->execute) | 949 | if (!iexec->execute) |
| 956 | return true; | 950 | return len; |
| 957 | 951 | ||
| 958 | BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, " | 952 | BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, " |
| 959 | "Shift: 0x%02X, IO Flag Condition: 0x%02X, " | 953 | "Shift: 0x%02X, IO Flag Condition: 0x%02X, " |
| @@ -966,7 +960,7 @@ init_io_restrict_pll(struct nvbios *bios, uint16_t offset, | |||
| 966 | NV_ERROR(bios->dev, | 960 | NV_ERROR(bios->dev, |
| 967 | "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n", | 961 | "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n", |
| 968 | offset, config, count); | 962 | offset, config, count); |
| 969 | return false; | 963 | return 0; |
| 970 | } | 964 | } |
| 971 | 965 | ||
| 972 | freq = ROM16(bios->data[offset + 12 + config * 2]); | 966 | freq = ROM16(bios->data[offset + 12 + config * 2]); |
| @@ -986,10 +980,10 @@ init_io_restrict_pll(struct nvbios *bios, uint16_t offset, | |||
| 986 | 980 | ||
| 987 | setPLL(bios, reg, freq * 10); | 981 | setPLL(bios, reg, freq * 10); |
| 988 | 982 | ||
| 989 | return true; | 983 | return len; |
| 990 | } | 984 | } |
| 991 | 985 | ||
| 992 | static bool | 986 | static int |
| 993 | init_end_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | 987 | init_end_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 994 | { | 988 | { |
| 995 | /* | 989 | /* |
| @@ -1007,12 +1001,12 @@ init_end_repeat(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 1007 | * we're not in repeat mode | 1001 | * we're not in repeat mode |
| 1008 | */ | 1002 | */ |
| 1009 | if (iexec->repeat) | 1003 | if (iexec->repeat) |
| 1010 | return false; | 1004 | return 0; |
| 1011 | 1005 | ||
| 1012 | return true; | 1006 | return 1; |
| 1013 | } | 1007 | } |
| 1014 | 1008 | ||
| 1015 | static bool | 1009 | static int |
| 1016 | init_copy(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | 1010 | init_copy(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 1017 | { | 1011 | { |
| 1018 | /* | 1012 | /* |
| @@ -1041,7 +1035,7 @@ init_copy(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 1041 | uint8_t crtcdata; | 1035 | uint8_t crtcdata; |
| 1042 | 1036 | ||
| 1043 | if (!iexec->execute) | 1037 | if (!iexec->execute) |
| 1044 | return true; | 1038 | return 11; |
| 1045 | 1039 | ||
| 1046 | BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Shift: 0x%02X, SrcMask: 0x%02X, " | 1040 | BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Shift: 0x%02X, SrcMask: 0x%02X, " |
| 1047 | "Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X\n", | 1041 | "Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X\n", |
| @@ -1060,10 +1054,10 @@ init_copy(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 1060 | crtcdata |= (uint8_t)data; | 1054 | crtcdata |= (uint8_t)data; |
| 1061 | bios_idxprt_wr(bios, crtcport, crtcindex, crtcdata); | 1055 | bios_idxprt_wr(bios, crtcport, crtcindex, crtcdata); |
| 1062 | 1056 | ||
| 1063 | return true; | 1057 | return 11; |
| 1064 | } | 1058 | } |
| 1065 | 1059 | ||
| 1066 | static bool | 1060 | static int |
| 1067 | init_not(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | 1061 | init_not(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 1068 | { | 1062 | { |
| 1069 | /* | 1063 | /* |
| @@ -1079,10 +1073,10 @@ init_not(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 1079 | BIOSLOG(bios, "0x%04X: ------ Executing following commands ------\n", offset); | 1073 | BIOSLOG(bios, "0x%04X: ------ Executing following commands ------\n", offset); |
| 1080 | 1074 | ||
| 1081 | iexec->execute = !iexec->execute; | 1075 | iexec->execute = !iexec->execute; |
| 1082 | return true; | 1076 | return 1; |
| 1083 | } | 1077 | } |
| 1084 | 1078 | ||
| 1085 | static bool | 1079 | static int |
| 1086 | init_io_flag_condition(struct nvbios *bios, uint16_t offset, | 1080 | init_io_flag_condition(struct nvbios *bios, uint16_t offset, |
| 1087 | struct init_exec *iexec) | 1081 | struct init_exec *iexec) |
| 1088 | { | 1082 | { |
| @@ -1100,7 +1094,7 @@ init_io_flag_condition(struct nvbios *bios, uint16_t offset, | |||
| 1100 | uint8_t cond = bios->data[offset + 1]; | 1094 | uint8_t cond = bios->data[offset + 1]; |
| 1101 | 1095 | ||
| 1102 | if (!iexec->execute) | 1096 | if (!iexec->execute) |
| 1103 | return true; | 1097 | return 2; |
| 1104 | 1098 | ||
| 1105 | if (io_flag_condition_met(bios, offset, cond)) | 1099 | if (io_flag_condition_met(bios, offset, cond)) |
| 1106 | BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset); | 1100 | BIOSLOG(bios, "0x%04X: Condition fulfilled -- continuing to execute\n", offset); |
| @@ -1109,10 +1103,10 @@ init_io_flag_condition(struct nvbios *bios, uint16_t offset, | |||
| 1109 | iexec->execute = false; | 1103 | iexec->execute = false; |
| 1110 | } | 1104 | } |
| 1111 | 1105 | ||
| 1112 | return true; | 1106 | return 2; |
| 1113 | } | 1107 | } |
| 1114 | 1108 | ||
| 1115 | static bool | 1109 | static int |
| 1116 | init_idx_addr_latched(struct nvbios *bios, uint16_t offset, | 1110 | init_idx_addr_latched(struct nvbios *bios, uint16_t offset, |
| 1117 | struct init_exec *iexec) | 1111 | struct init_exec *iexec) |
| 1118 | { | 1112 | { |
| @@ -1140,11 +1134,12 @@ init_idx_addr_latched(struct nvbios *bios, uint16_t offset, | |||
| 1140 | uint32_t mask = ROM32(bios->data[offset + 9]); | 1134 | uint32_t mask = ROM32(bios->data[offset + 9]); |
| 1141 | uint32_t data = ROM32(bios->data[offset + 13]); | 1135 | uint32_t data = ROM32(bios->data[offset + 13]); |
| 1142 | uint8_t count = bios->data[offset + 17]; | 1136 | uint8_t count = bios->data[offset + 17]; |
| 1137 | int len = 18 + count * 2; | ||
| 1143 | uint32_t value; | 1138 | uint32_t value; |
| 1144 | int i; | 1139 | int i; |
| 1145 | 1140 | ||
| 1146 | if (!iexec->execute) | 1141 | if (!iexec->execute) |
| 1147 | return true; | 1142 | return len; |
| 1148 | 1143 | ||
| 1149 | BIOSLOG(bios, "0x%04X: ControlReg: 0x%08X, DataReg: 0x%08X, " | 1144 | BIOSLOG(bios, "0x%04X: ControlReg: 0x%08X, DataReg: 0x%08X, " |
| 1150 | "Mask: 0x%08X, Data: 0x%08X, Count: 0x%02X\n", | 1145 | "Mask: 0x%08X, Data: 0x%08X, Count: 0x%02X\n", |
| @@ -1164,10 +1159,10 @@ init_idx_addr_latched(struct nvbios *bios, uint16_t offset, | |||
| 1164 | bios_wr32(bios, controlreg, value); | 1159 | bios_wr32(bios, controlreg, value); |
| 1165 | } | 1160 | } |
| 1166 | 1161 | ||
| 1167 | return true; | 1162 | return len; |
| 1168 | } | 1163 | } |
| 1169 | 1164 | ||
| 1170 | static bool | 1165 | static int |
| 1171 | init_io_restrict_pll2(struct nvbios *bios, uint16_t offset, | 1166 | init_io_restrict_pll2(struct nvbios *bios, uint16_t offset, |
| 1172 | struct init_exec *iexec) | 1167 | struct init_exec *iexec) |
| 1173 | { | 1168 | { |
| @@ -1196,25 +1191,26 @@ init_io_restrict_pll2(struct nvbios *bios, uint16_t offset, | |||
| 1196 | uint8_t shift = bios->data[offset + 5]; | 1191 | uint8_t shift = bios->data[offset + 5]; |
| 1197 | uint8_t count = bios->data[offset + 6]; | 1192 | uint8_t count = bios->data[offset + 6]; |
| 1198 | uint32_t reg = ROM32(bios->data[offset + 7]); | 1193 | uint32_t reg = ROM32(bios->data[offset + 7]); |
| 1194 | int len = 11 + count * 4; | ||
| 1199 | uint8_t config; | 1195 | uint8_t config; |
| 1200 | uint32_t freq; | 1196 | uint32_t freq; |
| 1201 | 1197 | ||
| 1202 | if (!iexec->execute) | 1198 | if (!iexec->execute) |
| 1203 | return true; | 1199 | return len; |
| 1204 | 1200 | ||
| 1205 | BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, " | 1201 | BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, " |
| 1206 | "Shift: 0x%02X, Count: 0x%02X, Reg: 0x%08X\n", | 1202 | "Shift: 0x%02X, Count: 0x%02X, Reg: 0x%08X\n", |
| 1207 | offset, crtcport, crtcindex, mask, shift, count, reg); | 1203 | offset, crtcport, crtcindex, mask, shift, count, reg); |
| 1208 | 1204 | ||
| 1209 | if (!reg) | 1205 | if (!reg) |
| 1210 | return true; | 1206 | return len; |
| 1211 | 1207 | ||
| 1212 | config = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) >> shift; | 1208 | config = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) >> shift; |
| 1213 | if (config > count) { | 1209 | if (config > count) { |
| 1214 | NV_ERROR(bios->dev, | 1210 | NV_ERROR(bios->dev, |
| 1215 | "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n", | 1211 | "0x%04X: Config 0x%02X exceeds maximal bound 0x%02X\n", |
| 1216 | offset, config, count); | 1212 | offset, config, count); |
| 1217 | return false; | 1213 | return 0; |
| 1218 | } | 1214 | } |
| 1219 | 1215 | ||
| 1220 | freq = ROM32(bios->data[offset + 11 + config * 4]); | 1216 | freq = ROM32(bios->data[offset + 11 + config * 4]); |
| @@ -1224,10 +1220,10 @@ init_io_restrict_pll2(struct nvbios *bios, uint16_t offset, | |||
| 1224 | 1220 | ||
| 1225 | setPLL(bios, reg, freq); | 1221 | setPLL(bios, reg, freq); |
| 1226 | 1222 | ||
| 1227 | return true; | 1223 | return len; |
| 1228 | } | 1224 | } |
| 1229 | 1225 | ||
| 1230 | static bool | 1226 | static int |
| 1231 | init_pll2(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | 1227 | init_pll2(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 1232 | { | 1228 | { |
| 1233 | /* | 1229 | /* |
| @@ -1244,16 +1240,16 @@ init_pll2(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 1244 | uint32_t freq = ROM32(bios->data[offset + 5]); | 1240 | uint32_t freq = ROM32(bios->data[offset + 5]); |
| 1245 | 1241 | ||
| 1246 | if (!iexec->execute) | 1242 | if (!iexec->execute) |
| 1247 | return true; | 1243 | return 9; |
| 1248 | 1244 | ||
| 1249 | BIOSLOG(bios, "0x%04X: Reg: 0x%04X, Freq: %dkHz\n", | 1245 | BIOSLOG(bios, "0x%04X: Reg: 0x%04X, Freq: %dkHz\n", |
| 1250 | offset, reg, freq); | 1246 | offset, reg, freq); |
| 1251 | 1247 | ||
| 1252 | setPLL(bios, reg, freq); | 1248 | setPLL(bios, reg, freq); |
| 1253 | return true; | 1249 | return 9; |
| 1254 | } | 1250 | } |
| 1255 | 1251 | ||
| 1256 | static bool | 1252 | static int |
| 1257 | init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | 1253 | init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 1258 | { | 1254 | { |
| 1259 | /* | 1255 | /* |
| @@ -1277,12 +1273,13 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 1277 | uint8_t i2c_index = bios->data[offset + 1]; | 1273 | uint8_t i2c_index = bios->data[offset + 1]; |
| 1278 | uint8_t i2c_address = bios->data[offset + 2]; | 1274 | uint8_t i2c_address = bios->data[offset + 2]; |
| 1279 | uint8_t count = bios->data[offset + 3]; | 1275 | uint8_t count = bios->data[offset + 3]; |
| 1276 | int len = 4 + count * 3; | ||
| 1280 | struct nouveau_i2c_chan *chan; | 1277 | struct nouveau_i2c_chan *chan; |
| 1281 | struct i2c_msg msg; | 1278 | struct i2c_msg msg; |
| 1282 | int i; | 1279 | int i; |
| 1283 | 1280 | ||
| 1284 | if (!iexec->execute) | 1281 | if (!iexec->execute) |
| 1285 | return true; | 1282 | return len; |
| 1286 | 1283 | ||
| 1287 | BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, " | 1284 | BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, " |
| 1288 | "Count: 0x%02X\n", | 1285 | "Count: 0x%02X\n", |
| @@ -1290,7 +1287,7 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 1290 | 1287 | ||
| 1291 | chan = init_i2c_device_find(bios->dev, i2c_index); | 1288 | chan = init_i2c_device_find(bios->dev, i2c_index); |
| 1292 | if (!chan) | 1289 | if (!chan) |
| 1293 | return false; | 1290 | return 0; |
| 1294 | 1291 | ||
| 1295 | for (i = 0; i < count; i++) { | 1292 | for (i = 0; i < count; i++) { |
| 1296 | uint8_t i2c_reg = bios->data[offset + 4 + i * 3]; | 1293 | uint8_t i2c_reg = bios->data[offset + 4 + i * 3]; |
| @@ -1303,7 +1300,7 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 1303 | msg.len = 1; | 1300 | msg.len = 1; |
| 1304 | msg.buf = &value; | 1301 | msg.buf = &value; |
| 1305 | if (i2c_transfer(&chan->adapter, &msg, 1) != 1) | 1302 | if (i2c_transfer(&chan->adapter, &msg, 1) != 1) |
| 1306 | return false; | 1303 | return 0; |
| 1307 | 1304 | ||
| 1308 | BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: 0x%02X, " | 1305 | BIOSLOG(bios, "0x%04X: I2CReg: 0x%02X, Value: 0x%02X, " |
| 1309 | "Mask: 0x%02X, Data: 0x%02X\n", | 1306 | "Mask: 0x%02X, Data: 0x%02X\n", |
| @@ -1317,14 +1314,14 @@ init_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 1317 | msg.len = 1; | 1314 | msg.len = 1; |
| 1318 | msg.buf = &value; | 1315 | msg.buf = &value; |
| 1319 | if (i2c_transfer(&chan->adapter, &msg, 1) != 1) | 1316 | if (i2c_transfer(&chan->adapter, &msg, 1) != 1) |
| 1320 | return false; | 1317 | return 0; |
| 1321 | } | 1318 | } |
| 1322 | } | 1319 | } |
| 1323 | 1320 | ||
| 1324 | return true; | 1321 | return len; |
| 1325 | } | 1322 | } |
| 1326 | 1323 | ||
| 1327 | static bool | 1324 | static int |
| 1328 | init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | 1325 | init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 1329 | { | 1326 | { |
| 1330 | /* | 1327 | /* |
| @@ -1346,12 +1343,13 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 1346 | uint8_t i2c_index = bios->data[offset + 1]; | 1343 | uint8_t i2c_index = bios->data[offset + 1]; |
| 1347 | uint8_t i2c_address = bios->data[offset + 2]; | 1344 | uint8_t i2c_address = bios->data[offset + 2]; |
| 1348 | uint8_t count = bios->data[offset + 3]; | 1345 | uint8_t count = bios->data[offset + 3]; |
| 1346 | int len = 4 + count * 2; | ||
| 1349 | struct nouveau_i2c_chan *chan; | 1347 | struct nouveau_i2c_chan *chan; |
| 1350 | struct i2c_msg msg; | 1348 | struct i2c_msg msg; |
| 1351 | int i; | 1349 | int i; |
| 1352 | 1350 | ||
| 1353 | if (!iexec->execute) | 1351 | if (!iexec->execute) |
| 1354 | return true; | 1352 | return len; |
| 1355 | 1353 | ||
| 1356 | BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, " | 1354 | BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, " |
| 1357 | "Count: 0x%02X\n", | 1355 | "Count: 0x%02X\n", |
| @@ -1359,7 +1357,7 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 1359 | 1357 | ||
| 1360 | chan = init_i2c_device_find(bios->dev, i2c_index); | 1358 | chan = init_i2c_device_find(bios->dev, i2c_index); |
| 1361 | if (!chan) | 1359 | if (!chan) |
| 1362 | return false; | 1360 | return 0; |
| 1363 | 1361 | ||
| 1364 | for (i = 0; i < count; i++) { | 1362 | for (i = 0; i < count; i++) { |
| 1365 | uint8_t i2c_reg = bios->data[offset + 4 + i * 2]; | 1363 | uint8_t i2c_reg = bios->data[offset + 4 + i * 2]; |
| @@ -1374,14 +1372,14 @@ init_zm_i2c_byte(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 1374 | msg.len = 1; | 1372 | msg.len = 1; |
| 1375 | msg.buf = &data; | 1373 | msg.buf = &data; |
| 1376 | if (i2c_transfer(&chan->adapter, &msg, 1) != 1) | 1374 | if (i2c_transfer(&chan->adapter, &msg, 1) != 1) |
| 1377 | return false; | 1375 | return 0; |
| 1378 | } | 1376 | } |
| 1379 | } | 1377 | } |
| 1380 | 1378 | ||
| 1381 | return true; | 1379 | return len; |
| 1382 | } | 1380 | } |
| 1383 | 1381 | ||
| 1384 | static bool | 1382 | static int |
| 1385 | init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | 1383 | init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 1386 | { | 1384 | { |
| 1387 | /* | 1385 | /* |
| @@ -1401,13 +1399,14 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 1401 | uint8_t i2c_index = bios->data[offset + 1]; | 1399 | uint8_t i2c_index = bios->data[offset + 1]; |
| 1402 | uint8_t i2c_address = bios->data[offset + 2]; | 1400 | uint8_t i2c_address = bios->data[offset + 2]; |
| 1403 | uint8_t count = bios->data[offset + 3]; | 1401 | uint8_t count = bios->data[offset + 3]; |
| 1402 | int len = 4 + count; | ||
| 1404 | struct nouveau_i2c_chan *chan; | 1403 | struct nouveau_i2c_chan *chan; |
| 1405 | struct i2c_msg msg; | 1404 | struct i2c_msg msg; |
| 1406 | uint8_t data[256]; | 1405 | uint8_t data[256]; |
| 1407 | int i; | 1406 | int i; |
| 1408 | 1407 | ||
| 1409 | if (!iexec->execute) | 1408 | if (!iexec->execute) |
| 1410 | return true; | 1409 | return len; |
| 1411 | 1410 | ||
| 1412 | BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, " | 1411 | BIOSLOG(bios, "0x%04X: DCBI2CIndex: 0x%02X, I2CAddress: 0x%02X, " |
| 1413 | "Count: 0x%02X\n", | 1412 | "Count: 0x%02X\n", |
| @@ -1415,7 +1414,7 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 1415 | 1414 | ||
| 1416 | chan = init_i2c_device_find(bios->dev, i2c_index); | 1415 | chan = init_i2c_device_find(bios->dev, i2c_index); |
| 1417 | if (!chan) | 1416 | if (!chan) |
| 1418 | return false; | 1417 | return 0; |
| 1419 | 1418 | ||
| 1420 | for (i = 0; i < count; i++) { | 1419 | for (i = 0; i < count; i++) { |
| 1421 | data[i] = bios->data[offset + 4 + i]; | 1420 | data[i] = bios->data[offset + 4 + i]; |
| @@ -1429,13 +1428,13 @@ init_zm_i2c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 1429 | msg.len = count; | 1428 | msg.len = count; |
| 1430 | msg.buf = data; | 1429 | msg.buf = data; |
| 1431 | if (i2c_transfer(&chan->adapter, &msg, 1) != 1) | 1430 | if (i2c_transfer(&chan->adapter, &msg, 1) != 1) |
| 1432 | return false; | 1431 | return 0; |
| 1433 | } | 1432 | } |
| 1434 | 1433 | ||
| 1435 | return true; | 1434 | return len; |
| 1436 | } | 1435 | } |
| 1437 | 1436 | ||
| 1438 | static bool | 1437 | static int |
| 1439 | init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | 1438 | init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 1440 | { | 1439 | { |
| 1441 | /* | 1440 | /* |
| @@ -1460,7 +1459,7 @@ init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 1460 | uint32_t reg, value; | 1459 | uint32_t reg, value; |
| 1461 | 1460 | ||
| 1462 | if (!iexec->execute) | 1461 | if (!iexec->execute) |
| 1463 | return true; | 1462 | return 5; |
| 1464 | 1463 | ||
| 1465 | BIOSLOG(bios, "0x%04X: MagicLookupValue: 0x%02X, TMDSAddr: 0x%02X, " | 1464 | BIOSLOG(bios, "0x%04X: MagicLookupValue: 0x%02X, TMDSAddr: 0x%02X, " |
| 1466 | "Mask: 0x%02X, Data: 0x%02X\n", | 1465 | "Mask: 0x%02X, Data: 0x%02X\n", |
| @@ -1468,7 +1467,7 @@ init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 1468 | 1467 | ||
| 1469 | reg = get_tmds_index_reg(bios->dev, mlv); | 1468 | reg = get_tmds_index_reg(bios->dev, mlv); |
| 1470 | if (!reg) | 1469 | if (!reg) |
| 1471 | return false; | 1470 | return 0; |
| 1472 | 1471 | ||
| 1473 | bios_wr32(bios, reg, | 1472 | bios_wr32(bios, reg, |
| 1474 | tmdsaddr | NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE); | 1473 | tmdsaddr | NV_PRAMDAC_FP_TMDS_CONTROL_WRITE_DISABLE); |
| @@ -1476,10 +1475,10 @@ init_tmds(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 1476 | bios_wr32(bios, reg + 4, value); | 1475 | bios_wr32(bios, reg + 4, value); |
| 1477 | bios_wr32(bios, reg, tmdsaddr); | 1476 | bios_wr32(bios, reg, tmdsaddr); |
| 1478 | 1477 | ||
| 1479 | return true; | 1478 | return 5; |
| 1480 | } | 1479 | } |
| 1481 | 1480 | ||
| 1482 | static bool | 1481 | static int |
| 1483 | init_zm_tmds_group(struct nvbios *bios, uint16_t offset, | 1482 | init_zm_tmds_group(struct nvbios *bios, uint16_t offset, |
| 1484 | struct init_exec *iexec) | 1483 | struct init_exec *iexec) |
| 1485 | { | 1484 | { |
| @@ -1500,18 +1499,19 @@ init_zm_tmds_group(struct nvbios *bios, uint16_t offset, | |||
| 1500 | 1499 | ||
| 1501 | uint8_t mlv = bios->data[offset + 1]; | 1500 | uint8_t mlv = bios->data[offset + 1]; |
| 1502 | uint8_t count = bios->data[offset + 2]; | 1501 | uint8_t count = bios->data[offset + 2]; |
| 1502 | int len = 3 + count * 2; | ||
| 1503 | uint32_t reg; | 1503 | uint32_t reg; |
| 1504 | int i; | 1504 | int i; |
| 1505 | 1505 | ||
| 1506 | if (!iexec->execute) | 1506 | if (!iexec->execute) |
| 1507 | return true; | 1507 | return len; |
| 1508 | 1508 | ||
| 1509 | BIOSLOG(bios, "0x%04X: MagicLookupValue: 0x%02X, Count: 0x%02X\n", | 1509 | BIOSLOG(bios, "0x%04X: MagicLookupValue: 0x%02X, Count: 0x%02X\n", |
| 1510 | offset, mlv, count); | 1510 | offset, mlv, count); |
| 1511 | 1511 | ||
| 1512 | reg = get_tmds_index_reg(bios->dev, mlv); | 1512 | reg = get_tmds_index_reg(bios->dev, mlv); |
| 1513 | if (!reg) | 1513 | if (!reg) |
| 1514 | return false; | 1514 | return 0; |
| 1515 | 1515 | ||
| 1516 | for (i = 0; i < count; i++) { | 1516 | for (i = 0; i < count; i++) { |
| 1517 | uint8_t tmdsaddr = bios->data[offset + 3 + i * 2]; | 1517 | uint8_t tmdsaddr = bios->data[offset + 3 + i * 2]; |
| @@ -1521,10 +1521,10 @@ init_zm_tmds_group(struct nvbios *bios, uint16_t offset, | |||
| 1521 | bios_wr32(bios, reg, tmdsaddr); | 1521 | bios_wr32(bios, reg, tmdsaddr); |
| 1522 | } | 1522 | } |
| 1523 | 1523 | ||
| 1524 | return true; | 1524 | return len; |
| 1525 | } | 1525 | } |
| 1526 | 1526 | ||
| 1527 | static bool | 1527 | static int |
| 1528 | init_cr_idx_adr_latch(struct nvbios *bios, uint16_t offset, | 1528 | init_cr_idx_adr_latch(struct nvbios *bios, uint16_t offset, |
| 1529 | struct init_exec *iexec) | 1529 | struct init_exec *iexec) |
| 1530 | { | 1530 | { |
| @@ -1547,11 +1547,12 @@ init_cr_idx_adr_latch(struct nvbios *bios, uint16_t offset, | |||
| 1547 | uint8_t crtcindex2 = bios->data[offset + 2]; | 1547 | uint8_t crtcindex2 = bios->data[offset + 2]; |
| 1548 | uint8_t baseaddr = bios->data[offset + 3]; | 1548 | uint8_t baseaddr = bios->data[offset + 3]; |
| 1549 | uint8_t count = bios->data[offset + 4]; | 1549 | uint8_t count = bios->data[offset + 4]; |
| 1550 | int len = 5 + count; | ||
| 1550 | uint8_t oldaddr, data; | 1551 | uint8_t oldaddr, data; |
| 1551 | int i; | 1552 | int i; |
| 1552 | 1553 | ||
| 1553 | if (!iexec->execute) | 1554 | if (!iexec->execute) |
| 1554 | return true; | 1555 | return len; |
| 1555 | 1556 | ||
| 1556 | BIOSLOG(bios, "0x%04X: Index1: 0x%02X, Index2: 0x%02X, " | 1557 | BIOSLOG(bios, "0x%04X: Index1: 0x%02X, Index2: 0x%02X, " |
| 1557 | "BaseAddr: 0x%02X, Count: 0x%02X\n", | 1558 | "BaseAddr: 0x%02X, Count: 0x%02X\n", |
| @@ -1568,10 +1569,10 @@ init_cr_idx_adr_latch(struct nvbios *bios, uint16_t offset, | |||
| 1568 | 1569 | ||
| 1569 | bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex1, oldaddr); | 1570 | bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex1, oldaddr); |
| 1570 | 1571 | ||
| 1571 | return true; | 1572 | return len; |
| 1572 | } | 1573 | } |
| 1573 | 1574 | ||
| 1574 | static bool | 1575 | static int |
| 1575 | init_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | 1576 | init_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 1576 | { | 1577 | { |
| 1577 | /* | 1578 | /* |
| @@ -1592,7 +1593,7 @@ init_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 1592 | uint8_t value; | 1593 | uint8_t value; |
| 1593 | 1594 | ||
| 1594 | if (!iexec->execute) | 1595 | if (!iexec->execute) |
| 1595 | return true; | 1596 | return 4; |
| 1596 | 1597 | ||
| 1597 | BIOSLOG(bios, "0x%04X: Index: 0x%02X, Mask: 0x%02X, Data: 0x%02X\n", | 1598 | BIOSLOG(bios, "0x%04X: Index: 0x%02X, Mask: 0x%02X, Data: 0x%02X\n", |
| 1598 | offset, crtcindex, mask, data); | 1599 | offset, crtcindex, mask, data); |
| @@ -1601,10 +1602,10 @@ init_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 1601 | value |= data; | 1602 | value |= data; |
| 1602 | bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex, value); | 1603 | bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex, value); |
| 1603 | 1604 | ||
| 1604 | return true; | 1605 | return 4; |
| 1605 | } | 1606 | } |
| 1606 | 1607 | ||
| 1607 | static bool | 1608 | static int |
| 1608 | init_zm_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | 1609 | init_zm_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 1609 | { | 1610 | { |
| 1610 | /* | 1611 | /* |
| @@ -1621,14 +1622,14 @@ init_zm_cr(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 1621 | uint8_t data = bios->data[offset + 2]; | 1622 | uint8_t data = bios->data[offset + 2]; |
| 1622 | 1623 | ||
| 1623 | if (!iexec->execute) | 1624 | if (!iexec->execute) |
| 1624 | return true; | 1625 | return 3; |
| 1625 | 1626 | ||
| 1626 | bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex, data); | 1627 | bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, crtcindex, data); |
| 1627 | 1628 | ||
| 1628 | return true; | 1629 | return 3; |
| 1629 | } | 1630 | } |
| 1630 | 1631 | ||
| 1631 | static bool | 1632 | static int |
| 1632 | init_zm_cr_group(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | 1633 | init_zm_cr_group(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 1633 | { | 1634 | { |
| 1634 | /* | 1635 | /* |
| @@ -1645,18 +1646,19 @@ init_zm_cr_group(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 1645 | */ | 1646 | */ |
| 1646 | 1647 | ||
| 1647 | uint8_t count = bios->data[offset + 1]; | 1648 | uint8_t count = bios->data[offset + 1]; |
| 1649 | int len = 2 + count * 2; | ||
| 1648 | int i; | 1650 | int i; |
| 1649 | 1651 | ||
| 1650 | if (!iexec->execute) | 1652 | if (!iexec->execute) |
| 1651 | return true; | 1653 | return len; |
| 1652 | 1654 | ||
| 1653 | for (i = 0; i < count; i++) | 1655 | for (i = 0; i < count; i++) |
| 1654 | init_zm_cr(bios, offset + 2 + 2 * i - 1, iexec); | 1656 | init_zm_cr(bios, offset + 2 + 2 * i - 1, iexec); |
| 1655 | 1657 | ||
| 1656 | return true; | 1658 | return len; |
| 1657 | } | 1659 | } |
| 1658 | 1660 | ||
| 1659 | static bool | 1661 | static int |
| 1660 | init_condition_time(struct nvbios *bios, uint16_t offset, | 1662 | init_condition_time(struct nvbios *bios, uint16_t offset, |
| 1661 | struct init_exec *iexec) | 1663 | struct init_exec *iexec) |
| 1662 | { | 1664 | { |
| @@ -1680,7 +1682,7 @@ init_condition_time(struct nvbios *bios, uint16_t offset, | |||
| 1680 | unsigned cnt; | 1682 | unsigned cnt; |
| 1681 | 1683 | ||
| 1682 | if (!iexec->execute) | 1684 | if (!iexec->execute) |
| 1683 | return true; | 1685 | return 3; |
| 1684 | 1686 | ||
| 1685 | if (retries > 100) | 1687 | if (retries > 100) |
| 1686 | retries = 100; | 1688 | retries = 100; |
| @@ -1711,10 +1713,10 @@ init_condition_time(struct nvbios *bios, uint16_t offset, | |||
| 1711 | iexec->execute = false; | 1713 | iexec->execute = false; |
| 1712 | } | 1714 | } |
| 1713 | 1715 | ||
| 1714 | return true; | 1716 | return 3; |
| 1715 | } | 1717 | } |
| 1716 | 1718 | ||
| 1717 | static bool | 1719 | static int |
| 1718 | init_zm_reg_sequence(struct nvbios *bios, uint16_t offset, | 1720 | init_zm_reg_sequence(struct nvbios *bios, uint16_t offset, |
| 1719 | struct init_exec *iexec) | 1721 | struct init_exec *iexec) |
| 1720 | { | 1722 | { |
| @@ -1734,10 +1736,11 @@ init_zm_reg_sequence(struct nvbios *bios, uint16_t offset, | |||
| 1734 | 1736 | ||
| 1735 | uint32_t basereg = ROM32(bios->data[offset + 1]); | 1737 | uint32_t basereg = ROM32(bios->data[offset + 1]); |
| 1736 | uint32_t count = bios->data[offset + 5]; | 1738 | uint32_t count = bios->data[offset + 5]; |
| 1739 | int len = 6 + count * 4; | ||
| 1737 | int i; | 1740 | int i; |
| 1738 | 1741 | ||
| 1739 | if (!iexec->execute) | 1742 | if (!iexec->execute) |
| 1740 | return true; | 1743 | return len; |
| 1741 | 1744 | ||
| 1742 | BIOSLOG(bios, "0x%04X: BaseReg: 0x%08X, Count: 0x%02X\n", | 1745 | BIOSLOG(bios, "0x%04X: BaseReg: 0x%08X, Count: 0x%02X\n", |
| 1743 | offset, basereg, count); | 1746 | offset, basereg, count); |
| @@ -1749,10 +1752,10 @@ init_zm_reg_sequence(struct nvbios *bios, uint16_t offset, | |||
| 1749 | bios_wr32(bios, reg, data); | 1752 | bios_wr32(bios, reg, data); |
| 1750 | } | 1753 | } |
| 1751 | 1754 | ||
| 1752 | return true; | 1755 | return len; |
| 1753 | } | 1756 | } |
| 1754 | 1757 | ||
| 1755 | static bool | 1758 | static int |
| 1756 | init_sub_direct(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | 1759 | init_sub_direct(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 1757 | { | 1760 | { |
| 1758 | /* | 1761 | /* |
| @@ -1768,7 +1771,7 @@ init_sub_direct(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 1768 | uint16_t sub_offset = ROM16(bios->data[offset + 1]); | 1771 | uint16_t sub_offset = ROM16(bios->data[offset + 1]); |
| 1769 | 1772 | ||
| 1770 | if (!iexec->execute) | 1773 | if (!iexec->execute) |
| 1771 | return true; | 1774 | return 3; |
| 1772 | 1775 | ||
| 1773 | BIOSLOG(bios, "0x%04X: Executing subroutine at 0x%04X\n", | 1776 | BIOSLOG(bios, "0x%04X: Executing subroutine at 0x%04X\n", |
| 1774 | offset, sub_offset); | 1777 | offset, sub_offset); |
| @@ -1777,10 +1780,10 @@ init_sub_direct(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 1777 | 1780 | ||
| 1778 | BIOSLOG(bios, "0x%04X: End of 0x%04X subroutine\n", offset, sub_offset); | 1781 | BIOSLOG(bios, "0x%04X: End of 0x%04X subroutine\n", offset, sub_offset); |
| 1779 | 1782 | ||
| 1780 | return true; | 1783 | return 3; |
| 1781 | } | 1784 | } |
| 1782 | 1785 | ||
| 1783 | static bool | 1786 | static int |
| 1784 | init_copy_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | 1787 | init_copy_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 1785 | { | 1788 | { |
| 1786 | /* | 1789 | /* |
| @@ -1808,7 +1811,7 @@ init_copy_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 1808 | uint32_t srcvalue, dstvalue; | 1811 | uint32_t srcvalue, dstvalue; |
| 1809 | 1812 | ||
| 1810 | if (!iexec->execute) | 1813 | if (!iexec->execute) |
| 1811 | return true; | 1814 | return 22; |
| 1812 | 1815 | ||
| 1813 | BIOSLOG(bios, "0x%04X: SrcReg: 0x%08X, Shift: 0x%02X, SrcMask: 0x%08X, " | 1816 | BIOSLOG(bios, "0x%04X: SrcReg: 0x%08X, Shift: 0x%02X, SrcMask: 0x%08X, " |
| 1814 | "Xor: 0x%08X, DstReg: 0x%08X, DstMask: 0x%08X\n", | 1817 | "Xor: 0x%08X, DstReg: 0x%08X, DstMask: 0x%08X\n", |
| @@ -1827,10 +1830,10 @@ init_copy_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 1827 | 1830 | ||
| 1828 | bios_wr32(bios, dstreg, dstvalue | srcvalue); | 1831 | bios_wr32(bios, dstreg, dstvalue | srcvalue); |
| 1829 | 1832 | ||
| 1830 | return true; | 1833 | return 22; |
| 1831 | } | 1834 | } |
| 1832 | 1835 | ||
| 1833 | static bool | 1836 | static int |
| 1834 | init_zm_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | 1837 | init_zm_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 1835 | { | 1838 | { |
| 1836 | /* | 1839 | /* |
| @@ -1848,14 +1851,14 @@ init_zm_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 1848 | uint8_t data = bios->data[offset + 4]; | 1851 | uint8_t data = bios->data[offset + 4]; |
| 1849 | 1852 | ||
| 1850 | if (!iexec->execute) | 1853 | if (!iexec->execute) |
| 1851 | return true; | 1854 | return 5; |
| 1852 | 1855 | ||
| 1853 | bios_idxprt_wr(bios, crtcport, crtcindex, data); | 1856 | bios_idxprt_wr(bios, crtcport, crtcindex, data); |
| 1854 | 1857 | ||
| 1855 | return true; | 1858 | return 5; |
| 1856 | } | 1859 | } |
| 1857 | 1860 | ||
| 1858 | static bool | 1861 | static int |
| 1859 | init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | 1862 | init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 1860 | { | 1863 | { |
| 1861 | /* | 1864 | /* |
| @@ -1904,7 +1907,7 @@ init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 1904 | struct drm_nouveau_private *dev_priv = bios->dev->dev_private; | 1907 | struct drm_nouveau_private *dev_priv = bios->dev->dev_private; |
| 1905 | 1908 | ||
| 1906 | if (dev_priv->card_type >= NV_50) | 1909 | if (dev_priv->card_type >= NV_50) |
| 1907 | return true; | 1910 | return 1; |
| 1908 | 1911 | ||
| 1909 | /* | 1912 | /* |
| 1910 | * On every card I've seen, this step gets done for us earlier in | 1913 | * On every card I've seen, this step gets done for us earlier in |
| @@ -1922,10 +1925,10 @@ init_compute_mem(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 1922 | /* write back the saved configuration value */ | 1925 | /* write back the saved configuration value */ |
| 1923 | bios_wr32(bios, NV_PFB_CFG0, bios->state.saved_nv_pfb_cfg0); | 1926 | bios_wr32(bios, NV_PFB_CFG0, bios->state.saved_nv_pfb_cfg0); |
| 1924 | 1927 | ||
| 1925 | return true; | 1928 | return 1; |
| 1926 | } | 1929 | } |
| 1927 | 1930 | ||
| 1928 | static bool | 1931 | static int |
| 1929 | init_reset(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | 1932 | init_reset(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 1930 | { | 1933 | { |
| 1931 | /* | 1934 | /* |
| @@ -1959,10 +1962,10 @@ init_reset(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 1959 | pci_nv_20 &= ~NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED; /* 0xfffffffe */ | 1962 | pci_nv_20 &= ~NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED; /* 0xfffffffe */ |
| 1960 | bios_wr32(bios, NV_PBUS_PCI_NV_20, pci_nv_20); | 1963 | bios_wr32(bios, NV_PBUS_PCI_NV_20, pci_nv_20); |
| 1961 | 1964 | ||
| 1962 | return true; | 1965 | return 13; |
| 1963 | } | 1966 | } |
| 1964 | 1967 | ||
| 1965 | static bool | 1968 | static int |
| 1966 | init_configure_mem(struct nvbios *bios, uint16_t offset, | 1969 | init_configure_mem(struct nvbios *bios, uint16_t offset, |
| 1967 | struct init_exec *iexec) | 1970 | struct init_exec *iexec) |
| 1968 | { | 1971 | { |
| @@ -1983,7 +1986,7 @@ init_configure_mem(struct nvbios *bios, uint16_t offset, | |||
| 1983 | uint32_t reg, data; | 1986 | uint32_t reg, data; |
| 1984 | 1987 | ||
| 1985 | if (bios->major_version > 2) | 1988 | if (bios->major_version > 2) |
| 1986 | return false; | 1989 | return 0; |
| 1987 | 1990 | ||
| 1988 | bios_idxprt_wr(bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX, bios_idxprt_rd( | 1991 | bios_idxprt_wr(bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX, bios_idxprt_rd( |
| 1989 | bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX) | 0x20); | 1992 | bios, NV_VIO_SRX, NV_VIO_SR_CLOCK_INDEX) | 0x20); |
| @@ -2015,10 +2018,10 @@ init_configure_mem(struct nvbios *bios, uint16_t offset, | |||
| 2015 | bios_wr32(bios, reg, data); | 2018 | bios_wr32(bios, reg, data); |
| 2016 | } | 2019 | } |
| 2017 | 2020 | ||
| 2018 | return true; | 2021 | return 1; |
| 2019 | } | 2022 | } |
| 2020 | 2023 | ||
| 2021 | static bool | 2024 | static int |
| 2022 | init_configure_clk(struct nvbios *bios, uint16_t offset, | 2025 | init_configure_clk(struct nvbios *bios, uint16_t offset, |
| 2023 | struct init_exec *iexec) | 2026 | struct init_exec *iexec) |
| 2024 | { | 2027 | { |
| @@ -2038,7 +2041,7 @@ init_configure_clk(struct nvbios *bios, uint16_t offset, | |||
| 2038 | int clock; | 2041 | int clock; |
| 2039 | 2042 | ||
| 2040 | if (bios->major_version > 2) | 2043 | if (bios->major_version > 2) |
| 2041 | return false; | 2044 | return 0; |
| 2042 | 2045 | ||
| 2043 | clock = ROM16(bios->data[meminitoffs + 4]) * 10; | 2046 | clock = ROM16(bios->data[meminitoffs + 4]) * 10; |
| 2044 | setPLL(bios, NV_PRAMDAC_NVPLL_COEFF, clock); | 2047 | setPLL(bios, NV_PRAMDAC_NVPLL_COEFF, clock); |
| @@ -2048,10 +2051,10 @@ init_configure_clk(struct nvbios *bios, uint16_t offset, | |||
| 2048 | clock *= 2; | 2051 | clock *= 2; |
| 2049 | setPLL(bios, NV_PRAMDAC_MPLL_COEFF, clock); | 2052 | setPLL(bios, NV_PRAMDAC_MPLL_COEFF, clock); |
| 2050 | 2053 | ||
| 2051 | return true; | 2054 | return 1; |
| 2052 | } | 2055 | } |
| 2053 | 2056 | ||
| 2054 | static bool | 2057 | static int |
| 2055 | init_configure_preinit(struct nvbios *bios, uint16_t offset, | 2058 | init_configure_preinit(struct nvbios *bios, uint16_t offset, |
| 2056 | struct init_exec *iexec) | 2059 | struct init_exec *iexec) |
| 2057 | { | 2060 | { |
| @@ -2071,15 +2074,15 @@ init_configure_preinit(struct nvbios *bios, uint16_t offset, | |||
| 2071 | uint8_t cr3c = ((straps << 2) & 0xf0) | (straps & (1 << 6)); | 2074 | uint8_t cr3c = ((straps << 2) & 0xf0) | (straps & (1 << 6)); |
| 2072 | 2075 | ||
| 2073 | if (bios->major_version > 2) | 2076 | if (bios->major_version > 2) |
| 2074 | return false; | 2077 | return 0; |
| 2075 | 2078 | ||
| 2076 | bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, | 2079 | bios_idxprt_wr(bios, NV_CIO_CRX__COLOR, |
| 2077 | NV_CIO_CRE_SCRATCH4__INDEX, cr3c); | 2080 | NV_CIO_CRE_SCRATCH4__INDEX, cr3c); |
| 2078 | 2081 | ||
| 2079 | return true; | 2082 | return 1; |
| 2080 | } | 2083 | } |
| 2081 | 2084 | ||
| 2082 | static bool | 2085 | static int |
| 2083 | init_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | 2086 | init_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 2084 | { | 2087 | { |
| 2085 | /* | 2088 | /* |
| @@ -2099,7 +2102,7 @@ init_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 2099 | uint8_t data = bios->data[offset + 4]; | 2102 | uint8_t data = bios->data[offset + 4]; |
| 2100 | 2103 | ||
| 2101 | if (!iexec->execute) | 2104 | if (!iexec->execute) |
| 2102 | return true; | 2105 | return 5; |
| 2103 | 2106 | ||
| 2104 | BIOSLOG(bios, "0x%04X: Port: 0x%04X, Mask: 0x%02X, Data: 0x%02X\n", | 2107 | BIOSLOG(bios, "0x%04X: Port: 0x%04X, Mask: 0x%02X, Data: 0x%02X\n", |
| 2105 | offset, crtcport, mask, data); | 2108 | offset, crtcport, mask, data); |
| @@ -2158,15 +2161,15 @@ init_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 2158 | for (i = 0; i < 2; i++) | 2161 | for (i = 0; i < 2; i++) |
| 2159 | bios_wr32(bios, 0x614108 + (i*0x800), bios_rd32( | 2162 | bios_wr32(bios, 0x614108 + (i*0x800), bios_rd32( |
| 2160 | bios, 0x614108 + (i*0x800)) & 0x0fffffff); | 2163 | bios, 0x614108 + (i*0x800)) & 0x0fffffff); |
| 2161 | return true; | 2164 | return 5; |
| 2162 | } | 2165 | } |
| 2163 | 2166 | ||
| 2164 | bios_port_wr(bios, crtcport, (bios_port_rd(bios, crtcport) & mask) | | 2167 | bios_port_wr(bios, crtcport, (bios_port_rd(bios, crtcport) & mask) | |
| 2165 | data); | 2168 | data); |
| 2166 | return true; | 2169 | return 5; |
| 2167 | } | 2170 | } |
| 2168 | 2171 | ||
| 2169 | static bool | 2172 | static int |
| 2170 | init_sub(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | 2173 | init_sub(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 2171 | { | 2174 | { |
| 2172 | /* | 2175 | /* |
| @@ -2181,7 +2184,7 @@ init_sub(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 2181 | uint8_t sub = bios->data[offset + 1]; | 2184 | uint8_t sub = bios->data[offset + 1]; |
| 2182 | 2185 | ||
| 2183 | if (!iexec->execute) | 2186 | if (!iexec->execute) |
| 2184 | return true; | 2187 | return 2; |
| 2185 | 2188 | ||
| 2186 | BIOSLOG(bios, "0x%04X: Calling script %d\n", offset, sub); | 2189 | BIOSLOG(bios, "0x%04X: Calling script %d\n", offset, sub); |
| 2187 | 2190 | ||
| @@ -2191,10 +2194,10 @@ init_sub(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 2191 | 2194 | ||
| 2192 | BIOSLOG(bios, "0x%04X: End of script %d\n", offset, sub); | 2195 | BIOSLOG(bios, "0x%04X: End of script %d\n", offset, sub); |
| 2193 | 2196 | ||
| 2194 | return true; | 2197 | return 2; |
| 2195 | } | 2198 | } |
| 2196 | 2199 | ||
| 2197 | static bool | 2200 | static int |
| 2198 | init_ram_condition(struct nvbios *bios, uint16_t offset, | 2201 | init_ram_condition(struct nvbios *bios, uint16_t offset, |
| 2199 | struct init_exec *iexec) | 2202 | struct init_exec *iexec) |
| 2200 | { | 2203 | { |
| @@ -2215,7 +2218,7 @@ init_ram_condition(struct nvbios *bios, uint16_t offset, | |||
| 2215 | uint8_t data; | 2218 | uint8_t data; |
| 2216 | 2219 | ||
| 2217 | if (!iexec->execute) | 2220 | if (!iexec->execute) |
| 2218 | return true; | 2221 | return 3; |
| 2219 | 2222 | ||
| 2220 | data = bios_rd32(bios, NV_PFB_BOOT_0) & mask; | 2223 | data = bios_rd32(bios, NV_PFB_BOOT_0) & mask; |
| 2221 | 2224 | ||
| @@ -2229,10 +2232,10 @@ init_ram_condition(struct nvbios *bios, uint16_t offset, | |||
| 2229 | iexec->execute = false; | 2232 | iexec->execute = false; |
| 2230 | } | 2233 | } |
| 2231 | 2234 | ||
| 2232 | return true; | 2235 | return 3; |
| 2233 | } | 2236 | } |
| 2234 | 2237 | ||
| 2235 | static bool | 2238 | static int |
| 2236 | init_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | 2239 | init_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 2237 | { | 2240 | { |
| 2238 | /* | 2241 | /* |
| @@ -2251,17 +2254,17 @@ init_nv_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 2251 | uint32_t data = ROM32(bios->data[offset + 9]); | 2254 | uint32_t data = ROM32(bios->data[offset + 9]); |
| 2252 | 2255 | ||
| 2253 | if (!iexec->execute) | 2256 | if (!iexec->execute) |
| 2254 | return true; | 2257 | return 13; |
| 2255 | 2258 | ||
| 2256 | BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Mask: 0x%08X, Data: 0x%08X\n", | 2259 | BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Mask: 0x%08X, Data: 0x%08X\n", |
| 2257 | offset, reg, mask, data); | 2260 | offset, reg, mask, data); |
| 2258 | 2261 | ||
| 2259 | bios_wr32(bios, reg, (bios_rd32(bios, reg) & mask) | data); | 2262 | bios_wr32(bios, reg, (bios_rd32(bios, reg) & mask) | data); |
| 2260 | 2263 | ||
| 2261 | return true; | 2264 | return 13; |
| 2262 | } | 2265 | } |
| 2263 | 2266 | ||
| 2264 | static bool | 2267 | static int |
| 2265 | init_macro(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | 2268 | init_macro(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 2266 | { | 2269 | { |
| 2267 | /* | 2270 | /* |
| @@ -2285,7 +2288,7 @@ init_macro(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 2285 | int i; | 2288 | int i; |
| 2286 | 2289 | ||
| 2287 | if (!iexec->execute) | 2290 | if (!iexec->execute) |
| 2288 | return true; | 2291 | return 2; |
| 2289 | 2292 | ||
| 2290 | BIOSLOG(bios, "0x%04X: Macro: 0x%02X, MacroTableIndex: 0x%02X, " | 2293 | BIOSLOG(bios, "0x%04X: Macro: 0x%02X, MacroTableIndex: 0x%02X, " |
| 2291 | "Count: 0x%02X\n", | 2294 | "Count: 0x%02X\n", |
| @@ -2300,10 +2303,10 @@ init_macro(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 2300 | bios_wr32(bios, reg, data); | 2303 | bios_wr32(bios, reg, data); |
| 2301 | } | 2304 | } |
| 2302 | 2305 | ||
| 2303 | return true; | 2306 | return 2; |
| 2304 | } | 2307 | } |
| 2305 | 2308 | ||
| 2306 | static bool | 2309 | static int |
| 2307 | init_done(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | 2310 | init_done(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 2308 | { | 2311 | { |
| 2309 | /* | 2312 | /* |
| @@ -2315,10 +2318,10 @@ init_done(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 2315 | */ | 2318 | */ |
| 2316 | 2319 | ||
| 2317 | /* mild retval abuse to stop parsing this table */ | 2320 | /* mild retval abuse to stop parsing this table */ |
| 2318 | return false; | 2321 | return 0; |
| 2319 | } | 2322 | } |
| 2320 | 2323 | ||
| 2321 | static bool | 2324 | static int |
| 2322 | init_resume(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | 2325 | init_resume(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 2323 | { | 2326 | { |
| 2324 | /* | 2327 | /* |
| @@ -2330,15 +2333,15 @@ init_resume(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 2330 | */ | 2333 | */ |
| 2331 | 2334 | ||
| 2332 | if (iexec->execute) | 2335 | if (iexec->execute) |
| 2333 | return true; | 2336 | return 1; |
| 2334 | 2337 | ||
| 2335 | iexec->execute = true; | 2338 | iexec->execute = true; |
| 2336 | BIOSLOG(bios, "0x%04X: ---- Executing following commands ----\n", offset); | 2339 | BIOSLOG(bios, "0x%04X: ---- Executing following commands ----\n", offset); |
| 2337 | 2340 | ||
| 2338 | return true; | 2341 | return 1; |
| 2339 | } | 2342 | } |
| 2340 | 2343 | ||
| 2341 | static bool | 2344 | static int |
| 2342 | init_time(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | 2345 | init_time(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 2343 | { | 2346 | { |
| 2344 | /* | 2347 | /* |
| @@ -2353,7 +2356,7 @@ init_time(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 2353 | unsigned time = ROM16(bios->data[offset + 1]); | 2356 | unsigned time = ROM16(bios->data[offset + 1]); |
| 2354 | 2357 | ||
| 2355 | if (!iexec->execute) | 2358 | if (!iexec->execute) |
| 2356 | return true; | 2359 | return 3; |
| 2357 | 2360 | ||
| 2358 | BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X microseconds\n", | 2361 | BIOSLOG(bios, "0x%04X: Sleeping for 0x%04X microseconds\n", |
| 2359 | offset, time); | 2362 | offset, time); |
| @@ -2363,10 +2366,10 @@ init_time(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 2363 | else | 2366 | else |
| 2364 | msleep((time + 900) / 1000); | 2367 | msleep((time + 900) / 1000); |
| 2365 | 2368 | ||
| 2366 | return true; | 2369 | return 3; |
| 2367 | } | 2370 | } |
| 2368 | 2371 | ||
| 2369 | static bool | 2372 | static int |
| 2370 | init_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | 2373 | init_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 2371 | { | 2374 | { |
| 2372 | /* | 2375 | /* |
| @@ -2383,7 +2386,7 @@ init_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 2383 | uint8_t cond = bios->data[offset + 1]; | 2386 | uint8_t cond = bios->data[offset + 1]; |
| 2384 | 2387 | ||
| 2385 | if (!iexec->execute) | 2388 | if (!iexec->execute) |
| 2386 | return true; | 2389 | return 2; |
| 2387 | 2390 | ||
| 2388 | BIOSLOG(bios, "0x%04X: Condition: 0x%02X\n", offset, cond); | 2391 | BIOSLOG(bios, "0x%04X: Condition: 0x%02X\n", offset, cond); |
| 2389 | 2392 | ||
| @@ -2394,10 +2397,10 @@ init_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 2394 | iexec->execute = false; | 2397 | iexec->execute = false; |
| 2395 | } | 2398 | } |
| 2396 | 2399 | ||
| 2397 | return true; | 2400 | return 2; |
| 2398 | } | 2401 | } |
| 2399 | 2402 | ||
| 2400 | static bool | 2403 | static int |
| 2401 | init_io_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | 2404 | init_io_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 2402 | { | 2405 | { |
| 2403 | /* | 2406 | /* |
| @@ -2414,7 +2417,7 @@ init_io_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 2414 | uint8_t cond = bios->data[offset + 1]; | 2417 | uint8_t cond = bios->data[offset + 1]; |
| 2415 | 2418 | ||
| 2416 | if (!iexec->execute) | 2419 | if (!iexec->execute) |
| 2417 | return true; | 2420 | return 2; |
| 2418 | 2421 | ||
| 2419 | BIOSLOG(bios, "0x%04X: IO condition: 0x%02X\n", offset, cond); | 2422 | BIOSLOG(bios, "0x%04X: IO condition: 0x%02X\n", offset, cond); |
| 2420 | 2423 | ||
| @@ -2425,10 +2428,10 @@ init_io_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 2425 | iexec->execute = false; | 2428 | iexec->execute = false; |
| 2426 | } | 2429 | } |
| 2427 | 2430 | ||
| 2428 | return true; | 2431 | return 2; |
| 2429 | } | 2432 | } |
| 2430 | 2433 | ||
| 2431 | static bool | 2434 | static int |
| 2432 | init_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | 2435 | init_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 2433 | { | 2436 | { |
| 2434 | /* | 2437 | /* |
| @@ -2451,7 +2454,7 @@ init_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 2451 | uint8_t value; | 2454 | uint8_t value; |
| 2452 | 2455 | ||
| 2453 | if (!iexec->execute) | 2456 | if (!iexec->execute) |
| 2454 | return true; | 2457 | return 6; |
| 2455 | 2458 | ||
| 2456 | BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, " | 2459 | BIOSLOG(bios, "0x%04X: Port: 0x%04X, Index: 0x%02X, Mask: 0x%02X, " |
| 2457 | "Data: 0x%02X\n", | 2460 | "Data: 0x%02X\n", |
| @@ -2460,10 +2463,10 @@ init_index_io(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 2460 | value = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) | data; | 2463 | value = (bios_idxprt_rd(bios, crtcport, crtcindex) & mask) | data; |
| 2461 | bios_idxprt_wr(bios, crtcport, crtcindex, value); | 2464 | bios_idxprt_wr(bios, crtcport, crtcindex, value); |
| 2462 | 2465 | ||
| 2463 | return true; | 2466 | return 6; |
| 2464 | } | 2467 | } |
| 2465 | 2468 | ||
| 2466 | static bool | 2469 | static int |
| 2467 | init_pll(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | 2470 | init_pll(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 2468 | { | 2471 | { |
| 2469 | /* | 2472 | /* |
| @@ -2481,16 +2484,16 @@ init_pll(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 2481 | uint16_t freq = ROM16(bios->data[offset + 5]); | 2484 | uint16_t freq = ROM16(bios->data[offset + 5]); |
| 2482 | 2485 | ||
| 2483 | if (!iexec->execute) | 2486 | if (!iexec->execute) |
| 2484 | return true; | 2487 | return 7; |
| 2485 | 2488 | ||
| 2486 | BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Freq: %d0kHz\n", offset, reg, freq); | 2489 | BIOSLOG(bios, "0x%04X: Reg: 0x%08X, Freq: %d0kHz\n", offset, reg, freq); |
| 2487 | 2490 | ||
| 2488 | setPLL(bios, reg, freq * 10); | 2491 | setPLL(bios, reg, freq * 10); |
| 2489 | 2492 | ||
| 2490 | return true; | 2493 | return 7; |
| 2491 | } | 2494 | } |
| 2492 | 2495 | ||
| 2493 | static bool | 2496 | static int |
| 2494 | init_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | 2497 | init_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 2495 | { | 2498 | { |
| 2496 | /* | 2499 | /* |
| @@ -2507,17 +2510,17 @@ init_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 2507 | uint32_t value = ROM32(bios->data[offset + 5]); | 2510 | uint32_t value = ROM32(bios->data[offset + 5]); |
| 2508 | 2511 | ||
| 2509 | if (!iexec->execute) | 2512 | if (!iexec->execute) |
| 2510 | return true; | 2513 | return 9; |
| 2511 | 2514 | ||
| 2512 | if (reg == 0x000200) | 2515 | if (reg == 0x000200) |
| 2513 | value |= 1; | 2516 | value |= 1; |
| 2514 | 2517 | ||
| 2515 | bios_wr32(bios, reg, value); | 2518 | bios_wr32(bios, reg, value); |
| 2516 | 2519 | ||
| 2517 | return true; | 2520 | return 9; |
| 2518 | } | 2521 | } |
| 2519 | 2522 | ||
| 2520 | static bool | 2523 | static int |
| 2521 | init_ram_restrict_pll(struct nvbios *bios, uint16_t offset, | 2524 | init_ram_restrict_pll(struct nvbios *bios, uint16_t offset, |
| 2522 | struct init_exec *iexec) | 2525 | struct init_exec *iexec) |
| 2523 | { | 2526 | { |
| @@ -2543,14 +2546,15 @@ init_ram_restrict_pll(struct nvbios *bios, uint16_t offset, | |||
| 2543 | uint8_t type = bios->data[offset + 1]; | 2546 | uint8_t type = bios->data[offset + 1]; |
| 2544 | uint32_t freq = ROM32(bios->data[offset + 2 + (index * 4)]); | 2547 | uint32_t freq = ROM32(bios->data[offset + 2 + (index * 4)]); |
| 2545 | uint8_t *pll_limits = &bios->data[bios->pll_limit_tbl_ptr], *entry; | 2548 | uint8_t *pll_limits = &bios->data[bios->pll_limit_tbl_ptr], *entry; |
| 2549 | int len = 2 + bios->ram_restrict_group_count * 4; | ||
| 2546 | int i; | 2550 | int i; |
| 2547 | 2551 | ||
| 2548 | if (!iexec->execute) | 2552 | if (!iexec->execute) |
| 2549 | return true; | 2553 | return len; |
| 2550 | 2554 | ||
| 2551 | if (!bios->pll_limit_tbl_ptr || (pll_limits[0] & 0xf0) != 0x30) { | 2555 | if (!bios->pll_limit_tbl_ptr || (pll_limits[0] & 0xf0) != 0x30) { |
| 2552 | NV_ERROR(dev, "PLL limits table not version 3.x\n"); | 2556 | NV_ERROR(dev, "PLL limits table not version 3.x\n"); |
| 2553 | return true; /* deliberate, allow default clocks to remain */ | 2557 | return len; /* deliberate, allow default clocks to remain */ |
| 2554 | } | 2558 | } |
| 2555 | 2559 | ||
| 2556 | entry = pll_limits + pll_limits[1]; | 2560 | entry = pll_limits + pll_limits[1]; |
| @@ -2563,15 +2567,15 @@ init_ram_restrict_pll(struct nvbios *bios, uint16_t offset, | |||
| 2563 | offset, type, reg, freq); | 2567 | offset, type, reg, freq); |
| 2564 | 2568 | ||
| 2565 | setPLL(bios, reg, freq); | 2569 | setPLL(bios, reg, freq); |
| 2566 | return true; | 2570 | return len; |
| 2567 | } | 2571 | } |
| 2568 | } | 2572 | } |
| 2569 | 2573 | ||
| 2570 | NV_ERROR(dev, "PLL type 0x%02x not found in PLL limits table", type); | 2574 | NV_ERROR(dev, "PLL type 0x%02x not found in PLL limits table", type); |
| 2571 | return true; | 2575 | return len; |
| 2572 | } | 2576 | } |
| 2573 | 2577 | ||
| 2574 | static bool | 2578 | static int |
| 2575 | init_8c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | 2579 | init_8c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 2576 | { | 2580 | { |
| 2577 | /* | 2581 | /* |
| @@ -2581,10 +2585,10 @@ init_8c(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 2581 | * | 2585 | * |
| 2582 | */ | 2586 | */ |
| 2583 | 2587 | ||
| 2584 | return true; | 2588 | return 1; |
| 2585 | } | 2589 | } |
| 2586 | 2590 | ||
| 2587 | static bool | 2591 | static int |
| 2588 | init_8d(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | 2592 | init_8d(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 2589 | { | 2593 | { |
| 2590 | /* | 2594 | /* |
| @@ -2594,10 +2598,10 @@ init_8d(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 2594 | * | 2598 | * |
| 2595 | */ | 2599 | */ |
| 2596 | 2600 | ||
| 2597 | return true; | 2601 | return 1; |
| 2598 | } | 2602 | } |
| 2599 | 2603 | ||
| 2600 | static bool | 2604 | static int |
| 2601 | init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | 2605 | init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 2602 | { | 2606 | { |
| 2603 | /* | 2607 | /* |
| @@ -2615,14 +2619,17 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 2615 | const uint8_t *gpio_entry; | 2619 | const uint8_t *gpio_entry; |
| 2616 | int i; | 2620 | int i; |
| 2617 | 2621 | ||
| 2622 | if (!iexec->execute) | ||
| 2623 | return 1; | ||
| 2624 | |||
| 2618 | if (bios->bdcb.version != 0x40) { | 2625 | if (bios->bdcb.version != 0x40) { |
| 2619 | NV_ERROR(bios->dev, "DCB table not version 4.0\n"); | 2626 | NV_ERROR(bios->dev, "DCB table not version 4.0\n"); |
| 2620 | return false; | 2627 | return 0; |
| 2621 | } | 2628 | } |
| 2622 | 2629 | ||
| 2623 | if (!bios->bdcb.gpio_table_ptr) { | 2630 | if (!bios->bdcb.gpio_table_ptr) { |
| 2624 | NV_WARN(bios->dev, "Invalid pointer to INIT_8E table\n"); | 2631 | NV_WARN(bios->dev, "Invalid pointer to INIT_8E table\n"); |
| 2625 | return false; | 2632 | return 0; |
| 2626 | } | 2633 | } |
| 2627 | 2634 | ||
| 2628 | gpio_entry = gpio_table + gpio_table[1]; | 2635 | gpio_entry = gpio_table + gpio_table[1]; |
| @@ -2660,13 +2667,10 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 2660 | bios_wr32(bios, r, v); | 2667 | bios_wr32(bios, r, v); |
| 2661 | } | 2668 | } |
| 2662 | 2669 | ||
| 2663 | return true; | 2670 | return 1; |
| 2664 | } | 2671 | } |
| 2665 | 2672 | ||
| 2666 | /* hack to avoid moving the itbl_entry array before this function */ | 2673 | static int |
| 2667 | int init_ram_restrict_zm_reg_group_blocklen; | ||
| 2668 | |||
| 2669 | static bool | ||
| 2670 | init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset, | 2674 | init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset, |
| 2671 | struct init_exec *iexec) | 2675 | struct init_exec *iexec) |
| 2672 | { | 2676 | { |
| @@ -2692,21 +2696,21 @@ init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset, | |||
| 2692 | uint8_t regincrement = bios->data[offset + 5]; | 2696 | uint8_t regincrement = bios->data[offset + 5]; |
| 2693 | uint8_t count = bios->data[offset + 6]; | 2697 | uint8_t count = bios->data[offset + 6]; |
| 2694 | uint32_t strap_ramcfg, data; | 2698 | uint32_t strap_ramcfg, data; |
| 2695 | uint16_t blocklen; | 2699 | /* previously set by 'M' BIT table */ |
| 2700 | uint16_t blocklen = bios->ram_restrict_group_count * 4; | ||
| 2701 | int len = 7 + count * blocklen; | ||
| 2696 | uint8_t index; | 2702 | uint8_t index; |
| 2697 | int i; | 2703 | int i; |
| 2698 | 2704 | ||
| 2699 | /* previously set by 'M' BIT table */ | ||
| 2700 | blocklen = init_ram_restrict_zm_reg_group_blocklen; | ||
| 2701 | 2705 | ||
| 2702 | if (!iexec->execute) | 2706 | if (!iexec->execute) |
| 2703 | return true; | 2707 | return len; |
| 2704 | 2708 | ||
| 2705 | if (!blocklen) { | 2709 | if (!blocklen) { |
| 2706 | NV_ERROR(bios->dev, | 2710 | NV_ERROR(bios->dev, |
| 2707 | "0x%04X: Zero block length - has the M table " | 2711 | "0x%04X: Zero block length - has the M table " |
| 2708 | "been parsed?\n", offset); | 2712 | "been parsed?\n", offset); |
| 2709 | return false; | 2713 | return 0; |
| 2710 | } | 2714 | } |
| 2711 | 2715 | ||
| 2712 | strap_ramcfg = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 2) & 0xf; | 2716 | strap_ramcfg = (bios_rd32(bios, NV_PEXTDEV_BOOT_0) >> 2) & 0xf; |
| @@ -2724,10 +2728,10 @@ init_ram_restrict_zm_reg_group(struct nvbios *bios, uint16_t offset, | |||
| 2724 | reg += regincrement; | 2728 | reg += regincrement; |
| 2725 | } | 2729 | } |
| 2726 | 2730 | ||
| 2727 | return true; | 2731 | return len; |
| 2728 | } | 2732 | } |
| 2729 | 2733 | ||
| 2730 | static bool | 2734 | static int |
| 2731 | init_copy_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | 2735 | init_copy_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 2732 | { | 2736 | { |
| 2733 | /* | 2737 | /* |
| @@ -2744,14 +2748,14 @@ init_copy_zm_reg(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 2744 | uint32_t dstreg = ROM32(bios->data[offset + 5]); | 2748 | uint32_t dstreg = ROM32(bios->data[offset + 5]); |
| 2745 | 2749 | ||
| 2746 | if (!iexec->execute) | 2750 | if (!iexec->execute) |
| 2747 | return true; | 2751 | return 9; |
| 2748 | 2752 | ||
| 2749 | bios_wr32(bios, dstreg, bios_rd32(bios, srcreg)); | 2753 | bios_wr32(bios, dstreg, bios_rd32(bios, srcreg)); |
| 2750 | 2754 | ||
| 2751 | return true; | 2755 | return 9; |
| 2752 | } | 2756 | } |
| 2753 | 2757 | ||
| 2754 | static bool | 2758 | static int |
| 2755 | init_zm_reg_group_addr_latched(struct nvbios *bios, uint16_t offset, | 2759 | init_zm_reg_group_addr_latched(struct nvbios *bios, uint16_t offset, |
| 2756 | struct init_exec *iexec) | 2760 | struct init_exec *iexec) |
| 2757 | { | 2761 | { |
| @@ -2769,20 +2773,21 @@ init_zm_reg_group_addr_latched(struct nvbios *bios, uint16_t offset, | |||
| 2769 | 2773 | ||
| 2770 | uint32_t reg = ROM32(bios->data[offset + 1]); | 2774 | uint32_t reg = ROM32(bios->data[offset + 1]); |
| 2771 | uint8_t count = bios->data[offset + 5]; | 2775 | uint8_t count = bios->data[offset + 5]; |
| 2776 | int len = 6 + count * 4; | ||
| 2772 | int i; | 2777 | int i; |
| 2773 | 2778 | ||
| 2774 | if (!iexec->execute) | 2779 | if (!iexec->execute) |
| 2775 | return true; | 2780 | return len; |
| 2776 | 2781 | ||
| 2777 | for (i = 0; i < count; i++) { | 2782 | for (i = 0; i < count; i++) { |
| 2778 | uint32_t data = ROM32(bios->data[offset + 6 + 4 * i]); | 2783 | uint32_t data = ROM32(bios->data[offset + 6 + 4 * i]); |
| 2779 | bios_wr32(bios, reg, data); | 2784 | bios_wr32(bios, reg, data); |
| 2780 | } | 2785 | } |
| 2781 | 2786 | ||
| 2782 | return true; | 2787 | return len; |
| 2783 | } | 2788 | } |
| 2784 | 2789 | ||
| 2785 | static bool | 2790 | static int |
| 2786 | init_reserved(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | 2791 | init_reserved(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 2787 | { | 2792 | { |
| 2788 | /* | 2793 | /* |
| @@ -2793,10 +2798,10 @@ init_reserved(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 2793 | * Seemingly does nothing | 2798 | * Seemingly does nothing |
| 2794 | */ | 2799 | */ |
| 2795 | 2800 | ||
| 2796 | return true; | 2801 | return 1; |
| 2797 | } | 2802 | } |
| 2798 | 2803 | ||
| 2799 | static bool | 2804 | static int |
| 2800 | init_96(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | 2805 | init_96(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 2801 | { | 2806 | { |
| 2802 | /* | 2807 | /* |
| @@ -2829,13 +2834,13 @@ init_96(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 2829 | val <<= bios->data[offset + 16]; | 2834 | val <<= bios->data[offset + 16]; |
| 2830 | 2835 | ||
| 2831 | if (!iexec->execute) | 2836 | if (!iexec->execute) |
| 2832 | return true; | 2837 | return 17; |
| 2833 | 2838 | ||
| 2834 | bios_wr32(bios, reg, (bios_rd32(bios, reg) & mask) | val); | 2839 | bios_wr32(bios, reg, (bios_rd32(bios, reg) & mask) | val); |
| 2835 | return true; | 2840 | return 17; |
| 2836 | } | 2841 | } |
| 2837 | 2842 | ||
| 2838 | static bool | 2843 | static int |
| 2839 | init_97(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | 2844 | init_97(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 2840 | { | 2845 | { |
| 2841 | /* | 2846 | /* |
| @@ -2859,13 +2864,13 @@ init_97(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 2859 | val = (val & mask) | ((val + add) & ~mask); | 2864 | val = (val & mask) | ((val + add) & ~mask); |
| 2860 | 2865 | ||
| 2861 | if (!iexec->execute) | 2866 | if (!iexec->execute) |
| 2862 | return true; | 2867 | return 13; |
| 2863 | 2868 | ||
| 2864 | bios_wr32(bios, reg, val); | 2869 | bios_wr32(bios, reg, val); |
| 2865 | return true; | 2870 | return 13; |
| 2866 | } | 2871 | } |
| 2867 | 2872 | ||
| 2868 | static bool | 2873 | static int |
| 2869 | init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | 2874 | init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 2870 | { | 2875 | { |
| 2871 | /* | 2876 | /* |
| @@ -2883,32 +2888,33 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 2883 | struct drm_device *dev = bios->dev; | 2888 | struct drm_device *dev = bios->dev; |
| 2884 | struct nouveau_i2c_chan *auxch; | 2889 | struct nouveau_i2c_chan *auxch; |
| 2885 | uint32_t addr = ROM32(bios->data[offset + 1]); | 2890 | uint32_t addr = ROM32(bios->data[offset + 1]); |
| 2886 | uint8_t len = bios->data[offset + 5]; | 2891 | uint8_t count = bios->data[offset + 5]; |
| 2892 | int len = 6 + count * 2; | ||
| 2887 | int ret, i; | 2893 | int ret, i; |
| 2888 | 2894 | ||
| 2889 | if (!bios->display.output) { | 2895 | if (!bios->display.output) { |
| 2890 | NV_ERROR(dev, "INIT_AUXCH: no active output\n"); | 2896 | NV_ERROR(dev, "INIT_AUXCH: no active output\n"); |
| 2891 | return false; | 2897 | return 0; |
| 2892 | } | 2898 | } |
| 2893 | 2899 | ||
| 2894 | auxch = init_i2c_device_find(dev, bios->display.output->i2c_index); | 2900 | auxch = init_i2c_device_find(dev, bios->display.output->i2c_index); |
| 2895 | if (!auxch) { | 2901 | if (!auxch) { |
| 2896 | NV_ERROR(dev, "INIT_AUXCH: couldn't get auxch %d\n", | 2902 | NV_ERROR(dev, "INIT_AUXCH: couldn't get auxch %d\n", |
| 2897 | bios->display.output->i2c_index); | 2903 | bios->display.output->i2c_index); |
| 2898 | return false; | 2904 | return 0; |
| 2899 | } | 2905 | } |
| 2900 | 2906 | ||
| 2901 | if (!iexec->execute) | 2907 | if (!iexec->execute) |
| 2902 | return true; | 2908 | return len; |
| 2903 | 2909 | ||
| 2904 | offset += 6; | 2910 | offset += 6; |
| 2905 | for (i = 0; i < len; i++, offset += 2) { | 2911 | for (i = 0; i < count; i++, offset += 2) { |
| 2906 | uint8_t data; | 2912 | uint8_t data; |
| 2907 | 2913 | ||
| 2908 | ret = nouveau_dp_auxch(auxch, 9, addr, &data, 1); | 2914 | ret = nouveau_dp_auxch(auxch, 9, addr, &data, 1); |
| 2909 | if (ret) { | 2915 | if (ret) { |
| 2910 | NV_ERROR(dev, "INIT_AUXCH: rd auxch fail %d\n", ret); | 2916 | NV_ERROR(dev, "INIT_AUXCH: rd auxch fail %d\n", ret); |
| 2911 | return false; | 2917 | return 0; |
| 2912 | } | 2918 | } |
| 2913 | 2919 | ||
| 2914 | data &= bios->data[offset + 0]; | 2920 | data &= bios->data[offset + 0]; |
| @@ -2917,14 +2923,14 @@ init_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 2917 | ret = nouveau_dp_auxch(auxch, 8, addr, &data, 1); | 2923 | ret = nouveau_dp_auxch(auxch, 8, addr, &data, 1); |
| 2918 | if (ret) { | 2924 | if (ret) { |
| 2919 | NV_ERROR(dev, "INIT_AUXCH: wr auxch fail %d\n", ret); | 2925 | NV_ERROR(dev, "INIT_AUXCH: wr auxch fail %d\n", ret); |
| 2920 | return false; | 2926 | return 0; |
| 2921 | } | 2927 | } |
| 2922 | } | 2928 | } |
| 2923 | 2929 | ||
| 2924 | return true; | 2930 | return len; |
| 2925 | } | 2931 | } |
| 2926 | 2932 | ||
| 2927 | static bool | 2933 | static int |
| 2928 | init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | 2934 | init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) |
| 2929 | { | 2935 | { |
| 2930 | /* | 2936 | /* |
| @@ -2941,106 +2947,99 @@ init_zm_auxch(struct nvbios *bios, uint16_t offset, struct init_exec *iexec) | |||
| 2941 | struct drm_device *dev = bios->dev; | 2947 | struct drm_device *dev = bios->dev; |
| 2942 | struct nouveau_i2c_chan *auxch; | 2948 | struct nouveau_i2c_chan *auxch; |
| 2943 | uint32_t addr = ROM32(bios->data[offset + 1]); | 2949 | uint32_t addr = ROM32(bios->data[offset + 1]); |
| 2944 | uint8_t len = bios->data[offset + 5]; | 2950 | uint8_t count = bios->data[offset + 5]; |
| 2951 | int len = 6 + count; | ||
| 2945 | int ret, i; | 2952 | int ret, i; |
| 2946 | 2953 | ||
| 2947 | if (!bios->display.output) { | 2954 | if (!bios->display.output) { |
| 2948 | NV_ERROR(dev, "INIT_ZM_AUXCH: no active output\n"); | 2955 | NV_ERROR(dev, "INIT_ZM_AUXCH: no active output\n"); |
| 2949 | return false; | 2956 | return 0; |
| 2950 | } | 2957 | } |
| 2951 | 2958 | ||
| 2952 | auxch = init_i2c_device_find(dev, bios->display.output->i2c_index); | 2959 | auxch = init_i2c_device_find(dev, bios->display.output->i2c_index); |
| 2953 | if (!auxch) { | 2960 | if (!auxch) { |
| 2954 | NV_ERROR(dev, "INIT_ZM_AUXCH: couldn't get auxch %d\n", | 2961 | NV_ERROR(dev, "INIT_ZM_AUXCH: couldn't get auxch %d\n", |
| 2955 | bios->display.output->i2c_index); | 2962 | bios->display.output->i2c_index); |
| 2956 | return false; | 2963 | return 0; |
| 2957 | } | 2964 | } |
| 2958 | 2965 | ||
| 2959 | if (!iexec->execute) | 2966 | if (!iexec->execute) |
| 2960 | return true; | 2967 | return len; |
| 2961 | 2968 | ||
| 2962 | offset += 6; | 2969 | offset += 6; |
| 2963 | for (i = 0; i < len; i++, offset++) { | 2970 | for (i = 0; i < count; i++, offset++) { |
| 2964 | ret = nouveau_dp_auxch(auxch, 8, addr, &bios->data[offset], 1); | 2971 | ret = nouveau_dp_auxch(auxch, 8, addr, &bios->data[offset], 1); |
| 2965 | if (ret) { | 2972 | if (ret) { |
| 2966 | NV_ERROR(dev, "INIT_ZM_AUXCH: wr auxch fail %d\n", ret); | 2973 | NV_ERROR(dev, "INIT_ZM_AUXCH: wr auxch fail %d\n", ret); |
| 2967 | return false; | 2974 | return 0; |
| 2968 | } | 2975 | } |
| 2969 | } | 2976 | } |
| 2970 | 2977 | ||
| 2971 | return true; | 2978 | return len; |
| 2972 | } | 2979 | } |
| 2973 | 2980 | ||
| 2974 | static struct init_tbl_entry itbl_entry[] = { | 2981 | static struct init_tbl_entry itbl_entry[] = { |
| 2975 | /* command name , id , length , offset , mult , command handler */ | 2982 | /* command name , id , length , offset , mult , command handler */ |
| 2976 | /* INIT_PROG (0x31, 15, 10, 4) removed due to no example of use */ | 2983 | /* INIT_PROG (0x31, 15, 10, 4) removed due to no example of use */ |
| 2977 | { "INIT_IO_RESTRICT_PROG" , 0x32, 11 , 6 , 4 , init_io_restrict_prog }, | 2984 | { "INIT_IO_RESTRICT_PROG" , 0x32, init_io_restrict_prog }, |
| 2978 | { "INIT_REPEAT" , 0x33, 2 , 0 , 0 , init_repeat }, | 2985 | { "INIT_REPEAT" , 0x33, init_repeat }, |
| 2979 | { "INIT_IO_RESTRICT_PLL" , 0x34, 12 , 7 , 2 , init_io_restrict_pll }, | 2986 | { "INIT_IO_RESTRICT_PLL" , 0x34, init_io_restrict_pll }, |
| 2980 | { "INIT_END_REPEAT" , 0x36, 1 , 0 , 0 , init_end_repeat }, | 2987 | { "INIT_END_REPEAT" , 0x36, init_end_repeat }, |
| 2981 | { "INIT_COPY" , 0x37, 11 , 0 , 0 , init_copy }, | 2988 | { "INIT_COPY" , 0x37, init_copy }, |
| 2982 | { "INIT_NOT" , 0x38, 1 , 0 , 0 , init_not }, | 2989 | { "INIT_NOT" , 0x38, init_not }, |
| 2983 | { "INIT_IO_FLAG_CONDITION" , 0x39, 2 , 0 , 0 , init_io_flag_condition }, | 2990 | { "INIT_IO_FLAG_CONDITION" , 0x39, init_io_flag_condition }, |
| 2984 | { "INIT_INDEX_ADDRESS_LATCHED" , 0x49, 18 , 17 , 2 , init_idx_addr_latched }, | 2991 | { "INIT_INDEX_ADDRESS_LATCHED" , 0x49, init_idx_addr_latched }, |
| 2985 | { "INIT_IO_RESTRICT_PLL2" , 0x4A, 11 , 6 , 4 , init_io_restrict_pll2 }, | 2992 | { "INIT_IO_RESTRICT_PLL2" , 0x4A, init_io_restrict_pll2 }, |
| 2986 | { "INIT_PLL2" , 0x4B, 9 , 0 , 0 , init_pll2 }, | 2993 | { "INIT_PLL2" , 0x4B, init_pll2 }, |
| 2987 | { "INIT_I2C_BYTE" , 0x4C, 4 , 3 , 3 , init_i2c_byte }, | 2994 | { "INIT_I2C_BYTE" , 0x4C, init_i2c_byte }, |
| 2988 | { "INIT_ZM_I2C_BYTE" , 0x4D, 4 , 3 , 2 , init_zm_i2c_byte }, | 2995 | { "INIT_ZM_I2C_BYTE" , 0x4D, init_zm_i2c_byte }, |
| 2989 | { "INIT_ZM_I2C" , 0x4E, 4 , 3 , 1 , init_zm_i2c }, | 2996 | { "INIT_ZM_I2C" , 0x4E, init_zm_i2c }, |
| 2990 | { "INIT_TMDS" , 0x4F, 5 , 0 , 0 , init_tmds }, | 2997 | { "INIT_TMDS" , 0x4F, init_tmds }, |
| 2991 | { "INIT_ZM_TMDS_GROUP" , 0x50, 3 , 2 , 2 , init_zm_tmds_group }, | 2998 | { "INIT_ZM_TMDS_GROUP" , 0x50, init_zm_tmds_group }, |
| 2992 | { "INIT_CR_INDEX_ADDRESS_LATCHED" , 0x51, 5 , 4 , 1 , init_cr_idx_adr_latch }, | 2999 | { "INIT_CR_INDEX_ADDRESS_LATCHED" , 0x51, init_cr_idx_adr_latch }, |
| 2993 | { "INIT_CR" , 0x52, 4 , 0 , 0 , init_cr }, | 3000 | { "INIT_CR" , 0x52, init_cr }, |
| 2994 | { "INIT_ZM_CR" , 0x53, 3 , 0 , 0 , init_zm_cr }, | 3001 | { "INIT_ZM_CR" , 0x53, init_zm_cr }, |
| 2995 | { "INIT_ZM_CR_GROUP" , 0x54, 2 , 1 , 2 , init_zm_cr_group }, | 3002 | { "INIT_ZM_CR_GROUP" , 0x54, init_zm_cr_group }, |
| 2996 | { "INIT_CONDITION_TIME" , 0x56, 3 , 0 , 0 , init_condition_time }, | 3003 | { "INIT_CONDITION_TIME" , 0x56, init_condition_time }, |
| 2997 | { "INIT_ZM_REG_SEQUENCE" , 0x58, 6 , 5 , 4 , init_zm_reg_sequence }, | 3004 | { "INIT_ZM_REG_SEQUENCE" , 0x58, init_zm_reg_sequence }, |
| 2998 | /* INIT_INDIRECT_REG (0x5A, 7, 0, 0) removed due to no example of use */ | 3005 | /* INIT_INDIRECT_REG (0x5A, 7, 0, 0) removed due to no example of use */ |
| 2999 | { "INIT_SUB_DIRECT" , 0x5B, 3 , 0 , 0 , init_sub_direct }, | 3006 | { "INIT_SUB_DIRECT" , 0x5B, init_sub_direct }, |
| 3000 | { "INIT_COPY_NV_REG" , 0x5F, 22 , 0 , 0 , init_copy_nv_reg }, | 3007 | { "INIT_COPY_NV_REG" , 0x5F, init_copy_nv_reg }, |
| 3001 | { "INIT_ZM_INDEX_IO" , 0x62, 5 , 0 , 0 , init_zm_index_io }, | 3008 | { "INIT_ZM_INDEX_IO" , 0x62, init_zm_index_io }, |
| 3002 | { "INIT_COMPUTE_MEM" , 0x63, 1 , 0 , 0 , init_compute_mem }, | 3009 | { "INIT_COMPUTE_MEM" , 0x63, init_compute_mem }, |
| 3003 | { "INIT_RESET" , 0x65, 13 , 0 , 0 , init_reset }, | 3010 | { "INIT_RESET" , 0x65, init_reset }, |
| 3004 | { "INIT_CONFIGURE_MEM" , 0x66, 1 , 0 , 0 , init_configure_mem }, | 3011 | { "INIT_CONFIGURE_MEM" , 0x66, init_configure_mem }, |
| 3005 | { "INIT_CONFIGURE_CLK" , 0x67, 1 , 0 , 0 , init_configure_clk }, | 3012 | { "INIT_CONFIGURE_CLK" , 0x67, init_configure_clk }, |
| 3006 | { "INIT_CONFIGURE_PREINIT" , 0x68, 1 , 0 , 0 , init_configure_preinit }, | 3013 | { "INIT_CONFIGURE_PREINIT" , 0x68, init_configure_preinit }, |
| 3007 | { "INIT_IO" , 0x69, 5 , 0 , 0 , init_io }, | 3014 | { "INIT_IO" , 0x69, init_io }, |
| 3008 | { "INIT_SUB" , 0x6B, 2 , 0 , 0 , init_sub }, | 3015 | { "INIT_SUB" , 0x6B, init_sub }, |
| 3009 | { "INIT_RAM_CONDITION" , 0x6D, 3 , 0 , 0 , init_ram_condition }, | 3016 | { "INIT_RAM_CONDITION" , 0x6D, init_ram_condition }, |
| 3010 | { "INIT_NV_REG" , 0x6E, 13 , 0 , 0 , init_nv_reg }, | 3017 | { "INIT_NV_REG" , 0x6E, init_nv_reg }, |
| 3011 | { "INIT_MACRO" , 0x6F, 2 , 0 , 0 , init_macro }, | 3018 | { "INIT_MACRO" , 0x6F, init_macro }, |
| 3012 | { "INIT_DONE" , 0x71, 1 , 0 , 0 , init_done }, | 3019 | { "INIT_DONE" , 0x71, init_done }, |
| 3013 | { "INIT_RESUME" , 0x72, 1 , 0 , 0 , init_resume }, | 3020 | { "INIT_RESUME" , 0x72, init_resume }, |
| 3014 | /* INIT_RAM_CONDITION2 (0x73, 9, 0, 0) removed due to no example of use */ | 3021 | /* INIT_RAM_CONDITION2 (0x73, 9, 0, 0) removed due to no example of use */ |
| 3015 | { "INIT_TIME" , 0x74, 3 , 0 , 0 , init_time }, | 3022 | { "INIT_TIME" , 0x74, init_time }, |
| 3016 | { "INIT_CONDITION" , 0x75, 2 , 0 , 0 , init_condition }, | 3023 | { "INIT_CONDITION" , 0x75, init_condition }, |
| 3017 | { "INIT_IO_CONDITION" , 0x76, 2 , 0 , 0 , init_io_condition }, | 3024 | { "INIT_IO_CONDITION" , 0x76, init_io_condition }, |
| 3018 | { "INIT_INDEX_IO" , 0x78, 6 , 0 , 0 , init_index_io }, | 3025 | { "INIT_INDEX_IO" , 0x78, init_index_io }, |
| 3019 | { "INIT_PLL" , 0x79, 7 , 0 , 0 , init_pll }, | 3026 | { "INIT_PLL" , 0x79, init_pll }, |
| 3020 | { "INIT_ZM_REG" , 0x7A, 9 , 0 , 0 , init_zm_reg }, | 3027 | { "INIT_ZM_REG" , 0x7A, init_zm_reg }, |
| 3021 | /* INIT_RAM_RESTRICT_PLL's length is adjusted by the BIT M table */ | 3028 | { "INIT_RAM_RESTRICT_PLL" , 0x87, init_ram_restrict_pll }, |
| 3022 | { "INIT_RAM_RESTRICT_PLL" , 0x87, 2 , 0 , 0 , init_ram_restrict_pll }, | 3029 | { "INIT_8C" , 0x8C, init_8c }, |
| 3023 | { "INIT_8C" , 0x8C, 1 , 0 , 0 , init_8c }, | 3030 | { "INIT_8D" , 0x8D, init_8d }, |
| 3024 | { "INIT_8D" , 0x8D, 1 , 0 , 0 , init_8d }, | 3031 | { "INIT_GPIO" , 0x8E, init_gpio }, |
| 3025 | { "INIT_GPIO" , 0x8E, 1 , 0 , 0 , init_gpio }, | 3032 | { "INIT_RAM_RESTRICT_ZM_REG_GROUP" , 0x8F, init_ram_restrict_zm_reg_group }, |
| 3026 | /* INIT_RAM_RESTRICT_ZM_REG_GROUP's mult is loaded by M table in BIT */ | 3033 | { "INIT_COPY_ZM_REG" , 0x90, init_copy_zm_reg }, |
| 3027 | { "INIT_RAM_RESTRICT_ZM_REG_GROUP" , 0x8F, 7 , 6 , 0 , init_ram_restrict_zm_reg_group }, | 3034 | { "INIT_ZM_REG_GROUP_ADDRESS_LATCHED" , 0x91, init_zm_reg_group_addr_latched }, |
| 3028 | { "INIT_COPY_ZM_REG" , 0x90, 9 , 0 , 0 , init_copy_zm_reg }, | 3035 | { "INIT_RESERVED" , 0x92, init_reserved }, |
| 3029 | { "INIT_ZM_REG_GROUP_ADDRESS_LATCHED" , 0x91, 6 , 5 , 4 , init_zm_reg_group_addr_latched }, | 3036 | { "INIT_96" , 0x96, init_96 }, |
| 3030 | { "INIT_RESERVED" , 0x92, 1 , 0 , 0 , init_reserved }, | 3037 | { "INIT_97" , 0x97, init_97 }, |
| 3031 | { "INIT_96" , 0x96, 17 , 0 , 0 , init_96 }, | 3038 | { "INIT_AUXCH" , 0x98, init_auxch }, |
| 3032 | { "INIT_97" , 0x97, 13 , 0 , 0 , init_97 }, | 3039 | { "INIT_ZM_AUXCH" , 0x99, init_zm_auxch }, |
| 3033 | { "INIT_AUXCH" , 0x98, 6 , 5 , 2 , init_auxch }, | 3040 | { NULL , 0 , NULL } |
| 3034 | { "INIT_ZM_AUXCH" , 0x99, 6 , 5 , 1 , init_zm_auxch }, | ||
| 3035 | { NULL , 0 , 0 , 0 , 0 , NULL } | ||
| 3036 | }; | 3041 | }; |
| 3037 | 3042 | ||
| 3038 | static unsigned int get_init_table_entry_length(struct nvbios *bios, unsigned int offset, int i) | ||
| 3039 | { | ||
| 3040 | /* Calculates the length of a given init table entry. */ | ||
| 3041 | return itbl_entry[i].length + bios->data[offset + itbl_entry[i].length_offset]*itbl_entry[i].length_multiplier; | ||
| 3042 | } | ||
| 3043 | |||
| 3044 | #define MAX_TABLE_OPS 1000 | 3043 | #define MAX_TABLE_OPS 1000 |
| 3045 | 3044 | ||
| 3046 | static int | 3045 | static int |
| @@ -3056,7 +3055,7 @@ parse_init_table(struct nvbios *bios, unsigned int offset, | |||
| 3056 | * is changed back to EXECUTE. | 3055 | * is changed back to EXECUTE. |
| 3057 | */ | 3056 | */ |
| 3058 | 3057 | ||
| 3059 | int count = 0, i; | 3058 | int count = 0, i, res; |
| 3060 | uint8_t id; | 3059 | uint8_t id; |
| 3061 | 3060 | ||
| 3062 | /* | 3061 | /* |
| @@ -3076,22 +3075,21 @@ parse_init_table(struct nvbios *bios, unsigned int offset, | |||
| 3076 | offset, itbl_entry[i].id, itbl_entry[i].name); | 3075 | offset, itbl_entry[i].id, itbl_entry[i].name); |
| 3077 | 3076 | ||
| 3078 | /* execute eventual command handler */ | 3077 | /* execute eventual command handler */ |
| 3079 | if (itbl_entry[i].handler) | 3078 | res = (*itbl_entry[i].handler)(bios, offset, iexec); |
| 3080 | if (!(*itbl_entry[i].handler)(bios, offset, iexec)) | 3079 | if (!res) |
| 3081 | break; | 3080 | break; |
| 3081 | /* | ||
| 3082 | * Add the offset of the current command including all data | ||
| 3083 | * of that command. The offset will then be pointing on the | ||
| 3084 | * next op code. | ||
| 3085 | */ | ||
| 3086 | offset += res; | ||
| 3082 | } else { | 3087 | } else { |
| 3083 | NV_ERROR(bios->dev, | 3088 | NV_ERROR(bios->dev, |
| 3084 | "0x%04X: Init table command not found: " | 3089 | "0x%04X: Init table command not found: " |
| 3085 | "0x%02X\n", offset, id); | 3090 | "0x%02X\n", offset, id); |
| 3086 | return -ENOENT; | 3091 | return -ENOENT; |
| 3087 | } | 3092 | } |
| 3088 | |||
| 3089 | /* | ||
| 3090 | * Add the offset of the current command including all data | ||
| 3091 | * of that command. The offset will then be pointing on the | ||
| 3092 | * next op code. | ||
| 3093 | */ | ||
| 3094 | offset += get_init_table_entry_length(bios, offset, i); | ||
| 3095 | } | 3093 | } |
| 3096 | 3094 | ||
| 3097 | if (offset >= bios->length) | 3095 | if (offset >= bios->length) |
| @@ -3854,7 +3852,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, | |||
| 3854 | * script tables is a pointer to the script to execute. | 3852 | * script tables is a pointer to the script to execute. |
| 3855 | */ | 3853 | */ |
| 3856 | 3854 | ||
| 3857 | NV_DEBUG(dev, "Searching for output entry for %d %d %d\n", | 3855 | NV_DEBUG_KMS(dev, "Searching for output entry for %d %d %d\n", |
| 3858 | dcbent->type, dcbent->location, dcbent->or); | 3856 | dcbent->type, dcbent->location, dcbent->or); |
| 3859 | otable = bios_output_config_match(dev, dcbent, table[1] + | 3857 | otable = bios_output_config_match(dev, dcbent, table[1] + |
| 3860 | bios->display.script_table_ptr, | 3858 | bios->display.script_table_ptr, |
| @@ -3884,7 +3882,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, | |||
| 3884 | if (pxclk == 0) { | 3882 | if (pxclk == 0) { |
| 3885 | script = ROM16(otable[6]); | 3883 | script = ROM16(otable[6]); |
| 3886 | if (!script) { | 3884 | if (!script) { |
| 3887 | NV_DEBUG(dev, "output script 0 not found\n"); | 3885 | NV_DEBUG_KMS(dev, "output script 0 not found\n"); |
| 3888 | return 1; | 3886 | return 1; |
| 3889 | } | 3887 | } |
| 3890 | 3888 | ||
| @@ -3894,7 +3892,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, | |||
| 3894 | if (pxclk == -1) { | 3892 | if (pxclk == -1) { |
| 3895 | script = ROM16(otable[8]); | 3893 | script = ROM16(otable[8]); |
| 3896 | if (!script) { | 3894 | if (!script) { |
| 3897 | NV_DEBUG(dev, "output script 1 not found\n"); | 3895 | NV_DEBUG_KMS(dev, "output script 1 not found\n"); |
| 3898 | return 1; | 3896 | return 1; |
| 3899 | } | 3897 | } |
| 3900 | 3898 | ||
| @@ -3907,7 +3905,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, | |||
| 3907 | else | 3905 | else |
| 3908 | script = 0; | 3906 | script = 0; |
| 3909 | if (!script) { | 3907 | if (!script) { |
| 3910 | NV_DEBUG(dev, "output script 2 not found\n"); | 3908 | NV_DEBUG_KMS(dev, "output script 2 not found\n"); |
| 3911 | return 1; | 3909 | return 1; |
| 3912 | } | 3910 | } |
| 3913 | 3911 | ||
| @@ -3931,7 +3929,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent, | |||
| 3931 | if (script) | 3929 | if (script) |
| 3932 | script = clkcmptable(bios, script, -pxclk); | 3930 | script = clkcmptable(bios, script, -pxclk); |
| 3933 | if (!script) { | 3931 | if (!script) { |
| 3934 | NV_DEBUG(dev, "clock script 1 not found\n"); | 3932 | NV_DEBUG_KMS(dev, "clock script 1 not found\n"); |
| 3935 | return 1; | 3933 | return 1; |
| 3936 | } | 3934 | } |
| 3937 | 3935 | ||
| @@ -4606,10 +4604,6 @@ parse_bit_M_tbl_entry(struct drm_device *dev, struct nvbios *bios, | |||
| 4606 | * stuff that we don't use - their use currently unknown | 4604 | * stuff that we don't use - their use currently unknown |
| 4607 | */ | 4605 | */ |
| 4608 | 4606 | ||
| 4609 | uint16_t rr_strap_xlat; | ||
| 4610 | uint8_t rr_group_count; | ||
| 4611 | int i; | ||
| 4612 | |||
| 4613 | /* | 4607 | /* |
| 4614 | * Older bios versions don't have a sufficiently long table for | 4608 | * Older bios versions don't have a sufficiently long table for |
| 4615 | * what we want | 4609 | * what we want |
| @@ -4618,24 +4612,13 @@ parse_bit_M_tbl_entry(struct drm_device *dev, struct nvbios *bios, | |||
| 4618 | return 0; | 4612 | return 0; |
| 4619 | 4613 | ||
| 4620 | if (bitentry->id[1] < 2) { | 4614 | if (bitentry->id[1] < 2) { |
| 4621 | rr_group_count = bios->data[bitentry->offset + 2]; | 4615 | bios->ram_restrict_group_count = bios->data[bitentry->offset + 2]; |
| 4622 | rr_strap_xlat = ROM16(bios->data[bitentry->offset + 3]); | 4616 | bios->ram_restrict_tbl_ptr = ROM16(bios->data[bitentry->offset + 3]); |
| 4623 | } else { | 4617 | } else { |
| 4624 | rr_group_count = bios->data[bitentry->offset + 0]; | 4618 | bios->ram_restrict_group_count = bios->data[bitentry->offset + 0]; |
| 4625 | rr_strap_xlat = ROM16(bios->data[bitentry->offset + 1]); | 4619 | bios->ram_restrict_tbl_ptr = ROM16(bios->data[bitentry->offset + 1]); |
| 4626 | } | 4620 | } |
| 4627 | 4621 | ||
| 4628 | /* adjust length of INIT_87 */ | ||
| 4629 | for (i = 0; itbl_entry[i].name && (itbl_entry[i].id != 0x87); i++); | ||
| 4630 | itbl_entry[i].length += rr_group_count * 4; | ||
| 4631 | |||
| 4632 | /* set up multiplier for INIT_RAM_RESTRICT_ZM_REG_GROUP */ | ||
| 4633 | for (; itbl_entry[i].name && (itbl_entry[i].id != 0x8f); i++); | ||
| 4634 | itbl_entry[i].length_multiplier = rr_group_count * 4; | ||
| 4635 | |||
| 4636 | init_ram_restrict_zm_reg_group_blocklen = itbl_entry[i].length_multiplier; | ||
| 4637 | bios->ram_restrict_tbl_ptr = rr_strap_xlat; | ||
| 4638 | |||
| 4639 | return 0; | 4622 | return 0; |
| 4640 | } | 4623 | } |
| 4641 | 4624 | ||
| @@ -5234,7 +5217,7 @@ parse_dcb_connector_table(struct nvbios *bios) | |||
| 5234 | int i; | 5217 | int i; |
| 5235 | 5218 | ||
| 5236 | if (!bios->bdcb.connector_table_ptr) { | 5219 | if (!bios->bdcb.connector_table_ptr) { |
| 5237 | NV_DEBUG(dev, "No DCB connector table present\n"); | 5220 | NV_DEBUG_KMS(dev, "No DCB connector table present\n"); |
| 5238 | return; | 5221 | return; |
| 5239 | } | 5222 | } |
| 5240 | 5223 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h index 1d5f10bd78ed..058e98c76d89 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.h +++ b/drivers/gpu/drm/nouveau/nouveau_bios.h | |||
| @@ -227,6 +227,7 @@ struct nvbios { | |||
| 227 | 227 | ||
| 228 | uint16_t pll_limit_tbl_ptr; | 228 | uint16_t pll_limit_tbl_ptr; |
| 229 | uint16_t ram_restrict_tbl_ptr; | 229 | uint16_t ram_restrict_tbl_ptr; |
| 230 | uint8_t ram_restrict_group_count; | ||
| 230 | 231 | ||
| 231 | uint16_t some_script_ptr; /* BIT I + 14 */ | 232 | uint16_t some_script_ptr; /* BIT I + 14 */ |
| 232 | uint16_t init96_tbl_ptr; /* BIT I + 16 */ | 233 | uint16_t init96_tbl_ptr; /* BIT I + 16 */ |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index aa2dfbc3e351..0cad6d834eb2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c | |||
| @@ -154,6 +154,11 @@ nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t memtype) | |||
| 154 | nvbo->placement.busy_placement = nvbo->placements; | 154 | nvbo->placement.busy_placement = nvbo->placements; |
| 155 | nvbo->placement.num_placement = n; | 155 | nvbo->placement.num_placement = n; |
| 156 | nvbo->placement.num_busy_placement = n; | 156 | nvbo->placement.num_busy_placement = n; |
| 157 | |||
| 158 | if (nvbo->pin_refcnt) { | ||
| 159 | while (n--) | ||
| 160 | nvbo->placements[n] |= TTM_PL_FLAG_NO_EVICT; | ||
| 161 | } | ||
| 157 | } | 162 | } |
| 158 | 163 | ||
| 159 | int | 164 | int |
| @@ -400,10 +405,16 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl) | |||
| 400 | struct nouveau_bo *nvbo = nouveau_bo(bo); | 405 | struct nouveau_bo *nvbo = nouveau_bo(bo); |
| 401 | 406 | ||
| 402 | switch (bo->mem.mem_type) { | 407 | switch (bo->mem.mem_type) { |
| 408 | case TTM_PL_VRAM: | ||
| 409 | nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT | | ||
| 410 | TTM_PL_FLAG_SYSTEM); | ||
| 411 | break; | ||
| 403 | default: | 412 | default: |
| 404 | nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM); | 413 | nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM); |
| 405 | break; | 414 | break; |
| 406 | } | 415 | } |
| 416 | |||
| 417 | *pl = nvbo->placement; | ||
| 407 | } | 418 | } |
| 408 | 419 | ||
| 409 | 420 | ||
| @@ -455,11 +466,8 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, int no_wait, | |||
| 455 | int ret; | 466 | int ret; |
| 456 | 467 | ||
| 457 | chan = nvbo->channel; | 468 | chan = nvbo->channel; |
| 458 | if (!chan || nvbo->tile_flags || nvbo->no_vm) { | 469 | if (!chan || nvbo->tile_flags || nvbo->no_vm) |
| 459 | chan = dev_priv->channel; | 470 | chan = dev_priv->channel; |
| 460 | if (!chan) | ||
| 461 | return -EINVAL; | ||
| 462 | } | ||
| 463 | 471 | ||
| 464 | src_offset = old_mem->mm_node->start << PAGE_SHIFT; | 472 | src_offset = old_mem->mm_node->start << PAGE_SHIFT; |
| 465 | dst_offset = new_mem->mm_node->start << PAGE_SHIFT; | 473 | dst_offset = new_mem->mm_node->start << PAGE_SHIFT; |
| @@ -625,7 +633,8 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, | |||
| 625 | return ret; | 633 | return ret; |
| 626 | } | 634 | } |
| 627 | 635 | ||
| 628 | if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE) | 636 | if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE || |
| 637 | !dev_priv->channel) | ||
| 629 | return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); | 638 | return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem); |
| 630 | 639 | ||
| 631 | if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) { | 640 | if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) { |
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 032cf098fa1c..5a10deb8bdbd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c | |||
| @@ -86,7 +86,7 @@ nouveau_connector_destroy(struct drm_connector *drm_connector) | |||
| 86 | struct nouveau_connector *connector = nouveau_connector(drm_connector); | 86 | struct nouveau_connector *connector = nouveau_connector(drm_connector); |
| 87 | struct drm_device *dev = connector->base.dev; | 87 | struct drm_device *dev = connector->base.dev; |
| 88 | 88 | ||
| 89 | NV_DEBUG(dev, "\n"); | 89 | NV_DEBUG_KMS(dev, "\n"); |
| 90 | 90 | ||
| 91 | if (!connector) | 91 | if (!connector) |
| 92 | return; | 92 | return; |
| @@ -420,7 +420,7 @@ nouveau_connector_native_mode(struct nouveau_connector *connector) | |||
| 420 | /* Use preferred mode if there is one.. */ | 420 | /* Use preferred mode if there is one.. */ |
| 421 | list_for_each_entry(mode, &connector->base.probed_modes, head) { | 421 | list_for_each_entry(mode, &connector->base.probed_modes, head) { |
| 422 | if (mode->type & DRM_MODE_TYPE_PREFERRED) { | 422 | if (mode->type & DRM_MODE_TYPE_PREFERRED) { |
| 423 | NV_DEBUG(dev, "native mode from preferred\n"); | 423 | NV_DEBUG_KMS(dev, "native mode from preferred\n"); |
| 424 | return drm_mode_duplicate(dev, mode); | 424 | return drm_mode_duplicate(dev, mode); |
| 425 | } | 425 | } |
| 426 | } | 426 | } |
| @@ -445,7 +445,7 @@ nouveau_connector_native_mode(struct nouveau_connector *connector) | |||
| 445 | largest = mode; | 445 | largest = mode; |
| 446 | } | 446 | } |
| 447 | 447 | ||
| 448 | NV_DEBUG(dev, "native mode from largest: %dx%d@%d\n", | 448 | NV_DEBUG_KMS(dev, "native mode from largest: %dx%d@%d\n", |
| 449 | high_w, high_h, high_v); | 449 | high_w, high_h, high_v); |
| 450 | return largest ? drm_mode_duplicate(dev, largest) : NULL; | 450 | return largest ? drm_mode_duplicate(dev, largest) : NULL; |
| 451 | } | 451 | } |
| @@ -725,7 +725,7 @@ nouveau_connector_create(struct drm_device *dev, int index, int type) | |||
| 725 | struct drm_encoder *encoder; | 725 | struct drm_encoder *encoder; |
| 726 | int ret; | 726 | int ret; |
| 727 | 727 | ||
| 728 | NV_DEBUG(dev, "\n"); | 728 | NV_DEBUG_KMS(dev, "\n"); |
| 729 | 729 | ||
| 730 | nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL); | 730 | nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL); |
| 731 | if (!nv_connector) | 731 | if (!nv_connector) |
diff --git a/drivers/gpu/drm/nouveau/nouveau_dp.c b/drivers/gpu/drm/nouveau/nouveau_dp.c index de61f4640e12..9e2926c48579 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dp.c +++ b/drivers/gpu/drm/nouveau/nouveau_dp.c | |||
| @@ -187,7 +187,7 @@ nouveau_dp_link_train_adjust(struct drm_encoder *encoder, uint8_t *config) | |||
| 187 | if (ret) | 187 | if (ret) |
| 188 | return false; | 188 | return false; |
| 189 | 189 | ||
| 190 | NV_DEBUG(dev, "\t\tadjust 0x%02x 0x%02x\n", request[0], request[1]); | 190 | NV_DEBUG_KMS(dev, "\t\tadjust 0x%02x 0x%02x\n", request[0], request[1]); |
| 191 | 191 | ||
| 192 | /* Keep all lanes at the same level.. */ | 192 | /* Keep all lanes at the same level.. */ |
| 193 | for (i = 0; i < nv_encoder->dp.link_nr; i++) { | 193 | for (i = 0; i < nv_encoder->dp.link_nr; i++) { |
| @@ -228,7 +228,7 @@ nouveau_dp_link_train_commit(struct drm_encoder *encoder, uint8_t *config) | |||
| 228 | int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1); | 228 | int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1); |
| 229 | int dpe_headerlen, ret, i; | 229 | int dpe_headerlen, ret, i; |
| 230 | 230 | ||
| 231 | NV_DEBUG(dev, "\t\tconfig 0x%02x 0x%02x 0x%02x 0x%02x\n", | 231 | NV_DEBUG_KMS(dev, "\t\tconfig 0x%02x 0x%02x 0x%02x 0x%02x\n", |
| 232 | config[0], config[1], config[2], config[3]); | 232 | config[0], config[1], config[2], config[3]); |
| 233 | 233 | ||
| 234 | dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen); | 234 | dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen); |
| @@ -276,12 +276,12 @@ nouveau_dp_link_train(struct drm_encoder *encoder) | |||
| 276 | bool cr_done, cr_max_vs, eq_done; | 276 | bool cr_done, cr_max_vs, eq_done; |
| 277 | int ret = 0, i, tries, voltage; | 277 | int ret = 0, i, tries, voltage; |
| 278 | 278 | ||
| 279 | NV_DEBUG(dev, "link training!!\n"); | 279 | NV_DEBUG_KMS(dev, "link training!!\n"); |
| 280 | train: | 280 | train: |
| 281 | cr_done = eq_done = false; | 281 | cr_done = eq_done = false; |
| 282 | 282 | ||
| 283 | /* set link configuration */ | 283 | /* set link configuration */ |
| 284 | NV_DEBUG(dev, "\tbegin train: bw %d, lanes %d\n", | 284 | NV_DEBUG_KMS(dev, "\tbegin train: bw %d, lanes %d\n", |
| 285 | nv_encoder->dp.link_bw, nv_encoder->dp.link_nr); | 285 | nv_encoder->dp.link_bw, nv_encoder->dp.link_nr); |
| 286 | 286 | ||
| 287 | ret = nouveau_dp_link_bw_set(encoder, nv_encoder->dp.link_bw); | 287 | ret = nouveau_dp_link_bw_set(encoder, nv_encoder->dp.link_bw); |
| @@ -297,7 +297,7 @@ train: | |||
| 297 | return false; | 297 | return false; |
| 298 | 298 | ||
| 299 | /* clock recovery */ | 299 | /* clock recovery */ |
| 300 | NV_DEBUG(dev, "\tbegin cr\n"); | 300 | NV_DEBUG_KMS(dev, "\tbegin cr\n"); |
| 301 | ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_1); | 301 | ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_1); |
| 302 | if (ret) | 302 | if (ret) |
| 303 | goto stop; | 303 | goto stop; |
| @@ -314,7 +314,7 @@ train: | |||
| 314 | ret = auxch_rd(encoder, DP_LANE0_1_STATUS, status, 2); | 314 | ret = auxch_rd(encoder, DP_LANE0_1_STATUS, status, 2); |
| 315 | if (ret) | 315 | if (ret) |
| 316 | break; | 316 | break; |
| 317 | NV_DEBUG(dev, "\t\tstatus: 0x%02x 0x%02x\n", | 317 | NV_DEBUG_KMS(dev, "\t\tstatus: 0x%02x 0x%02x\n", |
| 318 | status[0], status[1]); | 318 | status[0], status[1]); |
| 319 | 319 | ||
| 320 | cr_done = true; | 320 | cr_done = true; |
| @@ -346,7 +346,7 @@ train: | |||
| 346 | goto stop; | 346 | goto stop; |
| 347 | 347 | ||
| 348 | /* channel equalisation */ | 348 | /* channel equalisation */ |
| 349 | NV_DEBUG(dev, "\tbegin eq\n"); | 349 | NV_DEBUG_KMS(dev, "\tbegin eq\n"); |
| 350 | ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_2); | 350 | ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_2); |
| 351 | if (ret) | 351 | if (ret) |
| 352 | goto stop; | 352 | goto stop; |
| @@ -357,7 +357,7 @@ train: | |||
| 357 | ret = auxch_rd(encoder, DP_LANE0_1_STATUS, status, 3); | 357 | ret = auxch_rd(encoder, DP_LANE0_1_STATUS, status, 3); |
| 358 | if (ret) | 358 | if (ret) |
| 359 | break; | 359 | break; |
| 360 | NV_DEBUG(dev, "\t\tstatus: 0x%02x 0x%02x\n", | 360 | NV_DEBUG_KMS(dev, "\t\tstatus: 0x%02x 0x%02x\n", |
| 361 | status[0], status[1]); | 361 | status[0], status[1]); |
| 362 | 362 | ||
| 363 | eq_done = true; | 363 | eq_done = true; |
| @@ -395,9 +395,9 @@ stop: | |||
| 395 | 395 | ||
| 396 | /* retry at a lower setting, if possible */ | 396 | /* retry at a lower setting, if possible */ |
| 397 | if (!ret && !(eq_done && cr_done)) { | 397 | if (!ret && !(eq_done && cr_done)) { |
| 398 | NV_DEBUG(dev, "\twe failed\n"); | 398 | NV_DEBUG_KMS(dev, "\twe failed\n"); |
| 399 | if (nv_encoder->dp.link_bw != DP_LINK_BW_1_62) { | 399 | if (nv_encoder->dp.link_bw != DP_LINK_BW_1_62) { |
| 400 | NV_DEBUG(dev, "retry link training at low rate\n"); | 400 | NV_DEBUG_KMS(dev, "retry link training at low rate\n"); |
| 401 | nv_encoder->dp.link_bw = DP_LINK_BW_1_62; | 401 | nv_encoder->dp.link_bw = DP_LINK_BW_1_62; |
| 402 | goto train; | 402 | goto train; |
| 403 | } | 403 | } |
| @@ -418,7 +418,7 @@ nouveau_dp_detect(struct drm_encoder *encoder) | |||
| 418 | if (ret) | 418 | if (ret) |
| 419 | return false; | 419 | return false; |
| 420 | 420 | ||
| 421 | NV_DEBUG(dev, "encoder: link_bw %d, link_nr %d\n" | 421 | NV_DEBUG_KMS(dev, "encoder: link_bw %d, link_nr %d\n" |
| 422 | "display: link_bw %d, link_nr %d version 0x%02x\n", | 422 | "display: link_bw %d, link_nr %d version 0x%02x\n", |
| 423 | nv_encoder->dcb->dpconf.link_bw, | 423 | nv_encoder->dcb->dpconf.link_bw, |
| 424 | nv_encoder->dcb->dpconf.link_nr, | 424 | nv_encoder->dcb->dpconf.link_nr, |
| @@ -446,7 +446,7 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr, | |||
| 446 | uint32_t tmp, ctrl, stat = 0, data32[4] = {}; | 446 | uint32_t tmp, ctrl, stat = 0, data32[4] = {}; |
| 447 | int ret = 0, i, index = auxch->rd; | 447 | int ret = 0, i, index = auxch->rd; |
| 448 | 448 | ||
| 449 | NV_DEBUG(dev, "ch %d cmd %d addr 0x%x len %d\n", index, cmd, addr, data_nr); | 449 | NV_DEBUG_KMS(dev, "ch %d cmd %d addr 0x%x len %d\n", index, cmd, addr, data_nr); |
| 450 | 450 | ||
| 451 | tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd)); | 451 | tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd)); |
| 452 | nv_wr32(dev, NV50_AUXCH_CTRL(auxch->rd), tmp | 0x00100000); | 452 | nv_wr32(dev, NV50_AUXCH_CTRL(auxch->rd), tmp | 0x00100000); |
| @@ -472,7 +472,7 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr, | |||
| 472 | if (!(cmd & 1)) { | 472 | if (!(cmd & 1)) { |
| 473 | memcpy(data32, data, data_nr); | 473 | memcpy(data32, data, data_nr); |
| 474 | for (i = 0; i < 4; i++) { | 474 | for (i = 0; i < 4; i++) { |
| 475 | NV_DEBUG(dev, "wr %d: 0x%08x\n", i, data32[i]); | 475 | NV_DEBUG_KMS(dev, "wr %d: 0x%08x\n", i, data32[i]); |
| 476 | nv_wr32(dev, NV50_AUXCH_DATA_OUT(index, i), data32[i]); | 476 | nv_wr32(dev, NV50_AUXCH_DATA_OUT(index, i), data32[i]); |
| 477 | } | 477 | } |
| 478 | } | 478 | } |
| @@ -504,7 +504,7 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr, | |||
| 504 | if (cmd & 1) { | 504 | if (cmd & 1) { |
| 505 | for (i = 0; i < 4; i++) { | 505 | for (i = 0; i < 4; i++) { |
| 506 | data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i)); | 506 | data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i)); |
| 507 | NV_DEBUG(dev, "rd %d: 0x%08x\n", i, data32[i]); | 507 | NV_DEBUG_KMS(dev, "rd %d: 0x%08x\n", i, data32[i]); |
| 508 | } | 508 | } |
| 509 | memcpy(data, data32, data_nr); | 509 | memcpy(data, data32, data_nr); |
| 510 | } | 510 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c index 35249c35118f..06eb993e0883 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.c +++ b/drivers/gpu/drm/nouveau/nouveau_drv.c | |||
| @@ -35,6 +35,10 @@ | |||
| 35 | 35 | ||
| 36 | #include "drm_pciids.h" | 36 | #include "drm_pciids.h" |
| 37 | 37 | ||
| 38 | MODULE_PARM_DESC(ctxfw, "Use external firmware blob for grctx init (NV40)"); | ||
| 39 | int nouveau_ctxfw = 0; | ||
| 40 | module_param_named(ctxfw, nouveau_ctxfw, int, 0400); | ||
| 41 | |||
| 38 | MODULE_PARM_DESC(noagp, "Disable AGP"); | 42 | MODULE_PARM_DESC(noagp, "Disable AGP"); |
| 39 | int nouveau_noagp; | 43 | int nouveau_noagp; |
| 40 | module_param_named(noagp, nouveau_noagp, int, 0400); | 44 | module_param_named(noagp, nouveau_noagp, int, 0400); |
| @@ -273,7 +277,7 @@ nouveau_pci_resume(struct pci_dev *pdev) | |||
| 273 | 277 | ||
| 274 | for (i = 0; i < dev_priv->engine.fifo.channels; i++) { | 278 | for (i = 0; i < dev_priv->engine.fifo.channels; i++) { |
| 275 | chan = dev_priv->fifos[i]; | 279 | chan = dev_priv->fifos[i]; |
| 276 | if (!chan) | 280 | if (!chan || !chan->pushbuf_bo) |
| 277 | continue; | 281 | continue; |
| 278 | 282 | ||
| 279 | for (j = 0; j < NOUVEAU_DMA_SKIPS; j++) | 283 | for (j = 0; j < NOUVEAU_DMA_SKIPS; j++) |
| @@ -341,7 +345,7 @@ static struct drm_driver driver = { | |||
| 341 | .owner = THIS_MODULE, | 345 | .owner = THIS_MODULE, |
| 342 | .open = drm_open, | 346 | .open = drm_open, |
| 343 | .release = drm_release, | 347 | .release = drm_release, |
| 344 | .ioctl = drm_ioctl, | 348 | .unlocked_ioctl = drm_ioctl, |
| 345 | .mmap = nouveau_ttm_mmap, | 349 | .mmap = nouveau_ttm_mmap, |
| 346 | .poll = drm_poll, | 350 | .poll = drm_poll, |
| 347 | .fasync = drm_fasync, | 351 | .fasync = drm_fasync, |
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 88b4c7b77e7f..5f8cbb79c499 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h | |||
| @@ -54,6 +54,7 @@ struct nouveau_fpriv { | |||
| 54 | #include "nouveau_drm.h" | 54 | #include "nouveau_drm.h" |
| 55 | #include "nouveau_reg.h" | 55 | #include "nouveau_reg.h" |
| 56 | #include "nouveau_bios.h" | 56 | #include "nouveau_bios.h" |
| 57 | struct nouveau_grctx; | ||
| 57 | 58 | ||
| 58 | #define MAX_NUM_DCB_ENTRIES 16 | 59 | #define MAX_NUM_DCB_ENTRIES 16 |
| 59 | 60 | ||
| @@ -317,6 +318,7 @@ struct nouveau_pgraph_engine { | |||
| 317 | bool accel_blocked; | 318 | bool accel_blocked; |
| 318 | void *ctxprog; | 319 | void *ctxprog; |
| 319 | void *ctxvals; | 320 | void *ctxvals; |
| 321 | int grctx_size; | ||
| 320 | 322 | ||
| 321 | int (*init)(struct drm_device *); | 323 | int (*init)(struct drm_device *); |
| 322 | void (*takedown)(struct drm_device *); | 324 | void (*takedown)(struct drm_device *); |
| @@ -647,6 +649,7 @@ extern int nouveau_fbpercrtc; | |||
| 647 | extern char *nouveau_tv_norm; | 649 | extern char *nouveau_tv_norm; |
| 648 | extern int nouveau_reg_debug; | 650 | extern int nouveau_reg_debug; |
| 649 | extern char *nouveau_vbios; | 651 | extern char *nouveau_vbios; |
| 652 | extern int nouveau_ctxfw; | ||
| 650 | 653 | ||
| 651 | /* nouveau_state.c */ | 654 | /* nouveau_state.c */ |
| 652 | extern void nouveau_preclose(struct drm_device *dev, struct drm_file *); | 655 | extern void nouveau_preclose(struct drm_device *dev, struct drm_file *); |
| @@ -959,9 +962,7 @@ extern int nv40_graph_create_context(struct nouveau_channel *); | |||
| 959 | extern void nv40_graph_destroy_context(struct nouveau_channel *); | 962 | extern void nv40_graph_destroy_context(struct nouveau_channel *); |
| 960 | extern int nv40_graph_load_context(struct nouveau_channel *); | 963 | extern int nv40_graph_load_context(struct nouveau_channel *); |
| 961 | extern int nv40_graph_unload_context(struct drm_device *); | 964 | extern int nv40_graph_unload_context(struct drm_device *); |
| 962 | extern int nv40_grctx_init(struct drm_device *); | 965 | extern void nv40_grctx_init(struct nouveau_grctx *); |
| 963 | extern void nv40_grctx_fini(struct drm_device *); | ||
| 964 | extern void nv40_grctx_vals_load(struct drm_device *, struct nouveau_gpuobj *); | ||
| 965 | 966 | ||
| 966 | /* nv50_graph.c */ | 967 | /* nv50_graph.c */ |
| 967 | extern struct nouveau_pgraph_object_class nv50_graph_grclass[]; | 968 | extern struct nouveau_pgraph_object_class nv50_graph_grclass[]; |
| @@ -975,6 +976,12 @@ extern int nv50_graph_load_context(struct nouveau_channel *); | |||
| 975 | extern int nv50_graph_unload_context(struct drm_device *); | 976 | extern int nv50_graph_unload_context(struct drm_device *); |
| 976 | extern void nv50_graph_context_switch(struct drm_device *); | 977 | extern void nv50_graph_context_switch(struct drm_device *); |
| 977 | 978 | ||
| 979 | /* nouveau_grctx.c */ | ||
| 980 | extern int nouveau_grctx_prog_load(struct drm_device *); | ||
| 981 | extern void nouveau_grctx_vals_load(struct drm_device *, | ||
| 982 | struct nouveau_gpuobj *); | ||
| 983 | extern void nouveau_grctx_fini(struct drm_device *); | ||
| 984 | |||
| 978 | /* nv04_instmem.c */ | 985 | /* nv04_instmem.c */ |
| 979 | extern int nv04_instmem_init(struct drm_device *); | 986 | extern int nv04_instmem_init(struct drm_device *); |
| 980 | extern void nv04_instmem_takedown(struct drm_device *); | 987 | extern void nv04_instmem_takedown(struct drm_device *); |
| @@ -1207,14 +1214,24 @@ static inline void nv_wo32(struct drm_device *dev, struct nouveau_gpuobj *obj, | |||
| 1207 | pci_name(d->pdev), ##arg) | 1214 | pci_name(d->pdev), ##arg) |
| 1208 | #ifndef NV_DEBUG_NOTRACE | 1215 | #ifndef NV_DEBUG_NOTRACE |
| 1209 | #define NV_DEBUG(d, fmt, arg...) do { \ | 1216 | #define NV_DEBUG(d, fmt, arg...) do { \ |
| 1210 | if (drm_debug) { \ | 1217 | if (drm_debug & DRM_UT_DRIVER) { \ |
| 1218 | NV_PRINTK(KERN_DEBUG, d, "%s:%d - " fmt, __func__, \ | ||
| 1219 | __LINE__, ##arg); \ | ||
| 1220 | } \ | ||
| 1221 | } while (0) | ||
| 1222 | #define NV_DEBUG_KMS(d, fmt, arg...) do { \ | ||
| 1223 | if (drm_debug & DRM_UT_KMS) { \ | ||
| 1211 | NV_PRINTK(KERN_DEBUG, d, "%s:%d - " fmt, __func__, \ | 1224 | NV_PRINTK(KERN_DEBUG, d, "%s:%d - " fmt, __func__, \ |
| 1212 | __LINE__, ##arg); \ | 1225 | __LINE__, ##arg); \ |
| 1213 | } \ | 1226 | } \ |
| 1214 | } while (0) | 1227 | } while (0) |
| 1215 | #else | 1228 | #else |
| 1216 | #define NV_DEBUG(d, fmt, arg...) do { \ | 1229 | #define NV_DEBUG(d, fmt, arg...) do { \ |
| 1217 | if (drm_debug) \ | 1230 | if (drm_debug & DRM_UT_DRIVER) \ |
| 1231 | NV_PRINTK(KERN_DEBUG, d, fmt, ##arg); \ | ||
| 1232 | } while (0) | ||
| 1233 | #define NV_DEBUG_KMS(d, fmt, arg...) do { \ | ||
| 1234 | if (drm_debug & DRM_UT_KMS) \ | ||
| 1218 | NV_PRINTK(KERN_DEBUG, d, fmt, ##arg); \ | 1235 | NV_PRINTK(KERN_DEBUG, d, fmt, ##arg); \ |
| 1219 | } while (0) | 1236 | } while (0) |
| 1220 | #endif | 1237 | #endif |
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 36e8c5e4503a..84af25c238b6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c | |||
| @@ -58,7 +58,7 @@ nouveau_fbcon_sync(struct fb_info *info) | |||
| 58 | struct nouveau_channel *chan = dev_priv->channel; | 58 | struct nouveau_channel *chan = dev_priv->channel; |
| 59 | int ret, i; | 59 | int ret, i; |
| 60 | 60 | ||
| 61 | if (!chan->accel_done || | 61 | if (!chan || !chan->accel_done || |
| 62 | info->state != FBINFO_STATE_RUNNING || | 62 | info->state != FBINFO_STATE_RUNNING || |
| 63 | info->flags & FBINFO_HWACCEL_DISABLED) | 63 | info->flags & FBINFO_HWACCEL_DISABLED) |
| 64 | return 0; | 64 | return 0; |
| @@ -318,14 +318,16 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width, | |||
| 318 | par->nouveau_fb = nouveau_fb; | 318 | par->nouveau_fb = nouveau_fb; |
| 319 | par->dev = dev; | 319 | par->dev = dev; |
| 320 | 320 | ||
| 321 | switch (dev_priv->card_type) { | 321 | if (dev_priv->channel) { |
| 322 | case NV_50: | 322 | switch (dev_priv->card_type) { |
| 323 | nv50_fbcon_accel_init(info); | 323 | case NV_50: |
| 324 | break; | 324 | nv50_fbcon_accel_init(info); |
| 325 | default: | 325 | break; |
| 326 | nv04_fbcon_accel_init(info); | 326 | default: |
| 327 | break; | 327 | nv04_fbcon_accel_init(info); |
| 328 | }; | 328 | break; |
| 329 | }; | ||
| 330 | } | ||
| 329 | 331 | ||
| 330 | nouveau_fbcon_zfill(dev); | 332 | nouveau_fbcon_zfill(dev); |
| 331 | 333 | ||
| @@ -347,7 +349,7 @@ out: | |||
| 347 | int | 349 | int |
| 348 | nouveau_fbcon_probe(struct drm_device *dev) | 350 | nouveau_fbcon_probe(struct drm_device *dev) |
| 349 | { | 351 | { |
| 350 | NV_DEBUG(dev, "\n"); | 352 | NV_DEBUG_KMS(dev, "\n"); |
| 351 | 353 | ||
| 352 | return drm_fb_helper_single_fb_probe(dev, 32, nouveau_fbcon_create); | 354 | return drm_fb_helper_single_fb_probe(dev, 32, nouveau_fbcon_create); |
| 353 | } | 355 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.c b/drivers/gpu/drm/nouveau/nouveau_grctx.c new file mode 100644 index 000000000000..419f4c2b3b89 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_grctx.c | |||
| @@ -0,0 +1,161 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2009 Red Hat Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | * Authors: Ben Skeggs | ||
| 23 | */ | ||
| 24 | |||
| 25 | #include <linux/firmware.h> | ||
| 26 | |||
| 27 | #include "drmP.h" | ||
| 28 | #include "nouveau_drv.h" | ||
| 29 | |||
| 30 | struct nouveau_ctxprog { | ||
| 31 | uint32_t signature; | ||
| 32 | uint8_t version; | ||
| 33 | uint16_t length; | ||
| 34 | uint32_t data[]; | ||
| 35 | } __attribute__ ((packed)); | ||
| 36 | |||
| 37 | struct nouveau_ctxvals { | ||
| 38 | uint32_t signature; | ||
| 39 | uint8_t version; | ||
| 40 | uint32_t length; | ||
| 41 | struct { | ||
| 42 | uint32_t offset; | ||
| 43 | uint32_t value; | ||
| 44 | } data[]; | ||
| 45 | } __attribute__ ((packed)); | ||
| 46 | |||
| 47 | int | ||
| 48 | nouveau_grctx_prog_load(struct drm_device *dev) | ||
| 49 | { | ||
| 50 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
| 51 | struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; | ||
| 52 | const int chipset = dev_priv->chipset; | ||
| 53 | const struct firmware *fw; | ||
| 54 | const struct nouveau_ctxprog *cp; | ||
| 55 | const struct nouveau_ctxvals *cv; | ||
| 56 | char name[32]; | ||
| 57 | int ret, i; | ||
| 58 | |||
| 59 | if (pgraph->accel_blocked) | ||
| 60 | return -ENODEV; | ||
| 61 | |||
| 62 | if (!pgraph->ctxprog) { | ||
| 63 | sprintf(name, "nouveau/nv%02x.ctxprog", chipset); | ||
| 64 | ret = request_firmware(&fw, name, &dev->pdev->dev); | ||
| 65 | if (ret) { | ||
| 66 | NV_ERROR(dev, "No ctxprog for NV%02x\n", chipset); | ||
| 67 | return ret; | ||
| 68 | } | ||
| 69 | |||
| 70 | pgraph->ctxprog = kmalloc(fw->size, GFP_KERNEL); | ||
| 71 | if (!pgraph->ctxprog) { | ||
| 72 | NV_ERROR(dev, "OOM copying ctxprog\n"); | ||
| 73 | release_firmware(fw); | ||
| 74 | return -ENOMEM; | ||
| 75 | } | ||
| 76 | memcpy(pgraph->ctxprog, fw->data, fw->size); | ||
| 77 | |||
| 78 | cp = pgraph->ctxprog; | ||
| 79 | if (le32_to_cpu(cp->signature) != 0x5043564e || | ||
| 80 | cp->version != 0 || | ||
| 81 | le16_to_cpu(cp->length) != ((fw->size - 7) / 4)) { | ||
| 82 | NV_ERROR(dev, "ctxprog invalid\n"); | ||
| 83 | release_firmware(fw); | ||
| 84 | nouveau_grctx_fini(dev); | ||
| 85 | return -EINVAL; | ||
| 86 | } | ||
| 87 | release_firmware(fw); | ||
| 88 | } | ||
| 89 | |||
| 90 | if (!pgraph->ctxvals) { | ||
| 91 | sprintf(name, "nouveau/nv%02x.ctxvals", chipset); | ||
| 92 | ret = request_firmware(&fw, name, &dev->pdev->dev); | ||
| 93 | if (ret) { | ||
| 94 | NV_ERROR(dev, "No ctxvals for NV%02x\n", chipset); | ||
| 95 | nouveau_grctx_fini(dev); | ||
| 96 | return ret; | ||
| 97 | } | ||
| 98 | |||
| 99 | pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL); | ||
| 100 | if (!pgraph->ctxprog) { | ||
| 101 | NV_ERROR(dev, "OOM copying ctxprog\n"); | ||
| 102 | release_firmware(fw); | ||
| 103 | nouveau_grctx_fini(dev); | ||
| 104 | return -ENOMEM; | ||
| 105 | } | ||
| 106 | memcpy(pgraph->ctxvals, fw->data, fw->size); | ||
| 107 | |||
| 108 | cv = (void *)pgraph->ctxvals; | ||
| 109 | if (le32_to_cpu(cv->signature) != 0x5643564e || | ||
| 110 | cv->version != 0 || | ||
| 111 | le32_to_cpu(cv->length) != ((fw->size - 9) / 8)) { | ||
| 112 | NV_ERROR(dev, "ctxvals invalid\n"); | ||
| 113 | release_firmware(fw); | ||
| 114 | nouveau_grctx_fini(dev); | ||
| 115 | return -EINVAL; | ||
| 116 | } | ||
| 117 | release_firmware(fw); | ||
| 118 | } | ||
| 119 | |||
| 120 | cp = pgraph->ctxprog; | ||
| 121 | |||
| 122 | nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); | ||
| 123 | for (i = 0; i < le16_to_cpu(cp->length); i++) | ||
| 124 | nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, | ||
| 125 | le32_to_cpu(cp->data[i])); | ||
| 126 | |||
| 127 | return 0; | ||
| 128 | } | ||
| 129 | |||
| 130 | void | ||
| 131 | nouveau_grctx_fini(struct drm_device *dev) | ||
| 132 | { | ||
| 133 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
| 134 | struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; | ||
| 135 | |||
| 136 | if (pgraph->ctxprog) { | ||
| 137 | kfree(pgraph->ctxprog); | ||
| 138 | pgraph->ctxprog = NULL; | ||
| 139 | } | ||
| 140 | |||
| 141 | if (pgraph->ctxvals) { | ||
| 142 | kfree(pgraph->ctxprog); | ||
| 143 | pgraph->ctxvals = NULL; | ||
| 144 | } | ||
| 145 | } | ||
| 146 | |||
| 147 | void | ||
| 148 | nouveau_grctx_vals_load(struct drm_device *dev, struct nouveau_gpuobj *ctx) | ||
| 149 | { | ||
| 150 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
| 151 | struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; | ||
| 152 | struct nouveau_ctxvals *cv = pgraph->ctxvals; | ||
| 153 | int i; | ||
| 154 | |||
| 155 | if (!cv) | ||
| 156 | return; | ||
| 157 | |||
| 158 | for (i = 0; i < le32_to_cpu(cv->length); i++) | ||
| 159 | nv_wo32(dev, ctx, le32_to_cpu(cv->data[i].offset), | ||
| 160 | le32_to_cpu(cv->data[i].value)); | ||
| 161 | } | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_grctx.h b/drivers/gpu/drm/nouveau/nouveau_grctx.h new file mode 100644 index 000000000000..5d39c4ce8006 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nouveau_grctx.h | |||
| @@ -0,0 +1,133 @@ | |||
| 1 | #ifndef __NOUVEAU_GRCTX_H__ | ||
| 2 | #define __NOUVEAU_GRCTX_H__ | ||
| 3 | |||
| 4 | struct nouveau_grctx { | ||
| 5 | struct drm_device *dev; | ||
| 6 | |||
| 7 | enum { | ||
| 8 | NOUVEAU_GRCTX_PROG, | ||
| 9 | NOUVEAU_GRCTX_VALS | ||
| 10 | } mode; | ||
| 11 | void *data; | ||
| 12 | |||
| 13 | uint32_t ctxprog_max; | ||
| 14 | uint32_t ctxprog_len; | ||
| 15 | uint32_t ctxprog_reg; | ||
| 16 | int ctxprog_label[32]; | ||
| 17 | uint32_t ctxvals_pos; | ||
| 18 | uint32_t ctxvals_base; | ||
| 19 | }; | ||
| 20 | |||
| 21 | #ifdef CP_CTX | ||
| 22 | static inline void | ||
| 23 | cp_out(struct nouveau_grctx *ctx, uint32_t inst) | ||
| 24 | { | ||
| 25 | uint32_t *ctxprog = ctx->data; | ||
| 26 | |||
| 27 | if (ctx->mode != NOUVEAU_GRCTX_PROG) | ||
| 28 | return; | ||
| 29 | |||
| 30 | BUG_ON(ctx->ctxprog_len == ctx->ctxprog_max); | ||
| 31 | ctxprog[ctx->ctxprog_len++] = inst; | ||
| 32 | } | ||
| 33 | |||
| 34 | static inline void | ||
| 35 | cp_lsr(struct nouveau_grctx *ctx, uint32_t val) | ||
| 36 | { | ||
| 37 | cp_out(ctx, CP_LOAD_SR | val); | ||
| 38 | } | ||
| 39 | |||
| 40 | static inline void | ||
| 41 | cp_ctx(struct nouveau_grctx *ctx, uint32_t reg, uint32_t length) | ||
| 42 | { | ||
| 43 | ctx->ctxprog_reg = (reg - 0x00400000) >> 2; | ||
| 44 | |||
| 45 | ctx->ctxvals_base = ctx->ctxvals_pos; | ||
| 46 | ctx->ctxvals_pos = ctx->ctxvals_base + length; | ||
| 47 | |||
| 48 | if (length > (CP_CTX_COUNT >> CP_CTX_COUNT_SHIFT)) { | ||
| 49 | cp_lsr(ctx, length); | ||
| 50 | length = 0; | ||
| 51 | } | ||
| 52 | |||
| 53 | cp_out(ctx, CP_CTX | (length << CP_CTX_COUNT_SHIFT) | ctx->ctxprog_reg); | ||
| 54 | } | ||
| 55 | |||
| 56 | static inline void | ||
| 57 | cp_name(struct nouveau_grctx *ctx, int name) | ||
| 58 | { | ||
| 59 | uint32_t *ctxprog = ctx->data; | ||
| 60 | int i; | ||
| 61 | |||
| 62 | if (ctx->mode != NOUVEAU_GRCTX_PROG) | ||
| 63 | return; | ||
| 64 | |||
| 65 | ctx->ctxprog_label[name] = ctx->ctxprog_len; | ||
| 66 | for (i = 0; i < ctx->ctxprog_len; i++) { | ||
| 67 | if ((ctxprog[i] & 0xfff00000) != 0xff400000) | ||
| 68 | continue; | ||
| 69 | if ((ctxprog[i] & CP_BRA_IP) != ((name) << CP_BRA_IP_SHIFT)) | ||
| 70 | continue; | ||
| 71 | ctxprog[i] = (ctxprog[i] & 0x00ff00ff) | | ||
| 72 | (ctx->ctxprog_len << CP_BRA_IP_SHIFT); | ||
| 73 | } | ||
| 74 | } | ||
| 75 | |||
| 76 | static inline void | ||
| 77 | _cp_bra(struct nouveau_grctx *ctx, u32 mod, int flag, int state, int name) | ||
| 78 | { | ||
| 79 | int ip = 0; | ||
| 80 | |||
| 81 | if (mod != 2) { | ||
| 82 | ip = ctx->ctxprog_label[name] << CP_BRA_IP_SHIFT; | ||
| 83 | if (ip == 0) | ||
| 84 | ip = 0xff000000 | (name << CP_BRA_IP_SHIFT); | ||
| 85 | } | ||
| 86 | |||
| 87 | cp_out(ctx, CP_BRA | (mod << 18) | ip | flag | | ||
| 88 | (state ? 0 : CP_BRA_IF_CLEAR)); | ||
| 89 | } | ||
| 90 | #define cp_bra(c,f,s,n) _cp_bra((c), 0, CP_FLAG_##f, CP_FLAG_##f##_##s, n) | ||
| 91 | #ifdef CP_BRA_MOD | ||
| 92 | #define cp_cal(c,f,s,n) _cp_bra((c), 1, CP_FLAG_##f, CP_FLAG_##f##_##s, n) | ||
| 93 | #define cp_ret(c,f,s) _cp_bra((c), 2, CP_FLAG_##f, CP_FLAG_##f##_##s, 0) | ||
| 94 | #endif | ||
| 95 | |||
| 96 | static inline void | ||
| 97 | _cp_wait(struct nouveau_grctx *ctx, int flag, int state) | ||
| 98 | { | ||
| 99 | cp_out(ctx, CP_WAIT | flag | (state ? CP_WAIT_SET : 0)); | ||
| 100 | } | ||
| 101 | #define cp_wait(c,f,s) _cp_wait((c), CP_FLAG_##f, CP_FLAG_##f##_##s) | ||
| 102 | |||
| 103 | static inline void | ||
| 104 | _cp_set(struct nouveau_grctx *ctx, int flag, int state) | ||
| 105 | { | ||
| 106 | cp_out(ctx, CP_SET | flag | (state ? CP_SET_1 : 0)); | ||
| 107 | } | ||
| 108 | #define cp_set(c,f,s) _cp_set((c), CP_FLAG_##f, CP_FLAG_##f##_##s) | ||
| 109 | |||
| 110 | static inline void | ||
| 111 | cp_pos(struct nouveau_grctx *ctx, int offset) | ||
| 112 | { | ||
| 113 | ctx->ctxvals_pos = offset; | ||
| 114 | ctx->ctxvals_base = ctx->ctxvals_pos; | ||
| 115 | |||
| 116 | cp_lsr(ctx, ctx->ctxvals_pos); | ||
| 117 | cp_out(ctx, CP_SET_CONTEXT_POINTER); | ||
| 118 | } | ||
| 119 | |||
| 120 | static inline void | ||
| 121 | gr_def(struct nouveau_grctx *ctx, uint32_t reg, uint32_t val) | ||
| 122 | { | ||
| 123 | if (ctx->mode != NOUVEAU_GRCTX_VALS) | ||
| 124 | return; | ||
| 125 | |||
| 126 | reg = (reg - 0x00400000) / 4; | ||
| 127 | reg = (reg - ctx->ctxprog_reg) + ctx->ctxvals_base; | ||
| 128 | |||
| 129 | nv_wo32(ctx->dev, ctx->data, reg, val); | ||
| 130 | } | ||
| 131 | #endif | ||
| 132 | |||
| 133 | #endif | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c index a2c30f4611ba..475ba810bba3 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c +++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c | |||
| @@ -61,12 +61,10 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd, | |||
| 61 | if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) | 61 | if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) |
| 62 | fn = nouveau_compat_ioctls[nr - DRM_COMMAND_BASE]; | 62 | fn = nouveau_compat_ioctls[nr - DRM_COMMAND_BASE]; |
| 63 | #endif | 63 | #endif |
| 64 | lock_kernel(); /* XXX for now */ | ||
| 65 | if (fn != NULL) | 64 | if (fn != NULL) |
| 66 | ret = (*fn)(filp, cmd, arg); | 65 | ret = (*fn)(filp, cmd, arg); |
| 67 | else | 66 | else |
| 68 | ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); | 67 | ret = drm_ioctl(filp, cmd, arg); |
| 69 | unlock_kernel(); | ||
| 70 | 68 | ||
| 71 | return ret; | 69 | return ret; |
| 72 | } | 70 | } |
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c index 2ed41d339f6a..e76ec2d207a9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_state.c +++ b/drivers/gpu/drm/nouveau/nouveau_state.c | |||
| @@ -299,12 +299,57 @@ nouveau_vga_set_decode(void *priv, bool state) | |||
| 299 | return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; | 299 | return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; |
| 300 | } | 300 | } |
| 301 | 301 | ||
| 302 | static int | ||
| 303 | nouveau_card_init_channel(struct drm_device *dev) | ||
| 304 | { | ||
| 305 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
| 306 | struct nouveau_gpuobj *gpuobj; | ||
| 307 | int ret; | ||
| 308 | |||
| 309 | ret = nouveau_channel_alloc(dev, &dev_priv->channel, | ||
| 310 | (struct drm_file *)-2, | ||
| 311 | NvDmaFB, NvDmaTT); | ||
| 312 | if (ret) | ||
| 313 | return ret; | ||
| 314 | |||
| 315 | gpuobj = NULL; | ||
| 316 | ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY, | ||
| 317 | 0, nouveau_mem_fb_amount(dev), | ||
| 318 | NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM, | ||
| 319 | &gpuobj); | ||
| 320 | if (ret) | ||
| 321 | goto out_err; | ||
| 322 | |||
| 323 | ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaVRAM, | ||
| 324 | gpuobj, NULL); | ||
| 325 | if (ret) | ||
| 326 | goto out_err; | ||
| 327 | |||
| 328 | gpuobj = NULL; | ||
| 329 | ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0, | ||
| 330 | dev_priv->gart_info.aper_size, | ||
| 331 | NV_DMA_ACCESS_RW, &gpuobj, NULL); | ||
| 332 | if (ret) | ||
| 333 | goto out_err; | ||
| 334 | |||
| 335 | ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaGART, | ||
| 336 | gpuobj, NULL); | ||
| 337 | if (ret) | ||
| 338 | goto out_err; | ||
| 339 | |||
| 340 | return 0; | ||
| 341 | out_err: | ||
| 342 | nouveau_gpuobj_del(dev, &gpuobj); | ||
| 343 | nouveau_channel_free(dev_priv->channel); | ||
| 344 | dev_priv->channel = NULL; | ||
| 345 | return ret; | ||
| 346 | } | ||
| 347 | |||
| 302 | int | 348 | int |
| 303 | nouveau_card_init(struct drm_device *dev) | 349 | nouveau_card_init(struct drm_device *dev) |
| 304 | { | 350 | { |
| 305 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 351 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
| 306 | struct nouveau_engine *engine; | 352 | struct nouveau_engine *engine; |
| 307 | struct nouveau_gpuobj *gpuobj; | ||
| 308 | int ret; | 353 | int ret; |
| 309 | 354 | ||
| 310 | NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state); | 355 | NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state); |
| @@ -317,7 +362,7 @@ nouveau_card_init(struct drm_device *dev) | |||
| 317 | /* Initialise internal driver API hooks */ | 362 | /* Initialise internal driver API hooks */ |
| 318 | ret = nouveau_init_engine_ptrs(dev); | 363 | ret = nouveau_init_engine_ptrs(dev); |
| 319 | if (ret) | 364 | if (ret) |
| 320 | return ret; | 365 | goto out; |
| 321 | engine = &dev_priv->engine; | 366 | engine = &dev_priv->engine; |
| 322 | dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED; | 367 | dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED; |
| 323 | 368 | ||
| @@ -325,12 +370,12 @@ nouveau_card_init(struct drm_device *dev) | |||
| 325 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | 370 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
| 326 | ret = nouveau_bios_init(dev); | 371 | ret = nouveau_bios_init(dev); |
| 327 | if (ret) | 372 | if (ret) |
| 328 | return ret; | 373 | goto out; |
| 329 | } | 374 | } |
| 330 | 375 | ||
| 331 | ret = nouveau_gpuobj_early_init(dev); | 376 | ret = nouveau_gpuobj_early_init(dev); |
| 332 | if (ret) | 377 | if (ret) |
| 333 | return ret; | 378 | goto out_bios; |
| 334 | 379 | ||
| 335 | /* Initialise instance memory, must happen before mem_init so we | 380 | /* Initialise instance memory, must happen before mem_init so we |
| 336 | * know exactly how much VRAM we're able to use for "normal" | 381 | * know exactly how much VRAM we're able to use for "normal" |
| @@ -338,100 +383,68 @@ nouveau_card_init(struct drm_device *dev) | |||
| 338 | */ | 383 | */ |
| 339 | ret = engine->instmem.init(dev); | 384 | ret = engine->instmem.init(dev); |
| 340 | if (ret) | 385 | if (ret) |
| 341 | return ret; | 386 | goto out_gpuobj_early; |
| 342 | 387 | ||
| 343 | /* Setup the memory manager */ | 388 | /* Setup the memory manager */ |
| 344 | ret = nouveau_mem_init(dev); | 389 | ret = nouveau_mem_init(dev); |
| 345 | if (ret) | 390 | if (ret) |
| 346 | return ret; | 391 | goto out_instmem; |
| 347 | 392 | ||
| 348 | ret = nouveau_gpuobj_init(dev); | 393 | ret = nouveau_gpuobj_init(dev); |
| 349 | if (ret) | 394 | if (ret) |
| 350 | return ret; | 395 | goto out_mem; |
| 351 | 396 | ||
| 352 | /* PMC */ | 397 | /* PMC */ |
| 353 | ret = engine->mc.init(dev); | 398 | ret = engine->mc.init(dev); |
| 354 | if (ret) | 399 | if (ret) |
| 355 | return ret; | 400 | goto out_gpuobj; |
| 356 | 401 | ||
| 357 | /* PTIMER */ | 402 | /* PTIMER */ |
| 358 | ret = engine->timer.init(dev); | 403 | ret = engine->timer.init(dev); |
| 359 | if (ret) | 404 | if (ret) |
| 360 | return ret; | 405 | goto out_mc; |
| 361 | 406 | ||
| 362 | /* PFB */ | 407 | /* PFB */ |
| 363 | ret = engine->fb.init(dev); | 408 | ret = engine->fb.init(dev); |
| 364 | if (ret) | 409 | if (ret) |
| 365 | return ret; | 410 | goto out_timer; |
| 366 | 411 | ||
| 367 | /* PGRAPH */ | 412 | /* PGRAPH */ |
| 368 | ret = engine->graph.init(dev); | 413 | ret = engine->graph.init(dev); |
| 369 | if (ret) | 414 | if (ret) |
| 370 | return ret; | 415 | goto out_fb; |
| 371 | 416 | ||
| 372 | /* PFIFO */ | 417 | /* PFIFO */ |
| 373 | ret = engine->fifo.init(dev); | 418 | ret = engine->fifo.init(dev); |
| 374 | if (ret) | 419 | if (ret) |
| 375 | return ret; | 420 | goto out_graph; |
| 376 | 421 | ||
| 377 | /* this call irq_preinstall, register irq handler and | 422 | /* this call irq_preinstall, register irq handler and |
| 378 | * call irq_postinstall | 423 | * call irq_postinstall |
| 379 | */ | 424 | */ |
| 380 | ret = drm_irq_install(dev); | 425 | ret = drm_irq_install(dev); |
| 381 | if (ret) | 426 | if (ret) |
| 382 | return ret; | 427 | goto out_fifo; |
| 383 | 428 | ||
| 384 | ret = drm_vblank_init(dev, 0); | 429 | ret = drm_vblank_init(dev, 0); |
| 385 | if (ret) | 430 | if (ret) |
| 386 | return ret; | 431 | goto out_irq; |
| 387 | 432 | ||
| 388 | /* what about PVIDEO/PCRTC/PRAMDAC etc? */ | 433 | /* what about PVIDEO/PCRTC/PRAMDAC etc? */ |
| 389 | 434 | ||
| 390 | ret = nouveau_channel_alloc(dev, &dev_priv->channel, | 435 | if (!engine->graph.accel_blocked) { |
| 391 | (struct drm_file *)-2, | 436 | ret = nouveau_card_init_channel(dev); |
| 392 | NvDmaFB, NvDmaTT); | 437 | if (ret) |
| 393 | if (ret) | 438 | goto out_irq; |
| 394 | return ret; | ||
| 395 | |||
| 396 | gpuobj = NULL; | ||
| 397 | ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY, | ||
| 398 | 0, nouveau_mem_fb_amount(dev), | ||
| 399 | NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM, | ||
| 400 | &gpuobj); | ||
| 401 | if (ret) | ||
| 402 | return ret; | ||
| 403 | |||
| 404 | ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaVRAM, | ||
| 405 | gpuobj, NULL); | ||
| 406 | if (ret) { | ||
| 407 | nouveau_gpuobj_del(dev, &gpuobj); | ||
| 408 | return ret; | ||
| 409 | } | ||
| 410 | |||
| 411 | gpuobj = NULL; | ||
| 412 | ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0, | ||
| 413 | dev_priv->gart_info.aper_size, | ||
| 414 | NV_DMA_ACCESS_RW, &gpuobj, NULL); | ||
| 415 | if (ret) | ||
| 416 | return ret; | ||
| 417 | |||
| 418 | ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaGART, | ||
| 419 | gpuobj, NULL); | ||
| 420 | if (ret) { | ||
| 421 | nouveau_gpuobj_del(dev, &gpuobj); | ||
| 422 | return ret; | ||
| 423 | } | 439 | } |
| 424 | 440 | ||
| 425 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { | 441 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
| 426 | if (dev_priv->card_type >= NV_50) { | 442 | if (dev_priv->card_type >= NV_50) |
| 427 | ret = nv50_display_create(dev); | 443 | ret = nv50_display_create(dev); |
| 428 | if (ret) | 444 | else |
| 429 | return ret; | ||
| 430 | } else { | ||
| 431 | ret = nv04_display_create(dev); | 445 | ret = nv04_display_create(dev); |
| 432 | if (ret) | 446 | if (ret) |
| 433 | return ret; | 447 | goto out_irq; |
| 434 | } | ||
| 435 | } | 448 | } |
| 436 | 449 | ||
| 437 | ret = nouveau_backlight_init(dev); | 450 | ret = nouveau_backlight_init(dev); |
| @@ -444,6 +457,32 @@ nouveau_card_init(struct drm_device *dev) | |||
| 444 | drm_helper_initial_config(dev); | 457 | drm_helper_initial_config(dev); |
| 445 | 458 | ||
| 446 | return 0; | 459 | return 0; |
| 460 | |||
| 461 | out_irq: | ||
| 462 | drm_irq_uninstall(dev); | ||
| 463 | out_fifo: | ||
| 464 | engine->fifo.takedown(dev); | ||
| 465 | out_graph: | ||
| 466 | engine->graph.takedown(dev); | ||
| 467 | out_fb: | ||
| 468 | engine->fb.takedown(dev); | ||
| 469 | out_timer: | ||
| 470 | engine->timer.takedown(dev); | ||
| 471 | out_mc: | ||
| 472 | engine->mc.takedown(dev); | ||
| 473 | out_gpuobj: | ||
| 474 | nouveau_gpuobj_takedown(dev); | ||
| 475 | out_mem: | ||
| 476 | nouveau_mem_close(dev); | ||
| 477 | out_instmem: | ||
| 478 | engine->instmem.takedown(dev); | ||
| 479 | out_gpuobj_early: | ||
| 480 | nouveau_gpuobj_late_takedown(dev); | ||
| 481 | out_bios: | ||
| 482 | nouveau_bios_takedown(dev); | ||
| 483 | out: | ||
| 484 | vga_client_register(dev->pdev, NULL, NULL, NULL); | ||
| 485 | return ret; | ||
| 447 | } | 486 | } |
| 448 | 487 | ||
| 449 | static void nouveau_card_takedown(struct drm_device *dev) | 488 | static void nouveau_card_takedown(struct drm_device *dev) |
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c index b91363606055..d2f143ed97c1 100644 --- a/drivers/gpu/drm/nouveau/nv04_crtc.c +++ b/drivers/gpu/drm/nouveau/nv04_crtc.c | |||
| @@ -143,10 +143,10 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod | |||
| 143 | state->pllsel |= nv_crtc->index ? PLLSEL_VPLL2_MASK : PLLSEL_VPLL1_MASK; | 143 | state->pllsel |= nv_crtc->index ? PLLSEL_VPLL2_MASK : PLLSEL_VPLL1_MASK; |
| 144 | 144 | ||
| 145 | if (pv->NM2) | 145 | if (pv->NM2) |
| 146 | NV_TRACE(dev, "vpll: n1 %d n2 %d m1 %d m2 %d log2p %d\n", | 146 | NV_DEBUG_KMS(dev, "vpll: n1 %d n2 %d m1 %d m2 %d log2p %d\n", |
| 147 | pv->N1, pv->N2, pv->M1, pv->M2, pv->log2P); | 147 | pv->N1, pv->N2, pv->M1, pv->M2, pv->log2P); |
| 148 | else | 148 | else |
| 149 | NV_TRACE(dev, "vpll: n %d m %d log2p %d\n", | 149 | NV_DEBUG_KMS(dev, "vpll: n %d m %d log2p %d\n", |
| 150 | pv->N1, pv->M1, pv->log2P); | 150 | pv->N1, pv->M1, pv->log2P); |
| 151 | 151 | ||
| 152 | nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset); | 152 | nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset); |
| @@ -160,7 +160,7 @@ nv_crtc_dpms(struct drm_crtc *crtc, int mode) | |||
| 160 | unsigned char seq1 = 0, crtc17 = 0; | 160 | unsigned char seq1 = 0, crtc17 = 0; |
| 161 | unsigned char crtc1A; | 161 | unsigned char crtc1A; |
| 162 | 162 | ||
| 163 | NV_TRACE(dev, "Setting dpms mode %d on CRTC %d\n", mode, | 163 | NV_DEBUG_KMS(dev, "Setting dpms mode %d on CRTC %d\n", mode, |
| 164 | nv_crtc->index); | 164 | nv_crtc->index); |
| 165 | 165 | ||
| 166 | if (nv_crtc->last_dpms == mode) /* Don't do unnecesary mode changes. */ | 166 | if (nv_crtc->last_dpms == mode) /* Don't do unnecesary mode changes. */ |
| @@ -603,7 +603,7 @@ nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
| 603 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | 603 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); |
| 604 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 604 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
| 605 | 605 | ||
| 606 | NV_DEBUG(dev, "CTRC mode on CRTC %d:\n", nv_crtc->index); | 606 | NV_DEBUG_KMS(dev, "CTRC mode on CRTC %d:\n", nv_crtc->index); |
| 607 | drm_mode_debug_printmodeline(adjusted_mode); | 607 | drm_mode_debug_printmodeline(adjusted_mode); |
| 608 | 608 | ||
| 609 | /* unlock must come after turning off FP_TG_CONTROL in output_prepare */ | 609 | /* unlock must come after turning off FP_TG_CONTROL in output_prepare */ |
| @@ -703,7 +703,7 @@ static void nv_crtc_destroy(struct drm_crtc *crtc) | |||
| 703 | { | 703 | { |
| 704 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | 704 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); |
| 705 | 705 | ||
| 706 | NV_DEBUG(crtc->dev, "\n"); | 706 | NV_DEBUG_KMS(crtc->dev, "\n"); |
| 707 | 707 | ||
| 708 | if (!nv_crtc) | 708 | if (!nv_crtc) |
| 709 | return; | 709 | return; |
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c index a5fa51714e87..d9f32879ba38 100644 --- a/drivers/gpu/drm/nouveau/nv04_dac.c +++ b/drivers/gpu/drm/nouveau/nv04_dac.c | |||
| @@ -205,7 +205,7 @@ out: | |||
| 205 | NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1); | 205 | NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1); |
| 206 | 206 | ||
| 207 | if (blue == 0x18) { | 207 | if (blue == 0x18) { |
| 208 | NV_TRACE(dev, "Load detected on head A\n"); | 208 | NV_INFO(dev, "Load detected on head A\n"); |
| 209 | return connector_status_connected; | 209 | return connector_status_connected; |
| 210 | } | 210 | } |
| 211 | 211 | ||
| @@ -350,14 +350,10 @@ static void nv04_dac_mode_set(struct drm_encoder *encoder, | |||
| 350 | struct drm_display_mode *mode, | 350 | struct drm_display_mode *mode, |
| 351 | struct drm_display_mode *adjusted_mode) | 351 | struct drm_display_mode *adjusted_mode) |
| 352 | { | 352 | { |
| 353 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | ||
| 354 | struct drm_device *dev = encoder->dev; | 353 | struct drm_device *dev = encoder->dev; |
| 355 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 354 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
| 356 | int head = nouveau_crtc(encoder->crtc)->index; | 355 | int head = nouveau_crtc(encoder->crtc)->index; |
| 357 | 356 | ||
| 358 | NV_TRACE(dev, "%s called for encoder %d\n", __func__, | ||
| 359 | nv_encoder->dcb->index); | ||
| 360 | |||
| 361 | if (nv_gf4_disp_arch(dev)) { | 357 | if (nv_gf4_disp_arch(dev)) { |
| 362 | struct drm_encoder *rebind; | 358 | struct drm_encoder *rebind; |
| 363 | uint32_t dac_offset = nv04_dac_output_offset(encoder); | 359 | uint32_t dac_offset = nv04_dac_output_offset(encoder); |
| @@ -466,7 +462,7 @@ static void nv04_dac_destroy(struct drm_encoder *encoder) | |||
| 466 | { | 462 | { |
| 467 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | 463 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); |
| 468 | 464 | ||
| 469 | NV_DEBUG(encoder->dev, "\n"); | 465 | NV_DEBUG_KMS(encoder->dev, "\n"); |
| 470 | 466 | ||
| 471 | drm_encoder_cleanup(encoder); | 467 | drm_encoder_cleanup(encoder); |
| 472 | kfree(nv_encoder); | 468 | kfree(nv_encoder); |
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c index e5b33339d595..483f875bdb6a 100644 --- a/drivers/gpu/drm/nouveau/nv04_dfp.c +++ b/drivers/gpu/drm/nouveau/nv04_dfp.c | |||
| @@ -261,7 +261,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder, | |||
| 261 | struct drm_display_mode *output_mode = &nv_encoder->mode; | 261 | struct drm_display_mode *output_mode = &nv_encoder->mode; |
| 262 | uint32_t mode_ratio, panel_ratio; | 262 | uint32_t mode_ratio, panel_ratio; |
| 263 | 263 | ||
| 264 | NV_DEBUG(dev, "Output mode on CRTC %d:\n", nv_crtc->index); | 264 | NV_DEBUG_KMS(dev, "Output mode on CRTC %d:\n", nv_crtc->index); |
| 265 | drm_mode_debug_printmodeline(output_mode); | 265 | drm_mode_debug_printmodeline(output_mode); |
| 266 | 266 | ||
| 267 | /* Initialize the FP registers in this CRTC. */ | 267 | /* Initialize the FP registers in this CRTC. */ |
| @@ -413,7 +413,9 @@ static void nv04_dfp_commit(struct drm_encoder *encoder) | |||
| 413 | struct dcb_entry *dcbe = nv_encoder->dcb; | 413 | struct dcb_entry *dcbe = nv_encoder->dcb; |
| 414 | int head = nouveau_crtc(encoder->crtc)->index; | 414 | int head = nouveau_crtc(encoder->crtc)->index; |
| 415 | 415 | ||
| 416 | NV_TRACE(dev, "%s called for encoder %d\n", __func__, nv_encoder->dcb->index); | 416 | NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n", |
| 417 | drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base), | ||
| 418 | nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); | ||
| 417 | 419 | ||
| 418 | if (dcbe->type == OUTPUT_TMDS) | 420 | if (dcbe->type == OUTPUT_TMDS) |
| 419 | run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock); | 421 | run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock); |
| @@ -550,7 +552,7 @@ static void nv04_dfp_destroy(struct drm_encoder *encoder) | |||
| 550 | { | 552 | { |
| 551 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | 553 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); |
| 552 | 554 | ||
| 553 | NV_DEBUG(encoder->dev, "\n"); | 555 | NV_DEBUG_KMS(encoder->dev, "\n"); |
| 554 | 556 | ||
| 555 | drm_encoder_cleanup(encoder); | 557 | drm_encoder_cleanup(encoder); |
| 556 | kfree(nv_encoder); | 558 | kfree(nv_encoder); |
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c index b47c757ff48b..ef77215fa5b9 100644 --- a/drivers/gpu/drm/nouveau/nv04_display.c +++ b/drivers/gpu/drm/nouveau/nv04_display.c | |||
| @@ -99,10 +99,11 @@ nv04_display_create(struct drm_device *dev) | |||
| 99 | uint16_t connector[16] = { 0 }; | 99 | uint16_t connector[16] = { 0 }; |
| 100 | int i, ret; | 100 | int i, ret; |
| 101 | 101 | ||
| 102 | NV_DEBUG(dev, "\n"); | 102 | NV_DEBUG_KMS(dev, "\n"); |
| 103 | 103 | ||
| 104 | if (nv_two_heads(dev)) | 104 | if (nv_two_heads(dev)) |
| 105 | nv04_display_store_initial_head_owner(dev); | 105 | nv04_display_store_initial_head_owner(dev); |
| 106 | nouveau_hw_save_vga_fonts(dev, 1); | ||
| 106 | 107 | ||
| 107 | drm_mode_config_init(dev); | 108 | drm_mode_config_init(dev); |
| 108 | drm_mode_create_scaling_mode_property(dev); | 109 | drm_mode_create_scaling_mode_property(dev); |
| @@ -203,8 +204,6 @@ nv04_display_create(struct drm_device *dev) | |||
| 203 | /* Save previous state */ | 204 | /* Save previous state */ |
| 204 | NVLockVgaCrtcs(dev, false); | 205 | NVLockVgaCrtcs(dev, false); |
| 205 | 206 | ||
| 206 | nouveau_hw_save_vga_fonts(dev, 1); | ||
| 207 | |||
| 208 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) | 207 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) |
| 209 | crtc->funcs->save(crtc); | 208 | crtc->funcs->save(crtc); |
| 210 | 209 | ||
| @@ -223,7 +222,7 @@ nv04_display_destroy(struct drm_device *dev) | |||
| 223 | struct drm_encoder *encoder; | 222 | struct drm_encoder *encoder; |
| 224 | struct drm_crtc *crtc; | 223 | struct drm_crtc *crtc; |
| 225 | 224 | ||
| 226 | NV_DEBUG(dev, "\n"); | 225 | NV_DEBUG_KMS(dev, "\n"); |
| 227 | 226 | ||
| 228 | /* Turn every CRTC off. */ | 227 | /* Turn every CRTC off. */ |
| 229 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 228 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
| @@ -246,9 +245,9 @@ nv04_display_destroy(struct drm_device *dev) | |||
| 246 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) | 245 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) |
| 247 | crtc->funcs->restore(crtc); | 246 | crtc->funcs->restore(crtc); |
| 248 | 247 | ||
| 249 | nouveau_hw_save_vga_fonts(dev, 0); | ||
| 250 | |||
| 251 | drm_mode_config_cleanup(dev); | 248 | drm_mode_config_cleanup(dev); |
| 249 | |||
| 250 | nouveau_hw_save_vga_fonts(dev, 0); | ||
| 252 | } | 251 | } |
| 253 | 252 | ||
| 254 | void | 253 | void |
diff --git a/drivers/gpu/drm/nouveau/nv04_graph.c b/drivers/gpu/drm/nouveau/nv04_graph.c index 396ee92118f6..d561d773c0f4 100644 --- a/drivers/gpu/drm/nouveau/nv04_graph.c +++ b/drivers/gpu/drm/nouveau/nv04_graph.c | |||
| @@ -543,7 +543,7 @@ nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass, | |||
| 543 | 543 | ||
| 544 | nv_wi32(dev, instance, tmp); | 544 | nv_wi32(dev, instance, tmp); |
| 545 | nv_wr32(dev, NV04_PGRAPH_CTX_SWITCH1, tmp); | 545 | nv_wr32(dev, NV04_PGRAPH_CTX_SWITCH1, tmp); |
| 546 | nv_wr32(dev, NV04_PGRAPH_CTX_CACHE1 + subc, tmp); | 546 | nv_wr32(dev, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp); |
| 547 | return 0; | 547 | return 0; |
| 548 | } | 548 | } |
| 549 | 549 | ||
diff --git a/drivers/gpu/drm/nouveau/nv10_graph.c b/drivers/gpu/drm/nouveau/nv10_graph.c index 6bf6804bb0ef..6870e0ee2e7e 100644 --- a/drivers/gpu/drm/nouveau/nv10_graph.c +++ b/drivers/gpu/drm/nouveau/nv10_graph.c | |||
| @@ -389,49 +389,50 @@ struct graph_state { | |||
| 389 | int nv10[ARRAY_SIZE(nv10_graph_ctx_regs)]; | 389 | int nv10[ARRAY_SIZE(nv10_graph_ctx_regs)]; |
| 390 | int nv17[ARRAY_SIZE(nv17_graph_ctx_regs)]; | 390 | int nv17[ARRAY_SIZE(nv17_graph_ctx_regs)]; |
| 391 | struct pipe_state pipe_state; | 391 | struct pipe_state pipe_state; |
| 392 | uint32_t lma_window[4]; | ||
| 392 | }; | 393 | }; |
| 393 | 394 | ||
| 395 | #define PIPE_SAVE(dev, state, addr) \ | ||
| 396 | do { \ | ||
| 397 | int __i; \ | ||
| 398 | nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \ | ||
| 399 | for (__i = 0; __i < ARRAY_SIZE(state); __i++) \ | ||
| 400 | state[__i] = nv_rd32(dev, NV10_PGRAPH_PIPE_DATA); \ | ||
| 401 | } while (0) | ||
| 402 | |||
| 403 | #define PIPE_RESTORE(dev, state, addr) \ | ||
| 404 | do { \ | ||
| 405 | int __i; \ | ||
| 406 | nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \ | ||
| 407 | for (__i = 0; __i < ARRAY_SIZE(state); __i++) \ | ||
| 408 | nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, state[__i]); \ | ||
| 409 | } while (0) | ||
| 410 | |||
| 394 | static void nv10_graph_save_pipe(struct nouveau_channel *chan) | 411 | static void nv10_graph_save_pipe(struct nouveau_channel *chan) |
| 395 | { | 412 | { |
| 396 | struct drm_device *dev = chan->dev; | 413 | struct drm_device *dev = chan->dev; |
| 397 | struct graph_state *pgraph_ctx = chan->pgraph_ctx; | 414 | struct graph_state *pgraph_ctx = chan->pgraph_ctx; |
| 398 | struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state; | 415 | struct pipe_state *pipe = &pgraph_ctx->pipe_state; |
| 399 | int i; | 416 | |
| 400 | #define PIPE_SAVE(addr) \ | 417 | PIPE_SAVE(dev, pipe->pipe_0x4400, 0x4400); |
| 401 | do { \ | 418 | PIPE_SAVE(dev, pipe->pipe_0x0200, 0x0200); |
| 402 | nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \ | 419 | PIPE_SAVE(dev, pipe->pipe_0x6400, 0x6400); |
| 403 | for (i = 0; i < ARRAY_SIZE(fifo_pipe_state->pipe_##addr); i++) \ | 420 | PIPE_SAVE(dev, pipe->pipe_0x6800, 0x6800); |
| 404 | fifo_pipe_state->pipe_##addr[i] = nv_rd32(dev, NV10_PGRAPH_PIPE_DATA); \ | 421 | PIPE_SAVE(dev, pipe->pipe_0x6c00, 0x6c00); |
| 405 | } while (0) | 422 | PIPE_SAVE(dev, pipe->pipe_0x7000, 0x7000); |
| 406 | 423 | PIPE_SAVE(dev, pipe->pipe_0x7400, 0x7400); | |
| 407 | PIPE_SAVE(0x4400); | 424 | PIPE_SAVE(dev, pipe->pipe_0x7800, 0x7800); |
| 408 | PIPE_SAVE(0x0200); | 425 | PIPE_SAVE(dev, pipe->pipe_0x0040, 0x0040); |
| 409 | PIPE_SAVE(0x6400); | 426 | PIPE_SAVE(dev, pipe->pipe_0x0000, 0x0000); |
| 410 | PIPE_SAVE(0x6800); | ||
| 411 | PIPE_SAVE(0x6c00); | ||
| 412 | PIPE_SAVE(0x7000); | ||
| 413 | PIPE_SAVE(0x7400); | ||
| 414 | PIPE_SAVE(0x7800); | ||
| 415 | PIPE_SAVE(0x0040); | ||
| 416 | PIPE_SAVE(0x0000); | ||
| 417 | |||
| 418 | #undef PIPE_SAVE | ||
| 419 | } | 427 | } |
| 420 | 428 | ||
| 421 | static void nv10_graph_load_pipe(struct nouveau_channel *chan) | 429 | static void nv10_graph_load_pipe(struct nouveau_channel *chan) |
| 422 | { | 430 | { |
| 423 | struct drm_device *dev = chan->dev; | 431 | struct drm_device *dev = chan->dev; |
| 424 | struct graph_state *pgraph_ctx = chan->pgraph_ctx; | 432 | struct graph_state *pgraph_ctx = chan->pgraph_ctx; |
| 425 | struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state; | 433 | struct pipe_state *pipe = &pgraph_ctx->pipe_state; |
| 426 | int i; | ||
| 427 | uint32_t xfmode0, xfmode1; | 434 | uint32_t xfmode0, xfmode1; |
| 428 | #define PIPE_RESTORE(addr) \ | 435 | int i; |
| 429 | do { \ | ||
| 430 | nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \ | ||
| 431 | for (i = 0; i < ARRAY_SIZE(fifo_pipe_state->pipe_##addr); i++) \ | ||
| 432 | nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, fifo_pipe_state->pipe_##addr[i]); \ | ||
| 433 | } while (0) | ||
| 434 | |||
| 435 | 436 | ||
| 436 | nouveau_wait_for_idle(dev); | 437 | nouveau_wait_for_idle(dev); |
| 437 | /* XXX check haiku comments */ | 438 | /* XXX check haiku comments */ |
| @@ -457,24 +458,22 @@ static void nv10_graph_load_pipe(struct nouveau_channel *chan) | |||
| 457 | nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000008); | 458 | nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000008); |
| 458 | 459 | ||
| 459 | 460 | ||
| 460 | PIPE_RESTORE(0x0200); | 461 | PIPE_RESTORE(dev, pipe->pipe_0x0200, 0x0200); |
| 461 | nouveau_wait_for_idle(dev); | 462 | nouveau_wait_for_idle(dev); |
| 462 | 463 | ||
| 463 | /* restore XFMODE */ | 464 | /* restore XFMODE */ |
| 464 | nv_wr32(dev, NV10_PGRAPH_XFMODE0, xfmode0); | 465 | nv_wr32(dev, NV10_PGRAPH_XFMODE0, xfmode0); |
| 465 | nv_wr32(dev, NV10_PGRAPH_XFMODE1, xfmode1); | 466 | nv_wr32(dev, NV10_PGRAPH_XFMODE1, xfmode1); |
| 466 | PIPE_RESTORE(0x6400); | 467 | PIPE_RESTORE(dev, pipe->pipe_0x6400, 0x6400); |
| 467 | PIPE_RESTORE(0x6800); | 468 | PIPE_RESTORE(dev, pipe->pipe_0x6800, 0x6800); |
| 468 | PIPE_RESTORE(0x6c00); | 469 | PIPE_RESTORE(dev, pipe->pipe_0x6c00, 0x6c00); |
| 469 | PIPE_RESTORE(0x7000); | 470 | PIPE_RESTORE(dev, pipe->pipe_0x7000, 0x7000); |
| 470 | PIPE_RESTORE(0x7400); | 471 | PIPE_RESTORE(dev, pipe->pipe_0x7400, 0x7400); |
| 471 | PIPE_RESTORE(0x7800); | 472 | PIPE_RESTORE(dev, pipe->pipe_0x7800, 0x7800); |
| 472 | PIPE_RESTORE(0x4400); | 473 | PIPE_RESTORE(dev, pipe->pipe_0x4400, 0x4400); |
| 473 | PIPE_RESTORE(0x0000); | 474 | PIPE_RESTORE(dev, pipe->pipe_0x0000, 0x0000); |
| 474 | PIPE_RESTORE(0x0040); | 475 | PIPE_RESTORE(dev, pipe->pipe_0x0040, 0x0040); |
| 475 | nouveau_wait_for_idle(dev); | 476 | nouveau_wait_for_idle(dev); |
| 476 | |||
| 477 | #undef PIPE_RESTORE | ||
| 478 | } | 477 | } |
| 479 | 478 | ||
| 480 | static void nv10_graph_create_pipe(struct nouveau_channel *chan) | 479 | static void nv10_graph_create_pipe(struct nouveau_channel *chan) |
| @@ -832,6 +831,9 @@ int nv10_graph_init(struct drm_device *dev) | |||
| 832 | (1<<31)); | 831 | (1<<31)); |
| 833 | if (dev_priv->chipset >= 0x17) { | 832 | if (dev_priv->chipset >= 0x17) { |
| 834 | nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x1f000000); | 833 | nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x1f000000); |
| 834 | nv_wr32(dev, 0x400a10, 0x3ff3fb6); | ||
| 835 | nv_wr32(dev, 0x400838, 0x2f8684); | ||
| 836 | nv_wr32(dev, 0x40083c, 0x115f3f); | ||
| 835 | nv_wr32(dev, 0x004006b0, 0x40000020); | 837 | nv_wr32(dev, 0x004006b0, 0x40000020); |
| 836 | } else | 838 | } else |
| 837 | nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000); | 839 | nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000); |
| @@ -867,6 +869,115 @@ void nv10_graph_takedown(struct drm_device *dev) | |||
| 867 | { | 869 | { |
| 868 | } | 870 | } |
| 869 | 871 | ||
| 872 | static int | ||
| 873 | nv17_graph_mthd_lma_window(struct nouveau_channel *chan, int grclass, | ||
| 874 | int mthd, uint32_t data) | ||
| 875 | { | ||
| 876 | struct drm_device *dev = chan->dev; | ||
| 877 | struct graph_state *ctx = chan->pgraph_ctx; | ||
| 878 | struct pipe_state *pipe = &ctx->pipe_state; | ||
| 879 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
| 880 | struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; | ||
| 881 | uint32_t pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3]; | ||
| 882 | uint32_t xfmode0, xfmode1; | ||
| 883 | int i; | ||
| 884 | |||
| 885 | ctx->lma_window[(mthd - 0x1638) / 4] = data; | ||
| 886 | |||
| 887 | if (mthd != 0x1644) | ||
| 888 | return 0; | ||
| 889 | |||
| 890 | nouveau_wait_for_idle(dev); | ||
| 891 | |||
| 892 | PIPE_SAVE(dev, pipe_0x0040, 0x0040); | ||
| 893 | PIPE_SAVE(dev, pipe->pipe_0x0200, 0x0200); | ||
| 894 | |||
| 895 | PIPE_RESTORE(dev, ctx->lma_window, 0x6790); | ||
| 896 | |||
| 897 | nouveau_wait_for_idle(dev); | ||
| 898 | |||
| 899 | xfmode0 = nv_rd32(dev, NV10_PGRAPH_XFMODE0); | ||
| 900 | xfmode1 = nv_rd32(dev, NV10_PGRAPH_XFMODE1); | ||
| 901 | |||
| 902 | PIPE_SAVE(dev, pipe->pipe_0x4400, 0x4400); | ||
| 903 | PIPE_SAVE(dev, pipe_0x64c0, 0x64c0); | ||
| 904 | PIPE_SAVE(dev, pipe_0x6ab0, 0x6ab0); | ||
| 905 | PIPE_SAVE(dev, pipe_0x6a80, 0x6a80); | ||
| 906 | |||
| 907 | nouveau_wait_for_idle(dev); | ||
| 908 | |||
| 909 | nv_wr32(dev, NV10_PGRAPH_XFMODE0, 0x10000000); | ||
| 910 | nv_wr32(dev, NV10_PGRAPH_XFMODE1, 0x00000000); | ||
| 911 | nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0); | ||
| 912 | for (i = 0; i < 4; i++) | ||
| 913 | nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000); | ||
| 914 | for (i = 0; i < 4; i++) | ||
| 915 | nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000); | ||
| 916 | |||
| 917 | nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0); | ||
| 918 | for (i = 0; i < 3; i++) | ||
| 919 | nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000); | ||
| 920 | |||
| 921 | nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80); | ||
| 922 | for (i = 0; i < 3; i++) | ||
| 923 | nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000); | ||
| 924 | |||
| 925 | nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040); | ||
| 926 | nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000008); | ||
| 927 | |||
| 928 | PIPE_RESTORE(dev, pipe->pipe_0x0200, 0x0200); | ||
| 929 | |||
| 930 | nouveau_wait_for_idle(dev); | ||
| 931 | |||
| 932 | PIPE_RESTORE(dev, pipe_0x0040, 0x0040); | ||
| 933 | |||
| 934 | nv_wr32(dev, NV10_PGRAPH_XFMODE0, xfmode0); | ||
| 935 | nv_wr32(dev, NV10_PGRAPH_XFMODE1, xfmode1); | ||
| 936 | |||
| 937 | PIPE_RESTORE(dev, pipe_0x64c0, 0x64c0); | ||
| 938 | PIPE_RESTORE(dev, pipe_0x6ab0, 0x6ab0); | ||
| 939 | PIPE_RESTORE(dev, pipe_0x6a80, 0x6a80); | ||
| 940 | PIPE_RESTORE(dev, pipe->pipe_0x4400, 0x4400); | ||
| 941 | |||
| 942 | nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000000c0); | ||
| 943 | nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000); | ||
| 944 | |||
| 945 | nouveau_wait_for_idle(dev); | ||
| 946 | |||
| 947 | pgraph->fifo_access(dev, true); | ||
| 948 | |||
| 949 | return 0; | ||
| 950 | } | ||
| 951 | |||
| 952 | static int | ||
| 953 | nv17_graph_mthd_lma_enable(struct nouveau_channel *chan, int grclass, | ||
| 954 | int mthd, uint32_t data) | ||
| 955 | { | ||
| 956 | struct drm_device *dev = chan->dev; | ||
| 957 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
| 958 | struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; | ||
| 959 | |||
| 960 | nouveau_wait_for_idle(dev); | ||
| 961 | |||
| 962 | nv_wr32(dev, NV10_PGRAPH_DEBUG_4, | ||
| 963 | nv_rd32(dev, NV10_PGRAPH_DEBUG_4) | 0x1 << 8); | ||
| 964 | nv_wr32(dev, 0x004006b0, | ||
| 965 | nv_rd32(dev, 0x004006b0) | 0x8 << 24); | ||
| 966 | |||
| 967 | pgraph->fifo_access(dev, true); | ||
| 968 | |||
| 969 | return 0; | ||
| 970 | } | ||
| 971 | |||
| 972 | static struct nouveau_pgraph_object_method nv17_graph_celsius_mthds[] = { | ||
| 973 | { 0x1638, nv17_graph_mthd_lma_window }, | ||
| 974 | { 0x163c, nv17_graph_mthd_lma_window }, | ||
| 975 | { 0x1640, nv17_graph_mthd_lma_window }, | ||
| 976 | { 0x1644, nv17_graph_mthd_lma_window }, | ||
| 977 | { 0x1658, nv17_graph_mthd_lma_enable }, | ||
| 978 | {} | ||
| 979 | }; | ||
| 980 | |||
| 870 | struct nouveau_pgraph_object_class nv10_graph_grclass[] = { | 981 | struct nouveau_pgraph_object_class nv10_graph_grclass[] = { |
| 871 | { 0x0030, false, NULL }, /* null */ | 982 | { 0x0030, false, NULL }, /* null */ |
| 872 | { 0x0039, false, NULL }, /* m2mf */ | 983 | { 0x0039, false, NULL }, /* m2mf */ |
| @@ -887,6 +998,6 @@ struct nouveau_pgraph_object_class nv10_graph_grclass[] = { | |||
| 887 | { 0x0095, false, NULL }, /* multitex_tri */ | 998 | { 0x0095, false, NULL }, /* multitex_tri */ |
| 888 | { 0x0056, false, NULL }, /* celcius (nv10) */ | 999 | { 0x0056, false, NULL }, /* celcius (nv10) */ |
| 889 | { 0x0096, false, NULL }, /* celcius (nv11) */ | 1000 | { 0x0096, false, NULL }, /* celcius (nv11) */ |
| 890 | { 0x0099, false, NULL }, /* celcius (nv17) */ | 1001 | { 0x0099, false, nv17_graph_celsius_mthds }, /* celcius (nv17) */ |
| 891 | {} | 1002 | {} |
| 892 | }; | 1003 | }; |
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c index 46cfd9c60478..81c01353a9f9 100644 --- a/drivers/gpu/drm/nouveau/nv17_tv.c +++ b/drivers/gpu/drm/nouveau/nv17_tv.c | |||
| @@ -219,7 +219,7 @@ static void nv17_tv_dpms(struct drm_encoder *encoder, int mode) | |||
| 219 | return; | 219 | return; |
| 220 | nouveau_encoder(encoder)->last_dpms = mode; | 220 | nouveau_encoder(encoder)->last_dpms = mode; |
| 221 | 221 | ||
| 222 | NV_TRACE(dev, "Setting dpms mode %d on TV encoder (output %d)\n", | 222 | NV_INFO(dev, "Setting dpms mode %d on TV encoder (output %d)\n", |
| 223 | mode, nouveau_encoder(encoder)->dcb->index); | 223 | mode, nouveau_encoder(encoder)->dcb->index); |
| 224 | 224 | ||
| 225 | regs->ptv_200 &= ~1; | 225 | regs->ptv_200 &= ~1; |
| @@ -619,7 +619,7 @@ static void nv17_tv_destroy(struct drm_encoder *encoder) | |||
| 619 | { | 619 | { |
| 620 | struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); | 620 | struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); |
| 621 | 621 | ||
| 622 | NV_DEBUG(encoder->dev, "\n"); | 622 | NV_DEBUG_KMS(encoder->dev, "\n"); |
| 623 | 623 | ||
| 624 | drm_encoder_cleanup(encoder); | 624 | drm_encoder_cleanup(encoder); |
| 625 | kfree(tv_enc); | 625 | kfree(tv_enc); |
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c index 7e8547cb5833..2b332bb55acf 100644 --- a/drivers/gpu/drm/nouveau/nv40_graph.c +++ b/drivers/gpu/drm/nouveau/nv40_graph.c | |||
| @@ -24,36 +24,10 @@ | |||
| 24 | * | 24 | * |
| 25 | */ | 25 | */ |
| 26 | 26 | ||
| 27 | #include <linux/firmware.h> | ||
| 28 | |||
| 29 | #include "drmP.h" | 27 | #include "drmP.h" |
| 30 | #include "drm.h" | 28 | #include "drm.h" |
| 31 | #include "nouveau_drv.h" | 29 | #include "nouveau_drv.h" |
| 32 | 30 | #include "nouveau_grctx.h" | |
| 33 | MODULE_FIRMWARE("nouveau/nv40.ctxprog"); | ||
| 34 | MODULE_FIRMWARE("nouveau/nv40.ctxvals"); | ||
| 35 | MODULE_FIRMWARE("nouveau/nv41.ctxprog"); | ||
| 36 | MODULE_FIRMWARE("nouveau/nv41.ctxvals"); | ||
| 37 | MODULE_FIRMWARE("nouveau/nv42.ctxprog"); | ||
| 38 | MODULE_FIRMWARE("nouveau/nv42.ctxvals"); | ||
| 39 | MODULE_FIRMWARE("nouveau/nv43.ctxprog"); | ||
| 40 | MODULE_FIRMWARE("nouveau/nv43.ctxvals"); | ||
| 41 | MODULE_FIRMWARE("nouveau/nv44.ctxprog"); | ||
| 42 | MODULE_FIRMWARE("nouveau/nv44.ctxvals"); | ||
| 43 | MODULE_FIRMWARE("nouveau/nv46.ctxprog"); | ||
| 44 | MODULE_FIRMWARE("nouveau/nv46.ctxvals"); | ||
| 45 | MODULE_FIRMWARE("nouveau/nv47.ctxprog"); | ||
| 46 | MODULE_FIRMWARE("nouveau/nv47.ctxvals"); | ||
| 47 | MODULE_FIRMWARE("nouveau/nv49.ctxprog"); | ||
| 48 | MODULE_FIRMWARE("nouveau/nv49.ctxvals"); | ||
| 49 | MODULE_FIRMWARE("nouveau/nv4a.ctxprog"); | ||
| 50 | MODULE_FIRMWARE("nouveau/nv4a.ctxvals"); | ||
| 51 | MODULE_FIRMWARE("nouveau/nv4b.ctxprog"); | ||
| 52 | MODULE_FIRMWARE("nouveau/nv4b.ctxvals"); | ||
| 53 | MODULE_FIRMWARE("nouveau/nv4c.ctxprog"); | ||
| 54 | MODULE_FIRMWARE("nouveau/nv4c.ctxvals"); | ||
| 55 | MODULE_FIRMWARE("nouveau/nv4e.ctxprog"); | ||
| 56 | MODULE_FIRMWARE("nouveau/nv4e.ctxvals"); | ||
| 57 | 31 | ||
| 58 | struct nouveau_channel * | 32 | struct nouveau_channel * |
| 59 | nv40_graph_channel(struct drm_device *dev) | 33 | nv40_graph_channel(struct drm_device *dev) |
| @@ -83,27 +57,30 @@ nv40_graph_create_context(struct nouveau_channel *chan) | |||
| 83 | { | 57 | { |
| 84 | struct drm_device *dev = chan->dev; | 58 | struct drm_device *dev = chan->dev; |
| 85 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 59 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
| 86 | struct nouveau_gpuobj *ctx; | 60 | struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; |
| 87 | int ret; | 61 | int ret; |
| 88 | 62 | ||
| 89 | /* Allocate a 175KiB block of PRAMIN to store the context. This | 63 | ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size, |
| 90 | * is massive overkill for a lot of chipsets, but it should be safe | 64 | 16, NVOBJ_FLAG_ZERO_ALLOC, |
| 91 | * until we're able to implement this properly (will happen at more | 65 | &chan->ramin_grctx); |
| 92 | * or less the same time we're able to write our own context programs. | ||
| 93 | */ | ||
| 94 | ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 175*1024, 16, | ||
| 95 | NVOBJ_FLAG_ZERO_ALLOC, | ||
| 96 | &chan->ramin_grctx); | ||
| 97 | if (ret) | 66 | if (ret) |
| 98 | return ret; | 67 | return ret; |
| 99 | ctx = chan->ramin_grctx->gpuobj; | ||
| 100 | 68 | ||
| 101 | /* Initialise default context values */ | 69 | /* Initialise default context values */ |
| 102 | dev_priv->engine.instmem.prepare_access(dev, true); | 70 | dev_priv->engine.instmem.prepare_access(dev, true); |
| 103 | nv40_grctx_vals_load(dev, ctx); | 71 | if (!pgraph->ctxprog) { |
| 104 | nv_wo32(dev, ctx, 0, ctx->im_pramin->start); | 72 | struct nouveau_grctx ctx = {}; |
| 105 | dev_priv->engine.instmem.finish_access(dev); | ||
| 106 | 73 | ||
| 74 | ctx.dev = chan->dev; | ||
| 75 | ctx.mode = NOUVEAU_GRCTX_VALS; | ||
| 76 | ctx.data = chan->ramin_grctx->gpuobj; | ||
| 77 | nv40_grctx_init(&ctx); | ||
| 78 | } else { | ||
| 79 | nouveau_grctx_vals_load(dev, chan->ramin_grctx->gpuobj); | ||
| 80 | } | ||
| 81 | nv_wo32(dev, chan->ramin_grctx->gpuobj, 0, | ||
| 82 | chan->ramin_grctx->gpuobj->im_pramin->start); | ||
| 83 | dev_priv->engine.instmem.finish_access(dev); | ||
| 107 | return 0; | 84 | return 0; |
| 108 | } | 85 | } |
| 109 | 86 | ||
| @@ -204,139 +181,6 @@ nv40_graph_unload_context(struct drm_device *dev) | |||
| 204 | return ret; | 181 | return ret; |
| 205 | } | 182 | } |
| 206 | 183 | ||
| 207 | struct nouveau_ctxprog { | ||
| 208 | uint32_t signature; | ||
| 209 | uint8_t version; | ||
| 210 | uint16_t length; | ||
| 211 | uint32_t data[]; | ||
| 212 | } __attribute__ ((packed)); | ||
| 213 | |||
| 214 | struct nouveau_ctxvals { | ||
| 215 | uint32_t signature; | ||
| 216 | uint8_t version; | ||
| 217 | uint32_t length; | ||
| 218 | struct { | ||
| 219 | uint32_t offset; | ||
| 220 | uint32_t value; | ||
| 221 | } data[]; | ||
| 222 | } __attribute__ ((packed)); | ||
| 223 | |||
| 224 | int | ||
| 225 | nv40_grctx_init(struct drm_device *dev) | ||
| 226 | { | ||
| 227 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
| 228 | struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; | ||
| 229 | const int chipset = dev_priv->chipset; | ||
| 230 | const struct firmware *fw; | ||
| 231 | const struct nouveau_ctxprog *cp; | ||
| 232 | const struct nouveau_ctxvals *cv; | ||
| 233 | char name[32]; | ||
| 234 | int ret, i; | ||
| 235 | |||
| 236 | pgraph->accel_blocked = true; | ||
| 237 | |||
| 238 | if (!pgraph->ctxprog) { | ||
| 239 | sprintf(name, "nouveau/nv%02x.ctxprog", chipset); | ||
| 240 | ret = request_firmware(&fw, name, &dev->pdev->dev); | ||
| 241 | if (ret) { | ||
| 242 | NV_ERROR(dev, "No ctxprog for NV%02x\n", chipset); | ||
| 243 | return ret; | ||
| 244 | } | ||
| 245 | |||
| 246 | pgraph->ctxprog = kmalloc(fw->size, GFP_KERNEL); | ||
| 247 | if (!pgraph->ctxprog) { | ||
| 248 | NV_ERROR(dev, "OOM copying ctxprog\n"); | ||
| 249 | release_firmware(fw); | ||
| 250 | return -ENOMEM; | ||
| 251 | } | ||
| 252 | memcpy(pgraph->ctxprog, fw->data, fw->size); | ||
| 253 | |||
| 254 | cp = pgraph->ctxprog; | ||
| 255 | if (le32_to_cpu(cp->signature) != 0x5043564e || | ||
| 256 | cp->version != 0 || | ||
| 257 | le16_to_cpu(cp->length) != ((fw->size - 7) / 4)) { | ||
| 258 | NV_ERROR(dev, "ctxprog invalid\n"); | ||
| 259 | release_firmware(fw); | ||
| 260 | nv40_grctx_fini(dev); | ||
| 261 | return -EINVAL; | ||
| 262 | } | ||
| 263 | release_firmware(fw); | ||
| 264 | } | ||
| 265 | |||
| 266 | if (!pgraph->ctxvals) { | ||
| 267 | sprintf(name, "nouveau/nv%02x.ctxvals", chipset); | ||
| 268 | ret = request_firmware(&fw, name, &dev->pdev->dev); | ||
| 269 | if (ret) { | ||
| 270 | NV_ERROR(dev, "No ctxvals for NV%02x\n", chipset); | ||
| 271 | nv40_grctx_fini(dev); | ||
| 272 | return ret; | ||
| 273 | } | ||
| 274 | |||
| 275 | pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL); | ||
| 276 | if (!pgraph->ctxprog) { | ||
| 277 | NV_ERROR(dev, "OOM copying ctxprog\n"); | ||
| 278 | release_firmware(fw); | ||
| 279 | nv40_grctx_fini(dev); | ||
| 280 | return -ENOMEM; | ||
| 281 | } | ||
| 282 | memcpy(pgraph->ctxvals, fw->data, fw->size); | ||
| 283 | |||
| 284 | cv = (void *)pgraph->ctxvals; | ||
| 285 | if (le32_to_cpu(cv->signature) != 0x5643564e || | ||
| 286 | cv->version != 0 || | ||
| 287 | le32_to_cpu(cv->length) != ((fw->size - 9) / 8)) { | ||
| 288 | NV_ERROR(dev, "ctxvals invalid\n"); | ||
| 289 | release_firmware(fw); | ||
| 290 | nv40_grctx_fini(dev); | ||
| 291 | return -EINVAL; | ||
| 292 | } | ||
| 293 | release_firmware(fw); | ||
| 294 | } | ||
| 295 | |||
| 296 | cp = pgraph->ctxprog; | ||
| 297 | |||
| 298 | nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); | ||
| 299 | for (i = 0; i < le16_to_cpu(cp->length); i++) | ||
| 300 | nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, | ||
| 301 | le32_to_cpu(cp->data[i])); | ||
| 302 | |||
| 303 | pgraph->accel_blocked = false; | ||
| 304 | return 0; | ||
| 305 | } | ||
| 306 | |||
| 307 | void | ||
| 308 | nv40_grctx_fini(struct drm_device *dev) | ||
| 309 | { | ||
| 310 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
| 311 | struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; | ||
| 312 | |||
| 313 | if (pgraph->ctxprog) { | ||
| 314 | kfree(pgraph->ctxprog); | ||
| 315 | pgraph->ctxprog = NULL; | ||
| 316 | } | ||
| 317 | |||
| 318 | if (pgraph->ctxvals) { | ||
| 319 | kfree(pgraph->ctxprog); | ||
| 320 | pgraph->ctxvals = NULL; | ||
| 321 | } | ||
| 322 | } | ||
| 323 | |||
| 324 | void | ||
| 325 | nv40_grctx_vals_load(struct drm_device *dev, struct nouveau_gpuobj *ctx) | ||
| 326 | { | ||
| 327 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
| 328 | struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph; | ||
| 329 | struct nouveau_ctxvals *cv = pgraph->ctxvals; | ||
| 330 | int i; | ||
| 331 | |||
| 332 | if (!cv) | ||
| 333 | return; | ||
| 334 | |||
| 335 | for (i = 0; i < le32_to_cpu(cv->length); i++) | ||
| 336 | nv_wo32(dev, ctx, le32_to_cpu(cv->data[i].offset), | ||
| 337 | le32_to_cpu(cv->data[i].value)); | ||
| 338 | } | ||
| 339 | |||
| 340 | /* | 184 | /* |
| 341 | * G70 0x47 | 185 | * G70 0x47 |
| 342 | * G71 0x49 | 186 | * G71 0x49 |
| @@ -359,7 +203,26 @@ nv40_graph_init(struct drm_device *dev) | |||
| 359 | nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | | 203 | nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) | |
| 360 | NV_PMC_ENABLE_PGRAPH); | 204 | NV_PMC_ENABLE_PGRAPH); |
| 361 | 205 | ||
| 362 | nv40_grctx_init(dev); | 206 | if (nouveau_ctxfw) { |
| 207 | nouveau_grctx_prog_load(dev); | ||
| 208 | dev_priv->engine.graph.grctx_size = 175 * 1024; | ||
| 209 | } | ||
| 210 | |||
| 211 | if (!dev_priv->engine.graph.ctxprog) { | ||
| 212 | struct nouveau_grctx ctx = {}; | ||
| 213 | uint32_t cp[256]; | ||
| 214 | |||
| 215 | ctx.dev = dev; | ||
| 216 | ctx.mode = NOUVEAU_GRCTX_PROG; | ||
| 217 | ctx.data = cp; | ||
| 218 | ctx.ctxprog_max = 256; | ||
| 219 | nv40_grctx_init(&ctx); | ||
| 220 | dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4; | ||
| 221 | |||
| 222 | nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); | ||
| 223 | for (i = 0; i < ctx.ctxprog_len; i++) | ||
| 224 | nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]); | ||
| 225 | } | ||
| 363 | 226 | ||
| 364 | /* No context present currently */ | 227 | /* No context present currently */ |
| 365 | nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000); | 228 | nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000); |
| @@ -539,6 +402,7 @@ nv40_graph_init(struct drm_device *dev) | |||
| 539 | 402 | ||
| 540 | void nv40_graph_takedown(struct drm_device *dev) | 403 | void nv40_graph_takedown(struct drm_device *dev) |
| 541 | { | 404 | { |
| 405 | nouveau_grctx_fini(dev); | ||
| 542 | } | 406 | } |
| 543 | 407 | ||
| 544 | struct nouveau_pgraph_object_class nv40_graph_grclass[] = { | 408 | struct nouveau_pgraph_object_class nv40_graph_grclass[] = { |
diff --git a/drivers/gpu/drm/nouveau/nv40_grctx.c b/drivers/gpu/drm/nouveau/nv40_grctx.c new file mode 100644 index 000000000000..11b11c31f543 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nv40_grctx.c | |||
| @@ -0,0 +1,678 @@ | |||
| 1 | /* | ||
| 2 | * Copyright 2009 Red Hat Inc. | ||
| 3 | * | ||
| 4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
| 5 | * copy of this software and associated documentation files (the "Software"), | ||
| 6 | * to deal in the Software without restriction, including without limitation | ||
| 7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
| 8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
| 9 | * Software is furnished to do so, subject to the following conditions: | ||
| 10 | * | ||
| 11 | * The above copyright notice and this permission notice shall be included in | ||
| 12 | * all copies or substantial portions of the Software. | ||
| 13 | * | ||
| 14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
| 15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
| 16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
| 17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | ||
| 18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | ||
| 19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | ||
| 20 | * OTHER DEALINGS IN THE SOFTWARE. | ||
| 21 | * | ||
| 22 | * Authors: Ben Skeggs | ||
| 23 | */ | ||
| 24 | |||
| 25 | /* NVIDIA context programs handle a number of other conditions which are | ||
| 26 | * not implemented in our versions. It's not clear why NVIDIA context | ||
| 27 | * programs have this code, nor whether it's strictly necessary for | ||
| 28 | * correct operation. We'll implement additional handling if/when we | ||
| 29 | * discover it's necessary. | ||
| 30 | * | ||
| 31 | * - On context save, NVIDIA set 0x400314 bit 0 to 1 if the "3D state" | ||
| 32 | * flag is set, this gets saved into the context. | ||
| 33 | * - On context save, the context program for all cards load nsource | ||
| 34 | * into a flag register and check for ILLEGAL_MTHD. If it's set, | ||
| 35 | * opcode 0x60000d is called before resuming normal operation. | ||
| 36 | * - Some context programs check more conditions than the above. NV44 | ||
| 37 | * checks: ((nsource & 0x0857) || (0x400718 & 0x0100) || (intr & 0x0001)) | ||
| 38 | * and calls 0x60000d before resuming normal operation. | ||
| 39 | * - At the very beginning of NVIDIA's context programs, flag 9 is checked | ||
| 40 | * and if true 0x800001 is called with count=0, pos=0, the flag is cleared | ||
| 41 | * and then the ctxprog is aborted. It looks like a complicated NOP, | ||
| 42 | * its purpose is unknown. | ||
| 43 | * - In the section of code that loads the per-vs state, NVIDIA check | ||
| 44 | * flag 10. If it's set, they only transfer the small 0x300 byte block | ||
| 45 | * of state + the state for a single vs as opposed to the state for | ||
| 46 | * all vs units. It doesn't seem likely that it'll occur in normal | ||
| 47 | * operation, especially seeing as it appears NVIDIA may have screwed | ||
| 48 | * up the ctxprogs for some cards and have an invalid instruction | ||
| 49 | * rather than a cp_lsr(ctx, dwords_for_1_vs_unit) instruction. | ||
| 50 | * - There's a number of places where context offset 0 (where we place | ||
| 51 | * the PRAMIN offset of the context) is loaded into either 0x408000, | ||
| 52 | * 0x408004 or 0x408008. Not sure what's up there either. | ||
| 53 | * - The ctxprogs for some cards save 0x400a00 again during the cleanup | ||
| 54 | * path for auto-loadctx. | ||
| 55 | */ | ||
| 56 | |||
| 57 | #define CP_FLAG_CLEAR 0 | ||
| 58 | #define CP_FLAG_SET 1 | ||
| 59 | #define CP_FLAG_SWAP_DIRECTION ((0 * 32) + 0) | ||
| 60 | #define CP_FLAG_SWAP_DIRECTION_LOAD 0 | ||
| 61 | #define CP_FLAG_SWAP_DIRECTION_SAVE 1 | ||
| 62 | #define CP_FLAG_USER_SAVE ((0 * 32) + 5) | ||
| 63 | #define CP_FLAG_USER_SAVE_NOT_PENDING 0 | ||
| 64 | #define CP_FLAG_USER_SAVE_PENDING 1 | ||
| 65 | #define CP_FLAG_USER_LOAD ((0 * 32) + 6) | ||
| 66 | #define CP_FLAG_USER_LOAD_NOT_PENDING 0 | ||
| 67 | #define CP_FLAG_USER_LOAD_PENDING 1 | ||
| 68 | #define CP_FLAG_STATUS ((3 * 32) + 0) | ||
| 69 | #define CP_FLAG_STATUS_IDLE 0 | ||
| 70 | #define CP_FLAG_STATUS_BUSY 1 | ||
| 71 | #define CP_FLAG_AUTO_SAVE ((3 * 32) + 4) | ||
| 72 | #define CP_FLAG_AUTO_SAVE_NOT_PENDING 0 | ||
| 73 | #define CP_FLAG_AUTO_SAVE_PENDING 1 | ||
| 74 | #define CP_FLAG_AUTO_LOAD ((3 * 32) + 5) | ||
| 75 | #define CP_FLAG_AUTO_LOAD_NOT_PENDING 0 | ||
| 76 | #define CP_FLAG_AUTO_LOAD_PENDING 1 | ||
| 77 | #define CP_FLAG_UNK54 ((3 * 32) + 6) | ||
| 78 | #define CP_FLAG_UNK54_CLEAR 0 | ||
| 79 | #define CP_FLAG_UNK54_SET 1 | ||
| 80 | #define CP_FLAG_ALWAYS ((3 * 32) + 8) | ||
| 81 | #define CP_FLAG_ALWAYS_FALSE 0 | ||
| 82 | #define CP_FLAG_ALWAYS_TRUE 1 | ||
| 83 | #define CP_FLAG_UNK57 ((3 * 32) + 9) | ||
| 84 | #define CP_FLAG_UNK57_CLEAR 0 | ||
| 85 | #define CP_FLAG_UNK57_SET 1 | ||
| 86 | |||
| 87 | #define CP_CTX 0x00100000 | ||
| 88 | #define CP_CTX_COUNT 0x000fc000 | ||
| 89 | #define CP_CTX_COUNT_SHIFT 14 | ||
| 90 | #define CP_CTX_REG 0x00003fff | ||
| 91 | #define CP_LOAD_SR 0x00200000 | ||
| 92 | #define CP_LOAD_SR_VALUE 0x000fffff | ||
| 93 | #define CP_BRA 0x00400000 | ||
| 94 | #define CP_BRA_IP 0x0000ff00 | ||
| 95 | #define CP_BRA_IP_SHIFT 8 | ||
| 96 | #define CP_BRA_IF_CLEAR 0x00000080 | ||
| 97 | #define CP_BRA_FLAG 0x0000007f | ||
| 98 | #define CP_WAIT 0x00500000 | ||
| 99 | #define CP_WAIT_SET 0x00000080 | ||
| 100 | #define CP_WAIT_FLAG 0x0000007f | ||
| 101 | #define CP_SET 0x00700000 | ||
| 102 | #define CP_SET_1 0x00000080 | ||
| 103 | #define CP_SET_FLAG 0x0000007f | ||
| 104 | #define CP_NEXT_TO_SWAP 0x00600007 | ||
| 105 | #define CP_NEXT_TO_CURRENT 0x00600009 | ||
| 106 | #define CP_SET_CONTEXT_POINTER 0x0060000a | ||
| 107 | #define CP_END 0x0060000e | ||
| 108 | #define CP_LOAD_MAGIC_UNK01 0x00800001 /* unknown */ | ||
| 109 | #define CP_LOAD_MAGIC_NV44TCL 0x00800029 /* per-vs state (0x4497) */ | ||
| 110 | #define CP_LOAD_MAGIC_NV40TCL 0x00800041 /* per-vs state (0x4097) */ | ||
| 111 | |||
| 112 | #include "drmP.h" | ||
| 113 | #include "nouveau_drv.h" | ||
| 114 | #include "nouveau_grctx.h" | ||
| 115 | |||
| 116 | /* TODO: | ||
| 117 | * - get vs count from 0x1540 | ||
| 118 | * - document unimplemented bits compared to nvidia | ||
| 119 | * - nsource handling | ||
| 120 | * - R0 & 0x0200 handling | ||
| 121 | * - single-vs handling | ||
| 122 | * - 400314 bit 0 | ||
| 123 | */ | ||
| 124 | |||
| 125 | static int | ||
| 126 | nv40_graph_4097(struct drm_device *dev) | ||
| 127 | { | ||
| 128 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
| 129 | |||
| 130 | if ((dev_priv->chipset & 0xf0) == 0x60) | ||
| 131 | return 0; | ||
| 132 | |||
| 133 | return !!(0x0baf & (1 << dev_priv->chipset)); | ||
| 134 | } | ||
| 135 | |||
| 136 | static int | ||
| 137 | nv40_graph_vs_count(struct drm_device *dev) | ||
| 138 | { | ||
| 139 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
| 140 | |||
| 141 | switch (dev_priv->chipset) { | ||
| 142 | case 0x47: | ||
| 143 | case 0x49: | ||
| 144 | case 0x4b: | ||
| 145 | return 8; | ||
| 146 | case 0x40: | ||
| 147 | return 6; | ||
| 148 | case 0x41: | ||
| 149 | case 0x42: | ||
| 150 | return 5; | ||
| 151 | case 0x43: | ||
| 152 | case 0x44: | ||
| 153 | case 0x46: | ||
| 154 | case 0x4a: | ||
| 155 | return 3; | ||
| 156 | case 0x4c: | ||
| 157 | case 0x4e: | ||
| 158 | case 0x67: | ||
| 159 | default: | ||
| 160 | return 1; | ||
| 161 | } | ||
| 162 | } | ||
| 163 | |||
| 164 | |||
| 165 | enum cp_label { | ||
| 166 | cp_check_load = 1, | ||
| 167 | cp_setup_auto_load, | ||
| 168 | cp_setup_load, | ||
| 169 | cp_setup_save, | ||
| 170 | cp_swap_state, | ||
| 171 | cp_swap_state3d_3_is_save, | ||
| 172 | cp_prepare_exit, | ||
| 173 | cp_exit, | ||
| 174 | }; | ||
| 175 | |||
| 176 | static void | ||
| 177 | nv40_graph_construct_general(struct nouveau_grctx *ctx) | ||
| 178 | { | ||
| 179 | struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; | ||
| 180 | int i; | ||
| 181 | |||
| 182 | cp_ctx(ctx, 0x4000a4, 1); | ||
| 183 | gr_def(ctx, 0x4000a4, 0x00000008); | ||
| 184 | cp_ctx(ctx, 0x400144, 58); | ||
| 185 | gr_def(ctx, 0x400144, 0x00000001); | ||
| 186 | cp_ctx(ctx, 0x400314, 1); | ||
| 187 | gr_def(ctx, 0x400314, 0x00000000); | ||
| 188 | cp_ctx(ctx, 0x400400, 10); | ||
| 189 | cp_ctx(ctx, 0x400480, 10); | ||
| 190 | cp_ctx(ctx, 0x400500, 19); | ||
| 191 | gr_def(ctx, 0x400514, 0x00040000); | ||
| 192 | gr_def(ctx, 0x400524, 0x55555555); | ||
| 193 | gr_def(ctx, 0x400528, 0x55555555); | ||
| 194 | gr_def(ctx, 0x40052c, 0x55555555); | ||
| 195 | gr_def(ctx, 0x400530, 0x55555555); | ||
| 196 | cp_ctx(ctx, 0x400560, 6); | ||
| 197 | gr_def(ctx, 0x400568, 0x0000ffff); | ||
| 198 | gr_def(ctx, 0x40056c, 0x0000ffff); | ||
| 199 | cp_ctx(ctx, 0x40057c, 5); | ||
| 200 | cp_ctx(ctx, 0x400710, 3); | ||
| 201 | gr_def(ctx, 0x400710, 0x20010001); | ||
| 202 | gr_def(ctx, 0x400714, 0x0f73ef00); | ||
| 203 | cp_ctx(ctx, 0x400724, 1); | ||
| 204 | gr_def(ctx, 0x400724, 0x02008821); | ||
| 205 | cp_ctx(ctx, 0x400770, 3); | ||
| 206 | if (dev_priv->chipset == 0x40) { | ||
| 207 | cp_ctx(ctx, 0x400814, 4); | ||
| 208 | cp_ctx(ctx, 0x400828, 5); | ||
| 209 | cp_ctx(ctx, 0x400840, 5); | ||
| 210 | gr_def(ctx, 0x400850, 0x00000040); | ||
| 211 | cp_ctx(ctx, 0x400858, 4); | ||
| 212 | gr_def(ctx, 0x400858, 0x00000040); | ||
| 213 | gr_def(ctx, 0x40085c, 0x00000040); | ||
| 214 | gr_def(ctx, 0x400864, 0x80000000); | ||
| 215 | cp_ctx(ctx, 0x40086c, 9); | ||
| 216 | gr_def(ctx, 0x40086c, 0x80000000); | ||
| 217 | gr_def(ctx, 0x400870, 0x80000000); | ||
| 218 | gr_def(ctx, 0x400874, 0x80000000); | ||
| 219 | gr_def(ctx, 0x400878, 0x80000000); | ||
| 220 | gr_def(ctx, 0x400888, 0x00000040); | ||
| 221 | gr_def(ctx, 0x40088c, 0x80000000); | ||
| 222 | cp_ctx(ctx, 0x4009c0, 8); | ||
| 223 | gr_def(ctx, 0x4009cc, 0x80000000); | ||
| 224 | gr_def(ctx, 0x4009dc, 0x80000000); | ||
| 225 | } else { | ||
| 226 | cp_ctx(ctx, 0x400840, 20); | ||
| 227 | if (!nv40_graph_4097(ctx->dev)) { | ||
| 228 | for (i = 0; i < 8; i++) | ||
| 229 | gr_def(ctx, 0x400860 + (i * 4), 0x00000001); | ||
| 230 | } | ||
| 231 | gr_def(ctx, 0x400880, 0x00000040); | ||
| 232 | gr_def(ctx, 0x400884, 0x00000040); | ||
| 233 | gr_def(ctx, 0x400888, 0x00000040); | ||
| 234 | cp_ctx(ctx, 0x400894, 11); | ||
| 235 | gr_def(ctx, 0x400894, 0x00000040); | ||
| 236 | if (nv40_graph_4097(ctx->dev)) { | ||
| 237 | for (i = 0; i < 8; i++) | ||
| 238 | gr_def(ctx, 0x4008a0 + (i * 4), 0x80000000); | ||
| 239 | } | ||
| 240 | cp_ctx(ctx, 0x4008e0, 2); | ||
| 241 | cp_ctx(ctx, 0x4008f8, 2); | ||
| 242 | if (dev_priv->chipset == 0x4c || | ||
| 243 | (dev_priv->chipset & 0xf0) == 0x60) | ||
| 244 | cp_ctx(ctx, 0x4009f8, 1); | ||
| 245 | } | ||
| 246 | cp_ctx(ctx, 0x400a00, 73); | ||
| 247 | gr_def(ctx, 0x400b0c, 0x0b0b0b0c); | ||
| 248 | cp_ctx(ctx, 0x401000, 4); | ||
| 249 | cp_ctx(ctx, 0x405004, 1); | ||
| 250 | switch (dev_priv->chipset) { | ||
| 251 | case 0x47: | ||
| 252 | case 0x49: | ||
| 253 | case 0x4b: | ||
| 254 | cp_ctx(ctx, 0x403448, 1); | ||
| 255 | gr_def(ctx, 0x403448, 0x00001010); | ||
| 256 | break; | ||
| 257 | default: | ||
| 258 | cp_ctx(ctx, 0x403440, 1); | ||
| 259 | switch (dev_priv->chipset) { | ||
| 260 | case 0x40: | ||
| 261 | gr_def(ctx, 0x403440, 0x00000010); | ||
| 262 | break; | ||
| 263 | case 0x44: | ||
| 264 | case 0x46: | ||
| 265 | case 0x4a: | ||
| 266 | gr_def(ctx, 0x403440, 0x00003010); | ||
| 267 | break; | ||
| 268 | case 0x41: | ||
| 269 | case 0x42: | ||
| 270 | case 0x43: | ||
| 271 | case 0x4c: | ||
| 272 | case 0x4e: | ||
| 273 | case 0x67: | ||
| 274 | default: | ||
| 275 | gr_def(ctx, 0x403440, 0x00001010); | ||
| 276 | break; | ||
| 277 | } | ||
| 278 | break; | ||
| 279 | } | ||
| 280 | } | ||
| 281 | |||
| 282 | static void | ||
| 283 | nv40_graph_construct_state3d(struct nouveau_grctx *ctx) | ||
| 284 | { | ||
| 285 | struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; | ||
| 286 | int i; | ||
| 287 | |||
| 288 | if (dev_priv->chipset == 0x40) { | ||
| 289 | cp_ctx(ctx, 0x401880, 51); | ||
| 290 | gr_def(ctx, 0x401940, 0x00000100); | ||
| 291 | } else | ||
| 292 | if (dev_priv->chipset == 0x46 || dev_priv->chipset == 0x47 || | ||
| 293 | dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b) { | ||
| 294 | cp_ctx(ctx, 0x401880, 32); | ||
| 295 | for (i = 0; i < 16; i++) | ||
| 296 | gr_def(ctx, 0x401880 + (i * 4), 0x00000111); | ||
| 297 | if (dev_priv->chipset == 0x46) | ||
| 298 | cp_ctx(ctx, 0x401900, 16); | ||
| 299 | cp_ctx(ctx, 0x401940, 3); | ||
| 300 | } | ||
| 301 | cp_ctx(ctx, 0x40194c, 18); | ||
| 302 | gr_def(ctx, 0x401954, 0x00000111); | ||
| 303 | gr_def(ctx, 0x401958, 0x00080060); | ||
| 304 | gr_def(ctx, 0x401974, 0x00000080); | ||
| 305 | gr_def(ctx, 0x401978, 0xffff0000); | ||
| 306 | gr_def(ctx, 0x40197c, 0x00000001); | ||
| 307 | gr_def(ctx, 0x401990, 0x46400000); | ||
| 308 | if (dev_priv->chipset == 0x40) { | ||
| 309 | cp_ctx(ctx, 0x4019a0, 2); | ||
| 310 | cp_ctx(ctx, 0x4019ac, 5); | ||
| 311 | } else { | ||
| 312 | cp_ctx(ctx, 0x4019a0, 1); | ||
| 313 | cp_ctx(ctx, 0x4019b4, 3); | ||
| 314 | } | ||
| 315 | gr_def(ctx, 0x4019bc, 0xffff0000); | ||
| 316 | switch (dev_priv->chipset) { | ||
| 317 | case 0x46: | ||
| 318 | case 0x47: | ||
| 319 | case 0x49: | ||
| 320 | case 0x4b: | ||
| 321 | cp_ctx(ctx, 0x4019c0, 18); | ||
| 322 | for (i = 0; i < 16; i++) | ||
| 323 | gr_def(ctx, 0x4019c0 + (i * 4), 0x88888888); | ||
| 324 | break; | ||
| 325 | } | ||
| 326 | cp_ctx(ctx, 0x401a08, 8); | ||
| 327 | gr_def(ctx, 0x401a10, 0x0fff0000); | ||
| 328 | gr_def(ctx, 0x401a14, 0x0fff0000); | ||
| 329 | gr_def(ctx, 0x401a1c, 0x00011100); | ||
| 330 | cp_ctx(ctx, 0x401a2c, 4); | ||
| 331 | cp_ctx(ctx, 0x401a44, 26); | ||
| 332 | for (i = 0; i < 16; i++) | ||
| 333 | gr_def(ctx, 0x401a44 + (i * 4), 0x07ff0000); | ||
| 334 | gr_def(ctx, 0x401a8c, 0x4b7fffff); | ||
| 335 | if (dev_priv->chipset == 0x40) { | ||
| 336 | cp_ctx(ctx, 0x401ab8, 3); | ||
| 337 | } else { | ||
| 338 | cp_ctx(ctx, 0x401ab8, 1); | ||
| 339 | cp_ctx(ctx, 0x401ac0, 1); | ||
| 340 | } | ||
| 341 | cp_ctx(ctx, 0x401ad0, 8); | ||
| 342 | gr_def(ctx, 0x401ad0, 0x30201000); | ||
| 343 | gr_def(ctx, 0x401ad4, 0x70605040); | ||
| 344 | gr_def(ctx, 0x401ad8, 0xb8a89888); | ||
| 345 | gr_def(ctx, 0x401adc, 0xf8e8d8c8); | ||
| 346 | cp_ctx(ctx, 0x401b10, dev_priv->chipset == 0x40 ? 2 : 1); | ||
| 347 | gr_def(ctx, 0x401b10, 0x40100000); | ||
| 348 | cp_ctx(ctx, 0x401b18, dev_priv->chipset == 0x40 ? 6 : 5); | ||
| 349 | gr_def(ctx, 0x401b28, dev_priv->chipset == 0x40 ? | ||
| 350 | 0x00000004 : 0x00000000); | ||
| 351 | cp_ctx(ctx, 0x401b30, 25); | ||
| 352 | gr_def(ctx, 0x401b34, 0x0000ffff); | ||
| 353 | gr_def(ctx, 0x401b68, 0x435185d6); | ||
| 354 | gr_def(ctx, 0x401b6c, 0x2155b699); | ||
| 355 | gr_def(ctx, 0x401b70, 0xfedcba98); | ||
| 356 | gr_def(ctx, 0x401b74, 0x00000098); | ||
| 357 | gr_def(ctx, 0x401b84, 0xffffffff); | ||
| 358 | gr_def(ctx, 0x401b88, 0x00ff7000); | ||
| 359 | gr_def(ctx, 0x401b8c, 0x0000ffff); | ||
| 360 | if (dev_priv->chipset != 0x44 && dev_priv->chipset != 0x4a && | ||
| 361 | dev_priv->chipset != 0x4e) | ||
| 362 | cp_ctx(ctx, 0x401b94, 1); | ||
| 363 | cp_ctx(ctx, 0x401b98, 8); | ||
| 364 | gr_def(ctx, 0x401b9c, 0x00ff0000); | ||
| 365 | cp_ctx(ctx, 0x401bc0, 9); | ||
| 366 | gr_def(ctx, 0x401be0, 0x00ffff00); | ||
| 367 | cp_ctx(ctx, 0x401c00, 192); | ||
| 368 | for (i = 0; i < 16; i++) { /* fragment texture units */ | ||
| 369 | gr_def(ctx, 0x401c40 + (i * 4), 0x00018488); | ||
| 370 | gr_def(ctx, 0x401c80 + (i * 4), 0x00028202); | ||
| 371 | gr_def(ctx, 0x401d00 + (i * 4), 0x0000aae4); | ||
| 372 | gr_def(ctx, 0x401d40 + (i * 4), 0x01012000); | ||
| 373 | gr_def(ctx, 0x401d80 + (i * 4), 0x00080008); | ||
| 374 | gr_def(ctx, 0x401e00 + (i * 4), 0x00100008); | ||
| 375 | } | ||
| 376 | for (i = 0; i < 4; i++) { /* vertex texture units */ | ||
| 377 | gr_def(ctx, 0x401e90 + (i * 4), 0x0001bc80); | ||
| 378 | gr_def(ctx, 0x401ea0 + (i * 4), 0x00000202); | ||
| 379 | gr_def(ctx, 0x401ec0 + (i * 4), 0x00000008); | ||
| 380 | gr_def(ctx, 0x401ee0 + (i * 4), 0x00080008); | ||
| 381 | } | ||
| 382 | cp_ctx(ctx, 0x400f5c, 3); | ||
| 383 | gr_def(ctx, 0x400f5c, 0x00000002); | ||
| 384 | cp_ctx(ctx, 0x400f84, 1); | ||
| 385 | } | ||
| 386 | |||
| 387 | static void | ||
| 388 | nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx) | ||
| 389 | { | ||
| 390 | struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; | ||
| 391 | int i; | ||
| 392 | |||
| 393 | cp_ctx(ctx, 0x402000, 1); | ||
| 394 | cp_ctx(ctx, 0x402404, dev_priv->chipset == 0x40 ? 1 : 2); | ||
| 395 | switch (dev_priv->chipset) { | ||
| 396 | case 0x40: | ||
| 397 | gr_def(ctx, 0x402404, 0x00000001); | ||
| 398 | break; | ||
| 399 | case 0x4c: | ||
| 400 | case 0x4e: | ||
| 401 | case 0x67: | ||
| 402 | gr_def(ctx, 0x402404, 0x00000020); | ||
| 403 | break; | ||
| 404 | case 0x46: | ||
| 405 | case 0x49: | ||
| 406 | case 0x4b: | ||
| 407 | gr_def(ctx, 0x402404, 0x00000421); | ||
| 408 | break; | ||
| 409 | default: | ||
| 410 | gr_def(ctx, 0x402404, 0x00000021); | ||
| 411 | } | ||
| 412 | if (dev_priv->chipset != 0x40) | ||
| 413 | gr_def(ctx, 0x402408, 0x030c30c3); | ||
| 414 | switch (dev_priv->chipset) { | ||
| 415 | case 0x44: | ||
| 416 | case 0x46: | ||
| 417 | case 0x4a: | ||
| 418 | case 0x4c: | ||
| 419 | case 0x4e: | ||
| 420 | case 0x67: | ||
| 421 | cp_ctx(ctx, 0x402440, 1); | ||
| 422 | gr_def(ctx, 0x402440, 0x00011001); | ||
| 423 | break; | ||
| 424 | default: | ||
| 425 | break; | ||
| 426 | } | ||
| 427 | cp_ctx(ctx, 0x402480, dev_priv->chipset == 0x40 ? 8 : 9); | ||
| 428 | gr_def(ctx, 0x402488, 0x3e020200); | ||
| 429 | gr_def(ctx, 0x40248c, 0x00ffffff); | ||
| 430 | switch (dev_priv->chipset) { | ||
| 431 | case 0x40: | ||
| 432 | gr_def(ctx, 0x402490, 0x60103f00); | ||
| 433 | break; | ||
| 434 | case 0x47: | ||
| 435 | gr_def(ctx, 0x402490, 0x40103f00); | ||
| 436 | break; | ||
| 437 | case 0x41: | ||
| 438 | case 0x42: | ||
| 439 | case 0x49: | ||
| 440 | case 0x4b: | ||
| 441 | gr_def(ctx, 0x402490, 0x20103f00); | ||
| 442 | break; | ||
| 443 | default: | ||
| 444 | gr_def(ctx, 0x402490, 0x0c103f00); | ||
| 445 | break; | ||
| 446 | } | ||
| 447 | gr_def(ctx, 0x40249c, dev_priv->chipset <= 0x43 ? | ||
| 448 | 0x00020000 : 0x00040000); | ||
| 449 | cp_ctx(ctx, 0x402500, 31); | ||
| 450 | gr_def(ctx, 0x402530, 0x00008100); | ||
| 451 | if (dev_priv->chipset == 0x40) | ||
| 452 | cp_ctx(ctx, 0x40257c, 6); | ||
| 453 | cp_ctx(ctx, 0x402594, 16); | ||
| 454 | cp_ctx(ctx, 0x402800, 17); | ||
| 455 | gr_def(ctx, 0x402800, 0x00000001); | ||
| 456 | switch (dev_priv->chipset) { | ||
| 457 | case 0x47: | ||
| 458 | case 0x49: | ||
| 459 | case 0x4b: | ||
| 460 | cp_ctx(ctx, 0x402864, 1); | ||
| 461 | gr_def(ctx, 0x402864, 0x00001001); | ||
| 462 | cp_ctx(ctx, 0x402870, 3); | ||
| 463 | gr_def(ctx, 0x402878, 0x00000003); | ||
| 464 | if (dev_priv->chipset != 0x47) { /* belong at end!! */ | ||
| 465 | cp_ctx(ctx, 0x402900, 1); | ||
| 466 | cp_ctx(ctx, 0x402940, 1); | ||
| 467 | cp_ctx(ctx, 0x402980, 1); | ||
| 468 | cp_ctx(ctx, 0x4029c0, 1); | ||
| 469 | cp_ctx(ctx, 0x402a00, 1); | ||
| 470 | cp_ctx(ctx, 0x402a40, 1); | ||
| 471 | cp_ctx(ctx, 0x402a80, 1); | ||
| 472 | cp_ctx(ctx, 0x402ac0, 1); | ||
| 473 | } | ||
| 474 | break; | ||
| 475 | case 0x40: | ||
| 476 | cp_ctx(ctx, 0x402844, 1); | ||
| 477 | gr_def(ctx, 0x402844, 0x00000001); | ||
| 478 | cp_ctx(ctx, 0x402850, 1); | ||
| 479 | break; | ||
| 480 | default: | ||
| 481 | cp_ctx(ctx, 0x402844, 1); | ||
| 482 | gr_def(ctx, 0x402844, 0x00001001); | ||
| 483 | cp_ctx(ctx, 0x402850, 2); | ||
| 484 | gr_def(ctx, 0x402854, 0x00000003); | ||
| 485 | break; | ||
| 486 | } | ||
| 487 | |||
| 488 | cp_ctx(ctx, 0x402c00, 4); | ||
| 489 | gr_def(ctx, 0x402c00, dev_priv->chipset == 0x40 ? | ||
| 490 | 0x80800001 : 0x00888001); | ||
| 491 | switch (dev_priv->chipset) { | ||
| 492 | case 0x47: | ||
| 493 | case 0x49: | ||
| 494 | case 0x4b: | ||
| 495 | cp_ctx(ctx, 0x402c20, 40); | ||
| 496 | for (i = 0; i < 32; i++) | ||
| 497 | gr_def(ctx, 0x402c40 + (i * 4), 0xffffffff); | ||
| 498 | cp_ctx(ctx, 0x4030b8, 13); | ||
| 499 | gr_def(ctx, 0x4030dc, 0x00000005); | ||
| 500 | gr_def(ctx, 0x4030e8, 0x0000ffff); | ||
| 501 | break; | ||
| 502 | default: | ||
| 503 | cp_ctx(ctx, 0x402c10, 4); | ||
| 504 | if (dev_priv->chipset == 0x40) | ||
| 505 | cp_ctx(ctx, 0x402c20, 36); | ||
| 506 | else | ||
| 507 | if (dev_priv->chipset <= 0x42) | ||
| 508 | cp_ctx(ctx, 0x402c20, 24); | ||
| 509 | else | ||
| 510 | if (dev_priv->chipset <= 0x4a) | ||
| 511 | cp_ctx(ctx, 0x402c20, 16); | ||
| 512 | else | ||
| 513 | cp_ctx(ctx, 0x402c20, 8); | ||
| 514 | cp_ctx(ctx, 0x402cb0, dev_priv->chipset == 0x40 ? 12 : 13); | ||
| 515 | gr_def(ctx, 0x402cd4, 0x00000005); | ||
| 516 | if (dev_priv->chipset != 0x40) | ||
| 517 | gr_def(ctx, 0x402ce0, 0x0000ffff); | ||
| 518 | break; | ||
| 519 | } | ||
| 520 | |||
| 521 | cp_ctx(ctx, 0x403400, dev_priv->chipset == 0x40 ? 4 : 3); | ||
| 522 | cp_ctx(ctx, 0x403410, dev_priv->chipset == 0x40 ? 4 : 3); | ||
| 523 | cp_ctx(ctx, 0x403420, nv40_graph_vs_count(ctx->dev)); | ||
| 524 | for (i = 0; i < nv40_graph_vs_count(ctx->dev); i++) | ||
| 525 | gr_def(ctx, 0x403420 + (i * 4), 0x00005555); | ||
| 526 | |||
| 527 | if (dev_priv->chipset != 0x40) { | ||
| 528 | cp_ctx(ctx, 0x403600, 1); | ||
| 529 | gr_def(ctx, 0x403600, 0x00000001); | ||
| 530 | } | ||
| 531 | cp_ctx(ctx, 0x403800, 1); | ||
| 532 | |||
| 533 | cp_ctx(ctx, 0x403c18, 1); | ||
| 534 | gr_def(ctx, 0x403c18, 0x00000001); | ||
| 535 | switch (dev_priv->chipset) { | ||
| 536 | case 0x46: | ||
| 537 | case 0x47: | ||
| 538 | case 0x49: | ||
| 539 | case 0x4b: | ||
| 540 | cp_ctx(ctx, 0x405018, 1); | ||
| 541 | gr_def(ctx, 0x405018, 0x08e00001); | ||
| 542 | cp_ctx(ctx, 0x405c24, 1); | ||
| 543 | gr_def(ctx, 0x405c24, 0x000e3000); | ||
| 544 | break; | ||
| 545 | } | ||
| 546 | if (dev_priv->chipset != 0x4e) | ||
| 547 | cp_ctx(ctx, 0x405800, 11); | ||
| 548 | cp_ctx(ctx, 0x407000, 1); | ||
| 549 | } | ||
| 550 | |||
| 551 | static void | ||
| 552 | nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx) | ||
| 553 | { | ||
| 554 | int len = nv40_graph_4097(ctx->dev) ? 0x0684 : 0x0084; | ||
| 555 | |||
| 556 | cp_out (ctx, 0x300000); | ||
| 557 | cp_lsr (ctx, len - 4); | ||
| 558 | cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_swap_state3d_3_is_save); | ||
| 559 | cp_lsr (ctx, len); | ||
| 560 | cp_name(ctx, cp_swap_state3d_3_is_save); | ||
| 561 | cp_out (ctx, 0x800001); | ||
| 562 | |||
| 563 | ctx->ctxvals_pos += len; | ||
| 564 | } | ||
| 565 | |||
| 566 | static void | ||
| 567 | nv40_graph_construct_shader(struct nouveau_grctx *ctx) | ||
| 568 | { | ||
| 569 | struct drm_device *dev = ctx->dev; | ||
| 570 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
| 571 | struct nouveau_gpuobj *obj = ctx->data; | ||
| 572 | int vs, vs_nr, vs_len, vs_nr_b0, vs_nr_b1, b0_offset, b1_offset; | ||
| 573 | int offset, i; | ||
| 574 | |||
| 575 | vs_nr = nv40_graph_vs_count(ctx->dev); | ||
| 576 | vs_nr_b0 = 363; | ||
| 577 | vs_nr_b1 = dev_priv->chipset == 0x40 ? 128 : 64; | ||
| 578 | if (dev_priv->chipset == 0x40) { | ||
| 579 | b0_offset = 0x2200/4; /* 33a0 */ | ||
| 580 | b1_offset = 0x55a0/4; /* 1500 */ | ||
| 581 | vs_len = 0x6aa0/4; | ||
| 582 | } else | ||
| 583 | if (dev_priv->chipset == 0x41 || dev_priv->chipset == 0x42) { | ||
| 584 | b0_offset = 0x2200/4; /* 2200 */ | ||
| 585 | b1_offset = 0x4400/4; /* 0b00 */ | ||
| 586 | vs_len = 0x4f00/4; | ||
| 587 | } else { | ||
| 588 | b0_offset = 0x1d40/4; /* 2200 */ | ||
| 589 | b1_offset = 0x3f40/4; /* 0b00 : 0a40 */ | ||
| 590 | vs_len = nv40_graph_4097(dev) ? 0x4a40/4 : 0x4980/4; | ||
| 591 | } | ||
| 592 | |||
| 593 | cp_lsr(ctx, vs_len * vs_nr + 0x300/4); | ||
| 594 | cp_out(ctx, nv40_graph_4097(dev) ? 0x800041 : 0x800029); | ||
| 595 | |||
| 596 | offset = ctx->ctxvals_pos; | ||
| 597 | ctx->ctxvals_pos += (0x0300/4 + (vs_nr * vs_len)); | ||
| 598 | |||
| 599 | if (ctx->mode != NOUVEAU_GRCTX_VALS) | ||
| 600 | return; | ||
| 601 | |||
| 602 | offset += 0x0280/4; | ||
| 603 | for (i = 0; i < 16; i++, offset += 2) | ||
| 604 | nv_wo32(dev, obj, offset, 0x3f800000); | ||
| 605 | |||
| 606 | for (vs = 0; vs < vs_nr; vs++, offset += vs_len) { | ||
| 607 | for (i = 0; i < vs_nr_b0 * 6; i += 6) | ||
| 608 | nv_wo32(dev, obj, offset + b0_offset + i, 0x00000001); | ||
| 609 | for (i = 0; i < vs_nr_b1 * 4; i += 4) | ||
| 610 | nv_wo32(dev, obj, offset + b1_offset + i, 0x3f800000); | ||
| 611 | } | ||
| 612 | } | ||
| 613 | |||
| 614 | void | ||
| 615 | nv40_grctx_init(struct nouveau_grctx *ctx) | ||
| 616 | { | ||
| 617 | /* decide whether we're loading/unloading the context */ | ||
| 618 | cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save); | ||
| 619 | cp_bra (ctx, USER_SAVE, PENDING, cp_setup_save); | ||
| 620 | |||
| 621 | cp_name(ctx, cp_check_load); | ||
| 622 | cp_bra (ctx, AUTO_LOAD, PENDING, cp_setup_auto_load); | ||
| 623 | cp_bra (ctx, USER_LOAD, PENDING, cp_setup_load); | ||
| 624 | cp_bra (ctx, ALWAYS, TRUE, cp_exit); | ||
| 625 | |||
| 626 | /* setup for context load */ | ||
| 627 | cp_name(ctx, cp_setup_auto_load); | ||
| 628 | cp_wait(ctx, STATUS, IDLE); | ||
| 629 | cp_out (ctx, CP_NEXT_TO_SWAP); | ||
| 630 | cp_name(ctx, cp_setup_load); | ||
| 631 | cp_wait(ctx, STATUS, IDLE); | ||
| 632 | cp_set (ctx, SWAP_DIRECTION, LOAD); | ||
| 633 | cp_out (ctx, 0x00910880); /* ?? */ | ||
| 634 | cp_out (ctx, 0x00901ffe); /* ?? */ | ||
| 635 | cp_out (ctx, 0x01940000); /* ?? */ | ||
| 636 | cp_lsr (ctx, 0x20); | ||
| 637 | cp_out (ctx, 0x0060000b); /* ?? */ | ||
| 638 | cp_wait(ctx, UNK57, CLEAR); | ||
| 639 | cp_out (ctx, 0x0060000c); /* ?? */ | ||
| 640 | cp_bra (ctx, ALWAYS, TRUE, cp_swap_state); | ||
| 641 | |||
| 642 | /* setup for context save */ | ||
| 643 | cp_name(ctx, cp_setup_save); | ||
| 644 | cp_set (ctx, SWAP_DIRECTION, SAVE); | ||
| 645 | |||
| 646 | /* general PGRAPH state */ | ||
| 647 | cp_name(ctx, cp_swap_state); | ||
| 648 | cp_pos (ctx, 0x00020/4); | ||
| 649 | nv40_graph_construct_general(ctx); | ||
| 650 | cp_wait(ctx, STATUS, IDLE); | ||
| 651 | |||
| 652 | /* 3D state, block 1 */ | ||
| 653 | cp_bra (ctx, UNK54, CLEAR, cp_prepare_exit); | ||
| 654 | nv40_graph_construct_state3d(ctx); | ||
| 655 | cp_wait(ctx, STATUS, IDLE); | ||
| 656 | |||
| 657 | /* 3D state, block 2 */ | ||
| 658 | nv40_graph_construct_state3d_2(ctx); | ||
| 659 | |||
| 660 | /* Some other block of "random" state */ | ||
| 661 | nv40_graph_construct_state3d_3(ctx); | ||
| 662 | |||
| 663 | /* Per-vertex shader state */ | ||
| 664 | cp_pos (ctx, ctx->ctxvals_pos); | ||
| 665 | nv40_graph_construct_shader(ctx); | ||
| 666 | |||
| 667 | /* pre-exit state updates */ | ||
| 668 | cp_name(ctx, cp_prepare_exit); | ||
| 669 | cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_check_load); | ||
| 670 | cp_bra (ctx, USER_SAVE, PENDING, cp_exit); | ||
| 671 | cp_out (ctx, CP_NEXT_TO_CURRENT); | ||
| 672 | |||
| 673 | cp_name(ctx, cp_exit); | ||
| 674 | cp_set (ctx, USER_SAVE, NOT_PENDING); | ||
| 675 | cp_set (ctx, USER_LOAD, NOT_PENDING); | ||
| 676 | cp_out (ctx, CP_END); | ||
| 677 | } | ||
| 678 | |||
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c index f8e28a1e44e7..118d3285fd8c 100644 --- a/drivers/gpu/drm/nouveau/nv50_crtc.c +++ b/drivers/gpu/drm/nouveau/nv50_crtc.c | |||
| @@ -45,7 +45,7 @@ nv50_crtc_lut_load(struct drm_crtc *crtc) | |||
| 45 | void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo); | 45 | void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo); |
| 46 | int i; | 46 | int i; |
| 47 | 47 | ||
| 48 | NV_DEBUG(crtc->dev, "\n"); | 48 | NV_DEBUG_KMS(crtc->dev, "\n"); |
| 49 | 49 | ||
| 50 | for (i = 0; i < 256; i++) { | 50 | for (i = 0; i < 256; i++) { |
| 51 | writew(nv_crtc->lut.r[i] >> 2, lut + 8*i + 0); | 51 | writew(nv_crtc->lut.r[i] >> 2, lut + 8*i + 0); |
| @@ -68,8 +68,8 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked) | |||
| 68 | struct nouveau_channel *evo = dev_priv->evo; | 68 | struct nouveau_channel *evo = dev_priv->evo; |
| 69 | int index = nv_crtc->index, ret; | 69 | int index = nv_crtc->index, ret; |
| 70 | 70 | ||
| 71 | NV_DEBUG(dev, "index %d\n", nv_crtc->index); | 71 | NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); |
| 72 | NV_DEBUG(dev, "%s\n", blanked ? "blanked" : "unblanked"); | 72 | NV_DEBUG_KMS(dev, "%s\n", blanked ? "blanked" : "unblanked"); |
| 73 | 73 | ||
| 74 | if (blanked) { | 74 | if (blanked) { |
| 75 | nv_crtc->cursor.hide(nv_crtc, false); | 75 | nv_crtc->cursor.hide(nv_crtc, false); |
| @@ -139,7 +139,7 @@ nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update) | |||
| 139 | struct nouveau_channel *evo = dev_priv->evo; | 139 | struct nouveau_channel *evo = dev_priv->evo; |
| 140 | int ret; | 140 | int ret; |
| 141 | 141 | ||
| 142 | NV_DEBUG(dev, "\n"); | 142 | NV_DEBUG_KMS(dev, "\n"); |
| 143 | 143 | ||
| 144 | ret = RING_SPACE(evo, 2 + (update ? 2 : 0)); | 144 | ret = RING_SPACE(evo, 2 + (update ? 2 : 0)); |
| 145 | if (ret) { | 145 | if (ret) { |
| @@ -193,7 +193,7 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, int scaling_mode, bool update) | |||
| 193 | uint32_t outX, outY, horiz, vert; | 193 | uint32_t outX, outY, horiz, vert; |
| 194 | int ret; | 194 | int ret; |
| 195 | 195 | ||
| 196 | NV_DEBUG(dev, "\n"); | 196 | NV_DEBUG_KMS(dev, "\n"); |
| 197 | 197 | ||
| 198 | switch (scaling_mode) { | 198 | switch (scaling_mode) { |
| 199 | case DRM_MODE_SCALE_NONE: | 199 | case DRM_MODE_SCALE_NONE: |
| @@ -301,7 +301,7 @@ nv50_crtc_destroy(struct drm_crtc *crtc) | |||
| 301 | struct drm_device *dev = crtc->dev; | 301 | struct drm_device *dev = crtc->dev; |
| 302 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | 302 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); |
| 303 | 303 | ||
| 304 | NV_DEBUG(dev, "\n"); | 304 | NV_DEBUG_KMS(dev, "\n"); |
| 305 | 305 | ||
| 306 | if (!crtc) | 306 | if (!crtc) |
| 307 | return; | 307 | return; |
| @@ -433,7 +433,7 @@ nv50_crtc_prepare(struct drm_crtc *crtc) | |||
| 433 | struct drm_device *dev = crtc->dev; | 433 | struct drm_device *dev = crtc->dev; |
| 434 | struct drm_encoder *encoder; | 434 | struct drm_encoder *encoder; |
| 435 | 435 | ||
| 436 | NV_DEBUG(dev, "index %d\n", nv_crtc->index); | 436 | NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); |
| 437 | 437 | ||
| 438 | /* Disconnect all unused encoders. */ | 438 | /* Disconnect all unused encoders. */ |
| 439 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | 439 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { |
| @@ -458,7 +458,7 @@ nv50_crtc_commit(struct drm_crtc *crtc) | |||
| 458 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); | 458 | struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); |
| 459 | int ret; | 459 | int ret; |
| 460 | 460 | ||
| 461 | NV_DEBUG(dev, "index %d\n", nv_crtc->index); | 461 | NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); |
| 462 | 462 | ||
| 463 | nv50_crtc_blank(nv_crtc, false); | 463 | nv50_crtc_blank(nv_crtc, false); |
| 464 | 464 | ||
| @@ -497,7 +497,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, int x, int y, | |||
| 497 | struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); | 497 | struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb); |
| 498 | int ret, format; | 498 | int ret, format; |
| 499 | 499 | ||
| 500 | NV_DEBUG(dev, "index %d\n", nv_crtc->index); | 500 | NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); |
| 501 | 501 | ||
| 502 | switch (drm_fb->depth) { | 502 | switch (drm_fb->depth) { |
| 503 | case 8: | 503 | case 8: |
| @@ -612,7 +612,7 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
| 612 | 612 | ||
| 613 | *nv_crtc->mode = *adjusted_mode; | 613 | *nv_crtc->mode = *adjusted_mode; |
| 614 | 614 | ||
| 615 | NV_DEBUG(dev, "index %d\n", nv_crtc->index); | 615 | NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index); |
| 616 | 616 | ||
| 617 | hsync_dur = adjusted_mode->hsync_end - adjusted_mode->hsync_start; | 617 | hsync_dur = adjusted_mode->hsync_end - adjusted_mode->hsync_start; |
| 618 | vsync_dur = adjusted_mode->vsync_end - adjusted_mode->vsync_start; | 618 | vsync_dur = adjusted_mode->vsync_end - adjusted_mode->vsync_start; |
| @@ -706,7 +706,7 @@ nv50_crtc_create(struct drm_device *dev, int index) | |||
| 706 | struct nouveau_crtc *nv_crtc = NULL; | 706 | struct nouveau_crtc *nv_crtc = NULL; |
| 707 | int ret, i; | 707 | int ret, i; |
| 708 | 708 | ||
| 709 | NV_DEBUG(dev, "\n"); | 709 | NV_DEBUG_KMS(dev, "\n"); |
| 710 | 710 | ||
| 711 | nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL); | 711 | nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL); |
| 712 | if (!nv_crtc) | 712 | if (!nv_crtc) |
diff --git a/drivers/gpu/drm/nouveau/nv50_cursor.c b/drivers/gpu/drm/nouveau/nv50_cursor.c index e2e79a8f220d..753e723adb3a 100644 --- a/drivers/gpu/drm/nouveau/nv50_cursor.c +++ b/drivers/gpu/drm/nouveau/nv50_cursor.c | |||
| @@ -41,7 +41,7 @@ nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update) | |||
| 41 | struct drm_device *dev = nv_crtc->base.dev; | 41 | struct drm_device *dev = nv_crtc->base.dev; |
| 42 | int ret; | 42 | int ret; |
| 43 | 43 | ||
| 44 | NV_DEBUG(dev, "\n"); | 44 | NV_DEBUG_KMS(dev, "\n"); |
| 45 | 45 | ||
| 46 | if (update && nv_crtc->cursor.visible) | 46 | if (update && nv_crtc->cursor.visible) |
| 47 | return; | 47 | return; |
| @@ -76,7 +76,7 @@ nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update) | |||
| 76 | struct drm_device *dev = nv_crtc->base.dev; | 76 | struct drm_device *dev = nv_crtc->base.dev; |
| 77 | int ret; | 77 | int ret; |
| 78 | 78 | ||
| 79 | NV_DEBUG(dev, "\n"); | 79 | NV_DEBUG_KMS(dev, "\n"); |
| 80 | 80 | ||
| 81 | if (update && !nv_crtc->cursor.visible) | 81 | if (update && !nv_crtc->cursor.visible) |
| 82 | return; | 82 | return; |
| @@ -116,7 +116,7 @@ nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y) | |||
| 116 | static void | 116 | static void |
| 117 | nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset) | 117 | nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset) |
| 118 | { | 118 | { |
| 119 | NV_DEBUG(nv_crtc->base.dev, "\n"); | 119 | NV_DEBUG_KMS(nv_crtc->base.dev, "\n"); |
| 120 | if (offset == nv_crtc->cursor.offset) | 120 | if (offset == nv_crtc->cursor.offset) |
| 121 | return; | 121 | return; |
| 122 | 122 | ||
| @@ -143,7 +143,7 @@ nv50_cursor_fini(struct nouveau_crtc *nv_crtc) | |||
| 143 | struct drm_device *dev = nv_crtc->base.dev; | 143 | struct drm_device *dev = nv_crtc->base.dev; |
| 144 | int idx = nv_crtc->index; | 144 | int idx = nv_crtc->index; |
| 145 | 145 | ||
| 146 | NV_DEBUG(dev, "\n"); | 146 | NV_DEBUG_KMS(dev, "\n"); |
| 147 | 147 | ||
| 148 | nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx), 0); | 148 | nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx), 0); |
| 149 | if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx), | 149 | if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx), |
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c index fb5838e3be24..f08f042a8e10 100644 --- a/drivers/gpu/drm/nouveau/nv50_dac.c +++ b/drivers/gpu/drm/nouveau/nv50_dac.c | |||
| @@ -44,7 +44,7 @@ nv50_dac_disconnect(struct nouveau_encoder *nv_encoder) | |||
| 44 | struct nouveau_channel *evo = dev_priv->evo; | 44 | struct nouveau_channel *evo = dev_priv->evo; |
| 45 | int ret; | 45 | int ret; |
| 46 | 46 | ||
| 47 | NV_DEBUG(dev, "Disconnecting DAC %d\n", nv_encoder->or); | 47 | NV_DEBUG_KMS(dev, "Disconnecting DAC %d\n", nv_encoder->or); |
| 48 | 48 | ||
| 49 | ret = RING_SPACE(evo, 2); | 49 | ret = RING_SPACE(evo, 2); |
| 50 | if (ret) { | 50 | if (ret) { |
| @@ -81,11 +81,11 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) | |||
| 81 | /* Use bios provided value if possible. */ | 81 | /* Use bios provided value if possible. */ |
| 82 | if (dev_priv->vbios->dactestval) { | 82 | if (dev_priv->vbios->dactestval) { |
| 83 | load_pattern = dev_priv->vbios->dactestval; | 83 | load_pattern = dev_priv->vbios->dactestval; |
| 84 | NV_DEBUG(dev, "Using bios provided load_pattern of %d\n", | 84 | NV_DEBUG_KMS(dev, "Using bios provided load_pattern of %d\n", |
| 85 | load_pattern); | 85 | load_pattern); |
| 86 | } else { | 86 | } else { |
| 87 | load_pattern = 340; | 87 | load_pattern = 340; |
| 88 | NV_DEBUG(dev, "Using default load_pattern of %d\n", | 88 | NV_DEBUG_KMS(dev, "Using default load_pattern of %d\n", |
| 89 | load_pattern); | 89 | load_pattern); |
| 90 | } | 90 | } |
| 91 | 91 | ||
| @@ -103,9 +103,9 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector) | |||
| 103 | status = connector_status_connected; | 103 | status = connector_status_connected; |
| 104 | 104 | ||
| 105 | if (status == connector_status_connected) | 105 | if (status == connector_status_connected) |
| 106 | NV_DEBUG(dev, "Load was detected on output with or %d\n", or); | 106 | NV_DEBUG_KMS(dev, "Load was detected on output with or %d\n", or); |
| 107 | else | 107 | else |
| 108 | NV_DEBUG(dev, "Load was not detected on output with or %d\n", or); | 108 | NV_DEBUG_KMS(dev, "Load was not detected on output with or %d\n", or); |
| 109 | 109 | ||
| 110 | return status; | 110 | return status; |
| 111 | } | 111 | } |
| @@ -118,7 +118,7 @@ nv50_dac_dpms(struct drm_encoder *encoder, int mode) | |||
| 118 | uint32_t val; | 118 | uint32_t val; |
| 119 | int or = nv_encoder->or; | 119 | int or = nv_encoder->or; |
| 120 | 120 | ||
| 121 | NV_DEBUG(dev, "or %d mode %d\n", or, mode); | 121 | NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode); |
| 122 | 122 | ||
| 123 | /* wait for it to be done */ | 123 | /* wait for it to be done */ |
| 124 | if (!nv_wait(NV50_PDISPLAY_DAC_DPMS_CTRL(or), | 124 | if (!nv_wait(NV50_PDISPLAY_DAC_DPMS_CTRL(or), |
| @@ -173,7 +173,7 @@ nv50_dac_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
| 173 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | 173 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); |
| 174 | struct nouveau_connector *connector; | 174 | struct nouveau_connector *connector; |
| 175 | 175 | ||
| 176 | NV_DEBUG(encoder->dev, "or %d\n", nv_encoder->or); | 176 | NV_DEBUG_KMS(encoder->dev, "or %d\n", nv_encoder->or); |
| 177 | 177 | ||
| 178 | connector = nouveau_encoder_connector_get(nv_encoder); | 178 | connector = nouveau_encoder_connector_get(nv_encoder); |
| 179 | if (!connector) { | 179 | if (!connector) { |
| @@ -213,7 +213,7 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
| 213 | uint32_t mode_ctl = 0, mode_ctl2 = 0; | 213 | uint32_t mode_ctl = 0, mode_ctl2 = 0; |
| 214 | int ret; | 214 | int ret; |
| 215 | 215 | ||
| 216 | NV_DEBUG(dev, "or %d\n", nv_encoder->or); | 216 | NV_DEBUG_KMS(dev, "or %d\n", nv_encoder->or); |
| 217 | 217 | ||
| 218 | nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON); | 218 | nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON); |
| 219 | 219 | ||
| @@ -264,7 +264,7 @@ nv50_dac_destroy(struct drm_encoder *encoder) | |||
| 264 | if (!encoder) | 264 | if (!encoder) |
| 265 | return; | 265 | return; |
| 266 | 266 | ||
| 267 | NV_DEBUG(encoder->dev, "\n"); | 267 | NV_DEBUG_KMS(encoder->dev, "\n"); |
| 268 | 268 | ||
| 269 | drm_encoder_cleanup(encoder); | 269 | drm_encoder_cleanup(encoder); |
| 270 | kfree(nv_encoder); | 270 | kfree(nv_encoder); |
| @@ -280,7 +280,7 @@ nv50_dac_create(struct drm_device *dev, struct dcb_entry *entry) | |||
| 280 | struct nouveau_encoder *nv_encoder; | 280 | struct nouveau_encoder *nv_encoder; |
| 281 | struct drm_encoder *encoder; | 281 | struct drm_encoder *encoder; |
| 282 | 282 | ||
| 283 | NV_DEBUG(dev, "\n"); | 283 | NV_DEBUG_KMS(dev, "\n"); |
| 284 | NV_INFO(dev, "Detected a DAC output\n"); | 284 | NV_INFO(dev, "Detected a DAC output\n"); |
| 285 | 285 | ||
| 286 | nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); | 286 | nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL); |
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 12c5ee63495b..a9263d92a231 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c | |||
| @@ -188,7 +188,7 @@ nv50_display_init(struct drm_device *dev) | |||
| 188 | uint64_t start; | 188 | uint64_t start; |
| 189 | int ret, i; | 189 | int ret, i; |
| 190 | 190 | ||
| 191 | NV_DEBUG(dev, "\n"); | 191 | NV_DEBUG_KMS(dev, "\n"); |
| 192 | 192 | ||
| 193 | nv_wr32(dev, 0x00610184, nv_rd32(dev, 0x00614004)); | 193 | nv_wr32(dev, 0x00610184, nv_rd32(dev, 0x00614004)); |
| 194 | /* | 194 | /* |
| @@ -232,7 +232,7 @@ nv50_display_init(struct drm_device *dev) | |||
| 232 | nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0); | 232 | nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0); |
| 233 | /* RAM is clamped to 256 MiB. */ | 233 | /* RAM is clamped to 256 MiB. */ |
| 234 | ram_amount = nouveau_mem_fb_amount(dev); | 234 | ram_amount = nouveau_mem_fb_amount(dev); |
| 235 | NV_DEBUG(dev, "ram_amount %d\n", ram_amount); | 235 | NV_DEBUG_KMS(dev, "ram_amount %d\n", ram_amount); |
| 236 | if (ram_amount > 256*1024*1024) | 236 | if (ram_amount > 256*1024*1024) |
| 237 | ram_amount = 256*1024*1024; | 237 | ram_amount = 256*1024*1024; |
| 238 | nv_wr32(dev, NV50_PDISPLAY_RAM_AMOUNT, ram_amount - 1); | 238 | nv_wr32(dev, NV50_PDISPLAY_RAM_AMOUNT, ram_amount - 1); |
| @@ -398,7 +398,7 @@ static int nv50_display_disable(struct drm_device *dev) | |||
| 398 | struct drm_crtc *drm_crtc; | 398 | struct drm_crtc *drm_crtc; |
| 399 | int ret, i; | 399 | int ret, i; |
| 400 | 400 | ||
| 401 | NV_DEBUG(dev, "\n"); | 401 | NV_DEBUG_KMS(dev, "\n"); |
| 402 | 402 | ||
| 403 | list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) { | 403 | list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) { |
| 404 | struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc); | 404 | struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc); |
| @@ -469,7 +469,7 @@ int nv50_display_create(struct drm_device *dev) | |||
| 469 | uint32_t connector[16] = {}; | 469 | uint32_t connector[16] = {}; |
| 470 | int ret, i; | 470 | int ret, i; |
| 471 | 471 | ||
| 472 | NV_DEBUG(dev, "\n"); | 472 | NV_DEBUG_KMS(dev, "\n"); |
| 473 | 473 | ||
| 474 | /* init basic kernel modesetting */ | 474 | /* init basic kernel modesetting */ |
| 475 | drm_mode_config_init(dev); | 475 | drm_mode_config_init(dev); |
| @@ -573,7 +573,7 @@ int nv50_display_destroy(struct drm_device *dev) | |||
| 573 | { | 573 | { |
| 574 | struct drm_nouveau_private *dev_priv = dev->dev_private; | 574 | struct drm_nouveau_private *dev_priv = dev->dev_private; |
| 575 | 575 | ||
| 576 | NV_DEBUG(dev, "\n"); | 576 | NV_DEBUG_KMS(dev, "\n"); |
| 577 | 577 | ||
| 578 | drm_mode_config_cleanup(dev); | 578 | drm_mode_config_cleanup(dev); |
| 579 | 579 | ||
| @@ -617,7 +617,7 @@ nv50_display_irq_head(struct drm_device *dev, int *phead, | |||
| 617 | * CRTC separately, and submission will be blocked by the GPU | 617 | * CRTC separately, and submission will be blocked by the GPU |
| 618 | * until we handle each in turn. | 618 | * until we handle each in turn. |
| 619 | */ | 619 | */ |
| 620 | NV_DEBUG(dev, "0x610030: 0x%08x\n", unk30); | 620 | NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30); |
| 621 | head = ffs((unk30 >> 9) & 3) - 1; | 621 | head = ffs((unk30 >> 9) & 3) - 1; |
| 622 | if (head < 0) | 622 | if (head < 0) |
| 623 | return -EINVAL; | 623 | return -EINVAL; |
| @@ -661,7 +661,7 @@ nv50_display_irq_head(struct drm_device *dev, int *phead, | |||
| 661 | or = i; | 661 | or = i; |
| 662 | } | 662 | } |
| 663 | 663 | ||
| 664 | NV_DEBUG(dev, "type %d, or %d\n", type, or); | 664 | NV_DEBUG_KMS(dev, "type %d, or %d\n", type, or); |
| 665 | if (type == OUTPUT_ANY) { | 665 | if (type == OUTPUT_ANY) { |
| 666 | NV_ERROR(dev, "unknown encoder!!\n"); | 666 | NV_ERROR(dev, "unknown encoder!!\n"); |
| 667 | return -1; | 667 | return -1; |
| @@ -811,7 +811,7 @@ nv50_display_unk20_handler(struct drm_device *dev) | |||
| 811 | pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(head, CLOCK)) & 0x3fffff; | 811 | pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(head, CLOCK)) & 0x3fffff; |
| 812 | script = nv50_display_script_select(dev, dcbent, pclk); | 812 | script = nv50_display_script_select(dev, dcbent, pclk); |
| 813 | 813 | ||
| 814 | NV_DEBUG(dev, "head %d pxclk: %dKHz\n", head, pclk); | 814 | NV_DEBUG_KMS(dev, "head %d pxclk: %dKHz\n", head, pclk); |
| 815 | 815 | ||
| 816 | if (dcbent->type != OUTPUT_DP) | 816 | if (dcbent->type != OUTPUT_DP) |
| 817 | nouveau_bios_run_display_table(dev, dcbent, 0, -2); | 817 | nouveau_bios_run_display_table(dev, dcbent, 0, -2); |
| @@ -870,7 +870,7 @@ nv50_display_irq_handler_bh(struct work_struct *work) | |||
| 870 | uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0); | 870 | uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0); |
| 871 | uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1); | 871 | uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1); |
| 872 | 872 | ||
| 873 | NV_DEBUG(dev, "PDISPLAY_INTR_BH 0x%08x 0x%08x\n", intr0, intr1); | 873 | NV_DEBUG_KMS(dev, "PDISPLAY_INTR_BH 0x%08x 0x%08x\n", intr0, intr1); |
| 874 | 874 | ||
| 875 | if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK10) | 875 | if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK10) |
| 876 | nv50_display_unk10_handler(dev); | 876 | nv50_display_unk10_handler(dev); |
| @@ -974,7 +974,7 @@ nv50_display_irq_handler(struct drm_device *dev) | |||
| 974 | uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1); | 974 | uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1); |
| 975 | uint32_t clock; | 975 | uint32_t clock; |
| 976 | 976 | ||
| 977 | NV_DEBUG(dev, "PDISPLAY_INTR 0x%08x 0x%08x\n", intr0, intr1); | 977 | NV_DEBUG_KMS(dev, "PDISPLAY_INTR 0x%08x 0x%08x\n", intr0, intr1); |
| 978 | 978 | ||
| 979 | if (!intr0 && !(intr1 & ~delayed)) | 979 | if (!intr0 && !(intr1 & ~delayed)) |
| 980 | break; | 980 | break; |
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c index 77ae1aaa0bce..b7282284f080 100644 --- a/drivers/gpu/drm/nouveau/nv50_fifo.c +++ b/drivers/gpu/drm/nouveau/nv50_fifo.c | |||
| @@ -416,7 +416,7 @@ nv50_fifo_unload_context(struct drm_device *dev) | |||
| 416 | NV_DEBUG(dev, "\n"); | 416 | NV_DEBUG(dev, "\n"); |
| 417 | 417 | ||
| 418 | chid = pfifo->channel_id(dev); | 418 | chid = pfifo->channel_id(dev); |
| 419 | if (chid < 0 || chid >= dev_priv->engine.fifo.channels) | 419 | if (chid < 1 || chid >= dev_priv->engine.fifo.channels - 1) |
| 420 | return 0; | 420 | return 0; |
| 421 | 421 | ||
| 422 | chan = dev_priv->fifos[chid]; | 422 | chan = dev_priv->fifos[chid]; |
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c index 177d8229336f..ca79f32be44c 100644 --- a/drivers/gpu/drm/nouveau/nv50_graph.c +++ b/drivers/gpu/drm/nouveau/nv50_graph.c | |||
| @@ -107,9 +107,13 @@ nv50_graph_init_regs(struct drm_device *dev) | |||
| 107 | static int | 107 | static int |
| 108 | nv50_graph_init_ctxctl(struct drm_device *dev) | 108 | nv50_graph_init_ctxctl(struct drm_device *dev) |
| 109 | { | 109 | { |
| 110 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
| 111 | |||
| 110 | NV_DEBUG(dev, "\n"); | 112 | NV_DEBUG(dev, "\n"); |
| 111 | 113 | ||
| 112 | nv40_grctx_init(dev); | 114 | nouveau_grctx_prog_load(dev); |
| 115 | if (!dev_priv->engine.graph.ctxprog) | ||
| 116 | dev_priv->engine.graph.accel_blocked = true; | ||
| 113 | 117 | ||
| 114 | nv_wr32(dev, 0x400320, 4); | 118 | nv_wr32(dev, 0x400320, 4); |
| 115 | nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0); | 119 | nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0); |
| @@ -140,7 +144,7 @@ void | |||
| 140 | nv50_graph_takedown(struct drm_device *dev) | 144 | nv50_graph_takedown(struct drm_device *dev) |
| 141 | { | 145 | { |
| 142 | NV_DEBUG(dev, "\n"); | 146 | NV_DEBUG(dev, "\n"); |
| 143 | nv40_grctx_fini(dev); | 147 | nouveau_grctx_fini(dev); |
| 144 | } | 148 | } |
| 145 | 149 | ||
| 146 | void | 150 | void |
| @@ -207,7 +211,7 @@ nv50_graph_create_context(struct nouveau_channel *chan) | |||
| 207 | dev_priv->engine.instmem.finish_access(dev); | 211 | dev_priv->engine.instmem.finish_access(dev); |
| 208 | 212 | ||
| 209 | dev_priv->engine.instmem.prepare_access(dev, true); | 213 | dev_priv->engine.instmem.prepare_access(dev, true); |
| 210 | nv40_grctx_vals_load(dev, ctx); | 214 | nouveau_grctx_vals_load(dev, ctx); |
| 211 | nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12); | 215 | nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12); |
| 212 | if ((dev_priv->chipset & 0xf0) == 0xa0) | 216 | if ((dev_priv->chipset & 0xf0) == 0xa0) |
| 213 | nv_wo32(dev, ctx, 0x00004/4, 0x00000000); | 217 | nv_wo32(dev, ctx, 0x00004/4, 0x00000000); |
diff --git a/drivers/gpu/drm/nouveau/nv50_sor.c b/drivers/gpu/drm/nouveau/nv50_sor.c index 8c280463a664..e395c16d30f5 100644 --- a/drivers/gpu/drm/nouveau/nv50_sor.c +++ b/drivers/gpu/drm/nouveau/nv50_sor.c | |||
| @@ -44,7 +44,7 @@ nv50_sor_disconnect(struct nouveau_encoder *nv_encoder) | |||
| 44 | struct nouveau_channel *evo = dev_priv->evo; | 44 | struct nouveau_channel *evo = dev_priv->evo; |
| 45 | int ret; | 45 | int ret; |
| 46 | 46 | ||
| 47 | NV_DEBUG(dev, "Disconnecting SOR %d\n", nv_encoder->or); | 47 | NV_DEBUG_KMS(dev, "Disconnecting SOR %d\n", nv_encoder->or); |
| 48 | 48 | ||
| 49 | ret = RING_SPACE(evo, 2); | 49 | ret = RING_SPACE(evo, 2); |
| 50 | if (ret) { | 50 | if (ret) { |
| @@ -70,7 +70,7 @@ nv50_sor_dp_link_train(struct drm_encoder *encoder) | |||
| 70 | } | 70 | } |
| 71 | 71 | ||
| 72 | if (dpe->script0) { | 72 | if (dpe->script0) { |
| 73 | NV_DEBUG(dev, "SOR-%d: running DP script 0\n", nv_encoder->or); | 73 | NV_DEBUG_KMS(dev, "SOR-%d: running DP script 0\n", nv_encoder->or); |
| 74 | nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script0), | 74 | nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script0), |
| 75 | nv_encoder->dcb); | 75 | nv_encoder->dcb); |
| 76 | } | 76 | } |
| @@ -79,7 +79,7 @@ nv50_sor_dp_link_train(struct drm_encoder *encoder) | |||
| 79 | NV_ERROR(dev, "SOR-%d: link training failed\n", nv_encoder->or); | 79 | NV_ERROR(dev, "SOR-%d: link training failed\n", nv_encoder->or); |
| 80 | 80 | ||
| 81 | if (dpe->script1) { | 81 | if (dpe->script1) { |
| 82 | NV_DEBUG(dev, "SOR-%d: running DP script 1\n", nv_encoder->or); | 82 | NV_DEBUG_KMS(dev, "SOR-%d: running DP script 1\n", nv_encoder->or); |
| 83 | nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script1), | 83 | nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script1), |
| 84 | nv_encoder->dcb); | 84 | nv_encoder->dcb); |
| 85 | } | 85 | } |
| @@ -93,7 +93,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode) | |||
| 93 | uint32_t val; | 93 | uint32_t val; |
| 94 | int or = nv_encoder->or; | 94 | int or = nv_encoder->or; |
| 95 | 95 | ||
| 96 | NV_DEBUG(dev, "or %d mode %d\n", or, mode); | 96 | NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode); |
| 97 | 97 | ||
| 98 | /* wait for it to be done */ | 98 | /* wait for it to be done */ |
| 99 | if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_CTRL(or), | 99 | if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_CTRL(or), |
| @@ -142,7 +142,7 @@ nv50_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
| 142 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); | 142 | struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); |
| 143 | struct nouveau_connector *connector; | 143 | struct nouveau_connector *connector; |
| 144 | 144 | ||
| 145 | NV_DEBUG(encoder->dev, "or %d\n", nv_encoder->or); | 145 | NV_DEBUG_KMS(encoder->dev, "or %d\n", nv_encoder->or); |
| 146 | 146 | ||
| 147 | connector = nouveau_encoder_connector_get(nv_encoder); | 147 | connector = nouveau_encoder_connector_get(nv_encoder); |
| 148 | if (!connector) { | 148 | if (!connector) { |
| @@ -182,7 +182,7 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
| 182 | uint32_t mode_ctl = 0; | 182 | uint32_t mode_ctl = 0; |
| 183 | int ret; | 183 | int ret; |
| 184 | 184 | ||
| 185 | NV_DEBUG(dev, "or %d\n", nv_encoder->or); | 185 | NV_DEBUG_KMS(dev, "or %d\n", nv_encoder->or); |
| 186 | 186 | ||
| 187 | nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON); | 187 | nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON); |
| 188 | 188 | ||
| @@ -246,7 +246,7 @@ nv50_sor_destroy(struct drm_encoder *encoder) | |||
| 246 | if (!encoder) | 246 | if (!encoder) |
| 247 | return; | 247 | return; |
| 248 | 248 | ||
| 249 | NV_DEBUG(encoder->dev, "\n"); | 249 | NV_DEBUG_KMS(encoder->dev, "\n"); |
| 250 | 250 | ||
| 251 | drm_encoder_cleanup(encoder); | 251 | drm_encoder_cleanup(encoder); |
| 252 | 252 | ||
| @@ -265,7 +265,7 @@ nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry) | |||
| 265 | bool dum; | 265 | bool dum; |
| 266 | int type; | 266 | int type; |
| 267 | 267 | ||
| 268 | NV_DEBUG(dev, "\n"); | 268 | NV_DEBUG_KMS(dev, "\n"); |
| 269 | 269 | ||
| 270 | switch (entry->type) { | 270 | switch (entry->type) { |
| 271 | case OUTPUT_TMDS: | 271 | case OUTPUT_TMDS: |
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c index 601f4c0e5da5..b806fdcc7170 100644 --- a/drivers/gpu/drm/r128/r128_drv.c +++ b/drivers/gpu/drm/r128/r128_drv.c | |||
| @@ -64,7 +64,7 @@ static struct drm_driver driver = { | |||
| 64 | .owner = THIS_MODULE, | 64 | .owner = THIS_MODULE, |
| 65 | .open = drm_open, | 65 | .open = drm_open, |
| 66 | .release = drm_release, | 66 | .release = drm_release, |
| 67 | .ioctl = drm_ioctl, | 67 | .unlocked_ioctl = drm_ioctl, |
| 68 | .mmap = drm_mmap, | 68 | .mmap = drm_mmap, |
| 69 | .poll = drm_poll, | 69 | .poll = drm_poll, |
| 70 | .fasync = drm_fasync, | 70 | .fasync = drm_fasync, |
diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c index d3cb676eee84..51c99fc4dd38 100644 --- a/drivers/gpu/drm/r128/r128_ioc32.c +++ b/drivers/gpu/drm/r128/r128_ioc32.c | |||
| @@ -95,8 +95,7 @@ static int compat_r128_init(struct file *file, unsigned int cmd, | |||
| 95 | &init->agp_textures_offset)) | 95 | &init->agp_textures_offset)) |
| 96 | return -EFAULT; | 96 | return -EFAULT; |
| 97 | 97 | ||
| 98 | return drm_ioctl(file->f_path.dentry->d_inode, file, | 98 | return drm_ioctl(file, DRM_IOCTL_R128_INIT, (unsigned long)init); |
| 99 | DRM_IOCTL_R128_INIT, (unsigned long)init); | ||
| 100 | } | 99 | } |
| 101 | 100 | ||
| 102 | typedef struct drm_r128_depth32 { | 101 | typedef struct drm_r128_depth32 { |
| @@ -129,8 +128,7 @@ static int compat_r128_depth(struct file *file, unsigned int cmd, | |||
| 129 | &depth->mask)) | 128 | &depth->mask)) |
| 130 | return -EFAULT; | 129 | return -EFAULT; |
| 131 | 130 | ||
| 132 | return drm_ioctl(file->f_path.dentry->d_inode, file, | 131 | return drm_ioctl(file, DRM_IOCTL_R128_DEPTH, (unsigned long)depth); |
| 133 | DRM_IOCTL_R128_DEPTH, (unsigned long)depth); | ||
| 134 | 132 | ||
| 135 | } | 133 | } |
| 136 | 134 | ||
| @@ -153,8 +151,7 @@ static int compat_r128_stipple(struct file *file, unsigned int cmd, | |||
| 153 | &stipple->mask)) | 151 | &stipple->mask)) |
| 154 | return -EFAULT; | 152 | return -EFAULT; |
| 155 | 153 | ||
| 156 | return drm_ioctl(file->f_path.dentry->d_inode, file, | 154 | return drm_ioctl(file, DRM_IOCTL_R128_STIPPLE, (unsigned long)stipple); |
| 157 | DRM_IOCTL_R128_STIPPLE, (unsigned long)stipple); | ||
| 158 | } | 155 | } |
| 159 | 156 | ||
| 160 | typedef struct drm_r128_getparam32 { | 157 | typedef struct drm_r128_getparam32 { |
| @@ -178,8 +175,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd, | |||
| 178 | &getparam->value)) | 175 | &getparam->value)) |
| 179 | return -EFAULT; | 176 | return -EFAULT; |
| 180 | 177 | ||
| 181 | return drm_ioctl(file->f_path.dentry->d_inode, file, | 178 | return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam); |
| 182 | DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam); | ||
| 183 | } | 179 | } |
| 184 | 180 | ||
| 185 | drm_ioctl_compat_t *r128_compat_ioctls[] = { | 181 | drm_ioctl_compat_t *r128_compat_ioctls[] = { |
| @@ -210,12 +206,10 @@ long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | |||
| 210 | if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls)) | 206 | if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls)) |
| 211 | fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE]; | 207 | fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE]; |
| 212 | 208 | ||
| 213 | lock_kernel(); /* XXX for now */ | ||
| 214 | if (fn != NULL) | 209 | if (fn != NULL) |
| 215 | ret = (*fn) (filp, cmd, arg); | 210 | ret = (*fn) (filp, cmd, arg); |
| 216 | else | 211 | else |
| 217 | ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg); | 212 | ret = drm_ioctl(filp, cmd, arg); |
| 218 | unlock_kernel(); | ||
| 219 | 213 | ||
| 220 | return ret; | 214 | return ret; |
| 221 | } | 215 | } |
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index 6578d19dff93..388140a7e651 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c | |||
| @@ -58,6 +58,7 @@ typedef struct { | |||
| 58 | } atom_exec_context; | 58 | } atom_exec_context; |
| 59 | 59 | ||
| 60 | int atom_debug = 0; | 60 | int atom_debug = 0; |
| 61 | static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params); | ||
| 61 | void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params); | 62 | void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params); |
| 62 | 63 | ||
| 63 | static uint32_t atom_arg_mask[8] = | 64 | static uint32_t atom_arg_mask[8] = |
| @@ -573,7 +574,7 @@ static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg) | |||
| 573 | else | 574 | else |
| 574 | SDEBUG(" table: %d\n", idx); | 575 | SDEBUG(" table: %d\n", idx); |
| 575 | if (U16(ctx->ctx->cmd_table + 4 + 2 * idx)) | 576 | if (U16(ctx->ctx->cmd_table + 4 + 2 * idx)) |
| 576 | atom_execute_table(ctx->ctx, idx, ctx->ps + ctx->ps_shift); | 577 | atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift); |
| 577 | } | 578 | } |
| 578 | 579 | ||
| 579 | static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg) | 580 | static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg) |
| @@ -1040,7 +1041,7 @@ static struct { | |||
| 1040 | atom_op_shr, ATOM_ARG_MC}, { | 1041 | atom_op_shr, ATOM_ARG_MC}, { |
| 1041 | atom_op_debug, 0},}; | 1042 | atom_op_debug, 0},}; |
| 1042 | 1043 | ||
| 1043 | void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) | 1044 | static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params) |
| 1044 | { | 1045 | { |
| 1045 | int base = CU16(ctx->cmd_table + 4 + 2 * index); | 1046 | int base = CU16(ctx->cmd_table + 4 + 2 * index); |
| 1046 | int len, ws, ps, ptr; | 1047 | int len, ws, ps, ptr; |
| @@ -1092,6 +1093,13 @@ void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) | |||
| 1092 | kfree(ectx.ws); | 1093 | kfree(ectx.ws); |
| 1093 | } | 1094 | } |
| 1094 | 1095 | ||
| 1096 | void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) | ||
| 1097 | { | ||
| 1098 | mutex_lock(&ctx->mutex); | ||
| 1099 | atom_execute_table_locked(ctx, index, params); | ||
| 1100 | mutex_unlock(&ctx->mutex); | ||
| 1101 | } | ||
| 1102 | |||
| 1095 | static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 }; | 1103 | static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 }; |
| 1096 | 1104 | ||
| 1097 | static void atom_index_iio(struct atom_context *ctx, int base) | 1105 | static void atom_index_iio(struct atom_context *ctx, int base) |
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h index 6671848e5ea1..47fd943f6d14 100644 --- a/drivers/gpu/drm/radeon/atom.h +++ b/drivers/gpu/drm/radeon/atom.h | |||
| @@ -120,6 +120,7 @@ struct card_info { | |||
| 120 | 120 | ||
| 121 | struct atom_context { | 121 | struct atom_context { |
| 122 | struct card_info *card; | 122 | struct card_info *card; |
| 123 | struct mutex mutex; | ||
| 123 | void *bios; | 124 | void *bios; |
| 124 | uint32_t cmd_table, data_table; | 125 | uint32_t cmd_table, data_table; |
| 125 | uint16_t *iio; | 126 | uint16_t *iio; |
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h index 5f48515c77a7..91ad0d1c1b17 100644 --- a/drivers/gpu/drm/radeon/atombios.h +++ b/drivers/gpu/drm/radeon/atombios.h | |||
| @@ -4690,6 +4690,205 @@ typedef struct _ATOM_POWERPLAY_INFO_V3 { | |||
| 4690 | ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK]; | 4690 | ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK]; |
| 4691 | } ATOM_POWERPLAY_INFO_V3; | 4691 | } ATOM_POWERPLAY_INFO_V3; |
| 4692 | 4692 | ||
| 4693 | /* New PPlib */ | ||
| 4694 | /**************************************************************************/ | ||
| 4695 | typedef struct _ATOM_PPLIB_THERMALCONTROLLER | ||
| 4696 | |||
| 4697 | { | ||
| 4698 | UCHAR ucType; // one of ATOM_PP_THERMALCONTROLLER_* | ||
| 4699 | UCHAR ucI2cLine; // as interpreted by DAL I2C | ||
| 4700 | UCHAR ucI2cAddress; | ||
| 4701 | UCHAR ucFanParameters; // Fan Control Parameters. | ||
| 4702 | UCHAR ucFanMinRPM; // Fan Minimum RPM (hundreds) -- for display purposes only. | ||
| 4703 | UCHAR ucFanMaxRPM; // Fan Maximum RPM (hundreds) -- for display purposes only. | ||
| 4704 | UCHAR ucReserved; // ---- | ||
| 4705 | UCHAR ucFlags; // to be defined | ||
| 4706 | } ATOM_PPLIB_THERMALCONTROLLER; | ||
| 4707 | |||
| 4708 | #define ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK 0x0f | ||
| 4709 | #define ATOM_PP_FANPARAMETERS_NOFAN 0x80 // No fan is connected to this controller. | ||
| 4710 | |||
| 4711 | #define ATOM_PP_THERMALCONTROLLER_NONE 0 | ||
| 4712 | #define ATOM_PP_THERMALCONTROLLER_LM63 1 // Not used by PPLib | ||
| 4713 | #define ATOM_PP_THERMALCONTROLLER_ADM1032 2 // Not used by PPLib | ||
| 4714 | #define ATOM_PP_THERMALCONTROLLER_ADM1030 3 // Not used by PPLib | ||
| 4715 | #define ATOM_PP_THERMALCONTROLLER_MUA6649 4 // Not used by PPLib | ||
| 4716 | #define ATOM_PP_THERMALCONTROLLER_LM64 5 | ||
| 4717 | #define ATOM_PP_THERMALCONTROLLER_F75375 6 // Not used by PPLib | ||
| 4718 | #define ATOM_PP_THERMALCONTROLLER_RV6xx 7 | ||
| 4719 | #define ATOM_PP_THERMALCONTROLLER_RV770 8 | ||
| 4720 | #define ATOM_PP_THERMALCONTROLLER_ADT7473 9 | ||
| 4721 | |||
| 4722 | typedef struct _ATOM_PPLIB_STATE | ||
| 4723 | { | ||
| 4724 | UCHAR ucNonClockStateIndex; | ||
| 4725 | UCHAR ucClockStateIndices[1]; // variable-sized | ||
| 4726 | } ATOM_PPLIB_STATE; | ||
| 4727 | |||
| 4728 | //// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps | ||
| 4729 | #define ATOM_PP_PLATFORM_CAP_BACKBIAS 1 | ||
| 4730 | #define ATOM_PP_PLATFORM_CAP_POWERPLAY 2 | ||
| 4731 | #define ATOM_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 4 | ||
| 4732 | #define ATOM_PP_PLATFORM_CAP_ASPM_L0s 8 | ||
| 4733 | #define ATOM_PP_PLATFORM_CAP_ASPM_L1 16 | ||
| 4734 | #define ATOM_PP_PLATFORM_CAP_HARDWAREDC 32 | ||
| 4735 | #define ATOM_PP_PLATFORM_CAP_GEMINIPRIMARY 64 | ||
| 4736 | #define ATOM_PP_PLATFORM_CAP_STEPVDDC 128 | ||
| 4737 | #define ATOM_PP_PLATFORM_CAP_VOLTAGECONTROL 256 | ||
| 4738 | #define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512 | ||
| 4739 | #define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024 | ||
| 4740 | #define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048 | ||
| 4741 | |||
| 4742 | typedef struct _ATOM_PPLIB_POWERPLAYTABLE | ||
| 4743 | { | ||
| 4744 | ATOM_COMMON_TABLE_HEADER sHeader; | ||
| 4745 | |||
| 4746 | UCHAR ucDataRevision; | ||
| 4747 | |||
| 4748 | UCHAR ucNumStates; | ||
| 4749 | UCHAR ucStateEntrySize; | ||
| 4750 | UCHAR ucClockInfoSize; | ||
| 4751 | UCHAR ucNonClockSize; | ||
| 4752 | |||
| 4753 | // offset from start of this table to array of ucNumStates ATOM_PPLIB_STATE structures | ||
| 4754 | USHORT usStateArrayOffset; | ||
| 4755 | |||
| 4756 | // offset from start of this table to array of ASIC-specific structures, | ||
| 4757 | // currently ATOM_PPLIB_CLOCK_INFO. | ||
| 4758 | USHORT usClockInfoArrayOffset; | ||
| 4759 | |||
| 4760 | // offset from start of this table to array of ATOM_PPLIB_NONCLOCK_INFO | ||
| 4761 | USHORT usNonClockInfoArrayOffset; | ||
| 4762 | |||
| 4763 | USHORT usBackbiasTime; // in microseconds | ||
| 4764 | USHORT usVoltageTime; // in microseconds | ||
| 4765 | USHORT usTableSize; //the size of this structure, or the extended structure | ||
| 4766 | |||
| 4767 | ULONG ulPlatformCaps; // See ATOM_PPLIB_CAPS_* | ||
| 4768 | |||
| 4769 | ATOM_PPLIB_THERMALCONTROLLER sThermalController; | ||
| 4770 | |||
| 4771 | USHORT usBootClockInfoOffset; | ||
| 4772 | USHORT usBootNonClockInfoOffset; | ||
| 4773 | |||
| 4774 | } ATOM_PPLIB_POWERPLAYTABLE; | ||
| 4775 | |||
| 4776 | //// ATOM_PPLIB_NONCLOCK_INFO::usClassification | ||
| 4777 | #define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007 | ||
| 4778 | #define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0 | ||
| 4779 | #define ATOM_PPLIB_CLASSIFICATION_UI_NONE 0 | ||
| 4780 | #define ATOM_PPLIB_CLASSIFICATION_UI_BATTERY 1 | ||
| 4781 | #define ATOM_PPLIB_CLASSIFICATION_UI_BALANCED 3 | ||
| 4782 | #define ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE 5 | ||
| 4783 | // 2, 4, 6, 7 are reserved | ||
| 4784 | |||
| 4785 | #define ATOM_PPLIB_CLASSIFICATION_BOOT 0x0008 | ||
| 4786 | #define ATOM_PPLIB_CLASSIFICATION_THERMAL 0x0010 | ||
| 4787 | #define ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE 0x0020 | ||
| 4788 | #define ATOM_PPLIB_CLASSIFICATION_REST 0x0040 | ||
| 4789 | #define ATOM_PPLIB_CLASSIFICATION_FORCED 0x0080 | ||
| 4790 | #define ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE 0x0100 | ||
| 4791 | #define ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE 0x0200 | ||
| 4792 | #define ATOM_PPLIB_CLASSIFICATION_UVDSTATE 0x0400 | ||
| 4793 | #define ATOM_PPLIB_CLASSIFICATION_3DLOW 0x0800 | ||
| 4794 | #define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000 | ||
| 4795 | // remaining 3 bits are reserved | ||
| 4796 | |||
| 4797 | //// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings | ||
| 4798 | #define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001 | ||
| 4799 | #define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK 0x00000002 | ||
| 4800 | |||
| 4801 | // 0 is 2.5Gb/s, 1 is 5Gb/s | ||
| 4802 | #define ATOM_PPLIB_PCIE_LINK_SPEED_MASK 0x00000004 | ||
| 4803 | #define ATOM_PPLIB_PCIE_LINK_SPEED_SHIFT 2 | ||
| 4804 | |||
| 4805 | // lanes - 1: 1, 2, 4, 8, 12, 16 permitted by PCIE spec | ||
| 4806 | #define ATOM_PPLIB_PCIE_LINK_WIDTH_MASK 0x000000F8 | ||
| 4807 | #define ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT 3 | ||
| 4808 | |||
| 4809 | // lookup into reduced refresh-rate table | ||
| 4810 | #define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_MASK 0x00000F00 | ||
| 4811 | #define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_SHIFT 8 | ||
| 4812 | |||
| 4813 | #define ATOM_PPLIB_LIMITED_REFRESHRATE_UNLIMITED 0 | ||
| 4814 | #define ATOM_PPLIB_LIMITED_REFRESHRATE_50HZ 1 | ||
| 4815 | // 2-15 TBD as needed. | ||
| 4816 | |||
| 4817 | #define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000 | ||
| 4818 | #define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000 | ||
| 4819 | #define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000 | ||
| 4820 | |||
| 4821 | #define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000 | ||
| 4822 | |||
| 4823 | // Contained in an array starting at the offset | ||
| 4824 | // in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset. | ||
| 4825 | // referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex | ||
| 4826 | typedef struct _ATOM_PPLIB_NONCLOCK_INFO | ||
| 4827 | { | ||
| 4828 | USHORT usClassification; | ||
| 4829 | UCHAR ucMinTemperature; | ||
| 4830 | UCHAR ucMaxTemperature; | ||
| 4831 | ULONG ulCapsAndSettings; | ||
| 4832 | UCHAR ucRequiredPower; | ||
| 4833 | UCHAR ucUnused1[3]; | ||
| 4834 | } ATOM_PPLIB_NONCLOCK_INFO; | ||
| 4835 | |||
| 4836 | // Contained in an array starting at the offset | ||
| 4837 | // in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset. | ||
| 4838 | // referenced from ATOM_PPLIB_STATE::ucClockStateIndices | ||
| 4839 | typedef struct _ATOM_PPLIB_R600_CLOCK_INFO | ||
| 4840 | { | ||
| 4841 | USHORT usEngineClockLow; | ||
| 4842 | UCHAR ucEngineClockHigh; | ||
| 4843 | |||
| 4844 | USHORT usMemoryClockLow; | ||
| 4845 | UCHAR ucMemoryClockHigh; | ||
| 4846 | |||
| 4847 | USHORT usVDDC; | ||
| 4848 | USHORT usUnused1; | ||
| 4849 | USHORT usUnused2; | ||
| 4850 | |||
| 4851 | ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_* | ||
| 4852 | |||
| 4853 | } ATOM_PPLIB_R600_CLOCK_INFO; | ||
| 4854 | |||
| 4855 | // ulFlags in ATOM_PPLIB_R600_CLOCK_INFO | ||
| 4856 | #define ATOM_PPLIB_R600_FLAGS_PCIEGEN2 1 | ||
| 4857 | #define ATOM_PPLIB_R600_FLAGS_UVDSAFE 2 | ||
| 4858 | #define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE 4 | ||
| 4859 | #define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF 8 | ||
| 4860 | #define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF 16 | ||
| 4861 | |||
| 4862 | typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO | ||
| 4863 | |||
| 4864 | { | ||
| 4865 | USHORT usLowEngineClockLow; // Low Engine clock in MHz (the same way as on the R600). | ||
| 4866 | UCHAR ucLowEngineClockHigh; | ||
| 4867 | USHORT usHighEngineClockLow; // High Engine clock in MHz. | ||
| 4868 | UCHAR ucHighEngineClockHigh; | ||
| 4869 | USHORT usMemoryClockLow; // For now one of the ATOM_PPLIB_RS780_SPMCLK_XXXX constants. | ||
| 4870 | UCHAR ucMemoryClockHigh; // Currentyl unused. | ||
| 4871 | UCHAR ucPadding; // For proper alignment and size. | ||
| 4872 | USHORT usVDDC; // For the 780, use: None, Low, High, Variable | ||
| 4873 | UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16} | ||
| 4874 | UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could be bigger as display BW requriement. | ||
| 4875 | USHORT usHTLinkFreq; // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200). | ||
| 4876 | ULONG ulFlags; | ||
| 4877 | } ATOM_PPLIB_RS780_CLOCK_INFO; | ||
| 4878 | |||
| 4879 | #define ATOM_PPLIB_RS780_VOLTAGE_NONE 0 | ||
| 4880 | #define ATOM_PPLIB_RS780_VOLTAGE_LOW 1 | ||
| 4881 | #define ATOM_PPLIB_RS780_VOLTAGE_HIGH 2 | ||
| 4882 | #define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE 3 | ||
| 4883 | |||
| 4884 | #define ATOM_PPLIB_RS780_SPMCLK_NONE 0 // We cannot change the side port memory clock, leave it as it is. | ||
| 4885 | #define ATOM_PPLIB_RS780_SPMCLK_LOW 1 | ||
| 4886 | #define ATOM_PPLIB_RS780_SPMCLK_HIGH 2 | ||
| 4887 | |||
| 4888 | #define ATOM_PPLIB_RS780_HTLINKFREQ_NONE 0 | ||
| 4889 | #define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1 | ||
| 4890 | #define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2 | ||
| 4891 | |||
| 4693 | /**************************************************************************/ | 4892 | /**************************************************************************/ |
| 4694 | 4893 | ||
| 4695 | /* Following definitions are for compatiblity issue in different SW components. */ | 4894 | /* Following definitions are for compatiblity issue in different SW components. */ |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 84e5df766d3f..71727460968f 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
| @@ -2881,6 +2881,10 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track) | |||
| 2881 | 2881 | ||
| 2882 | for (i = 0; i < track->num_cb; i++) { | 2882 | for (i = 0; i < track->num_cb; i++) { |
| 2883 | if (track->cb[i].robj == NULL) { | 2883 | if (track->cb[i].robj == NULL) { |
| 2884 | if (!(track->fastfill || track->color_channel_mask || | ||
| 2885 | track->blend_read_enable)) { | ||
| 2886 | continue; | ||
| 2887 | } | ||
| 2884 | DRM_ERROR("[drm] No buffer for color buffer %d !\n", i); | 2888 | DRM_ERROR("[drm] No buffer for color buffer %d !\n", i); |
| 2885 | return -EINVAL; | 2889 | return -EINVAL; |
| 2886 | } | 2890 | } |
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h index 7188c3778ee2..b27a6999d219 100644 --- a/drivers/gpu/drm/radeon/r100_track.h +++ b/drivers/gpu/drm/radeon/r100_track.h | |||
| @@ -67,13 +67,15 @@ struct r100_cs_track { | |||
| 67 | unsigned immd_dwords; | 67 | unsigned immd_dwords; |
| 68 | unsigned num_arrays; | 68 | unsigned num_arrays; |
| 69 | unsigned max_indx; | 69 | unsigned max_indx; |
| 70 | unsigned color_channel_mask; | ||
| 70 | struct r100_cs_track_array arrays[11]; | 71 | struct r100_cs_track_array arrays[11]; |
| 71 | struct r100_cs_track_cb cb[R300_MAX_CB]; | 72 | struct r100_cs_track_cb cb[R300_MAX_CB]; |
| 72 | struct r100_cs_track_cb zb; | 73 | struct r100_cs_track_cb zb; |
| 73 | struct r100_cs_track_texture textures[R300_TRACK_MAX_TEXTURE]; | 74 | struct r100_cs_track_texture textures[R300_TRACK_MAX_TEXTURE]; |
| 74 | bool z_enabled; | 75 | bool z_enabled; |
| 75 | bool separate_cube; | 76 | bool separate_cube; |
| 76 | 77 | bool fastfill; | |
| 78 | bool blend_read_enable; | ||
| 77 | }; | 79 | }; |
| 78 | 80 | ||
| 79 | int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track); | 81 | int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track); |
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 83490c2b5061..3f2cc9e2e8d9 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
| @@ -887,6 +887,14 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
| 887 | track->textures[i].cpp = 1; | 887 | track->textures[i].cpp = 1; |
| 888 | track->textures[i].compress_format = R100_TRACK_COMP_DXT1; | 888 | track->textures[i].compress_format = R100_TRACK_COMP_DXT1; |
| 889 | break; | 889 | break; |
| 890 | case R300_TX_FORMAT_ATI2N: | ||
| 891 | if (p->rdev->family < CHIP_R420) { | ||
| 892 | DRM_ERROR("Invalid texture format %u\n", | ||
| 893 | (idx_value & 0x1F)); | ||
| 894 | return -EINVAL; | ||
| 895 | } | ||
| 896 | /* The same rules apply as for DXT3/5. */ | ||
| 897 | /* Pass through. */ | ||
| 890 | case R300_TX_FORMAT_DXT3: | 898 | case R300_TX_FORMAT_DXT3: |
| 891 | case R300_TX_FORMAT_DXT5: | 899 | case R300_TX_FORMAT_DXT5: |
| 892 | track->textures[i].cpp = 1; | 900 | track->textures[i].cpp = 1; |
| @@ -951,6 +959,16 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
| 951 | track->textures[i].width_11 = tmp; | 959 | track->textures[i].width_11 = tmp; |
| 952 | tmp = ((idx_value >> 16) & 1) << 11; | 960 | tmp = ((idx_value >> 16) & 1) << 11; |
| 953 | track->textures[i].height_11 = tmp; | 961 | track->textures[i].height_11 = tmp; |
| 962 | |||
| 963 | /* ATI1N */ | ||
| 964 | if (idx_value & (1 << 14)) { | ||
| 965 | /* The same rules apply as for DXT1. */ | ||
| 966 | track->textures[i].compress_format = | ||
| 967 | R100_TRACK_COMP_DXT1; | ||
| 968 | } | ||
| 969 | } else if (idx_value & (1 << 14)) { | ||
| 970 | DRM_ERROR("Forbidden bit TXFORMAT_MSB\n"); | ||
| 971 | return -EINVAL; | ||
| 954 | } | 972 | } |
| 955 | break; | 973 | break; |
| 956 | case 0x4480: | 974 | case 0x4480: |
| @@ -992,6 +1010,18 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
| 992 | } | 1010 | } |
| 993 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); | 1011 | ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); |
| 994 | break; | 1012 | break; |
| 1013 | case 0x4e0c: | ||
| 1014 | /* RB3D_COLOR_CHANNEL_MASK */ | ||
| 1015 | track->color_channel_mask = idx_value; | ||
| 1016 | break; | ||
| 1017 | case 0x4d1c: | ||
| 1018 | /* ZB_BW_CNTL */ | ||
| 1019 | track->fastfill = !!(idx_value & (1 << 2)); | ||
| 1020 | break; | ||
| 1021 | case 0x4e04: | ||
| 1022 | /* RB3D_BLENDCNTL */ | ||
| 1023 | track->blend_read_enable = !!(idx_value & (1 << 2)); | ||
| 1024 | break; | ||
| 995 | case 0x4be8: | 1025 | case 0x4be8: |
| 996 | /* valid register only on RV530 */ | 1026 | /* valid register only on RV530 */ |
| 997 | if (p->rdev->family == CHIP_RV530) | 1027 | if (p->rdev->family == CHIP_RV530) |
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c index cb2e470f97d4..34bffa0e4b73 100644 --- a/drivers/gpu/drm/radeon/r300_cmdbuf.c +++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c | |||
| @@ -990,7 +990,7 @@ static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv, | |||
| 990 | int sz; | 990 | int sz; |
| 991 | int addr; | 991 | int addr; |
| 992 | int type; | 992 | int type; |
| 993 | int clamp; | 993 | int isclamp; |
| 994 | int stride; | 994 | int stride; |
| 995 | RING_LOCALS; | 995 | RING_LOCALS; |
| 996 | 996 | ||
| @@ -999,10 +999,10 @@ static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv, | |||
| 999 | addr = ((header.r500fp.adrhi_flags & 1) << 8) | header.r500fp.adrlo; | 999 | addr = ((header.r500fp.adrhi_flags & 1) << 8) | header.r500fp.adrlo; |
| 1000 | 1000 | ||
| 1001 | type = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_TYPE); | 1001 | type = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_TYPE); |
| 1002 | clamp = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_CLAMP); | 1002 | isclamp = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_CLAMP); |
| 1003 | 1003 | ||
| 1004 | addr |= (type << 16); | 1004 | addr |= (type << 16); |
| 1005 | addr |= (clamp << 17); | 1005 | addr |= (isclamp << 17); |
| 1006 | 1006 | ||
| 1007 | stride = type ? 4 : 6; | 1007 | stride = type ? 4 : 6; |
| 1008 | 1008 | ||
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h index 4b7afef35a65..1735a2b69580 100644 --- a/drivers/gpu/drm/radeon/r300_reg.h +++ b/drivers/gpu/drm/radeon/r300_reg.h | |||
| @@ -900,6 +900,7 @@ | |||
| 900 | # define R300_TX_FORMAT_FL_I32 0x1B | 900 | # define R300_TX_FORMAT_FL_I32 0x1B |
| 901 | # define R300_TX_FORMAT_FL_I32A32 0x1C | 901 | # define R300_TX_FORMAT_FL_I32A32 0x1C |
| 902 | # define R300_TX_FORMAT_FL_R32G32B32A32 0x1D | 902 | # define R300_TX_FORMAT_FL_R32G32B32A32 0x1D |
| 903 | # define R300_TX_FORMAT_ATI2N 0x1F | ||
| 903 | /* alpha modes, convenience mostly */ | 904 | /* alpha modes, convenience mostly */ |
| 904 | /* if you have alpha, pick constant appropriate to the | 905 | /* if you have alpha, pick constant appropriate to the |
| 905 | number of channels (1 for I8, 2 for I8A8, 4 for R8G8B8A8, etc */ | 906 | number of channels (1 for I8, 2 for I8A8, 4 for R8G8B8A8, etc */ |
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c index 0d820764f340..44060b92d9e6 100644 --- a/drivers/gpu/drm/radeon/r600_cs.c +++ b/drivers/gpu/drm/radeon/r600_cs.c | |||
| @@ -170,7 +170,7 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p, | |||
| 170 | idx, relocs_chunk->length_dw); | 170 | idx, relocs_chunk->length_dw); |
| 171 | return -EINVAL; | 171 | return -EINVAL; |
| 172 | } | 172 | } |
| 173 | *cs_reloc = &p->relocs[0]; | 173 | *cs_reloc = p->relocs; |
| 174 | (*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32; | 174 | (*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32; |
| 175 | (*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0]; | 175 | (*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0]; |
| 176 | return 0; | 176 | return 0; |
| @@ -717,7 +717,7 @@ static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p) | |||
| 717 | if (p->chunk_relocs_idx == -1) { | 717 | if (p->chunk_relocs_idx == -1) { |
| 718 | return 0; | 718 | return 0; |
| 719 | } | 719 | } |
| 720 | p->relocs = kcalloc(1, sizeof(struct radeon_cs_reloc), GFP_KERNEL); | 720 | p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL); |
| 721 | if (p->relocs == NULL) { | 721 | if (p->relocs == NULL) { |
| 722 | return -ENOMEM; | 722 | return -ENOMEM; |
| 723 | } | 723 | } |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index cd650fd3964e..53b55608102b 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
| @@ -162,6 +162,7 @@ struct radeon_fence_driver { | |||
| 162 | struct list_head created; | 162 | struct list_head created; |
| 163 | struct list_head emited; | 163 | struct list_head emited; |
| 164 | struct list_head signaled; | 164 | struct list_head signaled; |
| 165 | bool initialized; | ||
| 165 | }; | 166 | }; |
| 166 | 167 | ||
| 167 | struct radeon_fence { | 168 | struct radeon_fence { |
| @@ -202,8 +203,9 @@ struct radeon_surface_reg { | |||
| 202 | struct radeon_mman { | 203 | struct radeon_mman { |
| 203 | struct ttm_bo_global_ref bo_global_ref; | 204 | struct ttm_bo_global_ref bo_global_ref; |
| 204 | struct ttm_global_reference mem_global_ref; | 205 | struct ttm_global_reference mem_global_ref; |
| 205 | bool mem_global_referenced; | ||
| 206 | struct ttm_bo_device bdev; | 206 | struct ttm_bo_device bdev; |
| 207 | bool mem_global_referenced; | ||
| 208 | bool initialized; | ||
| 207 | }; | 209 | }; |
| 208 | 210 | ||
| 209 | struct radeon_bo { | 211 | struct radeon_bo { |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h index 636116bedcb4..eb29217bbf1d 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.h +++ b/drivers/gpu/drm/radeon/radeon_asic.h | |||
| @@ -33,6 +33,7 @@ | |||
| 33 | */ | 33 | */ |
| 34 | uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev); | 34 | uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev); |
| 35 | void radeon_legacy_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock); | 35 | void radeon_legacy_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock); |
| 36 | uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev); | ||
| 36 | void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable); | 37 | void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable); |
| 37 | 38 | ||
| 38 | uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev); | 39 | uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev); |
| @@ -106,7 +107,7 @@ static struct radeon_asic r100_asic = { | |||
| 106 | .copy = &r100_copy_blit, | 107 | .copy = &r100_copy_blit, |
| 107 | .get_engine_clock = &radeon_legacy_get_engine_clock, | 108 | .get_engine_clock = &radeon_legacy_get_engine_clock, |
| 108 | .set_engine_clock = &radeon_legacy_set_engine_clock, | 109 | .set_engine_clock = &radeon_legacy_set_engine_clock, |
| 109 | .get_memory_clock = NULL, | 110 | .get_memory_clock = &radeon_legacy_get_memory_clock, |
| 110 | .set_memory_clock = NULL, | 111 | .set_memory_clock = NULL, |
| 111 | .set_pcie_lanes = NULL, | 112 | .set_pcie_lanes = NULL, |
| 112 | .set_clock_gating = &radeon_legacy_set_clock_gating, | 113 | .set_clock_gating = &radeon_legacy_set_clock_gating, |
| @@ -166,7 +167,7 @@ static struct radeon_asic r300_asic = { | |||
| 166 | .copy = &r100_copy_blit, | 167 | .copy = &r100_copy_blit, |
| 167 | .get_engine_clock = &radeon_legacy_get_engine_clock, | 168 | .get_engine_clock = &radeon_legacy_get_engine_clock, |
| 168 | .set_engine_clock = &radeon_legacy_set_engine_clock, | 169 | .set_engine_clock = &radeon_legacy_set_engine_clock, |
| 169 | .get_memory_clock = NULL, | 170 | .get_memory_clock = &radeon_legacy_get_memory_clock, |
| 170 | .set_memory_clock = NULL, | 171 | .set_memory_clock = NULL, |
| 171 | .set_pcie_lanes = &rv370_set_pcie_lanes, | 172 | .set_pcie_lanes = &rv370_set_pcie_lanes, |
| 172 | .set_clock_gating = &radeon_legacy_set_clock_gating, | 173 | .set_clock_gating = &radeon_legacy_set_clock_gating, |
| @@ -259,7 +260,7 @@ static struct radeon_asic rs400_asic = { | |||
| 259 | .copy = &r100_copy_blit, | 260 | .copy = &r100_copy_blit, |
| 260 | .get_engine_clock = &radeon_legacy_get_engine_clock, | 261 | .get_engine_clock = &radeon_legacy_get_engine_clock, |
| 261 | .set_engine_clock = &radeon_legacy_set_engine_clock, | 262 | .set_engine_clock = &radeon_legacy_set_engine_clock, |
| 262 | .get_memory_clock = NULL, | 263 | .get_memory_clock = &radeon_legacy_get_memory_clock, |
| 263 | .set_memory_clock = NULL, | 264 | .set_memory_clock = NULL, |
| 264 | .set_pcie_lanes = NULL, | 265 | .set_pcie_lanes = NULL, |
| 265 | .set_clock_gating = &radeon_legacy_set_clock_gating, | 266 | .set_clock_gating = &radeon_legacy_set_clock_gating, |
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index 12a0c760e7ff..321044bef71c 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c | |||
| @@ -745,8 +745,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct | |||
| 745 | else | 745 | else |
| 746 | radeon_add_legacy_encoder(dev, | 746 | radeon_add_legacy_encoder(dev, |
| 747 | radeon_get_encoder_id(dev, | 747 | radeon_get_encoder_id(dev, |
| 748 | (1 << | 748 | (1 << i), |
| 749 | i), | ||
| 750 | dac), | 749 | dac), |
| 751 | (1 << i)); | 750 | (1 << i)); |
| 752 | } | 751 | } |
| @@ -758,32 +757,30 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct | |||
| 758 | if (bios_connectors[j].valid && (i != j)) { | 757 | if (bios_connectors[j].valid && (i != j)) { |
| 759 | if (bios_connectors[i].line_mux == | 758 | if (bios_connectors[i].line_mux == |
| 760 | bios_connectors[j].line_mux) { | 759 | bios_connectors[j].line_mux) { |
| 761 | if (((bios_connectors[i]. | 760 | /* make sure not to combine LVDS */ |
| 762 | devices & | 761 | if (bios_connectors[i].devices & (ATOM_DEVICE_LCD_SUPPORT)) { |
| 763 | (ATOM_DEVICE_DFP_SUPPORT)) | 762 | bios_connectors[i].line_mux = 53; |
| 764 | && (bios_connectors[j]. | 763 | bios_connectors[i].ddc_bus.valid = false; |
| 765 | devices & | 764 | continue; |
| 766 | (ATOM_DEVICE_CRT_SUPPORT))) | 765 | } |
| 767 | || | 766 | if (bios_connectors[j].devices & (ATOM_DEVICE_LCD_SUPPORT)) { |
| 768 | ((bios_connectors[j]. | 767 | bios_connectors[j].line_mux = 53; |
| 769 | devices & | 768 | bios_connectors[j].ddc_bus.valid = false; |
| 770 | (ATOM_DEVICE_DFP_SUPPORT)) | 769 | continue; |
| 771 | && (bios_connectors[i]. | 770 | } |
| 772 | devices & | 771 | /* combine analog and digital for DVI-I */ |
| 773 | (ATOM_DEVICE_CRT_SUPPORT)))) { | 772 | if (((bios_connectors[i].devices & (ATOM_DEVICE_DFP_SUPPORT)) && |
| 774 | bios_connectors[i]. | 773 | (bios_connectors[j].devices & (ATOM_DEVICE_CRT_SUPPORT))) || |
| 775 | devices |= | 774 | ((bios_connectors[j].devices & (ATOM_DEVICE_DFP_SUPPORT)) && |
| 776 | bios_connectors[j]. | 775 | (bios_connectors[i].devices & (ATOM_DEVICE_CRT_SUPPORT)))) { |
| 777 | devices; | 776 | bios_connectors[i].devices |= |
| 778 | bios_connectors[i]. | 777 | bios_connectors[j].devices; |
| 779 | connector_type = | 778 | bios_connectors[i].connector_type = |
| 780 | DRM_MODE_CONNECTOR_DVII; | 779 | DRM_MODE_CONNECTOR_DVII; |
| 781 | if (bios_connectors[j].devices & | 780 | if (bios_connectors[j].devices & (ATOM_DEVICE_DFP_SUPPORT)) |
| 782 | (ATOM_DEVICE_DFP_SUPPORT)) | ||
| 783 | bios_connectors[i].hpd = | 781 | bios_connectors[i].hpd = |
| 784 | bios_connectors[j].hpd; | 782 | bios_connectors[j].hpd; |
| 785 | bios_connectors[j]. | 783 | bios_connectors[j].valid = false; |
| 786 | valid = false; | ||
| 787 | } | 784 | } |
| 788 | } | 785 | } |
| 789 | } | 786 | } |
| @@ -1234,6 +1231,61 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index, | |||
| 1234 | return true; | 1231 | return true; |
| 1235 | } | 1232 | } |
| 1236 | 1233 | ||
| 1234 | enum radeon_tv_std | ||
| 1235 | radeon_atombios_get_tv_info(struct radeon_device *rdev) | ||
| 1236 | { | ||
| 1237 | struct radeon_mode_info *mode_info = &rdev->mode_info; | ||
| 1238 | int index = GetIndexIntoMasterTable(DATA, AnalogTV_Info); | ||
| 1239 | uint16_t data_offset; | ||
| 1240 | uint8_t frev, crev; | ||
| 1241 | struct _ATOM_ANALOG_TV_INFO *tv_info; | ||
| 1242 | enum radeon_tv_std tv_std = TV_STD_NTSC; | ||
| 1243 | |||
| 1244 | atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset); | ||
| 1245 | |||
| 1246 | tv_info = (struct _ATOM_ANALOG_TV_INFO *)(mode_info->atom_context->bios + data_offset); | ||
| 1247 | |||
| 1248 | switch (tv_info->ucTV_BootUpDefaultStandard) { | ||
| 1249 | case ATOM_TV_NTSC: | ||
| 1250 | tv_std = TV_STD_NTSC; | ||
| 1251 | DRM_INFO("Default TV standard: NTSC\n"); | ||
| 1252 | break; | ||
| 1253 | case ATOM_TV_NTSCJ: | ||
| 1254 | tv_std = TV_STD_NTSC_J; | ||
| 1255 | DRM_INFO("Default TV standard: NTSC-J\n"); | ||
| 1256 | break; | ||
| 1257 | case ATOM_TV_PAL: | ||
| 1258 | tv_std = TV_STD_PAL; | ||
| 1259 | DRM_INFO("Default TV standard: PAL\n"); | ||
| 1260 | break; | ||
| 1261 | case ATOM_TV_PALM: | ||
| 1262 | tv_std = TV_STD_PAL_M; | ||
| 1263 | DRM_INFO("Default TV standard: PAL-M\n"); | ||
| 1264 | break; | ||
| 1265 | case ATOM_TV_PALN: | ||
| 1266 | tv_std = TV_STD_PAL_N; | ||
| 1267 | DRM_INFO("Default TV standard: PAL-N\n"); | ||
| 1268 | break; | ||
| 1269 | case ATOM_TV_PALCN: | ||
| 1270 | tv_std = TV_STD_PAL_CN; | ||
| 1271 | DRM_INFO("Default TV standard: PAL-CN\n"); | ||
| 1272 | break; | ||
| 1273 | case ATOM_TV_PAL60: | ||
| 1274 | tv_std = TV_STD_PAL_60; | ||
| 1275 | DRM_INFO("Default TV standard: PAL-60\n"); | ||
| 1276 | break; | ||
| 1277 | case ATOM_TV_SECAM: | ||
| 1278 | tv_std = TV_STD_SECAM; | ||
| 1279 | DRM_INFO("Default TV standard: SECAM\n"); | ||
| 1280 | break; | ||
| 1281 | default: | ||
| 1282 | tv_std = TV_STD_NTSC; | ||
| 1283 | DRM_INFO("Unknown TV standard; defaulting to NTSC\n"); | ||
| 1284 | break; | ||
| 1285 | } | ||
| 1286 | return tv_std; | ||
| 1287 | } | ||
| 1288 | |||
| 1237 | struct radeon_encoder_tv_dac * | 1289 | struct radeon_encoder_tv_dac * |
| 1238 | radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder) | 1290 | radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder) |
| 1239 | { | 1291 | { |
| @@ -1269,6 +1321,7 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder) | |||
| 1269 | dac = dac_info->ucDAC2_NTSC_DAC_Adjustment; | 1321 | dac = dac_info->ucDAC2_NTSC_DAC_Adjustment; |
| 1270 | tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20); | 1322 | tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20); |
| 1271 | 1323 | ||
| 1324 | tv_dac->tv_std = radeon_atombios_get_tv_info(rdev); | ||
| 1272 | } | 1325 | } |
| 1273 | return tv_dac; | 1326 | return tv_dac; |
| 1274 | } | 1327 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c index b062109efbee..812f24dbc2a8 100644 --- a/drivers/gpu/drm/radeon/radeon_clocks.c +++ b/drivers/gpu/drm/radeon/radeon_clocks.c | |||
| @@ -62,7 +62,7 @@ uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev) | |||
| 62 | } | 62 | } |
| 63 | 63 | ||
| 64 | /* 10 khz */ | 64 | /* 10 khz */ |
| 65 | static uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev) | 65 | uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev) |
| 66 | { | 66 | { |
| 67 | struct radeon_pll *mpll = &rdev->clock.mpll; | 67 | struct radeon_pll *mpll = &rdev->clock.mpll; |
| 68 | uint32_t fb_div, ref_div, post_div, mclk; | 68 | uint32_t fb_div, ref_div, post_div, mclk; |
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index c5021a3445de..fd94dbca33ac 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c | |||
| @@ -634,11 +634,10 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct | |||
| 634 | return p_dac; | 634 | return p_dac; |
| 635 | } | 635 | } |
| 636 | 636 | ||
| 637 | static enum radeon_tv_std | 637 | enum radeon_tv_std |
| 638 | radeon_combios_get_tv_info(struct radeon_encoder *encoder) | 638 | radeon_combios_get_tv_info(struct radeon_device *rdev) |
| 639 | { | 639 | { |
| 640 | struct drm_device *dev = encoder->base.dev; | 640 | struct drm_device *dev = rdev->ddev; |
| 641 | struct radeon_device *rdev = dev->dev_private; | ||
| 642 | uint16_t tv_info; | 641 | uint16_t tv_info; |
| 643 | enum radeon_tv_std tv_std = TV_STD_NTSC; | 642 | enum radeon_tv_std tv_std = TV_STD_NTSC; |
| 644 | 643 | ||
| @@ -779,7 +778,7 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct | |||
| 779 | tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20); | 778 | tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20); |
| 780 | found = 1; | 779 | found = 1; |
| 781 | } | 780 | } |
| 782 | tv_dac->tv_std = radeon_combios_get_tv_info(encoder); | 781 | tv_dac->tv_std = radeon_combios_get_tv_info(rdev); |
| 783 | } | 782 | } |
| 784 | if (!found) { | 783 | if (!found) { |
| 785 | /* then check CRT table */ | 784 | /* then check CRT table */ |
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 5eece186e03c..20161567dbff 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
| @@ -208,6 +208,18 @@ static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encode | |||
| 208 | drm_mode_set_name(mode); | 208 | drm_mode_set_name(mode); |
| 209 | 209 | ||
| 210 | DRM_DEBUG("Adding native panel mode %s\n", mode->name); | 210 | DRM_DEBUG("Adding native panel mode %s\n", mode->name); |
| 211 | } else if (native_mode->hdisplay != 0 && | ||
| 212 | native_mode->vdisplay != 0) { | ||
| 213 | /* mac laptops without an edid */ | ||
| 214 | /* Note that this is not necessarily the exact panel mode, | ||
| 215 | * but an approximation based on the cvt formula. For these | ||
| 216 | * systems we should ideally read the mode info out of the | ||
| 217 | * registers or add a mode table, but this works and is much | ||
| 218 | * simpler. | ||
| 219 | */ | ||
| 220 | mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false); | ||
| 221 | mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER; | ||
| 222 | DRM_DEBUG("Adding cvt approximation of native panel mode %s\n", mode->name); | ||
| 211 | } | 223 | } |
| 212 | return mode; | 224 | return mode; |
| 213 | } | 225 | } |
| @@ -1171,7 +1183,7 @@ radeon_add_atom_connector(struct drm_device *dev, | |||
| 1171 | 1); | 1183 | 1); |
| 1172 | drm_connector_attach_property(&radeon_connector->base, | 1184 | drm_connector_attach_property(&radeon_connector->base, |
| 1173 | rdev->mode_info.tv_std_property, | 1185 | rdev->mode_info.tv_std_property, |
| 1174 | 1); | 1186 | radeon_atombios_get_tv_info(rdev)); |
| 1175 | } | 1187 | } |
| 1176 | break; | 1188 | break; |
| 1177 | case DRM_MODE_CONNECTOR_LVDS: | 1189 | case DRM_MODE_CONNECTOR_LVDS: |
| @@ -1315,7 +1327,7 @@ radeon_add_legacy_connector(struct drm_device *dev, | |||
| 1315 | 1); | 1327 | 1); |
| 1316 | drm_connector_attach_property(&radeon_connector->base, | 1328 | drm_connector_attach_property(&radeon_connector->base, |
| 1317 | rdev->mode_info.tv_std_property, | 1329 | rdev->mode_info.tv_std_property, |
| 1318 | 1); | 1330 | radeon_combios_get_tv_info(rdev)); |
| 1319 | } | 1331 | } |
| 1320 | break; | 1332 | break; |
| 1321 | case DRM_MODE_CONNECTOR_LVDS: | 1333 | case DRM_MODE_CONNECTOR_LVDS: |
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 02bcdb1240c0..7c6848096bcd 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c | |||
| @@ -391,6 +391,12 @@ int radeon_asic_init(struct radeon_device *rdev) | |||
| 391 | /* FIXME: not supported yet */ | 391 | /* FIXME: not supported yet */ |
| 392 | return -EINVAL; | 392 | return -EINVAL; |
| 393 | } | 393 | } |
| 394 | |||
| 395 | if (rdev->flags & RADEON_IS_IGP) { | ||
| 396 | rdev->asic->get_memory_clock = NULL; | ||
| 397 | rdev->asic->set_memory_clock = NULL; | ||
| 398 | } | ||
| 399 | |||
| 394 | return 0; | 400 | return 0; |
| 395 | } | 401 | } |
| 396 | 402 | ||
| @@ -481,6 +487,7 @@ int radeon_atombios_init(struct radeon_device *rdev) | |||
| 481 | atom_card_info->pll_write = cail_pll_write; | 487 | atom_card_info->pll_write = cail_pll_write; |
| 482 | 488 | ||
| 483 | rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios); | 489 | rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios); |
| 490 | mutex_init(&rdev->mode_info.atom_context->mutex); | ||
| 484 | radeon_atom_initialize_bios_scratch_regs(rdev->ddev); | 491 | radeon_atom_initialize_bios_scratch_regs(rdev->ddev); |
| 485 | atom_allocate_fb_scratch(rdev->mode_info.atom_context); | 492 | atom_allocate_fb_scratch(rdev->mode_info.atom_context); |
| 486 | return 0; | 493 | return 0; |
| @@ -539,9 +546,72 @@ void radeon_agp_disable(struct radeon_device *rdev) | |||
| 539 | } | 546 | } |
| 540 | } | 547 | } |
| 541 | 548 | ||
| 542 | /* | 549 | void radeon_check_arguments(struct radeon_device *rdev) |
| 543 | * Radeon device. | 550 | { |
| 544 | */ | 551 | /* vramlimit must be a power of two */ |
| 552 | switch (radeon_vram_limit) { | ||
| 553 | case 0: | ||
| 554 | case 4: | ||
| 555 | case 8: | ||
| 556 | case 16: | ||
| 557 | case 32: | ||
| 558 | case 64: | ||
| 559 | case 128: | ||
| 560 | case 256: | ||
| 561 | case 512: | ||
| 562 | case 1024: | ||
| 563 | case 2048: | ||
| 564 | case 4096: | ||
| 565 | break; | ||
| 566 | default: | ||
| 567 | dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n", | ||
| 568 | radeon_vram_limit); | ||
| 569 | radeon_vram_limit = 0; | ||
| 570 | break; | ||
| 571 | } | ||
| 572 | radeon_vram_limit = radeon_vram_limit << 20; | ||
| 573 | /* gtt size must be power of two and greater or equal to 32M */ | ||
| 574 | switch (radeon_gart_size) { | ||
| 575 | case 4: | ||
| 576 | case 8: | ||
| 577 | case 16: | ||
| 578 | dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n", | ||
| 579 | radeon_gart_size); | ||
| 580 | radeon_gart_size = 512; | ||
| 581 | break; | ||
| 582 | case 32: | ||
| 583 | case 64: | ||
| 584 | case 128: | ||
| 585 | case 256: | ||
| 586 | case 512: | ||
| 587 | case 1024: | ||
| 588 | case 2048: | ||
| 589 | case 4096: | ||
| 590 | break; | ||
| 591 | default: | ||
| 592 | dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n", | ||
| 593 | radeon_gart_size); | ||
| 594 | radeon_gart_size = 512; | ||
| 595 | break; | ||
| 596 | } | ||
| 597 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; | ||
| 598 | /* AGP mode can only be -1, 1, 2, 4, 8 */ | ||
| 599 | switch (radeon_agpmode) { | ||
| 600 | case -1: | ||
| 601 | case 0: | ||
| 602 | case 1: | ||
| 603 | case 2: | ||
| 604 | case 4: | ||
| 605 | case 8: | ||
| 606 | break; | ||
| 607 | default: | ||
| 608 | dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: " | ||
| 609 | "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode); | ||
| 610 | radeon_agpmode = 0; | ||
| 611 | break; | ||
| 612 | } | ||
| 613 | } | ||
| 614 | |||
| 545 | int radeon_device_init(struct radeon_device *rdev, | 615 | int radeon_device_init(struct radeon_device *rdev, |
| 546 | struct drm_device *ddev, | 616 | struct drm_device *ddev, |
| 547 | struct pci_dev *pdev, | 617 | struct pci_dev *pdev, |
| @@ -580,9 +650,9 @@ int radeon_device_init(struct radeon_device *rdev, | |||
| 580 | 650 | ||
| 581 | /* Set asic functions */ | 651 | /* Set asic functions */ |
| 582 | r = radeon_asic_init(rdev); | 652 | r = radeon_asic_init(rdev); |
| 583 | if (r) { | 653 | if (r) |
| 584 | return r; | 654 | return r; |
| 585 | } | 655 | radeon_check_arguments(rdev); |
| 586 | 656 | ||
| 587 | if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) { | 657 | if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) { |
| 588 | radeon_agp_disable(rdev); | 658 | radeon_agp_disable(rdev); |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index a133b833e45d..91d72b70abc9 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
| @@ -739,7 +739,7 @@ static struct drm_prop_enum_list radeon_tv_std_enum_list[] = | |||
| 739 | { TV_STD_SECAM, "secam" }, | 739 | { TV_STD_SECAM, "secam" }, |
| 740 | }; | 740 | }; |
| 741 | 741 | ||
| 742 | int radeon_modeset_create_props(struct radeon_device *rdev) | 742 | static int radeon_modeset_create_props(struct radeon_device *rdev) |
| 743 | { | 743 | { |
| 744 | int i, sz; | 744 | int i, sz; |
| 745 | 745 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index dbd56ef82f9c..8ba3de7994d4 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c | |||
| @@ -196,7 +196,7 @@ static struct drm_driver driver_old = { | |||
| 196 | .owner = THIS_MODULE, | 196 | .owner = THIS_MODULE, |
| 197 | .open = drm_open, | 197 | .open = drm_open, |
| 198 | .release = drm_release, | 198 | .release = drm_release, |
| 199 | .ioctl = drm_ioctl, | 199 | .unlocked_ioctl = drm_ioctl, |
| 200 | .mmap = drm_mmap, | 200 | .mmap = drm_mmap, |
| 201 | .poll = drm_poll, | 201 | .poll = drm_poll, |
| 202 | .fasync = drm_fasync, | 202 | .fasync = drm_fasync, |
| @@ -284,7 +284,7 @@ static struct drm_driver kms_driver = { | |||
| 284 | .owner = THIS_MODULE, | 284 | .owner = THIS_MODULE, |
| 285 | .open = drm_open, | 285 | .open = drm_open, |
| 286 | .release = drm_release, | 286 | .release = drm_release, |
| 287 | .ioctl = drm_ioctl, | 287 | .unlocked_ioctl = drm_ioctl, |
| 288 | .mmap = radeon_mmap, | 288 | .mmap = radeon_mmap, |
| 289 | .poll = drm_poll, | 289 | .poll = drm_poll, |
| 290 | .fasync = drm_fasync, | 290 | .fasync = drm_fasync, |
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 0d1d908e5225..ccba95f83d11 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
| @@ -233,6 +233,8 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder, | |||
| 233 | if (!ASIC_IS_AVIVO(rdev)) { | 233 | if (!ASIC_IS_AVIVO(rdev)) { |
| 234 | adjusted_mode->hdisplay = mode->hdisplay; | 234 | adjusted_mode->hdisplay = mode->hdisplay; |
| 235 | adjusted_mode->vdisplay = mode->vdisplay; | 235 | adjusted_mode->vdisplay = mode->vdisplay; |
| 236 | adjusted_mode->crtc_hdisplay = mode->hdisplay; | ||
| 237 | adjusted_mode->crtc_vdisplay = mode->vdisplay; | ||
| 236 | } | 238 | } |
| 237 | adjusted_mode->base.id = mode_id; | 239 | adjusted_mode->base.id = mode_id; |
| 238 | } | 240 | } |
| @@ -495,9 +497,9 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) | |||
| 495 | args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE; | 497 | args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE; |
| 496 | args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); | 498 | args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); |
| 497 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | 499 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { |
| 498 | if (dig->lvds_misc & (1 << 0)) | 500 | if (dig->lvds_misc & ATOM_PANEL_MISC_DUAL) |
| 499 | args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; | 501 | args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL; |
| 500 | if (dig->lvds_misc & (1 << 1)) | 502 | if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB) |
| 501 | args.v1.ucMisc |= (1 << 1); | 503 | args.v1.ucMisc |= (1 << 1); |
| 502 | } else { | 504 | } else { |
| 503 | if (dig_connector->linkb) | 505 | if (dig_connector->linkb) |
| @@ -524,18 +526,18 @@ atombios_digital_setup(struct drm_encoder *encoder, int action) | |||
| 524 | args.v2.ucTemporal = 0; | 526 | args.v2.ucTemporal = 0; |
| 525 | args.v2.ucFRC = 0; | 527 | args.v2.ucFRC = 0; |
| 526 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | 528 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { |
| 527 | if (dig->lvds_misc & (1 << 0)) | 529 | if (dig->lvds_misc & ATOM_PANEL_MISC_DUAL) |
| 528 | args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL; | 530 | args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL; |
| 529 | if (dig->lvds_misc & (1 << 5)) { | 531 | if (dig->lvds_misc & ATOM_PANEL_MISC_SPATIAL) { |
| 530 | args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN; | 532 | args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN; |
| 531 | if (dig->lvds_misc & (1 << 1)) | 533 | if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB) |
| 532 | args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH; | 534 | args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH; |
| 533 | } | 535 | } |
| 534 | if (dig->lvds_misc & (1 << 6)) { | 536 | if (dig->lvds_misc & ATOM_PANEL_MISC_TEMPORAL) { |
| 535 | args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN; | 537 | args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN; |
| 536 | if (dig->lvds_misc & (1 << 1)) | 538 | if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB) |
| 537 | args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH; | 539 | args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH; |
| 538 | if (((dig->lvds_misc >> 2) & 0x3) == 2) | 540 | if (((dig->lvds_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2) |
| 539 | args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4; | 541 | args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4; |
| 540 | } | 542 | } |
| 541 | } else { | 543 | } else { |
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index cb4cd97ae39f..4cdd8b4f7549 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c | |||
| @@ -324,7 +324,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev) | |||
| 324 | write_lock_irqsave(&rdev->fence_drv.lock, irq_flags); | 324 | write_lock_irqsave(&rdev->fence_drv.lock, irq_flags); |
| 325 | r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg); | 325 | r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg); |
| 326 | if (r) { | 326 | if (r) { |
| 327 | DRM_ERROR("Fence failed to get a scratch register."); | 327 | dev_err(rdev->dev, "fence failed to get scratch register\n"); |
| 328 | write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); | 328 | write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); |
| 329 | return r; | 329 | return r; |
| 330 | } | 330 | } |
| @@ -335,9 +335,10 @@ int radeon_fence_driver_init(struct radeon_device *rdev) | |||
| 335 | INIT_LIST_HEAD(&rdev->fence_drv.signaled); | 335 | INIT_LIST_HEAD(&rdev->fence_drv.signaled); |
| 336 | rdev->fence_drv.count_timeout = 0; | 336 | rdev->fence_drv.count_timeout = 0; |
| 337 | init_waitqueue_head(&rdev->fence_drv.queue); | 337 | init_waitqueue_head(&rdev->fence_drv.queue); |
| 338 | rdev->fence_drv.initialized = true; | ||
| 338 | write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); | 339 | write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); |
| 339 | if (radeon_debugfs_fence_init(rdev)) { | 340 | if (radeon_debugfs_fence_init(rdev)) { |
| 340 | DRM_ERROR("Failed to register debugfs file for fence !\n"); | 341 | dev_err(rdev->dev, "fence debugfs file creation failed\n"); |
| 341 | } | 342 | } |
| 342 | return 0; | 343 | return 0; |
| 343 | } | 344 | } |
| @@ -346,11 +347,13 @@ void radeon_fence_driver_fini(struct radeon_device *rdev) | |||
| 346 | { | 347 | { |
| 347 | unsigned long irq_flags; | 348 | unsigned long irq_flags; |
| 348 | 349 | ||
| 350 | if (!rdev->fence_drv.initialized) | ||
| 351 | return; | ||
| 349 | wake_up_all(&rdev->fence_drv.queue); | 352 | wake_up_all(&rdev->fence_drv.queue); |
| 350 | write_lock_irqsave(&rdev->fence_drv.lock, irq_flags); | 353 | write_lock_irqsave(&rdev->fence_drv.lock, irq_flags); |
| 351 | radeon_scratch_free(rdev, rdev->fence_drv.scratch_reg); | 354 | radeon_scratch_free(rdev, rdev->fence_drv.scratch_reg); |
| 352 | write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); | 355 | write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); |
| 353 | DRM_INFO("radeon: fence finalized\n"); | 356 | rdev->fence_drv.initialized = false; |
| 354 | } | 357 | } |
| 355 | 358 | ||
| 356 | 359 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c index a1bf11de308a..48b7cea31e08 100644 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c | |||
| @@ -92,8 +92,7 @@ static int compat_radeon_cp_init(struct file *file, unsigned int cmd, | |||
| 92 | &init->gart_textures_offset)) | 92 | &init->gart_textures_offset)) |
| 93 | return -EFAULT; | 93 | return -EFAULT; |
| 94 | 94 | ||
| 95 | return drm_ioctl(file->f_path.dentry->d_inode, file, | 95 | return drm_ioctl(file, DRM_IOCTL_RADEON_CP_INIT, (unsigned long)init); |
| 96 | DRM_IOCTL_RADEON_CP_INIT, (unsigned long)init); | ||
| 97 | } | 96 | } |
| 98 | 97 | ||
| 99 | typedef struct drm_radeon_clear32 { | 98 | typedef struct drm_radeon_clear32 { |
| @@ -125,8 +124,7 @@ static int compat_radeon_cp_clear(struct file *file, unsigned int cmd, | |||
| 125 | &clr->depth_boxes)) | 124 | &clr->depth_boxes)) |
| 126 | return -EFAULT; | 125 | return -EFAULT; |
| 127 | 126 | ||
| 128 | return drm_ioctl(file->f_path.dentry->d_inode, file, | 127 | return drm_ioctl(file, DRM_IOCTL_RADEON_CLEAR, (unsigned long)clr); |
| 129 | DRM_IOCTL_RADEON_CLEAR, (unsigned long)clr); | ||
| 130 | } | 128 | } |
| 131 | 129 | ||
| 132 | typedef struct drm_radeon_stipple32 { | 130 | typedef struct drm_radeon_stipple32 { |
| @@ -149,8 +147,7 @@ static int compat_radeon_cp_stipple(struct file *file, unsigned int cmd, | |||
| 149 | &request->mask)) | 147 | &request->mask)) |
| 150 | return -EFAULT; | 148 | return -EFAULT; |
| 151 | 149 | ||
| 152 | return drm_ioctl(file->f_path.dentry->d_inode, file, | 150 | return drm_ioctl(file, DRM_IOCTL_RADEON_STIPPLE, (unsigned long)request); |
| 153 | DRM_IOCTL_RADEON_STIPPLE, (unsigned long)request); | ||
| 154 | } | 151 | } |
| 155 | 152 | ||
| 156 | typedef struct drm_radeon_tex_image32 { | 153 | typedef struct drm_radeon_tex_image32 { |
| @@ -204,8 +201,7 @@ static int compat_radeon_cp_texture(struct file *file, unsigned int cmd, | |||
| 204 | &image->data)) | 201 | &image->data)) |
| 205 | return -EFAULT; | 202 | return -EFAULT; |
| 206 | 203 | ||
| 207 | return drm_ioctl(file->f_path.dentry->d_inode, file, | 204 | return drm_ioctl(file, DRM_IOCTL_RADEON_TEXTURE, (unsigned long)request); |
| 208 | DRM_IOCTL_RADEON_TEXTURE, (unsigned long)request); | ||
| 209 | } | 205 | } |
| 210 | 206 | ||
| 211 | typedef struct drm_radeon_vertex2_32 { | 207 | typedef struct drm_radeon_vertex2_32 { |
| @@ -238,8 +234,7 @@ static int compat_radeon_cp_vertex2(struct file *file, unsigned int cmd, | |||
| 238 | &request->prim)) | 234 | &request->prim)) |
| 239 | return -EFAULT; | 235 | return -EFAULT; |
| 240 | 236 | ||
| 241 | return drm_ioctl(file->f_path.dentry->d_inode, file, | 237 | return drm_ioctl(file, DRM_IOCTL_RADEON_VERTEX2, (unsigned long)request); |
| 242 | DRM_IOCTL_RADEON_VERTEX2, (unsigned long)request); | ||
| 243 | } | 238 | } |
| 244 | 239 | ||
| 245 | typedef struct drm_radeon_cmd_buffer32 { | 240 | typedef struct drm_radeon_cmd_buffer32 { |
| @@ -268,8 +263,7 @@ static int compat_radeon_cp_cmdbuf(struct file *file, unsigned int cmd, | |||
| 268 | &request->boxes)) | 263 | &request->boxes)) |
| 269 | return -EFAULT; | 264 | return -EFAULT; |
| 270 | 265 | ||
| 271 | return drm_ioctl(file->f_path.dentry->d_inode, file, | 266 | return drm_ioctl(file, DRM_IOCTL_RADEON_CMDBUF, (unsigned long)request); |
| 272 | DRM_IOCTL_RADEON_CMDBUF, (unsigned long)request); | ||
| 273 | } | 267 | } |
| 274 | 268 | ||
| 275 | typedef struct drm_radeon_getparam32 { | 269 | typedef struct drm_radeon_getparam32 { |
| @@ -293,8 +287,7 @@ static int compat_radeon_cp_getparam(struct file *file, unsigned int cmd, | |||
| 293 | &request->value)) | 287 | &request->value)) |
| 294 | return -EFAULT; | 288 | return -EFAULT; |
| 295 | 289 | ||
| 296 | return drm_ioctl(file->f_path.dentry->d_inode, file, | 290 | return drm_ioctl(file, DRM_IOCTL_RADEON_GETPARAM, (unsigned long)request); |
| 297 | DRM_IOCTL_RADEON_GETPARAM, (unsigned long)request); | ||
| 298 | } | 291 | } |
| 299 | 292 | ||
| 300 | typedef struct drm_radeon_mem_alloc32 { | 293 | typedef struct drm_radeon_mem_alloc32 { |
| @@ -322,8 +315,7 @@ static int compat_radeon_mem_alloc(struct file *file, unsigned int cmd, | |||
| 322 | &request->region_offset)) | 315 | &request->region_offset)) |
| 323 | return -EFAULT; | 316 | return -EFAULT; |
| 324 | 317 | ||
| 325 | return drm_ioctl(file->f_path.dentry->d_inode, file, | 318 | return drm_ioctl(file, DRM_IOCTL_RADEON_ALLOC, (unsigned long)request); |
| 326 | DRM_IOCTL_RADEON_ALLOC, (unsigned long)request); | ||
| 327 | } | 319 | } |
| 328 | 320 | ||
| 329 | typedef struct drm_radeon_irq_emit32 { | 321 | typedef struct drm_radeon_irq_emit32 { |
| @@ -345,8 +337,7 @@ static int compat_radeon_irq_emit(struct file *file, unsigned int cmd, | |||
| 345 | &request->irq_seq)) | 337 | &request->irq_seq)) |
| 346 | return -EFAULT; | 338 | return -EFAULT; |
| 347 | 339 | ||
| 348 | return drm_ioctl(file->f_path.dentry->d_inode, file, | 340 | return drm_ioctl(file, DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long)request); |
| 349 | DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long)request); | ||
| 350 | } | 341 | } |
| 351 | 342 | ||
| 352 | /* The two 64-bit arches where alignof(u64)==4 in 32-bit code */ | 343 | /* The two 64-bit arches where alignof(u64)==4 in 32-bit code */ |
| @@ -372,8 +363,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd, | |||
| 372 | &request->value)) | 363 | &request->value)) |
| 373 | return -EFAULT; | 364 | return -EFAULT; |
| 374 | 365 | ||
| 375 | return drm_ioctl(file->f_dentry->d_inode, file, | 366 | return drm_ioctl(file, DRM_IOCTL_RADEON_SETPARAM, (unsigned long) request); |
| 376 | DRM_IOCTL_RADEON_SETPARAM, (unsigned long) request); | ||
| 377 | } | 367 | } |
| 378 | #else | 368 | #else |
| 379 | #define compat_radeon_cp_setparam NULL | 369 | #define compat_radeon_cp_setparam NULL |
| @@ -413,12 +403,10 @@ long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | |||
| 413 | if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls)) | 403 | if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls)) |
| 414 | fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE]; | 404 | fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE]; |
| 415 | 405 | ||
| 416 | lock_kernel(); /* XXX for now */ | ||
| 417 | if (fn != NULL) | 406 | if (fn != NULL) |
| 418 | ret = (*fn) (filp, cmd, arg); | 407 | ret = (*fn) (filp, cmd, arg); |
| 419 | else | 408 | else |
| 420 | ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg); | 409 | ret = drm_ioctl(filp, cmd, arg); |
| 421 | unlock_kernel(); | ||
| 422 | 410 | ||
| 423 | return ret; | 411 | return ret; |
| 424 | } | 412 | } |
| @@ -431,9 +419,7 @@ long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long | |||
| 431 | if (nr < DRM_COMMAND_BASE) | 419 | if (nr < DRM_COMMAND_BASE) |
| 432 | return drm_compat_ioctl(filp, cmd, arg); | 420 | return drm_compat_ioctl(filp, cmd, arg); |
| 433 | 421 | ||
| 434 | lock_kernel(); /* XXX for now */ | 422 | ret = drm_ioctl(filp, cmd, arg); |
| 435 | ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg); | ||
| 436 | unlock_kernel(); | ||
| 437 | 423 | ||
| 438 | return ret; | 424 | return ret; |
| 439 | } | 425 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index b82ede98e152..cc27485a07ad 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c | |||
| @@ -43,8 +43,7 @@ static void radeon_overscan_setup(struct drm_crtc *crtc, | |||
| 43 | } | 43 | } |
| 44 | 44 | ||
| 45 | static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc, | 45 | static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc, |
| 46 | struct drm_display_mode *mode, | 46 | struct drm_display_mode *mode) |
| 47 | struct drm_display_mode *adjusted_mode) | ||
| 48 | { | 47 | { |
| 49 | struct drm_device *dev = crtc->dev; | 48 | struct drm_device *dev = crtc->dev; |
| 50 | struct radeon_device *rdev = dev->dev_private; | 49 | struct radeon_device *rdev = dev->dev_private; |
| @@ -1059,7 +1058,7 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc, | |||
| 1059 | radeon_set_pll(crtc, adjusted_mode); | 1058 | radeon_set_pll(crtc, adjusted_mode); |
| 1060 | radeon_overscan_setup(crtc, adjusted_mode); | 1059 | radeon_overscan_setup(crtc, adjusted_mode); |
| 1061 | if (radeon_crtc->crtc_id == 0) { | 1060 | if (radeon_crtc->crtc_id == 0) { |
| 1062 | radeon_legacy_rmx_mode_set(crtc, mode, adjusted_mode); | 1061 | radeon_legacy_rmx_mode_set(crtc, adjusted_mode); |
| 1063 | } else { | 1062 | } else { |
| 1064 | if (radeon_crtc->rmx_type != RMX_OFF) { | 1063 | if (radeon_crtc->rmx_type != RMX_OFF) { |
| 1065 | /* FIXME: only first crtc has rmx what should we | 1064 | /* FIXME: only first crtc has rmx what should we |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index df00515e81fa..981508ff7037 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c | |||
| @@ -207,6 +207,8 @@ static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder, | |||
| 207 | *adjusted_mode = *native_mode; | 207 | *adjusted_mode = *native_mode; |
| 208 | adjusted_mode->hdisplay = mode->hdisplay; | 208 | adjusted_mode->hdisplay = mode->hdisplay; |
| 209 | adjusted_mode->vdisplay = mode->vdisplay; | 209 | adjusted_mode->vdisplay = mode->vdisplay; |
| 210 | adjusted_mode->crtc_hdisplay = mode->hdisplay; | ||
| 211 | adjusted_mode->crtc_vdisplay = mode->vdisplay; | ||
| 210 | adjusted_mode->base.id = mode_id; | 212 | adjusted_mode->base.id = mode_id; |
| 211 | } | 213 | } |
| 212 | 214 | ||
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h index 3dcbe130c422..402369db5ba0 100644 --- a/drivers/gpu/drm/radeon/radeon_mode.h +++ b/drivers/gpu/drm/radeon/radeon_mode.h | |||
| @@ -88,6 +88,7 @@ enum radeon_tv_std { | |||
| 88 | TV_STD_SCART_PAL, | 88 | TV_STD_SCART_PAL, |
| 89 | TV_STD_SECAM, | 89 | TV_STD_SECAM, |
| 90 | TV_STD_PAL_CN, | 90 | TV_STD_PAL_CN, |
| 91 | TV_STD_PAL_N, | ||
| 91 | }; | 92 | }; |
| 92 | 93 | ||
| 93 | /* radeon gpio-based i2c | 94 | /* radeon gpio-based i2c |
| @@ -395,6 +396,11 @@ struct radeon_framebuffer { | |||
| 395 | struct drm_gem_object *obj; | 396 | struct drm_gem_object *obj; |
| 396 | }; | 397 | }; |
| 397 | 398 | ||
| 399 | extern enum radeon_tv_std | ||
| 400 | radeon_combios_get_tv_info(struct radeon_device *rdev); | ||
| 401 | extern enum radeon_tv_std | ||
| 402 | radeon_atombios_get_tv_info(struct radeon_device *rdev); | ||
| 403 | |||
| 398 | extern void radeon_connector_hotplug(struct drm_connector *connector); | 404 | extern void radeon_connector_hotplug(struct drm_connector *connector); |
| 399 | extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector); | 405 | extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector); |
| 400 | extern int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector, | 406 | extern int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector, |
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c index 391c973ec4db..9f5e2f929da9 100644 --- a/drivers/gpu/drm/radeon/radeon_test.c +++ b/drivers/gpu/drm/radeon/radeon_test.c | |||
| @@ -42,8 +42,8 @@ void radeon_test_moves(struct radeon_device *rdev) | |||
| 42 | /* Number of tests = | 42 | /* Number of tests = |
| 43 | * (Total GTT - IB pool - writeback page - ring buffer) / test size | 43 | * (Total GTT - IB pool - writeback page - ring buffer) / test size |
| 44 | */ | 44 | */ |
| 45 | n = (rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE - | 45 | n = ((u32)(rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE - |
| 46 | rdev->cp.ring_size) / size; | 46 | rdev->cp.ring_size)) / size; |
| 47 | 47 | ||
| 48 | gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL); | 48 | gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL); |
| 49 | if (!gtt_obj) { | 49 | if (!gtt_obj) { |
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index d7fd160cc671..3b0c07b444a2 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
| @@ -494,6 +494,7 @@ int radeon_ttm_init(struct radeon_device *rdev) | |||
| 494 | DRM_ERROR("failed initializing buffer object driver(%d).\n", r); | 494 | DRM_ERROR("failed initializing buffer object driver(%d).\n", r); |
| 495 | return r; | 495 | return r; |
| 496 | } | 496 | } |
| 497 | rdev->mman.initialized = true; | ||
| 497 | r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, | 498 | r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, |
| 498 | rdev->mc.real_vram_size >> PAGE_SHIFT); | 499 | rdev->mc.real_vram_size >> PAGE_SHIFT); |
| 499 | if (r) { | 500 | if (r) { |
| @@ -541,6 +542,8 @@ void radeon_ttm_fini(struct radeon_device *rdev) | |||
| 541 | { | 542 | { |
| 542 | int r; | 543 | int r; |
| 543 | 544 | ||
| 545 | if (!rdev->mman.initialized) | ||
| 546 | return; | ||
| 544 | if (rdev->stollen_vga_memory) { | 547 | if (rdev->stollen_vga_memory) { |
| 545 | r = radeon_bo_reserve(rdev->stollen_vga_memory, false); | 548 | r = radeon_bo_reserve(rdev->stollen_vga_memory, false); |
| 546 | if (r == 0) { | 549 | if (r == 0) { |
| @@ -554,6 +557,7 @@ void radeon_ttm_fini(struct radeon_device *rdev) | |||
| 554 | ttm_bo_device_release(&rdev->mman.bdev); | 557 | ttm_bo_device_release(&rdev->mman.bdev); |
| 555 | radeon_gart_fini(rdev); | 558 | radeon_gart_fini(rdev); |
| 556 | radeon_ttm_global_fini(rdev); | 559 | radeon_ttm_global_fini(rdev); |
| 560 | rdev->mman.initialized = false; | ||
| 557 | DRM_INFO("radeon: ttm finalized\n"); | 561 | DRM_INFO("radeon: ttm finalized\n"); |
| 558 | } | 562 | } |
| 559 | 563 | ||
diff --git a/drivers/gpu/drm/savage/savage_drv.c b/drivers/gpu/drm/savage/savage_drv.c index eee52aa92a7c..021de44c15ab 100644 --- a/drivers/gpu/drm/savage/savage_drv.c +++ b/drivers/gpu/drm/savage/savage_drv.c | |||
| @@ -50,7 +50,7 @@ static struct drm_driver driver = { | |||
| 50 | .owner = THIS_MODULE, | 50 | .owner = THIS_MODULE, |
| 51 | .open = drm_open, | 51 | .open = drm_open, |
| 52 | .release = drm_release, | 52 | .release = drm_release, |
| 53 | .ioctl = drm_ioctl, | 53 | .unlocked_ioctl = drm_ioctl, |
| 54 | .mmap = drm_mmap, | 54 | .mmap = drm_mmap, |
| 55 | .poll = drm_poll, | 55 | .poll = drm_poll, |
| 56 | .fasync = drm_fasync, | 56 | .fasync = drm_fasync, |
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c index e725cc0b1155..4fd1f067d380 100644 --- a/drivers/gpu/drm/sis/sis_drv.c +++ b/drivers/gpu/drm/sis/sis_drv.c | |||
| @@ -80,7 +80,7 @@ static struct drm_driver driver = { | |||
| 80 | .owner = THIS_MODULE, | 80 | .owner = THIS_MODULE, |
| 81 | .open = drm_open, | 81 | .open = drm_open, |
| 82 | .release = drm_release, | 82 | .release = drm_release, |
| 83 | .ioctl = drm_ioctl, | 83 | .unlocked_ioctl = drm_ioctl, |
| 84 | .mmap = drm_mmap, | 84 | .mmap = drm_mmap, |
| 85 | .poll = drm_poll, | 85 | .poll = drm_poll, |
| 86 | .fasync = drm_fasync, | 86 | .fasync = drm_fasync, |
diff --git a/drivers/gpu/drm/tdfx/tdfx_drv.c b/drivers/gpu/drm/tdfx/tdfx_drv.c index 012ff2e356b2..ec5a43e65722 100644 --- a/drivers/gpu/drm/tdfx/tdfx_drv.c +++ b/drivers/gpu/drm/tdfx/tdfx_drv.c | |||
| @@ -48,7 +48,7 @@ static struct drm_driver driver = { | |||
| 48 | .owner = THIS_MODULE, | 48 | .owner = THIS_MODULE, |
| 49 | .open = drm_open, | 49 | .open = drm_open, |
| 50 | .release = drm_release, | 50 | .release = drm_release, |
| 51 | .ioctl = drm_ioctl, | 51 | .unlocked_ioctl = drm_ioctl, |
| 52 | .mmap = drm_mmap, | 52 | .mmap = drm_mmap, |
| 53 | .poll = drm_poll, | 53 | .poll = drm_poll, |
| 54 | .fasync = drm_fasync, | 54 | .fasync = drm_fasync, |
diff --git a/drivers/gpu/drm/via/via_drv.c b/drivers/gpu/drm/via/via_drv.c index bc2f51843005..7a1b210401e0 100644 --- a/drivers/gpu/drm/via/via_drv.c +++ b/drivers/gpu/drm/via/via_drv.c | |||
| @@ -58,7 +58,7 @@ static struct drm_driver driver = { | |||
| 58 | .owner = THIS_MODULE, | 58 | .owner = THIS_MODULE, |
| 59 | .open = drm_open, | 59 | .open = drm_open, |
| 60 | .release = drm_release, | 60 | .release = drm_release, |
| 61 | .ioctl = drm_ioctl, | 61 | .unlocked_ioctl = drm_ioctl, |
| 62 | .mmap = drm_mmap, | 62 | .mmap = drm_mmap, |
| 63 | .poll = drm_poll, | 63 | .poll = drm_poll, |
| 64 | .fasync = drm_fasync, | 64 | .fasync = drm_fasync, |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 7b48bb3b63b2..1db1ef30be2b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | |||
| @@ -103,37 +103,39 @@ | |||
| 103 | */ | 103 | */ |
| 104 | 104 | ||
| 105 | static struct drm_ioctl_desc vmw_ioctls[] = { | 105 | static struct drm_ioctl_desc vmw_ioctls[] = { |
| 106 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_GET_PARAM, vmw_getparam_ioctl, 0), | 106 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_GET_PARAM, vmw_getparam_ioctl, |
| 107 | DRM_AUTH | DRM_UNLOCKED), | ||
| 107 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl, | 108 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl, |
| 108 | 0), | 109 | DRM_AUTH | DRM_UNLOCKED), |
| 109 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl, | 110 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl, |
| 110 | 0), | 111 | DRM_AUTH | DRM_UNLOCKED), |
| 111 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_CURSOR_BYPASS, | 112 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_CURSOR_BYPASS, |
| 112 | vmw_kms_cursor_bypass_ioctl, 0), | 113 | vmw_kms_cursor_bypass_ioctl, |
| 114 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), | ||
| 113 | 115 | ||
| 114 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_CONTROL_STREAM, vmw_overlay_ioctl, | 116 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_CONTROL_STREAM, vmw_overlay_ioctl, |
| 115 | 0), | 117 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), |
| 116 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_CLAIM_STREAM, vmw_stream_claim_ioctl, | 118 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_CLAIM_STREAM, vmw_stream_claim_ioctl, |
| 117 | 0), | 119 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), |
| 118 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_STREAM, vmw_stream_unref_ioctl, | 120 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_STREAM, vmw_stream_unref_ioctl, |
| 119 | 0), | 121 | DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED), |
| 120 | 122 | ||
| 121 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_CONTEXT, vmw_context_define_ioctl, | 123 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_CONTEXT, vmw_context_define_ioctl, |
| 122 | 0), | 124 | DRM_AUTH | DRM_UNLOCKED), |
| 123 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl, | 125 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl, |
| 124 | 0), | 126 | DRM_AUTH | DRM_UNLOCKED), |
| 125 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_SURFACE, vmw_surface_define_ioctl, | 127 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_SURFACE, vmw_surface_define_ioctl, |
| 126 | 0), | 128 | DRM_AUTH | DRM_UNLOCKED), |
| 127 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl, | 129 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl, |
| 128 | 0), | 130 | DRM_AUTH | DRM_UNLOCKED), |
| 129 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_REF_SURFACE, vmw_surface_reference_ioctl, | 131 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_REF_SURFACE, vmw_surface_reference_ioctl, |
| 130 | 0), | 132 | DRM_AUTH | DRM_UNLOCKED), |
| 131 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_EXECBUF, vmw_execbuf_ioctl, | 133 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_EXECBUF, vmw_execbuf_ioctl, |
| 132 | 0), | 134 | DRM_AUTH | DRM_UNLOCKED), |
| 133 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl, | 135 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl, |
| 134 | 0), | 136 | DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED), |
| 135 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl, | 137 | VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl, |
| 136 | 0) | 138 | DRM_AUTH | DRM_UNLOCKED) |
| 137 | }; | 139 | }; |
| 138 | 140 | ||
| 139 | static struct pci_device_id vmw_pci_id_list[] = { | 141 | static struct pci_device_id vmw_pci_id_list[] = { |
| @@ -460,11 +462,9 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd, | |||
| 460 | struct drm_file *file_priv = filp->private_data; | 462 | struct drm_file *file_priv = filp->private_data; |
| 461 | struct drm_device *dev = file_priv->minor->dev; | 463 | struct drm_device *dev = file_priv->minor->dev; |
| 462 | unsigned int nr = DRM_IOCTL_NR(cmd); | 464 | unsigned int nr = DRM_IOCTL_NR(cmd); |
| 463 | long ret; | ||
| 464 | 465 | ||
| 465 | /* | 466 | /* |
| 466 | * The driver private ioctls and TTM ioctls should be | 467 | * Do extra checking on driver private ioctls. |
| 467 | * thread-safe. | ||
| 468 | */ | 468 | */ |
| 469 | 469 | ||
| 470 | if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) | 470 | if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) |
| @@ -477,18 +477,9 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd, | |||
| 477 | nr - DRM_COMMAND_BASE); | 477 | nr - DRM_COMMAND_BASE); |
| 478 | return -EINVAL; | 478 | return -EINVAL; |
| 479 | } | 479 | } |
| 480 | return drm_ioctl(filp->f_path.dentry->d_inode, | ||
| 481 | filp, cmd, arg); | ||
| 482 | } | 480 | } |
| 483 | 481 | ||
| 484 | /* | 482 | return drm_ioctl(filp, cmd, arg); |
| 485 | * Not all old drm ioctls are thread-safe. | ||
| 486 | */ | ||
| 487 | |||
| 488 | lock_kernel(); | ||
| 489 | ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg); | ||
| 490 | unlock_kernel(); | ||
| 491 | return ret; | ||
| 492 | } | 483 | } |
| 493 | 484 | ||
| 494 | static int vmw_firstopen(struct drm_device *dev) | 485 | static int vmw_firstopen(struct drm_device *dev) |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 43546d09d1b0..e61bd85b6975 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | |||
| @@ -123,6 +123,7 @@ struct vmw_sw_context{ | |||
| 123 | uint32_t last_cid; | 123 | uint32_t last_cid; |
| 124 | bool cid_valid; | 124 | bool cid_valid; |
| 125 | uint32_t last_sid; | 125 | uint32_t last_sid; |
| 126 | uint32_t sid_translation; | ||
| 126 | bool sid_valid; | 127 | bool sid_valid; |
| 127 | struct ttm_object_file *tfile; | 128 | struct ttm_object_file *tfile; |
| 128 | struct list_head validate_nodes; | 129 | struct list_head validate_nodes; |
| @@ -317,9 +318,10 @@ extern void vmw_surface_res_free(struct vmw_resource *res); | |||
| 317 | extern int vmw_surface_init(struct vmw_private *dev_priv, | 318 | extern int vmw_surface_init(struct vmw_private *dev_priv, |
| 318 | struct vmw_surface *srf, | 319 | struct vmw_surface *srf, |
| 319 | void (*res_free) (struct vmw_resource *res)); | 320 | void (*res_free) (struct vmw_resource *res)); |
| 320 | extern int vmw_user_surface_lookup(struct vmw_private *dev_priv, | 321 | extern int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv, |
| 321 | struct ttm_object_file *tfile, | 322 | struct ttm_object_file *tfile, |
| 322 | int sid, struct vmw_surface **out); | 323 | uint32_t handle, |
| 324 | struct vmw_surface **out); | ||
| 323 | extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, | 325 | extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, |
| 324 | struct drm_file *file_priv); | 326 | struct drm_file *file_priv); |
| 325 | extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | 327 | extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data, |
| @@ -328,7 +330,7 @@ extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, | |||
| 328 | struct drm_file *file_priv); | 330 | struct drm_file *file_priv); |
| 329 | extern int vmw_surface_check(struct vmw_private *dev_priv, | 331 | extern int vmw_surface_check(struct vmw_private *dev_priv, |
| 330 | struct ttm_object_file *tfile, | 332 | struct ttm_object_file *tfile, |
| 331 | int id); | 333 | uint32_t handle, int *id); |
| 332 | extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo); | 334 | extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo); |
| 333 | extern int vmw_dmabuf_init(struct vmw_private *dev_priv, | 335 | extern int vmw_dmabuf_init(struct vmw_private *dev_priv, |
| 334 | struct vmw_dma_buffer *vmw_bo, | 336 | struct vmw_dma_buffer *vmw_bo, |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 7a39f3e6dc2c..2e92da567403 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | |||
| @@ -73,21 +73,32 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv, | |||
| 73 | 73 | ||
| 74 | static int vmw_cmd_sid_check(struct vmw_private *dev_priv, | 74 | static int vmw_cmd_sid_check(struct vmw_private *dev_priv, |
| 75 | struct vmw_sw_context *sw_context, | 75 | struct vmw_sw_context *sw_context, |
| 76 | uint32_t sid) | 76 | uint32_t *sid) |
| 77 | { | 77 | { |
| 78 | if (unlikely((!sw_context->sid_valid || sid != sw_context->last_sid) && | 78 | if (*sid == SVGA3D_INVALID_ID) |
| 79 | sid != SVGA3D_INVALID_ID)) { | 79 | return 0; |
| 80 | int ret = vmw_surface_check(dev_priv, sw_context->tfile, sid); | 80 | |
| 81 | if (unlikely((!sw_context->sid_valid || | ||
| 82 | *sid != sw_context->last_sid))) { | ||
| 83 | int real_id; | ||
| 84 | int ret = vmw_surface_check(dev_priv, sw_context->tfile, | ||
| 85 | *sid, &real_id); | ||
| 81 | 86 | ||
| 82 | if (unlikely(ret != 0)) { | 87 | if (unlikely(ret != 0)) { |
| 83 | DRM_ERROR("Could ot find or use surface %u\n", | 88 | DRM_ERROR("Could ot find or use surface 0x%08x " |
| 84 | (unsigned) sid); | 89 | "address 0x%08lx\n", |
| 90 | (unsigned int) *sid, | ||
| 91 | (unsigned long) sid); | ||
| 85 | return ret; | 92 | return ret; |
| 86 | } | 93 | } |
| 87 | 94 | ||
| 88 | sw_context->last_sid = sid; | 95 | sw_context->last_sid = *sid; |
| 89 | sw_context->sid_valid = true; | 96 | sw_context->sid_valid = true; |
| 90 | } | 97 | *sid = real_id; |
| 98 | sw_context->sid_translation = real_id; | ||
| 99 | } else | ||
| 100 | *sid = sw_context->sid_translation; | ||
| 101 | |||
| 91 | return 0; | 102 | return 0; |
| 92 | } | 103 | } |
| 93 | 104 | ||
| @@ -107,7 +118,8 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv, | |||
| 107 | return ret; | 118 | return ret; |
| 108 | 119 | ||
| 109 | cmd = container_of(header, struct vmw_sid_cmd, header); | 120 | cmd = container_of(header, struct vmw_sid_cmd, header); |
| 110 | return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.target.sid); | 121 | ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid); |
| 122 | return ret; | ||
| 111 | } | 123 | } |
| 112 | 124 | ||
| 113 | static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv, | 125 | static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv, |
| @@ -121,10 +133,10 @@ static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv, | |||
| 121 | int ret; | 133 | int ret; |
| 122 | 134 | ||
| 123 | cmd = container_of(header, struct vmw_sid_cmd, header); | 135 | cmd = container_of(header, struct vmw_sid_cmd, header); |
| 124 | ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.src.sid); | 136 | ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid); |
| 125 | if (unlikely(ret != 0)) | 137 | if (unlikely(ret != 0)) |
| 126 | return ret; | 138 | return ret; |
| 127 | return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.dest.sid); | 139 | return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid); |
| 128 | } | 140 | } |
| 129 | 141 | ||
| 130 | static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv, | 142 | static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv, |
| @@ -138,10 +150,10 @@ static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv, | |||
| 138 | int ret; | 150 | int ret; |
| 139 | 151 | ||
| 140 | cmd = container_of(header, struct vmw_sid_cmd, header); | 152 | cmd = container_of(header, struct vmw_sid_cmd, header); |
| 141 | ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.src.sid); | 153 | ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid); |
| 142 | if (unlikely(ret != 0)) | 154 | if (unlikely(ret != 0)) |
| 143 | return ret; | 155 | return ret; |
| 144 | return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.dest.sid); | 156 | return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid); |
| 145 | } | 157 | } |
| 146 | 158 | ||
| 147 | static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv, | 159 | static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv, |
| @@ -154,7 +166,7 @@ static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv, | |||
| 154 | } *cmd; | 166 | } *cmd; |
| 155 | 167 | ||
| 156 | cmd = container_of(header, struct vmw_sid_cmd, header); | 168 | cmd = container_of(header, struct vmw_sid_cmd, header); |
| 157 | return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.srcImage.sid); | 169 | return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid); |
| 158 | } | 170 | } |
| 159 | 171 | ||
| 160 | static int vmw_cmd_present_check(struct vmw_private *dev_priv, | 172 | static int vmw_cmd_present_check(struct vmw_private *dev_priv, |
| @@ -167,7 +179,7 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv, | |||
| 167 | } *cmd; | 179 | } *cmd; |
| 168 | 180 | ||
| 169 | cmd = container_of(header, struct vmw_sid_cmd, header); | 181 | cmd = container_of(header, struct vmw_sid_cmd, header); |
| 170 | return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.sid); | 182 | return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid); |
| 171 | } | 183 | } |
| 172 | 184 | ||
| 173 | static int vmw_cmd_dma(struct vmw_private *dev_priv, | 185 | static int vmw_cmd_dma(struct vmw_private *dev_priv, |
| @@ -187,12 +199,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, | |||
| 187 | uint32_t cur_validate_node; | 199 | uint32_t cur_validate_node; |
| 188 | struct ttm_validate_buffer *val_buf; | 200 | struct ttm_validate_buffer *val_buf; |
| 189 | 201 | ||
| 190 | |||
| 191 | cmd = container_of(header, struct vmw_dma_cmd, header); | 202 | cmd = container_of(header, struct vmw_dma_cmd, header); |
| 192 | ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->dma.host.sid); | ||
| 193 | if (unlikely(ret != 0)) | ||
| 194 | return ret; | ||
| 195 | |||
| 196 | handle = cmd->dma.guest.ptr.gmrId; | 203 | handle = cmd->dma.guest.ptr.gmrId; |
| 197 | ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); | 204 | ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo); |
| 198 | if (unlikely(ret != 0)) { | 205 | if (unlikely(ret != 0)) { |
| @@ -228,14 +235,23 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv, | |||
| 228 | ++sw_context->cur_val_buf; | 235 | ++sw_context->cur_val_buf; |
| 229 | } | 236 | } |
| 230 | 237 | ||
| 231 | ret = vmw_user_surface_lookup(dev_priv, sw_context->tfile, | 238 | ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile, |
| 232 | cmd->dma.host.sid, &srf); | 239 | cmd->dma.host.sid, &srf); |
| 233 | if (ret) { | 240 | if (ret) { |
| 234 | DRM_ERROR("could not find surface\n"); | 241 | DRM_ERROR("could not find surface\n"); |
| 235 | goto out_no_reloc; | 242 | goto out_no_reloc; |
| 236 | } | 243 | } |
| 237 | 244 | ||
| 245 | /** | ||
| 246 | * Patch command stream with device SID. | ||
| 247 | */ | ||
| 248 | |||
| 249 | cmd->dma.host.sid = srf->res.id; | ||
| 238 | vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header); | 250 | vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header); |
| 251 | /** | ||
| 252 | * FIXME: May deadlock here when called from the | ||
| 253 | * command parsing code. | ||
| 254 | */ | ||
| 239 | vmw_surface_unreference(&srf); | 255 | vmw_surface_unreference(&srf); |
| 240 | 256 | ||
| 241 | out_no_reloc: | 257 | out_no_reloc: |
| @@ -243,6 +259,90 @@ out_no_reloc: | |||
| 243 | return ret; | 259 | return ret; |
| 244 | } | 260 | } |
| 245 | 261 | ||
| 262 | static int vmw_cmd_draw(struct vmw_private *dev_priv, | ||
| 263 | struct vmw_sw_context *sw_context, | ||
| 264 | SVGA3dCmdHeader *header) | ||
| 265 | { | ||
| 266 | struct vmw_draw_cmd { | ||
| 267 | SVGA3dCmdHeader header; | ||
| 268 | SVGA3dCmdDrawPrimitives body; | ||
| 269 | } *cmd; | ||
| 270 | SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)( | ||
| 271 | (unsigned long)header + sizeof(*cmd)); | ||
| 272 | SVGA3dPrimitiveRange *range; | ||
| 273 | uint32_t i; | ||
| 274 | uint32_t maxnum; | ||
| 275 | int ret; | ||
| 276 | |||
| 277 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | ||
| 278 | if (unlikely(ret != 0)) | ||
| 279 | return ret; | ||
| 280 | |||
| 281 | cmd = container_of(header, struct vmw_draw_cmd, header); | ||
| 282 | maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl); | ||
| 283 | |||
| 284 | if (unlikely(cmd->body.numVertexDecls > maxnum)) { | ||
| 285 | DRM_ERROR("Illegal number of vertex declarations.\n"); | ||
| 286 | return -EINVAL; | ||
| 287 | } | ||
| 288 | |||
| 289 | for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) { | ||
| 290 | ret = vmw_cmd_sid_check(dev_priv, sw_context, | ||
| 291 | &decl->array.surfaceId); | ||
| 292 | if (unlikely(ret != 0)) | ||
| 293 | return ret; | ||
| 294 | } | ||
| 295 | |||
| 296 | maxnum = (header->size - sizeof(cmd->body) - | ||
| 297 | cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range); | ||
| 298 | if (unlikely(cmd->body.numRanges > maxnum)) { | ||
| 299 | DRM_ERROR("Illegal number of index ranges.\n"); | ||
| 300 | return -EINVAL; | ||
| 301 | } | ||
| 302 | |||
| 303 | range = (SVGA3dPrimitiveRange *) decl; | ||
| 304 | for (i = 0; i < cmd->body.numRanges; ++i, ++range) { | ||
| 305 | ret = vmw_cmd_sid_check(dev_priv, sw_context, | ||
| 306 | &range->indexArray.surfaceId); | ||
| 307 | if (unlikely(ret != 0)) | ||
| 308 | return ret; | ||
| 309 | } | ||
| 310 | return 0; | ||
| 311 | } | ||
| 312 | |||
| 313 | |||
| 314 | static int vmw_cmd_tex_state(struct vmw_private *dev_priv, | ||
| 315 | struct vmw_sw_context *sw_context, | ||
| 316 | SVGA3dCmdHeader *header) | ||
| 317 | { | ||
| 318 | struct vmw_tex_state_cmd { | ||
| 319 | SVGA3dCmdHeader header; | ||
| 320 | SVGA3dCmdSetTextureState state; | ||
| 321 | }; | ||
| 322 | |||
| 323 | SVGA3dTextureState *last_state = (SVGA3dTextureState *) | ||
| 324 | ((unsigned long) header + header->size + sizeof(header)); | ||
| 325 | SVGA3dTextureState *cur_state = (SVGA3dTextureState *) | ||
| 326 | ((unsigned long) header + sizeof(struct vmw_tex_state_cmd)); | ||
| 327 | int ret; | ||
| 328 | |||
| 329 | ret = vmw_cmd_cid_check(dev_priv, sw_context, header); | ||
| 330 | if (unlikely(ret != 0)) | ||
| 331 | return ret; | ||
| 332 | |||
| 333 | for (; cur_state < last_state; ++cur_state) { | ||
| 334 | if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE)) | ||
| 335 | continue; | ||
| 336 | |||
| 337 | ret = vmw_cmd_sid_check(dev_priv, sw_context, | ||
| 338 | &cur_state->value); | ||
| 339 | if (unlikely(ret != 0)) | ||
| 340 | return ret; | ||
| 341 | } | ||
| 342 | |||
| 343 | return 0; | ||
| 344 | } | ||
| 345 | |||
| 246 | 346 | ||
| 247 | typedef int (*vmw_cmd_func) (struct vmw_private *, | 347 | typedef int (*vmw_cmd_func) (struct vmw_private *, |
| 248 | struct vmw_sw_context *, | 348 | struct vmw_sw_context *, |
| @@ -264,7 +364,7 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = { | |||
| 264 | VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check), | 364 | VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check), |
| 265 | VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET, | 365 | VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET, |
| 266 | &vmw_cmd_set_render_target_check), | 366 | &vmw_cmd_set_render_target_check), |
| 267 | VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_cid_check), | 367 | VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state), |
| 268 | VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check), | 368 | VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check), |
| 269 | VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check), | 369 | VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check), |
| 270 | VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check), | 370 | VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check), |
| @@ -276,7 +376,7 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = { | |||
| 276 | VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check), | 376 | VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check), |
| 277 | VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check), | 377 | VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check), |
| 278 | VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check), | 378 | VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check), |
| 279 | VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_cid_check), | 379 | VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw), |
| 280 | VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check), | 380 | VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check), |
| 281 | VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check), | 381 | VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check), |
| 282 | VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_cid_check), | 382 | VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_cid_check), |
| @@ -291,6 +391,7 @@ static int vmw_cmd_check(struct vmw_private *dev_priv, | |||
| 291 | void *buf, uint32_t *size) | 391 | void *buf, uint32_t *size) |
| 292 | { | 392 | { |
| 293 | uint32_t cmd_id; | 393 | uint32_t cmd_id; |
| 394 | uint32_t size_remaining = *size; | ||
| 294 | SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf; | 395 | SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf; |
| 295 | int ret; | 396 | int ret; |
| 296 | 397 | ||
| @@ -304,6 +405,9 @@ static int vmw_cmd_check(struct vmw_private *dev_priv, | |||
| 304 | *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader); | 405 | *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader); |
| 305 | 406 | ||
| 306 | cmd_id -= SVGA_3D_CMD_BASE; | 407 | cmd_id -= SVGA_3D_CMD_BASE; |
| 408 | if (unlikely(*size > size_remaining)) | ||
| 409 | goto out_err; | ||
| 410 | |||
| 307 | if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE)) | 411 | if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE)) |
| 308 | goto out_err; | 412 | goto out_err; |
| 309 | 413 | ||
| @@ -326,6 +430,7 @@ static int vmw_cmd_check_all(struct vmw_private *dev_priv, | |||
| 326 | int ret; | 430 | int ret; |
| 327 | 431 | ||
| 328 | while (cur_size > 0) { | 432 | while (cur_size > 0) { |
| 433 | size = cur_size; | ||
| 329 | ret = vmw_cmd_check(dev_priv, sw_context, buf, &size); | 434 | ret = vmw_cmd_check(dev_priv, sw_context, buf, &size); |
| 330 | if (unlikely(ret != 0)) | 435 | if (unlikely(ret != 0)) |
| 331 | return ret; | 436 | return ret; |
| @@ -386,7 +491,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv, | |||
| 386 | return 0; | 491 | return 0; |
| 387 | 492 | ||
| 388 | ret = vmw_gmr_bind(dev_priv, bo); | 493 | ret = vmw_gmr_bind(dev_priv, bo); |
| 389 | if (likely(ret == 0 || ret == -ERESTART)) | 494 | if (likely(ret == 0 || ret == -ERESTARTSYS)) |
| 390 | return ret; | 495 | return ret; |
| 391 | 496 | ||
| 392 | 497 | ||
| @@ -429,7 +534,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, | |||
| 429 | 534 | ||
| 430 | ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex); | 535 | ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex); |
| 431 | if (unlikely(ret != 0)) { | 536 | if (unlikely(ret != 0)) { |
| 432 | ret = -ERESTART; | 537 | ret = -ERESTARTSYS; |
| 433 | goto out_no_cmd_mutex; | 538 | goto out_no_cmd_mutex; |
| 434 | } | 539 | } |
| 435 | 540 | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c index 76b0693e2458..01feb48af333 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | |||
| @@ -191,7 +191,7 @@ static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv, | |||
| 191 | } | 191 | } |
| 192 | schedule_timeout(1); | 192 | schedule_timeout(1); |
| 193 | if (interruptible && signal_pending(current)) { | 193 | if (interruptible && signal_pending(current)) { |
| 194 | ret = -ERESTART; | 194 | ret = -ERESTARTSYS; |
| 195 | break; | 195 | break; |
| 196 | } | 196 | } |
| 197 | } | 197 | } |
| @@ -237,9 +237,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv, | |||
| 237 | (dev_priv->fifo_queue, | 237 | (dev_priv->fifo_queue, |
| 238 | !vmw_fifo_is_full(dev_priv, bytes), timeout); | 238 | !vmw_fifo_is_full(dev_priv, bytes), timeout); |
| 239 | 239 | ||
| 240 | if (unlikely(ret == -ERESTARTSYS)) | 240 | if (unlikely(ret == 0)) |
| 241 | ret = -ERESTART; | ||
| 242 | else if (unlikely(ret == 0)) | ||
| 243 | ret = -EBUSY; | 241 | ret = -EBUSY; |
| 244 | else if (likely(ret > 0)) | 242 | else if (likely(ret > 0)) |
| 245 | ret = 0; | 243 | ret = 0; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c index 9e0f0306eedb..d40086fc8647 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | |||
| @@ -155,7 +155,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv, | |||
| 155 | TASK_UNINTERRUPTIBLE); | 155 | TASK_UNINTERRUPTIBLE); |
| 156 | } | 156 | } |
| 157 | if (interruptible && signal_pending(current)) { | 157 | if (interruptible && signal_pending(current)) { |
| 158 | ret = -ERESTART; | 158 | ret = -ERESTARTSYS; |
| 159 | break; | 159 | break; |
| 160 | } | 160 | } |
| 161 | } | 161 | } |
| @@ -218,9 +218,7 @@ int vmw_wait_fence(struct vmw_private *dev_priv, | |||
| 218 | vmw_fence_signaled(dev_priv, sequence), | 218 | vmw_fence_signaled(dev_priv, sequence), |
| 219 | timeout); | 219 | timeout); |
| 220 | 220 | ||
| 221 | if (unlikely(ret == -ERESTARTSYS)) | 221 | if (unlikely(ret == 0)) |
| 222 | ret = -ERESTART; | ||
| 223 | else if (unlikely(ret == 0)) | ||
| 224 | ret = -EBUSY; | 222 | ret = -EBUSY; |
| 225 | else if (likely(ret > 0)) | 223 | else if (likely(ret > 0)) |
| 226 | ret = 0; | 224 | ret = 0; |
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index e9403be446fe..b1af76e371c3 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | |||
| @@ -106,8 +106,8 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, | |||
| 106 | int ret; | 106 | int ret; |
| 107 | 107 | ||
| 108 | if (handle) { | 108 | if (handle) { |
| 109 | ret = vmw_user_surface_lookup(dev_priv, tfile, | 109 | ret = vmw_user_surface_lookup_handle(dev_priv, tfile, |
| 110 | handle, &surface); | 110 | handle, &surface); |
| 111 | if (!ret) { | 111 | if (!ret) { |
| 112 | if (!surface->snooper.image) { | 112 | if (!surface->snooper.image) { |
| 113 | DRM_ERROR("surface not suitable for cursor\n"); | 113 | DRM_ERROR("surface not suitable for cursor\n"); |
| @@ -704,8 +704,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev, | |||
| 704 | struct vmw_dma_buffer *bo = NULL; | 704 | struct vmw_dma_buffer *bo = NULL; |
| 705 | int ret; | 705 | int ret; |
| 706 | 706 | ||
| 707 | ret = vmw_user_surface_lookup(dev_priv, tfile, | 707 | ret = vmw_user_surface_lookup_handle(dev_priv, tfile, |
| 708 | mode_cmd->handle, &surface); | 708 | mode_cmd->handle, &surface); |
| 709 | if (ret) | 709 | if (ret) |
| 710 | goto try_dmabuf; | 710 | goto try_dmabuf; |
| 711 | 711 | ||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index a1ceed0c8e07..c012d5927f65 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | |||
| @@ -488,28 +488,44 @@ static void vmw_user_surface_free(struct vmw_resource *res) | |||
| 488 | kfree(user_srf); | 488 | kfree(user_srf); |
| 489 | } | 489 | } |
| 490 | 490 | ||
| 491 | int vmw_user_surface_lookup(struct vmw_private *dev_priv, | 491 | int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv, |
| 492 | struct ttm_object_file *tfile, | 492 | struct ttm_object_file *tfile, |
| 493 | int sid, struct vmw_surface **out) | 493 | uint32_t handle, struct vmw_surface **out) |
| 494 | { | 494 | { |
| 495 | struct vmw_resource *res; | 495 | struct vmw_resource *res; |
| 496 | struct vmw_surface *srf; | 496 | struct vmw_surface *srf; |
| 497 | struct vmw_user_surface *user_srf; | 497 | struct vmw_user_surface *user_srf; |
| 498 | struct ttm_base_object *base; | ||
| 499 | int ret = -EINVAL; | ||
| 498 | 500 | ||
| 499 | res = vmw_resource_lookup(dev_priv, &dev_priv->surface_idr, sid); | 501 | base = ttm_base_object_lookup(tfile, handle); |
| 500 | if (unlikely(res == NULL)) | 502 | if (unlikely(base == NULL)) |
| 501 | return -EINVAL; | 503 | return -EINVAL; |
| 502 | 504 | ||
| 503 | if (res->res_free != &vmw_user_surface_free) | 505 | if (unlikely(base->object_type != VMW_RES_SURFACE)) |
| 504 | return -EINVAL; | 506 | goto out_bad_resource; |
| 505 | 507 | ||
| 506 | srf = container_of(res, struct vmw_surface, res); | 508 | user_srf = container_of(base, struct vmw_user_surface, base); |
| 507 | user_srf = container_of(srf, struct vmw_user_surface, srf); | 509 | srf = &user_srf->srf; |
| 508 | if (user_srf->base.tfile != tfile && !user_srf->base.shareable) | 510 | res = &srf->res; |
| 509 | return -EPERM; | 511 | |
| 512 | read_lock(&dev_priv->resource_lock); | ||
| 513 | |||
| 514 | if (!res->avail || res->res_free != &vmw_user_surface_free) { | ||
| 515 | read_unlock(&dev_priv->resource_lock); | ||
| 516 | goto out_bad_resource; | ||
| 517 | } | ||
| 518 | |||
| 519 | kref_get(&res->kref); | ||
| 520 | read_unlock(&dev_priv->resource_lock); | ||
| 510 | 521 | ||
| 511 | *out = srf; | 522 | *out = srf; |
| 512 | return 0; | 523 | ret = 0; |
| 524 | |||
| 525 | out_bad_resource: | ||
| 526 | ttm_base_object_unref(&base); | ||
| 527 | |||
| 528 | return ret; | ||
| 513 | } | 529 | } |
| 514 | 530 | ||
| 515 | static void vmw_user_surface_base_release(struct ttm_base_object **p_base) | 531 | static void vmw_user_surface_base_release(struct ttm_base_object **p_base) |
| @@ -526,35 +542,10 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base) | |||
| 526 | int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, | 542 | int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data, |
| 527 | struct drm_file *file_priv) | 543 | struct drm_file *file_priv) |
| 528 | { | 544 | { |
| 529 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
| 530 | struct vmw_resource *res; | ||
| 531 | struct vmw_surface *srf; | ||
| 532 | struct vmw_user_surface *user_srf; | ||
| 533 | struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data; | 545 | struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data; |
| 534 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | 546 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; |
| 535 | int ret = 0; | ||
| 536 | |||
| 537 | res = vmw_resource_lookup(dev_priv, &dev_priv->surface_idr, arg->sid); | ||
| 538 | if (unlikely(res == NULL)) | ||
| 539 | return -EINVAL; | ||
| 540 | |||
| 541 | if (res->res_free != &vmw_user_surface_free) { | ||
| 542 | ret = -EINVAL; | ||
| 543 | goto out; | ||
| 544 | } | ||
| 545 | 547 | ||
| 546 | srf = container_of(res, struct vmw_surface, res); | 548 | return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE); |
| 547 | user_srf = container_of(srf, struct vmw_user_surface, srf); | ||
| 548 | if (user_srf->base.tfile != tfile && !user_srf->base.shareable) { | ||
| 549 | ret = -EPERM; | ||
| 550 | goto out; | ||
| 551 | } | ||
| 552 | |||
| 553 | ttm_ref_object_base_unref(tfile, user_srf->base.hash.key, | ||
| 554 | TTM_REF_USAGE); | ||
| 555 | out: | ||
| 556 | vmw_resource_unreference(&res); | ||
| 557 | return ret; | ||
| 558 | } | 549 | } |
| 559 | 550 | ||
| 560 | int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | 551 | int vmw_surface_define_ioctl(struct drm_device *dev, void *data, |
| @@ -649,7 +640,10 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, | |||
| 649 | } | 640 | } |
| 650 | srf->snooper.crtc = NULL; | 641 | srf->snooper.crtc = NULL; |
| 651 | 642 | ||
| 652 | rep->sid = res->id; | 643 | rep->sid = user_srf->base.hash.key; |
| 644 | if (rep->sid == SVGA3D_INVALID_ID) | ||
| 645 | DRM_ERROR("Created bad Surface ID.\n"); | ||
| 646 | |||
| 653 | vmw_resource_unreference(&res); | 647 | vmw_resource_unreference(&res); |
| 654 | return 0; | 648 | return 0; |
| 655 | out_err1: | 649 | out_err1: |
| @@ -662,39 +656,33 @@ out_err0: | |||
| 662 | int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, | 656 | int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, |
| 663 | struct drm_file *file_priv) | 657 | struct drm_file *file_priv) |
| 664 | { | 658 | { |
| 665 | struct vmw_private *dev_priv = vmw_priv(dev); | ||
| 666 | union drm_vmw_surface_reference_arg *arg = | 659 | union drm_vmw_surface_reference_arg *arg = |
| 667 | (union drm_vmw_surface_reference_arg *)data; | 660 | (union drm_vmw_surface_reference_arg *)data; |
| 668 | struct drm_vmw_surface_arg *req = &arg->req; | 661 | struct drm_vmw_surface_arg *req = &arg->req; |
| 669 | struct drm_vmw_surface_create_req *rep = &arg->rep; | 662 | struct drm_vmw_surface_create_req *rep = &arg->rep; |
| 670 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; | 663 | struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; |
| 671 | struct vmw_resource *res; | ||
| 672 | struct vmw_surface *srf; | 664 | struct vmw_surface *srf; |
| 673 | struct vmw_user_surface *user_srf; | 665 | struct vmw_user_surface *user_srf; |
| 674 | struct drm_vmw_size __user *user_sizes; | 666 | struct drm_vmw_size __user *user_sizes; |
| 675 | int ret; | 667 | struct ttm_base_object *base; |
| 668 | int ret = -EINVAL; | ||
| 676 | 669 | ||
| 677 | res = vmw_resource_lookup(dev_priv, &dev_priv->surface_idr, req->sid); | 670 | base = ttm_base_object_lookup(tfile, req->sid); |
| 678 | if (unlikely(res == NULL)) | 671 | if (unlikely(base == NULL)) { |
| 672 | DRM_ERROR("Could not find surface to reference.\n"); | ||
| 679 | return -EINVAL; | 673 | return -EINVAL; |
| 680 | |||
| 681 | if (res->res_free != &vmw_user_surface_free) { | ||
| 682 | ret = -EINVAL; | ||
| 683 | goto out; | ||
| 684 | } | 674 | } |
| 685 | 675 | ||
| 686 | srf = container_of(res, struct vmw_surface, res); | 676 | if (unlikely(base->object_type != VMW_RES_SURFACE)) |
| 687 | user_srf = container_of(srf, struct vmw_user_surface, srf); | 677 | goto out_bad_resource; |
| 688 | if (user_srf->base.tfile != tfile && !user_srf->base.shareable) { | 678 | |
| 689 | DRM_ERROR("Tried to reference none shareable surface\n"); | 679 | user_srf = container_of(base, struct vmw_user_surface, base); |
| 690 | ret = -EPERM; | 680 | srf = &user_srf->srf; |
| 691 | goto out; | ||
| 692 | } | ||
| 693 | 681 | ||
| 694 | ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL); | 682 | ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL); |
| 695 | if (unlikely(ret != 0)) { | 683 | if (unlikely(ret != 0)) { |
| 696 | DRM_ERROR("Could not add a reference to a surface.\n"); | 684 | DRM_ERROR("Could not add a reference to a surface.\n"); |
| 697 | goto out; | 685 | goto out_no_reference; |
| 698 | } | 686 | } |
| 699 | 687 | ||
| 700 | rep->flags = srf->flags; | 688 | rep->flags = srf->flags; |
| @@ -706,40 +694,43 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data, | |||
| 706 | if (user_sizes) | 694 | if (user_sizes) |
| 707 | ret = copy_to_user(user_sizes, srf->sizes, | 695 | ret = copy_to_user(user_sizes, srf->sizes, |
| 708 | srf->num_sizes * sizeof(*srf->sizes)); | 696 | srf->num_sizes * sizeof(*srf->sizes)); |
| 709 | if (unlikely(ret != 0)) { | 697 | if (unlikely(ret != 0)) |
| 710 | DRM_ERROR("copy_to_user failed %p %u\n", | 698 | DRM_ERROR("copy_to_user failed %p %u\n", |
| 711 | user_sizes, srf->num_sizes); | 699 | user_sizes, srf->num_sizes); |
| 712 | /** | 700 | out_bad_resource: |
| 713 | * FIXME: Unreference surface here? | 701 | out_no_reference: |
| 714 | */ | 702 | ttm_base_object_unref(&base); |
| 715 | goto out; | 703 | |
| 716 | } | ||
| 717 | out: | ||
| 718 | vmw_resource_unreference(&res); | ||
| 719 | return ret; | 704 | return ret; |
| 720 | } | 705 | } |
| 721 | 706 | ||
| 722 | int vmw_surface_check(struct vmw_private *dev_priv, | 707 | int vmw_surface_check(struct vmw_private *dev_priv, |
| 723 | struct ttm_object_file *tfile, | 708 | struct ttm_object_file *tfile, |
| 724 | int id) | 709 | uint32_t handle, int *id) |
| 725 | { | 710 | { |
| 726 | struct vmw_resource *res; | 711 | struct ttm_base_object *base; |
| 727 | int ret = 0; | 712 | struct vmw_user_surface *user_srf; |
| 728 | 713 | ||
| 729 | read_lock(&dev_priv->resource_lock); | 714 | int ret = -EPERM; |
| 730 | res = idr_find(&dev_priv->surface_idr, id); | ||
| 731 | if (res && res->avail) { | ||
| 732 | struct vmw_surface *srf = | ||
| 733 | container_of(res, struct vmw_surface, res); | ||
| 734 | struct vmw_user_surface *usrf = | ||
| 735 | container_of(srf, struct vmw_user_surface, srf); | ||
| 736 | 715 | ||
| 737 | if (usrf->base.tfile != tfile && !usrf->base.shareable) | 716 | base = ttm_base_object_lookup(tfile, handle); |
| 738 | ret = -EPERM; | 717 | if (unlikely(base == NULL)) |
| 739 | } else | 718 | return -EINVAL; |
| 740 | ret = -EINVAL; | 719 | |
| 741 | read_unlock(&dev_priv->resource_lock); | 720 | if (unlikely(base->object_type != VMW_RES_SURFACE)) |
| 721 | goto out_bad_surface; | ||
| 742 | 722 | ||
| 723 | user_srf = container_of(base, struct vmw_user_surface, base); | ||
| 724 | *id = user_srf->srf.res.id; | ||
| 725 | ret = 0; | ||
| 726 | |||
| 727 | out_bad_surface: | ||
| 728 | /** | ||
| 729 | * FIXME: May deadlock here when called from the | ||
| 730 | * command parsing code. | ||
| 731 | */ | ||
| 732 | |||
| 733 | ttm_base_object_unref(&base); | ||
| 743 | return ret; | 734 | return ret; |
| 744 | } | 735 | } |
| 745 | 736 | ||
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h index bfd03bf8be54..f3d440cc68f2 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_hal.h +++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h | |||
| @@ -34,6 +34,7 @@ | |||
| 34 | 34 | ||
| 35 | #include <linux/list.h> | 35 | #include <linux/list.h> |
| 36 | #include <linux/mutex.h> | 36 | #include <linux/mutex.h> |
| 37 | #include <linux/kfifo.h> | ||
| 37 | 38 | ||
| 38 | #include "t3_cpl.h" | 39 | #include "t3_cpl.h" |
| 39 | #include "t3cdev.h" | 40 | #include "t3cdev.h" |
| @@ -75,13 +76,13 @@ struct cxio_hal_ctrl_qp { | |||
| 75 | }; | 76 | }; |
| 76 | 77 | ||
| 77 | struct cxio_hal_resource { | 78 | struct cxio_hal_resource { |
| 78 | struct kfifo *tpt_fifo; | 79 | struct kfifo tpt_fifo; |
| 79 | spinlock_t tpt_fifo_lock; | 80 | spinlock_t tpt_fifo_lock; |
| 80 | struct kfifo *qpid_fifo; | 81 | struct kfifo qpid_fifo; |
| 81 | spinlock_t qpid_fifo_lock; | 82 | spinlock_t qpid_fifo_lock; |
| 82 | struct kfifo *cqid_fifo; | 83 | struct kfifo cqid_fifo; |
| 83 | spinlock_t cqid_fifo_lock; | 84 | spinlock_t cqid_fifo_lock; |
| 84 | struct kfifo *pdid_fifo; | 85 | struct kfifo pdid_fifo; |
| 85 | spinlock_t pdid_fifo_lock; | 86 | spinlock_t pdid_fifo_lock; |
| 86 | }; | 87 | }; |
| 87 | 88 | ||
diff --git a/drivers/infiniband/hw/cxgb3/cxio_resource.c b/drivers/infiniband/hw/cxgb3/cxio_resource.c index bd233c087653..31f9201b2980 100644 --- a/drivers/infiniband/hw/cxgb3/cxio_resource.c +++ b/drivers/infiniband/hw/cxgb3/cxio_resource.c | |||
| @@ -39,12 +39,12 @@ | |||
| 39 | #include "cxio_resource.h" | 39 | #include "cxio_resource.h" |
| 40 | #include "cxio_hal.h" | 40 | #include "cxio_hal.h" |
| 41 | 41 | ||
| 42 | static struct kfifo *rhdl_fifo; | 42 | static struct kfifo rhdl_fifo; |
| 43 | static spinlock_t rhdl_fifo_lock; | 43 | static spinlock_t rhdl_fifo_lock; |
| 44 | 44 | ||
| 45 | #define RANDOM_SIZE 16 | 45 | #define RANDOM_SIZE 16 |
| 46 | 46 | ||
| 47 | static int __cxio_init_resource_fifo(struct kfifo **fifo, | 47 | static int __cxio_init_resource_fifo(struct kfifo *fifo, |
| 48 | spinlock_t *fifo_lock, | 48 | spinlock_t *fifo_lock, |
| 49 | u32 nr, u32 skip_low, | 49 | u32 nr, u32 skip_low, |
| 50 | u32 skip_high, | 50 | u32 skip_high, |
| @@ -55,12 +55,11 @@ static int __cxio_init_resource_fifo(struct kfifo **fifo, | |||
| 55 | u32 rarray[16]; | 55 | u32 rarray[16]; |
| 56 | spin_lock_init(fifo_lock); | 56 | spin_lock_init(fifo_lock); |
| 57 | 57 | ||
| 58 | *fifo = kfifo_alloc(nr * sizeof(u32), GFP_KERNEL, fifo_lock); | 58 | if (kfifo_alloc(fifo, nr * sizeof(u32), GFP_KERNEL)) |
| 59 | if (IS_ERR(*fifo)) | ||
| 60 | return -ENOMEM; | 59 | return -ENOMEM; |
| 61 | 60 | ||
| 62 | for (i = 0; i < skip_low + skip_high; i++) | 61 | for (i = 0; i < skip_low + skip_high; i++) |
| 63 | __kfifo_put(*fifo, (unsigned char *) &entry, sizeof(u32)); | 62 | kfifo_in(fifo, (unsigned char *) &entry, sizeof(u32)); |
| 64 | if (random) { | 63 | if (random) { |
| 65 | j = 0; | 64 | j = 0; |
| 66 | random_bytes = random32(); | 65 | random_bytes = random32(); |
| @@ -72,33 +71,35 @@ static int __cxio_init_resource_fifo(struct kfifo **fifo, | |||
| 72 | random_bytes = random32(); | 71 | random_bytes = random32(); |
| 73 | } | 72 | } |
| 74 | idx = (random_bytes >> (j * 2)) & 0xF; | 73 | idx = (random_bytes >> (j * 2)) & 0xF; |
| 75 | __kfifo_put(*fifo, | 74 | kfifo_in(fifo, |
| 76 | (unsigned char *) &rarray[idx], | 75 | (unsigned char *) &rarray[idx], |
| 77 | sizeof(u32)); | 76 | sizeof(u32)); |
| 78 | rarray[idx] = i; | 77 | rarray[idx] = i; |
| 79 | j++; | 78 | j++; |
| 80 | } | 79 | } |
| 81 | for (i = 0; i < RANDOM_SIZE; i++) | 80 | for (i = 0; i < RANDOM_SIZE; i++) |
| 82 | __kfifo_put(*fifo, | 81 | kfifo_in(fifo, |
| 83 | (unsigned char *) &rarray[i], | 82 | (unsigned char *) &rarray[i], |
| 84 | sizeof(u32)); | 83 | sizeof(u32)); |
| 85 | } else | 84 | } else |
| 86 | for (i = skip_low; i < nr - skip_high; i++) | 85 | for (i = skip_low; i < nr - skip_high; i++) |
| 87 | __kfifo_put(*fifo, (unsigned char *) &i, sizeof(u32)); | 86 | kfifo_in(fifo, (unsigned char *) &i, sizeof(u32)); |
| 88 | 87 | ||
| 89 | for (i = 0; i < skip_low + skip_high; i++) | 88 | for (i = 0; i < skip_low + skip_high; i++) |
| 90 | kfifo_get(*fifo, (unsigned char *) &entry, sizeof(u32)); | 89 | if (kfifo_out_locked(fifo, (unsigned char *) &entry, |
| 90 | sizeof(u32), fifo_lock) != sizeof(u32)) | ||
| 91 | break; | ||
| 91 | return 0; | 92 | return 0; |
| 92 | } | 93 | } |
| 93 | 94 | ||
| 94 | static int cxio_init_resource_fifo(struct kfifo **fifo, spinlock_t * fifo_lock, | 95 | static int cxio_init_resource_fifo(struct kfifo *fifo, spinlock_t * fifo_lock, |
| 95 | u32 nr, u32 skip_low, u32 skip_high) | 96 | u32 nr, u32 skip_low, u32 skip_high) |
| 96 | { | 97 | { |
| 97 | return (__cxio_init_resource_fifo(fifo, fifo_lock, nr, skip_low, | 98 | return (__cxio_init_resource_fifo(fifo, fifo_lock, nr, skip_low, |
| 98 | skip_high, 0)); | 99 | skip_high, 0)); |
| 99 | } | 100 | } |
| 100 | 101 | ||
| 101 | static int cxio_init_resource_fifo_random(struct kfifo **fifo, | 102 | static int cxio_init_resource_fifo_random(struct kfifo *fifo, |
| 102 | spinlock_t * fifo_lock, | 103 | spinlock_t * fifo_lock, |
| 103 | u32 nr, u32 skip_low, u32 skip_high) | 104 | u32 nr, u32 skip_low, u32 skip_high) |
| 104 | { | 105 | { |
| @@ -113,15 +114,13 @@ static int cxio_init_qpid_fifo(struct cxio_rdev *rdev_p) | |||
| 113 | 114 | ||
| 114 | spin_lock_init(&rdev_p->rscp->qpid_fifo_lock); | 115 | spin_lock_init(&rdev_p->rscp->qpid_fifo_lock); |
| 115 | 116 | ||
| 116 | rdev_p->rscp->qpid_fifo = kfifo_alloc(T3_MAX_NUM_QP * sizeof(u32), | 117 | if (kfifo_alloc(&rdev_p->rscp->qpid_fifo, T3_MAX_NUM_QP * sizeof(u32), |
| 117 | GFP_KERNEL, | 118 | GFP_KERNEL)) |
| 118 | &rdev_p->rscp->qpid_fifo_lock); | ||
| 119 | if (IS_ERR(rdev_p->rscp->qpid_fifo)) | ||
| 120 | return -ENOMEM; | 119 | return -ENOMEM; |
| 121 | 120 | ||
| 122 | for (i = 16; i < T3_MAX_NUM_QP; i++) | 121 | for (i = 16; i < T3_MAX_NUM_QP; i++) |
| 123 | if (!(i & rdev_p->qpmask)) | 122 | if (!(i & rdev_p->qpmask)) |
| 124 | __kfifo_put(rdev_p->rscp->qpid_fifo, | 123 | kfifo_in(&rdev_p->rscp->qpid_fifo, |
| 125 | (unsigned char *) &i, sizeof(u32)); | 124 | (unsigned char *) &i, sizeof(u32)); |
| 126 | return 0; | 125 | return 0; |
| 127 | } | 126 | } |
| @@ -134,7 +133,7 @@ int cxio_hal_init_rhdl_resource(u32 nr_rhdl) | |||
| 134 | 133 | ||
| 135 | void cxio_hal_destroy_rhdl_resource(void) | 134 | void cxio_hal_destroy_rhdl_resource(void) |
| 136 | { | 135 | { |
| 137 | kfifo_free(rhdl_fifo); | 136 | kfifo_free(&rhdl_fifo); |
| 138 | } | 137 | } |
| 139 | 138 | ||
| 140 | /* nr_* must be power of 2 */ | 139 | /* nr_* must be power of 2 */ |
| @@ -167,11 +166,11 @@ int cxio_hal_init_resource(struct cxio_rdev *rdev_p, | |||
| 167 | goto pdid_err; | 166 | goto pdid_err; |
| 168 | return 0; | 167 | return 0; |
| 169 | pdid_err: | 168 | pdid_err: |
| 170 | kfifo_free(rscp->cqid_fifo); | 169 | kfifo_free(&rscp->cqid_fifo); |
| 171 | cqid_err: | 170 | cqid_err: |
| 172 | kfifo_free(rscp->qpid_fifo); | 171 | kfifo_free(&rscp->qpid_fifo); |
| 173 | qpid_err: | 172 | qpid_err: |
| 174 | kfifo_free(rscp->tpt_fifo); | 173 | kfifo_free(&rscp->tpt_fifo); |
| 175 | tpt_err: | 174 | tpt_err: |
| 176 | return -ENOMEM; | 175 | return -ENOMEM; |
| 177 | } | 176 | } |
| @@ -179,33 +178,37 @@ tpt_err: | |||
| 179 | /* | 178 | /* |
| 180 | * returns 0 if no resource available | 179 | * returns 0 if no resource available |
| 181 | */ | 180 | */ |
| 182 | static u32 cxio_hal_get_resource(struct kfifo *fifo) | 181 | static u32 cxio_hal_get_resource(struct kfifo *fifo, spinlock_t * lock) |
| 183 | { | 182 | { |
| 184 | u32 entry; | 183 | u32 entry; |
| 185 | if (kfifo_get(fifo, (unsigned char *) &entry, sizeof(u32))) | 184 | if (kfifo_out_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock)) |
| 186 | return entry; | 185 | return entry; |
| 187 | else | 186 | else |
| 188 | return 0; /* fifo emptry */ | 187 | return 0; /* fifo emptry */ |
| 189 | } | 188 | } |
| 190 | 189 | ||
| 191 | static void cxio_hal_put_resource(struct kfifo *fifo, u32 entry) | 190 | static void cxio_hal_put_resource(struct kfifo *fifo, spinlock_t * lock, |
| 191 | u32 entry) | ||
| 192 | { | 192 | { |
| 193 | BUG_ON(kfifo_put(fifo, (unsigned char *) &entry, sizeof(u32)) == 0); | 193 | BUG_ON( |
| 194 | kfifo_in_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock) | ||
| 195 | == 0); | ||
| 194 | } | 196 | } |
| 195 | 197 | ||
| 196 | u32 cxio_hal_get_stag(struct cxio_hal_resource *rscp) | 198 | u32 cxio_hal_get_stag(struct cxio_hal_resource *rscp) |
| 197 | { | 199 | { |
| 198 | return cxio_hal_get_resource(rscp->tpt_fifo); | 200 | return cxio_hal_get_resource(&rscp->tpt_fifo, &rscp->tpt_fifo_lock); |
| 199 | } | 201 | } |
| 200 | 202 | ||
| 201 | void cxio_hal_put_stag(struct cxio_hal_resource *rscp, u32 stag) | 203 | void cxio_hal_put_stag(struct cxio_hal_resource *rscp, u32 stag) |
| 202 | { | 204 | { |
| 203 | cxio_hal_put_resource(rscp->tpt_fifo, stag); | 205 | cxio_hal_put_resource(&rscp->tpt_fifo, &rscp->tpt_fifo_lock, stag); |
| 204 | } | 206 | } |
| 205 | 207 | ||
| 206 | u32 cxio_hal_get_qpid(struct cxio_hal_resource *rscp) | 208 | u32 cxio_hal_get_qpid(struct cxio_hal_resource *rscp) |
| 207 | { | 209 | { |
| 208 | u32 qpid = cxio_hal_get_resource(rscp->qpid_fifo); | 210 | u32 qpid = cxio_hal_get_resource(&rscp->qpid_fifo, |
| 211 | &rscp->qpid_fifo_lock); | ||
| 209 | PDBG("%s qpid 0x%x\n", __func__, qpid); | 212 | PDBG("%s qpid 0x%x\n", __func__, qpid); |
| 210 | return qpid; | 213 | return qpid; |
| 211 | } | 214 | } |
| @@ -213,35 +216,35 @@ u32 cxio_hal_get_qpid(struct cxio_hal_resource *rscp) | |||
| 213 | void cxio_hal_put_qpid(struct cxio_hal_resource *rscp, u32 qpid) | 216 | void cxio_hal_put_qpid(struct cxio_hal_resource *rscp, u32 qpid) |
| 214 | { | 217 | { |
| 215 | PDBG("%s qpid 0x%x\n", __func__, qpid); | 218 | PDBG("%s qpid 0x%x\n", __func__, qpid); |
| 216 | cxio_hal_put_resource(rscp->qpid_fifo, qpid); | 219 | cxio_hal_put_resource(&rscp->qpid_fifo, &rscp->qpid_fifo_lock, qpid); |
| 217 | } | 220 | } |
| 218 | 221 | ||
| 219 | u32 cxio_hal_get_cqid(struct cxio_hal_resource *rscp) | 222 | u32 cxio_hal_get_cqid(struct cxio_hal_resource *rscp) |
| 220 | { | 223 | { |
| 221 | return cxio_hal_get_resource(rscp->cqid_fifo); | 224 | return cxio_hal_get_resource(&rscp->cqid_fifo, &rscp->cqid_fifo_lock); |
| 222 | } | 225 | } |
| 223 | 226 | ||
| 224 | void cxio_hal_put_cqid(struct cxio_hal_resource *rscp, u32 cqid) | 227 | void cxio_hal_put_cqid(struct cxio_hal_resource *rscp, u32 cqid) |
| 225 | { | 228 | { |
| 226 | cxio_hal_put_resource(rscp->cqid_fifo, cqid); | 229 | cxio_hal_put_resource(&rscp->cqid_fifo, &rscp->cqid_fifo_lock, cqid); |
| 227 | } | 230 | } |
| 228 | 231 | ||
| 229 | u32 cxio_hal_get_pdid(struct cxio_hal_resource *rscp) | 232 | u32 cxio_hal_get_pdid(struct cxio_hal_resource *rscp) |
| 230 | { | 233 | { |
| 231 | return cxio_hal_get_resource(rscp->pdid_fifo); | 234 | return cxio_hal_get_resource(&rscp->pdid_fifo, &rscp->pdid_fifo_lock); |
| 232 | } | 235 | } |
| 233 | 236 | ||
| 234 | void cxio_hal_put_pdid(struct cxio_hal_resource *rscp, u32 pdid) | 237 | void cxio_hal_put_pdid(struct cxio_hal_resource *rscp, u32 pdid) |
| 235 | { | 238 | { |
| 236 | cxio_hal_put_resource(rscp->pdid_fifo, pdid); | 239 | cxio_hal_put_resource(&rscp->pdid_fifo, &rscp->pdid_fifo_lock, pdid); |
| 237 | } | 240 | } |
| 238 | 241 | ||
| 239 | void cxio_hal_destroy_resource(struct cxio_hal_resource *rscp) | 242 | void cxio_hal_destroy_resource(struct cxio_hal_resource *rscp) |
| 240 | { | 243 | { |
| 241 | kfifo_free(rscp->tpt_fifo); | 244 | kfifo_free(&rscp->tpt_fifo); |
| 242 | kfifo_free(rscp->cqid_fifo); | 245 | kfifo_free(&rscp->cqid_fifo); |
| 243 | kfifo_free(rscp->qpid_fifo); | 246 | kfifo_free(&rscp->qpid_fifo); |
| 244 | kfifo_free(rscp->pdid_fifo); | 247 | kfifo_free(&rscp->pdid_fifo); |
| 245 | kfree(rscp); | 248 | kfree(rscp); |
| 246 | } | 249 | } |
| 247 | 250 | ||
diff --git a/drivers/media/video/cx23885/cx23888-ir.c b/drivers/media/video/cx23885/cx23888-ir.c index 3ccc8afeccf3..2bf57a4527d3 100644 --- a/drivers/media/video/cx23885/cx23888-ir.c +++ b/drivers/media/video/cx23885/cx23888-ir.c | |||
| @@ -124,15 +124,12 @@ struct cx23888_ir_state { | |||
| 124 | atomic_t rxclk_divider; | 124 | atomic_t rxclk_divider; |
| 125 | atomic_t rx_invert; | 125 | atomic_t rx_invert; |
| 126 | 126 | ||
| 127 | struct kfifo *rx_kfifo; | 127 | struct kfifo rx_kfifo; |
| 128 | spinlock_t rx_kfifo_lock; | 128 | spinlock_t rx_kfifo_lock; |
| 129 | 129 | ||
| 130 | struct v4l2_subdev_ir_parameters tx_params; | 130 | struct v4l2_subdev_ir_parameters tx_params; |
| 131 | struct mutex tx_params_lock; | 131 | struct mutex tx_params_lock; |
| 132 | atomic_t txclk_divider; | 132 | atomic_t txclk_divider; |
| 133 | |||
| 134 | struct kfifo *tx_kfifo; | ||
| 135 | spinlock_t tx_kfifo_lock; | ||
| 136 | }; | 133 | }; |
| 137 | 134 | ||
| 138 | static inline struct cx23888_ir_state *to_state(struct v4l2_subdev *sd) | 135 | static inline struct cx23888_ir_state *to_state(struct v4l2_subdev *sd) |
| @@ -522,6 +519,7 @@ static int cx23888_ir_irq_handler(struct v4l2_subdev *sd, u32 status, | |||
| 522 | { | 519 | { |
| 523 | struct cx23888_ir_state *state = to_state(sd); | 520 | struct cx23888_ir_state *state = to_state(sd); |
| 524 | struct cx23885_dev *dev = state->dev; | 521 | struct cx23885_dev *dev = state->dev; |
| 522 | unsigned long flags; | ||
| 525 | 523 | ||
| 526 | u32 cntrl = cx23888_ir_read4(dev, CX23888_IR_CNTRL_REG); | 524 | u32 cntrl = cx23888_ir_read4(dev, CX23888_IR_CNTRL_REG); |
| 527 | u32 irqen = cx23888_ir_read4(dev, CX23888_IR_IRQEN_REG); | 525 | u32 irqen = cx23888_ir_read4(dev, CX23888_IR_IRQEN_REG); |
| @@ -594,8 +592,9 @@ static int cx23888_ir_irq_handler(struct v4l2_subdev *sd, u32 status, | |||
| 594 | if (i == 0) | 592 | if (i == 0) |
| 595 | break; | 593 | break; |
| 596 | j = i * sizeof(u32); | 594 | j = i * sizeof(u32); |
| 597 | k = kfifo_put(state->rx_kfifo, | 595 | k = kfifo_in_locked(&state->rx_kfifo, |
| 598 | (unsigned char *) rx_data, j); | 596 | (unsigned char *) rx_data, j, |
| 597 | &state->rx_kfifo_lock); | ||
| 599 | if (k != j) | 598 | if (k != j) |
| 600 | kror++; /* rx_kfifo over run */ | 599 | kror++; /* rx_kfifo over run */ |
| 601 | } | 600 | } |
| @@ -631,8 +630,11 @@ static int cx23888_ir_irq_handler(struct v4l2_subdev *sd, u32 status, | |||
| 631 | cx23888_ir_write4(dev, CX23888_IR_CNTRL_REG, cntrl); | 630 | cx23888_ir_write4(dev, CX23888_IR_CNTRL_REG, cntrl); |
| 632 | *handled = true; | 631 | *handled = true; |
| 633 | } | 632 | } |
| 634 | if (kfifo_len(state->rx_kfifo) >= CX23888_IR_RX_KFIFO_SIZE / 2) | 633 | |
| 634 | spin_lock_irqsave(&state->rx_kfifo_lock, flags); | ||
| 635 | if (kfifo_len(&state->rx_kfifo) >= CX23888_IR_RX_KFIFO_SIZE / 2) | ||
| 635 | events |= V4L2_SUBDEV_IR_RX_FIFO_SERVICE_REQ; | 636 | events |= V4L2_SUBDEV_IR_RX_FIFO_SERVICE_REQ; |
| 637 | spin_unlock_irqrestore(&state->rx_kfifo_lock, flags); | ||
| 636 | 638 | ||
| 637 | if (events) | 639 | if (events) |
| 638 | v4l2_subdev_notify(sd, V4L2_SUBDEV_IR_RX_NOTIFY, &events); | 640 | v4l2_subdev_notify(sd, V4L2_SUBDEV_IR_RX_NOTIFY, &events); |
| @@ -657,7 +659,7 @@ static int cx23888_ir_rx_read(struct v4l2_subdev *sd, u8 *buf, size_t count, | |||
| 657 | return 0; | 659 | return 0; |
| 658 | } | 660 | } |
| 659 | 661 | ||
| 660 | n = kfifo_get(state->rx_kfifo, buf, n); | 662 | n = kfifo_out_locked(&state->rx_kfifo, buf, n, &state->rx_kfifo_lock); |
| 661 | 663 | ||
| 662 | n /= sizeof(u32); | 664 | n /= sizeof(u32); |
| 663 | *num = n * sizeof(u32); | 665 | *num = n * sizeof(u32); |
| @@ -785,7 +787,12 @@ static int cx23888_ir_rx_s_parameters(struct v4l2_subdev *sd, | |||
| 785 | o->interrupt_enable = p->interrupt_enable; | 787 | o->interrupt_enable = p->interrupt_enable; |
| 786 | o->enable = p->enable; | 788 | o->enable = p->enable; |
| 787 | if (p->enable) { | 789 | if (p->enable) { |
| 788 | kfifo_reset(state->rx_kfifo); | 790 | unsigned long flags; |
| 791 | |||
| 792 | spin_lock_irqsave(&state->rx_kfifo_lock, flags); | ||
| 793 | kfifo_reset(&state->rx_kfifo); | ||
| 794 | /* reset tx_fifo too if there is one... */ | ||
| 795 | spin_unlock_irqrestore(&state->rx_kfifo_lock, flags); | ||
| 789 | if (p->interrupt_enable) | 796 | if (p->interrupt_enable) |
| 790 | irqenable_rx(dev, IRQEN_RSE | IRQEN_RTE | IRQEN_ROE); | 797 | irqenable_rx(dev, IRQEN_RSE | IRQEN_RTE | IRQEN_ROE); |
| 791 | control_rx_enable(dev, p->enable); | 798 | control_rx_enable(dev, p->enable); |
| @@ -892,7 +899,6 @@ static int cx23888_ir_tx_s_parameters(struct v4l2_subdev *sd, | |||
| 892 | o->interrupt_enable = p->interrupt_enable; | 899 | o->interrupt_enable = p->interrupt_enable; |
| 893 | o->enable = p->enable; | 900 | o->enable = p->enable; |
| 894 | if (p->enable) { | 901 | if (p->enable) { |
| 895 | kfifo_reset(state->tx_kfifo); | ||
| 896 | if (p->interrupt_enable) | 902 | if (p->interrupt_enable) |
| 897 | irqenable_tx(dev, IRQEN_TSE); | 903 | irqenable_tx(dev, IRQEN_TSE); |
| 898 | control_tx_enable(dev, p->enable); | 904 | control_tx_enable(dev, p->enable); |
| @@ -1168,18 +1174,8 @@ int cx23888_ir_probe(struct cx23885_dev *dev) | |||
| 1168 | return -ENOMEM; | 1174 | return -ENOMEM; |
| 1169 | 1175 | ||
| 1170 | spin_lock_init(&state->rx_kfifo_lock); | 1176 | spin_lock_init(&state->rx_kfifo_lock); |
| 1171 | state->rx_kfifo = kfifo_alloc(CX23888_IR_RX_KFIFO_SIZE, GFP_KERNEL, | 1177 | if (kfifo_alloc(&state->rx_kfifo, CX23888_IR_RX_KFIFO_SIZE, GFP_KERNEL)) |
| 1172 | &state->rx_kfifo_lock); | ||
| 1173 | if (state->rx_kfifo == NULL) | ||
| 1174 | return -ENOMEM; | ||
| 1175 | |||
| 1176 | spin_lock_init(&state->tx_kfifo_lock); | ||
| 1177 | state->tx_kfifo = kfifo_alloc(CX23888_IR_TX_KFIFO_SIZE, GFP_KERNEL, | ||
| 1178 | &state->tx_kfifo_lock); | ||
| 1179 | if (state->tx_kfifo == NULL) { | ||
| 1180 | kfifo_free(state->rx_kfifo); | ||
| 1181 | return -ENOMEM; | 1178 | return -ENOMEM; |
| 1182 | } | ||
| 1183 | 1179 | ||
| 1184 | state->dev = dev; | 1180 | state->dev = dev; |
| 1185 | state->id = V4L2_IDENT_CX23888_IR; | 1181 | state->id = V4L2_IDENT_CX23888_IR; |
| @@ -1211,8 +1207,7 @@ int cx23888_ir_probe(struct cx23885_dev *dev) | |||
| 1211 | sizeof(struct v4l2_subdev_ir_parameters)); | 1207 | sizeof(struct v4l2_subdev_ir_parameters)); |
| 1212 | v4l2_subdev_call(sd, ir, tx_s_parameters, &default_params); | 1208 | v4l2_subdev_call(sd, ir, tx_s_parameters, &default_params); |
| 1213 | } else { | 1209 | } else { |
| 1214 | kfifo_free(state->rx_kfifo); | 1210 | kfifo_free(&state->rx_kfifo); |
| 1215 | kfifo_free(state->tx_kfifo); | ||
| 1216 | } | 1211 | } |
| 1217 | return ret; | 1212 | return ret; |
| 1218 | } | 1213 | } |
| @@ -1231,8 +1226,7 @@ int cx23888_ir_remove(struct cx23885_dev *dev) | |||
| 1231 | 1226 | ||
| 1232 | state = to_state(sd); | 1227 | state = to_state(sd); |
| 1233 | v4l2_device_unregister_subdev(sd); | 1228 | v4l2_device_unregister_subdev(sd); |
| 1234 | kfifo_free(state->rx_kfifo); | 1229 | kfifo_free(&state->rx_kfifo); |
| 1235 | kfifo_free(state->tx_kfifo); | ||
| 1236 | kfree(state); | 1230 | kfree(state); |
| 1237 | /* Nothing more to free() as state held the actual v4l2_subdev object */ | 1231 | /* Nothing more to free() as state held the actual v4l2_subdev object */ |
| 1238 | return 0; | 1232 | return 0; |
diff --git a/drivers/media/video/meye.c b/drivers/media/video/meye.c index 6ffa64cd1c6d..b421858ccf90 100644 --- a/drivers/media/video/meye.c +++ b/drivers/media/video/meye.c | |||
| @@ -800,8 +800,8 @@ again: | |||
| 800 | return IRQ_HANDLED; | 800 | return IRQ_HANDLED; |
| 801 | 801 | ||
| 802 | if (meye.mchip_mode == MCHIP_HIC_MODE_CONT_OUT) { | 802 | if (meye.mchip_mode == MCHIP_HIC_MODE_CONT_OUT) { |
| 803 | if (kfifo_get(meye.grabq, (unsigned char *)&reqnr, | 803 | if (kfifo_out_locked(&meye.grabq, (unsigned char *)&reqnr, |
| 804 | sizeof(int)) != sizeof(int)) { | 804 | sizeof(int), &meye.grabq_lock) != sizeof(int)) { |
| 805 | mchip_free_frame(); | 805 | mchip_free_frame(); |
| 806 | return IRQ_HANDLED; | 806 | return IRQ_HANDLED; |
| 807 | } | 807 | } |
| @@ -811,7 +811,8 @@ again: | |||
| 811 | meye.grab_buffer[reqnr].state = MEYE_BUF_DONE; | 811 | meye.grab_buffer[reqnr].state = MEYE_BUF_DONE; |
| 812 | do_gettimeofday(&meye.grab_buffer[reqnr].timestamp); | 812 | do_gettimeofday(&meye.grab_buffer[reqnr].timestamp); |
| 813 | meye.grab_buffer[reqnr].sequence = sequence++; | 813 | meye.grab_buffer[reqnr].sequence = sequence++; |
| 814 | kfifo_put(meye.doneq, (unsigned char *)&reqnr, sizeof(int)); | 814 | kfifo_in_locked(&meye.doneq, (unsigned char *)&reqnr, |
| 815 | sizeof(int), &meye.doneq_lock); | ||
| 815 | wake_up_interruptible(&meye.proc_list); | 816 | wake_up_interruptible(&meye.proc_list); |
| 816 | } else { | 817 | } else { |
| 817 | int size; | 818 | int size; |
| @@ -820,8 +821,8 @@ again: | |||
| 820 | mchip_free_frame(); | 821 | mchip_free_frame(); |
| 821 | goto again; | 822 | goto again; |
| 822 | } | 823 | } |
| 823 | if (kfifo_get(meye.grabq, (unsigned char *)&reqnr, | 824 | if (kfifo_out_locked(&meye.grabq, (unsigned char *)&reqnr, |
| 824 | sizeof(int)) != sizeof(int)) { | 825 | sizeof(int), &meye.grabq_lock) != sizeof(int)) { |
| 825 | mchip_free_frame(); | 826 | mchip_free_frame(); |
| 826 | goto again; | 827 | goto again; |
| 827 | } | 828 | } |
| @@ -831,7 +832,8 @@ again: | |||
| 831 | meye.grab_buffer[reqnr].state = MEYE_BUF_DONE; | 832 | meye.grab_buffer[reqnr].state = MEYE_BUF_DONE; |
| 832 | do_gettimeofday(&meye.grab_buffer[reqnr].timestamp); | 833 | do_gettimeofday(&meye.grab_buffer[reqnr].timestamp); |
| 833 | meye.grab_buffer[reqnr].sequence = sequence++; | 834 | meye.grab_buffer[reqnr].sequence = sequence++; |
| 834 | kfifo_put(meye.doneq, (unsigned char *)&reqnr, sizeof(int)); | 835 | kfifo_in_locked(&meye.doneq, (unsigned char *)&reqnr, |
| 836 | sizeof(int), &meye.doneq_lock); | ||
| 835 | wake_up_interruptible(&meye.proc_list); | 837 | wake_up_interruptible(&meye.proc_list); |
| 836 | } | 838 | } |
| 837 | mchip_free_frame(); | 839 | mchip_free_frame(); |
| @@ -859,8 +861,8 @@ static int meye_open(struct file *file) | |||
| 859 | 861 | ||
| 860 | for (i = 0; i < MEYE_MAX_BUFNBRS; i++) | 862 | for (i = 0; i < MEYE_MAX_BUFNBRS; i++) |
| 861 | meye.grab_buffer[i].state = MEYE_BUF_UNUSED; | 863 | meye.grab_buffer[i].state = MEYE_BUF_UNUSED; |
| 862 | kfifo_reset(meye.grabq); | 864 | kfifo_reset(&meye.grabq); |
| 863 | kfifo_reset(meye.doneq); | 865 | kfifo_reset(&meye.doneq); |
| 864 | return 0; | 866 | return 0; |
| 865 | } | 867 | } |
| 866 | 868 | ||
| @@ -933,7 +935,8 @@ static int meyeioc_qbuf_capt(int *nb) | |||
| 933 | mchip_cont_compression_start(); | 935 | mchip_cont_compression_start(); |
| 934 | 936 | ||
| 935 | meye.grab_buffer[*nb].state = MEYE_BUF_USING; | 937 | meye.grab_buffer[*nb].state = MEYE_BUF_USING; |
| 936 | kfifo_put(meye.grabq, (unsigned char *)nb, sizeof(int)); | 938 | kfifo_in_locked(&meye.grabq, (unsigned char *)nb, sizeof(int), |
| 939 | &meye.grabq_lock); | ||
| 937 | mutex_unlock(&meye.lock); | 940 | mutex_unlock(&meye.lock); |
| 938 | 941 | ||
| 939 | return 0; | 942 | return 0; |
| @@ -965,7 +968,9 @@ static int meyeioc_sync(struct file *file, void *fh, int *i) | |||
| 965 | /* fall through */ | 968 | /* fall through */ |
| 966 | case MEYE_BUF_DONE: | 969 | case MEYE_BUF_DONE: |
| 967 | meye.grab_buffer[*i].state = MEYE_BUF_UNUSED; | 970 | meye.grab_buffer[*i].state = MEYE_BUF_UNUSED; |
| 968 | kfifo_get(meye.doneq, (unsigned char *)&unused, sizeof(int)); | 971 | if (kfifo_out_locked(&meye.doneq, (unsigned char *)&unused, |
| 972 | sizeof(int), &meye.doneq_lock) != sizeof(int)) | ||
| 973 | break; | ||
| 969 | } | 974 | } |
| 970 | *i = meye.grab_buffer[*i].size; | 975 | *i = meye.grab_buffer[*i].size; |
| 971 | mutex_unlock(&meye.lock); | 976 | mutex_unlock(&meye.lock); |
| @@ -1452,7 +1457,8 @@ static int vidioc_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf) | |||
| 1452 | buf->flags |= V4L2_BUF_FLAG_QUEUED; | 1457 | buf->flags |= V4L2_BUF_FLAG_QUEUED; |
| 1453 | buf->flags &= ~V4L2_BUF_FLAG_DONE; | 1458 | buf->flags &= ~V4L2_BUF_FLAG_DONE; |
| 1454 | meye.grab_buffer[buf->index].state = MEYE_BUF_USING; | 1459 | meye.grab_buffer[buf->index].state = MEYE_BUF_USING; |
| 1455 | kfifo_put(meye.grabq, (unsigned char *)&buf->index, sizeof(int)); | 1460 | kfifo_in_locked(&meye.grabq, (unsigned char *)&buf->index, |
| 1461 | sizeof(int), &meye.grabq_lock); | ||
| 1456 | mutex_unlock(&meye.lock); | 1462 | mutex_unlock(&meye.lock); |
| 1457 | 1463 | ||
| 1458 | return 0; | 1464 | return 0; |
| @@ -1467,19 +1473,19 @@ static int vidioc_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf) | |||
| 1467 | 1473 | ||
| 1468 | mutex_lock(&meye.lock); | 1474 | mutex_lock(&meye.lock); |
| 1469 | 1475 | ||
| 1470 | if (kfifo_len(meye.doneq) == 0 && file->f_flags & O_NONBLOCK) { | 1476 | if (kfifo_len(&meye.doneq) == 0 && file->f_flags & O_NONBLOCK) { |
| 1471 | mutex_unlock(&meye.lock); | 1477 | mutex_unlock(&meye.lock); |
| 1472 | return -EAGAIN; | 1478 | return -EAGAIN; |
| 1473 | } | 1479 | } |
| 1474 | 1480 | ||
| 1475 | if (wait_event_interruptible(meye.proc_list, | 1481 | if (wait_event_interruptible(meye.proc_list, |
| 1476 | kfifo_len(meye.doneq) != 0) < 0) { | 1482 | kfifo_len(&meye.doneq) != 0) < 0) { |
| 1477 | mutex_unlock(&meye.lock); | 1483 | mutex_unlock(&meye.lock); |
| 1478 | return -EINTR; | 1484 | return -EINTR; |
| 1479 | } | 1485 | } |
| 1480 | 1486 | ||
| 1481 | if (!kfifo_get(meye.doneq, (unsigned char *)&reqnr, | 1487 | if (!kfifo_out_locked(&meye.doneq, (unsigned char *)&reqnr, |
| 1482 | sizeof(int))) { | 1488 | sizeof(int), &meye.doneq_lock)) { |
| 1483 | mutex_unlock(&meye.lock); | 1489 | mutex_unlock(&meye.lock); |
| 1484 | return -EBUSY; | 1490 | return -EBUSY; |
| 1485 | } | 1491 | } |
| @@ -1529,8 +1535,8 @@ static int vidioc_streamoff(struct file *file, void *fh, enum v4l2_buf_type i) | |||
| 1529 | { | 1535 | { |
| 1530 | mutex_lock(&meye.lock); | 1536 | mutex_lock(&meye.lock); |
| 1531 | mchip_hic_stop(); | 1537 | mchip_hic_stop(); |
| 1532 | kfifo_reset(meye.grabq); | 1538 | kfifo_reset(&meye.grabq); |
| 1533 | kfifo_reset(meye.doneq); | 1539 | kfifo_reset(&meye.doneq); |
| 1534 | 1540 | ||
| 1535 | for (i = 0; i < MEYE_MAX_BUFNBRS; i++) | 1541 | for (i = 0; i < MEYE_MAX_BUFNBRS; i++) |
| 1536 | meye.grab_buffer[i].state = MEYE_BUF_UNUSED; | 1542 | meye.grab_buffer[i].state = MEYE_BUF_UNUSED; |
| @@ -1572,7 +1578,7 @@ static unsigned int meye_poll(struct file *file, poll_table *wait) | |||
| 1572 | 1578 | ||
| 1573 | mutex_lock(&meye.lock); | 1579 | mutex_lock(&meye.lock); |
| 1574 | poll_wait(file, &meye.proc_list, wait); | 1580 | poll_wait(file, &meye.proc_list, wait); |
| 1575 | if (kfifo_len(meye.doneq)) | 1581 | if (kfifo_len(&meye.doneq)) |
| 1576 | res = POLLIN | POLLRDNORM; | 1582 | res = POLLIN | POLLRDNORM; |
| 1577 | mutex_unlock(&meye.lock); | 1583 | mutex_unlock(&meye.lock); |
| 1578 | return res; | 1584 | return res; |
| @@ -1745,16 +1751,14 @@ static int __devinit meye_probe(struct pci_dev *pcidev, | |||
| 1745 | } | 1751 | } |
| 1746 | 1752 | ||
| 1747 | spin_lock_init(&meye.grabq_lock); | 1753 | spin_lock_init(&meye.grabq_lock); |
| 1748 | meye.grabq = kfifo_alloc(sizeof(int) * MEYE_MAX_BUFNBRS, GFP_KERNEL, | 1754 | if (kfifo_alloc(&meye.grabq, sizeof(int) * MEYE_MAX_BUFNBRS, |
| 1749 | &meye.grabq_lock); | 1755 | GFP_KERNEL)) { |
| 1750 | if (IS_ERR(meye.grabq)) { | ||
| 1751 | printk(KERN_ERR "meye: fifo allocation failed\n"); | 1756 | printk(KERN_ERR "meye: fifo allocation failed\n"); |
| 1752 | goto outkfifoalloc1; | 1757 | goto outkfifoalloc1; |
| 1753 | } | 1758 | } |
| 1754 | spin_lock_init(&meye.doneq_lock); | 1759 | spin_lock_init(&meye.doneq_lock); |
| 1755 | meye.doneq = kfifo_alloc(sizeof(int) * MEYE_MAX_BUFNBRS, GFP_KERNEL, | 1760 | if (kfifo_alloc(&meye.doneq, sizeof(int) * MEYE_MAX_BUFNBRS, |
| 1756 | &meye.doneq_lock); | 1761 | GFP_KERNEL)) { |
| 1757 | if (IS_ERR(meye.doneq)) { | ||
| 1758 | printk(KERN_ERR "meye: fifo allocation failed\n"); | 1762 | printk(KERN_ERR "meye: fifo allocation failed\n"); |
| 1759 | goto outkfifoalloc2; | 1763 | goto outkfifoalloc2; |
| 1760 | } | 1764 | } |
| @@ -1868,9 +1872,9 @@ outregions: | |||
| 1868 | outenabledev: | 1872 | outenabledev: |
| 1869 | sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERA, 0); | 1873 | sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERA, 0); |
| 1870 | outsonypienable: | 1874 | outsonypienable: |
| 1871 | kfifo_free(meye.doneq); | 1875 | kfifo_free(&meye.doneq); |
| 1872 | outkfifoalloc2: | 1876 | outkfifoalloc2: |
| 1873 | kfifo_free(meye.grabq); | 1877 | kfifo_free(&meye.grabq); |
| 1874 | outkfifoalloc1: | 1878 | outkfifoalloc1: |
| 1875 | vfree(meye.grab_temp); | 1879 | vfree(meye.grab_temp); |
| 1876 | outvmalloc: | 1880 | outvmalloc: |
| @@ -1901,8 +1905,8 @@ static void __devexit meye_remove(struct pci_dev *pcidev) | |||
| 1901 | 1905 | ||
| 1902 | sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERA, 0); | 1906 | sony_pic_camera_command(SONY_PIC_COMMAND_SETCAMERA, 0); |
| 1903 | 1907 | ||
| 1904 | kfifo_free(meye.doneq); | 1908 | kfifo_free(&meye.doneq); |
| 1905 | kfifo_free(meye.grabq); | 1909 | kfifo_free(&meye.grabq); |
| 1906 | 1910 | ||
| 1907 | vfree(meye.grab_temp); | 1911 | vfree(meye.grab_temp); |
| 1908 | 1912 | ||
diff --git a/drivers/media/video/meye.h b/drivers/media/video/meye.h index 5f70a106ba2b..1321ad5d6597 100644 --- a/drivers/media/video/meye.h +++ b/drivers/media/video/meye.h | |||
| @@ -303,9 +303,9 @@ struct meye { | |||
| 303 | struct meye_grab_buffer grab_buffer[MEYE_MAX_BUFNBRS]; | 303 | struct meye_grab_buffer grab_buffer[MEYE_MAX_BUFNBRS]; |
| 304 | int vma_use_count[MEYE_MAX_BUFNBRS]; /* mmap count */ | 304 | int vma_use_count[MEYE_MAX_BUFNBRS]; /* mmap count */ |
| 305 | struct mutex lock; /* mutex for open/mmap... */ | 305 | struct mutex lock; /* mutex for open/mmap... */ |
| 306 | struct kfifo *grabq; /* queue for buffers to be grabbed */ | 306 | struct kfifo grabq; /* queue for buffers to be grabbed */ |
| 307 | spinlock_t grabq_lock; /* lock protecting the queue */ | 307 | spinlock_t grabq_lock; /* lock protecting the queue */ |
| 308 | struct kfifo *doneq; /* queue for grabbed buffers */ | 308 | struct kfifo doneq; /* queue for grabbed buffers */ |
| 309 | spinlock_t doneq_lock; /* lock protecting the queue */ | 309 | spinlock_t doneq_lock; /* lock protecting the queue */ |
| 310 | wait_queue_head_t proc_list; /* wait queue */ | 310 | wait_queue_head_t proc_list; /* wait queue */ |
| 311 | struct video_device *video_dev; /* video device parameters */ | 311 | struct video_device *video_dev; /* video device parameters */ |
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c index b9b371bfa30f..42611bea76a3 100644 --- a/drivers/net/wireless/libertas/cmd.c +++ b/drivers/net/wireless/libertas/cmd.c | |||
| @@ -1365,7 +1365,7 @@ static void lbs_send_confirmsleep(struct lbs_private *priv) | |||
| 1365 | priv->dnld_sent = DNLD_RES_RECEIVED; | 1365 | priv->dnld_sent = DNLD_RES_RECEIVED; |
| 1366 | 1366 | ||
| 1367 | /* If nothing to do, go back to sleep (?) */ | 1367 | /* If nothing to do, go back to sleep (?) */ |
| 1368 | if (!__kfifo_len(priv->event_fifo) && !priv->resp_len[priv->resp_idx]) | 1368 | if (!kfifo_len(&priv->event_fifo) && !priv->resp_len[priv->resp_idx]) |
| 1369 | priv->psstate = PS_STATE_SLEEP; | 1369 | priv->psstate = PS_STATE_SLEEP; |
| 1370 | 1370 | ||
| 1371 | spin_unlock_irqrestore(&priv->driver_lock, flags); | 1371 | spin_unlock_irqrestore(&priv->driver_lock, flags); |
| @@ -1439,7 +1439,7 @@ void lbs_ps_confirm_sleep(struct lbs_private *priv) | |||
| 1439 | } | 1439 | } |
| 1440 | 1440 | ||
| 1441 | /* Pending events or command responses? */ | 1441 | /* Pending events or command responses? */ |
| 1442 | if (__kfifo_len(priv->event_fifo) || priv->resp_len[priv->resp_idx]) { | 1442 | if (kfifo_len(&priv->event_fifo) || priv->resp_len[priv->resp_idx]) { |
| 1443 | allowed = 0; | 1443 | allowed = 0; |
| 1444 | lbs_deb_host("pending events or command responses\n"); | 1444 | lbs_deb_host("pending events or command responses\n"); |
| 1445 | } | 1445 | } |
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h index 6a8d2b291d8c..05bb298dfae9 100644 --- a/drivers/net/wireless/libertas/dev.h +++ b/drivers/net/wireless/libertas/dev.h | |||
| @@ -10,7 +10,7 @@ | |||
| 10 | #include "scan.h" | 10 | #include "scan.h" |
| 11 | #include "assoc.h" | 11 | #include "assoc.h" |
| 12 | 12 | ||
| 13 | 13 | #include <linux/kfifo.h> | |
| 14 | 14 | ||
| 15 | /** sleep_params */ | 15 | /** sleep_params */ |
| 16 | struct sleep_params { | 16 | struct sleep_params { |
| @@ -120,7 +120,7 @@ struct lbs_private { | |||
| 120 | u32 resp_len[2]; | 120 | u32 resp_len[2]; |
| 121 | 121 | ||
| 122 | /* Events sent from hardware to driver */ | 122 | /* Events sent from hardware to driver */ |
| 123 | struct kfifo *event_fifo; | 123 | struct kfifo event_fifo; |
| 124 | 124 | ||
| 125 | /** thread to service interrupts */ | 125 | /** thread to service interrupts */ |
| 126 | struct task_struct *main_thread; | 126 | struct task_struct *main_thread; |
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c index db38a5a719fa..c2975c8e2f21 100644 --- a/drivers/net/wireless/libertas/main.c +++ b/drivers/net/wireless/libertas/main.c | |||
| @@ -459,7 +459,7 @@ static int lbs_thread(void *data) | |||
| 459 | else if (!list_empty(&priv->cmdpendingq) && | 459 | else if (!list_empty(&priv->cmdpendingq) && |
| 460 | !(priv->wakeup_dev_required)) | 460 | !(priv->wakeup_dev_required)) |
| 461 | shouldsleep = 0; /* We have a command to send */ | 461 | shouldsleep = 0; /* We have a command to send */ |
| 462 | else if (__kfifo_len(priv->event_fifo)) | 462 | else if (kfifo_len(&priv->event_fifo)) |
| 463 | shouldsleep = 0; /* We have an event to process */ | 463 | shouldsleep = 0; /* We have an event to process */ |
| 464 | else | 464 | else |
| 465 | shouldsleep = 1; /* No command */ | 465 | shouldsleep = 1; /* No command */ |
| @@ -511,10 +511,13 @@ static int lbs_thread(void *data) | |||
| 511 | 511 | ||
| 512 | /* Process hardware events, e.g. card removed, link lost */ | 512 | /* Process hardware events, e.g. card removed, link lost */ |
| 513 | spin_lock_irq(&priv->driver_lock); | 513 | spin_lock_irq(&priv->driver_lock); |
| 514 | while (__kfifo_len(priv->event_fifo)) { | 514 | while (kfifo_len(&priv->event_fifo)) { |
| 515 | u32 event; | 515 | u32 event; |
| 516 | __kfifo_get(priv->event_fifo, (unsigned char *) &event, | 516 | |
| 517 | sizeof(event)); | 517 | if (kfifo_out(&priv->event_fifo, |
| 518 | (unsigned char *) &event, sizeof(event)) != | ||
| 519 | sizeof(event)) | ||
| 520 | break; | ||
| 518 | spin_unlock_irq(&priv->driver_lock); | 521 | spin_unlock_irq(&priv->driver_lock); |
| 519 | lbs_process_event(priv, event); | 522 | lbs_process_event(priv, event); |
| 520 | spin_lock_irq(&priv->driver_lock); | 523 | spin_lock_irq(&priv->driver_lock); |
| @@ -883,10 +886,9 @@ static int lbs_init_adapter(struct lbs_private *priv) | |||
| 883 | priv->resp_len[0] = priv->resp_len[1] = 0; | 886 | priv->resp_len[0] = priv->resp_len[1] = 0; |
| 884 | 887 | ||
| 885 | /* Create the event FIFO */ | 888 | /* Create the event FIFO */ |
| 886 | priv->event_fifo = kfifo_alloc(sizeof(u32) * 16, GFP_KERNEL, NULL); | 889 | ret = kfifo_alloc(&priv->event_fifo, sizeof(u32) * 16, GFP_KERNEL); |
| 887 | if (IS_ERR(priv->event_fifo)) { | 890 | if (ret) { |
| 888 | lbs_pr_err("Out of memory allocating event FIFO buffer\n"); | 891 | lbs_pr_err("Out of memory allocating event FIFO buffer\n"); |
| 889 | ret = -ENOMEM; | ||
| 890 | goto out; | 892 | goto out; |
| 891 | } | 893 | } |
| 892 | 894 | ||
| @@ -901,8 +903,7 @@ static void lbs_free_adapter(struct lbs_private *priv) | |||
| 901 | lbs_deb_enter(LBS_DEB_MAIN); | 903 | lbs_deb_enter(LBS_DEB_MAIN); |
| 902 | 904 | ||
| 903 | lbs_free_cmd_buffer(priv); | 905 | lbs_free_cmd_buffer(priv); |
| 904 | if (priv->event_fifo) | 906 | kfifo_free(&priv->event_fifo); |
| 905 | kfifo_free(priv->event_fifo); | ||
| 906 | del_timer(&priv->command_timer); | 907 | del_timer(&priv->command_timer); |
| 907 | del_timer(&priv->auto_deepsleep_timer); | 908 | del_timer(&priv->auto_deepsleep_timer); |
| 908 | kfree(priv->networks); | 909 | kfree(priv->networks); |
| @@ -1177,7 +1178,7 @@ void lbs_queue_event(struct lbs_private *priv, u32 event) | |||
| 1177 | if (priv->psstate == PS_STATE_SLEEP) | 1178 | if (priv->psstate == PS_STATE_SLEEP) |
| 1178 | priv->psstate = PS_STATE_AWAKE; | 1179 | priv->psstate = PS_STATE_AWAKE; |
| 1179 | 1180 | ||
| 1180 | __kfifo_put(priv->event_fifo, (unsigned char *) &event, sizeof(u32)); | 1181 | kfifo_in(&priv->event_fifo, (unsigned char *) &event, sizeof(u32)); |
| 1181 | 1182 | ||
| 1182 | wake_up_interruptible(&priv->waitq); | 1183 | wake_up_interruptible(&priv->waitq); |
| 1183 | 1184 | ||
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c index bcd4ba8be7db..b66029bd75d0 100644 --- a/drivers/platform/x86/fujitsu-laptop.c +++ b/drivers/platform/x86/fujitsu-laptop.c | |||
| @@ -164,7 +164,7 @@ struct fujitsu_hotkey_t { | |||
| 164 | struct input_dev *input; | 164 | struct input_dev *input; |
| 165 | char phys[32]; | 165 | char phys[32]; |
| 166 | struct platform_device *pf_device; | 166 | struct platform_device *pf_device; |
| 167 | struct kfifo *fifo; | 167 | struct kfifo fifo; |
| 168 | spinlock_t fifo_lock; | 168 | spinlock_t fifo_lock; |
| 169 | int rfkill_supported; | 169 | int rfkill_supported; |
| 170 | int rfkill_state; | 170 | int rfkill_state; |
| @@ -824,12 +824,10 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device) | |||
| 824 | 824 | ||
| 825 | /* kfifo */ | 825 | /* kfifo */ |
| 826 | spin_lock_init(&fujitsu_hotkey->fifo_lock); | 826 | spin_lock_init(&fujitsu_hotkey->fifo_lock); |
| 827 | fujitsu_hotkey->fifo = | 827 | error = kfifo_alloc(&fujitsu_hotkey->fifo, RINGBUFFERSIZE * sizeof(int), |
| 828 | kfifo_alloc(RINGBUFFERSIZE * sizeof(int), GFP_KERNEL, | 828 | GFP_KERNEL); |
| 829 | &fujitsu_hotkey->fifo_lock); | 829 | if (error) { |
| 830 | if (IS_ERR(fujitsu_hotkey->fifo)) { | ||
| 831 | printk(KERN_ERR "kfifo_alloc failed\n"); | 830 | printk(KERN_ERR "kfifo_alloc failed\n"); |
| 832 | error = PTR_ERR(fujitsu_hotkey->fifo); | ||
| 833 | goto err_stop; | 831 | goto err_stop; |
| 834 | } | 832 | } |
| 835 | 833 | ||
| @@ -934,7 +932,7 @@ err_unregister_input_dev: | |||
| 934 | err_free_input_dev: | 932 | err_free_input_dev: |
| 935 | input_free_device(input); | 933 | input_free_device(input); |
| 936 | err_free_fifo: | 934 | err_free_fifo: |
| 937 | kfifo_free(fujitsu_hotkey->fifo); | 935 | kfifo_free(&fujitsu_hotkey->fifo); |
| 938 | err_stop: | 936 | err_stop: |
| 939 | return result; | 937 | return result; |
| 940 | } | 938 | } |
| @@ -956,7 +954,7 @@ static int acpi_fujitsu_hotkey_remove(struct acpi_device *device, int type) | |||
| 956 | 954 | ||
| 957 | input_free_device(input); | 955 | input_free_device(input); |
| 958 | 956 | ||
| 959 | kfifo_free(fujitsu_hotkey->fifo); | 957 | kfifo_free(&fujitsu_hotkey->fifo); |
| 960 | 958 | ||
| 961 | fujitsu_hotkey->acpi_handle = NULL; | 959 | fujitsu_hotkey->acpi_handle = NULL; |
| 962 | 960 | ||
| @@ -1008,9 +1006,10 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event) | |||
| 1008 | vdbg_printk(FUJLAPTOP_DBG_TRACE, | 1006 | vdbg_printk(FUJLAPTOP_DBG_TRACE, |
| 1009 | "Push keycode into ringbuffer [%d]\n", | 1007 | "Push keycode into ringbuffer [%d]\n", |
| 1010 | keycode); | 1008 | keycode); |
| 1011 | status = kfifo_put(fujitsu_hotkey->fifo, | 1009 | status = kfifo_in_locked(&fujitsu_hotkey->fifo, |
| 1012 | (unsigned char *)&keycode, | 1010 | (unsigned char *)&keycode, |
| 1013 | sizeof(keycode)); | 1011 | sizeof(keycode), |
| 1012 | &fujitsu_hotkey->fifo_lock); | ||
| 1014 | if (status != sizeof(keycode)) { | 1013 | if (status != sizeof(keycode)) { |
| 1015 | vdbg_printk(FUJLAPTOP_DBG_WARN, | 1014 | vdbg_printk(FUJLAPTOP_DBG_WARN, |
| 1016 | "Could not push keycode [0x%x]\n", | 1015 | "Could not push keycode [0x%x]\n", |
| @@ -1021,11 +1020,12 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event) | |||
| 1021 | } | 1020 | } |
| 1022 | } else if (keycode == 0) { | 1021 | } else if (keycode == 0) { |
| 1023 | while ((status = | 1022 | while ((status = |
| 1024 | kfifo_get | 1023 | kfifo_out_locked( |
| 1025 | (fujitsu_hotkey->fifo, (unsigned char *) | 1024 | &fujitsu_hotkey->fifo, |
| 1026 | &keycode_r, | 1025 | (unsigned char *) &keycode_r, |
| 1027 | sizeof | 1026 | sizeof(keycode_r), |
| 1028 | (keycode_r))) == sizeof(keycode_r)) { | 1027 | &fujitsu_hotkey->fifo_lock)) |
| 1028 | == sizeof(keycode_r)) { | ||
| 1029 | input_report_key(input, keycode_r, 0); | 1029 | input_report_key(input, keycode_r, 0); |
| 1030 | input_sync(input); | 1030 | input_sync(input); |
| 1031 | vdbg_printk(FUJLAPTOP_DBG_TRACE, | 1031 | vdbg_printk(FUJLAPTOP_DBG_TRACE, |
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index 7a2cc8a5c975..2896ca4cd9ab 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c | |||
| @@ -142,7 +142,7 @@ struct sony_laptop_input_s { | |||
| 142 | atomic_t users; | 142 | atomic_t users; |
| 143 | struct input_dev *jog_dev; | 143 | struct input_dev *jog_dev; |
| 144 | struct input_dev *key_dev; | 144 | struct input_dev *key_dev; |
| 145 | struct kfifo *fifo; | 145 | struct kfifo fifo; |
| 146 | spinlock_t fifo_lock; | 146 | spinlock_t fifo_lock; |
| 147 | struct workqueue_struct *wq; | 147 | struct workqueue_struct *wq; |
| 148 | }; | 148 | }; |
| @@ -300,8 +300,9 @@ static void do_sony_laptop_release_key(struct work_struct *work) | |||
| 300 | { | 300 | { |
| 301 | struct sony_laptop_keypress kp; | 301 | struct sony_laptop_keypress kp; |
| 302 | 302 | ||
| 303 | while (kfifo_get(sony_laptop_input.fifo, (unsigned char *)&kp, | 303 | while (kfifo_out_locked(&sony_laptop_input.fifo, (unsigned char *)&kp, |
| 304 | sizeof(kp)) == sizeof(kp)) { | 304 | sizeof(kp), &sony_laptop_input.fifo_lock) |
| 305 | == sizeof(kp)) { | ||
| 305 | msleep(10); | 306 | msleep(10); |
| 306 | input_report_key(kp.dev, kp.key, 0); | 307 | input_report_key(kp.dev, kp.key, 0); |
| 307 | input_sync(kp.dev); | 308 | input_sync(kp.dev); |
| @@ -362,8 +363,9 @@ static void sony_laptop_report_input_event(u8 event) | |||
| 362 | /* we emit the scancode so we can always remap the key */ | 363 | /* we emit the scancode so we can always remap the key */ |
| 363 | input_event(kp.dev, EV_MSC, MSC_SCAN, event); | 364 | input_event(kp.dev, EV_MSC, MSC_SCAN, event); |
| 364 | input_sync(kp.dev); | 365 | input_sync(kp.dev); |
| 365 | kfifo_put(sony_laptop_input.fifo, | 366 | kfifo_in_locked(&sony_laptop_input.fifo, |
| 366 | (unsigned char *)&kp, sizeof(kp)); | 367 | (unsigned char *)&kp, sizeof(kp), |
| 368 | &sony_laptop_input.fifo_lock); | ||
| 367 | 369 | ||
| 368 | if (!work_pending(&sony_laptop_release_key_work)) | 370 | if (!work_pending(&sony_laptop_release_key_work)) |
| 369 | queue_work(sony_laptop_input.wq, | 371 | queue_work(sony_laptop_input.wq, |
| @@ -385,12 +387,10 @@ static int sony_laptop_setup_input(struct acpi_device *acpi_device) | |||
| 385 | 387 | ||
| 386 | /* kfifo */ | 388 | /* kfifo */ |
| 387 | spin_lock_init(&sony_laptop_input.fifo_lock); | 389 | spin_lock_init(&sony_laptop_input.fifo_lock); |
| 388 | sony_laptop_input.fifo = | 390 | error = |
| 389 | kfifo_alloc(SONY_LAPTOP_BUF_SIZE, GFP_KERNEL, | 391 | kfifo_alloc(&sony_laptop_input.fifo, SONY_LAPTOP_BUF_SIZE, GFP_KERNEL); |
| 390 | &sony_laptop_input.fifo_lock); | 392 | if (error) { |
| 391 | if (IS_ERR(sony_laptop_input.fifo)) { | ||
| 392 | printk(KERN_ERR DRV_PFX "kfifo_alloc failed\n"); | 393 | printk(KERN_ERR DRV_PFX "kfifo_alloc failed\n"); |
| 393 | error = PTR_ERR(sony_laptop_input.fifo); | ||
| 394 | goto err_dec_users; | 394 | goto err_dec_users; |
| 395 | } | 395 | } |
| 396 | 396 | ||
| @@ -474,7 +474,7 @@ err_destroy_wq: | |||
| 474 | destroy_workqueue(sony_laptop_input.wq); | 474 | destroy_workqueue(sony_laptop_input.wq); |
| 475 | 475 | ||
| 476 | err_free_kfifo: | 476 | err_free_kfifo: |
| 477 | kfifo_free(sony_laptop_input.fifo); | 477 | kfifo_free(&sony_laptop_input.fifo); |
| 478 | 478 | ||
| 479 | err_dec_users: | 479 | err_dec_users: |
| 480 | atomic_dec(&sony_laptop_input.users); | 480 | atomic_dec(&sony_laptop_input.users); |
| @@ -500,7 +500,7 @@ static void sony_laptop_remove_input(void) | |||
| 500 | } | 500 | } |
| 501 | 501 | ||
| 502 | destroy_workqueue(sony_laptop_input.wq); | 502 | destroy_workqueue(sony_laptop_input.wq); |
| 503 | kfifo_free(sony_laptop_input.fifo); | 503 | kfifo_free(&sony_laptop_input.fifo); |
| 504 | } | 504 | } |
| 505 | 505 | ||
| 506 | /*********** Platform Device ***********/ | 506 | /*********** Platform Device ***********/ |
| @@ -2079,7 +2079,7 @@ static struct attribute_group spic_attribute_group = { | |||
| 2079 | 2079 | ||
| 2080 | struct sonypi_compat_s { | 2080 | struct sonypi_compat_s { |
| 2081 | struct fasync_struct *fifo_async; | 2081 | struct fasync_struct *fifo_async; |
| 2082 | struct kfifo *fifo; | 2082 | struct kfifo fifo; |
| 2083 | spinlock_t fifo_lock; | 2083 | spinlock_t fifo_lock; |
| 2084 | wait_queue_head_t fifo_proc_list; | 2084 | wait_queue_head_t fifo_proc_list; |
| 2085 | atomic_t open_count; | 2085 | atomic_t open_count; |
| @@ -2104,12 +2104,12 @@ static int sonypi_misc_open(struct inode *inode, struct file *file) | |||
| 2104 | /* Flush input queue on first open */ | 2104 | /* Flush input queue on first open */ |
| 2105 | unsigned long flags; | 2105 | unsigned long flags; |
| 2106 | 2106 | ||
| 2107 | spin_lock_irqsave(sonypi_compat.fifo->lock, flags); | 2107 | spin_lock_irqsave(&sonypi_compat.fifo_lock, flags); |
| 2108 | 2108 | ||
| 2109 | if (atomic_inc_return(&sonypi_compat.open_count) == 1) | 2109 | if (atomic_inc_return(&sonypi_compat.open_count) == 1) |
| 2110 | __kfifo_reset(sonypi_compat.fifo); | 2110 | kfifo_reset(&sonypi_compat.fifo); |
| 2111 | 2111 | ||
| 2112 | spin_unlock_irqrestore(sonypi_compat.fifo->lock, flags); | 2112 | spin_unlock_irqrestore(&sonypi_compat.fifo_lock, flags); |
| 2113 | 2113 | ||
| 2114 | return 0; | 2114 | return 0; |
| 2115 | } | 2115 | } |
| @@ -2120,17 +2120,18 @@ static ssize_t sonypi_misc_read(struct file *file, char __user *buf, | |||
| 2120 | ssize_t ret; | 2120 | ssize_t ret; |
| 2121 | unsigned char c; | 2121 | unsigned char c; |
| 2122 | 2122 | ||
| 2123 | if ((kfifo_len(sonypi_compat.fifo) == 0) && | 2123 | if ((kfifo_len(&sonypi_compat.fifo) == 0) && |
| 2124 | (file->f_flags & O_NONBLOCK)) | 2124 | (file->f_flags & O_NONBLOCK)) |
| 2125 | return -EAGAIN; | 2125 | return -EAGAIN; |
| 2126 | 2126 | ||
| 2127 | ret = wait_event_interruptible(sonypi_compat.fifo_proc_list, | 2127 | ret = wait_event_interruptible(sonypi_compat.fifo_proc_list, |
| 2128 | kfifo_len(sonypi_compat.fifo) != 0); | 2128 | kfifo_len(&sonypi_compat.fifo) != 0); |
| 2129 | if (ret) | 2129 | if (ret) |
| 2130 | return ret; | 2130 | return ret; |
| 2131 | 2131 | ||
| 2132 | while (ret < count && | 2132 | while (ret < count && |
| 2133 | (kfifo_get(sonypi_compat.fifo, &c, sizeof(c)) == sizeof(c))) { | 2133 | (kfifo_out_locked(&sonypi_compat.fifo, &c, sizeof(c), |
| 2134 | &sonypi_compat.fifo_lock) == sizeof(c))) { | ||
| 2134 | if (put_user(c, buf++)) | 2135 | if (put_user(c, buf++)) |
| 2135 | return -EFAULT; | 2136 | return -EFAULT; |
| 2136 | ret++; | 2137 | ret++; |
| @@ -2147,7 +2148,7 @@ static ssize_t sonypi_misc_read(struct file *file, char __user *buf, | |||
| 2147 | static unsigned int sonypi_misc_poll(struct file *file, poll_table *wait) | 2148 | static unsigned int sonypi_misc_poll(struct file *file, poll_table *wait) |
| 2148 | { | 2149 | { |
| 2149 | poll_wait(file, &sonypi_compat.fifo_proc_list, wait); | 2150 | poll_wait(file, &sonypi_compat.fifo_proc_list, wait); |
| 2150 | if (kfifo_len(sonypi_compat.fifo)) | 2151 | if (kfifo_len(&sonypi_compat.fifo)) |
| 2151 | return POLLIN | POLLRDNORM; | 2152 | return POLLIN | POLLRDNORM; |
| 2152 | return 0; | 2153 | return 0; |
| 2153 | } | 2154 | } |
| @@ -2309,7 +2310,8 @@ static struct miscdevice sonypi_misc_device = { | |||
| 2309 | 2310 | ||
| 2310 | static void sonypi_compat_report_event(u8 event) | 2311 | static void sonypi_compat_report_event(u8 event) |
| 2311 | { | 2312 | { |
| 2312 | kfifo_put(sonypi_compat.fifo, (unsigned char *)&event, sizeof(event)); | 2313 | kfifo_in_locked(&sonypi_compat.fifo, (unsigned char *)&event, |
| 2314 | sizeof(event), &sonypi_compat.fifo_lock); | ||
| 2313 | kill_fasync(&sonypi_compat.fifo_async, SIGIO, POLL_IN); | 2315 | kill_fasync(&sonypi_compat.fifo_async, SIGIO, POLL_IN); |
| 2314 | wake_up_interruptible(&sonypi_compat.fifo_proc_list); | 2316 | wake_up_interruptible(&sonypi_compat.fifo_proc_list); |
| 2315 | } | 2317 | } |
| @@ -2319,11 +2321,11 @@ static int sonypi_compat_init(void) | |||
| 2319 | int error; | 2321 | int error; |
| 2320 | 2322 | ||
| 2321 | spin_lock_init(&sonypi_compat.fifo_lock); | 2323 | spin_lock_init(&sonypi_compat.fifo_lock); |
| 2322 | sonypi_compat.fifo = kfifo_alloc(SONY_LAPTOP_BUF_SIZE, GFP_KERNEL, | 2324 | error = |
| 2323 | &sonypi_compat.fifo_lock); | 2325 | kfifo_alloc(&sonypi_compat.fifo, SONY_LAPTOP_BUF_SIZE, GFP_KERNEL); |
| 2324 | if (IS_ERR(sonypi_compat.fifo)) { | 2326 | if (error) { |
| 2325 | printk(KERN_ERR DRV_PFX "kfifo_alloc failed\n"); | 2327 | printk(KERN_ERR DRV_PFX "kfifo_alloc failed\n"); |
| 2326 | return PTR_ERR(sonypi_compat.fifo); | 2328 | return error; |
| 2327 | } | 2329 | } |
| 2328 | 2330 | ||
| 2329 | init_waitqueue_head(&sonypi_compat.fifo_proc_list); | 2331 | init_waitqueue_head(&sonypi_compat.fifo_proc_list); |
| @@ -2342,14 +2344,14 @@ static int sonypi_compat_init(void) | |||
| 2342 | return 0; | 2344 | return 0; |
| 2343 | 2345 | ||
| 2344 | err_free_kfifo: | 2346 | err_free_kfifo: |
| 2345 | kfifo_free(sonypi_compat.fifo); | 2347 | kfifo_free(&sonypi_compat.fifo); |
| 2346 | return error; | 2348 | return error; |
| 2347 | } | 2349 | } |
| 2348 | 2350 | ||
| 2349 | static void sonypi_compat_exit(void) | 2351 | static void sonypi_compat_exit(void) |
| 2350 | { | 2352 | { |
| 2351 | misc_deregister(&sonypi_misc_device); | 2353 | misc_deregister(&sonypi_misc_device); |
| 2352 | kfifo_free(sonypi_compat.fifo); | 2354 | kfifo_free(&sonypi_compat.fifo); |
| 2353 | } | 2355 | } |
| 2354 | #else | 2356 | #else |
| 2355 | static int sonypi_compat_init(void) { return 0; } | 2357 | static int sonypi_compat_init(void) { return 0; } |
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index b7689f3d05f5..c28a712fd4db 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c | |||
| @@ -517,7 +517,7 @@ static void iscsi_free_task(struct iscsi_task *task) | |||
| 517 | if (conn->login_task == task) | 517 | if (conn->login_task == task) |
| 518 | return; | 518 | return; |
| 519 | 519 | ||
| 520 | __kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*)); | 520 | kfifo_in(&session->cmdpool.queue, (void*)&task, sizeof(void*)); |
| 521 | 521 | ||
| 522 | if (sc) { | 522 | if (sc) { |
| 523 | task->sc = NULL; | 523 | task->sc = NULL; |
| @@ -737,7 +737,7 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr, | |||
| 737 | BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE); | 737 | BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE); |
| 738 | BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED); | 738 | BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED); |
| 739 | 739 | ||
| 740 | if (!__kfifo_get(session->cmdpool.queue, | 740 | if (!kfifo_out(&session->cmdpool.queue, |
| 741 | (void*)&task, sizeof(void*))) | 741 | (void*)&task, sizeof(void*))) |
| 742 | return NULL; | 742 | return NULL; |
| 743 | } | 743 | } |
| @@ -1567,7 +1567,7 @@ static inline struct iscsi_task *iscsi_alloc_task(struct iscsi_conn *conn, | |||
| 1567 | { | 1567 | { |
| 1568 | struct iscsi_task *task; | 1568 | struct iscsi_task *task; |
| 1569 | 1569 | ||
| 1570 | if (!__kfifo_get(conn->session->cmdpool.queue, | 1570 | if (!kfifo_out(&conn->session->cmdpool.queue, |
| 1571 | (void *) &task, sizeof(void *))) | 1571 | (void *) &task, sizeof(void *))) |
| 1572 | return NULL; | 1572 | return NULL; |
| 1573 | 1573 | ||
| @@ -2461,12 +2461,7 @@ iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size) | |||
| 2461 | if (q->pool == NULL) | 2461 | if (q->pool == NULL) |
| 2462 | return -ENOMEM; | 2462 | return -ENOMEM; |
| 2463 | 2463 | ||
| 2464 | q->queue = kfifo_init((void*)q->pool, max * sizeof(void*), | 2464 | kfifo_init(&q->queue, (void*)q->pool, max * sizeof(void*)); |
| 2465 | GFP_KERNEL, NULL); | ||
| 2466 | if (IS_ERR(q->queue)) { | ||
| 2467 | q->queue = NULL; | ||
| 2468 | goto enomem; | ||
| 2469 | } | ||
| 2470 | 2465 | ||
| 2471 | for (i = 0; i < max; i++) { | 2466 | for (i = 0; i < max; i++) { |
| 2472 | q->pool[i] = kzalloc(item_size, GFP_KERNEL); | 2467 | q->pool[i] = kzalloc(item_size, GFP_KERNEL); |
| @@ -2474,7 +2469,7 @@ iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size) | |||
| 2474 | q->max = i; | 2469 | q->max = i; |
| 2475 | goto enomem; | 2470 | goto enomem; |
| 2476 | } | 2471 | } |
| 2477 | __kfifo_put(q->queue, (void*)&q->pool[i], sizeof(void*)); | 2472 | kfifo_in(&q->queue, (void*)&q->pool[i], sizeof(void*)); |
| 2478 | } | 2473 | } |
| 2479 | 2474 | ||
| 2480 | if (items) { | 2475 | if (items) { |
| @@ -2497,7 +2492,6 @@ void iscsi_pool_free(struct iscsi_pool *q) | |||
| 2497 | for (i = 0; i < q->max; i++) | 2492 | for (i = 0; i < q->max; i++) |
| 2498 | kfree(q->pool[i]); | 2493 | kfree(q->pool[i]); |
| 2499 | kfree(q->pool); | 2494 | kfree(q->pool); |
| 2500 | kfree(q->queue); | ||
| 2501 | } | 2495 | } |
| 2502 | EXPORT_SYMBOL_GPL(iscsi_pool_free); | 2496 | EXPORT_SYMBOL_GPL(iscsi_pool_free); |
| 2503 | 2497 | ||
| @@ -2825,7 +2819,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size, | |||
| 2825 | 2819 | ||
| 2826 | /* allocate login_task used for the login/text sequences */ | 2820 | /* allocate login_task used for the login/text sequences */ |
| 2827 | spin_lock_bh(&session->lock); | 2821 | spin_lock_bh(&session->lock); |
| 2828 | if (!__kfifo_get(session->cmdpool.queue, | 2822 | if (!kfifo_out(&session->cmdpool.queue, |
| 2829 | (void*)&conn->login_task, | 2823 | (void*)&conn->login_task, |
| 2830 | sizeof(void*))) { | 2824 | sizeof(void*))) { |
| 2831 | spin_unlock_bh(&session->lock); | 2825 | spin_unlock_bh(&session->lock); |
| @@ -2845,7 +2839,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size, | |||
| 2845 | return cls_conn; | 2839 | return cls_conn; |
| 2846 | 2840 | ||
| 2847 | login_task_data_alloc_fail: | 2841 | login_task_data_alloc_fail: |
| 2848 | __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task, | 2842 | kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task, |
| 2849 | sizeof(void*)); | 2843 | sizeof(void*)); |
| 2850 | login_task_alloc_fail: | 2844 | login_task_alloc_fail: |
| 2851 | iscsi_destroy_conn(cls_conn); | 2845 | iscsi_destroy_conn(cls_conn); |
| @@ -2908,7 +2902,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn) | |||
| 2908 | free_pages((unsigned long) conn->data, | 2902 | free_pages((unsigned long) conn->data, |
| 2909 | get_order(ISCSI_DEF_MAX_RECV_SEG_LEN)); | 2903 | get_order(ISCSI_DEF_MAX_RECV_SEG_LEN)); |
| 2910 | kfree(conn->persistent_address); | 2904 | kfree(conn->persistent_address); |
| 2911 | __kfifo_put(session->cmdpool.queue, (void*)&conn->login_task, | 2905 | kfifo_in(&session->cmdpool.queue, (void*)&conn->login_task, |
| 2912 | sizeof(void*)); | 2906 | sizeof(void*)); |
| 2913 | if (session->leadconn == conn) | 2907 | if (session->leadconn == conn) |
| 2914 | session->leadconn = NULL; | 2908 | session->leadconn = NULL; |
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c index ca25ee5190b0..db6856c138fc 100644 --- a/drivers/scsi/libiscsi_tcp.c +++ b/drivers/scsi/libiscsi_tcp.c | |||
| @@ -445,15 +445,15 @@ void iscsi_tcp_cleanup_task(struct iscsi_task *task) | |||
| 445 | return; | 445 | return; |
| 446 | 446 | ||
| 447 | /* flush task's r2t queues */ | 447 | /* flush task's r2t queues */ |
| 448 | while (__kfifo_get(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) { | 448 | while (kfifo_out(&tcp_task->r2tqueue, (void*)&r2t, sizeof(void*))) { |
| 449 | __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t, | 449 | kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t, |
| 450 | sizeof(void*)); | 450 | sizeof(void*)); |
| 451 | ISCSI_DBG_TCP(task->conn, "pending r2t dropped\n"); | 451 | ISCSI_DBG_TCP(task->conn, "pending r2t dropped\n"); |
| 452 | } | 452 | } |
| 453 | 453 | ||
| 454 | r2t = tcp_task->r2t; | 454 | r2t = tcp_task->r2t; |
| 455 | if (r2t != NULL) { | 455 | if (r2t != NULL) { |
| 456 | __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t, | 456 | kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t, |
| 457 | sizeof(void*)); | 457 | sizeof(void*)); |
| 458 | tcp_task->r2t = NULL; | 458 | tcp_task->r2t = NULL; |
| 459 | } | 459 | } |
| @@ -541,7 +541,7 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task) | |||
| 541 | return 0; | 541 | return 0; |
| 542 | } | 542 | } |
| 543 | 543 | ||
| 544 | rc = __kfifo_get(tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*)); | 544 | rc = kfifo_out(&tcp_task->r2tpool.queue, (void*)&r2t, sizeof(void*)); |
| 545 | if (!rc) { | 545 | if (!rc) { |
| 546 | iscsi_conn_printk(KERN_ERR, conn, "Could not allocate R2T. " | 546 | iscsi_conn_printk(KERN_ERR, conn, "Could not allocate R2T. " |
| 547 | "Target has sent more R2Ts than it " | 547 | "Target has sent more R2Ts than it " |
| @@ -554,7 +554,7 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task) | |||
| 554 | if (r2t->data_length == 0) { | 554 | if (r2t->data_length == 0) { |
| 555 | iscsi_conn_printk(KERN_ERR, conn, | 555 | iscsi_conn_printk(KERN_ERR, conn, |
| 556 | "invalid R2T with zero data len\n"); | 556 | "invalid R2T with zero data len\n"); |
| 557 | __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t, | 557 | kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t, |
| 558 | sizeof(void*)); | 558 | sizeof(void*)); |
| 559 | return ISCSI_ERR_DATALEN; | 559 | return ISCSI_ERR_DATALEN; |
| 560 | } | 560 | } |
| @@ -570,7 +570,7 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task) | |||
| 570 | "invalid R2T with data len %u at offset %u " | 570 | "invalid R2T with data len %u at offset %u " |
| 571 | "and total length %d\n", r2t->data_length, | 571 | "and total length %d\n", r2t->data_length, |
| 572 | r2t->data_offset, scsi_out(task->sc)->length); | 572 | r2t->data_offset, scsi_out(task->sc)->length); |
| 573 | __kfifo_put(tcp_task->r2tpool.queue, (void*)&r2t, | 573 | kfifo_in(&tcp_task->r2tpool.queue, (void*)&r2t, |
| 574 | sizeof(void*)); | 574 | sizeof(void*)); |
| 575 | return ISCSI_ERR_DATALEN; | 575 | return ISCSI_ERR_DATALEN; |
| 576 | } | 576 | } |
| @@ -580,7 +580,7 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task) | |||
| 580 | r2t->sent = 0; | 580 | r2t->sent = 0; |
| 581 | 581 | ||
| 582 | tcp_task->exp_datasn = r2tsn + 1; | 582 | tcp_task->exp_datasn = r2tsn + 1; |
| 583 | __kfifo_put(tcp_task->r2tqueue, (void*)&r2t, sizeof(void*)); | 583 | kfifo_in(&tcp_task->r2tqueue, (void*)&r2t, sizeof(void*)); |
| 584 | conn->r2t_pdus_cnt++; | 584 | conn->r2t_pdus_cnt++; |
| 585 | 585 | ||
| 586 | iscsi_requeue_task(task); | 586 | iscsi_requeue_task(task); |
| @@ -951,7 +951,7 @@ int iscsi_tcp_task_init(struct iscsi_task *task) | |||
| 951 | return conn->session->tt->init_pdu(task, 0, task->data_count); | 951 | return conn->session->tt->init_pdu(task, 0, task->data_count); |
| 952 | } | 952 | } |
| 953 | 953 | ||
| 954 | BUG_ON(__kfifo_len(tcp_task->r2tqueue)); | 954 | BUG_ON(kfifo_len(&tcp_task->r2tqueue)); |
| 955 | tcp_task->exp_datasn = 0; | 955 | tcp_task->exp_datasn = 0; |
| 956 | 956 | ||
| 957 | /* Prepare PDU, optionally w/ immediate data */ | 957 | /* Prepare PDU, optionally w/ immediate data */ |
| @@ -982,7 +982,7 @@ static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task) | |||
| 982 | if (r2t->data_length <= r2t->sent) { | 982 | if (r2t->data_length <= r2t->sent) { |
| 983 | ISCSI_DBG_TCP(task->conn, | 983 | ISCSI_DBG_TCP(task->conn, |
| 984 | " done with r2t %p\n", r2t); | 984 | " done with r2t %p\n", r2t); |
| 985 | __kfifo_put(tcp_task->r2tpool.queue, | 985 | kfifo_in(&tcp_task->r2tpool.queue, |
| 986 | (void *)&tcp_task->r2t, | 986 | (void *)&tcp_task->r2t, |
| 987 | sizeof(void *)); | 987 | sizeof(void *)); |
| 988 | tcp_task->r2t = r2t = NULL; | 988 | tcp_task->r2t = r2t = NULL; |
| @@ -990,8 +990,13 @@ static struct iscsi_r2t_info *iscsi_tcp_get_curr_r2t(struct iscsi_task *task) | |||
| 990 | } | 990 | } |
| 991 | 991 | ||
| 992 | if (r2t == NULL) { | 992 | if (r2t == NULL) { |
| 993 | __kfifo_get(tcp_task->r2tqueue, | 993 | if (kfifo_out(&tcp_task->r2tqueue, |
| 994 | (void *)&tcp_task->r2t, sizeof(void *)); | 994 | (void *)&tcp_task->r2t, sizeof(void *)) != |
| 995 | sizeof(void *)) { | ||
| 996 | WARN_ONCE(1, "unexpected fifo state"); | ||
| 997 | r2t = NULL; | ||
| 998 | } | ||
| 999 | |||
| 995 | r2t = tcp_task->r2t; | 1000 | r2t = tcp_task->r2t; |
| 996 | } | 1001 | } |
| 997 | spin_unlock_bh(&session->lock); | 1002 | spin_unlock_bh(&session->lock); |
| @@ -1127,9 +1132,8 @@ int iscsi_tcp_r2tpool_alloc(struct iscsi_session *session) | |||
| 1127 | } | 1132 | } |
| 1128 | 1133 | ||
| 1129 | /* R2T xmit queue */ | 1134 | /* R2T xmit queue */ |
| 1130 | tcp_task->r2tqueue = kfifo_alloc( | 1135 | if (kfifo_alloc(&tcp_task->r2tqueue, |
| 1131 | session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL); | 1136 | session->max_r2t * 4 * sizeof(void*), GFP_KERNEL)) { |
| 1132 | if (tcp_task->r2tqueue == ERR_PTR(-ENOMEM)) { | ||
| 1133 | iscsi_pool_free(&tcp_task->r2tpool); | 1137 | iscsi_pool_free(&tcp_task->r2tpool); |
| 1134 | goto r2t_alloc_fail; | 1138 | goto r2t_alloc_fail; |
| 1135 | } | 1139 | } |
| @@ -1142,7 +1146,7 @@ r2t_alloc_fail: | |||
| 1142 | struct iscsi_task *task = session->cmds[i]; | 1146 | struct iscsi_task *task = session->cmds[i]; |
| 1143 | struct iscsi_tcp_task *tcp_task = task->dd_data; | 1147 | struct iscsi_tcp_task *tcp_task = task->dd_data; |
| 1144 | 1148 | ||
| 1145 | kfifo_free(tcp_task->r2tqueue); | 1149 | kfifo_free(&tcp_task->r2tqueue); |
| 1146 | iscsi_pool_free(&tcp_task->r2tpool); | 1150 | iscsi_pool_free(&tcp_task->r2tpool); |
| 1147 | } | 1151 | } |
| 1148 | return -ENOMEM; | 1152 | return -ENOMEM; |
| @@ -1157,7 +1161,7 @@ void iscsi_tcp_r2tpool_free(struct iscsi_session *session) | |||
| 1157 | struct iscsi_task *task = session->cmds[i]; | 1161 | struct iscsi_task *task = session->cmds[i]; |
| 1158 | struct iscsi_tcp_task *tcp_task = task->dd_data; | 1162 | struct iscsi_tcp_task *tcp_task = task->dd_data; |
| 1159 | 1163 | ||
| 1160 | kfifo_free(tcp_task->r2tqueue); | 1164 | kfifo_free(&tcp_task->r2tqueue); |
| 1161 | iscsi_pool_free(&tcp_task->r2tpool); | 1165 | iscsi_pool_free(&tcp_task->r2tpool); |
| 1162 | } | 1166 | } |
| 1163 | } | 1167 | } |
diff --git a/drivers/scsi/libsrp.c b/drivers/scsi/libsrp.c index 9ad38e81e343..ab19b3b4be52 100644 --- a/drivers/scsi/libsrp.c +++ b/drivers/scsi/libsrp.c | |||
| @@ -58,19 +58,15 @@ static int srp_iu_pool_alloc(struct srp_queue *q, size_t max, | |||
| 58 | goto free_pool; | 58 | goto free_pool; |
| 59 | 59 | ||
| 60 | spin_lock_init(&q->lock); | 60 | spin_lock_init(&q->lock); |
| 61 | q->queue = kfifo_init((void *) q->pool, max * sizeof(void *), | 61 | kfifo_init(&q->queue, (void *) q->pool, max * sizeof(void *)); |
| 62 | GFP_KERNEL, &q->lock); | ||
| 63 | if (IS_ERR(q->queue)) | ||
| 64 | goto free_item; | ||
| 65 | 62 | ||
| 66 | for (i = 0, iue = q->items; i < max; i++) { | 63 | for (i = 0, iue = q->items; i < max; i++) { |
| 67 | __kfifo_put(q->queue, (void *) &iue, sizeof(void *)); | 64 | kfifo_in(&q->queue, (void *) &iue, sizeof(void *)); |
| 68 | iue->sbuf = ring[i]; | 65 | iue->sbuf = ring[i]; |
| 69 | iue++; | 66 | iue++; |
| 70 | } | 67 | } |
| 71 | return 0; | 68 | return 0; |
| 72 | 69 | ||
| 73 | free_item: | ||
| 74 | kfree(q->items); | 70 | kfree(q->items); |
| 75 | free_pool: | 71 | free_pool: |
| 76 | kfree(q->pool); | 72 | kfree(q->pool); |
| @@ -167,7 +163,11 @@ struct iu_entry *srp_iu_get(struct srp_target *target) | |||
| 167 | { | 163 | { |
| 168 | struct iu_entry *iue = NULL; | 164 | struct iu_entry *iue = NULL; |
| 169 | 165 | ||
| 170 | kfifo_get(target->iu_queue.queue, (void *) &iue, sizeof(void *)); | 166 | if (kfifo_out_locked(&target->iu_queue.queue, (void *) &iue, |
| 167 | sizeof(void *), &target->iu_queue.lock) != sizeof(void *)) { | ||
| 168 | WARN_ONCE(1, "unexpected fifo state"); | ||
| 169 | return NULL; | ||
| 170 | } | ||
| 171 | if (!iue) | 171 | if (!iue) |
| 172 | return iue; | 172 | return iue; |
| 173 | iue->target = target; | 173 | iue->target = target; |
| @@ -179,7 +179,8 @@ EXPORT_SYMBOL_GPL(srp_iu_get); | |||
| 179 | 179 | ||
| 180 | void srp_iu_put(struct iu_entry *iue) | 180 | void srp_iu_put(struct iu_entry *iue) |
| 181 | { | 181 | { |
| 182 | kfifo_put(iue->target->iu_queue.queue, (void *) &iue, sizeof(void *)); | 182 | kfifo_in_locked(&iue->target->iu_queue.queue, (void *) &iue, |
| 183 | sizeof(void *), &iue->target->iu_queue.lock); | ||
| 183 | } | 184 | } |
| 184 | EXPORT_SYMBOL_GPL(srp_iu_put); | 185 | EXPORT_SYMBOL_GPL(srp_iu_put); |
| 185 | 186 | ||
diff --git a/drivers/staging/pohmelfs/dir.c b/drivers/staging/pohmelfs/dir.c index 6c5b261e9f06..aacd25bfb0cb 100644 --- a/drivers/staging/pohmelfs/dir.c +++ b/drivers/staging/pohmelfs/dir.c | |||
| @@ -722,8 +722,6 @@ static int pohmelfs_remove_entry(struct inode *dir, struct dentry *dentry) | |||
| 722 | if (inode->i_nlink) | 722 | if (inode->i_nlink) |
| 723 | inode_dec_link_count(inode); | 723 | inode_dec_link_count(inode); |
| 724 | } | 724 | } |
| 725 | dprintk("%s: inode: %p, lock: %ld, unhashed: %d.\n", | ||
| 726 | __func__, pi, inode->i_state & I_LOCK, hlist_unhashed(&inode->i_hash)); | ||
| 727 | 725 | ||
| 728 | return err; | 726 | return err; |
| 729 | } | 727 | } |
diff --git a/drivers/usb/host/fhci-sched.c b/drivers/usb/host/fhci-sched.c index 00a29855d0c4..ff43747a614f 100644 --- a/drivers/usb/host/fhci-sched.c +++ b/drivers/usb/host/fhci-sched.c | |||
| @@ -37,7 +37,7 @@ static void recycle_frame(struct fhci_usb *usb, struct packet *pkt) | |||
| 37 | pkt->info = 0; | 37 | pkt->info = 0; |
| 38 | pkt->priv_data = NULL; | 38 | pkt->priv_data = NULL; |
| 39 | 39 | ||
| 40 | cq_put(usb->ep0->empty_frame_Q, pkt); | 40 | cq_put(&usb->ep0->empty_frame_Q, pkt); |
| 41 | } | 41 | } |
| 42 | 42 | ||
| 43 | /* confirm submitted packet */ | 43 | /* confirm submitted packet */ |
| @@ -57,7 +57,7 @@ void fhci_transaction_confirm(struct fhci_usb *usb, struct packet *pkt) | |||
| 57 | if ((td->data + td->actual_len) && trans_len) | 57 | if ((td->data + td->actual_len) && trans_len) |
| 58 | memcpy(td->data + td->actual_len, pkt->data, | 58 | memcpy(td->data + td->actual_len, pkt->data, |
| 59 | trans_len); | 59 | trans_len); |
| 60 | cq_put(usb->ep0->dummy_packets_Q, pkt->data); | 60 | cq_put(&usb->ep0->dummy_packets_Q, pkt->data); |
| 61 | } | 61 | } |
| 62 | 62 | ||
| 63 | recycle_frame(usb, pkt); | 63 | recycle_frame(usb, pkt); |
| @@ -213,7 +213,7 @@ static int add_packet(struct fhci_usb *usb, struct ed *ed, struct td *td) | |||
| 213 | } | 213 | } |
| 214 | 214 | ||
| 215 | /* update frame object fields before transmitting */ | 215 | /* update frame object fields before transmitting */ |
| 216 | pkt = cq_get(usb->ep0->empty_frame_Q); | 216 | pkt = cq_get(&usb->ep0->empty_frame_Q); |
| 217 | if (!pkt) { | 217 | if (!pkt) { |
| 218 | fhci_dbg(usb->fhci, "there is no empty frame\n"); | 218 | fhci_dbg(usb->fhci, "there is no empty frame\n"); |
| 219 | return -1; | 219 | return -1; |
| @@ -222,7 +222,7 @@ static int add_packet(struct fhci_usb *usb, struct ed *ed, struct td *td) | |||
| 222 | 222 | ||
| 223 | pkt->info = 0; | 223 | pkt->info = 0; |
| 224 | if (data == NULL) { | 224 | if (data == NULL) { |
| 225 | data = cq_get(usb->ep0->dummy_packets_Q); | 225 | data = cq_get(&usb->ep0->dummy_packets_Q); |
| 226 | BUG_ON(!data); | 226 | BUG_ON(!data); |
| 227 | pkt->info = PKT_DUMMY_PACKET; | 227 | pkt->info = PKT_DUMMY_PACKET; |
| 228 | } | 228 | } |
| @@ -246,7 +246,7 @@ static int add_packet(struct fhci_usb *usb, struct ed *ed, struct td *td) | |||
| 246 | list_del_init(&td->frame_lh); | 246 | list_del_init(&td->frame_lh); |
| 247 | td->status = USB_TD_OK; | 247 | td->status = USB_TD_OK; |
| 248 | if (pkt->info & PKT_DUMMY_PACKET) | 248 | if (pkt->info & PKT_DUMMY_PACKET) |
| 249 | cq_put(usb->ep0->dummy_packets_Q, pkt->data); | 249 | cq_put(&usb->ep0->dummy_packets_Q, pkt->data); |
| 250 | recycle_frame(usb, pkt); | 250 | recycle_frame(usb, pkt); |
| 251 | usb->actual_frame->total_bytes -= (len + PROTOCOL_OVERHEAD); | 251 | usb->actual_frame->total_bytes -= (len + PROTOCOL_OVERHEAD); |
| 252 | fhci_err(usb->fhci, "host transaction failed\n"); | 252 | fhci_err(usb->fhci, "host transaction failed\n"); |
diff --git a/drivers/usb/host/fhci-tds.c b/drivers/usb/host/fhci-tds.c index b40332290319..d224ab467a40 100644 --- a/drivers/usb/host/fhci-tds.c +++ b/drivers/usb/host/fhci-tds.c | |||
| @@ -106,33 +106,33 @@ void fhci_ep0_free(struct fhci_usb *usb) | |||
| 106 | cpm_muram_free(cpm_muram_offset(ep->td_base)); | 106 | cpm_muram_free(cpm_muram_offset(ep->td_base)); |
| 107 | 107 | ||
| 108 | if (ep->conf_frame_Q) { | 108 | if (ep->conf_frame_Q) { |
| 109 | size = cq_howmany(ep->conf_frame_Q); | 109 | size = cq_howmany(&ep->conf_frame_Q); |
| 110 | for (; size; size--) { | 110 | for (; size; size--) { |
| 111 | struct packet *pkt = cq_get(ep->conf_frame_Q); | 111 | struct packet *pkt = cq_get(&ep->conf_frame_Q); |
| 112 | 112 | ||
| 113 | kfree(pkt); | 113 | kfree(pkt); |
| 114 | } | 114 | } |
| 115 | cq_delete(ep->conf_frame_Q); | 115 | cq_delete(&ep->conf_frame_Q); |
| 116 | } | 116 | } |
| 117 | 117 | ||
| 118 | if (ep->empty_frame_Q) { | 118 | if (ep->empty_frame_Q) { |
| 119 | size = cq_howmany(ep->empty_frame_Q); | 119 | size = cq_howmany(&ep->empty_frame_Q); |
| 120 | for (; size; size--) { | 120 | for (; size; size--) { |
| 121 | struct packet *pkt = cq_get(ep->empty_frame_Q); | 121 | struct packet *pkt = cq_get(&ep->empty_frame_Q); |
| 122 | 122 | ||
| 123 | kfree(pkt); | 123 | kfree(pkt); |
| 124 | } | 124 | } |
| 125 | cq_delete(ep->empty_frame_Q); | 125 | cq_delete(&ep->empty_frame_Q); |
| 126 | } | 126 | } |
| 127 | 127 | ||
| 128 | if (ep->dummy_packets_Q) { | 128 | if (ep->dummy_packets_Q) { |
| 129 | size = cq_howmany(ep->dummy_packets_Q); | 129 | size = cq_howmany(&ep->dummy_packets_Q); |
| 130 | for (; size; size--) { | 130 | for (; size; size--) { |
| 131 | u8 *buff = cq_get(ep->dummy_packets_Q); | 131 | u8 *buff = cq_get(&ep->dummy_packets_Q); |
| 132 | 132 | ||
| 133 | kfree(buff); | 133 | kfree(buff); |
| 134 | } | 134 | } |
| 135 | cq_delete(ep->dummy_packets_Q); | 135 | cq_delete(&ep->dummy_packets_Q); |
| 136 | } | 136 | } |
| 137 | 137 | ||
| 138 | kfree(ep); | 138 | kfree(ep); |
| @@ -175,10 +175,9 @@ u32 fhci_create_ep(struct fhci_usb *usb, enum fhci_mem_alloc data_mem, | |||
| 175 | ep->td_base = cpm_muram_addr(ep_offset); | 175 | ep->td_base = cpm_muram_addr(ep_offset); |
| 176 | 176 | ||
| 177 | /* zero all queue pointers */ | 177 | /* zero all queue pointers */ |
| 178 | ep->conf_frame_Q = cq_new(ring_len + 2); | 178 | if (cq_new(&ep->conf_frame_Q, ring_len + 2) || |
| 179 | ep->empty_frame_Q = cq_new(ring_len + 2); | 179 | cq_new(&ep->empty_frame_Q, ring_len + 2) || |
| 180 | ep->dummy_packets_Q = cq_new(ring_len + 2); | 180 | cq_new(&ep->dummy_packets_Q, ring_len + 2)) { |
| 181 | if (!ep->conf_frame_Q || !ep->empty_frame_Q || !ep->dummy_packets_Q) { | ||
| 182 | err_for = "frame_queues"; | 181 | err_for = "frame_queues"; |
| 183 | goto err; | 182 | goto err; |
| 184 | } | 183 | } |
| @@ -199,8 +198,8 @@ u32 fhci_create_ep(struct fhci_usb *usb, enum fhci_mem_alloc data_mem, | |||
| 199 | err_for = "buffer"; | 198 | err_for = "buffer"; |
| 200 | goto err; | 199 | goto err; |
| 201 | } | 200 | } |
| 202 | cq_put(ep->empty_frame_Q, pkt); | 201 | cq_put(&ep->empty_frame_Q, pkt); |
| 203 | cq_put(ep->dummy_packets_Q, buff); | 202 | cq_put(&ep->dummy_packets_Q, buff); |
| 204 | } | 203 | } |
| 205 | 204 | ||
| 206 | /* we put the endpoint parameter RAM right behind the TD ring */ | 205 | /* we put the endpoint parameter RAM right behind the TD ring */ |
| @@ -319,7 +318,7 @@ static void fhci_td_transaction_confirm(struct fhci_usb *usb) | |||
| 319 | if ((buf == DUMMY2_BD_BUFFER) && !(td_status & ~TD_W)) | 318 | if ((buf == DUMMY2_BD_BUFFER) && !(td_status & ~TD_W)) |
| 320 | continue; | 319 | continue; |
| 321 | 320 | ||
| 322 | pkt = cq_get(ep->conf_frame_Q); | 321 | pkt = cq_get(&ep->conf_frame_Q); |
| 323 | if (!pkt) | 322 | if (!pkt) |
| 324 | fhci_err(usb->fhci, "no frame to confirm\n"); | 323 | fhci_err(usb->fhci, "no frame to confirm\n"); |
| 325 | 324 | ||
| @@ -460,9 +459,9 @@ u32 fhci_host_transaction(struct fhci_usb *usb, | |||
| 460 | out_be16(&td->length, pkt->len); | 459 | out_be16(&td->length, pkt->len); |
| 461 | 460 | ||
| 462 | /* put the frame to the confirmation queue */ | 461 | /* put the frame to the confirmation queue */ |
| 463 | cq_put(ep->conf_frame_Q, pkt); | 462 | cq_put(&ep->conf_frame_Q, pkt); |
| 464 | 463 | ||
| 465 | if (cq_howmany(ep->conf_frame_Q) == 1) | 464 | if (cq_howmany(&ep->conf_frame_Q) == 1) |
| 466 | out_8(&usb->fhci->regs->usb_comm, USB_CMD_STR_FIFO); | 465 | out_8(&usb->fhci->regs->usb_comm, USB_CMD_STR_FIFO); |
| 467 | 466 | ||
| 468 | return 0; | 467 | return 0; |
diff --git a/drivers/usb/host/fhci.h b/drivers/usb/host/fhci.h index 7116284ed21a..72dae1c5ab38 100644 --- a/drivers/usb/host/fhci.h +++ b/drivers/usb/host/fhci.h | |||
| @@ -423,9 +423,9 @@ struct endpoint { | |||
| 423 | struct usb_td __iomem *td_base; /* first TD in the ring */ | 423 | struct usb_td __iomem *td_base; /* first TD in the ring */ |
| 424 | struct usb_td __iomem *conf_td; /* next TD for confirm after transac */ | 424 | struct usb_td __iomem *conf_td; /* next TD for confirm after transac */ |
| 425 | struct usb_td __iomem *empty_td;/* next TD for new transaction req. */ | 425 | struct usb_td __iomem *empty_td;/* next TD for new transaction req. */ |
| 426 | struct kfifo *empty_frame_Q; /* Empty frames list to use */ | 426 | struct kfifo empty_frame_Q; /* Empty frames list to use */ |
| 427 | struct kfifo *conf_frame_Q; /* frames passed to TDs, waiting for tx */ | 427 | struct kfifo conf_frame_Q; /* frames passed to TDs, waiting for tx */ |
| 428 | struct kfifo *dummy_packets_Q;/* dummy packets for the CRC overrun */ | 428 | struct kfifo dummy_packets_Q;/* dummy packets for the CRC overrun */ |
| 429 | 429 | ||
| 430 | bool already_pushed_dummy_bd; | 430 | bool already_pushed_dummy_bd; |
| 431 | }; | 431 | }; |
| @@ -493,9 +493,9 @@ static inline struct usb_hcd *fhci_to_hcd(struct fhci_hcd *fhci) | |||
| 493 | } | 493 | } |
| 494 | 494 | ||
| 495 | /* fifo of pointers */ | 495 | /* fifo of pointers */ |
| 496 | static inline struct kfifo *cq_new(int size) | 496 | static inline int cq_new(struct kfifo *fifo, int size) |
| 497 | { | 497 | { |
| 498 | return kfifo_alloc(size * sizeof(void *), GFP_KERNEL, NULL); | 498 | return kfifo_alloc(fifo, size * sizeof(void *), GFP_KERNEL); |
| 499 | } | 499 | } |
| 500 | 500 | ||
| 501 | static inline void cq_delete(struct kfifo *kfifo) | 501 | static inline void cq_delete(struct kfifo *kfifo) |
| @@ -505,19 +505,19 @@ static inline void cq_delete(struct kfifo *kfifo) | |||
| 505 | 505 | ||
| 506 | static inline unsigned int cq_howmany(struct kfifo *kfifo) | 506 | static inline unsigned int cq_howmany(struct kfifo *kfifo) |
| 507 | { | 507 | { |
| 508 | return __kfifo_len(kfifo) / sizeof(void *); | 508 | return kfifo_len(kfifo) / sizeof(void *); |
| 509 | } | 509 | } |
| 510 | 510 | ||
| 511 | static inline int cq_put(struct kfifo *kfifo, void *p) | 511 | static inline int cq_put(struct kfifo *kfifo, void *p) |
| 512 | { | 512 | { |
| 513 | return __kfifo_put(kfifo, (void *)&p, sizeof(p)); | 513 | return kfifo_in(kfifo, (void *)&p, sizeof(p)); |
| 514 | } | 514 | } |
| 515 | 515 | ||
| 516 | static inline void *cq_get(struct kfifo *kfifo) | 516 | static inline void *cq_get(struct kfifo *kfifo) |
| 517 | { | 517 | { |
| 518 | void *p = NULL; | 518 | void *p = NULL; |
| 519 | 519 | ||
| 520 | __kfifo_get(kfifo, (void *)&p, sizeof(p)); | 520 | kfifo_out(kfifo, (void *)&p, sizeof(p)); |
| 521 | return p; | 521 | return p; |
| 522 | } | 522 | } |
| 523 | 523 | ||
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c index bbe005cefcfb..f1ea3a33b6e6 100644 --- a/drivers/usb/serial/generic.c +++ b/drivers/usb/serial/generic.c | |||
| @@ -276,7 +276,7 @@ static int usb_serial_generic_write_start(struct usb_serial_port *port) | |||
| 276 | if (port->write_urb_busy) | 276 | if (port->write_urb_busy) |
| 277 | start_io = false; | 277 | start_io = false; |
| 278 | else { | 278 | else { |
| 279 | start_io = (__kfifo_len(port->write_fifo) != 0); | 279 | start_io = (kfifo_len(&port->write_fifo) != 0); |
| 280 | port->write_urb_busy = start_io; | 280 | port->write_urb_busy = start_io; |
| 281 | } | 281 | } |
| 282 | spin_unlock_irqrestore(&port->lock, flags); | 282 | spin_unlock_irqrestore(&port->lock, flags); |
| @@ -285,7 +285,7 @@ static int usb_serial_generic_write_start(struct usb_serial_port *port) | |||
| 285 | return 0; | 285 | return 0; |
| 286 | 286 | ||
| 287 | data = port->write_urb->transfer_buffer; | 287 | data = port->write_urb->transfer_buffer; |
| 288 | count = kfifo_get(port->write_fifo, data, port->bulk_out_size); | 288 | count = kfifo_out_locked(&port->write_fifo, data, port->bulk_out_size, &port->lock); |
| 289 | usb_serial_debug_data(debug, &port->dev, __func__, count, data); | 289 | usb_serial_debug_data(debug, &port->dev, __func__, count, data); |
| 290 | 290 | ||
| 291 | /* set up our urb */ | 291 | /* set up our urb */ |
| @@ -345,7 +345,7 @@ int usb_serial_generic_write(struct tty_struct *tty, | |||
| 345 | return usb_serial_multi_urb_write(tty, port, | 345 | return usb_serial_multi_urb_write(tty, port, |
| 346 | buf, count); | 346 | buf, count); |
| 347 | 347 | ||
| 348 | count = kfifo_put(port->write_fifo, buf, count); | 348 | count = kfifo_in_locked(&port->write_fifo, buf, count, &port->lock); |
| 349 | result = usb_serial_generic_write_start(port); | 349 | result = usb_serial_generic_write_start(port); |
| 350 | 350 | ||
| 351 | if (result >= 0) | 351 | if (result >= 0) |
| @@ -370,7 +370,7 @@ int usb_serial_generic_write_room(struct tty_struct *tty) | |||
| 370 | (serial->type->max_in_flight_urbs - | 370 | (serial->type->max_in_flight_urbs - |
| 371 | port->urbs_in_flight); | 371 | port->urbs_in_flight); |
| 372 | } else if (serial->num_bulk_out) | 372 | } else if (serial->num_bulk_out) |
| 373 | room = port->write_fifo->size - __kfifo_len(port->write_fifo); | 373 | room = kfifo_avail(&port->write_fifo); |
| 374 | spin_unlock_irqrestore(&port->lock, flags); | 374 | spin_unlock_irqrestore(&port->lock, flags); |
| 375 | 375 | ||
| 376 | dbg("%s - returns %d", __func__, room); | 376 | dbg("%s - returns %d", __func__, room); |
| @@ -391,7 +391,7 @@ int usb_serial_generic_chars_in_buffer(struct tty_struct *tty) | |||
| 391 | chars = port->tx_bytes_flight; | 391 | chars = port->tx_bytes_flight; |
| 392 | spin_unlock_irqrestore(&port->lock, flags); | 392 | spin_unlock_irqrestore(&port->lock, flags); |
| 393 | } else if (serial->num_bulk_out) | 393 | } else if (serial->num_bulk_out) |
| 394 | chars = kfifo_len(port->write_fifo); | 394 | chars = kfifo_len(&port->write_fifo); |
| 395 | 395 | ||
| 396 | dbg("%s - returns %d", __func__, chars); | 396 | dbg("%s - returns %d", __func__, chars); |
| 397 | return chars; | 397 | return chars; |
| @@ -507,7 +507,7 @@ void usb_serial_generic_write_bulk_callback(struct urb *urb) | |||
| 507 | if (status) { | 507 | if (status) { |
| 508 | dbg("%s - nonzero multi-urb write bulk status " | 508 | dbg("%s - nonzero multi-urb write bulk status " |
| 509 | "received: %d", __func__, status); | 509 | "received: %d", __func__, status); |
| 510 | kfifo_reset(port->write_fifo); | 510 | kfifo_reset_out(&port->write_fifo); |
| 511 | } else | 511 | } else |
| 512 | usb_serial_generic_write_start(port); | 512 | usb_serial_generic_write_start(port); |
| 513 | } | 513 | } |
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index 4543f359be75..33c85f7084f8 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c | |||
| @@ -595,8 +595,7 @@ static void port_release(struct device *dev) | |||
| 595 | usb_free_urb(port->write_urb); | 595 | usb_free_urb(port->write_urb); |
| 596 | usb_free_urb(port->interrupt_in_urb); | 596 | usb_free_urb(port->interrupt_in_urb); |
| 597 | usb_free_urb(port->interrupt_out_urb); | 597 | usb_free_urb(port->interrupt_out_urb); |
| 598 | if (!IS_ERR(port->write_fifo) && port->write_fifo) | 598 | kfifo_free(&port->write_fifo); |
| 599 | kfifo_free(port->write_fifo); | ||
| 600 | kfree(port->bulk_in_buffer); | 599 | kfree(port->bulk_in_buffer); |
| 601 | kfree(port->bulk_out_buffer); | 600 | kfree(port->bulk_out_buffer); |
| 602 | kfree(port->interrupt_in_buffer); | 601 | kfree(port->interrupt_in_buffer); |
| @@ -939,9 +938,7 @@ int usb_serial_probe(struct usb_interface *interface, | |||
| 939 | dev_err(&interface->dev, "No free urbs available\n"); | 938 | dev_err(&interface->dev, "No free urbs available\n"); |
| 940 | goto probe_error; | 939 | goto probe_error; |
| 941 | } | 940 | } |
| 942 | port->write_fifo = kfifo_alloc(PAGE_SIZE, GFP_KERNEL, | 941 | if (kfifo_alloc(&port->write_fifo, PAGE_SIZE, GFP_KERNEL)) |
| 943 | &port->lock); | ||
| 944 | if (IS_ERR(port->write_fifo)) | ||
| 945 | goto probe_error; | 942 | goto probe_error; |
| 946 | buffer_size = le16_to_cpu(endpoint->wMaxPacketSize); | 943 | buffer_size = le16_to_cpu(endpoint->wMaxPacketSize); |
| 947 | port->bulk_out_size = buffer_size; | 944 | port->bulk_out_size = buffer_size; |
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c index 2c994591f4d7..9f0bf13291e5 100644 --- a/fs/anon_inodes.c +++ b/fs/anon_inodes.c | |||
| @@ -121,13 +121,13 @@ struct file *anon_inode_getfile(const char *name, | |||
| 121 | d_instantiate(path.dentry, anon_inode_inode); | 121 | d_instantiate(path.dentry, anon_inode_inode); |
| 122 | 122 | ||
| 123 | error = -ENFILE; | 123 | error = -ENFILE; |
| 124 | file = alloc_file(&path, FMODE_READ | FMODE_WRITE, fops); | 124 | file = alloc_file(&path, OPEN_FMODE(flags), fops); |
| 125 | if (!file) | 125 | if (!file) |
| 126 | goto err_dput; | 126 | goto err_dput; |
| 127 | file->f_mapping = anon_inode_inode->i_mapping; | 127 | file->f_mapping = anon_inode_inode->i_mapping; |
| 128 | 128 | ||
| 129 | file->f_pos = 0; | 129 | file->f_pos = 0; |
| 130 | file->f_flags = O_RDWR | (flags & O_NONBLOCK); | 130 | file->f_flags = flags & (O_ACCMODE | O_NONBLOCK); |
| 131 | file->f_version = 0; | 131 | file->f_version = 0; |
| 132 | file->private_data = priv; | 132 | file->private_data = priv; |
| 133 | 133 | ||
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index 14cbc831422a..332dd00f0894 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c | |||
| @@ -1600,8 +1600,6 @@ static long do_ioctl_trans(int fd, unsigned int cmd, | |||
| 1600 | case KDSKBMETA: | 1600 | case KDSKBMETA: |
| 1601 | case KDSKBLED: | 1601 | case KDSKBLED: |
| 1602 | case KDSETLED: | 1602 | case KDSETLED: |
| 1603 | /* SG stuff */ | ||
| 1604 | case SG_SET_TRANSFORM: | ||
| 1605 | /* AUTOFS */ | 1603 | /* AUTOFS */ |
| 1606 | case AUTOFS_IOC_READY: | 1604 | case AUTOFS_IOC_READY: |
| 1607 | case AUTOFS_IOC_FAIL: | 1605 | case AUTOFS_IOC_FAIL: |
diff --git a/fs/eventfd.c b/fs/eventfd.c index 8b47e4200e65..d26402ff06ea 100644 --- a/fs/eventfd.c +++ b/fs/eventfd.c | |||
| @@ -339,7 +339,7 @@ struct file *eventfd_file_create(unsigned int count, int flags) | |||
| 339 | ctx->flags = flags; | 339 | ctx->flags = flags; |
| 340 | 340 | ||
| 341 | file = anon_inode_getfile("[eventfd]", &eventfd_fops, ctx, | 341 | file = anon_inode_getfile("[eventfd]", &eventfd_fops, ctx, |
| 342 | flags & EFD_SHARED_FCNTL_FLAGS); | 342 | O_RDWR | (flags & EFD_SHARED_FCNTL_FLAGS)); |
| 343 | if (IS_ERR(file)) | 343 | if (IS_ERR(file)) |
| 344 | eventfd_free_ctx(ctx); | 344 | eventfd_free_ctx(ctx); |
| 345 | 345 | ||
diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 366c503f9657..bd056a5b4efc 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c | |||
| @@ -1206,7 +1206,7 @@ SYSCALL_DEFINE1(epoll_create1, int, flags) | |||
| 1206 | * a file structure and a free file descriptor. | 1206 | * a file structure and a free file descriptor. |
| 1207 | */ | 1207 | */ |
| 1208 | error = anon_inode_getfd("[eventpoll]", &eventpoll_fops, ep, | 1208 | error = anon_inode_getfd("[eventpoll]", &eventpoll_fops, ep, |
| 1209 | flags & O_CLOEXEC); | 1209 | O_RDWR | (flags & O_CLOEXEC)); |
| 1210 | if (error < 0) | 1210 | if (error < 0) |
| 1211 | ep_free(ep); | 1211 | ep_free(ep); |
| 1212 | 1212 | ||
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index ad14227f509e..455e6e6e5cb9 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c | |||
| @@ -970,7 +970,7 @@ static int ext3_get_block(struct inode *inode, sector_t iblock, | |||
| 970 | if (max_blocks > DIO_MAX_BLOCKS) | 970 | if (max_blocks > DIO_MAX_BLOCKS) |
| 971 | max_blocks = DIO_MAX_BLOCKS; | 971 | max_blocks = DIO_MAX_BLOCKS; |
| 972 | handle = ext3_journal_start(inode, DIO_CREDITS + | 972 | handle = ext3_journal_start(inode, DIO_CREDITS + |
| 973 | 2 * EXT3_QUOTA_TRANS_BLOCKS(inode->i_sb)); | 973 | EXT3_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb)); |
| 974 | if (IS_ERR(handle)) { | 974 | if (IS_ERR(handle)) { |
| 975 | ret = PTR_ERR(handle); | 975 | ret = PTR_ERR(handle); |
| 976 | goto out; | 976 | goto out; |
| @@ -3146,8 +3146,8 @@ int ext3_setattr(struct dentry *dentry, struct iattr *attr) | |||
| 3146 | 3146 | ||
| 3147 | /* (user+group)*(old+new) structure, inode write (sb, | 3147 | /* (user+group)*(old+new) structure, inode write (sb, |
| 3148 | * inode block, ? - but truncate inode update has it) */ | 3148 | * inode block, ? - but truncate inode update has it) */ |
| 3149 | handle = ext3_journal_start(inode, 2*(EXT3_QUOTA_INIT_BLOCKS(inode->i_sb)+ | 3149 | handle = ext3_journal_start(inode, EXT3_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+ |
| 3150 | EXT3_QUOTA_DEL_BLOCKS(inode->i_sb))+3); | 3150 | EXT3_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)+3); |
| 3151 | if (IS_ERR(handle)) { | 3151 | if (IS_ERR(handle)) { |
| 3152 | error = PTR_ERR(handle); | 3152 | error = PTR_ERR(handle); |
| 3153 | goto err_out; | 3153 | goto err_out; |
| @@ -3239,7 +3239,7 @@ static int ext3_writepage_trans_blocks(struct inode *inode) | |||
| 3239 | #ifdef CONFIG_QUOTA | 3239 | #ifdef CONFIG_QUOTA |
| 3240 | /* We know that structure was already allocated during vfs_dq_init so | 3240 | /* We know that structure was already allocated during vfs_dq_init so |
| 3241 | * we will be updating only the data blocks + inodes */ | 3241 | * we will be updating only the data blocks + inodes */ |
| 3242 | ret += 2*EXT3_QUOTA_TRANS_BLOCKS(inode->i_sb); | 3242 | ret += EXT3_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb); |
| 3243 | #endif | 3243 | #endif |
| 3244 | 3244 | ||
| 3245 | return ret; | 3245 | return ret; |
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c index aad6400c9b77..7b0e44f7d66f 100644 --- a/fs/ext3/namei.c +++ b/fs/ext3/namei.c | |||
| @@ -1699,7 +1699,7 @@ static int ext3_create (struct inode * dir, struct dentry * dentry, int mode, | |||
| 1699 | retry: | 1699 | retry: |
| 1700 | handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) + | 1700 | handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) + |
| 1701 | EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 + | 1701 | EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 + |
| 1702 | 2*EXT3_QUOTA_INIT_BLOCKS(dir->i_sb)); | 1702 | EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb)); |
| 1703 | if (IS_ERR(handle)) | 1703 | if (IS_ERR(handle)) |
| 1704 | return PTR_ERR(handle); | 1704 | return PTR_ERR(handle); |
| 1705 | 1705 | ||
| @@ -1733,7 +1733,7 @@ static int ext3_mknod (struct inode * dir, struct dentry *dentry, | |||
| 1733 | retry: | 1733 | retry: |
| 1734 | handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) + | 1734 | handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) + |
| 1735 | EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 + | 1735 | EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 + |
| 1736 | 2*EXT3_QUOTA_INIT_BLOCKS(dir->i_sb)); | 1736 | EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb)); |
| 1737 | if (IS_ERR(handle)) | 1737 | if (IS_ERR(handle)) |
| 1738 | return PTR_ERR(handle); | 1738 | return PTR_ERR(handle); |
| 1739 | 1739 | ||
| @@ -1769,7 +1769,7 @@ static int ext3_mkdir(struct inode * dir, struct dentry * dentry, int mode) | |||
| 1769 | retry: | 1769 | retry: |
| 1770 | handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) + | 1770 | handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) + |
| 1771 | EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 + | 1771 | EXT3_INDEX_EXTRA_TRANS_BLOCKS + 3 + |
| 1772 | 2*EXT3_QUOTA_INIT_BLOCKS(dir->i_sb)); | 1772 | EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb)); |
| 1773 | if (IS_ERR(handle)) | 1773 | if (IS_ERR(handle)) |
| 1774 | return PTR_ERR(handle); | 1774 | return PTR_ERR(handle); |
| 1775 | 1775 | ||
| @@ -1920,7 +1920,7 @@ int ext3_orphan_add(handle_t *handle, struct inode *inode) | |||
| 1920 | struct ext3_iloc iloc; | 1920 | struct ext3_iloc iloc; |
| 1921 | int err = 0, rc; | 1921 | int err = 0, rc; |
| 1922 | 1922 | ||
| 1923 | lock_super(sb); | 1923 | mutex_lock(&EXT3_SB(sb)->s_orphan_lock); |
| 1924 | if (!list_empty(&EXT3_I(inode)->i_orphan)) | 1924 | if (!list_empty(&EXT3_I(inode)->i_orphan)) |
| 1925 | goto out_unlock; | 1925 | goto out_unlock; |
| 1926 | 1926 | ||
| @@ -1929,9 +1929,13 @@ int ext3_orphan_add(handle_t *handle, struct inode *inode) | |||
| 1929 | 1929 | ||
| 1930 | /* @@@ FIXME: Observation from aviro: | 1930 | /* @@@ FIXME: Observation from aviro: |
| 1931 | * I think I can trigger J_ASSERT in ext3_orphan_add(). We block | 1931 | * I think I can trigger J_ASSERT in ext3_orphan_add(). We block |
| 1932 | * here (on lock_super()), so race with ext3_link() which might bump | 1932 | * here (on s_orphan_lock), so race with ext3_link() which might bump |
| 1933 | * ->i_nlink. For, say it, character device. Not a regular file, | 1933 | * ->i_nlink. For, say it, character device. Not a regular file, |
| 1934 | * not a directory, not a symlink and ->i_nlink > 0. | 1934 | * not a directory, not a symlink and ->i_nlink > 0. |
| 1935 | * | ||
| 1936 | * tytso, 4/25/2009: I'm not sure how that could happen; | ||
| 1937 | * shouldn't the fs core protect us from these sort of | ||
| 1938 | * unlink()/link() races? | ||
| 1935 | */ | 1939 | */ |
| 1936 | J_ASSERT ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || | 1940 | J_ASSERT ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || |
| 1937 | S_ISLNK(inode->i_mode)) || inode->i_nlink == 0); | 1941 | S_ISLNK(inode->i_mode)) || inode->i_nlink == 0); |
| @@ -1968,7 +1972,7 @@ int ext3_orphan_add(handle_t *handle, struct inode *inode) | |||
| 1968 | jbd_debug(4, "orphan inode %lu will point to %d\n", | 1972 | jbd_debug(4, "orphan inode %lu will point to %d\n", |
| 1969 | inode->i_ino, NEXT_ORPHAN(inode)); | 1973 | inode->i_ino, NEXT_ORPHAN(inode)); |
| 1970 | out_unlock: | 1974 | out_unlock: |
| 1971 | unlock_super(sb); | 1975 | mutex_unlock(&EXT3_SB(sb)->s_orphan_lock); |
| 1972 | ext3_std_error(inode->i_sb, err); | 1976 | ext3_std_error(inode->i_sb, err); |
| 1973 | return err; | 1977 | return err; |
| 1974 | } | 1978 | } |
| @@ -1986,11 +1990,9 @@ int ext3_orphan_del(handle_t *handle, struct inode *inode) | |||
| 1986 | struct ext3_iloc iloc; | 1990 | struct ext3_iloc iloc; |
| 1987 | int err = 0; | 1991 | int err = 0; |
| 1988 | 1992 | ||
| 1989 | lock_super(inode->i_sb); | 1993 | mutex_lock(&EXT3_SB(inode->i_sb)->s_orphan_lock); |
| 1990 | if (list_empty(&ei->i_orphan)) { | 1994 | if (list_empty(&ei->i_orphan)) |
| 1991 | unlock_super(inode->i_sb); | 1995 | goto out; |
| 1992 | return 0; | ||
| 1993 | } | ||
| 1994 | 1996 | ||
| 1995 | ino_next = NEXT_ORPHAN(inode); | 1997 | ino_next = NEXT_ORPHAN(inode); |
| 1996 | prev = ei->i_orphan.prev; | 1998 | prev = ei->i_orphan.prev; |
| @@ -2040,7 +2042,7 @@ int ext3_orphan_del(handle_t *handle, struct inode *inode) | |||
| 2040 | out_err: | 2042 | out_err: |
| 2041 | ext3_std_error(inode->i_sb, err); | 2043 | ext3_std_error(inode->i_sb, err); |
| 2042 | out: | 2044 | out: |
| 2043 | unlock_super(inode->i_sb); | 2045 | mutex_unlock(&EXT3_SB(inode->i_sb)->s_orphan_lock); |
| 2044 | return err; | 2046 | return err; |
| 2045 | 2047 | ||
| 2046 | out_brelse: | 2048 | out_brelse: |
| @@ -2175,7 +2177,7 @@ static int ext3_symlink (struct inode * dir, | |||
| 2175 | retry: | 2177 | retry: |
| 2176 | handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) + | 2178 | handle = ext3_journal_start(dir, EXT3_DATA_TRANS_BLOCKS(dir->i_sb) + |
| 2177 | EXT3_INDEX_EXTRA_TRANS_BLOCKS + 5 + | 2179 | EXT3_INDEX_EXTRA_TRANS_BLOCKS + 5 + |
| 2178 | 2*EXT3_QUOTA_INIT_BLOCKS(dir->i_sb)); | 2180 | EXT3_MAXQUOTAS_INIT_BLOCKS(dir->i_sb)); |
| 2179 | if (IS_ERR(handle)) | 2181 | if (IS_ERR(handle)) |
| 2180 | return PTR_ERR(handle); | 2182 | return PTR_ERR(handle); |
| 2181 | 2183 | ||
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c index 5f83b6179178..54351ac7cef9 100644 --- a/fs/ext3/resize.c +++ b/fs/ext3/resize.c | |||
| @@ -209,7 +209,7 @@ static int setup_new_group_blocks(struct super_block *sb, | |||
| 209 | if (IS_ERR(handle)) | 209 | if (IS_ERR(handle)) |
| 210 | return PTR_ERR(handle); | 210 | return PTR_ERR(handle); |
| 211 | 211 | ||
| 212 | lock_super(sb); | 212 | mutex_lock(&sbi->s_resize_lock); |
| 213 | if (input->group != sbi->s_groups_count) { | 213 | if (input->group != sbi->s_groups_count) { |
| 214 | err = -EBUSY; | 214 | err = -EBUSY; |
| 215 | goto exit_journal; | 215 | goto exit_journal; |
| @@ -324,7 +324,7 @@ exit_bh: | |||
| 324 | brelse(bh); | 324 | brelse(bh); |
| 325 | 325 | ||
| 326 | exit_journal: | 326 | exit_journal: |
| 327 | unlock_super(sb); | 327 | mutex_unlock(&sbi->s_resize_lock); |
| 328 | if ((err2 = ext3_journal_stop(handle)) && !err) | 328 | if ((err2 = ext3_journal_stop(handle)) && !err) |
| 329 | err = err2; | 329 | err = err2; |
| 330 | 330 | ||
| @@ -662,11 +662,12 @@ exit_free: | |||
| 662 | * important part is that the new block and inode counts are in the backup | 662 | * important part is that the new block and inode counts are in the backup |
| 663 | * superblocks, and the location of the new group metadata in the GDT backups. | 663 | * superblocks, and the location of the new group metadata in the GDT backups. |
| 664 | * | 664 | * |
| 665 | * We do not need lock_super() for this, because these blocks are not | 665 | * We do not need take the s_resize_lock for this, because these |
| 666 | * otherwise touched by the filesystem code when it is mounted. We don't | 666 | * blocks are not otherwise touched by the filesystem code when it is |
| 667 | * need to worry about last changing from sbi->s_groups_count, because the | 667 | * mounted. We don't need to worry about last changing from |
| 668 | * worst that can happen is that we do not copy the full number of backups | 668 | * sbi->s_groups_count, because the worst that can happen is that we |
| 669 | * at this time. The resize which changed s_groups_count will backup again. | 669 | * do not copy the full number of backups at this time. The resize |
| 670 | * which changed s_groups_count will backup again. | ||
| 670 | */ | 671 | */ |
| 671 | static void update_backups(struct super_block *sb, | 672 | static void update_backups(struct super_block *sb, |
| 672 | int blk_off, char *data, int size) | 673 | int blk_off, char *data, int size) |
| @@ -825,7 +826,7 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input) | |||
| 825 | goto exit_put; | 826 | goto exit_put; |
| 826 | } | 827 | } |
| 827 | 828 | ||
| 828 | lock_super(sb); | 829 | mutex_lock(&sbi->s_resize_lock); |
| 829 | if (input->group != sbi->s_groups_count) { | 830 | if (input->group != sbi->s_groups_count) { |
| 830 | ext3_warning(sb, __func__, | 831 | ext3_warning(sb, __func__, |
| 831 | "multiple resizers run on filesystem!"); | 832 | "multiple resizers run on filesystem!"); |
| @@ -856,7 +857,7 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input) | |||
| 856 | /* | 857 | /* |
| 857 | * OK, now we've set up the new group. Time to make it active. | 858 | * OK, now we've set up the new group. Time to make it active. |
| 858 | * | 859 | * |
| 859 | * Current kernels don't lock all allocations via lock_super(), | 860 | * We do not lock all allocations via s_resize_lock |
| 860 | * so we have to be safe wrt. concurrent accesses the group | 861 | * so we have to be safe wrt. concurrent accesses the group |
| 861 | * data. So we need to be careful to set all of the relevant | 862 | * data. So we need to be careful to set all of the relevant |
| 862 | * group descriptor data etc. *before* we enable the group. | 863 | * group descriptor data etc. *before* we enable the group. |
| @@ -900,12 +901,12 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input) | |||
| 900 | * | 901 | * |
| 901 | * The precise rules we use are: | 902 | * The precise rules we use are: |
| 902 | * | 903 | * |
| 903 | * * Writers of s_groups_count *must* hold lock_super | 904 | * * Writers of s_groups_count *must* hold s_resize_lock |
| 904 | * AND | 905 | * AND |
| 905 | * * Writers must perform a smp_wmb() after updating all dependent | 906 | * * Writers must perform a smp_wmb() after updating all dependent |
| 906 | * data and before modifying the groups count | 907 | * data and before modifying the groups count |
| 907 | * | 908 | * |
| 908 | * * Readers must hold lock_super() over the access | 909 | * * Readers must hold s_resize_lock over the access |
| 909 | * OR | 910 | * OR |
| 910 | * * Readers must perform an smp_rmb() after reading the groups count | 911 | * * Readers must perform an smp_rmb() after reading the groups count |
| 911 | * and before reading any dependent data. | 912 | * and before reading any dependent data. |
| @@ -936,7 +937,7 @@ int ext3_group_add(struct super_block *sb, struct ext3_new_group_data *input) | |||
| 936 | ext3_journal_dirty_metadata(handle, sbi->s_sbh); | 937 | ext3_journal_dirty_metadata(handle, sbi->s_sbh); |
| 937 | 938 | ||
| 938 | exit_journal: | 939 | exit_journal: |
| 939 | unlock_super(sb); | 940 | mutex_unlock(&sbi->s_resize_lock); |
| 940 | if ((err2 = ext3_journal_stop(handle)) && !err) | 941 | if ((err2 = ext3_journal_stop(handle)) && !err) |
| 941 | err = err2; | 942 | err = err2; |
| 942 | if (!err) { | 943 | if (!err) { |
| @@ -973,7 +974,7 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es, | |||
| 973 | 974 | ||
| 974 | /* We don't need to worry about locking wrt other resizers just | 975 | /* We don't need to worry about locking wrt other resizers just |
| 975 | * yet: we're going to revalidate es->s_blocks_count after | 976 | * yet: we're going to revalidate es->s_blocks_count after |
| 976 | * taking lock_super() below. */ | 977 | * taking the s_resize_lock below. */ |
| 977 | o_blocks_count = le32_to_cpu(es->s_blocks_count); | 978 | o_blocks_count = le32_to_cpu(es->s_blocks_count); |
| 978 | o_groups_count = EXT3_SB(sb)->s_groups_count; | 979 | o_groups_count = EXT3_SB(sb)->s_groups_count; |
| 979 | 980 | ||
| @@ -1045,11 +1046,11 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es, | |||
| 1045 | goto exit_put; | 1046 | goto exit_put; |
| 1046 | } | 1047 | } |
| 1047 | 1048 | ||
| 1048 | lock_super(sb); | 1049 | mutex_lock(&EXT3_SB(sb)->s_resize_lock); |
| 1049 | if (o_blocks_count != le32_to_cpu(es->s_blocks_count)) { | 1050 | if (o_blocks_count != le32_to_cpu(es->s_blocks_count)) { |
| 1050 | ext3_warning(sb, __func__, | 1051 | ext3_warning(sb, __func__, |
| 1051 | "multiple resizers run on filesystem!"); | 1052 | "multiple resizers run on filesystem!"); |
| 1052 | unlock_super(sb); | 1053 | mutex_unlock(&EXT3_SB(sb)->s_resize_lock); |
| 1053 | ext3_journal_stop(handle); | 1054 | ext3_journal_stop(handle); |
| 1054 | err = -EBUSY; | 1055 | err = -EBUSY; |
| 1055 | goto exit_put; | 1056 | goto exit_put; |
| @@ -1059,13 +1060,13 @@ int ext3_group_extend(struct super_block *sb, struct ext3_super_block *es, | |||
| 1059 | EXT3_SB(sb)->s_sbh))) { | 1060 | EXT3_SB(sb)->s_sbh))) { |
| 1060 | ext3_warning(sb, __func__, | 1061 | ext3_warning(sb, __func__, |
| 1061 | "error %d on journal write access", err); | 1062 | "error %d on journal write access", err); |
| 1062 | unlock_super(sb); | 1063 | mutex_unlock(&EXT3_SB(sb)->s_resize_lock); |
| 1063 | ext3_journal_stop(handle); | 1064 | ext3_journal_stop(handle); |
| 1064 | goto exit_put; | 1065 | goto exit_put; |
| 1065 | } | 1066 | } |
| 1066 | es->s_blocks_count = cpu_to_le32(o_blocks_count + add); | 1067 | es->s_blocks_count = cpu_to_le32(o_blocks_count + add); |
| 1067 | ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh); | 1068 | ext3_journal_dirty_metadata(handle, EXT3_SB(sb)->s_sbh); |
| 1068 | unlock_super(sb); | 1069 | mutex_unlock(&EXT3_SB(sb)->s_resize_lock); |
| 1069 | ext3_debug("freeing blocks %lu through "E3FSBLK"\n", o_blocks_count, | 1070 | ext3_debug("freeing blocks %lu through "E3FSBLK"\n", o_blocks_count, |
| 1070 | o_blocks_count + add); | 1071 | o_blocks_count + add); |
| 1071 | ext3_free_blocks_sb(handle, sb, o_blocks_count, add, &freed_blocks); | 1072 | ext3_free_blocks_sb(handle, sb, o_blocks_count, add, &freed_blocks); |
diff --git a/fs/ext3/super.c b/fs/ext3/super.c index 7ad1e8c30bd0..afa2b569da10 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c | |||
| @@ -1928,6 +1928,8 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) | |||
| 1928 | sb->dq_op = &ext3_quota_operations; | 1928 | sb->dq_op = &ext3_quota_operations; |
| 1929 | #endif | 1929 | #endif |
| 1930 | INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */ | 1930 | INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */ |
| 1931 | mutex_init(&sbi->s_orphan_lock); | ||
| 1932 | mutex_init(&sbi->s_resize_lock); | ||
| 1931 | 1933 | ||
| 1932 | sb->s_root = NULL; | 1934 | sb->s_root = NULL; |
| 1933 | 1935 | ||
| @@ -2014,14 +2016,7 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) | |||
| 2014 | } | 2016 | } |
| 2015 | 2017 | ||
| 2016 | ext3_setup_super (sb, es, sb->s_flags & MS_RDONLY); | 2018 | ext3_setup_super (sb, es, sb->s_flags & MS_RDONLY); |
| 2017 | /* | 2019 | |
| 2018 | * akpm: core read_super() calls in here with the superblock locked. | ||
| 2019 | * That deadlocks, because orphan cleanup needs to lock the superblock | ||
| 2020 | * in numerous places. Here we just pop the lock - it's relatively | ||
| 2021 | * harmless, because we are now ready to accept write_super() requests, | ||
| 2022 | * and aviro says that's the only reason for hanging onto the | ||
| 2023 | * superblock lock. | ||
| 2024 | */ | ||
| 2025 | EXT3_SB(sb)->s_mount_state |= EXT3_ORPHAN_FS; | 2020 | EXT3_SB(sb)->s_mount_state |= EXT3_ORPHAN_FS; |
| 2026 | ext3_orphan_cleanup(sb, es); | 2021 | ext3_orphan_cleanup(sb, es); |
| 2027 | EXT3_SB(sb)->s_mount_state &= ~EXT3_ORPHAN_FS; | 2022 | EXT3_SB(sb)->s_mount_state &= ~EXT3_ORPHAN_FS; |
| @@ -2403,13 +2398,11 @@ static void ext3_mark_recovery_complete(struct super_block * sb, | |||
| 2403 | if (journal_flush(journal) < 0) | 2398 | if (journal_flush(journal) < 0) |
| 2404 | goto out; | 2399 | goto out; |
| 2405 | 2400 | ||
| 2406 | lock_super(sb); | ||
| 2407 | if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER) && | 2401 | if (EXT3_HAS_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER) && |
| 2408 | sb->s_flags & MS_RDONLY) { | 2402 | sb->s_flags & MS_RDONLY) { |
| 2409 | EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER); | 2403 | EXT3_CLEAR_INCOMPAT_FEATURE(sb, EXT3_FEATURE_INCOMPAT_RECOVER); |
| 2410 | ext3_commit_super(sb, es, 1); | 2404 | ext3_commit_super(sb, es, 1); |
| 2411 | } | 2405 | } |
| 2412 | unlock_super(sb); | ||
| 2413 | 2406 | ||
| 2414 | out: | 2407 | out: |
| 2415 | journal_unlock_updates(journal); | 2408 | journal_unlock_updates(journal); |
| @@ -2601,13 +2594,7 @@ static int ext3_remount (struct super_block * sb, int * flags, char * data) | |||
| 2601 | (sbi->s_mount_state & EXT3_VALID_FS)) | 2594 | (sbi->s_mount_state & EXT3_VALID_FS)) |
| 2602 | es->s_state = cpu_to_le16(sbi->s_mount_state); | 2595 | es->s_state = cpu_to_le16(sbi->s_mount_state); |
| 2603 | 2596 | ||
| 2604 | /* | ||
| 2605 | * We have to unlock super so that we can wait for | ||
| 2606 | * transactions. | ||
| 2607 | */ | ||
| 2608 | unlock_super(sb); | ||
| 2609 | ext3_mark_recovery_complete(sb, es); | 2597 | ext3_mark_recovery_complete(sb, es); |
| 2610 | lock_super(sb); | ||
| 2611 | } else { | 2598 | } else { |
| 2612 | __le32 ret; | 2599 | __le32 ret; |
| 2613 | if ((ret = EXT3_HAS_RO_COMPAT_FEATURE(sb, | 2600 | if ((ret = EXT3_HAS_RO_COMPAT_FEATURE(sb, |
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index ab31e65d46d0..56f9271ee8cc 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h | |||
| @@ -704,6 +704,10 @@ struct ext4_inode_info { | |||
| 704 | __u16 i_extra_isize; | 704 | __u16 i_extra_isize; |
| 705 | 705 | ||
| 706 | spinlock_t i_block_reservation_lock; | 706 | spinlock_t i_block_reservation_lock; |
| 707 | #ifdef CONFIG_QUOTA | ||
| 708 | /* quota space reservation, managed internally by quota code */ | ||
| 709 | qsize_t i_reserved_quota; | ||
| 710 | #endif | ||
| 707 | 711 | ||
| 708 | /* completed async DIOs that might need unwritten extents handling */ | 712 | /* completed async DIOs that might need unwritten extents handling */ |
| 709 | struct list_head i_aio_dio_complete_list; | 713 | struct list_head i_aio_dio_complete_list; |
| @@ -1435,7 +1439,7 @@ extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks); | |||
| 1435 | extern int ext4_block_truncate_page(handle_t *handle, | 1439 | extern int ext4_block_truncate_page(handle_t *handle, |
| 1436 | struct address_space *mapping, loff_t from); | 1440 | struct address_space *mapping, loff_t from); |
| 1437 | extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); | 1441 | extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); |
| 1438 | extern qsize_t ext4_get_reserved_space(struct inode *inode); | 1442 | extern qsize_t *ext4_get_reserved_space(struct inode *inode); |
| 1439 | extern int flush_aio_dio_completed_IO(struct inode *inode); | 1443 | extern int flush_aio_dio_completed_IO(struct inode *inode); |
| 1440 | /* ioctl.c */ | 1444 | /* ioctl.c */ |
| 1441 | extern long ext4_ioctl(struct file *, unsigned int, unsigned long); | 1445 | extern long ext4_ioctl(struct file *, unsigned int, unsigned long); |
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 5352db1a3086..ab807963a614 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
| @@ -1003,17 +1003,12 @@ out: | |||
| 1003 | return err; | 1003 | return err; |
| 1004 | } | 1004 | } |
| 1005 | 1005 | ||
| 1006 | qsize_t ext4_get_reserved_space(struct inode *inode) | 1006 | #ifdef CONFIG_QUOTA |
| 1007 | qsize_t *ext4_get_reserved_space(struct inode *inode) | ||
| 1007 | { | 1008 | { |
| 1008 | unsigned long long total; | 1009 | return &EXT4_I(inode)->i_reserved_quota; |
| 1009 | |||
| 1010 | spin_lock(&EXT4_I(inode)->i_block_reservation_lock); | ||
| 1011 | total = EXT4_I(inode)->i_reserved_data_blocks + | ||
| 1012 | EXT4_I(inode)->i_reserved_meta_blocks; | ||
| 1013 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); | ||
| 1014 | |||
| 1015 | return (total << inode->i_blkbits); | ||
| 1016 | } | 1010 | } |
| 1011 | #endif | ||
| 1017 | /* | 1012 | /* |
| 1018 | * Calculate the number of metadata blocks need to reserve | 1013 | * Calculate the number of metadata blocks need to reserve |
| 1019 | * to allocate @blocks for non extent file based file | 1014 | * to allocate @blocks for non extent file based file |
| @@ -1051,7 +1046,7 @@ static int ext4_calc_metadata_amount(struct inode *inode, int blocks) | |||
| 1051 | static void ext4_da_update_reserve_space(struct inode *inode, int used) | 1046 | static void ext4_da_update_reserve_space(struct inode *inode, int used) |
| 1052 | { | 1047 | { |
| 1053 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); | 1048 | struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); |
| 1054 | int total, mdb, mdb_free; | 1049 | int total, mdb, mdb_free, mdb_claim = 0; |
| 1055 | 1050 | ||
| 1056 | spin_lock(&EXT4_I(inode)->i_block_reservation_lock); | 1051 | spin_lock(&EXT4_I(inode)->i_block_reservation_lock); |
| 1057 | /* recalculate the number of metablocks still need to be reserved */ | 1052 | /* recalculate the number of metablocks still need to be reserved */ |
| @@ -1064,7 +1059,9 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used) | |||
| 1064 | 1059 | ||
| 1065 | if (mdb_free) { | 1060 | if (mdb_free) { |
| 1066 | /* Account for allocated meta_blocks */ | 1061 | /* Account for allocated meta_blocks */ |
| 1067 | mdb_free -= EXT4_I(inode)->i_allocated_meta_blocks; | 1062 | mdb_claim = EXT4_I(inode)->i_allocated_meta_blocks; |
| 1063 | BUG_ON(mdb_free < mdb_claim); | ||
| 1064 | mdb_free -= mdb_claim; | ||
| 1068 | 1065 | ||
| 1069 | /* update fs dirty blocks counter */ | 1066 | /* update fs dirty blocks counter */ |
| 1070 | percpu_counter_sub(&sbi->s_dirtyblocks_counter, mdb_free); | 1067 | percpu_counter_sub(&sbi->s_dirtyblocks_counter, mdb_free); |
| @@ -1075,8 +1072,11 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used) | |||
| 1075 | /* update per-inode reservations */ | 1072 | /* update per-inode reservations */ |
| 1076 | BUG_ON(used > EXT4_I(inode)->i_reserved_data_blocks); | 1073 | BUG_ON(used > EXT4_I(inode)->i_reserved_data_blocks); |
| 1077 | EXT4_I(inode)->i_reserved_data_blocks -= used; | 1074 | EXT4_I(inode)->i_reserved_data_blocks -= used; |
| 1075 | percpu_counter_sub(&sbi->s_dirtyblocks_counter, used + mdb_claim); | ||
| 1078 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); | 1076 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); |
| 1079 | 1077 | ||
| 1078 | vfs_dq_claim_block(inode, used + mdb_claim); | ||
| 1079 | |||
| 1080 | /* | 1080 | /* |
| 1081 | * free those over-booking quota for metadata blocks | 1081 | * free those over-booking quota for metadata blocks |
| 1082 | */ | 1082 | */ |
| @@ -1816,19 +1816,17 @@ repeat: | |||
| 1816 | 1816 | ||
| 1817 | md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks; | 1817 | md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks; |
| 1818 | total = md_needed + nrblocks; | 1818 | total = md_needed + nrblocks; |
| 1819 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); | ||
| 1819 | 1820 | ||
| 1820 | /* | 1821 | /* |
| 1821 | * Make quota reservation here to prevent quota overflow | 1822 | * Make quota reservation here to prevent quota overflow |
| 1822 | * later. Real quota accounting is done at pages writeout | 1823 | * later. Real quota accounting is done at pages writeout |
| 1823 | * time. | 1824 | * time. |
| 1824 | */ | 1825 | */ |
| 1825 | if (vfs_dq_reserve_block(inode, total)) { | 1826 | if (vfs_dq_reserve_block(inode, total)) |
| 1826 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); | ||
| 1827 | return -EDQUOT; | 1827 | return -EDQUOT; |
| 1828 | } | ||
| 1829 | 1828 | ||
| 1830 | if (ext4_claim_free_blocks(sbi, total)) { | 1829 | if (ext4_claim_free_blocks(sbi, total)) { |
| 1831 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); | ||
| 1832 | vfs_dq_release_reservation_block(inode, total); | 1830 | vfs_dq_release_reservation_block(inode, total); |
| 1833 | if (ext4_should_retry_alloc(inode->i_sb, &retries)) { | 1831 | if (ext4_should_retry_alloc(inode->i_sb, &retries)) { |
| 1834 | yield(); | 1832 | yield(); |
| @@ -1836,10 +1834,11 @@ repeat: | |||
| 1836 | } | 1834 | } |
| 1837 | return -ENOSPC; | 1835 | return -ENOSPC; |
| 1838 | } | 1836 | } |
| 1837 | spin_lock(&EXT4_I(inode)->i_block_reservation_lock); | ||
| 1839 | EXT4_I(inode)->i_reserved_data_blocks += nrblocks; | 1838 | EXT4_I(inode)->i_reserved_data_blocks += nrblocks; |
| 1840 | EXT4_I(inode)->i_reserved_meta_blocks = mdblocks; | 1839 | EXT4_I(inode)->i_reserved_meta_blocks += md_needed; |
| 1841 | |||
| 1842 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); | 1840 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); |
| 1841 | |||
| 1843 | return 0; /* success */ | 1842 | return 0; /* success */ |
| 1844 | } | 1843 | } |
| 1845 | 1844 | ||
| @@ -4794,6 +4793,9 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino) | |||
| 4794 | ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32; | 4793 | ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32; |
| 4795 | inode->i_size = ext4_isize(raw_inode); | 4794 | inode->i_size = ext4_isize(raw_inode); |
| 4796 | ei->i_disksize = inode->i_size; | 4795 | ei->i_disksize = inode->i_size; |
| 4796 | #ifdef CONFIG_QUOTA | ||
| 4797 | ei->i_reserved_quota = 0; | ||
| 4798 | #endif | ||
| 4797 | inode->i_generation = le32_to_cpu(raw_inode->i_generation); | 4799 | inode->i_generation = le32_to_cpu(raw_inode->i_generation); |
| 4798 | ei->i_block_group = iloc.block_group; | 4800 | ei->i_block_group = iloc.block_group; |
| 4799 | ei->i_last_alloc_group = ~0; | 4801 | ei->i_last_alloc_group = ~0; |
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index b1fd3daadc9c..d34afad3e137 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c | |||
| @@ -2755,12 +2755,6 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, | |||
| 2755 | if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED)) | 2755 | if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED)) |
| 2756 | /* release all the reserved blocks if non delalloc */ | 2756 | /* release all the reserved blocks if non delalloc */ |
| 2757 | percpu_counter_sub(&sbi->s_dirtyblocks_counter, reserv_blks); | 2757 | percpu_counter_sub(&sbi->s_dirtyblocks_counter, reserv_blks); |
| 2758 | else { | ||
| 2759 | percpu_counter_sub(&sbi->s_dirtyblocks_counter, | ||
| 2760 | ac->ac_b_ex.fe_len); | ||
| 2761 | /* convert reserved quota blocks to real quota blocks */ | ||
| 2762 | vfs_dq_claim_block(ac->ac_inode, ac->ac_b_ex.fe_len); | ||
| 2763 | } | ||
| 2764 | 2758 | ||
| 2765 | if (sbi->s_log_groups_per_flex) { | 2759 | if (sbi->s_log_groups_per_flex) { |
| 2766 | ext4_group_t flex_group = ext4_flex_group(sbi, | 2760 | ext4_group_t flex_group = ext4_flex_group(sbi, |
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 827bde1f2594..6ed9aa91f27d 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c | |||
| @@ -704,6 +704,9 @@ static struct inode *ext4_alloc_inode(struct super_block *sb) | |||
| 704 | ei->i_allocated_meta_blocks = 0; | 704 | ei->i_allocated_meta_blocks = 0; |
| 705 | ei->i_delalloc_reserved_flag = 0; | 705 | ei->i_delalloc_reserved_flag = 0; |
| 706 | spin_lock_init(&(ei->i_block_reservation_lock)); | 706 | spin_lock_init(&(ei->i_block_reservation_lock)); |
| 707 | #ifdef CONFIG_QUOTA | ||
| 708 | ei->i_reserved_quota = 0; | ||
| 709 | #endif | ||
| 707 | INIT_LIST_HEAD(&ei->i_aio_dio_complete_list); | 710 | INIT_LIST_HEAD(&ei->i_aio_dio_complete_list); |
| 708 | ei->cur_aio_dio = NULL; | 711 | ei->cur_aio_dio = NULL; |
| 709 | ei->i_sync_tid = 0; | 712 | ei->i_sync_tid = 0; |
| @@ -1014,7 +1017,9 @@ static const struct dquot_operations ext4_quota_operations = { | |||
| 1014 | .reserve_space = dquot_reserve_space, | 1017 | .reserve_space = dquot_reserve_space, |
| 1015 | .claim_space = dquot_claim_space, | 1018 | .claim_space = dquot_claim_space, |
| 1016 | .release_rsv = dquot_release_reserved_space, | 1019 | .release_rsv = dquot_release_reserved_space, |
| 1020 | #ifdef CONFIG_QUOTA | ||
| 1017 | .get_reserved_space = ext4_get_reserved_space, | 1021 | .get_reserved_space = ext4_get_reserved_space, |
| 1022 | #endif | ||
| 1018 | .alloc_inode = dquot_alloc_inode, | 1023 | .alloc_inode = dquot_alloc_inode, |
| 1019 | .free_space = dquot_free_space, | 1024 | .free_space = dquot_free_space, |
| 1020 | .free_inode = dquot_free_inode, | 1025 | .free_inode = dquot_free_inode, |
diff --git a/fs/file_table.c b/fs/file_table.c index 0afacf654398..69652c5bd5f0 100644 --- a/fs/file_table.c +++ b/fs/file_table.c | |||
| @@ -186,10 +186,8 @@ struct file *alloc_file(struct path *path, fmode_t mode, | |||
| 186 | * that we can do debugging checks at __fput() | 186 | * that we can do debugging checks at __fput() |
| 187 | */ | 187 | */ |
| 188 | if ((mode & FMODE_WRITE) && !special_file(path->dentry->d_inode->i_mode)) { | 188 | if ((mode & FMODE_WRITE) && !special_file(path->dentry->d_inode->i_mode)) { |
| 189 | int error = 0; | ||
| 190 | file_take_write(file); | 189 | file_take_write(file); |
| 191 | error = mnt_clone_write(path->mnt); | 190 | WARN_ON(mnt_clone_write(path->mnt)); |
| 192 | WARN_ON(error); | ||
| 193 | } | 191 | } |
| 194 | ima_counts_get(file); | 192 | ima_counts_get(file); |
| 195 | return file; | 193 | return file; |
diff --git a/fs/internal.h b/fs/internal.h index f67cd141d9a8..e96a1667d749 100644 --- a/fs/internal.h +++ b/fs/internal.h | |||
| @@ -85,3 +85,10 @@ extern struct file *get_empty_filp(void); | |||
| 85 | * super.c | 85 | * super.c |
| 86 | */ | 86 | */ |
| 87 | extern int do_remount_sb(struct super_block *, int, void *, int); | 87 | extern int do_remount_sb(struct super_block *, int, void *, int); |
| 88 | |||
| 89 | /* | ||
| 90 | * open.c | ||
| 91 | */ | ||
| 92 | struct nameidata; | ||
| 93 | extern struct file *nameidata_to_filp(struct nameidata *); | ||
| 94 | extern void release_open_intent(struct nameidata *); | ||
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c index 4160afad6d00..bd224eec9b07 100644 --- a/fs/jbd/journal.c +++ b/fs/jbd/journal.c | |||
| @@ -1913,7 +1913,7 @@ static void __init jbd_create_debugfs_entry(void) | |||
| 1913 | { | 1913 | { |
| 1914 | jbd_debugfs_dir = debugfs_create_dir("jbd", NULL); | 1914 | jbd_debugfs_dir = debugfs_create_dir("jbd", NULL); |
| 1915 | if (jbd_debugfs_dir) | 1915 | if (jbd_debugfs_dir) |
| 1916 | jbd_debug = debugfs_create_u8("jbd-debug", S_IRUGO, | 1916 | jbd_debug = debugfs_create_u8("jbd-debug", S_IRUGO | S_IWUSR, |
| 1917 | jbd_debugfs_dir, | 1917 | jbd_debugfs_dir, |
| 1918 | &journal_enable_debug); | 1918 | &journal_enable_debug); |
| 1919 | } | 1919 | } |
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index b7ca3a92a4db..17af879e6e9e 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c | |||
| @@ -2115,7 +2115,8 @@ static void __init jbd2_create_debugfs_entry(void) | |||
| 2115 | { | 2115 | { |
| 2116 | jbd2_debugfs_dir = debugfs_create_dir("jbd2", NULL); | 2116 | jbd2_debugfs_dir = debugfs_create_dir("jbd2", NULL); |
| 2117 | if (jbd2_debugfs_dir) | 2117 | if (jbd2_debugfs_dir) |
| 2118 | jbd2_debug = debugfs_create_u8(JBD2_DEBUG_NAME, S_IRUGO, | 2118 | jbd2_debug = debugfs_create_u8(JBD2_DEBUG_NAME, |
| 2119 | S_IRUGO | S_IWUSR, | ||
| 2119 | jbd2_debugfs_dir, | 2120 | jbd2_debugfs_dir, |
| 2120 | &jbd2_journal_enable_debug); | 2121 | &jbd2_journal_enable_debug); |
| 2121 | } | 2122 | } |
diff --git a/fs/jfs/super.c b/fs/jfs/super.c index 2234c73fc577..d929a822a74e 100644 --- a/fs/jfs/super.c +++ b/fs/jfs/super.c | |||
| @@ -524,7 +524,7 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent) | |||
| 524 | * Page cache is indexed by long. | 524 | * Page cache is indexed by long. |
| 525 | * I would use MAX_LFS_FILESIZE, but it's only half as big | 525 | * I would use MAX_LFS_FILESIZE, but it's only half as big |
| 526 | */ | 526 | */ |
| 527 | sb->s_maxbytes = min(((u64) PAGE_CACHE_SIZE << 32) - 1, sb->s_maxbytes); | 527 | sb->s_maxbytes = min(((u64) PAGE_CACHE_SIZE << 32) - 1, (u64)sb->s_maxbytes); |
| 528 | #endif | 528 | #endif |
| 529 | sb->s_time_gran = 1; | 529 | sb->s_time_gran = 1; |
| 530 | return 0; | 530 | return 0; |
diff --git a/fs/namei.c b/fs/namei.c index dad4b80257db..68921d9b5302 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
| @@ -37,8 +37,6 @@ | |||
| 37 | 37 | ||
| 38 | #include "internal.h" | 38 | #include "internal.h" |
| 39 | 39 | ||
| 40 | #define ACC_MODE(x) ("\000\004\002\006"[(x)&O_ACCMODE]) | ||
| 41 | |||
| 42 | /* [Feb-1997 T. Schoebel-Theuer] | 40 | /* [Feb-1997 T. Schoebel-Theuer] |
| 43 | * Fundamental changes in the pathname lookup mechanisms (namei) | 41 | * Fundamental changes in the pathname lookup mechanisms (namei) |
| 44 | * were necessary because of omirr. The reason is that omirr needs | 42 | * were necessary because of omirr. The reason is that omirr needs |
| @@ -1640,6 +1638,7 @@ struct file *do_filp_open(int dfd, const char *pathname, | |||
| 1640 | if (filp == NULL) | 1638 | if (filp == NULL) |
| 1641 | return ERR_PTR(-ENFILE); | 1639 | return ERR_PTR(-ENFILE); |
| 1642 | nd.intent.open.file = filp; | 1640 | nd.intent.open.file = filp; |
| 1641 | filp->f_flags = open_flag; | ||
| 1643 | nd.intent.open.flags = flag; | 1642 | nd.intent.open.flags = flag; |
| 1644 | nd.intent.open.create_mode = 0; | 1643 | nd.intent.open.create_mode = 0; |
| 1645 | error = do_path_lookup(dfd, pathname, | 1644 | error = do_path_lookup(dfd, pathname, |
| @@ -1685,6 +1684,7 @@ struct file *do_filp_open(int dfd, const char *pathname, | |||
| 1685 | if (filp == NULL) | 1684 | if (filp == NULL) |
| 1686 | goto exit_parent; | 1685 | goto exit_parent; |
| 1687 | nd.intent.open.file = filp; | 1686 | nd.intent.open.file = filp; |
| 1687 | filp->f_flags = open_flag; | ||
| 1688 | nd.intent.open.flags = flag; | 1688 | nd.intent.open.flags = flag; |
| 1689 | nd.intent.open.create_mode = mode; | 1689 | nd.intent.open.create_mode = mode; |
| 1690 | dir = nd.path.dentry; | 1690 | dir = nd.path.dentry; |
| @@ -1725,7 +1725,7 @@ do_last: | |||
| 1725 | mnt_drop_write(nd.path.mnt); | 1725 | mnt_drop_write(nd.path.mnt); |
| 1726 | goto exit; | 1726 | goto exit; |
| 1727 | } | 1727 | } |
| 1728 | filp = nameidata_to_filp(&nd, open_flag); | 1728 | filp = nameidata_to_filp(&nd); |
| 1729 | mnt_drop_write(nd.path.mnt); | 1729 | mnt_drop_write(nd.path.mnt); |
| 1730 | if (nd.root.mnt) | 1730 | if (nd.root.mnt) |
| 1731 | path_put(&nd.root); | 1731 | path_put(&nd.root); |
| @@ -1789,7 +1789,7 @@ ok: | |||
| 1789 | mnt_drop_write(nd.path.mnt); | 1789 | mnt_drop_write(nd.path.mnt); |
| 1790 | goto exit; | 1790 | goto exit; |
| 1791 | } | 1791 | } |
| 1792 | filp = nameidata_to_filp(&nd, open_flag); | 1792 | filp = nameidata_to_filp(&nd); |
| 1793 | if (!IS_ERR(filp)) { | 1793 | if (!IS_ERR(filp)) { |
| 1794 | error = ima_path_check(&filp->f_path, filp->f_mode & | 1794 | error = ima_path_check(&filp->f_path, filp->f_mode & |
| 1795 | (MAY_READ | MAY_WRITE | MAY_EXEC)); | 1795 | (MAY_READ | MAY_WRITE | MAY_EXEC)); |
| @@ -821,15 +821,14 @@ static inline int __get_file_write_access(struct inode *inode, | |||
| 821 | } | 821 | } |
| 822 | 822 | ||
| 823 | static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt, | 823 | static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt, |
| 824 | int flags, struct file *f, | 824 | struct file *f, |
| 825 | int (*open)(struct inode *, struct file *), | 825 | int (*open)(struct inode *, struct file *), |
| 826 | const struct cred *cred) | 826 | const struct cred *cred) |
| 827 | { | 827 | { |
| 828 | struct inode *inode; | 828 | struct inode *inode; |
| 829 | int error; | 829 | int error; |
| 830 | 830 | ||
| 831 | f->f_flags = flags; | 831 | f->f_mode = OPEN_FMODE(f->f_flags) | FMODE_LSEEK | |
| 832 | f->f_mode = (__force fmode_t)((flags+1) & O_ACCMODE) | FMODE_LSEEK | | ||
| 833 | FMODE_PREAD | FMODE_PWRITE; | 832 | FMODE_PREAD | FMODE_PWRITE; |
| 834 | inode = dentry->d_inode; | 833 | inode = dentry->d_inode; |
| 835 | if (f->f_mode & FMODE_WRITE) { | 834 | if (f->f_mode & FMODE_WRITE) { |
| @@ -930,7 +929,6 @@ struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry | |||
| 930 | if (IS_ERR(dentry)) | 929 | if (IS_ERR(dentry)) |
| 931 | goto out_err; | 930 | goto out_err; |
| 932 | nd->intent.open.file = __dentry_open(dget(dentry), mntget(nd->path.mnt), | 931 | nd->intent.open.file = __dentry_open(dget(dentry), mntget(nd->path.mnt), |
| 933 | nd->intent.open.flags - 1, | ||
| 934 | nd->intent.open.file, | 932 | nd->intent.open.file, |
| 935 | open, cred); | 933 | open, cred); |
| 936 | out: | 934 | out: |
| @@ -949,7 +947,7 @@ EXPORT_SYMBOL_GPL(lookup_instantiate_filp); | |||
| 949 | * | 947 | * |
| 950 | * Note that this function destroys the original nameidata | 948 | * Note that this function destroys the original nameidata |
| 951 | */ | 949 | */ |
| 952 | struct file *nameidata_to_filp(struct nameidata *nd, int flags) | 950 | struct file *nameidata_to_filp(struct nameidata *nd) |
| 953 | { | 951 | { |
| 954 | const struct cred *cred = current_cred(); | 952 | const struct cred *cred = current_cred(); |
| 955 | struct file *filp; | 953 | struct file *filp; |
| @@ -958,7 +956,7 @@ struct file *nameidata_to_filp(struct nameidata *nd, int flags) | |||
| 958 | filp = nd->intent.open.file; | 956 | filp = nd->intent.open.file; |
| 959 | /* Has the filesystem initialised the file for us? */ | 957 | /* Has the filesystem initialised the file for us? */ |
| 960 | if (filp->f_path.dentry == NULL) | 958 | if (filp->f_path.dentry == NULL) |
| 961 | filp = __dentry_open(nd->path.dentry, nd->path.mnt, flags, filp, | 959 | filp = __dentry_open(nd->path.dentry, nd->path.mnt, filp, |
| 962 | NULL, cred); | 960 | NULL, cred); |
| 963 | else | 961 | else |
| 964 | path_put(&nd->path); | 962 | path_put(&nd->path); |
| @@ -997,7 +995,8 @@ struct file *dentry_open(struct dentry *dentry, struct vfsmount *mnt, int flags, | |||
| 997 | return ERR_PTR(error); | 995 | return ERR_PTR(error); |
| 998 | } | 996 | } |
| 999 | 997 | ||
| 1000 | return __dentry_open(dentry, mnt, flags, f, NULL, cred); | 998 | f->f_flags = flags; |
| 999 | return __dentry_open(dentry, mnt, f, NULL, cred); | ||
| 1001 | } | 1000 | } |
| 1002 | EXPORT_SYMBOL(dentry_open); | 1001 | EXPORT_SYMBOL(dentry_open); |
| 1003 | 1002 | ||
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index cd6bb9a33c13..dea86abdf2e7 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c | |||
| @@ -323,6 +323,30 @@ int dquot_mark_dquot_dirty(struct dquot *dquot) | |||
| 323 | } | 323 | } |
| 324 | EXPORT_SYMBOL(dquot_mark_dquot_dirty); | 324 | EXPORT_SYMBOL(dquot_mark_dquot_dirty); |
| 325 | 325 | ||
| 326 | /* Dirtify all the dquots - this can block when journalling */ | ||
| 327 | static inline int mark_all_dquot_dirty(struct dquot * const *dquot) | ||
| 328 | { | ||
| 329 | int ret, err, cnt; | ||
| 330 | |||
| 331 | ret = err = 0; | ||
| 332 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
| 333 | if (dquot[cnt]) | ||
| 334 | /* Even in case of error we have to continue */ | ||
| 335 | ret = mark_dquot_dirty(dquot[cnt]); | ||
| 336 | if (!err) | ||
| 337 | err = ret; | ||
| 338 | } | ||
| 339 | return err; | ||
| 340 | } | ||
| 341 | |||
| 342 | static inline void dqput_all(struct dquot **dquot) | ||
| 343 | { | ||
| 344 | unsigned int cnt; | ||
| 345 | |||
| 346 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
| 347 | dqput(dquot[cnt]); | ||
| 348 | } | ||
| 349 | |||
| 326 | /* This function needs dq_list_lock */ | 350 | /* This function needs dq_list_lock */ |
| 327 | static inline int clear_dquot_dirty(struct dquot *dquot) | 351 | static inline int clear_dquot_dirty(struct dquot *dquot) |
| 328 | { | 352 | { |
| @@ -1268,8 +1292,7 @@ int dquot_initialize(struct inode *inode, int type) | |||
| 1268 | out_err: | 1292 | out_err: |
| 1269 | up_write(&sb_dqopt(sb)->dqptr_sem); | 1293 | up_write(&sb_dqopt(sb)->dqptr_sem); |
| 1270 | /* Drop unused references */ | 1294 | /* Drop unused references */ |
| 1271 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | 1295 | dqput_all(got); |
| 1272 | dqput(got[cnt]); | ||
| 1273 | return ret; | 1296 | return ret; |
| 1274 | } | 1297 | } |
| 1275 | EXPORT_SYMBOL(dquot_initialize); | 1298 | EXPORT_SYMBOL(dquot_initialize); |
| @@ -1288,9 +1311,7 @@ int dquot_drop(struct inode *inode) | |||
| 1288 | inode->i_dquot[cnt] = NULL; | 1311 | inode->i_dquot[cnt] = NULL; |
| 1289 | } | 1312 | } |
| 1290 | up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1313 | up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); |
| 1291 | 1314 | dqput_all(put); | |
| 1292 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
| 1293 | dqput(put[cnt]); | ||
| 1294 | return 0; | 1315 | return 0; |
| 1295 | } | 1316 | } |
| 1296 | EXPORT_SYMBOL(dquot_drop); | 1317 | EXPORT_SYMBOL(dquot_drop); |
| @@ -1319,6 +1340,67 @@ void vfs_dq_drop(struct inode *inode) | |||
| 1319 | EXPORT_SYMBOL(vfs_dq_drop); | 1340 | EXPORT_SYMBOL(vfs_dq_drop); |
| 1320 | 1341 | ||
| 1321 | /* | 1342 | /* |
| 1343 | * inode_reserved_space is managed internally by quota, and protected by | ||
| 1344 | * i_lock similar to i_blocks+i_bytes. | ||
| 1345 | */ | ||
| 1346 | static qsize_t *inode_reserved_space(struct inode * inode) | ||
| 1347 | { | ||
| 1348 | /* Filesystem must explicitly define it's own method in order to use | ||
| 1349 | * quota reservation interface */ | ||
| 1350 | BUG_ON(!inode->i_sb->dq_op->get_reserved_space); | ||
| 1351 | return inode->i_sb->dq_op->get_reserved_space(inode); | ||
| 1352 | } | ||
| 1353 | |||
| 1354 | static void inode_add_rsv_space(struct inode *inode, qsize_t number) | ||
| 1355 | { | ||
| 1356 | spin_lock(&inode->i_lock); | ||
| 1357 | *inode_reserved_space(inode) += number; | ||
| 1358 | spin_unlock(&inode->i_lock); | ||
| 1359 | } | ||
| 1360 | |||
| 1361 | |||
| 1362 | static void inode_claim_rsv_space(struct inode *inode, qsize_t number) | ||
| 1363 | { | ||
| 1364 | spin_lock(&inode->i_lock); | ||
| 1365 | *inode_reserved_space(inode) -= number; | ||
| 1366 | __inode_add_bytes(inode, number); | ||
| 1367 | spin_unlock(&inode->i_lock); | ||
| 1368 | } | ||
| 1369 | |||
| 1370 | static void inode_sub_rsv_space(struct inode *inode, qsize_t number) | ||
| 1371 | { | ||
| 1372 | spin_lock(&inode->i_lock); | ||
| 1373 | *inode_reserved_space(inode) -= number; | ||
| 1374 | spin_unlock(&inode->i_lock); | ||
| 1375 | } | ||
| 1376 | |||
| 1377 | static qsize_t inode_get_rsv_space(struct inode *inode) | ||
| 1378 | { | ||
| 1379 | qsize_t ret; | ||
| 1380 | spin_lock(&inode->i_lock); | ||
| 1381 | ret = *inode_reserved_space(inode); | ||
| 1382 | spin_unlock(&inode->i_lock); | ||
| 1383 | return ret; | ||
| 1384 | } | ||
| 1385 | |||
| 1386 | static void inode_incr_space(struct inode *inode, qsize_t number, | ||
| 1387 | int reserve) | ||
| 1388 | { | ||
| 1389 | if (reserve) | ||
| 1390 | inode_add_rsv_space(inode, number); | ||
| 1391 | else | ||
| 1392 | inode_add_bytes(inode, number); | ||
| 1393 | } | ||
| 1394 | |||
| 1395 | static void inode_decr_space(struct inode *inode, qsize_t number, int reserve) | ||
| 1396 | { | ||
| 1397 | if (reserve) | ||
| 1398 | inode_sub_rsv_space(inode, number); | ||
| 1399 | else | ||
| 1400 | inode_sub_bytes(inode, number); | ||
| 1401 | } | ||
| 1402 | |||
| 1403 | /* | ||
| 1322 | * Following four functions update i_blocks+i_bytes fields and | 1404 | * Following four functions update i_blocks+i_bytes fields and |
| 1323 | * quota information (together with appropriate checks) | 1405 | * quota information (together with appropriate checks) |
| 1324 | * NOTE: We absolutely rely on the fact that caller dirties | 1406 | * NOTE: We absolutely rely on the fact that caller dirties |
| @@ -1336,6 +1418,21 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, | |||
| 1336 | int cnt, ret = QUOTA_OK; | 1418 | int cnt, ret = QUOTA_OK; |
| 1337 | char warntype[MAXQUOTAS]; | 1419 | char warntype[MAXQUOTAS]; |
| 1338 | 1420 | ||
| 1421 | /* | ||
| 1422 | * First test before acquiring mutex - solves deadlocks when we | ||
| 1423 | * re-enter the quota code and are already holding the mutex | ||
| 1424 | */ | ||
| 1425 | if (IS_NOQUOTA(inode)) { | ||
| 1426 | inode_incr_space(inode, number, reserve); | ||
| 1427 | goto out; | ||
| 1428 | } | ||
| 1429 | |||
| 1430 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
| 1431 | if (IS_NOQUOTA(inode)) { | ||
| 1432 | inode_incr_space(inode, number, reserve); | ||
| 1433 | goto out_unlock; | ||
| 1434 | } | ||
| 1435 | |||
| 1339 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | 1436 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) |
| 1340 | warntype[cnt] = QUOTA_NL_NOWARN; | 1437 | warntype[cnt] = QUOTA_NL_NOWARN; |
| 1341 | 1438 | ||
| @@ -1346,7 +1443,8 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, | |||
| 1346 | if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt) | 1443 | if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt) |
| 1347 | == NO_QUOTA) { | 1444 | == NO_QUOTA) { |
| 1348 | ret = NO_QUOTA; | 1445 | ret = NO_QUOTA; |
| 1349 | goto out_unlock; | 1446 | spin_unlock(&dq_data_lock); |
| 1447 | goto out_flush_warn; | ||
| 1350 | } | 1448 | } |
| 1351 | } | 1449 | } |
| 1352 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | 1450 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { |
| @@ -1357,64 +1455,29 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number, | |||
| 1357 | else | 1455 | else |
| 1358 | dquot_incr_space(inode->i_dquot[cnt], number); | 1456 | dquot_incr_space(inode->i_dquot[cnt], number); |
| 1359 | } | 1457 | } |
| 1360 | if (!reserve) | 1458 | inode_incr_space(inode, number, reserve); |
| 1361 | inode_add_bytes(inode, number); | ||
| 1362 | out_unlock: | ||
| 1363 | spin_unlock(&dq_data_lock); | 1459 | spin_unlock(&dq_data_lock); |
| 1460 | |||
| 1461 | if (reserve) | ||
| 1462 | goto out_flush_warn; | ||
| 1463 | mark_all_dquot_dirty(inode->i_dquot); | ||
| 1464 | out_flush_warn: | ||
| 1364 | flush_warnings(inode->i_dquot, warntype); | 1465 | flush_warnings(inode->i_dquot, warntype); |
| 1466 | out_unlock: | ||
| 1467 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
| 1468 | out: | ||
| 1365 | return ret; | 1469 | return ret; |
| 1366 | } | 1470 | } |
| 1367 | 1471 | ||
| 1368 | int dquot_alloc_space(struct inode *inode, qsize_t number, int warn) | 1472 | int dquot_alloc_space(struct inode *inode, qsize_t number, int warn) |
| 1369 | { | 1473 | { |
| 1370 | int cnt, ret = QUOTA_OK; | 1474 | return __dquot_alloc_space(inode, number, warn, 0); |
| 1371 | |||
| 1372 | /* | ||
| 1373 | * First test before acquiring mutex - solves deadlocks when we | ||
| 1374 | * re-enter the quota code and are already holding the mutex | ||
| 1375 | */ | ||
| 1376 | if (IS_NOQUOTA(inode)) { | ||
| 1377 | inode_add_bytes(inode, number); | ||
| 1378 | goto out; | ||
| 1379 | } | ||
| 1380 | |||
| 1381 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
| 1382 | if (IS_NOQUOTA(inode)) { | ||
| 1383 | inode_add_bytes(inode, number); | ||
| 1384 | goto out_unlock; | ||
| 1385 | } | ||
| 1386 | |||
| 1387 | ret = __dquot_alloc_space(inode, number, warn, 0); | ||
| 1388 | if (ret == NO_QUOTA) | ||
| 1389 | goto out_unlock; | ||
| 1390 | |||
| 1391 | /* Dirtify all the dquots - this can block when journalling */ | ||
| 1392 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
| 1393 | if (inode->i_dquot[cnt]) | ||
| 1394 | mark_dquot_dirty(inode->i_dquot[cnt]); | ||
| 1395 | out_unlock: | ||
| 1396 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
| 1397 | out: | ||
| 1398 | return ret; | ||
| 1399 | } | 1475 | } |
| 1400 | EXPORT_SYMBOL(dquot_alloc_space); | 1476 | EXPORT_SYMBOL(dquot_alloc_space); |
| 1401 | 1477 | ||
| 1402 | int dquot_reserve_space(struct inode *inode, qsize_t number, int warn) | 1478 | int dquot_reserve_space(struct inode *inode, qsize_t number, int warn) |
| 1403 | { | 1479 | { |
| 1404 | int ret = QUOTA_OK; | 1480 | return __dquot_alloc_space(inode, number, warn, 1); |
| 1405 | |||
| 1406 | if (IS_NOQUOTA(inode)) | ||
| 1407 | goto out; | ||
| 1408 | |||
| 1409 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
| 1410 | if (IS_NOQUOTA(inode)) | ||
| 1411 | goto out_unlock; | ||
| 1412 | |||
| 1413 | ret = __dquot_alloc_space(inode, number, warn, 1); | ||
| 1414 | out_unlock: | ||
| 1415 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
| 1416 | out: | ||
| 1417 | return ret; | ||
| 1418 | } | 1481 | } |
| 1419 | EXPORT_SYMBOL(dquot_reserve_space); | 1482 | EXPORT_SYMBOL(dquot_reserve_space); |
| 1420 | 1483 | ||
| @@ -1455,10 +1518,7 @@ int dquot_alloc_inode(const struct inode *inode, qsize_t number) | |||
| 1455 | warn_put_all: | 1518 | warn_put_all: |
| 1456 | spin_unlock(&dq_data_lock); | 1519 | spin_unlock(&dq_data_lock); |
| 1457 | if (ret == QUOTA_OK) | 1520 | if (ret == QUOTA_OK) |
| 1458 | /* Dirtify all the dquots - this can block when journalling */ | 1521 | mark_all_dquot_dirty(inode->i_dquot); |
| 1459 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
| 1460 | if (inode->i_dquot[cnt]) | ||
| 1461 | mark_dquot_dirty(inode->i_dquot[cnt]); | ||
| 1462 | flush_warnings(inode->i_dquot, warntype); | 1522 | flush_warnings(inode->i_dquot, warntype); |
| 1463 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1523 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); |
| 1464 | return ret; | 1524 | return ret; |
| @@ -1471,14 +1531,14 @@ int dquot_claim_space(struct inode *inode, qsize_t number) | |||
| 1471 | int ret = QUOTA_OK; | 1531 | int ret = QUOTA_OK; |
| 1472 | 1532 | ||
| 1473 | if (IS_NOQUOTA(inode)) { | 1533 | if (IS_NOQUOTA(inode)) { |
| 1474 | inode_add_bytes(inode, number); | 1534 | inode_claim_rsv_space(inode, number); |
| 1475 | goto out; | 1535 | goto out; |
| 1476 | } | 1536 | } |
| 1477 | 1537 | ||
| 1478 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1538 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); |
| 1479 | if (IS_NOQUOTA(inode)) { | 1539 | if (IS_NOQUOTA(inode)) { |
| 1480 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1540 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); |
| 1481 | inode_add_bytes(inode, number); | 1541 | inode_claim_rsv_space(inode, number); |
| 1482 | goto out; | 1542 | goto out; |
| 1483 | } | 1543 | } |
| 1484 | 1544 | ||
| @@ -1490,12 +1550,9 @@ int dquot_claim_space(struct inode *inode, qsize_t number) | |||
| 1490 | number); | 1550 | number); |
| 1491 | } | 1551 | } |
| 1492 | /* Update inode bytes */ | 1552 | /* Update inode bytes */ |
| 1493 | inode_add_bytes(inode, number); | 1553 | inode_claim_rsv_space(inode, number); |
| 1494 | spin_unlock(&dq_data_lock); | 1554 | spin_unlock(&dq_data_lock); |
| 1495 | /* Dirtify all the dquots - this can block when journalling */ | 1555 | mark_all_dquot_dirty(inode->i_dquot); |
| 1496 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
| 1497 | if (inode->i_dquot[cnt]) | ||
| 1498 | mark_dquot_dirty(inode->i_dquot[cnt]); | ||
| 1499 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1556 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); |
| 1500 | out: | 1557 | out: |
| 1501 | return ret; | 1558 | return ret; |
| @@ -1503,38 +1560,9 @@ out: | |||
| 1503 | EXPORT_SYMBOL(dquot_claim_space); | 1560 | EXPORT_SYMBOL(dquot_claim_space); |
| 1504 | 1561 | ||
| 1505 | /* | 1562 | /* |
| 1506 | * Release reserved quota space | ||
| 1507 | */ | ||
| 1508 | void dquot_release_reserved_space(struct inode *inode, qsize_t number) | ||
| 1509 | { | ||
| 1510 | int cnt; | ||
| 1511 | |||
| 1512 | if (IS_NOQUOTA(inode)) | ||
| 1513 | goto out; | ||
| 1514 | |||
| 1515 | down_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
| 1516 | if (IS_NOQUOTA(inode)) | ||
| 1517 | goto out_unlock; | ||
| 1518 | |||
| 1519 | spin_lock(&dq_data_lock); | ||
| 1520 | /* Release reserved dquots */ | ||
| 1521 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | ||
| 1522 | if (inode->i_dquot[cnt]) | ||
| 1523 | dquot_free_reserved_space(inode->i_dquot[cnt], number); | ||
| 1524 | } | ||
| 1525 | spin_unlock(&dq_data_lock); | ||
| 1526 | |||
| 1527 | out_unlock: | ||
| 1528 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | ||
| 1529 | out: | ||
| 1530 | return; | ||
| 1531 | } | ||
| 1532 | EXPORT_SYMBOL(dquot_release_reserved_space); | ||
| 1533 | |||
| 1534 | /* | ||
| 1535 | * This operation can block, but only after everything is updated | 1563 | * This operation can block, but only after everything is updated |
| 1536 | */ | 1564 | */ |
| 1537 | int dquot_free_space(struct inode *inode, qsize_t number) | 1565 | int __dquot_free_space(struct inode *inode, qsize_t number, int reserve) |
| 1538 | { | 1566 | { |
| 1539 | unsigned int cnt; | 1567 | unsigned int cnt; |
| 1540 | char warntype[MAXQUOTAS]; | 1568 | char warntype[MAXQUOTAS]; |
| @@ -1543,7 +1571,7 @@ int dquot_free_space(struct inode *inode, qsize_t number) | |||
| 1543 | * re-enter the quota code and are already holding the mutex */ | 1571 | * re-enter the quota code and are already holding the mutex */ |
| 1544 | if (IS_NOQUOTA(inode)) { | 1572 | if (IS_NOQUOTA(inode)) { |
| 1545 | out_sub: | 1573 | out_sub: |
| 1546 | inode_sub_bytes(inode, number); | 1574 | inode_decr_space(inode, number, reserve); |
| 1547 | return QUOTA_OK; | 1575 | return QUOTA_OK; |
| 1548 | } | 1576 | } |
| 1549 | 1577 | ||
| @@ -1558,21 +1586,40 @@ out_sub: | |||
| 1558 | if (!inode->i_dquot[cnt]) | 1586 | if (!inode->i_dquot[cnt]) |
| 1559 | continue; | 1587 | continue; |
| 1560 | warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number); | 1588 | warntype[cnt] = info_bdq_free(inode->i_dquot[cnt], number); |
| 1561 | dquot_decr_space(inode->i_dquot[cnt], number); | 1589 | if (reserve) |
| 1590 | dquot_free_reserved_space(inode->i_dquot[cnt], number); | ||
| 1591 | else | ||
| 1592 | dquot_decr_space(inode->i_dquot[cnt], number); | ||
| 1562 | } | 1593 | } |
| 1563 | inode_sub_bytes(inode, number); | 1594 | inode_decr_space(inode, number, reserve); |
| 1564 | spin_unlock(&dq_data_lock); | 1595 | spin_unlock(&dq_data_lock); |
| 1565 | /* Dirtify all the dquots - this can block when journalling */ | 1596 | |
| 1566 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | 1597 | if (reserve) |
| 1567 | if (inode->i_dquot[cnt]) | 1598 | goto out_unlock; |
| 1568 | mark_dquot_dirty(inode->i_dquot[cnt]); | 1599 | mark_all_dquot_dirty(inode->i_dquot); |
| 1600 | out_unlock: | ||
| 1569 | flush_warnings(inode->i_dquot, warntype); | 1601 | flush_warnings(inode->i_dquot, warntype); |
| 1570 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1602 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); |
| 1571 | return QUOTA_OK; | 1603 | return QUOTA_OK; |
| 1572 | } | 1604 | } |
| 1605 | |||
| 1606 | int dquot_free_space(struct inode *inode, qsize_t number) | ||
| 1607 | { | ||
| 1608 | return __dquot_free_space(inode, number, 0); | ||
| 1609 | } | ||
| 1573 | EXPORT_SYMBOL(dquot_free_space); | 1610 | EXPORT_SYMBOL(dquot_free_space); |
| 1574 | 1611 | ||
| 1575 | /* | 1612 | /* |
| 1613 | * Release reserved quota space | ||
| 1614 | */ | ||
| 1615 | void dquot_release_reserved_space(struct inode *inode, qsize_t number) | ||
| 1616 | { | ||
| 1617 | __dquot_free_space(inode, number, 1); | ||
| 1618 | |||
| 1619 | } | ||
| 1620 | EXPORT_SYMBOL(dquot_release_reserved_space); | ||
| 1621 | |||
| 1622 | /* | ||
| 1576 | * This operation can block, but only after everything is updated | 1623 | * This operation can block, but only after everything is updated |
| 1577 | */ | 1624 | */ |
| 1578 | int dquot_free_inode(const struct inode *inode, qsize_t number) | 1625 | int dquot_free_inode(const struct inode *inode, qsize_t number) |
| @@ -1599,10 +1646,7 @@ int dquot_free_inode(const struct inode *inode, qsize_t number) | |||
| 1599 | dquot_decr_inodes(inode->i_dquot[cnt], number); | 1646 | dquot_decr_inodes(inode->i_dquot[cnt], number); |
| 1600 | } | 1647 | } |
| 1601 | spin_unlock(&dq_data_lock); | 1648 | spin_unlock(&dq_data_lock); |
| 1602 | /* Dirtify all the dquots - this can block when journalling */ | 1649 | mark_all_dquot_dirty(inode->i_dquot); |
| 1603 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) | ||
| 1604 | if (inode->i_dquot[cnt]) | ||
| 1605 | mark_dquot_dirty(inode->i_dquot[cnt]); | ||
| 1606 | flush_warnings(inode->i_dquot, warntype); | 1650 | flush_warnings(inode->i_dquot, warntype); |
| 1607 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1651 | up_read(&sb_dqopt(inode->i_sb)->dqptr_sem); |
| 1608 | return QUOTA_OK; | 1652 | return QUOTA_OK; |
| @@ -1610,19 +1654,6 @@ int dquot_free_inode(const struct inode *inode, qsize_t number) | |||
| 1610 | EXPORT_SYMBOL(dquot_free_inode); | 1654 | EXPORT_SYMBOL(dquot_free_inode); |
| 1611 | 1655 | ||
| 1612 | /* | 1656 | /* |
| 1613 | * call back function, get reserved quota space from underlying fs | ||
| 1614 | */ | ||
| 1615 | qsize_t dquot_get_reserved_space(struct inode *inode) | ||
| 1616 | { | ||
| 1617 | qsize_t reserved_space = 0; | ||
| 1618 | |||
| 1619 | if (sb_any_quota_active(inode->i_sb) && | ||
| 1620 | inode->i_sb->dq_op->get_reserved_space) | ||
| 1621 | reserved_space = inode->i_sb->dq_op->get_reserved_space(inode); | ||
| 1622 | return reserved_space; | ||
| 1623 | } | ||
| 1624 | |||
| 1625 | /* | ||
| 1626 | * Transfer the number of inode and blocks from one diskquota to an other. | 1657 | * Transfer the number of inode and blocks from one diskquota to an other. |
| 1627 | * | 1658 | * |
| 1628 | * This operation can block, but only after everything is updated | 1659 | * This operation can block, but only after everything is updated |
| @@ -1665,7 +1696,7 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr) | |||
| 1665 | } | 1696 | } |
| 1666 | spin_lock(&dq_data_lock); | 1697 | spin_lock(&dq_data_lock); |
| 1667 | cur_space = inode_get_bytes(inode); | 1698 | cur_space = inode_get_bytes(inode); |
| 1668 | rsv_space = dquot_get_reserved_space(inode); | 1699 | rsv_space = inode_get_rsv_space(inode); |
| 1669 | space = cur_space + rsv_space; | 1700 | space = cur_space + rsv_space; |
| 1670 | /* Build the transfer_from list and check the limits */ | 1701 | /* Build the transfer_from list and check the limits */ |
| 1671 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | 1702 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { |
| @@ -1709,25 +1740,18 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr) | |||
| 1709 | spin_unlock(&dq_data_lock); | 1740 | spin_unlock(&dq_data_lock); |
| 1710 | up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); | 1741 | up_write(&sb_dqopt(inode->i_sb)->dqptr_sem); |
| 1711 | 1742 | ||
| 1712 | /* Dirtify all the dquots - this can block when journalling */ | 1743 | mark_all_dquot_dirty(transfer_from); |
| 1713 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | 1744 | mark_all_dquot_dirty(transfer_to); |
| 1714 | if (transfer_from[cnt]) | 1745 | /* The reference we got is transferred to the inode */ |
| 1715 | mark_dquot_dirty(transfer_from[cnt]); | 1746 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) |
| 1716 | if (transfer_to[cnt]) { | 1747 | transfer_to[cnt] = NULL; |
| 1717 | mark_dquot_dirty(transfer_to[cnt]); | ||
| 1718 | /* The reference we got is transferred to the inode */ | ||
| 1719 | transfer_to[cnt] = NULL; | ||
| 1720 | } | ||
| 1721 | } | ||
| 1722 | warn_put_all: | 1748 | warn_put_all: |
| 1723 | flush_warnings(transfer_to, warntype_to); | 1749 | flush_warnings(transfer_to, warntype_to); |
| 1724 | flush_warnings(transfer_from, warntype_from_inodes); | 1750 | flush_warnings(transfer_from, warntype_from_inodes); |
| 1725 | flush_warnings(transfer_from, warntype_from_space); | 1751 | flush_warnings(transfer_from, warntype_from_space); |
| 1726 | put_all: | 1752 | put_all: |
| 1727 | for (cnt = 0; cnt < MAXQUOTAS; cnt++) { | 1753 | dqput_all(transfer_from); |
| 1728 | dqput(transfer_from[cnt]); | 1754 | dqput_all(transfer_to); |
| 1729 | dqput(transfer_to[cnt]); | ||
| 1730 | } | ||
| 1731 | return ret; | 1755 | return ret; |
| 1732 | over_quota: | 1756 | over_quota: |
| 1733 | spin_unlock(&dq_data_lock); | 1757 | spin_unlock(&dq_data_lock); |
diff --git a/fs/quota/quota_v2.c b/fs/quota/quota_v2.c index 3dfc23e02135..e3da02f4986f 100644 --- a/fs/quota/quota_v2.c +++ b/fs/quota/quota_v2.c | |||
| @@ -97,8 +97,11 @@ static int v2_read_file_info(struct super_block *sb, int type) | |||
| 97 | unsigned int version; | 97 | unsigned int version; |
| 98 | 98 | ||
| 99 | if (!v2_read_header(sb, type, &dqhead)) | 99 | if (!v2_read_header(sb, type, &dqhead)) |
| 100 | return 0; | 100 | return -1; |
| 101 | version = le32_to_cpu(dqhead.dqh_version); | 101 | version = le32_to_cpu(dqhead.dqh_version); |
| 102 | if ((info->dqi_fmt_id == QFMT_VFS_V0 && version != 0) || | ||
| 103 | (info->dqi_fmt_id == QFMT_VFS_V1 && version != 1)) | ||
| 104 | return -1; | ||
| 102 | 105 | ||
| 103 | size = sb->s_op->quota_read(sb, type, (char *)&dinfo, | 106 | size = sb->s_op->quota_read(sb, type, (char *)&dinfo, |
| 104 | sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF); | 107 | sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF); |
| @@ -120,8 +123,8 @@ static int v2_read_file_info(struct super_block *sb, int type) | |||
| 120 | info->dqi_maxilimit = 0xffffffff; | 123 | info->dqi_maxilimit = 0xffffffff; |
| 121 | } else { | 124 | } else { |
| 122 | /* used space is stored as unsigned 64-bit value */ | 125 | /* used space is stored as unsigned 64-bit value */ |
| 123 | info->dqi_maxblimit = 0xffffffffffffffff; /* 2^64-1 */ | 126 | info->dqi_maxblimit = 0xffffffffffffffffULL; /* 2^64-1 */ |
| 124 | info->dqi_maxilimit = 0xffffffffffffffff; | 127 | info->dqi_maxilimit = 0xffffffffffffffffULL; |
| 125 | } | 128 | } |
| 126 | info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace); | 129 | info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace); |
| 127 | info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace); | 130 | info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace); |
diff --git a/fs/signalfd.c b/fs/signalfd.c index b07565c94386..1dabe4ee02fe 100644 --- a/fs/signalfd.c +++ b/fs/signalfd.c | |||
| @@ -236,7 +236,7 @@ SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask, | |||
| 236 | * anon_inode_getfd() will install the fd. | 236 | * anon_inode_getfd() will install the fd. |
| 237 | */ | 237 | */ |
| 238 | ufd = anon_inode_getfd("[signalfd]", &signalfd_fops, ctx, | 238 | ufd = anon_inode_getfd("[signalfd]", &signalfd_fops, ctx, |
| 239 | flags & (O_CLOEXEC | O_NONBLOCK)); | 239 | O_RDWR | (flags & (O_CLOEXEC | O_NONBLOCK))); |
| 240 | if (ufd < 0) | 240 | if (ufd < 0) |
| 241 | kfree(ctx); | 241 | kfree(ctx); |
| 242 | } else { | 242 | } else { |
| @@ -401,9 +401,9 @@ SYSCALL_DEFINE4(fstatat64, int, dfd, char __user *, filename, | |||
| 401 | } | 401 | } |
| 402 | #endif /* __ARCH_WANT_STAT64 */ | 402 | #endif /* __ARCH_WANT_STAT64 */ |
| 403 | 403 | ||
| 404 | void inode_add_bytes(struct inode *inode, loff_t bytes) | 404 | /* Caller is here responsible for sufficient locking (ie. inode->i_lock) */ |
| 405 | void __inode_add_bytes(struct inode *inode, loff_t bytes) | ||
| 405 | { | 406 | { |
| 406 | spin_lock(&inode->i_lock); | ||
| 407 | inode->i_blocks += bytes >> 9; | 407 | inode->i_blocks += bytes >> 9; |
| 408 | bytes &= 511; | 408 | bytes &= 511; |
| 409 | inode->i_bytes += bytes; | 409 | inode->i_bytes += bytes; |
| @@ -411,6 +411,12 @@ void inode_add_bytes(struct inode *inode, loff_t bytes) | |||
| 411 | inode->i_blocks++; | 411 | inode->i_blocks++; |
| 412 | inode->i_bytes -= 512; | 412 | inode->i_bytes -= 512; |
| 413 | } | 413 | } |
| 414 | } | ||
| 415 | |||
| 416 | void inode_add_bytes(struct inode *inode, loff_t bytes) | ||
| 417 | { | ||
| 418 | spin_lock(&inode->i_lock); | ||
| 419 | __inode_add_bytes(inode, bytes); | ||
| 414 | spin_unlock(&inode->i_lock); | 420 | spin_unlock(&inode->i_lock); |
| 415 | } | 421 | } |
| 416 | 422 | ||
diff --git a/fs/timerfd.c b/fs/timerfd.c index b042bd7034b1..1bfc95ad5f71 100644 --- a/fs/timerfd.c +++ b/fs/timerfd.c | |||
| @@ -200,7 +200,7 @@ SYSCALL_DEFINE2(timerfd_create, int, clockid, int, flags) | |||
| 200 | hrtimer_init(&ctx->tmr, clockid, HRTIMER_MODE_ABS); | 200 | hrtimer_init(&ctx->tmr, clockid, HRTIMER_MODE_ABS); |
| 201 | 201 | ||
| 202 | ufd = anon_inode_getfd("[timerfd]", &timerfd_fops, ctx, | 202 | ufd = anon_inode_getfd("[timerfd]", &timerfd_fops, ctx, |
| 203 | flags & TFD_SHARED_FCNTL_FLAGS); | 203 | O_RDWR | (flags & TFD_SHARED_FCNTL_FLAGS)); |
| 204 | if (ufd < 0) | 204 | if (ufd < 0) |
| 205 | kfree(ctx); | 205 | kfree(ctx); |
| 206 | 206 | ||
diff --git a/include/drm/drmP.h b/include/drm/drmP.h index 19ef8ebdc662..71dafb69cfeb 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h | |||
| @@ -296,6 +296,7 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd, | |||
| 296 | #define DRM_MASTER 0x2 | 296 | #define DRM_MASTER 0x2 |
| 297 | #define DRM_ROOT_ONLY 0x4 | 297 | #define DRM_ROOT_ONLY 0x4 |
| 298 | #define DRM_CONTROL_ALLOW 0x8 | 298 | #define DRM_CONTROL_ALLOW 0x8 |
| 299 | #define DRM_UNLOCKED 0x10 | ||
| 299 | 300 | ||
| 300 | struct drm_ioctl_desc { | 301 | struct drm_ioctl_desc { |
| 301 | unsigned int cmd; | 302 | unsigned int cmd; |
| @@ -1128,8 +1129,8 @@ static inline int drm_mtrr_del(int handle, unsigned long offset, | |||
| 1128 | /* Driver support (drm_drv.h) */ | 1129 | /* Driver support (drm_drv.h) */ |
| 1129 | extern int drm_init(struct drm_driver *driver); | 1130 | extern int drm_init(struct drm_driver *driver); |
| 1130 | extern void drm_exit(struct drm_driver *driver); | 1131 | extern void drm_exit(struct drm_driver *driver); |
| 1131 | extern int drm_ioctl(struct inode *inode, struct file *filp, | 1132 | extern long drm_ioctl(struct file *filp, |
| 1132 | unsigned int cmd, unsigned long arg); | 1133 | unsigned int cmd, unsigned long arg); |
| 1133 | extern long drm_compat_ioctl(struct file *filp, | 1134 | extern long drm_compat_ioctl(struct file *filp, |
| 1134 | unsigned int cmd, unsigned long arg); | 1135 | unsigned int cmd, unsigned long arg); |
| 1135 | extern int drm_lastclose(struct drm_device *dev); | 1136 | extern int drm_lastclose(struct drm_device *dev); |
diff --git a/include/linux/ext3_fs_sb.h b/include/linux/ext3_fs_sb.h index f07f34de2f0e..258088ab3c6b 100644 --- a/include/linux/ext3_fs_sb.h +++ b/include/linux/ext3_fs_sb.h | |||
| @@ -72,6 +72,8 @@ struct ext3_sb_info { | |||
| 72 | struct inode * s_journal_inode; | 72 | struct inode * s_journal_inode; |
| 73 | struct journal_s * s_journal; | 73 | struct journal_s * s_journal; |
| 74 | struct list_head s_orphan; | 74 | struct list_head s_orphan; |
| 75 | struct mutex s_orphan_lock; | ||
| 76 | struct mutex s_resize_lock; | ||
| 75 | unsigned long s_commit_interval; | 77 | unsigned long s_commit_interval; |
| 76 | struct block_device *journal_bdev; | 78 | struct block_device *journal_bdev; |
| 77 | #ifdef CONFIG_JBD_DEBUG | 79 | #ifdef CONFIG_JBD_DEBUG |
diff --git a/include/linux/ext3_jbd.h b/include/linux/ext3_jbd.h index cf82d519be40..d7b5ddca99c2 100644 --- a/include/linux/ext3_jbd.h +++ b/include/linux/ext3_jbd.h | |||
| @@ -44,13 +44,13 @@ | |||
| 44 | 44 | ||
| 45 | #define EXT3_DATA_TRANS_BLOCKS(sb) (EXT3_SINGLEDATA_TRANS_BLOCKS + \ | 45 | #define EXT3_DATA_TRANS_BLOCKS(sb) (EXT3_SINGLEDATA_TRANS_BLOCKS + \ |
| 46 | EXT3_XATTR_TRANS_BLOCKS - 2 + \ | 46 | EXT3_XATTR_TRANS_BLOCKS - 2 + \ |
| 47 | 2*EXT3_QUOTA_TRANS_BLOCKS(sb)) | 47 | EXT3_MAXQUOTAS_TRANS_BLOCKS(sb)) |
| 48 | 48 | ||
| 49 | /* Delete operations potentially hit one directory's namespace plus an | 49 | /* Delete operations potentially hit one directory's namespace plus an |
| 50 | * entire inode, plus arbitrary amounts of bitmap/indirection data. Be | 50 | * entire inode, plus arbitrary amounts of bitmap/indirection data. Be |
| 51 | * generous. We can grow the delete transaction later if necessary. */ | 51 | * generous. We can grow the delete transaction later if necessary. */ |
| 52 | 52 | ||
| 53 | #define EXT3_DELETE_TRANS_BLOCKS(sb) (2 * EXT3_DATA_TRANS_BLOCKS(sb) + 64) | 53 | #define EXT3_DELETE_TRANS_BLOCKS(sb) (EXT3_MAXQUOTAS_TRANS_BLOCKS(sb) + 64) |
| 54 | 54 | ||
| 55 | /* Define an arbitrary limit for the amount of data we will anticipate | 55 | /* Define an arbitrary limit for the amount of data we will anticipate |
| 56 | * writing to any given transaction. For unbounded transactions such as | 56 | * writing to any given transaction. For unbounded transactions such as |
| @@ -86,6 +86,9 @@ | |||
| 86 | #define EXT3_QUOTA_INIT_BLOCKS(sb) 0 | 86 | #define EXT3_QUOTA_INIT_BLOCKS(sb) 0 |
| 87 | #define EXT3_QUOTA_DEL_BLOCKS(sb) 0 | 87 | #define EXT3_QUOTA_DEL_BLOCKS(sb) 0 |
| 88 | #endif | 88 | #endif |
| 89 | #define EXT3_MAXQUOTAS_TRANS_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_TRANS_BLOCKS(sb)) | ||
| 90 | #define EXT3_MAXQUOTAS_INIT_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_INIT_BLOCKS(sb)) | ||
| 91 | #define EXT3_MAXQUOTAS_DEL_BLOCKS(sb) (MAXQUOTAS*EXT3_QUOTA_DEL_BLOCKS(sb)) | ||
| 89 | 92 | ||
| 90 | int | 93 | int |
| 91 | ext3_mark_iloc_dirty(handle_t *handle, | 94 | ext3_mark_iloc_dirty(handle_t *handle, |
diff --git a/include/linux/fs.h b/include/linux/fs.h index cca191933ff6..9147ca88f253 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
| @@ -1624,8 +1624,6 @@ struct super_operations { | |||
| 1624 | * on the bit address once it is done. | 1624 | * on the bit address once it is done. |
| 1625 | * | 1625 | * |
| 1626 | * Q: What is the difference between I_WILL_FREE and I_FREEING? | 1626 | * Q: What is the difference between I_WILL_FREE and I_FREEING? |
| 1627 | * Q: igrab() only checks on (I_FREEING|I_WILL_FREE). Should it also check on | ||
| 1628 | * I_CLEAR? If not, why? | ||
| 1629 | */ | 1627 | */ |
| 1630 | #define I_DIRTY_SYNC 1 | 1628 | #define I_DIRTY_SYNC 1 |
| 1631 | #define I_DIRTY_DATASYNC 2 | 1629 | #define I_DIRTY_DATASYNC 2 |
| @@ -2299,6 +2297,7 @@ extern const struct inode_operations page_symlink_inode_operations; | |||
| 2299 | extern int generic_readlink(struct dentry *, char __user *, int); | 2297 | extern int generic_readlink(struct dentry *, char __user *, int); |
| 2300 | extern void generic_fillattr(struct inode *, struct kstat *); | 2298 | extern void generic_fillattr(struct inode *, struct kstat *); |
| 2301 | extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *); | 2299 | extern int vfs_getattr(struct vfsmount *, struct dentry *, struct kstat *); |
| 2300 | void __inode_add_bytes(struct inode *inode, loff_t bytes); | ||
| 2302 | void inode_add_bytes(struct inode *inode, loff_t bytes); | 2301 | void inode_add_bytes(struct inode *inode, loff_t bytes); |
| 2303 | void inode_sub_bytes(struct inode *inode, loff_t bytes); | 2302 | void inode_sub_bytes(struct inode *inode, loff_t bytes); |
| 2304 | loff_t inode_get_bytes(struct inode *inode); | 2303 | loff_t inode_get_bytes(struct inode *inode); |
| @@ -2464,5 +2463,8 @@ int proc_nr_files(struct ctl_table *table, int write, | |||
| 2464 | 2463 | ||
| 2465 | int __init get_filesystem_list(char *buf); | 2464 | int __init get_filesystem_list(char *buf); |
| 2466 | 2465 | ||
| 2466 | #define ACC_MODE(x) ("\000\004\002\006"[(x)&O_ACCMODE]) | ||
| 2467 | #define OPEN_FMODE(flag) ((__force fmode_t)((flag + 1) & O_ACCMODE)) | ||
| 2468 | |||
| 2467 | #endif /* __KERNEL__ */ | 2469 | #endif /* __KERNEL__ */ |
| 2468 | #endif /* _LINUX_FS_H */ | 2470 | #endif /* _LINUX_FS_H */ |
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h index ad6bdf5a5970..486e8ad3bb50 100644 --- a/include/linux/kfifo.h +++ b/include/linux/kfifo.h | |||
| @@ -1,6 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * A simple kernel FIFO implementation. | 2 | * A generic kernel FIFO implementation. |
| 3 | * | 3 | * |
| 4 | * Copyright (C) 2009 Stefani Seibold <stefani@seibold.net> | ||
| 4 | * Copyright (C) 2004 Stelian Pop <stelian@popies.net> | 5 | * Copyright (C) 2004 Stelian Pop <stelian@popies.net> |
| 5 | * | 6 | * |
| 6 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
| @@ -18,6 +19,25 @@ | |||
| 18 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 19 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
| 19 | * | 20 | * |
| 20 | */ | 21 | */ |
| 22 | |||
| 23 | /* | ||
| 24 | * Howto porting drivers to the new generic fifo API: | ||
| 25 | * | ||
| 26 | * - Modify the declaration of the "struct kfifo *" object into a | ||
| 27 | * in-place "struct kfifo" object | ||
| 28 | * - Init the in-place object with kfifo_alloc() or kfifo_init() | ||
| 29 | * Note: The address of the in-place "struct kfifo" object must be | ||
| 30 | * passed as the first argument to this functions | ||
| 31 | * - Replace the use of __kfifo_put into kfifo_in and __kfifo_get | ||
| 32 | * into kfifo_out | ||
| 33 | * - Replace the use of kfifo_put into kfifo_in_locked and kfifo_get | ||
| 34 | * into kfifo_out_locked | ||
| 35 | * Note: the spinlock pointer formerly passed to kfifo_init/kfifo_alloc | ||
| 36 | * must be passed now to the kfifo_in_locked and kfifo_out_locked | ||
| 37 | * as the last parameter. | ||
| 38 | * - All formerly name __kfifo_* functions has been renamed into kfifo_* | ||
| 39 | */ | ||
| 40 | |||
| 21 | #ifndef _LINUX_KFIFO_H | 41 | #ifndef _LINUX_KFIFO_H |
| 22 | #define _LINUX_KFIFO_H | 42 | #define _LINUX_KFIFO_H |
| 23 | 43 | ||
| @@ -29,124 +49,563 @@ struct kfifo { | |||
| 29 | unsigned int size; /* the size of the allocated buffer */ | 49 | unsigned int size; /* the size of the allocated buffer */ |
| 30 | unsigned int in; /* data is added at offset (in % size) */ | 50 | unsigned int in; /* data is added at offset (in % size) */ |
| 31 | unsigned int out; /* data is extracted from off. (out % size) */ | 51 | unsigned int out; /* data is extracted from off. (out % size) */ |
| 32 | spinlock_t *lock; /* protects concurrent modifications */ | ||
| 33 | }; | 52 | }; |
| 34 | 53 | ||
| 35 | extern struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size, | 54 | /* |
| 36 | gfp_t gfp_mask, spinlock_t *lock); | 55 | * Macros for declaration and initialization of the kfifo datatype |
| 37 | extern struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask, | 56 | */ |
| 38 | spinlock_t *lock); | 57 | |
| 58 | /* helper macro */ | ||
| 59 | #define __kfifo_initializer(s, b) \ | ||
| 60 | (struct kfifo) { \ | ||
| 61 | .size = s, \ | ||
| 62 | .in = 0, \ | ||
| 63 | .out = 0, \ | ||
| 64 | .buffer = b \ | ||
| 65 | } | ||
| 66 | |||
| 67 | /** | ||
| 68 | * DECLARE_KFIFO - macro to declare a kfifo and the associated buffer | ||
| 69 | * @name: name of the declared kfifo datatype | ||
| 70 | * @size: size of the fifo buffer | ||
| 71 | * | ||
| 72 | * Note: the macro can be used inside struct or union declaration | ||
| 73 | * Note: the macro creates two objects: | ||
| 74 | * A kfifo object with the given name and a buffer for the kfifo | ||
| 75 | * object named name##kfifo_buffer | ||
| 76 | */ | ||
| 77 | #define DECLARE_KFIFO(name, size) \ | ||
| 78 | union { \ | ||
| 79 | struct kfifo name; \ | ||
| 80 | unsigned char name##kfifo_buffer[size + sizeof(struct kfifo)]; \ | ||
| 81 | } | ||
| 82 | |||
| 83 | /** | ||
| 84 | * INIT_KFIFO - Initialize a kfifo declared by DECLARED_KFIFO | ||
| 85 | * @name: name of the declared kfifo datatype | ||
| 86 | * @size: size of the fifo buffer | ||
| 87 | */ | ||
| 88 | #define INIT_KFIFO(name) \ | ||
| 89 | name = __kfifo_initializer(sizeof(name##kfifo_buffer) - \ | ||
| 90 | sizeof(struct kfifo), name##kfifo_buffer) | ||
| 91 | |||
| 92 | /** | ||
| 93 | * DEFINE_KFIFO - macro to define and initialize a kfifo | ||
| 94 | * @name: name of the declared kfifo datatype | ||
| 95 | * @size: size of the fifo buffer | ||
| 96 | * | ||
| 97 | * Note: the macro can be used for global and local kfifo data type variables | ||
| 98 | * Note: the macro creates two objects: | ||
| 99 | * A kfifo object with the given name and a buffer for the kfifo | ||
| 100 | * object named name##kfifo_buffer | ||
| 101 | */ | ||
| 102 | #define DEFINE_KFIFO(name, size) \ | ||
| 103 | unsigned char name##kfifo_buffer[size]; \ | ||
| 104 | struct kfifo name = __kfifo_initializer(size, name##kfifo_buffer) | ||
| 105 | |||
| 106 | #undef __kfifo_initializer | ||
| 107 | |||
| 108 | extern void kfifo_init(struct kfifo *fifo, unsigned char *buffer, | ||
| 109 | unsigned int size); | ||
| 110 | extern __must_check int kfifo_alloc(struct kfifo *fifo, unsigned int size, | ||
| 111 | gfp_t gfp_mask); | ||
| 39 | extern void kfifo_free(struct kfifo *fifo); | 112 | extern void kfifo_free(struct kfifo *fifo); |
| 40 | extern unsigned int __kfifo_put(struct kfifo *fifo, | 113 | extern unsigned int kfifo_in(struct kfifo *fifo, |
| 41 | const unsigned char *buffer, unsigned int len); | 114 | const unsigned char *from, unsigned int len); |
| 42 | extern unsigned int __kfifo_get(struct kfifo *fifo, | 115 | extern __must_check unsigned int kfifo_out(struct kfifo *fifo, |
| 43 | unsigned char *buffer, unsigned int len); | 116 | unsigned char *to, unsigned int len); |
| 44 | 117 | ||
| 45 | /** | 118 | /** |
| 46 | * __kfifo_reset - removes the entire FIFO contents, no locking version | 119 | * kfifo_reset - removes the entire FIFO contents |
| 47 | * @fifo: the fifo to be emptied. | 120 | * @fifo: the fifo to be emptied. |
| 48 | */ | 121 | */ |
| 49 | static inline void __kfifo_reset(struct kfifo *fifo) | 122 | static inline void kfifo_reset(struct kfifo *fifo) |
| 50 | { | 123 | { |
| 51 | fifo->in = fifo->out = 0; | 124 | fifo->in = fifo->out = 0; |
| 52 | } | 125 | } |
| 53 | 126 | ||
| 54 | /** | 127 | /** |
| 55 | * kfifo_reset - removes the entire FIFO contents | 128 | * kfifo_reset_out - skip FIFO contents |
| 56 | * @fifo: the fifo to be emptied. | 129 | * @fifo: the fifo to be emptied. |
| 57 | */ | 130 | */ |
| 58 | static inline void kfifo_reset(struct kfifo *fifo) | 131 | static inline void kfifo_reset_out(struct kfifo *fifo) |
| 59 | { | 132 | { |
| 60 | unsigned long flags; | 133 | smp_mb(); |
| 134 | fifo->out = fifo->in; | ||
| 135 | } | ||
| 61 | 136 | ||
| 62 | spin_lock_irqsave(fifo->lock, flags); | 137 | /** |
| 138 | * kfifo_size - returns the size of the fifo in bytes | ||
| 139 | * @fifo: the fifo to be used. | ||
| 140 | */ | ||
| 141 | static inline __must_check unsigned int kfifo_size(struct kfifo *fifo) | ||
| 142 | { | ||
| 143 | return fifo->size; | ||
| 144 | } | ||
| 63 | 145 | ||
| 64 | __kfifo_reset(fifo); | 146 | /** |
| 147 | * kfifo_len - returns the number of used bytes in the FIFO | ||
| 148 | * @fifo: the fifo to be used. | ||
| 149 | */ | ||
| 150 | static inline unsigned int kfifo_len(struct kfifo *fifo) | ||
| 151 | { | ||
| 152 | register unsigned int out; | ||
| 65 | 153 | ||
| 66 | spin_unlock_irqrestore(fifo->lock, flags); | 154 | out = fifo->out; |
| 155 | smp_rmb(); | ||
| 156 | return fifo->in - out; | ||
| 67 | } | 157 | } |
| 68 | 158 | ||
| 69 | /** | 159 | /** |
| 70 | * kfifo_put - puts some data into the FIFO | 160 | * kfifo_is_empty - returns true if the fifo is empty |
| 71 | * @fifo: the fifo to be used. | 161 | * @fifo: the fifo to be used. |
| 72 | * @buffer: the data to be added. | 162 | */ |
| 73 | * @len: the length of the data to be added. | 163 | static inline __must_check int kfifo_is_empty(struct kfifo *fifo) |
| 164 | { | ||
| 165 | return fifo->in == fifo->out; | ||
| 166 | } | ||
| 167 | |||
| 168 | /** | ||
| 169 | * kfifo_is_full - returns true if the fifo is full | ||
| 170 | * @fifo: the fifo to be used. | ||
| 171 | */ | ||
| 172 | static inline __must_check int kfifo_is_full(struct kfifo *fifo) | ||
| 173 | { | ||
| 174 | return kfifo_len(fifo) == kfifo_size(fifo); | ||
| 175 | } | ||
| 176 | |||
| 177 | /** | ||
| 178 | * kfifo_avail - returns the number of bytes available in the FIFO | ||
| 179 | * @fifo: the fifo to be used. | ||
| 180 | */ | ||
| 181 | static inline __must_check unsigned int kfifo_avail(struct kfifo *fifo) | ||
| 182 | { | ||
| 183 | return kfifo_size(fifo) - kfifo_len(fifo); | ||
| 184 | } | ||
| 185 | |||
| 186 | /** | ||
| 187 | * kfifo_in_locked - puts some data into the FIFO using a spinlock for locking | ||
| 188 | * @fifo: the fifo to be used. | ||
| 189 | * @from: the data to be added. | ||
| 190 | * @n: the length of the data to be added. | ||
| 191 | * @lock: pointer to the spinlock to use for locking. | ||
| 74 | * | 192 | * |
| 75 | * This function copies at most @len bytes from the @buffer into | 193 | * This function copies at most @len bytes from the @from buffer into |
| 76 | * the FIFO depending on the free space, and returns the number of | 194 | * the FIFO depending on the free space, and returns the number of |
| 77 | * bytes copied. | 195 | * bytes copied. |
| 78 | */ | 196 | */ |
| 79 | static inline unsigned int kfifo_put(struct kfifo *fifo, | 197 | static inline unsigned int kfifo_in_locked(struct kfifo *fifo, |
| 80 | const unsigned char *buffer, unsigned int len) | 198 | const unsigned char *from, unsigned int n, spinlock_t *lock) |
| 81 | { | 199 | { |
| 82 | unsigned long flags; | 200 | unsigned long flags; |
| 83 | unsigned int ret; | 201 | unsigned int ret; |
| 84 | 202 | ||
| 85 | spin_lock_irqsave(fifo->lock, flags); | 203 | spin_lock_irqsave(lock, flags); |
| 86 | 204 | ||
| 87 | ret = __kfifo_put(fifo, buffer, len); | 205 | ret = kfifo_in(fifo, from, n); |
| 88 | 206 | ||
| 89 | spin_unlock_irqrestore(fifo->lock, flags); | 207 | spin_unlock_irqrestore(lock, flags); |
| 90 | 208 | ||
| 91 | return ret; | 209 | return ret; |
| 92 | } | 210 | } |
| 93 | 211 | ||
| 94 | /** | 212 | /** |
| 95 | * kfifo_get - gets some data from the FIFO | 213 | * kfifo_out_locked - gets some data from the FIFO using a spinlock for locking |
| 96 | * @fifo: the fifo to be used. | 214 | * @fifo: the fifo to be used. |
| 97 | * @buffer: where the data must be copied. | 215 | * @to: where the data must be copied. |
| 98 | * @len: the size of the destination buffer. | 216 | * @n: the size of the destination buffer. |
| 217 | * @lock: pointer to the spinlock to use for locking. | ||
| 99 | * | 218 | * |
| 100 | * This function copies at most @len bytes from the FIFO into the | 219 | * This function copies at most @len bytes from the FIFO into the |
| 101 | * @buffer and returns the number of copied bytes. | 220 | * @to buffer and returns the number of copied bytes. |
| 102 | */ | 221 | */ |
| 103 | static inline unsigned int kfifo_get(struct kfifo *fifo, | 222 | static inline __must_check unsigned int kfifo_out_locked(struct kfifo *fifo, |
| 104 | unsigned char *buffer, unsigned int len) | 223 | unsigned char *to, unsigned int n, spinlock_t *lock) |
| 105 | { | 224 | { |
| 106 | unsigned long flags; | 225 | unsigned long flags; |
| 107 | unsigned int ret; | 226 | unsigned int ret; |
| 108 | 227 | ||
| 109 | spin_lock_irqsave(fifo->lock, flags); | 228 | spin_lock_irqsave(lock, flags); |
| 110 | 229 | ||
| 111 | ret = __kfifo_get(fifo, buffer, len); | 230 | ret = kfifo_out(fifo, to, n); |
| 112 | 231 | ||
| 113 | /* | 232 | /* |
| 114 | * optimization: if the FIFO is empty, set the indices to 0 | 233 | * optimization: if the FIFO is empty, set the indices to 0 |
| 115 | * so we don't wrap the next time | 234 | * so we don't wrap the next time |
| 116 | */ | 235 | */ |
| 117 | if (fifo->in == fifo->out) | 236 | if (kfifo_is_empty(fifo)) |
| 118 | fifo->in = fifo->out = 0; | 237 | kfifo_reset(fifo); |
| 238 | |||
| 239 | spin_unlock_irqrestore(lock, flags); | ||
| 240 | |||
| 241 | return ret; | ||
| 242 | } | ||
| 243 | |||
| 244 | extern void kfifo_skip(struct kfifo *fifo, unsigned int len); | ||
| 245 | |||
| 246 | extern __must_check unsigned int kfifo_from_user(struct kfifo *fifo, | ||
| 247 | const void __user *from, unsigned int n); | ||
| 248 | |||
| 249 | extern __must_check unsigned int kfifo_to_user(struct kfifo *fifo, | ||
| 250 | void __user *to, unsigned int n); | ||
| 251 | |||
| 252 | /** | ||
| 253 | * __kfifo_add_out internal helper function for updating the out offset | ||
| 254 | */ | ||
| 255 | static inline void __kfifo_add_out(struct kfifo *fifo, | ||
| 256 | unsigned int off) | ||
| 257 | { | ||
| 258 | smp_mb(); | ||
| 259 | fifo->out += off; | ||
| 260 | } | ||
| 261 | |||
| 262 | /** | ||
| 263 | * __kfifo_add_in internal helper function for updating the in offset | ||
| 264 | */ | ||
| 265 | static inline void __kfifo_add_in(struct kfifo *fifo, | ||
| 266 | unsigned int off) | ||
| 267 | { | ||
| 268 | smp_wmb(); | ||
| 269 | fifo->in += off; | ||
| 270 | } | ||
| 271 | |||
| 272 | /** | ||
| 273 | * __kfifo_off internal helper function for calculating the index of a | ||
| 274 | * given offeset | ||
| 275 | */ | ||
| 276 | static inline unsigned int __kfifo_off(struct kfifo *fifo, unsigned int off) | ||
| 277 | { | ||
| 278 | return off & (fifo->size - 1); | ||
| 279 | } | ||
| 280 | |||
| 281 | /** | ||
| 282 | * __kfifo_peek_n internal helper function for determinate the length of | ||
| 283 | * the next record in the fifo | ||
| 284 | */ | ||
| 285 | static inline unsigned int __kfifo_peek_n(struct kfifo *fifo, | ||
| 286 | unsigned int recsize) | ||
| 287 | { | ||
| 288 | #define __KFIFO_GET(fifo, off, shift) \ | ||
| 289 | ((fifo)->buffer[__kfifo_off((fifo), (fifo)->out+(off))] << (shift)) | ||
| 290 | |||
| 291 | unsigned int l; | ||
| 292 | |||
| 293 | l = __KFIFO_GET(fifo, 0, 0); | ||
| 294 | |||
| 295 | if (--recsize) | ||
| 296 | l |= __KFIFO_GET(fifo, 1, 8); | ||
| 297 | |||
| 298 | return l; | ||
| 299 | #undef __KFIFO_GET | ||
| 300 | } | ||
| 301 | |||
| 302 | /** | ||
| 303 | * __kfifo_poke_n internal helper function for storing the length of | ||
| 304 | * the next record into the fifo | ||
| 305 | */ | ||
| 306 | static inline void __kfifo_poke_n(struct kfifo *fifo, | ||
| 307 | unsigned int recsize, unsigned int n) | ||
| 308 | { | ||
| 309 | #define __KFIFO_PUT(fifo, off, val, shift) \ | ||
| 310 | ( \ | ||
| 311 | (fifo)->buffer[__kfifo_off((fifo), (fifo)->in+(off))] = \ | ||
| 312 | (unsigned char)((val) >> (shift)) \ | ||
| 313 | ) | ||
| 119 | 314 | ||
| 120 | spin_unlock_irqrestore(fifo->lock, flags); | 315 | __KFIFO_PUT(fifo, 0, n, 0); |
| 121 | 316 | ||
| 317 | if (--recsize) | ||
| 318 | __KFIFO_PUT(fifo, 1, n, 8); | ||
| 319 | #undef __KFIFO_PUT | ||
| 320 | } | ||
| 321 | |||
| 322 | /** | ||
| 323 | * __kfifo_in_... internal functions for put date into the fifo | ||
| 324 | * do not call it directly, use kfifo_in_rec() instead | ||
| 325 | */ | ||
| 326 | extern unsigned int __kfifo_in_n(struct kfifo *fifo, | ||
| 327 | const void *from, unsigned int n, unsigned int recsize); | ||
| 328 | |||
| 329 | extern unsigned int __kfifo_in_generic(struct kfifo *fifo, | ||
| 330 | const void *from, unsigned int n, unsigned int recsize); | ||
| 331 | |||
| 332 | static inline unsigned int __kfifo_in_rec(struct kfifo *fifo, | ||
| 333 | const void *from, unsigned int n, unsigned int recsize) | ||
| 334 | { | ||
| 335 | unsigned int ret; | ||
| 336 | |||
| 337 | ret = __kfifo_in_n(fifo, from, n, recsize); | ||
| 338 | |||
| 339 | if (likely(ret == 0)) { | ||
| 340 | if (recsize) | ||
| 341 | __kfifo_poke_n(fifo, recsize, n); | ||
| 342 | __kfifo_add_in(fifo, n + recsize); | ||
| 343 | } | ||
| 122 | return ret; | 344 | return ret; |
| 123 | } | 345 | } |
| 124 | 346 | ||
| 125 | /** | 347 | /** |
| 126 | * __kfifo_len - returns the number of bytes available in the FIFO, no locking version | 348 | * kfifo_in_rec - puts some record data into the FIFO |
| 127 | * @fifo: the fifo to be used. | 349 | * @fifo: the fifo to be used. |
| 350 | * @from: the data to be added. | ||
| 351 | * @n: the length of the data to be added. | ||
| 352 | * @recsize: size of record field | ||
| 353 | * | ||
| 354 | * This function copies @n bytes from the @from into the FIFO and returns | ||
| 355 | * the number of bytes which cannot be copied. | ||
| 356 | * A returned value greater than the @n value means that the record doesn't | ||
| 357 | * fit into the buffer. | ||
| 358 | * | ||
| 359 | * Note that with only one concurrent reader and one concurrent | ||
| 360 | * writer, you don't need extra locking to use these functions. | ||
| 128 | */ | 361 | */ |
| 129 | static inline unsigned int __kfifo_len(struct kfifo *fifo) | 362 | static inline __must_check unsigned int kfifo_in_rec(struct kfifo *fifo, |
| 363 | void *from, unsigned int n, unsigned int recsize) | ||
| 130 | { | 364 | { |
| 131 | return fifo->in - fifo->out; | 365 | if (!__builtin_constant_p(recsize)) |
| 366 | return __kfifo_in_generic(fifo, from, n, recsize); | ||
| 367 | return __kfifo_in_rec(fifo, from, n, recsize); | ||
| 132 | } | 368 | } |
| 133 | 369 | ||
| 134 | /** | 370 | /** |
| 135 | * kfifo_len - returns the number of bytes available in the FIFO | 371 | * __kfifo_out_... internal functions for get date from the fifo |
| 372 | * do not call it directly, use kfifo_out_rec() instead | ||
| 373 | */ | ||
| 374 | extern unsigned int __kfifo_out_n(struct kfifo *fifo, | ||
| 375 | void *to, unsigned int reclen, unsigned int recsize); | ||
| 376 | |||
| 377 | extern unsigned int __kfifo_out_generic(struct kfifo *fifo, | ||
| 378 | void *to, unsigned int n, | ||
| 379 | unsigned int recsize, unsigned int *total); | ||
| 380 | |||
| 381 | static inline unsigned int __kfifo_out_rec(struct kfifo *fifo, | ||
| 382 | void *to, unsigned int n, unsigned int recsize, | ||
| 383 | unsigned int *total) | ||
| 384 | { | ||
| 385 | unsigned int l; | ||
| 386 | |||
| 387 | if (!recsize) { | ||
| 388 | l = n; | ||
| 389 | if (total) | ||
| 390 | *total = l; | ||
| 391 | } else { | ||
| 392 | l = __kfifo_peek_n(fifo, recsize); | ||
| 393 | if (total) | ||
| 394 | *total = l; | ||
| 395 | if (n < l) | ||
| 396 | return l; | ||
| 397 | } | ||
| 398 | |||
| 399 | return __kfifo_out_n(fifo, to, l, recsize); | ||
| 400 | } | ||
| 401 | |||
| 402 | /** | ||
| 403 | * kfifo_out_rec - gets some record data from the FIFO | ||
| 136 | * @fifo: the fifo to be used. | 404 | * @fifo: the fifo to be used. |
| 405 | * @to: where the data must be copied. | ||
| 406 | * @n: the size of the destination buffer. | ||
| 407 | * @recsize: size of record field | ||
| 408 | * @total: pointer where the total number of to copied bytes should stored | ||
| 409 | * | ||
| 410 | * This function copies at most @n bytes from the FIFO to @to and returns the | ||
| 411 | * number of bytes which cannot be copied. | ||
| 412 | * A returned value greater than the @n value means that the record doesn't | ||
| 413 | * fit into the @to buffer. | ||
| 414 | * | ||
| 415 | * Note that with only one concurrent reader and one concurrent | ||
| 416 | * writer, you don't need extra locking to use these functions. | ||
| 137 | */ | 417 | */ |
| 138 | static inline unsigned int kfifo_len(struct kfifo *fifo) | 418 | static inline __must_check unsigned int kfifo_out_rec(struct kfifo *fifo, |
| 419 | void *to, unsigned int n, unsigned int recsize, | ||
| 420 | unsigned int *total) | ||
| 421 | |||
| 139 | { | 422 | { |
| 140 | unsigned long flags; | 423 | if (!__builtin_constant_p(recsize)) |
| 141 | unsigned int ret; | 424 | return __kfifo_out_generic(fifo, to, n, recsize, total); |
| 425 | return __kfifo_out_rec(fifo, to, n, recsize, total); | ||
| 426 | } | ||
| 427 | |||
| 428 | /** | ||
| 429 | * __kfifo_from_user_... internal functions for transfer from user space into | ||
| 430 | * the fifo. do not call it directly, use kfifo_from_user_rec() instead | ||
| 431 | */ | ||
| 432 | extern unsigned int __kfifo_from_user_n(struct kfifo *fifo, | ||
| 433 | const void __user *from, unsigned int n, unsigned int recsize); | ||
| 142 | 434 | ||
| 143 | spin_lock_irqsave(fifo->lock, flags); | 435 | extern unsigned int __kfifo_from_user_generic(struct kfifo *fifo, |
| 436 | const void __user *from, unsigned int n, unsigned int recsize); | ||
| 144 | 437 | ||
| 145 | ret = __kfifo_len(fifo); | 438 | static inline unsigned int __kfifo_from_user_rec(struct kfifo *fifo, |
| 439 | const void __user *from, unsigned int n, unsigned int recsize) | ||
| 440 | { | ||
| 441 | unsigned int ret; | ||
| 146 | 442 | ||
| 147 | spin_unlock_irqrestore(fifo->lock, flags); | 443 | ret = __kfifo_from_user_n(fifo, from, n, recsize); |
| 148 | 444 | ||
| 445 | if (likely(ret == 0)) { | ||
| 446 | if (recsize) | ||
| 447 | __kfifo_poke_n(fifo, recsize, n); | ||
| 448 | __kfifo_add_in(fifo, n + recsize); | ||
| 449 | } | ||
| 149 | return ret; | 450 | return ret; |
| 150 | } | 451 | } |
| 151 | 452 | ||
| 453 | /** | ||
| 454 | * kfifo_from_user_rec - puts some data from user space into the FIFO | ||
| 455 | * @fifo: the fifo to be used. | ||
| 456 | * @from: pointer to the data to be added. | ||
| 457 | * @n: the length of the data to be added. | ||
| 458 | * @recsize: size of record field | ||
| 459 | * | ||
| 460 | * This function copies @n bytes from the @from into the | ||
| 461 | * FIFO and returns the number of bytes which cannot be copied. | ||
| 462 | * | ||
| 463 | * If the returned value is equal or less the @n value, the copy_from_user() | ||
| 464 | * functions has failed. Otherwise the record doesn't fit into the buffer. | ||
| 465 | * | ||
| 466 | * Note that with only one concurrent reader and one concurrent | ||
| 467 | * writer, you don't need extra locking to use these functions. | ||
| 468 | */ | ||
| 469 | static inline __must_check unsigned int kfifo_from_user_rec(struct kfifo *fifo, | ||
| 470 | const void __user *from, unsigned int n, unsigned int recsize) | ||
| 471 | { | ||
| 472 | if (!__builtin_constant_p(recsize)) | ||
| 473 | return __kfifo_from_user_generic(fifo, from, n, recsize); | ||
| 474 | return __kfifo_from_user_rec(fifo, from, n, recsize); | ||
| 475 | } | ||
| 476 | |||
| 477 | /** | ||
| 478 | * __kfifo_to_user_... internal functions for transfer fifo data into user space | ||
| 479 | * do not call it directly, use kfifo_to_user_rec() instead | ||
| 480 | */ | ||
| 481 | extern unsigned int __kfifo_to_user_n(struct kfifo *fifo, | ||
| 482 | void __user *to, unsigned int n, unsigned int reclen, | ||
| 483 | unsigned int recsize); | ||
| 484 | |||
| 485 | extern unsigned int __kfifo_to_user_generic(struct kfifo *fifo, | ||
| 486 | void __user *to, unsigned int n, unsigned int recsize, | ||
| 487 | unsigned int *total); | ||
| 488 | |||
| 489 | static inline unsigned int __kfifo_to_user_rec(struct kfifo *fifo, | ||
| 490 | void __user *to, unsigned int n, | ||
| 491 | unsigned int recsize, unsigned int *total) | ||
| 492 | { | ||
| 493 | unsigned int l; | ||
| 494 | |||
| 495 | if (!recsize) { | ||
| 496 | l = n; | ||
| 497 | if (total) | ||
| 498 | *total = l; | ||
| 499 | } else { | ||
| 500 | l = __kfifo_peek_n(fifo, recsize); | ||
| 501 | if (total) | ||
| 502 | *total = l; | ||
| 503 | if (n < l) | ||
| 504 | return l; | ||
| 505 | } | ||
| 506 | |||
| 507 | return __kfifo_to_user_n(fifo, to, n, l, recsize); | ||
| 508 | } | ||
| 509 | |||
| 510 | /** | ||
| 511 | * kfifo_to_user_rec - gets data from the FIFO and write it to user space | ||
| 512 | * @fifo: the fifo to be used. | ||
| 513 | * @to: where the data must be copied. | ||
| 514 | * @n: the size of the destination buffer. | ||
| 515 | * @recsize: size of record field | ||
| 516 | * @total: pointer where the total number of to copied bytes should stored | ||
| 517 | * | ||
| 518 | * This function copies at most @n bytes from the FIFO to the @to. | ||
| 519 | * In case of an error, the function returns the number of bytes which cannot | ||
| 520 | * be copied. | ||
| 521 | * If the returned value is equal or less the @n value, the copy_to_user() | ||
| 522 | * functions has failed. Otherwise the record doesn't fit into the @to buffer. | ||
| 523 | * | ||
| 524 | * Note that with only one concurrent reader and one concurrent | ||
| 525 | * writer, you don't need extra locking to use these functions. | ||
| 526 | */ | ||
| 527 | static inline __must_check unsigned int kfifo_to_user_rec(struct kfifo *fifo, | ||
| 528 | void __user *to, unsigned int n, unsigned int recsize, | ||
| 529 | unsigned int *total) | ||
| 530 | { | ||
| 531 | if (!__builtin_constant_p(recsize)) | ||
| 532 | return __kfifo_to_user_generic(fifo, to, n, recsize, total); | ||
| 533 | return __kfifo_to_user_rec(fifo, to, n, recsize, total); | ||
| 534 | } | ||
| 535 | |||
| 536 | /** | ||
| 537 | * __kfifo_peek_... internal functions for peek into the next fifo record | ||
| 538 | * do not call it directly, use kfifo_peek_rec() instead | ||
| 539 | */ | ||
| 540 | extern unsigned int __kfifo_peek_generic(struct kfifo *fifo, | ||
| 541 | unsigned int recsize); | ||
| 542 | |||
| 543 | /** | ||
| 544 | * kfifo_peek_rec - gets the size of the next FIFO record data | ||
| 545 | * @fifo: the fifo to be used. | ||
| 546 | * @recsize: size of record field | ||
| 547 | * | ||
| 548 | * This function returns the size of the next FIFO record in number of bytes | ||
| 549 | */ | ||
| 550 | static inline __must_check unsigned int kfifo_peek_rec(struct kfifo *fifo, | ||
| 551 | unsigned int recsize) | ||
| 552 | { | ||
| 553 | if (!__builtin_constant_p(recsize)) | ||
| 554 | return __kfifo_peek_generic(fifo, recsize); | ||
| 555 | if (!recsize) | ||
| 556 | return kfifo_len(fifo); | ||
| 557 | return __kfifo_peek_n(fifo, recsize); | ||
| 558 | } | ||
| 559 | |||
| 560 | /** | ||
| 561 | * __kfifo_skip_... internal functions for skip the next fifo record | ||
| 562 | * do not call it directly, use kfifo_skip_rec() instead | ||
| 563 | */ | ||
| 564 | extern void __kfifo_skip_generic(struct kfifo *fifo, unsigned int recsize); | ||
| 565 | |||
| 566 | static inline void __kfifo_skip_rec(struct kfifo *fifo, | ||
| 567 | unsigned int recsize) | ||
| 568 | { | ||
| 569 | unsigned int l; | ||
| 570 | |||
| 571 | if (recsize) { | ||
| 572 | l = __kfifo_peek_n(fifo, recsize); | ||
| 573 | |||
| 574 | if (l + recsize <= kfifo_len(fifo)) { | ||
| 575 | __kfifo_add_out(fifo, l + recsize); | ||
| 576 | return; | ||
| 577 | } | ||
| 578 | } | ||
| 579 | kfifo_reset_out(fifo); | ||
| 580 | } | ||
| 581 | |||
| 582 | /** | ||
| 583 | * kfifo_skip_rec - skip the next fifo out record | ||
| 584 | * @fifo: the fifo to be used. | ||
| 585 | * @recsize: size of record field | ||
| 586 | * | ||
| 587 | * This function skips the next FIFO record | ||
| 588 | */ | ||
| 589 | static inline void kfifo_skip_rec(struct kfifo *fifo, | ||
| 590 | unsigned int recsize) | ||
| 591 | { | ||
| 592 | if (!__builtin_constant_p(recsize)) | ||
| 593 | __kfifo_skip_generic(fifo, recsize); | ||
| 594 | else | ||
| 595 | __kfifo_skip_rec(fifo, recsize); | ||
| 596 | } | ||
| 597 | |||
| 598 | /** | ||
| 599 | * kfifo_avail_rec - returns the number of bytes available in a record FIFO | ||
| 600 | * @fifo: the fifo to be used. | ||
| 601 | * @recsize: size of record field | ||
| 602 | */ | ||
| 603 | static inline __must_check unsigned int kfifo_avail_rec(struct kfifo *fifo, | ||
| 604 | unsigned int recsize) | ||
| 605 | { | ||
| 606 | unsigned int l = kfifo_size(fifo) - kfifo_len(fifo); | ||
| 607 | |||
| 608 | return (l > recsize) ? l - recsize : 0; | ||
| 609 | } | ||
| 610 | |||
| 152 | #endif | 611 | #endif |
diff --git a/include/linux/memory.h b/include/linux/memory.h index 37fa19b34ef5..1adfe779eb99 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h | |||
| @@ -50,6 +50,19 @@ struct memory_notify { | |||
| 50 | int status_change_nid; | 50 | int status_change_nid; |
| 51 | }; | 51 | }; |
| 52 | 52 | ||
| 53 | /* | ||
| 54 | * During pageblock isolation, count the number of pages within the | ||
| 55 | * range [start_pfn, start_pfn + nr_pages) which are owned by code | ||
| 56 | * in the notifier chain. | ||
| 57 | */ | ||
| 58 | #define MEM_ISOLATE_COUNT (1<<0) | ||
| 59 | |||
| 60 | struct memory_isolate_notify { | ||
| 61 | unsigned long start_pfn; /* Start of range to check */ | ||
| 62 | unsigned int nr_pages; /* # pages in range to check */ | ||
| 63 | unsigned int pages_found; /* # pages owned found by callbacks */ | ||
| 64 | }; | ||
| 65 | |||
| 53 | struct notifier_block; | 66 | struct notifier_block; |
| 54 | struct mem_section; | 67 | struct mem_section; |
| 55 | 68 | ||
| @@ -76,14 +89,28 @@ static inline int memory_notify(unsigned long val, void *v) | |||
| 76 | { | 89 | { |
| 77 | return 0; | 90 | return 0; |
| 78 | } | 91 | } |
| 92 | static inline int register_memory_isolate_notifier(struct notifier_block *nb) | ||
| 93 | { | ||
| 94 | return 0; | ||
| 95 | } | ||
| 96 | static inline void unregister_memory_isolate_notifier(struct notifier_block *nb) | ||
| 97 | { | ||
| 98 | } | ||
| 99 | static inline int memory_isolate_notify(unsigned long val, void *v) | ||
| 100 | { | ||
| 101 | return 0; | ||
| 102 | } | ||
| 79 | #else | 103 | #else |
| 80 | extern int register_memory_notifier(struct notifier_block *nb); | 104 | extern int register_memory_notifier(struct notifier_block *nb); |
| 81 | extern void unregister_memory_notifier(struct notifier_block *nb); | 105 | extern void unregister_memory_notifier(struct notifier_block *nb); |
| 106 | extern int register_memory_isolate_notifier(struct notifier_block *nb); | ||
| 107 | extern void unregister_memory_isolate_notifier(struct notifier_block *nb); | ||
| 82 | extern int register_new_memory(int, struct mem_section *); | 108 | extern int register_new_memory(int, struct mem_section *); |
| 83 | extern int unregister_memory_section(struct mem_section *); | 109 | extern int unregister_memory_section(struct mem_section *); |
| 84 | extern int memory_dev_init(void); | 110 | extern int memory_dev_init(void); |
| 85 | extern int remove_memory_block(unsigned long, struct mem_section *, int); | 111 | extern int remove_memory_block(unsigned long, struct mem_section *, int); |
| 86 | extern int memory_notify(unsigned long val, void *v); | 112 | extern int memory_notify(unsigned long val, void *v); |
| 113 | extern int memory_isolate_notify(unsigned long val, void *v); | ||
| 87 | extern struct memory_block *find_memory_block(struct mem_section *); | 114 | extern struct memory_block *find_memory_block(struct mem_section *); |
| 88 | #define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT) | 115 | #define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT) |
| 89 | enum mem_add_context { BOOT, HOTPLUG }; | 116 | enum mem_add_context { BOOT, HOTPLUG }; |
diff --git a/include/linux/namei.h b/include/linux/namei.h index 028946750289..05b441d93642 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h | |||
| @@ -72,8 +72,6 @@ extern int vfs_path_lookup(struct dentry *, struct vfsmount *, | |||
| 72 | 72 | ||
| 73 | extern struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry, | 73 | extern struct file *lookup_instantiate_filp(struct nameidata *nd, struct dentry *dentry, |
| 74 | int (*open)(struct inode *, struct file *)); | 74 | int (*open)(struct inode *, struct file *)); |
| 75 | extern struct file *nameidata_to_filp(struct nameidata *nd, int flags); | ||
| 76 | extern void release_open_intent(struct nameidata *); | ||
| 77 | 75 | ||
| 78 | extern struct dentry *lookup_one_len(const char *, struct dentry *, int); | 76 | extern struct dentry *lookup_one_len(const char *, struct dentry *, int); |
| 79 | 77 | ||
diff --git a/include/linux/quota.h b/include/linux/quota.h index e70e62194243..a6861f117480 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h | |||
| @@ -315,8 +315,9 @@ struct dquot_operations { | |||
| 315 | int (*claim_space) (struct inode *, qsize_t); | 315 | int (*claim_space) (struct inode *, qsize_t); |
| 316 | /* release rsved quota for delayed alloc */ | 316 | /* release rsved quota for delayed alloc */ |
| 317 | void (*release_rsv) (struct inode *, qsize_t); | 317 | void (*release_rsv) (struct inode *, qsize_t); |
| 318 | /* get reserved quota for delayed alloc */ | 318 | /* get reserved quota for delayed alloc, value returned is managed by |
| 319 | qsize_t (*get_reserved_space) (struct inode *); | 319 | * quota code only */ |
| 320 | qsize_t *(*get_reserved_space) (struct inode *); | ||
| 320 | }; | 321 | }; |
| 321 | 322 | ||
| 322 | /* Operations handling requests from userspace */ | 323 | /* Operations handling requests from userspace */ |
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index acf6e457c04b..1819396ed501 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #include <linux/kref.h> | 16 | #include <linux/kref.h> |
| 17 | #include <linux/mutex.h> | 17 | #include <linux/mutex.h> |
| 18 | #include <linux/sysrq.h> | 18 | #include <linux/sysrq.h> |
| 19 | #include <linux/kfifo.h> | ||
| 19 | 20 | ||
| 20 | #define SERIAL_TTY_MAJOR 188 /* Nice legal number now */ | 21 | #define SERIAL_TTY_MAJOR 188 /* Nice legal number now */ |
| 21 | #define SERIAL_TTY_MINORS 254 /* loads of devices :) */ | 22 | #define SERIAL_TTY_MINORS 254 /* loads of devices :) */ |
| @@ -94,7 +95,7 @@ struct usb_serial_port { | |||
| 94 | unsigned char *bulk_out_buffer; | 95 | unsigned char *bulk_out_buffer; |
| 95 | int bulk_out_size; | 96 | int bulk_out_size; |
| 96 | struct urb *write_urb; | 97 | struct urb *write_urb; |
| 97 | struct kfifo *write_fifo; | 98 | struct kfifo write_fifo; |
| 98 | int write_urb_busy; | 99 | int write_urb_busy; |
| 99 | __u8 bulk_out_endpointAddress; | 100 | __u8 bulk_out_endpointAddress; |
| 100 | 101 | ||
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h index 7394e3bc8f4b..ff92b46f5153 100644 --- a/include/scsi/libiscsi.h +++ b/include/scsi/libiscsi.h | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <linux/mutex.h> | 28 | #include <linux/mutex.h> |
| 29 | #include <linux/timer.h> | 29 | #include <linux/timer.h> |
| 30 | #include <linux/workqueue.h> | 30 | #include <linux/workqueue.h> |
| 31 | #include <linux/kfifo.h> | ||
| 31 | #include <scsi/iscsi_proto.h> | 32 | #include <scsi/iscsi_proto.h> |
| 32 | #include <scsi/iscsi_if.h> | 33 | #include <scsi/iscsi_if.h> |
| 33 | #include <scsi/scsi_transport_iscsi.h> | 34 | #include <scsi/scsi_transport_iscsi.h> |
| @@ -231,7 +232,7 @@ struct iscsi_conn { | |||
| 231 | }; | 232 | }; |
| 232 | 233 | ||
| 233 | struct iscsi_pool { | 234 | struct iscsi_pool { |
| 234 | struct kfifo *queue; /* FIFO Queue */ | 235 | struct kfifo queue; /* FIFO Queue */ |
| 235 | void **pool; /* Pool of elements */ | 236 | void **pool; /* Pool of elements */ |
| 236 | int max; /* Max number of elements */ | 237 | int max; /* Max number of elements */ |
| 237 | }; | 238 | }; |
diff --git a/include/scsi/libiscsi_tcp.h b/include/scsi/libiscsi_tcp.h index 9e3182e659db..741ae7ed4394 100644 --- a/include/scsi/libiscsi_tcp.h +++ b/include/scsi/libiscsi_tcp.h | |||
| @@ -80,7 +80,7 @@ struct iscsi_tcp_task { | |||
| 80 | int data_offset; | 80 | int data_offset; |
| 81 | struct iscsi_r2t_info *r2t; /* in progress solict R2T */ | 81 | struct iscsi_r2t_info *r2t; /* in progress solict R2T */ |
| 82 | struct iscsi_pool r2tpool; | 82 | struct iscsi_pool r2tpool; |
| 83 | struct kfifo *r2tqueue; | 83 | struct kfifo r2tqueue; |
| 84 | void *dd_data; | 84 | void *dd_data; |
| 85 | }; | 85 | }; |
| 86 | 86 | ||
diff --git a/include/scsi/libsrp.h b/include/scsi/libsrp.h index ba615e4c1d7c..07e3adde21d9 100644 --- a/include/scsi/libsrp.h +++ b/include/scsi/libsrp.h | |||
| @@ -21,7 +21,7 @@ struct srp_buf { | |||
| 21 | struct srp_queue { | 21 | struct srp_queue { |
| 22 | void *pool; | 22 | void *pool; |
| 23 | void *items; | 23 | void *items; |
| 24 | struct kfifo *queue; | 24 | struct kfifo queue; |
| 25 | spinlock_t lock; | 25 | spinlock_t lock; |
| 26 | }; | 26 | }; |
| 27 | 27 | ||
diff --git a/kernel/auditsc.c b/kernel/auditsc.c index 267e484f0198..fc0f928167e7 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c | |||
| @@ -250,7 +250,6 @@ struct audit_context { | |||
| 250 | #endif | 250 | #endif |
| 251 | }; | 251 | }; |
| 252 | 252 | ||
| 253 | #define ACC_MODE(x) ("\004\002\006\006"[(x)&O_ACCMODE]) | ||
| 254 | static inline int open_arg(int flags, int mask) | 253 | static inline int open_arg(int flags, int mask) |
| 255 | { | 254 | { |
| 256 | int n = ACC_MODE(flags); | 255 | int n = ACC_MODE(flags); |
diff --git a/kernel/kfifo.c b/kernel/kfifo.c index 3765ff3c1bbe..e92d519f93b1 100644 --- a/kernel/kfifo.c +++ b/kernel/kfifo.c | |||
| @@ -1,6 +1,7 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * A simple kernel FIFO implementation. | 2 | * A generic kernel FIFO implementation. |
| 3 | * | 3 | * |
| 4 | * Copyright (C) 2009 Stefani Seibold <stefani@seibold.net> | ||
| 4 | * Copyright (C) 2004 Stelian Pop <stelian@popies.net> | 5 | * Copyright (C) 2004 Stelian Pop <stelian@popies.net> |
| 5 | * | 6 | * |
| 6 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
| @@ -25,50 +26,48 @@ | |||
| 25 | #include <linux/err.h> | 26 | #include <linux/err.h> |
| 26 | #include <linux/kfifo.h> | 27 | #include <linux/kfifo.h> |
| 27 | #include <linux/log2.h> | 28 | #include <linux/log2.h> |
| 29 | #include <linux/uaccess.h> | ||
| 30 | |||
| 31 | static void _kfifo_init(struct kfifo *fifo, unsigned char *buffer, | ||
| 32 | unsigned int size) | ||
| 33 | { | ||
| 34 | fifo->buffer = buffer; | ||
| 35 | fifo->size = size; | ||
| 36 | |||
| 37 | kfifo_reset(fifo); | ||
| 38 | } | ||
| 28 | 39 | ||
| 29 | /** | 40 | /** |
| 30 | * kfifo_init - allocates a new FIFO using a preallocated buffer | 41 | * kfifo_init - initialize a FIFO using a preallocated buffer |
| 42 | * @fifo: the fifo to assign the buffer | ||
| 31 | * @buffer: the preallocated buffer to be used. | 43 | * @buffer: the preallocated buffer to be used. |
| 32 | * @size: the size of the internal buffer, this have to be a power of 2. | 44 | * @size: the size of the internal buffer, this have to be a power of 2. |
| 33 | * @gfp_mask: get_free_pages mask, passed to kmalloc() | ||
| 34 | * @lock: the lock to be used to protect the fifo buffer | ||
| 35 | * | 45 | * |
| 36 | * Do NOT pass the kfifo to kfifo_free() after use! Simply free the | ||
| 37 | * &struct kfifo with kfree(). | ||
| 38 | */ | 46 | */ |
| 39 | struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size, | 47 | void kfifo_init(struct kfifo *fifo, unsigned char *buffer, unsigned int size) |
| 40 | gfp_t gfp_mask, spinlock_t *lock) | ||
| 41 | { | 48 | { |
| 42 | struct kfifo *fifo; | ||
| 43 | |||
| 44 | /* size must be a power of 2 */ | 49 | /* size must be a power of 2 */ |
| 45 | BUG_ON(!is_power_of_2(size)); | 50 | BUG_ON(!is_power_of_2(size)); |
| 46 | 51 | ||
| 47 | fifo = kmalloc(sizeof(struct kfifo), gfp_mask); | 52 | _kfifo_init(fifo, buffer, size); |
| 48 | if (!fifo) | ||
| 49 | return ERR_PTR(-ENOMEM); | ||
| 50 | |||
| 51 | fifo->buffer = buffer; | ||
| 52 | fifo->size = size; | ||
| 53 | fifo->in = fifo->out = 0; | ||
| 54 | fifo->lock = lock; | ||
| 55 | |||
| 56 | return fifo; | ||
| 57 | } | 53 | } |
| 58 | EXPORT_SYMBOL(kfifo_init); | 54 | EXPORT_SYMBOL(kfifo_init); |
| 59 | 55 | ||
| 60 | /** | 56 | /** |
| 61 | * kfifo_alloc - allocates a new FIFO and its internal buffer | 57 | * kfifo_alloc - allocates a new FIFO internal buffer |
| 62 | * @size: the size of the internal buffer to be allocated. | 58 | * @fifo: the fifo to assign then new buffer |
| 59 | * @size: the size of the buffer to be allocated, this have to be a power of 2. | ||
| 63 | * @gfp_mask: get_free_pages mask, passed to kmalloc() | 60 | * @gfp_mask: get_free_pages mask, passed to kmalloc() |
| 64 | * @lock: the lock to be used to protect the fifo buffer | 61 | * |
| 62 | * This function dynamically allocates a new fifo internal buffer | ||
| 65 | * | 63 | * |
| 66 | * The size will be rounded-up to a power of 2. | 64 | * The size will be rounded-up to a power of 2. |
| 65 | * The buffer will be release with kfifo_free(). | ||
| 66 | * Return 0 if no error, otherwise the an error code | ||
| 67 | */ | 67 | */ |
| 68 | struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask, spinlock_t *lock) | 68 | int kfifo_alloc(struct kfifo *fifo, unsigned int size, gfp_t gfp_mask) |
| 69 | { | 69 | { |
| 70 | unsigned char *buffer; | 70 | unsigned char *buffer; |
| 71 | struct kfifo *ret; | ||
| 72 | 71 | ||
| 73 | /* | 72 | /* |
| 74 | * round up to the next power of 2, since our 'let the indices | 73 | * round up to the next power of 2, since our 'let the indices |
| @@ -80,48 +79,91 @@ struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask, spinlock_t *lock) | |||
| 80 | } | 79 | } |
| 81 | 80 | ||
| 82 | buffer = kmalloc(size, gfp_mask); | 81 | buffer = kmalloc(size, gfp_mask); |
| 83 | if (!buffer) | 82 | if (!buffer) { |
| 84 | return ERR_PTR(-ENOMEM); | 83 | _kfifo_init(fifo, 0, 0); |
| 85 | 84 | return -ENOMEM; | |
| 86 | ret = kfifo_init(buffer, size, gfp_mask, lock); | 85 | } |
| 87 | 86 | ||
| 88 | if (IS_ERR(ret)) | 87 | _kfifo_init(fifo, buffer, size); |
| 89 | kfree(buffer); | ||
| 90 | 88 | ||
| 91 | return ret; | 89 | return 0; |
| 92 | } | 90 | } |
| 93 | EXPORT_SYMBOL(kfifo_alloc); | 91 | EXPORT_SYMBOL(kfifo_alloc); |
| 94 | 92 | ||
| 95 | /** | 93 | /** |
| 96 | * kfifo_free - frees the FIFO | 94 | * kfifo_free - frees the FIFO internal buffer |
| 97 | * @fifo: the fifo to be freed. | 95 | * @fifo: the fifo to be freed. |
| 98 | */ | 96 | */ |
| 99 | void kfifo_free(struct kfifo *fifo) | 97 | void kfifo_free(struct kfifo *fifo) |
| 100 | { | 98 | { |
| 101 | kfree(fifo->buffer); | 99 | kfree(fifo->buffer); |
| 102 | kfree(fifo); | ||
| 103 | } | 100 | } |
| 104 | EXPORT_SYMBOL(kfifo_free); | 101 | EXPORT_SYMBOL(kfifo_free); |
| 105 | 102 | ||
| 106 | /** | 103 | /** |
| 107 | * __kfifo_put - puts some data into the FIFO, no locking version | 104 | * kfifo_skip - skip output data |
| 108 | * @fifo: the fifo to be used. | 105 | * @fifo: the fifo to be used. |
| 109 | * @buffer: the data to be added. | 106 | * @len: number of bytes to skip |
| 110 | * @len: the length of the data to be added. | ||
| 111 | * | ||
| 112 | * This function copies at most @len bytes from the @buffer into | ||
| 113 | * the FIFO depending on the free space, and returns the number of | ||
| 114 | * bytes copied. | ||
| 115 | * | ||
| 116 | * Note that with only one concurrent reader and one concurrent | ||
| 117 | * writer, you don't need extra locking to use these functions. | ||
| 118 | */ | 107 | */ |
| 119 | unsigned int __kfifo_put(struct kfifo *fifo, | 108 | void kfifo_skip(struct kfifo *fifo, unsigned int len) |
| 120 | const unsigned char *buffer, unsigned int len) | 109 | { |
| 110 | if (len < kfifo_len(fifo)) { | ||
| 111 | __kfifo_add_out(fifo, len); | ||
| 112 | return; | ||
| 113 | } | ||
| 114 | kfifo_reset_out(fifo); | ||
| 115 | } | ||
| 116 | EXPORT_SYMBOL(kfifo_skip); | ||
| 117 | |||
| 118 | static inline void __kfifo_in_data(struct kfifo *fifo, | ||
| 119 | const void *from, unsigned int len, unsigned int off) | ||
| 121 | { | 120 | { |
| 122 | unsigned int l; | 121 | unsigned int l; |
| 123 | 122 | ||
| 124 | len = min(len, fifo->size - fifo->in + fifo->out); | 123 | /* |
| 124 | * Ensure that we sample the fifo->out index -before- we | ||
| 125 | * start putting bytes into the kfifo. | ||
| 126 | */ | ||
| 127 | |||
| 128 | smp_mb(); | ||
| 129 | |||
| 130 | off = __kfifo_off(fifo, fifo->in + off); | ||
| 131 | |||
| 132 | /* first put the data starting from fifo->in to buffer end */ | ||
| 133 | l = min(len, fifo->size - off); | ||
| 134 | memcpy(fifo->buffer + off, from, l); | ||
| 135 | |||
| 136 | /* then put the rest (if any) at the beginning of the buffer */ | ||
| 137 | memcpy(fifo->buffer, from + l, len - l); | ||
| 138 | } | ||
| 139 | |||
| 140 | static inline void __kfifo_out_data(struct kfifo *fifo, | ||
| 141 | void *to, unsigned int len, unsigned int off) | ||
| 142 | { | ||
| 143 | unsigned int l; | ||
| 144 | |||
| 145 | /* | ||
| 146 | * Ensure that we sample the fifo->in index -before- we | ||
| 147 | * start removing bytes from the kfifo. | ||
| 148 | */ | ||
| 149 | |||
| 150 | smp_rmb(); | ||
| 151 | |||
| 152 | off = __kfifo_off(fifo, fifo->out + off); | ||
| 153 | |||
| 154 | /* first get the data from fifo->out until the end of the buffer */ | ||
| 155 | l = min(len, fifo->size - off); | ||
| 156 | memcpy(to, fifo->buffer + off, l); | ||
| 157 | |||
| 158 | /* then get the rest (if any) from the beginning of the buffer */ | ||
| 159 | memcpy(to + l, fifo->buffer, len - l); | ||
| 160 | } | ||
| 161 | |||
| 162 | static inline unsigned int __kfifo_from_user_data(struct kfifo *fifo, | ||
| 163 | const void __user *from, unsigned int len, unsigned int off) | ||
| 164 | { | ||
| 165 | unsigned int l; | ||
| 166 | int ret; | ||
| 125 | 167 | ||
| 126 | /* | 168 | /* |
| 127 | * Ensure that we sample the fifo->out index -before- we | 169 | * Ensure that we sample the fifo->out index -before- we |
| @@ -130,68 +172,229 @@ unsigned int __kfifo_put(struct kfifo *fifo, | |||
| 130 | 172 | ||
| 131 | smp_mb(); | 173 | smp_mb(); |
| 132 | 174 | ||
| 175 | off = __kfifo_off(fifo, fifo->in + off); | ||
| 176 | |||
| 133 | /* first put the data starting from fifo->in to buffer end */ | 177 | /* first put the data starting from fifo->in to buffer end */ |
| 134 | l = min(len, fifo->size - (fifo->in & (fifo->size - 1))); | 178 | l = min(len, fifo->size - off); |
| 135 | memcpy(fifo->buffer + (fifo->in & (fifo->size - 1)), buffer, l); | 179 | ret = copy_from_user(fifo->buffer + off, from, l); |
| 180 | |||
| 181 | if (unlikely(ret)) | ||
| 182 | return ret + len - l; | ||
| 136 | 183 | ||
| 137 | /* then put the rest (if any) at the beginning of the buffer */ | 184 | /* then put the rest (if any) at the beginning of the buffer */ |
| 138 | memcpy(fifo->buffer, buffer + l, len - l); | 185 | return copy_from_user(fifo->buffer, from + l, len - l); |
| 186 | } | ||
| 187 | |||
| 188 | static inline unsigned int __kfifo_to_user_data(struct kfifo *fifo, | ||
| 189 | void __user *to, unsigned int len, unsigned int off) | ||
| 190 | { | ||
| 191 | unsigned int l; | ||
| 192 | int ret; | ||
| 139 | 193 | ||
| 140 | /* | 194 | /* |
| 141 | * Ensure that we add the bytes to the kfifo -before- | 195 | * Ensure that we sample the fifo->in index -before- we |
| 142 | * we update the fifo->in index. | 196 | * start removing bytes from the kfifo. |
| 143 | */ | 197 | */ |
| 144 | 198 | ||
| 145 | smp_wmb(); | 199 | smp_rmb(); |
| 200 | |||
| 201 | off = __kfifo_off(fifo, fifo->out + off); | ||
| 202 | |||
| 203 | /* first get the data from fifo->out until the end of the buffer */ | ||
| 204 | l = min(len, fifo->size - off); | ||
| 205 | ret = copy_to_user(to, fifo->buffer + off, l); | ||
| 206 | |||
| 207 | if (unlikely(ret)) | ||
| 208 | return ret + len - l; | ||
| 209 | |||
| 210 | /* then get the rest (if any) from the beginning of the buffer */ | ||
| 211 | return copy_to_user(to + l, fifo->buffer, len - l); | ||
| 212 | } | ||
| 213 | |||
| 214 | unsigned int __kfifo_in_n(struct kfifo *fifo, | ||
| 215 | const void *from, unsigned int len, unsigned int recsize) | ||
| 216 | { | ||
| 217 | if (kfifo_avail(fifo) < len + recsize) | ||
| 218 | return len + 1; | ||
| 219 | |||
| 220 | __kfifo_in_data(fifo, from, len, recsize); | ||
| 221 | return 0; | ||
| 222 | } | ||
| 223 | EXPORT_SYMBOL(__kfifo_in_n); | ||
| 146 | 224 | ||
| 147 | fifo->in += len; | 225 | /** |
| 226 | * kfifo_in - puts some data into the FIFO | ||
| 227 | * @fifo: the fifo to be used. | ||
| 228 | * @from: the data to be added. | ||
| 229 | * @len: the length of the data to be added. | ||
| 230 | * | ||
| 231 | * This function copies at most @len bytes from the @from buffer into | ||
| 232 | * the FIFO depending on the free space, and returns the number of | ||
| 233 | * bytes copied. | ||
| 234 | * | ||
| 235 | * Note that with only one concurrent reader and one concurrent | ||
| 236 | * writer, you don't need extra locking to use these functions. | ||
| 237 | */ | ||
| 238 | unsigned int kfifo_in(struct kfifo *fifo, const unsigned char *from, | ||
| 239 | unsigned int len) | ||
| 240 | { | ||
| 241 | len = min(kfifo_avail(fifo), len); | ||
| 148 | 242 | ||
| 243 | __kfifo_in_data(fifo, from, len, 0); | ||
| 244 | __kfifo_add_in(fifo, len); | ||
| 149 | return len; | 245 | return len; |
| 150 | } | 246 | } |
| 151 | EXPORT_SYMBOL(__kfifo_put); | 247 | EXPORT_SYMBOL(kfifo_in); |
| 248 | |||
| 249 | unsigned int __kfifo_in_generic(struct kfifo *fifo, | ||
| 250 | const void *from, unsigned int len, unsigned int recsize) | ||
| 251 | { | ||
| 252 | return __kfifo_in_rec(fifo, from, len, recsize); | ||
| 253 | } | ||
| 254 | EXPORT_SYMBOL(__kfifo_in_generic); | ||
| 255 | |||
| 256 | unsigned int __kfifo_out_n(struct kfifo *fifo, | ||
| 257 | void *to, unsigned int len, unsigned int recsize) | ||
| 258 | { | ||
| 259 | if (kfifo_len(fifo) < len + recsize) | ||
| 260 | return len; | ||
| 261 | |||
| 262 | __kfifo_out_data(fifo, to, len, recsize); | ||
| 263 | __kfifo_add_out(fifo, len + recsize); | ||
| 264 | return 0; | ||
| 265 | } | ||
| 266 | EXPORT_SYMBOL(__kfifo_out_n); | ||
| 152 | 267 | ||
| 153 | /** | 268 | /** |
| 154 | * __kfifo_get - gets some data from the FIFO, no locking version | 269 | * kfifo_out - gets some data from the FIFO |
| 155 | * @fifo: the fifo to be used. | 270 | * @fifo: the fifo to be used. |
| 156 | * @buffer: where the data must be copied. | 271 | * @to: where the data must be copied. |
| 157 | * @len: the size of the destination buffer. | 272 | * @len: the size of the destination buffer. |
| 158 | * | 273 | * |
| 159 | * This function copies at most @len bytes from the FIFO into the | 274 | * This function copies at most @len bytes from the FIFO into the |
| 160 | * @buffer and returns the number of copied bytes. | 275 | * @to buffer and returns the number of copied bytes. |
| 161 | * | 276 | * |
| 162 | * Note that with only one concurrent reader and one concurrent | 277 | * Note that with only one concurrent reader and one concurrent |
| 163 | * writer, you don't need extra locking to use these functions. | 278 | * writer, you don't need extra locking to use these functions. |
| 164 | */ | 279 | */ |
| 165 | unsigned int __kfifo_get(struct kfifo *fifo, | 280 | unsigned int kfifo_out(struct kfifo *fifo, unsigned char *to, unsigned int len) |
| 166 | unsigned char *buffer, unsigned int len) | ||
| 167 | { | 281 | { |
| 168 | unsigned int l; | 282 | len = min(kfifo_len(fifo), len); |
| 169 | 283 | ||
| 170 | len = min(len, fifo->in - fifo->out); | 284 | __kfifo_out_data(fifo, to, len, 0); |
| 285 | __kfifo_add_out(fifo, len); | ||
| 171 | 286 | ||
| 172 | /* | 287 | return len; |
| 173 | * Ensure that we sample the fifo->in index -before- we | 288 | } |
| 174 | * start removing bytes from the kfifo. | 289 | EXPORT_SYMBOL(kfifo_out); |
| 175 | */ | ||
| 176 | 290 | ||
| 177 | smp_rmb(); | 291 | unsigned int __kfifo_out_generic(struct kfifo *fifo, |
| 292 | void *to, unsigned int len, unsigned int recsize, | ||
| 293 | unsigned int *total) | ||
| 294 | { | ||
| 295 | return __kfifo_out_rec(fifo, to, len, recsize, total); | ||
| 296 | } | ||
| 297 | EXPORT_SYMBOL(__kfifo_out_generic); | ||
| 178 | 298 | ||
| 179 | /* first get the data from fifo->out until the end of the buffer */ | 299 | unsigned int __kfifo_from_user_n(struct kfifo *fifo, |
| 180 | l = min(len, fifo->size - (fifo->out & (fifo->size - 1))); | 300 | const void __user *from, unsigned int len, unsigned int recsize) |
| 181 | memcpy(buffer, fifo->buffer + (fifo->out & (fifo->size - 1)), l); | 301 | { |
| 302 | if (kfifo_avail(fifo) < len + recsize) | ||
| 303 | return len + 1; | ||
| 182 | 304 | ||
| 183 | /* then get the rest (if any) from the beginning of the buffer */ | 305 | return __kfifo_from_user_data(fifo, from, len, recsize); |
| 184 | memcpy(buffer + l, fifo->buffer, len - l); | 306 | } |
| 307 | EXPORT_SYMBOL(__kfifo_from_user_n); | ||
| 185 | 308 | ||
| 186 | /* | 309 | /** |
| 187 | * Ensure that we remove the bytes from the kfifo -before- | 310 | * kfifo_from_user - puts some data from user space into the FIFO |
| 188 | * we update the fifo->out index. | 311 | * @fifo: the fifo to be used. |
| 189 | */ | 312 | * @from: pointer to the data to be added. |
| 313 | * @len: the length of the data to be added. | ||
| 314 | * | ||
| 315 | * This function copies at most @len bytes from the @from into the | ||
| 316 | * FIFO depending and returns the number of copied bytes. | ||
| 317 | * | ||
| 318 | * Note that with only one concurrent reader and one concurrent | ||
| 319 | * writer, you don't need extra locking to use these functions. | ||
| 320 | */ | ||
| 321 | unsigned int kfifo_from_user(struct kfifo *fifo, | ||
| 322 | const void __user *from, unsigned int len) | ||
| 323 | { | ||
| 324 | len = min(kfifo_avail(fifo), len); | ||
| 325 | len -= __kfifo_from_user_data(fifo, from, len, 0); | ||
| 326 | __kfifo_add_in(fifo, len); | ||
| 327 | return len; | ||
| 328 | } | ||
| 329 | EXPORT_SYMBOL(kfifo_from_user); | ||
| 190 | 330 | ||
| 191 | smp_mb(); | 331 | unsigned int __kfifo_from_user_generic(struct kfifo *fifo, |
| 332 | const void __user *from, unsigned int len, unsigned int recsize) | ||
| 333 | { | ||
| 334 | return __kfifo_from_user_rec(fifo, from, len, recsize); | ||
| 335 | } | ||
| 336 | EXPORT_SYMBOL(__kfifo_from_user_generic); | ||
| 192 | 337 | ||
| 193 | fifo->out += len; | 338 | unsigned int __kfifo_to_user_n(struct kfifo *fifo, |
| 339 | void __user *to, unsigned int len, unsigned int reclen, | ||
| 340 | unsigned int recsize) | ||
| 341 | { | ||
| 342 | unsigned int ret; | ||
| 343 | |||
| 344 | if (kfifo_len(fifo) < reclen + recsize) | ||
| 345 | return len; | ||
| 346 | |||
| 347 | ret = __kfifo_to_user_data(fifo, to, reclen, recsize); | ||
| 194 | 348 | ||
| 349 | if (likely(ret == 0)) | ||
| 350 | __kfifo_add_out(fifo, reclen + recsize); | ||
| 351 | |||
| 352 | return ret; | ||
| 353 | } | ||
| 354 | EXPORT_SYMBOL(__kfifo_to_user_n); | ||
| 355 | |||
| 356 | /** | ||
| 357 | * kfifo_to_user - gets data from the FIFO and write it to user space | ||
| 358 | * @fifo: the fifo to be used. | ||
| 359 | * @to: where the data must be copied. | ||
| 360 | * @len: the size of the destination buffer. | ||
| 361 | * | ||
| 362 | * This function copies at most @len bytes from the FIFO into the | ||
| 363 | * @to buffer and returns the number of copied bytes. | ||
| 364 | * | ||
| 365 | * Note that with only one concurrent reader and one concurrent | ||
| 366 | * writer, you don't need extra locking to use these functions. | ||
| 367 | */ | ||
| 368 | unsigned int kfifo_to_user(struct kfifo *fifo, | ||
| 369 | void __user *to, unsigned int len) | ||
| 370 | { | ||
| 371 | len = min(kfifo_len(fifo), len); | ||
| 372 | len -= __kfifo_to_user_data(fifo, to, len, 0); | ||
| 373 | __kfifo_add_out(fifo, len); | ||
| 195 | return len; | 374 | return len; |
| 196 | } | 375 | } |
| 197 | EXPORT_SYMBOL(__kfifo_get); | 376 | EXPORT_SYMBOL(kfifo_to_user); |
| 377 | |||
| 378 | unsigned int __kfifo_to_user_generic(struct kfifo *fifo, | ||
| 379 | void __user *to, unsigned int len, unsigned int recsize, | ||
| 380 | unsigned int *total) | ||
| 381 | { | ||
| 382 | return __kfifo_to_user_rec(fifo, to, len, recsize, total); | ||
| 383 | } | ||
| 384 | EXPORT_SYMBOL(__kfifo_to_user_generic); | ||
| 385 | |||
| 386 | unsigned int __kfifo_peek_generic(struct kfifo *fifo, unsigned int recsize) | ||
| 387 | { | ||
| 388 | if (recsize == 0) | ||
| 389 | return kfifo_avail(fifo); | ||
| 390 | |||
| 391 | return __kfifo_peek_n(fifo, recsize); | ||
| 392 | } | ||
| 393 | EXPORT_SYMBOL(__kfifo_peek_generic); | ||
| 394 | |||
| 395 | void __kfifo_skip_generic(struct kfifo *fifo, unsigned int recsize) | ||
| 396 | { | ||
| 397 | __kfifo_skip_rec(fifo, recsize); | ||
| 398 | } | ||
| 399 | EXPORT_SYMBOL(__kfifo_skip_generic); | ||
| 400 | |||
diff --git a/kernel/perf_event.c b/kernel/perf_event.c index e0eb4a2fe183..1f38270f08c7 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c | |||
| @@ -4724,7 +4724,7 @@ SYSCALL_DEFINE5(perf_event_open, | |||
| 4724 | if (IS_ERR(event)) | 4724 | if (IS_ERR(event)) |
| 4725 | goto err_put_context; | 4725 | goto err_put_context; |
| 4726 | 4726 | ||
| 4727 | err = anon_inode_getfd("[perf_event]", &perf_fops, event, 0); | 4727 | err = anon_inode_getfd("[perf_event]", &perf_fops, event, O_RDWR); |
| 4728 | if (err < 0) | 4728 | if (err < 0) |
| 4729 | goto err_free_put_context; | 4729 | goto err_free_put_context; |
| 4730 | 4730 | ||
diff --git a/kernel/resource.c b/kernel/resource.c index dc15686b7a77..af96c1e4b54b 100644 --- a/kernel/resource.c +++ b/kernel/resource.c | |||
| @@ -308,37 +308,37 @@ static int find_resource(struct resource *root, struct resource *new, | |||
| 308 | void *alignf_data) | 308 | void *alignf_data) |
| 309 | { | 309 | { |
| 310 | struct resource *this = root->child; | 310 | struct resource *this = root->child; |
| 311 | resource_size_t start, end; | 311 | struct resource tmp = *new; |
| 312 | 312 | ||
| 313 | start = root->start; | 313 | tmp.start = root->start; |
| 314 | /* | 314 | /* |
| 315 | * Skip past an allocated resource that starts at 0, since the assignment | 315 | * Skip past an allocated resource that starts at 0, since the assignment |
| 316 | * of this->start - 1 to new->end below would cause an underflow. | 316 | * of this->start - 1 to tmp->end below would cause an underflow. |
| 317 | */ | 317 | */ |
| 318 | if (this && this->start == 0) { | 318 | if (this && this->start == 0) { |
| 319 | start = this->end + 1; | 319 | tmp.start = this->end + 1; |
| 320 | this = this->sibling; | 320 | this = this->sibling; |
| 321 | } | 321 | } |
| 322 | for(;;) { | 322 | for(;;) { |
| 323 | if (this) | 323 | if (this) |
| 324 | end = this->start - 1; | 324 | tmp.end = this->start - 1; |
| 325 | else | 325 | else |
| 326 | end = root->end; | 326 | tmp.end = root->end; |
| 327 | if (start < min) | 327 | if (tmp.start < min) |
| 328 | start = min; | 328 | tmp.start = min; |
| 329 | if (end > max) | 329 | if (tmp.end > max) |
| 330 | end = max; | 330 | tmp.end = max; |
| 331 | start = ALIGN(start, align); | 331 | tmp.start = ALIGN(tmp.start, align); |
| 332 | if (alignf) | 332 | if (alignf) |
| 333 | alignf(alignf_data, new, size, align); | 333 | alignf(alignf_data, &tmp, size, align); |
| 334 | if (start < end && end - start >= size - 1) { | 334 | if (tmp.start < tmp.end && tmp.end - tmp.start >= size - 1) { |
| 335 | new->start = start; | 335 | new->start = tmp.start; |
| 336 | new->end = start + size - 1; | 336 | new->end = tmp.start + size - 1; |
| 337 | return 0; | 337 | return 0; |
| 338 | } | 338 | } |
| 339 | if (!this) | 339 | if (!this) |
| 340 | break; | 340 | break; |
| 341 | start = this->end + 1; | 341 | tmp.start = this->end + 1; |
| 342 | this = this->sibling; | 342 | this = this->sibling; |
| 343 | } | 343 | } |
| 344 | return -EBUSY; | 344 | return -EBUSY; |
diff --git a/kernel/time.c b/kernel/time.c index c6324d96009e..804798005d19 100644 --- a/kernel/time.c +++ b/kernel/time.c | |||
| @@ -136,6 +136,7 @@ static inline void warp_clock(void) | |||
| 136 | write_seqlock_irq(&xtime_lock); | 136 | write_seqlock_irq(&xtime_lock); |
| 137 | wall_to_monotonic.tv_sec -= sys_tz.tz_minuteswest * 60; | 137 | wall_to_monotonic.tv_sec -= sys_tz.tz_minuteswest * 60; |
| 138 | xtime.tv_sec += sys_tz.tz_minuteswest * 60; | 138 | xtime.tv_sec += sys_tz.tz_minuteswest * 60; |
| 139 | update_xtime_cache(0); | ||
| 139 | write_sequnlock_irq(&xtime_lock); | 140 | write_sequnlock_irq(&xtime_lock); |
| 140 | clock_was_set(); | 141 | clock_was_set(); |
| 141 | } | 142 | } |
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index af4135f05825..7faaa32fbf4f 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c | |||
| @@ -165,6 +165,13 @@ struct timespec raw_time; | |||
| 165 | /* flag for if timekeeping is suspended */ | 165 | /* flag for if timekeeping is suspended */ |
| 166 | int __read_mostly timekeeping_suspended; | 166 | int __read_mostly timekeeping_suspended; |
| 167 | 167 | ||
| 168 | static struct timespec xtime_cache __attribute__ ((aligned (16))); | ||
| 169 | void update_xtime_cache(u64 nsec) | ||
| 170 | { | ||
| 171 | xtime_cache = xtime; | ||
| 172 | timespec_add_ns(&xtime_cache, nsec); | ||
| 173 | } | ||
| 174 | |||
| 168 | /* must hold xtime_lock */ | 175 | /* must hold xtime_lock */ |
| 169 | void timekeeping_leap_insert(int leapsecond) | 176 | void timekeeping_leap_insert(int leapsecond) |
| 170 | { | 177 | { |
| @@ -325,6 +332,8 @@ int do_settimeofday(struct timespec *tv) | |||
| 325 | 332 | ||
| 326 | xtime = *tv; | 333 | xtime = *tv; |
| 327 | 334 | ||
| 335 | update_xtime_cache(0); | ||
| 336 | |||
| 328 | timekeeper.ntp_error = 0; | 337 | timekeeper.ntp_error = 0; |
| 329 | ntp_clear(); | 338 | ntp_clear(); |
| 330 | 339 | ||
| @@ -550,6 +559,7 @@ void __init timekeeping_init(void) | |||
| 550 | } | 559 | } |
| 551 | set_normalized_timespec(&wall_to_monotonic, | 560 | set_normalized_timespec(&wall_to_monotonic, |
| 552 | -boot.tv_sec, -boot.tv_nsec); | 561 | -boot.tv_sec, -boot.tv_nsec); |
| 562 | update_xtime_cache(0); | ||
| 553 | total_sleep_time.tv_sec = 0; | 563 | total_sleep_time.tv_sec = 0; |
| 554 | total_sleep_time.tv_nsec = 0; | 564 | total_sleep_time.tv_nsec = 0; |
| 555 | write_sequnlock_irqrestore(&xtime_lock, flags); | 565 | write_sequnlock_irqrestore(&xtime_lock, flags); |
| @@ -583,6 +593,7 @@ static int timekeeping_resume(struct sys_device *dev) | |||
| 583 | wall_to_monotonic = timespec_sub(wall_to_monotonic, ts); | 593 | wall_to_monotonic = timespec_sub(wall_to_monotonic, ts); |
| 584 | total_sleep_time = timespec_add_safe(total_sleep_time, ts); | 594 | total_sleep_time = timespec_add_safe(total_sleep_time, ts); |
| 585 | } | 595 | } |
| 596 | update_xtime_cache(0); | ||
| 586 | /* re-base the last cycle value */ | 597 | /* re-base the last cycle value */ |
| 587 | timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock); | 598 | timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock); |
| 588 | timekeeper.ntp_error = 0; | 599 | timekeeper.ntp_error = 0; |
| @@ -722,6 +733,7 @@ static void timekeeping_adjust(s64 offset) | |||
| 722 | timekeeper.ntp_error_shift; | 733 | timekeeper.ntp_error_shift; |
| 723 | } | 734 | } |
| 724 | 735 | ||
| 736 | |||
| 725 | /** | 737 | /** |
| 726 | * logarithmic_accumulation - shifted accumulation of cycles | 738 | * logarithmic_accumulation - shifted accumulation of cycles |
| 727 | * | 739 | * |
| @@ -765,6 +777,7 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift) | |||
| 765 | return offset; | 777 | return offset; |
| 766 | } | 778 | } |
| 767 | 779 | ||
| 780 | |||
| 768 | /** | 781 | /** |
| 769 | * update_wall_time - Uses the current clocksource to increment the wall time | 782 | * update_wall_time - Uses the current clocksource to increment the wall time |
| 770 | * | 783 | * |
| @@ -774,6 +787,7 @@ void update_wall_time(void) | |||
| 774 | { | 787 | { |
| 775 | struct clocksource *clock; | 788 | struct clocksource *clock; |
| 776 | cycle_t offset; | 789 | cycle_t offset; |
| 790 | u64 nsecs; | ||
| 777 | int shift = 0, maxshift; | 791 | int shift = 0, maxshift; |
| 778 | 792 | ||
| 779 | /* Make sure we're fully resumed: */ | 793 | /* Make sure we're fully resumed: */ |
| @@ -839,6 +853,9 @@ void update_wall_time(void) | |||
| 839 | timekeeper.ntp_error += timekeeper.xtime_nsec << | 853 | timekeeper.ntp_error += timekeeper.xtime_nsec << |
| 840 | timekeeper.ntp_error_shift; | 854 | timekeeper.ntp_error_shift; |
| 841 | 855 | ||
| 856 | nsecs = clocksource_cyc2ns(offset, timekeeper.mult, timekeeper.shift); | ||
| 857 | update_xtime_cache(nsecs); | ||
| 858 | |||
| 842 | /* check to see if there is a new clocksource to use */ | 859 | /* check to see if there is a new clocksource to use */ |
| 843 | update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult); | 860 | update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult); |
| 844 | } | 861 | } |
| @@ -875,13 +892,13 @@ void monotonic_to_bootbased(struct timespec *ts) | |||
| 875 | 892 | ||
| 876 | unsigned long get_seconds(void) | 893 | unsigned long get_seconds(void) |
| 877 | { | 894 | { |
| 878 | return xtime.tv_sec; | 895 | return xtime_cache.tv_sec; |
| 879 | } | 896 | } |
| 880 | EXPORT_SYMBOL(get_seconds); | 897 | EXPORT_SYMBOL(get_seconds); |
| 881 | 898 | ||
| 882 | struct timespec __current_kernel_time(void) | 899 | struct timespec __current_kernel_time(void) |
| 883 | { | 900 | { |
| 884 | return xtime; | 901 | return xtime_cache; |
| 885 | } | 902 | } |
| 886 | 903 | ||
| 887 | struct timespec current_kernel_time(void) | 904 | struct timespec current_kernel_time(void) |
| @@ -891,7 +908,8 @@ struct timespec current_kernel_time(void) | |||
| 891 | 908 | ||
| 892 | do { | 909 | do { |
| 893 | seq = read_seqbegin(&xtime_lock); | 910 | seq = read_seqbegin(&xtime_lock); |
| 894 | now = xtime; | 911 | |
| 912 | now = xtime_cache; | ||
| 895 | } while (read_seqretry(&xtime_lock, seq)); | 913 | } while (read_seqretry(&xtime_lock, seq)); |
| 896 | 914 | ||
| 897 | return now; | 915 | return now; |
| @@ -905,7 +923,8 @@ struct timespec get_monotonic_coarse(void) | |||
| 905 | 923 | ||
| 906 | do { | 924 | do { |
| 907 | seq = read_seqbegin(&xtime_lock); | 925 | seq = read_seqbegin(&xtime_lock); |
| 908 | now = xtime; | 926 | |
| 927 | now = xtime_cache; | ||
| 909 | mono = wall_to_monotonic; | 928 | mono = wall_to_monotonic; |
| 910 | } while (read_seqretry(&xtime_lock, seq)); | 929 | } while (read_seqretry(&xtime_lock, seq)); |
| 911 | 930 | ||
diff --git a/lib/string.c b/lib/string.c index afce96af3afd..9f75b4ec50b8 100644 --- a/lib/string.c +++ b/lib/string.c | |||
| @@ -338,10 +338,10 @@ EXPORT_SYMBOL(strnchr); | |||
| 338 | #endif | 338 | #endif |
| 339 | 339 | ||
| 340 | /** | 340 | /** |
| 341 | * skip_spaces - Removes leading whitespace from @s. | 341 | * skip_spaces - Removes leading whitespace from @str. |
| 342 | * @s: The string to be stripped. | 342 | * @str: The string to be stripped. |
| 343 | * | 343 | * |
| 344 | * Returns a pointer to the first non-whitespace character in @s. | 344 | * Returns a pointer to the first non-whitespace character in @str. |
| 345 | */ | 345 | */ |
| 346 | char *skip_spaces(const char *str) | 346 | char *skip_spaces(const char *str) |
| 347 | { | 347 | { |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 4e869657cb51..d79b92580561 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
| @@ -48,6 +48,7 @@ | |||
| 48 | #include <linux/page_cgroup.h> | 48 | #include <linux/page_cgroup.h> |
| 49 | #include <linux/debugobjects.h> | 49 | #include <linux/debugobjects.h> |
| 50 | #include <linux/kmemleak.h> | 50 | #include <linux/kmemleak.h> |
| 51 | #include <linux/memory.h> | ||
| 51 | #include <trace/events/kmem.h> | 52 | #include <trace/events/kmem.h> |
| 52 | 53 | ||
| 53 | #include <asm/tlbflush.h> | 54 | #include <asm/tlbflush.h> |
| @@ -5008,23 +5009,65 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags, | |||
| 5008 | int set_migratetype_isolate(struct page *page) | 5009 | int set_migratetype_isolate(struct page *page) |
| 5009 | { | 5010 | { |
| 5010 | struct zone *zone; | 5011 | struct zone *zone; |
| 5011 | unsigned long flags; | 5012 | struct page *curr_page; |
| 5013 | unsigned long flags, pfn, iter; | ||
| 5014 | unsigned long immobile = 0; | ||
| 5015 | struct memory_isolate_notify arg; | ||
| 5016 | int notifier_ret; | ||
| 5012 | int ret = -EBUSY; | 5017 | int ret = -EBUSY; |
| 5013 | int zone_idx; | 5018 | int zone_idx; |
| 5014 | 5019 | ||
| 5015 | zone = page_zone(page); | 5020 | zone = page_zone(page); |
| 5016 | zone_idx = zone_idx(zone); | 5021 | zone_idx = zone_idx(zone); |
| 5022 | |||
| 5017 | spin_lock_irqsave(&zone->lock, flags); | 5023 | spin_lock_irqsave(&zone->lock, flags); |
| 5024 | if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE || | ||
| 5025 | zone_idx == ZONE_MOVABLE) { | ||
| 5026 | ret = 0; | ||
| 5027 | goto out; | ||
| 5028 | } | ||
| 5029 | |||
| 5030 | pfn = page_to_pfn(page); | ||
| 5031 | arg.start_pfn = pfn; | ||
| 5032 | arg.nr_pages = pageblock_nr_pages; | ||
| 5033 | arg.pages_found = 0; | ||
| 5034 | |||
| 5018 | /* | 5035 | /* |
| 5019 | * In future, more migrate types will be able to be isolation target. | 5036 | * It may be possible to isolate a pageblock even if the |
| 5037 | * migratetype is not MIGRATE_MOVABLE. The memory isolation | ||
| 5038 | * notifier chain is used by balloon drivers to return the | ||
| 5039 | * number of pages in a range that are held by the balloon | ||
| 5040 | * driver to shrink memory. If all the pages are accounted for | ||
| 5041 | * by balloons, are free, or on the LRU, isolation can continue. | ||
| 5042 | * Later, for example, when memory hotplug notifier runs, these | ||
| 5043 | * pages reported as "can be isolated" should be isolated(freed) | ||
| 5044 | * by the balloon driver through the memory notifier chain. | ||
| 5020 | */ | 5045 | */ |
| 5021 | if (get_pageblock_migratetype(page) != MIGRATE_MOVABLE && | 5046 | notifier_ret = memory_isolate_notify(MEM_ISOLATE_COUNT, &arg); |
| 5022 | zone_idx != ZONE_MOVABLE) | 5047 | notifier_ret = notifier_to_errno(notifier_ret); |
| 5048 | if (notifier_ret || !arg.pages_found) | ||
| 5023 | goto out; | 5049 | goto out; |
| 5024 | set_pageblock_migratetype(page, MIGRATE_ISOLATE); | 5050 | |
| 5025 | move_freepages_block(zone, page, MIGRATE_ISOLATE); | 5051 | for (iter = pfn; iter < (pfn + pageblock_nr_pages); iter++) { |
| 5026 | ret = 0; | 5052 | if (!pfn_valid_within(pfn)) |
| 5053 | continue; | ||
| 5054 | |||
| 5055 | curr_page = pfn_to_page(iter); | ||
| 5056 | if (!page_count(curr_page) || PageLRU(curr_page)) | ||
| 5057 | continue; | ||
| 5058 | |||
| 5059 | immobile++; | ||
| 5060 | } | ||
| 5061 | |||
| 5062 | if (arg.pages_found == immobile) | ||
| 5063 | ret = 0; | ||
| 5064 | |||
| 5027 | out: | 5065 | out: |
| 5066 | if (!ret) { | ||
| 5067 | set_pageblock_migratetype(page, MIGRATE_ISOLATE); | ||
| 5068 | move_freepages_block(zone, page, MIGRATE_ISOLATE); | ||
| 5069 | } | ||
| 5070 | |||
| 5028 | spin_unlock_irqrestore(&zone->lock, flags); | 5071 | spin_unlock_irqrestore(&zone->lock, flags); |
| 5029 | if (!ret) | 5072 | if (!ret) |
| 5030 | drain_all_pages(); | 5073 | drain_all_pages(); |
diff --git a/net/dccp/probe.c b/net/dccp/probe.c index dc328425fa20..a1362dc8abb0 100644 --- a/net/dccp/probe.c +++ b/net/dccp/probe.c | |||
| @@ -43,7 +43,7 @@ static int bufsize = 64 * 1024; | |||
| 43 | static const char procname[] = "dccpprobe"; | 43 | static const char procname[] = "dccpprobe"; |
| 44 | 44 | ||
| 45 | static struct { | 45 | static struct { |
| 46 | struct kfifo *fifo; | 46 | struct kfifo fifo; |
| 47 | spinlock_t lock; | 47 | spinlock_t lock; |
| 48 | wait_queue_head_t wait; | 48 | wait_queue_head_t wait; |
| 49 | struct timespec tstart; | 49 | struct timespec tstart; |
| @@ -67,7 +67,7 @@ static void printl(const char *fmt, ...) | |||
| 67 | len += vscnprintf(tbuf+len, sizeof(tbuf)-len, fmt, args); | 67 | len += vscnprintf(tbuf+len, sizeof(tbuf)-len, fmt, args); |
| 68 | va_end(args); | 68 | va_end(args); |
| 69 | 69 | ||
| 70 | kfifo_put(dccpw.fifo, tbuf, len); | 70 | kfifo_in_locked(&dccpw.fifo, tbuf, len, &dccpw.lock); |
| 71 | wake_up(&dccpw.wait); | 71 | wake_up(&dccpw.wait); |
| 72 | } | 72 | } |
| 73 | 73 | ||
| @@ -109,7 +109,7 @@ static struct jprobe dccp_send_probe = { | |||
| 109 | 109 | ||
| 110 | static int dccpprobe_open(struct inode *inode, struct file *file) | 110 | static int dccpprobe_open(struct inode *inode, struct file *file) |
| 111 | { | 111 | { |
| 112 | kfifo_reset(dccpw.fifo); | 112 | kfifo_reset(&dccpw.fifo); |
| 113 | getnstimeofday(&dccpw.tstart); | 113 | getnstimeofday(&dccpw.tstart); |
| 114 | return 0; | 114 | return 0; |
| 115 | } | 115 | } |
| @@ -131,11 +131,11 @@ static ssize_t dccpprobe_read(struct file *file, char __user *buf, | |||
| 131 | return -ENOMEM; | 131 | return -ENOMEM; |
| 132 | 132 | ||
| 133 | error = wait_event_interruptible(dccpw.wait, | 133 | error = wait_event_interruptible(dccpw.wait, |
| 134 | __kfifo_len(dccpw.fifo) != 0); | 134 | kfifo_len(&dccpw.fifo) != 0); |
| 135 | if (error) | 135 | if (error) |
| 136 | goto out_free; | 136 | goto out_free; |
| 137 | 137 | ||
| 138 | cnt = kfifo_get(dccpw.fifo, tbuf, len); | 138 | cnt = kfifo_out_locked(&dccpw.fifo, tbuf, len, &dccpw.lock); |
| 139 | error = copy_to_user(buf, tbuf, cnt) ? -EFAULT : 0; | 139 | error = copy_to_user(buf, tbuf, cnt) ? -EFAULT : 0; |
| 140 | 140 | ||
| 141 | out_free: | 141 | out_free: |
| @@ -156,10 +156,8 @@ static __init int dccpprobe_init(void) | |||
| 156 | 156 | ||
| 157 | init_waitqueue_head(&dccpw.wait); | 157 | init_waitqueue_head(&dccpw.wait); |
| 158 | spin_lock_init(&dccpw.lock); | 158 | spin_lock_init(&dccpw.lock); |
| 159 | dccpw.fifo = kfifo_alloc(bufsize, GFP_KERNEL, &dccpw.lock); | 159 | if (kfifo_alloc(&dccpw.fifo, bufsize, GFP_KERNEL)) |
| 160 | if (IS_ERR(dccpw.fifo)) | 160 | return ret; |
| 161 | return PTR_ERR(dccpw.fifo); | ||
| 162 | |||
| 163 | if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops)) | 161 | if (!proc_net_fops_create(&init_net, procname, S_IRUSR, &dccpprobe_fops)) |
| 164 | goto err0; | 162 | goto err0; |
| 165 | 163 | ||
| @@ -172,14 +170,14 @@ static __init int dccpprobe_init(void) | |||
| 172 | err1: | 170 | err1: |
| 173 | proc_net_remove(&init_net, procname); | 171 | proc_net_remove(&init_net, procname); |
| 174 | err0: | 172 | err0: |
| 175 | kfifo_free(dccpw.fifo); | 173 | kfifo_free(&dccpw.fifo); |
| 176 | return ret; | 174 | return ret; |
| 177 | } | 175 | } |
| 178 | module_init(dccpprobe_init); | 176 | module_init(dccpprobe_init); |
| 179 | 177 | ||
| 180 | static __exit void dccpprobe_exit(void) | 178 | static __exit void dccpprobe_exit(void) |
| 181 | { | 179 | { |
| 182 | kfifo_free(dccpw.fifo); | 180 | kfifo_free(&dccpw.fifo); |
| 183 | proc_net_remove(&init_net, procname); | 181 | proc_net_remove(&init_net, procname); |
| 184 | unregister_jprobe(&dccp_send_probe); | 182 | unregister_jprobe(&dccp_send_probe); |
| 185 | 183 | ||
diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c index 8346938809b1..9a6c58881c0a 100644 --- a/security/tomoyo/file.c +++ b/security/tomoyo/file.c | |||
| @@ -12,7 +12,6 @@ | |||
| 12 | #include "common.h" | 12 | #include "common.h" |
| 13 | #include "tomoyo.h" | 13 | #include "tomoyo.h" |
| 14 | #include "realpath.h" | 14 | #include "realpath.h" |
| 15 | #define ACC_MODE(x) ("\000\004\002\006"[(x)&O_ACCMODE]) | ||
| 16 | 15 | ||
| 17 | /* | 16 | /* |
| 18 | * tomoyo_globally_readable_file_entry is a structure which is used for holding | 17 | * tomoyo_globally_readable_file_entry is a structure which is used for holding |
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index e1f2bf8d7b1e..b5af88167613 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c | |||
| @@ -1177,7 +1177,7 @@ static struct file_operations kvm_vcpu_fops = { | |||
| 1177 | */ | 1177 | */ |
| 1178 | static int create_vcpu_fd(struct kvm_vcpu *vcpu) | 1178 | static int create_vcpu_fd(struct kvm_vcpu *vcpu) |
| 1179 | { | 1179 | { |
| 1180 | return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0); | 1180 | return anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, O_RDWR); |
| 1181 | } | 1181 | } |
| 1182 | 1182 | ||
| 1183 | /* | 1183 | /* |
| @@ -1638,7 +1638,7 @@ static int kvm_dev_ioctl_create_vm(void) | |||
| 1638 | kvm = kvm_create_vm(); | 1638 | kvm = kvm_create_vm(); |
| 1639 | if (IS_ERR(kvm)) | 1639 | if (IS_ERR(kvm)) |
| 1640 | return PTR_ERR(kvm); | 1640 | return PTR_ERR(kvm); |
| 1641 | fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, 0); | 1641 | fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, O_RDWR); |
| 1642 | if (fd < 0) | 1642 | if (fd < 0) |
| 1643 | kvm_put_kvm(kvm); | 1643 | kvm_put_kvm(kvm); |
| 1644 | 1644 | ||
